-rw-r--r--drivers/acpi/apei/einj.c5
-rw-r--r--drivers/acpi/apei/erst.c4
-rw-r--r--drivers/acpi/apei/ghes.c73
-rw-r--r--drivers/acpi/apei/hest.c5
-rw-r--r--drivers/acpi/arm64/gtdt.c4
-rw-r--r--drivers/acpi/arm64/iort.c126
-rw-r--r--drivers/acpi/button.c1
-rw-r--r--drivers/acpi/cppc_acpi.c5
-rw-r--r--drivers/acpi/device_pm.c33
-rw-r--r--drivers/acpi/dptf/dptf_power.c147
-rw-r--r--drivers/acpi/ec.c23
-rw-r--r--drivers/acpi/evged.c22
-rw-r--r--drivers/acpi/numa/srat.c1
-rw-r--r--drivers/acpi/pci_link.c2
-rw-r--r--drivers/acpi/pci_mcfg.c8
-rw-r--r--drivers/acpi/pci_root.c11
-rw-r--r--drivers/acpi/pmic/intel_pmic_chtdc_ti.c1
-rw-r--r--drivers/acpi/power.c2
-rw-r--r--drivers/acpi/processor_idle.c9
-rw-r--r--drivers/acpi/sbs.c3
-rw-r--r--drivers/acpi/scan.c46
-rw-r--r--drivers/acpi/sleep.c24
-rw-r--r--drivers/acpi/sysfs.c4
-rw-r--r--drivers/acpi/utils.c25
-rw-r--r--drivers/acpi/video_detect.c10
-rw-r--r--drivers/amba/bus.c14
-rw-r--r--drivers/android/binder_alloc.c14
-rw-r--r--drivers/android/binderfs.c4
-rw-r--r--drivers/ata/libata-core.c2
-rw-r--r--drivers/ata/libata-scsi.c30
-rw-r--r--drivers/atm/Kconfig4
-rw-r--r--drivers/atm/fore200e.c2
-rw-r--r--drivers/base/Kconfig3
-rw-r--r--drivers/base/base.h1
-rw-r--r--drivers/base/core.c275
-rw-r--r--drivers/base/cpu.c8
-rw-r--r--drivers/base/dd.c33
-rw-r--r--drivers/base/firmware_loader/fallback.c15
-rw-r--r--drivers/base/firmware_loader/fallback.h8
-rw-r--r--drivers/base/firmware_loader/fallback_platform.c2
-rw-r--r--drivers/base/firmware_loader/fallback_table.c2
-rw-r--r--drivers/base/firmware_loader/firmware.h3
-rw-r--r--drivers/base/firmware_loader/main.c14
-rw-r--r--drivers/base/memory.c44
-rw-r--r--drivers/base/node.c8
-rw-r--r--drivers/base/platform-msi.c2
-rw-r--r--drivers/base/platform.c52
-rw-r--r--drivers/base/power/main.c350
-rw-r--r--drivers/base/power/runtime.c6
-rw-r--r--drivers/base/power/sysfs.c4
-rw-r--r--drivers/base/property.c13
-rw-r--r--drivers/base/regmap/regmap-debugfs.c6
-rw-r--r--drivers/base/regmap/regmap-i2c.c61
-rw-r--r--drivers/base/regmap/regmap-irq.c84
-rw-r--r--drivers/base/regmap/regmap.c23
-rw-r--r--drivers/base/soc.c2
-rw-r--r--drivers/base/swnode.c71
-rw-r--r--drivers/base/test/Kconfig3
-rw-r--r--drivers/block/Kconfig2
-rw-r--r--drivers/block/Makefile1
-rw-r--r--drivers/block/aoe/aoeblk.c1
-rw-r--r--drivers/block/drbd/drbd_bitmap.c4
-rw-r--r--drivers/block/drbd/drbd_int.h28
-rw-r--r--drivers/block/drbd/drbd_main.c2
-rw-r--r--drivers/block/drbd/drbd_receiver.c13
-rw-r--r--drivers/block/drbd/drbd_req.c27
-rw-r--r--drivers/block/drbd/drbd_worker.c6
-rw-r--r--drivers/block/floppy.c466
-rw-r--r--drivers/block/loop.c391
-rw-r--r--drivers/block/null_blk_main.c28
-rw-r--r--drivers/block/null_blk_zoned.c37
-rw-r--r--drivers/block/paride/pcd.c2
-rw-r--r--drivers/block/ps3disk.c1
-rw-r--r--drivers/block/rbd.c44
-rw-r--r--drivers/block/rbd_types.h2
-rw-r--r--drivers/block/rnbd/Kconfig28
-rw-r--r--drivers/block/rnbd/Makefile15
-rw-r--r--drivers/block/rnbd/README92
-rw-r--r--drivers/block/rnbd/rnbd-clt-sysfs.c639
-rw-r--r--drivers/block/rnbd/rnbd-clt.c1729
-rw-r--r--drivers/block/rnbd/rnbd-clt.h156
-rw-r--r--drivers/block/rnbd/rnbd-common.c23
-rw-r--r--drivers/block/rnbd/rnbd-log.h41
-rw-r--r--drivers/block/rnbd/rnbd-proto.h303
-rw-r--r--drivers/block/rnbd/rnbd-srv-dev.c134
-rw-r--r--drivers/block/rnbd/rnbd-srv-dev.h92
-rw-r--r--drivers/block/rnbd/rnbd-srv-sysfs.c215
-rw-r--r--drivers/block/rnbd/rnbd-srv.c844
-rw-r--r--drivers/block/rnbd/rnbd-srv.h78
-rw-r--r--drivers/block/rsxx/dev.c19
-rw-r--r--drivers/block/swim.c6
-rw-r--r--drivers/block/z2ram.c2
-rw-r--r--drivers/block/zram/zcomp.c51
-rw-r--r--drivers/block/zram/zcomp.h5
-rw-r--r--drivers/block/zram/zram_drv.c24
-rw-r--r--drivers/bluetooth/btbcm.c142
-rw-r--r--drivers/bluetooth/btbcm.h10
-rw-r--r--drivers/bluetooth/btmrvl_sdio.c18
-rw-r--r--drivers/bluetooth/btmtksdio.c4
-rw-r--r--drivers/bluetooth/btmtkuart.c17
-rw-r--r--drivers/bluetooth/btqca.c32
-rw-r--r--drivers/bluetooth/btqca.h3
-rw-r--r--drivers/bluetooth/btrtl.c10
-rw-r--r--drivers/bluetooth/btusb.c205
-rw-r--r--drivers/bluetooth/hci_bcm.c35
-rw-r--r--drivers/bluetooth/hci_h5.c2
-rw-r--r--drivers/bluetooth/hci_qca.c178
-rw-r--r--drivers/bluetooth/hci_serdev.c4
-rw-r--r--drivers/bus/Kconfig41
-rw-r--r--drivers/bus/Makefile4
-rw-r--r--drivers/bus/arm-integrator-lm.c128
-rw-r--r--drivers/bus/bt1-apb.c421
-rw-r--r--drivers/bus/bt1-axi.c314
-rw-r--r--drivers/bus/mhi/core/boot.c75
-rw-r--r--drivers/bus/mhi/core/init.c8
-rw-r--r--drivers/bus/mhi/core/internal.h9
-rw-r--r--drivers/bus/mhi/core/main.c197
-rw-r--r--drivers/bus/mhi/core/pm.c229
-rw-r--r--drivers/bus/ti-sysc.c25
-rw-r--r--drivers/bus/vexpress-config.c354
-rw-r--r--drivers/cdrom/cdrom.c89
-rw-r--r--drivers/cdrom/gdrom.c2
-rw-r--r--drivers/char/Kconfig2
-rw-r--r--drivers/char/agp/frontend.c1
-rw-r--r--drivers/char/agp/generic.c1
-rw-r--r--drivers/char/agp/intel-gtt.c21
-rw-r--r--drivers/char/bsr.c1
-rw-r--r--drivers/char/hw_random/Kconfig13
-rw-r--r--drivers/char/hw_random/Makefile1
-rw-r--r--drivers/char/hw_random/cctrng.c735
-rw-r--r--drivers/char/hw_random/cctrng.h72
-rw-r--r--drivers/char/hw_random/omap-rng.c5
-rw-r--r--drivers/char/hw_random/optee-rng.c2
-rw-r--r--drivers/char/hw_random/xgene-rng.c4
-rw-r--r--drivers/char/ipmi/Kconfig2
-rw-r--r--drivers/char/ipmi/bt-bmc.c21
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c9
-rw-r--r--drivers/char/ipmi/ipmi_si_hotmod.c2
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c2
-rw-r--r--drivers/char/ipmi/ipmi_si_platform.c2
-rw-r--r--drivers/char/ipmi/ipmi_ssif.c24
-rw-r--r--drivers/char/mem.c101
-rw-r--r--drivers/char/mspec.c3
-rw-r--r--drivers/char/nvram.c4
-rw-r--r--drivers/char/pcmcia/cm4000_cs.c14
-rw-r--r--drivers/char/random.c12
-rw-r--r--drivers/char/tlclk.c17
-rw-r--r--drivers/char/tpm/eventlog/tpm2.c12
-rw-r--r--drivers/char/tpm/tpm_ftpm_tee.c2
-rw-r--r--drivers/char/virtio_console.c2
-rw-r--r--drivers/clk/Kconfig33
-rw-r--r--drivers/clk/Makefile8
-rw-r--r--drivers/clk/at91/at91rm9200.c12
-rw-r--r--drivers/clk/at91/at91sam9260.c13
-rw-r--r--drivers/clk/at91/at91sam9g45.c10
-rw-r--r--drivers/clk/at91/at91sam9n12.c12
-rw-r--r--drivers/clk/at91/at91sam9rl.c10
-rw-r--r--drivers/clk/at91/at91sam9x5.c10
-rw-r--r--drivers/clk/at91/pmc.c47
-rw-r--r--drivers/clk/at91/pmc.h8
-rw-r--r--drivers/clk/at91/sam9x60.c10
-rw-r--r--drivers/clk/at91/sama5d2.c13
-rw-r--r--drivers/clk/at91/sama5d3.c10
-rw-r--r--drivers/clk/at91/sama5d4.c10
-rw-r--r--drivers/clk/baikal-t1/Kconfig42
-rw-r--r--drivers/clk/baikal-t1/Makefile3
-rw-r--r--drivers/clk/baikal-t1/ccu-div.c602
-rw-r--r--drivers/clk/baikal-t1/ccu-div.h110
-rw-r--r--drivers/clk/baikal-t1/ccu-pll.c558
-rw-r--r--drivers/clk/baikal-t1/ccu-pll.h64
-rw-r--r--drivers/clk/baikal-t1/clk-ccu-div.c485
-rw-r--r--drivers/clk/baikal-t1/clk-ccu-pll.c204
-rw-r--r--drivers/clk/bcm/clk-bcm2835.c80
-rw-r--r--drivers/clk/clk-ast2600.c31
-rw-r--r--drivers/clk/clk-hsdk-pll.c70
-rw-r--r--drivers/clk/clk-qoriq.c30
-rw-r--r--drivers/clk/clk-si5341.c69
-rw-r--r--drivers/clk/clk-versaclock5.c11
-rw-r--r--drivers/clk/clk.c10
-rw-r--r--drivers/clk/imx/Kconfig8
-rw-r--r--drivers/clk/imx/clk-composite-8m.c56
-rw-r--r--drivers/clk/imx/clk-gate2.c31
-rw-r--r--drivers/clk/imx/clk-imx6ul.c2
-rw-r--r--drivers/clk/imx/clk-imx7ulp.c6
-rw-r--r--drivers/clk/imx/clk-imx8mm.c27
-rw-r--r--drivers/clk/imx/clk-imx8mn.c25
-rw-r--r--drivers/clk/imx/clk-imx8mp.c148
-rw-r--r--drivers/clk/imx/clk-imx8mq.c29
-rw-r--r--drivers/clk/imx/clk-pll14xx.c8
-rw-r--r--drivers/clk/imx/clk-pllv3.c16
-rw-r--r--drivers/clk/imx/clk-sscg-pll.c10
-rw-r--r--drivers/clk/imx/clk.h62
-rw-r--r--drivers/clk/ingenic/Kconfig10
-rw-r--r--drivers/clk/ingenic/Makefile1
-rw-r--r--drivers/clk/ingenic/cgu.c28
-rw-r--r--drivers/clk/ingenic/cgu.h4
-rw-r--r--drivers/clk/ingenic/jz4725b-cgu.c4
-rw-r--r--drivers/clk/ingenic/jz4740-cgu.c4
-rw-r--r--drivers/clk/ingenic/jz4770-cgu.c8
-rw-r--r--drivers/clk/ingenic/jz4780-cgu.c3
-rw-r--r--drivers/clk/ingenic/tcu.c2
-rw-r--r--drivers/clk/ingenic/x1000-cgu.c123
-rw-r--r--drivers/clk/ingenic/x1830-cgu.c448
-rw-r--r--drivers/clk/mediatek/Kconfig93
-rw-r--r--drivers/clk/mediatek/Makefile8
-rw-r--r--drivers/clk/mediatek/clk-mt2701-mm.c9
-rw-r--r--drivers/clk/mediatek/clk-mt2712-mm.c9
-rw-r--r--drivers/clk/mediatek/clk-mt6765-audio.c100
-rw-r--r--drivers/clk/mediatek/clk-mt6765-cam.c74
-rw-r--r--drivers/clk/mediatek/clk-mt6765-img.c70
-rw-r--r--drivers/clk/mediatek/clk-mt6765-mipi0a.c68
-rw-r--r--drivers/clk/mediatek/clk-mt6765-mm.c96
-rw-r--r--drivers/clk/mediatek/clk-mt6765-vcodec.c70
-rw-r--r--drivers/clk/mediatek/clk-mt6765.c922
-rw-r--r--drivers/clk/mediatek/clk-mt6779-mm.c9
-rw-r--r--drivers/clk/mediatek/clk-mt6797-mm.c9
-rw-r--r--drivers/clk/mediatek/clk-mt8173-mm.c146
-rw-r--r--drivers/clk/mediatek/clk-mt8173.c104
-rw-r--r--drivers/clk/mediatek/clk-mt8183-mm.c9
-rw-r--r--drivers/clk/mediatek/clk-mux.c2
-rw-r--r--drivers/clk/meson/g12a.c30
-rw-r--r--drivers/clk/meson/gxbb.c40
-rw-r--r--drivers/clk/meson/meson8b.c120
-rw-r--r--drivers/clk/meson/meson8b.h5
-rw-r--r--drivers/clk/mmp/Makefile3
-rw-r--r--drivers/clk/mmp/clk-audio.c443
-rw-r--r--drivers/clk/mmp/clk-frac.c27
-rw-r--r--drivers/clk/mmp/clk-of-mmp2.c104
-rw-r--r--drivers/clk/mmp/clk.h11
-rw-r--r--drivers/clk/mmp/pwr-island.c115
-rw-r--r--drivers/clk/qcom/Kconfig8
-rw-r--r--drivers/clk/qcom/Makefile1
-rw-r--r--drivers/clk/qcom/gcc-msm8916.c8
-rw-r--r--drivers/clk/qcom/gcc-msm8939.c3988
-rw-r--r--drivers/clk/qcom/gcc-msm8998.c27
-rw-r--r--drivers/clk/qcom/gcc-sc7180.c94
-rw-r--r--drivers/clk/qcom/gcc-sm8150.c148
-rw-r--r--drivers/clk/qcom/gdsc.c23
-rw-r--r--drivers/clk/qcom/gdsc.h4
-rw-r--r--drivers/clk/qcom/mmcc-msm8996.c2
-rw-r--r--drivers/clk/renesas/Kconfig8
-rw-r--r--drivers/clk/renesas/Makefile1
-rw-r--r--drivers/clk/renesas/r8a7742-cpg-mssr.c275
-rw-r--r--drivers/clk/renesas/r9a06g032-clocks.c6
-rw-r--r--drivers/clk/renesas/renesas-cpg-mssr.c14
-rw-r--r--drivers/clk/renesas/renesas-cpg-mssr.h1
-rw-r--r--drivers/clk/samsung/clk-exynos5420.c18
-rw-r--r--drivers/clk/samsung/clk-exynos5433.c3
-rw-r--r--drivers/clk/samsung/clk-s3c2443.c2
-rw-r--r--drivers/clk/socfpga/Makefile2
-rw-r--r--drivers/clk/socfpga/clk-agilex.c454
-rw-r--r--drivers/clk/socfpga/clk-gate-s10.c5
-rw-r--r--drivers/clk/socfpga/clk-periph-s10.c10
-rw-r--r--drivers/clk/socfpga/clk-pll-a10.c4
-rw-r--r--drivers/clk/socfpga/clk-pll-s10.c78
-rw-r--r--drivers/clk/socfpga/clk-pll.c4
-rw-r--r--drivers/clk/socfpga/clk-s10.c160
-rw-r--r--drivers/clk/socfpga/stratix10-clk.h10
-rw-r--r--drivers/clk/sprd/gate.c7
-rw-r--r--drivers/clk/sprd/gate.h9
-rw-r--r--drivers/clk/sprd/pll.c2
-rw-r--r--drivers/clk/sprd/sc9863a-clk.c64
-rw-r--r--drivers/clk/st/clk-flexgen.c1
-rw-r--r--drivers/clk/sunxi/clk-sunxi.c2
-rw-r--r--drivers/clk/tegra/Kconfig4
-rw-r--r--drivers/clk/tegra/Makefile4
-rw-r--r--drivers/clk/tegra/clk-pll.c12
-rw-r--r--drivers/clk/tegra/clk-tegra-super-cclk.c212
-rw-r--r--drivers/clk/tegra/clk-tegra124-emc.c (renamed from drivers/clk/tegra/clk-emc.c)0
-rw-r--r--drivers/clk/tegra/clk-tegra20.c7
-rw-r--r--drivers/clk/tegra/clk-tegra210-emc.c369
-rw-r--r--drivers/clk/tegra/clk-tegra210.c94
-rw-r--r--drivers/clk/tegra/clk-tegra30.c6
-rw-r--r--drivers/clk/tegra/clk.h24
-rw-r--r--drivers/clk/ti/clk-44xx.c14
-rw-r--r--drivers/clk/ti/clk-54xx.c14
-rw-r--r--drivers/clk/ti/clk-7xx.c15
-rw-r--r--drivers/clk/ti/clk-816x.c1
-rw-r--r--drivers/clk/ti/composite.c1
-rw-r--r--drivers/clk/versatile/Kconfig21
-rw-r--r--drivers/clk/versatile/clk-impd1.c121
-rw-r--r--drivers/clk/versatile/clk-versatile.c2
-rw-r--r--drivers/clk/versatile/clk-vexpress-osc.c20
-rw-r--r--drivers/clk/x86/Kconfig8
-rw-r--r--drivers/clk/x86/Makefile1
-rw-r--r--drivers/clk/x86/clk-cgu-pll.c156
-rw-r--r--drivers/clk/x86/clk-cgu.c636
-rw-r--r--drivers/clk/x86/clk-cgu.h335
-rw-r--r--drivers/clk/x86/clk-lgm.c475
-rw-r--r--drivers/clk/zynqmp/clk-gate-zynqmp.c9
-rw-r--r--drivers/clk/zynqmp/clk-mux-zynqmp.c6
-rw-r--r--drivers/clk/zynqmp/clk-zynqmp.h1
-rw-r--r--drivers/clk/zynqmp/clkc.c41
-rw-r--r--drivers/clk/zynqmp/divider.c39
-rw-r--r--drivers/clk/zynqmp/pll.c29
-rw-r--r--drivers/clocksource/Kconfig8
-rw-r--r--drivers/clocksource/Makefile1
-rw-r--r--drivers/clocksource/arc_timer.c4
-rw-r--r--drivers/clocksource/arm_arch_timer.c4
-rw-r--r--drivers/clocksource/dw_apb_timer.c5
-rw-r--r--drivers/clocksource/dw_apb_timer_of.c8
-rw-r--r--drivers/clocksource/mips-gic-timer.c50
-rw-r--r--drivers/clocksource/timer-atmel-st.c3
-rw-r--r--drivers/clocksource/timer-davinci.c24
-rw-r--r--drivers/clocksource/timer-imx-tpm.c8
-rw-r--r--drivers/clocksource/timer-riscv.c43
-rw-r--r--drivers/clocksource/timer-ti-32k.c48
-rw-r--r--drivers/clocksource/timer-ti-dm-systimer.c727
-rw-r--r--drivers/clocksource/timer-ti-dm.c4
-rw-r--r--drivers/clocksource/timer-versatile.c3
-rw-r--r--drivers/connector/cn_proc.c21
-rw-r--r--drivers/cpufreq/Kconfig3
-rw-r--r--drivers/cpufreq/Kconfig.arm7
-rw-r--r--drivers/cpufreq/acpi-cpufreq.c14
-rw-r--r--drivers/cpufreq/cppc_cpufreq.c39
-rw-r--r--drivers/cpufreq/cpufreq-dt-platdev.c2
-rw-r--r--drivers/cpufreq/cpufreq-dt.c4
-rw-r--r--drivers/cpufreq/cpufreq.c58
-rw-r--r--drivers/cpufreq/imx-cpufreq-dt.c84
-rw-r--r--drivers/cpufreq/intel_pstate.c3
-rw-r--r--drivers/cpufreq/loongson2_cpufreq.c22
-rw-r--r--drivers/cpufreq/qcom-cpufreq-nvmem.c2
-rw-r--r--drivers/cpufreq/qoriq-cpufreq.c76
-rw-r--r--drivers/cpufreq/tegra186-cpufreq.c3
-rw-r--r--drivers/cpufreq/tegra20-cpufreq.c217
-rw-r--r--drivers/cpuidle/Kconfig.arm13
-rw-r--r--drivers/cpuidle/Makefile1
-rw-r--r--drivers/cpuidle/cpuidle-psci.c8
-rw-r--r--drivers/cpuidle/cpuidle-pseries.c39
-rw-r--r--drivers/cpuidle/cpuidle-qcom-spm.c (renamed from drivers/soc/qcom/spm.c)138
-rw-r--r--drivers/cpuidle/cpuidle-tegra.c1
-rw-r--r--drivers/cpuidle/sysfs.c73
-rw-r--r--drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c2
-rw-r--r--drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c2
-rw-r--r--drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c2
-rw-r--r--drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c6
-rw-r--r--drivers/crypto/amlogic/amlogic-gxl-core.c4
-rw-r--r--drivers/crypto/atmel-sha.c1
-rw-r--r--drivers/crypto/axis/artpec6_crypto.c10
-rw-r--r--drivers/crypto/bcm/cipher.c27
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_main.c6
-rw-r--r--drivers/crypto/ccp/Kconfig3
-rw-r--r--drivers/crypto/ccp/ccp-crypto-sha.c9
-rw-r--r--drivers/crypto/ccp/sev-dev.c58
-rw-r--r--drivers/crypto/ccree/cc_cipher.c9
-rw-r--r--drivers/crypto/ccree/cc_debugfs.c4
-rw-r--r--drivers/crypto/chelsio/chcr_algo.c103
-rw-r--r--drivers/crypto/chelsio/chcr_algo.h4
-rw-r--r--drivers/crypto/chelsio/chcr_core.c23
-rw-r--r--drivers/crypto/chelsio/chcr_core.h10
-rw-r--r--drivers/crypto/chelsio/chcr_crypto.h1
-rw-r--r--drivers/crypto/chelsio/chcr_ipsec.c6
-rw-r--r--drivers/crypto/chelsio/chcr_ktls.c107
-rw-r--r--drivers/crypto/chelsio/chcr_ktls.h9
-rw-r--r--drivers/crypto/chelsio/chtls/chtls_cm.c221
-rw-r--r--drivers/crypto/chelsio/chtls/chtls_cm.h1
-rw-r--r--drivers/crypto/chelsio/chtls/chtls_main.c16
-rw-r--r--drivers/crypto/hisilicon/Kconfig4
-rw-r--r--drivers/crypto/hisilicon/hpre/hpre.h18
-rw-r--r--drivers/crypto/hisilicon/hpre/hpre_crypto.c99
-rw-r--r--drivers/crypto/hisilicon/hpre/hpre_main.c355
-rw-r--r--drivers/crypto/hisilicon/qm.c2433
-rw-r--r--drivers/crypto/hisilicon/qm.h120
-rw-r--r--drivers/crypto/hisilicon/sec2/sec.h5
-rw-r--r--drivers/crypto/hisilicon/sec2/sec_crypto.c20
-rw-r--r--drivers/crypto/hisilicon/sec2/sec_main.c379
-rw-r--r--drivers/crypto/hisilicon/zip/zip.h8
-rw-r--r--drivers/crypto/hisilicon/zip/zip_crypto.c20
-rw-r--r--drivers/crypto/hisilicon/zip/zip_main.c362
-rw-r--r--drivers/crypto/marvell/octeontx/otx_cptpf_main.c4
-rw-r--r--drivers/crypto/marvell/octeontx/otx_cptpf_mbox.c12
-rw-r--r--drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c95
-rw-r--r--drivers/crypto/marvell/octeontx/otx_cptvf_algs.c6
-rw-r--r--drivers/crypto/marvell/octeontx/otx_cptvf_main.c12
-rw-r--r--drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c10
-rw-r--r--drivers/crypto/mediatek/mtk-sha.c7
-rw-r--r--drivers/crypto/n2_core.c7
-rw-r--r--drivers/crypto/nx/Makefile2
-rw-r--r--drivers/crypto/nx/nx-common-powernv.c (renamed from drivers/crypto/nx/nx-842-powernv.c)204
-rw-r--r--drivers/crypto/omap-aes-gcm.c1
-rw-r--r--drivers/crypto/omap-aes.c8
-rw-r--r--drivers/crypto/omap-crypto.c10
-rw-r--r--drivers/crypto/omap-sham.c122
-rw-r--r--drivers/crypto/s5p-sss.c39
-rw-r--r--drivers/crypto/stm32/stm32-crc32.c230
-rw-r--r--drivers/crypto/stm32/stm32-hash.c38
-rw-r--r--drivers/crypto/virtio/virtio_crypto_algs.c21
-rw-r--r--drivers/crypto/xilinx/zynqmp-aes-gcm.c22
-rw-r--r--drivers/dax/dax-private.h1
-rw-r--r--drivers/dax/device.c1
-rw-r--r--drivers/dax/kmem.c28
-rw-r--r--drivers/dca/dca-sysfs.c4
-rw-r--r--drivers/devfreq/Kconfig8
-rw-r--r--drivers/devfreq/Makefile1
-rw-r--r--drivers/devfreq/devfreq.c19
-rw-r--r--drivers/devfreq/imx-bus.c179
-rw-r--r--drivers/devfreq/tegra30-devfreq.c7
-rw-r--r--drivers/dma-buf/Makefile3
-rw-r--r--drivers/dma-buf/dma-buf.c2
-rw-r--r--drivers/dma-buf/dma-fence-chain.c10
-rw-r--r--drivers/dma-buf/dma-fence.c2
-rw-r--r--drivers/dma-buf/dma-resv.c5
-rw-r--r--drivers/dma-buf/selftests.h1
-rw-r--r--drivers/dma-buf/st-dma-fence-chain.c715
-rw-r--r--drivers/dma/Kconfig4
-rw-r--r--drivers/dma/at_hdmac_regs.h2
-rw-r--r--drivers/dma/at_xdmac.c2
-rw-r--r--drivers/dma/dmaengine.c98
-rw-r--r--drivers/dma/dmatest.c24
-rw-r--r--drivers/dma/dw-edma/dw-edma-core.c65
-rw-r--r--drivers/dma/dw-edma/dw-edma-core.h4
-rw-r--r--drivers/dma/dw-edma/dw-edma-pcie.c10
-rw-r--r--drivers/dma/idxd/sysfs.c11
-rw-r--r--drivers/dma/imx-sdma.c2
-rw-r--r--drivers/dma/ioat/dma.c85
-rw-r--r--drivers/dma/ioat/dma.h10
-rw-r--r--drivers/dma/ioat/init.c2
-rw-r--r--drivers/dma/mmp_tdma.c26
-rw-r--r--drivers/dma/moxart-dma.c2
-rw-r--r--drivers/dma/qcom/bam_dma.c2
-rw-r--r--drivers/dma/qcom/hidma.c3
-rw-r--r--drivers/dma/sf-pdma/sf-pdma.c25
-rw-r--r--drivers/dma/stm32-dma.c41
-rw-r--r--drivers/dma/ti/Kconfig4
-rw-r--r--drivers/dma/ti/k3-udma.c34
-rw-r--r--drivers/edac/amd64_edac.c2
-rw-r--r--drivers/edac/amd8131_edac.c8
-rw-r--r--drivers/edac/armada_xp_edac.c14
-rw-r--r--drivers/edac/i10nm_base.c29
-rw-r--r--drivers/edac/skx_base.c33
-rw-r--r--drivers/edac/skx_common.c17
-rw-r--r--drivers/edac/skx_common.h13
-rw-r--r--drivers/edac/thunderx_edac.c8
-rw-r--r--drivers/edac/xgene_edac.c3
-rw-r--r--drivers/extcon/extcon-adc-jack.c3
-rw-r--r--drivers/extcon/extcon-arizona.c17
-rw-r--r--drivers/extcon/extcon-max14577.c10
-rw-r--r--drivers/extcon/extcon.c2
-rw-r--r--drivers/firewire/core-cdev.c4
-rw-r--r--drivers/firmware/Kconfig9
-rw-r--r--drivers/firmware/Makefile3
-rw-r--r--drivers/firmware/arm_scmi/Makefile4
-rw-r--r--drivers/firmware/arm_scmi/base.c7
-rw-r--r--drivers/firmware/arm_scmi/common.h11
-rw-r--r--drivers/firmware/arm_scmi/driver.c133
-rw-r--r--drivers/firmware/arm_scmi/mailbox.c17
-rw-r--r--drivers/firmware/arm_scmi/perf.c5
-rw-r--r--drivers/firmware/arm_scmi/power.c6
-rw-r--r--drivers/firmware/arm_scmi/sensors.c4
-rw-r--r--drivers/firmware/arm_scmi/shmem.c15
-rw-r--r--drivers/firmware/arm_scmi/smc.c153
-rw-r--r--drivers/firmware/arm_sdei.c49
-rw-r--r--drivers/firmware/dmi-id.c6
-rw-r--r--drivers/firmware/dmi_scan.c30
-rw-r--r--drivers/firmware/efi/Kconfig15
-rw-r--r--drivers/firmware/efi/arm-init.c4
-rw-r--r--drivers/firmware/efi/arm-runtime.c2
-rw-r--r--drivers/firmware/efi/efi.c46
-rw-r--r--drivers/firmware/efi/efivars.c4
-rw-r--r--drivers/firmware/efi/libstub/Makefile50
-rw-r--r--drivers/firmware/efi/libstub/alignedmem.c57
-rw-r--r--drivers/firmware/efi/libstub/arm32-stub.c53
-rw-r--r--drivers/firmware/efi/libstub/arm64-stub.c106
-rw-r--r--drivers/firmware/efi/libstub/efi-stub-helper.c381
-rw-r--r--drivers/firmware/efi/libstub/efi-stub.c (renamed from drivers/firmware/efi/libstub/arm-stub.c)93
-rw-r--r--drivers/firmware/efi/libstub/efistub.h191
-rw-r--r--drivers/firmware/efi/libstub/fdt.c24
-rw-r--r--drivers/firmware/efi/libstub/file.c48
-rw-r--r--drivers/firmware/efi/libstub/gop.c583
-rw-r--r--drivers/firmware/efi/libstub/mem.c191
-rw-r--r--drivers/firmware/efi/libstub/pci.c10
-rw-r--r--drivers/firmware/efi/libstub/randomalloc.c6
-rw-r--r--drivers/firmware/efi/libstub/relocate.c174
-rw-r--r--drivers/firmware/efi/libstub/secureboot.c4
-rw-r--r--drivers/firmware/efi/libstub/tpm.c2
-rw-r--r--drivers/firmware/efi/libstub/vsprintf.c564
-rw-r--r--drivers/firmware/efi/libstub/x86-stub.c155
-rw-r--r--drivers/firmware/efi/test/efi_test.c12
-rw-r--r--drivers/firmware/imx/imx-scu.c64
-rw-r--r--drivers/firmware/psci/psci.c21
-rw-r--r--drivers/firmware/qcom_scm-legacy.c2
-rw-r--r--drivers/firmware/qcom_scm.c11
-rw-r--r--drivers/firmware/raspberrypi.c73
-rw-r--r--drivers/firmware/smccc/Kconfig16
-rw-r--r--drivers/firmware/smccc/Makefile3
-rw-r--r--drivers/firmware/smccc/smccc.c31
-rw-r--r--drivers/firmware/stratix10-rsu.c10
-rw-r--r--drivers/firmware/stratix10-svc.c62
-rw-r--r--drivers/firmware/tegra/bpmp-tegra186.c4
-rw-r--r--drivers/firmware/tegra/bpmp.c9
-rw-r--r--drivers/firmware/trusted_foundations.c21
-rw-r--r--drivers/firmware/xilinx/zynqmp-debug.c5
-rw-r--r--drivers/firmware/xilinx/zynqmp.c607
-rw-r--r--drivers/fpga/Kconfig2
-rw-r--r--drivers/fpga/Makefile1
-rw-r--r--drivers/fpga/dfl-afu-dma-region.c8
-rw-r--r--drivers/fpga/dfl-afu-main.c35
-rw-r--r--drivers/fpga/dfl-fme-main.c23
-rw-r--r--drivers/fpga/dfl-fme-perf.c1020
-rw-r--r--drivers/fpga/dfl-fme-pr.c4
-rw-r--r--drivers/fpga/dfl-fme.h2
-rw-r--r--drivers/fpga/dfl.c15
-rw-r--r--drivers/fpga/dfl.h39
-rw-r--r--drivers/fpga/ice40-spi.c10
-rw-r--r--drivers/fpga/machxo2-spi.c12
-rw-r--r--drivers/fpga/stratix10-soc.c28
-rw-r--r--drivers/fpga/zynqmp-fpga.c14
-rw-r--r--drivers/gnss/serial.h2
-rw-r--r--drivers/gnss/sirf.c8
-rw-r--r--drivers/gpio/Kconfig24
-rw-r--r--drivers/gpio/Makefile2
-rw-r--r--drivers/gpio/TODO4
-rw-r--r--drivers/gpio/gpio-aggregator.c568
-rw-r--r--drivers/gpio/gpio-dwapb.c248
-rw-r--r--drivers/gpio/gpio-f7188x.c33
-rw-r--r--drivers/gpio/gpio-ftgpio010.c2
-rw-r--r--drivers/gpio/gpio-ich.c2
-rw-r--r--drivers/gpio/gpio-max730x.c12
-rw-r--r--drivers/gpio/gpio-mb86s7x.c28
-rw-r--r--drivers/gpio/gpio-merrifield.c10
-rw-r--r--drivers/gpio/gpio-mlxbf2.c5
-rw-r--r--drivers/gpio/gpio-mm-lantiq.c2
-rw-r--r--drivers/gpio/gpio-mockup.c53
-rw-r--r--drivers/gpio/gpio-pca953x.c96
-rw-r--r--drivers/gpio/gpio-pch.c73
-rw-r--r--drivers/gpio/gpio-pl061.c9
-rw-r--r--drivers/gpio/gpio-rcar.c4
-rw-r--r--drivers/gpio/gpio-regmap.c349
-rw-r--r--drivers/gpio/gpio-tegra186.c1
-rw-r--r--drivers/gpio/gpio-xgene-sb.c14
-rw-r--r--drivers/gpio/gpiolib-acpi.c6
-rw-r--r--drivers/gpio/gpiolib-devprop.c5
-rw-r--r--drivers/gpio/gpiolib-of.c31
-rw-r--r--drivers/gpio/gpiolib.c165
-rw-r--r--drivers/gpio/gpiolib.h27
-rw-r--r--drivers/gpu/drm/Kconfig4
-rw-r--r--drivers/gpu/drm/Makefile7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Makefile2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h81
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c66
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c160
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c151
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c415
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_df.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c68
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c61
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c37
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c185
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c55
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c60
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c47
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c91
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.h11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c728
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h49
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c447
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h56
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c169
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h62
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_test.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c408
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c107
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c103
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c120
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c142
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c146
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atom.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_sdma.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cikd.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v6_0.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_virtual.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/df_v3_6.c54
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c2883
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c35
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c69
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c78
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c195
-rwxr-xr-x[-rw-r--r--]drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c35
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c4
-rwxr-xr-x[-rw-r--r--]drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c97
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h46
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/navi10_ih.c302
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c36
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nv.c28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nvd.h49
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v10_0.c145
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v11_0.c419
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v12_0.c172
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v3_1.c262
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c47
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c129
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dma.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dpm.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15_common.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15d.h26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ta_ras_if.h59
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v6_1.c112
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v2_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v3_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v4_0.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c80
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c471
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vid.h2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c55
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_crat.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c40
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c47
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_events.c4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_iommu.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c6
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h23
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c62
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c9
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c11
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.h6
-rw-r--r--drivers/gpu/drm/amd/display/Kconfig8
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c417
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c27
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c73
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c42
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c88
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/Makefile3
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table2.c62
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c21
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c60
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c644
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c22
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c107
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_sink.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h100
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dp_types.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_helper.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_link.h30
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_stream.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_types.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_abm.c225
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_abm.h30
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c33
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h65
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c20
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c299
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.h125
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c319
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_abm.h (renamed from drivers/gpu/drm/amd/display/dc/basics/log_helpers.c)25
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c76
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c31
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c110
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_csc_v.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c31
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c33
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c33
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c28
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c31
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c18
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c124
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c20
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c83
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c21
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c40
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c288
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c33
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c27
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c122
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c32
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c30
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c17
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c89
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_types.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/abm.h21
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h67
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/transform.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h10
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/resource.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c9
-rw-r--r--drivers/gpu/drm/amd/display/dmub/dmub_srv.h (renamed from drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h)23
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h14
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h15
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_rb.h6
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_types.h11
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c26
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c2
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_reg.c2
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c33
-rw-r--r--drivers/gpu/drm/amd/display/include/hdcp_types.h7
-rw-r--r--drivers/gpu/drm/amd/display/include/logger_interface.h4
-rw-r--r--drivers/gpu/drm/amd/display/modules/color/color_gamma.c33
-rw-r--r--drivers/gpu/drm/amd/display/modules/color/color_gamma.h4
-rw-r--r--drivers/gpu/drm/amd/display/modules/freesync/freesync.c101
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c5
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h30
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c2
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c2
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c8
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c482
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h3
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h5
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_shared.h5
-rw-r--r--drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c2
-rw-r--r--drivers/gpu/drm/amd/display/modules/power/power_helpers.c103
-rw-r--r--drivers/gpu/drm/amd/display/modules/power/power_helpers.h3
-rw-r--r--drivers/gpu/drm/amd/display/modules/stats/stats.c448
-rw-r--r--drivers/gpu/drm/amd/display/modules/vmid/vmid.c7
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h14
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_sh_mask.h4
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbif/nbif_6_1_offset.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_offset.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/pwr/pwr_10_0_offset.h27
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/pwr/pwr_10_0_sh_mask.h30
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_12_0_0_offset.h30
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_12_0_0_sh_mask.h33
-rw-r--r--drivers/gpu/drm/amd/include/atomfirmware.h114
-rw-r--r--drivers/gpu/drm/amd/include/cgs_common.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/amd_powerplay.c3
-rw-r--r--drivers/gpu/drm/amd/powerplay/amdgpu_smu.c328
-rw-r--r--drivers/gpu/drm/amd/powerplay/arcturus_ppt.c49
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c184
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h2
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c61
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c135
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c40
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c17
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c157
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c71
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c141
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c32
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c6
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega12_baco.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c149
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c7
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c8
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c181
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega20_powertune.c6
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c7
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h11
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h3
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/hwmgr.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h15
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu12_driver_if.h40
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu_types.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h5
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smumgr.h7
-rw-r--r--drivers/gpu/drm/amd/powerplay/navi10_ppt.c107
-rw-r--r--drivers/gpu/drm/amd/powerplay/renoir_ppt.c8
-rw-r--r--drivers/gpu/drm/amd/powerplay/smu_internal.h5
-rw-r--r--drivers/gpu/drm/amd/powerplay/smu_v11_0.c145
-rw-r--r--drivers/gpu/drm/amd/powerplay/smu_v12_0.c16
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c18
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c35
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c1
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c31
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c42
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c48
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h4
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c69
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c56
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c15
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c56
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c68
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c106
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c18
-rw-r--r--drivers/gpu/drm/amd/powerplay/vega20_ppt.c4
-rw-r--r--drivers/gpu/drm/arc/arcpgu_drv.c7
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_kms.c16
-rw-r--r--drivers/gpu/drm/arm/hdlcd_drv.c7
-rw-r--r--drivers/gpu/drm/arm/malidp_drv.c3
-rw-r--r--drivers/gpu/drm/armada/armada_drv.c4
-rw-r--r--drivers/gpu/drm/aspeed/aspeed_gfx.h3
-rw-r--r--drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c2
-rw-r--r--drivers/gpu/drm/aspeed/aspeed_gfx_drv.c31
-rw-r--r--drivers/gpu/drm/aspeed/aspeed_gfx_out.c2
-rw-r--r--drivers/gpu/drm/ast/ast_drv.c3
-rw-r--r--drivers/gpu/drm/ast/ast_main.c5
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c26
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c12
-rw-r--r--drivers/gpu/drm/bochs/bochs.h1
-rw-r--r--drivers/gpu/drm/bochs/bochs_drv.c6
-rw-r--r--drivers/gpu/drm/bochs/bochs_kms.c16
-rw-r--r--drivers/gpu/drm/bridge/Kconfig26
-rw-r--r--drivers/gpu/drm/bridge/Makefile2
-rw-r--r--drivers/gpu/drm/bridge/adv7511/Kconfig2
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_audio.c26
-rw-r--r--drivers/gpu/drm/bridge/chrontel-ch7033.c620
-rw-r--r--drivers/gpu/drm/bridge/nwl-dsi.c1213
-rw-r--r--drivers/gpu/drm/bridge/nwl-dsi.h144
-rw-r--r--drivers/gpu/drm/bridge/panel.c7
-rw-r--r--drivers/gpu/drm/bridge/parade-ps8640.c2
-rw-r--r--drivers/gpu/drm/bridge/sii9234.c3
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c2
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi.c86
-rw-r--r--drivers/gpu/drm/bridge/tc358768.c4
-rw-r--r--drivers/gpu/drm/cirrus/Kconfig19
-rw-r--r--drivers/gpu/drm/cirrus/Makefile2
-rw-r--r--drivers/gpu/drm/drm_atomic.c8
-rw-r--r--drivers/gpu/drm/drm_auth.c69
-rw-r--r--drivers/gpu/drm/drm_blend.c16
-rw-r--r--drivers/gpu/drm/drm_bufs.c2
-rw-r--r--drivers/gpu/drm/drm_client.c8
-rw-r--r--drivers/gpu/drm/drm_connector.c11
-rw-r--r--drivers/gpu/drm/drm_crtc_internal.h4
-rw-r--r--drivers/gpu/drm/drm_debugfs.c45
-rw-r--r--drivers/gpu/drm/drm_dma.c2
-rw-r--r--drivers/gpu/drm/drm_dp_helper.c271
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c404
-rw-r--r--drivers/gpu/drm/drm_drv.c230
-rw-r--r--drivers/gpu/drm/drm_edid.c111
-rw-r--r--drivers/gpu/drm/drm_fb_cma_helper.c2
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c35
-rw-r--r--drivers/gpu/drm/drm_file.c9
-rw-r--r--drivers/gpu/drm/drm_framebuffer.c8
-rw-r--r--drivers/gpu/drm/drm_gem.c23
-rw-r--r--drivers/gpu/drm/drm_gem_framebuffer_helper.c224
-rw-r--r--drivers/gpu/drm/drm_gem_vram_helper.c124
-rw-r--r--drivers/gpu/drm/drm_internal.h7
-rw-r--r--drivers/gpu/drm/drm_ioctl.c6
-rw-r--r--drivers/gpu/drm/drm_managed.c275
-rw-r--r--drivers/gpu/drm/drm_mipi_dbi.c33
-rw-r--r--drivers/gpu/drm/drm_mm.c133
-rw-r--r--drivers/gpu/drm/drm_mode_config.c110
-rw-r--r--drivers/gpu/drm/drm_mode_object.c10
-rw-r--r--drivers/gpu/drm/drm_modes.c26
-rw-r--r--drivers/gpu/drm/drm_pci.c4
-rw-r--r--drivers/gpu/drm/drm_plane.c9
-rw-r--r--drivers/gpu/drm/drm_scatter.c11
-rw-r--r--drivers/gpu/drm/drm_sysfs.c3
-rw-r--r--drivers/gpu/drm/drm_vblank.c98
-rw-r--r--drivers/gpu/drm/drm_vm.c6
-rw-r--r--drivers/gpu/drm/drm_vram_helper_common.c94
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.c20
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_dump.c4
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp.c8
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dpi.c8
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dsi.c34
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c28
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c182
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.h16
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_mic.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_rotator.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_scaler.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c34
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c8
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c6
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c14
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_crt.c14
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_dp.c47
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_hdmi.c4
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_lvds.c100
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.c16
-rw-r--r--drivers/gpu/drm/gma500/mdfld_dsi_dpi.c11
-rw-r--r--drivers/gpu/drm/gma500/mdfld_intel_display.c31
-rw-r--r--drivers/gpu/drm/gma500/mdfld_output.h1
-rw-r--r--drivers/gpu/drm/gma500/mdfld_tmd_vid.c6
-rw-r--r--drivers/gpu/drm/gma500/mdfld_tpo_vid.c6
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_hdmi.c19
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_lvds.c5
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_drv.h1
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_lvds.c18
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_sdvo.c99
-rw-r--r--drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c13
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c9
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c8
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c2
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c9
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c1
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c43
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h1
-rw-r--r--drivers/gpu/drm/i2c/sil164_drv.c7
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c23
-rw-r--r--drivers/gpu/drm/i915/Kconfig.profile12
-rw-r--r--drivers/gpu/drm/i915/Makefile32
-rw-r--r--drivers/gpu/drm/i915/display/icl_dsi.c188
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic_plane.c25
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic_plane.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_audio.c144
-rw-r--r--drivers/gpu/drm/i915/display/intel_bw.c194
-rw-r--r--drivers/gpu/drm/i915/display/intel_bw.h27
-rw-r--r--drivers/gpu/drm/i915/display/intel_color.c121
-rw-r--r--drivers/gpu/drm/i915/display/intel_connector.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_crt.c40
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.c808
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.h9
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c920
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.h11
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs.c130
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.c558
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.h5
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_types.h50
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c2017
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.h19
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c84
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_link_training.c9
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_link_training.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.c164
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll_mgr.c8
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsb.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi.c9
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi_vbt.c13
-rw-r--r--drivers/gpu/drm/i915/display/intel_dvo.c9
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.c96
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev.c96
-rw-r--r--drivers/gpu/drm/i915/display/intel_frontbuffer.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_global_state.c54
-rw-r--r--drivers/gpu/drm/i915/display/intel_global_state.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_gmbus.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.c29
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.c299
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug.c16
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_lspcon.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_lvds.c38
-rw-r--r--drivers/gpu/drm/i915/display/intel_overlay.c8
-rw-r--r--drivers/gpu/drm/i915/display/intel_panel.c178
-rw-r--r--drivers/gpu/drm/i915/display/intel_panel.h13
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.c105
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_sdvo.c22
-rw-r--r--drivers/gpu/drm/i915/display/intel_sprite.c8
-rw-r--r--drivers/gpu/drm/i915/display/intel_tc.c149
-rw-r--r--drivers/gpu/drm/i915/display/intel_tc.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_tv.c15
-rw-r--r--drivers/gpu/drm/i915/display/intel_vbt_defs.h4
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi.c26
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_clflush.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_client_blt.c4
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context.c98
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context.h4
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_domain.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c904
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_fence.c4
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_mman.c38
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.c8
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_blt.c26
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_blt.h1
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_types.h3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pages.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_phys.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shmem.c15
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shrinker.c18
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_stolen.c4
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_userptr.c44
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c3
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/huge_pages.c83
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c566
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c2
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c52
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c171
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c128
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c2
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c26
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c2
-rw-r--r--drivers/gpu/drm/i915/gt/debugfs_engines.c2
-rw-r--r--drivers/gpu/drm/i915/gt/debugfs_gt.c15
-rw-r--r--drivers/gpu/drm/i915/gt/debugfs_gt.h9
-rw-r--r--drivers/gpu/drm/i915/gt/debugfs_gt_pm.c74
-rw-r--r--drivers/gpu/drm/i915/gt/gen8_ppgtt.c15
-rw-r--r--drivers/gpu/drm/i915/gt/intel_breadcrumbs.c58
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context.c7
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context_sseu.c3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context_types.h2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine.h42
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_cs.c205
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_pm.c20
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_pm.h6
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_pool.h34
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_types.h54
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt.c59
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c (renamed from drivers/gpu/drm/i915/i915_gem_fence_reg.c)170
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt_fencing.h (renamed from drivers/gpu/drm/i915/i915_gem_fence_reg.h)17
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gpu_commands.h6
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.c69
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c (renamed from drivers/gpu/drm/i915/gt/intel_engine_pool.c)114
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.h37
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_buffer_pool_types.h (renamed from drivers/gpu/drm/i915/gt/intel_engine_pool_types.h)15
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c102
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_clock_utils.h27
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm.c17
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_requests.c20
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_types.h20
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gtt.h5
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc.c1079
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc.h1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc_reg.h6
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rc6.c47
-rw-r--r--drivers/gpu/drm/i915/gt/intel_renderstate.c22
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.c16
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring.h5
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring_submission.c49
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps.c449
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps.h61
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps_types.h14
-rw-r--r--drivers/gpu/drm/i915/gt/intel_sseu.c33
-rw-r--r--drivers/gpu/drm/i915/gt/intel_timeline.c37
-rw-r--r--drivers/gpu/drm/i915/gt/intel_timeline.h7
-rw-r--r--drivers/gpu/drm/i915/gt/intel_workarounds.c49
-rw-r--r--drivers/gpu/drm/i915/gt/mock_engine.c2
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_context.c12
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_gt_pm.c8
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_lrc.c685
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_rc6.c30
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_ring_submission.c2
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_rps.c1331
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_rps.h17
-rw-r--r--drivers/gpu/drm/i915/gt/shmem_utils.c173
-rw-r--r--drivers/gpu/drm/i915/gt/shmem_utils.h23
-rw-r--r--drivers/gpu/drm/i915/gt/st_shmem_utils.c63
-rw-r--r--drivers/gpu/drm/i915/gt/sysfs_engines.c94
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.c46
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.h7
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.c42
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.h14
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c14
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_fw.h1
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_log.c97
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_log.h4
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.c124
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.h15
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_huc.c53
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_huc.h2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_huc_debugfs.c36
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_huc_debugfs.h14
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c17
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_huc_fw.h1
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc.c35
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc.h1
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c30
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.h14
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c56
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h3
-rw-r--r--drivers/gpu/drm/i915/gvt/aperture_gm.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c45
-rw-r--r--drivers/gpu/drm/i915/gvt/execlist.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c16
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.h1
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.c1
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.h5
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/hypercall.h2
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c247
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.h1
-rw-r--r--drivers/gpu/drm/i915/gvt/vgpu.c2
-rw-r--r--drivers/gpu/drm/i915/i915_active.c137
-rw-r--r--drivers/gpu/drm/i915/i915_active.h14
-rw-r--r--drivers/gpu/drm/i915/i915_cmd_parser.c4
-rw-r--r--drivers/gpu/drm/i915/i915_config.c15
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c356
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.h4
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c156
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h47
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c20
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c7
-rw-r--r--drivers/gpu/drm/i915/i915_getparam.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c33
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.h3
-rw-r--r--drivers/gpu/drm/i915/i915_ioc32.c14
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c219
-rw-r--r--drivers/gpu/drm/i915/i915_mm.c1
-rw-r--r--drivers/gpu/drm/i915/i915_params.c4
-rw-r--r--drivers/gpu/drm/i915/i915_params.h2
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c41
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c595
-rw-r--r--drivers/gpu/drm/i915/i915_perf_types.h46
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.c41
-rw-r--r--drivers/gpu/drm/i915/i915_priolist_types.h7
-rw-r--r--drivers/gpu/drm/i915/i915_query.c62
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h194
-rw-r--r--drivers/gpu/drm/i915/i915_request.c427
-rw-r--r--drivers/gpu/drm/i915/i915_request.h30
-rw-r--r--drivers/gpu/drm/i915/i915_scheduler.c37
-rw-r--r--drivers/gpu/drm/i915/i915_scheduler_types.h2
-rw-r--r--drivers/gpu/drm/i915/i915_selftest.h2
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence.c12
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence_work.c5
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence_work.h23
-rw-r--r--drivers/gpu/drm/i915/i915_switcheroo.c4
-rw-r--r--drivers/gpu/drm/i915/i915_utils.c3
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c96
-rw-r--r--drivers/gpu/drm/i915/i915_vma.h4
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.c76
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.h5
-rw-r--r--drivers/gpu/drm/i915/intel_dram.c3
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c364
-rw-r--r--drivers/gpu/drm/i915/intel_pm.h6
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c2
-rw-r--r--drivers/gpu/drm/i915/intel_sideband.c8
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c60
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.h6
-rw-r--r--drivers/gpu/drm/i915/intel_wakeref.c12
-rw-r--r--drivers/gpu/drm/i915/intel_wakeref.h22
-rw-r--r--drivers/gpu/drm/i915/intel_wopcm.c47
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_bdw.c90
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_bdw.h16
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_bxt.c88
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_bxt.h16
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_cflgt2.c89
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_cflgt2.h16
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_cflgt3.c89
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_cflgt3.h16
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_chv.c89
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_chv.h16
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_cnl.c101
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_cnl.h16
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_glk.c88
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_glk.h16
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_hsw.c118
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_hsw.h16
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_icl.c98
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_icl.h16
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_kblgt2.c89
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_kblgt2.h16
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_kblgt3.c89
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_kblgt3.h16
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_sklgt2.c88
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_sklgt2.h16
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_sklgt3.c89
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_sklgt3.h16
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_sklgt4.c89
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_sklgt4.h16
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_tgl.c121
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_tgl.h16
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_active.c12
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_evict.c26
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_gtt.c33
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_live_selftests.h1
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_mock_selftests.h1
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_perf.c104
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_perf_selftests.h1
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_request.c623
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_selftest.c29
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_spinner.c3
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_memory_region.c5
-rw-r--r--drivers/gpu/drm/i915/selftests/librapl.c24
-rw-r--r--drivers/gpu/drm/i915/selftests/librapl.h13
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gem_device.c38
-rw-r--r--drivers/gpu/drm/imx/dw_hdmi-imx.c8
-rw-r--r--drivers/gpu/drm/imx/imx-drm-core.c10
-rw-r--r--drivers/gpu/drm/imx/imx-drm.h1
-rw-r--r--drivers/gpu/drm/imx/imx-ldb.c8
-rw-r--r--drivers/gpu/drm/imx/imx-tve.c8
-rw-r--r--drivers/gpu/drm/imx/parallel-display.c8
-rw-r--r--drivers/gpu/drm/ingenic/ingenic-drm.c43
-rw-r--r--drivers/gpu/drm/lima/Kconfig2
-rw-r--r--drivers/gpu/drm/lima/Makefile4
-rw-r--r--drivers/gpu/drm/lima/lima_bcast.c25
-rw-r--r--drivers/gpu/drm/lima/lima_bcast.h2
-rw-r--r--drivers/gpu/drm/lima/lima_ctx.c3
-rw-r--r--drivers/gpu/drm/lima/lima_ctx.h5
-rw-r--r--drivers/gpu/drm/lima/lima_devfreq.c257
-rw-r--r--drivers/gpu/drm/lima/lima_devfreq.h44
-rw-r--r--drivers/gpu/drm/lima/lima_device.c228
-rw-r--r--drivers/gpu/drm/lima/lima_device.h17
-rw-r--r--drivers/gpu/drm/lima/lima_dlbu.c17
-rw-r--r--drivers/gpu/drm/lima/lima_dlbu.h2
-rw-r--r--drivers/gpu/drm/lima/lima_drv.c141
-rw-r--r--drivers/gpu/drm/lima/lima_drv.h1
-rw-r--r--drivers/gpu/drm/lima/lima_dump.h77
-rw-r--r--drivers/gpu/drm/lima/lima_gp.c21
-rw-r--r--drivers/gpu/drm/lima/lima_gp.h2
-rw-r--r--drivers/gpu/drm/lima/lima_l2_cache.c38
-rw-r--r--drivers/gpu/drm/lima/lima_l2_cache.h2
-rw-r--r--drivers/gpu/drm/lima/lima_mmu.c49
-rw-r--r--drivers/gpu/drm/lima/lima_mmu.h2
-rw-r--r--drivers/gpu/drm/lima/lima_pmu.c77
-rw-r--r--drivers/gpu/drm/lima/lima_pmu.h2
-rw-r--r--drivers/gpu/drm/lima/lima_pp.c31
-rw-r--r--drivers/gpu/drm/lima/lima_pp.h4
-rw-r--r--drivers/gpu/drm/lima/lima_sched.c193
-rw-r--r--drivers/gpu/drm/lima/lima_sched.h11
-rw-r--r--drivers/gpu/drm/lima/lima_trace.c7
-rw-r--r--drivers/gpu/drm/lima/lima_trace.h50
-rw-r--r--drivers/gpu/drm/lima/lima_vm.h3
-rw-r--r--drivers/gpu/drm/mcde/mcde_display.c10
-rw-r--r--drivers/gpu/drm/mcde/mcde_drm.h2
-rw-r--r--drivers/gpu/drm/mcde/mcde_drv.c52
-rw-r--r--drivers/gpu/drm/mcde/mcde_dsi.c9
-rw-r--r--drivers/gpu/drm/mediatek/Kconfig1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_color.c5
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_ovl.c5
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_rdma.c5
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dpi.c57
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_crtc.c19
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_ddp.c259
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_ddp.h7
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.c58
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.h2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_gem.c3
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dsi.c22
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.c22
-rw-r--r--drivers/gpu/drm/mediatek/mtk_mipi_tx.c54
-rw-r--r--drivers/gpu/drm/mediatek/mtk_mipi_tx.h4
-rw-r--r--drivers/gpu/drm/mediatek/mtk_mt8183_mipi_tx.c28
-rw-r--r--drivers/gpu/drm/meson/meson_drv.c34
-rw-r--r--drivers/gpu/drm/meson/meson_drv.h6
-rw-r--r--drivers/gpu/drm/meson/meson_dw_hdmi.c2
-rw-r--r--drivers/gpu/drm/meson/meson_plane.c2
-rw-r--r--drivers/gpu/drm/meson/meson_vclk.c16
-rw-r--r--drivers/gpu/drm/meson/meson_vclk.h3
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_cursor.c4
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.c4
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.h15
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_i2c.c10
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_main.c119
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c127
-rw-r--r--drivers/gpu/drm/msm/Makefile1
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx_gpu.c16
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_gpu.c1
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx_gpu.c83
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_debugfs.c18
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.c7
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.h2
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx.xml.h14
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.c418
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.h37
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h48
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.c70
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_hfi.c123
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_hfi.h50
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_device.c35
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c27
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.h23
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c23
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c95
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c12
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c48
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h39
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c26
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h3
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c129
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h100
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c18
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h3
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c58
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h2
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c18
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c80
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c4
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c35
-rw-r--r--drivers/gpu/drm/msm/msm_debugfs.c23
-rw-r--r--drivers/gpu/drm/msm/msm_debugfs.h2
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c6
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h15
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c31
-rw-r--r--drivers/gpu/drm/msm/msm_gem.h1
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c232
-rw-r--r--drivers/gpu/drm/msm/msm_gem_vma.c42
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c49
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h6
-rw-r--r--drivers/gpu/drm/msm/msm_gpummu.c10
-rw-r--r--drivers/gpu/drm/msm/msm_iommu.c22
-rw-r--r--drivers/gpu/drm/msm/msm_mmu.h5
-rw-r--r--drivers/gpu/drm/msm/msm_rd.c4
-rw-r--r--drivers/gpu/drm/nouveau/Kbuild10
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/crtc.c19
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.c21
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/overlay.c21
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/base507c.c7
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/core.h7
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/core507d.c15
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/core827d.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/core907d.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/core917d.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/corec37d.c26
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/corec57d.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/curs507a.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c158
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.h5
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/headc37d.c5
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/headc57d.c5
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/pior507d.c8
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/sor507d.c7
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/sor907d.c11
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/sorc37d.c9
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndw.c86
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c17
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl5070.h3
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/class.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c98
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.h4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c48
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.h5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_debugfs.c11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_debugfs.h8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c212
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.h31
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dmem.c450
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dmem.h5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dp.c31
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c31
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_encoder.h9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c42
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_platform.c11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_svm.c175
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_svm.h5
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fbcon.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fbcon.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/memory.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/subdev.c11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c75
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/capsgv100.c60
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagf119.c18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagv100.c30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigm200.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c73
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgv100.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/roottu102.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgk104.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c37
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgp100.c93
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgv100.c36
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu102.c33
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c13
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c65
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf117.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gm200.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gp10b.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/ic.c4
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dispc.c33
-rw-r--r--drivers/gpu/drm/omapdrm/dss/venc.c43
-rw-r--r--drivers/gpu/drm/omapdrm/omap_debugfs.c29
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.h2
-rw-r--r--drivers/gpu/drm/panel/Kconfig29
-rw-r--r--drivers/gpu/drm/panel/Makefile3
-rw-r--r--drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c367
-rw-r--r--drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c31
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9322.c4
-rw-r--r--drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c691
-rw-r--r--drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c2
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt39016.c46
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c247
-rw-r--r--drivers/gpu/drm/panel/panel-truly-nt35597.c2
-rw-r--r--drivers/gpu/drm/panel/panel-visionox-rm69299.c303
-rw-r--r--drivers/gpu/drm/pl111/Makefile1
-rw-r--r--drivers/gpu/drm/pl111/pl111_debugfs.c8
-rw-r--r--drivers/gpu/drm/pl111/pl111_drm.h2
-rw-r--r--drivers/gpu/drm/pl111/pl111_drv.c13
-rw-r--r--drivers/gpu/drm/pl111/pl111_versatile.c148
-rw-r--r--drivers/gpu/drm/pl111/pl111_vexpress.c138
-rw-r--r--drivers/gpu/drm/pl111/pl111_vexpress.h29
-rw-r--r--drivers/gpu/drm/qxl/qxl_debugfs.c28
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c32
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.c25
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.h20
-rw-r--r--drivers/gpu/drm/qxl/qxl_dumb.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_gem.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_ioctl.c14
-rw-r--r--drivers/gpu/drm/qxl/qxl_irq.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_kms.c15
-rw-r--r--drivers/gpu/drm/qxl/qxl_object.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_release.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_ttm.c8
-rw-r--r--drivers/gpu/drm/r128/ati_pcigart.c3
-rw-r--r--drivers/gpu/drm/radeon/Makefile35
-rw-r--r--drivers/gpu/drm/radeon/atom.c3
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.c14
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c6
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c18
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.c1
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_encoder.c14
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_kms.c4
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_plane.c16
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_vsp.c14
-rw-r--r--drivers/gpu/drm/rockchip/analogix_dp-rockchip.c9
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-core.c13
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-reg.c6
-rw-r--r--drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c8
-rw-r--r--drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c8
-rw-r--r--drivers/gpu/drm/rockchip/inno_hdmi.c8
-rw-r--r--drivers/gpu/drm/rockchip/rk3066_hdmi.c8
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.c14
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.h1
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fb.c43
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c137
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.h17
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_lvds.c10
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_rgb.c8
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop_reg.c83
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_crtc.c14
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_drv.c2
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_kms.c6
-rw-r--r--drivers/gpu/drm/sti/sti_compositor.c6
-rw-r--r--drivers/gpu/drm/sti/sti_compositor.h4
-rw-r--r--drivers/gpu/drm/sti/sti_crtc.c2
-rw-r--r--drivers/gpu/drm/sti/sti_cursor.c14
-rw-r--r--drivers/gpu/drm/sti/sti_drv.c16
-rw-r--r--drivers/gpu/drm/sti/sti_dvo.c13
-rw-r--r--drivers/gpu/drm/sti/sti_gdp.c7
-rw-r--r--drivers/gpu/drm/sti/sti_hda.c13
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.c13
-rw-r--r--drivers/gpu/drm/sti/sti_hqvdp.c12
-rw-r--r--drivers/gpu/drm/sti/sti_mixer.c10
-rw-r--r--drivers/gpu/drm/sti/sti_mixer.h2
-rw-r--r--drivers/gpu/drm/sti/sti_tvout.c13
-rw-r--r--drivers/gpu/drm/sti/sti_vid.c8
-rw-r--r--drivers/gpu/drm/sti/sti_vid.h2
-rw-r--r--drivers/gpu/drm/stm/drv.c10
-rw-r--r--drivers/gpu/drm/stm/ltdc.c102
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi.h2
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c14
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_lvds.c12
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_rgb.c17
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.c4
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tv.c17
-rw-r--r--drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c12
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c8
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_mixer.c111
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_mixer.h10
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_ui_layer.c14
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_vi_layer.c40
-rw-r--r--drivers/gpu/drm/tegra/dc.c11
-rw-r--r--drivers/gpu/drm/tegra/dpaux.c2
-rw-r--r--drivers/gpu/drm/tegra/drm.c8
-rw-r--r--drivers/gpu/drm/tegra/drm.h4
-rw-r--r--drivers/gpu/drm/tegra/dsi.c21
-rw-r--r--drivers/gpu/drm/tegra/fb.c2
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c21
-rw-r--r--drivers/gpu/drm/tegra/output.c6
-rw-r--r--drivers/gpu/drm/tegra/rgb.c8
-rw-r--r--drivers/gpu/drm/tegra/sor.c20
-rw-r--r--drivers/gpu/drm/tidss/tidss_crtc.c16
-rw-r--r--drivers/gpu/drm/tidss/tidss_dispc.c11
-rw-r--r--drivers/gpu/drm/tidss/tidss_dispc.h6
-rw-r--r--drivers/gpu/drm/tidss/tidss_drv.c25
-rw-r--r--drivers/gpu/drm/tidss/tidss_drv.h4
-rw-r--r--drivers/gpu/drm/tidss/tidss_irq.c12
-rw-r--r--drivers/gpu/drm/tidss/tidss_kms.c21
-rw-r--r--drivers/gpu/drm/tidss/tidss_kms.h1
-rw-r--r--drivers/gpu/drm/tidss/tidss_plane.c6
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.c20
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_external.c10
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_panel.c8
-rw-r--r--drivers/gpu/drm/tiny/Kconfig19
-rw-r--r--drivers/gpu/drm/tiny/Makefile1
-rw-r--r--drivers/gpu/drm/tiny/cirrus.c (renamed from drivers/gpu/drm/cirrus/cirrus.c)82
-rw-r--r--drivers/gpu/drm/tiny/gm12u320.c242
-rw-r--r--drivers/gpu/drm/tiny/hx8357d.c16
-rw-r--r--drivers/gpu/drm/tiny/ili9225.c16
-rw-r--r--drivers/gpu/drm/tiny/ili9341.c16
-rw-r--r--drivers/gpu/drm/tiny/ili9486.c16
-rw-r--r--drivers/gpu/drm/tiny/mi0283qt.c16
-rw-r--r--drivers/gpu/drm/tiny/repaper.c28
-rw-r--r--drivers/gpu/drm/tiny/st7586.c16
-rw-r--r--drivers/gpu/drm/tiny/st7735r.c16
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c4
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c56
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c10
-rw-r--r--drivers/gpu/drm/udl/udl_connector.c4
-rw-r--r--drivers/gpu/drm/udl/udl_drv.c45
-rw-r--r--drivers/gpu/drm/udl/udl_drv.h2
-rw-r--r--drivers/gpu/drm/udl/udl_main.c10
-rw-r--r--drivers/gpu/drm/udl/udl_modeset.c31
-rw-r--r--drivers/gpu/drm/v3d/v3d_debugfs.c20
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.c53
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.h9
-rw-r--r--drivers/gpu/drm/v3d/v3d_gem.c17
-rw-r--r--drivers/gpu/drm/v3d/v3d_irq.c16
-rw-r--r--drivers/gpu/drm/v3d/v3d_mmu.c10
-rw-r--r--drivers/gpu/drm/v3d/v3d_sched.c10
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_drv.c32
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_drv.h1
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_irq.c2
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_main.c29
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_mode.c10
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_ttm.c12
-rw-r--r--drivers/gpu/drm/vc4/vc4_debugfs.c11
-rw-r--r--drivers/gpu/drm/vc4/vc4_dpi.c8
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.h2
-rw-r--r--drivers/gpu/drm/vc4/vc4_dsi.c15
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c17
-rw-r--r--drivers/gpu/drm/vc4/vc4_vec.c8
-rw-r--r--drivers/gpu/drm/vgem/vgem_drv.c15
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_debugfs.c3
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_display.c8
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h36
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_gem.c13
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ioctl.c1
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_object.c7
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vq.c9
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.c21
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.h5
-rw-r--r--drivers/gpu/drm/vkms/vkms_gem.c11
-rw-r--r--drivers/gpu/drm/vkms/vkms_output.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_blit.c17
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front.c4
-rw-r--r--drivers/gpu/drm/zte/zx_hdmi.c8
-rw-r--r--drivers/gpu/drm/zte/zx_tvenc.c8
-rw-r--r--drivers/gpu/drm/zte/zx_vga.c8
-rw-r--r--drivers/greybus/Kconfig6
-rw-r--r--drivers/greybus/arpc.h2
-rw-r--r--drivers/hid/Kconfig11
-rw-r--r--drivers/hid/hid-alps.c3
-rw-r--r--drivers/hid/hid-apple.c30
-rw-r--r--drivers/hid/hid-asus.c122
-rw-r--r--drivers/hid/hid-ids.h12
-rw-r--r--drivers/hid/hid-logitech-dj.c4
-rw-r--r--drivers/hid/hid-logitech-hidpp.c2
-rw-r--r--drivers/hid/hid-mcp2221.c169
-rw-r--r--drivers/hid/hid-multitouch.c66
-rw-r--r--drivers/hid/hid-quirks.c1
-rw-r--r--drivers/hid/hid-sony.c17
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c8
-rw-r--r--drivers/hid/intel-ish-hid/ishtp-fw-loader.c2
-rw-r--r--drivers/hv/channel.c58
-rw-r--r--drivers/hv/channel_mgmt.c439
-rw-r--r--drivers/hv/connection.c58
-rw-r--r--drivers/hv/hv.c16
-rw-r--r--drivers/hv/hv_fcopy.c2
-rw-r--r--drivers/hv/hv_snapshot.c2
-rw-r--r--drivers/hv/hv_trace.h25
-rw-r--r--drivers/hv/hyperv_vmbus.h81
-rw-r--r--drivers/hv/vmbus_drv.c314
-rw-r--r--drivers/hwmon/Kconfig59
-rw-r--r--drivers/hwmon/Makefile3
-rw-r--r--drivers/hwmon/adt7411.c3
-rw-r--r--drivers/hwmon/amd_energy.c408
-rw-r--r--drivers/hwmon/applesmc.c12
-rw-r--r--drivers/hwmon/bt1-pvt.c1146
-rw-r--r--drivers/hwmon/bt1-pvt.h244
-rw-r--r--drivers/hwmon/dell-smm-hwmon.c26
-rw-r--r--drivers/hwmon/gsc-hwmon.c390
-rw-r--r--drivers/hwmon/hwmon.c136
-rw-r--r--drivers/hwmon/ina2xx.c183
-rw-r--r--drivers/hwmon/lm70.c47
-rw-r--r--drivers/hwmon/lm75.c8
-rw-r--r--drivers/hwmon/lm75.h31
-rw-r--r--drivers/hwmon/lm90.c45
-rw-r--r--drivers/hwmon/nct6775.c10
-rw-r--r--drivers/hwmon/nct7802.c6
-rw-r--r--drivers/hwmon/nct7904.c138
-rw-r--r--drivers/hwmon/pmbus/Kconfig9
-rw-r--r--drivers/hwmon/pmbus/Makefile1
-rw-r--r--drivers/hwmon/pmbus/max16601.c314
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c8
-rw-r--r--drivers/hwtracing/coresight/Kconfig2
-rw-r--r--drivers/hwtracing/coresight/Makefile3
-rw-r--r--drivers/hwtracing/coresight/coresight-cti-platform.c11
-rw-r--r--drivers/hwtracing/coresight/coresight-cti-sysfs.c16
-rw-r--r--drivers/hwtracing/coresight/coresight-cti.c232
-rw-r--r--drivers/hwtracing/coresight/coresight-cti.h8
-rw-r--r--drivers/hwtracing/coresight/coresight-etb10.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-etm3x.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-sysfs.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.c33
-rw-r--r--drivers/hwtracing/coresight/coresight-platform.c91
-rw-r--r--drivers/hwtracing/coresight/coresight-priv.h21
-rw-r--r--drivers/hwtracing/coresight/coresight-sysfs.c204
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etf.c16
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.c2
-rw-r--r--drivers/hwtracing/coresight/coresight.c82
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c4
-rw-r--r--drivers/i2c/busses/i2c-i801.c6
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c3
-rw-r--r--drivers/i3c/master.c16
-rw-r--r--drivers/ide/ide-cd.c17
-rw-r--r--drivers/ide/ide-io.c7
-rw-r--r--drivers/iio/accel/Kconfig10
-rw-r--r--drivers/iio/accel/bma180.c208
-rw-r--r--drivers/iio/accel/dmard06.c3
-rw-r--r--drivers/iio/accel/hid-sensor-accel-3d.c18
-rw-r--r--drivers/iio/accel/kxsd9-i2c.c11
-rw-r--r--drivers/iio/accel/mxc4005.c4
-rw-r--r--drivers/iio/accel/st_accel.h2
-rw-r--r--drivers/iio/accel/st_accel_buffer.c3
-rw-r--r--drivers/iio/accel/st_accel_core.c83
-rw-r--r--drivers/iio/accel/st_accel_i2c.c5
-rw-r--r--drivers/iio/adc/Kconfig55
-rw-r--r--drivers/iio/adc/Makefile4
-rw-r--r--drivers/iio/adc/ad7476.c59
-rw-r--r--drivers/iio/adc/ad7780.c27
-rw-r--r--drivers/iio/adc/ad7791.c64
-rw-r--r--drivers/iio/adc/ad7793.c144
-rw-r--r--drivers/iio/adc/ad9467.c422
-rw-r--r--drivers/iio/adc/ad_sigma_delta.c8
-rw-r--r--drivers/iio/adc/adi-axi-adc.c482
-rw-r--r--drivers/iio/adc/at91-sama5d2_adc.c233
-rw-r--r--drivers/iio/adc/at91_adc.c5
-rw-r--r--drivers/iio/adc/exynos_adc.c17
-rw-r--r--drivers/iio/adc/fsl-imx25-gcq.c4
-rw-r--r--drivers/iio/adc/intel_mrfld_adc.c6
-rw-r--r--drivers/iio/adc/max1241.c227
-rw-r--r--drivers/iio/adc/max1363.c32
-rw-r--r--drivers/iio/adc/mcp3422.c5
-rw-r--r--drivers/iio/adc/mp2629_adc.c208
-rw-r--r--drivers/iio/adc/stm32-adc-core.c34
-rw-r--r--drivers/iio/adc/sun4i-gpadc-iio.c4
-rw-r--r--drivers/iio/adc/ti-ads124s08.c7
-rw-r--r--drivers/iio/adc/xilinx-xadc-core.c4
-rw-r--r--drivers/iio/adc/xilinx-xadc-events.c2
-rw-r--r--drivers/iio/adc/xilinx-xadc.h2
-rw-r--r--drivers/iio/buffer/industrialio-buffer-dma.c1
-rw-r--r--drivers/iio/buffer/industrialio-buffer-dmaengine.c41
-rw-r--r--drivers/iio/buffer/industrialio-hw-consumer.c31
-rw-r--r--drivers/iio/buffer/industrialio-triggered-buffer.c11
-rw-r--r--drivers/iio/buffer/kfifo_buf.c22
-rw-r--r--drivers/iio/chemical/Kconfig11
-rw-r--r--drivers/iio/chemical/Makefile1
-rw-r--r--drivers/iio/chemical/atlas-ezo-sensor.c177
-rw-r--r--drivers/iio/chemical/atlas-sensor.c36
-rw-r--r--drivers/iio/chemical/bme680_core.c36
-rw-r--r--drivers/iio/chemical/ccs811.c112
-rw-r--r--drivers/iio/chemical/pms7003.c17
-rw-r--r--drivers/iio/chemical/sps30.c9
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-trigger.c18
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-trigger.h3
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_core.c13
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_i2c.c4
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_spi.c6
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_trigger.c13
-rw-r--r--drivers/iio/dac/Kconfig6
-rw-r--r--drivers/iio/dac/ad5360.c17
-rw-r--r--drivers/iio/dac/ad5380.c8
-rw-r--r--drivers/iio/dac/ad5421.c21
-rw-r--r--drivers/iio/dac/ad5446.c18
-rw-r--r--drivers/iio/dac/ad5449.c12
-rw-r--r--drivers/iio/dac/ad5592r-base.c30
-rw-r--r--drivers/iio/dac/ad5592r-base.h1
-rw-r--r--drivers/iio/dac/ad5592r.c4
-rw-r--r--drivers/iio/dac/ad5593r.c2
-rw-r--r--drivers/iio/dac/ad5624r_spi.c8
-rw-r--r--drivers/iio/dac/ad5686.c10
-rw-r--r--drivers/iio/dac/ad5686.h2
-rw-r--r--drivers/iio/dac/ad5755.c22
-rw-r--r--drivers/iio/dac/ad5761.c16
-rw-r--r--drivers/iio/dac/ad5764.c12
-rw-r--r--drivers/iio/dac/ltc2632.c67
-rw-r--r--drivers/iio/dac/ti-dac7612.c4
-rw-r--r--drivers/iio/dac/vf610_dac.c11
-rw-r--r--drivers/iio/dummy/iio_dummy_evgen.c31
-rw-r--r--drivers/iio/gyro/Kconfig2
-rw-r--r--drivers/iio/gyro/adis16130.c4
-rw-r--r--drivers/iio/gyro/adis16136.c10
-rw-r--r--drivers/iio/gyro/bmg160_i2c.c6
-rw-r--r--drivers/iio/gyro/bmg160_spi.c5
-rw-r--r--drivers/iio/gyro/hid-sensor-gyro-3d.c18
-rw-r--r--drivers/iio/gyro/mpu3050-i2c.c4
-rw-r--r--drivers/iio/gyro/st_gyro_buffer.c3
-rw-r--r--drivers/iio/gyro/st_gyro_core.c9
-rw-r--r--drivers/iio/health/afe4403.c14
-rw-r--r--drivers/iio/health/max30100.c7
-rw-r--r--drivers/iio/humidity/hid-sensor-humidity.c12
-rw-r--r--drivers/iio/humidity/hts221_buffer.c6
-rw-r--r--drivers/iio/humidity/hts221_i2c.c6
-rw-r--r--drivers/iio/humidity/hts221_spi.c6
-rw-r--r--drivers/iio/imu/Kconfig13
-rw-r--r--drivers/iio/imu/Makefile1
-rw-r--r--drivers/iio/imu/adis.c27
-rw-r--r--drivers/iio/imu/adis16400.c21
-rw-r--r--drivers/iio/imu/adis16460.c27
-rw-r--r--drivers/iio/imu/adis16475.c1338
-rw-r--r--drivers/iio/imu/adis16480.c16
-rw-r--r--drivers/iio/imu/adis_buffer.c58
-rw-r--r--drivers/iio/imu/adis_trigger.c72
-rw-r--r--drivers/iio/imu/bmi160/bmi160_i2c.c4
-rw-r--r--drivers/iio/imu/bmi160/bmi160_spi.c4
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c8
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_core.c23
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c4
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c4
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h2
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c21
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c96
-rw-r--r--drivers/iio/industrialio-buffer.c93
-rw-r--r--drivers/iio/industrialio-core.c126
-rw-r--r--drivers/iio/industrialio-trigger.c53
-rw-r--r--drivers/iio/inkern.c27
-rw-r--r--drivers/iio/light/Kconfig2
-rw-r--r--drivers/iio/light/bh1780.c6
-rw-r--r--drivers/iio/light/cm32181.c271
-rw-r--r--drivers/iio/light/cm3232.c3
-rw-r--r--drivers/iio/light/gp2ap002.c19
-rw-r--r--drivers/iio/light/gp2ap020a00f.c6
-rw-r--r--drivers/iio/light/hid-sensor-als.c18
-rw-r--r--drivers/iio/light/hid-sensor-prox.c18
-rw-r--r--drivers/iio/light/isl29125.c28
-rw-r--r--drivers/iio/light/ltr501.c41
-rw-r--r--drivers/iio/light/opt3001.c3
-rw-r--r--drivers/iio/light/si1133.c18
-rw-r--r--drivers/iio/light/st_uvis25_i2c.c7
-rw-r--r--drivers/iio/light/st_uvis25_spi.c7
-rw-r--r--drivers/iio/light/tsl2563.c2
-rw-r--r--drivers/iio/light/tsl2772.c6
-rw-r--r--drivers/iio/light/vcnl4000.c746
-rw-r--r--drivers/iio/light/vl6180.c3
-rw-r--r--drivers/iio/light/zopt2201.c4
-rw-r--r--drivers/iio/magnetometer/ak8974.c201
-rw-r--r--drivers/iio/magnetometer/bmc150_magn_spi.c4
-rw-r--r--drivers/iio/magnetometer/hid-sensor-magn-3d.c18
-rw-r--r--drivers/iio/magnetometer/mmc35240.c4
-rw-r--r--drivers/iio/magnetometer/rm3100-core.c5
-rw-r--r--drivers/iio/magnetometer/st_magn_core.c3
-rw-r--r--drivers/iio/orientation/hid-sensor-incl-3d.c18
-rw-r--r--drivers/iio/orientation/hid-sensor-rotation.c18
-rw-r--r--drivers/iio/pressure/bmp280-core.c100
-rw-r--r--drivers/iio/pressure/hid-sensor-press.c18
-rw-r--r--drivers/iio/pressure/hp206c.c8
-rw-r--r--drivers/iio/pressure/ms5611_i2c.c4
-rw-r--r--drivers/iio/pressure/ms5611_spi.c4
-rw-r--r--drivers/iio/pressure/st_pressure_core.c7
-rw-r--r--drivers/iio/pressure/zpa2326.c9
-rw-r--r--drivers/iio/proximity/Kconfig24
-rw-r--r--drivers/iio/proximity/Makefile2
-rw-r--r--drivers/iio/proximity/ping.c7
-rw-r--r--drivers/iio/proximity/sx9310.c1069
-rw-r--r--drivers/iio/proximity/vcnl3020.c258
-rw-r--r--drivers/iio/temperature/hid-sensor-temperature.c12
-rw-r--r--drivers/iio/temperature/ltc2983.c4
-rw-r--r--drivers/iio/temperature/max31856.c5
-rw-r--r--drivers/iio/trigger/iio-trig-hrtimer.c2
-rw-r--r--drivers/infiniband/Kconfig1
-rw-r--r--drivers/infiniband/core/Makefile9
-rw-r--r--drivers/infiniband/core/addr.c4
-rw-r--r--drivers/infiniband/core/cm.c306
-rw-r--r--drivers/infiniband/core/cma.c114
-rw-r--r--drivers/infiniband/core/cma_configfs.c13
-rw-r--r--drivers/infiniband/core/cma_priv.h1
-rw-r--r--drivers/infiniband/core/cma_trace.h20
-rw-r--r--drivers/infiniband/core/core_priv.h3
-rw-r--r--drivers/infiniband/core/cq.c173
-rw-r--r--drivers/infiniband/core/device.c22
-rw-r--r--drivers/infiniband/core/fmr_pool.c494
-rw-r--r--drivers/infiniband/core/lag.c138
-rw-r--r--drivers/infiniband/core/mad.c255
-rw-r--r--drivers/infiniband/core/multicast.c12
-rw-r--r--drivers/infiniband/core/rdma_core.c25
-rw-r--r--drivers/infiniband/core/rdma_core.h7
-rw-r--r--drivers/infiniband/core/rw.c2
-rw-r--r--drivers/infiniband/core/sa_query.c51
-rw-r--r--drivers/infiniband/core/sysfs.c10
-rw-r--r--drivers/infiniband/core/ucma.c65
-rw-r--r--drivers/infiniband/core/ud_header.c2
-rw-r--r--drivers/infiniband/core/umem_odp.c4
-rw-r--r--drivers/infiniband/core/user_mad.c22
-rw-r--r--drivers/infiniband/core/uverbs.h21
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c76
-rw-r--r--drivers/infiniband/core/uverbs_ioctl.c24
-rw-r--r--drivers/infiniband/core/uverbs_main.c46
-rw-r--r--drivers/infiniband/core/uverbs_std_types.c95
-rw-r--r--drivers/infiniband/core/uverbs_std_types_cq.c17
-rw-r--r--drivers/infiniband/core/uverbs_std_types_mr.c12
-rw-r--r--drivers/infiniband/core/uverbs_std_types_qp.c401
-rw-r--r--drivers/infiniband/core/uverbs_std_types_srq.c234
-rw-r--r--drivers/infiniband/core/uverbs_std_types_wq.c194
-rw-r--r--drivers/infiniband/core/uverbs_uapi.c3
-rw-r--r--drivers/infiniband/core/verbs.c159
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c76
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.h18
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.c357
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.h42
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.c88
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.h91
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.c1
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.h53
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.c3
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.h2
-rw-r--r--drivers/infiniband/hw/bnxt_re/roce_hsi.h106
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c1
-rw-r--r--drivers/infiniband/hw/efa/efa.h6
-rw-r--r--drivers/infiniband/hw/efa/efa_admin_cmds_defs.h63
-rw-r--r--drivers/infiniband/hw/efa/efa_com.c5
-rw-r--r--drivers/infiniband/hw/efa/efa_com.h3
-rw-r--r--drivers/infiniband/hw/efa/efa_com_cmd.c18
-rw-r--r--drivers/infiniband/hw/efa/efa_com_cmd.h11
-rw-r--r--drivers/infiniband/hw/efa/efa_main.c52
-rw-r--r--drivers/infiniband/hw/efa/efa_verbs.c19
-rw-r--r--drivers/infiniband/hw/hfi1/Makefile4
-rw-r--r--drivers/infiniband/hw/hfi1/affinity.c12
-rw-r--r--drivers/infiniband/hw/hfi1/affinity.h3
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c303
-rw-r--r--drivers/infiniband/hw/hfi1/chip.h5
-rw-r--r--drivers/infiniband/hw/hfi1/common.h13
-rw-r--r--drivers/infiniband/hw/hfi1/driver.c231
-rw-r--r--drivers/infiniband/hw/hfi1/file_ops.c4
-rw-r--r--drivers/infiniband/hw/hfi1/hfi.h38
-rw-r--r--drivers/infiniband/hw/hfi1/init.c13
-rw-r--r--drivers/infiniband/hw/hfi1/ipoib.h171
-rw-r--r--drivers/infiniband/hw/hfi1/ipoib_main.c309
-rw-r--r--drivers/infiniband/hw/hfi1/ipoib_rx.c95
-rw-r--r--drivers/infiniband/hw/hfi1/ipoib_tx.c828
-rw-r--r--drivers/infiniband/hw/hfi1/mmu_rb.c2
-rw-r--r--drivers/infiniband/hw/hfi1/msix.c36
-rw-r--r--drivers/infiniband/hw/hfi1/msix.h7
-rw-r--r--drivers/infiniband/hw/hfi1/netdev.h118
-rw-r--r--drivers/infiniband/hw/hfi1/netdev_rx.c481
-rw-r--r--drivers/infiniband/hw/hfi1/qp.c18
-rw-r--r--drivers/infiniband/hw/hfi1/tid_rdma.c4
-rw-r--r--drivers/infiniband/hw/hfi1/trace.c42
-rw-r--r--drivers/infiniband/hw/hfi1/trace_ctxts.h11
-rw-r--r--drivers/infiniband/hw/hfi1/user_exp_rcv.c7
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.c14
-rw-r--r--drivers/infiniband/hw/hfi1/vnic.h5
-rw-r--r--drivers/infiniband/hw/hfi1/vnic_main.c325
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_ah.c5
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_alloc.c148
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_common.h4
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cq.c351
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_device.h246
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hem.c114
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hem.h11
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v1.c360
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c1675
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.h15
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_main.c71
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_mr.c1644
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_qp.c509
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_srq.c378
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw.h9
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_verbs.c1
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_verbs.h1
-rw-r--r--drivers/infiniband/hw/mlx4/ah.c11
-rw-r--r--drivers/infiniband/hw/mlx4/main.c11
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h18
-rw-r--r--drivers/infiniband/hw/mlx4/mr.c97
-rw-r--r--drivers/infiniband/hw/mlx5/Makefile29
-rw-r--r--drivers/infiniband/hw/mlx5/ah.c35
-rw-r--r--drivers/infiniband/hw/mlx5/cmd.c131
-rw-r--r--drivers/infiniband/hw/mlx5/cmd.h6
-rw-r--r--drivers/infiniband/hw/mlx5/cong.c4
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c11
-rw-r--r--drivers/infiniband/hw/mlx5/devx.c27
-rw-r--r--drivers/infiniband/hw/mlx5/flow.c156
-rw-r--r--drivers/infiniband/hw/mlx5/gsi.c38
-rw-r--r--drivers/infiniband/hw/mlx5/ib_rep.h2
-rw-r--r--drivers/infiniband/hw/mlx5/ib_virt.c2
-rw-r--r--drivers/infiniband/hw/mlx5/mad.c5
-rw-r--r--drivers/infiniband/hw/mlx5/main.c216
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h75
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c13
-rw-r--r--drivers/infiniband/hw/mlx5/qos.c13
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c3779
-rw-r--r--drivers/infiniband/hw/mlx5/qp.h46
-rw-r--r--drivers/infiniband/hw/mlx5/qpc.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/qp.c)338
-rw-r--r--drivers/infiniband/hw/mlx5/srq.c10
-rw-r--r--drivers/infiniband/hw/mlx5/srq_cmd.c113
-rw-r--r--drivers/infiniband/hw/mlx5/wr.c1504
-rw-r--r--drivers/infiniband/hw/mlx5/wr.h76
-rw-r--r--drivers/infiniband/hw/mthca/mthca_dev.h10
-rw-r--r--drivers/infiniband/hw/mthca/mthca_mr.c262
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.c105
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.h23
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma.h1
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_ah.c3
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_ah.h2
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_hw.c1
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c2
-rw-r--r--drivers/infiniband/hw/qedr/main.c1
-rw-r--r--drivers/infiniband/hw/qedr/qedr.h1
-rw-r--r--drivers/infiniband/hw/qedr/verbs.c6
-rw-r--r--drivers/infiniband/hw/qedr/verbs.h2
-rw-r--r--drivers/infiniband/hw/qib/qib_file_ops.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c7
-rw-r--r--drivers/infiniband/hw/qib/qib_user_pages.c6
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.c1
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_verbs.c1
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom.c4
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c5
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h2
-rw-r--r--drivers/infiniband/sw/rdmavt/ah.c11
-rw-r--r--drivers/infiniband/sw/rdmavt/ah.h4
-rw-r--r--drivers/infiniband/sw/rdmavt/mmap.c1
-rw-r--r--drivers/infiniband/sw/rdmavt/mr.c155
-rw-r--r--drivers/infiniband/sw/rdmavt/mr.h15
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c24
-rw-r--r--drivers/infiniband/sw/rdmavt/vt.c4
-rw-r--r--drivers/infiniband/sw/rxe/rxe.c1
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mmap.c1
-rw-r--r--drivers/infiniband/sw/rxe/rxe_param.h3
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c9
-rw-r--r--drivers/infiniband/sw/siw/siw.h4
-rw-r--r--drivers/infiniband/sw/siw/siw_cm.c42
-rw-r--r--drivers/infiniband/sw/siw/siw_main.c1
-rw-r--r--drivers/infiniband/sw/siw/siw_mem.c9
-rw-r--r--drivers/infiniband/sw/siw/siw_verbs.c1
-rw-r--r--drivers/infiniband/ulp/Makefile1
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c37
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c23
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_verbs.c3
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_vlan.c3
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.h79
-rw-r--r--drivers/infiniband/ulp/iser/iser_initiator.c19
-rw-r--r--drivers/infiniband/ulp/iser/iser_memory.c188
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c126
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c5
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c12
-rw-r--r--drivers/infiniband/ulp/rtrs/Kconfig27
-rw-r--r--drivers/infiniband/ulp/rtrs/Makefile15
-rw-r--r--drivers/infiniband/ulp/rtrs/README213
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c200
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c483
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt.c2992
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt.h252
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-log.h28
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-pri.h399
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv-stats.c38
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c321
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv.c2178
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv.h148
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs.c612
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs.h196
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c265
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.h27
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c67
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.h5
-rw-r--r--drivers/input/evdev.c7
-rw-r--r--drivers/input/joystick/Kconfig1
-rw-r--r--drivers/input/keyboard/Kconfig2
-rw-r--r--drivers/input/keyboard/atkbd.c97
-rw-r--r--drivers/input/keyboard/imx_sc_key.c33
-rw-r--r--drivers/input/keyboard/tca6416-keypad.c2
-rw-r--r--drivers/input/misc/Kconfig32
-rw-r--r--drivers/input/misc/Makefile3
-rw-r--r--drivers/input/misc/gp2ap002a00f.c281
-rw-r--r--drivers/input/misc/iqs269a.c1833
-rw-r--r--drivers/input/misc/msm-vibrator.c281
-rw-r--r--drivers/input/misc/xen-kbdfront.c2
-rw-r--r--drivers/input/mouse/elan_i2c_core.c2
-rw-r--r--drivers/input/serio/i8042-ppcio.h57
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h1
-rw-r--r--drivers/input/serio/i8042.c3
-rw-r--r--drivers/input/serio/i8042.h2
-rw-r--r--drivers/input/touchscreen/Kconfig12
-rw-r--r--drivers/input/touchscreen/Makefile1
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c7
-rw-r--r--drivers/input/touchscreen/cy8ctma140.c353
-rw-r--r--drivers/input/touchscreen/cyttsp4_core.c5
-rw-r--r--drivers/input/touchscreen/cyttsp_core.c2
-rw-r--r--drivers/input/touchscreen/edt-ft5x06.c198
-rw-r--r--drivers/input/touchscreen/elants_i2c.c247
-rw-r--r--drivers/input/touchscreen/melfas_mip4.c4
-rw-r--r--drivers/input/touchscreen/mms114.c19
-rw-r--r--drivers/input/touchscreen/raspberrypi-ts.c2
-rw-r--r--drivers/input/touchscreen/stmfts.c2
-rw-r--r--drivers/interconnect/Kconfig3
-rw-r--r--drivers/interconnect/Makefile1
-rw-r--r--drivers/interconnect/core.c161
-rw-r--r--drivers/interconnect/imx/Kconfig17
-rw-r--r--drivers/interconnect/imx/Makefile9
-rw-r--r--drivers/interconnect/imx/imx.c284
-rw-r--r--drivers/interconnect/imx/imx.h61
-rw-r--r--drivers/interconnect/imx/imx8mm.c105
-rw-r--r--drivers/interconnect/imx/imx8mn.c94
-rw-r--r--drivers/interconnect/imx/imx8mq.c103
-rw-r--r--drivers/interconnect/internal.h2
-rw-r--r--drivers/iommu/Kconfig9
-rw-r--r--drivers/iommu/Makefile1
-rw-r--r--drivers/iommu/amd_iommu.c369
-rw-r--r--drivers/iommu/amd_iommu.h96
-rw-r--r--drivers/iommu/amd_iommu_debugfs.c5
-rw-r--r--drivers/iommu/amd_iommu_init.c4
-rw-r--r--drivers/iommu/amd_iommu_proto.h96
-rw-r--r--drivers/iommu/amd_iommu_types.h9
-rw-r--r--drivers/iommu/amd_iommu_v2.c18
-rw-r--r--drivers/iommu/arm-smmu-impl.c8
-rw-r--r--drivers/iommu/arm-smmu-qcom.c37
-rw-r--r--drivers/iommu/arm-smmu-v3.c122
-rw-r--r--drivers/iommu/arm-smmu.c53
-rw-r--r--drivers/iommu/arm-smmu.h1
-rw-r--r--drivers/iommu/dma-iommu.c5
-rw-r--r--drivers/iommu/dmar.c99
-rw-r--r--drivers/iommu/exynos-iommu.c24
-rw-r--r--drivers/iommu/fsl_pamu_domain.c22
-rw-r--r--drivers/iommu/hyperv-iommu.c2
-rw-r--r--drivers/iommu/intel-iommu-debugfs.c62
-rw-r--r--drivers/iommu/intel-iommu.c952
-rw-r--r--drivers/iommu/intel-pasid.c309
-rw-r--r--drivers/iommu/intel-pasid.h27
-rw-r--r--drivers/iommu/intel-svm.c452
-rw-r--r--drivers/iommu/intel_irq_remapping.c2
-rw-r--r--drivers/iommu/iommu.c470
-rw-r--r--drivers/iommu/iova.c6
-rw-r--r--drivers/iommu/ipmmu-vmsa.c59
-rw-r--r--drivers/iommu/msm_iommu.c36
-rw-r--r--drivers/iommu/mtk_iommu.c24
-rw-r--r--drivers/iommu/mtk_iommu_v1.c68
-rw-r--r--drivers/iommu/omap-iommu.c103
-rw-r--r--drivers/iommu/qcom_iommu.c24
-rw-r--r--drivers/iommu/rockchip-iommu.c26
-rw-r--r--drivers/iommu/s390-iommu.c30
-rw-r--r--drivers/iommu/sun50i-iommu.c1023
-rw-r--r--drivers/iommu/tegra-gart.c24
-rw-r--r--drivers/iommu/tegra-smmu.c31
-rw-r--r--drivers/iommu/virtio-iommu.c41
-rw-r--r--drivers/irqchip/Kconfig40
-rw-r--r--drivers/irqchip/Makefile4
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c170
-rw-r--r--drivers/irqchip/irq-gic-v3.c3
-rw-r--r--drivers/irqchip/irq-gic.c1
-rw-r--r--drivers/irqchip/irq-loongson-htvec.c214
-rw-r--r--drivers/irqchip/irq-loongson-pch-msi.c255
-rw-r--r--drivers/irqchip/irq-loongson-pch-pic.c243
-rw-r--r--drivers/irqchip/irq-riscv-intc.c138
-rw-r--r--drivers/irqchip/irq-sifive-plic.c69
-rw-r--r--drivers/leds/Kconfig29
-rw-r--r--drivers/leds/Makefile3
-rw-r--r--drivers/leds/leds-ariel.c133
-rw-r--r--drivers/leds/leds-aw2013.c436
-rw-r--r--drivers/leds/leds-lm355x.c1
-rw-r--r--drivers/leds/leds-lp3952.c2
-rw-r--r--drivers/leds/leds-lt3593.c1
-rw-r--r--drivers/leds/leds-netxbig.c148
-rw-r--r--drivers/leds/leds-pca963x.c2
-rw-r--r--drivers/leds/leds-pwm.c16
-rw-r--r--drivers/leds/leds-sgm3140.c320
-rw-r--r--drivers/leds/leds-tca6507.c2
-rw-r--r--drivers/leds/leds-tlc591xx.c5
-rw-r--r--drivers/leds/trigger/ledtrig-timer.c4
-rw-r--r--drivers/lightnvm/pblk-cache.c8
-rw-r--r--drivers/lightnvm/pblk-init.c5
-rw-r--r--drivers/lightnvm/pblk-read.c11
-rw-r--r--drivers/macintosh/Kconfig1
-rw-r--r--drivers/macintosh/ams/ams-input.c37
-rw-r--r--drivers/macintosh/ams/ams.h4
-rw-r--r--drivers/macintosh/mac_hid.c3
-rw-r--r--drivers/macintosh/macio-adb.c2
-rw-r--r--drivers/macintosh/mediabay.c2
-rw-r--r--drivers/macintosh/via-pmu.c4
-rw-r--r--drivers/macintosh/windfarm_pm112.c21
-rw-r--r--drivers/mailbox/Kconfig18
-rw-r--r--drivers/mailbox/Makefile4
-rw-r--r--drivers/mailbox/imx-mailbox.c117
-rw-r--r--drivers/mailbox/pcc.c2
-rw-r--r--drivers/mailbox/qcom-apcs-ipc-mailbox.c61
-rw-r--r--drivers/mailbox/qcom-ipcc.c286
-rw-r--r--drivers/mailbox/sprd-mailbox.c361
-rw-r--r--drivers/mailbox/zynqmp-ipi-mailbox.c25
-rw-r--r--drivers/md/Kconfig20
-rw-r--r--drivers/md/Makefile3
-rw-r--r--drivers/md/bcache/Kconfig9
-rw-r--r--drivers/md/bcache/bcache.h2
-rw-r--r--drivers/md/bcache/bset.c6
-rw-r--r--drivers/md/bcache/btree.c16
-rw-r--r--drivers/md/bcache/extents.c12
-rw-r--r--drivers/md/bcache/io.c8
-rw-r--r--drivers/md/bcache/journal.c34
-rw-r--r--drivers/md/bcache/request.c25
-rw-r--r--drivers/md/bcache/super.c232
-rw-r--r--drivers/md/bcache/sysfs.c8
-rw-r--r--drivers/md/bcache/writeback.c6
-rw-r--r--drivers/md/dm-bufio.c113
-rw-r--r--drivers/md/dm-crypt.c80
-rw-r--r--drivers/md/dm-ebs-target.c471
-rw-r--r--drivers/md/dm-historical-service-time.c561
-rw-r--r--drivers/md/dm-integrity.c8
-rw-r--r--drivers/md/dm-log-writes.c2
-rw-r--r--drivers/md/dm-mpath.c123
-rw-r--r--drivers/md/dm-path-selector.h2
-rw-r--r--drivers/md/dm-queue-length.c2
-rw-r--r--drivers/md/dm-raid.c2
-rw-r--r--drivers/md/dm-raid1.c2
-rw-r--r--drivers/md/dm-rq.c2
-rw-r--r--drivers/md/dm-service-time.c2
-rw-r--r--drivers/md/dm-stats.c2
-rw-r--r--drivers/md/dm-stripe.c2
-rw-r--r--drivers/md/dm-switch.c2
-rw-r--r--drivers/md/dm-table.c17
-rw-r--r--drivers/md/dm-writecache.c42
-rw-r--r--drivers/md/dm-zoned-metadata.c1046
-rw-r--r--drivers/md/dm-zoned-reclaim.c210
-rw-r--r--drivers/md/dm-zoned-target.c463
-rw-r--r--drivers/md/dm-zoned.h113
-rw-r--r--drivers/md/dm.c35
-rw-r--r--drivers/md/md-bitmap.c12
-rw-r--r--drivers/md/md-linear.h2
-rw-r--r--drivers/md/md.c71
-rw-r--r--drivers/md/md.h1
-rw-r--r--drivers/md/persistent-data/dm-btree-internal.h4
-rw-r--r--drivers/md/persistent-data/dm-btree-spine.c6
-rw-r--r--drivers/md/raid1.c13
-rw-r--r--drivers/md/raid1.h2
-rw-r--r--drivers/md/raid10.h2
-rw-r--r--drivers/md/raid5-ppl.c2
-rw-r--r--drivers/md/raid5.c22
-rw-r--r--drivers/media/Kconfig242
-rw-r--r--drivers/media/Makefile2
-rw-r--r--drivers/media/cec/Kconfig25
-rw-r--r--drivers/media/cec/Makefile16
-rw-r--r--drivers/media/cec/core/Makefile16
-rw-r--r--drivers/media/cec/core/cec-adap.c (renamed from drivers/media/cec/cec-adap.c)8
-rw-r--r--drivers/media/cec/core/cec-api.c (renamed from drivers/media/cec/cec-api.c)0
-rw-r--r--drivers/media/cec/core/cec-core.c (renamed from drivers/media/cec/cec-core.c)0
-rw-r--r--drivers/media/cec/core/cec-notifier.c (renamed from drivers/media/cec/cec-notifier.c)2
-rw-r--r--drivers/media/cec/core/cec-pin-error-inj.c (renamed from drivers/media/cec/cec-pin-error-inj.c)0
-rw-r--r--drivers/media/cec/core/cec-pin-priv.h (renamed from drivers/media/cec/cec-pin-priv.h)0
-rw-r--r--drivers/media/cec/core/cec-pin.c (renamed from drivers/media/cec/cec-pin.c)0
-rw-r--r--drivers/media/cec/core/cec-priv.h (renamed from drivers/media/cec/cec-priv.h)0
-rw-r--r--drivers/media/cec/platform/Kconfig120
-rw-r--r--drivers/media/cec/platform/Makefile14
-rw-r--r--drivers/media/cec/platform/cec-gpio/Makefile (renamed from drivers/media/platform/cec-gpio/Makefile)0
-rw-r--r--drivers/media/cec/platform/cec-gpio/cec-gpio.c (renamed from drivers/media/platform/cec-gpio/cec-gpio.c)18
-rw-r--r--drivers/media/cec/platform/cros-ec/Makefile2
-rw-r--r--drivers/media/cec/platform/cros-ec/cros-ec-cec.c (renamed from drivers/media/platform/cros-ec-cec/cros-ec-cec.c)0
-rw-r--r--drivers/media/cec/platform/meson/Makefile3
-rw-r--r--drivers/media/cec/platform/meson/ao-cec-g12a.c (renamed from drivers/media/platform/meson/ao-cec-g12a.c)0
-rw-r--r--drivers/media/cec/platform/meson/ao-cec.c (renamed from drivers/media/platform/meson/ao-cec.c)0
-rw-r--r--drivers/media/cec/platform/s5p/Makefile (renamed from drivers/media/platform/s5p-cec/Makefile)2
-rw-r--r--drivers/media/cec/platform/s5p/exynos_hdmi_cec.h (renamed from drivers/media/platform/s5p-cec/exynos_hdmi_cec.h)0
-rw-r--r--drivers/media/cec/platform/s5p/exynos_hdmi_cecctrl.c (renamed from drivers/media/platform/s5p-cec/exynos_hdmi_cecctrl.c)0
-rw-r--r--drivers/media/cec/platform/s5p/regs-cec.h (renamed from drivers/media/platform/s5p-cec/regs-cec.h)0
-rw-r--r--drivers/media/cec/platform/s5p/s5p_cec.c (renamed from drivers/media/platform/s5p-cec/s5p_cec.c)0
-rw-r--r--drivers/media/cec/platform/s5p/s5p_cec.h (renamed from drivers/media/platform/s5p-cec/s5p_cec.h)0
-rw-r--r--drivers/media/cec/platform/seco/Makefile2
-rw-r--r--drivers/media/cec/platform/seco/seco-cec.c (renamed from drivers/media/platform/seco-cec/seco-cec.c)2
-rw-r--r--drivers/media/cec/platform/seco/seco-cec.h (renamed from drivers/media/platform/seco-cec/seco-cec.h)0
-rw-r--r--drivers/media/cec/platform/sti/Makefile2
-rw-r--r--drivers/media/cec/platform/sti/stih-cec.c (renamed from drivers/media/platform/sti/cec/stih-cec.c)0
-rw-r--r--drivers/media/cec/platform/stm32/Makefile2
-rw-r--r--drivers/media/cec/platform/stm32/stm32-cec.c (renamed from drivers/media/platform/stm32/stm32-cec.c)0
-rw-r--r--drivers/media/cec/platform/tegra/Makefile2
-rw-r--r--drivers/media/cec/platform/tegra/tegra_cec.c (renamed from drivers/media/platform/tegra-cec/tegra_cec.c)0
-rw-r--r--drivers/media/cec/platform/tegra/tegra_cec.h (renamed from drivers/media/platform/tegra-cec/tegra_cec.h)0
-rw-r--r--drivers/media/cec/usb/Kconfig8
-rw-r--r--drivers/media/cec/usb/Makefile6
-rw-r--r--drivers/media/cec/usb/pulse8/Kconfig (renamed from drivers/media/usb/pulse8-cec/Kconfig)3
-rw-r--r--drivers/media/cec/usb/pulse8/Makefile (renamed from drivers/media/usb/pulse8-cec/Makefile)0
-rw-r--r--drivers/media/cec/usb/pulse8/pulse8-cec.c (renamed from drivers/media/usb/pulse8-cec/pulse8-cec.c)6
-rw-r--r--drivers/media/cec/usb/rainshadow/Kconfig (renamed from drivers/media/usb/rainshadow-cec/Kconfig)3
-rw-r--r--drivers/media/cec/usb/rainshadow/Makefile (renamed from drivers/media/usb/rainshadow-cec/Makefile)0
-rw-r--r--drivers/media/cec/usb/rainshadow/rainshadow-cec.c (renamed from drivers/media/usb/rainshadow-cec/rainshadow-cec.c)0
-rw-r--r--drivers/media/common/Kconfig2
-rw-r--r--drivers/media/common/videobuf2/videobuf2-dma-sg.c3
-rw-r--r--drivers/media/common/videobuf2/videobuf2-vmalloc.c3
-rw-r--r--drivers/media/dvb-core/Kconfig27
-rw-r--r--drivers/media/dvb-core/dvbdev.c5
-rw-r--r--drivers/media/dvb-frontends/Kconfig16
-rw-r--r--drivers/media/dvb-frontends/cxd2880/cxd2880_top.c2
-rw-r--r--drivers/media/dvb-frontends/dib3000.h2
-rw-r--r--drivers/media/dvb-frontends/dib3000mb.c2
-rw-r--r--drivers/media/dvb-frontends/drxk_hard.c2
-rw-r--r--drivers/media/dvb-frontends/eds1547.h2
-rw-r--r--drivers/media/dvb-frontends/lgdt3306a.c14
-rw-r--r--drivers/media/dvb-frontends/m88ds3103.c4
-rw-r--r--drivers/media/dvb-frontends/stv0900_core.c2
-rw-r--r--drivers/media/dvb-frontends/z0194a.h2
-rw-r--r--drivers/media/firewire/Kconfig5
-rw-r--r--drivers/media/i2c/Kconfig457
-rw-r--r--drivers/media/i2c/Makefile1
-rw-r--r--drivers/media/i2c/cx25840/cx25840-core.c40
-rw-r--r--drivers/media/i2c/et8ek8/Kconfig4
-rw-r--r--drivers/media/i2c/imx214.c4
-rw-r--r--drivers/media/i2c/imx219.c110
-rw-r--r--drivers/media/i2c/m5mols/Kconfig5
-rw-r--r--drivers/media/i2c/max2175.c6
-rw-r--r--drivers/media/i2c/ov13858.c13
-rw-r--r--drivers/media/i2c/ov2740.c1016
-rw-r--r--drivers/media/i2c/ov5640.c4
-rw-r--r--drivers/media/i2c/ov5670.c14
-rw-r--r--drivers/media/i2c/ov8856.c191
-rw-r--r--drivers/media/i2c/s5k5baf.c2
-rw-r--r--drivers/media/i2c/smiapp/Kconfig5
-rw-r--r--drivers/media/mc/Kconfig19
-rw-r--r--drivers/media/mc/mc-entity.c2
-rw-r--r--drivers/media/mmc/Kconfig1
-rw-r--r--drivers/media/mmc/siano/Kconfig2
-rw-r--r--drivers/media/mmc/siano/smssdio.c10
-rw-r--r--drivers/media/pci/Kconfig14
-rw-r--r--drivers/media/pci/bt8xx/Kconfig2
-rw-r--r--drivers/media/pci/bt8xx/bt878.c2
-rw-r--r--drivers/media/pci/bt8xx/btcx-risc.c2
-rw-r--r--drivers/media/pci/bt8xx/bttv-risc.c2
-rw-r--r--drivers/media/pci/cobalt/Kconfig4
-rw-r--r--drivers/media/pci/cx18/cx18-ioctl.c22
-rw-r--r--drivers/media/pci/cx18/cx18-streams.c4
-rw-r--r--drivers/media/pci/cx23885/cx23885-cards.c51
-rw-r--r--drivers/media/pci/cx23885/cx23885-dvb.c31
-rw-r--r--drivers/media/pci/cx23885/cx23885-video.c99
-rw-r--r--drivers/media/pci/cx88/cx88-core.c3
-rw-r--r--drivers/media/pci/cx88/cx88-input.c2
-rw-r--r--drivers/media/pci/cx88/cx88-video.c2
-rw-r--r--drivers/media/pci/ddbridge/Kconfig1
-rw-r--r--drivers/media/pci/ddbridge/Makefile2
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-core.c4
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-dummy-fe.c153
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-dummy-fe.h16
-rw-r--r--drivers/media/pci/intel/ipu3/Kconfig4
-rw-r--r--drivers/media/pci/ivtv/ivtv-ioctl.c26
-rw-r--r--drivers/media/pci/ivtv/ivtv-udma.c19
-rw-r--r--drivers/media/pci/ivtv/ivtv-yuv.c17
-rw-r--r--drivers/media/pci/ivtv/ivtvfb.c4
-rw-r--r--drivers/media/pci/mantis/mantis_dvb.c2
-rw-r--r--drivers/media/pci/meye/Kconfig2
-rw-r--r--drivers/media/pci/saa7134/saa7134-input.c2
-rw-r--r--drivers/media/pci/sta2x11/Kconfig6
-rw-r--r--drivers/media/platform/Kconfig191
-rw-r--r--drivers/media/platform/Makefile19
-rw-r--r--drivers/media/platform/am437x/Kconfig4
-rw-r--r--drivers/media/platform/atmel/Kconfig4
-rw-r--r--drivers/media/platform/cadence/Kconfig8
-rw-r--r--drivers/media/platform/coda/coda-bit.c9
-rw-r--r--drivers/media/platform/coda/coda-common.c199
-rw-r--r--drivers/media/platform/coda/coda-jpeg.c577
-rw-r--r--drivers/media/platform/coda/coda.h12
-rw-r--r--drivers/media/platform/cros-ec-cec/Makefile2
-rw-r--r--drivers/media/platform/davinci/vpbe_display.c1
-rw-r--r--drivers/media/platform/davinci/vpif_display.c1
-rw-r--r--drivers/media/platform/exynos4-is/Kconfig5
-rw-r--r--drivers/media/platform/marvell-ccic/cafe-driver.c2
-rw-r--r--drivers/media/platform/meson/Makefile3
-rw-r--r--drivers/media/platform/mtk-mdp/mtk_mdp_comp.c8
-rw-r--r--drivers/media/platform/omap3isp/ispvideo.c2
-rw-r--r--drivers/media/platform/pxa_camera.c4
-rw-r--r--drivers/media/platform/qcom/venus/core.c21
-rw-r--r--drivers/media/platform/qcom/venus/core.h7
-rw-r--r--drivers/media/platform/qcom/venus/helpers.c18
-rw-r--r--drivers/media/platform/qcom/venus/helpers.h2
-rw-r--r--drivers/media/platform/qcom/venus/hfi.c10
-rw-r--r--drivers/media/platform/qcom/venus/hfi.h3
-rw-r--r--drivers/media/platform/qcom/venus/hfi_cmds.h2
-rw-r--r--drivers/media/platform/qcom/venus/hfi_msgs.c2
-rw-r--r--drivers/media/platform/qcom/venus/hfi_msgs.h10
-rw-r--r--drivers/media/platform/qcom/venus/vdec.c200
-rw-r--r--drivers/media/platform/qcom/venus/venc.c10
-rw-r--r--drivers/media/platform/rcar-fcp.c5
-rw-r--r--drivers/media/platform/rcar-vin/Kconfig8
-rw-r--r--drivers/media/platform/rcar-vin/rcar-csi2.c4
-rw-r--r--drivers/media/platform/rcar-vin/rcar-v4l2.c40
-rw-r--r--drivers/media/platform/seco-cec/Makefile2
-rw-r--r--drivers/media/platform/sh_veu.c1203
-rw-r--r--drivers/media/platform/sti/cec/Makefile2
-rw-r--r--drivers/media/platform/stm32/Makefile1
-rw-r--r--drivers/media/platform/sunxi/Kconfig2
-rw-r--r--drivers/media/platform/sunxi/Makefile2
-rw-r--r--drivers/media/platform/sunxi/sun4i-csi/Kconfig6
-rw-r--r--drivers/media/platform/sunxi/sun4i-csi/Makefile2
-rw-r--r--drivers/media/platform/sunxi/sun6i-csi/Kconfig4
-rw-r--r--drivers/media/platform/sunxi/sun8i-di/sun8i-di.c6
-rw-r--r--drivers/media/platform/tegra-cec/Makefile2
-rw-r--r--drivers/media/platform/ti-vpe/cal.c439
-rw-r--r--drivers/media/platform/ti-vpe/cal_regs.h21
-rw-r--r--drivers/media/platform/video-mux.c87
-rw-r--r--drivers/media/platform/xilinx/Kconfig4
-rw-r--r--drivers/media/radio/Kconfig12
-rw-r--r--drivers/media/radio/si470x/Kconfig2
-rw-r--r--drivers/media/radio/wl128x/Kconfig5
-rw-r--r--drivers/media/rc/bpf-lirc.c4
-rw-r--r--drivers/media/rc/gpio-ir-tx.c51
-rw-r--r--drivers/media/rc/iguanair.c36
-rw-r--r--drivers/media/rc/ir-rx51.c2
-rw-r--r--drivers/media/rc/rc-core-priv.h22
-rw-r--r--drivers/media/spi/Kconfig8
-rw-r--r--drivers/media/test-drivers/Kconfig26
-rw-r--r--drivers/media/test-drivers/Makefile9
-rw-r--r--drivers/media/test-drivers/vicodec/Kconfig (renamed from drivers/media/platform/vicodec/Kconfig)2
-rw-r--r--drivers/media/test-drivers/vicodec/Makefile (renamed from drivers/media/platform/vicodec/Makefile)0
-rw-r--r--drivers/media/test-drivers/vicodec/codec-fwht.c (renamed from drivers/media/platform/vicodec/codec-fwht.c)0
-rw-r--r--drivers/media/test-drivers/vicodec/codec-fwht.h (renamed from drivers/media/platform/vicodec/codec-fwht.h)0
-rw-r--r--drivers/media/test-drivers/vicodec/codec-v4l2-fwht.c (renamed from drivers/media/platform/vicodec/codec-v4l2-fwht.c)0
-rw-r--r--drivers/media/test-drivers/vicodec/codec-v4l2-fwht.h (renamed from drivers/media/platform/vicodec/codec-v4l2-fwht.h)0
-rw-r--r--drivers/media/test-drivers/vicodec/vicodec-core.c (renamed from drivers/media/platform/vicodec/vicodec-core.c)15
-rw-r--r--drivers/media/test-drivers/vim2m.c (renamed from drivers/media/platform/vim2m.c)8
-rw-r--r--drivers/media/test-drivers/vimc/Kconfig (renamed from drivers/media/platform/vimc/Kconfig)4
-rw-r--r--drivers/media/test-drivers/vimc/Makefile (renamed from drivers/media/platform/vimc/Makefile)0
-rw-r--r--drivers/media/test-drivers/vimc/vimc-capture.c (renamed from drivers/media/platform/vimc/vimc-capture.c)37
-rw-r--r--drivers/media/test-drivers/vimc/vimc-common.c (renamed from drivers/media/platform/vimc/vimc-common.c)83
-rw-r--r--drivers/media/test-drivers/vimc/vimc-common.h (renamed from drivers/media/platform/vimc/vimc-common.h)88
-rw-r--r--drivers/media/test-drivers/vimc/vimc-core.c (renamed from drivers/media/platform/vimc/vimc-core.c)90
-rw-r--r--drivers/media/test-drivers/vimc/vimc-debayer.c (renamed from drivers/media/platform/vimc/vimc-debayer.c)88
-rw-r--r--drivers/media/test-drivers/vimc/vimc-scaler.c (renamed from drivers/media/platform/vimc/vimc-scaler.c)27
-rw-r--r--drivers/media/test-drivers/vimc/vimc-sensor.c (renamed from drivers/media/platform/vimc/vimc-sensor.c)23
-rw-r--r--drivers/media/test-drivers/vimc/vimc-streamer.c (renamed from drivers/media/platform/vimc/vimc-streamer.c)0
-rw-r--r--drivers/media/test-drivers/vimc/vimc-streamer.h (renamed from drivers/media/platform/vimc/vimc-streamer.h)7
-rw-r--r--drivers/media/test-drivers/vivid/Kconfig (renamed from drivers/media/platform/vivid/Kconfig)2
-rw-r--r--drivers/media/test-drivers/vivid/Makefile (renamed from drivers/media/platform/vivid/Makefile)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-cec.c (renamed from drivers/media/platform/vivid/vivid-cec.c)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-cec.h (renamed from drivers/media/platform/vivid/vivid-cec.h)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-core.c (renamed from drivers/media/platform/vivid/vivid-core.c)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-core.h (renamed from drivers/media/platform/vivid/vivid-core.h)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-ctrls.c (renamed from drivers/media/platform/vivid/vivid-ctrls.c)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-ctrls.h (renamed from drivers/media/platform/vivid/vivid-ctrls.h)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-kthread-cap.c (renamed from drivers/media/platform/vivid/vivid-kthread-cap.c)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-kthread-cap.h (renamed from drivers/media/platform/vivid/vivid-kthread-cap.h)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-kthread-out.c (renamed from drivers/media/platform/vivid/vivid-kthread-out.c)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-kthread-out.h (renamed from drivers/media/platform/vivid/vivid-kthread-out.h)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-kthread-touch.c (renamed from drivers/media/platform/vivid/vivid-kthread-touch.c)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-kthread-touch.h (renamed from drivers/media/platform/vivid/vivid-kthread-touch.h)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-meta-cap.c (renamed from drivers/media/platform/vivid/vivid-meta-cap.c)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-meta-cap.h (renamed from drivers/media/platform/vivid/vivid-meta-cap.h)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-meta-out.c (renamed from drivers/media/platform/vivid/vivid-meta-out.c)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-meta-out.h (renamed from drivers/media/platform/vivid/vivid-meta-out.h)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-osd.c (renamed from drivers/media/platform/vivid/vivid-osd.c)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-osd.h (renamed from drivers/media/platform/vivid/vivid-osd.h)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-radio-common.c (renamed from drivers/media/platform/vivid/vivid-radio-common.c)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-radio-common.h (renamed from drivers/media/platform/vivid/vivid-radio-common.h)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-radio-rx.c (renamed from drivers/media/platform/vivid/vivid-radio-rx.c)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-radio-rx.h (renamed from drivers/media/platform/vivid/vivid-radio-rx.h)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-radio-tx.c (renamed from drivers/media/platform/vivid/vivid-radio-tx.c)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-radio-tx.h (renamed from drivers/media/platform/vivid/vivid-radio-tx.h)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-rds-gen.c (renamed from drivers/media/platform/vivid/vivid-rds-gen.c)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-rds-gen.h (renamed from drivers/media/platform/vivid/vivid-rds-gen.h)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-sdr-cap.c (renamed from drivers/media/platform/vivid/vivid-sdr-cap.c)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-sdr-cap.h (renamed from drivers/media/platform/vivid/vivid-sdr-cap.h)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-touch-cap.c (renamed from drivers/media/platform/vivid/vivid-touch-cap.c)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-touch-cap.h (renamed from drivers/media/platform/vivid/vivid-touch-cap.h)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vbi-cap.c (renamed from drivers/media/platform/vivid/vivid-vbi-cap.c)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vbi-cap.h (renamed from drivers/media/platform/vivid/vivid-vbi-cap.h)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vbi-gen.c (renamed from drivers/media/platform/vivid/vivid-vbi-gen.c)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vbi-gen.h (renamed from drivers/media/platform/vivid/vivid-vbi-gen.h)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vbi-out.c (renamed from drivers/media/platform/vivid/vivid-vbi-out.c)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vbi-out.h (renamed from drivers/media/platform/vivid/vivid-vbi-out.h)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vid-cap.c (renamed from drivers/media/platform/vivid/vivid-vid-cap.c)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vid-cap.h (renamed from drivers/media/platform/vivid/vivid-vid-cap.h)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vid-common.c (renamed from drivers/media/platform/vivid/vivid-vid-common.c)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vid-common.h (renamed from drivers/media/platform/vivid/vivid-vid-common.h)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vid-out.c (renamed from drivers/media/platform/vivid/vivid-vid-out.c)0
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vid-out.h (renamed from drivers/media/platform/vivid/vivid-vid-out.h)0
-rw-r--r--drivers/media/tuners/Kconfig6
-rw-r--r--drivers/media/tuners/si2157.c419
-rw-r--r--drivers/media/tuners/si2157_priv.h2
-rw-r--r--drivers/media/usb/Kconfig12
-rw-r--r--drivers/media/usb/Makefile2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-avcore.c35
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-input.c5
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-video.c85
-rw-r--r--drivers/media/usb/dvb-usb-v2/Kconfig10
-rw-r--r--drivers/media/usb/dvb-usb-v2/ec168.c25
-rw-r--r--drivers/media/usb/dvb-usb-v2/gl861.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/lmedm04.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/lmedm04.h2
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf.h2
-rw-r--r--drivers/media/usb/dvb-usb/Kconfig3
-rw-r--r--drivers/media/usb/dvb-usb/a800.c8
-rw-r--r--drivers/media/usb/dvb-usb/af9005-fe.c2
-rw-r--r--drivers/media/usb/dvb-usb/af9005-remote.c2
-rw-r--r--drivers/media/usb/dvb-usb/af9005.c6
-rw-r--r--drivers/media/usb/dvb-usb/af9005.h2
-rw-r--r--drivers/media/usb/dvb-usb/az6027.c6
-rw-r--r--drivers/media/usb/dvb-usb/cxusb.c6
-rw-r--r--drivers/media/usb/dvb-usb/dib0700.h31
-rw-r--r--drivers/media/usb/dvb-usb/dib0700_core.c6
-rw-r--r--drivers/media/usb/dvb-usb/dibusb-common.c2
-rw-r--r--drivers/media/usb/dvb-usb/dibusb-mb.c4
-rw-r--r--drivers/media/usb/dvb-usb/dibusb-mc-common.c2
-rw-r--r--drivers/media/usb/dvb-usb/dibusb-mc.c2
-rw-r--r--drivers/media/usb/dvb-usb/dibusb.h2
-rw-r--r--drivers/media/usb/dvb-usb/digitv.c31
-rw-r--r--drivers/media/usb/dvb-usb/dtt200u-fe.c2
-rw-r--r--drivers/media/usb/dvb-usb/dtt200u.c2
-rw-r--r--drivers/media/usb/dvb-usb/dtt200u.h2
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb-common.h3
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb-firmware.c3
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb-init.c10
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb.h10
-rw-r--r--drivers/media/usb/dvb-usb/dw2102.c31
-rw-r--r--drivers/media/usb/dvb-usb/gp8psk.c2
-rw-r--r--drivers/media/usb/dvb-usb/gp8psk.h2
-rw-r--r--drivers/media/usb/dvb-usb/m920x.c6
-rw-r--r--drivers/media/usb/dvb-usb/nova-t-usb2.c2
-rw-r--r--drivers/media/usb/dvb-usb/opera1.c2
-rw-r--r--drivers/media/usb/dvb-usb/technisat-usb2.c4
-rw-r--r--drivers/media/usb/dvb-usb/ttusb2.c9
-rw-r--r--drivers/media/usb/dvb-usb/ttusb2.h2
-rw-r--r--drivers/media/usb/dvb-usb/umt-010.c2
-rw-r--r--drivers/media/usb/dvb-usb/vp702x-fe.c2
-rw-r--r--drivers/media/usb/dvb-usb/vp702x.c2
-rw-r--r--drivers/media/usb/dvb-usb/vp7045-fe.c2
-rw-r--r--drivers/media/usb/dvb-usb/vp7045.c2
-rw-r--r--drivers/media/usb/dvb-usb/vp7045.h2
-rw-r--r--drivers/media/usb/gspca/Kconfig2
-rw-r--r--drivers/media/usb/gspca/mr97310a.c1
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-core.c4
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-i2c.c2
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c4
-rw-r--r--drivers/media/usb/pwc/pwc-ctrl.c2
-rw-r--r--drivers/media/usb/pwc/pwc-if.c54
-rw-r--r--drivers/media/usb/ttusb-dec/ttusb_dec.c9
-rw-r--r--drivers/media/usb/zr364xx/Kconfig2
-rw-r--r--drivers/media/v4l2-core/Kconfig27
-rw-r--r--drivers/media/v4l2-core/Makefile3
-rw-r--r--drivers/media/v4l2-core/v4l2-common.c4
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls.c92
-rw-r--r--drivers/media/v4l2-core/v4l2-dev.c25
-rw-r--r--drivers/media/v4l2-core/v4l2-device.c7
-rw-r--r--drivers/media/v4l2-core/v4l2-fwnode.c106
-rw-r--r--drivers/media/v4l2-core/v4l2-h264.c270
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c72
-rw-r--r--drivers/media/v4l2-core/v4l2-jpeg.c632
-rw-r--r--drivers/media/v4l2-core/v4l2-mc.c95
-rw-r--r--drivers/media/v4l2-core/v4l2-mem2mem.c11
-rw-r--r--drivers/media/v4l2-core/v4l2-subdev.c91
-rw-r--r--drivers/media/v4l2-core/videobuf-core.c4
-rw-r--r--drivers/media/v4l2-core/videobuf-dma-contig.c4
-rw-r--r--drivers/media/v4l2-core/videobuf-dma-sg.c8
-rw-r--r--drivers/media/v4l2-core/videobuf-vmalloc.c2
-rw-r--r--drivers/memory/Kconfig11
-rw-r--r--drivers/memory/Makefile1
-rw-r--r--drivers/memory/bt1-l2-ctl.c322
-rw-r--r--drivers/memory/samsung/exynos5422-dmc.c8
-rw-r--r--drivers/message/fusion/mptbase.c8
-rw-r--r--drivers/mfd/Kconfig61
-rw-r--r--drivers/mfd/Makefile7
-rw-r--r--drivers/mfd/gateworks-gsc.c277
-rw-r--r--drivers/mfd/htc-i2cpld.c6
-rw-r--r--drivers/mfd/intel-lpss-pci.c2
-rw-r--r--drivers/mfd/intel_pmc_bxt.c468
-rw-r--r--drivers/mfd/intel_quark_i2c_gpio.c1
-rw-r--r--drivers/mfd/intel_soc_pmic_bxtwc.c34
-rw-r--r--drivers/mfd/intel_soc_pmic_mrfld.c10
-rw-r--r--drivers/mfd/max77620.c1
-rw-r--r--drivers/mfd/mp2629.c79
-rw-r--r--drivers/mfd/mt6358-irq.c235
-rw-r--r--drivers/mfd/mt6360-core.c424
-rw-r--r--drivers/mfd/mt6397-core.c101
-rw-r--r--drivers/mfd/mt6397-irq.c35
-rw-r--r--drivers/mfd/sm501.c24
-rw-r--r--drivers/mfd/sprd-sc27xx-spi.c1
-rw-r--r--drivers/mfd/stm32-timers.c32
-rw-r--r--drivers/mfd/stmfx.c22
-rw-r--r--drivers/mfd/stpmic1.c2
-rw-r--r--drivers/mfd/tqmx86.c2
-rw-r--r--drivers/mfd/vexpress-sysreg.c99
-rw-r--r--drivers/mfd/wcd934x.c1
-rw-r--r--drivers/mfd/wm8994-core.c8
-rw-r--r--drivers/misc/Kconfig9
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/cardreader/rts5249.c29
-rw-r--r--drivers/misc/cardreader/rts5260.c26
-rw-r--r--drivers/misc/cardreader/rts5261.c47
-rw-r--r--drivers/misc/cardreader/rtsx_pcr.c43
-rw-r--r--drivers/misc/cardreader/rtsx_pcr.h1
-rw-r--r--drivers/misc/cxl/Kconfig8
-rw-r--r--drivers/misc/cxl/cxllib.c9
-rw-r--r--drivers/misc/cxl/fault.c4
-rw-r--r--drivers/misc/fastrpc.c13
-rw-r--r--drivers/misc/genwqe/card_utils.c44
-rw-r--r--drivers/misc/habanalabs/Makefile3
-rw-r--r--drivers/misc/habanalabs/command_buffer.c28
-rw-r--r--drivers/misc/habanalabs/command_submission.c385
-rw-r--r--drivers/misc/habanalabs/context.c8
-rw-r--r--drivers/misc/habanalabs/debugfs.c116
-rw-r--r--drivers/misc/habanalabs/device.c53
-rw-r--r--drivers/misc/habanalabs/firmware_if.c297
-rw-r--r--drivers/misc/habanalabs/gaudi/Makefile5
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudi.c6748
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudiP.h261
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudi_coresight.c884
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudi_hwmgr.c121
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudi_security.c9090
-rw-r--r--drivers/misc/habanalabs/goya/goya.c345
-rw-r--r--drivers/misc/habanalabs/goya/goyaP.h12
-rw-r--r--drivers/misc/habanalabs/goya/goya_coresight.c2
-rw-r--r--drivers/misc/habanalabs/goya/goya_security.c100
-rw-r--r--drivers/misc/habanalabs/habanalabs.h187
-rw-r--r--drivers/misc/habanalabs/habanalabs_drv.c14
-rw-r--r--drivers/misc/habanalabs/habanalabs_ioctl.c21
-rw-r--r--drivers/misc/habanalabs/hw_queue.c118
-rw-r--r--drivers/misc/habanalabs/hwmon.c75
-rw-r--r--drivers/misc/habanalabs/include/armcp_if.h43
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/cpu_if_regs.h174
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_core_masks.h348
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_core_regs.h156
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_qm_masks.h800
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_qm_regs.h834
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma1_core_regs.h156
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma1_qm_regs.h834
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma2_core_regs.h156
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma2_qm_regs.h834
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma3_core_regs.h156
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma3_qm_regs.h834
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma4_core_regs.h156
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma4_qm_regs.h834
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma5_core_regs.h156
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma5_qm_regs.h834
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma6_core_regs.h156
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma6_qm_regs.h834
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma7_core_regs.h156
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma7_qm_regs.h834
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_n_down_ch0_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_n_down_ch1_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_n_regs.h860
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_s_down_ch0_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_s_down_ch1_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_s_regs.h860
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_n_down_ch0_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_n_down_ch1_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_n_regs.h860
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_s_down_ch0_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_s_down_ch1_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_s_regs.h860
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_blocks.h4974
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h299
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/mme0_ctrl_regs.h1456
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/mme0_qm_masks.h800
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/mme0_qm_regs.h834
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/mme1_ctrl_regs.h1456
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/mme2_ctrl_regs.h1456
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/mme2_qm_regs.h834
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/mme3_ctrl_regs.h1456
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/mmu_up_regs.h72
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_0_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_1_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_2_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_3_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_4_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_5_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_6_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_7_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_etr_regs.h114
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_global_conf_masks.h502
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_global_conf_regs.h1062
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_hbm_pll_regs.h114
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_pci_pll_regs.h114
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_timestamp_regs.h56
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_0_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_1_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_2_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_3_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_4_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_5_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_6_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_7_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/stlb_regs.h82
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_cfg_masks.h2578
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_cfg_regs.h1226
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_qm_masks.h800
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_qm_regs.h834
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/tpc1_cfg_regs.h1226
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/tpc1_qm_regs.h834
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/tpc2_cfg_regs.h1226
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/tpc2_qm_regs.h834
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/tpc3_cfg_regs.h1226
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/tpc3_qm_regs.h834
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/tpc4_cfg_regs.h1226
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/tpc4_qm_regs.h834
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/tpc5_cfg_regs.h1226
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/tpc5_qm_regs.h834
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/tpc6_cfg_regs.h1226
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/tpc6_qm_regs.h834
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/tpc7_cfg_regs.h1226
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/tpc7_qm_regs.h834
-rw-r--r--drivers/misc/habanalabs/include/gaudi/gaudi.h59
-rw-r--r--drivers/misc/habanalabs/include/gaudi/gaudi_async_events.h310
-rw-r--r--drivers/misc/habanalabs/include/gaudi/gaudi_async_ids_map_extended.h694
-rw-r--r--drivers/misc/habanalabs/include/gaudi/gaudi_coresight.h367
-rw-r--r--drivers/misc/habanalabs/include/gaudi/gaudi_fw_if.h36
-rw-r--r--drivers/misc/habanalabs/include/gaudi/gaudi_masks.h458
-rw-r--r--drivers/misc/habanalabs/include/gaudi/gaudi_packets.h212
-rw-r--r--drivers/misc/habanalabs/include/gaudi/gaudi_reg_map.h27
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h3
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/psoc_timestamp_regs.h56
-rw-r--r--drivers/misc/habanalabs/include/goya/goya_reg_map.h43
-rw-r--r--drivers/misc/habanalabs/include/hl_boot_if.h58
-rw-r--r--drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h2
-rw-r--r--drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_1.h16
-rw-r--r--drivers/misc/habanalabs/memory.c37
-rw-r--r--drivers/misc/habanalabs/pci.c63
-rw-r--r--drivers/misc/habanalabs/sysfs.c17
-rw-r--r--drivers/misc/lkdtm/bugs.c2
-rw-r--r--drivers/misc/mei/pci-me.c2
-rw-r--r--drivers/misc/mei/pci-txe.c2
-rw-r--r--drivers/misc/mic/Kconfig2
-rw-r--r--drivers/misc/mic/scif/scif_nodeqp.c2
-rw-r--r--drivers/misc/mic/scif/scif_rma.c26
-rw-r--r--drivers/misc/ocxl/context.c2
-rw-r--r--drivers/misc/pci_endpoint_test.c4
-rw-r--r--drivers/misc/sgi-gru/grufault.c25
-rw-r--r--drivers/misc/sgi-gru/grufile.c4
-rw-r--r--drivers/misc/sgi-xp/xpc_main.c10
-rw-r--r--drivers/misc/sgi-xp/xpnet.c8
-rw-r--r--drivers/misc/uacce/uacce.c172
-rw-r--r--drivers/misc/vexpress-syscfg.c280
-rw-r--r--drivers/misc/vmw_vmci/vmci_host.c2
-rw-r--r--drivers/misc/xilinx_sdfec.c61
-rw-r--r--drivers/mmc/core/bus.c14
-rw-r--r--drivers/mmc/core/core.c6
-rw-r--r--drivers/mmc/core/debugfs.c6
-rw-r--r--drivers/mmc/core/mmc.c6
-rw-r--r--drivers/mmc/core/quirks.h2
-rw-r--r--drivers/mmc/core/regulator.c17
-rw-r--r--drivers/mmc/core/sd.c30
-rw-r--r--drivers/mmc/core/sdio.c136
-rw-r--r--drivers/mmc/host/Kconfig31
-rw-r--r--drivers/mmc/host/Makefile3
-rw-r--r--drivers/mmc/host/android-goldfish.c10
-rw-r--r--drivers/mmc/host/atmel-mci.c12
-rw-r--r--drivers/mmc/host/au1xmmc.c20
-rw-r--r--drivers/mmc/host/bcm2835.c3
-rw-r--r--drivers/mmc/host/cavium.c3
-rw-r--r--drivers/mmc/host/cb710-mmc.c8
-rw-r--r--drivers/mmc/host/dw_mmc-k3.c2
-rw-r--r--drivers/mmc/host/dw_mmc.c9
-rw-r--r--drivers/mmc/host/jz4740_mmc.c13
-rw-r--r--drivers/mmc/host/meson-gx-mmc.c5
-rw-r--r--drivers/mmc/host/meson-mx-sdhc-clkc.c158
-rw-r--r--drivers/mmc/host/meson-mx-sdhc-mmc.c914
-rw-r--r--drivers/mmc/host/meson-mx-sdhc.h141
-rw-r--r--drivers/mmc/host/meson-mx-sdio.c5
-rw-r--r--drivers/mmc/host/mmc_hsq.c29
-rw-r--r--drivers/mmc/host/mmc_hsq.h1
-rw-r--r--drivers/mmc/host/mmc_spi.c20
-rw-r--r--drivers/mmc/host/mmci.c30
-rw-r--r--drivers/mmc/host/mmci_stm32_sdmmc.c15
-rw-r--r--drivers/mmc/host/mtk-sd.c21
-rw-r--r--drivers/mmc/host/mvsdio.c2
-rw-r--r--drivers/mmc/host/mxs-mmc.c3
-rw-r--r--drivers/mmc/host/omap.c2
-rw-r--r--drivers/mmc/host/omap_hsmmc.c2
-rw-r--r--drivers/mmc/host/owl-mmc.c8
-rw-r--r--drivers/mmc/host/renesas_sdhi.h5
-rw-r--r--drivers/mmc/host/renesas_sdhi_core.c146
-rw-r--r--drivers/mmc/host/rtsx_pci_sdmmc.c2
-rw-r--r--drivers/mmc/host/rtsx_usb_sdmmc.c2
-rw-r--r--drivers/mmc/host/s3cmci.c7
-rw-r--r--drivers/mmc/host/sdhci-cadence.c10
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c45
-rw-r--r--drivers/mmc/host/sdhci-esdhc-mcf.c521
-rw-r--r--drivers/mmc/host/sdhci-esdhc.h2
-rw-r--r--drivers/mmc/host/sdhci-msm.c162
-rw-r--r--drivers/mmc/host/sdhci-of-arasan.c632
-rw-r--r--drivers/mmc/host/sdhci-of-at91.c12
-rw-r--r--drivers/mmc/host/sdhci-of-dwcmshc.c74
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c34
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c9
-rw-r--r--drivers/mmc/host/sdhci-pci-gli.c106
-rw-r--r--drivers/mmc/host/sdhci-pci-o2micro.c8
-rw-r--r--drivers/mmc/host/sdhci-pci.h2
-rw-r--r--drivers/mmc/host/sdhci-sprd.c28
-rw-r--r--drivers/mmc/host/sdhci-tegra.c57
-rw-r--r--drivers/mmc/host/sdhci.c284
-rw-r--r--drivers/mmc/host/sdhci.h38
-rw-r--r--drivers/mmc/host/sdricoh_cs.c105
-rw-r--r--drivers/mmc/host/sunxi-mmc.c10
-rw-r--r--drivers/mmc/host/tifm_sd.c9
-rw-r--r--drivers/mmc/host/tmio_mmc_core.c10
-rw-r--r--drivers/mmc/host/uniphier-sd.c12
-rw-r--r--drivers/mmc/host/usdhi6rol0.c9
-rw-r--r--drivers/mmc/host/via-sdmmc.c7
-rw-r--r--drivers/mmc/host/wbsd.c26
-rw-r--r--drivers/mtd/Kconfig10
-rw-r--r--drivers/mtd/Makefile1
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0001.c13
-rw-r--r--drivers/mtd/devices/docg3.c10
-rw-r--r--drivers/mtd/maps/physmap-gemini.c5
-rw-r--r--drivers/mtd/mtdblock.c11
-rw-r--r--drivers/mtd/mtdcore.c194
-rw-r--r--drivers/mtd/mtdpart.c54
-rw-r--r--drivers/mtd/mtdpstore.c578
-rw-r--r--drivers/mtd/nand/raw/Kconfig12
-rw-r--r--drivers/mtd/nand/raw/Makefile2
-rw-r--r--drivers/mtd/nand/raw/ams-delta.c5
-rw-r--r--drivers/mtd/nand/raw/arasan-nand-controller.c1297
-rw-r--r--drivers/mtd/nand/raw/atmel/nand-controller.c2
-rw-r--r--drivers/mtd/nand/raw/au1550nd.c403
-rw-r--r--drivers/mtd/nand/raw/bcm47xxnflash/main.c6
-rw-r--r--drivers/mtd/nand/raw/brcmnand/brcmnand.c164
-rw-r--r--drivers/mtd/nand/raw/cadence-nand-controller.c17
-rw-r--r--drivers/mtd/nand/raw/cafe_nand.c16
-rw-r--r--drivers/mtd/nand/raw/cmx270_nand.c236
-rw-r--r--drivers/mtd/nand/raw/cs553x_nand.c199
-rw-r--r--drivers/mtd/nand/raw/davinci_nand.c312
-rw-r--r--drivers/mtd/nand/raw/denali.c60
-rw-r--r--drivers/mtd/nand/raw/diskonchip.c487
-rw-r--r--drivers/mtd/nand/raw/fsl_elbc_nand.c7
-rw-r--r--drivers/mtd/nand/raw/fsl_ifc_nand.c7
-rw-r--r--drivers/mtd/nand/raw/fsl_upm.c9
-rw-r--r--drivers/mtd/nand/raw/fsmc_nand.c19
-rw-r--r--drivers/mtd/nand/raw/gpio.c6
-rw-r--r--drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c189
-rw-r--r--drivers/mtd/nand/raw/hisi504_nand.c6
-rw-r--r--drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c170
-rw-r--r--drivers/mtd/nand/raw/internals.h12
-rw-r--r--drivers/mtd/nand/raw/lpc32xx_mlc.c7
-rw-r--r--drivers/mtd/nand/raw/lpc32xx_slc.c6
-rw-r--r--drivers/mtd/nand/raw/marvell_nand.c68
-rw-r--r--drivers/mtd/nand/raw/meson_nand.c5
-rw-r--r--drivers/mtd/nand/raw/mpc5121_nfc.c5
-rw-r--r--drivers/mtd/nand/raw/mtk_nand.c19
-rw-r--r--drivers/mtd/nand/raw/mxc_nand.c6
-rw-r--r--drivers/mtd/nand/raw/mxic_nand.c10
-rw-r--r--drivers/mtd/nand/raw/nand_base.c445
-rw-r--r--drivers/mtd/nand/raw/nand_bch.c10
-rw-r--r--drivers/mtd/nand/raw/nand_jedec.c32
-rw-r--r--drivers/mtd/nand/raw/nand_legacy.c8
-rw-r--r--drivers/mtd/nand/raw/nand_micron.c65
-rw-r--r--drivers/mtd/nand/raw/nand_onfi.c71
-rw-r--r--drivers/mtd/nand/raw/nand_timings.c11
-rw-r--r--drivers/mtd/nand/raw/nand_toshiba.c14
-rw-r--r--drivers/mtd/nand/raw/nandsim.c438
-rw-r--r--drivers/mtd/nand/raw/ndfc.c8
-rw-r--r--drivers/mtd/nand/raw/omap2.c8
-rw-r--r--drivers/mtd/nand/raw/omap_elm.c1
-rw-r--r--drivers/mtd/nand/raw/orion_nand.c8
-rw-r--r--drivers/mtd/nand/raw/oxnas_nand.c33
-rw-r--r--drivers/mtd/nand/raw/pasemi_nand.c9
-rw-r--r--drivers/mtd/nand/raw/plat_nand.c8
-rw-r--r--drivers/mtd/nand/raw/qcom_nandc.c13
-rw-r--r--drivers/mtd/nand/raw/r852.c6
-rw-r--r--drivers/mtd/nand/raw/s3c2410.c3
-rw-r--r--drivers/mtd/nand/raw/sh_flctl.c6
-rw-r--r--drivers/mtd/nand/raw/sharpsl.c14
-rw-r--r--drivers/mtd/nand/raw/socrates_nand.c8
-rw-r--r--drivers/mtd/nand/raw/stm32_fmc2_nand.c1067
-rw-r--r--drivers/mtd/nand/raw/sunxi_nand.c16
-rw-r--r--drivers/mtd/nand/raw/tango_nand.c13
-rw-r--r--drivers/mtd/nand/raw/tegra_nand.c6
-rw-r--r--drivers/mtd/nand/raw/tmio_nand.c8
-rw-r--r--drivers/mtd/nand/raw/txx9ndfmc.c6
-rw-r--r--drivers/mtd/nand/raw/vf610_nfc.c10
-rw-r--r--drivers/mtd/nand/raw/xway_nand.c8
-rw-r--r--drivers/mtd/parsers/cmdlinepart.c35
-rw-r--r--drivers/mtd/parsers/ofpart.c3
-rw-r--r--drivers/mtd/spi-nor/Kconfig4
-rw-r--r--drivers/mtd/spi-nor/controllers/Kconfig4
-rw-r--r--drivers/mtd/spi-nor/controllers/aspeed-smc.c2
-rw-r--r--drivers/mtd/spi-nor/controllers/hisi-sfc.c2
-rw-r--r--drivers/mtd/spi-nor/controllers/nxp-spifi.c2
-rw-r--r--drivers/mtd/spi-nor/core.c22
-rw-r--r--drivers/mtd/spi-nor/macronix.c6
-rw-r--r--drivers/mtd/spi-nor/micron-st.c6
-rw-r--r--drivers/mtd/spi-nor/sfdp.c34
-rw-r--r--drivers/mtd/spi-nor/sfdp.h11
-rw-r--r--drivers/mtd/spi-nor/spansion.c44
-rw-r--r--drivers/mtd/spi-nor/winbond.c29
-rw-r--r--drivers/mtd/ubi/build.c5
-rw-r--r--drivers/mtd/ubi/fastmap-wl.c39
-rw-r--r--drivers/mtd/ubi/fastmap.c11
-rw-r--r--drivers/mtd/ubi/io.c4
-rw-r--r--drivers/mtd/ubi/ubi.h6
-rw-r--r--drivers/mtd/ubi/wl.c28
-rw-r--r--drivers/net/Kconfig12
-rw-r--r--drivers/net/appletalk/Kconfig8
-rw-r--r--drivers/net/arcnet/Kconfig6
-rw-r--r--drivers/net/bonding/bond_alb.c46
-rw-r--r--drivers/net/bonding/bond_main.c300
-rw-r--r--drivers/net/bonding/bonding_priv.h2
-rw-r--r--drivers/net/caif/Kconfig4
-rw-r--r--drivers/net/dsa/b53/b53_common.c82
-rw-r--r--drivers/net/dsa/b53/b53_priv.h8
-rw-r--r--drivers/net/dsa/b53/b53_srab.c2
-rw-r--r--drivers/net/dsa/lantiq_gswip.c3
-rw-r--r--drivers/net/dsa/mt7530.c24
-rw-r--r--drivers/net/dsa/mt7530.h6
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c16
-rw-r--r--drivers/net/dsa/mv88e6xxx/serdes.c55
-rw-r--r--drivers/net/dsa/mv88e6xxx/serdes.h7
-rw-r--r--drivers/net/dsa/ocelot/felix.c85
-rw-r--r--drivers/net/dsa/ocelot/felix.h6
-rw-r--r--drivers/net/dsa/ocelot/felix_vsc9959.c192
-rw-r--r--drivers/net/dsa/qca8k.c3
-rw-r--r--drivers/net/dsa/sja1105/Kconfig9
-rw-r--r--drivers/net/dsa/sja1105/Makefile4
-rw-r--r--drivers/net/dsa/sja1105/sja1105.h103
-rw-r--r--drivers/net/dsa/sja1105/sja1105_clocking.c58
-rw-r--r--drivers/net/dsa/sja1105/sja1105_dynamic_config.c208
-rw-r--r--drivers/net/dsa/sja1105/sja1105_ethtool.c144
-rw-r--r--drivers/net/dsa/sja1105/sja1105_flower.c215
-rw-r--r--drivers/net/dsa/sja1105/sja1105_main.c1235
-rw-r--r--drivers/net/dsa/sja1105/sja1105_ptp.h13
-rw-r--r--drivers/net/dsa/sja1105/sja1105_spi.c16
-rw-r--r--drivers/net/dsa/sja1105/sja1105_static_config.c274
-rw-r--r--drivers/net/dsa/sja1105/sja1105_static_config.h118
-rw-r--r--drivers/net/dsa/sja1105/sja1105_tas.c127
-rw-r--r--drivers/net/dsa/sja1105/sja1105_tas.h36
-rw-r--r--drivers/net/dsa/sja1105/sja1105_vl.c782
-rw-r--r--drivers/net/dsa/sja1105/sja1105_vl.h74
-rw-r--r--drivers/net/dsa/vitesse-vsc73xx-platform.c10
-rw-r--r--drivers/net/ethernet/3com/3c509.c1
-rw-r--r--drivers/net/ethernet/3com/3c515.c1
-rw-r--r--drivers/net/ethernet/3com/3c59x.c4
-rw-r--r--drivers/net/ethernet/3com/Kconfig2
-rw-r--r--drivers/net/ethernet/8390/ne2k-pci.c345
-rw-r--r--drivers/net/ethernet/adaptec/starfire.c1
-rw-r--r--drivers/net/ethernet/agere/et131x.c4
-rw-r--r--drivers/net/ethernet/allwinner/sun4i-emac.c4
-rw-r--r--drivers/net/ethernet/altera/altera_tse_main.c4
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_admin_defs.h19
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.c124
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.h80
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_common_defs.h2
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_eth_com.c26
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_eth_com.h7
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h6
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_ethtool.c85
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c65
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.h17
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_regs_defs.h2
-rw-r--r--drivers/net/ethernet/amd/7990.c4
-rw-r--r--drivers/net/ethernet/amd/7990.h2
-rw-r--r--drivers/net/ethernet/amd/atarilance.c2
-rw-r--r--drivers/net/ethernet/amd/hplance.c2
-rw-r--r--drivers/net/ethernet/amd/mvme147.c2
-rw-r--r--drivers/net/ethernet/amd/sun3lance.c2
-rw-r--r--drivers/net/ethernet/amd/sunlance.c2
-rw-r--r--drivers/net/ethernet/apple/bmac.c2
-rw-r--r--drivers/net/ethernet/apple/mace.c2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/Makefile4
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_cfg.h4
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_common.h40
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c79
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_filters.c11
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw.h42
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c26
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_macsec.c6
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_main.c72
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c335
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.h27
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c42
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ptp.c27
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.c19
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_vec.c72
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_vec.h8
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c30
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c348
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h44
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h6
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c83
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h42
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h101
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c60
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h26
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c35
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c841
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.h14
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_internal.h127
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh.c234
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh.h102
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh_internal.h391
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.c131
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.h606
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c320
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c6
-rw-r--r--drivers/net/ethernet/atheros/ag71xx.c43
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c8
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c7
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c26
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c21
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c100
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h25
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c261
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h8
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h216
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c51
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c10
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h8
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c1
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c1
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c790
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h23
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c96
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c8
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_core.c1
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_device.h12
-rw-r--r--drivers/net/ethernet/cavium/octeon/octeon_mgmt.c5
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c1
-rw-r--r--drivers/net/ethernet/chelsio/Kconfig2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h44
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c172
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c96
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c17
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c204
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h14
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sched.c3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c40
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h10
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c2
-rw-r--r--drivers/net/ethernet/cirrus/Kconfig2
-rw-r--r--drivers/net/ethernet/cortina/gemini.c3
-rw-r--r--drivers/net/ethernet/dec/tulip/Kconfig4
-rw-r--r--drivers/net/ethernet/dec/tulip/de4x5.c10
-rw-r--r--drivers/net/ethernet/dlink/dl2k.c2
-rw-r--r--drivers/net/ethernet/dnet.c3
-rw-r--r--drivers/net/ethernet/faraday/ftmac100.c3
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c2
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/Kconfig10
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/Makefile1
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-dcb.c150
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c16
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c491
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h85
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c26
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h59
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpni.c177
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpni.h97
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.c34
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.h86
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_hw.h159
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf.c50
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_qos.c1103
-rw-r--r--drivers/net/ethernet/freescale/fec.h4
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c127
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c2
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mac-fcc.c2
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-fec.c2
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c1
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h7
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h25
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c166
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.h53
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c88
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile1
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c5
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h48
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c195
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h1
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c1710
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h40
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c79
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h87
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile1
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c3
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c388
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h32
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c7
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h87
-rw-r--r--drivers/net/ethernet/huawei/hinic/Makefile2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_dev.h5
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_ethtool.c538
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c47
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c205
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h90
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c98
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h7
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_if.c78
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_if.h26
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_io.c53
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_io.h26
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c1210
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.h154
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c17
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h12
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c8
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h7
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c9
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h6
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_main.c142
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_port.c207
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_port.h159
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_rx.c15
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_sriov.c1294
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_sriov.h109
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_tx.c17
-rw-r--r--drivers/net/ethernet/i825xx/82596.c2
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_hw.c2
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c22
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h1
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c6
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c117
-rw-r--r--drivers/net/ethernet/intel/e1000e/regs.h3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c6
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c52
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c166
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h17
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx_common.h40
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h5
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.c381
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.h3
-rw-r--r--drivers/net/ethernet/intel/ice/Makefile4
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h72
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h76
-rw-r--r--drivers/net/ethernet/intel/ice/ice_arfs.c663
-rw-r--r--drivers/net/ethernet/intel/ice/ice_arfs.h82
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.c117
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c146
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.h5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_controlq.c180
-rw-r--r--drivers/net/ethernet/intel/ice/ice_controlq.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.c105
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.h25
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_nl.c11
-rw-r--r--drivers/net/ethernet/intel/ice/ice_devlink.c23
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c134
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c1697
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fdir.c840
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fdir.h166
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_pipe.c919
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_pipe.h8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_type.h41
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flow.c355
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flow.h47
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fltr.c397
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fltr.h39
-rw-r--r--drivers/net/ethernet/intel/ice/ice_hw_autogen.h29
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h128
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c605
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.h14
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c733
-rw-r--r--drivers/net/ethernet/intel/ice/ice_nvm.c91
-rw-r--r--drivers/net/ethernet/intel/ice/ice_nvm.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_protocol_type.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.c14
-rw-r--r--drivers/net/ethernet/intel/ice/ice_status.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c96
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.h7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c415
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h28
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx_lib.c38
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx_lib.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h80
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c1221
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h15
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c380
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.h13
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.c9
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c3
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c2
-rw-r--r--drivers/net/ethernet/intel/igc/Makefile2
-rw-r--r--drivers/net/ethernet/intel/igc/igc.h449
-rw-r--r--drivers/net/ethernet/intel/igc/igc_base.c9
-rw-r--r--drivers/net/ethernet/intel/igc/igc_defines.h51
-rw-r--r--drivers/net/ethernet/intel/igc/igc_diag.c186
-rw-r--r--drivers/net/ethernet/intel/igc/igc_diag.h30
-rw-r--r--drivers/net/ethernet/intel/igc/igc_dump.c113
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ethtool.c783
-rw-r--r--drivers/net/ethernet/intel/igc/igc_hw.h3
-rw-r--r--drivers/net/ethernet/intel/igc/igc_mac.c9
-rw-r--r--drivers/net/ethernet/intel/igc/igc_mac.h4
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c1043
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ptp.c24
-rw-r--r--drivers/net/ethernet/intel/igc/igc_regs.h44
-rw-r--r--drivers/net/ethernet/intel/igc/igc_tsn.c157
-rw-r--r--drivers/net/ethernet/intel/igc/igc_tsn.h9
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h9
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c5
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c79
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c309
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c34
-rw-r--r--drivers/net/ethernet/korina.c2
-rw-r--r--drivers/net/ethernet/lantiq_xrx200.c3
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c5
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c30
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c52
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h17
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c8
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h4
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c2
-rw-r--r--drivers/net/ethernet/mediatek/Kconfig9
-rw-r--r--drivers/net/ethernet/mediatek/Makefile3
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c4
-rw-r--r--drivers/net/ethernet/mediatek/mtk_star_emac.c1651
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/crdump.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mr.c183
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cq.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/debugfs.c68
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c85
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ecpf.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h101
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h54
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/health.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c46
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c350
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c368
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.h34
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c646
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h77
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c327
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c134
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h153
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c101
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h37
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c113
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c51
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h50
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c87
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c88
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h28
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c81
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c68
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_common.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c269
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c935
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.h39
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c116
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c79
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c635
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.h62
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c66
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c170
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c235
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c160
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.h26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c279
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c322
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/lgcy.h17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ofld.h29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/chains.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/chains.h19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c600
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h50
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c427
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c136
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c86
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c135
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c34
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c161
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag.c118
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c203
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mcg.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mr.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c99
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pd.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/rl.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c33
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c53
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c70
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/transobj.c113
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/uar.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c142
-rw-r--r--drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Makefile1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_thermal.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h27
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c612
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h185
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c220
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c39
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_flow.c305
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c84
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_matchall.c378
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c621
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h54
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c1324
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.h18
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/switchx2.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/trap.h7
-rw-r--r--drivers/net/ethernet/micrel/Kconfig2
-rw-r--r--drivers/net/ethernet/micrel/Makefile2
-rw-r--r--drivers/net/ethernet/micrel/ks8851.h151
-rw-r--r--drivers/net/ethernet/micrel/ks8851_common.c (renamed from drivers/net/ethernet/micrel/ks8851.c)698
-rw-r--r--drivers/net/ethernet/micrel/ks8851_mll.c1393
-rw-r--r--drivers/net/ethernet/micrel/ks8851_par.c357
-rw-r--r--drivers/net/ethernet/micrel/ks8851_spi.c485
-rw-r--r--drivers/net/ethernet/microchip/encx24j600-regmap.c5
-rw-r--r--drivers/net/ethernet/microchip/encx24j600.c12
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ethtool.c4
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c81
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.h6
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ptp.c2
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.c5
-rw-r--r--drivers/net/ethernet/mscc/Makefile2
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c235
-rw-r--r--drivers/net/ethernet/mscc/ocelot.h3
-rw-r--r--drivers/net/ethernet/mscc/ocelot_ace.c113
-rw-r--r--drivers/net/ethernet/mscc/ocelot_ace.h5
-rw-r--r--drivers/net/ethernet/mscc/ocelot_board.c30
-rw-r--r--drivers/net/ethernet/mscc/ocelot_flower.c29
-rw-r--r--drivers/net/ethernet/mscc/ocelot_ptp.c324
-rw-r--r--drivers/net/ethernet/mscc/ocelot_ptp.h41
-rw-r--r--drivers/net/ethernet/mscc/ocelot_regs.c2
-rw-r--r--drivers/net/ethernet/mscc/ocelot_tc.c6
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c12
-rw-r--r--drivers/net/ethernet/natsemi/jazzsonic.c2
-rw-r--r--drivers/net/ethernet/natsemi/macsonic.c2
-rw-r--r--drivers/net/ethernet/natsemi/xtsonic.c2
-rw-r--r--drivers/net/ethernet/neterion/Kconfig4
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/action.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.c125
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.h27
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/match.c42
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/offload.c35
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_main.c3
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c6
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c5
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_repr.c16
-rw-r--r--drivers/net/ethernet/ni/nixge.c3
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c6
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.c14
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.h17
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_ethtool.c20
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_if.h1089
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c158
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.h28
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_main.c7
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_stats.c136
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_stats.h6
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.c49
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed.h17
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.c60
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.h10
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_debug.c26
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c14
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hsi.h49
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hw.c42
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hw.h15
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.c40
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.h11
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c52
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.c253
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.h28
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_rdma.c149
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_rdma.h21
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_roce.c49
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_spq.c16
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.c4
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede.h17
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c24
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_fp.c1
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c230
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h6
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c4
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-mac.c5
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-mac.h5
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac.c3
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c25
-rw-r--r--drivers/net/ethernet/realtek/8139too.c26
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c1010
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c10
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c3
-rw-r--r--drivers/net/ethernet/seeq/ether3.c5
-rw-r--r--drivers/net/ethernet/sfc/ef10.c217
-rw-r--r--drivers/net/ethernet/sfc/ef10_sriov.c27
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c25
-rw-r--r--drivers/net/ethernet/sfc/mcdi.h12
-rw-r--r--drivers/net/ethernet/sfc/mcdi_filters.c82
-rw-r--r--drivers/net/ethernet/sfc/mcdi_filters.h17
-rw-r--r--drivers/net/ethernet/sfc/mcdi_functions.c8
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port.c7
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h10
-rw-r--r--drivers/net/ethernet/sfc/nic.h11
-rw-r--r--drivers/net/ethernet/sfc/ptp.c7
-rw-r--r--drivers/net/ethernet/sfc/rx.c3
-rw-r--r--drivers/net/ethernet/sfc/siena.c8
-rw-r--r--drivers/net/ethernet/smsc/Kconfig4
-rw-r--r--drivers/net/ethernet/socionext/netsec.c32
-rw-r--r--drivers/net/ethernet/socionext/sni_ave.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig13
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c315
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c160
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c146
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c74
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4.h1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c67
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c3
-rw-r--r--drivers/net/ethernet/sun/cassini.c14
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c2
-rw-r--r--drivers/net/ethernet/sun/sunhme.c1
-rw-r--r--drivers/net/ethernet/sun/sunqe.c2
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c12
-rw-r--r--drivers/net/ethernet/ti/Kconfig25
-rw-r--r--drivers/net/ethernet/ti/Makefile3
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-ethtool.c36
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c205
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.h13
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-qos.c626
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-qos.h29
-rw-r--r--drivers/net/ethernet/ti/am65-cpts.c1086
-rw-r--r--drivers/net/ethernet/ti/am65-cpts.h74
-rw-r--r--drivers/net/ethernet/ti/cpmac.c2
-rw-r--r--drivers/net/ethernet/ti/cpsw.c22
-rw-r--r--drivers/net/ethernet/ti/cpsw_new.c25
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.c19
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.h2
-rw-r--r--drivers/net/ethernet/ti/cpts.c422
-rw-r--r--drivers/net/ethernet/ti/cpts.h27
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c2
-rw-r--r--drivers/net/ethernet/ti/k3-cppi-desc-pool.c4
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c3
-rw-r--r--drivers/net/ethernet/ti/tlan.c2
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c4
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c2
-rw-r--r--drivers/net/ethernet/via/Kconfig1
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c8
-rw-r--r--drivers/net/fddi/Kconfig2
-rw-r--r--drivers/net/geneve.c7
-rw-r--r--drivers/net/hamradio/Kconfig14
-rw-r--r--drivers/net/hamradio/bpqether.c20
-rw-r--r--drivers/net/hamradio/scc.c2
-rw-r--r--drivers/net/hamradio/yam.c1
-rw-r--r--drivers/net/hyperv/netvsc.c7
-rw-r--r--drivers/net/hyperv/netvsc_bpf.c2
-rw-r--r--drivers/net/hyperv/netvsc_drv.c4
-rw-r--r--drivers/net/ipa/gsi.c127
-rw-r--r--drivers/net/ipa/gsi.h15
-rw-r--r--drivers/net/ipa/ipa.h10
-rw-r--r--drivers/net/ipa/ipa_clock.c4
-rw-r--r--drivers/net/ipa/ipa_cmd.c59
-rw-r--r--drivers/net/ipa/ipa_cmd.h11
-rw-r--r--drivers/net/ipa/ipa_data-sc7180.c14
-rw-r--r--drivers/net/ipa/ipa_data-sdm845.c15
-rw-r--r--drivers/net/ipa/ipa_data.h29
-rw-r--r--drivers/net/ipa/ipa_endpoint.c176
-rw-r--r--drivers/net/ipa/ipa_endpoint.h3
-rw-r--r--drivers/net/ipa/ipa_main.c8
-rw-r--r--drivers/net/ipa/ipa_mem.c210
-rw-r--r--drivers/net/ipa/ipa_mem.h3
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c2
-rw-r--r--drivers/net/macsec.c2
-rw-r--r--drivers/net/macvlan.c14
-rw-r--r--drivers/net/net_failover.c3
-rw-r--r--drivers/net/netdevsim/dev.c10
-rw-r--r--drivers/net/phy/Kconfig21
-rw-r--r--drivers/net/phy/Makefile2
-rw-r--r--drivers/net/phy/at803x.c310
-rw-r--r--drivers/net/phy/bcm-phy-lib.c337
-rw-r--r--drivers/net/phy/bcm-phy-lib.h19
-rw-r--r--drivers/net/phy/bcm54140.c860
-rw-r--r--drivers/net/phy/bcm87xx.c2
-rw-r--r--drivers/net/phy/broadcom.c64
-rw-r--r--drivers/net/phy/cortina.c4
-rw-r--r--drivers/net/phy/dp83867.c6
-rw-r--r--drivers/net/phy/dp83869.c41
-rw-r--r--drivers/net/phy/fixed_phy.c28
-rw-r--r--drivers/net/phy/marvell.c486
-rw-r--r--drivers/net/phy/marvell10g.c2
-rw-r--r--drivers/net/phy/mdio-bcm-iproc.c4
-rw-r--r--drivers/net/phy/mdio-ipq4019.c160
-rw-r--r--drivers/net/phy/mdio-moxart.c1
-rw-r--r--drivers/net/phy/mdio-mscc-miim.c33
-rw-r--r--drivers/net/phy/mdio_bus.c27
-rw-r--r--drivers/net/phy/micrel.c128
-rw-r--r--drivers/net/phy/mscc/mscc.h3
-rw-r--r--drivers/net/phy/mscc/mscc_main.c114
-rw-r--r--drivers/net/phy/nxp-tja11xx.c412
-rw-r--r--drivers/net/phy/phy-c45.c1
-rw-r--r--drivers/net/phy/phy-core.c11
-rw-r--r--drivers/net/phy/phy.c188
-rw-r--r--drivers/net/phy/phy_device.c280
-rw-r--r--drivers/net/phy/phylink.c60
-rw-r--r--drivers/net/phy/realtek.c15
-rw-r--r--drivers/net/phy/swphy.c2
-rw-r--r--drivers/net/phy/teranetics.c1
-rw-r--r--drivers/net/plip/Kconfig2
-rw-r--r--drivers/net/ppp/ppp_generic.c2
-rw-r--r--drivers/net/rionet.c3
-rw-r--r--drivers/net/team/team.c1
-rw-r--r--drivers/net/tun.c18
-rw-r--r--drivers/net/usb/ax88179_178a.c79
-rw-r--r--drivers/net/usb/huawei_cdc_ncm.c2
-rw-r--r--drivers/net/usb/qmi_wwan.c2
-rw-r--r--drivers/net/usb/r8152.c22
-rw-r--r--drivers/net/usb/sierra_net.c5
-rw-r--r--drivers/net/veth.c34
-rw-r--r--drivers/net/virtio_net.c19
-rw-r--r--drivers/net/vmxnet3/Makefile2
-rw-r--r--drivers/net/vmxnet3/upt1_defs.h5
-rw-r--r--drivers/net/vmxnet3/vmxnet3_defs.h31
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c191
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c277
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h25
-rw-r--r--drivers/net/vrf.c1
-rw-r--r--drivers/net/vxlan.c374
-rw-r--r--drivers/net/wan/Kconfig4
-rw-r--r--drivers/net/wireless/Kconfig2
-rw-r--r--drivers/net/wireless/ath/ath10k/Kconfig7
-rw-r--r--drivers/net/wireless/ath/ath10k/bmi.c1
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.h4
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c31
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h38
-rw-r--r--drivers/net/wireless/ath/ath10k/coredump.h4
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.h10
-rw-r--r--drivers/net/wireless/ath/ath10k/hif.h20
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.c399
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.h40
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.c13
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.h66
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c42
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_tx.c51
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h7
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c329
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c71
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.h7
-rw-r--r--drivers/net/wireless/ath/ath10k/qmi.c74
-rw-r--r--drivers/net/wireless/ath/ath10k/qmi.h10
-rw-r--r--drivers/net/wireless/ath/ath10k/sdio.c216
-rw-r--r--drivers/net/wireless/ath/ath10k/sdio.h27
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.c186
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.h7
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/usb.c12
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-ops.h40
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.c142
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.h116
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c52
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h61
-rw-r--r--drivers/net/wireless/ath/ath11k/ahb.c61
-rw-r--r--drivers/net/wireless/ath/ath11k/ahb.h22
-rw-r--r--drivers/net/wireless/ath/ath11k/core.c47
-rw-r--r--drivers/net/wireless/ath/ath11k/core.h25
-rw-r--r--drivers/net/wireless/ath/ath11k/debug.c9
-rw-r--r--drivers/net/wireless/ath/ath11k/debug.h22
-rw-r--r--drivers/net/wireless/ath/ath11k/debug_htt_stats.c48
-rw-r--r--drivers/net/wireless/ath/ath11k/debug_htt_stats.h8
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs_sta.c297
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.c7
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.h14
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_rx.c60
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_tx.c69
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.c87
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_desc.h6
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_rx.c22
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_rx.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_tx.c13
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_tx.h1
-rw-r--r--drivers/net/wireless/ath/ath11k/hif.h65
-rw-r--r--drivers/net/wireless/ath/ath11k/htc.c4
-rw-r--r--drivers/net/wireless/ath/ath11k/hw.h7
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c102
-rw-r--r--drivers/net/wireless/ath/ath11k/peer.c35
-rw-r--r--drivers/net/wireless/ath/ath11k/peer.h1
-rw-r--r--drivers/net/wireless/ath/ath11k/thermal.c6
-rw-r--r--drivers/net/wireless/ath/ath11k/trace.h12
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.c170
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.h88
-rw-r--r--drivers/net/wireless/ath/ath5k/ani.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c26
-rw-r--r--drivers/net/wireless/ath/ath6kl/core.h4
-rw-r--r--drivers/net/wireless/ath/ath6kl/debug.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/hif.h8
-rw-r--r--drivers/net/wireless/ath/ath6kl/sdio.c17
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_calib.c49
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c16
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c58
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.h6
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c12
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c9
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.h3
-rw-r--r--drivers/net/wireless/ath/carl9170/fw.c4
-rw-r--r--drivers/net/wireless/ath/carl9170/fwcmd.h2
-rw-r--r--drivers/net/wireless/ath/carl9170/hw.h2
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c21
-rw-r--r--drivers/net/wireless/ath/wcn36xx/hal.h4
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c6
-rw-r--r--drivers/net/wireless/ath/wcn36xx/testmode.h2
-rw-r--r--drivers/net/wireless/ath/wil6210/fw.h16
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.h58
-rw-r--r--drivers/net/wireless/atmel/atmel.c3
-rw-r--r--drivers/net/wireless/broadcom/b43/main.c2
-rw-r--r--drivers/net/wireless/broadcom/b43/phy_n.c2
-rw-r--r--drivers/net/wireless/broadcom/b43/pio.c2
-rw-r--r--drivers/net/wireless/broadcom/b43/sdio.c4
-rw-r--r--drivers/net/wireless/broadcom/b43/xmit.c13
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/main.c1
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/xmit.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c30
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c301
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h28
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/commonring.c8
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c9
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h12
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c28
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h23
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c151
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h9
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c79
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c7
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/stf.c7
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/stf.h2
-rw-r--r--drivers/net/wireless/cisco/airo.c12
-rw-r--r--drivers/net/wireless/intel/ipw2x00/Kconfig4
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2100.c2
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.c29
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.h12
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw.h28
-rw-r--r--drivers/net/wireless/intel/iwlegacy/commands.h22
-rw-r--r--drivers/net/wireless/intel/iwlegacy/iwl-spectrum.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Makefile3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/22000.c173
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/dev.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/main.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rs.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.c175
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.h39
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/config.h39
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/d3.h39
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/debug.h14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/location.h110
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h49
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rx.h14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/scan.h26
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/soc.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/sta.h26
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c220
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.h11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/debugfs.c104
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/error-dump.h13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/img.c99
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/img.h19
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/init.c55
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/runtime.h20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/smem.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h38
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c42
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-fh.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-modparams.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-prph.h11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h128
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/coex.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/constants.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c63
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c105
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c175
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c62
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c38
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c44
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c69
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c57
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c21
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c139
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h142
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c38
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c195
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c111
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c132
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_hw.c22
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_proc.c2
-rw-r--r--drivers/net/wireless/intersil/orinoco/spectrum_cs.c3
-rw-r--r--drivers/net/wireless/intersil/p54/p54usb.c1
-rw-r--r--drivers/net/wireless/intersil/prism54/isl_oid.h8
-rw-r--r--drivers/net/wireless/intersil/prism54/islpci_mgt.h2
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c64
-rw-r--r--drivers/net/wireless/mac80211_hwsim.h8
-rw-r--r--drivers/net/wireless/marvell/libertas/cmd.h2
-rw-r--r--drivers/net/wireless/marvell/libertas/cmdresp.c5
-rw-r--r--drivers/net/wireless/marvell/libertas/if_sdio.c2
-rw-r--r--drivers/net/wireless/marvell/libertas/mesh.c6
-rw-r--r--drivers/net/wireless/marvell/libertas/mesh.h2
-rw-r--r--drivers/net/wireless/marvell/libertas/rx.c5
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/if_usb.c6
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c35
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cmdevt.c29
-rw-r--r--drivers/net/wireless/marvell/mwifiex/fw.h10
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h1
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.c38
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_cmd.c39
-rw-r--r--drivers/net/wireless/marvell/mwl8k.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/Kconfig1
-rw-r--r--drivers/net/wireless/mediatek/mt76/Makefile3
-rw-r--r--drivers/net/wireless/mediatek/mt76/agg-rx.c12
-rw-r--r--drivers/net/wireless/mediatek/mt76/debugfs.c21
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c33
-rw-r--r--drivers/net/wireless/mediatek/mt76/mcu.c9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mmio.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h68
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/init.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mac.c19
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mcu.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mcu.h7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/soc.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/Kconfig20
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/Makefile10
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c47
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/dma.c87
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c94
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h33
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/init.c291
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mac.c765
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mac.h39
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/main.c389
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.c1579
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.h314
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mmio.c73
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h190
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci.c135
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c174
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c184
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/regs.h108
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/soc.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb.c447
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb_init.c145
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c93
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/init.c26
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/pci.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02.h6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.c9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c21
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_util.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c21
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/Kconfig13
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/Makefile6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c463
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/dma.c285
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c243
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h125
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/init.c702
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.c1477
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.h346
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/main.c838
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.c3182
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.h1034
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h469
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/pci.c191
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/regs.h375
-rw-r--r--drivers/net/wireless/mediatek/mt76/tx.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb.c15
-rw-r--r--drivers/net/wireless/mediatek/mt76/util.c12
-rw-r--r--drivers/net/wireless/mediatek/mt76/util.h14
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/bus.h2
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/cfg80211.c83
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c2
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c2
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/qlink.h54
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.c4
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00mac.c3
-rw-r--r--drivers/net/wireless/ray_cs.c3
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c14
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c10
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.c8
-rw-r--r--drivers/net/wireless/realtek/rtw88/Kconfig26
-rw-r--r--drivers/net/wireless/realtek/rtw88/Makefile28
-rw-r--r--drivers/net/wireless/realtek/rtw88/bf.c14
-rw-r--r--drivers/net/wireless/realtek/rtw88/bf.h22
-rw-r--r--drivers/net/wireless/realtek/rtw88/coex.c24
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.c9
-rw-r--r--drivers/net/wireless/realtek/rtw88/efuse.c27
-rw-r--r--drivers/net/wireless/realtek/rtw88/efuse.h3
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.c55
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.h32
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac.c437
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac.h1
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac80211.c43
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.c82
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.h101
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.c82
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.h4
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.c94
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.h7
-rw-r--r--drivers/net/wireless/realtek/rtw88/reg.h108
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723d.c2753
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723d.h283
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723d_table.c1196
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723d_table.h15
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723de.c30
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723de.h14
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b.c52
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822be.c30
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822be.h14
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.c183
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.h28
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c_table.c14666
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c_table.h1
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822ce.c30
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822ce.h14
-rw-r--r--drivers/net/wireless/realtek/rtw88/rx.c1
-rw-r--r--drivers/net/wireless/realtek/rtw88/sec.c6
-rw-r--r--drivers/net/wireless/realtek/rtw88/tx.c11
-rw-r--r--drivers/net/wireless/realtek/rtw88/tx.h2
-rw-r--r--drivers/net/wireless/realtek/rtw88/util.c20
-rw-r--r--drivers/net/wireless/rndis_wlan.c32
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mac80211.c2
-rw-r--r--drivers/net/wireless/st/cw1200/cw1200_sdio.c9
-rw-r--r--drivers/net/wireless/st/cw1200/cw1200_spi.c6
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.h1
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c37
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.c1
-rw-r--r--drivers/nfc/s3fwrn5/firmware.c10
-rw-r--r--drivers/ntb/core.c9
-rw-r--r--drivers/ntb/hw/amd/ntb_hw_amd.c4
-rw-r--r--drivers/ntb/hw/idt/ntb_hw_idt.c6
-rw-r--r--drivers/ntb/hw/intel/Makefile2
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_gen1.c49
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_gen1.h1
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_gen3.c13
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_gen3.h8
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_gen4.c552
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_gen4.h100
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_intel.h12
-rw-r--r--drivers/ntb/test/ntb_perf.c49
-rw-r--r--drivers/ntb/test/ntb_pingpong.c14
-rw-r--r--drivers/ntb/test/ntb_tool.c9
-rw-r--r--drivers/nvdimm/blk.c6
-rw-r--r--drivers/nvdimm/btt.c6
-rw-r--r--drivers/nvdimm/nd.h19
-rw-r--r--drivers/nvdimm/pmem.c9
-rw-r--r--drivers/nvme/host/core.c324
-rw-r--r--drivers/nvme/host/fc.c577
-rw-r--r--drivers/nvme/host/fc.h227
-rw-r--r--drivers/nvme/host/lightnvm.c7
-rw-r--r--drivers/nvme/host/multipath.c16
-rw-r--r--drivers/nvme/host/nvme.h28
-rw-r--r--drivers/nvme/host/pci.c117
-rw-r--r--drivers/nvme/host/rdma.c321
-rw-r--r--drivers/nvme/host/tcp.c117
-rw-r--r--drivers/nvme/target/Kconfig1
-rw-r--r--drivers/nvme/target/admin-cmd.c42
-rw-r--r--drivers/nvme/target/configfs.c272
-rw-r--r--drivers/nvme/target/core.c166
-rw-r--r--drivers/nvme/target/discovery.c8
-rw-r--r--drivers/nvme/target/fabrics-cmd.c15
-rw-r--r--drivers/nvme/target/fc.c805
-rw-r--r--drivers/nvme/target/fcloop.c155
-rw-r--r--drivers/nvme/target/io-cmd-bdev.c120
-rw-r--r--drivers/nvme/target/io-cmd-file.c23
-rw-r--r--drivers/nvme/target/nvmet.h36
-rw-r--r--drivers/nvme/target/rdma.c420
-rw-r--r--drivers/nvme/target/tcp.c107
-rw-r--r--drivers/nvme/target/trace.h28
-rw-r--r--drivers/nvmem/core.c104
-rw-r--r--drivers/nvmem/imx-ocotp.c9
-rw-r--r--drivers/nvmem/jz4780-efuse.c4
-rw-r--r--drivers/nvmem/qfprom.c14
-rw-r--r--drivers/nvmem/zynqmp_nvmem.c11
-rw-r--r--drivers/of/dynamic.c3
-rw-r--r--drivers/of/fdt.c8
-rw-r--r--drivers/of/kobj.c3
-rw-r--r--drivers/of/of_mdio.c73
-rw-r--r--drivers/of/of_reserved_mem.c51
-rw-r--r--drivers/of/platform.c4
-rw-r--r--drivers/of/property.c20
-rw-r--r--drivers/opp/core.c119
-rw-r--r--drivers/opp/debugfs.c42
-rw-r--r--drivers/opp/of.c205
-rw-r--r--drivers/opp/opp.h10
-rw-r--r--drivers/oprofile/buffer_sync.c12
-rw-r--r--drivers/oprofile/event_buffer.c2
-rw-r--r--drivers/parport/daisy.c29
-rw-r--r--drivers/parport/ieee1284.c94
-rw-r--r--drivers/parport/ieee1284_ops.c70
-rw-r--r--drivers/parport/parport_amiga.c22
-rw-r--r--drivers/parport/parport_atari.c2
-rw-r--r--drivers/parport/parport_cs.c6
-rw-r--r--drivers/parport/parport_gsc.c25
-rw-r--r--drivers/parport/parport_gsc.h21
-rw-r--r--drivers/parport/parport_ip32.c117
-rw-r--r--drivers/parport/parport_mfc3.c21
-rw-r--r--drivers/parport/parport_pc.c263
-rw-r--r--drivers/parport/parport_sunbpp.c2
-rw-r--r--drivers/parport/probe.c34
-rw-r--r--drivers/parport/procfs.c45
-rw-r--r--drivers/parport/share.c292
-rw-r--r--drivers/pci/ats.c18
-rw-r--r--drivers/pci/controller/Kconfig32
-rw-r--r--drivers/pci/controller/Makefile4
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-ep.c2
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-host.c10
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence.h6
-rw-r--r--drivers/pci/controller/dwc/Kconfig17
-rw-r--r--drivers/pci/controller/dwc/Makefile1
-rw-r--r--drivers/pci/controller/dwc/pci-dra7xx.c8
-rw-r--r--drivers/pci/controller/dwc/pci-imx6.c4
-rw-r--r--drivers/pci/controller/dwc/pci-meson.c4
-rw-r--r--drivers/pci/controller/dwc/pcie-al.c2
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-ep.c22
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-host.c4
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.c7
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.h3
-rw-r--r--drivers/pci/controller/dwc/pcie-hisi.c19
-rw-r--r--drivers/pci/controller/dwc/pcie-intel-gw.c2
-rw-r--r--drivers/pci/controller/dwc/pcie-tegra194.c9
-rw-r--r--drivers/pci/controller/dwc/pcie-uniphier-ep.c383
-rw-r--r--drivers/pci/controller/mobiveil/pcie-mobiveil-host.c4
-rw-r--r--drivers/pci/controller/pci-aardvark.c266
-rw-r--r--drivers/pci/controller/pci-host-common.c18
-rw-r--r--drivers/pci/controller/pci-host-generic.c26
-rw-r--r--drivers/pci/controller/pci-hyperv.c126
-rw-r--r--drivers/pci/controller/pci-loongson.c247
-rw-r--r--drivers/pci/controller/pci-tegra.c7
-rw-r--r--drivers/pci/controller/pci-thunder-ecam.c14
-rw-r--r--drivers/pci/controller/pci-thunder-pem.c16
-rw-r--r--drivers/pci/controller/pci-v3-semi.c6
-rw-r--r--drivers/pci/controller/pci-xgene.c4
-rw-r--r--drivers/pci/controller/pcie-altera.c2
-rw-r--r--drivers/pci/controller/pcie-brcmstb.c37
-rw-r--r--drivers/pci/controller/pcie-mediatek.c3
-rw-r--r--drivers/pci/controller/pcie-rcar-ep.c563
-rw-r--r--drivers/pci/controller/pcie-rcar-host.c1130
-rw-r--r--drivers/pci/controller/pcie-rcar.c1211
-rw-r--r--drivers/pci/controller/pcie-rcar.h140
-rw-r--r--drivers/pci/controller/pcie-rockchip-ep.c2
-rw-r--r--drivers/pci/controller/pcie-tango.c13
-rw-r--r--drivers/pci/controller/vmd.c6
-rw-r--r--drivers/pci/ecam.c10
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-test.c3
-rw-r--r--drivers/pci/endpoint/pci-epc-mem.c204
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c13
-rw-r--r--drivers/pci/hotplug/pciehp.h2
-rw-r--r--drivers/pci/hotplug/pciehp_core.c2
-rw-r--r--drivers/pci/hotplug/rpaphp_core.c2
-rw-r--r--drivers/pci/hotplug/s390_pci_hpc.c16
-rw-r--r--drivers/pci/hotplug/shpchp.h2
-rw-r--r--drivers/pci/hotplug/shpchp_ctrl.c3
-rw-r--r--drivers/pci/hotplug/shpchp_pci.c5
-rw-r--r--drivers/pci/iov.c39
-rw-r--r--drivers/pci/of.c2
-rw-r--r--drivers/pci/p2pdma.c2
-rw-r--r--drivers/pci/pci-acpi.c6
-rw-r--r--drivers/pci/pci-bridge-emul.c61
-rw-r--r--drivers/pci/pci-driver.c34
-rw-r--r--drivers/pci/pci-label.c4
-rw-r--r--drivers/pci/pci.c64
-rw-r--r--drivers/pci/pcie/Kconfig1
-rw-r--r--drivers/pci/pcie/aer.c340
-rw-r--r--drivers/pci/pcie/aspm.c10
-rw-r--r--drivers/pci/pcie/dpc.c3
-rw-r--r--drivers/pci/pcie/edr.c4
-rw-r--r--drivers/pci/pcie/pme.c4
-rw-r--r--drivers/pci/pcie/portdrv.h13
-rw-r--r--drivers/pci/pcie/portdrv_pci.c2
-rw-r--r--drivers/pci/pcie/ptm.c22
-rw-r--r--drivers/pci/probe.c67
-rw-r--r--drivers/pci/quirks.c50
-rw-r--r--drivers/pci/remove.c2
-rw-r--r--drivers/pci/setup-bus.c115
-rw-r--r--drivers/pci/setup-res.c9
-rw-r--r--drivers/pci/switch/switchtec.c2
-rw-r--r--drivers/pcmcia/cs_internal.h6
-rw-r--r--drivers/pcmcia/electra_cf.c45
-rw-r--r--drivers/pcmcia/pcmcia_cis.c6
-rw-r--r--drivers/pcmcia/yenta_socket.c40
-rw-r--r--drivers/perf/Kconfig9
-rw-r--r--drivers/perf/arm_dsu_pmu.c4
-rw-r--r--drivers/perf/arm_smmuv3_pmu.c5
-rw-r--r--drivers/perf/arm_spe_pmu.c8
-rw-r--r--drivers/perf/hisilicon/Kconfig7
-rw-r--r--drivers/perf/hisilicon/Makefile3
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c10
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_hha_pmu.c12
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c12
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_pmu.c23
-rw-r--r--drivers/phy/amlogic/Kconfig15
-rw-r--r--drivers/phy/amlogic/Makefile1
-rw-r--r--drivers/phy/amlogic/phy-meson-gxl-usb3.c283
-rw-r--r--drivers/phy/amlogic/phy-meson8b-usb2.c149
-rw-r--r--drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c2
-rw-r--r--drivers/phy/broadcom/phy-bcm-sr-usb.c57
-rw-r--r--drivers/phy/broadcom/phy-brcm-usb.c16
-rw-r--r--drivers/phy/cadence/Kconfig9
-rw-r--r--drivers/phy/cadence/Makefile1
-rw-r--r--drivers/phy/cadence/phy-cadence-salvo.c325
-rw-r--r--drivers/phy/cadence/phy-cadence-sierra.c27
-rw-r--r--drivers/phy/intel/Kconfig15
-rw-r--r--drivers/phy/intel/Makefile1
-rw-r--r--drivers/phy/intel/phy-intel-combo.c632
-rw-r--r--drivers/phy/motorola/phy-cpcap-usb.c2
-rw-r--r--drivers/phy/qualcomm/Kconfig17
-rw-r--r--drivers/phy/qualcomm/Makefile2
-rw-r--r--drivers/phy/qualcomm/phy-qcom-ipq4019-usb.c148
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.c254
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.h238
-rw-r--r--drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c287
-rw-r--r--drivers/phy/samsung/phy-s5pv210-usb2.c4
-rw-r--r--drivers/phy/ti/phy-am654-serdes.c104
-rw-r--r--drivers/phy/ti/phy-j721e-wiz.c65
-rw-r--r--drivers/phy/ti/phy-omap-usb2.c60
-rw-r--r--drivers/pinctrl/Kconfig17
-rw-r--r--drivers/pinctrl/Makefile2
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm281xx.c2
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm2835.c80
-rw-r--r--drivers/pinctrl/freescale/Kconfig7
-rw-r--r--drivers/pinctrl/freescale/Makefile1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.c26
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx1-core.c3
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx8dxl.c193
-rw-r--r--drivers/pinctrl/intel/Kconfig8
-rw-r--r--drivers/pinctrl/intel/Makefile1
-rw-r--r--drivers/pinctrl/intel/pinctrl-baytrail.c9
-rw-r--r--drivers/pinctrl/intel/pinctrl-cannonlake.c58
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c278
-rw-r--r--drivers/pinctrl/intel/pinctrl-icelake.c30
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c22
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.h27
-rw-r--r--drivers/pinctrl/intel/pinctrl-jasperlake.c344
-rw-r--r--drivers/pinctrl/intel/pinctrl-lynxpoint.c10
-rw-r--r--drivers/pinctrl/intel/pinctrl-tigerlake.c32
-rw-r--r--drivers/pinctrl/mediatek/Kconfig13
-rw-r--r--drivers/pinctrl/mediatek/Makefile5
-rw-r--r--drivers/pinctrl/mediatek/mtk-eint.c9
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt6765.c4
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c28
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-paris.c6
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson.c14
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-ab8505.c1
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c6
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik.c4
-rw-r--r--drivers/pinctrl/pinctrl-at91-pio4.c2
-rw-r--r--drivers/pinctrl/pinctrl-bm1880.c1
-rw-r--r--drivers/pinctrl/pinctrl-ingenic.c21
-rw-r--r--drivers/pinctrl/pinctrl-lantiq.c2
-rw-r--r--drivers/pinctrl/pinctrl-mcp23s08.c514
-rw-r--r--drivers/pinctrl/pinctrl-mcp23s08.h52
-rw-r--r--drivers/pinctrl/pinctrl-mcp23s08_i2c.c124
-rw-r--r--drivers/pinctrl/pinctrl-mcp23s08_spi.c262
-rw-r--r--drivers/pinctrl/pinctrl-ocelot.c127
-rw-r--r--drivers/pinctrl/pinctrl-rk805.c4
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c11
-rw-r--r--drivers/pinctrl/pinctrl-rza1.c4
-rw-r--r--drivers/pinctrl/pinctrl-stmfx.c6
-rw-r--r--drivers/pinctrl/pinctrl-sx150x.c2
-rw-r--r--drivers/pinctrl/pxa/pinctrl-pxa2xx.c9
-rw-r--r--drivers/pinctrl/qcom/Kconfig9
-rw-r--r--drivers/pinctrl/qcom/Makefile1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8250.c1361
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.c82
-rw-r--r--drivers/pinctrl/sh-pfc/Kconfig4
-rw-r--r--drivers/pinctrl/sh-pfc/Makefile1
-rw-r--r--drivers/pinctrl/sh-pfc/core.c6
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7790.c744
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7269.c5
-rw-r--r--drivers/pinctrl/sh-pfc/sh_pfc.h1
-rw-r--r--drivers/pinctrl/sirf/pinctrl-sirf.c20
-rw-r--r--drivers/pinctrl/sprd/pinctrl-sprd.c4
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c7
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra-xusb.c2
-rw-r--r--drivers/pinctrl/zte/pinctrl-zx.c2
-rw-r--r--drivers/platform/chrome/Kconfig1
-rw-r--r--drivers/platform/chrome/chromeos_pstore.c3
-rw-r--r--drivers/platform/chrome/cros_ec_i2c.c2
-rw-r--r--drivers/platform/chrome/cros_ec_ishtp.c45
-rw-r--r--drivers/platform/chrome/cros_ec_typec.c119
-rw-r--r--drivers/platform/chrome/cros_usbpd_logger.c1
-rw-r--r--drivers/platform/chrome/wilco_ec/debugfs.c7
-rw-r--r--drivers/platform/mips/Kconfig6
-rw-r--r--drivers/platform/mips/Makefile1
-rw-r--r--drivers/platform/mips/rs780e-acpi.c169
-rw-r--r--drivers/platform/olpc/olpc-xo175-ec.c4
-rw-r--r--drivers/platform/x86/Kconfig66
-rw-r--r--drivers/platform/x86/Makefile4
-rw-r--r--drivers/platform/x86/acerhdf.c2
-rw-r--r--drivers/platform/x86/asus-laptop.c25
-rw-r--r--drivers/platform/x86/asus-nb-wmi.c1
-rw-r--r--drivers/platform/x86/asus-wmi.c117
-rw-r--r--drivers/platform/x86/dcdbas.c43
-rw-r--r--drivers/platform/x86/dell-laptop.c11
-rw-r--r--drivers/platform/x86/dell-wmi.c10
-rw-r--r--drivers/platform/x86/eeepc-laptop.c4
-rw-r--r--drivers/platform/x86/hp-wmi.c30
-rw-r--r--drivers/platform/x86/intel-hid.c7
-rw-r--r--drivers/platform/x86/intel-vbtn.c104
-rw-r--r--drivers/platform/x86/intel-wmi-sbl-fw-update.c145
-rw-r--r--drivers/platform/x86/intel_cht_int33fe_typec.c106
-rw-r--r--drivers/platform/x86/intel_mid_powerbtn.c15
-rw-r--r--drivers/platform/x86/intel_pmc_ipc.c949
-rw-r--r--drivers/platform/x86/intel_scu_ipc.c447
-rw-r--r--drivers/platform/x86/intel_scu_ipcutil.c43
-rw-r--r--drivers/platform/x86/intel_scu_pcidrv.c68
-rw-r--r--drivers/platform/x86/intel_scu_pltdrv.c60
-rw-r--r--drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c11
-rw-r--r--drivers/platform/x86/intel_telemetry_core.c17
-rw-r--r--drivers/platform/x86/intel_telemetry_debugfs.c15
-rw-r--r--drivers/platform/x86/intel_telemetry_pltdrv.c97
-rw-r--r--drivers/platform/x86/lg-laptop.c18
-rw-r--r--drivers/platform/x86/samsung-laptop.c3
-rw-r--r--drivers/platform/x86/sony-laptop.c60
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c173
-rw-r--r--drivers/platform/x86/toshiba_acpi.c26
-rw-r--r--drivers/platform/x86/touchscreen_dmi.c113
-rw-r--r--drivers/platform/x86/wmi.c45
-rw-r--r--drivers/pnp/pnpbios/pnpbios.h2
-rw-r--r--drivers/power/reset/Kconfig9
-rw-r--r--drivers/power/reset/Makefile1
-rw-r--r--drivers/power/reset/gpio-poweroff.c2
-rw-r--r--drivers/power/reset/ltc2952-poweroff.c3
-rw-r--r--drivers/power/reset/mt6323-poweroff.c2
-rw-r--r--drivers/power/reset/oxnas-restart.c233
-rw-r--r--drivers/power/reset/qcom-pon.c3
-rw-r--r--drivers/power/reset/syscon-reboot.c7
-rw-r--r--drivers/power/reset/vexpress-poweroff.c8
-rw-r--r--drivers/power/supply/88pm860x_battery.c8
-rw-r--r--drivers/power/supply/Kconfig59
-rw-r--r--drivers/power/supply/Makefile3
-rw-r--r--drivers/power/supply/ab8500_fg.c2
-rw-r--r--drivers/power/supply/axp288_charger.c5
-rw-r--r--drivers/power/supply/axp288_fuel_gauge.c6
-rw-r--r--drivers/power/supply/bd70528-charger.c140
-rw-r--r--drivers/power/supply/bd99954-charger.c1142
-rw-r--r--drivers/power/supply/bd99954-charger.h1075
-rw-r--r--drivers/power/supply/bq24190_charger.c2
-rw-r--r--drivers/power/supply/bq25890_charger.c200
-rw-r--r--drivers/power/supply/charger-manager.c40
-rw-r--r--drivers/power/supply/cw2015_battery.c750
-rw-r--r--drivers/power/supply/generic-adc-battery.c22
-rw-r--r--drivers/power/supply/lp8788-charger.c18
-rw-r--r--drivers/power/supply/max14577_charger.c10
-rw-r--r--drivers/power/supply/max14656_charger_detector.c5
-rw-r--r--drivers/power/supply/max17040_battery.c2
-rw-r--r--drivers/power/supply/max17042_battery.c8
-rw-r--r--drivers/power/supply/mp2629_charger.c669
-rw-r--r--drivers/power/supply/olpc_battery.c4
-rw-r--r--drivers/power/supply/power_supply_core.c8
-rw-r--r--drivers/power/supply/power_supply_hwmon.c64
-rw-r--r--drivers/power/supply/power_supply_sysfs.c484
-rw-r--r--drivers/power/supply/sbs-battery.c232
-rw-r--r--drivers/power/supply/sc27xx_fuel_gauge.c77
-rw-r--r--drivers/power/supply/smb347-charger.c5
-rw-r--r--drivers/power/supply/test_power.c2
-rw-r--r--drivers/powercap/intel_rapl_common.c4
-rw-r--r--drivers/ps3/ps3-lpm.c8
-rw-r--r--drivers/ps3/ps3-vuart.c5
-rw-r--r--drivers/ptp/ptp_chardev.c1
-rw-r--r--drivers/ptp/ptp_clock.c9
-rw-r--r--drivers/ptp/ptp_clockmatrix.c94
-rw-r--r--drivers/ptp/ptp_clockmatrix.h8
-rw-r--r--drivers/ptp/ptp_idt82p33.c6
-rw-r--r--drivers/ptp/ptp_ines.c8
-rw-r--r--drivers/ptp/ptp_kvm.c2
-rw-r--r--drivers/rapidio/devices/rio_mport_cdev.c27
-rw-r--r--drivers/regulator/88pg86x.c4
-rw-r--r--drivers/regulator/88pm800-regulator.c4
-rw-r--r--drivers/regulator/Kconfig11
-rw-r--r--drivers/regulator/Makefile1
-rw-r--r--drivers/regulator/ab8500.c22
-rw-r--r--drivers/regulator/act8865-regulator.c4
-rw-r--r--drivers/regulator/act8945a-regulator.c2
-rw-r--r--drivers/regulator/arizona-ldo1.c2
-rw-r--r--drivers/regulator/arizona-micsupp.c4
-rw-r--r--drivers/regulator/as3711-regulator.c6
-rw-r--r--drivers/regulator/as3722-regulator.c4
-rw-r--r--drivers/regulator/axp20x-regulator.c16
-rw-r--r--drivers/regulator/bcm590xx-regulator.c8
-rw-r--r--drivers/regulator/bd70528-regulator.c8
-rw-r--r--drivers/regulator/bd71828-regulator.c10
-rw-r--r--drivers/regulator/bd718x7-regulator.c238
-rw-r--r--drivers/regulator/core.c59
-rw-r--r--drivers/regulator/da903x.c2
-rw-r--r--drivers/regulator/db8500-prcmu.c2
-rw-r--r--drivers/regulator/helpers.c130
-rw-r--r--drivers/regulator/hi6421-regulator.c4
-rw-r--r--drivers/regulator/lochnagar-regulator.c4
-rw-r--r--drivers/regulator/lp873x-regulator.c4
-rw-r--r--drivers/regulator/lp87565-regulator.c2
-rw-r--r--drivers/regulator/lp8788-buck.c2
-rw-r--r--drivers/regulator/max77650-regulator.c2
-rw-r--r--drivers/regulator/max77826-regulator.c301
-rw-r--r--drivers/regulator/max8998.c105
-rw-r--r--drivers/regulator/mcp16502.c4
-rw-r--r--drivers/regulator/mp8859.c2
-rw-r--r--drivers/regulator/mt6323-regulator.c6
-rw-r--r--drivers/regulator/mt6358-regulator.c8
-rw-r--r--drivers/regulator/mt6380-regulator.c6
-rw-r--r--drivers/regulator/mt6397-regulator.c6
-rw-r--r--drivers/regulator/palmas-regulator.c4
-rw-r--r--drivers/regulator/qcom-rpmh-regulator.c10
-rw-r--r--drivers/regulator/qcom_rpm-regulator.c24
-rw-r--r--drivers/regulator/qcom_smd-regulator.c78
-rw-r--r--drivers/regulator/rk808-regulator.c10
-rw-r--r--drivers/regulator/s2mps11.c14
-rw-r--r--drivers/regulator/sky81452-regulator.c2
-rw-r--r--drivers/regulator/stpmic1_regulator.c18
-rw-r--r--drivers/regulator/tps65086-regulator.c10
-rw-r--r--drivers/regulator/tps65217-regulator.c4
-rw-r--r--drivers/regulator/tps65218-regulator.c6
-rw-r--r--drivers/regulator/tps65912-regulator.c4
-rw-r--r--drivers/regulator/tps80031-regulator.c7
-rw-r--r--drivers/regulator/twl-regulator.c4
-rw-r--r--drivers/regulator/twl6030-regulator.c2
-rw-r--r--drivers/regulator/wm831x-dcdc.c2
-rw-r--r--drivers/regulator/wm831x-ldo.c4
-rw-r--r--drivers/regulator/wm8350-regulator.c2
-rw-r--r--drivers/regulator/wm8400-regulator.c2
-rw-r--r--drivers/remoteproc/Kconfig9
-rw-r--r--drivers/remoteproc/Makefile1
-rw-r--r--drivers/remoteproc/ingenic_rproc.c280
-rw-r--r--drivers/remoteproc/mtk_scp.c4
-rw-r--r--drivers/remoteproc/qcom_common.c17
-rw-r--r--drivers/remoteproc/qcom_common.h5
-rw-r--r--drivers/remoteproc/qcom_q6v5_adsp.c3
-rw-r--r--drivers/remoteproc/qcom_q6v5_mss.c173
-rw-r--r--drivers/remoteproc/qcom_q6v5_pas.c68
-rw-r--r--drivers/remoteproc/qcom_q6v5_wcss.c6
-rw-r--r--drivers/remoteproc/qcom_sysmon.c116
-rw-r--r--drivers/remoteproc/qcom_wcnss.c1
-rw-r--r--drivers/remoteproc/remoteproc_core.c243
-rw-r--r--drivers/remoteproc/remoteproc_debugfs.c28
-rw-r--r--drivers/remoteproc/remoteproc_elf_loader.c24
-rw-r--r--drivers/remoteproc/remoteproc_internal.h17
-rw-r--r--drivers/remoteproc/remoteproc_virtio.c15
-rw-r--r--drivers/remoteproc/st_remoteproc.c2
-rw-r--r--drivers/remoteproc/st_slim_rproc.c2
-rw-r--r--drivers/remoteproc/stm32_rproc.c3
-rw-r--r--drivers/reset/hisilicon/hi6220_reset.c69
-rw-r--r--drivers/reset/reset-imx7.c101
-rw-r--r--drivers/reset/reset-zynqmp.c26
-rw-r--r--drivers/rpmsg/Kconfig6
-rw-r--r--drivers/rpmsg/Makefile3
-rw-r--r--drivers/rpmsg/qcom_glink_ssr.c (renamed from drivers/soc/qcom/glink_ssr.c)28
-rw-r--r--drivers/rpmsg/rpmsg_core.c2
-rw-r--r--drivers/rpmsg/virtio_rpmsg_bus.c2
-rw-r--r--drivers/rtc/Kconfig3
-rw-r--r--drivers/rtc/rtc-88pm860x.c6
-rw-r--r--drivers/rtc/rtc-abx80x.c66
-rw-r--r--drivers/rtc/rtc-fsl-ftm-alarm.c10
-rw-r--r--drivers/rtc/rtc-goldfish.c2
-rw-r--r--drivers/rtc/rtc-jz4740.c173
-rw-r--r--drivers/rtc/rtc-lpc24xx.c4
-rw-r--r--drivers/rtc/rtc-max77686.c22
-rw-r--r--drivers/rtc/rtc-mc13xxx.c4
-rw-r--r--drivers/rtc/rtc-mpc5121.c2
-rw-r--r--drivers/rtc/rtc-mt2712.c16
-rw-r--r--drivers/rtc/rtc-mt6397.c18
-rw-r--r--drivers/rtc/rtc-pcf2127.c31
-rw-r--r--drivers/rtc/rtc-rc5t619.c4
-rw-r--r--drivers/rtc/rtc-rv3028.c2
-rw-r--r--drivers/rtc/rtc-snvs.c59
-rw-r--r--drivers/rtc/rtc-stmp3xxx.c2
-rw-r--r--drivers/s390/block/dasd_genhd.c20
-rw-r--r--drivers/s390/block/dasd_ioctl.c76
-rw-r--r--drivers/s390/cio/Makefile2
-rw-r--r--drivers/s390/cio/chsc.c40
-rw-r--r--drivers/s390/cio/chsc.h50
-rw-r--r--drivers/s390/cio/device_ops.c23
-rw-r--r--drivers/s390/cio/idset.c12
-rw-r--r--drivers/s390/cio/qdio.h16
-rw-r--r--drivers/s390/cio/qdio_main.c299
-rw-r--r--drivers/s390/cio/qdio_setup.c100
-rw-r--r--drivers/s390/cio/qdio_thinint.c61
-rw-r--r--drivers/s390/cio/vfio_ccw_chp.c148
-rw-r--r--drivers/s390/cio/vfio_ccw_cp.c19
-rw-r--r--drivers/s390/cio/vfio_ccw_drv.c165
-rw-r--r--drivers/s390/cio/vfio_ccw_ops.c65
-rw-r--r--drivers/s390/cio/vfio_ccw_private.h16
-rw-r--r--drivers/s390/cio/vfio_ccw_trace.c1
-rw-r--r--drivers/s390/cio/vfio_ccw_trace.h30
-rw-r--r--drivers/s390/crypto/ap_bus.c94
-rw-r--r--drivers/s390/crypto/ap_bus.h25
-rw-r--r--drivers/s390/crypto/ap_card.c47
-rw-r--r--drivers/s390/crypto/ap_queue.c10
-rw-r--r--drivers/s390/net/Kconfig9
-rw-r--r--drivers/s390/net/ctcm_main.c40
-rw-r--r--drivers/s390/net/lcs.c59
-rw-r--r--drivers/s390/net/netiucv.c104
-rw-r--r--drivers/s390/net/qeth_core.h49
-rw-r--r--drivers/s390/net/qeth_core_main.c496
-rw-r--r--drivers/s390/net/qeth_core_mpc.h25
-rw-r--r--drivers/s390/net/qeth_core_sys.c15
-rw-r--r--drivers/s390/net/qeth_l2_main.c200
-rw-r--r--drivers/s390/net/qeth_l3_main.c19
-rw-r--r--drivers/s390/net/smsgiucv.c65
-rw-r--r--drivers/s390/scsi/zfcp_aux.c5
-rw-r--r--drivers/s390/scsi/zfcp_diag.h6
-rw-r--r--drivers/s390/scsi/zfcp_erp.c84
-rw-r--r--drivers/s390/scsi/zfcp_ext.h11
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c76
-rw-r--r--drivers/s390/scsi/zfcp_qdio.c19
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c131
-rw-r--r--drivers/s390/scsi/zfcp_sysfs.c16
-rw-r--r--drivers/sbus/char/flash.c1
-rw-r--r--drivers/sbus/char/oradax.c8
-rw-r--r--drivers/sbus/char/uctrl.c1
-rw-r--r--drivers/scsi/53c700.c2
-rw-r--r--drivers/scsi/BusLogic.c2
-rw-r--r--drivers/scsi/a2091.c1
-rw-r--r--drivers/scsi/a3000.c1
-rw-r--r--drivers/scsi/aacraid/aachba.c1
-rw-r--r--drivers/scsi/aacraid/commctrl.c13
-rw-r--r--drivers/scsi/aacraid/commsup.c4
-rw-r--r--drivers/scsi/aacraid/linit.c16
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_core.c18
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_core.c19
-rw-r--r--drivers/scsi/aic94xx/aic94xx_sds.c14
-rw-r--r--drivers/scsi/arm/cumana_2.c2
-rw-r--r--drivers/scsi/arm/eesox.c2
-rw-r--r--drivers/scsi/arm/powertec.c2
-rw-r--r--drivers/scsi/bfa/bfa_core.c2
-rw-r--r--drivers/scsi/bfa/bfa_fcpim.c4
-rw-r--r--drivers/scsi/bfa/bfa_fcs_lport.c4
-rw-r--r--drivers/scsi/bfa/bfa_fcs_rport.c4
-rw-r--r--drivers/scsi/bfa/bfa_ioc_ct.c4
-rw-r--r--drivers/scsi/bfa/bfa_svc.c7
-rw-r--r--drivers/scsi/bfa/bfad.c2
-rw-r--r--drivers/scsi/bfa/bfad_attr.c4
-rw-r--r--drivers/scsi/bfa/bfad_bsg.c2
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c4
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_io.c1
-rw-r--r--drivers/scsi/cxgbi/cxgb3i/cxgb3i.c18
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c7
-rw-r--r--drivers/scsi/cxlflash/main.c1
-rw-r--r--drivers/scsi/dpt_i2o.c4
-rw-r--r--drivers/scsi/esas2r/esas2r_ioctl.c2
-rw-r--r--drivers/scsi/fcoe/fcoe.c4
-rw-r--r--drivers/scsi/fnic/fnic_main.c4
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c6
-rw-r--r--drivers/scsi/fnic/vnic_dev.c12
-rw-r--r--drivers/scsi/fnic/vnic_wq.c4
-rw-r--r--drivers/scsi/gdth.c4
-rw-r--r--drivers/scsi/gvp11.c1
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c5
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v1_hw.c13
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c17
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c26
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c4
-rw-r--r--drivers/scsi/ipr.c5
-rw-r--r--drivers/scsi/isci/isci.h6
-rw-r--r--drivers/scsi/lasi700.c1
-rw-r--r--drivers/scsi/libiscsi.c4
-rw-r--r--drivers/scsi/libsas/sas_ata.c1
-rw-r--r--drivers/scsi/lpfc/lpfc.h25
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c111
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h9
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c20
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c12
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c16
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c89
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c3
-rw-r--r--drivers/scsi/lpfc/lpfc_mem.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c13
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c528
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.h180
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.c841
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.h158
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c173
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/mac53c94.c2
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.c6
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h8
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c10
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fp.c12
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c81
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.h6
-rw-r--r--drivers/scsi/mesh.c2
-rw-r--r--drivers/scsi/mpt3sas/Makefile3
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c266
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h21
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_debugfs.c157
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c8
-rw-r--r--drivers/scsi/mvme147.c1
-rw-r--r--drivers/scsi/mvsas/mv_init.c6
-rw-r--r--drivers/scsi/pmcraid.c4
-rw-r--r--drivers/scsi/qedf/qedf.h6
-rw-r--r--drivers/scsi/qedf/qedf_els.c10
-rw-r--r--drivers/scsi/qedf/qedf_io.c48
-rw-r--r--drivers/scsi/qedf/qedf_main.c135
-rw-r--r--drivers/scsi/qedi/qedi_iscsi.c21
-rw-r--r--drivers/scsi/qedi/qedi_main.c22
-rw-r--r--drivers/scsi/qla1280.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c40
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c8
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c866
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.h443
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h728
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h768
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h26
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c380
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h8
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c140
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c287
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c123
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_mr.c120
-rw-r--r--drivers/scsi/qla2xxx/qla_mr.h32
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.c16
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.h64
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c208
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.h36
-rw-r--r--drivers/scsi/qla2xxx/qla_nx2.c26
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c133
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c323
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c111
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h232
-rw-r--r--drivers/scsi/qla2xxx/qla_tmpl.c140
-rw-r--r--drivers/scsi/qla2xxx/qla_tmpl.h2
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c16
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c2
-rw-r--r--drivers/scsi/qlogicpti.c2
-rw-r--r--drivers/scsi/scsi_debug.c2048
-rw-r--r--drivers/scsi/scsi_error.c2
-rw-r--r--drivers/scsi/scsi_ioctl.c20
-rw-r--r--drivers/scsi/scsi_lib.c313
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c68
-rw-r--r--drivers/scsi/sd.c35
-rw-r--r--drivers/scsi/sd.h43
-rw-r--r--drivers/scsi/sd_zbc.c402
-rw-r--r--drivers/scsi/sgiwd93.c2
-rw-r--r--drivers/scsi/sni_53c710.c1
-rw-r--r--drivers/scsi/snic/snic.h2
-rw-r--r--drivers/scsi/snic/snic_ctl.c5
-rw-r--r--drivers/scsi/sr.c29
-rw-r--r--drivers/scsi/st.c5
-rw-r--r--drivers/scsi/storvsc_drv.c96
-rw-r--r--drivers/scsi/ufs/ti-j721e-ufs.c13
-rw-r--r--drivers/scsi/ufs/ufs-mediatek.c30
-rw-r--r--drivers/scsi/ufs/ufs-qcom.c10
-rw-r--r--drivers/scsi/ufs/ufs-sysfs.c61
-rw-r--r--drivers/scsi/ufs/ufs.h43
-rw-r--r--drivers/scsi/ufs/ufs_quirks.h7
-rw-r--r--drivers/scsi/ufs/ufshcd.c515
-rw-r--r--drivers/scsi/ufs/ufshcd.h45
-rw-r--r--drivers/scsi/vmw_pvscsi.c2
-rw-r--r--drivers/scsi/zorro_esp.c2
-rw-r--r--drivers/slimbus/core.c6
-rw-r--r--drivers/slimbus/qcom-ngd-ctrl.c5
-rw-r--r--drivers/soc/amlogic/meson-ee-pwrc.c112
-rw-r--r--drivers/soc/fsl/dpio/dpio-service.c6
-rw-r--r--drivers/soc/fsl/dpio/qbman-portal.c13
-rw-r--r--drivers/soc/fsl/qbman/qman.c5
-rw-r--r--drivers/soc/fsl/qe/qe.c4
-rw-r--r--drivers/soc/fsl/qe/ucc.c2
-rw-r--r--drivers/soc/imx/Makefile3
-rw-r--r--drivers/soc/imx/soc-imx.c192
-rw-r--r--drivers/soc/imx/soc-imx8m.c7
-rw-r--r--drivers/soc/kendryte/k210-sysctl.c12
-rw-r--r--drivers/soc/mediatek/Kconfig7
-rw-r--r--drivers/soc/mediatek/Makefile1
-rw-r--r--drivers/soc/mediatek/mtk-mmsys.c378
-rw-r--r--drivers/soc/qcom/Kconfig25
-rw-r--r--drivers/soc/qcom/Makefile2
-rw-r--r--drivers/soc/qcom/cmd-db.c78
-rw-r--r--drivers/soc/qcom/pdr_interface.c4
-rw-r--r--drivers/soc/qcom/qcom_aoss.c1
-rw-r--r--drivers/soc/qcom/rpmh-internal.h59
-rw-r--r--drivers/soc/qcom/rpmh-rsc.c746
-rw-r--r--drivers/soc/qcom/rpmh.c97
-rw-r--r--drivers/soc/qcom/rpmhpd.c24
-rw-r--r--drivers/soc/qcom/rpmpd.c5
-rw-r--r--drivers/soc/qcom/smp2p.c4
-rw-r--r--drivers/soc/qcom/socinfo.c6
-rw-r--r--drivers/soc/renesas/Kconfig11
-rw-r--r--drivers/soc/renesas/Makefile1
-rw-r--r--drivers/soc/renesas/r8a7742-sysc.c42
-rw-r--r--drivers/soc/renesas/rcar-rst.c1
-rw-r--r--drivers/soc/renesas/rcar-sysc.c3
-rw-r--r--drivers/soc/renesas/rcar-sysc.h1
-rw-r--r--drivers/soc/sifive/sifive_l2_cache.c40
-rw-r--r--drivers/soc/tegra/Kconfig1
-rw-r--r--drivers/soc/tegra/fuse/fuse-tegra.c57
-rw-r--r--drivers/soc/tegra/fuse/fuse-tegra20.c1
-rw-r--r--drivers/soc/tegra/fuse/fuse-tegra30.c6
-rw-r--r--drivers/soc/tegra/fuse/fuse.h8
-rw-r--r--drivers/soc/tegra/fuse/tegra-apbmisc.c32
-rw-r--r--drivers/soc/tegra/pmc.c3
-rw-r--r--drivers/soc/ti/Kconfig10
-rw-r--r--drivers/soc/ti/Makefile1
-rw-r--r--drivers/soc/ti/k3-socinfo.c152
-rw-r--r--drivers/soc/ti/knav_qmss_queue.c2
-rw-r--r--drivers/soc/xilinx/zynqmp_pm_domains.c26
-rw-r--r--drivers/soc/xilinx/zynqmp_power.c17
-rw-r--r--drivers/soundwire/Makefile8
-rw-r--r--drivers/soundwire/bus.c71
-rw-r--r--drivers/soundwire/bus.h4
-rw-r--r--drivers/soundwire/bus_type.c22
-rw-r--r--drivers/soundwire/cadence_master.c8
-rw-r--r--drivers/soundwire/debugfs.c2
-rw-r--r--drivers/soundwire/intel.c13
-rw-r--r--drivers/soundwire/intel_init.c4
-rw-r--r--drivers/soundwire/master.c172
-rw-r--r--drivers/soundwire/mipi_disco.c11
-rw-r--r--drivers/soundwire/qcom.c34
-rw-r--r--drivers/soundwire/slave.c10
-rw-r--r--drivers/soundwire/sysfs_local.h14
-rw-r--r--drivers/soundwire/sysfs_slave.c214
-rw-r--r--drivers/soundwire/sysfs_slave_dpn.c300
-rw-r--r--drivers/spi/Kconfig22
-rw-r--r--drivers/spi/Makefile6
-rw-r--r--drivers/spi/spi-amd.c315
-rw-r--r--drivers/spi/spi-armada-3700.c10
-rw-r--r--drivers/spi/spi-atmel.c1
-rw-r--r--drivers/spi/spi-axi-spi-engine.c32
-rw-r--r--drivers/spi/spi-bcm-qspi.c181
-rw-r--r--drivers/spi/spi-bcm2835.c26
-rw-r--r--drivers/spi/spi-bcm2835aux.c4
-rw-r--r--drivers/spi/spi-dw-core.c (renamed from drivers/spi/spi-dw.c)233
-rw-r--r--drivers/spi/spi-dw-dma.c480
-rw-r--r--drivers/spi/spi-dw-mid.c322
-rw-r--r--drivers/spi/spi-dw-mmio.c86
-rw-r--r--drivers/spi/spi-dw-pci.c50
-rw-r--r--drivers/spi/spi-dw.h66
-rw-r--r--drivers/spi/spi-ep93xx.c8
-rw-r--r--drivers/spi/spi-fsl-dspi.c47
-rw-r--r--drivers/spi/spi-fsl-lpspi.c21
-rw-r--r--drivers/spi/spi-fsl-qspi.c11
-rw-r--r--drivers/spi/spi-fsl-spi.c2
-rw-r--r--drivers/spi/spi-hisi-sfc-v3xx.c26
-rw-r--r--drivers/spi/spi-imx.c31
-rw-r--r--drivers/spi/spi-mem.c10
-rw-r--r--drivers/spi/spi-mtk-nor.c2
-rw-r--r--drivers/spi/spi-mux.c8
-rw-r--r--drivers/spi/spi-orion.c70
-rw-r--r--drivers/spi/spi-pxa2xx.c6
-rw-r--r--drivers/spi/spi-rb4xx.c19
-rw-r--r--drivers/spi/spi-rockchip.c229
-rw-r--r--drivers/spi/spi-sc18is602.c2
-rw-r--r--drivers/spi/spi-sh-msiof.c2
-rw-r--r--drivers/spi/spi-sprd-adi.c2
-rw-r--r--drivers/spi/spi-stm32-qspi.c62
-rw-r--r--drivers/spi/spi-stm32.c19
-rw-r--r--drivers/spi/spi-sun6i.c1
-rw-r--r--drivers/spi/spi-tegra114.c1
-rw-r--r--drivers/spi/spi-tegra20-sflash.c1
-rw-r--r--drivers/spi/spi-tegra20-slink.c1
-rw-r--r--drivers/spi/spi-uniphier.c11
-rw-r--r--drivers/spi/spi-zynqmp-gqspi.c5
-rw-r--r--drivers/spi/spi.c14
-rw-r--r--drivers/spi/spidev.c3
-rw-r--r--drivers/ssb/scan.c6
-rw-r--r--drivers/ssb/sprom.c12
-rw-r--r--drivers/staging/android/ashmem.c4
-rw-r--r--drivers/staging/android/ion/ion_heap.c4
-rw-r--r--drivers/staging/android/ion/ion_page_pool.c4
-rw-r--r--drivers/staging/axis-fifo/axis-fifo.c12
-rw-r--r--drivers/staging/comedi/Makefile1
-rw-r--r--drivers/staging/comedi/comedi_compat32.c455
-rw-r--r--drivers/staging/comedi/comedi_compat32.h28
-rw-r--r--drivers/staging/comedi/comedi_fops.c566
-rw-r--r--drivers/staging/comedi/comedi_internal.h6
-rw-r--r--drivers/staging/comedi/drivers/ni_pcimio.c4
-rw-r--r--drivers/staging/comedi/range.c17
-rw-r--r--drivers/staging/fbtft/fb_st7789v.c32
-rw-r--r--drivers/staging/fsl-dpaa2/ethsw/README2
-rw-r--r--drivers/staging/fsl-dpaa2/ethsw/ethsw.c6
-rw-r--r--drivers/staging/gasket/gasket_page_table.c2
-rw-r--r--drivers/staging/gasket/gasket_sysfs.c2
-rw-r--r--drivers/staging/gdm724x/gdm_lte.c2
-rw-r--r--drivers/staging/greybus/hid.c3
-rw-r--r--drivers/staging/greybus/light.c3
-rw-r--r--drivers/staging/greybus/loopback.c2
-rw-r--r--drivers/staging/greybus/sdio.c11
-rw-r--r--drivers/staging/greybus/uart.c19
-rw-r--r--drivers/staging/iio/Documentation/overview.txt2
-rw-r--r--drivers/staging/iio/impedance-analyzer/ad5933.c77
-rw-r--r--drivers/staging/kpc2000/kpc_dma/fileops.c4
-rw-r--r--drivers/staging/media/Kconfig6
-rw-r--r--drivers/staging/media/Makefile3
-rw-r--r--drivers/staging/media/atomisp/Kconfig36
-rw-r--r--drivers/staging/media/atomisp/Makefile363
-rw-r--r--drivers/staging/media/atomisp/TODO89
-rw-r--r--drivers/staging/media/atomisp/i2c/Kconfig86
-rw-r--r--drivers/staging/media/atomisp/i2c/Makefile18
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-gc0310.c1406
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-gc2235.c1139
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-libmsrlisthelper.c207
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-lm3554.c972
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c1910
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-ov2680.c1340
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-ov2722.c1288
-rw-r--r--drivers/staging/media/atomisp/i2c/gc0310.h404
-rw-r--r--drivers/staging/media/atomisp/i2c/gc2235.h680
-rw-r--r--drivers/staging/media/atomisp/i2c/mt9m114.h1791
-rw-r--r--drivers/staging/media/atomisp/i2c/ov2680.h845
-rw-r--r--drivers/staging/media/atomisp/i2c/ov2722.h1272
-rw-r--r--drivers/staging/media/atomisp/i2c/ov5693/Kconfig11
-rw-r--r--drivers/staging/media/atomisp/i2c/ov5693/Makefile2
-rw-r--r--drivers/staging/media/atomisp/i2c/ov5693/ad5823.h62
-rw-r--r--drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c2006
-rw-r--r--drivers/staging/media/atomisp/i2c/ov5693/ov5693.h1391
-rw-r--r--drivers/staging/media/atomisp/include/hmm/hmm.h102
-rw-r--r--drivers/staging/media/atomisp/include/hmm/hmm_bo.h315
-rw-r--r--drivers/staging/media/atomisp/include/hmm/hmm_common.h96
-rw-r--r--drivers/staging/media/atomisp/include/hmm/hmm_pool.h115
-rw-r--r--drivers/staging/media/atomisp/include/hmm/hmm_vm.h65
-rw-r--r--drivers/staging/media/atomisp/include/linux/atomisp.h1359
-rw-r--r--drivers/staging/media/atomisp/include/linux/atomisp_gmin_platform.h38
-rw-r--r--drivers/staging/media/atomisp/include/linux/atomisp_platform.h247
-rw-r--r--drivers/staging/media/atomisp/include/linux/libmsrlisthelper.h27
-rw-r--r--drivers/staging/media/atomisp/include/media/lm3554.h130
-rw-r--r--drivers/staging/media/atomisp/include/mmu/isp_mmu.h168
-rw-r--r--drivers/staging/media/atomisp/include/mmu/sh_mmu_mrfld.h24
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp-regs.h199
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_acc.c605
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_acc.h119
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_cmd.c6629
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_cmd.h442
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_common.h74
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_compat.h663
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_compat_css20.c4706
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_compat_css20.h277
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_compat_ioctl32.c1177
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_compat_ioctl32.h367
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_csi2.c426
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_csi2.h58
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_dfs_tables.h40
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_drvfs.c205
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_drvfs.h24
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_file.c227
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_file.h43
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_fops.c1306
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_fops.h50
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c1081
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_helper.h28
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_internal.h307
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_ioctl.c3094
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_ioctl.h66
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_subdev.c1456
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_subdev.h466
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_tables.h187
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_tpg.c163
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_tpg.h38
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_trace_event.h127
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_v4l2.c1997
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_v4l2.h36
-rw-r--r--drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf.h376
-rw-r--r--drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf_comm.h58
-rw-r--r--drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf_desc.h173
-rw-r--r--drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c320
-rw-r--r--drivers/staging/media/atomisp/pci/base/refcount/interface/ia_css_refcount.h83
-rw-r--r--drivers/staging/media/atomisp/pci/base/refcount/src/refcount.c275
-rw-r--r--drivers/staging/media/atomisp/pci/bits.h104
-rw-r--r--drivers/staging/media/atomisp/pci/camera/pipe/interface/ia_css_pipe_binarydesc.h297
-rw-r--r--drivers/staging/media/atomisp/pci/camera/pipe/interface/ia_css_pipe_stagedesc.h51
-rw-r--r--drivers/staging/media/atomisp/pci/camera/pipe/interface/ia_css_pipe_util.h39
-rw-r--r--drivers/staging/media/atomisp/pci/camera/pipe/src/pipe_binarydesc.c873
-rw-r--r--drivers/staging/media/atomisp/pci/camera/pipe/src/pipe_stagedesc.c118
-rw-r--r--drivers/staging/media/atomisp/pci/camera/pipe/src/pipe_util.c50
-rw-r--r--drivers/staging/media/atomisp/pci/camera/util/interface/ia_css_util.h141
-rw-r--r--drivers/staging/media/atomisp/pci/camera/util/src/util.c225
-rw-r--r--drivers/staging/media/atomisp/pci/cell_params.h40
-rw-r--r--drivers/staging/media/atomisp/pci/css_2400_system/hive/ia_css_isp_configs.c385
-rw-r--r--drivers/staging/media/atomisp/pci/css_2400_system/hive/ia_css_isp_params.c3419
-rw-r--r--drivers/staging/media/atomisp/pci/css_2400_system/hive/ia_css_isp_states.c223
-rw-r--r--drivers/staging/media/atomisp/pci/css_2400_system/hrt/hive_isp_css_irq_types_hrt.h68
-rw-r--r--drivers/staging/media/atomisp/pci/css_2400_system/hrt/isp2400_mamoiada_params.h228
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/csi_rx_global.h63
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/hive/ia_css_isp_configs.c386
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/hive/ia_css_isp_params.c3366
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/hive/ia_css_isp_states.c223
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/csi_rx.c40
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/csi_rx_local.h62
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/csi_rx_private.h305
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/ibuf_ctrl.c22
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/ibuf_ctrl_local.h58
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/ibuf_ctrl_private.h267
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/isys_dma.c40
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/isys_dma_local.h20
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/isys_dma_private.h61
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/isys_irq.c43
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/isys_irq_local.h35
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/isys_irq_private.h106
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/isys_stream2mmio.c21
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/isys_stream2mmio_local.h36
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/isys_stream2mmio_private.h167
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/pixelgen_local.h50
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/pixelgen_private.h182
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/hrt/PixelGen_SysBlock_defs.h113
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/hrt/ibuf_cntrl_defs.h134
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/hrt/mipi_backend_common_defs.h205
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/hrt/mipi_backend_defs.h208
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/hrt/rx_csi_defs.h169
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/hrt/stream2mmio_defs.h68
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/ibuf_ctrl_global.h79
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/isys_dma_global.h89
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/isys_irq_global.h35
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/isys_stream2mmio_global.h39
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/pixelgen_global.h90
-rw-r--r--drivers/staging/media/atomisp/pci/css_receiver_2400_common_defs.h198
-rw-r--r--drivers/staging/media/atomisp/pci/css_receiver_2400_defs.h256
-rw-r--r--drivers/staging/media/atomisp/pci/css_trace.h278
-rw-r--r--drivers/staging/media/atomisp/pci/defs.h36
-rw-r--r--drivers/staging/media/atomisp/pci/dma_v2_defs.h199
-rw-r--r--drivers/staging/media/atomisp/pci/gdc_v2_defs.h163
-rw-r--r--drivers/staging/media/atomisp/pci/gp_timer_defs.h36
-rw-r--r--drivers/staging/media/atomisp/pci/gpio_block_defs.h41
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_2401_irq_types_hrt.h68
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/debug_global.h81
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/dma_global.h254
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/event_fifo_global.h20
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/fifo_monitor_global.h32
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/gdc_global.h89
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/gp_device_global.h84
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/gp_timer_global.h33
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/gpio_global.h45
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/hmem_global.h45
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/debug.c71
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/debug_local.h20
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/debug_private.h126
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/dma.c299
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/dma_local.h207
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/dma_private.h41
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/event_fifo.c19
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/event_fifo_local.h61
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/event_fifo_private.h77
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/fifo_monitor.c569
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/fifo_monitor_local.h99
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/fifo_monitor_private.h80
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gdc.c125
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gdc_local.h20
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gdc_private.h20
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gp_device.c108
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gp_device_local.h143
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gp_device_private.h46
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gp_timer.c70
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gp_timer_local.h43
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gp_timer_private.h22
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gpio_local.h20
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gpio_private.h44
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/hmem.c19
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/hmem_local.h20
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/hmem_private.h30
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_formatter.c241
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_formatter_local.h121
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_formatter_private.h46
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_system.c1849
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/irq.c451
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/irq_local.h134
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/irq_private.h44
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/isp.c128
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/isp_local.h57
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/isp_private.h160
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/mmu.c46
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/mmu_local.h20
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/sp.c81
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/sp_local.h101
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/sp_private.h166
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/timed_ctrl.c74
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/timed_ctrl_local.h20
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/timed_ctrl_private.h34
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/vamem_local.h20
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/vmem.c276
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/vmem_local.h57
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/vmem_private.h20
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/input_formatter_global.h114
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/irq_global.h45
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/isp_global.h109
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/mmu_global.h22
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/sp_global.h93
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/timed_ctrl_global.h54
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/vamem_global.h34
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/vmem_global.h28
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_defs.h411
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/assert_support.h73
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/bitop_support.h24
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/csi_rx.h42
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/debug.h46
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/device_access/device_access.h177
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/dma.h46
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/error_support.h39
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/event_fifo.h45
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/fifo_monitor.h45
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/gdc_device.h47
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/gp_device.h45
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/gp_timer.h45
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/gpio.h45
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/hmem.h45
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/csi_rx_public.h135
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/debug_public.h98
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/dma_public.h72
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/event_fifo_public.h79
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/fifo_monitor_public.h110
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/gdc_public.h59
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/gp_device_public.h58
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/gp_timer_public.h33
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/gpio_public.h45
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/hmem_public.h32
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/ibuf_ctrl_public.h93
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/input_formatter_public.h115
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/irq_public.h184
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isp_public.h185
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_dma_public.h38
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_irq_public.h45
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_public.h37
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_stream2mmio_public.h101
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/mmu_public.h94
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/pixelgen_public.h79
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/sp_public.h223
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/tag_public.h40
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/timed_ctrl_public.h59
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/vamem_public.h18
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/vmem_public.h20
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/ibuf_ctrl.h46
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/input_formatter.h45
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/input_system.h45
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/irq.h45
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/isp.h45
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/isys_dma.h46
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/isys_irq.h39
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/isys_stream2mmio.h46
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/math_support.h153
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/memory_access/memory_access.h174
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/memory_realloc.h38
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/misc_support.h26
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/mmu_device.h39
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/pixelgen.h46
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/platform_support.h36
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/print_support.h41
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/queue.h45
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/resource.h46
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/sp.h45
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/string_support.h165
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/system_types.h24
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/tag.h44
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/timed_ctrl.h45
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/type_support.h40
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/vamem.h36
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/vmem.h45
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_shared/host/queue_local.h20
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_shared/host/queue_private.h18
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_shared/host/tag.c91
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_shared/host/tag_local.h22
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_shared/host/tag_private.h18
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_shared/queue_global.h18
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_shared/sw_event_global.h35
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_shared/tag_global.h56
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_streaming_to_mipi_types_hrt.h26
-rw-r--r--drivers/staging/media/atomisp/pci/hive_types.h128
-rw-r--r--drivers/staging/media/atomisp/pci/hmm/hmm.c733
-rw-r--r--drivers/staging/media/atomisp/pci/hmm/hmm_bo.c1511
-rw-r--r--drivers/staging/media/atomisp/pci/hmm/hmm_dynamic_pool.c233
-rw-r--r--drivers/staging/media/atomisp/pci/hmm/hmm_reserved_pool.c252
-rw-r--r--drivers/staging/media/atomisp/pci/hmm/hmm_vm.c212
-rw-r--r--drivers/staging/media/atomisp/pci/hrt/hive_isp_css_custom_host_hrt.h106
-rw-r--r--drivers/staging/media/atomisp/pci/hrt/hive_isp_css_mm_hrt.c124
-rw-r--r--drivers/staging/media/atomisp/pci/hrt/hive_isp_css_mm_hrt.h57
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css.h57
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_3a.h189
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_acc_types.h476
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_buffer.h85
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_control.h131
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_device_access.c95
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_device_access.h60
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_dvs.h297
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_env.h94
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_err.h63
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_event_public.h196
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_firmware.h64
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_frac.h37
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_frame_format.h101
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_frame_public.h353
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_host_data.h45
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_input_port.h60
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_irq.h235
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_isp_configs.h183
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_isp_params.h394
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_isp_states.h73
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_memory_access.c85
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_metadata.h72
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_mipi.h82
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_mmu.h32
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_mmu_private.h29
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_morph.h39
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_pipe.h189
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_pipe_public.h569
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_prbs.h53
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_properties.h41
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_shading.h40
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_stream.h111
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_stream_format.h29
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_stream_public.h585
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_timer.h68
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_tpg.h78
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_types.h605
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_version.h40
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_version_data.h27
-rw-r--r--drivers/staging/media/atomisp/pci/if_defs.h22
-rw-r--r--drivers/staging/media/atomisp/pci/input_formatter_subsystem_defs.h53
-rw-r--r--drivers/staging/media/atomisp/pci/input_selector_defs.h88
-rw-r--r--drivers/staging/media/atomisp/pci/input_switch_2400_defs.h30
-rw-r--r--drivers/staging/media/atomisp/pci/input_system_ctrl_defs.h243
-rw-r--r--drivers/staging/media/atomisp/pci/input_system_defs.h126
-rw-r--r--drivers/staging/media/atomisp/pci/input_system_global.h10
-rw-r--r--drivers/staging/media/atomisp/pci/input_system_local.h10
-rw-r--r--drivers/staging/media/atomisp/pci/input_system_private.h10
-rw-r--r--drivers/staging/media/atomisp/pci/input_system_public.h8
-rw-r--r--drivers/staging/media/atomisp/pci/irq_controller_defs.h28
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/aa/aa_2/ia_css_aa2.host.c31
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/aa/aa_2/ia_css_aa2.host.h27
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/aa/aa_2/ia_css_aa2_param.h24
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/aa/aa_2/ia_css_aa2_types.h46
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_1.0/ia_css_anr.host.c61
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_1.0/ia_css_anr.host.h39
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_1.0/ia_css_anr_param.h25
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_1.0/ia_css_anr_types.h35
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2.host.c46
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2.host.h35
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2_param.h27
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2_table.host.c55
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2_table.host.h22
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2_types.h31
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/bh/bh_2/ia_css_bh.host.c66
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/bh/bh_2/ia_css_bh.host.h32
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/bh/bh_2/ia_css_bh_param.h40
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/bh/bh_2/ia_css_bh_types.h35
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/bnlm/ia_css_bnlm.host.c196
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/bnlm/ia_css_bnlm.host.h40
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/bnlm/ia_css_bnlm_param.h64
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/bnlm/ia_css_bnlm_types.h106
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2.host.c131
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2.host.h35
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2_param.h47
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2_types.h71
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.c64
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.h34
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr_1.0/ia_css_bnr_param.h30
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_1.0/ia_css_cnr.host.c28
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_1.0/ia_css_cnr.host.h25
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_1.0/ia_css_cnr_param.h24
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_2/ia_css_cnr2.host.c73
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_2/ia_css_cnr2.host.h43
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_2/ia_css_cnr2_param.h32
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_2/ia_css_cnr2_types.h54
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/conversion/conversion_1.0/ia_css_conversion.host.c36
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/conversion/conversion_1.0/ia_css_conversion.host.h29
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/conversion/conversion_1.0/ia_css_conversion_param.h28
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/conversion/conversion_1.0/ia_css_conversion_types.h32
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output.host.c46
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output.host.h34
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output_param.h26
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/crop/crop_1.0/ia_css_crop.host.c64
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/crop/crop_1.0/ia_css_crop.host.h41
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/crop/crop_1.0/ia_css_crop_param.h32
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/crop/crop_1.0/ia_css_crop_types.h34
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/csc/csc_1.0/ia_css_csc.host.c127
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/csc/csc_1.0/ia_css_csc.host.h54
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/csc/csc_1.0/ia_css_csc_param.h33
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/csc/csc_1.0/ia_css_csc_types.h78
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc1_5/ia_css_ctc1_5.host.c121
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc1_5/ia_css_ctc1_5.host.h33
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc1_5/ia_css_ctc1_5_param.h46
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc2/ia_css_ctc2.host.c157
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc2/ia_css_ctc2.host.h33
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc2/ia_css_ctc2_param.h48
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc2/ia_css_ctc2_types.h54
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc.host.c58
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc.host.h36
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc_param.h44
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc_table.host.c214
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc_table.host.h24
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc_types.h110
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/de/de_1.0/ia_css_de.host.c78
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/de/de_1.0/ia_css_de.host.h44
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/de/de_1.0/ia_css_de_param.h27
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/de/de_1.0/ia_css_de_types.h42
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/de/de_2/ia_css_de2.host.c53
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/de/de_2/ia_css_de2.host.h38
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/de/de_2/ia_css_de2_param.h30
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/de/de_2/ia_css_de2_types.h41
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/dp/dp_1.0/ia_css_dp.host.c131
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/dp/dp_1.0/ia_css_dp.host.h47
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/dp/dp_1.0/ia_css_dp_param.h36
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/dp/dp_1.0/ia_css_dp_types.h48
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/dpc2/ia_css_dpc2.host.c65
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/dpc2/ia_css_dpc2.host.h39
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/dpc2/ia_css_dpc2_param.h51
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/dpc2/ia_css_dpc2_types.h59
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.c301
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.h60
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/dvs/dvs_1.0/ia_css_dvs_param.h32
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/dvs/dvs_1.0/ia_css_dvs_types.h29
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8.host.c338
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8.host.h45
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8_param.h153
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8_types.h87
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/fc/fc_1.0/ia_css_formats.host.c63
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/fc/fc_1.0/ia_css_formats.host.h44
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/fc/fc_1.0/ia_css_formats_param.h25
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/fc/fc_1.0/ia_css_formats_types.h38
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/fixedbds/fixedbds_1.0/ia_css_fixedbds_param.h32
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/fixedbds/fixedbds_1.0/ia_css_fixedbds_types.h24
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.c88
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.h44
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn_param.h35
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn_types.h52
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc.host.c117
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc.host.h65
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc_param.h61
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc_table.host.c213
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc_table.host.h24
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc_types.h97
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2.host.c109
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2.host.h79
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2_param.h43
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2_table.host.c131
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2_table.host.h26
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2_types.h54
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr.host.c41
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr.host.h31
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr_param.h59
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr_types.h70
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.c93
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.h28
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io_param.h20
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io_types.h20
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/common/ia_css_common_io_param.h20
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/common/ia_css_common_io_types.h29
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.c93
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.h28
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io_param.h20
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io_types.h20
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.c80
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.h34
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/iterator/iterator_1.0/ia_css_iterator_param.h38
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5.host.c74
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5.host.h41
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5_param.h31
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5_table.host.c34
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5_table.host.h22
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5_types.h73
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc.host.c49
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc.host.h41
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc_param.h25
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc_table.host.c51
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc_table.host.h23
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc_types.h63
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/norm/norm_1.0/ia_css_norm.host.c15
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/norm/norm_1.0/ia_css_norm.host.h20
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/norm/norm_1.0/ia_css_norm_param.h18
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ob/ob2/ia_css_ob2.host.c76
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ob/ob2/ia_css_ob2.host.h40
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ob/ob2/ia_css_ob2_param.h28
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ob/ob2/ia_css_ob2_types.h44
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ob/ob_1.0/ia_css_ob.host.c154
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ob/ob_1.0/ia_css_ob.host.h53
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ob/ob_1.0/ia_css_ob_param.h47
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ob/ob_1.0/ia_css_ob_types.h68
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/output/output_1.0/ia_css_output.host.c163
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/output/output_1.0/ia_css_output.host.h75
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/output/output_1.0/ia_css_output_param.h36
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/output/output_1.0/ia_css_output_types.h47
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/qplane/qplane_2/ia_css_qplane.host.c61
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/qplane/qplane_2/ia_css_qplane.host.h43
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/qplane/qplane_2/ia_css_qplane_param.h30
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/qplane/qplane_2/ia_css_qplane_types.h31
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/raw/raw_1.0/ia_css_raw.host.c135
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/raw/raw_1.0/ia_css_raw.host.h38
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/raw/raw_1.0/ia_css_raw_param.h38
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/raw/raw_1.0/ia_css_raw_types.h36
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/raw_aa_binning/raw_aa_binning_1.0/ia_css_raa.host.c35
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/raw_aa_binning/raw_aa_binning_1.0/ia_css_raa.host.h27
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ref/ref_1.0/ia_css_ref.host.c76
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ref/ref_1.0/ia_css_ref.host.h41
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ref/ref_1.0/ia_css_ref_param.h36
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ref/ref_1.0/ia_css_ref_state.h26
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ref/ref_1.0/ia_css_ref_types.h25
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.c386
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.h77
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/s3a/s3a_1.0/ia_css_s3a_param.h53
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/s3a/s3a_1.0/ia_css_s3a_types.h221
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/sc/sc_1.0/ia_css_sc.host.c158
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/sc/sc_1.0/ia_css_sc.host.h77
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/sc/sc_1.0/ia_css_sc_param.h42
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/sc/sc_1.0/ia_css_sc_types.h134
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/sdis/common/ia_css_sdis_common.host.h101
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/sdis/common/ia_css_sdis_common_types.h220
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.c437
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.h101
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_1.0/ia_css_sdis_types.h55
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.c350
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.h95
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_2/ia_css_sdis2_types.h75
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/tdf/tdf_1.0/ia_css_tdf.host.c74
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/tdf/tdf_1.0/ia_css_tdf.host.h38
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/tdf/tdf_1.0/ia_css_tdf_param.h43
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/tdf/tdf_1.0/ia_css_tdf_types.h52
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr3/ia_css_tnr3_types.h63
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.c120
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.h56
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr_param.h40
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr_state.h26
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr_types.h57
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/uds/uds_1.0/ia_css_uds_param.h31
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf.host.c138
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf.host.h47
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf_param.h37
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf_types.h31
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/wb/wb_1.0/ia_css_wb.host.c86
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/wb/wb_1.0/ia_css_wb.host.h39
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/wb/wb_1.0/ia_css_wb_param.h29
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/wb/wb_1.0/ia_css_wb_types.h46
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.c65
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.h47
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr_param.h50
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr_table.host.c81
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr_table.host.h22
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr_types.h70
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_3.0/ia_css_xnr3.host.c248
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_3.0/ia_css_xnr3.host.h41
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_3.0/ia_css_xnr3_param.h83
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_3.0/ia_css_xnr3_types.h97
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_1.0/ia_css_ynr.host.c217
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_1.0/ia_css_ynr.host.h60
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_1.0/ia_css_ynr_param.h49
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_1.0/ia_css_ynr_types.h80
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_2/ia_css_ynr2.host.c118
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_2/ia_css_ynr2.host.h56
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_2/ia_css_ynr2_param.h45
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_2/ia_css_ynr2_types.h93
-rw-r--r--drivers/staging/media/atomisp/pci/isp/modes/interface/input_buf.isp.h37
-rw-r--r--drivers/staging/media/atomisp/pci/isp/modes/interface/isp_const.h180
-rw-r--r--drivers/staging/media/atomisp/pci/isp/modes/interface/isp_types.h79
-rw-r--r--drivers/staging/media/atomisp/pci/isp2400_input_system_global.h155
-rw-r--r--drivers/staging/media/atomisp/pci/isp2400_input_system_local.h539
-rw-r--r--drivers/staging/media/atomisp/pci/isp2400_input_system_private.h122
-rw-r--r--drivers/staging/media/atomisp/pci/isp2400_input_system_public.h369
-rw-r--r--drivers/staging/media/atomisp/pci/isp2400_support.h38
-rw-r--r--drivers/staging/media/atomisp/pci/isp2400_system_global.h348
-rw-r--r--drivers/staging/media/atomisp/pci/isp2400_system_local.h325
-rw-r--r--drivers/staging/media/atomisp/pci/isp2401_input_system_global.h205
-rw-r--r--drivers/staging/media/atomisp/pci/isp2401_input_system_local.h106
-rw-r--r--drivers/staging/media/atomisp/pci/isp2401_input_system_private.h129
-rw-r--r--drivers/staging/media/atomisp/pci/isp2401_mamoiada_params.h228
-rw-r--r--drivers/staging/media/atomisp/pci/isp2401_system_global.h457
-rw-r--r--drivers/staging/media/atomisp/pci/isp2401_system_local.h406
-rw-r--r--drivers/staging/media/atomisp/pci/isp_acquisition_defs.h229
-rw-r--r--drivers/staging/media/atomisp/pci/isp_capture_defs.h278
-rw-r--r--drivers/staging/media/atomisp/pci/memory_realloc.c81
-rw-r--r--drivers/staging/media/atomisp/pci/mmu/isp_mmu.c566
-rw-r--r--drivers/staging/media/atomisp/pci/mmu/sh_mmu_mrfld.c77
-rw-r--r--drivers/staging/media/atomisp/pci/mmu_defs.h23
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/binary/interface/ia_css_binary.h228
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/binary/src/binary.c1852
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/bufq/interface/ia_css_bufq.h177
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/bufq/interface/ia_css_bufq_comm.h50
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c566
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/debug/interface/ia_css_debug.h502
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/debug/interface/ia_css_debug_internal.h15
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/debug/interface/ia_css_debug_pipe.h67
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/debug/src/ia_css_debug.c3540
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/event/interface/ia_css_event.h30
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/event/src/event.c112
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/eventq/interface/ia_css_eventq.h53
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/eventq/src/eventq.c77
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/frame/interface/ia_css_frame.h163
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/frame/interface/ia_css_frame_comm.h115
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/frame/src/frame.c989
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/ifmtr/interface/ia_css_ifmtr.h33
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/ifmtr/src/ifmtr.c552
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/inputfifo/interface/ia_css_inputfifo.h53
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/inputfifo/src/inputfifo.c538
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isp_param/interface/ia_css_isp_param.h102
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isp_param/interface/ia_css_isp_param_types.h81
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isp_param/src/isp_param.c216
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/interface/ia_css_isys.h184
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/interface/ia_css_isys_comm.h53
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/csi_rx_rmgr.c167
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/csi_rx_rmgr.h26
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/ibuf_ctrl_rmgr.c121
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/ibuf_ctrl_rmgr.h38
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/isys_dma_rmgr.c87
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/isys_dma_rmgr.h24
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/isys_init.c123
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/isys_stream2mmio_rmgr.c89
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/isys_stream2mmio_rmgr.h24
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/rx.c600
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/virtual_isys.c892
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/virtual_isys.h24
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/pipeline/interface/ia_css_pipeline.h286
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/pipeline/interface/ia_css_pipeline_common.h27
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/pipeline/src/pipeline.c786
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/queue/interface/ia_css_queue.h175
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/queue/interface/ia_css_queue_comm.h53
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/queue/src/queue.c422
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/queue/src/queue_access.c176
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/queue/src/queue_access.h85
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/rmgr/interface/ia_css_rmgr.h72
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/rmgr/interface/ia_css_rmgr_vbuf.h99
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/rmgr/src/rmgr.c39
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/rmgr/src/rmgr_vbuf.c336
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/spctrl/interface/ia_css_spctrl.h68
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/spctrl/interface/ia_css_spctrl_comm.h45
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/spctrl/src/spctrl.c184
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/tagger/interface/ia_css_tagger_common.h43
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/timer/src/timer.c31
-rw-r--r--drivers/staging/media/atomisp/pci/scalar_processor_2400_params.h20
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css.c11110
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_defs.h410
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_dvs_info.h36
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_firmware.c333
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_firmware.h55
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_frac.h40
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_host_data.c42
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_hrt.c85
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_hrt.h34
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_internal.h1061
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_legacy.h70
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_metadata.c16
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_metrics.c175
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_metrics.h55
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_mipi.c757
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_mipi.h49
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_mmu.c60
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_morph.c16
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_param_dvs.c286
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_param_dvs.h85
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_param_shading.c402
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_param_shading.h34
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_params.c5247
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_params.h188
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_params_internal.h21
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_pipe.c16
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_properties.c43
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_shading.c16
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_sp.c1829
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_sp.h248
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_stream.c16
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_stream_format.c76
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_stream_format.h23
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_struct.h85
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_uds.h37
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_version.c37
-rw-r--r--drivers/staging/media/atomisp/pci/str2mem_defs.h39
-rw-r--r--drivers/staging/media/atomisp/pci/streaming_to_mipi_defs.h28
-rw-r--r--drivers/staging/media/atomisp/pci/system_global.h10
-rw-r--r--drivers/staging/media/atomisp/pci/system_local.h10
-rw-r--r--drivers/staging/media/atomisp/pci/timed_controller_defs.h22
-rw-r--r--drivers/staging/media/atomisp/pci/version.h20
-rw-r--r--drivers/staging/media/hantro/Kconfig6
-rw-r--r--drivers/staging/media/hantro/Makefile2
-rw-r--r--drivers/staging/media/hantro/hantro.h7
-rw-r--r--drivers/staging/media/hantro/hantro_drv.c28
-rw-r--r--drivers/staging/media/hantro/hantro_h264.c237
-rw-r--r--drivers/staging/media/hantro/hantro_hw.h31
-rw-r--r--drivers/staging/media/hantro/hantro_v4l2.c111
-rw-r--r--drivers/staging/media/imx/Kconfig5
-rw-r--r--drivers/staging/media/imx/TODO29
-rw-r--r--drivers/staging/media/imx/imx-ic-prp.c15
-rw-r--r--drivers/staging/media/imx/imx-ic-prpencvf.c14
-rw-r--r--drivers/staging/media/imx/imx-media-capture.c42
-rw-r--r--drivers/staging/media/imx/imx-media-csc-scaler.c13
-rw-r--r--drivers/staging/media/imx/imx-media-csi.c223
-rw-r--r--drivers/staging/media/imx/imx-media-dev-common.c50
-rw-r--r--drivers/staging/media/imx/imx-media-dev.c2
-rw-r--r--drivers/staging/media/imx/imx-media-internal-sd.c6
-rw-r--r--drivers/staging/media/imx/imx-media-of.c114
-rw-r--r--drivers/staging/media/imx/imx-media-utils.c550
-rw-r--r--drivers/staging/media/imx/imx-media-vdic.c12
-rw-r--r--drivers/staging/media/imx/imx-media.h63
-rw-r--r--drivers/staging/media/imx/imx6-mipi-csi2.c93
-rw-r--r--drivers/staging/media/imx/imx7-media-csi.c177
-rw-r--r--drivers/staging/media/imx/imx7-mipi-csis.c582
-rw-r--r--drivers/staging/media/ipu3/Kconfig3
-rw-r--r--drivers/staging/media/ipu3/TODO6
-rw-r--r--drivers/staging/media/ipu3/include/intel-ipu3.h7
-rw-r--r--drivers/staging/media/ipu3/ipu3-css-params.c14
-rw-r--r--drivers/staging/media/ipu3/ipu3-css-pool.h4
-rw-r--r--drivers/staging/media/ipu3/ipu3-css.c7
-rw-r--r--drivers/staging/media/ipu3/ipu3-dmamap.c30
-rw-r--r--drivers/staging/media/ipu3/ipu3-mmu.c10
-rw-r--r--drivers/staging/media/ipu3/ipu3-v4l2.c92
-rw-r--r--drivers/staging/media/ipu3/ipu3.c5
-rw-r--r--drivers/staging/media/ipu3/ipu3.h4
-rw-r--r--drivers/staging/media/meson/vdec/codec_vp9.c31
-rw-r--r--drivers/staging/media/omap4iss/Kconfig4
-rw-r--r--drivers/staging/media/phy-rockchip-dphy-rx0/Documentation/devicetree/bindings/phy/rockchip-mipi-dphy-rx0.yaml76
-rw-r--r--drivers/staging/media/phy-rockchip-dphy-rx0/Kconfig2
-rw-r--r--drivers/staging/media/rkisp1/Kconfig6
-rw-r--r--drivers/staging/media/rkisp1/Makefile2
-rw-r--r--drivers/staging/media/rkisp1/TODO6
-rw-r--r--drivers/staging/media/rkisp1/rkisp1-capture.c101
-rw-r--r--drivers/staging/media/rkisp1/rkisp1-common.h16
-rw-r--r--drivers/staging/media/rkisp1/rkisp1-dev.c114
-rw-r--r--drivers/staging/media/rkisp1/rkisp1-isp.c94
-rw-r--r--drivers/staging/media/rkisp1/rkisp1-resizer.c36
-rw-r--r--drivers/staging/media/rkvdec/Kconfig16
-rw-r--r--drivers/staging/media/rkvdec/Makefile3
-rw-r--r--drivers/staging/media/rkvdec/TODO11
-rw-r--r--drivers/staging/media/rkvdec/rkvdec-h264.c1156
-rw-r--r--drivers/staging/media/rkvdec/rkvdec-regs.h223
-rw-r--r--drivers/staging/media/rkvdec/rkvdec.c1103
-rw-r--r--drivers/staging/media/rkvdec/rkvdec.h121
-rw-r--r--drivers/staging/media/soc_camera/soc-camera.rst171
-rw-r--r--drivers/staging/media/sunxi/cedrus/Kconfig5
-rw-r--r--drivers/staging/media/tegra-video/Kconfig12
-rw-r--r--drivers/staging/media/tegra-video/Makefile8
-rw-r--r--drivers/staging/media/tegra-video/TODO11
-rw-r--r--drivers/staging/media/tegra-video/csi.c539
-rw-r--r--drivers/staging/media/tegra-video/csi.h147
-rw-r--r--drivers/staging/media/tegra-video/tegra210.c978
-rw-r--r--drivers/staging/media/tegra-video/vi.c1074
-rw-r--r--drivers/staging/media/tegra-video/vi.h257
-rw-r--r--drivers/staging/media/tegra-video/video.c155
-rw-r--r--drivers/staging/media/tegra-video/video.h29
-rw-r--r--drivers/staging/media/usbvision/Kconfig2
-rw-r--r--drivers/staging/media/usbvision/usbvision-core.c2
-rw-r--r--drivers/staging/most/usb/Kconfig2
-rw-r--r--drivers/staging/most/usb/usb.c305
-rw-r--r--drivers/staging/mt7621-dts/mt7621.dtsi9
-rw-r--r--drivers/staging/mt7621-pci-phy/mediatek,mt7621-pci-phy.txt28
-rw-r--r--drivers/staging/mt7621-pci-phy/mediatek,mt7621-pci-phy.yaml36
-rw-r--r--drivers/staging/mt7621-pci/pci-mt7621.c64
-rw-r--r--drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c2
-rw-r--r--drivers/staging/pi433/pi433_if.c1
-rw-r--r--drivers/staging/qlge/qlge_dbg.c7
-rw-r--r--drivers/staging/qlge/qlge_main.c476
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ap.c99
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_efuse.c33
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ieee80211.c4
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_led.c17
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme_ext.c6
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_pwrctrl.c2
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_recv.c19
-rw-r--r--drivers/staging/rtl8188eu/hal/fw.c7
-rw-r--r--drivers/staging/rtl8188eu/hal/odm.c54
-rw-r--r--drivers/staging/rtl8188eu/hal/odm_hwconfig.c2
-rw-r--r--drivers/staging/rtl8188eu/hal/phy.c62
-rw-r--r--drivers/staging/rtl8188eu/hal/rf.c2
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c2
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_dm.c3
-rw-r--r--drivers/staging/rtl8188eu/include/osdep_service.h2
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_spec.h2
-rw-r--r--drivers/staging/rtl8188eu/os_dep/ioctl_linux.c116
-rw-r--r--drivers/staging/rtl8188eu/os_dep/rtw_android.c2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.c24
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_dm.c18
-rw-r--r--drivers/staging/rtl8192e/rtl819x_HTProc.c6
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c126
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c4
-rw-r--r--drivers/staging/rtl8712/rtl871x_xmit.c158
-rw-r--r--drivers/staging/rtl8712/rtl871x_xmit.h2
-rw-r--r--drivers/staging/rtl8712/usb_halinit.c2
-rw-r--r--drivers/staging/rtl8712/wifi.h9
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_cmd.c2
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme.c6
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme_ext.c7
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_recv.c9
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_security.c6
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_sta_mgt.c2
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_wlan_util.c22
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_btcoex.c45
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_com_phycfg.c8
-rw-r--r--drivers/staging/rtl8723bs/hal/odm.c13
-rw-r--r--drivers/staging/rtl8723bs/hal/odm.h2
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_RegDefine11N.h4
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c44
-rw-r--r--drivers/staging/rtl8723bs/hal/sdio_halinit.c4
-rw-r--r--drivers/staging/rtl8723bs/include/hal_data.h8
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_recv.h2
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c58
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_linux.c6
-rw-r--r--drivers/staging/rtl8723bs/os_dep/os_intfs.c33
-rw-r--r--drivers/staging/rtl8723bs/os_dep/recv_linux.c26
-rw-r--r--drivers/staging/rtl8723bs/os_dep/sdio_intf.c8
-rw-r--r--drivers/staging/sm750fb/sm750.c154
-rw-r--r--drivers/staging/sm750fb/sm750.h21
-rw-r--r--drivers/staging/sm750fb/sm750_hw.c2
-rw-r--r--drivers/staging/speakup/speakup_decext.c4
-rw-r--r--drivers/staging/speakup/speakup_decpc.c4
-rw-r--r--drivers/staging/speakup/speakup_dectlk.c5
-rw-r--r--drivers/staging/speakup/speakup_dummy.c4
-rw-r--r--drivers/staging/speakup/speakup_soft.c4
-rw-r--r--drivers/staging/speakup/spk_types.h3
-rw-r--r--drivers/staging/speakup/spkguide.txt7
-rw-r--r--drivers/staging/speakup/sysfs-driver-speakup6
-rw-r--r--drivers/staging/speakup/varhandlers.c1
-rw-r--r--drivers/staging/unisys/visorhba/visorhba_main.c2
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c4
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c383
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.h62
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/controls.c97
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/mmal-common.h18
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/mmal-parameters.h14
-rw-r--r--drivers/staging/vc04_services/interface/vchi/vchi.h81
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c7
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c8
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c33
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c19
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h7
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h8
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c166
-rw-r--r--drivers/staging/vt6655/Makefile3
-rw-r--r--drivers/staging/vt6655/baseband.c320
-rw-r--r--drivers/staging/vt6655/baseband.h37
-rw-r--r--drivers/staging/vt6655/card.c145
-rw-r--r--drivers/staging/vt6655/card.h4
-rw-r--r--drivers/staging/vt6655/channel.c4
-rw-r--r--drivers/staging/vt6655/device_main.c37
-rw-r--r--drivers/staging/vt6655/rf.c4
-rw-r--r--drivers/staging/vt6655/rxtx.c252
-rw-r--r--drivers/staging/vt6656/Makefile6
-rw-r--r--drivers/staging/vt6656/baseband.c620
-rw-r--r--drivers/staging/vt6656/baseband.h17
-rw-r--r--drivers/staging/vt6656/card.c570
-rw-r--r--drivers/staging/vt6656/card.h20
-rw-r--r--drivers/staging/vt6656/device.h20
-rw-r--r--drivers/staging/vt6656/firmware.c106
-rw-r--r--drivers/staging/vt6656/firmware.h25
-rw-r--r--drivers/staging/vt6656/key.c47
-rw-r--r--drivers/staging/vt6656/key.h13
-rw-r--r--drivers/staging/vt6656/mac.c128
-rw-r--r--drivers/staging/vt6656/mac.h28
-rw-r--r--drivers/staging/vt6656/main_usb.c181
-rw-r--r--drivers/staging/vt6656/power.c34
-rw-r--r--drivers/staging/vt6656/power.h2
-rw-r--r--drivers/staging/vt6656/rf.c463
-rw-r--r--drivers/staging/vt6656/rf.h3
-rw-r--r--drivers/staging/vt6656/rxtx.c674
-rw-r--r--drivers/staging/vt6656/rxtx.h20
-rw-r--r--drivers/staging/vt6656/usbpipe.c70
-rw-r--r--drivers/staging/vt6656/usbpipe.h11
-rw-r--r--drivers/staging/vt6656/wcmd.c3
-rw-r--r--drivers/staging/wfx/Makefile1
-rw-r--r--drivers/staging/wfx/TODO51
-rw-r--r--drivers/staging/wfx/bh.c50
-rw-r--r--drivers/staging/wfx/bh.h1
-rw-r--r--drivers/staging/wfx/bus.h2
-rw-r--r--drivers/staging/wfx/bus_sdio.c86
-rw-r--r--drivers/staging/wfx/bus_spi.c44
-rw-r--r--drivers/staging/wfx/data_rx.c16
-rw-r--r--drivers/staging/wfx/data_rx.h3
-rw-r--r--drivers/staging/wfx/data_tx.c352
-rw-r--r--drivers/staging/wfx/data_tx.h8
-rw-r--r--drivers/staging/wfx/debug.c70
-rw-r--r--drivers/staging/wfx/fwio.c14
-rw-r--r--drivers/staging/wfx/hif_api_cmd.h623
-rw-r--r--drivers/staging/wfx/hif_api_general.h495
-rw-r--r--drivers/staging/wfx/hif_api_mib.h671
-rw-r--r--drivers/staging/wfx/hif_rx.c221
-rw-r--r--drivers/staging/wfx/hif_tx.c119
-rw-r--r--drivers/staging/wfx/hif_tx.h10
-rw-r--r--drivers/staging/wfx/hif_tx_mib.c386
-rw-r--r--drivers/staging/wfx/hif_tx_mib.h436
-rw-r--r--drivers/staging/wfx/hwio.c18
-rw-r--r--drivers/staging/wfx/key.c71
-rw-r--r--drivers/staging/wfx/key.h2
-rw-r--r--drivers/staging/wfx/main.c78
-rw-r--r--drivers/staging/wfx/main.h4
-rw-r--r--drivers/staging/wfx/queue.c533
-rw-r--r--drivers/staging/wfx/queue.h42
-rw-r--r--drivers/staging/wfx/scan.c13
-rw-r--r--drivers/staging/wfx/sta.c871
-rw-r--r--drivers/staging/wfx/sta.h38
-rw-r--r--drivers/staging/wfx/traces.h31
-rw-r--r--drivers/staging/wfx/wfx.h47
-rw-r--r--drivers/staging/wilc1000/cfg80211.c36
-rw-r--r--drivers/staging/wilc1000/cfg80211.h5
-rw-r--r--drivers/staging/wilc1000/hif.c4
-rw-r--r--drivers/staging/wilc1000/netdev.c21
-rw-r--r--drivers/staging/wilc1000/netdev.h9
-rw-r--r--drivers/target/iscsi/Kconfig2
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c35
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c30
-rw-r--r--drivers/target/loopback/tcm_loop.c36
-rw-r--r--drivers/target/target_core_alua.c10
-rw-r--r--drivers/target/target_core_configfs.c82
-rw-r--r--drivers/target/target_core_device.c13
-rw-r--r--drivers/target/target_core_pr.c2
-rw-r--r--drivers/target/target_core_pscsi.c6
-rw-r--r--drivers/target/target_core_tpg.c3
-rw-r--r--drivers/target/target_core_transport.c6
-rw-r--r--drivers/target/target_core_user.c177
-rw-r--r--drivers/tee/Kconfig2
-rw-r--r--drivers/tee/optee/call.c10
-rw-r--r--drivers/tee/tee_core.c159
-rw-r--r--drivers/tee/tee_shm.c31
-rw-r--r--drivers/thermal/imx_sc_thermal.c2
-rw-r--r--drivers/thunderbolt/Kconfig1
-rw-r--r--drivers/thunderbolt/icm.c22
-rw-r--r--drivers/thunderbolt/nhi.c5
-rw-r--r--drivers/thunderbolt/nhi.h2
-rw-r--r--drivers/thunderbolt/switch.c11
-rw-r--r--drivers/tty/hvc/hvc_console.c23
-rw-r--r--drivers/tty/hvc/hvcs.c2
-rw-r--r--drivers/tty/mxser.c7
-rw-r--r--drivers/tty/n_gsm.c39
-rw-r--r--drivers/tty/n_hdlc.c7
-rw-r--r--drivers/tty/rocket.c10
-rw-r--r--drivers/tty/serial/8250/8250_core.c18
-rw-r--r--drivers/tty/serial/8250/8250_early.c23
-rw-r--r--drivers/tty/serial/8250/8250_exar.c65
-rw-r--r--drivers/tty/serial/8250/8250_fintek.c13
-rw-r--r--drivers/tty/serial/8250/8250_pci.c6
-rw-r--r--drivers/tty/serial/8250/8250_port.c9
-rw-r--r--drivers/tty/serial/8250/Kconfig1
-rw-r--r--drivers/tty/serial/8250/serial_cs.c6
-rw-r--r--drivers/tty/serial/Kconfig16
-rw-r--r--drivers/tty/serial/amba-pl011.c33
-rw-r--r--drivers/tty/serial/ar933x_uart.c6
-rw-r--r--drivers/tty/serial/atmel_serial.c6
-rw-r--r--drivers/tty/serial/fsl_lpuart.c27
-rw-r--r--drivers/tty/serial/imx.c13
-rw-r--r--drivers/tty/serial/kgdboc.c318
-rw-r--r--drivers/tty/serial/lantiq.c40
-rw-r--r--drivers/tty/serial/lpc32xx_hs.c1
-rw-r--r--drivers/tty/serial/omap-serial.c52
-rw-r--r--drivers/tty/serial/qcom_geni_serial.c39
-rw-r--r--drivers/tty/serial/samsung_tty.c84
-rw-r--r--drivers/tty/serial/sc16is7xx.c73
-rw-r--r--drivers/tty/serial/serial_core.c22
-rw-r--r--drivers/tty/serial/sh-sci.h1
-rw-r--r--drivers/tty/serial/stm32-usart.c74
-rw-r--r--drivers/tty/serial/stm32-usart.h1
-rw-r--r--drivers/tty/serial/xilinx_uartps.c12
-rw-r--r--drivers/tty/sysrq.c70
-rw-r--r--drivers/tty/vcc.c1
-rw-r--r--drivers/tty/vt/consolemap.c2
-rw-r--r--drivers/tty/vt/keyboard.c26
-rw-r--r--drivers/tty/vt/selection.c133
-rw-r--r--drivers/uio/uio.c2
-rw-r--r--drivers/uio/uio_dmem_genirq.c3
-rw-r--r--drivers/uio/uio_hv_generic.c1
-rw-r--r--drivers/usb/cdns3/cdns3-ti.c3
-rw-r--r--drivers/usb/cdns3/core.c47
-rw-r--r--drivers/usb/cdns3/core.h2
-rw-r--r--drivers/usb/cdns3/drd.c4
-rw-r--r--drivers/usb/cdns3/ep0.c7
-rw-r--r--drivers/usb/cdns3/gadget.c15
-rw-r--r--drivers/usb/chipidea/Kconfig37
-rw-r--r--drivers/usb/chipidea/Makefile13
-rw-r--r--drivers/usb/chipidea/ci.h1
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c13
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.h2
-rw-r--r--drivers/usb/chipidea/ci_hdrc_usb2.c30
-rw-r--r--drivers/usb/chipidea/ci_hdrc_zevio.c67
-rw-r--r--drivers/usb/chipidea/core.c48
-rw-r--r--drivers/usb/chipidea/udc.c170
-rw-r--r--drivers/usb/chipidea/udc.h6
-rw-r--r--drivers/usb/chipidea/usbmisc_imx.c334
-rw-r--r--drivers/usb/class/cdc-acm.c2
-rw-r--r--drivers/usb/class/usblp.c5
-rw-r--r--drivers/usb/core/devices.c2
-rw-r--r--drivers/usb/core/devio.c9
-rw-r--r--drivers/usb/core/hcd-pci.c7
-rw-r--r--drivers/usb/core/hcd.c3
-rw-r--r--drivers/usb/core/hub.c2
-rw-r--r--drivers/usb/core/hub.h2
-rw-r--r--drivers/usb/core/otg_whitelist.h2
-rw-r--r--drivers/usb/core/sysfs.c6
-rw-r--r--drivers/usb/core/usb.h2
-rw-r--r--drivers/usb/dwc2/core.c23
-rw-r--r--drivers/usb/dwc2/core.h6
-rw-r--r--drivers/usb/dwc2/core_intr.c7
-rw-r--r--drivers/usb/dwc2/debug.h2
-rw-r--r--drivers/usb/dwc2/hcd.h2
-rw-r--r--drivers/usb/dwc2/hw.h3
-rw-r--r--drivers/usb/dwc2/params.c19
-rw-r--r--drivers/usb/dwc2/platform.c39
-rw-r--r--drivers/usb/dwc3/core.c62
-rw-r--r--drivers/usb/dwc3/core.h83
-rw-r--r--drivers/usb/dwc3/debug.h4
-rw-r--r--drivers/usb/dwc3/debugfs.c14
-rw-r--r--drivers/usb/dwc3/drd.c6
-rw-r--r--drivers/usb/dwc3/dwc3-keystone.c41
-rw-r--r--drivers/usb/dwc3/dwc3-meson-g12a.c422
-rw-r--r--drivers/usb/dwc3/dwc3-of-simple.c30
-rw-r--r--drivers/usb/dwc3/gadget.c469
-rw-r--r--drivers/usb/dwc3/gadget.h2
-rw-r--r--drivers/usb/dwc3/host.c2
-rw-r--r--drivers/usb/dwc3/io.h2
-rw-r--r--drivers/usb/dwc3/trace.h2
-rw-r--r--drivers/usb/early/xhci-dbc.c1
-rw-r--r--drivers/usb/early/xhci-dbc.h2
-rw-r--r--drivers/usb/gadget/composite.c78
-rw-r--r--drivers/usb/gadget/configfs.c14
-rw-r--r--drivers/usb/gadget/function/f_acm.c16
-rw-r--r--drivers/usb/gadget/function/f_eem.c2
-rw-r--r--drivers/usb/gadget/function/f_fs.c12
-rw-r--r--drivers/usb/gadget/function/f_hid.c6
-rw-r--r--drivers/usb/gadget/function/f_serial.c16
-rw-r--r--drivers/usb/gadget/function/f_tcm.c3
-rw-r--r--drivers/usb/gadget/function/f_uvc.h2
-rw-r--r--drivers/usb/gadget/function/rndis.h2
-rw-r--r--drivers/usb/gadget/function/u_audio.h2
-rw-r--r--drivers/usb/gadget/function/u_ecm.h2
-rw-r--r--drivers/usb/gadget/function/u_eem.h2
-rw-r--r--drivers/usb/gadget/function/u_ether.h2
-rw-r--r--drivers/usb/gadget/function/u_ether_configfs.h2
-rw-r--r--drivers/usb/gadget/function/u_fs.h2
-rw-r--r--drivers/usb/gadget/function/u_gether.h2
-rw-r--r--drivers/usb/gadget/function/u_hid.h2
-rw-r--r--drivers/usb/gadget/function/u_midi.h2
-rw-r--r--drivers/usb/gadget/function/u_ncm.h2
-rw-r--r--drivers/usb/gadget/function/u_phonet.h2
-rw-r--r--drivers/usb/gadget/function/u_printer.h2
-rw-r--r--drivers/usb/gadget/function/u_rndis.h2
-rw-r--r--drivers/usb/gadget/function/u_serial.c57
-rw-r--r--drivers/usb/gadget/function/u_serial.h4
-rw-r--r--drivers/usb/gadget/function/u_tcm.h2
-rw-r--r--drivers/usb/gadget/function/u_uac1.h2
-rw-r--r--drivers/usb/gadget/function/u_uac1_legacy.h2
-rw-r--r--drivers/usb/gadget/function/u_uac2.h2
-rw-r--r--drivers/usb/gadget/function/u_uvc.h2
-rw-r--r--drivers/usb/gadget/function/uvc.h4
-rw-r--r--drivers/usb/gadget/function/uvc_configfs.h2
-rw-r--r--drivers/usb/gadget/function/uvc_v4l2.c4
-rw-r--r--drivers/usb/gadget/function/uvc_v4l2.h2
-rw-r--r--drivers/usb/gadget/function/uvc_video.c76
-rw-r--r--drivers/usb/gadget/function/uvc_video.h4
-rw-r--r--drivers/usb/gadget/legacy/inode.c6
-rw-r--r--drivers/usb/gadget/legacy/mass_storage.c14
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/core.c16
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/hub.c236
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/vhub.h12
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.c112
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.h12
-rw-r--r--drivers/usb/gadget/udc/core.c2
-rw-r--r--drivers/usb/gadget/udc/dummy_hcd.c27
-rw-r--r--drivers/usb/gadget/udc/fsl_udc_core.c4
-rw-r--r--drivers/usb/gadget/udc/gr_udc.c1
-rw-r--r--drivers/usb/gadget/udc/lpc32xx_udc.c11
-rw-r--r--drivers/usb/gadget/udc/m66592-udc.c2
-rw-r--r--drivers/usb/gadget/udc/max3420_udc.c2
-rw-r--r--drivers/usb/gadget/udc/mv_u3d_core.c2
-rw-r--r--drivers/usb/gadget/udc/net2272.c2
-rw-r--r--drivers/usb/gadget/udc/omap_udc.c2
-rw-r--r--drivers/usb/gadget/udc/s3c2410_udc.c4
-rw-r--r--drivers/usb/gadget/udc/tegra-xudc.c140
-rw-r--r--drivers/usb/gadget/udc/udc-xilinx.c1
-rw-r--r--drivers/usb/gadget/usbstring.c24
-rw-r--r--drivers/usb/host/Kconfig29
-rw-r--r--drivers/usb/host/Makefile2
-rw-r--r--drivers/usb/host/ehci-brcm.c280
-rw-r--r--drivers/usb/host/ehci-fsl.h2
-rw-r--r--drivers/usb/host/ehci-mv.c12
-rw-r--r--drivers/usb/host/ehci-mxc.c15
-rw-r--r--drivers/usb/host/ehci-pci.c6
-rw-r--r--drivers/usb/host/ehci-platform.c4
-rw-r--r--drivers/usb/host/ehci-tegra.c1
-rw-r--r--drivers/usb/host/ehci.h2
-rw-r--r--drivers/usb/host/fhci.h2
-rw-r--r--drivers/usb/host/imx21-hcd.h2
-rw-r--r--drivers/usb/host/ohci-pci.c9
-rw-r--r--drivers/usb/host/ohci-platform.c5
-rw-r--r--drivers/usb/host/ohci-sm501.c7
-rw-r--r--drivers/usb/host/ohci.h2
-rw-r--r--drivers/usb/host/pci-quirks.c24
-rw-r--r--drivers/usb/host/r8a66597.h2
-rw-r--r--drivers/usb/host/u132-hcd.c10
-rw-r--r--drivers/usb/host/uhci-pci.c8
-rw-r--r--drivers/usb/host/xhci-debugfs.h2
-rw-r--r--drivers/usb/host/xhci-ext-caps.h2
-rw-r--r--drivers/usb/host/xhci-mtk.h2
-rw-r--r--drivers/usb/host/xhci-mvebu.h2
-rw-r--r--drivers/usb/host/xhci-pci-renesas.c645
-rw-r--r--drivers/usb/host/xhci-pci.c47
-rw-r--r--drivers/usb/host/xhci-pci.h28
-rw-r--r--drivers/usb/host/xhci-plat.c20
-rw-r--r--drivers/usb/host/xhci-plat.h2
-rw-r--r--drivers/usb/host/xhci-rcar.h2
-rw-r--r--drivers/usb/host/xhci-trace.h2
-rw-r--r--drivers/usb/host/xhci.h3
-rw-r--r--drivers/usb/isp1760/isp1760-core.h2
-rw-r--r--drivers/usb/isp1760/isp1760-regs.h2
-rw-r--r--drivers/usb/isp1760/isp1760-udc.h2
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb.h2
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb_init.h2
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb_struct.h2
-rw-r--r--drivers/usb/misc/usb_u132.h2
-rw-r--r--drivers/usb/mtu3/mtu3.h2
-rw-r--r--drivers/usb/mtu3/mtu3_debug.h2
-rw-r--r--drivers/usb/mtu3/mtu3_dr.h2
-rw-r--r--drivers/usb/mtu3/mtu3_hw_regs.h2
-rw-r--r--drivers/usb/mtu3/mtu3_qmu.h2
-rw-r--r--drivers/usb/mtu3/mtu3_trace.h2
-rw-r--r--drivers/usb/musb/davinci.h2
-rw-r--r--drivers/usb/musb/jz4740.c4
-rw-r--r--drivers/usb/musb/mediatek.c6
-rw-r--r--drivers/usb/musb/musb_core.c9
-rw-r--r--drivers/usb/musb/musb_core.h2
-rw-r--r--drivers/usb/musb/musb_debug.h2
-rw-r--r--drivers/usb/musb/musb_debugfs.c10
-rw-r--r--drivers/usb/musb/musb_dma.h2
-rw-r--r--drivers/usb/musb/musb_gadget.h2
-rw-r--r--drivers/usb/musb/musb_host.c10
-rw-r--r--drivers/usb/musb/musb_host.h2
-rw-r--r--drivers/usb/musb/musb_io.h2
-rw-r--r--drivers/usb/musb/musb_regs.h2
-rw-r--r--drivers/usb/musb/musb_trace.h2
-rw-r--r--drivers/usb/musb/omap2430.h2
-rw-r--r--drivers/usb/musb/tusb6010.h2
-rw-r--r--drivers/usb/phy/phy-fsl-usb.h2
-rw-r--r--drivers/usb/phy/phy-jz4770.c12
-rw-r--r--drivers/usb/phy/phy-mv-usb.h2
-rw-r--r--drivers/usb/renesas_usbhs/common.h2
-rw-r--r--drivers/usb/renesas_usbhs/fifo.h2
-rw-r--r--drivers/usb/renesas_usbhs/mod.h2
-rw-r--r--drivers/usb/renesas_usbhs/pipe.h2
-rw-r--r--drivers/usb/renesas_usbhs/rcar2.h2
-rw-r--r--drivers/usb/renesas_usbhs/rcar3.h2
-rw-r--r--drivers/usb/renesas_usbhs/rza.h2
-rw-r--r--drivers/usb/roles/class.c4
-rw-r--r--drivers/usb/serial/belkin_sa.h2
-rw-r--r--drivers/usb/serial/ch341.c68
-rw-r--r--drivers/usb/serial/io_16654.h2
-rw-r--r--drivers/usb/serial/io_edgeport.h2
-rw-r--r--drivers/usb/serial/io_ionsp.h2
-rw-r--r--drivers/usb/serial/io_ti.h2
-rw-r--r--drivers/usb/serial/io_usbvend.h2
-rw-r--r--drivers/usb/serial/iuu_phoenix.h2
-rw-r--r--drivers/usb/serial/mct_u232.h2
-rw-r--r--drivers/usb/serial/option.c4
-rw-r--r--drivers/usb/serial/oti6858.h2
-rw-r--r--drivers/usb/serial/pl2303.h2
-rw-r--r--drivers/usb/serial/qcserial.c1
-rw-r--r--drivers/usb/serial/usb_wwan.c4
-rw-r--r--drivers/usb/serial/visor.h2
-rw-r--r--drivers/usb/serial/whiteheat.h2
-rw-r--r--drivers/usb/storage/debug.h2
-rw-r--r--drivers/usb/storage/initializers.h2
-rw-r--r--drivers/usb/storage/protocol.h2
-rw-r--r--drivers/usb/storage/scsiglue.h2
-rw-r--r--drivers/usb/storage/sierra_ms.c4
-rw-r--r--drivers/usb/storage/transport.h2
-rw-r--r--drivers/usb/storage/unusual_alauda.h2
-rw-r--r--drivers/usb/storage/unusual_cypress.h2
-rw-r--r--drivers/usb/storage/unusual_datafab.h2
-rw-r--r--drivers/usb/storage/unusual_devs.h2
-rw-r--r--drivers/usb/storage/unusual_ene_ub6250.h2
-rw-r--r--drivers/usb/storage/unusual_freecom.h2
-rw-r--r--drivers/usb/storage/unusual_isd200.h2
-rw-r--r--drivers/usb/storage/unusual_jumpshot.h2
-rw-r--r--drivers/usb/storage/unusual_karma.h2
-rw-r--r--drivers/usb/storage/unusual_onetouch.h2
-rw-r--r--drivers/usb/storage/unusual_realtek.h2
-rw-r--r--drivers/usb/storage/unusual_sddr09.h2
-rw-r--r--drivers/usb/storage/unusual_sddr55.h2
-rw-r--r--drivers/usb/storage/unusual_uas.h2
-rw-r--r--drivers/usb/storage/unusual_usbat.h2
-rw-r--r--drivers/usb/storage/usb.h2
-rw-r--r--drivers/usb/typec/Kconfig3
-rw-r--r--drivers/usb/typec/class.c36
-rw-r--r--drivers/usb/typec/mux/Kconfig2
-rw-r--r--drivers/usb/typec/mux/intel_pmc_mux.c54
-rw-r--r--drivers/usb/typec/tcpm/Kconfig2
-rw-r--r--drivers/usb/typec/tcpm/fusb302.c32
-rw-r--r--drivers/usb/typec/tcpm/fusb302_reg.h2
-rw-r--r--drivers/usb/typec/tps6598x.c64
-rw-r--r--drivers/usb/typec/ucsi/Makefile4
-rw-r--r--drivers/usb/typec/ucsi/psy.c241
-rw-r--r--drivers/usb/typec/ucsi/trace.c10
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c41
-rw-r--r--drivers/usb/typec/ucsi/ucsi.h26
-rw-r--r--drivers/vdpa/Kconfig2
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_base.c3
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_base.h4
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_main.c146
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim.c7
-rw-r--r--drivers/vfio/mdev/mdev_sysfs.c2
-rw-r--r--drivers/vfio/pci/vfio_pci.c353
-rw-r--r--drivers/vfio/pci/vfio_pci_config.c50
-rw-r--r--drivers/vfio/pci/vfio_pci_intrs.c14
-rw-r--r--drivers/vfio/pci/vfio_pci_nvlink2.c2
-rw-r--r--drivers/vfio/pci/vfio_pci_private.h15
-rw-r--r--drivers/vfio/pci/vfio_pci_rdwr.c24
-rw-r--r--drivers/vfio/vfio.c13
-rw-r--r--drivers/vfio/vfio_iommu_type1.c623
-rw-r--r--drivers/vhost/Kconfig17
-rw-r--r--drivers/vhost/net.c3
-rw-r--r--drivers/vhost/scsi.c3
-rw-r--r--drivers/vhost/test.c2
-rw-r--r--drivers/vhost/vdpa.c116
-rw-r--r--drivers/vhost/vhost.c111
-rw-r--r--drivers/vhost/vhost.h8
-rw-r--r--drivers/vhost/vringh.c6
-rw-r--r--drivers/vhost/vsock.c2
-rw-r--r--drivers/video/backlight/backlight.c21
-rw-r--r--drivers/video/backlight/l4f00242t03.c45
-rw-r--r--drivers/video/backlight/lp855x_bl.c20
-rw-r--r--drivers/video/backlight/qcom-wled.c589
-rw-r--r--drivers/video/console/newport_con.c1
-rw-r--r--drivers/video/fbdev/Kconfig4
-rw-r--r--drivers/video/fbdev/acornfb.c1
-rw-r--r--drivers/video/fbdev/amifb.c4
-rw-r--r--drivers/video/fbdev/arcfb.c10
-rw-r--r--drivers/video/fbdev/atafb.c1
-rw-r--r--drivers/video/fbdev/atmel_lcdfb.c1
-rw-r--r--drivers/video/fbdev/aty/aty128fb.c14
-rw-r--r--drivers/video/fbdev/aty/atyfb_base.c6
-rw-r--r--drivers/video/fbdev/cirrusfb.c1
-rw-r--r--drivers/video/fbdev/controlfb.c803
-rw-r--r--drivers/video/fbdev/core/fbmon.c2
-rw-r--r--drivers/video/fbdev/cyber2000fb.c3
-rw-r--r--drivers/video/fbdev/fb-puv3.c1
-rw-r--r--drivers/video/fbdev/hitfb.c1
-rw-r--r--drivers/video/fbdev/i810/i810_main.c10
-rw-r--r--drivers/video/fbdev/imxfb.c27
-rw-r--r--drivers/video/fbdev/matrox/g450_pll.c22
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_base.h2
-rw-r--r--drivers/video/fbdev/mb862xx/mb862xxfb_accel.c2
-rw-r--r--drivers/video/fbdev/mx3fb.c20
-rw-r--r--drivers/video/fbdev/neofb.c1
-rw-r--r--drivers/video/fbdev/omap/omapfb_main.c14
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dispc.c114
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dss.h20
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/venc.c43
-rw-r--r--drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c3
-rw-r--r--drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c8
-rw-r--r--drivers/video/fbdev/pm2fb.c2
-rw-r--r--drivers/video/fbdev/pm3fb.c8
-rw-r--r--drivers/video/fbdev/ps3fb.c4
-rw-r--r--drivers/video/fbdev/pxa168fb.c5
-rw-r--r--drivers/video/fbdev/q40fb.c1
-rw-r--r--drivers/video/fbdev/riva/riva_hw.c18
-rw-r--r--drivers/video/fbdev/s1d13xxxfb.c14
-rw-r--r--drivers/video/fbdev/sa1100fb.c20
-rw-r--r--drivers/video/fbdev/sa1100fb.h3
-rw-r--r--drivers/video/fbdev/savage/savagefb.h2
-rw-r--r--drivers/video/fbdev/savage/savagefb_driver.c1
-rw-r--r--drivers/video/fbdev/ssd1307fb.c102
-rw-r--r--drivers/video/fbdev/udlfb.c6
-rw-r--r--drivers/video/fbdev/uvesafb.c14
-rw-r--r--drivers/video/fbdev/valkyriefb.c4
-rw-r--r--drivers/video/fbdev/vesafb.c16
-rw-r--r--drivers/video/fbdev/via/debug.h6
-rw-r--r--drivers/video/fbdev/via/viafbdev.c2
-rw-r--r--drivers/video/fbdev/vt8500lcdfb.c1
-rw-r--r--drivers/video/fbdev/w100fb.c2
-rw-r--r--drivers/video/hdmi.c65
-rw-r--r--drivers/virtio/Kconfig17
-rw-r--r--drivers/virtio/Makefile1
-rw-r--r--drivers/virtio/virtio_balloon.c9
-rw-r--r--drivers/virtio/virtio_mem.c1965
-rw-r--r--drivers/virtio/virtio_mmio.c4
-rw-r--r--drivers/virtio/virtio_pci_modern.c1
-rw-r--r--drivers/visorbus/controlvmchannel.h2
-rw-r--r--drivers/visorbus/vbuschannel.h2
-rw-r--r--drivers/visorbus/visorbus_private.h2
-rw-r--r--drivers/w1/masters/omap_hdq.c82
-rw-r--r--drivers/w1/slaves/w1_ds2430.c2
-rw-r--r--drivers/w1/slaves/w1_therm.c1624
-rw-r--r--drivers/watchdog/Kconfig15
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/arm_smc_wdt.c188
-rw-r--r--drivers/watchdog/da9062_wdt.c32
-rw-r--r--drivers/watchdog/da9063_wdt.c20
-rw-r--r--drivers/watchdog/iTCO_wdt.c25
-rw-r--r--drivers/watchdog/imx2_wdt.c2
-rw-r--r--drivers/watchdog/imx_sc_wdt.c5
-rw-r--r--drivers/watchdog/intel-mid_wdt.c53
-rw-r--r--drivers/watchdog/m54xx_wdt.c1
-rw-r--r--drivers/watchdog/omap_wdt.c1
-rw-r--r--drivers/watchdog/riowd.c2
-rw-r--r--drivers/xen/balloon.c1
-rw-r--r--drivers/xen/gntdev.c6
-rw-r--r--drivers/xen/grant-table.c1
-rw-r--r--drivers/xen/privcmd.c16
-rw-r--r--drivers/xen/time.c2
-rw-r--r--drivers/xen/xenbus/xenbus_probe.c1
-rw-r--r--drivers/xen/xenbus/xenbus_probe_backend.c1
-rw-r--r--drivers/xen/xenbus/xenbus_probe_frontend.c1
5984 files changed, 521645 insertions, 98511 deletions
diff --git a/drivers/acpi/acpi_dbg.c b/drivers/acpi/acpi_dbg.c
index 7a265c2171c0..6041974c7627 100644
--- a/drivers/acpi/acpi_dbg.c
+++ b/drivers/acpi/acpi_dbg.c
@@ -745,7 +745,7 @@ static const struct acpi_debugger_ops acpi_aml_debugger = {
.notify_command_complete = acpi_aml_notify_command_complete,
};
-int __init acpi_aml_init(void)
+static int __init acpi_aml_init(void)
{
int ret;
@@ -771,7 +771,7 @@ int __init acpi_aml_init(void)
return 0;
}
-void __exit acpi_aml_exit(void)
+static void __exit acpi_aml_exit(void)
{
if (acpi_aml_initialized) {
acpi_unregister_debugger(&acpi_aml_debugger);
diff --git a/drivers/acpi/acpi_lpit.c b/drivers/acpi/acpi_lpit.c
index 953437a216f6..48e5059d67ca 100644
--- a/drivers/acpi/acpi_lpit.c
+++ b/drivers/acpi/acpi_lpit.c
@@ -151,10 +151,11 @@ void acpi_init_lpit(void)
struct acpi_table_lpit *lpit;
status = acpi_get_table(ACPI_SIG_LPIT, 0, (struct acpi_table_header **)&lpit);
-
if (ACPI_FAILURE(status))
return;
lpit_process((u64)lpit + sizeof(*lpit),
(u64)lpit + lpit->header.length);
+
+ acpi_put_table((struct acpi_table_header *)lpit);
}
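The acpi_lpit.c hunk above, like the watchdog, bert, einj, erst and gtdt hunks further down, fixes an ACPI table reference leak: acpi_get_table() pins the table mapping, so one-shot init-time users must release it with acpi_put_table() on every exit path. A minimal sketch of that balanced get/put shape follows; example_table_init() and example_parse() are hypothetical names, not code from the patch.

static int __init example_table_init(void)
{
	struct acpi_table_header *tab;
	acpi_status status;
	int rc;

	status = acpi_get_table(ACPI_SIG_LPIT, 0, &tab);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	rc = example_parse(tab);	/* hypothetical parsing helper */

	/* Drop the reference taken by acpi_get_table() on all paths. */
	acpi_put_table(tab);
	return rc;
}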
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index dee999938213..5e2bfbcf526f 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -1041,7 +1041,7 @@ static int acpi_lpss_do_suspend_late(struct device *dev)
{
int ret;
- if (dev_pm_smart_suspend_and_suspended(dev))
+ if (dev_pm_skip_suspend(dev))
return 0;
ret = pm_generic_suspend_late(dev);
@@ -1093,6 +1093,9 @@ static int acpi_lpss_resume_early(struct device *dev)
if (pdata->dev_desc->resume_from_noirq)
return 0;
+ if (dev_pm_skip_resume(dev))
+ return 0;
+
return acpi_lpss_do_resume_early(dev);
}
@@ -1102,12 +1105,9 @@ static int acpi_lpss_resume_noirq(struct device *dev)
int ret;
/* Follow acpi_subsys_resume_noirq(). */
- if (dev_pm_may_skip_resume(dev))
+ if (dev_pm_skip_resume(dev))
return 0;
- if (dev_pm_smart_suspend_and_suspended(dev))
- pm_runtime_set_active(dev);
-
ret = pm_generic_resume_noirq(dev);
if (ret)
return ret;
@@ -1169,7 +1169,7 @@ static int acpi_lpss_poweroff_late(struct device *dev)
{
struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
- if (dev_pm_smart_suspend_and_suspended(dev))
+ if (dev_pm_skip_suspend(dev))
return 0;
if (pdata->dev_desc->resume_from_noirq)
@@ -1182,7 +1182,7 @@ static int acpi_lpss_poweroff_noirq(struct device *dev)
{
struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
- if (dev_pm_smart_suspend_and_suspended(dev))
+ if (dev_pm_skip_suspend(dev))
return 0;
if (pdata->dev_desc->resume_from_noirq) {
diff --git a/drivers/acpi/acpi_tad.c b/drivers/acpi/acpi_tad.c
index 33a4bcdaa4d7..7d45cce0c3c1 100644
--- a/drivers/acpi/acpi_tad.c
+++ b/drivers/acpi/acpi_tad.c
@@ -624,7 +624,7 @@ static int acpi_tad_probe(struct platform_device *pdev)
*/
device_init_wakeup(dev, true);
dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
- DPM_FLAG_LEAVE_SUSPENDED);
+ DPM_FLAG_MAY_SKIP_RESUME);
/*
* The platform bus type layer tells the ACPI PM domain powers up the
* device, so set the runtime PM status of it to "active".
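The acpi_lpss.c and acpi_tad.c hunks above track a device PM API rename in this cycle: DPM_FLAG_LEAVE_SUSPENDED becomes DPM_FLAG_MAY_SKIP_RESUME, and the dev_pm_smart_suspend_and_suspended() checks become dev_pm_skip_suspend() / dev_pm_skip_resume(). A hedged sketch of how a driver would use the renamed helpers is below; the example_* functions are illustrative, not taken from the patch.

static int example_probe(struct platform_device *pdev)
{
	device_init_wakeup(&pdev->dev, true);
	dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_SMART_SUSPEND |
					    DPM_FLAG_MAY_SKIP_RESUME);
	return 0;
}

static int example_suspend_late(struct device *dev)
{
	/* Device is already runtime-suspended; leave it alone. */
	if (dev_pm_skip_suspend(dev))
		return 0;
	return pm_generic_suspend_late(dev);
}

static int example_resume_early(struct device *dev)
{
	/* The PM core decided resume can be skipped for this device. */
	if (dev_pm_skip_resume(dev))
		return 0;
	return pm_generic_resume_early(dev);
}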
diff --git a/drivers/acpi/acpi_watchdog.c b/drivers/acpi/acpi_watchdog.c
index 6e9ec6e3fe47..5c1e9ea43123 100644
--- a/drivers/acpi/acpi_watchdog.c
+++ b/drivers/acpi/acpi_watchdog.c
@@ -73,6 +73,7 @@ static const struct acpi_table_wdat *acpi_watchdog_get_wdat(void)
}
if (acpi_watchdog_uses_rtc(wdat)) {
+ acpi_put_table((struct acpi_table_header *)wdat);
pr_info("Skipping WDAT on this system because it uses RTC SRAM\n");
return NULL;
}
@@ -117,12 +118,12 @@ void __init acpi_watchdog_init(void)
/* Watchdog disabled by BIOS */
if (!(wdat->flags & ACPI_WDAT_ENABLED))
- return;
+ goto fail_put_wdat;
/* Skip legacy PCI WDT devices */
if (wdat->pci_segment != 0xff || wdat->pci_bus != 0xff ||
wdat->pci_device != 0xff || wdat->pci_function != 0xff)
- return;
+ goto fail_put_wdat;
INIT_LIST_HEAD(&resource_list);
@@ -188,4 +189,6 @@ void __init acpi_watchdog_init(void)
fail_free_resource_list:
resource_list_free(&resource_list);
+fail_put_wdat:
+ acpi_put_table((struct acpi_table_header *)wdat);
}
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 38ffa2c0a496..1030a0ce1599 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -290,6 +290,7 @@ ACPI_GLOBAL(struct acpi_external_file *, acpi_gbl_external_file_list);
#ifdef ACPI_DEBUGGER
ACPI_INIT_GLOBAL(u8, acpi_gbl_abort_method, FALSE);
ACPI_INIT_GLOBAL(acpi_thread_id, acpi_gbl_db_thread_id, ACPI_INVALID_THREAD_ID);
+ACPI_INIT_GLOBAL(u32, acpi_gbl_next_cmd_num, 1);
ACPI_GLOBAL(u8, acpi_gbl_db_opt_no_ini_methods);
ACPI_GLOBAL(u8, acpi_gbl_db_opt_no_region_support);
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h
index cd0f5df0ea23..2cbb56652f1c 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -640,10 +640,10 @@ const union acpi_predefined_info acpi_gbl_predefined_methods[] = {
{{"_NIC", METHOD_0ARGS, /* ACPI 6.3 */
METHOD_RETURNS(ACPI_RTYPE_BUFFER)}},
- {{"_NIG", METHOD_1ARGS(ACPI_TYPE_BUFFER), /* ACPI 6.3 */
+ {{"_NIG", METHOD_0ARGS, /* ACPI 6.3 */
METHOD_RETURNS(ACPI_RTYPE_BUFFER)}},
- {{"_NIH", METHOD_0ARGS, /* ACPI 6.3 */
+ {{"_NIH", METHOD_1ARGS(ACPI_TYPE_BUFFER), /* ACPI 6.3 */
METHOD_RETURNS(ACPI_RTYPE_BUFFER)}},
{{"_NTT", METHOD_0ARGS,
diff --git a/drivers/acpi/acpica/dbdisply.c b/drivers/acpi/acpica/dbdisply.c
index f2df416d0d2d..d41eb9e67500 100644
--- a/drivers/acpi/acpica/dbdisply.c
+++ b/drivers/acpi/acpica/dbdisply.c
@@ -51,6 +51,8 @@ static acpi_adr_space_type acpi_gbl_space_id_list[] = {
ACPI_ADR_SPACE_IPMI,
ACPI_ADR_SPACE_GPIO,
ACPI_ADR_SPACE_GSBUS,
+ ACPI_ADR_SPACE_PLATFORM_COMM,
+ ACPI_ADR_SPACE_PLATFORM_RT,
ACPI_ADR_SPACE_DATA_TABLE,
ACPI_ADR_SPACE_FIXED_HARDWARE
};
diff --git a/drivers/acpi/acpica/dbhistry.c b/drivers/acpi/acpica/dbhistry.c
index bb9600b867ee..f5fba14461a6 100644
--- a/drivers/acpi/acpica/dbhistry.c
+++ b/drivers/acpi/acpica/dbhistry.c
@@ -27,7 +27,6 @@ static HISTORY_INFO acpi_gbl_history_buffer[HISTORY_SIZE];
static u16 acpi_gbl_lo_history = 0;
static u16 acpi_gbl_num_history = 0;
static u16 acpi_gbl_next_history_index = 0;
-u32 acpi_gbl_next_cmd_num = 1;
/*******************************************************************************
*
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
index c901f5aec739..fa768b3a989e 100644
--- a/drivers/acpi/acpica/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -177,7 +177,10 @@ acpi_ds_create_buffer_field(union acpi_parse_object *op,
arg->common.value.string, ACPI_TYPE_ANY,
ACPI_IMODE_LOAD_PASS1, flags,
walk_state, &node);
- if (ACPI_FAILURE(status)) {
+ if ((walk_state->parse_flags & ACPI_PARSE_DISASSEMBLE)
+ && status == AE_ALREADY_EXISTS) {
+ status = AE_OK;
+ } else if (ACPI_FAILURE(status)) {
ACPI_ERROR_NAMESPACE(walk_state->scope_info,
arg->common.value.string, status);
return_ACPI_STATUS(status);
@@ -514,13 +517,20 @@ acpi_ds_create_field(union acpi_parse_object *op,
info.region_node = region_node;
status = acpi_ds_get_field_names(&info, walk_state, arg->common.next);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
if (info.region_node->object->region.space_id ==
- ACPI_ADR_SPACE_PLATFORM_COMM
- && !(region_node->object->field.internal_pcc_buffer =
- ACPI_ALLOCATE_ZEROED(info.region_node->object->region.
- length))) {
- return_ACPI_STATUS(AE_NO_MEMORY);
+ ACPI_ADR_SPACE_PLATFORM_COMM) {
+ region_node->object->field.internal_pcc_buffer =
+ ACPI_ALLOCATE_ZEROED(info.region_node->object->region.
+ length);
+ if (!region_node->object->field.internal_pcc_buffer) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
}
+
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c
index e85eb31e5075..3323a2ba6a31 100644
--- a/drivers/acpi/acpica/exfield.c
+++ b/drivers/acpi/acpica/exfield.c
@@ -22,7 +22,7 @@ ACPI_MODULE_NAME("exfield")
*/
#define ACPI_INVALID_PROTOCOL_ID 0x80
#define ACPI_MAX_PROTOCOL_ID 0x0F
-const u8 acpi_protocol_lengths[] = {
+static const u8 acpi_protocol_lengths[] = {
ACPI_INVALID_PROTOCOL_ID, /* 0 - reserved */
ACPI_INVALID_PROTOCOL_ID, /* 1 - reserved */
0x00, /* 2 - ATTRIB_QUICK */
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
index 177ab88d95de..ed9aedf604a1 100644
--- a/drivers/acpi/acpica/utdecode.c
+++ b/drivers/acpi/acpica/utdecode.c
@@ -78,7 +78,8 @@ const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS] = {
"IPMI", /* 0x07 */
"GeneralPurposeIo", /* 0x08 */
"GenericSerialBus", /* 0x09 */
- "PCC" /* 0x0A */
+ "PCC", /* 0x0A */
+ "PlatformRtMechanism" /* 0x0B */
};
const char *acpi_ut_get_region_name(u8 space_id)
diff --git a/drivers/acpi/apei/bert.c b/drivers/acpi/apei/bert.c
index 1155fb9dcc3a..19e50fcbf4d6 100644
--- a/drivers/acpi/apei/bert.c
+++ b/drivers/acpi/apei/bert.c
@@ -119,7 +119,7 @@ static int __init bert_init(void)
rc = bert_check_table(bert_tab);
if (rc) {
pr_err(FW_BUG "table invalid.\n");
- return rc;
+ goto out_put_bert_tab;
}
region_len = bert_tab->region_length;
@@ -127,7 +127,7 @@ static int __init bert_init(void)
rc = apei_resources_add(&bert_resources, bert_tab->address,
region_len, true);
if (rc)
- return rc;
+ goto out_put_bert_tab;
rc = apei_resources_request(&bert_resources, "APEI BERT");
if (rc)
goto out_fini;
@@ -142,6 +142,8 @@ static int __init bert_init(void)
apei_resources_release(&bert_resources);
out_fini:
apei_resources_fini(&bert_resources);
+out_put_bert_tab:
+ acpi_put_table((struct acpi_table_header *)bert_tab);
return rc;
}
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
index 086373f8ccb1..133156759551 100644
--- a/drivers/acpi/apei/einj.c
+++ b/drivers/acpi/apei/einj.c
@@ -692,7 +692,7 @@ static int __init einj_init(void)
rc = einj_check_table(einj_tab);
if (rc) {
pr_warn(FW_BUG "Invalid EINJ table.\n");
- return -EINVAL;
+ goto err_put_table;
}
rc = -ENOMEM;
@@ -760,6 +760,8 @@ err_release:
err_fini:
apei_resources_fini(&einj_resources);
debugfs_remove_recursive(einj_debug_dir);
+err_put_table:
+ acpi_put_table((struct acpi_table_header *)einj_tab);
return rc;
}
@@ -780,6 +782,7 @@ static void __exit einj_exit(void)
apei_resources_release(&einj_resources);
apei_resources_fini(&einj_resources);
debugfs_remove_recursive(einj_debug_dir);
+ acpi_put_table((struct acpi_table_header *)einj_tab);
}
module_init(einj_init);
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index 2015a0967cbb..2e0b0fcad960 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -1122,7 +1122,7 @@ static int __init erst_init(void)
rc = erst_check_table(erst_tab);
if (rc) {
pr_err(FW_BUG "ERST table is invalid.\n");
- goto err;
+ goto err_put_erst_tab;
}
apei_resources_init(&erst_resources);
@@ -1196,6 +1196,8 @@ err_release:
apei_resources_release(&erst_resources);
err_fini:
apei_resources_fini(&erst_resources);
+err_put_erst_tab:
+ acpi_put_table((struct acpi_table_header *)erst_tab);
err:
erst_disable = 1;
return rc;
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 24c9642e8fc7..81bf71b10d44 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -40,6 +40,7 @@
#include <linux/sched/clock.h>
#include <linux/uuid.h>
#include <linux/ras.h>
+#include <linux/task_work.h>
#include <acpi/actbl1.h>
#include <acpi/ghes.h>
@@ -167,12 +168,6 @@ int ghes_estatus_pool_init(int num_ghes)
if (!addr)
goto err_pool_alloc;
- /*
- * New allocation must be visible in all pgd before it can be found by
- * an NMI allocating from the pool.
- */
- vmalloc_sync_mappings();
-
rc = gen_pool_add(ghes_estatus_pool, addr, PAGE_ALIGN(len), -1);
if (rc)
goto err_pool_add;
@@ -414,23 +409,46 @@ static void ghes_clear_estatus(struct ghes *ghes,
ghes_ack_error(ghes->generic_v2);
}
-static void ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata, int sev)
+/*
+ * Called as task_work before returning to user-space.
+ * Ensure any queued work has been done before we return to the context that
+ * triggered the notification.
+ */
+static void ghes_kick_task_work(struct callback_head *head)
+{
+ struct acpi_hest_generic_status *estatus;
+ struct ghes_estatus_node *estatus_node;
+ u32 node_len;
+
+ estatus_node = container_of(head, struct ghes_estatus_node, task_work);
+ if (IS_ENABLED(CONFIG_ACPI_APEI_MEMORY_FAILURE))
+ memory_failure_queue_kick(estatus_node->task_work_cpu);
+
+ estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
+ node_len = GHES_ESTATUS_NODE_LEN(cper_estatus_len(estatus));
+ gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node, node_len);
+}
+
+static bool ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata,
+ int sev)
{
-#ifdef CONFIG_ACPI_APEI_MEMORY_FAILURE
unsigned long pfn;
int flags = -1;
int sec_sev = ghes_severity(gdata->error_severity);
struct cper_sec_mem_err *mem_err = acpi_hest_get_payload(gdata);
+ if (!IS_ENABLED(CONFIG_ACPI_APEI_MEMORY_FAILURE))
+ return false;
+
if (!(mem_err->validation_bits & CPER_MEM_VALID_PA))
- return;
+ return false;
pfn = mem_err->physical_addr >> PAGE_SHIFT;
if (!pfn_valid(pfn)) {
pr_warn_ratelimited(FW_WARN GHES_PFX
"Invalid address in generic error data: %#llx\n",
mem_err->physical_addr);
- return;
+ return false;
}
/* iff following two events can be handled properly by now */
@@ -440,9 +458,12 @@ static void ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata, int
if (sev == GHES_SEV_RECOVERABLE && sec_sev == GHES_SEV_RECOVERABLE)
flags = 0;
- if (flags != -1)
+ if (flags != -1) {
memory_failure_queue(pfn, flags);
-#endif
+ return true;
+ }
+
+ return false;
}
/*
@@ -490,7 +511,7 @@ static void ghes_handle_aer(struct acpi_hest_generic_data *gdata)
#endif
}
-static void ghes_do_proc(struct ghes *ghes,
+static bool ghes_do_proc(struct ghes *ghes,
const struct acpi_hest_generic_status *estatus)
{
int sev, sec_sev;
@@ -498,6 +519,7 @@ static void ghes_do_proc(struct ghes *ghes,
guid_t *sec_type;
const guid_t *fru_id = &guid_null;
char *fru_text = "";
+ bool queued = false;
sev = ghes_severity(estatus->error_severity);
apei_estatus_for_each_section(estatus, gdata) {
@@ -515,7 +537,7 @@ static void ghes_do_proc(struct ghes *ghes,
ghes_edac_report_mem_error(sev, mem_err);
arch_apei_report_mem_error(sev, mem_err);
- ghes_handle_memory_failure(gdata, sev);
+ queued = ghes_handle_memory_failure(gdata, sev);
}
else if (guid_equal(sec_type, &CPER_SEC_PCIE)) {
ghes_handle_aer(gdata);
@@ -532,6 +554,8 @@ static void ghes_do_proc(struct ghes *ghes,
gdata->error_data_length);
}
}
+
+ return queued;
}
static void __ghes_print_estatus(const char *pfx,
@@ -827,7 +851,9 @@ static void ghes_proc_in_irq(struct irq_work *irq_work)
struct ghes_estatus_node *estatus_node;
struct acpi_hest_generic *generic;
struct acpi_hest_generic_status *estatus;
+ bool task_work_pending;
u32 len, node_len;
+ int ret;
llnode = llist_del_all(&ghes_estatus_llist);
/*
@@ -842,14 +868,26 @@ static void ghes_proc_in_irq(struct irq_work *irq_work)
estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
len = cper_estatus_len(estatus);
node_len = GHES_ESTATUS_NODE_LEN(len);
- ghes_do_proc(estatus_node->ghes, estatus);
+ task_work_pending = ghes_do_proc(estatus_node->ghes, estatus);
if (!ghes_estatus_cached(estatus)) {
generic = estatus_node->generic;
if (ghes_print_estatus(NULL, generic, estatus))
ghes_estatus_cache_add(generic, estatus);
}
- gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node,
- node_len);
+
+ if (task_work_pending && current->mm != &init_mm) {
+ estatus_node->task_work.func = ghes_kick_task_work;
+ estatus_node->task_work_cpu = smp_processor_id();
+ ret = task_work_add(current, &estatus_node->task_work,
+ true);
+ if (ret)
+ estatus_node->task_work.func = NULL;
+ }
+
+ if (!estatus_node->task_work.func)
+ gen_pool_free(ghes_estatus_pool,
+ (unsigned long)estatus_node, node_len);
+
llnode = next;
}
}
@@ -909,6 +947,7 @@ static int ghes_in_nmi_queue_one_entry(struct ghes *ghes,
estatus_node->ghes = ghes;
estatus_node->generic = ghes->generic;
+ estatus_node->task_work.func = NULL;
estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
if (__ghes_read_estatus(estatus, buf_paddr, fixmap_idx, len)) {
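The ghes.c changes above drop the #ifdef CONFIG_ACPI_APEI_MEMORY_FAILURE guard in favour of IS_ENABLED() and defer the follow-up work with task_work, so that queued memory_failure() work is kicked before the interrupted task returns to user space; the estatus node is freed only once that callback has run. A rough sketch of the task_work pattern, using a hypothetical kmalloc'd payload instead of the real ghes_estatus_node, is below (task_work_add() is shown with the bool-notify signature this series uses).

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/task_work.h>

struct example_work {
	struct callback_head twork;
	int cpu;
};

/* Runs in process context, just before the task returns to user space. */
static void example_task_work(struct callback_head *head)
{
	struct example_work *w = container_of(head, struct example_work, twork);

	memory_failure_queue_kick(w->cpu);
	kfree(w);
}

/* Called from the irq_work handler that consumed the error record. */
static void example_defer_to_task(struct example_work *w)
{
	w->cpu = smp_processor_id();
	init_task_work(&w->twork, example_task_work);
	if (task_work_add(current, &w->twork, true))
		kfree(w);	/* task is exiting; nothing left to kick */
}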
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
index 822402480f7d..953a2fae8b15 100644
--- a/drivers/acpi/apei/hest.c
+++ b/drivers/acpi/apei/hest.c
@@ -243,8 +243,8 @@ void __init acpi_hest_init(void)
} else if (ACPI_FAILURE(status)) {
const char *msg = acpi_format_exception(status);
pr_err(HEST_PFX "Failed to get table, %s\n", msg);
- rc = -EINVAL;
- goto err;
+ hest_disable = HEST_DISABLED;
+ return;
}
rc = apei_hest_parse(hest_parse_cmc, NULL);
@@ -266,4 +266,5 @@ void __init acpi_hest_init(void)
return;
err:
hest_disable = HEST_DISABLED;
+ acpi_put_table((struct acpi_table_header *)hest_tab);
}
diff --git a/drivers/acpi/arm64/gtdt.c b/drivers/acpi/arm64/gtdt.c
index 01962c63a711..f2d0e5915dab 100644
--- a/drivers/acpi/arm64/gtdt.c
+++ b/drivers/acpi/arm64/gtdt.c
@@ -394,7 +394,7 @@ static int __init gtdt_sbsa_gwdt_init(void)
*/
ret = acpi_gtdt_init(table, &timer_count);
if (ret || !timer_count)
- return ret;
+ goto out_put_gtdt;
for_each_platform_timer(platform_timer) {
if (is_non_secure_watchdog(platform_timer)) {
@@ -408,6 +408,8 @@ static int __init gtdt_sbsa_gwdt_init(void)
if (gwdt_count)
pr_info("found %d SBSA generic Watchdog(s).\n", gwdt_count);
+out_put_gtdt:
+ acpi_put_table(table);
return ret;
}
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index 7d04424189df..28a6b387e80e 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -299,61 +299,8 @@ out:
return status;
}
-struct iort_workaround_oem_info {
- char oem_id[ACPI_OEM_ID_SIZE + 1];
- char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
- u32 oem_revision;
-};
-
-static bool apply_id_count_workaround;
-
-static struct iort_workaround_oem_info wa_info[] __initdata = {
- {
- .oem_id = "HISI ",
- .oem_table_id = "HIP07 ",
- .oem_revision = 0,
- }, {
- .oem_id = "HISI ",
- .oem_table_id = "HIP08 ",
- .oem_revision = 0,
- }
-};
-
-static void __init
-iort_check_id_count_workaround(struct acpi_table_header *tbl)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(wa_info); i++) {
- if (!memcmp(wa_info[i].oem_id, tbl->oem_id, ACPI_OEM_ID_SIZE) &&
- !memcmp(wa_info[i].oem_table_id, tbl->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
- wa_info[i].oem_revision == tbl->oem_revision) {
- apply_id_count_workaround = true;
- pr_warn(FW_BUG "ID count for ID mapping entry is wrong, applying workaround\n");
- break;
- }
- }
-}
-
-static inline u32 iort_get_map_max(struct acpi_iort_id_mapping *map)
-{
- u32 map_max = map->input_base + map->id_count;
-
- /*
- * The IORT specification revision D (Section 3, table 4, page 9) says
- * Number of IDs = The number of IDs in the range minus one, but the
- * IORT code ignored the "minus one", and some firmware did that too,
- * so apply a workaround here to keep compatible with both the spec
- * compliant and non-spec compliant firmwares.
- */
- if (apply_id_count_workaround)
- map_max--;
-
- return map_max;
-}
-
static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in,
- u32 *rid_out)
+ u32 *rid_out, bool check_overlap)
{
/* Single mapping does not care for input id */
if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
@@ -368,10 +315,37 @@ static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in,
return -ENXIO;
}
- if (rid_in < map->input_base || rid_in > iort_get_map_max(map))
+ if (rid_in < map->input_base ||
+ (rid_in > map->input_base + map->id_count))
return -ENXIO;
+ if (check_overlap) {
+ /*
+ * We already found a mapping for this input ID at the end of
+ * another region. If it coincides with the start of this
+ * region, we assume the prior match was due to the off-by-1
+ * issue mentioned below, and allow it to be superseded.
+ * Otherwise, things are *really* broken, and we just disregard
+ * duplicate matches entirely to retain compatibility.
+ */
+ pr_err(FW_BUG "[map %p] conflicting mapping for input ID 0x%x\n",
+ map, rid_in);
+ if (rid_in != map->input_base)
+ return -ENXIO;
+
+ pr_err(FW_BUG "applying workaround.\n");
+ }
+
*rid_out = map->output_base + (rid_in - map->input_base);
+
+ /*
+ * Due to confusion regarding the meaning of the id_count field (which
+ * carries the number of IDs *minus 1*), we may have to disregard this
+ * match if it is at the end of the range, and overlaps with the start
+ * of another one.
+ */
+ if (map->id_count > 0 && rid_in == map->input_base + map->id_count)
+ return -EAGAIN;
return 0;
}
@@ -414,6 +388,7 @@ static struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node,
static int iort_get_id_mapping_index(struct acpi_iort_node *node)
{
struct acpi_iort_smmu_v3 *smmu;
+ struct acpi_iort_pmcg *pmcg;
switch (node->type) {
case ACPI_IORT_NODE_SMMU_V3:
@@ -441,6 +416,10 @@ static int iort_get_id_mapping_index(struct acpi_iort_node *node)
return smmu->id_mapping_index;
case ACPI_IORT_NODE_PMCG:
+ pmcg = (struct acpi_iort_pmcg *)node->node_data;
+ if (pmcg->overflow_gsiv || node->mapping_count == 0)
+ return -EINVAL;
+
return 0;
default:
return -EINVAL;
@@ -456,7 +435,8 @@ static struct acpi_iort_node *iort_node_map_id(struct acpi_iort_node *node,
/* Parse the ID mapping tree to find specified node type */
while (node) {
struct acpi_iort_id_mapping *map;
- int i, index;
+ int i, index, rc = 0;
+ u32 out_ref = 0, map_id = id;
if (IORT_TYPE_MASK(node->type) & type_mask) {
if (id_out)
@@ -490,15 +470,18 @@ static struct acpi_iort_node *iort_node_map_id(struct acpi_iort_node *node,
if (i == index)
continue;
- if (!iort_id_map(map, node->type, id, &id))
+ rc = iort_id_map(map, node->type, map_id, &id, out_ref);
+ if (!rc)
break;
+ if (rc == -EAGAIN)
+ out_ref = map->output_reference;
}
- if (i == node->mapping_count)
+ if (i == node->mapping_count && !out_ref)
goto fail_map;
node = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
- map->output_reference);
+ rc ? out_ref : map->output_reference);
}
fail_map:
@@ -789,15 +772,6 @@ void acpi_configure_pmsi_domain(struct device *dev)
dev_set_msi_domain(dev, msi_domain);
}
-static int __maybe_unused __get_pci_rid(struct pci_dev *pdev, u16 alias,
- void *data)
-{
- u32 *rid = data;
-
- *rid = alias;
- return 0;
-}
-
#ifdef CONFIG_IOMMU_API
static struct acpi_iort_node *iort_get_msi_resv_iommu(struct device *dev)
{
@@ -1148,13 +1122,10 @@ void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size)
else
size = 1ULL << 32;
- if (dev_is_pci(dev)) {
- ret = acpi_dma_get_range(dev, &dmaaddr, &offset, &size);
- if (ret == -ENODEV)
- ret = rc_dma_get_range(dev, &size);
- } else {
- ret = nc_dma_get_range(dev, &size);
- }
+ ret = acpi_dma_get_range(dev, &dmaaddr, &offset, &size);
+ if (ret == -ENODEV)
+ ret = dev_is_pci(dev) ? rc_dma_get_range(dev, &size)
+ : nc_dma_get_range(dev, &size);
if (!ret) {
/*
@@ -1692,6 +1663,10 @@ void __init acpi_iort_init(void)
{
acpi_status status;
+ /* iort_table will be used at runtime after the iort init,
+ * so we don't need to call acpi_put_table() to release
+ * the IORT table mapping.
+ */
status = acpi_get_table(ACPI_SIG_IORT, 0, &iort_table);
if (ACPI_FAILURE(status)) {
if (status != AE_NOT_FOUND) {
@@ -1703,6 +1678,5 @@ void __init acpi_iort_init(void)
return;
}
- iort_check_id_count_workaround(iort_table);
iort_init_platform_devices();
}
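
To make the id_count handling above concrete: under the spec-compliant reading, id_count is the number of IDs minus one, so an input ID equal to input_base + id_count is the last valid ID of the range and the translation is a simple offset. A stand-alone sketch of that arithmetic with made-up values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Illustrative values; real ones come from the IORT ID mapping entry. */
	uint32_t input_base = 0x100, id_count = 0x7, output_base = 0x8000;
	uint32_t rid_in = input_base + id_count;	/* last ID of the range */
	uint32_t rid_out = output_base + (rid_in - input_base);

	printf("0x%x -> 0x%x\n", rid_in, rid_out);	/* prints 0x107 -> 0x8007 */
	return 0;
}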
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 78cfc70cb320..3c35e57dd854 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -24,7 +24,6 @@
#define PREFIX "ACPI: "
#define ACPI_BUTTON_CLASS "button"
-#define ACPI_BUTTON_FILE_INFO "info"
#define ACPI_BUTTON_FILE_STATE "state"
#define ACPI_BUTTON_TYPE_UNKNOWN 0x00
#define ACPI_BUTTON_NOTIFY_STATUS 0x80
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 8b2e89c20c11..7a99b19bb893 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -350,7 +350,7 @@ static void cppc_chan_tx_done(struct mbox_client *cl, void *msg, int ret)
*(u16 *)msg, ret);
}
-struct mbox_client cppc_mbox_cl = {
+static struct mbox_client cppc_mbox_cl = {
.tx_done = cppc_chan_tx_done,
.knows_txdone = true,
};
@@ -597,7 +597,7 @@ bool __weak cpc_ffh_supported(void)
*
* Return: 0 for success, errno for failure
*/
-int pcc_data_alloc(int pcc_ss_id)
+static int pcc_data_alloc(int pcc_ss_id)
{
if (pcc_ss_id < 0 || pcc_ss_id >= MAX_PCC_SUBSPACES)
return -EINVAL;
@@ -846,6 +846,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
"acpi_cppc");
if (ret) {
per_cpu(cpc_desc_ptr, pr->id) = NULL;
+ kobject_put(&cpc_ptr->kobj);
goto out_free;
}
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 5832bc10aca8..94d91c67aeae 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -186,7 +186,7 @@ int acpi_device_set_power(struct acpi_device *device, int state)
* possibly drop references to the power resources in use.
*/
state = ACPI_STATE_D3_HOT;
- /* If _PR3 is not available, use D3hot as the target state. */
+ /* If D3cold is not supported, use D3hot as the target state. */
if (!device->power.states[ACPI_STATE_D3_COLD].flags.valid)
target_state = state;
} else if (!device->power.states[state].flags.valid) {
@@ -1084,7 +1084,7 @@ int acpi_subsys_suspend_late(struct device *dev)
{
int ret;
- if (dev_pm_smart_suspend_and_suspended(dev))
+ if (dev_pm_skip_suspend(dev))
return 0;
ret = pm_generic_suspend_late(dev);
@@ -1100,10 +1100,8 @@ int acpi_subsys_suspend_noirq(struct device *dev)
{
int ret;
- if (dev_pm_smart_suspend_and_suspended(dev)) {
- dev->power.may_skip_resume = true;
+ if (dev_pm_skip_suspend(dev))
return 0;
- }
ret = pm_generic_suspend_noirq(dev);
if (ret)
@@ -1116,8 +1114,8 @@ int acpi_subsys_suspend_noirq(struct device *dev)
* acpi_subsys_complete() to take care of fixing up the device's state
* anyway, if need be.
*/
- dev->power.may_skip_resume = device_may_wakeup(dev) ||
- !device_can_wakeup(dev);
+ if (device_can_wakeup(dev) && !device_may_wakeup(dev))
+ dev->power.may_skip_resume = false;
return 0;
}
@@ -1129,17 +1127,9 @@ EXPORT_SYMBOL_GPL(acpi_subsys_suspend_noirq);
*/
static int acpi_subsys_resume_noirq(struct device *dev)
{
- if (dev_pm_may_skip_resume(dev))
+ if (dev_pm_skip_resume(dev))
return 0;
- /*
- * Devices with DPM_FLAG_SMART_SUSPEND may be left in runtime suspend
- * during system suspend, so update their runtime PM status to "active"
- * as they will be put into D0 going forward.
- */
- if (dev_pm_smart_suspend_and_suspended(dev))
- pm_runtime_set_active(dev);
-
return pm_generic_resume_noirq(dev);
}
@@ -1153,7 +1143,12 @@ static int acpi_subsys_resume_noirq(struct device *dev)
*/
static int acpi_subsys_resume_early(struct device *dev)
{
- int ret = acpi_dev_resume(dev);
+ int ret;
+
+ if (dev_pm_skip_resume(dev))
+ return 0;
+
+ ret = acpi_dev_resume(dev);
return ret ? ret : pm_generic_resume_early(dev);
}
@@ -1218,7 +1213,7 @@ static int acpi_subsys_poweroff_late(struct device *dev)
{
int ret;
- if (dev_pm_smart_suspend_and_suspended(dev))
+ if (dev_pm_skip_suspend(dev))
return 0;
ret = pm_generic_poweroff_late(dev);
@@ -1234,7 +1229,7 @@ static int acpi_subsys_poweroff_late(struct device *dev)
*/
static int acpi_subsys_poweroff_noirq(struct device *dev)
{
- if (dev_pm_smart_suspend_and_suspended(dev))
+ if (dev_pm_skip_suspend(dev))
return 0;
return pm_generic_poweroff_noirq(dev);
diff --git a/drivers/acpi/dptf/dptf_power.c b/drivers/acpi/dptf/dptf_power.c
index e4e8b75d39f0..5fab7e350db8 100644
--- a/drivers/acpi/dptf/dptf_power.c
+++ b/drivers/acpi/dptf/dptf_power.c
@@ -10,12 +10,19 @@
#include <linux/platform_device.h>
/*
- * Presentation of attributes which are defined for INT3407. They are:
+ * Presentation of attributes which are defined for INT3407 and INT3532.
+ * They are:
* PMAX : Maximum platform power
* PSRC : Platform power source
* ARTG : Adapter rating
* CTYP : Charger type
* PBSS : Battery steady power
+ * PROP : Rest of worst case platform Power
+ * PBSS : Power Battery Steady State
+ * RBHF : High Frequency Impedance
+ * VBNL : Instantaneous No-Load Voltage
+ * CMPP : Current Discharge Capability
*/
#define DPTF_POWER_SHOW(name, object) \
static ssize_t name##_show(struct device *dev,\
@@ -39,12 +46,42 @@ DPTF_POWER_SHOW(platform_power_source, PSRC)
DPTF_POWER_SHOW(adapter_rating_mw, ARTG)
DPTF_POWER_SHOW(battery_steady_power_mw, PBSS)
DPTF_POWER_SHOW(charger_type, CTYP)
+DPTF_POWER_SHOW(rest_of_platform_power_mw, PROP)
+DPTF_POWER_SHOW(max_steady_state_power_mw, PBSS)
+DPTF_POWER_SHOW(high_freq_impedance_mohm, RBHF)
+DPTF_POWER_SHOW(no_load_voltage_mv, VBNL)
+DPTF_POWER_SHOW(current_discharge_capbility_ma, CMPP);
static DEVICE_ATTR_RO(max_platform_power_mw);
static DEVICE_ATTR_RO(platform_power_source);
static DEVICE_ATTR_RO(adapter_rating_mw);
static DEVICE_ATTR_RO(battery_steady_power_mw);
static DEVICE_ATTR_RO(charger_type);
+static DEVICE_ATTR_RO(rest_of_platform_power_mw);
+static DEVICE_ATTR_RO(max_steady_state_power_mw);
+static DEVICE_ATTR_RO(high_freq_impedance_mohm);
+static DEVICE_ATTR_RO(no_load_voltage_mv);
+static DEVICE_ATTR_RO(current_discharge_capbility_ma);
+
+static ssize_t prochot_confirm_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct acpi_device *acpi_dev = dev_get_drvdata(dev);
+ acpi_status status;
+ int seq_no;
+
+ if (kstrtouint(buf, 0, &seq_no) < 0)
+ return -EINVAL;
+
+ status = acpi_execute_simple_method(acpi_dev->handle, "PBOK", seq_no);
+ if (ACPI_SUCCESS(status))
+ return count;
+
+ return -EINVAL;
+}
+
+static DEVICE_ATTR_WO(prochot_confirm);
static struct attribute *dptf_power_attrs[] = {
&dev_attr_max_platform_power_mw.attr,
@@ -52,6 +89,8 @@ static struct attribute *dptf_power_attrs[] = {
&dev_attr_adapter_rating_mw.attr,
&dev_attr_battery_steady_power_mw.attr,
&dev_attr_charger_type.attr,
+ &dev_attr_rest_of_platform_power_mw.attr,
+ &dev_attr_prochot_confirm.attr,
NULL
};
@@ -60,10 +99,79 @@ static const struct attribute_group dptf_power_attribute_group = {
.name = "dptf_power"
};
+static struct attribute *dptf_battery_attrs[] = {
+ &dev_attr_max_platform_power_mw.attr,
+ &dev_attr_max_steady_state_power_mw.attr,
+ &dev_attr_high_freq_impedance_mohm.attr,
+ &dev_attr_no_load_voltage_mv.attr,
+ &dev_attr_current_discharge_capbility_ma.attr,
+ NULL
+};
+
+static const struct attribute_group dptf_battery_attribute_group = {
+ .attrs = dptf_battery_attrs,
+ .name = "dptf_battery"
+};
+
+#define MAX_POWER_CHANGED 0x80
+#define POWER_STATE_CHANGED 0x81
+#define STEADY_STATE_POWER_CHANGED 0x83
+#define POWER_PROP_CHANGE_EVENT 0x84
+#define IMPEDANCED_CHNGED 0x85
+#define VOLTAGE_CURRENT_CHANGED 0x86
+
+static long long dptf_participant_type(acpi_handle handle)
+{
+ unsigned long long ptype;
+ acpi_status status;
+
+ status = acpi_evaluate_integer(handle, "PTYP", NULL, &ptype);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ return ptype;
+}
+
+static void dptf_power_notify(acpi_handle handle, u32 event, void *data)
+{
+ struct platform_device *pdev = data;
+ char *attr;
+
+ switch (event) {
+ case POWER_STATE_CHANGED:
+ attr = "platform_power_source";
+ break;
+ case POWER_PROP_CHANGE_EVENT:
+ attr = "rest_of_platform_power_mw";
+ break;
+ case MAX_POWER_CHANGED:
+ attr = "max_platform_power_mw";
+ break;
+ case STEADY_STATE_POWER_CHANGED:
+ attr = "max_steady_state_power_mw";
+ break;
+ case VOLTAGE_CURRENT_CHANGED:
+ attr = "no_load_voltage_mv";
+ break;
+ default:
+ dev_err(&pdev->dev, "Unsupported event [0x%x]\n", event);
+ return;
+ }
+
+ /*
+ * Notify that an attribute is changed, so that user space can read
+ * again.
+ */
+ if (dptf_participant_type(handle) == 0x0CULL)
+ sysfs_notify(&pdev->dev.kobj, "dptf_battery", attr);
+ else
+ sysfs_notify(&pdev->dev.kobj, "dptf_power", attr);
+}
+
static int dptf_power_add(struct platform_device *pdev)
{
+ const struct attribute_group *attr_group;
struct acpi_device *acpi_dev;
- acpi_status status;
unsigned long long ptype;
int result;
@@ -71,17 +179,29 @@ static int dptf_power_add(struct platform_device *pdev)
if (!acpi_dev)
return -ENODEV;
- status = acpi_evaluate_integer(acpi_dev->handle, "PTYP", NULL, &ptype);
- if (ACPI_FAILURE(status))
+ ptype = dptf_participant_type(acpi_dev->handle);
+ if (ptype == 0x11)
+ attr_group = &dptf_power_attribute_group;
+ else if (ptype == 0x0C)
+ attr_group = &dptf_battery_attribute_group;
+ else
return -ENODEV;
- if (ptype != 0x11)
- return -ENODEV;
+ result = acpi_install_notify_handler(acpi_dev->handle,
+ ACPI_DEVICE_NOTIFY,
+ dptf_power_notify,
+ (void *)pdev);
+ if (result)
+ return result;
result = sysfs_create_group(&pdev->dev.kobj,
- &dptf_power_attribute_group);
- if (result)
+ attr_group);
+ if (result) {
+ acpi_remove_notify_handler(acpi_dev->handle,
+ ACPI_DEVICE_NOTIFY,
+ dptf_power_notify);
return result;
+ }
platform_set_drvdata(pdev, acpi_dev);
@@ -90,14 +210,23 @@ static int dptf_power_add(struct platform_device *pdev)
static int dptf_power_remove(struct platform_device *pdev)
{
+ struct acpi_device *acpi_dev = platform_get_drvdata(pdev);
+
+ acpi_remove_notify_handler(acpi_dev->handle,
+ ACPI_DEVICE_NOTIFY,
+ dptf_power_notify);
- sysfs_remove_group(&pdev->dev.kobj, &dptf_power_attribute_group);
+ if (dptf_participant_type(acpi_dev->handle) == 0x0CULL)
+ sysfs_remove_group(&pdev->dev.kobj, &dptf_battery_attribute_group);
+ else
+ sysfs_remove_group(&pdev->dev.kobj, &dptf_power_attribute_group);
return 0;
}
static const struct acpi_device_id int3407_device_ids[] = {
{"INT3407", 0},
+ {"INT3532", 0},
{"INTC1047", 0},
{"", 0},
};
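
dptf_power_notify() only calls sysfs_notify(), so user space is expected to poll the attribute and re-read it when an event arrives. A hedged userspace sketch; the sysfs path is an assumption for illustration and depends on the actual ACPI device instance:

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical path; substitute the real INT3407/INT3532 device. */
	const char *path =
		"/sys/bus/platform/devices/INT3407:00/dptf_power/platform_power_source";
	char buf[32];
	ssize_t n;
	int fd = open(path, O_RDONLY);

	if (fd < 0)
		return 1;

	/* sysfs requires an initial read before poll() reports changes. */
	pread(fd, buf, sizeof(buf), 0);

	struct pollfd pfd = { .fd = fd, .events = POLLPRI | POLLERR };
	if (poll(&pfd, 1, -1) > 0) {
		n = pread(fd, buf, sizeof(buf) - 1, 0);
		if (n > 0) {
			buf[n] = '\0';
			printf("new value: %s", buf);
		}
	}
	close(fd);
	return 0;
}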
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 1af2125e17d5..04ce2b96c3da 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -34,7 +34,6 @@
#define ACPI_EC_CLASS "embedded_controller"
#define ACPI_EC_DEVICE_NAME "Embedded Controller"
-#define ACPI_EC_FILE_INFO "info"
/* EC status register */
#define ACPI_EC_FLAG_OBF 0x01 /* Output buffer full */
@@ -1783,13 +1782,14 @@ static void __init acpi_ec_ecdt_start(void)
return;
status = acpi_get_handle(NULL, ecdt_ptr->id, &handle);
- if (ACPI_FAILURE(status))
- return;
+ if (ACPI_SUCCESS(status)) {
+ boot_ec->handle = handle;
- boot_ec->handle = handle;
+ /* Add a special ACPI device object to represent the boot EC. */
+ acpi_bus_register_early_device(ACPI_BUS_TYPE_ECDT_EC);
+ }
- /* Add a special ACPI device object to represent the boot EC. */
- acpi_bus_register_early_device(ACPI_BUS_TYPE_ECDT_EC);
+ acpi_put_table((struct acpi_table_header *)ecdt_ptr);
}
/*
@@ -1891,12 +1891,12 @@ void __init acpi_ec_ecdt_probe(void)
* Asus X50GL:
* https://bugzilla.kernel.org/show_bug.cgi?id=11880
*/
- return;
+ goto out;
}
ec = acpi_ec_alloc();
if (!ec)
- return;
+ goto out;
if (EC_FLAGS_CORRECT_ECDT) {
ec->command_addr = ecdt_ptr->data.address;
@@ -1922,13 +1922,16 @@ void __init acpi_ec_ecdt_probe(void)
ret = acpi_ec_setup(ec, NULL);
if (ret) {
acpi_ec_free(ec);
- return;
+ goto out;
}
boot_ec = ec;
boot_ec_is_ecdt = true;
pr_info("Boot ECDT EC used to handle transactions\n");
+
+out:
+ acpi_put_table((struct acpi_table_header *)ecdt_ptr);
}
#ifdef CONFIG_PM_SLEEP
@@ -2017,7 +2020,7 @@ bool acpi_ec_dispatch_gpe(void)
*/
ret = acpi_dispatch_gpe(NULL, first_ec->gpe);
if (ret == ACPI_INTERRUPT_HANDLED) {
- pm_pr_dbg("EC GPE dispatched\n");
+ pm_pr_dbg("ACPI EC GPE dispatched\n");
/* Flush the event and query workqueues. */
acpi_ec_flush_work();
diff --git a/drivers/acpi/evged.c b/drivers/acpi/evged.c
index aba0d0027586..ccd900690b6f 100644
--- a/drivers/acpi/evged.c
+++ b/drivers/acpi/evged.c
@@ -79,6 +79,8 @@ static acpi_status acpi_ged_request_interrupt(struct acpi_resource *ares,
struct resource r;
struct acpi_resource_irq *p = &ares->data.irq;
struct acpi_resource_extended_irq *pext = &ares->data.extended_irq;
+ char ev_name[5];
+ u8 trigger;
if (ares->type == ACPI_RESOURCE_TYPE_END_TAG)
return AE_OK;
@@ -87,14 +89,28 @@ static acpi_status acpi_ged_request_interrupt(struct acpi_resource *ares,
dev_err(dev, "unable to parse IRQ resource\n");
return AE_ERROR;
}
- if (ares->type == ACPI_RESOURCE_TYPE_IRQ)
+ if (ares->type == ACPI_RESOURCE_TYPE_IRQ) {
gsi = p->interrupts[0];
- else
+ trigger = p->triggering;
+ } else {
gsi = pext->interrupts[0];
+ trigger = pext->triggering;
+ }
irq = r.start;
- if (ACPI_FAILURE(acpi_get_handle(handle, "_EVT", &evt_handle))) {
+ switch (gsi) {
+ case 0 ... 255:
+ sprintf(ev_name, "_%c%02hhX",
+ trigger == ACPI_EDGE_SENSITIVE ? 'E' : 'L', gsi);
+
+ if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle)))
+ break;
+ /* fall through */
+ default:
+ if (ACPI_SUCCESS(acpi_get_handle(handle, "_EVT", &evt_handle)))
+ break;
+
dev_err(dev, "cannot locate _EVT method\n");
return AE_ERROR;
}
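
The new lookup above first tries a per-GSI event method whose name encodes the trigger type ('E' for edge, 'L' for level) and the GSI in hex, and only then falls back to _EVT. A small stand-alone sketch of the name formation with an illustrative GSI:

#include <stdio.h>

int main(void)
{
	unsigned int gsi = 23;		/* illustrative GSI in the 0..255 range */
	int edge = 1;			/* 1 = edge-triggered, 0 = level */
	char ev_name[5];

	snprintf(ev_name, sizeof(ev_name), "_%c%02X", edge ? 'E' : 'L', gsi);
	printf("%s\n", ev_name);	/* prints _E17 for GSI 23, edge */
	return 0;
}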
diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c
index 47b4969d9b93..5be5a977da1b 100644
--- a/drivers/acpi/numa/srat.c
+++ b/drivers/acpi/numa/srat.c
@@ -35,6 +35,7 @@ int pxm_to_node(int pxm)
return NUMA_NO_NODE;
return pxm_to_node_map[pxm];
}
+EXPORT_SYMBOL(pxm_to_node);
int node_to_pxm(int node)
{
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index ed3d2182cf2c..606da5d77ad3 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -31,8 +31,6 @@
ACPI_MODULE_NAME("pci_link");
#define ACPI_PCI_LINK_CLASS "pci_irq_routing"
#define ACPI_PCI_LINK_DEVICE_NAME "PCI Interrupt Link"
-#define ACPI_PCI_LINK_FILE_INFO "info"
-#define ACPI_PCI_LINK_FILE_STATUS "state"
#define ACPI_PCI_LINK_MAX_POSSIBLE 16
static int acpi_pci_link_add(struct acpi_device *device,
diff --git a/drivers/acpi/pci_mcfg.c b/drivers/acpi/pci_mcfg.c
index 6b347d9920cc..54b36b7ad47d 100644
--- a/drivers/acpi/pci_mcfg.c
+++ b/drivers/acpi/pci_mcfg.c
@@ -29,7 +29,7 @@ struct mcfg_fixup {
u32 oem_revision;
u16 segment;
struct resource bus_range;
- struct pci_ecam_ops *ops;
+ const struct pci_ecam_ops *ops;
struct resource cfgres;
};
@@ -165,7 +165,7 @@ static int pci_mcfg_quirk_matches(struct mcfg_fixup *f, u16 segment,
static void pci_mcfg_apply_quirks(struct acpi_pci_root *root,
struct resource *cfgres,
- struct pci_ecam_ops **ecam_ops)
+ const struct pci_ecam_ops **ecam_ops)
{
#ifdef CONFIG_PCI_QUIRKS
u16 segment = root->segment;
@@ -191,9 +191,9 @@ static void pci_mcfg_apply_quirks(struct acpi_pci_root *root,
static LIST_HEAD(pci_mcfg_list);
int pci_mcfg_lookup(struct acpi_pci_root *root, struct resource *cfgres,
- struct pci_ecam_ops **ecam_ops)
+ const struct pci_ecam_ops **ecam_ops)
{
- struct pci_ecam_ops *ops = &pci_generic_ecam_ops;
+ const struct pci_ecam_ops *ops = &pci_generic_ecam_ops;
struct resource *bus_res = &root->secondary;
u16 seg = root->segment;
struct mcfg_entry *e;
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index ac8ad6cb82aa..f90e841c59f5 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -483,13 +483,8 @@ static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm,
if (IS_ENABLED(CONFIG_HOTPLUG_PCI_SHPC))
control |= OSC_PCI_SHPC_NATIVE_HP_CONTROL;
- if (pci_aer_available()) {
- if (aer_acpi_firmware_first())
- dev_info(&device->dev,
- "PCIe AER handled by firmware\n");
- else
- control |= OSC_PCI_EXPRESS_AER_CONTROL;
- }
+ if (pci_aer_available())
+ control |= OSC_PCI_EXPRESS_AER_CONTROL;
/*
* Per the Downstream Port Containment Related Enhancements ECN to
@@ -938,7 +933,7 @@ struct pci_bus *acpi_pci_root_create(struct acpi_pci_root *root,
* assignments made by firmware for this host bridge.
*/
obj = acpi_evaluate_dsm(ACPI_HANDLE(bus->bridge), &pci_acpi_dsm_guid, 1,
- IGNORE_PCI_BOOT_CONFIG_DSM, NULL);
+ DSM_PCI_PRESERVE_BOOT_CONFIG, NULL);
if (obj && obj->type == ACPI_TYPE_INTEGER && obj->integer.value == 0)
host_bridge->preserve_config = 1;
ACPI_FREE(obj);
diff --git a/drivers/acpi/pmic/intel_pmic_chtdc_ti.c b/drivers/acpi/pmic/intel_pmic_chtdc_ti.c
index 7ccd7d9660bc..a5101b07611a 100644
--- a/drivers/acpi/pmic/intel_pmic_chtdc_ti.c
+++ b/drivers/acpi/pmic/intel_pmic_chtdc_ti.c
@@ -102,6 +102,7 @@ static struct intel_pmic_opregion_data chtdc_ti_pmic_opregion_data = {
.power_table_count = ARRAY_SIZE(chtdc_ti_power_table),
.thermal_table = chtdc_ti_thermal_table,
.thermal_table_count = ARRAY_SIZE(chtdc_ti_thermal_table),
+ .pmic_i2c_address = 0x5e,
};
static int chtdc_ti_pmic_opregion_probe(struct platform_device *pdev)
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index fe1e7bc91a5e..837b875d075e 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -36,8 +36,6 @@
ACPI_MODULE_NAME("power");
#define ACPI_POWER_CLASS "power_resource"
#define ACPI_POWER_DEVICE_NAME "Power Resource"
-#define ACPI_POWER_FILE_INFO "info"
-#define ACPI_POWER_FILE_STATUS "state"
#define ACPI_POWER_RESOURCE_STATE_OFF 0x00
#define ACPI_POWER_RESOURCE_STATE_ON 0x01
#define ACPI_POWER_RESOURCE_STATE_UNKNOWN 0xFF
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index dcc289e30166..75534c5b5433 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -308,11 +308,7 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
if (ret)
return ret;
- /*
- * It is expected that there will be at least 2 states, C1 and
- * something else (C2 or C3), so fail if that is not the case.
- */
- if (pr->power.count < 2)
+ if (!pr->power.count)
return -EFAULT;
pr->flags.has_cst = 1;
@@ -468,8 +464,7 @@ static int acpi_processor_get_cstate_info(struct acpi_processor *pr)
for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
if (pr->power.states[i].valid) {
pr->power.count = i;
- if (pr->power.states[i].type >= ACPI_STATE_C2)
- pr->flags.power = 1;
+ pr->flags.power = 1;
}
}
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index 6e88224f60f0..f158b8c30113 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -28,9 +28,6 @@
#define ACPI_SBS_CLASS "sbs"
#define ACPI_AC_CLASS "ac_adapter"
#define ACPI_SBS_DEVICE_NAME "Smart Battery System"
-#define ACPI_SBS_FILE_INFO "info"
-#define ACPI_SBS_FILE_STATE "state"
-#define ACPI_SBS_FILE_ALARM "alarm"
#define ACPI_BATTERY_DIR_NAME "BAT%i"
#define ACPI_AC_DIR_NAME "AC0"
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 6d3448895382..8777faced51a 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -15,8 +15,7 @@
#include <linux/nls.h>
#include <linux/dma-mapping.h>
#include <linux/platform_data/x86/apple.h>
-
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#include "internal.h"
@@ -919,12 +918,9 @@ static void acpi_bus_init_power_state(struct acpi_device *device, int state)
if (buffer.length && package
&& package->type == ACPI_TYPE_PACKAGE
- && package->package.count) {
- int err = acpi_extract_power_resources(package, 0,
- &ps->resources);
- if (!err)
- device->power.flags.power_resources = 1;
- }
+ && package->package.count)
+ acpi_extract_power_resources(package, 0, &ps->resources);
+
ACPI_FREE(buffer.pointer);
}
@@ -971,14 +967,27 @@ static void acpi_bus_get_power_flags(struct acpi_device *device)
acpi_bus_init_power_state(device, i);
INIT_LIST_HEAD(&device->power.states[ACPI_STATE_D3_COLD].resources);
- if (!list_empty(&device->power.states[ACPI_STATE_D3_HOT].resources))
- device->power.states[ACPI_STATE_D3_COLD].flags.valid = 1;
- /* Set defaults for D0 and D3hot states (always valid) */
+ /* Set the defaults for D0 and D3hot (always supported). */
device->power.states[ACPI_STATE_D0].flags.valid = 1;
device->power.states[ACPI_STATE_D0].power = 100;
device->power.states[ACPI_STATE_D3_HOT].flags.valid = 1;
+ /*
+ * Use power resources only if the D0 list of them is populated, because
+ * some platforms may provide _PR3 only to indicate D3cold support and
+ * in those cases the power resources list returned by it may be bogus.
+ */
+ if (!list_empty(&device->power.states[ACPI_STATE_D0].resources)) {
+ device->power.flags.power_resources = 1;
+ /*
+ * D3cold is supported if the D3hot list of power resources is
+ * not empty.
+ */
+ if (!list_empty(&device->power.states[ACPI_STATE_D3_HOT].resources))
+ device->power.states[ACPI_STATE_D3_COLD].flags.valid = 1;
+ }
+
if (acpi_bus_init_power(device))
device->flags.power_manageable = 0;
}
@@ -2157,10 +2166,13 @@ static void __init acpi_get_spcr_uart_addr(void)
status = acpi_get_table(ACPI_SIG_SPCR, 0,
(struct acpi_table_header **)&spcr_ptr);
- if (ACPI_SUCCESS(status))
- spcr_uart_addr = spcr_ptr->serial_port.address;
- else
- printk(KERN_WARNING PREFIX "STAO table present, but SPCR is missing\n");
+ if (ACPI_FAILURE(status)) {
+ pr_warn(PREFIX "STAO table present, but SPCR is missing\n");
+ return;
+ }
+
+ spcr_uart_addr = spcr_ptr->serial_port.address;
+ acpi_put_table((struct acpi_table_header *)spcr_ptr);
}
static bool acpi_scan_initialized;
@@ -2196,10 +2208,12 @@ int __init acpi_scan_init(void)
(struct acpi_table_header **)&stao_ptr);
if (ACPI_SUCCESS(status)) {
if (stao_ptr->header.length > sizeof(struct acpi_table_stao))
- printk(KERN_INFO PREFIX "STAO Name List not yet supported.");
+ pr_info(PREFIX "STAO Name List not yet supported.\n");
if (stao_ptr->ignore_uart)
acpi_get_spcr_uart_addr();
+
+ acpi_put_table((struct acpi_table_header *)stao_ptr);
}
acpi_gpe_apply_masked_gpes();
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index fd9d4e8318e9..aff13bf4d947 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -992,23 +992,31 @@ static bool acpi_s2idle_wake(void)
* wakeup is pending anyway and the SCI is not the source of
* it).
*/
- if (irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq)))
+ if (irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq))) {
+ pm_pr_dbg("Wakeup unrelated to ACPI SCI\n");
return true;
+ }
/*
* If the status bit of any enabled fixed event is set, the
* wakeup is regarded as valid.
*/
- if (acpi_any_fixed_event_status_set())
+ if (acpi_any_fixed_event_status_set()) {
+ pm_pr_dbg("ACPI fixed event wakeup\n");
return true;
+ }
/* Check wakeups from drivers sharing the SCI. */
- if (acpi_check_wakeup_handlers())
+ if (acpi_check_wakeup_handlers()) {
+ pm_pr_dbg("ACPI custom handler wakeup\n");
return true;
+ }
/* Check non-EC GPE wakeups and dispatch the EC GPE. */
- if (acpi_ec_dispatch_gpe())
+ if (acpi_ec_dispatch_gpe()) {
+ pm_pr_dbg("ACPI non-EC GPE wakeup\n");
return true;
+ }
/*
* Cancel the SCI wakeup and process all pending events in case
@@ -1027,8 +1035,10 @@ static bool acpi_s2idle_wake(void)
* are pending here, they must be resulting from the processing
* of EC events above or coming from somewhere else.
*/
- if (pm_wakeup_pending())
+ if (pm_wakeup_pending()) {
+ pm_pr_dbg("Wakeup after ACPI Notify sync\n");
return true;
+ }
rearm_wake_irq(acpi_sci_irq);
}
@@ -1280,8 +1290,10 @@ static void acpi_sleep_hibernate_setup(void)
return;
acpi_get_table(ACPI_SIG_FACS, 1, (struct acpi_table_header **)&facs);
- if (facs)
+ if (facs) {
s4_hardware_signature = facs->hardware_signature;
+ acpi_put_table((struct acpi_table_header *)facs);
+ }
}
#else /* !CONFIG_HIBERNATION */
static inline void acpi_sleep_hibernate_setup(void) {}
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index c60d2c6d31d6..3a89909b50a6 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -993,8 +993,10 @@ void acpi_sysfs_add_hotplug_profile(struct acpi_hotplug_profile *hotplug,
error = kobject_init_and_add(&hotplug->kobj,
&acpi_hotplug_profile_ktype, hotplug_kobj, "%s", name);
- if (error)
+ if (error) {
+ kobject_put(&hotplug->kobj);
goto err_out;
+ }
kobject_uevent(&hotplug->kobj, KOBJ_ADD);
return;
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 804ac0df58ec..838b719ec7ce 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -606,6 +606,31 @@ acpi_status acpi_evaluate_lck(acpi_handle handle, int lock)
}
/**
+ * acpi_evaluate_reg: Evaluate _REG method to register OpRegion presence
+ * @handle: ACPI device handle
+ * @space_id: ACPI address space id to register OpRegion presence for
+ * @function: Parameter to pass to _REG one of ACPI_REG_CONNECT or
+ * ACPI_REG_DISCONNECT
+ *
+ * Evaluate device's _REG method to register OpRegion presence.
+ */
+acpi_status acpi_evaluate_reg(acpi_handle handle, u8 space_id, u32 function)
+{
+ struct acpi_object_list arg_list;
+ union acpi_object params[2];
+
+ params[0].type = ACPI_TYPE_INTEGER;
+ params[0].integer.value = space_id;
+ params[1].type = ACPI_TYPE_INTEGER;
+ params[1].integer.value = function;
+ arg_list.count = 2;
+ arg_list.pointer = params;
+
+ return acpi_evaluate_object(handle, "_REG", &arg_list, NULL);
+}
+EXPORT_SYMBOL(acpi_evaluate_reg);
+
+/**
* acpi_evaluate_dsm - evaluate device's _DSM method
* @handle: ACPI device handle
* @guid: GUID of requested functions, should be 16 bytes
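
For context, a caller of the new acpi_evaluate_reg() helper would look roughly like the sketch below; the handle and the choice of address space depend on the driver and are only illustrative here:

#include <linux/acpi.h>

/* Tell AML that the EC address-space handler is now available. */
static int example_connect_ec_opregion(struct acpi_device *adev)
{
	acpi_status status;

	status = acpi_evaluate_reg(adev->handle, ACPI_ADR_SPACE_EC,
				   ACPI_REG_CONNECT);
	return ACPI_SUCCESS(status) ? 0 : -EIO;
}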
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index b4994e50608d..2499d7e3c710 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -361,6 +361,16 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_BOARD_NAME, "JV50"),
},
},
+ {
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=207835 */
+ .callback = video_detect_force_native,
+ .ident = "Acer TravelMate 5735Z",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 5735Z"),
+ DMI_MATCH(DMI_BOARD_NAME, "BA51_MV"),
+ },
+ },
/*
* Desktops which falsely report a backlight and which our heuristics
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index 8558b629880b..ecc304149067 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -505,7 +505,7 @@ static DECLARE_DELAYED_WORK(deferred_retry_work, amba_deferred_retry_func);
#define DEFERRED_DEVICE_TIMEOUT (msecs_to_jiffies(5 * 1000))
-static void amba_deferred_retry_func(struct work_struct *dummy)
+static int amba_deferred_retry(void)
{
struct deferred_device *ddev, *tmp;
@@ -521,11 +521,19 @@ static void amba_deferred_retry_func(struct work_struct *dummy)
kfree(ddev);
}
+ mutex_unlock(&deferred_devices_lock);
+
+ return 0;
+}
+late_initcall(amba_deferred_retry);
+
+static void amba_deferred_retry_func(struct work_struct *dummy)
+{
+ amba_deferred_retry();
+
if (!list_empty(&deferred_devices))
schedule_delayed_work(&deferred_retry_work,
DEFERRED_DEVICE_TIMEOUT);
-
- mutex_unlock(&deferred_devices_lock);
}
/**
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 2d8b9b91dee0..42c672f1584e 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -212,7 +212,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
mm = alloc->vma_vm_mm;
if (mm) {
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = alloc->vma;
}
@@ -270,7 +270,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
trace_binder_alloc_page_end(alloc, index);
}
if (mm) {
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
mmput(mm);
}
return 0;
@@ -303,7 +303,7 @@ err_page_ptr_cleared:
}
err_no_vma:
if (mm) {
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
mmput(mm);
}
return vma ? -ENOMEM : -ESRCH;
@@ -932,8 +932,8 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
mm = alloc->vma_vm_mm;
if (!mmget_not_zero(mm))
goto err_mmget;
- if (!down_read_trylock(&mm->mmap_sem))
- goto err_down_read_mmap_sem_failed;
+ if (!mmap_read_trylock(mm))
+ goto err_mmap_read_lock_failed;
vma = binder_alloc_get_vma(alloc);
list_lru_isolate(lru, item);
@@ -946,7 +946,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
trace_binder_unmap_user_end(alloc, index);
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
mmput(mm);
trace_binder_unmap_kernel_start(alloc, index);
@@ -960,7 +960,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
mutex_unlock(&alloc->mutex);
return LRU_REMOVED_RETRY;
-err_down_read_mmap_sem_failed:
+err_mmap_read_lock_failed:
mmput_async(mm);
err_mmget:
err_page_already_freed:
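
The binder changes above are a mechanical conversion to the new mmap locking API; the same pattern applies wherever mmap_sem was taken directly. A minimal sketch, illustrative only:

#include <linux/mm.h>
#include <linux/mmap_lock.h>
#include <linux/sched/mm.h>

static int example_walk_vma(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	if (!mmget_not_zero(mm))
		return -ESRCH;

	mmap_read_lock(mm);		/* was: down_read(&mm->mmap_sem) */
	vma = find_vma(mm, 0);
	/* ... inspect the VMA under the read lock ... */
	mmap_read_unlock(mm);		/* was: up_read(&mm->mmap_sem) */

	mmput(mm);
	return vma ? 0 : -ENOENT;
}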
diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
index 9ecad74183a3..7cf566aafe1f 100644
--- a/drivers/android/binderfs.c
+++ b/drivers/android/binderfs.c
@@ -650,7 +650,7 @@ static int binderfs_fill_super(struct super_block *sb, struct fs_context *fc)
struct binderfs_info *info;
struct binderfs_mount_opts *ctx = fc->fs_private;
struct inode *inode = NULL;
- struct binderfs_device device_info = { 0 };
+ struct binderfs_device device_info = {};
const char *name;
size_t len;
@@ -747,7 +747,7 @@ static const struct fs_context_operations binderfs_fs_context_ops = {
static int binderfs_init_fs_context(struct fs_context *fc)
{
- struct binderfs_mount_opts *ctx = fc->fs_private;
+ struct binderfs_mount_opts *ctx;
ctx = kzalloc(sizeof(struct binderfs_mount_opts), GFP_KERNEL);
if (!ctx)
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index beca5f91bb4c..69361ec43db5 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -5209,7 +5209,7 @@ void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
* sata_link_init_spd - Initialize link->sata_spd_limit
* @link: Link to configure sata_spd_limit for
*
- * Initialize @link->[hw_]sata_spd_limit to the currently
+ * Initialize ``link->[hw_]sata_spd_limit`` to the currently
* configured value.
*
* LOCKING:
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 36e588d88b95..435781a16875 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -649,7 +649,7 @@ static void ata_qc_set_pc_nbytes(struct ata_queued_cmd *qc)
{
struct scsi_cmnd *scmd = qc->scsicmd;
- qc->extrabytes = scmd->request->extra_len;
+ qc->extrabytes = scmd->extra_len;
qc->nbytes = scsi_bufflen(scmd) + qc->extrabytes;
}
@@ -1017,16 +1017,11 @@ void ata_scsi_sdev_config(struct scsi_device *sdev)
* RETURNS:
* 1 if ; otherwise, 0.
*/
-static int atapi_drain_needed(struct request *rq)
+bool ata_scsi_dma_need_drain(struct request *rq)
{
- if (likely(!blk_rq_is_passthrough(rq)))
- return 0;
-
- if (!blk_rq_bytes(rq) || op_is_write(req_op(rq)))
- return 0;
-
return atapi_cmd_type(scsi_req(rq)->cmd[0]) == ATAPI_MISC;
}
+EXPORT_SYMBOL_GPL(ata_scsi_dma_need_drain);
int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
{
@@ -1039,21 +1034,21 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
blk_queue_max_hw_sectors(q, dev->max_sectors);
if (dev->class == ATA_DEV_ATAPI) {
- void *buf;
-
sdev->sector_size = ATA_SECT_SIZE;
/* set DMA padding */
blk_queue_update_dma_pad(q, ATA_DMA_PAD_SZ - 1);
- /* configure draining */
- buf = kmalloc(ATAPI_MAX_DRAIN, q->bounce_gfp | GFP_KERNEL);
- if (!buf) {
+ /* make room for appending the drain */
+ blk_queue_max_segments(q, queue_max_segments(q) - 1);
+
+ sdev->dma_drain_len = ATAPI_MAX_DRAIN;
+ sdev->dma_drain_buf = kmalloc(sdev->dma_drain_len,
+ q->bounce_gfp | GFP_KERNEL);
+ if (!sdev->dma_drain_buf) {
ata_dev_err(dev, "drain buffer allocation failed\n");
return -ENOMEM;
}
-
- blk_queue_dma_drain(q, atapi_drain_needed, buf, ATAPI_MAX_DRAIN);
} else {
sdev->sector_size = ata_id_logical_sector_size(dev->id);
sdev->manage_start_stop = 1;
@@ -1135,7 +1130,6 @@ EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
void ata_scsi_slave_destroy(struct scsi_device *sdev)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
- struct request_queue *q = sdev->request_queue;
unsigned long flags;
struct ata_device *dev;
@@ -1152,9 +1146,7 @@ void ata_scsi_slave_destroy(struct scsi_device *sdev)
}
spin_unlock_irqrestore(ap->lock, flags);
- kfree(q->dma_drain_buffer);
- q->dma_drain_buffer = NULL;
- q->dma_drain_size = 0;
+ kfree(sdev->dma_drain_buf);
}
EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
diff --git a/drivers/atm/Kconfig b/drivers/atm/Kconfig
index 8c37294f1d1e..cfb0d16b60ad 100644
--- a/drivers/atm/Kconfig
+++ b/drivers/atm/Kconfig
@@ -306,7 +306,7 @@ config ATM_IA
for more info about the cards. Say Y (or M to compile as a module
named iphase) here if you have one of these cards.
- See the file <file:Documentation/networking/iphase.txt> for further
+ See the file <file:Documentation/networking/iphase.rst> for further
details.
config ATM_IA_DEBUG
@@ -336,7 +336,7 @@ config ATM_FORE200E
on PCI and SBUS hosts. Say Y (or M to compile as a module
named fore_200e) here if you have one of these ATM adapters.
- See the file <file:Documentation/networking/fore200e.txt> for
+ See the file <file:Documentation/networking/fore200e.rst> for
further details.
config ATM_FORE200E_USE_TASKLET
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index 8fbd36eb8941..f4ad7ce25ae8 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -25,6 +25,7 @@
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/firmware.h>
+#include <linux/pgtable.h>
#include <asm/io.h>
#include <asm/string.h>
#include <asm/page.h>
@@ -40,7 +41,6 @@
#include <asm/idprom.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
-#include <asm/pgtable.h>
#endif
#if defined(CONFIG_ATM_FORE200E_USE_TASKLET) /* defer interrupt work to a tasklet */
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 5f0bc74d2409..8d7001712062 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -149,8 +149,9 @@ config DEBUG_TEST_DRIVER_REMOVE
test this functionality.
config PM_QOS_KUNIT_TEST
- bool "KUnit Test for PM QoS features"
+ bool "KUnit Test for PM QoS features" if !KUNIT_ALL_TESTS
depends on KUNIT=y
+ default KUNIT_ALL_TESTS
config HMEM_REPORTING
bool
diff --git a/drivers/base/base.h b/drivers/base/base.h
index 40fb069a8a7e..95c22c0f9036 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -153,6 +153,7 @@ extern char *make_class_name(const char *name, struct kobject *kobj);
extern int devres_release_all(struct device *dev);
extern void device_block_probing(void);
extern void device_unblock_probing(void);
+extern void driver_deferred_probe_force_trigger(void);
/* /sys/devices directory */
extern struct kset *devices_kset;
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 0cad34f1eede..67d39a90b45c 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -49,6 +49,9 @@ static LIST_HEAD(wait_for_suppliers);
static DEFINE_MUTEX(wfs_lock);
static LIST_HEAD(deferred_sync);
static unsigned int defer_sync_state_count = 1;
+static unsigned int defer_fw_devlink_count;
+static DEFINE_MUTEX(defer_fw_devlink_lock);
+static bool fw_devlink_is_permissive(void);
#ifdef CONFIG_SRCU
static DEFINE_MUTEX(device_links_lock);
@@ -529,7 +532,7 @@ static void device_link_add_missing_supplier_links(void)
int ret = fwnode_call_int_op(dev->fwnode, add_links, dev);
if (!ret)
list_del_init(&dev->links.needs_suppliers);
- else if (ret != -ENODEV)
+ else if (ret != -ENODEV || fw_devlink_is_permissive())
dev->links.need_for_probe = false;
}
mutex_unlock(&wfs_lock);
@@ -643,9 +646,17 @@ static void device_links_missing_supplier(struct device *dev)
{
struct device_link *link;
- list_for_each_entry(link, &dev->links.suppliers, c_node)
- if (link->status == DL_STATE_CONSUMER_PROBE)
+ list_for_each_entry(link, &dev->links.suppliers, c_node) {
+ if (link->status != DL_STATE_CONSUMER_PROBE)
+ continue;
+
+ if (link->supplier->links.status == DL_DEV_DRIVER_BOUND) {
WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
+ } else {
+ WARN_ON(!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
+ WRITE_ONCE(link->status, DL_STATE_DORMANT);
+ }
+ }
}
/**
@@ -684,11 +695,11 @@ int device_links_check_suppliers(struct device *dev)
device_links_write_lock();
list_for_each_entry(link, &dev->links.suppliers, c_node) {
- if (!(link->flags & DL_FLAG_MANAGED) ||
- link->flags & DL_FLAG_SYNC_STATE_ONLY)
+ if (!(link->flags & DL_FLAG_MANAGED))
continue;
- if (link->status != DL_STATE_AVAILABLE) {
+ if (link->status != DL_STATE_AVAILABLE &&
+ !(link->flags & DL_FLAG_SYNC_STATE_ONLY)) {
device_links_missing_supplier(dev);
ret = -EPROBE_DEFER;
break;
@@ -949,11 +960,21 @@ static void __device_links_no_driver(struct device *dev)
if (!(link->flags & DL_FLAG_MANAGED))
continue;
- if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER)
+ if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) {
device_link_drop_managed(link);
- else if (link->status == DL_STATE_CONSUMER_PROBE ||
- link->status == DL_STATE_ACTIVE)
+ continue;
+ }
+
+ if (link->status != DL_STATE_CONSUMER_PROBE &&
+ link->status != DL_STATE_ACTIVE)
+ continue;
+
+ if (link->supplier->links.status == DL_DEV_DRIVER_BOUND) {
WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
+ } else {
+ WARN_ON(!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
+ WRITE_ONCE(link->status, DL_STATE_DORMANT);
+ }
}
dev->links.status = DL_DEV_NO_DRIVER;
@@ -1162,6 +1183,150 @@ static void device_links_purge(struct device *dev)
device_links_write_unlock();
}
+static u32 fw_devlink_flags = DL_FLAG_SYNC_STATE_ONLY;
+static int __init fw_devlink_setup(char *arg)
+{
+ if (!arg)
+ return -EINVAL;
+
+ if (strcmp(arg, "off") == 0) {
+ fw_devlink_flags = 0;
+ } else if (strcmp(arg, "permissive") == 0) {
+ fw_devlink_flags = DL_FLAG_SYNC_STATE_ONLY;
+ } else if (strcmp(arg, "on") == 0) {
+ fw_devlink_flags = DL_FLAG_AUTOPROBE_CONSUMER;
+ } else if (strcmp(arg, "rpm") == 0) {
+ fw_devlink_flags = DL_FLAG_AUTOPROBE_CONSUMER |
+ DL_FLAG_PM_RUNTIME;
+ }
+ return 0;
+}
+early_param("fw_devlink", fw_devlink_setup);
+
+u32 fw_devlink_get_flags(void)
+{
+ return fw_devlink_flags;
+}
+
+static bool fw_devlink_is_permissive(void)
+{
+ return fw_devlink_flags == DL_FLAG_SYNC_STATE_ONLY;
+}
+
+static void fw_devlink_link_device(struct device *dev)
+{
+ int fw_ret;
+
+ if (!fw_devlink_flags)
+ return;
+
+ mutex_lock(&defer_fw_devlink_lock);
+ if (!defer_fw_devlink_count)
+ device_link_add_missing_supplier_links();
+
+ /*
+ * The device's fwnode not having add_links() doesn't affect if other
+ * consumers can find this device as a supplier. So, this check is
+ * intentionally placed after device_link_add_missing_supplier_links().
+ */
+ if (!fwnode_has_op(dev->fwnode, add_links))
+ goto out;
+
+ /*
+ * If fw_devlink is being deferred, assume all devices have mandatory
+ * suppliers they need to link to later. Then, when the fw_devlink is
+ * resumed, all these devices will get a chance to try and link to any
+ * suppliers they have.
+ */
+ if (!defer_fw_devlink_count) {
+ fw_ret = fwnode_call_int_op(dev->fwnode, add_links, dev);
+ if (fw_ret == -ENODEV && fw_devlink_is_permissive())
+ fw_ret = -EAGAIN;
+ } else {
+ fw_ret = -ENODEV;
+ }
+
+ if (fw_ret == -ENODEV)
+ device_link_wait_for_mandatory_supplier(dev);
+ else if (fw_ret)
+ device_link_wait_for_optional_supplier(dev);
+
+out:
+ mutex_unlock(&defer_fw_devlink_lock);
+}
+
+/**
+ * fw_devlink_pause - Pause parsing of fwnode to create device links
+ *
+ * Calling this function defers any fwnode parsing to create device links until
+ * fw_devlink_resume() is called. Both these functions are ref counted and the
+ * caller needs to match the calls.
+ *
+ * While fw_devlink is paused:
+ * - Any device that is added won't have its fwnode parsed to create device
+ * links.
+ * - The probe of the device will also be deferred during this period.
+ * - Any devices that were already added, but waiting for suppliers won't be
+ * able to link to newly added devices.
+ *
+ * Once fw_devlink_resume():
+ * - All the fwnodes that were not parsed will be parsed.
+ * - All the devices that were deferred probing will be reattempted if they
+ * aren't waiting for any more suppliers.
+ *
+ * This pair of functions is mainly meant to optimize the parsing of fwnodes
+ * when a lot of devices that need to link to each other are added in a short
+ * interval of time. For example, adding all the top level devices in a system.
+ *
+ * For example, if N devices are added and:
+ * - All the consumers are added before their suppliers
+ * - All the suppliers of the N devices are part of the N devices
+ *
+ * Then:
+ *
+ * - With the use of fw_devlink_pause() and fw_devlink_resume(), each device
+ * will only need one parsing of its fwnode because it is guaranteed to find
+ * all the supplier devices already registered and ready to link to. It won't
+ * have to do another pass later to find one or more suppliers it couldn't
+ * find in the first parse of the fwnode. So, we'll only need O(N) fwnode
+ * parses.
+ *
+ * - Without the use of fw_devlink_pause() and fw_devlink_resume(), we would
+ * end up doing O(N^2) parses of fwnodes because every device that's added is
+ * guaranteed to trigger a parse of the fwnode of every device added before
+ * it. This O(N^2) parse is made worse by the fact that when a fwnode of a
+ * device is parsed, all its descendant devices might need to have their
+ * fwnodes parsed too (even if the devices themselves aren't added).
+ */
+void fw_devlink_pause(void)
+{
+ mutex_lock(&defer_fw_devlink_lock);
+ defer_fw_devlink_count++;
+ mutex_unlock(&defer_fw_devlink_lock);
+}
+
+/**
+ * fw_devlink_resume - Resume parsing of fwnode to create device links
+ *
+ * This function is used in conjunction with fw_devlink_pause() and is ref
+ * counted. See documentation for fw_devlink_pause() for more details.
+ */
+void fw_devlink_resume(void)
+{
+ mutex_lock(&defer_fw_devlink_lock);
+ if (!defer_fw_devlink_count) {
+ WARN(true, "Unmatched fw_devlink pause/resume!");
+ goto out;
+ }
+
+ defer_fw_devlink_count--;
+ if (defer_fw_devlink_count)
+ goto out;
+
+ device_link_add_missing_supplier_links();
+ driver_deferred_probe_force_trigger();
+out:
+ mutex_unlock(&defer_fw_devlink_lock);
+}
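
As the kernel-doc above describes, a caller that registers many interdependent devices in one batch brackets the registrations with the pause/resume pair so each fwnode is parsed only once. A sketch; devs[] and register_one_device() are placeholders for whatever bulk-registration path the caller actually uses:

static void example_register_bulk(struct device **devs, int n)
{
	int i;

	fw_devlink_pause();		/* defer fwnode parsing */
	for (i = 0; i < n; i++)
		register_one_device(devs[i]);	/* placeholder helper */
	fw_devlink_resume();		/* parse once, retrigger deferred probes */
}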
/* Device links support end. */
int (*platform_notify)(struct device *dev) = NULL;
@@ -1393,7 +1558,7 @@ static void device_release(struct kobject *kobj)
else if (dev->class && dev->class->dev_release)
dev->class->dev_release(dev);
else
- WARN(1, KERN_ERR "Device '%s' does not have a release() function, it is broken and must be fixed. See Documentation/kobject.txt.\n",
+ WARN(1, KERN_ERR "Device '%s' does not have a release() function, it is broken and must be fixed. See Documentation/core-api/kobject.rst.\n",
dev_name(dev));
kfree(p);
}
@@ -2364,36 +2529,6 @@ static int device_private_init(struct device *dev)
return 0;
}
-static u32 fw_devlink_flags;
-static int __init fw_devlink_setup(char *arg)
-{
- if (!arg)
- return -EINVAL;
-
- if (strcmp(arg, "off") == 0) {
- fw_devlink_flags = 0;
- } else if (strcmp(arg, "permissive") == 0) {
- fw_devlink_flags = DL_FLAG_SYNC_STATE_ONLY;
- } else if (strcmp(arg, "on") == 0) {
- fw_devlink_flags = DL_FLAG_AUTOPROBE_CONSUMER;
- } else if (strcmp(arg, "rpm") == 0) {
- fw_devlink_flags = DL_FLAG_AUTOPROBE_CONSUMER |
- DL_FLAG_PM_RUNTIME;
- }
- return 0;
-}
-early_param("fw_devlink", fw_devlink_setup);
-
-u32 fw_devlink_get_flags(void)
-{
- return fw_devlink_flags;
-}
-
-static bool fw_devlink_is_permissive(void)
-{
- return fw_devlink_flags == DL_FLAG_SYNC_STATE_ONLY;
-}
-
/**
* device_add - add device to device hierarchy.
* @dev: device.
@@ -2426,9 +2561,8 @@ int device_add(struct device *dev)
struct device *parent;
struct kobject *kobj;
struct class_interface *class_intf;
- int error = -EINVAL, fw_ret;
+ int error = -EINVAL;
struct kobject *glue_dir = NULL;
- bool is_fwnode_dev = false;
dev = get_device(dev);
if (!dev)
@@ -2526,11 +2660,6 @@ int device_add(struct device *dev)
kobject_uevent(&dev->kobj, KOBJ_ADD);
- if (dev->fwnode && !dev->fwnode->dev) {
- dev->fwnode->dev = dev;
- is_fwnode_dev = true;
- }
-
/*
* Check if any of the other devices (consumers) have been waiting for
* this device (supplier) to be added so that they can create a device
@@ -2539,19 +2668,13 @@ int device_add(struct device *dev)
* This needs to happen after device_pm_add() because device_link_add()
* requires the supplier be registered before it's called.
*
- * But this also needs to happe before bus_probe_device() to make sure
+ * But this also needs to happen before bus_probe_device() to make sure
* waiting consumers can link to it before the driver is bound to the
* device and the driver sync_state callback is called for this device.
*/
- device_link_add_missing_supplier_links();
-
- if (fw_devlink_flags && is_fwnode_dev &&
- fwnode_has_op(dev->fwnode, add_links)) {
- fw_ret = fwnode_call_int_op(dev->fwnode, add_links, dev);
- if (fw_ret == -ENODEV && !fw_devlink_is_permissive())
- device_link_wait_for_mandatory_supplier(dev);
- else if (fw_ret)
- device_link_wait_for_optional_supplier(dev);
+ if (dev->fwnode && !dev->fwnode->dev) {
+ dev->fwnode->dev = dev;
+ fw_devlink_link_device(dev);
}
bus_probe_device(dev);
@@ -3213,40 +3336,6 @@ error:
}
/**
- * device_create_vargs - creates a device and registers it with sysfs
- * @class: pointer to the struct class that this device should be registered to
- * @parent: pointer to the parent struct device of this new device, if any
- * @devt: the dev_t for the char device to be added
- * @drvdata: the data to be added to the device for callbacks
- * @fmt: string for the device's name
- * @args: va_list for the device's name
- *
- * This function can be used by char device classes. A struct device
- * will be created in sysfs, registered to the specified class.
- *
- * A "dev" file will be created, showing the dev_t for the device, if
- * the dev_t is not 0,0.
- * If a pointer to a parent struct device is passed in, the newly created
- * struct device will be a child of that device in sysfs.
- * The pointer to the struct device will be returned from the call.
- * Any further sysfs files that might be required can be created using this
- * pointer.
- *
- * Returns &struct device pointer on success, or ERR_PTR() on error.
- *
- * Note: the struct class passed to this function must have previously
- * been created with a call to class_create().
- */
-struct device *device_create_vargs(struct class *class, struct device *parent,
- dev_t devt, void *drvdata, const char *fmt,
- va_list args)
-{
- return device_create_groups_vargs(class, parent, devt, drvdata, NULL,
- fmt, args);
-}
-EXPORT_SYMBOL_GPL(device_create_vargs);
-
-/**
* device_create - creates a device and registers it with sysfs
* @class: pointer to the struct class that this device should be registered to
* @parent: pointer to the parent struct device of this new device, if any
@@ -3277,7 +3366,8 @@ struct device *device_create(struct class *class, struct device *parent,
struct device *dev;
va_start(vargs, fmt);
- dev = device_create_vargs(class, parent, devt, drvdata, fmt, vargs);
+ dev = device_create_groups_vargs(class, parent, devt, drvdata, NULL,
+ fmt, vargs);
va_end(vargs);
return dev;
}
@@ -3915,6 +4005,7 @@ void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
else
dev->fwnode = fwnode;
}
+EXPORT_SYMBOL_GPL(set_secondary_fwnode);
/**
* device_set_of_node_from_dev - reuse device-tree node of another device
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 9a1c00fbbaef..d2136ab9b14a 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -562,6 +562,12 @@ ssize_t __weak cpu_show_itlb_multihit(struct device *dev,
return sprintf(buf, "Not affected\n");
}
+ssize_t __weak cpu_show_srbds(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "Not affected\n");
+}
+
static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
@@ -570,6 +576,7 @@ static DEVICE_ATTR(l1tf, 0444, cpu_show_l1tf, NULL);
static DEVICE_ATTR(mds, 0444, cpu_show_mds, NULL);
static DEVICE_ATTR(tsx_async_abort, 0444, cpu_show_tsx_async_abort, NULL);
static DEVICE_ATTR(itlb_multihit, 0444, cpu_show_itlb_multihit, NULL);
+static DEVICE_ATTR(srbds, 0444, cpu_show_srbds, NULL);
static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_meltdown.attr,
@@ -580,6 +587,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_mds.attr,
&dev_attr_tsx_async_abort.attr,
&dev_attr_itlb_multihit.attr,
+ &dev_attr_srbds.attr,
NULL
};
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 94037be7f5d7..9a1d940342ac 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -164,6 +164,11 @@ static void driver_deferred_probe_trigger(void)
if (!driver_deferred_probe_enable)
return;
+ driver_deferred_probe_force_trigger();
+}
+
+void driver_deferred_probe_force_trigger(void)
+{
/*
* A successful probe means that all the devices in the pending list
* should be triggered to be reprobed. Move all the deferred devices
@@ -254,12 +259,12 @@ __setup("deferred_probe_timeout=", deferred_probe_timeout_setup);
int driver_deferred_probe_check_state(struct device *dev)
{
if (!IS_ENABLED(CONFIG_MODULES) && initcalls_done) {
- dev_warn(dev, "ignoring dependency for device, assuming no driver");
+ dev_warn(dev, "ignoring dependency for device, assuming no driver\n");
return -ENODEV;
}
if (!driver_deferred_probe_timeout && initcalls_done) {
- dev_warn(dev, "deferred probe timeout, ignoring dependency");
+ dev_warn(dev, "deferred probe timeout, ignoring dependency\n");
return -ETIMEDOUT;
}
@@ -275,7 +280,7 @@ static void deferred_probe_timeout_work_func(struct work_struct *work)
flush_work(&deferred_probe_work);
list_for_each_entry_safe(private, p, &deferred_probe_pending_list, deferred_probe)
- dev_info(private->device, "deferred probe pending");
+ dev_info(private->device, "deferred probe pending\n");
wake_up(&probe_timeout_waitqueue);
}
static DECLARE_DELAYED_WORK(deferred_probe_timeout_work, deferred_probe_timeout_work_func);
@@ -336,7 +341,7 @@ bool device_is_bound(struct device *dev)
static void driver_bound(struct device *dev)
{
if (device_is_bound(dev)) {
- printk(KERN_WARNING "%s: device %s already bound\n",
+ pr_warn("%s: device %s already bound\n",
__func__, kobject_name(&dev->kobj));
return;
}
@@ -505,8 +510,8 @@ re_probe:
}
if (driver_sysfs_add(dev)) {
- printk(KERN_ERR "%s: driver_sysfs_add(%s) failed\n",
- __func__, dev_name(dev));
+ pr_err("%s: driver_sysfs_add(%s) failed\n",
+ __func__, dev_name(dev));
goto probe_failed;
}
@@ -597,9 +602,8 @@ pinctrl_bind_failed:
break;
default:
/* driver matched but the probe failed */
- printk(KERN_WARNING
- "%s: probe of %s failed with error %d\n",
- drv->name, dev_name(dev), ret);
+ pr_warn("%s: probe of %s failed with error %d\n",
+ drv->name, dev_name(dev), ret);
}
/*
* Ignore errors returned by ->probe so that the next driver can try
@@ -624,8 +628,8 @@ static int really_probe_debug(struct device *dev, struct device_driver *drv)
ret = really_probe(dev, drv);
rettime = ktime_get();
delta = ktime_sub(rettime, calltime);
- printk(KERN_DEBUG "probe of %s returned %d after %lld usecs\n",
- dev_name(dev), ret, (s64) ktime_to_us(delta));
+ pr_debug("probe of %s returned %d after %lld usecs\n",
+ dev_name(dev), ret, (s64) ktime_to_us(delta));
return ret;
}
@@ -713,8 +717,7 @@ static inline bool cmdline_requested_async_probing(const char *drv_name)
static int __init save_async_options(char *buf)
{
if (strlen(buf) >= ASYNC_DRV_NAMES_MAX_LEN)
- printk(KERN_WARNING
- "Too long list of driver names for 'driver_async_probe'!\n");
+ pr_warn("Too long list of driver names for 'driver_async_probe'!\n");
strlcpy(async_probe_drv_names, buf, ASYNC_DRV_NAMES_MAX_LEN);
return 0;
@@ -789,7 +792,7 @@ static int __device_attach_driver(struct device_driver *drv, void *_data)
dev_dbg(dev, "Device match requests probe deferral\n");
driver_deferred_probe_add(dev);
} else if (ret < 0) {
- dev_dbg(dev, "Bus failed to match device: %d", ret);
+ dev_dbg(dev, "Bus failed to match device: %d\n", ret);
return ret;
} /* ret > 0 means positive match */
@@ -1022,7 +1025,7 @@ static int __driver_attach(struct device *dev, void *data)
dev_dbg(dev, "Device match requests probe deferral\n");
driver_deferred_probe_add(dev);
} else if (ret < 0) {
- dev_dbg(dev, "Bus failed to match device: %d", ret);
+ dev_dbg(dev, "Bus failed to match device: %d\n", ret);
return ret;
} /* ret > 0 means positive match */
diff --git a/drivers/base/firmware_loader/fallback.c b/drivers/base/firmware_loader/fallback.c
index 1e9c96e3ed63..5327bfc6ba71 100644
--- a/drivers/base/firmware_loader/fallback.c
+++ b/drivers/base/firmware_loader/fallback.c
@@ -9,6 +9,7 @@
#include <linux/umh.h>
#include <linux/sysctl.h>
#include <linux/vmalloc.h>
+#include <linux/module.h>
#include "fallback.h"
#include "firmware.h"
@@ -17,6 +18,8 @@
* firmware fallback mechanism
*/
+MODULE_IMPORT_NS(FIRMWARE_LOADER_PRIVATE);
+
extern struct firmware_fallback_config fw_fallback_config;
/* These getters are vetted to use int properly */
@@ -460,7 +463,7 @@ static const struct attribute_group *fw_dev_attr_groups[] = {
static struct fw_sysfs *
fw_create_instance(struct firmware *firmware, const char *fw_name,
- struct device *device, enum fw_opt opt_flags)
+ struct device *device, u32 opt_flags)
{
struct fw_sysfs *fw_sysfs;
struct device *f_dev;
@@ -493,7 +496,7 @@ exit:
* In charge of constructing a sysfs fallback interface for firmware loading.
**/
static int fw_load_sysfs_fallback(struct fw_sysfs *fw_sysfs,
- enum fw_opt opt_flags, long timeout)
+ u32 opt_flags, long timeout)
{
int retval = 0;
struct device *f_dev = &fw_sysfs->dev;
@@ -547,7 +550,7 @@ err_put_dev:
static int fw_load_from_user_helper(struct firmware *firmware,
const char *name, struct device *device,
- enum fw_opt opt_flags)
+ u32 opt_flags)
{
struct fw_sysfs *fw_sysfs;
long timeout;
@@ -588,7 +591,7 @@ out_unlock:
return ret;
}
-static bool fw_force_sysfs_fallback(enum fw_opt opt_flags)
+static bool fw_force_sysfs_fallback(u32 opt_flags)
{
if (fw_fallback_config.force_sysfs_fallback)
return true;
@@ -597,7 +600,7 @@ static bool fw_force_sysfs_fallback(enum fw_opt opt_flags)
return true;
}
-static bool fw_run_sysfs_fallback(enum fw_opt opt_flags)
+static bool fw_run_sysfs_fallback(u32 opt_flags)
{
int ret;
@@ -640,7 +643,7 @@ static bool fw_run_sysfs_fallback(enum fw_opt opt_flags)
**/
int firmware_fallback_sysfs(struct firmware *fw, const char *name,
struct device *device,
- enum fw_opt opt_flags,
+ u32 opt_flags,
int ret)
{
if (!fw_run_sysfs_fallback(opt_flags))
diff --git a/drivers/base/firmware_loader/fallback.h b/drivers/base/firmware_loader/fallback.h
index 06f4577733a8..2afdb6adb23f 100644
--- a/drivers/base/firmware_loader/fallback.h
+++ b/drivers/base/firmware_loader/fallback.h
@@ -33,7 +33,7 @@ struct firmware_fallback_config {
#ifdef CONFIG_FW_LOADER_USER_HELPER
int firmware_fallback_sysfs(struct firmware *fw, const char *name,
struct device *device,
- enum fw_opt opt_flags,
+ u32 opt_flags,
int ret);
void kill_pending_fw_fallback_reqs(bool only_kill_custom);
@@ -45,7 +45,7 @@ void unregister_sysfs_loader(void);
#else /* CONFIG_FW_LOADER_USER_HELPER */
static inline int firmware_fallback_sysfs(struct firmware *fw, const char *name,
struct device *device,
- enum fw_opt opt_flags,
+ u32 opt_flags,
int ret)
{
/* Keep carrying over the same error */
@@ -67,10 +67,10 @@ static inline void unregister_sysfs_loader(void)
#endif /* CONFIG_FW_LOADER_USER_HELPER */
#ifdef CONFIG_EFI_EMBEDDED_FIRMWARE
-int firmware_fallback_platform(struct fw_priv *fw_priv, enum fw_opt opt_flags);
+int firmware_fallback_platform(struct fw_priv *fw_priv, u32 opt_flags);
#else
static inline int firmware_fallback_platform(struct fw_priv *fw_priv,
- enum fw_opt opt_flags)
+ u32 opt_flags)
{
return -ENOENT;
}
diff --git a/drivers/base/firmware_loader/fallback_platform.c b/drivers/base/firmware_loader/fallback_platform.c
index c88c745590fe..cdd2c9a9f38a 100644
--- a/drivers/base/firmware_loader/fallback_platform.c
+++ b/drivers/base/firmware_loader/fallback_platform.c
@@ -8,7 +8,7 @@
#include "fallback.h"
#include "firmware.h"
-int firmware_fallback_platform(struct fw_priv *fw_priv, enum fw_opt opt_flags)
+int firmware_fallback_platform(struct fw_priv *fw_priv, u32 opt_flags)
{
const u8 *data;
size_t size;
diff --git a/drivers/base/firmware_loader/fallback_table.c b/drivers/base/firmware_loader/fallback_table.c
index a182e318bd09..46a731dede6f 100644
--- a/drivers/base/firmware_loader/fallback_table.c
+++ b/drivers/base/firmware_loader/fallback_table.c
@@ -21,7 +21,7 @@ struct firmware_fallback_config fw_fallback_config = {
.loading_timeout = 60,
.old_timeout = 60,
};
-EXPORT_SYMBOL_GPL(fw_fallback_config);
+EXPORT_SYMBOL_NS_GPL(fw_fallback_config, FIRMWARE_LOADER_PRIVATE);
#ifdef CONFIG_SYSCTL
struct ctl_table firmware_config_table[] = {
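The hunk above moves fw_fallback_config into the FIRMWARE_LOADER_PRIVATE symbol namespace, and fallback.c gains the matching MODULE_IMPORT_NS() so it can still link against it. A minimal sketch of that export/import pairing, using a hypothetical symbol and namespace name that are not part of this patch:

#include <linux/module.h>

/* Exporting side: the symbol is only visible to modules importing EXAMPLE_NS. */
int example_shared_value = 42;
EXPORT_SYMBOL_NS_GPL(example_shared_value, EXAMPLE_NS);

/* Consuming module: without this declaration modpost rejects the reference. */
MODULE_IMPORT_NS(EXAMPLE_NS);

extern int example_shared_value;

static int __init example_consumer_init(void)
{
        pr_info("shared value: %d\n", example_shared_value);
        return 0;
}
module_init(example_consumer_init);
MODULE_LICENSE("GPL");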
diff --git a/drivers/base/firmware_loader/firmware.h b/drivers/base/firmware_loader/firmware.h
index 25836a6afc9f..933e2192fbe8 100644
--- a/drivers/base/firmware_loader/firmware.h
+++ b/drivers/base/firmware_loader/firmware.h
@@ -136,8 +136,7 @@ static inline void fw_state_done(struct fw_priv *fw_priv)
__fw_state_set(fw_priv, FW_STATUS_DONE);
}
-int assign_fw(struct firmware *fw, struct device *device,
- enum fw_opt opt_flags);
+int assign_fw(struct firmware *fw, struct device *device, u32 opt_flags);
#ifdef CONFIG_FW_LOADER_PAGED_BUF
void fw_free_paged_buf(struct fw_priv *fw_priv);
diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
index 76f79913916d..ca871b13524e 100644
--- a/drivers/base/firmware_loader/main.c
+++ b/drivers/base/firmware_loader/main.c
@@ -210,7 +210,7 @@ static struct fw_priv *__lookup_fw_priv(const char *fw_name)
static int alloc_lookup_fw_priv(const char *fw_name,
struct firmware_cache *fwc,
struct fw_priv **fw_priv, void *dbuf,
- size_t size, enum fw_opt opt_flags)
+ size_t size, u32 opt_flags)
{
struct fw_priv *tmp;
@@ -548,9 +548,6 @@ static void firmware_free_data(const struct firmware *fw)
static void fw_set_page_data(struct fw_priv *fw_priv, struct firmware *fw)
{
fw->priv = fw_priv;
-#ifdef CONFIG_FW_LOADER_USER_HELPER
- fw->pages = fw_priv->pages;
-#endif
fw->size = fw_priv->size;
fw->data = fw_priv->data;
@@ -635,8 +632,7 @@ static int fw_add_devm_name(struct device *dev, const char *name)
}
#endif
-int assign_fw(struct firmware *fw, struct device *device,
- enum fw_opt opt_flags)
+int assign_fw(struct firmware *fw, struct device *device, u32 opt_flags)
{
struct fw_priv *fw_priv = fw->priv;
int ret;
@@ -687,7 +683,7 @@ int assign_fw(struct firmware *fw, struct device *device,
static int
_request_firmware_prepare(struct firmware **firmware_p, const char *name,
struct device *device, void *dbuf, size_t size,
- enum fw_opt opt_flags)
+ u32 opt_flags)
{
struct firmware *firmware;
struct fw_priv *fw_priv;
@@ -753,7 +749,7 @@ static void fw_abort_batch_reqs(struct firmware *fw)
static int
_request_firmware(const struct firmware **firmware_p, const char *name,
struct device *device, void *buf, size_t size,
- enum fw_opt opt_flags)
+ u32 opt_flags)
{
struct firmware *fw = NULL;
int ret;
@@ -990,7 +986,7 @@ struct firmware_work {
struct device *device;
void *context;
void (*cont)(const struct firmware *fw, void *context);
- enum fw_opt opt_flags;
+ u32 opt_flags;
};
static void request_firmware_work_func(struct work_struct *work)
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index dbec3a05590a..2b09b68b9f78 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -21,6 +21,7 @@
#include <linux/mm.h>
#include <linux/stat.h>
#include <linux/slab.h>
+#include <linux/xarray.h>
#include <linux/atomic.h>
#include <linux/uaccess.h>
@@ -74,6 +75,13 @@ static struct bus_type memory_subsys = {
.offline = memory_subsys_offline,
};
+/*
+ * Memory blocks are cached in a local radix tree to avoid
+ * a costly linear search for the corresponding device on
+ * the subsystem bus.
+ */
+static DEFINE_XARRAY(memory_blocks);
+
static BLOCKING_NOTIFIER_HEAD(memory_chain);
int register_memory_notifier(struct notifier_block *nb)
@@ -489,22 +497,23 @@ int __weak arch_get_memory_phys_device(unsigned long start_pfn)
return 0;
}
-/* A reference for the returned memory block device is acquired. */
+/*
+ * A reference for the returned memory block device is acquired.
+ *
+ * Called under device_hotplug_lock.
+ */
static struct memory_block *find_memory_block_by_id(unsigned long block_id)
{
- struct device *dev;
+ struct memory_block *mem;
- dev = subsys_find_device_by_id(&memory_subsys, block_id, NULL);
- return dev ? to_memory_block(dev) : NULL;
+ mem = xa_load(&memory_blocks, block_id);
+ if (mem)
+ get_device(&mem->dev);
+ return mem;
}
/*
- * For now, we have a linear search to go find the appropriate
- * memory_block corresponding to a particular phys_index. If
- * this gets to be a real problem, we can always use a radix
- * tree or something here.
- *
- * This could be made generic for all device subsystems.
+ * Called under device_hotplug_lock.
*/
struct memory_block *find_memory_block(struct mem_section *section)
{
@@ -548,9 +557,16 @@ int register_memory(struct memory_block *memory)
memory->dev.offline = memory->state == MEM_OFFLINE;
ret = device_register(&memory->dev);
- if (ret)
+ if (ret) {
put_device(&memory->dev);
-
+ return ret;
+ }
+ ret = xa_err(xa_store(&memory_blocks, memory->dev.id, memory,
+ GFP_KERNEL));
+ if (ret) {
+ put_device(&memory->dev);
+ device_unregister(&memory->dev);
+ }
return ret;
}
@@ -604,6 +620,8 @@ static void unregister_memory(struct memory_block *memory)
if (WARN_ON_ONCE(memory->dev.bus != &memory_subsys))
return;
+ WARN_ON(xa_erase(&memory_blocks, memory->dev.id) == NULL);
+
/* drop the ref. we got via find_memory_block() */
put_device(&memory->dev);
device_unregister(&memory->dev);
@@ -750,6 +768,8 @@ void __init memory_dev_init(void)
*
* In case func() returns an error, walking is aborted and the error is
* returned.
+ *
+ * Called under device_hotplug_lock.
*/
int walk_memory_blocks(unsigned long start, unsigned long size,
void *arg, walk_memory_blocks_func_t func)
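The xarray introduced above replaces the linear subsys_find_device_by_id() scan with an indexed lookup keyed by memory block id: register_memory() stores the block, find_memory_block_by_id() loads it and takes a device reference, and unregister_memory() erases the entry. A self-contained sketch of the same caching pattern, with hypothetical my_block names rather than the driver-core types:

#include <linux/xarray.h>

struct my_block {
        unsigned long id;
        /* payload */
};

static DEFINE_XARRAY(my_blocks);

static int my_block_add(struct my_block *blk)
{
        /* xa_store() returns the displaced entry or an xa_err()-encoded error. */
        return xa_err(xa_store(&my_blocks, blk->id, blk, GFP_KERNEL));
}

static struct my_block *my_block_find(unsigned long id)
{
        return xa_load(&my_blocks, id); /* NULL when nothing is stored at @id */
}

static void my_block_del(unsigned long id)
{
        xa_erase(&my_blocks, id);
}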
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 10d7e818e118..5b02f69769e8 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -415,6 +415,9 @@ static ssize_t node_read_meminfo(struct device *dev,
"Node %d AnonPages: %8lu kB\n"
"Node %d Shmem: %8lu kB\n"
"Node %d KernelStack: %8lu kB\n"
+#ifdef CONFIG_SHADOW_CALL_STACK
+ "Node %d ShadowCallStack:%8lu kB\n"
+#endif
"Node %d PageTables: %8lu kB\n"
"Node %d NFS_Unstable: %8lu kB\n"
"Node %d Bounce: %8lu kB\n"
@@ -438,8 +441,11 @@ static ssize_t node_read_meminfo(struct device *dev,
nid, K(node_page_state(pgdat, NR_ANON_MAPPED)),
nid, K(i.sharedram),
nid, sum_zone_node_page_state(nid, NR_KERNEL_STACK_KB),
+#ifdef CONFIG_SHADOW_CALL_STACK
+ nid, sum_zone_node_page_state(nid, NR_KERNEL_SCS_KB),
+#endif
nid, K(sum_zone_node_page_state(nid, NR_PAGETABLE)),
- nid, K(node_page_state(pgdat, NR_UNSTABLE_NFS)),
+ nid, 0UL,
nid, K(sum_zone_node_page_state(nid, NR_BOUNCE)),
nid, K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
nid, K(sreclaimable +
diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c
index 8da314b81eab..c4a17e5edf8b 100644
--- a/drivers/base/platform-msi.c
+++ b/drivers/base/platform-msi.c
@@ -387,7 +387,7 @@ void platform_msi_domain_free(struct irq_domain *domain, unsigned int virq,
*
* @domain: The platform-msi domain
* @virq: The base irq from which to perform the allocate operation
- * @nvec: How many interrupts to free from @virq
+ * @nr_irqs: How many interrupts to free from @virq
*
* Return 0 on success, or an error code on failure. Must be called
* with irq_domain_mutex held (which can only be done as part of a
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index b27d0f6c18c9..c0d0a5490ac6 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -147,28 +147,30 @@ EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource_byname);
* request_irq() APIs. This is the same as platform_get_irq(), except that it
* does not print an error message if an IRQ can not be obtained.
*
- * Example:
+ * For example::
+ *
* int irq = platform_get_irq_optional(pdev, 0);
* if (irq < 0)
* return irq;
*
- * Return: IRQ number on success, negative error number on failure.
+ * Return: non-zero IRQ number on success, negative error number on failure.
*/
int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
{
+ int ret;
#ifdef CONFIG_SPARC
/* sparc does not have irqs represented as IORESOURCE_IRQ resources */
if (!dev || num >= dev->archdata.num_irqs)
return -ENXIO;
- return dev->archdata.irqs[num];
+ ret = dev->archdata.irqs[num];
+ goto out;
#else
struct resource *r;
- int ret;
if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
ret = of_irq_get(dev->dev.of_node, num);
if (ret > 0 || ret == -EPROBE_DEFER)
- return ret;
+ goto out;
}
r = platform_get_resource(dev, IORESOURCE_IRQ, num);
@@ -176,7 +178,7 @@ int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
if (r && r->flags & IORESOURCE_DISABLED) {
ret = acpi_irq_get(ACPI_HANDLE(&dev->dev), num, r);
if (ret)
- return ret;
+ goto out;
}
}
@@ -190,13 +192,17 @@ int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
struct irq_data *irqd;
irqd = irq_get_irq_data(r->start);
- if (!irqd)
- return -ENXIO;
+ if (!irqd) {
+ ret = -ENXIO;
+ goto out;
+ }
irqd_set_trigger_type(irqd, r->flags & IORESOURCE_BITS);
}
- if (r)
- return r->start;
+ if (r) {
+ ret = r->start;
+ goto out;
+ }
/*
* For the index 0 interrupt, allow falling back to GpioInt
@@ -209,11 +215,14 @@ int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
ret = acpi_dev_gpio_irq_get(ACPI_COMPANION(&dev->dev), num);
/* Our callers expect -ENXIO for missing IRQs. */
if (ret >= 0 || ret == -EPROBE_DEFER)
- return ret;
+ goto out;
}
- return -ENXIO;
+ ret = -ENXIO;
#endif
+out:
+ WARN(ret == 0, "0 is an invalid IRQ number\n");
+ return ret;
}
EXPORT_SYMBOL_GPL(platform_get_irq_optional);
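As the comment in the hunk above notes, a missing IRQ is still reported as -ENXIO (and a would-be IRQ number of 0 now triggers a WARN); the point of the _optional variant is that callers may treat the absence of an IRQ as a valid configuration. A hedged sketch of such a caller, with hypothetical foo_* helpers:

static int foo_probe(struct platform_device *pdev)
{
        int irq = platform_get_irq_optional(pdev, 0);

        if (irq == -ENXIO)
                return foo_setup_polling(pdev); /* no IRQ described: poll instead */
        if (irq < 0)
                return irq;                     /* -EPROBE_DEFER or a real error */

        return foo_setup_irq(pdev, irq);        /* valid, non-zero IRQ number */
}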
@@ -226,12 +235,13 @@ EXPORT_SYMBOL_GPL(platform_get_irq_optional);
* IRQ fails. Device drivers should check the return value for errors so as to
* not pass a negative integer value to the request_irq() APIs.
*
- * Example:
+ * For example::
+ *
* int irq = platform_get_irq(pdev, 0);
* if (irq < 0)
* return irq;
*
- * Return: IRQ number on success, negative error number on failure.
+ * Return: non-zero IRQ number on success, negative error number on failure.
*/
int platform_get_irq(struct platform_device *dev, unsigned int num)
{
@@ -303,8 +313,10 @@ static int __platform_get_irq_byname(struct platform_device *dev,
}
r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name);
- if (r)
+ if (r) {
+ WARN(r->start == 0, "0 is an invalid IRQ number\n");
return r->start;
+ }
return -ENXIO;
}
@@ -316,7 +328,7 @@ static int __platform_get_irq_byname(struct platform_device *dev,
*
* Get an IRQ like platform_get_irq(), but then by name rather then by index.
*
- * Return: IRQ number on success, negative error number on failure.
+ * Return: non-zero IRQ number on success, negative error number on failure.
*/
int platform_get_irq_byname(struct platform_device *dev, const char *name)
{
@@ -338,7 +350,7 @@ EXPORT_SYMBOL_GPL(platform_get_irq_byname);
* Get an optional IRQ by name like platform_get_irq_byname(). Except that it
* does not print an error message if an IRQ can not be obtained.
*
- * Return: IRQ number on success, negative error number on failure.
+ * Return: non-zero IRQ number on success, negative error number on failure.
*/
int platform_get_irq_byname_optional(struct platform_device *dev,
const char *name)
@@ -670,7 +682,7 @@ EXPORT_SYMBOL_GPL(platform_device_unregister);
struct platform_device *platform_device_register_full(
const struct platform_device_info *pdevinfo)
{
- int ret = -ENOMEM;
+ int ret;
struct platform_device *pdev;
pdev = platform_device_alloc(pdevinfo->name, pdevinfo->id);
@@ -851,6 +863,8 @@ int __init_or_module __platform_driver_probe(struct platform_driver *drv,
/* temporary section violation during probe() */
drv->probe = probe;
retval = code = __platform_driver_register(drv, module);
+ if (retval)
+ return retval;
/*
* Fixup that section violation, being paranoid about code scanning
@@ -975,7 +989,7 @@ EXPORT_SYMBOL_GPL(__platform_register_drivers);
* @drivers: an array of drivers to unregister
* @count: the number of drivers to unregister
*
- * Unegisters platform drivers specified by an array. This is typically used
+ * Unregisters platform drivers specified by an array. This is typically used
* to complement an earlier call to platform_register_drivers(). Drivers are
* unregistered in the reverse order in which they were registered.
*/
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 0e07e17c2def..9dd85bea4026 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -519,7 +519,7 @@ static void dpm_watchdog_handler(struct timer_list *t)
struct dpm_watchdog *wd = from_timer(wd, t, timer);
dev_emerg(wd->dev, "**** DPM device timeout ****\n");
- show_stack(wd->tsk, NULL);
+ show_stack(wd->tsk, NULL, KERN_EMERG);
panic("%s %s: unrecoverable failure\n",
dev_driver_string(wd->dev), dev_name(wd->dev));
}
@@ -562,72 +562,26 @@ static void dpm_watchdog_clear(struct dpm_watchdog *wd)
/*------------------------- Resume routines -------------------------*/
/**
- * suspend_event - Return a "suspend" message for given "resume" one.
- * @resume_msg: PM message representing a system-wide resume transition.
- */
-static pm_message_t suspend_event(pm_message_t resume_msg)
-{
- switch (resume_msg.event) {
- case PM_EVENT_RESUME:
- return PMSG_SUSPEND;
- case PM_EVENT_THAW:
- case PM_EVENT_RESTORE:
- return PMSG_FREEZE;
- case PM_EVENT_RECOVER:
- return PMSG_HIBERNATE;
- }
- return PMSG_ON;
-}
-
-/**
- * dev_pm_may_skip_resume - System-wide device resume optimization check.
+ * dev_pm_skip_resume - System-wide device resume optimization check.
* @dev: Target device.
*
- * Checks whether or not the device may be left in suspend after a system-wide
- * transition to the working state.
+ * Return:
+ * - %false if the transition under way is RESTORE.
+ * - Return value of dev_pm_skip_suspend() if the transition under way is THAW.
+ * - The logical negation of %power.must_resume otherwise (that is, when the
+ * transition under way is RESUME).
*/
-bool dev_pm_may_skip_resume(struct device *dev)
+bool dev_pm_skip_resume(struct device *dev)
{
- return !dev->power.must_resume && pm_transition.event != PM_EVENT_RESTORE;
-}
-
-static pm_callback_t dpm_subsys_resume_noirq_cb(struct device *dev,
- pm_message_t state,
- const char **info_p)
-{
- pm_callback_t callback;
- const char *info;
-
- if (dev->pm_domain) {
- info = "noirq power domain ";
- callback = pm_noirq_op(&dev->pm_domain->ops, state);
- } else if (dev->type && dev->type->pm) {
- info = "noirq type ";
- callback = pm_noirq_op(dev->type->pm, state);
- } else if (dev->class && dev->class->pm) {
- info = "noirq class ";
- callback = pm_noirq_op(dev->class->pm, state);
- } else if (dev->bus && dev->bus->pm) {
- info = "noirq bus ";
- callback = pm_noirq_op(dev->bus->pm, state);
- } else {
- return NULL;
- }
+ if (pm_transition.event == PM_EVENT_RESTORE)
+ return false;
- if (info_p)
- *info_p = info;
+ if (pm_transition.event == PM_EVENT_THAW)
+ return dev_pm_skip_suspend(dev);
- return callback;
+ return !dev->power.must_resume;
}
-static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev,
- pm_message_t state,
- const char **info_p);
-
-static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
- pm_message_t state,
- const char **info_p);
-
/**
* device_resume_noirq - Execute a "noirq resume" callback for given device.
* @dev: Device to handle.
@@ -639,8 +593,8 @@ static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
*/
static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
- pm_callback_t callback;
- const char *info;
+ pm_callback_t callback = NULL;
+ const char *info = NULL;
bool skip_resume;
int error = 0;
@@ -656,37 +610,41 @@ static int device_resume_noirq(struct device *dev, pm_message_t state, bool asyn
if (!dpm_wait_for_superior(dev, async))
goto Out;
- skip_resume = dev_pm_may_skip_resume(dev);
+ skip_resume = dev_pm_skip_resume(dev);
+ /*
+ * If the driver callback is skipped below or by the middle layer
+ * callback and device_resume_early() also skips the driver callback for
+ * this device later, it needs to appear as "suspended" to PM-runtime,
+ * so change its status accordingly.
+ *
+ * Otherwise, the device is going to be resumed, so set its PM-runtime
+ * status to "active", but do that only if DPM_FLAG_SMART_SUSPEND is set
+ * to avoid confusing drivers that don't use it.
+ */
+ if (skip_resume)
+ pm_runtime_set_suspended(dev);
+ else if (dev_pm_skip_suspend(dev))
+ pm_runtime_set_active(dev);
- callback = dpm_subsys_resume_noirq_cb(dev, state, &info);
+ if (dev->pm_domain) {
+ info = "noirq power domain ";
+ callback = pm_noirq_op(&dev->pm_domain->ops, state);
+ } else if (dev->type && dev->type->pm) {
+ info = "noirq type ";
+ callback = pm_noirq_op(dev->type->pm, state);
+ } else if (dev->class && dev->class->pm) {
+ info = "noirq class ";
+ callback = pm_noirq_op(dev->class->pm, state);
+ } else if (dev->bus && dev->bus->pm) {
+ info = "noirq bus ";
+ callback = pm_noirq_op(dev->bus->pm, state);
+ }
if (callback)
goto Run;
if (skip_resume)
goto Skip;
- if (dev_pm_smart_suspend_and_suspended(dev)) {
- pm_message_t suspend_msg = suspend_event(state);
-
- /*
- * If "freeze" callbacks have been skipped during a transition
- * related to hibernation, the subsequent "thaw" callbacks must
- * be skipped too or bad things may happen. Otherwise, resume
- * callbacks are going to be run for the device, so its runtime
- * PM status must be changed to reflect the new state after the
- * transition under way.
- */
- if (!dpm_subsys_suspend_late_cb(dev, suspend_msg, NULL) &&
- !dpm_subsys_suspend_noirq_cb(dev, suspend_msg, NULL)) {
- if (state.event == PM_EVENT_THAW) {
- skip_resume = true;
- goto Skip;
- } else {
- pm_runtime_set_active(dev);
- }
- }
- }
-
if (dev->driver && dev->driver->pm) {
info = "noirq driver ";
callback = pm_noirq_op(dev->driver->pm, state);
@@ -698,20 +656,6 @@ Run:
Skip:
dev->power.is_noirq_suspended = false;
- if (skip_resume) {
- /* Make the next phases of resume skip the device. */
- dev->power.is_late_suspended = false;
- dev->power.is_suspended = false;
- /*
- * The device is going to be left in suspend, but it might not
- * have been in runtime suspend before the system suspended, so
- * its runtime PM status needs to be updated to avoid confusing
- * the runtime PM framework when runtime PM is enabled for the
- * device again.
- */
- pm_runtime_set_suspended(dev);
- }
-
Out:
complete_all(&dev->power.completion);
TRACE_RESUME(error);
@@ -810,35 +754,6 @@ void dpm_resume_noirq(pm_message_t state)
cpuidle_resume();
}
-static pm_callback_t dpm_subsys_resume_early_cb(struct device *dev,
- pm_message_t state,
- const char **info_p)
-{
- pm_callback_t callback;
- const char *info;
-
- if (dev->pm_domain) {
- info = "early power domain ";
- callback = pm_late_early_op(&dev->pm_domain->ops, state);
- } else if (dev->type && dev->type->pm) {
- info = "early type ";
- callback = pm_late_early_op(dev->type->pm, state);
- } else if (dev->class && dev->class->pm) {
- info = "early class ";
- callback = pm_late_early_op(dev->class->pm, state);
- } else if (dev->bus && dev->bus->pm) {
- info = "early bus ";
- callback = pm_late_early_op(dev->bus->pm, state);
- } else {
- return NULL;
- }
-
- if (info_p)
- *info_p = info;
-
- return callback;
-}
-
/**
* device_resume_early - Execute an "early resume" callback for given device.
* @dev: Device to handle.
@@ -849,8 +764,8 @@ static pm_callback_t dpm_subsys_resume_early_cb(struct device *dev,
*/
static int device_resume_early(struct device *dev, pm_message_t state, bool async)
{
- pm_callback_t callback;
- const char *info;
+ pm_callback_t callback = NULL;
+ const char *info = NULL;
int error = 0;
TRACE_DEVICE(dev);
@@ -865,17 +780,37 @@ static int device_resume_early(struct device *dev, pm_message_t state, bool asyn
if (!dpm_wait_for_superior(dev, async))
goto Out;
- callback = dpm_subsys_resume_early_cb(dev, state, &info);
+ if (dev->pm_domain) {
+ info = "early power domain ";
+ callback = pm_late_early_op(&dev->pm_domain->ops, state);
+ } else if (dev->type && dev->type->pm) {
+ info = "early type ";
+ callback = pm_late_early_op(dev->type->pm, state);
+ } else if (dev->class && dev->class->pm) {
+ info = "early class ";
+ callback = pm_late_early_op(dev->class->pm, state);
+ } else if (dev->bus && dev->bus->pm) {
+ info = "early bus ";
+ callback = pm_late_early_op(dev->bus->pm, state);
+ }
+ if (callback)
+ goto Run;
+
+ if (dev_pm_skip_resume(dev))
+ goto Skip;
- if (!callback && dev->driver && dev->driver->pm) {
+ if (dev->driver && dev->driver->pm) {
info = "early driver ";
callback = pm_late_early_op(dev->driver->pm, state);
}
+Run:
error = dpm_run_callback(callback, dev, state, info);
+
+Skip:
dev->power.is_late_suspended = false;
- Out:
+Out:
TRACE_RESUME(error);
pm_runtime_enable(dev);
@@ -1245,61 +1180,6 @@ static void dpm_superior_set_must_resume(struct device *dev)
device_links_read_unlock(idx);
}
-static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev,
- pm_message_t state,
- const char **info_p)
-{
- pm_callback_t callback;
- const char *info;
-
- if (dev->pm_domain) {
- info = "noirq power domain ";
- callback = pm_noirq_op(&dev->pm_domain->ops, state);
- } else if (dev->type && dev->type->pm) {
- info = "noirq type ";
- callback = pm_noirq_op(dev->type->pm, state);
- } else if (dev->class && dev->class->pm) {
- info = "noirq class ";
- callback = pm_noirq_op(dev->class->pm, state);
- } else if (dev->bus && dev->bus->pm) {
- info = "noirq bus ";
- callback = pm_noirq_op(dev->bus->pm, state);
- } else {
- return NULL;
- }
-
- if (info_p)
- *info_p = info;
-
- return callback;
-}
-
-static bool device_must_resume(struct device *dev, pm_message_t state,
- bool no_subsys_suspend_noirq)
-{
- pm_message_t resume_msg = resume_event(state);
-
- /*
- * If all of the device driver's "noirq", "late" and "early" callbacks
- * are invoked directly by the core, the decision to allow the device to
- * stay in suspend can be based on its current runtime PM status and its
- * wakeup settings.
- */
- if (no_subsys_suspend_noirq &&
- !dpm_subsys_suspend_late_cb(dev, state, NULL) &&
- !dpm_subsys_resume_early_cb(dev, resume_msg, NULL) &&
- !dpm_subsys_resume_noirq_cb(dev, resume_msg, NULL))
- return !pm_runtime_status_suspended(dev) &&
- (resume_msg.event != PM_EVENT_RESUME ||
- (device_can_wakeup(dev) && !device_may_wakeup(dev)));
-
- /*
- * The only safe strategy here is to require that if the device may not
- * be left in suspend, resume callbacks must be invoked for it.
- */
- return !dev->power.may_skip_resume;
-}
-
/**
* __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
* @dev: Device to handle.
@@ -1311,9 +1191,8 @@ static bool device_must_resume(struct device *dev, pm_message_t state,
*/
static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
- pm_callback_t callback;
- const char *info;
- bool no_subsys_cb = false;
+ pm_callback_t callback = NULL;
+ const char *info = NULL;
int error = 0;
TRACE_DEVICE(dev);
@@ -1327,13 +1206,23 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
if (dev->power.syscore || dev->power.direct_complete)
goto Complete;
- callback = dpm_subsys_suspend_noirq_cb(dev, state, &info);
+ if (dev->pm_domain) {
+ info = "noirq power domain ";
+ callback = pm_noirq_op(&dev->pm_domain->ops, state);
+ } else if (dev->type && dev->type->pm) {
+ info = "noirq type ";
+ callback = pm_noirq_op(dev->type->pm, state);
+ } else if (dev->class && dev->class->pm) {
+ info = "noirq class ";
+ callback = pm_noirq_op(dev->class->pm, state);
+ } else if (dev->bus && dev->bus->pm) {
+ info = "noirq bus ";
+ callback = pm_noirq_op(dev->bus->pm, state);
+ }
if (callback)
goto Run;
- no_subsys_cb = !dpm_subsys_suspend_late_cb(dev, state, NULL);
-
- if (dev_pm_smart_suspend_and_suspended(dev) && no_subsys_cb)
+ if (dev_pm_skip_suspend(dev))
goto Skip;
if (dev->driver && dev->driver->pm) {
@@ -1351,13 +1240,16 @@ Run:
Skip:
dev->power.is_noirq_suspended = true;
- if (dev_pm_test_driver_flags(dev, DPM_FLAG_LEAVE_SUSPENDED)) {
- dev->power.must_resume = dev->power.must_resume ||
- atomic_read(&dev->power.usage_count) > 1 ||
- device_must_resume(dev, state, no_subsys_cb);
- } else {
+ /*
+ * Skipping the resume of devices that were in use right before the
+ * system suspend (as indicated by their PM-runtime usage counters)
+ * would be suboptimal. Also resume them if doing that is not allowed
+ * to be skipped.
+ */
+ if (atomic_read(&dev->power.usage_count) > 1 ||
+ !(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
+ dev->power.may_skip_resume))
dev->power.must_resume = true;
- }
if (dev->power.must_resume)
dpm_superior_set_must_resume(dev);
@@ -1474,35 +1366,6 @@ static void dpm_propagate_wakeup_to_parent(struct device *dev)
spin_unlock_irq(&parent->power.lock);
}
-static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
- pm_message_t state,
- const char **info_p)
-{
- pm_callback_t callback;
- const char *info;
-
- if (dev->pm_domain) {
- info = "late power domain ";
- callback = pm_late_early_op(&dev->pm_domain->ops, state);
- } else if (dev->type && dev->type->pm) {
- info = "late type ";
- callback = pm_late_early_op(dev->type->pm, state);
- } else if (dev->class && dev->class->pm) {
- info = "late class ";
- callback = pm_late_early_op(dev->class->pm, state);
- } else if (dev->bus && dev->bus->pm) {
- info = "late bus ";
- callback = pm_late_early_op(dev->bus->pm, state);
- } else {
- return NULL;
- }
-
- if (info_p)
- *info_p = info;
-
- return callback;
-}
-
/**
* __device_suspend_late - Execute a "late suspend" callback for given device.
* @dev: Device to handle.
@@ -1513,8 +1376,8 @@ static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
*/
static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
- pm_callback_t callback;
- const char *info;
+ pm_callback_t callback = NULL;
+ const char *info = NULL;
int error = 0;
TRACE_DEVICE(dev);
@@ -1535,12 +1398,23 @@ static int __device_suspend_late(struct device *dev, pm_message_t state, bool as
if (dev->power.syscore || dev->power.direct_complete)
goto Complete;
- callback = dpm_subsys_suspend_late_cb(dev, state, &info);
+ if (dev->pm_domain) {
+ info = "late power domain ";
+ callback = pm_late_early_op(&dev->pm_domain->ops, state);
+ } else if (dev->type && dev->type->pm) {
+ info = "late type ";
+ callback = pm_late_early_op(dev->type->pm, state);
+ } else if (dev->class && dev->class->pm) {
+ info = "late class ";
+ callback = pm_late_early_op(dev->class->pm, state);
+ } else if (dev->bus && dev->bus->pm) {
+ info = "late bus ";
+ callback = pm_late_early_op(dev->bus->pm, state);
+ }
if (callback)
goto Run;
- if (dev_pm_smart_suspend_and_suspended(dev) &&
- !dpm_subsys_suspend_noirq_cb(dev, state, NULL))
+ if (dev_pm_skip_suspend(dev))
goto Skip;
if (dev->driver && dev->driver->pm) {
@@ -1766,7 +1640,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
dev->power.direct_complete = false;
}
- dev->power.may_skip_resume = false;
+ dev->power.may_skip_resume = true;
dev->power.must_resume = false;
dpm_watchdog_set(&wd, dev);
@@ -1970,7 +1844,7 @@ unlock:
spin_lock_irq(&dev->power.lock);
dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
(ret > 0 || dev->power.no_pm_callbacks) &&
- !dev_pm_test_driver_flags(dev, DPM_FLAG_NEVER_SKIP);
+ !dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
spin_unlock_irq(&dev->power.lock);
return 0;
}
@@ -2128,7 +2002,7 @@ void device_pm_check_callbacks(struct device *dev)
spin_unlock_irq(&dev->power.lock);
}
-bool dev_pm_smart_suspend_and_suspended(struct device *dev)
+bool dev_pm_skip_suspend(struct device *dev)
{
return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
pm_runtime_status_suspended(dev);
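The main.c changes above retire the dpm_subsys_*_cb() helpers, rename dev_pm_smart_suspend_and_suspended() to dev_pm_skip_suspend(), and switch the opt-in flags to DPM_FLAG_MAY_SKIP_RESUME and DPM_FLAG_NO_DIRECT_COMPLETE, with power.may_skip_resume now starting out true for every transition. A hedged sketch of how a driver might opt in at probe time (foo_probe is hypothetical; the flag names are the ones used in this patch):

static int foo_probe(struct platform_device *pdev)
{
        /*
         * Reuse the runtime-suspended state across system suspend and let the
         * core leave the device suspended on resume when nothing requires it.
         */
        dev_pm_set_driver_flags(&pdev->dev,
                                DPM_FLAG_SMART_SUSPEND | DPM_FLAG_MAY_SKIP_RESUME);
        return 0;
}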
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 99c7da112c95..9f62790f644c 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -523,13 +523,11 @@ static int rpm_suspend(struct device *dev, int rpmflags)
repeat:
retval = rpm_check_suspend_allowed(dev);
-
if (retval < 0)
- ; /* Conditions are wrong. */
+ goto out; /* Conditions are wrong. */
/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
- else if (dev->power.runtime_status == RPM_RESUMING &&
- !(rpmflags & RPM_ASYNC))
+ if (dev->power.runtime_status == RPM_RESUMING && !(rpmflags & RPM_ASYNC))
retval = -EAGAIN;
if (retval)
goto out;
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index 2b99fe1eb207..24d25cf8ab14 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -666,7 +666,7 @@ int dpm_sysfs_add(struct device *dev)
if (rc)
return rc;
- if (pm_runtime_callbacks_present(dev)) {
+ if (!pm_runtime_has_no_callbacks(dev)) {
rc = sysfs_merge_group(&dev->kobj, &pm_runtime_attr_group);
if (rc)
goto err_out;
@@ -709,7 +709,7 @@ int dpm_sysfs_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid)
if (rc)
return rc;
- if (pm_runtime_callbacks_present(dev)) {
+ if (!pm_runtime_has_no_callbacks(dev)) {
rc = sysfs_group_change_owner(
&dev->kobj, &pm_runtime_attr_group, kuid, kgid);
if (rc)
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 5f35c0ccf5e0..1e6d75e65938 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -708,14 +708,23 @@ struct fwnode_handle *device_get_next_child_node(struct device *dev,
struct fwnode_handle *child)
{
struct acpi_device *adev = ACPI_COMPANION(dev);
- struct fwnode_handle *fwnode = NULL;
+ struct fwnode_handle *fwnode = NULL, *next;
if (dev->of_node)
fwnode = &dev->of_node->fwnode;
else if (adev)
fwnode = acpi_fwnode_handle(adev);
- return fwnode_get_next_child_node(fwnode, child);
+ /* Try to find a child in primary fwnode */
+ next = fwnode_get_next_child_node(fwnode, child);
+ if (next)
+ return next;
+
+ /* When no more children in primary, continue with secondary */
+ if (!IS_ERR_OR_NULL(fwnode->secondary))
+ next = fwnode_get_next_child_node(fwnode->secondary, child);
+
+ return next;
}
EXPORT_SYMBOL_GPL(device_get_next_child_node);
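With the change above, device_get_next_child_node() continues into the secondary fwnode (for instance, software nodes hung off the device) once the primary node runs out of children, so the usual iteration macro now visits both sets. A hedged sketch of such a walk, with a hypothetical "reg-offset" property:

struct fwnode_handle *child;
u32 offset;

device_for_each_child_node(dev, child) {
        /* Children of the primary DT/ACPI node are returned first, then any
         * children of the secondary (software) node. */
        if (!fwnode_property_read_u32(child, "reg-offset", &offset))
                dev_dbg(dev, "child with reg-offset %u\n", offset);
}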
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index e72843fe41df..089e5dc7144a 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -227,6 +227,9 @@ static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from,
if (*ppos < 0 || !count)
return -EINVAL;
+ if (count > (PAGE_SIZE << (MAX_ORDER - 1)))
+ count = PAGE_SIZE << (MAX_ORDER - 1);
+
buf = kmalloc(count, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -371,6 +374,9 @@ static ssize_t regmap_reg_ranges_read_file(struct file *file,
if (*ppos < 0 || !count)
return -EINVAL;
+ if (count > (PAGE_SIZE << (MAX_ORDER - 1)))
+ count = PAGE_SIZE << (MAX_ORDER - 1);
+
buf = kmalloc(count, GFP_KERNEL);
if (!buf)
return -ENOMEM;
diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c
index 008f8da69d97..62b95a9212ae 100644
--- a/drivers/base/regmap/regmap-i2c.c
+++ b/drivers/base/regmap/regmap-i2c.c
@@ -246,6 +246,63 @@ static const struct regmap_bus regmap_i2c_smbus_i2c_block = {
.max_raw_write = I2C_SMBUS_BLOCK_MAX,
};
+static int regmap_i2c_smbus_i2c_write_reg16(void *context, const void *data,
+ size_t count)
+{
+ struct device *dev = context;
+ struct i2c_client *i2c = to_i2c_client(dev);
+
+ if (count < 2)
+ return -EINVAL;
+
+ count--;
+ return i2c_smbus_write_i2c_block_data(i2c, ((u8 *)data)[0], count,
+ (u8 *)data + 1);
+}
+
+static int regmap_i2c_smbus_i2c_read_reg16(void *context, const void *reg,
+ size_t reg_size, void *val,
+ size_t val_size)
+{
+ struct device *dev = context;
+ struct i2c_client *i2c = to_i2c_client(dev);
+ int ret, count, len = val_size;
+
+ if (reg_size != 2)
+ return -EINVAL;
+
+ ret = i2c_smbus_write_byte_data(i2c, ((u16 *)reg)[0] & 0xff,
+ ((u16 *)reg)[0] >> 8);
+ if (ret < 0)
+ return ret;
+
+ count = 0;
+ do {
+ /* Current Address Read */
+ ret = i2c_smbus_read_byte(i2c);
+ if (ret < 0)
+ break;
+
+ *((u8 *)val++) = ret;
+ count++;
+ len--;
+ } while (len > 0);
+
+ if (count == val_size)
+ return 0;
+ else if (ret < 0)
+ return ret;
+ else
+ return -EIO;
+}
+
+static const struct regmap_bus regmap_i2c_smbus_i2c_block_reg16 = {
+ .write = regmap_i2c_smbus_i2c_write_reg16,
+ .read = regmap_i2c_smbus_i2c_read_reg16,
+ .max_raw_read = I2C_SMBUS_BLOCK_MAX,
+ .max_raw_write = I2C_SMBUS_BLOCK_MAX,
+};
+
static const struct regmap_bus *regmap_get_i2c_bus(struct i2c_client *i2c,
const struct regmap_config *config)
{
@@ -255,6 +312,10 @@ static const struct regmap_bus *regmap_get_i2c_bus(struct i2c_client *i2c,
i2c_check_functionality(i2c->adapter,
I2C_FUNC_SMBUS_I2C_BLOCK))
return &regmap_i2c_smbus_i2c_block;
+ else if (config->val_bits == 8 && config->reg_bits == 16 &&
+ i2c_check_functionality(i2c->adapter,
+ I2C_FUNC_SMBUS_I2C_BLOCK))
+ return &regmap_i2c_smbus_i2c_block_reg16;
else if (config->val_bits == 16 && config->reg_bits == 8 &&
i2c_check_functionality(i2c->adapter,
I2C_FUNC_SMBUS_WORD_DATA))
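The regmap_i2c_smbus_i2c_block_reg16 bus added above covers devices with 16-bit register addresses and 8-bit values on adapters that only provide SMBus I2C-block transfers; reads are emulated by writing the register address and then issuing current-address byte reads. A hedged sketch of a config that would now resolve to this bus (the foo_* names are hypothetical):

static const struct regmap_config foo_regmap_config = {
        .reg_bits = 16,         /* 16-bit register address, 8-bit values */
        .val_bits = 8,
        .max_register = 0x01ff,
};

static struct regmap *foo_init_regmap(struct i2c_client *client)
{
        /* On an SMBus-only adapter advertising I2C_FUNC_SMBUS_I2C_BLOCK this
         * now picks the reg16 block bus instead of failing outright. */
        return devm_regmap_init_i2c(client, &foo_regmap_config);
}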
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 3d64c9331a82..4340e1d268b6 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -541,8 +541,9 @@ static const struct irq_domain_ops regmap_domain_ops = {
};
/**
- * regmap_add_irq_chip() - Use standard regmap IRQ controller handling
+ * regmap_add_irq_chip_np() - Use standard regmap IRQ controller handling
*
+ * @np: The device_node where the IRQ domain should be added to.
* @map: The regmap for the device.
* @irq: The IRQ the device uses to signal interrupts.
* @irq_flags: The IRQF_ flags to use for the primary interrupt.
@@ -556,9 +557,10 @@ static const struct irq_domain_ops regmap_domain_ops = {
* register cache. The chip driver is responsible for restoring the
* register values used by the IRQ controller over suspend and resume.
*/
-int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
- int irq_base, const struct regmap_irq_chip *chip,
- struct regmap_irq_chip_data **data)
+int regmap_add_irq_chip_np(struct device_node *np, struct regmap *map, int irq,
+ int irq_flags, int irq_base,
+ const struct regmap_irq_chip *chip,
+ struct regmap_irq_chip_data **data)
{
struct regmap_irq_chip_data *d;
int i;
@@ -769,12 +771,10 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
}
if (irq_base)
- d->domain = irq_domain_add_legacy(map->dev->of_node,
- chip->num_irqs, irq_base, 0,
- &regmap_domain_ops, d);
+ d->domain = irq_domain_add_legacy(np, chip->num_irqs, irq_base,
+ 0, &regmap_domain_ops, d);
else
- d->domain = irq_domain_add_linear(map->dev->of_node,
- chip->num_irqs,
+ d->domain = irq_domain_add_linear(np, chip->num_irqs,
&regmap_domain_ops, d);
if (!d->domain) {
dev_err(map->dev, "Failed to create IRQ domain\n");
@@ -808,6 +808,30 @@ err_alloc:
kfree(d);
return ret;
}
+EXPORT_SYMBOL_GPL(regmap_add_irq_chip_np);
+
+/**
+ * regmap_add_irq_chip() - Use standard regmap IRQ controller handling
+ *
+ * @map: The regmap for the device.
+ * @irq: The IRQ the device uses to signal interrupts.
+ * @irq_flags: The IRQF_ flags to use for the primary interrupt.
+ * @irq_base: Allocate at specific IRQ number if irq_base > 0.
+ * @chip: Configuration for the interrupt controller.
+ * @data: Runtime data structure for the controller, allocated on success.
+ *
+ * Returns 0 on success or an errno on failure.
+ *
+ * This is the same as regmap_add_irq_chip_np, except that the device
+ * node of the regmap is used.
+ */
+int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
+ int irq_base, const struct regmap_irq_chip *chip,
+ struct regmap_irq_chip_data **data)
+{
+ return regmap_add_irq_chip_np(map->dev->of_node, map, irq, irq_flags,
+ irq_base, chip, data);
+}
EXPORT_SYMBOL_GPL(regmap_add_irq_chip);
/**
@@ -875,9 +899,10 @@ static int devm_regmap_irq_chip_match(struct device *dev, void *res, void *data)
}
/**
- * devm_regmap_add_irq_chip() - Resource manager regmap_add_irq_chip()
+ * devm_regmap_add_irq_chip_np() - Resource manager regmap_add_irq_chip_np()
*
* @dev: The device pointer on which irq_chip belongs to.
+ * @np: The device_node where the IRQ domain should be added to.
* @map: The regmap for the device.
* @irq: The IRQ the device uses to signal interrupts
* @irq_flags: The IRQF_ flags to use for the primary interrupt.
@@ -890,10 +915,11 @@ static int devm_regmap_irq_chip_match(struct device *dev, void *res, void *data)
* The &regmap_irq_chip_data will be automatically released when the device is
* unbound.
*/
-int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq,
- int irq_flags, int irq_base,
- const struct regmap_irq_chip *chip,
- struct regmap_irq_chip_data **data)
+int devm_regmap_add_irq_chip_np(struct device *dev, struct device_node *np,
+ struct regmap *map, int irq, int irq_flags,
+ int irq_base,
+ const struct regmap_irq_chip *chip,
+ struct regmap_irq_chip_data **data)
{
struct regmap_irq_chip_data **ptr, *d;
int ret;
@@ -903,8 +929,8 @@ int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq,
if (!ptr)
return -ENOMEM;
- ret = regmap_add_irq_chip(map, irq, irq_flags, irq_base,
- chip, &d);
+ ret = regmap_add_irq_chip_np(np, map, irq, irq_flags, irq_base,
+ chip, &d);
if (ret < 0) {
devres_free(ptr);
return ret;
@@ -915,6 +941,32 @@ int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq,
*data = d;
return 0;
}
+EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip_np);
+
+/**
+ * devm_regmap_add_irq_chip() - Resource manager regmap_add_irq_chip()
+ *
+ * @dev: The device pointer on which irq_chip belongs to.
+ * @map: The regmap for the device.
+ * @irq: The IRQ the device uses to signal interrupts
+ * @irq_flags: The IRQF_ flags to use for the primary interrupt.
+ * @irq_base: Allocate at specific IRQ number if irq_base > 0.
+ * @chip: Configuration for the interrupt controller.
+ * @data: Runtime data structure for the controller, allocated on success
+ *
+ * Returns 0 on success or an errno on failure.
+ *
+ * The &regmap_irq_chip_data will be automatically released when the device is
+ * unbound.
+ */
+int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq,
+ int irq_flags, int irq_base,
+ const struct regmap_irq_chip *chip,
+ struct regmap_irq_chip_data **data)
+{
+ return devm_regmap_add_irq_chip_np(dev, map->dev->of_node, map, irq,
+ irq_flags, irq_base, chip, data);
+}
EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip);
/**
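The new *_np entry points above take the device_node for the IRQ domain explicitly instead of defaulting to the regmap's own of_node, which helps when the interrupt controller is described by a child node of the device. A hedged usage sketch (foo_irq_chip, client_irq and the node name are hypothetical):

struct device_node *irq_np;
struct regmap_irq_chip_data *irq_data;
int ret;

irq_np = of_get_child_by_name(dev->of_node, "interrupt-controller");
ret = devm_regmap_add_irq_chip_np(dev, irq_np, map, client_irq,
                                  IRQF_ONESHOT, 0, &foo_irq_chip, &irq_data);
of_node_put(irq_np);
if (ret)
        return ret;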
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 59f911e57719..c472f624382d 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -827,6 +827,7 @@ struct regmap *__regmap_init(struct device *dev,
} else if (!bus->read || !bus->write) {
map->reg_read = _regmap_bus_reg_read;
map->reg_write = _regmap_bus_reg_write;
+ map->reg_update_bits = bus->reg_update_bits;
map->defer_caching = false;
goto skip_format_initialization;
@@ -2936,6 +2937,28 @@ int regmap_update_bits_base(struct regmap *map, unsigned int reg,
}
EXPORT_SYMBOL_GPL(regmap_update_bits_base);
+/**
+ * regmap_test_bits() - Check if all specified bits are set in a register.
+ *
+ * @map: Register map to operate on
+ * @reg: Register to read from
+ * @bits: Bits to test
+ *
+ * Returns -1 if the underlying regmap_read() fails, 0 if at least one of the
+ * tested bits is not set and 1 if all tested bits are set.
+ */
+int regmap_test_bits(struct regmap *map, unsigned int reg, unsigned int bits)
+{
+ unsigned int val, ret;
+
+ ret = regmap_read(map, reg, &val);
+ if (ret)
+ return ret;
+
+ return (val & bits) == bits;
+}
+EXPORT_SYMBOL_GPL(regmap_test_bits);
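regmap_test_bits() folds the usual read-and-mask check into one call: it returns 1 when every bit in @bits is set, 0 when at least one is clear, and a negative value if the underlying read fails. A hedged sketch against a hypothetical status register:

#define FOO_REG_STATUS   0x04           /* hypothetical register */
#define FOO_STATUS_READY BIT(0)         /* hypothetical ready bit */

static int foo_check_ready(struct regmap *map)
{
        int ret = regmap_test_bits(map, FOO_REG_STATUS, FOO_STATUS_READY);

        if (ret < 0)
                return ret;             /* the read itself failed */
        return ret ? 0 : -EBUSY;        /* 1 means all tested bits were set */
}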
+
void regmap_async_complete_cb(struct regmap_async *async, int ret)
{
struct regmap *map = async->map;
diff --git a/drivers/base/soc.c b/drivers/base/soc.c
index 4af11a423475..a5bae551167d 100644
--- a/drivers/base/soc.c
+++ b/drivers/base/soc.c
@@ -46,7 +46,7 @@ static umode_t soc_attribute_mode(struct kobject *kobj,
struct attribute *attr,
int index)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct soc_device *soc_dev = container_of(dev, struct soc_device, dev);
if ((attr == &dev_attr_machine.attr)
diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
index de8d3543e8fe..e5eb27375416 100644
--- a/drivers/base/swnode.c
+++ b/drivers/base/swnode.c
@@ -712,19 +712,68 @@ EXPORT_SYMBOL_GPL(software_node_register_nodes);
* @nodes: Zero terminated array of software nodes to be unregistered
*
* Unregister multiple software nodes at once.
+ *
+ * NOTE: Be careful using this call if the nodes had parent pointers set up in
+ * them before registering. If so, it is wiser to remove the nodes
+ * individually, in the correct order (child before parent) instead of relying
+ * on the sequential order of the list of nodes in the array.
*/
void software_node_unregister_nodes(const struct software_node *nodes)
{
- struct swnode *swnode;
int i;
- for (i = 0; nodes[i].name; i++) {
- swnode = software_node_to_swnode(&nodes[i]);
+ for (i = 0; nodes[i].name; i++)
+ software_node_unregister(&nodes[i]);
+}
+EXPORT_SYMBOL_GPL(software_node_unregister_nodes);
+
+/**
+ * software_node_register_node_group - Register a group of software nodes
+ * @node_group: NULL terminated array of software node pointers to be registered
+ *
+ * Register multiple software nodes at once.
+ */
+int software_node_register_node_group(const struct software_node **node_group)
+{
+ unsigned int i;
+ int ret;
+
+ if (!node_group)
+ return 0;
+
+ for (i = 0; node_group[i]; i++) {
+ ret = software_node_register(node_group[i]);
+ if (ret) {
+ software_node_unregister_node_group(node_group);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(software_node_register_node_group);
+
+/**
+ * software_node_unregister_node_group - Unregister a group of software nodes
+ * @node_group: NULL terminated array of software node pointers to be unregistered
+ *
+ * Unregister multiple software nodes at once.
+ */
+void software_node_unregister_node_group(const struct software_node **node_group)
+{
+ struct swnode *swnode;
+ unsigned int i;
+
+ if (!node_group)
+ return;
+
+ for (i = 0; node_group[i]; i++) {
+ swnode = software_node_to_swnode(node_group[i]);
if (swnode)
fwnode_remove_software_node(&swnode->fwnode);
}
}
-EXPORT_SYMBOL_GPL(software_node_unregister_nodes);
+EXPORT_SYMBOL_GPL(software_node_unregister_node_group);
/**
* software_node_register - Register static software node
@@ -741,6 +790,20 @@ int software_node_register(const struct software_node *node)
}
EXPORT_SYMBOL_GPL(software_node_register);
+/**
+ * software_node_unregister - Unregister static software node
+ * @node: The software node to be unregistered
+ */
+void software_node_unregister(const struct software_node *node)
+{
+ struct swnode *swnode;
+
+ swnode = software_node_to_swnode(node);
+ if (swnode)
+ fwnode_remove_software_node(&swnode->fwnode);
+}
+EXPORT_SYMBOL_GPL(software_node_unregister);
+
struct fwnode_handle *
fwnode_create_software_node(const struct property_entry *properties,
const struct fwnode_handle *parent)
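software_node_register_node_group() walks a NULL-terminated array of node pointers, registering each in order and unwinding everything already registered if one fails, so keeping parents ahead of their children in the array gives a valid registration order. A hedged sketch (the node names and property are hypothetical):

static const struct software_node foo_parent_node = {
        .name = "foo-controller",
};

static const struct property_entry foo_port_props[] = {
        PROPERTY_ENTRY_U32("reg", 0),
        { }
};

static const struct software_node foo_port_node = {
        .name = "port0",
        .parent = &foo_parent_node,     /* parent precedes child in the group */
        .properties = foo_port_props,
};

static const struct software_node *foo_node_group[] = {
        &foo_parent_node,
        &foo_port_node,
        NULL
};

static int foo_init_nodes(void)
{
        return software_node_register_node_group(foo_node_group);
}

static void foo_remove_nodes(void)
{
        software_node_unregister_node_group(foo_node_group);
}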
diff --git a/drivers/base/test/Kconfig b/drivers/base/test/Kconfig
index 305c7751184a..ba225eb1b761 100644
--- a/drivers/base/test/Kconfig
+++ b/drivers/base/test/Kconfig
@@ -9,5 +9,6 @@ config TEST_ASYNC_DRIVER_PROBE
If unsure say N.
config KUNIT_DRIVER_PE_TEST
- bool "KUnit Tests for property entry API"
+ bool "KUnit Tests for property entry API" if !KUNIT_ALL_TESTS
depends on KUNIT=y
+ default KUNIT_ALL_TESTS
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 025b1b77b11a..084b9efcefca 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -458,4 +458,6 @@ config BLK_DEV_RSXX
To compile this driver as a module, choose M here: the
module will be called rsxx.
+source "drivers/block/rnbd/Kconfig"
+
endif # BLK_DEV
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index 795facd8cf19..e1f63117ee94 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -39,6 +39,7 @@ obj-$(CONFIG_BLK_DEV_PCIESSD_MTIP32XX) += mtip32xx/
obj-$(CONFIG_BLK_DEV_RSXX) += rsxx/
obj-$(CONFIG_ZRAM) += zram/
+obj-$(CONFIG_BLK_DEV_RNBD) += rnbd/
obj-$(CONFIG_BLK_DEV_NULL_BLK) += null_blk.o
null_blk-objs := null_blk_main.o
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index a27804d71e12..5ca7216e9e01 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -407,7 +407,6 @@ aoeblk_gdalloc(void *vp)
WARN_ON(d->gd);
WARN_ON(d->flags & DEVFL_UP);
blk_queue_max_hw_sectors(q, BLK_DEF_MAX_SECTORS);
- q->backing_dev_info->name = "aoe";
q->backing_dev_info->ra_pages = READ_AHEAD / PAGE_SIZE;
d->bufpool = mp;
d->blkq = gd->queue = q;
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index 15e99697234a..df53dca5d02c 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -396,9 +396,7 @@ static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
bytes = sizeof(struct page *)*want;
new_pages = kzalloc(bytes, GFP_NOIO | __GFP_NOWARN);
if (!new_pages) {
- new_pages = __vmalloc(bytes,
- GFP_NOIO | __GFP_ZERO,
- PAGE_KERNEL);
+ new_pages = __vmalloc(bytes, GFP_NOIO | __GFP_ZERO);
if (!new_pages)
return NULL;
}
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index aae99a2d7bd4..14345a87c7cc 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -1570,34 +1570,6 @@ extern void drbd_set_recv_tcq(struct drbd_device *device, int tcq_enabled);
extern void _drbd_clear_done_ee(struct drbd_device *device, struct list_head *to_be_freed);
extern int drbd_connected(struct drbd_peer_device *);
-static inline void drbd_tcp_cork(struct socket *sock)
-{
- int val = 1;
- (void) kernel_setsockopt(sock, SOL_TCP, TCP_CORK,
- (char*)&val, sizeof(val));
-}
-
-static inline void drbd_tcp_uncork(struct socket *sock)
-{
- int val = 0;
- (void) kernel_setsockopt(sock, SOL_TCP, TCP_CORK,
- (char*)&val, sizeof(val));
-}
-
-static inline void drbd_tcp_nodelay(struct socket *sock)
-{
- int val = 1;
- (void) kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY,
- (char*)&val, sizeof(val));
-}
-
-static inline void drbd_tcp_quickack(struct socket *sock)
-{
- int val = 2;
- (void) kernel_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
- (char*)&val, sizeof(val));
-}
-
/* sets the number of 512 byte sectors of our virtual device */
void drbd_set_my_capacity(struct drbd_device *device, sector_t size);
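The removed drbd_tcp_*() wrappers above were thin kernel_setsockopt() shims; the call sites in the following files switch to the new tcp_sock_set_cork(), tcp_sock_set_nodelay() and tcp_sock_set_quickack() helpers, which operate on the struct sock directly. A hedged sketch of the corked-send pattern with the new API (foo_queue_pending_packets() is hypothetical):

static void foo_send_batch(struct socket *sock)
{
        tcp_sock_set_cork(sock->sk, true);      /* coalesce the writes below */
        foo_queue_pending_packets(sock);        /* e.g. a series of kernel_sendmsg() calls */
        tcp_sock_set_cork(sock->sk, false);     /* push the batched frames out */
}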
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index c094c3c2c5d4..45fbd526c453 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -660,7 +660,7 @@ static int __send_command(struct drbd_connection *connection, int vnr,
/* DRBD protocol "pings" are latency critical.
* This is supposed to trigger tcp_push_pending_frames() */
if (!err && (cmd == P_PING || cmd == P_PING_ACK))
- drbd_tcp_nodelay(sock->socket);
+ tcp_sock_set_nodelay(sock->socket->sk);
return err;
}
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index c15e7083b13a..3a3f2b6a821f 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1051,8 +1051,8 @@ randomize:
/* we don't want delays.
* we use TCP_CORK where appropriate, though */
- drbd_tcp_nodelay(sock.socket);
- drbd_tcp_nodelay(msock.socket);
+ tcp_sock_set_nodelay(sock.socket->sk);
+ tcp_sock_set_nodelay(msock.socket->sk);
connection->data.socket = sock.socket;
connection->meta.socket = msock.socket;
@@ -1223,7 +1223,7 @@ static int drbd_recv_header_maybe_unplug(struct drbd_connection *connection, str
* quickly as possible, and let remote TCP know what we have
* received so far. */
if (err == -EAGAIN) {
- drbd_tcp_quickack(connection->data.socket);
+ tcp_sock_set_quickack(connection->data.socket->sk, 2);
drbd_unplug_all_devices(connection);
}
if (err > 0) {
@@ -4959,8 +4959,7 @@ static int receive_UnplugRemote(struct drbd_connection *connection, struct packe
{
/* Make sure we've acked all the TCP data associated
* with the data requests being unplugged */
- drbd_tcp_quickack(connection->data.socket);
-
+ tcp_sock_set_quickack(connection->data.socket->sk, 2);
return 0;
}
@@ -6162,7 +6161,7 @@ void drbd_send_acks_wf(struct work_struct *ws)
rcu_read_unlock();
if (tcp_cork)
- drbd_tcp_cork(connection->meta.socket);
+ tcp_sock_set_cork(connection->meta.socket->sk, true);
err = drbd_finish_peer_reqs(device);
kref_put(&device->kref, drbd_destroy_device);
@@ -6175,7 +6174,7 @@ void drbd_send_acks_wf(struct work_struct *ws)
}
if (tcp_cork)
- drbd_tcp_uncork(connection->meta.socket);
+ tcp_sock_set_cork(connection->meta.socket->sk, false);
return;
}
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 840c3aef3c5c..c80a2f1c3c2a 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -21,24 +21,6 @@
static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector, int size);
-/* Update disk stats at start of I/O request */
-static void _drbd_start_io_acct(struct drbd_device *device, struct drbd_request *req)
-{
- struct request_queue *q = device->rq_queue;
-
- generic_start_io_acct(q, bio_op(req->master_bio),
- req->i.size >> 9, &device->vdisk->part0);
-}
-
-/* Update disk stats when completing request upwards */
-static void _drbd_end_io_acct(struct drbd_device *device, struct drbd_request *req)
-{
- struct request_queue *q = device->rq_queue;
-
- generic_end_io_acct(q, bio_op(req->master_bio),
- &device->vdisk->part0, req->start_jif);
-}
-
static struct drbd_request *drbd_req_new(struct drbd_device *device, struct bio *bio_src)
{
struct drbd_request *req;
@@ -263,7 +245,7 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
start_new_tl_epoch(first_peer_device(device)->connection);
/* Update disk stats */
- _drbd_end_io_acct(device, req);
+ bio_end_io_acct(req->master_bio, req->start_jif);
/* If READ failed,
* have it be pushed back to the retry work queue,
@@ -1222,16 +1204,15 @@ drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long
bio_endio(bio);
return ERR_PTR(-ENOMEM);
}
- req->start_jif = start_jif;
+
+ /* Update disk stats */
+ req->start_jif = bio_start_io_acct(req->master_bio);
if (!get_ldev(device)) {
bio_put(req->private_bio);
req->private_bio = NULL;
}
- /* Update disk stats */
- _drbd_start_io_acct(device, req);
-
/* process discards always from our submitter thread */
if (bio_op(bio) == REQ_OP_WRITE_ZEROES ||
bio_op(bio) == REQ_OP_DISCARD)
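The drbd_req.c hunks above drop drbd's private generic_start_io_acct()/generic_end_io_acct() wrappers in favour of the generic bio accounting pair: bio_start_io_acct() returns the start time when the bio enters the driver and bio_end_io_acct() closes the interval at completion. A hedged sketch of the pattern in a bio-based driver (the foo_request type is hypothetical):

struct foo_request {
        struct bio *bio;
        unsigned long start_jif;
};

static void foo_start(struct foo_request *req, struct bio *bio)
{
        req->bio = bio;
        req->start_jif = bio_start_io_acct(bio);        /* begins in-flight accounting */
}

static void foo_finish(struct foo_request *req)
{
        bio_end_io_acct(req->bio, req->start_jif);      /* records the service time */
        bio_endio(req->bio);
}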
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 0dc019da1f8d..2b89c9f2ca70 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -2098,7 +2098,7 @@ static void wait_for_work(struct drbd_connection *connection, struct list_head *
if (uncork) {
mutex_lock(&connection->data.mutex);
if (connection->data.socket)
- drbd_tcp_uncork(connection->data.socket);
+ tcp_sock_set_cork(connection->data.socket->sk, false);
mutex_unlock(&connection->data.mutex);
}
@@ -2153,9 +2153,9 @@ static void wait_for_work(struct drbd_connection *connection, struct list_head *
mutex_lock(&connection->data.mutex);
if (connection->data.socket) {
if (cork)
- drbd_tcp_cork(connection->data.socket);
+ tcp_sock_set_cork(connection->data.socket->sk, true);
else if (!uncork)
- drbd_tcp_uncork(connection->data.socket);
+ tcp_sock_set_cork(connection->data.socket->sk, false);
}
mutex_unlock(&connection->data.mutex);
}
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index c3daa64cb52c..3e9db22db2a8 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -337,8 +337,7 @@ static bool initialized;
/*
* globals used by 'result()'
*/
-#define MAX_REPLIES 16
-static unsigned char reply_buffer[MAX_REPLIES];
+static unsigned char reply_buffer[FD_RAW_REPLY_SIZE];
static int inr; /* size of reply buffer, when called from interrupt */
#define ST0 0
#define ST1 1
@@ -595,12 +594,12 @@ static unsigned char in_sector_offset; /* offset within physical sector,
static inline unsigned char fdc_inb(int fdc, int reg)
{
- return fd_inb(fdc_state[fdc].address + reg);
+ return fd_inb(fdc_state[fdc].address, reg);
}
static inline void fdc_outb(unsigned char value, int fdc, int reg)
{
- fd_outb(value, fdc_state[fdc].address + reg);
+ fd_outb(value, fdc_state[fdc].address, reg);
}
static inline bool drive_no_geom(int drive)
@@ -668,16 +667,12 @@ static struct output_log {
static int output_log_pos;
-#define current_reqD -1
#define MAXTIMEOUT -2
static void __reschedule_timeout(int drive, const char *message)
{
unsigned long delay;
- if (drive == current_reqD)
- drive = current_drive;
-
if (drive < 0 || drive >= N_DRIVE) {
delay = 20UL * HZ;
drive = 0;
@@ -827,59 +822,70 @@ static int set_dor(int fdc, char mask, char data)
return olddor;
}
-static void twaddle(void)
+static void twaddle(int fdc, int drive)
{
- if (drive_params[current_drive].select_delay)
+ if (drive_params[drive].select_delay)
return;
- fdc_outb(fdc_state[current_fdc].dor & ~(0x10 << UNIT(current_drive)),
- current_fdc, FD_DOR);
- fdc_outb(fdc_state[current_fdc].dor, current_fdc, FD_DOR);
- drive_state[current_drive].select_date = jiffies;
+ fdc_outb(fdc_state[fdc].dor & ~(0x10 << UNIT(drive)),
+ fdc, FD_DOR);
+ fdc_outb(fdc_state[fdc].dor, fdc, FD_DOR);
+ drive_state[drive].select_date = jiffies;
}
/*
- * Reset all driver information about the current fdc.
+ * Reset all driver information about the specified fdc.
* This is needed after a reset, and after a raw command.
*/
-static void reset_fdc_info(int mode)
+static void reset_fdc_info(int fdc, int mode)
{
int drive;
- fdc_state[current_fdc].spec1 = fdc_state[current_fdc].spec2 = -1;
- fdc_state[current_fdc].need_configure = 1;
- fdc_state[current_fdc].perp_mode = 1;
- fdc_state[current_fdc].rawcmd = 0;
+ fdc_state[fdc].spec1 = fdc_state[fdc].spec2 = -1;
+ fdc_state[fdc].need_configure = 1;
+ fdc_state[fdc].perp_mode = 1;
+ fdc_state[fdc].rawcmd = 0;
for (drive = 0; drive < N_DRIVE; drive++)
- if (FDC(drive) == current_fdc &&
+ if (FDC(drive) == fdc &&
(mode || drive_state[drive].track != NEED_1_RECAL))
drive_state[drive].track = NEED_2_RECAL;
}
-/* selects the fdc and drive, and enables the fdc's input/dma. */
+/*
+ * selects the fdc and drive, and enables the fdc's input/dma.
+ * Both current_drive and current_fdc are changed to match the new drive.
+ */
static void set_fdc(int drive)
{
- unsigned int new_fdc = current_fdc;
+ unsigned int fdc;
- if (drive >= 0 && drive < N_DRIVE) {
- new_fdc = FDC(drive);
- current_drive = drive;
+ if (drive < 0 || drive >= N_DRIVE) {
+ pr_info("bad drive value %d\n", drive);
+ return;
}
- if (new_fdc >= N_FDC) {
+
+ fdc = FDC(drive);
+ if (fdc >= N_FDC) {
pr_info("bad fdc value\n");
return;
}
- current_fdc = new_fdc;
- set_dor(current_fdc, ~0, 8);
+
+ set_dor(fdc, ~0, 8);
#if N_FDC > 1
- set_dor(1 - current_fdc, ~8, 0);
+ set_dor(1 - fdc, ~8, 0);
#endif
- if (fdc_state[current_fdc].rawcmd == 2)
- reset_fdc_info(1);
- if (fdc_inb(current_fdc, FD_STATUS) != STATUS_READY)
- fdc_state[current_fdc].reset = 1;
+ if (fdc_state[fdc].rawcmd == 2)
+ reset_fdc_info(fdc, 1);
+ if (fdc_inb(fdc, FD_STATUS) != STATUS_READY)
+ fdc_state[fdc].reset = 1;
+
+ current_drive = drive;
+ current_fdc = fdc;
}
-/* locks the driver */
+/*
+ * locks the driver.
+ * Both current_drive and current_fdc are changed to match the new drive.
+ */
static int lock_fdc(int drive)
{
if (WARN(atomic_read(&usage_count) == 0,
@@ -1062,12 +1068,9 @@ static void setup_DMA(void)
unsigned long f;
if (raw_cmd->length == 0) {
- int i;
-
- pr_info("zero dma transfer size:");
- for (i = 0; i < raw_cmd->cmd_count; i++)
- pr_cont("%x,", raw_cmd->cmd[i]);
- pr_cont("\n");
+ print_hex_dump(KERN_INFO, "zero dma transfer size: ",
+ DUMP_PREFIX_NONE, 16, 1,
+ raw_cmd->fullcmd, raw_cmd->cmd_count, false);
cont->done(0);
fdc_state[current_fdc].reset = 1;
return;
@@ -1104,62 +1107,62 @@ static void setup_DMA(void)
#endif
}
-static void show_floppy(void);
+static void show_floppy(int fdc);
/* waits until the fdc becomes ready */
-static int wait_til_ready(void)
+static int wait_til_ready(int fdc)
{
int status;
int counter;
- if (fdc_state[current_fdc].reset)
+ if (fdc_state[fdc].reset)
return -1;
for (counter = 0; counter < 10000; counter++) {
- status = fdc_inb(current_fdc, FD_STATUS);
+ status = fdc_inb(fdc, FD_STATUS);
if (status & STATUS_READY)
return status;
}
if (initialized) {
- DPRINT("Getstatus times out (%x) on fdc %d\n", status, current_fdc);
- show_floppy();
+ DPRINT("Getstatus times out (%x) on fdc %d\n", status, fdc);
+ show_floppy(fdc);
}
- fdc_state[current_fdc].reset = 1;
+ fdc_state[fdc].reset = 1;
return -1;
}
/* sends a command byte to the fdc */
-static int output_byte(char byte)
+static int output_byte(int fdc, char byte)
{
- int status = wait_til_ready();
+ int status = wait_til_ready(fdc);
if (status < 0)
return -1;
if (is_ready_state(status)) {
- fdc_outb(byte, current_fdc, FD_DATA);
+ fdc_outb(byte, fdc, FD_DATA);
output_log[output_log_pos].data = byte;
output_log[output_log_pos].status = status;
output_log[output_log_pos].jiffies = jiffies;
output_log_pos = (output_log_pos + 1) % OLOGSIZE;
return 0;
}
- fdc_state[current_fdc].reset = 1;
+ fdc_state[fdc].reset = 1;
if (initialized) {
DPRINT("Unable to send byte %x to FDC. Fdc=%x Status=%x\n",
- byte, current_fdc, status);
- show_floppy();
+ byte, fdc, status);
+ show_floppy(fdc);
}
return -1;
}
/* gets the response from the fdc */
-static int result(void)
+static int result(int fdc)
{
int i;
int status = 0;
- for (i = 0; i < MAX_REPLIES; i++) {
- status = wait_til_ready();
+ for (i = 0; i < FD_RAW_REPLY_SIZE; i++) {
+ status = wait_til_ready(fdc);
if (status < 0)
break;
status &= STATUS_DIR | STATUS_READY | STATUS_BUSY | STATUS_DMA;
@@ -1169,24 +1172,24 @@ static int result(void)
return i;
}
if (status == (STATUS_DIR | STATUS_READY | STATUS_BUSY))
- reply_buffer[i] = fdc_inb(current_fdc, FD_DATA);
+ reply_buffer[i] = fdc_inb(fdc, FD_DATA);
else
break;
}
if (initialized) {
DPRINT("get result error. Fdc=%d Last status=%x Read bytes=%d\n",
- current_fdc, status, i);
- show_floppy();
+ fdc, status, i);
+ show_floppy(fdc);
}
- fdc_state[current_fdc].reset = 1;
+ fdc_state[fdc].reset = 1;
return -1;
}
#define MORE_OUTPUT -2
/* does the fdc need more output? */
-static int need_more_output(void)
+static int need_more_output(int fdc)
{
- int status = wait_til_ready();
+ int status = wait_til_ready(fdc);
if (status < 0)
return -1;
@@ -1194,13 +1197,13 @@ static int need_more_output(void)
if (is_ready_state(status))
return MORE_OUTPUT;
- return result();
+ return result(fdc);
}
/* Set perpendicular mode as required, based on data rate, if supported.
* 82077 Now tested. 1Mbps data rate only possible with 82077-1.
*/
-static void perpendicular_mode(void)
+static void perpendicular_mode(int fdc)
{
unsigned char perp_mode;
@@ -1215,7 +1218,7 @@ static void perpendicular_mode(void)
default:
DPRINT("Invalid data rate for perpendicular mode!\n");
cont->done(0);
- fdc_state[current_fdc].reset = 1;
+ fdc_state[fdc].reset = 1;
/*
* convenient way to return to
* redo without too much hassle
@@ -1226,12 +1229,12 @@ static void perpendicular_mode(void)
} else
perp_mode = 0;
- if (fdc_state[current_fdc].perp_mode == perp_mode)
+ if (fdc_state[fdc].perp_mode == perp_mode)
return;
- if (fdc_state[current_fdc].version >= FDC_82077_ORIG) {
- output_byte(FD_PERPENDICULAR);
- output_byte(perp_mode);
- fdc_state[current_fdc].perp_mode = perp_mode;
+ if (fdc_state[fdc].version >= FDC_82077_ORIG) {
+ output_byte(fdc, FD_PERPENDICULAR);
+ output_byte(fdc, perp_mode);
+ fdc_state[fdc].perp_mode = perp_mode;
} else if (perp_mode) {
DPRINT("perpendicular mode not supported by this FDC.\n");
}
@@ -1240,16 +1243,15 @@ static void perpendicular_mode(void)
static int fifo_depth = 0xa;
static int no_fifo;
-static int fdc_configure(void)
+static int fdc_configure(int fdc)
{
/* Turn on FIFO */
- output_byte(FD_CONFIGURE);
- if (need_more_output() != MORE_OUTPUT)
+ output_byte(fdc, FD_CONFIGURE);
+ if (need_more_output(fdc) != MORE_OUTPUT)
return 0;
- output_byte(0);
- output_byte(0x10 | (no_fifo & 0x20) | (fifo_depth & 0xf));
- output_byte(0); /* pre-compensation from track
- 0 upwards */
+ output_byte(fdc, 0);
+ output_byte(fdc, 0x10 | (no_fifo & 0x20) | (fifo_depth & 0xf));
+ output_byte(fdc, 0); /* pre-compensation from track 0 upwards */
return 1;
}
@@ -1274,7 +1276,7 @@ static int fdc_configure(void)
*
* These values are rounded up to the next highest available delay time.
*/
-static void fdc_specify(void)
+static void fdc_specify(int fdc, int drive)
{
unsigned char spec1;
unsigned char spec2;
@@ -1286,10 +1288,10 @@ static void fdc_specify(void)
int hlt_max_code = 0x7f;
int hut_max_code = 0xf;
- if (fdc_state[current_fdc].need_configure &&
- fdc_state[current_fdc].version >= FDC_82072A) {
- fdc_configure();
- fdc_state[current_fdc].need_configure = 0;
+ if (fdc_state[fdc].need_configure &&
+ fdc_state[fdc].version >= FDC_82072A) {
+ fdc_configure(fdc);
+ fdc_state[fdc].need_configure = 0;
}
switch (raw_cmd->rate & 0x03) {
@@ -1298,13 +1300,13 @@ static void fdc_specify(void)
break;
case 1:
dtr = 300;
- if (fdc_state[current_fdc].version >= FDC_82078) {
+ if (fdc_state[fdc].version >= FDC_82078) {
/* chose the default rate table, not the one
* where 1 = 2 Mbps */
- output_byte(FD_DRIVESPEC);
- if (need_more_output() == MORE_OUTPUT) {
- output_byte(UNIT(current_drive));
- output_byte(0xc0);
+ output_byte(fdc, FD_DRIVESPEC);
+ if (need_more_output(fdc) == MORE_OUTPUT) {
+ output_byte(fdc, UNIT(drive));
+ output_byte(fdc, 0xc0);
}
}
break;
@@ -1313,14 +1315,14 @@ static void fdc_specify(void)
break;
}
- if (fdc_state[current_fdc].version >= FDC_82072) {
+ if (fdc_state[fdc].version >= FDC_82072) {
scale_dtr = dtr;
hlt_max_code = 0x00; /* 0==256msec*dtr0/dtr (not linear!) */
hut_max_code = 0x0; /* 0==256msec*dtr0/dtr (not linear!) */
}
/* Convert step rate from microseconds to milliseconds and 4 bits */
- srt = 16 - DIV_ROUND_UP(drive_params[current_drive].srt * scale_dtr / 1000,
+ srt = 16 - DIV_ROUND_UP(drive_params[drive].srt * scale_dtr / 1000,
NOMINAL_DTR);
if (slow_floppy)
srt = srt / 4;
@@ -1328,14 +1330,14 @@ static void fdc_specify(void)
SUPBOUND(srt, 0xf);
INFBOUND(srt, 0);
- hlt = DIV_ROUND_UP(drive_params[current_drive].hlt * scale_dtr / 2,
+ hlt = DIV_ROUND_UP(drive_params[drive].hlt * scale_dtr / 2,
NOMINAL_DTR);
if (hlt < 0x01)
hlt = 0x01;
else if (hlt > 0x7f)
hlt = hlt_max_code;
- hut = DIV_ROUND_UP(drive_params[current_drive].hut * scale_dtr / 16,
+ hut = DIV_ROUND_UP(drive_params[drive].hut * scale_dtr / 16,
NOMINAL_DTR);
if (hut < 0x1)
hut = 0x1;
@@ -1346,12 +1348,12 @@ static void fdc_specify(void)
spec2 = (hlt << 1) | (use_virtual_dma & 1);
/* If these parameters did not change, just return with success */
- if (fdc_state[current_fdc].spec1 != spec1 ||
- fdc_state[current_fdc].spec2 != spec2) {
+ if (fdc_state[fdc].spec1 != spec1 ||
+ fdc_state[fdc].spec2 != spec2) {
/* Go ahead and set spec1 and spec2 */
- output_byte(FD_SPECIFY);
- output_byte(fdc_state[current_fdc].spec1 = spec1);
- output_byte(fdc_state[current_fdc].spec2 = spec2);
+ output_byte(fdc, FD_SPECIFY);
+ output_byte(fdc, fdc_state[fdc].spec1 = spec1);
+ output_byte(fdc, fdc_state[fdc].spec2 = spec2);
}
} /* fdc_specify */
@@ -1513,7 +1515,7 @@ static void setup_rw_floppy(void)
r = 0;
for (i = 0; i < raw_cmd->cmd_count; i++)
- r |= output_byte(raw_cmd->cmd[i]);
+ r |= output_byte(current_fdc, raw_cmd->fullcmd[i]);
debugt(__func__, "rw_command");
@@ -1524,7 +1526,7 @@ static void setup_rw_floppy(void)
}
if (!(flags & FD_RAW_INTR)) {
- inr = result();
+ inr = result(current_fdc);
cont->interrupt();
} else if (flags & FD_RAW_NEED_DISK)
fd_watchdog();
@@ -1562,29 +1564,29 @@ static void seek_interrupt(void)
floppy_ready();
}
-static void check_wp(void)
+static void check_wp(int fdc, int drive)
{
- if (test_bit(FD_VERIFY_BIT, &drive_state[current_drive].flags)) {
+ if (test_bit(FD_VERIFY_BIT, &drive_state[drive].flags)) {
/* check write protection */
- output_byte(FD_GETSTATUS);
- output_byte(UNIT(current_drive));
- if (result() != 1) {
- fdc_state[current_fdc].reset = 1;
+ output_byte(fdc, FD_GETSTATUS);
+ output_byte(fdc, UNIT(drive));
+ if (result(fdc) != 1) {
+ fdc_state[fdc].reset = 1;
return;
}
- clear_bit(FD_VERIFY_BIT, &drive_state[current_drive].flags);
+ clear_bit(FD_VERIFY_BIT, &drive_state[drive].flags);
clear_bit(FD_NEED_TWADDLE_BIT,
- &drive_state[current_drive].flags);
- debug_dcl(drive_params[current_drive].flags,
+ &drive_state[drive].flags);
+ debug_dcl(drive_params[drive].flags,
"checking whether disk is write protected\n");
- debug_dcl(drive_params[current_drive].flags, "wp=%x\n",
+ debug_dcl(drive_params[drive].flags, "wp=%x\n",
reply_buffer[ST3] & 0x40);
if (!(reply_buffer[ST3] & 0x40))
set_bit(FD_DISK_WRITABLE_BIT,
- &drive_state[current_drive].flags);
+ &drive_state[drive].flags);
else
clear_bit(FD_DISK_WRITABLE_BIT,
- &drive_state[current_drive].flags);
+ &drive_state[drive].flags);
}
}
@@ -1628,7 +1630,7 @@ static void seek_floppy(void)
track = 1;
}
} else {
- check_wp();
+ check_wp(current_fdc, current_drive);
if (raw_cmd->track != drive_state[current_drive].track &&
(raw_cmd->flags & FD_RAW_NEED_SEEK))
track = raw_cmd->track;
@@ -1639,9 +1641,9 @@ static void seek_floppy(void)
}
do_floppy = seek_interrupt;
- output_byte(FD_SEEK);
- output_byte(UNIT(current_drive));
- if (output_byte(track) < 0) {
+ output_byte(current_fdc, FD_SEEK);
+ output_byte(current_fdc, UNIT(current_drive));
+ if (output_byte(current_fdc, track) < 0) {
reset_fdc();
return;
}
@@ -1742,14 +1744,14 @@ irqreturn_t floppy_interrupt(int irq, void *dev_id)
do_print = !handler && print_unex && initialized;
- inr = result();
+ inr = result(current_fdc);
if (do_print)
print_result("unexpected interrupt", inr);
if (inr == 0) {
int max_sensei = 4;
do {
- output_byte(FD_SENSEI);
- inr = result();
+ output_byte(current_fdc, FD_SENSEI);
+ inr = result(current_fdc);
if (do_print)
print_result("sensei", inr);
max_sensei--;
@@ -1771,8 +1773,8 @@ static void recalibrate_floppy(void)
{
debugt(__func__, "");
do_floppy = recal_interrupt;
- output_byte(FD_RECALIBRATE);
- if (output_byte(UNIT(current_drive)) < 0)
+ output_byte(current_fdc, FD_RECALIBRATE);
+ if (output_byte(current_fdc, UNIT(current_drive)) < 0)
reset_fdc();
}
@@ -1782,7 +1784,7 @@ static void recalibrate_floppy(void)
static void reset_interrupt(void)
{
debugt(__func__, "");
- result(); /* get the status ready for set_fdc */
+ result(current_fdc); /* get the status ready for set_fdc */
if (fdc_state[current_fdc].reset) {
pr_info("reset set in interrupt, calling %ps\n", cont->error);
cont->error(); /* a reset just after a reset. BAD! */
@@ -1792,7 +1794,9 @@ static void reset_interrupt(void)
/*
* reset is done by pulling bit 2 of DOR low for a while (old FDCs),
- * or by setting the self clearing bit 7 of STATUS (newer FDCs)
+ * or by setting the self clearing bit 7 of STATUS (newer FDCs).
+ * This WILL trigger an interrupt, causing the handlers in the current
+ * cont's ->redo() to be called via reset_interrupt().
*/
static void reset_fdc(void)
{
@@ -1800,7 +1804,7 @@ static void reset_fdc(void)
do_floppy = reset_interrupt;
fdc_state[current_fdc].reset = 0;
- reset_fdc_info(0);
+ reset_fdc_info(current_fdc, 0);
/* Pseudo-DMA may intercept 'reset finished' interrupt. */
/* Irrelevant for systems with true DMA (i386). */
@@ -1819,7 +1823,7 @@ static void reset_fdc(void)
}
}
-static void show_floppy(void)
+static void show_floppy(int fdc)
{
int i;
@@ -1842,7 +1846,7 @@ static void show_floppy(void)
print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 1,
reply_buffer, resultsize, true);
- pr_info("status=%x\n", fdc_inb(current_fdc, FD_STATUS));
+ pr_info("status=%x\n", fdc_inb(fdc, FD_STATUS));
pr_info("fdc_busy=%lu\n", fdc_busy);
if (do_floppy)
pr_info("do_floppy=%ps\n", do_floppy);
@@ -1868,7 +1872,7 @@ static void floppy_shutdown(struct work_struct *arg)
unsigned long flags;
if (initialized)
- show_floppy();
+ show_floppy(current_fdc);
cancel_activity();
flags = claim_dma_lock();
@@ -1934,7 +1938,7 @@ static void floppy_ready(void)
"calling disk change from floppy_ready\n");
if (!(raw_cmd->flags & FD_RAW_NO_MOTOR) &&
disk_change(current_drive) && !drive_params[current_drive].select_delay)
- twaddle(); /* this clears the dcl on certain
+ twaddle(current_fdc, current_drive); /* this clears the dcl on certain
* drive/controller combinations */
#ifdef fd_chose_dma_mode
@@ -1946,20 +1950,20 @@ static void floppy_ready(void)
#endif
if (raw_cmd->flags & (FD_RAW_NEED_SEEK | FD_RAW_NEED_DISK)) {
- perpendicular_mode();
- fdc_specify(); /* must be done here because of hut, hlt ... */
+ perpendicular_mode(current_fdc);
+ fdc_specify(current_fdc, current_drive); /* must be done here because of hut, hlt ... */
seek_floppy();
} else {
if ((raw_cmd->flags & FD_RAW_READ) ||
(raw_cmd->flags & FD_RAW_WRITE))
- fdc_specify();
+ fdc_specify(current_fdc, current_drive);
setup_rw_floppy();
}
}
static void floppy_start(void)
{
- reschedule_timeout(current_reqD, "floppy start");
+ reschedule_timeout(current_drive, "floppy start");
scandrives();
debug_dcl(drive_params[current_drive].flags,
@@ -2004,6 +2008,9 @@ static const struct cont_t intr_cont = {
.done = (done_f)empty
};
+/* schedules handler, waiting for completion. May be interrupted, will then
+ * return -EINTR, in which case the driver will automatically be unlocked.
+ */
static int wait_til_done(void (*handler)(void), bool interruptible)
{
int ret;
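The comment added above documents the locking contract of wait_til_done(): on -EINTR the driver has already been unlocked, otherwise the caller finishes through the request path. The same pattern appears in the user_reset_fdc() and FDTWADDLE hunks further down; roughly (the continuation name is chosen for illustration):

	if (lock_fdc(drive))			/* takes fdc_busy, -EINTR if interrupted */
		return -EINTR;
	cont = &wakeup_cont;			/* continuation invoked from interrupt context */
	ret = wait_til_done(floppy_start, true);
	if (ret == -EINTR)
		return ret;			/* driver already unlocked for us */
	process_fd_request();			/* otherwise finish and unlock normally */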
@@ -2059,18 +2066,19 @@ static void success_and_wakeup(void)
* ==========================
*/
-static int next_valid_format(void)
+static int next_valid_format(int drive)
{
int probed_format;
- probed_format = drive_state[current_drive].probed_format;
+ probed_format = drive_state[drive].probed_format;
while (1) {
- if (probed_format >= 8 || !drive_params[current_drive].autodetect[probed_format]) {
- drive_state[current_drive].probed_format = 0;
+ if (probed_format >= FD_AUTODETECT_SIZE ||
+ !drive_params[drive].autodetect[probed_format]) {
+ drive_state[drive].probed_format = 0;
return 1;
}
- if (floppy_type[drive_params[current_drive].autodetect[probed_format]].sect) {
- drive_state[current_drive].probed_format = probed_format;
+ if (floppy_type[drive_params[drive].autodetect[probed_format]].sect) {
+ drive_state[drive].probed_format = probed_format;
return 0;
}
probed_format++;
@@ -2083,7 +2091,7 @@ static void bad_flp_intr(void)
if (probing) {
drive_state[current_drive].probed_format++;
- if (!next_valid_format())
+ if (!next_valid_format(current_drive))
return;
}
err_count = ++(*errors);
@@ -2843,6 +2851,9 @@ static int set_next_request(void)
return current_req != NULL;
}
+/* Starts or continues processing request. Will automatically unlock the
+ * driver at end of request.
+ */
static void redo_fd_request(void)
{
int drive;
@@ -2867,7 +2878,7 @@ do_request:
}
drive = (long)current_req->rq_disk->private_data;
set_fdc(drive);
- reschedule_timeout(current_reqD, "redo fd request");
+ reschedule_timeout(current_drive, "redo fd request");
set_floppy(drive);
raw_cmd = &default_raw_cmd;
@@ -2885,7 +2896,7 @@ do_request:
if (!_floppy) { /* Autodetection */
if (!probing) {
drive_state[current_drive].probed_format = 0;
- if (next_valid_format()) {
+ if (next_valid_format(current_drive)) {
DPRINT("no autodetectable formats\n");
_floppy = NULL;
request_done(0);
@@ -2904,7 +2915,7 @@ do_request:
}
if (test_bit(FD_NEED_TWADDLE_BIT, &drive_state[current_drive].flags))
- twaddle();
+ twaddle(current_fdc, current_drive);
schedule_bh(floppy_start);
debugt(__func__, "queue fd request");
return;
@@ -2917,6 +2928,7 @@ static const struct cont_t rw_cont = {
.done = request_done
};
+/* schedule the request and automatically unlock the driver on completion */
static void process_fd_request(void)
{
cont = &rw_cont;
@@ -2938,17 +2950,17 @@ static blk_status_t floppy_queue_rq(struct blk_mq_hw_ctx *hctx,
(unsigned long long) current_req->cmd_flags))
return BLK_STS_IOERR;
- spin_lock_irq(&floppy_lock);
- list_add_tail(&bd->rq->queuelist, &floppy_reqs);
- spin_unlock_irq(&floppy_lock);
-
if (test_and_set_bit(0, &fdc_busy)) {
/* fdc busy, this new request will be treated when the
current one is done */
is_alive(__func__, "old request running");
- return BLK_STS_OK;
+ return BLK_STS_RESOURCE;
}
+ spin_lock_irq(&floppy_lock);
+ list_add_tail(&bd->rq->queuelist, &floppy_reqs);
+ spin_unlock_irq(&floppy_lock);
+
command_status = FD_COMMAND_NONE;
__reschedule_timeout(MAXTIMEOUT, "fd_request");
set_fdc(0);
@@ -2996,6 +3008,10 @@ static const struct cont_t reset_cont = {
.done = generic_done
};
+/*
+ * Resets the FDC connected to drive <drive>.
+ * Both current_drive and current_fdc are changed to match the new drive.
+ */
static int user_reset_fdc(int drive, int arg, bool interruptible)
{
int ret;
@@ -3006,6 +3022,9 @@ static int user_reset_fdc(int drive, int arg, bool interruptible)
if (arg == FD_RESET_ALWAYS)
fdc_state[current_fdc].reset = 1;
if (fdc_state[current_fdc].reset) {
+ /* note: reset_fdc will take care of unlocking the driver
+ * on completion.
+ */
cont = &reset_cont;
ret = wait_til_done(reset_fdc, interruptible);
if (ret == -EINTR)
@@ -3059,7 +3078,7 @@ static void raw_cmd_done(int flag)
raw_cmd->flags |= FD_RAW_HARDFAILURE;
} else {
raw_cmd->reply_count = inr;
- if (raw_cmd->reply_count > MAX_REPLIES)
+ if (raw_cmd->reply_count > FD_RAW_REPLY_SIZE)
raw_cmd->reply_count = 0;
for (i = 0; i < raw_cmd->reply_count; i++)
raw_cmd->reply[i] = reply_buffer[i];
@@ -3170,18 +3189,10 @@ loop:
if (ret)
return -EFAULT;
param += sizeof(struct floppy_raw_cmd);
- if (ptr->cmd_count > 33)
- /* the command may now also take up the space
- * initially intended for the reply & the
- * reply count. Needed for long 82078 commands
- * such as RESTORE, which takes ... 17 command
- * bytes. Murphy's law #137: When you reserve
- * 16 bytes for a structure, you'll one day
- * discover that you really need 17...
- */
+ if (ptr->cmd_count > FD_RAW_CMD_FULLSIZE)
return -EINVAL;
- for (i = 0; i < 16; i++)
+ for (i = 0; i < FD_RAW_REPLY_SIZE; i++)
ptr->reply[i] = 0;
ptr->resultcode = 0;
@@ -3423,13 +3434,13 @@ static int fd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
return 0;
}
-static bool valid_floppy_drive_params(const short autodetect[8],
+static bool valid_floppy_drive_params(const short autodetect[FD_AUTODETECT_SIZE],
int native_format)
{
size_t floppy_type_size = ARRAY_SIZE(floppy_type);
size_t i = 0;
- for (i = 0; i < 8; ++i) {
+ for (i = 0; i < FD_AUTODETECT_SIZE; ++i) {
if (autodetect[i] < 0 ||
autodetect[i] >= floppy_type_size)
return false;
@@ -3610,7 +3621,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
case FDTWADDLE:
if (lock_fdc(drive))
return -EINTR;
- twaddle();
+ twaddle(current_fdc, current_drive);
process_fd_request();
return 0;
default:
@@ -3654,7 +3665,7 @@ struct compat_floppy_drive_params {
struct floppy_max_errors max_errors;
char flags;
char read_track;
- short autodetect[8];
+ short autodetect[FD_AUTODETECT_SIZE];
compat_int_t checkfreq;
compat_int_t native_format;
};
@@ -4298,79 +4309,79 @@ static const struct block_device_operations floppy_fops = {
/* Determine the floppy disk controller type */
/* This routine was written by David C. Niemi */
-static char __init get_fdc_version(void)
+static char __init get_fdc_version(int fdc)
{
int r;
- output_byte(FD_DUMPREGS); /* 82072 and better know DUMPREGS */
- if (fdc_state[current_fdc].reset)
+ output_byte(fdc, FD_DUMPREGS); /* 82072 and better know DUMPREGS */
+ if (fdc_state[fdc].reset)
return FDC_NONE;
- r = result();
+ r = result(fdc);
if (r <= 0x00)
return FDC_NONE; /* No FDC present ??? */
if ((r == 1) && (reply_buffer[0] == 0x80)) {
- pr_info("FDC %d is an 8272A\n", current_fdc);
+ pr_info("FDC %d is an 8272A\n", fdc);
return FDC_8272A; /* 8272a/765 don't know DUMPREGS */
}
if (r != 10) {
pr_info("FDC %d init: DUMPREGS: unexpected return of %d bytes.\n",
- current_fdc, r);
+ fdc, r);
return FDC_UNKNOWN;
}
- if (!fdc_configure()) {
- pr_info("FDC %d is an 82072\n", current_fdc);
+ if (!fdc_configure(fdc)) {
+ pr_info("FDC %d is an 82072\n", fdc);
return FDC_82072; /* 82072 doesn't know CONFIGURE */
}
- output_byte(FD_PERPENDICULAR);
- if (need_more_output() == MORE_OUTPUT) {
- output_byte(0);
+ output_byte(fdc, FD_PERPENDICULAR);
+ if (need_more_output(fdc) == MORE_OUTPUT) {
+ output_byte(fdc, 0);
} else {
- pr_info("FDC %d is an 82072A\n", current_fdc);
+ pr_info("FDC %d is an 82072A\n", fdc);
return FDC_82072A; /* 82072A as found on Sparcs. */
}
- output_byte(FD_UNLOCK);
- r = result();
+ output_byte(fdc, FD_UNLOCK);
+ r = result(fdc);
if ((r == 1) && (reply_buffer[0] == 0x80)) {
- pr_info("FDC %d is a pre-1991 82077\n", current_fdc);
+ pr_info("FDC %d is a pre-1991 82077\n", fdc);
return FDC_82077_ORIG; /* Pre-1991 82077, doesn't know
* LOCK/UNLOCK */
}
if ((r != 1) || (reply_buffer[0] != 0x00)) {
pr_info("FDC %d init: UNLOCK: unexpected return of %d bytes.\n",
- current_fdc, r);
+ fdc, r);
return FDC_UNKNOWN;
}
- output_byte(FD_PARTID);
- r = result();
+ output_byte(fdc, FD_PARTID);
+ r = result(fdc);
if (r != 1) {
pr_info("FDC %d init: PARTID: unexpected return of %d bytes.\n",
- current_fdc, r);
+ fdc, r);
return FDC_UNKNOWN;
}
if (reply_buffer[0] == 0x80) {
- pr_info("FDC %d is a post-1991 82077\n", current_fdc);
+ pr_info("FDC %d is a post-1991 82077\n", fdc);
return FDC_82077; /* Revised 82077AA passes all the tests */
}
switch (reply_buffer[0] >> 5) {
case 0x0:
/* Either a 82078-1 or a 82078SL running at 5Volt */
- pr_info("FDC %d is an 82078.\n", current_fdc);
+ pr_info("FDC %d is an 82078.\n", fdc);
return FDC_82078;
case 0x1:
- pr_info("FDC %d is a 44pin 82078\n", current_fdc);
+ pr_info("FDC %d is a 44pin 82078\n", fdc);
return FDC_82078;
case 0x2:
- pr_info("FDC %d is a S82078B\n", current_fdc);
+ pr_info("FDC %d is a S82078B\n", fdc);
return FDC_S82078B;
case 0x3:
- pr_info("FDC %d is a National Semiconductor PC87306\n", current_fdc);
+ pr_info("FDC %d is a National Semiconductor PC87306\n", fdc);
return FDC_87306;
default:
pr_info("FDC %d init: 82078 variant with unknown PARTID=%d.\n",
- current_fdc, reply_buffer[0] >> 5);
+ fdc, reply_buffer[0] >> 5);
return FDC_82078_UNKN;
}
} /* get_fdc_version */
@@ -4534,11 +4545,13 @@ static void floppy_device_release(struct device *dev)
static int floppy_resume(struct device *dev)
{
int fdc;
+ int saved_drive;
+ saved_drive = current_drive;
for (fdc = 0; fdc < N_FDC; fdc++)
if (fdc_state[fdc].address != -1)
- user_reset_fdc(-1, FD_RESET_ALWAYS, false);
-
+ user_reset_fdc(REVDRIVE(fdc, 0), FD_RESET_ALWAYS, false);
+ set_fdc(saved_drive);
return 0;
}
@@ -4646,16 +4659,15 @@ static int __init do_floppy_init(void)
config_types();
for (i = 0; i < N_FDC; i++) {
- current_fdc = i;
- memset(&fdc_state[current_fdc], 0, sizeof(*fdc_state));
- fdc_state[current_fdc].dtr = -1;
- fdc_state[current_fdc].dor = 0x4;
+ memset(&fdc_state[i], 0, sizeof(*fdc_state));
+ fdc_state[i].dtr = -1;
+ fdc_state[i].dor = 0x4;
#if defined(__sparc__) || defined(__mc68000__)
/*sparcs/sun3x don't have a DOR reset which we can fall back on to */
#ifdef __mc68000__
if (MACH_IS_SUN3X)
#endif
- fdc_state[current_fdc].version = FDC_82072A;
+ fdc_state[i].version = FDC_82072A;
#endif
}
@@ -4697,30 +4709,29 @@ static int __init do_floppy_init(void)
msleep(10);
for (i = 0; i < N_FDC; i++) {
- current_fdc = i;
- fdc_state[current_fdc].driver_version = FD_DRIVER_VERSION;
+ fdc_state[i].driver_version = FD_DRIVER_VERSION;
for (unit = 0; unit < 4; unit++)
- fdc_state[current_fdc].track[unit] = 0;
- if (fdc_state[current_fdc].address == -1)
+ fdc_state[i].track[unit] = 0;
+ if (fdc_state[i].address == -1)
continue;
- fdc_state[current_fdc].rawcmd = 2;
- if (user_reset_fdc(-1, FD_RESET_ALWAYS, false)) {
+ fdc_state[i].rawcmd = 2;
+ if (user_reset_fdc(REVDRIVE(i, 0), FD_RESET_ALWAYS, false)) {
/* free ioports reserved by floppy_grab_irq_and_dma() */
- floppy_release_regions(current_fdc);
- fdc_state[current_fdc].address = -1;
- fdc_state[current_fdc].version = FDC_NONE;
+ floppy_release_regions(i);
+ fdc_state[i].address = -1;
+ fdc_state[i].version = FDC_NONE;
continue;
}
/* Try to determine the floppy controller type */
- fdc_state[current_fdc].version = get_fdc_version();
- if (fdc_state[current_fdc].version == FDC_NONE) {
+ fdc_state[i].version = get_fdc_version(i);
+ if (fdc_state[i].version == FDC_NONE) {
/* free ioports reserved by floppy_grab_irq_and_dma() */
- floppy_release_regions(current_fdc);
- fdc_state[current_fdc].address = -1;
+ floppy_release_regions(i);
+ fdc_state[i].address = -1;
continue;
}
if (can_use_virtual_dma == 2 &&
- fdc_state[current_fdc].version < FDC_82072A)
+ fdc_state[i].version < FDC_82072A)
can_use_virtual_dma = 0;
have_no_fdc = 0;
@@ -4728,7 +4739,7 @@ static int __init do_floppy_init(void)
* properly, so force a reset for the standard FDC clones,
* to avoid interrupt garbage.
*/
- user_reset_fdc(-1, FD_RESET_ALWAYS, false);
+ user_reset_fdc(REVDRIVE(i, 0), FD_RESET_ALWAYS, false);
}
current_fdc = 0;
cancel_delayed_work(&fd_timeout);
@@ -4855,6 +4866,8 @@ static void floppy_release_regions(int fdc)
static int floppy_grab_irq_and_dma(void)
{
+ int fdc;
+
if (atomic_inc_return(&usage_count) > 1)
return 0;
@@ -4882,24 +4895,24 @@ static int floppy_grab_irq_and_dma(void)
}
}
- for (current_fdc = 0; current_fdc < N_FDC; current_fdc++) {
- if (fdc_state[current_fdc].address != -1) {
- if (floppy_request_regions(current_fdc))
+ for (fdc = 0; fdc < N_FDC; fdc++) {
+ if (fdc_state[fdc].address != -1) {
+ if (floppy_request_regions(fdc))
goto cleanup;
}
}
- for (current_fdc = 0; current_fdc < N_FDC; current_fdc++) {
- if (fdc_state[current_fdc].address != -1) {
- reset_fdc_info(1);
- fdc_outb(fdc_state[current_fdc].dor, current_fdc, FD_DOR);
+ for (fdc = 0; fdc < N_FDC; fdc++) {
+ if (fdc_state[fdc].address != -1) {
+ reset_fdc_info(fdc, 1);
+ fdc_outb(fdc_state[fdc].dor, fdc, FD_DOR);
}
}
- current_fdc = 0;
+
set_dor(0, ~0, 8); /* avoid immediate interrupt */
- for (current_fdc = 0; current_fdc < N_FDC; current_fdc++)
- if (fdc_state[current_fdc].address != -1)
- fdc_outb(fdc_state[current_fdc].dor, current_fdc, FD_DOR);
+ for (fdc = 0; fdc < N_FDC; fdc++)
+ if (fdc_state[fdc].address != -1)
+ fdc_outb(fdc_state[fdc].dor, fdc, FD_DOR);
/*
* The driver will try and free resources and relies on us
* to know if they were allocated or not.
@@ -4910,15 +4923,16 @@ static int floppy_grab_irq_and_dma(void)
cleanup:
fd_free_irq();
fd_free_dma();
- while (--current_fdc >= 0)
- floppy_release_regions(current_fdc);
+ while (--fdc >= 0)
+ floppy_release_regions(fdc);
+ current_fdc = 0;
atomic_dec(&usage_count);
return -1;
}
static void floppy_release_irq_and_dma(void)
{
- int old_fdc;
+ int fdc;
#ifndef __sparc__
int drive;
#endif
@@ -4959,11 +4973,9 @@ static void floppy_release_irq_and_dma(void)
pr_info("auxiliary floppy timer still active\n");
if (work_pending(&floppy_work))
pr_info("work still pending\n");
- old_fdc = current_fdc;
- for (current_fdc = 0; current_fdc < N_FDC; current_fdc++)
- if (fdc_state[current_fdc].address != -1)
- floppy_release_regions(current_fdc);
- current_fdc = old_fdc;
+ for (fdc = 0; fdc < N_FDC; fdc++)
+ if (fdc_state[fdc].address != -1)
+ floppy_release_regions(fdc);
}
#ifdef MODULE
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index da693e6a834e..2e96d8b8758b 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -228,26 +228,36 @@ static void __loop_update_dio(struct loop_device *lo, bool dio)
blk_mq_unfreeze_queue(lo->lo_queue);
}
+/**
+ * loop_validate_block_size() - validates the passed in block size
+ * @bsize: size to validate
+ */
static int
-figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit)
+loop_validate_block_size(unsigned short bsize)
{
- loff_t size = get_size(offset, sizelimit, lo->lo_backing_file);
- sector_t x = (sector_t)size;
- struct block_device *bdev = lo->lo_device;
+ if (bsize < 512 || bsize > PAGE_SIZE || !is_power_of_2(bsize))
+ return -EINVAL;
- if (unlikely((loff_t)x != size))
- return -EFBIG;
- if (lo->lo_offset != offset)
- lo->lo_offset = offset;
- if (lo->lo_sizelimit != sizelimit)
- lo->lo_sizelimit = sizelimit;
- set_capacity(lo->lo_disk, x);
- bd_set_size(bdev, (loff_t)get_capacity(bdev->bd_disk) << 9);
- /* let user-space know about the new size */
- kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
return 0;
}
+/**
+ * loop_set_size() - sets device size and notifies userspace
+ * @lo: struct loop_device to set the size for
+ * @size: new size of the loop device
+ *
+ * Callers must validate that the size passed into this function fits into
+ * a sector_t, eg using loop_validate_size()
+ */
+static void loop_set_size(struct loop_device *lo, loff_t size)
+{
+ struct block_device *bdev = lo->lo_device;
+
+ bd_set_size(bdev, size << SECTOR_SHIFT);
+
+ set_capacity_revalidate_and_notify(lo->lo_disk, size, false);
+}
+
static inline int
lo_do_transfer(struct loop_device *lo, int cmd,
struct page *rpage, unsigned roffs,
@@ -634,8 +644,8 @@ static int do_req_filebacked(struct loop_device *lo, struct request *rq)
static inline void loop_update_dio(struct loop_device *lo)
{
- __loop_update_dio(lo, io_is_direct(lo->lo_backing_file) |
- lo->use_dio);
+ __loop_update_dio(lo, (lo->lo_backing_file->f_flags & O_DIRECT) |
+ lo->use_dio);
}
static void loop_reread_partitions(struct loop_device *lo,
@@ -919,7 +929,7 @@ static void loop_unprepare_queue(struct loop_device *lo)
static int loop_kthread_worker_fn(void *worker_ptr)
{
- current->flags |= PF_LESS_THROTTLE | PF_MEMALLOC_NOIO;
+ current->flags |= PF_LOCAL_THROTTLE | PF_MEMALLOC_NOIO;
return kthread_worker_fn(worker_ptr);
}
@@ -952,23 +962,125 @@ static void loop_update_rotational(struct loop_device *lo)
blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
}
-static int loop_set_fd(struct loop_device *lo, fmode_t mode,
- struct block_device *bdev, unsigned int arg)
+static int
+loop_release_xfer(struct loop_device *lo)
+{
+ int err = 0;
+ struct loop_func_table *xfer = lo->lo_encryption;
+
+ if (xfer) {
+ if (xfer->release)
+ err = xfer->release(lo);
+ lo->transfer = NULL;
+ lo->lo_encryption = NULL;
+ module_put(xfer->owner);
+ }
+ return err;
+}
+
+static int
+loop_init_xfer(struct loop_device *lo, struct loop_func_table *xfer,
+ const struct loop_info64 *i)
+{
+ int err = 0;
+
+ if (xfer) {
+ struct module *owner = xfer->owner;
+
+ if (!try_module_get(owner))
+ return -EINVAL;
+ if (xfer->init)
+ err = xfer->init(lo, i);
+ if (err)
+ module_put(owner);
+ else
+ lo->lo_encryption = xfer;
+ }
+ return err;
+}
+
+/**
+ * loop_set_status_from_info - configure device from loop_info
+ * @lo: struct loop_device to configure
+ * @info: struct loop_info64 to configure the device with
+ *
+ * Configures the loop device parameters according to the passed
+ * in loop_info64 configuration.
+ */
+static int
+loop_set_status_from_info(struct loop_device *lo,
+ const struct loop_info64 *info)
+{
+ int err;
+ struct loop_func_table *xfer;
+ kuid_t uid = current_uid();
+
+ if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE)
+ return -EINVAL;
+
+ err = loop_release_xfer(lo);
+ if (err)
+ return err;
+
+ if (info->lo_encrypt_type) {
+ unsigned int type = info->lo_encrypt_type;
+
+ if (type >= MAX_LO_CRYPT)
+ return -EINVAL;
+ xfer = xfer_funcs[type];
+ if (xfer == NULL)
+ return -EINVAL;
+ } else
+ xfer = NULL;
+
+ err = loop_init_xfer(lo, xfer, info);
+ if (err)
+ return err;
+
+ lo->lo_offset = info->lo_offset;
+ lo->lo_sizelimit = info->lo_sizelimit;
+ memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE);
+ memcpy(lo->lo_crypt_name, info->lo_crypt_name, LO_NAME_SIZE);
+ lo->lo_file_name[LO_NAME_SIZE-1] = 0;
+ lo->lo_crypt_name[LO_NAME_SIZE-1] = 0;
+
+ if (!xfer)
+ xfer = &none_funcs;
+ lo->transfer = xfer->transfer;
+ lo->ioctl = xfer->ioctl;
+
+ lo->lo_flags = info->lo_flags;
+
+ lo->lo_encrypt_key_size = info->lo_encrypt_key_size;
+ lo->lo_init[0] = info->lo_init[0];
+ lo->lo_init[1] = info->lo_init[1];
+ if (info->lo_encrypt_key_size) {
+ memcpy(lo->lo_encrypt_key, info->lo_encrypt_key,
+ info->lo_encrypt_key_size);
+ lo->lo_key_owner = uid;
+ }
+
+ return 0;
+}
+
+static int loop_configure(struct loop_device *lo, fmode_t mode,
+ struct block_device *bdev,
+ const struct loop_config *config)
{
struct file *file;
struct inode *inode;
struct address_space *mapping;
struct block_device *claimed_bdev = NULL;
- int lo_flags = 0;
int error;
loff_t size;
bool partscan;
+ unsigned short bsize;
/* This is safe, since we have a reference from open(). */
__module_get(THIS_MODULE);
error = -EBADF;
- file = fget(arg);
+ file = fget(config->fd);
if (!file)
goto out;
@@ -977,7 +1089,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
* here to avoid changing device under exclusive owner.
*/
if (!(mode & FMODE_EXCL)) {
- claimed_bdev = bd_start_claiming(bdev, loop_set_fd);
+ claimed_bdev = bd_start_claiming(bdev, loop_configure);
if (IS_ERR(claimed_bdev)) {
error = PTR_ERR(claimed_bdev);
goto out_putf;
@@ -999,52 +1111,58 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
mapping = file->f_mapping;
inode = mapping->host;
+ size = get_loop_size(lo, file);
+
+ if ((config->info.lo_flags & ~LOOP_CONFIGURE_SETTABLE_FLAGS) != 0) {
+ error = -EINVAL;
+ goto out_unlock;
+ }
+
+ if (config->block_size) {
+ error = loop_validate_block_size(config->block_size);
+ if (error)
+ goto out_unlock;
+ }
+
+ error = loop_set_status_from_info(lo, &config->info);
+ if (error)
+ goto out_unlock;
+
if (!(file->f_mode & FMODE_WRITE) || !(mode & FMODE_WRITE) ||
!file->f_op->write_iter)
- lo_flags |= LO_FLAGS_READ_ONLY;
+ lo->lo_flags |= LO_FLAGS_READ_ONLY;
- error = -EFBIG;
- size = get_loop_size(lo, file);
- if ((loff_t)(sector_t)size != size)
- goto out_unlock;
error = loop_prepare_queue(lo);
if (error)
goto out_unlock;
- error = 0;
-
- set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0);
+ set_device_ro(bdev, (lo->lo_flags & LO_FLAGS_READ_ONLY) != 0);
- lo->use_dio = false;
+ lo->use_dio = lo->lo_flags & LO_FLAGS_DIRECT_IO;
lo->lo_device = bdev;
- lo->lo_flags = lo_flags;
lo->lo_backing_file = file;
- lo->transfer = NULL;
- lo->ioctl = NULL;
- lo->lo_sizelimit = 0;
lo->old_gfp_mask = mapping_gfp_mask(mapping);
mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
- if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
+ if (!(lo->lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
blk_queue_write_cache(lo->lo_queue, true, false);
- if (io_is_direct(lo->lo_backing_file) && inode->i_sb->s_bdev) {
+ if (config->block_size)
+ bsize = config->block_size;
+ else if ((lo->lo_backing_file->f_flags & O_DIRECT) && inode->i_sb->s_bdev)
/* In case of direct I/O, match underlying block size */
- unsigned short bsize = bdev_logical_block_size(
- inode->i_sb->s_bdev);
+ bsize = bdev_logical_block_size(inode->i_sb->s_bdev);
+ else
+ bsize = 512;
- blk_queue_logical_block_size(lo->lo_queue, bsize);
- blk_queue_physical_block_size(lo->lo_queue, bsize);
- blk_queue_io_min(lo->lo_queue, bsize);
- }
+ blk_queue_logical_block_size(lo->lo_queue, bsize);
+ blk_queue_physical_block_size(lo->lo_queue, bsize);
+ blk_queue_io_min(lo->lo_queue, bsize);
loop_update_rotational(lo);
loop_update_dio(lo);
- set_capacity(lo->lo_disk, size);
- bd_set_size(bdev, size << 9);
loop_sysfs_init(lo);
- /* let user-space know about the new size */
- kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
+ loop_set_size(lo, size);
set_blocksize(bdev, S_ISBLK(inode->i_mode) ?
block_size(inode->i_bdev) : PAGE_SIZE);
@@ -1062,14 +1180,14 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
if (partscan)
loop_reread_partitions(lo, bdev);
if (claimed_bdev)
- bd_abort_claiming(bdev, claimed_bdev, loop_set_fd);
+ bd_abort_claiming(bdev, claimed_bdev, loop_configure);
return 0;
out_unlock:
mutex_unlock(&loop_ctl_mutex);
out_bdev:
if (claimed_bdev)
- bd_abort_claiming(bdev, claimed_bdev, loop_set_fd);
+ bd_abort_claiming(bdev, claimed_bdev, loop_configure);
out_putf:
fput(file);
out:
@@ -1078,43 +1196,6 @@ out:
return error;
}
-static int
-loop_release_xfer(struct loop_device *lo)
-{
- int err = 0;
- struct loop_func_table *xfer = lo->lo_encryption;
-
- if (xfer) {
- if (xfer->release)
- err = xfer->release(lo);
- lo->transfer = NULL;
- lo->lo_encryption = NULL;
- module_put(xfer->owner);
- }
- return err;
-}
-
-static int
-loop_init_xfer(struct loop_device *lo, struct loop_func_table *xfer,
- const struct loop_info64 *i)
-{
- int err = 0;
-
- if (xfer) {
- struct module *owner = xfer->owner;
-
- if (!try_module_get(owner))
- return -EINVAL;
- if (xfer->init)
- err = xfer->init(lo, i);
- if (err)
- module_put(owner);
- else
- lo->lo_encryption = xfer;
- }
- return err;
-}
-
static int __loop_clr_fd(struct loop_device *lo, bool release)
{
struct file *filp = NULL;
@@ -1263,10 +1344,11 @@ static int
loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
{
int err;
- struct loop_func_table *xfer;
- kuid_t uid = current_uid();
struct block_device *bdev;
+ kuid_t uid = current_uid();
+ int prev_lo_flags;
bool partscan = false;
+ bool size_changed = false;
err = mutex_lock_killable(&loop_ctl_mutex);
if (err)
@@ -1281,13 +1363,10 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
err = -ENXIO;
goto out_unlock;
}
- if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE) {
- err = -EINVAL;
- goto out_unlock;
- }
if (lo->lo_offset != info->lo_offset ||
lo->lo_sizelimit != info->lo_sizelimit) {
+ size_changed = true;
sync_blockdev(lo->lo_device);
kill_bdev(lo->lo_device);
}
@@ -1295,79 +1374,44 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
/* I/O need to be drained during transfer transition */
blk_mq_freeze_queue(lo->lo_queue);
- err = loop_release_xfer(lo);
- if (err)
+ if (size_changed && lo->lo_device->bd_inode->i_mapping->nrpages) {
+ /* If any pages were dirtied after kill_bdev(), try again */
+ err = -EAGAIN;
+ pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n",
+ __func__, lo->lo_number, lo->lo_file_name,
+ lo->lo_device->bd_inode->i_mapping->nrpages);
goto out_unfreeze;
+ }
- if (info->lo_encrypt_type) {
- unsigned int type = info->lo_encrypt_type;
-
- if (type >= MAX_LO_CRYPT) {
- err = -EINVAL;
- goto out_unfreeze;
- }
- xfer = xfer_funcs[type];
- if (xfer == NULL) {
- err = -EINVAL;
- goto out_unfreeze;
- }
- } else
- xfer = NULL;
+ prev_lo_flags = lo->lo_flags;
- err = loop_init_xfer(lo, xfer, info);
+ err = loop_set_status_from_info(lo, info);
if (err)
goto out_unfreeze;
- if (lo->lo_offset != info->lo_offset ||
- lo->lo_sizelimit != info->lo_sizelimit) {
- /* kill_bdev should have truncated all the pages */
- if (lo->lo_device->bd_inode->i_mapping->nrpages) {
- err = -EAGAIN;
- pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n",
- __func__, lo->lo_number, lo->lo_file_name,
- lo->lo_device->bd_inode->i_mapping->nrpages);
- goto out_unfreeze;
- }
- if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit)) {
- err = -EFBIG;
- goto out_unfreeze;
- }
+ /* Mask out flags that can't be set using LOOP_SET_STATUS. */
+ lo->lo_flags &= ~LOOP_SET_STATUS_SETTABLE_FLAGS;
+ /* For those flags, use the previous values instead */
+ lo->lo_flags |= prev_lo_flags & ~LOOP_SET_STATUS_SETTABLE_FLAGS;
+ /* For flags that can't be cleared, use previous values too */
+ lo->lo_flags |= prev_lo_flags & ~LOOP_SET_STATUS_CLEARABLE_FLAGS;
+
+ if (size_changed) {
+ loff_t new_size = get_size(lo->lo_offset, lo->lo_sizelimit,
+ lo->lo_backing_file);
+ loop_set_size(lo, new_size);
}
loop_config_discard(lo);
- memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE);
- memcpy(lo->lo_crypt_name, info->lo_crypt_name, LO_NAME_SIZE);
- lo->lo_file_name[LO_NAME_SIZE-1] = 0;
- lo->lo_crypt_name[LO_NAME_SIZE-1] = 0;
-
- if (!xfer)
- xfer = &none_funcs;
- lo->transfer = xfer->transfer;
- lo->ioctl = xfer->ioctl;
-
- if ((lo->lo_flags & LO_FLAGS_AUTOCLEAR) !=
- (info->lo_flags & LO_FLAGS_AUTOCLEAR))
- lo->lo_flags ^= LO_FLAGS_AUTOCLEAR;
-
- lo->lo_encrypt_key_size = info->lo_encrypt_key_size;
- lo->lo_init[0] = info->lo_init[0];
- lo->lo_init[1] = info->lo_init[1];
- if (info->lo_encrypt_key_size) {
- memcpy(lo->lo_encrypt_key, info->lo_encrypt_key,
- info->lo_encrypt_key_size);
- lo->lo_key_owner = uid;
- }
-
/* update dio if lo_offset or transfer is changed */
__loop_update_dio(lo, lo->use_dio);
out_unfreeze:
blk_mq_unfreeze_queue(lo->lo_queue);
- if (!err && (info->lo_flags & LO_FLAGS_PARTSCAN) &&
- !(lo->lo_flags & LO_FLAGS_PARTSCAN)) {
- lo->lo_flags |= LO_FLAGS_PARTSCAN;
+ if (!err && (lo->lo_flags & LO_FLAGS_PARTSCAN) &&
+ !(prev_lo_flags & LO_FLAGS_PARTSCAN)) {
lo->lo_disk->flags &= ~GENHD_FL_NO_PART_SCAN;
bdev = lo->lo_device;
partscan = true;
@@ -1531,10 +1575,15 @@ loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) {
static int loop_set_capacity(struct loop_device *lo)
{
+ loff_t size;
+
if (unlikely(lo->lo_state != Lo_bound))
return -ENXIO;
- return figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit);
+ size = get_loop_size(lo, lo->lo_backing_file);
+ loop_set_size(lo, size);
+
+ return 0;
}
static int loop_set_dio(struct loop_device *lo, unsigned long arg)
@@ -1558,8 +1607,9 @@ static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
if (lo->lo_state != Lo_bound)
return -ENXIO;
- if (arg < 512 || arg > PAGE_SIZE || !is_power_of_2(arg))
- return -EINVAL;
+ err = loop_validate_block_size(arg);
+ if (err)
+ return err;
if (lo->lo_queue->limits.logical_block_size == arg)
return 0;
@@ -1617,11 +1667,31 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
struct loop_device *lo = bdev->bd_disk->private_data;
+ void __user *argp = (void __user *) arg;
int err;
switch (cmd) {
- case LOOP_SET_FD:
- return loop_set_fd(lo, mode, bdev, arg);
+ case LOOP_SET_FD: {
+ /*
+ * Legacy case - pass in a zeroed out struct loop_config with
+ * only the file descriptor set , which corresponds with the
+ * default parameters we'd have used otherwise.
+ */
+ struct loop_config config;
+
+ memset(&config, 0, sizeof(config));
+ config.fd = arg;
+
+ return loop_configure(lo, mode, bdev, &config);
+ }
+ case LOOP_CONFIGURE: {
+ struct loop_config config;
+
+ if (copy_from_user(&config, argp, sizeof(config)))
+ return -EFAULT;
+
+ return loop_configure(lo, mode, bdev, &config);
+ }
case LOOP_CHANGE_FD:
return loop_change_fd(lo, bdev, arg);
case LOOP_CLR_FD:
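LOOP_CONFIGURE accepts a struct loop_config (backing fd, optional block size and a loop_info64), so attaching a file, setting status and setting the block size collapse into a single ioctl instead of the old LOOP_SET_FD + LOOP_SET_STATUS64 + LOOP_SET_BLOCK_SIZE sequence. A hedged userspace sketch, assuming the uapi additions from the rest of this series (struct loop_config and the LOOP_CONFIGURE number in <linux/loop.h>) and omitting error handling:

	#include <fcntl.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/loop.h>

	static int attach_backing_file(const char *loopdev, const char *backing)
	{
		int lfd = open(loopdev, O_RDWR);
		int bfd = open(backing, O_RDWR);
		struct loop_config cfg;

		memset(&cfg, 0, sizeof(cfg));
		cfg.fd = bfd;				/* backing file descriptor */
		cfg.block_size = 4096;			/* optional; 0 keeps the default */
		cfg.info.lo_flags = LO_FLAGS_DIRECT_IO;	/* only *_SETTABLE_FLAGS are honoured */

		return ioctl(lfd, LOOP_CONFIGURE, &cfg);	/* one call: attach + status + block size */
	}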
@@ -1629,21 +1699,19 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
case LOOP_SET_STATUS:
err = -EPERM;
if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) {
- err = loop_set_status_old(lo,
- (struct loop_info __user *)arg);
+ err = loop_set_status_old(lo, argp);
}
break;
case LOOP_GET_STATUS:
- return loop_get_status_old(lo, (struct loop_info __user *) arg);
+ return loop_get_status_old(lo, argp);
case LOOP_SET_STATUS64:
err = -EPERM;
if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) {
- err = loop_set_status64(lo,
- (struct loop_info64 __user *) arg);
+ err = loop_set_status64(lo, argp);
}
break;
case LOOP_GET_STATUS64:
- return loop_get_status64(lo, (struct loop_info64 __user *) arg);
+ return loop_get_status64(lo, argp);
case LOOP_SET_CAPACITY:
case LOOP_SET_DIRECT_IO:
case LOOP_SET_BLOCK_SIZE:
@@ -1795,6 +1863,7 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
case LOOP_CLR_FD:
case LOOP_GET_STATUS64:
case LOOP_SET_STATUS64:
+ case LOOP_CONFIGURE:
arg = (unsigned long) compat_ptr(arg);
/* fall through */
case LOOP_SET_FD:
@@ -2037,7 +2106,7 @@ static int loop_add(struct loop_device **l, int i)
lo->tag_set.queue_depth = 128;
lo->tag_set.numa_node = NUMA_NO_NODE;
lo->tag_set.cmd_size = sizeof(struct loop_cmd);
- lo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+ lo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_STACKING;
lo->tag_set.driver_data = lo;
err = blk_mq_alloc_tag_set(&lo->tag_set);
diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c
index ce9e33603a4d..87b31f9ca362 100644
--- a/drivers/block/null_blk_main.c
+++ b/drivers/block/null_blk_main.c
@@ -1250,8 +1250,34 @@ static inline blk_status_t null_handle_memory_backed(struct nullb_cmd *cmd,
return errno_to_blk_status(err);
}
+static void nullb_zero_read_cmd_buffer(struct nullb_cmd *cmd)
+{
+ struct nullb_device *dev = cmd->nq->dev;
+ struct bio *bio;
+
+ if (dev->memory_backed)
+ return;
+
+ if (dev->queue_mode == NULL_Q_BIO && bio_op(cmd->bio) == REQ_OP_READ) {
+ zero_fill_bio(cmd->bio);
+ } else if (req_op(cmd->rq) == REQ_OP_READ) {
+ __rq_for_each_bio(bio, cmd->rq)
+ zero_fill_bio(bio);
+ }
+}
+
static inline void nullb_complete_cmd(struct nullb_cmd *cmd)
{
+ /*
+ * Since root privileges are required to configure the null_blk
+ * driver, it is fine that this driver does not initialize the
+ * data buffers of read commands. Zero-initialize these buffers
+ * anyway if KMSAN is enabled to prevent that KMSAN complains
+ * about null_blk not initializing read data buffers.
+ */
+ if (IS_ENABLED(CONFIG_KMSAN))
+ nullb_zero_read_cmd_buffer(cmd);
+
/* Complete IO by inline, softirq or timer */
switch (cmd->nq->dev->irqmode) {
case NULL_IRQ_SOFTIRQ:
@@ -1397,7 +1423,7 @@ static bool should_requeue_request(struct request *rq)
static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res)
{
pr_info("rq %p timed out\n", rq);
- blk_mq_complete_request(rq);
+ blk_mq_force_complete_rq(rq);
return BLK_EH_DONE;
}
diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c
index ed5458f2d367..cc47606d8ffe 100644
--- a/drivers/block/null_blk_zoned.c
+++ b/drivers/block/null_blk_zoned.c
@@ -74,13 +74,20 @@ int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
int null_register_zoned_dev(struct nullb *nullb)
{
+ struct nullb_device *dev = nullb->dev;
struct request_queue *q = nullb->q;
- if (queue_is_mq(q))
- return blk_revalidate_disk_zones(nullb->disk);
+ if (queue_is_mq(q)) {
+ int ret = blk_revalidate_disk_zones(nullb->disk, NULL);
+
+ if (ret)
+ return ret;
+ } else {
+ blk_queue_chunk_sectors(q, dev->zone_size_sects);
+ q->nr_zones = blkdev_nr_zones(nullb->disk);
+ }
- blk_queue_chunk_sectors(q, nullb->dev->zone_size_sects);
- q->nr_zones = blkdev_nr_zones(nullb->disk);
+ blk_queue_max_zone_append_sectors(q, dev->zone_size_sects);
return 0;
}
@@ -142,7 +149,7 @@ size_t null_zone_valid_read_len(struct nullb *nullb,
}
static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
- unsigned int nr_sectors)
+ unsigned int nr_sectors, bool append)
{
struct nullb_device *dev = cmd->nq->dev;
unsigned int zno = null_zone_no(dev, sector);
@@ -162,9 +169,21 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
case BLK_ZONE_COND_IMP_OPEN:
case BLK_ZONE_COND_EXP_OPEN:
case BLK_ZONE_COND_CLOSED:
- /* Writes must be at the write pointer position */
- if (sector != zone->wp)
+ /*
+ * Regular writes must be at the write pointer position.
+ * Zone append writes are automatically issued at the write
+ * pointer and the position returned using the request or BIO
+ * sector.
+ */
+ if (append) {
+ sector = zone->wp;
+ if (cmd->bio)
+ cmd->bio->bi_iter.bi_sector = sector;
+ else
+ cmd->rq->__sector = sector;
+ } else if (sector != zone->wp) {
return BLK_STS_IOERR;
+ }
if (zone->cond != BLK_ZONE_COND_EXP_OPEN)
zone->cond = BLK_ZONE_COND_IMP_OPEN;
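The comment in the hunk above is the essence of REQ_OP_ZONE_APPEND: the device picks the write location at the zone write pointer and reports it back through the request or BIO sector. A hedged sketch of an in-kernel submitter using the bio API of this kernel version (function names and the debug print are illustrative only):

	#include <linux/bio.h>
	#include <linux/blkdev.h>

	static void zone_append_end_io(struct bio *bio)
	{
		/* on completion, bi_sector has been updated to where the data landed */
		pr_debug("zone append completed at sector %llu\n",
			 (unsigned long long)bio->bi_iter.bi_sector);
		bio_put(bio);
	}

	/* submit one page as a zone append to the zone starting at zone_start_sector */
	static void submit_zone_append(struct block_device *bdev, struct page *page,
				       unsigned int len, sector_t zone_start_sector)
	{
		struct bio *bio = bio_alloc(GFP_KERNEL, 1);

		bio_set_dev(bio, bdev);
		bio->bi_opf = REQ_OP_ZONE_APPEND;
		bio->bi_iter.bi_sector = zone_start_sector;	/* zone start, not an exact offset */
		bio_add_page(bio, page, len, 0);
		bio->bi_end_io = zone_append_end_io;
		submit_bio(bio);
	}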
@@ -246,7 +265,9 @@ blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_opf op,
{
switch (op) {
case REQ_OP_WRITE:
- return null_zone_write(cmd, sector, nr_sectors);
+ return null_zone_write(cmd, sector, nr_sectors, false);
+ case REQ_OP_ZONE_APPEND:
+ return null_zone_write(cmd, sector, nr_sectors, true);
case REQ_OP_ZONE_RESET:
case REQ_OP_ZONE_RESET_ALL:
case REQ_OP_ZONE_OPEN:
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index cda5cf917e9a..5124eca90e83 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -1032,7 +1032,7 @@ static int __init pcd_init(void)
for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
if (cd->present) {
- register_cdrom(&cd->info);
+ register_cdrom(cd->disk, &cd->info);
cd->disk->private_data = cd;
add_disk(cd->disk);
}
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index c5c6487a19d5..7b55811c2a81 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -454,7 +454,6 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev)
queue->queuedata = dev;
blk_queue_max_hw_sectors(queue, dev->bounce_size >> 9);
- blk_queue_segment_boundary(queue, -1UL);
blk_queue_dma_alignment(queue, dev->blk_size-1);
blk_queue_logical_block_size(queue, dev->blk_size);
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 67d65ac785e9..7420648a1de6 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -836,6 +836,7 @@ enum {
Opt_lock_timeout,
/* int args above */
Opt_pool_ns,
+ Opt_compression_hint,
/* string args above */
Opt_read_only,
Opt_read_write,
@@ -844,8 +845,23 @@ enum {
Opt_notrim,
};
+enum {
+ Opt_compression_hint_none,
+ Opt_compression_hint_compressible,
+ Opt_compression_hint_incompressible,
+};
+
+static const struct constant_table rbd_param_compression_hint[] = {
+ {"none", Opt_compression_hint_none},
+ {"compressible", Opt_compression_hint_compressible},
+ {"incompressible", Opt_compression_hint_incompressible},
+ {}
+};
+
static const struct fs_parameter_spec rbd_parameters[] = {
fsparam_u32 ("alloc_size", Opt_alloc_size),
+ fsparam_enum ("compression_hint", Opt_compression_hint,
+ rbd_param_compression_hint),
fsparam_flag ("exclusive", Opt_exclusive),
fsparam_flag ("lock_on_read", Opt_lock_on_read),
fsparam_u32 ("lock_timeout", Opt_lock_timeout),
@@ -867,6 +883,8 @@ struct rbd_options {
bool lock_on_read;
bool exclusive;
bool trim;
+
+ u32 alloc_hint_flags; /* CEPH_OSD_OP_ALLOC_HINT_FLAG_* */
};
#define RBD_QUEUE_DEPTH_DEFAULT BLKDEV_MAX_RQ
@@ -2253,7 +2271,8 @@ static void __rbd_osd_setup_write_ops(struct ceph_osd_request *osd_req,
!(obj_req->flags & RBD_OBJ_FLAG_MAY_EXIST)) {
osd_req_op_alloc_hint_init(osd_req, which++,
rbd_dev->layout.object_size,
- rbd_dev->layout.object_size);
+ rbd_dev->layout.object_size,
+ rbd_dev->opts->alloc_hint_flags);
}
if (rbd_obj_is_entire(obj_req))
@@ -6331,6 +6350,29 @@ static int rbd_parse_param(struct fs_parameter *param,
pctx->spec->pool_ns = param->string;
param->string = NULL;
break;
+ case Opt_compression_hint:
+ switch (result.uint_32) {
+ case Opt_compression_hint_none:
+ opt->alloc_hint_flags &=
+ ~(CEPH_OSD_ALLOC_HINT_FLAG_COMPRESSIBLE |
+ CEPH_OSD_ALLOC_HINT_FLAG_INCOMPRESSIBLE);
+ break;
+ case Opt_compression_hint_compressible:
+ opt->alloc_hint_flags |=
+ CEPH_OSD_ALLOC_HINT_FLAG_COMPRESSIBLE;
+ opt->alloc_hint_flags &=
+ ~CEPH_OSD_ALLOC_HINT_FLAG_INCOMPRESSIBLE;
+ break;
+ case Opt_compression_hint_incompressible:
+ opt->alloc_hint_flags |=
+ CEPH_OSD_ALLOC_HINT_FLAG_INCOMPRESSIBLE;
+ opt->alloc_hint_flags &=
+ ~CEPH_OSD_ALLOC_HINT_FLAG_COMPRESSIBLE;
+ break;
+ default:
+ BUG();
+ }
+ break;
case Opt_read_only:
opt->read_only = true;
break;
diff --git a/drivers/block/rbd_types.h b/drivers/block/rbd_types.h
index ac98ab6ccd3b..a600e0eb6b6f 100644
--- a/drivers/block/rbd_types.h
+++ b/drivers/block/rbd_types.h
@@ -93,7 +93,7 @@ struct rbd_image_header_ondisk {
__le32 snap_count;
__le32 reserved;
__le64 snap_names_len;
- struct rbd_image_snap_ondisk snaps[0];
+ struct rbd_image_snap_ondisk snaps[];
} __attribute__((packed));
diff --git a/drivers/block/rnbd/Kconfig b/drivers/block/rnbd/Kconfig
new file mode 100644
index 000000000000..4b6d3d816d1f
--- /dev/null
+++ b/drivers/block/rnbd/Kconfig
@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+config BLK_DEV_RNBD
+ bool
+
+config BLK_DEV_RNBD_CLIENT
+ tristate "RDMA Network Block Device driver client"
+ depends on INFINIBAND_RTRS_CLIENT
+ select BLK_DEV_RNBD
+ help
+ RNBD client is a network block device driver using rdma transport.
+
+	  RNBD client allows for mapping of remote block devices over
+ RTRS protocol from a target system where RNBD server is running.
+
+ If unsure, say N.
+
+config BLK_DEV_RNBD_SERVER
+ tristate "RDMA Network Block Device driver server"
+ depends on INFINIBAND_RTRS_SERVER
+ select BLK_DEV_RNBD
+ help
+ RNBD server is the server side of RNBD using rdma transport.
+
+ RNBD server allows for exporting local block devices to a remote client
+ over RTRS protocol.
+
+ If unsure, say N.
diff --git a/drivers/block/rnbd/Makefile b/drivers/block/rnbd/Makefile
new file mode 100644
index 000000000000..5bb1a7ad1ada
--- /dev/null
+++ b/drivers/block/rnbd/Makefile
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+ccflags-y := -I$(srctree)/drivers/infiniband/ulp/rtrs
+
+rnbd-client-y := rnbd-clt.o \
+ rnbd-clt-sysfs.o \
+ rnbd-common.o
+
+rnbd-server-y := rnbd-common.o \
+ rnbd-srv.o \
+ rnbd-srv-dev.o \
+ rnbd-srv-sysfs.o
+
+obj-$(CONFIG_BLK_DEV_RNBD_CLIENT) += rnbd-client.o
+obj-$(CONFIG_BLK_DEV_RNBD_SERVER) += rnbd-server.o
diff --git a/drivers/block/rnbd/README b/drivers/block/rnbd/README
new file mode 100644
index 000000000000..1773c0aa0bd4
--- /dev/null
+++ b/drivers/block/rnbd/README
@@ -0,0 +1,92 @@
+********************************
+RDMA Network Block Device (RNBD)
+********************************
+
+Introduction
+------------
+
+RNBD (RDMA Network Block Device) is a pair of kernel modules
+(client and server) that allow for remote access of a block device on
+the server over RTRS protocol using the RDMA (InfiniBand, RoCE, iWARP)
+transport. After being mapped, the remote block devices can be accessed
+on the client side as local block devices.
+
+I/O is transferred between client and server by the RTRS transport
+modules. The administration of RNBD and RTRS modules is done via
+sysfs entries.
+
+Requirements
+------------
+
+ RTRS kernel modules
+
+Quick Start
+-----------
+
+Server side:
+ # modprobe rnbd_server
+
+Client side:
+ # modprobe rnbd_client
+ # echo "sessname=blya path=ip:10.50.100.66 device_path=/dev/ram0" > \
+ /sys/devices/virtual/rnbd-client/ctl/map_device
+
+ Where "sessname=" is a session name, a string to identify the session
+ on client and on server sides; "path=" is a destination IP address or
+ a pair of a source and a destination IPs, separated by comma. Multiple
+ "path=" options can be specified in order to use multipath (see RTRS
+ description for details); "device_path=" is the block device to be
+ mapped from the server side. After the session to the server machine is
+ established, the mapped device will appear on the client side under
+ /dev/rnbd<N>.
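+
+ An example (the second address below is purely illustrative) of mapping
+ a device read-only over two paths:
+
+ # echo "sessname=blya path=ip:10.50.100.66 path=ip:10.50.100.67 \
+ device_path=/dev/ram0 access_mode=ro" > \
+ /sys/devices/virtual/rnbd-client/ctl/map_device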
+
+
+RNBD-Server Module Parameters
+=============================
+
+dev_search_path
+---------------
+
+When a device is mapped from the client, the server generates the path
+to the block device on the server side by concatenating dev_search_path
+and the "device_path" that was specified in the map_device operation.
+
+The default dev_search_path is: "/".
+
+The dev_search_path option can also contain %SESSNAME% in order to provide
+different device namespaces for different sessions. See the "device_path"
+option for details.
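+
+For example (purely illustrative values): with dev_search_path set to
+"/rnbd-devs/%SESSNAME%" and a client session named "blya", a device_path of
+"ram0" is resolved relative to "/rnbd-devs/blya/" on the server.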
+
+============================
+Protocol (rnbd/rnbd-proto.h)
+============================
+
+1. Before mapping the first device from a given server, the client sends an
+RNBD_MSG_SESS_INFO to the server. The server responds with
+RNBD_MSG_SESS_INFO_RSP. Currently the messages only contain the protocol
+version for backward compatibility.
+
+2. The client requests to open a device by sending an RNBD_MSG_OPEN message.
+It contains the path to the device and the access mode (read-only or
+writable). The server responds to the message with RNBD_MSG_OPEN_RSP, which
+contains a 32 bit device id to be used for IOs and device "geometry" related
+information: size, max_hw_sectors, etc.
+
+3. The client attaches an RNBD_MSG_IO to each IO message sent to a device.
+This message contains the device id (provided by the server in its
+rnbd_msg_open_rsp), the sector to be accessed, read-write flags and bi_size.
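+
+For example (illustrative values): a read of 4096 bytes at sector 2048 from a
+device opened with device id 5 is sent as an RNBD_MSG_IO carrying
+device_id=5, sector=2048, read flags and bi_size=4096.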
+
+4. The client closes a device by sending an RNBD_MSG_CLOSE which contains only
+the device id provided by the server.
+
+=========================================
+Contributors List (in alphabetical order)
+=========================================
+Danil Kipnis <danil.kipnis@profitbricks.com>
+Fabian Holler <mail@fholler.de>
+Guoqing Jiang <guoqing.jiang@cloud.ionos.com>
+Jack Wang <jinpu.wang@profitbricks.com>
+Kleber Souza <kleber.souza@profitbricks.com>
+Lutz Pogrell <lutz.pogrell@cloud.ionos.com>
+Milind Dumbare <Milind.dumbare@gmail.com>
+Roman Penyaev <roman.penyaev@profitbricks.com>
diff --git a/drivers/block/rnbd/rnbd-clt-sysfs.c b/drivers/block/rnbd/rnbd-clt-sysfs.c
new file mode 100644
index 000000000000..4f4474eecadb
--- /dev/null
+++ b/drivers/block/rnbd/rnbd-clt-sysfs.c
@@ -0,0 +1,639 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RDMA Network Block Driver
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
+
+#include <linux/types.h>
+#include <linux/ctype.h>
+#include <linux/parser.h>
+#include <linux/module.h>
+#include <linux/in6.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/device.h>
+#include <rdma/ib.h>
+#include <rdma/rdma_cm.h>
+
+#include "rnbd-clt.h"
+
+static struct device *rnbd_dev;
+static struct class *rnbd_dev_class;
+static struct kobject *rnbd_devs_kobj;
+
+enum {
+ RNBD_OPT_ERR = 0,
+ RNBD_OPT_DEST_PORT = 1 << 0,
+ RNBD_OPT_PATH = 1 << 1,
+ RNBD_OPT_DEV_PATH = 1 << 2,
+ RNBD_OPT_ACCESS_MODE = 1 << 3,
+ RNBD_OPT_SESSNAME = 1 << 6,
+};
+
+static const unsigned int rnbd_opt_mandatory[] = {
+ RNBD_OPT_PATH,
+ RNBD_OPT_DEV_PATH,
+ RNBD_OPT_SESSNAME,
+};
+
+static const match_table_t rnbd_opt_tokens = {
+ {RNBD_OPT_PATH, "path=%s" },
+ {RNBD_OPT_DEV_PATH, "device_path=%s"},
+ {RNBD_OPT_DEST_PORT, "dest_port=%d" },
+ {RNBD_OPT_ACCESS_MODE, "access_mode=%s"},
+ {RNBD_OPT_SESSNAME, "sessname=%s" },
+ {RNBD_OPT_ERR, NULL },
+};
+
+struct rnbd_map_options {
+ char *sessname;
+ struct rtrs_addr *paths;
+ size_t *path_cnt;
+ char *pathname;
+ u16 *dest_port;
+ enum rnbd_access_mode *access_mode;
+};
+
+static int rnbd_clt_parse_map_options(const char *buf, size_t max_path_cnt,
+ struct rnbd_map_options *opt)
+{
+ char *options, *sep_opt;
+ char *p;
+ substring_t args[MAX_OPT_ARGS];
+ int opt_mask = 0;
+ int token;
+ int ret = -EINVAL;
+ int i, dest_port;
+ int p_cnt = 0;
+
+ options = kstrdup(buf, GFP_KERNEL);
+ if (!options)
+ return -ENOMEM;
+
+ sep_opt = strstrip(options);
+ while ((p = strsep(&sep_opt, " ")) != NULL) {
+ if (!*p)
+ continue;
+
+ token = match_token(p, rnbd_opt_tokens, args);
+ opt_mask |= token;
+
+ switch (token) {
+ case RNBD_OPT_SESSNAME:
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ if (strlen(p) > NAME_MAX) {
+ pr_err("map_device: sessname too long\n");
+ ret = -EINVAL;
+ kfree(p);
+ goto out;
+ }
+ strlcpy(opt->sessname, p, NAME_MAX);
+ kfree(p);
+ break;
+
+ case RNBD_OPT_PATH:
+ if (p_cnt >= max_path_cnt) {
+ pr_err("map_device: too many (> %zu) paths provided\n",
+ max_path_cnt);
+ ret = -ENOMEM;
+ goto out;
+ }
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = rtrs_addr_to_sockaddr(p, strlen(p),
+ *opt->dest_port,
+ &opt->paths[p_cnt]);
+ if (ret) {
+ pr_err("Can't parse path %s: %d\n", p, ret);
+ kfree(p);
+ goto out;
+ }
+
+ p_cnt++;
+
+ kfree(p);
+ break;
+
+ case RNBD_OPT_DEV_PATH:
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ if (strlen(p) > NAME_MAX) {
+ pr_err("map_device: Device path too long\n");
+ ret = -EINVAL;
+ kfree(p);
+ goto out;
+ }
+ strlcpy(opt->pathname, p, NAME_MAX);
+ kfree(p);
+ break;
+
+ case RNBD_OPT_DEST_PORT:
+ if (match_int(args, &dest_port) || dest_port < 0 ||
+ dest_port > 65535) {
+ pr_err("bad destination port number parameter '%d'\n",
+ dest_port);
+ ret = -EINVAL;
+ goto out;
+ }
+ *opt->dest_port = dest_port;
+ break;
+
+ case RNBD_OPT_ACCESS_MODE:
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (!strcmp(p, "ro")) {
+ *opt->access_mode = RNBD_ACCESS_RO;
+ } else if (!strcmp(p, "rw")) {
+ *opt->access_mode = RNBD_ACCESS_RW;
+ } else if (!strcmp(p, "migration")) {
+ *opt->access_mode = RNBD_ACCESS_MIGRATION;
+ } else {
+ pr_err("map_device: Invalid access_mode: '%s'\n",
+ p);
+ ret = -EINVAL;
+ kfree(p);
+ goto out;
+ }
+
+ kfree(p);
+ break;
+
+ default:
+ pr_err("map_device: Unknown parameter or missing value '%s'\n",
+ p);
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(rnbd_opt_mandatory); i++) {
+ if ((opt_mask & rnbd_opt_mandatory[i])) {
+ ret = 0;
+ } else {
+ pr_err("map_device: Parameters missing\n");
+ ret = -EINVAL;
+ break;
+ }
+ }
+
+out:
+ *opt->path_cnt = p_cnt;
+ kfree(options);
+ return ret;
+}
+
+static ssize_t state_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *page)
+{
+ struct rnbd_clt_dev *dev;
+
+ dev = container_of(kobj, struct rnbd_clt_dev, kobj);
+
+ switch (dev->dev_state) {
+ case DEV_STATE_INIT:
+ return snprintf(page, PAGE_SIZE, "init\n");
+ case DEV_STATE_MAPPED:
+ /* TODO fix cli tool before changing to proper state */
+ return snprintf(page, PAGE_SIZE, "open\n");
+ case DEV_STATE_MAPPED_DISCONNECTED:
+ /* TODO fix cli tool before changing to proper state */
+ return snprintf(page, PAGE_SIZE, "closed\n");
+ case DEV_STATE_UNMAPPED:
+ return snprintf(page, PAGE_SIZE, "unmapped\n");
+ default:
+ return snprintf(page, PAGE_SIZE, "unknown\n");
+ }
+}
+
+static struct kobj_attribute rnbd_clt_state_attr = __ATTR_RO(state);
+
+static ssize_t mapping_path_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *page)
+{
+ struct rnbd_clt_dev *dev;
+
+ dev = container_of(kobj, struct rnbd_clt_dev, kobj);
+
+ return scnprintf(page, PAGE_SIZE, "%s\n", dev->pathname);
+}
+
+static struct kobj_attribute rnbd_clt_mapping_path_attr =
+ __ATTR_RO(mapping_path);
+
+static ssize_t access_mode_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *page)
+{
+ struct rnbd_clt_dev *dev;
+
+ dev = container_of(kobj, struct rnbd_clt_dev, kobj);
+
+ return snprintf(page, PAGE_SIZE, "%s\n",
+ rnbd_access_mode_str(dev->access_mode));
+}
+
+static struct kobj_attribute rnbd_clt_access_mode =
+ __ATTR_RO(access_mode);
+
+static ssize_t rnbd_clt_unmap_dev_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *page)
+{
+ return scnprintf(page, PAGE_SIZE, "Usage: echo <normal|force> > %s\n",
+ attr->attr.name);
+}
+
+static ssize_t rnbd_clt_unmap_dev_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rnbd_clt_dev *dev;
+ char *opt, *options;
+ bool force;
+ int err;
+
+ opt = kstrdup(buf, GFP_KERNEL);
+ if (!opt)
+ return -ENOMEM;
+
+ options = strstrip(opt);
+ dev = container_of(kobj, struct rnbd_clt_dev, kobj);
+ if (sysfs_streq(options, "normal")) {
+ force = false;
+ } else if (sysfs_streq(options, "force")) {
+ force = true;
+ } else {
+ rnbd_clt_err(dev,
+ "unmap_device: Invalid value: %s\n",
+ options);
+ err = -EINVAL;
+ goto out;
+ }
+
+ rnbd_clt_info(dev, "Unmapping device, option: %s.\n",
+ force ? "force" : "normal");
+
+ /*
+ * We take an explicit module reference only for one reason: to avoid
+ * racing with the lockless rnbd_destroy_sessions().
+ */
+ if (!try_module_get(THIS_MODULE)) {
+ err = -ENODEV;
+ goto out;
+ }
+ err = rnbd_clt_unmap_device(dev, force, &attr->attr);
+ if (err) {
+ if (err != -EALREADY)
+ rnbd_clt_err(dev, "unmap_device: %d\n", err);
+ goto module_put;
+ }
+
+ /*
+ * The device may have vanished at this point!
+ */
+
+ err = count;
+
+module_put:
+ module_put(THIS_MODULE);
+out:
+ kfree(opt);
+
+ return err;
+}
+
+static struct kobj_attribute rnbd_clt_unmap_device_attr =
+ __ATTR(unmap_device, 0644, rnbd_clt_unmap_dev_show,
+ rnbd_clt_unmap_dev_store);
+
+static ssize_t rnbd_clt_resize_dev_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ return scnprintf(page, PAGE_SIZE,
+ "Usage: echo <new size in sectors> > %s\n",
+ attr->attr.name);
+}
+
+static ssize_t rnbd_clt_resize_dev_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ unsigned long sectors;
+ struct rnbd_clt_dev *dev;
+
+ dev = container_of(kobj, struct rnbd_clt_dev, kobj);
+
+ ret = kstrtoul(buf, 0, &sectors);
+ if (ret)
+ return ret;
+
+ ret = rnbd_clt_resize_disk(dev, (size_t)sectors);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static struct kobj_attribute rnbd_clt_resize_dev_attr =
+ __ATTR(resize, 0644, rnbd_clt_resize_dev_show,
+ rnbd_clt_resize_dev_store);
+
+static ssize_t rnbd_clt_remap_dev_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *page)
+{
+ return scnprintf(page, PAGE_SIZE, "Usage: echo <1> > %s\n",
+ attr->attr.name);
+}
+
+static ssize_t rnbd_clt_remap_dev_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rnbd_clt_dev *dev;
+ char *opt, *options;
+ int err;
+
+ opt = kstrdup(buf, GFP_KERNEL);
+ if (!opt)
+ return -ENOMEM;
+
+ options = strstrip(opt);
+ dev = container_of(kobj, struct rnbd_clt_dev, kobj);
+ if (!sysfs_streq(options, "1")) {
+ rnbd_clt_err(dev,
+ "remap_device: Invalid value: %s\n",
+ options);
+ err = -EINVAL;
+ goto out;
+ }
+ err = rnbd_clt_remap_device(dev);
+ if (likely(!err))
+ err = count;
+
+out:
+ kfree(opt);
+
+ return err;
+}
+
+static struct kobj_attribute rnbd_clt_remap_device_attr =
+ __ATTR(remap_device, 0644, rnbd_clt_remap_dev_show,
+ rnbd_clt_remap_dev_store);
+
+static ssize_t session_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *page)
+{
+ struct rnbd_clt_dev *dev;
+
+ dev = container_of(kobj, struct rnbd_clt_dev, kobj);
+
+ return scnprintf(page, PAGE_SIZE, "%s\n", dev->sess->sessname);
+}
+
+static struct kobj_attribute rnbd_clt_session_attr =
+ __ATTR_RO(session);
+
+static struct attribute *rnbd_dev_attrs[] = {
+ &rnbd_clt_unmap_device_attr.attr,
+ &rnbd_clt_resize_dev_attr.attr,
+ &rnbd_clt_remap_device_attr.attr,
+ &rnbd_clt_mapping_path_attr.attr,
+ &rnbd_clt_state_attr.attr,
+ &rnbd_clt_session_attr.attr,
+ &rnbd_clt_access_mode.attr,
+ NULL,
+};
+
+void rnbd_clt_remove_dev_symlink(struct rnbd_clt_dev *dev)
+{
+ /*
+ * The module unload rnbd_client_exit() path races with manual
+ * unmapping of the last device via sysfs, i.e.
+ * rnbd_clt_unmap_dev_store(), which would lead to a sysfs warning
+ * because the sysfs link has already been removed.
+ */
+ if (strlen(dev->blk_symlink_name) && try_module_get(THIS_MODULE)) {
+ sysfs_remove_link(rnbd_devs_kobj, dev->blk_symlink_name);
+ module_put(THIS_MODULE);
+ }
+}
+
+static struct kobj_type rnbd_dev_ktype = {
+ .sysfs_ops = &kobj_sysfs_ops,
+ .default_attrs = rnbd_dev_attrs,
+};
+
+static int rnbd_clt_add_dev_kobj(struct rnbd_clt_dev *dev)
+{
+ int ret;
+ struct kobject *gd_kobj = &disk_to_dev(dev->gd)->kobj;
+
+ ret = kobject_init_and_add(&dev->kobj, &rnbd_dev_ktype, gd_kobj, "%s",
+ "rnbd");
+ if (ret)
+ rnbd_clt_err(dev, "Failed to create device sysfs dir, err: %d\n",
+ ret);
+
+ return ret;
+}
+
+static ssize_t rnbd_clt_map_device_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ return scnprintf(page, PAGE_SIZE,
+ "Usage: echo \"[dest_port=server port number] sessname=<name of the rtrs session> path=<[srcaddr@]dstaddr> [path=<[srcaddr@]dstaddr>] device_path=<full path on remote side> [access_mode=<ro|rw|migration>]\" > %s\n\naddr ::= [ ip:<ipv4> | ip:<ipv6> | gid:<gid> ]\n",
+ attr->attr.name);
+}
+
+static int rnbd_clt_get_path_name(struct rnbd_clt_dev *dev, char *buf,
+ size_t len)
+{
+ int ret;
+ char pathname[NAME_MAX], *s;
+
+ strlcpy(pathname, dev->pathname, sizeof(pathname));
+ while ((s = strchr(pathname, '/')))
+ s[0] = '!';
+
+ ret = snprintf(buf, len, "%s", pathname);
+ if (ret >= len)
+ return -ENAMETOOLONG;
+
+ return 0;
+}
+
+static int rnbd_clt_add_dev_symlink(struct rnbd_clt_dev *dev)
+{
+ struct kobject *gd_kobj = &disk_to_dev(dev->gd)->kobj;
+ int ret;
+
+ ret = rnbd_clt_get_path_name(dev, dev->blk_symlink_name,
+ sizeof(dev->blk_symlink_name));
+ if (ret) {
+ rnbd_clt_err(dev, "Failed to get /sys/block symlink path, err: %d\n",
+ ret);
+ goto out_err;
+ }
+
+ ret = sysfs_create_link(rnbd_devs_kobj, gd_kobj,
+ dev->blk_symlink_name);
+ if (ret) {
+ rnbd_clt_err(dev, "Creating /sys/block symlink failed, err: %d\n",
+ ret);
+ goto out_err;
+ }
+
+ return 0;
+
+out_err:
+ dev->blk_symlink_name[0] = '\0';
+ return ret;
+}
+
+static ssize_t rnbd_clt_map_device_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rnbd_clt_dev *dev;
+ struct rnbd_map_options opt;
+ int ret;
+ char pathname[NAME_MAX];
+ char sessname[NAME_MAX];
+ enum rnbd_access_mode access_mode = RNBD_ACCESS_RW;
+ u16 port_nr = RTRS_PORT;
+
+ struct sockaddr_storage *addrs;
+ struct rtrs_addr paths[6];
+ size_t path_cnt;
+
+ opt.sessname = sessname;
+ opt.paths = paths;
+ opt.path_cnt = &path_cnt;
+ opt.pathname = pathname;
+ opt.dest_port = &port_nr;
+ opt.access_mode = &access_mode;
+ addrs = kcalloc(ARRAY_SIZE(paths) * 2, sizeof(*addrs), GFP_KERNEL);
+ if (!addrs)
+ return -ENOMEM;
+
+ for (path_cnt = 0; path_cnt < ARRAY_SIZE(paths); path_cnt++) {
+ paths[path_cnt].src = &addrs[path_cnt * 2];
+ paths[path_cnt].dst = &addrs[path_cnt * 2 + 1];
+ }
+
+ ret = rnbd_clt_parse_map_options(buf, ARRAY_SIZE(paths), &opt);
+ if (ret)
+ goto out;
+
+ pr_info("Mapping device %s on session %s, (access_mode: %s)\n",
+ pathname, sessname,
+ rnbd_access_mode_str(access_mode));
+
+ dev = rnbd_clt_map_device(sessname, paths, path_cnt, port_nr, pathname,
+ access_mode);
+ if (IS_ERR(dev)) {
+ ret = PTR_ERR(dev);
+ goto out;
+ }
+
+ ret = rnbd_clt_add_dev_kobj(dev);
+ if (ret)
+ goto unmap_dev;
+
+ ret = rnbd_clt_add_dev_symlink(dev);
+ if (ret)
+ goto unmap_dev;
+
+ kfree(addrs);
+ return count;
+
+unmap_dev:
+ rnbd_clt_unmap_device(dev, true, NULL);
+out:
+ kfree(addrs);
+ return ret;
+}
+
+static struct kobj_attribute rnbd_clt_map_device_attr =
+ __ATTR(map_device, 0644,
+ rnbd_clt_map_device_show, rnbd_clt_map_device_store);
+
+static struct attribute *default_attrs[] = {
+ &rnbd_clt_map_device_attr.attr,
+ NULL,
+};
+
+static struct attribute_group default_attr_group = {
+ .attrs = default_attrs,
+};
+
+static const struct attribute_group *default_attr_groups[] = {
+ &default_attr_group,
+ NULL,
+};
+
+int rnbd_clt_create_sysfs_files(void)
+{
+ int err;
+
+ rnbd_dev_class = class_create(THIS_MODULE, "rnbd-client");
+ if (IS_ERR(rnbd_dev_class))
+ return PTR_ERR(rnbd_dev_class);
+
+ rnbd_dev = device_create_with_groups(rnbd_dev_class, NULL,
+ MKDEV(0, 0), NULL,
+ default_attr_groups, "ctl");
+ if (IS_ERR(rnbd_dev)) {
+ err = PTR_ERR(rnbd_dev);
+ goto cls_destroy;
+ }
+ rnbd_devs_kobj = kobject_create_and_add("devices", &rnbd_dev->kobj);
+ if (!rnbd_devs_kobj) {
+ err = -ENOMEM;
+ goto dev_destroy;
+ }
+
+ return 0;
+
+dev_destroy:
+ device_destroy(rnbd_dev_class, MKDEV(0, 0));
+cls_destroy:
+ class_destroy(rnbd_dev_class);
+
+ return err;
+}
+
+void rnbd_clt_destroy_default_group(void)
+{
+ sysfs_remove_group(&rnbd_dev->kobj, &default_attr_group);
+}
+
+void rnbd_clt_destroy_sysfs_files(void)
+{
+ kobject_del(rnbd_devs_kobj);
+ kobject_put(rnbd_devs_kobj);
+ device_destroy(rnbd_dev_class, MKDEV(0, 0));
+ class_destroy(rnbd_dev_class);
+}
diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
new file mode 100644
index 000000000000..cc6a4e2587ae
--- /dev/null
+++ b/drivers/block/rnbd/rnbd-clt.c
@@ -0,0 +1,1729 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RDMA Network Block Driver
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
+
+#include <linux/module.h>
+#include <linux/blkdev.h>
+#include <linux/hdreg.h>
+#include <linux/scatterlist.h>
+#include <linux/idr.h>
+
+#include "rnbd-clt.h"
+
+MODULE_DESCRIPTION("RDMA Network Block Device Client");
+MODULE_LICENSE("GPL");
+
+static int rnbd_client_major;
+static DEFINE_IDA(index_ida);
+static DEFINE_MUTEX(ida_lock);
+static DEFINE_MUTEX(sess_lock);
+static LIST_HEAD(sess_list);
+
+/*
+ * Maximum number of partitions an instance can have.
+ * 6 bits = 64 minors = 63 partitions (one minor is used for the device itself)
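+ * For example (illustrative): device index 0 occupies minors 0..63 and
+ * appears as /dev/rnbd0; device index 1 starts at first_minor 64.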
+ */
+#define RNBD_PART_BITS 6
+
+static inline bool rnbd_clt_get_sess(struct rnbd_clt_session *sess)
+{
+ return refcount_inc_not_zero(&sess->refcount);
+}
+
+static void free_sess(struct rnbd_clt_session *sess);
+
+static void rnbd_clt_put_sess(struct rnbd_clt_session *sess)
+{
+ might_sleep();
+
+ if (refcount_dec_and_test(&sess->refcount))
+ free_sess(sess);
+}
+
+static void rnbd_clt_put_dev(struct rnbd_clt_dev *dev)
+{
+ might_sleep();
+
+ if (!refcount_dec_and_test(&dev->refcount))
+ return;
+
+ mutex_lock(&ida_lock);
+ ida_simple_remove(&index_ida, dev->clt_device_id);
+ mutex_unlock(&ida_lock);
+ kfree(dev->hw_queues);
+ rnbd_clt_put_sess(dev->sess);
+ mutex_destroy(&dev->lock);
+ kfree(dev);
+}
+
+static inline bool rnbd_clt_get_dev(struct rnbd_clt_dev *dev)
+{
+ return refcount_inc_not_zero(&dev->refcount);
+}
+
+static int rnbd_clt_set_dev_attr(struct rnbd_clt_dev *dev,
+ const struct rnbd_msg_open_rsp *rsp)
+{
+ struct rnbd_clt_session *sess = dev->sess;
+
+ if (!rsp->logical_block_size)
+ return -EINVAL;
+
+ dev->device_id = le32_to_cpu(rsp->device_id);
+ dev->nsectors = le64_to_cpu(rsp->nsectors);
+ dev->logical_block_size = le16_to_cpu(rsp->logical_block_size);
+ dev->physical_block_size = le16_to_cpu(rsp->physical_block_size);
+ dev->max_write_same_sectors = le32_to_cpu(rsp->max_write_same_sectors);
+ dev->max_discard_sectors = le32_to_cpu(rsp->max_discard_sectors);
+ dev->discard_granularity = le32_to_cpu(rsp->discard_granularity);
+ dev->discard_alignment = le32_to_cpu(rsp->discard_alignment);
+ dev->secure_discard = le16_to_cpu(rsp->secure_discard);
+ dev->rotational = rsp->rotational;
+
+ dev->max_hw_sectors = sess->max_io_size / SECTOR_SIZE;
+ dev->max_segments = BMAX_SEGMENTS;
+
+ dev->max_hw_sectors = min_t(u32, dev->max_hw_sectors,
+ le32_to_cpu(rsp->max_hw_sectors));
+ dev->max_segments = min_t(u16, dev->max_segments,
+ le16_to_cpu(rsp->max_segments));
+
+ return 0;
+}
+
+static int rnbd_clt_change_capacity(struct rnbd_clt_dev *dev,
+ size_t new_nsectors)
+{
+ int err = 0;
+
+ rnbd_clt_info(dev, "Device size changed from %zu to %zu sectors\n",
+ dev->nsectors, new_nsectors);
+ dev->nsectors = new_nsectors;
+ set_capacity(dev->gd, dev->nsectors);
+ err = revalidate_disk(dev->gd);
+ if (err)
+ rnbd_clt_err(dev,
+ "Failed to change device size from %zu to %zu, err: %d\n",
+ dev->nsectors, new_nsectors, err);
+ return err;
+}
+
+static int process_msg_open_rsp(struct rnbd_clt_dev *dev,
+ struct rnbd_msg_open_rsp *rsp)
+{
+ int err = 0;
+
+ mutex_lock(&dev->lock);
+ if (dev->dev_state == DEV_STATE_UNMAPPED) {
+ rnbd_clt_info(dev,
+ "Ignoring Open-Response message from server for unmapped device\n");
+ err = -ENOENT;
+ goto out;
+ }
+ if (dev->dev_state == DEV_STATE_MAPPED_DISCONNECTED) {
+ u64 nsectors = le64_to_cpu(rsp->nsectors);
+
+ /*
+ * If the device was remapped and the size changed in the
+ * meantime we need to revalidate it
+ */
+ if (dev->nsectors != nsectors)
+ rnbd_clt_change_capacity(dev, nsectors);
+ rnbd_clt_info(dev, "Device online, device remapped successfully\n");
+ }
+ err = rnbd_clt_set_dev_attr(dev, rsp);
+ if (err)
+ goto out;
+ dev->dev_state = DEV_STATE_MAPPED;
+
+out:
+ mutex_unlock(&dev->lock);
+
+ return err;
+}
+
+int rnbd_clt_resize_disk(struct rnbd_clt_dev *dev, size_t newsize)
+{
+ int ret = 0;
+
+ mutex_lock(&dev->lock);
+ if (dev->dev_state != DEV_STATE_MAPPED) {
+ pr_err("Failed to set new size of the device, device is not opened\n");
+ ret = -ENOENT;
+ goto out;
+ }
+ ret = rnbd_clt_change_capacity(dev, newsize);
+
+out:
+ mutex_unlock(&dev->lock);
+
+ return ret;
+}
+
+static inline void rnbd_clt_dev_requeue(struct rnbd_queue *q)
+{
+ if (WARN_ON(!q->hctx))
+ return;
+
+ /* We can come here from interrupt, thus async=true */
+ blk_mq_run_hw_queue(q->hctx, true);
+}
+
+enum {
+ RNBD_DELAY_IFBUSY = -1,
+};
+
+/**
+ * rnbd_get_cpu_qlist() - finds a list with HW queues to be rerun
+ * @sess: Session to find a queue for
+ * @cpu: Cpu to start the search from
+ *
+ * Description:
+ * Each CPU has a list of HW queues which need to be rerun. If a list
+ * is not empty, it is marked with a bit. This function finds the first
+ * set bit in the bitmap and returns the corresponding CPU list.
+ */
+static struct rnbd_cpu_qlist *
+rnbd_get_cpu_qlist(struct rnbd_clt_session *sess, int cpu)
+{
+ int bit;
+
+ /* Search from cpu to nr_cpu_ids */
+ bit = find_next_bit(sess->cpu_queues_bm, nr_cpu_ids, cpu);
+ if (bit < nr_cpu_ids) {
+ return per_cpu_ptr(sess->cpu_queues, bit);
+ } else if (cpu != 0) {
+ /* Search from 0 to cpu */
+ bit = find_next_bit(sess->cpu_queues_bm, cpu, 0);
+ if (bit < cpu)
+ return per_cpu_ptr(sess->cpu_queues, bit);
+ }
+
+ return NULL;
+}
+
+static inline int nxt_cpu(int cpu)
+{
+ return (cpu + 1) % nr_cpu_ids;
+}
+
+/**
+ * rnbd_rerun_if_needed() - rerun next queue marked as stopped
+ * @sess: Session to rerun a queue on
+ *
+ * Description:
+ * Each CPU has its own list of HW queues, which should be rerun.
+ * The function finds such a list of HW queues, takes the list lock,
+ * picks up the first HW queue from the list and requeues it.
+ *
+ * Return:
+ * True if the queue was requeued, false otherwise.
+ *
+ * Context:
+ * Does not matter.
+ */
+static bool rnbd_rerun_if_needed(struct rnbd_clt_session *sess)
+{
+ struct rnbd_queue *q = NULL;
+ struct rnbd_cpu_qlist *cpu_q;
+ unsigned long flags;
+ int *cpup;
+
+ /*
+ * To keep fairness and not let other queues starve, we always try
+ * to wake up someone else in a round-robin manner. That of course
+ * increases latency, but queues always have a chance to be executed.
+ */
+ cpup = get_cpu_ptr(sess->cpu_rr);
+ for (cpu_q = rnbd_get_cpu_qlist(sess, nxt_cpu(*cpup)); cpu_q;
+ cpu_q = rnbd_get_cpu_qlist(sess, nxt_cpu(cpu_q->cpu))) {
+ if (!spin_trylock_irqsave(&cpu_q->requeue_lock, flags))
+ continue;
+ if (unlikely(!test_bit(cpu_q->cpu, sess->cpu_queues_bm)))
+ goto unlock;
+ q = list_first_entry_or_null(&cpu_q->requeue_list,
+ typeof(*q), requeue_list);
+ if (WARN_ON(!q))
+ goto clear_bit;
+ list_del_init(&q->requeue_list);
+ clear_bit_unlock(0, &q->in_list);
+
+ if (list_empty(&cpu_q->requeue_list)) {
+ /* Clear bit if nothing is left */
+clear_bit:
+ clear_bit(cpu_q->cpu, sess->cpu_queues_bm);
+ }
+unlock:
+ spin_unlock_irqrestore(&cpu_q->requeue_lock, flags);
+
+ if (q)
+ break;
+ }
+
+ /*
+ * Saves the CPU that is going to be requeued on the per-cpu var. Just
+ * incrementing it doesn't work because rnbd_get_cpu_qlist() will
+ * always return the first CPU with something on the queue list when the
+ * value stored on the var is greater than the last CPU with something
+ * on the list.
+ */
+ if (cpu_q)
+ *cpup = cpu_q->cpu;
+ put_cpu_var(sess->cpu_rr);
+
+ if (q)
+ rnbd_clt_dev_requeue(q);
+
+ return q;
+}
+
+/**
+ * rnbd_rerun_all_if_idle() - rerun all queues left in the list if
+ * session is idling (there are no requests
+ * in-flight).
+ * @sess: Session to rerun the queues on
+ *
+ * Description:
+ * This function tries to rerun all stopped queues if there are no
+ * requests in flight anymore. It addresses the problem that arises
+ * when the number of tags is smaller than the number of queues (hctxs)
+ * which are stopped and put to sleep: if the last permit, which has
+ * just been put, does not wake up all of the remaining queues (hctxs),
+ * IO requests hang forever.
+ *
+ * That can happen when all permits, say N, have been exhausted from one
+ * CPU, and we have many block devices per session, say M. Each block
+ * device has its own queue (hctx) for each CPU, so eventually we can
+ * put that number of queues (hctxs) to sleep: M x nr_cpu_ids. If the
+ * number of permits N < M x nr_cpu_ids, we will eventually get an IO hang.
+ *
+ * To avoid this hang last caller of rnbd_put_permit() (last caller is the
+ * one who observes sess->busy == 0) must wake up all remaining queues.
+ *
+ * Context:
+ * Does not matter.
+ */
+static void rnbd_rerun_all_if_idle(struct rnbd_clt_session *sess)
+{
+ bool requeued;
+
+ do {
+ requeued = rnbd_rerun_if_needed(sess);
+ } while (atomic_read(&sess->busy) == 0 && requeued);
+}
+
+static struct rtrs_permit *rnbd_get_permit(struct rnbd_clt_session *sess,
+ enum rtrs_clt_con_type con_type,
+ int wait)
+{
+ struct rtrs_permit *permit;
+
+ permit = rtrs_clt_get_permit(sess->rtrs, con_type,
+ wait ? RTRS_PERMIT_WAIT :
+ RTRS_PERMIT_NOWAIT);
+ if (likely(permit))
+ /* We have a subtle rare case here, when all permits can be
+ * consumed before the busy counter is increased. This is safe,
+ * because the loser will get NULL as a permit, observe a zero
+ * busy counter and immediately restart the queue itself.
+ */
+ atomic_inc(&sess->busy);
+
+ return permit;
+}
+
+static void rnbd_put_permit(struct rnbd_clt_session *sess,
+ struct rtrs_permit *permit)
+{
+ rtrs_clt_put_permit(sess->rtrs, permit);
+ atomic_dec(&sess->busy);
+ /* Paired with rnbd_clt_dev_add_to_requeue(). Decrement first
+ * and then check queue bits.
+ */
+ smp_mb__after_atomic();
+ rnbd_rerun_all_if_idle(sess);
+}
+
+static struct rnbd_iu *rnbd_get_iu(struct rnbd_clt_session *sess,
+ enum rtrs_clt_con_type con_type,
+ int wait)
+{
+ struct rnbd_iu *iu;
+ struct rtrs_permit *permit;
+
+ permit = rnbd_get_permit(sess, con_type,
+ wait ? RTRS_PERMIT_WAIT :
+ RTRS_PERMIT_NOWAIT);
+ if (unlikely(!permit))
+ return NULL;
+ iu = rtrs_permit_to_pdu(permit);
+ iu->permit = permit;
+ /*
+ * The 1st reference is dropped after finishing sending a "user" message,
+ * the 2nd reference is dropped after the confirmation with the response
+ * is returned.
+ * 1st and 2nd can happen in any order, so the rnbd_iu is released
+ * (the rtrs_permit is returned to RTRS) only after both are finished.
+ */
+ atomic_set(&iu->refcount, 2);
+ init_waitqueue_head(&iu->comp.wait);
+ iu->comp.errno = INT_MAX;
+
+ return iu;
+}
+
+static void rnbd_put_iu(struct rnbd_clt_session *sess, struct rnbd_iu *iu)
+{
+ if (atomic_dec_and_test(&iu->refcount))
+ rnbd_put_permit(sess, iu->permit);
+}
+
+static void rnbd_softirq_done_fn(struct request *rq)
+{
+ struct rnbd_clt_dev *dev = rq->rq_disk->private_data;
+ struct rnbd_clt_session *sess = dev->sess;
+ struct rnbd_iu *iu;
+
+ iu = blk_mq_rq_to_pdu(rq);
+ rnbd_put_permit(sess, iu->permit);
+ blk_mq_end_request(rq, errno_to_blk_status(iu->errno));
+}
+
+static void msg_io_conf(void *priv, int errno)
+{
+ struct rnbd_iu *iu = priv;
+ struct rnbd_clt_dev *dev = iu->dev;
+ struct request *rq = iu->rq;
+ int rw = rq_data_dir(rq);
+
+ iu->errno = errno;
+
+ blk_mq_complete_request(rq);
+
+ if (errno)
+ rnbd_clt_info_rl(dev, "%s I/O failed with err: %d\n",
+ rw == READ ? "read" : "write", errno);
+}
+
+static void wake_up_iu_comp(struct rnbd_iu *iu, int errno)
+{
+ iu->comp.errno = errno;
+ wake_up(&iu->comp.wait);
+}
+
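+/*
+ * The confirmation callback is presumably invoked from a context where
+ * sleeping is not allowed, hence the real processing (msg_open_conf(),
+ * msg_close_conf(), msg_sess_info_conf()) is deferred to a work item.
+ */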
+static void msg_conf(void *priv, int errno)
+{
+ struct rnbd_iu *iu = priv;
+
+ iu->errno = errno;
+ schedule_work(&iu->work);
+}
+
+enum wait_type {
+ NO_WAIT = 0,
+ WAIT = 1
+};
+
+static int send_usr_msg(struct rtrs_clt *rtrs, int dir,
+ struct rnbd_iu *iu, struct kvec *vec, size_t nr,
+ size_t len, struct scatterlist *sg, unsigned int sg_len,
+ void (*conf)(struct work_struct *work),
+ int *errno, enum wait_type wait)
+{
+ int err;
+ struct rtrs_clt_req_ops req_ops;
+
+ INIT_WORK(&iu->work, conf);
+ req_ops = (struct rtrs_clt_req_ops) {
+ .priv = iu,
+ .conf_fn = msg_conf,
+ };
+ err = rtrs_clt_request(dir, &req_ops, rtrs, iu->permit,
+ vec, nr, len, sg, sg_len);
+ if (!err && wait) {
+ wait_event(iu->comp.wait, iu->comp.errno != INT_MAX);
+ *errno = iu->comp.errno;
+ } else {
+ *errno = 0;
+ }
+
+ return err;
+}
+
+static void msg_close_conf(struct work_struct *work)
+{
+ struct rnbd_iu *iu = container_of(work, struct rnbd_iu, work);
+ struct rnbd_clt_dev *dev = iu->dev;
+
+ wake_up_iu_comp(iu, iu->errno);
+ rnbd_put_iu(dev->sess, iu);
+ rnbd_clt_put_dev(dev);
+}
+
+static int send_msg_close(struct rnbd_clt_dev *dev, u32 device_id, bool wait)
+{
+ struct rnbd_clt_session *sess = dev->sess;
+ struct rnbd_msg_close msg;
+ struct rnbd_iu *iu;
+ struct kvec vec = {
+ .iov_base = &msg,
+ .iov_len = sizeof(msg)
+ };
+ int err, errno;
+
+ iu = rnbd_get_iu(sess, RTRS_ADMIN_CON, RTRS_PERMIT_WAIT);
+ if (!iu)
+ return -ENOMEM;
+
+ iu->buf = NULL;
+ iu->dev = dev;
+
+ sg_mark_end(&iu->sglist[0]);
+
+ msg.hdr.type = cpu_to_le16(RNBD_MSG_CLOSE);
+ msg.device_id = cpu_to_le32(device_id);
+
+ WARN_ON(!rnbd_clt_get_dev(dev));
+ err = send_usr_msg(sess->rtrs, WRITE, iu, &vec, 1, 0, NULL, 0,
+ msg_close_conf, &errno, wait);
+ if (err) {
+ rnbd_clt_put_dev(dev);
+ rnbd_put_iu(sess, iu);
+ } else {
+ err = errno;
+ }
+
+ rnbd_put_iu(sess, iu);
+ return err;
+}
+
+static void msg_open_conf(struct work_struct *work)
+{
+ struct rnbd_iu *iu = container_of(work, struct rnbd_iu, work);
+ struct rnbd_msg_open_rsp *rsp = iu->buf;
+ struct rnbd_clt_dev *dev = iu->dev;
+ int errno = iu->errno;
+
+ if (errno) {
+ rnbd_clt_err(dev,
+ "Opening failed, server responded: %d\n",
+ errno);
+ } else {
+ errno = process_msg_open_rsp(dev, rsp);
+ if (errno) {
+ u32 device_id = le32_to_cpu(rsp->device_id);
+ /*
+ * If the server thinks it's fine, but we fail to process the
+ * response, then be nice and send a close to the server.
+ */
+ (void)send_msg_close(dev, device_id, NO_WAIT);
+ }
+ }
+ kfree(rsp);
+ wake_up_iu_comp(iu, errno);
+ rnbd_put_iu(dev->sess, iu);
+ rnbd_clt_put_dev(dev);
+}
+
+static void msg_sess_info_conf(struct work_struct *work)
+{
+ struct rnbd_iu *iu = container_of(work, struct rnbd_iu, work);
+ struct rnbd_msg_sess_info_rsp *rsp = iu->buf;
+ struct rnbd_clt_session *sess = iu->sess;
+
+ if (!iu->errno)
+ sess->ver = min_t(u8, rsp->ver, RNBD_PROTO_VER_MAJOR);
+
+ kfree(rsp);
+ wake_up_iu_comp(iu, iu->errno);
+ rnbd_put_iu(sess, iu);
+ rnbd_clt_put_sess(sess);
+}
+
+static int send_msg_open(struct rnbd_clt_dev *dev, bool wait)
+{
+ struct rnbd_clt_session *sess = dev->sess;
+ struct rnbd_msg_open_rsp *rsp;
+ struct rnbd_msg_open msg;
+ struct rnbd_iu *iu;
+ struct kvec vec = {
+ .iov_base = &msg,
+ .iov_len = sizeof(msg)
+ };
+ int err, errno;
+
+ rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
+ if (!rsp)
+ return -ENOMEM;
+
+ iu = rnbd_get_iu(sess, RTRS_ADMIN_CON, RTRS_PERMIT_WAIT);
+ if (!iu) {
+ kfree(rsp);
+ return -ENOMEM;
+ }
+
+ iu->buf = rsp;
+ iu->dev = dev;
+
+ sg_init_one(iu->sglist, rsp, sizeof(*rsp));
+
+ msg.hdr.type = cpu_to_le16(RNBD_MSG_OPEN);
+ msg.access_mode = dev->access_mode;
+ strlcpy(msg.dev_name, dev->pathname, sizeof(msg.dev_name));
+
+ WARN_ON(!rnbd_clt_get_dev(dev));
+ err = send_usr_msg(sess->rtrs, READ, iu,
+ &vec, 1, sizeof(*rsp), iu->sglist, 1,
+ msg_open_conf, &errno, wait);
+ if (err) {
+ rnbd_clt_put_dev(dev);
+ rnbd_put_iu(sess, iu);
+ kfree(rsp);
+ } else {
+ err = errno;
+ }
+
+ rnbd_put_iu(sess, iu);
+ return err;
+}
+
+static int send_msg_sess_info(struct rnbd_clt_session *sess, bool wait)
+{
+ struct rnbd_msg_sess_info_rsp *rsp;
+ struct rnbd_msg_sess_info msg;
+ struct rnbd_iu *iu;
+ struct kvec vec = {
+ .iov_base = &msg,
+ .iov_len = sizeof(msg)
+ };
+ int err, errno;
+
+ rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
+ if (!rsp)
+ return -ENOMEM;
+
+ iu = rnbd_get_iu(sess, RTRS_ADMIN_CON, RTRS_PERMIT_WAIT);
+ if (!iu) {
+ kfree(rsp);
+ return -ENOMEM;
+ }
+
+ iu->buf = rsp;
+ iu->sess = sess;
+
+ sg_init_one(iu->sglist, rsp, sizeof(*rsp));
+
+ msg.hdr.type = cpu_to_le16(RNBD_MSG_SESS_INFO);
+ msg.ver = RNBD_PROTO_VER_MAJOR;
+
+ if (!rnbd_clt_get_sess(sess)) {
+ /*
+ * That can happen in only one case: RTRS has re-established
+ * the connection and link_ev() is called, but the session is
+ * almost dead, the last reference on the session has been put
+ * and the caller is waiting for RTRS to close everything.
+ */
+ err = -ENODEV;
+ goto put_iu;
+ }
+ err = send_usr_msg(sess->rtrs, READ, iu,
+ &vec, 1, sizeof(*rsp), iu->sglist, 1,
+ msg_sess_info_conf, &errno, wait);
+ if (err) {
+ rnbd_clt_put_sess(sess);
+put_iu:
+ rnbd_put_iu(sess, iu);
+ kfree(rsp);
+ } else {
+ err = errno;
+ }
+
+ rnbd_put_iu(sess, iu);
+ return err;
+}
+
+static void set_dev_states_to_disconnected(struct rnbd_clt_session *sess)
+{
+ struct rnbd_clt_dev *dev;
+
+ mutex_lock(&sess->lock);
+ list_for_each_entry(dev, &sess->devs_list, list) {
+ rnbd_clt_err(dev, "Device disconnected.\n");
+
+ mutex_lock(&dev->lock);
+ if (dev->dev_state == DEV_STATE_MAPPED)
+ dev->dev_state = DEV_STATE_MAPPED_DISCONNECTED;
+ mutex_unlock(&dev->lock);
+ }
+ mutex_unlock(&sess->lock);
+}
+
+static void remap_devs(struct rnbd_clt_session *sess)
+{
+ struct rnbd_clt_dev *dev;
+ struct rtrs_attrs attrs;
+ int err;
+
+ /*
+ * Careful here: we are called from the RTRS link event directly,
+ * thus we can't send any RTRS request and wait for a response,
+ * or RTRS will not be able to complete the request with failure
+ * if something goes wrong (failing of outstanding requests
+ * happens exactly from the context where we are blocking now).
+ *
+ * So to avoid deadlocks each usr message sent from here must
+ * be asynchronous.
+ */
+
+ err = send_msg_sess_info(sess, NO_WAIT);
+ if (err) {
+ pr_err("send_msg_sess_info(\"%s\"): %d\n", sess->sessname, err);
+ return;
+ }
+
+ rtrs_clt_query(sess->rtrs, &attrs);
+ mutex_lock(&sess->lock);
+ sess->max_io_size = attrs.max_io_size;
+
+ list_for_each_entry(dev, &sess->devs_list, list) {
+ bool skip;
+
+ mutex_lock(&dev->lock);
+ skip = (dev->dev_state == DEV_STATE_INIT);
+ mutex_unlock(&dev->lock);
+ if (skip)
+ /*
+ * When the device is establishing a connection for the
+ * first time, do not remap - it will be closed soon.
+ */
+ continue;
+
+ rnbd_clt_info(dev, "session reconnected, remapping device\n");
+ err = send_msg_open(dev, NO_WAIT);
+ if (err) {
+ rnbd_clt_err(dev, "send_msg_open(): %d\n", err);
+ break;
+ }
+ }
+ mutex_unlock(&sess->lock);
+}
+
+static void rnbd_clt_link_ev(void *priv, enum rtrs_clt_link_ev ev)
+{
+ struct rnbd_clt_session *sess = priv;
+
+ switch (ev) {
+ case RTRS_CLT_LINK_EV_DISCONNECTED:
+ set_dev_states_to_disconnected(sess);
+ break;
+ case RTRS_CLT_LINK_EV_RECONNECTED:
+ remap_devs(sess);
+ break;
+ default:
+ pr_err("Unknown session event received (%d), session: %s\n",
+ ev, sess->sessname);
+ }
+}
+
+static void rnbd_init_cpu_qlists(struct rnbd_cpu_qlist __percpu *cpu_queues)
+{
+ unsigned int cpu;
+ struct rnbd_cpu_qlist *cpu_q;
+
+ for_each_possible_cpu(cpu) {
+ cpu_q = per_cpu_ptr(cpu_queues, cpu);
+
+ cpu_q->cpu = cpu;
+ INIT_LIST_HEAD(&cpu_q->requeue_list);
+ spin_lock_init(&cpu_q->requeue_lock);
+ }
+}
+
+static void destroy_mq_tags(struct rnbd_clt_session *sess)
+{
+ if (sess->tag_set.tags)
+ blk_mq_free_tag_set(&sess->tag_set);
+}
+
+static inline void wake_up_rtrs_waiters(struct rnbd_clt_session *sess)
+{
+ sess->rtrs_ready = true;
+ wake_up_all(&sess->rtrs_waitq);
+}
+
+static void close_rtrs(struct rnbd_clt_session *sess)
+{
+ might_sleep();
+
+ if (!IS_ERR_OR_NULL(sess->rtrs)) {
+ rtrs_clt_close(sess->rtrs);
+ sess->rtrs = NULL;
+ wake_up_rtrs_waiters(sess);
+ }
+}
+
+static void free_sess(struct rnbd_clt_session *sess)
+{
+ WARN_ON(!list_empty(&sess->devs_list));
+
+ might_sleep();
+
+ close_rtrs(sess);
+ destroy_mq_tags(sess);
+ if (!list_empty(&sess->list)) {
+ mutex_lock(&sess_lock);
+ list_del(&sess->list);
+ mutex_unlock(&sess_lock);
+ }
+ free_percpu(sess->cpu_queues);
+ free_percpu(sess->cpu_rr);
+ mutex_destroy(&sess->lock);
+ kfree(sess);
+}
+
+static struct rnbd_clt_session *alloc_sess(const char *sessname)
+{
+ struct rnbd_clt_session *sess;
+ int err, cpu;
+
+ sess = kzalloc_node(sizeof(*sess), GFP_KERNEL, NUMA_NO_NODE);
+ if (!sess)
+ return ERR_PTR(-ENOMEM);
+ strlcpy(sess->sessname, sessname, sizeof(sess->sessname));
+ atomic_set(&sess->busy, 0);
+ mutex_init(&sess->lock);
+ INIT_LIST_HEAD(&sess->devs_list);
+ INIT_LIST_HEAD(&sess->list);
+ bitmap_zero(sess->cpu_queues_bm, NR_CPUS);
+ init_waitqueue_head(&sess->rtrs_waitq);
+ refcount_set(&sess->refcount, 1);
+
+ sess->cpu_queues = alloc_percpu(struct rnbd_cpu_qlist);
+ if (!sess->cpu_queues) {
+ err = -ENOMEM;
+ goto err;
+ }
+ rnbd_init_cpu_qlists(sess->cpu_queues);
+
+ /*
+ * That is a simple percpu variable which stores CPU indices, which
+ * are incremented on each access. We need that for the sake of
+ * fairness, to wake up queues in a round-robin manner.
+ */
+ sess->cpu_rr = alloc_percpu(int);
+ if (!sess->cpu_rr) {
+ err = -ENOMEM;
+ goto err;
+ }
+ for_each_possible_cpu(cpu)
+ * per_cpu_ptr(sess->cpu_rr, cpu) = cpu;
+
+ return sess;
+
+err:
+ free_sess(sess);
+
+ return ERR_PTR(err);
+}
+
+static int wait_for_rtrs_connection(struct rnbd_clt_session *sess)
+{
+ wait_event(sess->rtrs_waitq, sess->rtrs_ready);
+ if (IS_ERR_OR_NULL(sess->rtrs))
+ return -ECONNRESET;
+
+ return 0;
+}
+
+static void wait_for_rtrs_disconnection(struct rnbd_clt_session *sess)
+ __releases(&sess_lock)
+ __acquires(&sess_lock)
+{
+ DEFINE_WAIT(wait);
+
+ prepare_to_wait(&sess->rtrs_waitq, &wait, TASK_UNINTERRUPTIBLE);
+ if (IS_ERR_OR_NULL(sess->rtrs)) {
+ finish_wait(&sess->rtrs_waitq, &wait);
+ return;
+ }
+ mutex_unlock(&sess_lock);
+ /* loop in caller, see __find_and_get_sess().
+ * You can't leave mutex locked and call schedule(), you will catch a
+ * deadlock with a caller of free_sess(), which has just put the last
+ * reference and is about to take the sess_lock in order to delete
+ * the session from the list.
+ */
+ schedule();
+ mutex_lock(&sess_lock);
+}
+
+static struct rnbd_clt_session *__find_and_get_sess(const char *sessname)
+ __releases(&sess_lock)
+ __acquires(&sess_lock)
+{
+ struct rnbd_clt_session *sess, *sn;
+ int err;
+
+again:
+ list_for_each_entry_safe(sess, sn, &sess_list, list) {
+ if (strcmp(sessname, sess->sessname))
+ continue;
+
+ if (sess->rtrs_ready && IS_ERR_OR_NULL(sess->rtrs))
+ /*
+ * No RTRS connection, session is dying.
+ */
+ continue;
+
+ if (rnbd_clt_get_sess(sess)) {
+ /*
+ * Alive session is found, wait for RTRS connection.
+ */
+ mutex_unlock(&sess_lock);
+ err = wait_for_rtrs_connection(sess);
+ if (err)
+ rnbd_clt_put_sess(sess);
+ mutex_lock(&sess_lock);
+
+ if (err)
+ /* Session is dying, repeat the loop */
+ goto again;
+
+ return sess;
+ }
+ /*
+ * Ref is 0, session is dying, wait for RTRS disconnect
+ * in order to avoid session names clashes.
+ */
+ wait_for_rtrs_disconnection(sess);
+ /*
+ * RTRS is disconnected and soon session will be freed,
+ * so repeat a loop.
+ */
+ goto again;
+ }
+
+ return NULL;
+}
+
+static struct
+rnbd_clt_session *find_or_create_sess(const char *sessname, bool *first)
+{
+ struct rnbd_clt_session *sess = NULL;
+
+ mutex_lock(&sess_lock);
+ sess = __find_and_get_sess(sessname);
+ if (!sess) {
+ sess = alloc_sess(sessname);
+ if (IS_ERR(sess)) {
+ mutex_unlock(&sess_lock);
+ return sess;
+ }
+ list_add(&sess->list, &sess_list);
+ *first = true;
+ } else
+ *first = false;
+ mutex_unlock(&sess_lock);
+
+ return sess;
+}
+
+static int rnbd_client_open(struct block_device *block_device, fmode_t mode)
+{
+ struct rnbd_clt_dev *dev = block_device->bd_disk->private_data;
+
+ if (dev->read_only && (mode & FMODE_WRITE))
+ return -EPERM;
+
+ if (dev->dev_state == DEV_STATE_UNMAPPED ||
+ !rnbd_clt_get_dev(dev))
+ return -EIO;
+
+ return 0;
+}
+
+static void rnbd_client_release(struct gendisk *gen, fmode_t mode)
+{
+ struct rnbd_clt_dev *dev = gen->private_data;
+
+ rnbd_clt_put_dev(dev);
+}
+
+static int rnbd_client_getgeo(struct block_device *block_device,
+ struct hd_geometry *geo)
+{
+ u64 size;
+ struct rnbd_clt_dev *dev;
+
+ dev = block_device->bd_disk->private_data;
+ size = dev->size * (dev->logical_block_size / SECTOR_SIZE);
+ geo->cylinders = size >> 6; /* size/64 */
+ geo->heads = 4;
+ geo->sectors = 16;
+ geo->start = 0;
+
+ return 0;
+}
+
+static const struct block_device_operations rnbd_client_ops = {
+ .owner = THIS_MODULE,
+ .open = rnbd_client_open,
+ .release = rnbd_client_release,
+ .getgeo = rnbd_client_getgeo
+};
+
+/* The amount of data that belongs to an I/O and the amount of data that
+ * should be read or written to the disk (bi_size) can differ.
+ *
+ * E.g. When WRITE_SAME is used, only a small amount of data is
+ * transferred that is then written repeatedly over a lot of sectors.
+ *
+ * Get the size of data to be transferred via RTRS by summing up the size
+ * of the scatter-gather list entries.
+ */
+static size_t rnbd_clt_get_sg_size(struct scatterlist *sglist, u32 len)
+{
+ struct scatterlist *sg;
+ size_t tsize = 0;
+ int i;
+
+ for_each_sg(sglist, sg, len, i)
+ tsize += sg->length;
+ return tsize;
+}
+
+static int rnbd_client_xfer_request(struct rnbd_clt_dev *dev,
+ struct request *rq,
+ struct rnbd_iu *iu)
+{
+ struct rtrs_clt *rtrs = dev->sess->rtrs;
+ struct rtrs_permit *permit = iu->permit;
+ struct rnbd_msg_io msg;
+ struct rtrs_clt_req_ops req_ops;
+ unsigned int sg_cnt = 0;
+ struct kvec vec;
+ size_t size;
+ int err;
+
+ iu->rq = rq;
+ iu->dev = dev;
+ msg.sector = cpu_to_le64(blk_rq_pos(rq));
+ msg.bi_size = cpu_to_le32(blk_rq_bytes(rq));
+ msg.rw = cpu_to_le32(rq_to_rnbd_flags(rq));
+ msg.prio = cpu_to_le16(req_get_ioprio(rq));
+
+ /*
+ * We only support discards with a single segment for now.
+ * See queue limits.
+ */
+ if (req_op(rq) != REQ_OP_DISCARD)
+ sg_cnt = blk_rq_map_sg(dev->queue, rq, iu->sglist);
+
+ if (sg_cnt == 0)
+ /* Do not forget to mark the end */
+ sg_mark_end(&iu->sglist[0]);
+
+ msg.hdr.type = cpu_to_le16(RNBD_MSG_IO);
+ msg.device_id = cpu_to_le32(dev->device_id);
+
+ vec = (struct kvec) {
+ .iov_base = &msg,
+ .iov_len = sizeof(msg)
+ };
+ size = rnbd_clt_get_sg_size(iu->sglist, sg_cnt);
+ req_ops = (struct rtrs_clt_req_ops) {
+ .priv = iu,
+ .conf_fn = msg_io_conf,
+ };
+ err = rtrs_clt_request(rq_data_dir(rq), &req_ops, rtrs, permit,
+ &vec, 1, size, iu->sglist, sg_cnt);
+ if (unlikely(err)) {
+ rnbd_clt_err_rl(dev, "RTRS failed to transfer IO, err: %d\n",
+ err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * rnbd_clt_dev_add_to_requeue() - add device to requeue if session is busy
+ * @dev: Device to be checked
+ * @q: Queue to be added to the requeue list if required
+ *
+ * Description:
+ * If the session is busy, someone will requeue us when resources
+ * are freed. If the session is not doing anything, the device is not
+ * added to the list and false is returned.
+ */
+static bool rnbd_clt_dev_add_to_requeue(struct rnbd_clt_dev *dev,
+ struct rnbd_queue *q)
+{
+ struct rnbd_clt_session *sess = dev->sess;
+ struct rnbd_cpu_qlist *cpu_q;
+ unsigned long flags;
+ bool added = true;
+ bool need_set;
+
+ cpu_q = get_cpu_ptr(sess->cpu_queues);
+ spin_lock_irqsave(&cpu_q->requeue_lock, flags);
+
+ if (likely(!test_and_set_bit_lock(0, &q->in_list))) {
+ if (WARN_ON(!list_empty(&q->requeue_list)))
+ goto unlock;
+
+ need_set = !test_bit(cpu_q->cpu, sess->cpu_queues_bm);
+ if (need_set) {
+ set_bit(cpu_q->cpu, sess->cpu_queues_bm);
+ /* Paired with rnbd_put_permit(). Set a bit first
+ * and then observe the busy counter.
+ */
+ smp_mb__before_atomic();
+ }
+ if (likely(atomic_read(&sess->busy))) {
+ list_add_tail(&q->requeue_list, &cpu_q->requeue_list);
+ } else {
+ /* Very unlikely, but possible: busy counter was
+ * observed as zero. Drop all bits and return
+ * false to restart the queue by ourselves.
+ */
+ if (need_set)
+ clear_bit(cpu_q->cpu, sess->cpu_queues_bm);
+ clear_bit_unlock(0, &q->in_list);
+ added = false;
+ }
+ }
+unlock:
+ spin_unlock_irqrestore(&cpu_q->requeue_lock, flags);
+ put_cpu_ptr(sess->cpu_queues);
+
+ return added;
+}
+
+static void rnbd_clt_dev_kick_mq_queue(struct rnbd_clt_dev *dev,
+ struct blk_mq_hw_ctx *hctx,
+ int delay)
+{
+ struct rnbd_queue *q = hctx->driver_data;
+
+ if (delay != RNBD_DELAY_IFBUSY)
+ blk_mq_delay_run_hw_queue(hctx, delay);
+ else if (unlikely(!rnbd_clt_dev_add_to_requeue(dev, q)))
+ /*
+ * If session is not busy we have to restart
+ * the queue ourselves.
+ */
+ blk_mq_delay_run_hw_queue(hctx, 10/*ms*/);
+}
+
+static blk_status_t rnbd_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
+{
+ struct request *rq = bd->rq;
+ struct rnbd_clt_dev *dev = rq->rq_disk->private_data;
+ struct rnbd_iu *iu = blk_mq_rq_to_pdu(rq);
+ int err;
+
+ if (unlikely(dev->dev_state != DEV_STATE_MAPPED))
+ return BLK_STS_IOERR;
+
+ iu->permit = rnbd_get_permit(dev->sess, RTRS_IO_CON,
+ RTRS_PERMIT_NOWAIT);
+ if (unlikely(!iu->permit)) {
+ rnbd_clt_dev_kick_mq_queue(dev, hctx, RNBD_DELAY_IFBUSY);
+ return BLK_STS_RESOURCE;
+ }
+
+ blk_mq_start_request(rq);
+ err = rnbd_client_xfer_request(dev, rq, iu);
+ if (likely(err == 0))
+ return BLK_STS_OK;
+ if (unlikely(err == -EAGAIN || err == -ENOMEM)) {
+ rnbd_clt_dev_kick_mq_queue(dev, hctx, 10/*ms*/);
+ rnbd_put_permit(dev->sess, iu->permit);
+ return BLK_STS_RESOURCE;
+ }
+
+ rnbd_put_permit(dev->sess, iu->permit);
+ return BLK_STS_IOERR;
+}
+
+static int rnbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
+ unsigned int hctx_idx, unsigned int numa_node)
+{
+ struct rnbd_iu *iu = blk_mq_rq_to_pdu(rq);
+
+ sg_init_table(iu->sglist, BMAX_SEGMENTS);
+ return 0;
+}
+
+static struct blk_mq_ops rnbd_mq_ops = {
+ .queue_rq = rnbd_queue_rq,
+ .init_request = rnbd_init_request,
+ .complete = rnbd_softirq_done_fn,
+};
+
+static int setup_mq_tags(struct rnbd_clt_session *sess)
+{
+ struct blk_mq_tag_set *tag_set = &sess->tag_set;
+
+ memset(tag_set, 0, sizeof(*tag_set));
+ tag_set->ops = &rnbd_mq_ops;
+ tag_set->queue_depth = sess->queue_depth;
+ tag_set->numa_node = NUMA_NO_NODE;
+ tag_set->flags = BLK_MQ_F_SHOULD_MERGE |
+ BLK_MQ_F_TAG_SHARED;
+ tag_set->cmd_size = sizeof(struct rnbd_iu);
+ tag_set->nr_hw_queues = num_online_cpus();
+
+ return blk_mq_alloc_tag_set(tag_set);
+}
+
+static struct rnbd_clt_session *
+find_and_get_or_create_sess(const char *sessname,
+ const struct rtrs_addr *paths,
+ size_t path_cnt, u16 port_nr)
+{
+ struct rnbd_clt_session *sess;
+ struct rtrs_attrs attrs;
+ int err;
+ bool first;
+ struct rtrs_clt_ops rtrs_ops;
+
+ sess = find_or_create_sess(sessname, &first);
+ if (sess == ERR_PTR(-ENOMEM))
+ return ERR_PTR(-ENOMEM);
+ else if (!first)
+ return sess;
+
+ rtrs_ops = (struct rtrs_clt_ops) {
+ .priv = sess,
+ .link_ev = rnbd_clt_link_ev,
+ };
+ /*
+ * Nothing was found, establish rtrs connection and proceed further.
+ */
+ sess->rtrs = rtrs_clt_open(&rtrs_ops, sessname,
+ paths, path_cnt, port_nr,
+ sizeof(struct rnbd_iu),
+ RECONNECT_DELAY, BMAX_SEGMENTS,
+ BLK_MAX_SEGMENT_SIZE,
+ MAX_RECONNECTS);
+ if (IS_ERR(sess->rtrs)) {
+ err = PTR_ERR(sess->rtrs);
+ goto wake_up_and_put;
+ }
+ rtrs_clt_query(sess->rtrs, &attrs);
+ sess->max_io_size = attrs.max_io_size;
+ sess->queue_depth = attrs.queue_depth;
+
+ err = setup_mq_tags(sess);
+ if (err)
+ goto close_rtrs;
+
+ err = send_msg_sess_info(sess, WAIT);
+ if (err)
+ goto close_rtrs;
+
+ wake_up_rtrs_waiters(sess);
+
+ return sess;
+
+close_rtrs:
+ close_rtrs(sess);
+put_sess:
+ rnbd_clt_put_sess(sess);
+
+ return ERR_PTR(err);
+
+wake_up_and_put:
+ wake_up_rtrs_waiters(sess);
+ goto put_sess;
+}
+
+static inline void rnbd_init_hw_queue(struct rnbd_clt_dev *dev,
+ struct rnbd_queue *q,
+ struct blk_mq_hw_ctx *hctx)
+{
+ INIT_LIST_HEAD(&q->requeue_list);
+ q->dev = dev;
+ q->hctx = hctx;
+}
+
+static void rnbd_init_mq_hw_queues(struct rnbd_clt_dev *dev)
+{
+ int i;
+ struct blk_mq_hw_ctx *hctx;
+ struct rnbd_queue *q;
+
+ queue_for_each_hw_ctx(dev->queue, hctx, i) {
+ q = &dev->hw_queues[i];
+ rnbd_init_hw_queue(dev, q, hctx);
+ hctx->driver_data = q;
+ }
+}
+
+static int setup_mq_dev(struct rnbd_clt_dev *dev)
+{
+ dev->queue = blk_mq_init_queue(&dev->sess->tag_set);
+ if (IS_ERR(dev->queue)) {
+ rnbd_clt_err(dev, "Initializing multiqueue queue failed, err: %ld\n",
+ PTR_ERR(dev->queue));
+ return PTR_ERR(dev->queue);
+ }
+ rnbd_init_mq_hw_queues(dev);
+ return 0;
+}
+
+static void setup_request_queue(struct rnbd_clt_dev *dev)
+{
+ blk_queue_logical_block_size(dev->queue, dev->logical_block_size);
+ blk_queue_physical_block_size(dev->queue, dev->physical_block_size);
+ blk_queue_max_hw_sectors(dev->queue, dev->max_hw_sectors);
+ blk_queue_max_write_same_sectors(dev->queue,
+ dev->max_write_same_sectors);
+
+ /*
+ * we don't support discards to "discontiguous" segments
+ * in one request
+ */
+ blk_queue_max_discard_segments(dev->queue, 1);
+
+ blk_queue_max_discard_sectors(dev->queue, dev->max_discard_sectors);
+ dev->queue->limits.discard_granularity = dev->discard_granularity;
+ dev->queue->limits.discard_alignment = dev->discard_alignment;
+ if (dev->max_discard_sectors)
+ blk_queue_flag_set(QUEUE_FLAG_DISCARD, dev->queue);
+ if (dev->secure_discard)
+ blk_queue_flag_set(QUEUE_FLAG_SECERASE, dev->queue);
+
+ blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, dev->queue);
+ blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, dev->queue);
+ blk_queue_max_segments(dev->queue, dev->max_segments);
+ blk_queue_io_opt(dev->queue, dev->sess->max_io_size);
+ blk_queue_virt_boundary(dev->queue, SZ_4K - 1);
+ blk_queue_write_cache(dev->queue, true, true);
+ dev->queue->queuedata = dev;
+}
+
+static void rnbd_clt_setup_gen_disk(struct rnbd_clt_dev *dev, int idx)
+{
+ dev->gd->major = rnbd_client_major;
+ dev->gd->first_minor = idx << RNBD_PART_BITS;
+ dev->gd->fops = &rnbd_client_ops;
+ dev->gd->queue = dev->queue;
+ dev->gd->private_data = dev;
+ snprintf(dev->gd->disk_name, sizeof(dev->gd->disk_name), "rnbd%d",
+ idx);
+ pr_debug("disk_name=%s, capacity=%zu\n",
+ dev->gd->disk_name,
+ dev->nsectors * (dev->logical_block_size / SECTOR_SIZE)
+ );
+
+ set_capacity(dev->gd, dev->nsectors);
+
+ if (dev->access_mode == RNBD_ACCESS_RO) {
+ dev->read_only = true;
+ set_disk_ro(dev->gd, true);
+ } else {
+ dev->read_only = false;
+ }
+
+ if (!dev->rotational)
+ blk_queue_flag_set(QUEUE_FLAG_NONROT, dev->queue);
+}
+
+static int rnbd_client_setup_device(struct rnbd_clt_session *sess,
+ struct rnbd_clt_dev *dev, int idx)
+{
+ int err;
+
+ dev->size = dev->nsectors * dev->logical_block_size;
+
+ err = setup_mq_dev(dev);
+ if (err)
+ return err;
+
+ setup_request_queue(dev);
+
+ dev->gd = alloc_disk_node(1 << RNBD_PART_BITS, NUMA_NO_NODE);
+ if (!dev->gd) {
+ blk_cleanup_queue(dev->queue);
+ return -ENOMEM;
+ }
+
+ rnbd_clt_setup_gen_disk(dev, idx);
+
+ return 0;
+}
+
+static struct rnbd_clt_dev *init_dev(struct rnbd_clt_session *sess,
+ enum rnbd_access_mode access_mode,
+ const char *pathname)
+{
+ struct rnbd_clt_dev *dev;
+ int ret;
+
+ dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, NUMA_NO_NODE);
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ dev->hw_queues = kcalloc(nr_cpu_ids, sizeof(*dev->hw_queues),
+ GFP_KERNEL);
+ if (!dev->hw_queues) {
+ ret = -ENOMEM;
+ goto out_alloc;
+ }
+
+ mutex_lock(&ida_lock);
+ ret = ida_simple_get(&index_ida, 0, 1 << (MINORBITS - RNBD_PART_BITS),
+ GFP_KERNEL);
+ mutex_unlock(&ida_lock);
+ if (ret < 0) {
+ pr_err("Failed to initialize device '%s' from session %s, allocating idr failed, err: %d\n",
+ pathname, sess->sessname, ret);
+ goto out_queues;
+ }
+ dev->clt_device_id = ret;
+ dev->sess = sess;
+ dev->access_mode = access_mode;
+ strlcpy(dev->pathname, pathname, sizeof(dev->pathname));
+ mutex_init(&dev->lock);
+ refcount_set(&dev->refcount, 1);
+ dev->dev_state = DEV_STATE_INIT;
+
+ /*
+ * We are called here from a sysfs entry, thus clt-sysfs is
+ * responsible for ensuring that the session does not disappear.
+ */
+ WARN_ON(!rnbd_clt_get_sess(sess));
+
+ return dev;
+
+out_queues:
+ kfree(dev->hw_queues);
+out_alloc:
+ kfree(dev);
+ return ERR_PTR(ret);
+}
+
+static bool __exists_dev(const char *pathname)
+{
+ struct rnbd_clt_session *sess;
+ struct rnbd_clt_dev *dev;
+ bool found = false;
+
+ list_for_each_entry(sess, &sess_list, list) {
+ mutex_lock(&sess->lock);
+ list_for_each_entry(dev, &sess->devs_list, list) {
+ if (!strncmp(dev->pathname, pathname,
+ sizeof(dev->pathname))) {
+ found = true;
+ break;
+ }
+ }
+ mutex_unlock(&sess->lock);
+ if (found)
+ break;
+ }
+
+ return found;
+}
+
+static bool exists_devpath(const char *pathname)
+{
+ bool found;
+
+ mutex_lock(&sess_lock);
+ found = __exists_dev(pathname);
+ mutex_unlock(&sess_lock);
+
+ return found;
+}
+
+static bool insert_dev_if_not_exists_devpath(const char *pathname,
+ struct rnbd_clt_session *sess,
+ struct rnbd_clt_dev *dev)
+{
+ bool found;
+
+ mutex_lock(&sess_lock);
+ found = __exists_dev(pathname);
+ if (!found) {
+ mutex_lock(&sess->lock);
+ list_add_tail(&dev->list, &sess->devs_list);
+ mutex_unlock(&sess->lock);
+ }
+ mutex_unlock(&sess_lock);
+
+ return found;
+}
+
+static void delete_dev(struct rnbd_clt_dev *dev)
+{
+ struct rnbd_clt_session *sess = dev->sess;
+
+ mutex_lock(&sess->lock);
+ list_del(&dev->list);
+ mutex_unlock(&sess->lock);
+}
+
+struct rnbd_clt_dev *rnbd_clt_map_device(const char *sessname,
+ struct rtrs_addr *paths,
+ size_t path_cnt, u16 port_nr,
+ const char *pathname,
+ enum rnbd_access_mode access_mode)
+{
+ struct rnbd_clt_session *sess;
+ struct rnbd_clt_dev *dev;
+ int ret;
+
+ if (exists_devpath(pathname))
+ return ERR_PTR(-EEXIST);
+
+ sess = find_and_get_or_create_sess(sessname, paths, path_cnt, port_nr);
+ if (IS_ERR(sess))
+ return ERR_CAST(sess);
+
+ dev = init_dev(sess, access_mode, pathname);
+ if (IS_ERR(dev)) {
+ pr_err("map_device: failed to map device '%s' from session %s, can't initialize device, err: %ld\n",
+ pathname, sess->sessname, PTR_ERR(dev));
+ ret = PTR_ERR(dev);
+ goto put_sess;
+ }
+ if (insert_dev_if_not_exists_devpath(pathname, sess, dev)) {
+ ret = -EEXIST;
+ goto put_dev;
+ }
+ ret = send_msg_open(dev, WAIT);
+ if (ret) {
+ rnbd_clt_err(dev,
+ "map_device: failed, can't open remote device, err: %d\n",
+ ret);
+ goto del_dev;
+ }
+ mutex_lock(&dev->lock);
+ pr_debug("Opened remote device: session=%s, path='%s'\n",
+ sess->sessname, pathname);
+ ret = rnbd_client_setup_device(sess, dev, dev->clt_device_id);
+ if (ret) {
+ rnbd_clt_err(dev,
+ "map_device: Failed to configure device, err: %d\n",
+ ret);
+ mutex_unlock(&dev->lock);
+ goto del_dev;
+ }
+
+ rnbd_clt_info(dev,
+ "map_device: Device mapped as %s (nsectors: %zu, logical_block_size: %d, physical_block_size: %d, max_write_same_sectors: %d, max_discard_sectors: %d, discard_granularity: %d, discard_alignment: %d, secure_discard: %d, max_segments: %d, max_hw_sectors: %d, rotational: %d)\n",
+ dev->gd->disk_name, dev->nsectors,
+ dev->logical_block_size, dev->physical_block_size,
+ dev->max_write_same_sectors, dev->max_discard_sectors,
+ dev->discard_granularity, dev->discard_alignment,
+ dev->secure_discard, dev->max_segments,
+ dev->max_hw_sectors, dev->rotational);
+
+ mutex_unlock(&dev->lock);
+
+ add_disk(dev->gd);
+ rnbd_clt_put_sess(sess);
+
+ return dev;
+
+del_dev:
+ delete_dev(dev);
+put_dev:
+ rnbd_clt_put_dev(dev);
+put_sess:
+ rnbd_clt_put_sess(sess);
+
+ return ERR_PTR(ret);
+}
+
+static void destroy_gen_disk(struct rnbd_clt_dev *dev)
+{
+ del_gendisk(dev->gd);
+ blk_cleanup_queue(dev->queue);
+ put_disk(dev->gd);
+}
+
+static void destroy_sysfs(struct rnbd_clt_dev *dev,
+ const struct attribute *sysfs_self)
+{
+ rnbd_clt_remove_dev_symlink(dev);
+ if (dev->kobj.state_initialized) {
+ if (sysfs_self)
+ /* To avoid a deadlock, remove the sysfs entry itself first */
+ sysfs_remove_file_self(&dev->kobj, sysfs_self);
+ kobject_del(&dev->kobj);
+ kobject_put(&dev->kobj);
+ }
+}
+
+int rnbd_clt_unmap_device(struct rnbd_clt_dev *dev, bool force,
+ const struct attribute *sysfs_self)
+{
+ struct rnbd_clt_session *sess = dev->sess;
+ int refcount, ret = 0;
+ bool was_mapped;
+
+ mutex_lock(&dev->lock);
+ if (dev->dev_state == DEV_STATE_UNMAPPED) {
+ rnbd_clt_info(dev, "Device is already being unmapped\n");
+ ret = -EALREADY;
+ goto err;
+ }
+ refcount = refcount_read(&dev->refcount);
+ if (!force && refcount > 1) {
+ rnbd_clt_err(dev,
+ "Closing device failed, device is in use, (%d device users)\n",
+ refcount - 1);
+ ret = -EBUSY;
+ goto err;
+ }
+ was_mapped = (dev->dev_state == DEV_STATE_MAPPED);
+ dev->dev_state = DEV_STATE_UNMAPPED;
+ mutex_unlock(&dev->lock);
+
+ delete_dev(dev);
+ destroy_sysfs(dev, sysfs_self);
+ destroy_gen_disk(dev);
+ if (was_mapped && sess->rtrs)
+ send_msg_close(dev, dev->device_id, WAIT);
+
+ rnbd_clt_info(dev, "Device is unmapped\n");
+
+ /* Likely last reference put */
+ rnbd_clt_put_dev(dev);
+
+ /*
+ * Here the device and the session may already have vanished!
+ */
+
+ return 0;
+err:
+ mutex_unlock(&dev->lock);
+
+ return ret;
+}
+
+int rnbd_clt_remap_device(struct rnbd_clt_dev *dev)
+{
+ int err;
+
+ mutex_lock(&dev->lock);
+ if (dev->dev_state == DEV_STATE_MAPPED_DISCONNECTED)
+ err = 0;
+ else if (dev->dev_state == DEV_STATE_UNMAPPED)
+ err = -ENODEV;
+ else if (dev->dev_state == DEV_STATE_MAPPED)
+ err = -EALREADY;
+ else
+ err = -EBUSY;
+ mutex_unlock(&dev->lock);
+ if (!err) {
+ rnbd_clt_info(dev, "Remapping device.\n");
+ err = send_msg_open(dev, WAIT);
+ if (err)
+ rnbd_clt_err(dev, "remap_device: %d\n", err);
+ }
+
+ return err;
+}
+
+static void unmap_device_work(struct work_struct *work)
+{
+ struct rnbd_clt_dev *dev;
+
+ dev = container_of(work, typeof(*dev), unmap_on_rmmod_work);
+ rnbd_clt_unmap_device(dev, true, NULL);
+}
+
+static void rnbd_destroy_sessions(void)
+{
+ struct rnbd_clt_session *sess, *sn;
+ struct rnbd_clt_dev *dev, *tn;
+
+ /* First forbid access through the sysfs interface */
+ rnbd_clt_destroy_default_group();
+ rnbd_clt_destroy_sysfs_files();
+
+ /*
+ * At this point there is no concurrent access to the session and
+ * device lists:
+ *   1. No new session or device can be created - the session sysfs
+ *      files have been removed.
+ *   2. No device or session can be removed - the module reference
+ *      taken in the unmap device sysfs callback prevents it.
+ *   3. No IO requests are in flight - each file open of a block_dev
+ *      takes a module reference in get_disk().
+ *
+ * However, user requests sent by the asynchronous send_msg_*()
+ * functions can still be in flight, so the RTRS session must be
+ * explicitly closed before the devices are unmapped.
+ */
+
+ list_for_each_entry_safe(sess, sn, &sess_list, list) {
+ WARN_ON(!rnbd_clt_get_sess(sess));
+ close_rtrs(sess);
+ list_for_each_entry_safe(dev, tn, &sess->devs_list, list) {
+ /*
+ * Unmapping happens in parallel for only one reason:
+ * blk_cleanup_queue() takes around half a second, so with a
+ * huge number of devices the whole module unload procedure
+ * would otherwise take minutes.
+ */
+ INIT_WORK(&dev->unmap_on_rmmod_work, unmap_device_work);
+ queue_work(system_long_wq, &dev->unmap_on_rmmod_work);
+ }
+ rnbd_clt_put_sess(sess);
+ }
+ /* Wait for all scheduled unmap works */
+ flush_workqueue(system_long_wq);
+ WARN_ON(!list_empty(&sess_list));
+}
+
+static int __init rnbd_client_init(void)
+{
+ int err = 0;
+
+ BUILD_BUG_ON(sizeof(struct rnbd_msg_hdr) != 4);
+ BUILD_BUG_ON(sizeof(struct rnbd_msg_sess_info) != 36);
+ BUILD_BUG_ON(sizeof(struct rnbd_msg_sess_info_rsp) != 36);
+ BUILD_BUG_ON(sizeof(struct rnbd_msg_open) != 264);
+ BUILD_BUG_ON(sizeof(struct rnbd_msg_close) != 8);
+ BUILD_BUG_ON(sizeof(struct rnbd_msg_open_rsp) != 56);
+ rnbd_client_major = register_blkdev(rnbd_client_major, "rnbd");
+ if (rnbd_client_major <= 0) {
+ pr_err("Failed to load module, block device registration failed\n");
+ return -EBUSY;
+ }
+
+ err = rnbd_clt_create_sysfs_files();
+ if (err) {
+ pr_err("Failed to load module, creating sysfs device files failed, err: %d\n",
+ err);
+ unregister_blkdev(rnbd_client_major, "rnbd");
+ }
+
+ return err;
+}
+
+static void __exit rnbd_client_exit(void)
+{
+ rnbd_destroy_sessions();
+ unregister_blkdev(rnbd_client_major, "rnbd");
+ ida_destroy(&index_ida);
+}
+
+module_init(rnbd_client_init);
+module_exit(rnbd_client_exit);
diff --git a/drivers/block/rnbd/rnbd-clt.h b/drivers/block/rnbd/rnbd-clt.h
new file mode 100644
index 000000000000..ed33654aa486
--- /dev/null
+++ b/drivers/block/rnbd/rnbd-clt.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * RDMA Network Block Driver
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+
+#ifndef RNBD_CLT_H
+#define RNBD_CLT_H
+
+#include <linux/wait.h>
+#include <linux/in.h>
+#include <linux/inet.h>
+#include <linux/blk-mq.h>
+#include <linux/refcount.h>
+
+#include <rtrs.h>
+#include "rnbd-proto.h"
+#include "rnbd-log.h"
+
+/* Max. number of segments per IO request: Mellanox ConnectX adapters
+ * (ConnectX through ConnectX-5) support a minimum of 30 segments; one is
+ * reserved for the internal protocol, so 29.
+ */
+#define BMAX_SEGMENTS 29
+/* Time in seconds between reconnect tries, defaults to 30 s */
+#define RECONNECT_DELAY 30
+/*
+ * Number of times to reconnect on error before giving up, 0 for disabled,
+ * -1 for forever.
+ */
+
+enum rnbd_clt_dev_state {
+ DEV_STATE_INIT,
+ DEV_STATE_MAPPED,
+ DEV_STATE_MAPPED_DISCONNECTED,
+ DEV_STATE_UNMAPPED,
+};
+
+struct rnbd_iu_comp {
+ wait_queue_head_t wait;
+ int errno;
+};
+
+struct rnbd_iu {
+ union {
+ struct request *rq; /* for block io */
+ void *buf; /* for user messages */
+ };
+ struct rtrs_permit *permit;
+ union {
+ /* used to send a msg associated with a dev */
+ struct rnbd_clt_dev *dev;
+ /* used to send a msg associated with a sess */
+ struct rnbd_clt_session *sess;
+ };
+ struct scatterlist sglist[BMAX_SEGMENTS];
+ struct work_struct work;
+ int errno;
+ struct rnbd_iu_comp comp;
+ atomic_t refcount;
+};
+
+struct rnbd_cpu_qlist {
+ struct list_head requeue_list;
+ spinlock_t requeue_lock;
+ unsigned int cpu;
+};
+
+struct rnbd_clt_session {
+ struct list_head list;
+ struct rtrs_clt *rtrs;
+ wait_queue_head_t rtrs_waitq;
+ bool rtrs_ready;
+ struct rnbd_cpu_qlist __percpu
+ *cpu_queues;
+ DECLARE_BITMAP(cpu_queues_bm, NR_CPUS);
+ int __percpu *cpu_rr; /* per-cpu var for CPU round-robin */
+ atomic_t busy;
+ int queue_depth;
+ u32 max_io_size;
+ struct blk_mq_tag_set tag_set;
+ struct mutex lock; /* protects state and devs_list */
+ struct list_head devs_list; /* list of struct rnbd_clt_dev */
+ refcount_t refcount;
+ char sessname[NAME_MAX];
+ u8 ver; /* protocol version */
+};
+
+/**
+ * Submission queues.
+ */
+struct rnbd_queue {
+ struct list_head requeue_list;
+ unsigned long in_list;
+ struct rnbd_clt_dev *dev;
+ struct blk_mq_hw_ctx *hctx;
+};
+
+struct rnbd_clt_dev {
+ struct rnbd_clt_session *sess;
+ struct request_queue *queue;
+ struct rnbd_queue *hw_queues;
+ u32 device_id;
+ /* local IDA index - used to track minor number allocations. */
+ u32 clt_device_id;
+ struct mutex lock;
+ enum rnbd_clt_dev_state dev_state;
+ char pathname[NAME_MAX];
+ enum rnbd_access_mode access_mode;
+ bool read_only;
+ bool rotational;
+ u32 max_hw_sectors;
+ u32 max_write_same_sectors;
+ u32 max_discard_sectors;
+ u32 discard_granularity;
+ u32 discard_alignment;
+ u16 secure_discard;
+ u16 physical_block_size;
+ u16 logical_block_size;
+ u16 max_segments;
+ size_t nsectors;
+ u64 size; /* device size in bytes */
+ struct list_head list;
+ struct gendisk *gd;
+ struct kobject kobj;
+ char blk_symlink_name[NAME_MAX];
+ refcount_t refcount;
+ struct work_struct unmap_on_rmmod_work;
+};
+
+/* rnbd-clt.c */
+
+struct rnbd_clt_dev *rnbd_clt_map_device(const char *sessname,
+ struct rtrs_addr *paths,
+ size_t path_cnt, u16 port_nr,
+ const char *pathname,
+ enum rnbd_access_mode access_mode);
+int rnbd_clt_unmap_device(struct rnbd_clt_dev *dev, bool force,
+ const struct attribute *sysfs_self);
+
+int rnbd_clt_remap_device(struct rnbd_clt_dev *dev);
+int rnbd_clt_resize_disk(struct rnbd_clt_dev *dev, size_t newsize);
+
+/* rnbd-clt-sysfs.c */
+
+int rnbd_clt_create_sysfs_files(void);
+
+void rnbd_clt_destroy_sysfs_files(void);
+void rnbd_clt_destroy_default_group(void);
+
+void rnbd_clt_remove_dev_symlink(struct rnbd_clt_dev *dev);
+
+#endif /* RNBD_CLT_H */
diff --git a/drivers/block/rnbd/rnbd-common.c b/drivers/block/rnbd/rnbd-common.c
new file mode 100644
index 000000000000..596c3f732403
--- /dev/null
+++ b/drivers/block/rnbd/rnbd-common.c
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RDMA Network Block Driver
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+#include "rnbd-proto.h"
+
+const char *rnbd_access_mode_str(enum rnbd_access_mode mode)
+{
+ switch (mode) {
+ case RNBD_ACCESS_RO:
+ return "ro";
+ case RNBD_ACCESS_RW:
+ return "rw";
+ case RNBD_ACCESS_MIGRATION:
+ return "migration";
+ default:
+ return "unknown";
+ }
+}
diff --git a/drivers/block/rnbd/rnbd-log.h b/drivers/block/rnbd/rnbd-log.h
new file mode 100644
index 000000000000..136e7d6c3451
--- /dev/null
+++ b/drivers/block/rnbd/rnbd-log.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * RDMA Network Block Driver
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+#ifndef RNBD_LOG_H
+#define RNBD_LOG_H
+
+#include "rnbd-clt.h"
+#include "rnbd-srv.h"
+
+#define rnbd_clt_log(fn, dev, fmt, ...) ( \
+ fn("<%s@%s> " fmt, (dev)->pathname, \
+ (dev)->sess->sessname, \
+ ##__VA_ARGS__))
+#define rnbd_srv_log(fn, dev, fmt, ...) ( \
+ fn("<%s@%s>: " fmt, (dev)->pathname, \
+ (dev)->sess->sessname, ##__VA_ARGS__))
+
+#define rnbd_clt_err(dev, fmt, ...) \
+ rnbd_clt_log(pr_err, dev, fmt, ##__VA_ARGS__)
+#define rnbd_clt_err_rl(dev, fmt, ...) \
+ rnbd_clt_log(pr_err_ratelimited, dev, fmt, ##__VA_ARGS__)
+#define rnbd_clt_info(dev, fmt, ...) \
+ rnbd_clt_log(pr_info, dev, fmt, ##__VA_ARGS__)
+#define rnbd_clt_info_rl(dev, fmt, ...) \
+ rnbd_clt_log(pr_info_ratelimited, dev, fmt, ##__VA_ARGS__)
+
+#define rnbd_srv_err(dev, fmt, ...) \
+ rnbd_srv_log(pr_err, dev, fmt, ##__VA_ARGS__)
+#define rnbd_srv_err_rl(dev, fmt, ...) \
+ rnbd_srv_log(pr_err_ratelimited, dev, fmt, ##__VA_ARGS__)
+#define rnbd_srv_info(dev, fmt, ...) \
+ rnbd_srv_log(pr_info, dev, fmt, ##__VA_ARGS__)
+#define rnbd_srv_info_rl(dev, fmt, ...) \
+ rnbd_srv_log(pr_info_ratelimited, dev, fmt, ##__VA_ARGS__)
+
+#endif /* RNBD_LOG_H */
diff --git a/drivers/block/rnbd/rnbd-proto.h b/drivers/block/rnbd/rnbd-proto.h
new file mode 100644
index 000000000000..ca166241452c
--- /dev/null
+++ b/drivers/block/rnbd/rnbd-proto.h
@@ -0,0 +1,303 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * RDMA Network Block Driver
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+#ifndef RNBD_PROTO_H
+#define RNBD_PROTO_H
+
+#include <linux/types.h>
+#include <linux/blkdev.h>
+#include <linux/limits.h>
+#include <linux/inet.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <rdma/ib.h>
+
+#define RNBD_PROTO_VER_MAJOR 2
+#define RNBD_PROTO_VER_MINOR 0
+
+/* The default port number the RTRS server is listening on. */
+#define RTRS_PORT 1234
+
+/**
+ * enum rnbd_msg_type - RNBD message types
+ * @RNBD_MSG_SESS_INFO: initial session info from client to server
+ * @RNBD_MSG_SESS_INFO_RSP: initial session info from server to client
+ * @RNBD_MSG_OPEN: open (map) device request
+ * @RNBD_MSG_OPEN_RSP: response to an @RNBD_MSG_OPEN
+ * @RNBD_MSG_IO: block IO request operation
+ * @RNBD_MSG_CLOSE: close (unmap) device request
+ */
+enum rnbd_msg_type {
+ RNBD_MSG_SESS_INFO,
+ RNBD_MSG_SESS_INFO_RSP,
+ RNBD_MSG_OPEN,
+ RNBD_MSG_OPEN_RSP,
+ RNBD_MSG_IO,
+ RNBD_MSG_CLOSE,
+};
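+
+/*
+ * Illustrative message flow (a sketch, not normative): after the RTRS
+ * session is established, the client first sends RNBD_MSG_SESS_INFO and
+ * receives RNBD_MSG_SESS_INFO_RSP to negotiate the protocol version.
+ * Each device is then mapped with RNBD_MSG_OPEN / RNBD_MSG_OPEN_RSP,
+ * block requests travel as RNBD_MSG_IO, and RNBD_MSG_CLOSE unmaps the
+ * device again:
+ *
+ *   client                              server
+ *   SESS_INFO          ----------->
+ *                      <-----------     SESS_INFO_RSP
+ *   OPEN(dev_name)     ----------->
+ *                      <-----------     OPEN_RSP (device_id, geometry)
+ *   IO(device_id)      ----------->
+ *   CLOSE(device_id)   ----------->
+ */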
+
+/**
+ * struct rnbd_msg_hdr - header of RNBD messages
+ * @type: Message type, valid values see: enum rnbd_msg_type
+ */
+struct rnbd_msg_hdr {
+ __le16 type;
+ __le16 __padding;
+};
+
+/**
+ * An RO mapping may be established many times, an RW mapping only once. One
+ * more RW mapping is allowed if MIGRATION is requested (a second RW export
+ * can be required, for example, for VM migration).
+ */
+enum rnbd_access_mode {
+ RNBD_ACCESS_RO,
+ RNBD_ACCESS_RW,
+ RNBD_ACCESS_MIGRATION,
+};
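+
+/*
+ * Worked example of the policy above (a sketch; enforcement lives on the
+ * server side in rnbd_srv_check_update_open_perm()): with a device already
+ * mapped RNBD_ACCESS_RW by one client, a second RNBD_ACCESS_RW request is
+ * rejected, a single RNBD_ACCESS_MIGRATION request is still accepted (at
+ * most two writers), and RNBD_ACCESS_RO requests are always accepted.
+ */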
+
+/**
+ * struct rnbd_msg_sess_info - initial session info from client to server
+ * @hdr: message header
+ * @ver: RNBD protocol version
+ */
+struct rnbd_msg_sess_info {
+ struct rnbd_msg_hdr hdr;
+ u8 ver;
+ u8 reserved[31];
+};
+
+/**
+ * struct rnbd_msg_sess_info_rsp - initial session info from server to client
+ * @hdr: message header
+ * @ver: RNBD protocol version
+ */
+struct rnbd_msg_sess_info_rsp {
+ struct rnbd_msg_hdr hdr;
+ u8 ver;
+ u8 reserved[31];
+};
+
+/**
+ * struct rnbd_msg_open - request to open a remote device.
+ * @hdr: message header
+ * @access_mode: the mode to open remote device, valid values see:
+ * enum rnbd_access_mode
+ * @device_name: device path on remote side
+ */
+struct rnbd_msg_open {
+ struct rnbd_msg_hdr hdr;
+ u8 access_mode;
+ u8 resv1;
+ s8 dev_name[NAME_MAX];
+ u8 reserved[3];
+};
+
+/**
+ * struct rnbd_msg_close - request to close a remote device.
+ * @hdr: message header
+ * @device_id: device_id on server side to identify the device
+ */
+struct rnbd_msg_close {
+ struct rnbd_msg_hdr hdr;
+ __le32 device_id;
+};
+
+/**
+ * struct rnbd_msg_open_rsp - response message to RNBD_MSG_OPEN
+ * @hdr: message header
+ * @device_id: device_id on server side to identify the device
+ * @nsectors: number of sectors in the usual 512b unit
+ * @max_hw_sectors: max hardware sectors in the usual 512b unit
+ * @max_write_same_sectors: max sectors for WRITE SAME in the 512b unit
+ * @max_discard_sectors: max. sectors that can be discarded at once in 512b
+ * unit.
+ * @discard_granularity: size of the internal discard allocation unit in bytes
+ * @discard_alignment: offset from internal allocation assignment in bytes
+ * @physical_block_size: physical block size device supports in bytes
+ * @logical_block_size: logical block size device supports in bytes
+ * @max_segments: max segments the hardware supports in one transfer
+ * @secure_discard: supports secure discard
+ * @rotational: set if the device is rotational (spinning disk)
+ */
+struct rnbd_msg_open_rsp {
+ struct rnbd_msg_hdr hdr;
+ __le32 device_id;
+ __le64 nsectors;
+ __le32 max_hw_sectors;
+ __le32 max_write_same_sectors;
+ __le32 max_discard_sectors;
+ __le32 discard_granularity;
+ __le32 discard_alignment;
+ __le16 physical_block_size;
+ __le16 logical_block_size;
+ __le16 max_segments;
+ __le16 secure_discard;
+ u8 rotational;
+ u8 reserved[11];
+};
+
+/**
+ * struct rnbd_msg_io - message for I/O read/write
+ * @hdr: message header
+ * @device_id: device_id on server side to find the right device
+ * @sector: bi_sector attribute from struct bio
+ * @rw: valid values are defined in enum rnbd_io_flags
+ * @bi_size: number of bytes for I/O read/write
+ * @prio: priority
+ */
+struct rnbd_msg_io {
+ struct rnbd_msg_hdr hdr;
+ __le32 device_id;
+ __le64 sector;
+ __le32 rw;
+ __le32 bi_size;
+ __le16 prio;
+};
+
+#define RNBD_OP_BITS 8
+#define RNBD_OP_MASK ((1 << RNBD_OP_BITS) - 1)
+
+/**
+ * enum rnbd_io_flags - RNBD request types from rq_flag_bits
+ * @RNBD_OP_READ: read sectors from the device
+ * @RNBD_OP_WRITE: write sectors to the device
+ * @RNBD_OP_FLUSH: flush the volatile write cache
+ * @RNBD_OP_DISCARD: discard sectors
+ * @RNBD_OP_SECURE_ERASE: securely erase sectors
+ * @RNBD_OP_WRITE_SAME: write the same sectors many times
+ *
+ * @RNBD_F_SYNC: request is sync (sync write or read)
+ * @RNBD_F_FUA: forced unit access
+ */
+enum rnbd_io_flags {
+
+ /* Operations */
+
+ RNBD_OP_READ = 0,
+ RNBD_OP_WRITE = 1,
+ RNBD_OP_FLUSH = 2,
+ RNBD_OP_DISCARD = 3,
+ RNBD_OP_SECURE_ERASE = 4,
+ RNBD_OP_WRITE_SAME = 5,
+
+ RNBD_OP_LAST,
+
+ /* Flags */
+
+ RNBD_F_SYNC = 1<<(RNBD_OP_BITS + 0),
+ RNBD_F_FUA = 1<<(RNBD_OP_BITS + 1),
+
+ RNBD_F_ALL = (RNBD_F_SYNC | RNBD_F_FUA)
+
+};
+
+static inline u32 rnbd_op(u32 flags)
+{
+ return flags & RNBD_OP_MASK;
+}
+
+static inline u32 rnbd_flags(u32 flags)
+{
+ return flags & ~RNBD_OP_MASK;
+}
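+
+/*
+ * Example decomposition (illustrative only): for a flags word built as
+ *
+ *   u32 opf = RNBD_OP_WRITE | RNBD_F_FUA;
+ *
+ * rnbd_op(opf) yields RNBD_OP_WRITE (the low RNBD_OP_BITS bits) and
+ * rnbd_flags(opf) yields RNBD_F_FUA (the bits above the operation field).
+ */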
+
+static inline bool rnbd_flags_supported(u32 flags)
+{
+ u32 op;
+
+ op = rnbd_op(flags);
+ flags = rnbd_flags(flags);
+
+ if (op >= RNBD_OP_LAST)
+ return false;
+ if (flags & ~RNBD_F_ALL)
+ return false;
+
+ return true;
+}
+
+static inline u32 rnbd_to_bio_flags(u32 rnbd_opf)
+{
+ u32 bio_opf;
+
+ switch (rnbd_op(rnbd_opf)) {
+ case RNBD_OP_READ:
+ bio_opf = REQ_OP_READ;
+ break;
+ case RNBD_OP_WRITE:
+ bio_opf = REQ_OP_WRITE;
+ break;
+ case RNBD_OP_FLUSH:
+ bio_opf = REQ_OP_FLUSH | REQ_PREFLUSH;
+ break;
+ case RNBD_OP_DISCARD:
+ bio_opf = REQ_OP_DISCARD;
+ break;
+ case RNBD_OP_SECURE_ERASE:
+ bio_opf = REQ_OP_SECURE_ERASE;
+ break;
+ case RNBD_OP_WRITE_SAME:
+ bio_opf = REQ_OP_WRITE_SAME;
+ break;
+ default:
+ WARN(1, "Unknown RNBD type: %d (flags %d)\n",
+ rnbd_op(rnbd_opf), rnbd_opf);
+ bio_opf = 0;
+ }
+
+ if (rnbd_opf & RNBD_F_SYNC)
+ bio_opf |= REQ_SYNC;
+
+ if (rnbd_opf & RNBD_F_FUA)
+ bio_opf |= REQ_FUA;
+
+ return bio_opf;
+}
+
+static inline u32 rq_to_rnbd_flags(struct request *rq)
+{
+ u32 rnbd_opf;
+
+ switch (req_op(rq)) {
+ case REQ_OP_READ:
+ rnbd_opf = RNBD_OP_READ;
+ break;
+ case REQ_OP_WRITE:
+ rnbd_opf = RNBD_OP_WRITE;
+ break;
+ case REQ_OP_DISCARD:
+ rnbd_opf = RNBD_OP_DISCARD;
+ break;
+ case REQ_OP_SECURE_ERASE:
+ rnbd_opf = RNBD_OP_SECURE_ERASE;
+ break;
+ case REQ_OP_WRITE_SAME:
+ rnbd_opf = RNBD_OP_WRITE_SAME;
+ break;
+ case REQ_OP_FLUSH:
+ rnbd_opf = RNBD_OP_FLUSH;
+ break;
+ default:
+ WARN(1, "Unknown request type %d (flags %llu)\n",
+ req_op(rq), (unsigned long long)rq->cmd_flags);
+ rnbd_opf = 0;
+ }
+
+ if (op_is_sync(rq->cmd_flags))
+ rnbd_opf |= RNBD_F_SYNC;
+
+ if (op_is_flush(rq->cmd_flags))
+ rnbd_opf |= RNBD_F_FUA;
+
+ return rnbd_opf;
+}
+
+const char *rnbd_access_mode_str(enum rnbd_access_mode mode);
+
+#endif /* RNBD_PROTO_H */
diff --git a/drivers/block/rnbd/rnbd-srv-dev.c b/drivers/block/rnbd/rnbd-srv-dev.c
new file mode 100644
index 000000000000..5eddfd29ab64
--- /dev/null
+++ b/drivers/block/rnbd/rnbd-srv-dev.c
@@ -0,0 +1,134 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RDMA Network Block Driver
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
+
+#include "rnbd-srv-dev.h"
+#include "rnbd-log.h"
+
+struct rnbd_dev *rnbd_dev_open(const char *path, fmode_t flags,
+ struct bio_set *bs)
+{
+ struct rnbd_dev *dev;
+ int ret;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ dev->blk_open_flags = flags;
+ dev->bdev = blkdev_get_by_path(path, flags, THIS_MODULE);
+ ret = PTR_ERR_OR_ZERO(dev->bdev);
+ if (ret)
+ goto err;
+
+ dev->blk_open_flags = flags;
+ bdevname(dev->bdev, dev->name);
+ dev->ibd_bio_set = bs;
+
+ return dev;
+
+err:
+ kfree(dev);
+ return ERR_PTR(ret);
+}
+
+void rnbd_dev_close(struct rnbd_dev *dev)
+{
+ blkdev_put(dev->bdev, dev->blk_open_flags);
+ kfree(dev);
+}
+
+static void rnbd_dev_bi_end_io(struct bio *bio)
+{
+ struct rnbd_dev_blk_io *io = bio->bi_private;
+
+ rnbd_endio(io->priv, blk_status_to_errno(bio->bi_status));
+ bio_put(bio);
+}
+
+/**
+ * rnbd_bio_map_kern - map kernel address into bio
+ * @data: pointer to buffer to map
+ * @bs: bio_set to use.
+ * @len: length in bytes
+ * @gfp_mask: allocation flags for bio allocation
+ *
+ * Map the kernel address into a bio suitable for io to a block
+ * device. Returns an error pointer in case of error.
+ */
+static struct bio *rnbd_bio_map_kern(void *data, struct bio_set *bs,
+ unsigned int len, gfp_t gfp_mask)
+{
+ unsigned long kaddr = (unsigned long)data;
+ unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ unsigned long start = kaddr >> PAGE_SHIFT;
+ const int nr_pages = end - start;
+ int offset, i;
+ struct bio *bio;
+
+ bio = bio_alloc_bioset(gfp_mask, nr_pages, bs);
+ if (!bio)
+ return ERR_PTR(-ENOMEM);
+
+ offset = offset_in_page(kaddr);
+ for (i = 0; i < nr_pages; i++) {
+ unsigned int bytes = PAGE_SIZE - offset;
+
+ if (len <= 0)
+ break;
+
+ if (bytes > len)
+ bytes = len;
+
+ if (bio_add_page(bio, virt_to_page(data), bytes,
+ offset) < bytes) {
+ /* we don't support partial mappings */
+ bio_put(bio);
+ return ERR_PTR(-EINVAL);
+ }
+
+ data += bytes;
+ len -= bytes;
+ offset = 0;
+ }
+
+ bio->bi_end_io = bio_put;
+ return bio;
+}
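+
+/*
+ * Worked example for the page arithmetic above (illustrative numbers,
+ * assuming PAGE_SIZE == 4096): a 12288-byte buffer that starts 512 bytes
+ * into a page gives end - start == 4, so the bio gets room for 4 pages -
+ * the tail of the first page, two full pages and the head of a fourth.
+ */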
+
+int rnbd_dev_submit_io(struct rnbd_dev *dev, sector_t sector, void *data,
+ size_t len, u32 bi_size, enum rnbd_io_flags flags,
+ short prio, void *priv)
+{
+ struct rnbd_dev_blk_io *io;
+ struct bio *bio;
+
+ /* Generate bio with pages pointing to the rdma buffer */
+ bio = rnbd_bio_map_kern(data, dev->ibd_bio_set, len, GFP_KERNEL);
+ if (IS_ERR(bio))
+ return PTR_ERR(bio);
+
+ io = container_of(bio, struct rnbd_dev_blk_io, bio);
+
+ io->dev = dev;
+ io->priv = priv;
+
+ bio->bi_end_io = rnbd_dev_bi_end_io;
+ bio->bi_private = io;
+ bio->bi_opf = rnbd_to_bio_flags(flags);
+ bio->bi_iter.bi_sector = sector;
+ bio->bi_iter.bi_size = bi_size;
+ bio_set_prio(bio, prio);
+ bio_set_dev(bio, dev->bdev);
+
+ submit_bio(bio);
+
+ return 0;
+}
diff --git a/drivers/block/rnbd/rnbd-srv-dev.h b/drivers/block/rnbd/rnbd-srv-dev.h
new file mode 100644
index 000000000000..0f65b09a270e
--- /dev/null
+++ b/drivers/block/rnbd/rnbd-srv-dev.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * RDMA Network Block Driver
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+#ifndef RNBD_SRV_DEV_H
+#define RNBD_SRV_DEV_H
+
+#include <linux/fs.h>
+#include "rnbd-proto.h"
+
+struct rnbd_dev {
+ struct block_device *bdev;
+ struct bio_set *ibd_bio_set;
+ fmode_t blk_open_flags;
+ char name[BDEVNAME_SIZE];
+};
+
+struct rnbd_dev_blk_io {
+ struct rnbd_dev *dev;
+ void *priv;
+ /* have to be last member for front_pad usage of bioset_init */
+ struct bio bio;
+};
+
+/**
+ * rnbd_dev_open() - Open a device
+ * @path: path to the block device to open
+ * @flags: open flags
+ * @bs: bio_set to use during block io
+ */
+struct rnbd_dev *rnbd_dev_open(const char *path, fmode_t flags,
+ struct bio_set *bs);
+
+/**
+ * rnbd_dev_close() - Close a device
+ */
+void rnbd_dev_close(struct rnbd_dev *dev);
+
+void rnbd_endio(void *priv, int error);
+
+static inline int rnbd_dev_get_max_segs(const struct rnbd_dev *dev)
+{
+ return queue_max_segments(bdev_get_queue(dev->bdev));
+}
+
+static inline int rnbd_dev_get_max_hw_sects(const struct rnbd_dev *dev)
+{
+ return queue_max_hw_sectors(bdev_get_queue(dev->bdev));
+}
+
+static inline int rnbd_dev_get_secure_discard(const struct rnbd_dev *dev)
+{
+ return blk_queue_secure_erase(bdev_get_queue(dev->bdev));
+}
+
+static inline int rnbd_dev_get_max_discard_sects(const struct rnbd_dev *dev)
+{
+ if (!blk_queue_discard(bdev_get_queue(dev->bdev)))
+ return 0;
+
+ return blk_queue_get_max_sectors(bdev_get_queue(dev->bdev),
+ REQ_OP_DISCARD);
+}
+
+static inline int rnbd_dev_get_discard_granularity(const struct rnbd_dev *dev)
+{
+ return bdev_get_queue(dev->bdev)->limits.discard_granularity;
+}
+
+static inline int rnbd_dev_get_discard_alignment(const struct rnbd_dev *dev)
+{
+ return bdev_get_queue(dev->bdev)->limits.discard_alignment;
+}
+
+/**
+ * rnbd_dev_submit_io() - Submit an I/O to the disk
+ * @dev: device to which the I/O is submitted
+ * @sector: address to read/write data to
+ * @data: I/O data to write or buffer to read I/O data into
+ * @len: length of @data
+ * @bi_size: amount of data that will be read/written
+ * @flags: RNBD I/O flags, see enum rnbd_io_flags
+ * @prio: IO priority
+ * @priv: private data passed back to rnbd_endio() on completion
+ */
+int rnbd_dev_submit_io(struct rnbd_dev *dev, sector_t sector, void *data,
+ size_t len, u32 bi_size, enum rnbd_io_flags flags,
+ short prio, void *priv);
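+
+/*
+ * Rough usage sketch (hypothetical caller, error handling omitted): open
+ * the block device, submit I/O against it, and close it once all I/O has
+ * completed; completion is reported through rnbd_endio() with the @priv
+ * cookie passed to rnbd_dev_submit_io().
+ *
+ *   dev = rnbd_dev_open("/dev/ram0", FMODE_READ | FMODE_WRITE, bs);
+ *   rnbd_dev_submit_io(dev, sector, data, len, bi_size,
+ *                      RNBD_OP_WRITE, 0, priv);
+ *   ...
+ *   rnbd_dev_close(dev);
+ */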
+
+#endif /* RNBD_SRV_DEV_H */
diff --git a/drivers/block/rnbd/rnbd-srv-sysfs.c b/drivers/block/rnbd/rnbd-srv-sysfs.c
new file mode 100644
index 000000000000..106775c074d1
--- /dev/null
+++ b/drivers/block/rnbd/rnbd-srv-sysfs.c
@@ -0,0 +1,215 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RDMA Network Block Driver
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
+
+#include <uapi/linux/limits.h>
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+#include <linux/stat.h>
+#include <linux/genhd.h>
+#include <linux/list.h>
+#include <linux/moduleparam.h>
+#include <linux/device.h>
+
+#include "rnbd-srv.h"
+
+static struct device *rnbd_dev;
+static struct class *rnbd_dev_class;
+static struct kobject *rnbd_devs_kobj;
+
+static void rnbd_srv_dev_release(struct kobject *kobj)
+{
+ struct rnbd_srv_dev *dev;
+
+ dev = container_of(kobj, struct rnbd_srv_dev, dev_kobj);
+
+ kfree(dev);
+}
+
+static struct kobj_type dev_ktype = {
+ .sysfs_ops = &kobj_sysfs_ops,
+ .release = rnbd_srv_dev_release
+};
+
+int rnbd_srv_create_dev_sysfs(struct rnbd_srv_dev *dev,
+ struct block_device *bdev,
+ const char *dev_name)
+{
+ struct kobject *bdev_kobj;
+ int ret;
+
+ ret = kobject_init_and_add(&dev->dev_kobj, &dev_ktype,
+ rnbd_devs_kobj, dev_name);
+ if (ret)
+ return ret;
+
+ dev->dev_sessions_kobj = kobject_create_and_add("sessions",
+ &dev->dev_kobj);
+ if (!dev->dev_sessions_kobj)
+ goto put_dev_kobj;
+
+ bdev_kobj = &disk_to_dev(bdev->bd_disk)->kobj;
+ ret = sysfs_create_link(&dev->dev_kobj, bdev_kobj, "block_dev");
+ if (ret)
+ goto put_sess_kobj;
+
+ return 0;
+
+put_sess_kobj:
+ kobject_put(dev->dev_sessions_kobj);
+put_dev_kobj:
+ kobject_put(&dev->dev_kobj);
+ return ret;
+}
+
+void rnbd_srv_destroy_dev_sysfs(struct rnbd_srv_dev *dev)
+{
+ sysfs_remove_link(&dev->dev_kobj, "block_dev");
+ kobject_del(dev->dev_sessions_kobj);
+ kobject_put(dev->dev_sessions_kobj);
+ kobject_del(&dev->dev_kobj);
+ kobject_put(&dev->dev_kobj);
+}
+
+static ssize_t read_only_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *page)
+{
+ struct rnbd_srv_sess_dev *sess_dev;
+
+ sess_dev = container_of(kobj, struct rnbd_srv_sess_dev, kobj);
+
+ return scnprintf(page, PAGE_SIZE, "%d\n",
+ !(sess_dev->open_flags & FMODE_WRITE));
+}
+
+static struct kobj_attribute rnbd_srv_dev_session_ro_attr =
+ __ATTR_RO(read_only);
+
+static ssize_t access_mode_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ struct rnbd_srv_sess_dev *sess_dev;
+
+ sess_dev = container_of(kobj, struct rnbd_srv_sess_dev, kobj);
+
+ return scnprintf(page, PAGE_SIZE, "%s\n",
+ rnbd_access_mode_str(sess_dev->access_mode));
+}
+
+static struct kobj_attribute rnbd_srv_dev_session_access_mode_attr =
+ __ATTR_RO(access_mode);
+
+static ssize_t mapping_path_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *page)
+{
+ struct rnbd_srv_sess_dev *sess_dev;
+
+ sess_dev = container_of(kobj, struct rnbd_srv_sess_dev, kobj);
+
+ return scnprintf(page, PAGE_SIZE, "%s\n", sess_dev->pathname);
+}
+
+static struct kobj_attribute rnbd_srv_dev_session_mapping_path_attr =
+ __ATTR_RO(mapping_path);
+
+static struct attribute *rnbd_srv_default_dev_sessions_attrs[] = {
+ &rnbd_srv_dev_session_access_mode_attr.attr,
+ &rnbd_srv_dev_session_ro_attr.attr,
+ &rnbd_srv_dev_session_mapping_path_attr.attr,
+ NULL,
+};
+
+static struct attribute_group rnbd_srv_default_dev_session_attr_group = {
+ .attrs = rnbd_srv_default_dev_sessions_attrs,
+};
+
+void rnbd_srv_destroy_dev_session_sysfs(struct rnbd_srv_sess_dev *sess_dev)
+{
+ sysfs_remove_group(&sess_dev->kobj,
+ &rnbd_srv_default_dev_session_attr_group);
+
+ kobject_del(&sess_dev->kobj);
+ kobject_put(&sess_dev->kobj);
+}
+
+static void rnbd_srv_sess_dev_release(struct kobject *kobj)
+{
+ struct rnbd_srv_sess_dev *sess_dev;
+
+ sess_dev = container_of(kobj, struct rnbd_srv_sess_dev, kobj);
+ rnbd_destroy_sess_dev(sess_dev);
+}
+
+static struct kobj_type rnbd_srv_sess_dev_ktype = {
+ .sysfs_ops = &kobj_sysfs_ops,
+ .release = rnbd_srv_sess_dev_release,
+};
+
+int rnbd_srv_create_dev_session_sysfs(struct rnbd_srv_sess_dev *sess_dev)
+{
+ int ret;
+
+ ret = kobject_init_and_add(&sess_dev->kobj, &rnbd_srv_sess_dev_ktype,
+ sess_dev->dev->dev_sessions_kobj, "%s",
+ sess_dev->sess->sessname);
+ if (ret)
+ return ret;
+
+ ret = sysfs_create_group(&sess_dev->kobj,
+ &rnbd_srv_default_dev_session_attr_group);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ kobject_put(&sess_dev->kobj);
+
+ return ret;
+}
+
+int rnbd_srv_create_sysfs_files(void)
+{
+ int err;
+
+ rnbd_dev_class = class_create(THIS_MODULE, "rnbd-server");
+ if (IS_ERR(rnbd_dev_class))
+ return PTR_ERR(rnbd_dev_class);
+
+ rnbd_dev = device_create(rnbd_dev_class, NULL,
+ MKDEV(0, 0), NULL, "ctl");
+ if (IS_ERR(rnbd_dev)) {
+ err = PTR_ERR(rnbd_dev);
+ goto cls_destroy;
+ }
+ rnbd_devs_kobj = kobject_create_and_add("devices", &rnbd_dev->kobj);
+ if (!rnbd_devs_kobj) {
+ err = -ENOMEM;
+ goto dev_destroy;
+ }
+
+ return 0;
+
+dev_destroy:
+ device_destroy(rnbd_dev_class, MKDEV(0, 0));
+cls_destroy:
+ class_destroy(rnbd_dev_class);
+
+ return err;
+}
+
+void rnbd_srv_destroy_sysfs_files(void)
+{
+ kobject_del(rnbd_devs_kobj);
+ kobject_put(rnbd_devs_kobj);
+ device_destroy(rnbd_dev_class, MKDEV(0, 0));
+ class_destroy(rnbd_dev_class);
+}
diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c
new file mode 100644
index 000000000000..86e61523907b
--- /dev/null
+++ b/drivers/block/rnbd/rnbd-srv.c
@@ -0,0 +1,844 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RDMA Network Block Driver
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
+
+#include <linux/module.h>
+#include <linux/blkdev.h>
+
+#include "rnbd-srv.h"
+#include "rnbd-srv-dev.h"
+
+MODULE_DESCRIPTION("RDMA Network Block Device Server");
+MODULE_LICENSE("GPL");
+
+static u16 port_nr = RTRS_PORT;
+
+module_param_named(port_nr, port_nr, ushort, 0444);
+MODULE_PARM_DESC(port_nr,
+ "The port number the server is listening on (default: "
+ __stringify(RTRS_PORT)")");
+
+#define DEFAULT_DEV_SEARCH_PATH "/"
+
+static char dev_search_path[PATH_MAX] = DEFAULT_DEV_SEARCH_PATH;
+
+static int dev_search_path_set(const char *val, const struct kernel_param *kp)
+{
+ const char *p = strrchr(val, '\n') ? : val + strlen(val);
+
+ if (strlen(val) >= sizeof(dev_search_path))
+ return -EINVAL;
+
+ snprintf(dev_search_path, sizeof(dev_search_path), "%.*s",
+ (int)(p - val), val);
+
+ pr_info("dev_search_path changed to '%s'\n", dev_search_path);
+
+ return 0;
+}
+
+static struct kparam_string dev_search_path_kparam_str = {
+ .maxlen = sizeof(dev_search_path),
+ .string = dev_search_path
+};
+
+static const struct kernel_param_ops dev_search_path_ops = {
+ .set = dev_search_path_set,
+ .get = param_get_string,
+};
+
+module_param_cb(dev_search_path, &dev_search_path_ops,
+ &dev_search_path_kparam_str, 0444);
+MODULE_PARM_DESC(dev_search_path,
+ "Sets the dev_search_path. When a device is mapped this path is prepended to the device path from the map device operation. If %SESSNAME% is specified in a path, then device will be searched in a session namespace. (default: "
+ DEFAULT_DEV_SEARCH_PATH ")");
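+
+/*
+ * Example (illustrative values only): with dev_search_path set to
+ * "/rnbd-devs/%SESSNAME%", a client on session "client-1" that maps "ram0"
+ * makes the server open "/rnbd-devs/client-1/ram0"; without %SESSNAME% the
+ * device path is simply appended, e.g. "/rnbd-devs" + "ram0" becomes
+ * "/rnbd-devs/ram0" (see rnbd_srv_get_full_path()).
+ */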
+
+static DEFINE_MUTEX(sess_lock);
+static DEFINE_SPINLOCK(dev_lock);
+
+static LIST_HEAD(sess_list);
+static LIST_HEAD(dev_list);
+
+struct rnbd_io_private {
+ struct rtrs_srv_op *id;
+ struct rnbd_srv_sess_dev *sess_dev;
+};
+
+static void rnbd_sess_dev_release(struct kref *kref)
+{
+ struct rnbd_srv_sess_dev *sess_dev;
+
+ sess_dev = container_of(kref, struct rnbd_srv_sess_dev, kref);
+ complete(sess_dev->destroy_comp);
+}
+
+static inline void rnbd_put_sess_dev(struct rnbd_srv_sess_dev *sess_dev)
+{
+ kref_put(&sess_dev->kref, rnbd_sess_dev_release);
+}
+
+void rnbd_endio(void *priv, int error)
+{
+ struct rnbd_io_private *rnbd_priv = priv;
+ struct rnbd_srv_sess_dev *sess_dev = rnbd_priv->sess_dev;
+
+ rnbd_put_sess_dev(sess_dev);
+
+ rtrs_srv_resp_rdma(rnbd_priv->id, error);
+
+ kfree(priv);
+}
+
+static struct rnbd_srv_sess_dev *
+rnbd_get_sess_dev(int dev_id, struct rnbd_srv_session *srv_sess)
+{
+ struct rnbd_srv_sess_dev *sess_dev;
+ int ret = 0;
+
+ rcu_read_lock();
+ sess_dev = xa_load(&srv_sess->index_idr, dev_id);
+ if (likely(sess_dev))
+ ret = kref_get_unless_zero(&sess_dev->kref);
+ rcu_read_unlock();
+
+ if (!sess_dev || !ret)
+ return ERR_PTR(-ENXIO);
+
+ return sess_dev;
+}
+
+static int process_rdma(struct rtrs_srv *sess,
+ struct rnbd_srv_session *srv_sess,
+ struct rtrs_srv_op *id, void *data, u32 datalen,
+ const void *usr, size_t usrlen)
+{
+ const struct rnbd_msg_io *msg = usr;
+ struct rnbd_io_private *priv;
+ struct rnbd_srv_sess_dev *sess_dev;
+ u32 dev_id;
+ int err;
+
+ priv = kmalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ dev_id = le32_to_cpu(msg->device_id);
+
+ sess_dev = rnbd_get_sess_dev(dev_id, srv_sess);
+ if (IS_ERR(sess_dev)) {
+ pr_err_ratelimited("Got I/O request on session %s for unknown device id %d\n",
+ srv_sess->sessname, dev_id);
+ err = -ENOTCONN;
+ goto err;
+ }
+
+ priv->sess_dev = sess_dev;
+ priv->id = id;
+
+ err = rnbd_dev_submit_io(sess_dev->rnbd_dev, le64_to_cpu(msg->sector),
+ data, datalen, le32_to_cpu(msg->bi_size),
+ le32_to_cpu(msg->rw),
+ srv_sess->ver < RNBD_PROTO_VER_MAJOR ||
+ usrlen < sizeof(*msg) ?
+ 0 : le16_to_cpu(msg->prio), priv);
+ if (unlikely(err)) {
+ rnbd_srv_err(sess_dev, "Submitting I/O to device failed, err: %d\n",
+ err);
+ goto sess_dev_put;
+ }
+
+ return 0;
+
+sess_dev_put:
+ rnbd_put_sess_dev(sess_dev);
+err:
+ kfree(priv);
+ return err;
+}
+
+static void destroy_device(struct rnbd_srv_dev *dev)
+{
+ WARN_ONCE(!list_empty(&dev->sess_dev_list),
+ "Device %s is being destroyed but still in use!\n",
+ dev->id);
+
+ spin_lock(&dev_lock);
+ list_del(&dev->list);
+ spin_unlock(&dev_lock);
+
+ mutex_destroy(&dev->lock);
+ if (dev->dev_kobj.state_in_sysfs)
+ /*
+ * Destroy kobj only if it was really created.
+ */
+ rnbd_srv_destroy_dev_sysfs(dev);
+ else
+ kfree(dev);
+}
+
+static void destroy_device_cb(struct kref *kref)
+{
+ struct rnbd_srv_dev *dev;
+
+ dev = container_of(kref, struct rnbd_srv_dev, kref);
+
+ destroy_device(dev);
+}
+
+static void rnbd_put_srv_dev(struct rnbd_srv_dev *dev)
+{
+ kref_put(&dev->kref, destroy_device_cb);
+}
+
+void rnbd_destroy_sess_dev(struct rnbd_srv_sess_dev *sess_dev)
+{
+ DECLARE_COMPLETION_ONSTACK(dc);
+
+ xa_erase(&sess_dev->sess->index_idr, sess_dev->device_id);
+ synchronize_rcu();
+ sess_dev->destroy_comp = &dc;
+ rnbd_put_sess_dev(sess_dev);
+ wait_for_completion(&dc); /* wait for inflights to drop to zero */
+
+ rnbd_dev_close(sess_dev->rnbd_dev);
+ list_del(&sess_dev->sess_list);
+ mutex_lock(&sess_dev->dev->lock);
+ list_del(&sess_dev->dev_list);
+ if (sess_dev->open_flags & FMODE_WRITE)
+ sess_dev->dev->open_write_cnt--;
+ mutex_unlock(&sess_dev->dev->lock);
+
+ rnbd_put_srv_dev(sess_dev->dev);
+
+ rnbd_srv_info(sess_dev, "Device closed\n");
+ kfree(sess_dev);
+}
+
+static void destroy_sess(struct rnbd_srv_session *srv_sess)
+{
+ struct rnbd_srv_sess_dev *sess_dev, *tmp;
+
+ if (list_empty(&srv_sess->sess_dev_list))
+ goto out;
+
+ mutex_lock(&srv_sess->lock);
+ list_for_each_entry_safe(sess_dev, tmp, &srv_sess->sess_dev_list,
+ sess_list)
+ rnbd_srv_destroy_dev_session_sysfs(sess_dev);
+ mutex_unlock(&srv_sess->lock);
+
+out:
+ xa_destroy(&srv_sess->index_idr);
+ bioset_exit(&srv_sess->sess_bio_set);
+
+ pr_info("RTRS Session %s disconnected\n", srv_sess->sessname);
+
+ mutex_lock(&sess_lock);
+ list_del(&srv_sess->list);
+ mutex_unlock(&sess_lock);
+
+ mutex_destroy(&srv_sess->lock);
+ kfree(srv_sess);
+}
+
+static int create_sess(struct rtrs_srv *rtrs)
+{
+ struct rnbd_srv_session *srv_sess;
+ char sessname[NAME_MAX];
+ int err;
+
+ err = rtrs_srv_get_sess_name(rtrs, sessname, sizeof(sessname));
+ if (err) {
+ pr_err("rtrs_srv_get_sess_name(%s): %d\n", sessname, err);
+
+ return err;
+ }
+ srv_sess = kzalloc(sizeof(*srv_sess), GFP_KERNEL);
+ if (!srv_sess)
+ return -ENOMEM;
+
+ srv_sess->queue_depth = rtrs_srv_get_queue_depth(rtrs);
+ err = bioset_init(&srv_sess->sess_bio_set, srv_sess->queue_depth,
+ offsetof(struct rnbd_dev_blk_io, bio),
+ BIOSET_NEED_BVECS);
+ if (err) {
+ pr_err("Allocating srv_session for session %s failed\n",
+ sessname);
+ kfree(srv_sess);
+ return err;
+ }
+
+ xa_init_flags(&srv_sess->index_idr, XA_FLAGS_ALLOC);
+ INIT_LIST_HEAD(&srv_sess->sess_dev_list);
+ mutex_init(&srv_sess->lock);
+ mutex_lock(&sess_lock);
+ list_add(&srv_sess->list, &sess_list);
+ mutex_unlock(&sess_lock);
+
+ srv_sess->rtrs = rtrs;
+ strlcpy(srv_sess->sessname, sessname, sizeof(srv_sess->sessname));
+
+ rtrs_srv_set_sess_priv(rtrs, srv_sess);
+
+ return 0;
+}
+
+static int rnbd_srv_link_ev(struct rtrs_srv *rtrs,
+ enum rtrs_srv_link_ev ev, void *priv)
+{
+ struct rnbd_srv_session *srv_sess = priv;
+
+ switch (ev) {
+ case RTRS_SRV_LINK_EV_CONNECTED:
+ return create_sess(rtrs);
+
+ case RTRS_SRV_LINK_EV_DISCONNECTED:
+ if (WARN_ON_ONCE(!srv_sess))
+ return -EINVAL;
+
+ destroy_sess(srv_sess);
+ return 0;
+
+ default:
+ pr_warn("Received unknown RTRS session event %d from session %s\n",
+ ev, srv_sess->sessname);
+ return -EINVAL;
+ }
+}
+
+static int process_msg_close(struct rtrs_srv *rtrs,
+ struct rnbd_srv_session *srv_sess,
+ void *data, size_t datalen, const void *usr,
+ size_t usrlen)
+{
+ const struct rnbd_msg_close *close_msg = usr;
+ struct rnbd_srv_sess_dev *sess_dev;
+
+ sess_dev = rnbd_get_sess_dev(le32_to_cpu(close_msg->device_id),
+ srv_sess);
+ if (IS_ERR(sess_dev))
+ return 0;
+
+ rnbd_put_sess_dev(sess_dev);
+ mutex_lock(&srv_sess->lock);
+ rnbd_srv_destroy_dev_session_sysfs(sess_dev);
+ mutex_unlock(&srv_sess->lock);
+ return 0;
+}
+
+static int process_msg_open(struct rtrs_srv *rtrs,
+ struct rnbd_srv_session *srv_sess,
+ const void *msg, size_t len,
+ void *data, size_t datalen);
+
+static int process_msg_sess_info(struct rtrs_srv *rtrs,
+ struct rnbd_srv_session *srv_sess,
+ const void *msg, size_t len,
+ void *data, size_t datalen);
+
+static int rnbd_srv_rdma_ev(struct rtrs_srv *rtrs, void *priv,
+ struct rtrs_srv_op *id, int dir,
+ void *data, size_t datalen, const void *usr,
+ size_t usrlen)
+{
+ struct rnbd_srv_session *srv_sess = priv;
+ const struct rnbd_msg_hdr *hdr = usr;
+ int ret = 0;
+ u16 type;
+
+ if (WARN_ON_ONCE(!srv_sess))
+ return -ENODEV;
+
+ type = le16_to_cpu(hdr->type);
+
+ switch (type) {
+ case RNBD_MSG_IO:
+ return process_rdma(rtrs, srv_sess, id, data, datalen, usr,
+ usrlen);
+ case RNBD_MSG_CLOSE:
+ ret = process_msg_close(rtrs, srv_sess, data, datalen,
+ usr, usrlen);
+ break;
+ case RNBD_MSG_OPEN:
+ ret = process_msg_open(rtrs, srv_sess, usr, usrlen,
+ data, datalen);
+ break;
+ case RNBD_MSG_SESS_INFO:
+ ret = process_msg_sess_info(rtrs, srv_sess, usr, usrlen,
+ data, datalen);
+ break;
+ default:
+ pr_warn("Received unexpected message type %d with dir %d from session %s\n",
+ type, dir, srv_sess->sessname);
+ return -EINVAL;
+ }
+
+ rtrs_srv_resp_rdma(id, ret);
+ return 0;
+}
+
+static struct rnbd_srv_sess_dev
+*rnbd_sess_dev_alloc(struct rnbd_srv_session *srv_sess)
+{
+ struct rnbd_srv_sess_dev *sess_dev;
+ int error;
+
+ sess_dev = kzalloc(sizeof(*sess_dev), GFP_KERNEL);
+ if (!sess_dev)
+ return ERR_PTR(-ENOMEM);
+
+ error = xa_alloc(&srv_sess->index_idr, &sess_dev->device_id, sess_dev,
+ xa_limit_32b, GFP_NOWAIT);
+ if (error < 0) {
+ pr_warn("Allocating idr failed, err: %d\n", error);
+ kfree(sess_dev);
+ return ERR_PTR(error);
+ }
+
+ return sess_dev;
+}
+
+static struct rnbd_srv_dev *rnbd_srv_init_srv_dev(const char *id)
+{
+ struct rnbd_srv_dev *dev;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ strlcpy(dev->id, id, sizeof(dev->id));
+ kref_init(&dev->kref);
+ INIT_LIST_HEAD(&dev->sess_dev_list);
+ mutex_init(&dev->lock);
+
+ return dev;
+}
+
+static struct rnbd_srv_dev *
+rnbd_srv_find_or_add_srv_dev(struct rnbd_srv_dev *new_dev)
+{
+ struct rnbd_srv_dev *dev;
+
+ spin_lock(&dev_lock);
+ list_for_each_entry(dev, &dev_list, list) {
+ if (!strncmp(dev->id, new_dev->id, sizeof(dev->id))) {
+ if (!kref_get_unless_zero(&dev->kref))
+ /*
+ * We lost the race, device is almost dead.
+ * Continue traversing to find a valid one.
+ */
+ continue;
+ spin_unlock(&dev_lock);
+ return dev;
+ }
+ }
+ list_add(&new_dev->list, &dev_list);
+ spin_unlock(&dev_lock);
+
+ return new_dev;
+}
+
+static int rnbd_srv_check_update_open_perm(struct rnbd_srv_dev *srv_dev,
+ struct rnbd_srv_session *srv_sess,
+ enum rnbd_access_mode access_mode)
+{
+ int ret = -EPERM;
+
+ mutex_lock(&srv_dev->lock);
+
+ switch (access_mode) {
+ case RNBD_ACCESS_RO:
+ ret = 0;
+ break;
+ case RNBD_ACCESS_RW:
+ if (srv_dev->open_write_cnt == 0) {
+ srv_dev->open_write_cnt++;
+ ret = 0;
+ } else {
+ pr_err("Mapping device '%s' for session %s with RW permissions failed. Device already opened as 'RW' by %d client(s), access mode %s.\n",
+ srv_dev->id, srv_sess->sessname,
+ srv_dev->open_write_cnt,
+ rnbd_access_mode_str(access_mode));
+ }
+ break;
+ case RNBD_ACCESS_MIGRATION:
+ if (srv_dev->open_write_cnt < 2) {
+ srv_dev->open_write_cnt++;
+ ret = 0;
+ } else {
+ pr_err("Mapping device '%s' for session %s with migration permissions failed. Device already opened as 'RW' by %d client(s), access mode %s.\n",
+ srv_dev->id, srv_sess->sessname,
+ srv_dev->open_write_cnt,
+ rnbd_access_mode_str(access_mode));
+ }
+ break;
+ default:
+ pr_err("Received mapping request for device '%s' on session %s with invalid access mode: %d\n",
+ srv_dev->id, srv_sess->sessname, access_mode);
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&srv_dev->lock);
+
+ return ret;
+}
+
+static struct rnbd_srv_dev *
+rnbd_srv_get_or_create_srv_dev(struct rnbd_dev *rnbd_dev,
+ struct rnbd_srv_session *srv_sess,
+ enum rnbd_access_mode access_mode)
+{
+ int ret;
+ struct rnbd_srv_dev *new_dev, *dev;
+
+ new_dev = rnbd_srv_init_srv_dev(rnbd_dev->name);
+ if (IS_ERR(new_dev))
+ return new_dev;
+
+ dev = rnbd_srv_find_or_add_srv_dev(new_dev);
+ if (dev != new_dev)
+ kfree(new_dev);
+
+ ret = rnbd_srv_check_update_open_perm(dev, srv_sess, access_mode);
+ if (ret) {
+ rnbd_put_srv_dev(dev);
+ return ERR_PTR(ret);
+ }
+
+ return dev;
+}
+
+static void rnbd_srv_fill_msg_open_rsp(struct rnbd_msg_open_rsp *rsp,
+ struct rnbd_srv_sess_dev *sess_dev)
+{
+ struct rnbd_dev *rnbd_dev = sess_dev->rnbd_dev;
+
+ rsp->hdr.type = cpu_to_le16(RNBD_MSG_OPEN_RSP);
+ rsp->device_id =
+ cpu_to_le32(sess_dev->device_id);
+ rsp->nsectors =
+ cpu_to_le64(get_capacity(rnbd_dev->bdev->bd_disk));
+ rsp->logical_block_size =
+ cpu_to_le16(bdev_logical_block_size(rnbd_dev->bdev));
+ rsp->physical_block_size =
+ cpu_to_le16(bdev_physical_block_size(rnbd_dev->bdev));
+ rsp->max_segments =
+ cpu_to_le16(rnbd_dev_get_max_segs(rnbd_dev));
+ rsp->max_hw_sectors =
+ cpu_to_le32(rnbd_dev_get_max_hw_sects(rnbd_dev));
+ rsp->max_write_same_sectors =
+ cpu_to_le32(bdev_write_same(rnbd_dev->bdev));
+ rsp->max_discard_sectors =
+ cpu_to_le32(rnbd_dev_get_max_discard_sects(rnbd_dev));
+ rsp->discard_granularity =
+ cpu_to_le32(rnbd_dev_get_discard_granularity(rnbd_dev));
+ rsp->discard_alignment =
+ cpu_to_le32(rnbd_dev_get_discard_alignment(rnbd_dev));
+ rsp->secure_discard =
+ cpu_to_le16(rnbd_dev_get_secure_discard(rnbd_dev));
+ rsp->rotational =
+ !blk_queue_nonrot(bdev_get_queue(rnbd_dev->bdev));
+}
+
+static struct rnbd_srv_sess_dev *
+rnbd_srv_create_set_sess_dev(struct rnbd_srv_session *srv_sess,
+ const struct rnbd_msg_open *open_msg,
+ struct rnbd_dev *rnbd_dev, fmode_t open_flags,
+ struct rnbd_srv_dev *srv_dev)
+{
+ struct rnbd_srv_sess_dev *sdev = rnbd_sess_dev_alloc(srv_sess);
+
+ if (IS_ERR(sdev))
+ return sdev;
+
+ kref_init(&sdev->kref);
+
+ strlcpy(sdev->pathname, open_msg->dev_name, sizeof(sdev->pathname));
+
+ sdev->rnbd_dev = rnbd_dev;
+ sdev->sess = srv_sess;
+ sdev->dev = srv_dev;
+ sdev->open_flags = open_flags;
+ sdev->access_mode = open_msg->access_mode;
+
+ return sdev;
+}
+
+static char *rnbd_srv_get_full_path(struct rnbd_srv_session *srv_sess,
+ const char *dev_name)
+{
+ char *full_path;
+ char *a, *b;
+
+ full_path = kmalloc(PATH_MAX, GFP_KERNEL);
+ if (!full_path)
+ return ERR_PTR(-ENOMEM);
+
+ /*
+ * Replace %SESSNAME% with a real session name in order to
+ * create a per-session device namespace.
+ */
+ a = strnstr(dev_search_path, "%SESSNAME%", sizeof(dev_search_path));
+ if (a) {
+ int len = a - dev_search_path;
+
+ len = snprintf(full_path, PATH_MAX, "%.*s/%s/%s", len,
+ dev_search_path, srv_sess->sessname, dev_name);
+ if (len >= PATH_MAX) {
+ pr_err("Too long path: %s, %s, %s\n",
+ dev_search_path, srv_sess->sessname, dev_name);
+ kfree(full_path);
+ return ERR_PTR(-EINVAL);
+ }
+ } else {
+ snprintf(full_path, PATH_MAX, "%s/%s",
+ dev_search_path, dev_name);
+ }
+
+ /* eliminate duplicated slashes */
+ a = strchr(full_path, '/');
+ b = a;
+ while (*b != '\0') {
+ if (*b == '/' && *a == '/') {
+ b++;
+ } else {
+ a++;
+ *a = *b;
+ b++;
+ }
+ }
+ a++;
+ *a = '\0';
+
+ return full_path;
+}
+
+static int process_msg_sess_info(struct rtrs_srv *rtrs,
+ struct rnbd_srv_session *srv_sess,
+ const void *msg, size_t len,
+ void *data, size_t datalen)
+{
+ const struct rnbd_msg_sess_info *sess_info_msg = msg;
+ struct rnbd_msg_sess_info_rsp *rsp = data;
+
+ srv_sess->ver = min_t(u8, sess_info_msg->ver, RNBD_PROTO_VER_MAJOR);
+ pr_debug("Session %s using protocol version %d (client version: %d, server version: %d)\n",
+ srv_sess->sessname, srv_sess->ver,
+ sess_info_msg->ver, RNBD_PROTO_VER_MAJOR);
+
+ rsp->hdr.type = cpu_to_le16(RNBD_MSG_SESS_INFO_RSP);
+ rsp->ver = srv_sess->ver;
+
+ return 0;
+}
+
+/**
+ * find_srv_sess_dev() - check whether a device with this name is already open
+ * @srv_sess: the session to search.
+ * @dev_name: string containing the name of the device.
+ *
+ * Return the struct rnbd_srv_sess_dev if srv_sess has already opened
+ * dev_name, or NULL if the session has not opened the device yet.
+ */
+static struct rnbd_srv_sess_dev *
+find_srv_sess_dev(struct rnbd_srv_session *srv_sess, const char *dev_name)
+{
+ struct rnbd_srv_sess_dev *sess_dev;
+
+ if (list_empty(&srv_sess->sess_dev_list))
+ return NULL;
+
+ list_for_each_entry(sess_dev, &srv_sess->sess_dev_list, sess_list)
+ if (!strcmp(sess_dev->pathname, dev_name))
+ return sess_dev;
+
+ return NULL;
+}
+
+static int process_msg_open(struct rtrs_srv *rtrs,
+ struct rnbd_srv_session *srv_sess,
+ const void *msg, size_t len,
+ void *data, size_t datalen)
+{
+ int ret;
+ struct rnbd_srv_dev *srv_dev;
+ struct rnbd_srv_sess_dev *srv_sess_dev;
+ const struct rnbd_msg_open *open_msg = msg;
+ fmode_t open_flags;
+ char *full_path;
+ struct rnbd_dev *rnbd_dev;
+ struct rnbd_msg_open_rsp *rsp = data;
+
+ pr_debug("Open message received: session='%s' path='%s' access_mode=%d\n",
+ srv_sess->sessname, open_msg->dev_name,
+ open_msg->access_mode);
+ open_flags = FMODE_READ;
+ if (open_msg->access_mode != RNBD_ACCESS_RO)
+ open_flags |= FMODE_WRITE;
+
+ mutex_lock(&srv_sess->lock);
+
+ srv_sess_dev = find_srv_sess_dev(srv_sess, open_msg->dev_name);
+ if (srv_sess_dev)
+ goto fill_response;
+
+ if ((strlen(dev_search_path) + strlen(open_msg->dev_name))
+ >= PATH_MAX) {
+ pr_err("Opening device for session %s failed, device path too long. '%s/%s' is longer than PATH_MAX (%d)\n",
+ srv_sess->sessname, dev_search_path, open_msg->dev_name,
+ PATH_MAX);
+ ret = -EINVAL;
+ goto reject;
+ }
+ if (strstr(open_msg->dev_name, "..")) {
+ pr_err("Opening device for session %s failed, device path %s contains relative path ..\n",
+ srv_sess->sessname, open_msg->dev_name);
+ ret = -EINVAL;
+ goto reject;
+ }
+ full_path = rnbd_srv_get_full_path(srv_sess, open_msg->dev_name);
+ if (IS_ERR(full_path)) {
+ ret = PTR_ERR(full_path);
+ pr_err("Opening device '%s' for client %s failed, failed to get device full path, err: %d\n",
+ open_msg->dev_name, srv_sess->sessname, ret);
+ goto reject;
+ }
+
+ rnbd_dev = rnbd_dev_open(full_path, open_flags,
+ &srv_sess->sess_bio_set);
+ if (IS_ERR(rnbd_dev)) {
+ pr_err("Opening device '%s' on session %s failed, failed to open the block device, err: %ld\n",
+ full_path, srv_sess->sessname, PTR_ERR(rnbd_dev));
+ ret = PTR_ERR(rnbd_dev);
+ goto free_path;
+ }
+
+ srv_dev = rnbd_srv_get_or_create_srv_dev(rnbd_dev, srv_sess,
+ open_msg->access_mode);
+ if (IS_ERR(srv_dev)) {
+ pr_err("Opening device '%s' on session %s failed, creating srv_dev failed, err: %ld\n",
+ full_path, srv_sess->sessname, PTR_ERR(srv_dev));
+ ret = PTR_ERR(srv_dev);
+ goto rnbd_dev_close;
+ }
+
+ srv_sess_dev = rnbd_srv_create_set_sess_dev(srv_sess, open_msg,
+ rnbd_dev, open_flags,
+ srv_dev);
+ if (IS_ERR(srv_sess_dev)) {
+ pr_err("Opening device '%s' on session %s failed, creating sess_dev failed, err: %ld\n",
+ full_path, srv_sess->sessname, PTR_ERR(srv_sess_dev));
+ ret = PTR_ERR(srv_sess_dev);
+ goto srv_dev_put;
+ }
+
+ /* Create the srv_dev sysfs files if they haven't been created yet. Creation
+ * is delayed so that the sysfs files do not appear before we are sure the
+ * device can actually be opened.
+ */
+ mutex_lock(&srv_dev->lock);
+ if (!srv_dev->dev_kobj.state_in_sysfs) {
+ ret = rnbd_srv_create_dev_sysfs(srv_dev, rnbd_dev->bdev,
+ rnbd_dev->name);
+ if (ret) {
+ mutex_unlock(&srv_dev->lock);
+ rnbd_srv_err(srv_sess_dev,
+ "Opening device failed, failed to create device sysfs files, err: %d\n",
+ ret);
+ goto free_srv_sess_dev;
+ }
+ }
+
+ ret = rnbd_srv_create_dev_session_sysfs(srv_sess_dev);
+ if (ret) {
+ mutex_unlock(&srv_dev->lock);
+ rnbd_srv_err(srv_sess_dev,
+ "Opening device failed, failed to create dev client sysfs files, err: %d\n",
+ ret);
+ goto free_srv_sess_dev;
+ }
+
+ list_add(&srv_sess_dev->dev_list, &srv_dev->sess_dev_list);
+ mutex_unlock(&srv_dev->lock);
+
+ list_add(&srv_sess_dev->sess_list, &srv_sess->sess_dev_list);
+
+ rnbd_srv_info(srv_sess_dev, "Opened device '%s'\n", srv_dev->id);
+
+ kfree(full_path);
+
+fill_response:
+ rnbd_srv_fill_msg_open_rsp(rsp, srv_sess_dev);
+ mutex_unlock(&srv_sess->lock);
+ return 0;
+
+free_srv_sess_dev:
+ xa_erase(&srv_sess->index_idr, srv_sess_dev->device_id);
+ synchronize_rcu();
+ kfree(srv_sess_dev);
+srv_dev_put:
+ if (open_msg->access_mode != RNBD_ACCESS_RO) {
+ mutex_lock(&srv_dev->lock);
+ srv_dev->open_write_cnt--;
+ mutex_unlock(&srv_dev->lock);
+ }
+ rnbd_put_srv_dev(srv_dev);
+rnbd_dev_close:
+ rnbd_dev_close(rnbd_dev);
+free_path:
+ kfree(full_path);
+reject:
+ mutex_unlock(&srv_sess->lock);
+ return ret;
+}
+
+static struct rtrs_srv_ctx *rtrs_ctx;
+
+static struct rtrs_srv_ops rtrs_ops;
+static int __init rnbd_srv_init_module(void)
+{
+ int err;
+
+ BUILD_BUG_ON(sizeof(struct rnbd_msg_hdr) != 4);
+ BUILD_BUG_ON(sizeof(struct rnbd_msg_sess_info) != 36);
+ BUILD_BUG_ON(sizeof(struct rnbd_msg_sess_info_rsp) != 36);
+ BUILD_BUG_ON(sizeof(struct rnbd_msg_open) != 264);
+ BUILD_BUG_ON(sizeof(struct rnbd_msg_close) != 8);
+ BUILD_BUG_ON(sizeof(struct rnbd_msg_open_rsp) != 56);
+ rtrs_ops = (struct rtrs_srv_ops) {
+ .rdma_ev = rnbd_srv_rdma_ev,
+ .link_ev = rnbd_srv_link_ev,
+ };
+ rtrs_ctx = rtrs_srv_open(&rtrs_ops, port_nr);
+ if (IS_ERR(rtrs_ctx)) {
+ err = PTR_ERR(rtrs_ctx);
+ pr_err("rtrs_srv_open(), err: %d\n", err);
+ return err;
+ }
+
+ err = rnbd_srv_create_sysfs_files();
+ if (err) {
+ pr_err("rnbd_srv_create_sysfs_files(), err: %d\n", err);
+ rtrs_srv_close(rtrs_ctx);
+ return err;
+ }
+
+ return 0;
+}
+
+static void __exit rnbd_srv_cleanup_module(void)
+{
+ rtrs_srv_close(rtrs_ctx);
+ WARN_ON(!list_empty(&sess_list));
+ rnbd_srv_destroy_sysfs_files();
+}
+
+module_init(rnbd_srv_init_module);
+module_exit(rnbd_srv_cleanup_module);
diff --git a/drivers/block/rnbd/rnbd-srv.h b/drivers/block/rnbd/rnbd-srv.h
new file mode 100644
index 000000000000..5a8544b5e74f
--- /dev/null
+++ b/drivers/block/rnbd/rnbd-srv.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * RDMA Network Block Driver
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+#ifndef RNBD_SRV_H
+#define RNBD_SRV_H
+
+#include <linux/types.h>
+#include <linux/idr.h>
+#include <linux/kref.h>
+
+#include <rtrs.h>
+#include "rnbd-proto.h"
+#include "rnbd-log.h"
+
+struct rnbd_srv_session {
+ /* Entry inside global sess_list */
+ struct list_head list;
+ struct rtrs_srv *rtrs;
+ char sessname[NAME_MAX];
+ int queue_depth;
+ struct bio_set sess_bio_set;
+
+ struct xarray index_idr;
+ /* List of struct rnbd_srv_sess_dev */
+ struct list_head sess_dev_list;
+ struct mutex lock;
+ u8 ver;
+};
+
+struct rnbd_srv_dev {
+ /* Entry inside global dev_list */
+ struct list_head list;
+ struct kobject dev_kobj;
+ struct kobject *dev_sessions_kobj;
+ struct kref kref;
+ char id[NAME_MAX];
+ /* List of rnbd_srv_sess_dev structs */
+ struct list_head sess_dev_list;
+ struct mutex lock;
+ int open_write_cnt;
+};
+
+/* Structure which binds N devices and N sessions */
+struct rnbd_srv_sess_dev {
+ /* Entry inside rnbd_srv_dev struct */
+ struct list_head dev_list;
+ /* Entry inside rnbd_srv_session struct */
+ struct list_head sess_list;
+ struct rnbd_dev *rnbd_dev;
+ struct rnbd_srv_session *sess;
+ struct rnbd_srv_dev *dev;
+ struct kobject kobj;
+ u32 device_id;
+ fmode_t open_flags;
+ struct kref kref;
+ struct completion *destroy_comp;
+ char pathname[NAME_MAX];
+ enum rnbd_access_mode access_mode;
+};
+
+/* rnbd-srv-sysfs.c */
+
+int rnbd_srv_create_dev_sysfs(struct rnbd_srv_dev *dev,
+ struct block_device *bdev,
+ const char *dir_name);
+void rnbd_srv_destroy_dev_sysfs(struct rnbd_srv_dev *dev);
+int rnbd_srv_create_dev_session_sysfs(struct rnbd_srv_sess_dev *sess_dev);
+void rnbd_srv_destroy_dev_session_sysfs(struct rnbd_srv_sess_dev *sess_dev);
+int rnbd_srv_create_sysfs_files(void);
+void rnbd_srv_destroy_sysfs_files(void);
+void rnbd_destroy_sess_dev(struct rnbd_srv_sess_dev *sess_dev);
+
+#endif /* RNBD_SRV_H */
diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c
index 8ffa8260dcaf..3ba07ab30c84 100644
--- a/drivers/block/rsxx/dev.c
+++ b/drivers/block/rsxx/dev.c
@@ -96,20 +96,6 @@ static const struct block_device_operations rsxx_fops = {
.ioctl = rsxx_blkdev_ioctl,
};
-static void disk_stats_start(struct rsxx_cardinfo *card, struct bio *bio)
-{
- generic_start_io_acct(card->queue, bio_op(bio), bio_sectors(bio),
- &card->gendisk->part0);
-}
-
-static void disk_stats_complete(struct rsxx_cardinfo *card,
- struct bio *bio,
- unsigned long start_time)
-{
- generic_end_io_acct(card->queue, bio_op(bio),
- &card->gendisk->part0, start_time);
-}
-
static void bio_dma_done_cb(struct rsxx_cardinfo *card,
void *cb_data,
unsigned int error)
@@ -121,7 +107,7 @@ static void bio_dma_done_cb(struct rsxx_cardinfo *card,
if (atomic_dec_and_test(&meta->pending_dmas)) {
if (!card->eeh_state && card->gendisk)
- disk_stats_complete(card, meta->bio, meta->start_time);
+ bio_end_io_acct(meta->bio, meta->start_time);
if (atomic_read(&meta->error))
bio_io_error(meta->bio);
@@ -167,10 +153,9 @@ static blk_qc_t rsxx_make_request(struct request_queue *q, struct bio *bio)
bio_meta->bio = bio;
atomic_set(&bio_meta->error, 0);
atomic_set(&bio_meta->pending_dmas, 0);
- bio_meta->start_time = jiffies;
if (!unlikely(card->halt))
- disk_stats_start(card, bio);
+ bio_meta->start_time = bio_start_io_acct(bio);
dev_dbg(CARD_TO_DEV(card), "BIO[%c]: meta: %p addr8: x%llx size: %d\n",
bio_data_dir(bio) ? 'W' : 'R', bio_meta,
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index 4c297f69171d..dd34504382e5 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -327,7 +327,7 @@ static inline void swim_motor(struct swim __iomem *base,
swim_select(base, RELAX);
if (swim_readbit(base, MOTOR_ON))
break;
- current->state = TASK_INTERRUPTIBLE;
+ set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(1);
}
} else if (action == OFF) {
@@ -346,7 +346,7 @@ static inline void swim_eject(struct swim __iomem *base)
swim_select(base, RELAX);
if (!swim_readbit(base, DISK_IN))
break;
- current->state = TASK_INTERRUPTIBLE;
+ set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(1);
}
swim_select(base, RELAX);
@@ -370,7 +370,7 @@ static inline int swim_step(struct swim __iomem *base)
for (wait = 0; wait < HZ; wait++) {
- current->state = TASK_INTERRUPTIBLE;
+ set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(1);
swim_select(base, RELAX);
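
The swim changes above replace direct assignments to current->state with set_current_state(), which includes the memory barrier needed before the task can safely be rescheduled. A small sketch of the intended poll-and-sleep loop, assuming a hypothetical ready() predicate (not taken from the driver):

#include <linux/types.h>
#include <linux/sched.h>
#include <linux/jiffies.h>

static void wait_until_ready(bool (*ready)(void))
{
	unsigned long timeout = jiffies + HZ;

	while (!ready() && time_before(jiffies, timeout)) {
		/* Barrier-correct way to mark the task as sleeping. */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(1);	/* sleep for one tick, then re-check */
	}
	__set_current_state(TASK_RUNNING);
}
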
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
index 600430685e28..0e734802ee7c 100644
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -35,10 +35,10 @@
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/slab.h>
+#include <linux/pgtable.h>
#include <asm/setup.h>
#include <asm/amigahw.h>
-#include <asm/pgtable.h>
#include <linux/zorro.h>
diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c
index 1a8564a79d8d..33e3b76c4fa9 100644
--- a/drivers/block/zram/zcomp.c
+++ b/drivers/block/zram/zcomp.c
@@ -29,7 +29,6 @@ static const char * const backends[] = {
#if IS_ENABLED(CONFIG_CRYPTO_ZSTD)
"zstd",
#endif
- NULL
};
static void zcomp_strm_free(struct zcomp_strm *zstrm)
@@ -37,19 +36,16 @@ static void zcomp_strm_free(struct zcomp_strm *zstrm)
if (!IS_ERR_OR_NULL(zstrm->tfm))
crypto_free_comp(zstrm->tfm);
free_pages((unsigned long)zstrm->buffer, 1);
- kfree(zstrm);
+ zstrm->tfm = NULL;
+ zstrm->buffer = NULL;
}
/*
- * allocate new zcomp_strm structure with ->tfm initialized by
- * backend, return NULL on error
+ * Initialize the zcomp_strm structure: ->tfm is set up by the backend
+ * and ->buffer is allocated. Returns a negative value on error.
*/
-static struct zcomp_strm *zcomp_strm_alloc(struct zcomp *comp)
+static int zcomp_strm_init(struct zcomp_strm *zstrm, struct zcomp *comp)
{
- struct zcomp_strm *zstrm = kmalloc(sizeof(*zstrm), GFP_KERNEL);
- if (!zstrm)
- return NULL;
-
zstrm->tfm = crypto_alloc_comp(comp->name, 0, 0);
/*
* allocate 2 pages. 1 for compressed data, plus 1 extra for the
@@ -58,16 +54,16 @@ static struct zcomp_strm *zcomp_strm_alloc(struct zcomp *comp)
zstrm->buffer = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
if (IS_ERR_OR_NULL(zstrm->tfm) || !zstrm->buffer) {
zcomp_strm_free(zstrm);
- zstrm = NULL;
+ return -ENOMEM;
}
- return zstrm;
+ return 0;
}
bool zcomp_available_algorithm(const char *comp)
{
int i;
- i = __sysfs_match_string(backends, -1, comp);
+ i = sysfs_match_string(backends, comp);
if (i >= 0)
return true;
@@ -86,9 +82,9 @@ ssize_t zcomp_available_show(const char *comp, char *buf)
{
bool known_algorithm = false;
ssize_t sz = 0;
- int i = 0;
+ int i;
- for (; backends[i]; i++) {
+ for (i = 0; i < ARRAY_SIZE(backends); i++) {
if (!strcmp(comp, backends[i])) {
known_algorithm = true;
sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2,
@@ -113,12 +109,13 @@ ssize_t zcomp_available_show(const char *comp, char *buf)
struct zcomp_strm *zcomp_stream_get(struct zcomp *comp)
{
- return *get_cpu_ptr(comp->stream);
+ local_lock(&comp->stream->lock);
+ return this_cpu_ptr(comp->stream);
}
void zcomp_stream_put(struct zcomp *comp)
{
- put_cpu_ptr(comp->stream);
+ local_unlock(&comp->stream->lock);
}
int zcomp_compress(struct zcomp_strm *zstrm,
@@ -159,17 +156,15 @@ int zcomp_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
{
struct zcomp *comp = hlist_entry(node, struct zcomp, node);
struct zcomp_strm *zstrm;
+ int ret;
- if (WARN_ON(*per_cpu_ptr(comp->stream, cpu)))
- return 0;
+ zstrm = per_cpu_ptr(comp->stream, cpu);
+ local_lock_init(&zstrm->lock);
- zstrm = zcomp_strm_alloc(comp);
- if (IS_ERR_OR_NULL(zstrm)) {
+ ret = zcomp_strm_init(zstrm, comp);
+ if (ret)
pr_err("Can't allocate a compression stream\n");
- return -ENOMEM;
- }
- *per_cpu_ptr(comp->stream, cpu) = zstrm;
- return 0;
+ return ret;
}
int zcomp_cpu_dead(unsigned int cpu, struct hlist_node *node)
@@ -177,10 +172,8 @@ int zcomp_cpu_dead(unsigned int cpu, struct hlist_node *node)
struct zcomp *comp = hlist_entry(node, struct zcomp, node);
struct zcomp_strm *zstrm;
- zstrm = *per_cpu_ptr(comp->stream, cpu);
- if (!IS_ERR_OR_NULL(zstrm))
- zcomp_strm_free(zstrm);
- *per_cpu_ptr(comp->stream, cpu) = NULL;
+ zstrm = per_cpu_ptr(comp->stream, cpu);
+ zcomp_strm_free(zstrm);
return 0;
}
@@ -188,7 +181,7 @@ static int zcomp_init(struct zcomp *comp)
{
int ret;
- comp->stream = alloc_percpu(struct zcomp_strm *);
+ comp->stream = alloc_percpu(struct zcomp_strm);
if (!comp->stream)
return -ENOMEM;
diff --git a/drivers/block/zram/zcomp.h b/drivers/block/zram/zcomp.h
index 1806475b919d..40f6420f4b2e 100644
--- a/drivers/block/zram/zcomp.h
+++ b/drivers/block/zram/zcomp.h
@@ -5,8 +5,11 @@
#ifndef _ZCOMP_H_
#define _ZCOMP_H_
+#include <linux/local_lock.h>
struct zcomp_strm {
+ /* The members ->buffer and ->tfm are protected by ->lock. */
+ local_lock_t lock;
/* compression/decompression buffer */
void *buffer;
struct crypto_comp *tfm;
@@ -14,7 +17,7 @@ struct zcomp_strm {
/* dynamic per-device compression frontend */
struct zcomp {
- struct zcomp_strm * __percpu *stream;
+ struct zcomp_strm __percpu *stream;
const char *name;
struct hlist_node node;
};
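
With the zcomp rework above, every CPU owns a struct zcomp_strm embedded directly in the per-CPU area, and the new local_lock_t documents (and on PREEMPT_RT actually implements) the protection of ->buffer and ->tfm. A minimal sketch of that per-CPU data plus local_lock pattern, with hypothetical names (not part of the patch):

#include <linux/percpu.h>
#include <linux/local_lock.h>

struct my_stream {
	local_lock_t lock;	/* protects scratch */
	void *scratch;
};

static DEFINE_PER_CPU(struct my_stream, my_streams) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

static struct my_stream *my_stream_get(void)
{
	/* Pins the task to the local data until my_stream_put(). */
	local_lock(&my_streams.lock);
	return this_cpu_ptr(&my_streams);
}

static void my_stream_put(void)
{
	local_unlock(&my_streams.lock);
}
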
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index ebb234f36909..6e2ad90b17a3 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -1510,13 +1510,8 @@ static void zram_bio_discard(struct zram *zram, u32 index,
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
int offset, unsigned int op, struct bio *bio)
{
- unsigned long start_time = jiffies;
- struct request_queue *q = zram->disk->queue;
int ret;
- generic_start_io_acct(q, op, bvec->bv_len >> SECTOR_SHIFT,
- &zram->disk->part0);
-
if (!op_is_write(op)) {
atomic64_inc(&zram->stats.num_reads);
ret = zram_bvec_read(zram, bvec, index, offset, bio);
@@ -1526,8 +1521,6 @@ static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
ret = zram_bvec_write(zram, bvec, index, offset, bio);
}
- generic_end_io_acct(q, op, &zram->disk->part0, start_time);
-
zram_slot_lock(zram, index);
zram_accessed(zram, index);
zram_slot_unlock(zram, index);
@@ -1548,6 +1541,7 @@ static void __zram_make_request(struct zram *zram, struct bio *bio)
u32 index;
struct bio_vec bvec;
struct bvec_iter iter;
+ unsigned long start_time;
index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
offset = (bio->bi_iter.bi_sector &
@@ -1563,6 +1557,7 @@ static void __zram_make_request(struct zram *zram, struct bio *bio)
break;
}
+ start_time = bio_start_io_acct(bio);
bio_for_each_segment(bvec, bio, iter) {
struct bio_vec bv = bvec;
unsigned int unwritten = bvec.bv_len;
@@ -1571,8 +1566,10 @@ static void __zram_make_request(struct zram *zram, struct bio *bio)
bv.bv_len = min_t(unsigned int, PAGE_SIZE - offset,
unwritten);
if (zram_bvec_rw(zram, &bv, index, offset,
- bio_op(bio), bio) < 0)
- goto out;
+ bio_op(bio), bio) < 0) {
+ bio->bi_status = BLK_STS_IOERR;
+ break;
+ }
bv.bv_offset += bv.bv_len;
unwritten -= bv.bv_len;
@@ -1580,12 +1577,8 @@ static void __zram_make_request(struct zram *zram, struct bio *bio)
update_position(&index, &offset, &bv);
} while (unwritten);
}
-
+ bio_end_io_acct(bio, start_time);
bio_endio(bio);
- return;
-
-out:
- bio_io_error(bio);
}
/*
@@ -1633,6 +1626,7 @@ static int zram_rw_page(struct block_device *bdev, sector_t sector,
u32 index;
struct zram *zram;
struct bio_vec bv;
+ unsigned long start_time;
if (PageTransHuge(page))
return -ENOTSUPP;
@@ -1651,7 +1645,9 @@ static int zram_rw_page(struct block_device *bdev, sector_t sector,
bv.bv_len = PAGE_SIZE;
bv.bv_offset = 0;
+ start_time = disk_start_io_acct(bdev->bd_disk, SECTORS_PER_PAGE, op);
ret = zram_bvec_rw(zram, &bv, index, offset, op, NULL);
+ disk_end_io_acct(bdev->bd_disk, op, start_time);
out:
/*
* If I/O fails, just return error(ie, non-zero) without
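
In the zram_rw_page() hunk above there is no struct bio to account against, so the patch uses the disk-level helpers, which take the gendisk, a sector count and the operation. A short sketch of that variant for a single page transfer (illustrative only, not the driver code):

#include <linux/blkdev.h>
#include <linux/genhd.h>

static int my_rw_page(struct gendisk *disk, unsigned int op)
{
	unsigned long start;
	int ret;

	start = disk_start_io_acct(disk, PAGE_SIZE >> SECTOR_SHIFT, op);
	ret = 0;		/* ... perform the page-sized transfer here ... */
	disk_end_io_acct(disk, op, start);

	return ret;
}
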
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
index 1f498f358f60..1b9743b7f2ef 100644
--- a/drivers/bluetooth/btbcm.c
+++ b/drivers/bluetooth/btbcm.c
@@ -27,6 +27,11 @@
#define BDADDR_BCM4345C5 (&(bdaddr_t) {{0xac, 0x1f, 0x00, 0xc5, 0x45, 0x43}})
#define BDADDR_BCM43341B (&(bdaddr_t) {{0xac, 0x1f, 0x00, 0x1b, 0x34, 0x43}})
+#define BCM_FW_NAME_LEN 64
+#define BCM_FW_NAME_COUNT_MAX 2
+/* For kmalloc-ing the fw-name array instead of putting it on the stack */
+typedef char bcm_fw_name[BCM_FW_NAME_LEN];
+
int btbcm_check_bdaddr(struct hci_dev *hdev)
{
struct hci_rp_read_bd_addr *bda;
@@ -358,6 +363,13 @@ static int btbcm_read_info(struct hci_dev *hdev)
bt_dev_info(hdev, "BCM: features 0x%2.2x", skb->data[1]);
kfree_skb(skb);
+ return 0;
+}
+
+static int btbcm_print_local_name(struct hci_dev *hdev)
+{
+ struct sk_buff *skb;
+
/* Read Local Name */
skb = btbcm_read_local_name(hdev);
if (IS_ERR(skb))
@@ -380,6 +392,7 @@ static const struct bcm_subver_table bcm_uart_subver_table[] = {
{ 0x410e, "BCM43341B0" }, /* 002.001.014 */
{ 0x4204, "BCM2076B1" }, /* 002.002.004 */
{ 0x4406, "BCM4324B3" }, /* 002.004.006 */
+ { 0x4606, "BCM4324B5" }, /* 002.006.006 */
{ 0x6109, "BCM4335C0" }, /* 003.001.009 */
{ 0x610c, "BCM4354" }, /* 003.001.012 */
{ 0x2122, "BCM4343A0" }, /* 001.001.034 */
@@ -395,27 +408,32 @@ static const struct bcm_subver_table bcm_uart_subver_table[] = {
};
static const struct bcm_subver_table bcm_usb_subver_table[] = {
+ { 0x2105, "BCM20703A1" }, /* 001.001.005 */
{ 0x210b, "BCM43142A0" }, /* 001.001.011 */
{ 0x2112, "BCM4314A0" }, /* 001.001.018 */
{ 0x2118, "BCM20702A0" }, /* 001.001.024 */
{ 0x2126, "BCM4335A0" }, /* 001.001.038 */
{ 0x220e, "BCM20702A1" }, /* 001.002.014 */
- { 0x230f, "BCM4354A2" }, /* 001.003.015 */
+ { 0x230f, "BCM4356A2" }, /* 001.003.015 */
{ 0x4106, "BCM4335B0" }, /* 002.001.006 */
{ 0x410e, "BCM20702B0" }, /* 002.001.014 */
{ 0x6109, "BCM4335C0" }, /* 003.001.009 */
{ 0x610c, "BCM4354" }, /* 003.001.012 */
+ { 0x6607, "BCM4350C5" }, /* 003.006.007 */
{ }
};
-int btbcm_initialize(struct hci_dev *hdev, char *fw_name, size_t len,
- bool reinit)
+int btbcm_initialize(struct hci_dev *hdev, bool *fw_load_done)
{
u16 subver, rev, pid, vid;
- const char *hw_name = "BCM";
struct sk_buff *skb;
struct hci_rp_read_local_version *ver;
const struct bcm_subver_table *bcm_subver_table;
+ const char *hw_name = NULL;
+ char postfix[16] = "";
+ int fw_name_count = 0;
+ bcm_fw_name *fw_name;
+ const struct firmware *fw;
int i, err;
/* Reset */
@@ -434,15 +452,14 @@ int btbcm_initialize(struct hci_dev *hdev, char *fw_name, size_t len,
kfree_skb(skb);
/* Read controller information */
- if (!reinit) {
+ if (!(*fw_load_done)) {
err = btbcm_read_info(hdev);
if (err)
return err;
}
-
- /* Upper nibble of rev should be between 0 and 3? */
- if (((rev & 0xf000) >> 12) > 3)
- return 0;
+ err = btbcm_print_local_name(hdev);
+ if (err)
+ return err;
bcm_subver_table = (hdev->bus == HCI_USB) ? bcm_usb_subver_table :
bcm_uart_subver_table;
@@ -454,6 +471,13 @@ int btbcm_initialize(struct hci_dev *hdev, char *fw_name, size_t len,
}
}
+ bt_dev_info(hdev, "%s (%3.3u.%3.3u.%3.3u) build %4.4u",
+ hw_name ? hw_name : "BCM", (subver & 0xe000) >> 13,
+ (subver & 0x1f00) >> 8, (subver & 0x00ff), rev & 0x0fff);
+
+ if (*fw_load_done)
+ return 0;
+
if (hdev->bus == HCI_USB) {
/* Read USB Product Info */
skb = btbcm_read_usb_product(hdev);
@@ -464,85 +488,81 @@ int btbcm_initialize(struct hci_dev *hdev, char *fw_name, size_t len,
pid = get_unaligned_le16(skb->data + 3);
kfree_skb(skb);
- snprintf(fw_name, len, "brcm/%s-%4.4x-%4.4x.hcd",
- hw_name, vid, pid);
- } else {
- snprintf(fw_name, len, "brcm/%s.hcd", hw_name);
+ snprintf(postfix, sizeof(postfix), "-%4.4x-%4.4x", vid, pid);
}
- bt_dev_info(hdev, "%s (%3.3u.%3.3u.%3.3u) build %4.4u",
- hw_name, (subver & 0xe000) >> 13,
- (subver & 0x1f00) >> 8, (subver & 0x00ff), rev & 0x0fff);
+ fw_name = kmalloc(BCM_FW_NAME_COUNT_MAX * BCM_FW_NAME_LEN, GFP_KERNEL);
+ if (!fw_name)
+ return -ENOMEM;
+
+ if (hw_name) {
+ snprintf(fw_name[fw_name_count], BCM_FW_NAME_LEN,
+ "brcm/%s%s.hcd", hw_name, postfix);
+ fw_name_count++;
+ }
+
+ snprintf(fw_name[fw_name_count], BCM_FW_NAME_LEN,
+ "brcm/BCM%s.hcd", postfix);
+ fw_name_count++;
+
+ for (i = 0; i < fw_name_count; i++) {
+ err = firmware_request_nowarn(&fw, fw_name[i], &hdev->dev);
+ if (err == 0) {
+ bt_dev_info(hdev, "%s '%s' Patch",
+ hw_name ? hw_name : "BCM", fw_name[i]);
+ *fw_load_done = true;
+ break;
+ }
+ }
+
+ if (*fw_load_done) {
+ err = btbcm_patchram(hdev, fw);
+ if (err)
+ bt_dev_info(hdev, "BCM: Patch failed (%d)", err);
+
+ release_firmware(fw);
+ } else {
+ bt_dev_err(hdev, "BCM: firmware Patch file not found, tried:");
+ for (i = 0; i < fw_name_count; i++)
+ bt_dev_err(hdev, "BCM: '%s'", fw_name[i]);
+ }
+ kfree(fw_name);
return 0;
}
EXPORT_SYMBOL_GPL(btbcm_initialize);
-int btbcm_finalize(struct hci_dev *hdev)
+int btbcm_finalize(struct hci_dev *hdev, bool *fw_load_done)
{
- char fw_name[64];
int err;
- /* Re-initialize */
- err = btbcm_initialize(hdev, fw_name, sizeof(fw_name), true);
- if (err)
- return err;
+ /* Re-initialize if necessary */
+ if (*fw_load_done) {
+ err = btbcm_initialize(hdev, fw_load_done);
+ if (err)
+ return err;
+ }
btbcm_check_bdaddr(hdev);
set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
- /* Some devices ship with the controller default address.
- * Allow the bootloader to set a valid address through the
- * device tree.
- */
- set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
-
return 0;
}
EXPORT_SYMBOL_GPL(btbcm_finalize);
int btbcm_setup_patchram(struct hci_dev *hdev)
{
- char fw_name[64];
- const struct firmware *fw;
- struct sk_buff *skb;
+ bool fw_load_done = false;
int err;
/* Initialize */
- err = btbcm_initialize(hdev, fw_name, sizeof(fw_name), false);
- if (err)
- return err;
-
- err = request_firmware(&fw, fw_name, &hdev->dev);
- if (err < 0) {
- bt_dev_info(hdev, "BCM: Patch %s not found", fw_name);
- goto done;
- }
-
- btbcm_patchram(hdev, fw);
-
- release_firmware(fw);
-
- /* Re-initialize */
- err = btbcm_initialize(hdev, fw_name, sizeof(fw_name), true);
+ err = btbcm_initialize(hdev, &fw_load_done);
if (err)
return err;
- /* Read Local Name */
- skb = btbcm_read_local_name(hdev);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
- bt_dev_info(hdev, "%s", (char *)(skb->data + 1));
- kfree_skb(skb);
-
-done:
- btbcm_check_bdaddr(hdev);
-
- set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
-
- return 0;
+ /* Re-initialize after loading Patch */
+ return btbcm_finalize(hdev, &fw_load_done);
}
EXPORT_SYMBOL_GPL(btbcm_setup_patchram);
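
The reworked btbcm_initialize() above builds a short list of candidate firmware names and tries them in order with firmware_request_nowarn(), which keeps the expected misses out of the kernel log. A self-contained sketch of that probe-in-order pattern (the device and name list are placeholders, not the driver's own):

#include <linux/firmware.h>
#include <linux/device.h>

static int load_first_available(struct device *dev, const char * const *names,
				int count, const struct firmware **fw)
{
	int i, err = -ENOENT;

	for (i = 0; i < count; i++) {
		/* Unlike request_firmware(), a missing file is not warned about. */
		err = firmware_request_nowarn(fw, names[i], dev);
		if (!err) {
			dev_info(dev, "loaded '%s'\n", names[i]);
			break;
		}
	}

	return err;	/* caller must release_firmware(*fw) on success */
}
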
diff --git a/drivers/bluetooth/btbcm.h b/drivers/bluetooth/btbcm.h
index 014ef847a486..8bf01565fdfc 100644
--- a/drivers/bluetooth/btbcm.h
+++ b/drivers/bluetooth/btbcm.h
@@ -62,9 +62,8 @@ int btbcm_write_pcm_int_params(struct hci_dev *hdev,
int btbcm_setup_patchram(struct hci_dev *hdev);
int btbcm_setup_apple(struct hci_dev *hdev);
-int btbcm_initialize(struct hci_dev *hdev, char *fw_name, size_t len,
- bool reinit);
-int btbcm_finalize(struct hci_dev *hdev);
+int btbcm_initialize(struct hci_dev *hdev, bool *fw_load_done);
+int btbcm_finalize(struct hci_dev *hdev, bool *fw_load_done);
#else
@@ -105,13 +104,12 @@ static inline int btbcm_setup_apple(struct hci_dev *hdev)
return 0;
}
-static inline int btbcm_initialize(struct hci_dev *hdev, char *fw_name,
- size_t len, bool reinit)
+static inline int btbcm_initialize(struct hci_dev *hdev, bool *fw_load_done)
{
return 0;
}
-static inline int btbcm_finalize(struct hci_dev *hdev)
+static inline int btbcm_finalize(struct hci_dev *hdev, bool *fw_load_done)
{
return 0;
}
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index 0f3a020703ab..a296f8526433 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -355,31 +355,31 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8997 = {
static const struct sdio_device_id btmrvl_sdio_ids[] = {
/* Marvell SD8688 Bluetooth device */
- { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x9105),
+ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8688_BT),
.driver_data = (unsigned long)&btmrvl_sdio_sd8688 },
/* Marvell SD8787 Bluetooth device */
- { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x911A),
+ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8787_BT),
.driver_data = (unsigned long)&btmrvl_sdio_sd8787 },
/* Marvell SD8787 Bluetooth AMP device */
- { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x911B),
+ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8787_BT_AMP),
.driver_data = (unsigned long)&btmrvl_sdio_sd8787 },
/* Marvell SD8797 Bluetooth device */
- { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x912A),
+ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8797_BT),
.driver_data = (unsigned long)&btmrvl_sdio_sd8797 },
/* Marvell SD8887 Bluetooth device */
- { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x9136),
+ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8887_BT),
.driver_data = (unsigned long)&btmrvl_sdio_sd8887 },
/* Marvell SD8897 Bluetooth device */
- { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x912E),
+ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8897_BT),
.driver_data = (unsigned long)&btmrvl_sdio_sd8897 },
/* Marvell SD8977 Bluetooth device */
- { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x9146),
+ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8977_BT),
.driver_data = (unsigned long)&btmrvl_sdio_sd8977 },
/* Marvell SD8987 Bluetooth device */
- { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x914A),
+ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8987_BT),
.driver_data = (unsigned long)&btmrvl_sdio_sd8987 },
/* Marvell SD8997 Bluetooth device */
- { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x9142),
+ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8997_BT),
.driver_data = (unsigned long)&btmrvl_sdio_sd8997 },
{ } /* Terminating entry */
diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c
index 519788c442ca..bff095be2f97 100644
--- a/drivers/bluetooth/btmtksdio.c
+++ b/drivers/bluetooth/btmtksdio.c
@@ -51,9 +51,9 @@ static const struct btmtksdio_data mt7668_data = {
};
static const struct sdio_device_id btmtksdio_table[] = {
- {SDIO_DEVICE(SDIO_VENDOR_ID_MEDIATEK, 0x7663),
+ {SDIO_DEVICE(SDIO_VENDOR_ID_MEDIATEK, SDIO_DEVICE_ID_MEDIATEK_MT7663),
.driver_data = (kernel_ulong_t)&mt7663_data },
- {SDIO_DEVICE(SDIO_VENDOR_ID_MEDIATEK, 0x7668),
+ {SDIO_DEVICE(SDIO_VENDOR_ID_MEDIATEK, SDIO_DEVICE_ID_MEDIATEK_MT7668),
.driver_data = (kernel_ulong_t)&mt7668_data },
{ } /* Terminating entry */
};
diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c
index e11169ad8247..6c40bc75fb5b 100644
--- a/drivers/bluetooth/btmtkuart.c
+++ b/drivers/bluetooth/btmtkuart.c
@@ -695,8 +695,7 @@ static int btmtkuart_change_baudrate(struct hci_dev *hdev)
/* Send a dummy byte 0xff to activate the new baudrate */
param = 0xff;
- err = serdev_device_write(bdev->serdev, &param, sizeof(param),
- MAX_SCHEDULE_TIMEOUT);
+ err = serdev_device_write_buf(bdev->serdev, &param, sizeof(param));
if (err < 0 || err < sizeof(param))
return err;
@@ -1015,7 +1014,7 @@ static int btmtkuart_probe(struct serdev_device *serdev)
if (btmtkuart_is_standalone(bdev)) {
err = clk_prepare_enable(bdev->osc);
if (err < 0)
- return err;
+ goto err_hci_free_dev;
if (bdev->boot) {
gpiod_set_value_cansleep(bdev->boot, 1);
@@ -1028,10 +1027,8 @@ static int btmtkuart_probe(struct serdev_device *serdev)
/* Power on */
err = regulator_enable(bdev->vcc);
- if (err < 0) {
- clk_disable_unprepare(bdev->osc);
- return err;
- }
+ if (err < 0)
+ goto err_clk_disable_unprepare;
/* Reset if the reset-gpios is available otherwise the board
* -level design should be guaranteed.
@@ -1063,7 +1060,6 @@ static int btmtkuart_probe(struct serdev_device *serdev)
err = hci_register_dev(hdev);
if (err < 0) {
dev_err(&serdev->dev, "Can't register HCI device\n");
- hci_free_dev(hdev);
goto err_regulator_disable;
}
@@ -1072,6 +1068,11 @@ static int btmtkuart_probe(struct serdev_device *serdev)
err_regulator_disable:
if (btmtkuart_is_standalone(bdev))
regulator_disable(bdev->vcc);
+err_clk_disable_unprepare:
+ if (btmtkuart_is_standalone(bdev))
+ clk_disable_unprepare(bdev->osc);
+err_hci_free_dev:
+ hci_free_dev(hdev);
return err;
}
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index a16845c0751d..c5984966f315 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -32,7 +32,7 @@ int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version,
* VSE event. WCN3991 sends version command response as a payload to
* command complete event.
*/
- if (soc_type == QCA_WCN3991) {
+ if (soc_type >= QCA_WCN3991) {
event_type = 0;
rlen += 1;
rtype = EDL_PATCH_VER_REQ_CMD;
@@ -69,22 +69,26 @@ int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version,
goto out;
}
- if (soc_type == QCA_WCN3991)
+ if (soc_type >= QCA_WCN3991)
memmove(&edl->data, &edl->data[1], sizeof(*ver));
ver = (struct qca_btsoc_version *)(edl->data);
- BT_DBG("%s: Product:0x%08x", hdev->name, le32_to_cpu(ver->product_id));
- BT_DBG("%s: Patch :0x%08x", hdev->name, le16_to_cpu(ver->patch_ver));
- BT_DBG("%s: ROM :0x%08x", hdev->name, le16_to_cpu(ver->rom_ver));
- BT_DBG("%s: SOC :0x%08x", hdev->name, le32_to_cpu(ver->soc_id));
+ bt_dev_info(hdev, "QCA Product ID :0x%08x",
+ le32_to_cpu(ver->product_id));
+ bt_dev_info(hdev, "QCA SOC Version :0x%08x",
+ le32_to_cpu(ver->soc_id));
+ bt_dev_info(hdev, "QCA ROM Version :0x%08x",
+ le16_to_cpu(ver->rom_ver));
+ bt_dev_info(hdev, "QCA Patch Version:0x%08x",
+ le16_to_cpu(ver->patch_ver));
/* QCA chipset version can be decided by patch and SoC
* version, combination with upper 2 bytes from SoC
* and lower 2 bytes from patch will be used.
*/
*soc_version = (le32_to_cpu(ver->soc_id) << 16) |
- (le16_to_cpu(ver->rom_ver) & 0x0000ffff);
+ (le16_to_cpu(ver->rom_ver) & 0x0000ffff);
if (*soc_version == 0)
err = -EILSEQ;
@@ -217,7 +221,7 @@ static void qca_tlv_check_data(struct qca_fw_config *config,
tlv_nvm->data[0] |= 0x80;
/* UART Baud Rate */
- if (soc_type == QCA_WCN3991)
+ if (soc_type >= QCA_WCN3991)
tlv_nvm->data[1] = nvm_baud_rate;
else
tlv_nvm->data[2] = nvm_baud_rate;
@@ -268,7 +272,7 @@ static int qca_tlv_send_segment(struct hci_dev *hdev, int seg_size,
* VSE event. WCN3991 sends version command response as a payload to
* command complete event.
*/
- if (soc_type == QCA_WCN3991) {
+ if (soc_type >= QCA_WCN3991) {
event_type = 0;
rlen = sizeof(*edl);
rtype = EDL_PATCH_TLV_REQ_CMD;
@@ -301,7 +305,7 @@ static int qca_tlv_send_segment(struct hci_dev *hdev, int seg_size,
err = -EIO;
}
- if (soc_type == QCA_WCN3991)
+ if (soc_type >= QCA_WCN3991)
goto out;
tlv_resp = (struct tlv_seg_resp *)(edl->data);
@@ -442,6 +446,11 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
(soc_ver & 0x0000000f);
snprintf(config.fwname, sizeof(config.fwname),
"qca/crbtfw%02x.tlv", rom_ver);
+ } else if (soc_type == QCA_QCA6390) {
+ rom_ver = ((soc_ver & 0x00000f00) >> 0x04) |
+ (soc_ver & 0x0000000f);
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/htbtfw%02x.tlv", rom_ver);
} else {
snprintf(config.fwname, sizeof(config.fwname),
"qca/rampatch_%08x.bin", soc_ver);
@@ -464,6 +473,9 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
else if (qca_is_wcn399x(soc_type))
snprintf(config.fwname, sizeof(config.fwname),
"qca/crnv%02x.bin", rom_ver);
+ else if (soc_type == QCA_QCA6390)
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/htnv%02x.bin", rom_ver);
else
snprintf(config.fwname, sizeof(config.fwname),
"qca/nvm_%08x.bin", soc_ver);
diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h
index e16a4d650597..6e1e62dd4b95 100644
--- a/drivers/bluetooth/btqca.h
+++ b/drivers/bluetooth/btqca.h
@@ -125,8 +125,9 @@ enum qca_btsoc_type {
QCA_AR3002,
QCA_ROME,
QCA_WCN3990,
- QCA_WCN3991,
QCA_WCN3998,
+ QCA_WCN3991,
+ QCA_QCA6390,
};
#if IS_ENABLED(CONFIG_BT_QCA)
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
index 67f4bc21e7c5..3a9afc905f24 100644
--- a/drivers/bluetooth/btrtl.c
+++ b/drivers/bluetooth/btrtl.c
@@ -130,12 +130,19 @@ static const struct id_table ic_id_table[] = {
.cfg_name = "rtl_bt/rtl8821c_config" },
/* 8761A */
- { IC_MATCH_FL_LMPSUBV, RTL_ROM_LMP_8761A, 0x0,
+ { IC_INFO(RTL_ROM_LMP_8761A, 0xa),
.config_needed = false,
.has_rom_version = true,
.fw_name = "rtl_bt/rtl8761a_fw.bin",
.cfg_name = "rtl_bt/rtl8761a_config" },
+ /* 8761B */
+ { IC_INFO(RTL_ROM_LMP_8761A, 0xb),
+ .config_needed = false,
+ .has_rom_version = true,
+ .fw_name = "rtl_bt/rtl8761b_fw.bin",
+ .cfg_name = "rtl_bt/rtl8761b_config" },
+
/* 8822C with UART interface */
{ .match_flags = IC_MATCH_FL_LMPSUBV | IC_MATCH_FL_HCIREV |
IC_MATCH_FL_HCIBUS,
@@ -267,6 +274,7 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev,
{ RTL_ROM_LMP_8723B, 9 }, /* 8723D */
{ RTL_ROM_LMP_8821A, 10 }, /* 8821C */
{ RTL_ROM_LMP_8822B, 13 }, /* 8822C */
+ { RTL_ROM_LMP_8761A, 14 }, /* 8761B */
};
min_size = sizeof(struct rtl_epatch_header) + sizeof(extension_sig) + 3;
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 3bdec42c9612..5f022e9cf667 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -58,6 +58,7 @@ static struct usb_driver btusb_driver;
#define BTUSB_CW6622 0x100000
#define BTUSB_MEDIATEK 0x200000
#define BTUSB_WIDEBAND_SPEECH 0x400000
+#define BTUSB_VALID_LE_STATES 0x800000
static const struct usb_device_id btusb_table[] = {
/* Generic Bluetooth USB device */
@@ -335,11 +336,14 @@ static const struct usb_device_id blacklist_table[] = {
/* Intel Bluetooth devices */
{ USB_DEVICE(0x8087, 0x0025), .driver_info = BTUSB_INTEL_NEW |
- BTUSB_WIDEBAND_SPEECH },
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x8087, 0x0026), .driver_info = BTUSB_INTEL_NEW |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x8087, 0x0029), .driver_info = BTUSB_INTEL_NEW |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x8087, 0x0032), .driver_info = BTUSB_INTEL_NEW |
+ BTUSB_WIDEBAND_SPEECH},
{ USB_DEVICE(0x8087, 0x07da), .driver_info = BTUSB_CSR },
{ USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL },
{ USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL },
@@ -348,7 +352,8 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x8087, 0x0aa7), .driver_info = BTUSB_INTEL |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x8087, 0x0aaa), .driver_info = BTUSB_INTEL_NEW |
- BTUSB_WIDEBAND_SPEECH },
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
/* Other Intel Bluetooth devices */
{ USB_VENDOR_AND_INTERFACE_INFO(0x8087, 0xe0, 0x01, 0x01),
@@ -492,6 +497,8 @@ struct btusb_data {
__u8 cmdreq;
unsigned int sco_num;
+ unsigned int air_mode;
+ bool usb_alt6_packet_flow;
int isoc_altsetting;
int suspend_count;
@@ -983,6 +990,42 @@ static void btusb_isoc_complete(struct urb *urb)
}
}
+static inline void __fill_isoc_descriptor_msbc(struct urb *urb, int len,
+ int mtu, struct btusb_data *data)
+{
+ int i, offset = 0;
+ unsigned int interval;
+
+ BT_DBG("len %d mtu %d", len, mtu);
+
+ /* For the mSBC ALT 6 setting the host must send packets as a
+ * continuous flow. Per Core Spec 5, Vol 4, Part B, Table 2.1, the
+ * HCI packet interval for ALT setting 6 is 7.5 ms per USB packet.
+ * To maintain that rate, 63-byte USB packets are sent alternately
+ * at 7 ms and 8 ms spacing, averaging out to the required 7.5 ms.
+ */
+ if (data->usb_alt6_packet_flow) {
+ interval = 7;
+ data->usb_alt6_packet_flow = false;
+ } else {
+ interval = 6;
+ data->usb_alt6_packet_flow = true;
+ }
+
+ for (i = 0; i < interval; i++) {
+ urb->iso_frame_desc[i].offset = offset;
+ urb->iso_frame_desc[i].length = offset;
+ }
+
+ if (len && i < BTUSB_MAX_ISOC_FRAMES) {
+ urb->iso_frame_desc[i].offset = offset;
+ urb->iso_frame_desc[i].length = len;
+ i++;
+ }
+
+ urb->number_of_packets = i;
+}
+
static inline void __fill_isoc_descriptor(struct urb *urb, int len, int mtu)
{
int i, offset = 0;
@@ -1386,9 +1429,13 @@ static struct urb *alloc_isoc_urb(struct hci_dev *hdev, struct sk_buff *skb)
urb->transfer_flags = URB_ISO_ASAP;
- __fill_isoc_descriptor(urb, skb->len,
- le16_to_cpu(data->isoc_tx_ep->wMaxPacketSize));
-
+ if (data->isoc_altsetting == 6)
+ __fill_isoc_descriptor_msbc(urb, skb->len,
+ le16_to_cpu(data->isoc_tx_ep->wMaxPacketSize),
+ data);
+ else
+ __fill_isoc_descriptor(urb, skb->len,
+ le16_to_cpu(data->isoc_tx_ep->wMaxPacketSize));
skb->dev = (void *)hdev;
return urb;
@@ -1484,6 +1531,7 @@ static void btusb_notify(struct hci_dev *hdev, unsigned int evt)
if (hci_conn_num(hdev, SCO_LINK) != data->sco_num) {
data->sco_num = hci_conn_num(hdev, SCO_LINK);
+ data->air_mode = evt;
schedule_work(&data->work);
}
}
@@ -1531,11 +1579,70 @@ static inline int __set_isoc_interface(struct hci_dev *hdev, int altsetting)
return 0;
}
+static int btusb_switch_alt_setting(struct hci_dev *hdev, int new_alts)
+{
+ struct btusb_data *data = hci_get_drvdata(hdev);
+ int err;
+
+ if (data->isoc_altsetting != new_alts) {
+ unsigned long flags;
+
+ clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
+ usb_kill_anchored_urbs(&data->isoc_anchor);
+
+ /* When isochronous alternate setting needs to be
+ * changed, because SCO connection has been added
+ * or removed, a packet fragment may be left in the
+ * reassembling state. This could lead to wrongly
+ * assembled fragments.
+ *
+ * Clear outstanding fragment when selecting a new
+ * alternate setting.
+ */
+ spin_lock_irqsave(&data->rxlock, flags);
+ kfree_skb(data->sco_skb);
+ data->sco_skb = NULL;
+ spin_unlock_irqrestore(&data->rxlock, flags);
+
+ err = __set_isoc_interface(hdev, new_alts);
+ if (err < 0)
+ return err;
+ }
+
+ if (!test_and_set_bit(BTUSB_ISOC_RUNNING, &data->flags)) {
+ if (btusb_submit_isoc_urb(hdev, GFP_KERNEL) < 0)
+ clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
+ else
+ btusb_submit_isoc_urb(hdev, GFP_KERNEL);
+ }
+
+ return 0;
+}
+
+static struct usb_host_interface *btusb_find_altsetting(struct btusb_data *data,
+ int alt)
+{
+ struct usb_interface *intf = data->isoc;
+ int i;
+
+ BT_DBG("Looking for Alt no :%d", alt);
+
+ if (!intf)
+ return NULL;
+
+ for (i = 0; i < intf->num_altsetting; i++) {
+ if (intf->altsetting[i].desc.bAlternateSetting == alt)
+ return &intf->altsetting[i];
+ }
+
+ return NULL;
+}
+
static void btusb_work(struct work_struct *work)
{
struct btusb_data *data = container_of(work, struct btusb_data, work);
struct hci_dev *hdev = data->hdev;
- int new_alts;
+ int new_alts = 0;
int err;
if (data->sco_num > 0) {
@@ -1550,44 +1657,27 @@ static void btusb_work(struct work_struct *work)
set_bit(BTUSB_DID_ISO_RESUME, &data->flags);
}
- if (hdev->voice_setting & 0x0020) {
- static const int alts[3] = { 2, 4, 5 };
-
- new_alts = alts[data->sco_num - 1];
- } else {
- new_alts = data->sco_num;
- }
-
- if (data->isoc_altsetting != new_alts) {
- unsigned long flags;
+ if (data->air_mode == HCI_NOTIFY_ENABLE_SCO_CVSD) {
+ if (hdev->voice_setting & 0x0020) {
+ static const int alts[3] = { 2, 4, 5 };
- clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
- usb_kill_anchored_urbs(&data->isoc_anchor);
-
- /* When isochronous alternate setting needs to be
- * changed, because SCO connection has been added
- * or removed, a packet fragment may be left in the
- * reassembling state. This could lead to wrongly
- * assembled fragments.
- *
- * Clear outstanding fragment when selecting a new
- * alternate setting.
- */
- spin_lock_irqsave(&data->rxlock, flags);
- kfree_skb(data->sco_skb);
- data->sco_skb = NULL;
- spin_unlock_irqrestore(&data->rxlock, flags);
+ new_alts = alts[data->sco_num - 1];
+ } else {
+ new_alts = data->sco_num;
+ }
+ } else if (data->air_mode == HCI_NOTIFY_ENABLE_SCO_TRANSP) {
- if (__set_isoc_interface(hdev, new_alts) < 0)
- return;
- }
+ data->usb_alt6_packet_flow = true;
- if (!test_and_set_bit(BTUSB_ISOC_RUNNING, &data->flags)) {
- if (btusb_submit_isoc_urb(hdev, GFP_KERNEL) < 0)
- clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
+ /* Check if Alt 6 is supported for Transparent audio */
+ if (btusb_find_altsetting(data, 6))
+ new_alts = 6;
else
- btusb_submit_isoc_urb(hdev, GFP_KERNEL);
+ bt_dev_err(hdev, "Device does not support ALT setting 6");
}
+
+ if (btusb_switch_alt_setting(hdev, new_alts) < 0)
+ bt_dev_err(hdev, "set USB alt:(%d) failed!", new_alts);
} else {
clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
usb_kill_anchored_urbs(&data->isoc_anchor);
@@ -2252,7 +2342,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
if (ver.fw_variant == 0x23) {
clear_bit(BTUSB_BOOTLOADER, &data->flags);
btintel_check_bdaddr(hdev);
- return 0;
+ goto finish;
}
/* If the device is not in bootloader mode, then the only possible
@@ -2452,6 +2542,23 @@ done:
*/
btintel_load_ddc_config(hdev, fwname);
+ /* Read the Intel version information after loading the FW */
+ err = btintel_read_version(hdev, &ver);
+ if (err)
+ return err;
+
+ btintel_version_info(hdev, &ver);
+
+finish:
+ /* All Intel controllers that support the Microsoft vendor
+ * extension are using 0xFC1E for VsMsftOpCode.
+ */
+ switch (ver.hw_variant) {
+ case 0x12: /* ThP */
+ hci_set_msft_opcode(hdev, 0xFC1E);
+ break;
+ }
+
/* Set the event mask for Intel specific vendor events. This enables
* a few extra events that are useful during general operation. It
* does not enable any debugging related events.
@@ -2461,13 +2568,6 @@ done:
*/
btintel_set_event_mask(hdev, false);
- /* Read the Intel version information after loading the FW */
- err = btintel_read_version(hdev, &ver);
- if (err)
- return err;
-
- btintel_version_info(hdev, &ver);
-
return 0;
}
@@ -3600,6 +3700,13 @@ static void btusb_check_needs_reset_resume(struct usb_interface *intf)
interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
}
+static bool btusb_prevent_wake(struct hci_dev *hdev)
+{
+ struct btusb_data *data = hci_get_drvdata(hdev);
+
+ return !device_may_wakeup(&data->udev->dev);
+}
+
static int btusb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
@@ -3733,6 +3840,7 @@ static int btusb_probe(struct usb_interface *intf,
hdev->flush = btusb_flush;
hdev->send = btusb_send_frame;
hdev->notify = btusb_notify;
+ hdev->prevent_wake = btusb_prevent_wake;
#ifdef CONFIG_PM
err = btusb_config_oob_wake(hdev);
@@ -3877,6 +3985,9 @@ static int btusb_probe(struct usb_interface *intf,
if (id->driver_info & BTUSB_WIDEBAND_SPEECH)
set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
+ if (id->driver_info & BTUSB_VALID_LE_STATES)
+ set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
+
if (id->driver_info & BTUSB_DIGIANSWER) {
data->cmdreq_type = USB_TYPE_VENDOR;
set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
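
The __fill_isoc_descriptor_msbc() comment earlier in the btusb.c hunk describes the timing trick for mSBC over ALT setting 6: alternating bursts of 7 and 8 frame slots average (7 + 8) / 2 = 7.5 ms per packet, the interval the spec requires. A tiny sketch of that toggle, assuming the caller keeps the flag across packets (hypothetical helper, not the driver function):

#include <linux/types.h>

/* Return the number of frame slots to cover before the next mSBC packet:
 * 7, 8, 7, 8, ... which averages out to the required 7.5 ms interval.
 */
static unsigned int next_msbc_interval(bool *toggle)
{
	unsigned int slots = *toggle ? 7 : 8;

	*toggle = !*toggle;
	return slots;
}
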
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index b236cb11c0dc..8ea5ca8d71d6 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -118,6 +118,7 @@ struct bcm_device {
u32 oper_speed;
int irq;
bool irq_active_low;
+ bool irq_acquired;
#ifdef CONFIG_PM
struct hci_uart *hu;
@@ -333,6 +334,8 @@ static int bcm_request_irq(struct bcm_data *bcm)
goto unlock;
}
+ bdev->irq_acquired = true;
+
device_init_wakeup(bdev->dev, true);
pm_runtime_set_autosuspend_delay(bdev->dev,
@@ -514,7 +517,7 @@ static int bcm_close(struct hci_uart *hu)
}
if (bdev) {
- if (IS_ENABLED(CONFIG_PM) && bdev->irq > 0) {
+ if (IS_ENABLED(CONFIG_PM) && bdev->irq_acquired) {
devm_free_irq(bdev->dev, bdev->irq, bdev);
device_init_wakeup(bdev->dev, false);
pm_runtime_disable(bdev->dev);
@@ -550,8 +553,7 @@ static int bcm_flush(struct hci_uart *hu)
static int bcm_setup(struct hci_uart *hu)
{
struct bcm_data *bcm = hu->priv;
- char fw_name[64];
- const struct firmware *fw;
+ bool fw_load_done = false;
unsigned int speed;
int err;
@@ -560,21 +562,12 @@ static int bcm_setup(struct hci_uart *hu)
hu->hdev->set_diag = bcm_set_diag;
hu->hdev->set_bdaddr = btbcm_set_bdaddr;
- err = btbcm_initialize(hu->hdev, fw_name, sizeof(fw_name), false);
+ err = btbcm_initialize(hu->hdev, &fw_load_done);
if (err)
return err;
- err = request_firmware(&fw, fw_name, &hu->hdev->dev);
- if (err < 0) {
- bt_dev_info(hu->hdev, "BCM: Patch %s not found", fw_name);
+ if (!fw_load_done)
return 0;
- }
-
- err = btbcm_patchram(hu->hdev, fw);
- if (err) {
- bt_dev_info(hu->hdev, "BCM: Patch failed (%d)", err);
- goto finalize;
- }
/* Init speed if any */
if (hu->init_speed)
@@ -613,13 +606,16 @@ static int bcm_setup(struct hci_uart *hu)
btbcm_write_pcm_int_params(hu->hdev, &params);
}
-finalize:
- release_firmware(fw);
-
- err = btbcm_finalize(hu->hdev);
+ err = btbcm_finalize(hu->hdev, &fw_load_done);
if (err)
return err;
+ /* Some devices ship with the controller default address.
+ * Allow the bootloader to set a valid address through the
+ * device tree.
+ */
+ set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hu->hdev->quirks);
+
if (!bcm_request_irq(bcm))
err = bcm_setup_sleep(hu);
@@ -1153,7 +1149,8 @@ static int bcm_of_probe(struct bcm_device *bdev)
device_property_read_u8_array(bdev->dev, "brcm,bt-pcm-int-params",
bdev->pcm_int_params, 5);
bdev->irq = of_irq_get_byname(bdev->dev->of_node, "host-wakeup");
-
+ bdev->irq_active_low = irq_get_trigger_type(bdev->irq)
+ & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_LOW);
return 0;
}
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
index 106c110efe56..e60b2e0773db 100644
--- a/drivers/bluetooth/hci_h5.c
+++ b/drivers/bluetooth/hci_h5.c
@@ -1018,6 +1018,8 @@ static const struct of_device_id rtl_bluetooth_of_match[] = {
#ifdef CONFIG_BT_HCIUART_RTL
{ .compatible = "realtek,rtl8822cs-bt",
.data = (const void *)&rtl_vnd },
+ { .compatible = "realtek,rtl8723bs-bt",
+ .data = (const void *)&rtl_vnd },
#endif
{ },
};
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 439392b1c043..81c3c38baba1 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -26,6 +26,7 @@
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of_device.h>
+#include <linux/acpi.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/serdev.h>
@@ -74,6 +75,9 @@ enum qca_flags {
QCA_HW_ERROR_EVENT
};
+enum qca_capabilities {
+ QCA_CAP_WIDEBAND_SPEECH = BIT(0),
+};
/* HCI_IBS transmit side sleep protocol states */
enum tx_ibs_states {
@@ -110,6 +114,7 @@ struct qca_memdump_data {
char *memdump_buf_tail;
u32 current_seq_no;
u32 received_dump;
+ u32 ram_dump_size;
};
struct qca_memdump_event_hdr {
@@ -186,10 +191,11 @@ struct qca_vreg {
unsigned int load_uA;
};
-struct qca_vreg_data {
+struct qca_device_data {
enum qca_btsoc_type soc_type;
struct qca_vreg *vregs;
size_t num_vregs;
+ uint32_t capabilities;
};
/*
@@ -596,10 +602,12 @@ static int qca_open(struct hci_uart *hu)
if (hu->serdev) {
qcadev = serdev_device_get_drvdata(hu->serdev);
- if (qca_is_wcn399x(qcadev->btsoc_type)) {
+
+ if (qca_is_wcn399x(qcadev->btsoc_type))
hu->init_speed = qcadev->init_speed;
+
+ if (qcadev->oper_speed)
hu->oper_speed = qcadev->oper_speed;
- }
}
timer_setup(&qca->wake_retrans_timer, hci_ibs_wake_retrans_timeout, 0);
@@ -969,6 +977,8 @@ static void qca_controller_memdump(struct work_struct *work)
char nullBuff[QCA_DUMP_PACKET_SIZE] = { 0 };
u16 seq_no;
u32 dump_size;
+ u32 rx_size;
+ enum qca_btsoc_type soc_type = qca_soc_type(hu);
while ((skb = skb_dequeue(&qca->rx_memdump_q))) {
@@ -1018,10 +1028,12 @@ static void qca_controller_memdump(struct work_struct *work)
dump_size);
queue_delayed_work(qca->workqueue,
&qca->ctrl_memdump_timeout,
- msecs_to_jiffies(MEMDUMP_TIMEOUT_MS));
+ msecs_to_jiffies(MEMDUMP_TIMEOUT_MS)
+ );
skb_pull(skb, sizeof(dump_size));
memdump_buf = vmalloc(dump_size);
+ qca_memdump->ram_dump_size = dump_size;
qca_memdump->memdump_buf_head = memdump_buf;
qca_memdump->memdump_buf_tail = memdump_buf;
}
@@ -1044,26 +1056,57 @@ static void qca_controller_memdump(struct work_struct *work)
* the controller. In such cases let us store the dummy
* packets in the buffer.
*/
+ /* For QCA6390 the controller does not lose packets, but the
+ * sequence number field of a packet sometimes has error bits
+ * set, so skip the missing-packet check for it.
+ */
while ((seq_no > qca_memdump->current_seq_no + 1) &&
- seq_no != QCA_LAST_SEQUENCE_NUM) {
+ (soc_type != QCA_QCA6390) &&
+ seq_no != QCA_LAST_SEQUENCE_NUM) {
bt_dev_err(hu->hdev, "QCA controller missed packet:%d",
qca_memdump->current_seq_no);
+ rx_size = qca_memdump->received_dump;
+ rx_size += QCA_DUMP_PACKET_SIZE;
+ if (rx_size > qca_memdump->ram_dump_size) {
+ bt_dev_err(hu->hdev,
+ "QCA memdump received %d, no space for missed packet",
+ qca_memdump->received_dump);
+ break;
+ }
memcpy(memdump_buf, nullBuff, QCA_DUMP_PACKET_SIZE);
memdump_buf = memdump_buf + QCA_DUMP_PACKET_SIZE;
qca_memdump->received_dump += QCA_DUMP_PACKET_SIZE;
qca_memdump->current_seq_no++;
}
- memcpy(memdump_buf, (unsigned char *) skb->data, skb->len);
- memdump_buf = memdump_buf + skb->len;
- qca_memdump->memdump_buf_tail = memdump_buf;
- qca_memdump->current_seq_no = seq_no + 1;
- qca_memdump->received_dump += skb->len;
+ rx_size = qca_memdump->received_dump + skb->len;
+ if (rx_size <= qca_memdump->ram_dump_size) {
+ if ((seq_no != QCA_LAST_SEQUENCE_NUM) &&
+ (seq_no != qca_memdump->current_seq_no))
+ bt_dev_err(hu->hdev,
+ "QCA memdump unexpected packet %d",
+ seq_no);
+ bt_dev_dbg(hu->hdev,
+ "QCA memdump packet %d with length %d",
+ seq_no, skb->len);
+ memcpy(memdump_buf, (unsigned char *)skb->data,
+ skb->len);
+ memdump_buf = memdump_buf + skb->len;
+ qca_memdump->memdump_buf_tail = memdump_buf;
+ qca_memdump->current_seq_no = seq_no + 1;
+ qca_memdump->received_dump += skb->len;
+ } else {
+ bt_dev_err(hu->hdev,
+ "QCA memdump received %d, no space for packet %d",
+ qca_memdump->received_dump, seq_no);
+ }
qca->qca_memdump = qca_memdump;
kfree_skb(skb);
if (seq_no == QCA_LAST_SEQUENCE_NUM) {
- bt_dev_info(hu->hdev, "QCA writing crash dump of size %d bytes",
- qca_memdump->received_dump);
+ bt_dev_info(hu->hdev,
+ "QCA memdump Done, received %d, total %d",
+ qca_memdump->received_dump,
+ qca_memdump->ram_dump_size);
memdump_buf = qca_memdump->memdump_buf_head;
dev_coredumpv(&hu->serdev->dev, memdump_buf,
qca_memdump->received_dump, GFP_KERNEL);
@@ -1596,7 +1639,7 @@ static int qca_setup(struct hci_uart *hu)
set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
bt_dev_info(hdev, "setting up %s",
- qca_is_wcn399x(soc_type) ? "wcn399x" : "ROME");
+ qca_is_wcn399x(soc_type) ? "wcn399x" : "ROME/QCA6390");
retry:
ret = qca_power_on(hdev);
@@ -1665,10 +1708,10 @@ retry:
}
/* Setup bdaddr */
- if (qca_is_wcn399x(soc_type))
- hu->hdev->set_bdaddr = qca_set_bdaddr;
- else
+ if (soc_type == QCA_ROME)
hu->hdev->set_bdaddr = qca_set_bdaddr_rome;
+ else
+ hu->hdev->set_bdaddr = qca_set_bdaddr;
return ret;
}
@@ -1688,7 +1731,7 @@ static const struct hci_uart_proto qca_proto = {
.dequeue = qca_dequeue,
};
-static const struct qca_vreg_data qca_soc_data_wcn3990 = {
+static const struct qca_device_data qca_soc_data_wcn3990 = {
.soc_type = QCA_WCN3990,
.vregs = (struct qca_vreg []) {
{ "vddio", 15000 },
@@ -1699,7 +1742,7 @@ static const struct qca_vreg_data qca_soc_data_wcn3990 = {
.num_vregs = 4,
};
-static const struct qca_vreg_data qca_soc_data_wcn3991 = {
+static const struct qca_device_data qca_soc_data_wcn3991 = {
.soc_type = QCA_WCN3991,
.vregs = (struct qca_vreg []) {
{ "vddio", 15000 },
@@ -1708,9 +1751,10 @@ static const struct qca_vreg_data qca_soc_data_wcn3991 = {
{ "vddch0", 450000 },
},
.num_vregs = 4,
+ .capabilities = QCA_CAP_WIDEBAND_SPEECH,
};
-static const struct qca_vreg_data qca_soc_data_wcn3998 = {
+static const struct qca_device_data qca_soc_data_wcn3998 = {
.soc_type = QCA_WCN3998,
.vregs = (struct qca_vreg []) {
{ "vddio", 10000 },
@@ -1721,6 +1765,11 @@ static const struct qca_vreg_data qca_soc_data_wcn3998 = {
.num_vregs = 4,
};
+static const struct qca_device_data qca_soc_data_qca6390 = {
+ .soc_type = QCA_QCA6390,
+ .num_vregs = 0,
+};
+
static void qca_power_shutdown(struct hci_uart *hu)
{
struct qca_serdev *qcadev;
@@ -1764,7 +1813,7 @@ static int qca_power_off(struct hci_dev *hdev)
enum qca_btsoc_type soc_type = qca_soc_type(hu);
/* Stop sending shutdown command if soc crashes. */
- if (qca_is_wcn399x(soc_type)
+ if (soc_type != QCA_ROME
&& qca->memdump_state == QCA_MEMDUMP_IDLE) {
qca_send_pre_shutdown_cmd(hdev);
usleep_range(8000, 10000);
@@ -1852,7 +1901,7 @@ static int qca_serdev_probe(struct serdev_device *serdev)
{
struct qca_serdev *qcadev;
struct hci_dev *hdev;
- const struct qca_vreg_data *data;
+ const struct qca_device_data *data;
int err;
bool power_ctrl_enabled = true;
@@ -1865,6 +1914,11 @@ static int qca_serdev_probe(struct serdev_device *serdev)
serdev_device_set_drvdata(serdev, qcadev);
device_property_read_string(&serdev->dev, "firmware-name",
&qcadev->firmware_name);
+ device_property_read_u32(&serdev->dev, "max-speed",
+ &qcadev->oper_speed);
+ if (!qcadev->oper_speed)
+ BT_DBG("UART will pick default operating speed");
+
if (data && qca_is_wcn399x(data->soc_type)) {
qcadev->btsoc_type = data->soc_type;
qcadev->bt_power = devm_kzalloc(&serdev->dev,
@@ -1889,18 +1943,17 @@ static int qca_serdev_probe(struct serdev_device *serdev)
return PTR_ERR(qcadev->susclk);
}
- device_property_read_u32(&serdev->dev, "max-speed",
- &qcadev->oper_speed);
- if (!qcadev->oper_speed)
- BT_DBG("UART will pick default operating speed");
-
err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto);
if (err) {
BT_ERR("wcn3990 serdev registration failed");
return err;
}
} else {
- qcadev->btsoc_type = QCA_ROME;
+ if (data)
+ qcadev->btsoc_type = data->soc_type;
+ else
+ qcadev->btsoc_type = QCA_ROME;
+
qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable",
GPIOD_OUT_LOW);
if (!qcadev->bt_en) {
@@ -1930,12 +1983,19 @@ static int qca_serdev_probe(struct serdev_device *serdev)
}
}
+ hdev = qcadev->serdev_hu.hdev;
+
if (power_ctrl_enabled) {
- hdev = qcadev->serdev_hu.hdev;
set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
hdev->shutdown = qca_power_off;
}
+ /* Wideband speech support must be set per driver since it can't be
+ * queried via hci.
+ */
+ if (data && (data->capabilities & QCA_CAP_WIDEBAND_SPEECH))
+ set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
+
return 0;
}
@@ -1951,10 +2011,43 @@ static void qca_serdev_remove(struct serdev_device *serdev)
hci_uart_unregister_device(&qcadev->serdev_hu);
}
+static void qca_serdev_shutdown(struct device *dev)
+{
+ int ret;
+ int timeout = msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS);
+ struct serdev_device *serdev = to_serdev_device(dev);
+ struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev);
+ const u8 ibs_wake_cmd[] = { 0xFD };
+ const u8 edl_reset_soc_cmd[] = { 0x01, 0x00, 0xFC, 0x01, 0x05 };
+
+ if (qcadev->btsoc_type == QCA_QCA6390) {
+ serdev_device_write_flush(serdev);
+ ret = serdev_device_write_buf(serdev, ibs_wake_cmd,
+ sizeof(ibs_wake_cmd));
+ if (ret < 0) {
+ BT_ERR("QCA send IBS_WAKE_IND error: %d", ret);
+ return;
+ }
+ serdev_device_wait_until_sent(serdev, timeout);
+ usleep_range(8000, 10000);
+
+ serdev_device_write_flush(serdev);
+ ret = serdev_device_write_buf(serdev, edl_reset_soc_cmd,
+ sizeof(edl_reset_soc_cmd));
+ if (ret < 0) {
+ BT_ERR("QCA send EDL_RESET_REQ error: %d", ret);
+ return;
+ }
+ serdev_device_wait_until_sent(serdev, timeout);
+ usleep_range(8000, 10000);
+ }
+}
+
static int __maybe_unused qca_suspend(struct device *dev)
{
- struct hci_dev *hdev = container_of(dev, struct hci_dev, dev);
- struct hci_uart *hu = hci_get_drvdata(hdev);
+ struct serdev_device *serdev = to_serdev_device(dev);
+ struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev);
+ struct hci_uart *hu = &qcadev->serdev_hu;
struct qca_data *qca = hu->priv;
unsigned long flags;
int ret = 0;
@@ -2033,8 +2126,9 @@ error:
static int __maybe_unused qca_resume(struct device *dev)
{
- struct hci_dev *hdev = container_of(dev, struct hci_dev, dev);
- struct hci_uart *hu = hci_get_drvdata(hdev);
+ struct serdev_device *serdev = to_serdev_device(dev);
+ struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev);
+ struct hci_uart *hu = &qcadev->serdev_hu;
struct qca_data *qca = hu->priv;
clear_bit(QCA_SUSPENDING, &qca->flags);
@@ -2044,21 +2138,39 @@ static int __maybe_unused qca_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(qca_pm_ops, qca_suspend, qca_resume);
+#ifdef CONFIG_OF
static const struct of_device_id qca_bluetooth_of_match[] = {
{ .compatible = "qcom,qca6174-bt" },
+ { .compatible = "qcom,qca6390-bt", .data = &qca_soc_data_qca6390},
+ { .compatible = "qcom,qca9377-bt" },
{ .compatible = "qcom,wcn3990-bt", .data = &qca_soc_data_wcn3990},
{ .compatible = "qcom,wcn3991-bt", .data = &qca_soc_data_wcn3991},
{ .compatible = "qcom,wcn3998-bt", .data = &qca_soc_data_wcn3998},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, qca_bluetooth_of_match);
+#endif
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id qca_bluetooth_acpi_match[] = {
+ { "QCOM6390", (kernel_ulong_t)&qca_soc_data_qca6390 },
+ { "DLA16390", (kernel_ulong_t)&qca_soc_data_qca6390 },
+ { "DLB16390", (kernel_ulong_t)&qca_soc_data_qca6390 },
+ { "DLB26390", (kernel_ulong_t)&qca_soc_data_qca6390 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, qca_bluetooth_acpi_match);
+#endif
+
static struct serdev_device_driver qca_serdev_driver = {
.probe = qca_serdev_probe,
.remove = qca_serdev_remove,
.driver = {
.name = "hci_uart_qca",
- .of_match_table = qca_bluetooth_of_match,
+ .of_match_table = of_match_ptr(qca_bluetooth_of_match),
+ .acpi_match_table = ACPI_PTR(qca_bluetooth_acpi_match),
+ .shutdown = qca_serdev_shutdown,
.pm = &qca_pm_ops,
},
};
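
The hci_qca memdump rework above sizes every copy against the total dump size announced in the first packet, so a bogus sequence number can no longer push the write pointer past the vmalloc'ed buffer. A minimal sketch of that bounds-checked append, with hypothetical field names (not the driver's structures):

#include <linux/types.h>
#include <linux/string.h>
#include <linux/errno.h>

struct my_dump {
	u8 *buf;		/* vmalloc'ed buffer of total_size bytes */
	u32 total_size;		/* size announced by the controller */
	u32 received;		/* bytes copied so far */
};

static int my_dump_append(struct my_dump *d, const void *data, u32 len)
{
	if (d->received + len > d->total_size)
		return -ENOSPC;	/* drop the packet instead of overflowing */

	memcpy(d->buf + d->received, data, len);
	d->received += len;

	return 0;
}
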
diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c
index 4652896d4990..599855e4c57c 100644
--- a/drivers/bluetooth/hci_serdev.c
+++ b/drivers/bluetooth/hci_serdev.c
@@ -21,8 +21,6 @@
#include "hci_uart.h"
-static struct serdev_device_ops hci_serdev_client_ops;
-
static inline void hci_uart_tx_complete(struct hci_uart *hu, int pkt_type)
{
struct hci_dev *hdev = hu->hdev;
@@ -260,7 +258,7 @@ static int hci_uart_receive_buf(struct serdev_device *serdev, const u8 *data,
return count;
}
-static struct serdev_device_ops hci_serdev_client_ops = {
+static const struct serdev_device_ops hci_serdev_client_ops = {
.receive_buf = hci_uart_receive_buf,
.write_wakeup = hci_uart_write_wakeup,
};
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index 6d4e4497b59b..c8818e3b1079 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -20,6 +20,15 @@ config ARM_CCI400_PORT_CTRL
Low level power management driver for CCI400 cache coherent
interconnect for ARM platforms.
+config ARM_INTEGRATOR_LM
+ bool "ARM Integrator Logic Module bus"
+ depends on HAS_IOMEM
+ depends on ARCH_INTEGRATOR || COMPILE_TEST
+ default ARCH_INTEGRATOR
+ help
+ Say y here to enable support for the ARM Logic Module bus
+ found on the ARM Integrator AP (Application Platform).
+
config BRCMSTB_GISB_ARB
bool "Broadcom STB GISB bus arbiter"
depends on ARM || ARM64 || MIPS
@@ -29,6 +38,36 @@ config BRCMSTB_GISB_ARB
arbiter. This driver provides timeout and target abort error handling
and internal bus master decoding.
+config BT1_APB
+ bool "Baikal-T1 APB-bus driver"
+ depends on MIPS_BAIKAL_T1 || COMPILE_TEST
+ select REGMAP_MMIO
+ help
+ The Baikal-T1 AXI-APB bridge is used to access the SoC subsystem CSRs.
+ IO requests are routed to this bus by means of the DW AMBA 3 AXI
+ Interconnect. If an APB protocol collision occurs, or a slave device
+ does not respond before the timeout, an IRQ is raised and the faulting
+ address is reported to the APB terminator (APB Errors Handler Block).
+ This driver provides the interrupt handler that detects the erroneous
+ address, prints an error message about the address fault and updates
+ an error counter. The counter and the APB-bus operation timeout can
+ be accessed via the corresponding sysfs nodes.
+
+config BT1_AXI
+ bool "Baikal-T1 AXI-bus driver"
+ depends on MIPS_BAIKAL_T1 || COMPILE_TEST
+ select MFD_SYSCON
+ help
+ The AXI3 bus is the main communication bus connecting all high-speed
+ peripheral IP-cores with the RAM controller and the MIPS P5600 cores
+ on the Baikal-T1 SoC. Traffic arbitration is done by the DW AMBA 3 AXI
+ Interconnect (the so-called AXI Main Interconnect), which routes IO
+ requests from one SoC block to another. This driver provides a way to
+ detect bus protocol errors and non-responding devices by means of an
+ errors handler block (EHB) embedded on top of the interconnect. AXI
+ Interconnect QoS arbitration tuning is currently unsupported.
+
config MOXTET
tristate "CZ.NIC Turris Mox module configuration bus"
depends on SPI_MASTER && OF
@@ -183,7 +222,7 @@ config UNIPHIER_SYSTEM_BUS
needed to use on-board devices connected to UniPhier SoCs.
config VEXPRESS_CONFIG
- bool "Versatile Express configuration bus"
+ tristate "Versatile Express configuration bus"
default y if ARCH_VEXPRESS
depends on ARM || ARM64
depends on OF
diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile
index 05f32cd694a4..397e35392bff 100644
--- a/drivers/bus/Makefile
+++ b/drivers/bus/Makefile
@@ -5,7 +5,7 @@
# Interconnect bus drivers for ARM platforms
obj-$(CONFIG_ARM_CCI) += arm-cci.o
-
+obj-$(CONFIG_ARM_INTEGRATOR_LM) += arm-integrator-lm.o
obj-$(CONFIG_HISILICON_LPC) += hisi_lpc.o
obj-$(CONFIG_BRCMSTB_GISB_ARB) += brcmstb_gisb.o
obj-$(CONFIG_MOXTET) += moxtet.o
@@ -13,6 +13,8 @@ obj-$(CONFIG_MOXTET) += moxtet.o
# DPAA2 fsl-mc bus
obj-$(CONFIG_FSL_MC_BUS) += fsl-mc/
+obj-$(CONFIG_BT1_APB) += bt1-apb.o
+obj-$(CONFIG_BT1_AXI) += bt1-axi.o
obj-$(CONFIG_IMX_WEIM) += imx-weim.o
obj-$(CONFIG_MIPS_CDMM) += mips_cdmm.o
obj-$(CONFIG_MVEBU_MBUS) += mvebu-mbus.o
diff --git a/drivers/bus/arm-integrator-lm.c b/drivers/bus/arm-integrator-lm.c
new file mode 100644
index 000000000000..845b6c43fef8
--- /dev/null
+++ b/drivers/bus/arm-integrator-lm.c
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ARM Integrator Logical Module bus driver
+ * Copyright (C) 2020 Linaro Ltd.
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * See the device tree bindings for this block for more details on the
+ * hardware.
+ */
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/bitops.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+
+/* All information about the connected logic modules is in here */
+#define INTEGRATOR_SC_DEC_OFFSET 0x10
+
+/* Base address for the expansion modules */
+#define INTEGRATOR_AP_EXP_BASE 0xc0000000
+#define INTEGRATOR_AP_EXP_STRIDE 0x10000000
+
+static int integrator_lm_populate(int num, struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ struct device_node *child;
+ u32 base;
+ int ret;
+
+ base = INTEGRATOR_AP_EXP_BASE + (num * INTEGRATOR_AP_EXP_STRIDE);
+
+ /* Walk over the child nodes and see what chipselects we use */
+ for_each_available_child_of_node(np, child) {
+ struct resource res;
+
+ ret = of_address_to_resource(child, 0, &res);
+ if (ret) {
+ dev_info(dev, "no valid address on child\n");
+ continue;
+ }
+
+ /* First populate the syscon then any devices */
+ if (res.start == base) {
+ dev_info(dev, "populate module @0x%08x from DT\n",
+ base);
+ ret = of_platform_default_populate(child, NULL, dev);
+ if (ret) {
+ dev_err(dev, "failed to populate module\n");
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static const struct of_device_id integrator_ap_syscon_match[] = {
+ { .compatible = "arm,integrator-ap-syscon"},
+ { },
+};
+
+static int integrator_ap_lm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *syscon;
+ static struct regmap *map;
+ u32 val;
+ int ret;
+ int i;
+
+ /* Look up the system controller */
+ syscon = of_find_matching_node(NULL, integrator_ap_syscon_match);
+ if (!syscon) {
+ dev_err(dev,
+ "could not find Integrator/AP system controller\n");
+ return -ENODEV;
+ }
+ map = syscon_node_to_regmap(syscon);
+ if (IS_ERR(map)) {
+ dev_err(dev,
+ "could not find Integrator/AP system controller\n");
+ return PTR_ERR(map);
+ }
+
+ ret = regmap_read(map, INTEGRATOR_SC_DEC_OFFSET, &val);
+ if (ret) {
+ dev_err(dev, "could not read from Integrator/AP syscon\n");
+ return ret;
+ }
+
+ /* Loop over the connected modules */
+ for (i = 0; i < 4; i++) {
+ if (!(val & BIT(4 + i)))
+ continue;
+
+ dev_info(dev, "detected module in slot %d\n", i);
+ ret = integrator_lm_populate(i, dev);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id integrator_ap_lm_match[] = {
+ { .compatible = "arm,integrator-ap-lm"},
+ { },
+};
+
+static struct platform_driver integrator_ap_lm_driver = {
+ .probe = integrator_ap_lm_probe,
+ .driver = {
+ .name = "integratorap-lm",
+ .of_match_table = integrator_ap_lm_match,
+ },
+};
+module_platform_driver(integrator_ap_lm_driver);
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("Integrator AP Logical Module driver");
+MODULE_LICENSE("GPL v2");
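A quick illustration of the probe logic above: the SC_DEC register is read through the Integrator/AP syscon, bits 4..7 flag which of the four logical module slots are occupied, and each occupied slot is populated at a fixed 256 MiB stride from 0xc0000000. A stand-alone sketch of that decoding, using a made-up register value rather than anything read from hardware:

#include <stdint.h>
#include <stdio.h>

#define INTEGRATOR_AP_EXP_BASE		0xc0000000U
#define INTEGRATOR_AP_EXP_STRIDE	0x10000000U

int main(void)
{
	/* Hypothetical SC_DEC value: modules fitted in slots 0 and 2. */
	uint32_t sc_dec = (1U << 4) | (1U << 6);
	int i;

	for (i = 0; i < 4; i++) {
		if (!(sc_dec & (1U << (4 + i))))
			continue;
		/* Mirrors integrator_lm_populate(): slot i sits at base + i * stride. */
		printf("module in slot %d at 0x%08x\n", i,
		       INTEGRATOR_AP_EXP_BASE + i * INTEGRATOR_AP_EXP_STRIDE);
	}

	return 0;
}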
diff --git a/drivers/bus/bt1-apb.c b/drivers/bus/bt1-apb.c
new file mode 100644
index 000000000000..b25ff941e7c7
--- /dev/null
+++ b/drivers/bus/bt1-apb.c
@@ -0,0 +1,421 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+ *
+ * Authors:
+ * Serge Semin <Sergey.Semin@baikalelectronics.ru>
+ *
+ * Baikal-T1 APB-bus driver
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/atomic.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/nmi.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/clk.h>
+#include <linux/reset.h>
+#include <linux/time64.h>
+#include <linux/clk.h>
+#include <linux/sysfs.h>
+
+#define APB_EHB_ISR 0x00
+#define APB_EHB_ISR_PENDING BIT(0)
+#define APB_EHB_ISR_MASK BIT(1)
+#define APB_EHB_ADDR 0x04
+#define APB_EHB_TIMEOUT 0x08
+
+#define APB_EHB_TIMEOUT_MIN 0x000003FFU
+#define APB_EHB_TIMEOUT_MAX 0xFFFFFFFFU
+
+/*
+ * struct bt1_apb - Baikal-T1 APB EHB private data
+ * @dev: Pointer to the device structure.
+ * @regs: APB EHB registers map.
+ * @res: No-device error injection memory region.
+ * @irq: Errors IRQ number.
+ * @rate: APB-bus reference clock rate.
+ * @pclk: APB-reference clock.
+ * @prst: APB domain reset line.
+ * @count: Number of errors detected.
+ */
+struct bt1_apb {
+ struct device *dev;
+
+ struct regmap *regs;
+ void __iomem *res;
+ int irq;
+
+ unsigned long rate;
+ struct clk *pclk;
+
+ struct reset_control *prst;
+
+ atomic_t count;
+};
+
+static const struct regmap_config bt1_apb_regmap_cfg = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = APB_EHB_TIMEOUT,
+ .fast_io = true
+};
+
+static inline unsigned long bt1_apb_n_to_timeout_us(struct bt1_apb *apb, u32 n)
+{
+ u64 timeout = (u64)n * USEC_PER_SEC;
+
+ do_div(timeout, apb->rate);
+
+ return timeout;
+
+}
+
+static inline unsigned long bt1_apb_timeout_to_n_us(struct bt1_apb *apb,
+ unsigned long timeout)
+{
+ u64 n = (u64)timeout * apb->rate;
+
+ do_div(n, USEC_PER_SEC);
+
+ return n;
+
+}
+
+static irqreturn_t bt1_apb_isr(int irq, void *data)
+{
+ struct bt1_apb *apb = data;
+ u32 addr = 0;
+
+ regmap_read(apb->regs, APB_EHB_ADDR, &addr);
+
+ dev_crit_ratelimited(apb->dev,
+ "APB-bus fault %d: Slave access timeout at 0x%08x\n",
+ atomic_inc_return(&apb->count),
+ addr);
+
+ /*
+	 * Print a backtrace on each CPU. This may be of little use if the
+	 * fault happened on the CPU running this IRQ handler, or if the
+	 * other core has carried on executing despite the error. Otherwise
+	 * the trace should point straight at the cause of the problem.
+ */
+ trigger_all_cpu_backtrace();
+
+ regmap_update_bits(apb->regs, APB_EHB_ISR, APB_EHB_ISR_PENDING, 0);
+
+ return IRQ_HANDLED;
+}
+
+static void bt1_apb_clear_data(void *data)
+{
+ struct bt1_apb *apb = data;
+ struct platform_device *pdev = to_platform_device(apb->dev);
+
+ platform_set_drvdata(pdev, NULL);
+}
+
+static struct bt1_apb *bt1_apb_create_data(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct bt1_apb *apb;
+ int ret;
+
+ apb = devm_kzalloc(dev, sizeof(*apb), GFP_KERNEL);
+ if (!apb)
+ return ERR_PTR(-ENOMEM);
+
+ ret = devm_add_action(dev, bt1_apb_clear_data, apb);
+ if (ret) {
+ dev_err(dev, "Can't add APB EHB data clear action\n");
+ return ERR_PTR(ret);
+ }
+
+ apb->dev = dev;
+ atomic_set(&apb->count, 0);
+ platform_set_drvdata(pdev, apb);
+
+ return apb;
+}
+
+static int bt1_apb_request_regs(struct bt1_apb *apb)
+{
+ struct platform_device *pdev = to_platform_device(apb->dev);
+ void __iomem *regs;
+
+ regs = devm_platform_ioremap_resource_byname(pdev, "ehb");
+ if (IS_ERR(regs)) {
+ dev_err(apb->dev, "Couldn't map APB EHB registers\n");
+ return PTR_ERR(regs);
+ }
+
+ apb->regs = devm_regmap_init_mmio(apb->dev, regs, &bt1_apb_regmap_cfg);
+ if (IS_ERR(apb->regs)) {
+ dev_err(apb->dev, "Couldn't create APB EHB regmap\n");
+ return PTR_ERR(apb->regs);
+ }
+
+ apb->res = devm_platform_ioremap_resource_byname(pdev, "nodev");
+ if (IS_ERR(apb->res))
+ dev_err(apb->dev, "Couldn't map reserved region\n");
+
+ return PTR_ERR_OR_ZERO(apb->res);
+}
+
+static int bt1_apb_request_rst(struct bt1_apb *apb)
+{
+ int ret;
+
+ apb->prst = devm_reset_control_get_optional_exclusive(apb->dev, "prst");
+ if (IS_ERR(apb->prst)) {
+ dev_warn(apb->dev, "Couldn't get reset control line\n");
+ return PTR_ERR(apb->prst);
+ }
+
+ ret = reset_control_deassert(apb->prst);
+ if (ret)
+ dev_err(apb->dev, "Failed to deassert the reset line\n");
+
+ return ret;
+}
+
+static void bt1_apb_disable_clk(void *data)
+{
+ struct bt1_apb *apb = data;
+
+ clk_disable_unprepare(apb->pclk);
+}
+
+static int bt1_apb_request_clk(struct bt1_apb *apb)
+{
+ int ret;
+
+ apb->pclk = devm_clk_get(apb->dev, "pclk");
+ if (IS_ERR(apb->pclk)) {
+ dev_err(apb->dev, "Couldn't get APB clock descriptor\n");
+ return PTR_ERR(apb->pclk);
+ }
+
+ ret = clk_prepare_enable(apb->pclk);
+ if (ret) {
+ dev_err(apb->dev, "Couldn't enable the APB clock\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(apb->dev, bt1_apb_disable_clk, apb);
+ if (ret) {
+ dev_err(apb->dev, "Can't add APB EHB clocks disable action\n");
+ return ret;
+ }
+
+ apb->rate = clk_get_rate(apb->pclk);
+ if (!apb->rate) {
+ dev_err(apb->dev, "Invalid clock rate\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void bt1_apb_clear_irq(void *data)
+{
+ struct bt1_apb *apb = data;
+
+ regmap_update_bits(apb->regs, APB_EHB_ISR, APB_EHB_ISR_MASK, 0);
+}
+
+static int bt1_apb_request_irq(struct bt1_apb *apb)
+{
+ struct platform_device *pdev = to_platform_device(apb->dev);
+ int ret;
+
+ apb->irq = platform_get_irq(pdev, 0);
+ if (apb->irq < 0)
+ return apb->irq;
+
+ ret = devm_request_irq(apb->dev, apb->irq, bt1_apb_isr, IRQF_SHARED,
+ "bt1-apb", apb);
+ if (ret) {
+ dev_err(apb->dev, "Couldn't request APB EHB IRQ\n");
+ return ret;
+ }
+
+ ret = devm_add_action(apb->dev, bt1_apb_clear_irq, apb);
+ if (ret) {
+ dev_err(apb->dev, "Can't add APB EHB IRQs clear action\n");
+ return ret;
+ }
+
+	/* Unmask the IRQ and clear its pending flag. */
+ regmap_update_bits(apb->regs, APB_EHB_ISR,
+ APB_EHB_ISR_PENDING | APB_EHB_ISR_MASK,
+ APB_EHB_ISR_MASK);
+
+ return 0;
+}
+
+static ssize_t count_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct bt1_apb *apb = dev_get_drvdata(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&apb->count));
+}
+static DEVICE_ATTR_RO(count);
+
+static ssize_t timeout_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct bt1_apb *apb = dev_get_drvdata(dev);
+ unsigned long timeout;
+ int ret;
+ u32 n;
+
+ ret = regmap_read(apb->regs, APB_EHB_TIMEOUT, &n);
+ if (ret)
+ return ret;
+
+ timeout = bt1_apb_n_to_timeout_us(apb, n);
+
+ return scnprintf(buf, PAGE_SIZE, "%lu\n", timeout);
+}
+
+static ssize_t timeout_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct bt1_apb *apb = dev_get_drvdata(dev);
+ unsigned long timeout;
+ int ret;
+ u32 n;
+
+ if (kstrtoul(buf, 0, &timeout) < 0)
+ return -EINVAL;
+
+ n = bt1_apb_timeout_to_n_us(apb, timeout);
+ n = clamp(n, APB_EHB_TIMEOUT_MIN, APB_EHB_TIMEOUT_MAX);
+
+ ret = regmap_write(apb->regs, APB_EHB_TIMEOUT, n);
+
+ return ret ?: count;
+}
+static DEVICE_ATTR_RW(timeout);
+
+static ssize_t inject_error_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "Error injection: nodev irq\n");
+}
+
+static ssize_t inject_error_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *data, size_t count)
+{
+ struct bt1_apb *apb = dev_get_drvdata(dev);
+
+ /*
+	 * Either perform a dummy read from the unmapped address in the APB
+	 * IO area or manually set the IRQ status.
+ */
+ if (sysfs_streq(data, "nodev"))
+ readl(apb->res);
+ else if (sysfs_streq(data, "irq"))
+ regmap_update_bits(apb->regs, APB_EHB_ISR, APB_EHB_ISR_PENDING,
+ APB_EHB_ISR_PENDING);
+ else
+ return -EINVAL;
+
+ return count;
+}
+static DEVICE_ATTR_RW(inject_error);
+
+static struct attribute *bt1_apb_sysfs_attrs[] = {
+ &dev_attr_count.attr,
+ &dev_attr_timeout.attr,
+ &dev_attr_inject_error.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(bt1_apb_sysfs);
+
+static void bt1_apb_remove_sysfs(void *data)
+{
+ struct bt1_apb *apb = data;
+
+ device_remove_groups(apb->dev, bt1_apb_sysfs_groups);
+}
+
+static int bt1_apb_init_sysfs(struct bt1_apb *apb)
+{
+ int ret;
+
+ ret = device_add_groups(apb->dev, bt1_apb_sysfs_groups);
+ if (ret) {
+ dev_err(apb->dev, "Failed to create EHB APB sysfs nodes\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(apb->dev, bt1_apb_remove_sysfs, apb);
+ if (ret)
+ dev_err(apb->dev, "Can't add APB EHB sysfs remove action\n");
+
+ return ret;
+}
+
+static int bt1_apb_probe(struct platform_device *pdev)
+{
+ struct bt1_apb *apb;
+ int ret;
+
+ apb = bt1_apb_create_data(pdev);
+ if (IS_ERR(apb))
+ return PTR_ERR(apb);
+
+ ret = bt1_apb_request_regs(apb);
+ if (ret)
+ return ret;
+
+ ret = bt1_apb_request_rst(apb);
+ if (ret)
+ return ret;
+
+ ret = bt1_apb_request_clk(apb);
+ if (ret)
+ return ret;
+
+ ret = bt1_apb_request_irq(apb);
+ if (ret)
+ return ret;
+
+ ret = bt1_apb_init_sysfs(apb);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct of_device_id bt1_apb_of_match[] = {
+ { .compatible = "baikal,bt1-apb" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, bt1_apb_of_match);
+
+static struct platform_driver bt1_apb_driver = {
+ .probe = bt1_apb_probe,
+ .driver = {
+ .name = "bt1-apb",
+ .of_match_table = bt1_apb_of_match
+ }
+};
+module_platform_driver(bt1_apb_driver);
+
+MODULE_AUTHOR("Serge Semin <Sergey.Semin@baikalelectronics.ru>");
+MODULE_DESCRIPTION("Baikal-T1 APB-bus driver");
+MODULE_LICENSE("GPL v2");
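A side note on the two conversion helpers near the top of the file: bt1_apb_n_to_timeout_us() and bt1_apb_timeout_to_n_us() simply convert between the raw APB_EHB_TIMEOUT counter value and microseconds (n / rate seconds, and back), which is what the timeout sysfs attribute exposes. A small host-side sketch of the same arithmetic, assuming a purely hypothetical 50 MHz APB reference clock:

#include <stdint.h>
#include <stdio.h>

#define USEC_PER_SEC	1000000ULL

/* Hypothetical APB reference clock rate; the driver gets the real one from "pclk". */
static const uint64_t rate = 50000000;

/* Counter ticks -> microseconds, as in bt1_apb_n_to_timeout_us(). */
static uint64_t n_to_timeout_us(uint64_t n)
{
	return n * USEC_PER_SEC / rate;
}

/* Microseconds -> counter ticks, as in bt1_apb_timeout_to_n_us(). */
static uint64_t timeout_us_to_n(uint64_t us)
{
	return us * rate / USEC_PER_SEC;
}

int main(void)
{
	/* The minimum counter value 0x3FF corresponds to about 20 us at 50 MHz. */
	printf("%llu us\n", (unsigned long long)n_to_timeout_us(0x3FF));
	printf("%llu ticks\n", (unsigned long long)timeout_us_to_n(20));
	return 0;
}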
diff --git a/drivers/bus/bt1-axi.c b/drivers/bus/bt1-axi.c
new file mode 100644
index 000000000000..e7a6744acc7b
--- /dev/null
+++ b/drivers/bus/bt1-axi.c
@@ -0,0 +1,314 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+ *
+ * Authors:
+ * Serge Semin <Sergey.Semin@baikalelectronics.ru>
+ *
+ * Baikal-T1 AXI-bus driver
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/atomic.h>
+#include <linux/regmap.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/syscon.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/nmi.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/reset.h>
+#include <linux/sysfs.h>
+
+#define BT1_AXI_WERRL 0x110
+#define BT1_AXI_WERRH 0x114
+#define BT1_AXI_WERRH_TYPE BIT(23)
+#define BT1_AXI_WERRH_ADDR_FLD 24
+#define BT1_AXI_WERRH_ADDR_MASK GENMASK(31, BT1_AXI_WERRH_ADDR_FLD)
+
+/*
+ * struct bt1_axi - Baikal-T1 AXI-bus private data
+ * @dev: Pointer to the device structure.
+ * @qos_regs: AXI Interconnect QoS tuning registers.
+ * @sys_regs: Baikal-T1 System Controller registers map.
+ * @irq: Errors IRQ number.
+ * @aclk: AXI reference clock.
+ * @arst: AXI Interconnect reset line.
+ * @count: Number of errors detected.
+ */
+struct bt1_axi {
+ struct device *dev;
+
+ void __iomem *qos_regs;
+ struct regmap *sys_regs;
+ int irq;
+
+ struct clk *aclk;
+
+ struct reset_control *arst;
+
+ atomic_t count;
+};
+
+static irqreturn_t bt1_axi_isr(int irq, void *data)
+{
+ struct bt1_axi *axi = data;
+ u32 low = 0, high = 0;
+
+ regmap_read(axi->sys_regs, BT1_AXI_WERRL, &low);
+ regmap_read(axi->sys_regs, BT1_AXI_WERRH, &high);
+
+ dev_crit_ratelimited(axi->dev,
+ "AXI-bus fault %d: %s at 0x%x%08x\n",
+ atomic_inc_return(&axi->count),
+ high & BT1_AXI_WERRH_TYPE ? "no slave" : "slave protocol error",
+ high, low);
+
+ /*
+	 * Print a backtrace on each CPU. This may be of little use if the
+	 * fault happened on the CPU running this IRQ handler, or if the
+	 * other core has carried on executing despite the error. Otherwise
+	 * the trace should point straight at the cause of the problem.
+ */
+ trigger_all_cpu_backtrace();
+
+ return IRQ_HANDLED;
+}
+
+static void bt1_axi_clear_data(void *data)
+{
+ struct bt1_axi *axi = data;
+ struct platform_device *pdev = to_platform_device(axi->dev);
+
+ platform_set_drvdata(pdev, NULL);
+}
+
+static struct bt1_axi *bt1_axi_create_data(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct bt1_axi *axi;
+ int ret;
+
+ axi = devm_kzalloc(dev, sizeof(*axi), GFP_KERNEL);
+ if (!axi)
+ return ERR_PTR(-ENOMEM);
+
+ ret = devm_add_action(dev, bt1_axi_clear_data, axi);
+ if (ret) {
+ dev_err(dev, "Can't add AXI EHB data clear action\n");
+ return ERR_PTR(ret);
+ }
+
+ axi->dev = dev;
+ atomic_set(&axi->count, 0);
+ platform_set_drvdata(pdev, axi);
+
+ return axi;
+}
+
+static int bt1_axi_request_regs(struct bt1_axi *axi)
+{
+ struct platform_device *pdev = to_platform_device(axi->dev);
+ struct device *dev = axi->dev;
+
+ axi->sys_regs = syscon_regmap_lookup_by_phandle(dev->of_node, "syscon");
+ if (IS_ERR(axi->sys_regs)) {
+ dev_err(dev, "Couldn't find syscon registers\n");
+ return PTR_ERR(axi->sys_regs);
+ }
+
+ axi->qos_regs = devm_platform_ioremap_resource_byname(pdev, "qos");
+ if (IS_ERR(axi->qos_regs))
+ dev_err(dev, "Couldn't map AXI-bus QoS registers\n");
+
+ return PTR_ERR_OR_ZERO(axi->qos_regs);
+}
+
+static int bt1_axi_request_rst(struct bt1_axi *axi)
+{
+ int ret;
+
+ axi->arst = devm_reset_control_get_optional_exclusive(axi->dev, "arst");
+ if (IS_ERR(axi->arst)) {
+ dev_warn(axi->dev, "Couldn't get reset control line\n");
+ return PTR_ERR(axi->arst);
+ }
+
+ ret = reset_control_deassert(axi->arst);
+ if (ret)
+ dev_err(axi->dev, "Failed to deassert the reset line\n");
+
+ return ret;
+}
+
+static void bt1_axi_disable_clk(void *data)
+{
+ struct bt1_axi *axi = data;
+
+ clk_disable_unprepare(axi->aclk);
+}
+
+static int bt1_axi_request_clk(struct bt1_axi *axi)
+{
+ int ret;
+
+ axi->aclk = devm_clk_get(axi->dev, "aclk");
+ if (IS_ERR(axi->aclk)) {
+ dev_err(axi->dev, "Couldn't get AXI Interconnect clock\n");
+ return PTR_ERR(axi->aclk);
+ }
+
+ ret = clk_prepare_enable(axi->aclk);
+ if (ret) {
+ dev_err(axi->dev, "Couldn't enable the AXI clock\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(axi->dev, bt1_axi_disable_clk, axi);
+ if (ret)
+ dev_err(axi->dev, "Can't add AXI clock disable action\n");
+
+ return ret;
+}
+
+static int bt1_axi_request_irq(struct bt1_axi *axi)
+{
+ struct platform_device *pdev = to_platform_device(axi->dev);
+ int ret;
+
+ axi->irq = platform_get_irq(pdev, 0);
+ if (axi->irq < 0)
+ return axi->irq;
+
+ ret = devm_request_irq(axi->dev, axi->irq, bt1_axi_isr, IRQF_SHARED,
+ "bt1-axi", axi);
+ if (ret)
+ dev_err(axi->dev, "Couldn't request AXI EHB IRQ\n");
+
+ return ret;
+}
+
+static ssize_t count_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bt1_axi *axi = dev_get_drvdata(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&axi->count));
+}
+static DEVICE_ATTR_RO(count);
+
+static ssize_t inject_error_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "Error injection: bus unaligned\n");
+}
+
+static ssize_t inject_error_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *data, size_t count)
+{
+ struct bt1_axi *axi = dev_get_drvdata(dev);
+
+ /*
+	 * An unaligned read from memory causes a CM2 bus error, while an
+	 * unaligned write triggers the AXI-bus write error that is handled
+	 * by this driver.
+ */
+ if (sysfs_streq(data, "bus"))
+ readb(axi->qos_regs);
+ else if (sysfs_streq(data, "unaligned"))
+ writeb(0, axi->qos_regs);
+ else
+ return -EINVAL;
+
+ return count;
+}
+static DEVICE_ATTR_RW(inject_error);
+
+static struct attribute *bt1_axi_sysfs_attrs[] = {
+ &dev_attr_count.attr,
+ &dev_attr_inject_error.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(bt1_axi_sysfs);
+
+static void bt1_axi_remove_sysfs(void *data)
+{
+ struct bt1_axi *axi = data;
+
+ device_remove_groups(axi->dev, bt1_axi_sysfs_groups);
+}
+
+static int bt1_axi_init_sysfs(struct bt1_axi *axi)
+{
+ int ret;
+
+ ret = device_add_groups(axi->dev, bt1_axi_sysfs_groups);
+ if (ret) {
+ dev_err(axi->dev, "Failed to add sysfs files group\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(axi->dev, bt1_axi_remove_sysfs, axi);
+ if (ret)
+ dev_err(axi->dev, "Can't add AXI EHB sysfs remove action\n");
+
+ return ret;
+}
+
+static int bt1_axi_probe(struct platform_device *pdev)
+{
+ struct bt1_axi *axi;
+ int ret;
+
+ axi = bt1_axi_create_data(pdev);
+ if (IS_ERR(axi))
+ return PTR_ERR(axi);
+
+ ret = bt1_axi_request_regs(axi);
+ if (ret)
+ return ret;
+
+ ret = bt1_axi_request_rst(axi);
+ if (ret)
+ return ret;
+
+ ret = bt1_axi_request_clk(axi);
+ if (ret)
+ return ret;
+
+ ret = bt1_axi_request_irq(axi);
+ if (ret)
+ return ret;
+
+ ret = bt1_axi_init_sysfs(axi);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct of_device_id bt1_axi_of_match[] = {
+ { .compatible = "baikal,bt1-axi" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, bt1_axi_of_match);
+
+static struct platform_driver bt1_axi_driver = {
+ .probe = bt1_axi_probe,
+ .driver = {
+ .name = "bt1-axi",
+ .of_match_table = bt1_axi_of_match
+ }
+};
+module_platform_driver(bt1_axi_driver);
+
+MODULE_AUTHOR("Serge Semin <Sergey.Semin@baikalelectronics.ru>");
+MODULE_DESCRIPTION("Baikal-T1 AXI-bus driver");
+MODULE_LICENSE("GPL v2");
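Both EHB drivers expose their error-injection knob as a device sysfs attribute, next to count (and timeout on the APB block). A userspace sketch that fires the AXI "bus" injection; the sysfs path here is hypothetical, since the real one depends on the device-tree node name on a given board:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical path; the real one follows the platform device name. */
	const char *attr = "/sys/devices/platform/soc/1f05a000.bus/inject_error";
	int fd = open(attr, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* bt1-axi accepts "bus" or "unaligned"; bt1-apb accepts "nodev" or "irq". */
	if (write(fd, "bus", strlen("bus")) < 0)
		perror("write");

	close(fd);
	return 0;
}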
diff --git a/drivers/bus/mhi/core/boot.c b/drivers/bus/mhi/core/boot.c
index ebad5eb48e5a..0b38014d040e 100644
--- a/drivers/bus/mhi/core/boot.c
+++ b/drivers/bus/mhi/core/boot.c
@@ -43,10 +43,7 @@ void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
lower_32_bits(mhi_buf->dma_addr));
mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len);
- sequence_id = prandom_u32() & BHIE_RXVECSTATUS_SEQNUM_BMSK;
-
- if (unlikely(!sequence_id))
- sequence_id = 1;
+ sequence_id = MHI_RANDOM_U32_NONZERO(BHIE_RXVECSTATUS_SEQNUM_BMSK);
mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS,
BHIE_RXVECDB_SEQNUM_BMSK, BHIE_RXVECDB_SEQNUM_SHFT,
@@ -121,7 +118,8 @@ static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl)
ee = mhi_get_exec_env(mhi_cntrl);
}
- dev_dbg(dev, "Waiting for image download completion, current EE: %s\n",
+ dev_dbg(dev,
+ "Waiting for RDDM image download via BHIe, current EE:%s\n",
TO_MHI_EXEC_STR(ee));
while (retry--) {
@@ -152,11 +150,14 @@ static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl)
int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic)
{
void __iomem *base = mhi_cntrl->bhie;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
u32 rx_status;
if (in_panic)
return __mhi_download_rddm_in_panic(mhi_cntrl);
+ dev_dbg(dev, "Waiting for RDDM image download via BHIe\n");
+
/* Wait for the image download to complete */
wait_event_timeout(mhi_cntrl->state_event,
mhi_read_reg_field(mhi_cntrl, base,
@@ -174,8 +175,10 @@ static int mhi_fw_load_amss(struct mhi_controller *mhi_cntrl,
const struct mhi_buf *mhi_buf)
{
void __iomem *base = mhi_cntrl->bhie;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
u32 tx_status, sequence_id;
+ int ret;
read_lock_bh(pm_lock);
if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
@@ -183,6 +186,9 @@ static int mhi_fw_load_amss(struct mhi_controller *mhi_cntrl,
return -EIO;
}
+ sequence_id = MHI_RANDOM_U32_NONZERO(BHIE_TXVECSTATUS_SEQNUM_BMSK);
+ dev_dbg(dev, "Starting AMSS download via BHIe. Sequence ID:%u\n",
+ sequence_id);
mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_HIGH_OFFS,
upper_32_bits(mhi_buf->dma_addr));
@@ -191,26 +197,25 @@ static int mhi_fw_load_amss(struct mhi_controller *mhi_cntrl,
mhi_write_reg(mhi_cntrl, base, BHIE_TXVECSIZE_OFFS, mhi_buf->len);
- sequence_id = prandom_u32() & BHIE_TXVECSTATUS_SEQNUM_BMSK;
mhi_write_reg_field(mhi_cntrl, base, BHIE_TXVECDB_OFFS,
BHIE_TXVECDB_SEQNUM_BMSK, BHIE_TXVECDB_SEQNUM_SHFT,
sequence_id);
read_unlock_bh(pm_lock);
/* Wait for the image download to complete */
- wait_event_timeout(mhi_cntrl->state_event,
- MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
- mhi_read_reg_field(mhi_cntrl, base,
- BHIE_TXVECSTATUS_OFFS,
- BHIE_TXVECSTATUS_STATUS_BMSK,
- BHIE_TXVECSTATUS_STATUS_SHFT,
- &tx_status) || tx_status,
- msecs_to_jiffies(mhi_cntrl->timeout_ms));
-
- if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
+ mhi_read_reg_field(mhi_cntrl, base,
+ BHIE_TXVECSTATUS_OFFS,
+ BHIE_TXVECSTATUS_STATUS_BMSK,
+ BHIE_TXVECSTATUS_STATUS_SHFT,
+ &tx_status) || tx_status,
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+ if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
+ tx_status != BHIE_TXVECSTATUS_STATUS_XFER_COMPL)
return -EIO;
- return (tx_status == BHIE_TXVECSTATUS_STATUS_XFER_COMPL) ? 0 : -EIO;
+ return (!ret) ? -ETIMEDOUT : 0;
}
static int mhi_fw_load_sbl(struct mhi_controller *mhi_cntrl,
@@ -239,14 +244,15 @@ static int mhi_fw_load_sbl(struct mhi_controller *mhi_cntrl,
goto invalid_pm_state;
}
- dev_dbg(dev, "Starting SBL download via BHI\n");
+ session_id = MHI_RANDOM_U32_NONZERO(BHI_TXDB_SEQNUM_BMSK);
+ dev_dbg(dev, "Starting SBL download via BHI. Session ID:%u\n",
+ session_id);
mhi_write_reg(mhi_cntrl, base, BHI_STATUS, 0);
mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_HIGH,
upper_32_bits(dma_addr));
mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_LOW,
lower_32_bits(dma_addr));
mhi_write_reg(mhi_cntrl, base, BHI_IMGSIZE, size);
- session_id = prandom_u32() & BHI_TXDB_SEQNUM_BMSK;
mhi_write_reg(mhi_cntrl, base, BHI_IMGTXDB, session_id);
read_unlock_bh(pm_lock);
@@ -377,30 +383,18 @@ static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl,
}
}
-void mhi_fw_load_worker(struct work_struct *work)
+void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
{
- struct mhi_controller *mhi_cntrl;
const struct firmware *firmware = NULL;
struct image_info *image_info;
- struct device *dev;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
const char *fw_name;
void *buf;
dma_addr_t dma_addr;
size_t size;
int ret;
- mhi_cntrl = container_of(work, struct mhi_controller, fw_worker);
- dev = &mhi_cntrl->mhi_dev->dev;
-
- dev_dbg(dev, "Waiting for device to enter PBL from: %s\n",
- TO_MHI_EXEC_STR(mhi_cntrl->ee));
-
- ret = wait_event_timeout(mhi_cntrl->state_event,
- MHI_IN_PBL(mhi_cntrl->ee) ||
- MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
- msecs_to_jiffies(mhi_cntrl->timeout_ms));
-
- if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+ if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
dev_err(dev, "Device MHI is not in valid state\n");
return;
}
@@ -446,7 +440,12 @@ void mhi_fw_load_worker(struct work_struct *work)
release_firmware(firmware);
/* Error or in EDL mode, we're done */
- if (ret || mhi_cntrl->ee == MHI_EE_EDL)
+ if (ret) {
+ dev_err(dev, "MHI did not load SBL, ret:%d\n", ret);
+ return;
+ }
+
+ if (mhi_cntrl->ee == MHI_EE_EDL)
return;
write_lock_irq(&mhi_cntrl->pm_lock);
@@ -474,8 +473,10 @@ fw_load_ee_pthru:
if (!mhi_cntrl->fbc_download)
return;
- if (ret)
+ if (ret) {
+ dev_err(dev, "MHI did not enter READY state\n");
goto error_read;
+ }
/* Wait for the SBL event */
ret = wait_event_timeout(mhi_cntrl->state_event,
@@ -493,6 +494,8 @@ fw_load_ee_pthru:
ret = mhi_fw_load_amss(mhi_cntrl,
/* Vector table is the last entry */
&image_info->mhi_buf[image_info->entries - 1]);
+ if (ret)
+ dev_err(dev, "MHI did not load AMSS, ret:%d\n", ret);
release_firmware(firmware);
diff --git a/drivers/bus/mhi/core/init.c b/drivers/bus/mhi/core/init.c
index 1f8c82603179..e43a190a7a36 100644
--- a/drivers/bus/mhi/core/init.c
+++ b/drivers/bus/mhi/core/init.c
@@ -34,6 +34,8 @@ const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX] = {
[DEV_ST_TRANSITION_READY] = "READY",
[DEV_ST_TRANSITION_SBL] = "SBL",
[DEV_ST_TRANSITION_MISSION_MODE] = "MISSION_MODE",
+ [DEV_ST_TRANSITION_SYS_ERR] = "SYS_ERR",
+ [DEV_ST_TRANSITION_DISABLE] = "DISABLE",
};
const char * const mhi_state_str[MHI_STATE_MAX] = {
@@ -835,8 +837,6 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl,
spin_lock_init(&mhi_cntrl->transition_lock);
spin_lock_init(&mhi_cntrl->wlock);
INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker);
- INIT_WORK(&mhi_cntrl->syserr_worker, mhi_pm_sys_err_worker);
- INIT_WORK(&mhi_cntrl->fw_worker, mhi_fw_load_worker);
init_waitqueue_head(&mhi_cntrl->state_event);
mhi_cmd = mhi_cntrl->mhi_cmd;
@@ -864,6 +864,10 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl,
mutex_init(&mhi_chan->mutex);
init_completion(&mhi_chan->completion);
rwlock_init(&mhi_chan->lock);
+
+ /* used in setting bei field of TRE */
+ mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
+ mhi_chan->intmod = mhi_event->intmod;
}
if (mhi_cntrl->bounce_buf) {
diff --git a/drivers/bus/mhi/core/internal.h b/drivers/bus/mhi/core/internal.h
index 095d95bc0e37..b1f640b75a94 100644
--- a/drivers/bus/mhi/core/internal.h
+++ b/drivers/bus/mhi/core/internal.h
@@ -386,6 +386,8 @@ enum dev_st_transition {
DEV_ST_TRANSITION_READY,
DEV_ST_TRANSITION_SBL,
DEV_ST_TRANSITION_MISSION_MODE,
+ DEV_ST_TRANSITION_SYS_ERR,
+ DEV_ST_TRANSITION_DISABLE,
DEV_ST_TRANSITION_MAX,
};
@@ -452,6 +454,7 @@ enum mhi_pm_state {
#define PRIMARY_CMD_RING 0
#define MHI_DEV_WAKE_DB 127
#define MHI_MAX_MTU 0xffff
+#define MHI_RANDOM_U32_NONZERO(bmsk) (prandom_u32_max(bmsk) + 1)
enum mhi_er_type {
MHI_ER_TYPE_INVALID = 0x0,
@@ -586,7 +589,7 @@ enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl);
int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
enum dev_st_transition state);
void mhi_pm_st_worker(struct work_struct *work);
-void mhi_pm_sys_err_worker(struct work_struct *work);
+void mhi_pm_sys_err_handler(struct mhi_controller *mhi_cntrl);
void mhi_fw_load_worker(struct work_struct *work);
int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl);
void mhi_ctrl_ev_task(unsigned long data);
@@ -627,6 +630,7 @@ int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl);
void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl);
void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
struct image_info *img_info);
+void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl);
int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
struct mhi_chan *mhi_chan);
int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
@@ -670,8 +674,7 @@ irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *dev);
irqreturn_t mhi_intvec_handler(int irq_number, void *dev);
int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
- void *buf, void *cb, size_t buf_len, enum mhi_flags flags);
-
+ struct mhi_buf_info *info, enum mhi_flags flags);
int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl,
struct mhi_buf_info *buf_info);
int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl,
diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c
index 97e06cc586e4..1f622ce6be8b 100644
--- a/drivers/bus/mhi/core/main.c
+++ b/drivers/bus/mhi/core/main.c
@@ -258,7 +258,7 @@ int mhi_destroy_device(struct device *dev, void *data)
return 0;
}
-static void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason)
+void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason)
{
struct mhi_driver *mhi_drv;
@@ -270,6 +270,7 @@ static void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason)
if (mhi_drv->status_cb)
mhi_drv->status_cb(mhi_dev, cb_reason);
}
+EXPORT_SYMBOL_GPL(mhi_notify);
/* Bind MHI channels to MHI devices */
void mhi_create_devices(struct mhi_controller *mhi_cntrl)
@@ -368,30 +369,37 @@ irqreturn_t mhi_irq_handler(int irq_number, void *dev)
return IRQ_HANDLED;
}
-irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *dev)
+irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
{
- struct mhi_controller *mhi_cntrl = dev;
+ struct mhi_controller *mhi_cntrl = priv;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
enum mhi_state state = MHI_STATE_MAX;
enum mhi_pm_state pm_state = 0;
enum mhi_ee_type ee = 0;
write_lock_irq(&mhi_cntrl->pm_lock);
- if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
- state = mhi_get_mhi_state(mhi_cntrl);
- ee = mhi_cntrl->ee;
- mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
+ if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ goto exit_intvec;
}
+ state = mhi_get_mhi_state(mhi_cntrl);
+ ee = mhi_cntrl->ee;
+ mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
+ dev_dbg(dev, "local ee:%s device ee:%s dev_state:%s\n",
+ TO_MHI_EXEC_STR(mhi_cntrl->ee), TO_MHI_EXEC_STR(ee),
+ TO_MHI_STATE_STR(state));
+
if (state == MHI_STATE_SYS_ERR) {
- dev_dbg(&mhi_cntrl->mhi_dev->dev, "System error detected\n");
+ dev_dbg(dev, "System error detected\n");
pm_state = mhi_tryset_pm_state(mhi_cntrl,
MHI_PM_SYS_ERR_DETECT);
}
write_unlock_irq(&mhi_cntrl->pm_lock);
- /* If device in RDDM don't bother processing SYS error */
- if (mhi_cntrl->ee == MHI_EE_RDDM) {
- if (mhi_cntrl->ee != ee) {
+ /* If device supports RDDM don't bother processing SYS error */
+ if (mhi_cntrl->rddm_image) {
+ if (mhi_cntrl->ee == MHI_EE_RDDM && mhi_cntrl->ee != ee) {
mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
wake_up_all(&mhi_cntrl->state_event);
}
@@ -405,7 +413,7 @@ irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *dev)
if (MHI_IN_PBL(ee))
mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR);
else
- schedule_work(&mhi_cntrl->syserr_worker);
+ mhi_pm_sys_err_handler(mhi_cntrl);
}
exit_intvec:
@@ -513,7 +521,10 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
result.buf_addr = buf_info->cb_buf;
- result.bytes_xferd = xfer_len;
+
+ /* truncate to buf len if xfer_len is larger */
+ result.bytes_xferd =
+ min_t(u16, xfer_len, buf_info->len);
mhi_del_ring_element(mhi_cntrl, buf_ring);
mhi_del_ring_element(mhi_cntrl, tre_ring);
local_rp = tre_ring->rp;
@@ -597,7 +608,9 @@ static int parse_rsc_event(struct mhi_controller *mhi_cntrl,
result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
-EOVERFLOW : 0;
- result.bytes_xferd = xfer_len;
+
+ /* truncate to buf len if xfer_len is larger */
+ result.bytes_xferd = min_t(u16, xfer_len, buf_info->len);
result.buf_addr = buf_info->cb_buf;
result.dir = mhi_chan->dir;
@@ -722,13 +735,18 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
{
enum mhi_pm_state new_state;
+ /* skip SYS_ERROR handling if RDDM supported */
+ if (mhi_cntrl->ee == MHI_EE_RDDM ||
+ mhi_cntrl->rddm_image)
+ break;
+
dev_dbg(dev, "System error detected\n");
write_lock_irq(&mhi_cntrl->pm_lock);
new_state = mhi_tryset_pm_state(mhi_cntrl,
MHI_PM_SYS_ERR_DETECT);
write_unlock_irq(&mhi_cntrl->pm_lock);
if (new_state == MHI_PM_SYS_ERR_DETECT)
- schedule_work(&mhi_cntrl->syserr_worker);
+ mhi_pm_sys_err_handler(mhi_cntrl);
break;
}
default:
@@ -774,9 +792,18 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
}
case MHI_PKT_TYPE_TX_EVENT:
chan = MHI_TRE_GET_EV_CHID(local_rp);
- mhi_chan = &mhi_cntrl->mhi_chan[chan];
- parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
- event_quota--;
+
+ WARN_ON(chan >= mhi_cntrl->max_chan);
+
+ /*
+ * Only process the event ring elements whose channel
+ * ID is within the maximum supported range.
+ */
+ if (chan < mhi_cntrl->max_chan) {
+ mhi_chan = &mhi_cntrl->mhi_chan[chan];
+ parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
+ event_quota--;
+ }
break;
default:
dev_err(dev, "Unhandled event type: %d\n", type);
@@ -819,14 +846,23 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);
chan = MHI_TRE_GET_EV_CHID(local_rp);
- mhi_chan = &mhi_cntrl->mhi_chan[chan];
-
- if (likely(type == MHI_PKT_TYPE_TX_EVENT)) {
- parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
- event_quota--;
- } else if (type == MHI_PKT_TYPE_RSC_TX_EVENT) {
- parse_rsc_event(mhi_cntrl, local_rp, mhi_chan);
- event_quota--;
+
+ WARN_ON(chan >= mhi_cntrl->max_chan);
+
+ /*
+ * Only process the event ring elements whose channel
+ * ID is within the maximum supported range.
+ */
+ if (chan < mhi_cntrl->max_chan) {
+ mhi_chan = &mhi_cntrl->mhi_chan[chan];
+
+ if (likely(type == MHI_PKT_TYPE_TX_EVENT)) {
+ parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
+ event_quota--;
+ } else if (type == MHI_PKT_TYPE_RSC_TX_EVENT) {
+ parse_rsc_event(mhi_cntrl, local_rp, mhi_chan);
+ event_quota--;
+ }
}
mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
@@ -896,7 +932,7 @@ void mhi_ctrl_ev_task(unsigned long data)
}
write_unlock_irq(&mhi_cntrl->pm_lock);
if (pm_state == MHI_PM_SYS_ERR_DETECT)
- schedule_work(&mhi_cntrl->syserr_worker);
+ mhi_pm_sys_err_handler(mhi_cntrl);
}
}
@@ -918,9 +954,7 @@ int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
mhi_dev->dl_chan;
struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
- struct mhi_ring *buf_ring = &mhi_chan->buf_ring;
- struct mhi_buf_info *buf_info;
- struct mhi_tre *mhi_tre;
+ struct mhi_buf_info buf_info = { };
int ret;
/* If MHI host pre-allocates buffers then client drivers cannot queue */
@@ -945,27 +979,15 @@ int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
/* Toggle wake to exit out of M2 */
mhi_cntrl->wake_toggle(mhi_cntrl);
- /* Generate the TRE */
- buf_info = buf_ring->wp;
+ buf_info.v_addr = skb->data;
+ buf_info.cb_buf = skb;
+ buf_info.len = len;
- buf_info->v_addr = skb->data;
- buf_info->cb_buf = skb;
- buf_info->wp = tre_ring->wp;
- buf_info->dir = mhi_chan->dir;
- buf_info->len = len;
- ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
- if (ret)
- goto map_error;
-
- mhi_tre = tre_ring->wp;
-
- mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
- mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_info->len);
- mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(1, 1, 0, 0);
-
- /* increment WP */
- mhi_add_ring_element(mhi_cntrl, tre_ring);
- mhi_add_ring_element(mhi_cntrl, buf_ring);
+ ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
+ if (unlikely(ret)) {
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+ return ret;
+ }
if (mhi_chan->dir == DMA_TO_DEVICE)
atomic_inc(&mhi_cntrl->pending_pkts);
@@ -979,11 +1001,6 @@ int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
read_unlock_bh(&mhi_cntrl->pm_lock);
return 0;
-
-map_error:
- read_unlock_bh(&mhi_cntrl->pm_lock);
-
- return ret;
}
EXPORT_SYMBOL_GPL(mhi_queue_skb);
@@ -995,9 +1012,8 @@ int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
mhi_dev->dl_chan;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
- struct mhi_ring *buf_ring = &mhi_chan->buf_ring;
- struct mhi_buf_info *buf_info;
- struct mhi_tre *mhi_tre;
+ struct mhi_buf_info buf_info = { };
+ int ret;
/* If MHI host pre-allocates buffers then client drivers cannot queue */
if (mhi_chan->pre_alloc)
@@ -1024,25 +1040,16 @@ int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
/* Toggle wake to exit out of M2 */
mhi_cntrl->wake_toggle(mhi_cntrl);
- /* Generate the TRE */
- buf_info = buf_ring->wp;
- WARN_ON(buf_info->used);
- buf_info->p_addr = mhi_buf->dma_addr;
- buf_info->pre_mapped = true;
- buf_info->cb_buf = mhi_buf;
- buf_info->wp = tre_ring->wp;
- buf_info->dir = mhi_chan->dir;
- buf_info->len = len;
-
- mhi_tre = tre_ring->wp;
-
- mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
- mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_info->len);
- mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(1, 1, 0, 0);
+ buf_info.p_addr = mhi_buf->dma_addr;
+ buf_info.cb_buf = mhi_buf;
+ buf_info.pre_mapped = true;
+ buf_info.len = len;
- /* increment WP */
- mhi_add_ring_element(mhi_cntrl, tre_ring);
- mhi_add_ring_element(mhi_cntrl, buf_ring);
+ ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
+ if (unlikely(ret)) {
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+ return ret;
+ }
if (mhi_chan->dir == DMA_TO_DEVICE)
atomic_inc(&mhi_cntrl->pending_pkts);
@@ -1060,7 +1067,7 @@ int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
EXPORT_SYMBOL_GPL(mhi_queue_dma);
int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
- void *buf, void *cb, size_t buf_len, enum mhi_flags flags)
+ struct mhi_buf_info *info, enum mhi_flags flags)
{
struct mhi_ring *buf_ring, *tre_ring;
struct mhi_tre *mhi_tre;
@@ -1072,15 +1079,22 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
tre_ring = &mhi_chan->tre_ring;
buf_info = buf_ring->wp;
- buf_info->v_addr = buf;
- buf_info->cb_buf = cb;
+ WARN_ON(buf_info->used);
+ buf_info->pre_mapped = info->pre_mapped;
+ if (info->pre_mapped)
+ buf_info->p_addr = info->p_addr;
+ else
+ buf_info->v_addr = info->v_addr;
+ buf_info->cb_buf = info->cb_buf;
buf_info->wp = tre_ring->wp;
buf_info->dir = mhi_chan->dir;
- buf_info->len = buf_len;
+ buf_info->len = info->len;
- ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
- if (ret)
- return ret;
+ if (!info->pre_mapped) {
+ ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
+ if (ret)
+ return ret;
+ }
eob = !!(flags & MHI_EOB);
eot = !!(flags & MHI_EOT);
@@ -1089,7 +1103,7 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
mhi_tre = tre_ring->wp;
mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
- mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_len);
+ mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(info->len);
mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(bei, eot, eob, chain);
/* increment WP */
@@ -1106,6 +1120,7 @@ int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
mhi_dev->dl_chan;
struct mhi_ring *tre_ring;
+ struct mhi_buf_info buf_info = { };
unsigned long flags;
int ret;
@@ -1121,7 +1136,11 @@ int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
if (mhi_is_ring_full(mhi_cntrl, tre_ring))
return -ENOMEM;
- ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf, buf, len, mflags);
+ buf_info.v_addr = buf;
+ buf_info.cb_buf = buf;
+ buf_info.len = len;
+
+ ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
if (unlikely(ret))
return ret;
@@ -1322,7 +1341,7 @@ int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
while (nr_el--) {
void *buf;
-
+ struct mhi_buf_info info = { };
buf = kmalloc(len, GFP_KERNEL);
if (!buf) {
ret = -ENOMEM;
@@ -1330,8 +1349,10 @@ int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
}
/* Prepare transfer descriptors */
- ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf, buf,
- len, MHI_EOT);
+ info.v_addr = buf;
+ info.cb_buf = buf;
+ info.len = len;
+ ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &info, MHI_EOT);
if (ret) {
kfree(buf);
goto error_pre_alloc;
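Taken together, the main.c changes funnel every queueing path through one place: callers now fill a stack-allocated struct mhi_buf_info and hand it to mhi_gen_tre(), which performs the mapping (unless pre_mapped is set) and writes the TRE. A condensed view of the caller-side pattern, drawn from the mhi_queue_buf() hunk above; it is not a standalone program, just the shape of a call inside the MHI core:

	struct mhi_buf_info buf_info = { };

	buf_info.v_addr = buf;		/* or .p_addr with .pre_mapped = true */
	buf_info.cb_buf = buf;
	buf_info.len = len;

	/* mhi_gen_tre() maps the buffer if needed and fills in the TRE. */
	ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
	if (unlikely(ret))
		return ret;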
diff --git a/drivers/bus/mhi/core/pm.c b/drivers/bus/mhi/core/pm.c
index dc83d65f7784..796098078083 100644
--- a/drivers/bus/mhi/core/pm.c
+++ b/drivers/bus/mhi/core/pm.c
@@ -288,14 +288,18 @@ int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl)
for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
- write_lock_irq(&mhi_chan->lock);
- if (mhi_chan->db_cfg.reset_req)
+ if (mhi_chan->db_cfg.reset_req) {
+ write_lock_irq(&mhi_chan->lock);
mhi_chan->db_cfg.db_mode = true;
+ write_unlock_irq(&mhi_chan->lock);
+ }
+
+ read_lock_irq(&mhi_chan->lock);
/* Only ring DB if ring is not empty */
if (tre_ring->base && tre_ring->wp != tre_ring->rp)
mhi_ring_chan_db(mhi_cntrl, mhi_chan);
- write_unlock_irq(&mhi_chan->lock);
+ read_unlock_irq(&mhi_chan->lock);
}
mhi_cntrl->wake_put(mhi_cntrl, false);
@@ -449,19 +453,8 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
to_mhi_pm_state_str(transition_state));
/* We must notify MHI control driver so it can clean up first */
- if (transition_state == MHI_PM_SYS_ERR_PROCESS) {
- /*
- * If controller supports RDDM, we do not process
- * SYS error state, instead we will jump directly
- * to RDDM state
- */
- if (mhi_cntrl->rddm_image) {
- dev_dbg(dev,
- "Controller supports RDDM, so skip SYS_ERR\n");
- return;
- }
+ if (transition_state == MHI_PM_SYS_ERR_PROCESS)
mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_SYS_ERROR);
- }
mutex_lock(&mhi_cntrl->pm_mutex);
write_lock_irq(&mhi_cntrl->pm_lock);
@@ -527,8 +520,6 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
mutex_unlock(&mhi_cntrl->pm_mutex);
dev_dbg(dev, "Waiting for all pending threads to complete\n");
wake_up_all(&mhi_cntrl->state_event);
- flush_work(&mhi_cntrl->st_worker);
- flush_work(&mhi_cntrl->fw_worker);
dev_dbg(dev, "Reset all active channels and remove MHI devices\n");
device_for_each_child(mhi_cntrl->cntrl_dev, NULL, mhi_destroy_device);
@@ -608,13 +599,17 @@ int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
}
/* SYS_ERR worker */
-void mhi_pm_sys_err_worker(struct work_struct *work)
+void mhi_pm_sys_err_handler(struct mhi_controller *mhi_cntrl)
{
- struct mhi_controller *mhi_cntrl = container_of(work,
- struct mhi_controller,
- syserr_worker);
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+
+ /* skip if controller supports RDDM */
+ if (mhi_cntrl->rddm_image) {
+ dev_dbg(dev, "Controller supports RDDM, skip SYS_ERROR\n");
+ return;
+ }
- mhi_pm_disable_transition(mhi_cntrl, MHI_PM_SYS_ERR_PROCESS);
+ mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_SYS_ERR);
}
/* Device State Transition worker */
@@ -643,7 +638,7 @@ void mhi_pm_st_worker(struct work_struct *work)
mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
write_unlock_irq(&mhi_cntrl->pm_lock);
if (MHI_IN_PBL(mhi_cntrl->ee))
- wake_up_all(&mhi_cntrl->state_event);
+ mhi_fw_load_handler(mhi_cntrl);
break;
case DEV_ST_TRANSITION_SBL:
write_lock_irq(&mhi_cntrl->pm_lock);
@@ -662,6 +657,14 @@ void mhi_pm_st_worker(struct work_struct *work)
case DEV_ST_TRANSITION_READY:
mhi_ready_state_transition(mhi_cntrl);
break;
+ case DEV_ST_TRANSITION_SYS_ERR:
+ mhi_pm_disable_transition
+ (mhi_cntrl, MHI_PM_SYS_ERR_PROCESS);
+ break;
+ case DEV_ST_TRANSITION_DISABLE:
+ mhi_pm_disable_transition
+ (mhi_cntrl, MHI_PM_SHUTDOWN_PROCESS);
+ break;
default:
break;
}
@@ -669,6 +672,149 @@ void mhi_pm_st_worker(struct work_struct *work)
}
}
+int mhi_pm_suspend(struct mhi_controller *mhi_cntrl)
+{
+ struct mhi_chan *itr, *tmp;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ enum mhi_pm_state new_state;
+ int ret;
+
+ if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
+ return -EINVAL;
+
+ if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
+ return -EIO;
+
+ /* Return busy if there are any pending resources */
+ if (atomic_read(&mhi_cntrl->dev_wake))
+ return -EBUSY;
+
+ /* Take MHI out of M2 state */
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ mhi_cntrl->wake_get(mhi_cntrl, false);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ mhi_cntrl->dev_state == MHI_STATE_M0 ||
+ mhi_cntrl->dev_state == MHI_STATE_M1 ||
+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ mhi_cntrl->wake_put(mhi_cntrl, false);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+ dev_err(dev,
+ "Could not enter M0/M1 state");
+ return -EIO;
+ }
+
+ write_lock_irq(&mhi_cntrl->pm_lock);
+
+ if (atomic_read(&mhi_cntrl->dev_wake)) {
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ return -EBUSY;
+ }
+
+ dev_info(dev, "Allowing M3 transition\n");
+ new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_ENTER);
+ if (new_state != MHI_PM_M3_ENTER) {
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ dev_err(dev,
+ "Error setting to PM state: %s from: %s\n",
+ to_mhi_pm_state_str(MHI_PM_M3_ENTER),
+ to_mhi_pm_state_str(mhi_cntrl->pm_state));
+ return -EIO;
+ }
+
+ /* Set MHI to M3 and wait for completion */
+ mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M3);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ dev_info(dev, "Wait for M3 completion\n");
+
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ mhi_cntrl->dev_state == MHI_STATE_M3 ||
+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+ if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+ dev_err(dev,
+ "Did not enter M3 state, MHI state: %s, PM state: %s\n",
+ TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+ to_mhi_pm_state_str(mhi_cntrl->pm_state));
+ return -EIO;
+ }
+
+ /* Notify clients about entering LPM */
+ list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
+ mutex_lock(&itr->mutex);
+ if (itr->mhi_dev)
+ mhi_notify(itr->mhi_dev, MHI_CB_LPM_ENTER);
+ mutex_unlock(&itr->mutex);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mhi_pm_suspend);
+
+int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
+{
+ struct mhi_chan *itr, *tmp;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ enum mhi_pm_state cur_state;
+ int ret;
+
+ dev_info(dev, "Entered with PM state: %s, MHI state: %s\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state),
+ TO_MHI_STATE_STR(mhi_cntrl->dev_state));
+
+ if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
+ return 0;
+
+ if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
+ return -EIO;
+
+ /* Notify clients about exiting LPM */
+ list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
+ mutex_lock(&itr->mutex);
+ if (itr->mhi_dev)
+ mhi_notify(itr->mhi_dev, MHI_CB_LPM_EXIT);
+ mutex_unlock(&itr->mutex);
+ }
+
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_EXIT);
+ if (cur_state != MHI_PM_M3_EXIT) {
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ dev_info(dev,
+ "Error setting to PM state: %s from: %s\n",
+ to_mhi_pm_state_str(MHI_PM_M3_EXIT),
+ to_mhi_pm_state_str(mhi_cntrl->pm_state));
+ return -EIO;
+ }
+
+ /* Set MHI to M0 and wait for completion */
+ mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ mhi_cntrl->dev_state == MHI_STATE_M0 ||
+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+ if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+ dev_err(dev,
+ "Did not enter M0 state, MHI state: %s, PM state: %s\n",
+ TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+ to_mhi_pm_state_str(mhi_cntrl->pm_state));
+ return -EIO;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mhi_pm_resume);
+
int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl)
{
int ret;
@@ -760,6 +906,7 @@ static void mhi_deassert_dev_wake(struct mhi_controller *mhi_cntrl,
int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
{
+ enum mhi_state state;
enum mhi_ee_type current_ee;
enum dev_st_transition next_state;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
@@ -829,13 +976,36 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
goto error_bhi_offset;
}
+ state = mhi_get_mhi_state(mhi_cntrl);
+ if (state == MHI_STATE_SYS_ERR) {
+ mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state) ||
+ mhi_read_reg_field(mhi_cntrl,
+ mhi_cntrl->regs,
+ MHICTRL,
+ MHICTRL_RESET_MASK,
+ MHICTRL_RESET_SHIFT,
+ &val) ||
+ !val,
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+ if (!ret) {
+ ret = -EIO;
+ dev_info(dev, "Failed to reset MHI due to syserr state\n");
+ goto error_bhi_offset;
+ }
+
+ /*
+ * device clears INTVEC as part of RESET processing,
+ * re-program it
+ */
+ mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
+ }
+
/* Transition to next state */
next_state = MHI_IN_PBL(current_ee) ?
DEV_ST_TRANSITION_PBL : DEV_ST_TRANSITION_READY;
- if (next_state == DEV_ST_TRANSITION_PBL)
- schedule_work(&mhi_cntrl->fw_worker);
-
mhi_queue_state_transition(mhi_cntrl, next_state);
mutex_unlock(&mhi_cntrl->pm_mutex);
@@ -876,7 +1046,12 @@ void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful)
to_mhi_pm_state_str(MHI_PM_LD_ERR_FATAL_DETECT),
to_mhi_pm_state_str(mhi_cntrl->pm_state));
}
- mhi_pm_disable_transition(mhi_cntrl, MHI_PM_SHUTDOWN_PROCESS);
+
+ mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_DISABLE);
+
+ /* Wait for shutdown to complete */
+ flush_work(&mhi_cntrl->st_worker);
+
mhi_deinit_free_irq(mhi_cntrl);
if (!mhi_cntrl->pre_init) {
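Since mhi_pm_suspend() and mhi_pm_resume() are exported above, a controller driver can drive the M3 handshake from its own system PM callbacks. A hypothetical sliver of such glue (the drvdata arrangement and names are illustrative only, not something defined in this series):

#include <linux/device.h>
#include <linux/mhi.h>
#include <linux/pm.h>

static int example_mhi_suspend(struct device *dev)
{
	/* Assumes the controller driver stored its mhi_cntrl as drvdata. */
	struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);

	return mhi_pm_suspend(mhi_cntrl);
}

static int example_mhi_resume(struct device *dev)
{
	struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);

	return mhi_pm_resume(mhi_cntrl);
}

static SIMPLE_DEV_PM_OPS(example_mhi_pm_ops, example_mhi_suspend,
			 example_mhi_resume);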
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index e5f5f48d69d2..3affd180baac 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -1275,13 +1275,6 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
SYSC_QUIRK_LEGACY_IDLE),
SYSC_QUIRK("smartreflex", 0, -ENODEV, 0x38, -ENODEV, 0x00000000, 0xffffffff,
SYSC_QUIRK_LEGACY_IDLE),
- SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000015, 0xffffffff,
- 0),
- /* Some timers on omap4 and later */
- SYSC_QUIRK("timer", 0, 0, 0x10, -ENODEV, 0x50002100, 0xffffffff,
- 0),
- SYSC_QUIRK("timer", 0, 0, 0x10, -ENODEV, 0x4fff1301, 0xffff00ff,
- 0),
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000046, 0xffffffff,
SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff,
@@ -1404,6 +1397,13 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
SYSC_QUIRK("slimbus", 0, 0, 0x10, -ENODEV, 0x40002903, 0xffffffff, 0),
SYSC_QUIRK("spinlock", 0, 0, 0x10, -ENODEV, 0x50020000, 0xffffffff, 0),
SYSC_QUIRK("rng", 0, 0x1fe0, 0x1fe4, -ENODEV, 0x00000020, 0xffffffff, 0),
+ SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000013, 0xffffffff, 0),
+ SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000015, 0xffffffff, 0),
+ /* Some timers on omap4 and later */
+ SYSC_QUIRK("timer", 0, 0, 0x10, -ENODEV, 0x50002100, 0xffffffff, 0),
+ SYSC_QUIRK("timer", 0, 0, 0x10, -ENODEV, 0x4fff1301, 0xffff00ff, 0),
+ SYSC_QUIRK("timer32k", 0, 0, 0x4, -ENODEV, 0x00000040, 0xffffffff, 0),
+ SYSC_QUIRK("timer32k", 0, 0, 0x4, -ENODEV, 0x00000011, 0xffffffff, 0),
SYSC_QUIRK("timer32k", 0, 0, 0x4, -ENODEV, 0x00000060, 0xffffffff, 0),
SYSC_QUIRK("tpcc", 0, 0, -ENODEV, -ENODEV, 0x40014c00, 0xffffffff, 0),
SYSC_QUIRK("usbhstll", 0, 0, 0x10, 0x14, 0x00000004, 0xffffffff, 0),
@@ -2744,6 +2744,17 @@ static int sysc_init_soc(struct sysc *ddata)
if (match && match->data)
sysc_soc->soc = (int)match->data;
+ /* Ignore devices that are not available on HS and EMU SoCs */
+ if (!sysc_soc->general_purpose) {
+ switch (sysc_soc->soc) {
+ case SOC_3430 ... SOC_3630:
+ sysc_add_disabled(0x48304000); /* timer12 */
+ break;
+ default:
+ break;
+ }
+ }
+
match = soc_device_match(sysc_soc_feat_match);
if (!match)
return 0;
diff --git a/drivers/bus/vexpress-config.c b/drivers/bus/vexpress-config.c
index ff70575b2db6..a58ac0c8e282 100644
--- a/drivers/bus/vexpress-config.c
+++ b/drivers/bus/vexpress-config.c
@@ -6,10 +6,61 @@
#include <linux/err.h>
#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/of_device.h>
+#include <linux/sched/signal.h>
+#include <linux/slab.h>
#include <linux/vexpress.h>
+#define SYS_MISC 0x0
+#define SYS_MISC_MASTERSITE (1 << 14)
+
+#define SYS_PROCID0 0x24
+#define SYS_PROCID1 0x28
+#define SYS_HBI_MASK 0xfff
+#define SYS_PROCIDx_HBI_SHIFT 0
+
+#define SYS_CFGDATA 0x40
+
+#define SYS_CFGCTRL 0x44
+#define SYS_CFGCTRL_START (1 << 31)
+#define SYS_CFGCTRL_WRITE (1 << 30)
+#define SYS_CFGCTRL_DCC(n) (((n) & 0xf) << 26)
+#define SYS_CFGCTRL_FUNC(n) (((n) & 0x3f) << 20)
+#define SYS_CFGCTRL_SITE(n) (((n) & 0x3) << 16)
+#define SYS_CFGCTRL_POSITION(n) (((n) & 0xf) << 12)
+#define SYS_CFGCTRL_DEVICE(n) (((n) & 0xfff) << 0)
+
+#define SYS_CFGSTAT 0x48
+#define SYS_CFGSTAT_ERR (1 << 1)
+#define SYS_CFGSTAT_COMPLETE (1 << 0)
+
+#define VEXPRESS_SITE_MB 0
+#define VEXPRESS_SITE_DB1 1
+#define VEXPRESS_SITE_DB2 2
+#define VEXPRESS_SITE_MASTER 0xf
+
+struct vexpress_syscfg {
+ struct device *dev;
+ void __iomem *base;
+ struct list_head funcs;
+};
+
+struct vexpress_syscfg_func {
+ struct list_head list;
+ struct vexpress_syscfg *syscfg;
+ struct regmap *regmap;
+ int num_templates;
+ u32 template[]; /* Keep it last! */
+};
+
+struct vexpress_config_bridge_ops {
+ struct regmap * (*regmap_init)(struct device *dev, void *context);
+ void (*regmap_exit)(struct regmap *regmap, void *context);
+};
struct vexpress_config_bridge {
struct vexpress_config_bridge_ops *ops;
@@ -18,26 +69,20 @@ struct vexpress_config_bridge {
static DEFINE_MUTEX(vexpress_config_mutex);
-static struct class *vexpress_config_class;
static u32 vexpress_config_site_master = VEXPRESS_SITE_MASTER;
-void vexpress_config_set_master(u32 site)
+static void vexpress_config_set_master(u32 site)
{
vexpress_config_site_master = site;
}
-u32 vexpress_config_get_master(void)
-{
- return vexpress_config_site_master;
-}
-
-void vexpress_config_lock(void *arg)
+static void vexpress_config_lock(void *arg)
{
mutex_lock(&vexpress_config_mutex);
}
-void vexpress_config_unlock(void *arg)
+static void vexpress_config_unlock(void *arg)
{
mutex_unlock(&vexpress_config_mutex);
}
@@ -59,7 +104,7 @@ static void vexpress_config_find_prop(struct device_node *node,
}
}
-int vexpress_config_get_topo(struct device_node *node, u32 *site,
+static int vexpress_config_get_topo(struct device_node *node, u32 *site,
u32 *position, u32 *dcc)
{
vexpress_config_find_prop(node, "arm,vexpress,site", site);
@@ -88,9 +133,6 @@ struct regmap *devm_regmap_init_vexpress_config(struct device *dev)
struct regmap *regmap;
struct regmap **res;
- if (WARN_ON(dev->parent->class != vexpress_config_class))
- return ERR_PTR(-ENODEV);
-
bridge = dev_get_drvdata(dev->parent);
if (WARN_ON(!bridge))
return ERR_PTR(-EINVAL);
@@ -113,91 +155,265 @@ struct regmap *devm_regmap_init_vexpress_config(struct device *dev)
}
EXPORT_SYMBOL_GPL(devm_regmap_init_vexpress_config);
-struct device *vexpress_config_bridge_register(struct device *parent,
- struct vexpress_config_bridge_ops *ops, void *context)
+static int vexpress_syscfg_exec(struct vexpress_syscfg_func *func,
+ int index, bool write, u32 *data)
{
- struct device *dev;
- struct vexpress_config_bridge *bridge;
+ struct vexpress_syscfg *syscfg = func->syscfg;
+ u32 command, status;
+ int tries;
+ long timeout;
- if (!vexpress_config_class) {
- vexpress_config_class = class_create(THIS_MODULE,
- "vexpress-config");
- if (IS_ERR(vexpress_config_class))
- return (void *)vexpress_config_class;
+ if (WARN_ON(index >= func->num_templates))
+ return -EINVAL;
+
+ command = readl(syscfg->base + SYS_CFGCTRL);
+ if (WARN_ON(command & SYS_CFGCTRL_START))
+ return -EBUSY;
+
+ command = func->template[index];
+ command |= SYS_CFGCTRL_START;
+ command |= write ? SYS_CFGCTRL_WRITE : 0;
+
+ /* Use a canary for reads */
+ if (!write)
+ *data = 0xdeadbeef;
+
+ dev_dbg(syscfg->dev, "func %p, command %x, data %x\n",
+ func, command, *data);
+ writel(*data, syscfg->base + SYS_CFGDATA);
+ writel(0, syscfg->base + SYS_CFGSTAT);
+ writel(command, syscfg->base + SYS_CFGCTRL);
+ mb();
+
+ /* The operation can take ages... Go to sleep, 100us initially */
+ tries = 100;
+ timeout = 100;
+ do {
+ if (!irqs_disabled()) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(usecs_to_jiffies(timeout));
+ if (signal_pending(current))
+ return -EINTR;
+ } else {
+ udelay(timeout);
+ }
+
+ status = readl(syscfg->base + SYS_CFGSTAT);
+ if (status & SYS_CFGSTAT_ERR)
+ return -EFAULT;
+
+ if (timeout > 20)
+ timeout -= 20;
+ } while (--tries && !(status & SYS_CFGSTAT_COMPLETE));
+ if (WARN_ON_ONCE(!tries))
+ return -ETIMEDOUT;
+
+ if (!write) {
+ *data = readl(syscfg->base + SYS_CFGDATA);
+ dev_dbg(syscfg->dev, "func %p, read data %x\n", func, *data);
}
- dev = device_create(vexpress_config_class, parent, 0,
- NULL, "%s.bridge", dev_name(parent));
+ return 0;
+}
+
+static int vexpress_syscfg_read(void *context, unsigned int index,
+ unsigned int *val)
+{
+ struct vexpress_syscfg_func *func = context;
+
+ return vexpress_syscfg_exec(func, index, false, val);
+}
+
+static int vexpress_syscfg_write(void *context, unsigned int index,
+ unsigned int val)
+{
+ struct vexpress_syscfg_func *func = context;
+
+ return vexpress_syscfg_exec(func, index, true, &val);
+}
+
+static struct regmap_config vexpress_syscfg_regmap_config = {
+ .lock = vexpress_config_lock,
+ .unlock = vexpress_config_unlock,
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_read = vexpress_syscfg_read,
+ .reg_write = vexpress_syscfg_write,
+ .reg_format_endian = REGMAP_ENDIAN_LITTLE,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
+};
+
- if (IS_ERR(dev))
- return dev;
+static struct regmap *vexpress_syscfg_regmap_init(struct device *dev,
+ void *context)
+{
+ int err;
+ struct vexpress_syscfg *syscfg = context;
+ struct vexpress_syscfg_func *func;
+ struct property *prop;
+ const __be32 *val = NULL;
+ __be32 energy_quirk[4];
+ int num;
+ u32 site, position, dcc;
+ int i;
+
+ err = vexpress_config_get_topo(dev->of_node, &site,
+ &position, &dcc);
+ if (err)
+ return ERR_PTR(err);
+
+ prop = of_find_property(dev->of_node,
+ "arm,vexpress-sysreg,func", NULL);
+ if (!prop)
+ return ERR_PTR(-EINVAL);
- bridge = devm_kmalloc(dev, sizeof(*bridge), GFP_KERNEL);
- if (!bridge) {
- put_device(dev);
- device_unregister(dev);
+ num = prop->length / sizeof(u32) / 2;
+ val = prop->value;
+
+ /*
+ * "arm,vexpress-energy" function used to be described
+ * by its first device only, now it requires both
+ */
+ if (num == 1 && of_device_is_compatible(dev->of_node,
+ "arm,vexpress-energy")) {
+ num = 2;
+ energy_quirk[0] = *val;
+ energy_quirk[2] = *val++;
+ energy_quirk[1] = *val;
+ energy_quirk[3] = cpu_to_be32(be32_to_cpup(val) + 1);
+ val = energy_quirk;
+ }
+
+ func = kzalloc(struct_size(func, template, num), GFP_KERNEL);
+ if (!func)
return ERR_PTR(-ENOMEM);
+
+ func->syscfg = syscfg;
+ func->num_templates = num;
+
+ for (i = 0; i < num; i++) {
+ u32 function, device;
+
+ function = be32_to_cpup(val++);
+ device = be32_to_cpup(val++);
+
+ dev_dbg(dev, "func %p: %u/%u/%u/%u/%u\n",
+ func, site, position, dcc,
+ function, device);
+
+ func->template[i] = SYS_CFGCTRL_DCC(dcc);
+ func->template[i] |= SYS_CFGCTRL_SITE(site);
+ func->template[i] |= SYS_CFGCTRL_POSITION(position);
+ func->template[i] |= SYS_CFGCTRL_FUNC(function);
+ func->template[i] |= SYS_CFGCTRL_DEVICE(device);
}
- bridge->ops = ops;
- bridge->context = context;
- dev_set_drvdata(dev, bridge);
+ vexpress_syscfg_regmap_config.max_register = num - 1;
- dev_dbg(parent, "Registered bridge '%s', parent node %p\n",
- dev_name(dev), parent->of_node);
+ func->regmap = regmap_init(dev, NULL, func,
+ &vexpress_syscfg_regmap_config);
- return dev;
-}
+ if (IS_ERR(func->regmap)) {
+ void *err = func->regmap;
+ kfree(func);
+ return err;
+ }
+
+ list_add(&func->list, &syscfg->funcs);
-static int vexpress_config_node_match(struct device *dev, const void *data)
+ return func->regmap;
+}
+
+static void vexpress_syscfg_regmap_exit(struct regmap *regmap, void *context)
{
- const struct device_node *node = data;
+ struct vexpress_syscfg *syscfg = context;
+ struct vexpress_syscfg_func *func, *tmp;
- dev_dbg(dev, "Parent node %p, looking for %p\n",
- dev->parent->of_node, node);
+ regmap_exit(regmap);
- return dev->parent->of_node == node;
+ list_for_each_entry_safe(func, tmp, &syscfg->funcs, list) {
+ if (func->regmap == regmap) {
+ list_del(&syscfg->funcs);
+ kfree(func);
+ break;
+ }
+ }
}
-static int vexpress_config_populate(struct device_node *node)
+static struct vexpress_config_bridge_ops vexpress_syscfg_bridge_ops = {
+ .regmap_init = vexpress_syscfg_regmap_init,
+ .regmap_exit = vexpress_syscfg_regmap_exit,
+};
+
+
+static int vexpress_syscfg_probe(struct platform_device *pdev)
{
- struct device_node *bridge;
- struct device *parent;
- int ret;
+ struct vexpress_syscfg *syscfg;
+ struct resource *res;
+ struct vexpress_config_bridge *bridge;
+ struct device_node *node;
+ int master;
+ u32 dt_hbi;
+
+ syscfg = devm_kzalloc(&pdev->dev, sizeof(*syscfg), GFP_KERNEL);
+ if (!syscfg)
+ return -ENOMEM;
+ syscfg->dev = &pdev->dev;
+ INIT_LIST_HEAD(&syscfg->funcs);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ syscfg->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(syscfg->base))
+ return PTR_ERR(syscfg->base);
- bridge = of_parse_phandle(node, "arm,vexpress,config-bridge", 0);
+ bridge = devm_kmalloc(&pdev->dev, sizeof(*bridge), GFP_KERNEL);
if (!bridge)
- return -EINVAL;
+ return -ENOMEM;
- parent = class_find_device(vexpress_config_class, NULL, bridge,
- vexpress_config_node_match);
- of_node_put(bridge);
- if (WARN_ON(!parent))
- return -ENODEV;
+ bridge->ops = &vexpress_syscfg_bridge_ops;
+ bridge->context = syscfg;
- ret = of_platform_populate(node, NULL, NULL, parent);
+ dev_set_drvdata(&pdev->dev, bridge);
- put_device(parent);
+ master = readl(syscfg->base + SYS_MISC) & SYS_MISC_MASTERSITE ?
+ VEXPRESS_SITE_DB2 : VEXPRESS_SITE_DB1;
+ vexpress_config_set_master(master);
- return ret;
-}
+ /* Confirm board type against DT property, if available */
+ if (of_property_read_u32(of_root, "arm,hbi", &dt_hbi) == 0) {
+ u32 id = readl(syscfg->base + (master == VEXPRESS_SITE_DB1 ?
+ SYS_PROCID0 : SYS_PROCID1));
+ u32 hbi = (id >> SYS_PROCIDx_HBI_SHIFT) & SYS_HBI_MASK;
-static int __init vexpress_config_init(void)
-{
- int err = 0;
- struct device_node *node;
+ if (WARN_ON(dt_hbi != hbi))
+ dev_warn(&pdev->dev, "DT HBI (%x) is not matching hardware (%x)!\n",
+ dt_hbi, hbi);
+ }
- /* Need the config devices early, before the "normal" devices... */
for_each_compatible_node(node, NULL, "arm,vexpress,config-bus") {
- err = vexpress_config_populate(node);
- if (err) {
- of_node_put(node);
- break;
- }
+ struct device_node *bridge_np;
+
+ bridge_np = of_parse_phandle(node, "arm,vexpress,config-bridge", 0);
+ if (bridge_np != pdev->dev.parent->of_node)
+ continue;
+
+ of_platform_populate(node, NULL, NULL, &pdev->dev);
}
- return err;
+ return 0;
}
-postcore_initcall(vexpress_config_init);
+static const struct platform_device_id vexpress_syscfg_id_table[] = {
+ { "vexpress-syscfg", },
+ {},
+};
+MODULE_DEVICE_TABLE(platform, vexpress_syscfg_id_table);
+
+static struct platform_driver vexpress_syscfg_driver = {
+ .driver.name = "vexpress-syscfg",
+ .id_table = vexpress_syscfg_id_table,
+ .probe = vexpress_syscfg_probe,
+};
+module_platform_driver(vexpress_syscfg_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index faca0f346fff..d82b3b7658bd 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -586,7 +586,7 @@ static int cdrom_mrw_set_lba_space(struct cdrom_device_info *cdi, int space)
return 0;
}
-int register_cdrom(struct cdrom_device_info *cdi)
+int register_cdrom(struct gendisk *disk, struct cdrom_device_info *cdi)
{
static char banner_printed;
const struct cdrom_device_ops *cdo = cdi->ops;
@@ -601,6 +601,9 @@ int register_cdrom(struct cdrom_device_info *cdi)
cdrom_sysctl_register();
}
+ cdi->disk = disk;
+ disk->cdi = cdi;
+
ENSURE(cdo, drive_status, CDC_DRIVE_STATUS);
if (cdo->check_events == NULL && cdo->media_changed == NULL)
WARN_ON_ONCE(cdo->capability & (CDC_MEDIA_CHANGED | CDC_SELECT_DISC));
@@ -2292,37 +2295,46 @@ retry:
return cdrom_read_cdda_old(cdi, ubuf, lba, nframes);
}
-static int cdrom_ioctl_multisession(struct cdrom_device_info *cdi,
- void __user *argp)
+int cdrom_multisession(struct cdrom_device_info *cdi,
+ struct cdrom_multisession *info)
{
- struct cdrom_multisession ms_info;
u8 requested_format;
int ret;
- cd_dbg(CD_DO_IOCTL, "entering CDROMMULTISESSION\n");
-
if (!(cdi->ops->capability & CDC_MULTI_SESSION))
return -ENOSYS;
- if (copy_from_user(&ms_info, argp, sizeof(ms_info)))
- return -EFAULT;
-
- requested_format = ms_info.addr_format;
+ requested_format = info->addr_format;
if (requested_format != CDROM_MSF && requested_format != CDROM_LBA)
return -EINVAL;
- ms_info.addr_format = CDROM_LBA;
+ info->addr_format = CDROM_LBA;
- ret = cdi->ops->get_last_session(cdi, &ms_info);
- if (ret)
- return ret;
+ ret = cdi->ops->get_last_session(cdi, info);
+ if (!ret)
+ sanitize_format(&info->addr, &info->addr_format,
+ requested_format);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cdrom_multisession);
- sanitize_format(&ms_info.addr, &ms_info.addr_format, requested_format);
+static int cdrom_ioctl_multisession(struct cdrom_device_info *cdi,
+ void __user *argp)
+{
+ struct cdrom_multisession info;
+ int ret;
+
+ cd_dbg(CD_DO_IOCTL, "entering CDROMMULTISESSION\n");
- if (copy_to_user(argp, &ms_info, sizeof(ms_info)))
+ if (copy_from_user(&info, argp, sizeof(info)))
+ return -EFAULT;
+ ret = cdrom_multisession(cdi, &info);
+ if (ret)
+ return ret;
+ if (copy_to_user(argp, &info, sizeof(info)))
return -EFAULT;
cd_dbg(CD_DO_IOCTL, "CDROMMULTISESSION successful\n");
- return 0;
+ return ret;
}
static int cdrom_ioctl_eject(struct cdrom_device_info *cdi)
@@ -2663,32 +2675,37 @@ static int cdrom_ioctl_read_tochdr(struct cdrom_device_info *cdi,
return 0;
}
+int cdrom_read_tocentry(struct cdrom_device_info *cdi,
+ struct cdrom_tocentry *entry)
+{
+ u8 requested_format = entry->cdte_format;
+ int ret;
+
+ if (requested_format != CDROM_MSF && requested_format != CDROM_LBA)
+ return -EINVAL;
+
+ /* make interface to low-level uniform */
+ entry->cdte_format = CDROM_MSF;
+ ret = cdi->ops->audio_ioctl(cdi, CDROMREADTOCENTRY, entry);
+ if (!ret)
+ sanitize_format(&entry->cdte_addr, &entry->cdte_format,
+ requested_format);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cdrom_read_tocentry);
+
static int cdrom_ioctl_read_tocentry(struct cdrom_device_info *cdi,
void __user *argp)
{
struct cdrom_tocentry entry;
- u8 requested_format;
int ret;
- /* cd_dbg(CD_DO_IOCTL, "entering CDROMREADTOCENTRY\n"); */
-
if (copy_from_user(&entry, argp, sizeof(entry)))
return -EFAULT;
-
- requested_format = entry.cdte_format;
- if (requested_format != CDROM_MSF && requested_format != CDROM_LBA)
- return -EINVAL;
- /* make interface to low-level uniform */
- entry.cdte_format = CDROM_MSF;
- ret = cdi->ops->audio_ioctl(cdi, CDROMREADTOCENTRY, &entry);
- if (ret)
- return ret;
- sanitize_format(&entry.cdte_addr, &entry.cdte_format, requested_format);
-
- if (copy_to_user(argp, &entry, sizeof(entry)))
+ ret = cdrom_read_tocentry(cdi, &entry);
+ if (!ret && copy_to_user(argp, &entry, sizeof(entry)))
return -EFAULT;
- /* cd_dbg(CD_DO_IOCTL, "CDROMREADTOCENTRY successful\n"); */
- return 0;
+ return ret;
}
static int cdrom_ioctl_play_msf(struct cdrom_device_info *cdi,
@@ -3518,7 +3535,7 @@ static int cdrom_print_info(const char *header, int val, char *info,
}
static int cdrom_sysctl_info(struct ctl_table *ctl, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void *buffer, size_t *lenp, loff_t *ppos)
{
int pos;
char *info = cdrom_sysctl_settings.info;
@@ -3631,7 +3648,7 @@ static void cdrom_update_settings(void)
}
static int cdrom_sysctl_handler(struct ctl_table *ctl, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void *buffer, size_t *lenp, loff_t *ppos)
{
int ret;
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index c51292c2a131..09b0cd292720 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -770,7 +770,7 @@ static int probe_gdrom(struct platform_device *devptr)
goto probe_fail_no_disk;
}
probe_gdrom_setupdisk();
- if (register_cdrom(gd.cd_info)) {
+ if (register_cdrom(gd.disk, gd.cd_info)) {
err = -ENODEV;
goto probe_fail_cdrom_register;
}
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index d4665fe9ccd2..ac25833eb19e 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -209,7 +209,7 @@ config DTLK
config XILINX_HWICAP
tristate "Xilinx HWICAP Support"
- depends on XILINX_VIRTEX || MICROBLAZE
+ depends on MICROBLAZE
help
This option enables support for Xilinx Internal Configuration
Access Port (ICAP) driver. The ICAP is used on Xilinx Virtex
diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
index 47098648502d..00ff5fcb808a 100644
--- a/drivers/char/agp/frontend.c
+++ b/drivers/char/agp/frontend.c
@@ -39,7 +39,6 @@
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
-#include <asm/pgtable.h>
#include "agp.h"
struct agp_front_data agp_fe;
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index 9e84239f88d4..3ffbb1c80c5c 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -42,7 +42,6 @@
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif
-#include <asm/pgtable.h>
#include "agp.h"
__u32 *agp_gatt_table;
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 66a62d17a3f5..4b34a5195c65 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -846,6 +846,7 @@ void intel_gtt_insert_page(dma_addr_t addr,
unsigned int flags)
{
intel_private.driver->write_entry(addr, pg, flags);
+ readl(intel_private.gtt + pg);
if (intel_private.driver->chipset_flush)
intel_private.driver->chipset_flush();
}
@@ -871,7 +872,7 @@ void intel_gtt_insert_sg_entries(struct sg_table *st,
j++;
}
}
- wmb();
+ readl(intel_private.gtt + j - 1);
if (intel_private.driver->chipset_flush)
intel_private.driver->chipset_flush();
}
@@ -1105,6 +1106,7 @@ static void i9xx_cleanup(void)
static void i9xx_chipset_flush(void)
{
+ wmb();
if (intel_private.i9xx_flush_page)
writel(1, intel_private.i9xx_flush_page);
}
@@ -1405,13 +1407,16 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
dev_info(&bridge_pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name);
- mask = intel_private.driver->dma_mask_size;
- if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
- dev_err(&intel_private.pcidev->dev,
- "set gfx device dma mask %d-bit failed!\n", mask);
- else
- pci_set_consistent_dma_mask(intel_private.pcidev,
- DMA_BIT_MASK(mask));
+ if (bridge) {
+ mask = intel_private.driver->dma_mask_size;
+ if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
+ dev_err(&intel_private.pcidev->dev,
+ "set gfx device dma mask %d-bit failed!\n",
+ mask);
+ else
+ pci_set_consistent_dma_mask(intel_private.pcidev,
+ DMA_BIT_MASK(mask));
+ }
if (intel_gtt_init() != 0) {
intel_gmch_remove();
diff --git a/drivers/char/bsr.c b/drivers/char/bsr.c
index e5e5333f302d..cce2af5df7b4 100644
--- a/drivers/char/bsr.c
+++ b/drivers/char/bsr.c
@@ -17,7 +17,6 @@
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/slab.h>
-#include <asm/pgtable.h>
#include <asm/io.h>
/*
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 9bc46da8d77a..ac00d78ee9cc 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -474,6 +474,19 @@ config HW_RANDOM_KEYSTONE
help
This option enables Keystone's hardware random generator.
+config HW_RANDOM_CCTRNG
+ tristate "Arm CryptoCell True Random Number Generator support"
+ depends on HAS_IOMEM && OF
+ help
+ Say 'Y' to enable the True Random Number Generator driver for the
+ Arm TrustZone CryptoCell family of processors.
+ Currently the CryptoCell 713 and 703 are supported.
+	  The driver is supported only on SoCs where a Trusted Execution
+	  Environment is not used.
+ Choose 'M' to compile this driver as a module. The module
+ will be called cctrng.
+ If unsure, say 'N'.
+
endif # HW_RANDOM
config UML_RANDOM
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index a7801b49ce6c..2c6724735345 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -41,3 +41,4 @@ obj-$(CONFIG_HW_RANDOM_S390) += s390-trng.o
obj-$(CONFIG_HW_RANDOM_KEYSTONE) += ks-sa-rng.o
obj-$(CONFIG_HW_RANDOM_OPTEE) += optee-rng.o
obj-$(CONFIG_HW_RANDOM_NPCM) += npcm-rng.o
+obj-$(CONFIG_HW_RANDOM_CCTRNG) += cctrng.o
diff --git a/drivers/char/hw_random/cctrng.c b/drivers/char/hw_random/cctrng.c
new file mode 100644
index 000000000000..619148fb2dc9
--- /dev/null
+++ b/drivers/char/hw_random/cctrng.c
@@ -0,0 +1,735 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2019-2020 ARM Limited or its affiliates. */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+#include <linux/workqueue.h>
+#include <linux/circ_buf.h>
+#include <linux/completion.h>
+#include <linux/of.h>
+#include <linux/bitfield.h>
+#include <linux/fips.h>
+
+#include "cctrng.h"
+
+#define CC_REG_LOW(name) (name ## _BIT_SHIFT)
+#define CC_REG_HIGH(name) (CC_REG_LOW(name) + name ## _BIT_SIZE - 1)
+#define CC_GENMASK(name) GENMASK(CC_REG_HIGH(name), CC_REG_LOW(name))
+
+#define CC_REG_FLD_GET(reg_name, fld_name, reg_val) \
+ (FIELD_GET(CC_GENMASK(CC_ ## reg_name ## _ ## fld_name), reg_val))
+
+#define CC_HW_RESET_LOOP_COUNT 10
+#define CC_TRNG_SUSPEND_TIMEOUT 3000
+
+/* data circular buffer in words must be:
+ * - of a power-of-2 size (limitation of circ_buf.h macros)
+ * - at least 6, the size generated in the EHR according to HW implementation
+ */
+#define CCTRNG_DATA_BUF_WORDS 32
+
+/* The timeout for the TRNG operation should be calculated with the formula:
+ * Timeout = EHR_NUM * VN_COEFF * EHR_LENGTH * SAMPLE_CNT * SCALE_VALUE
+ * while:
+ * - SAMPLE_CNT is input value from the characterisation process
+ * - all the rest are constants
+ */
+#define EHR_NUM 1
+#define VN_COEFF 4
+#define EHR_LENGTH CC_TRNG_EHR_IN_BITS
+#define SCALE_VALUE 2
+#define CCTRNG_TIMEOUT(smpl_cnt) \
+ (EHR_NUM * VN_COEFF * EHR_LENGTH * smpl_cnt * SCALE_VALUE)
+
+struct cctrng_drvdata {
+ struct platform_device *pdev;
+ void __iomem *cc_base;
+ struct clk *clk;
+ struct hwrng rng;
+ u32 active_rosc;
+ /* Sampling interval for each ring oscillator:
+ * count of ring oscillator cycles between consecutive bits sampling.
+ * Value of 0 indicates non-valid rosc
+ */
+ u32 smpl_ratio[CC_TRNG_NUM_OF_ROSCS];
+
+ u32 data_buf[CCTRNG_DATA_BUF_WORDS];
+ struct circ_buf circ;
+ struct work_struct compwork;
+ struct work_struct startwork;
+
+ /* pending_hw - 1 when HW is pending, 0 when it is idle */
+ atomic_t pending_hw;
+
+ /* protects against multiple concurrent consumers of data_buf */
+ spinlock_t read_lock;
+};
+
+
+/* functions for write/read CC registers */
+static inline void cc_iowrite(struct cctrng_drvdata *drvdata, u32 reg, u32 val)
+{
+ iowrite32(val, (drvdata->cc_base + reg));
+}
+static inline u32 cc_ioread(struct cctrng_drvdata *drvdata, u32 reg)
+{
+ return ioread32(drvdata->cc_base + reg);
+}
+
+
+static int cc_trng_pm_get(struct device *dev)
+{
+ int rc = 0;
+
+ rc = pm_runtime_get_sync(dev);
+
+ /* pm_runtime_get_sync() can return 1 as a valid return code */
+ return (rc == 1 ? 0 : rc);
+}
+
+static void cc_trng_pm_put_suspend(struct device *dev)
+{
+ int rc = 0;
+
+ pm_runtime_mark_last_busy(dev);
+ rc = pm_runtime_put_autosuspend(dev);
+ if (rc)
+ dev_err(dev, "pm_runtime_put_autosuspend returned %x\n", rc);
+}
+
+static int cc_trng_pm_init(struct cctrng_drvdata *drvdata)
+{
+ struct device *dev = &(drvdata->pdev->dev);
+
+ /* must be before the enabling to avoid redundant suspending */
+ pm_runtime_set_autosuspend_delay(dev, CC_TRNG_SUSPEND_TIMEOUT);
+ pm_runtime_use_autosuspend(dev);
+ /* set us as active - note we won't do PM ops until cc_trng_pm_go()! */
+ return pm_runtime_set_active(dev);
+}
+
+static void cc_trng_pm_go(struct cctrng_drvdata *drvdata)
+{
+ struct device *dev = &(drvdata->pdev->dev);
+
+ /* enable the PM module*/
+ pm_runtime_enable(dev);
+}
+
+static void cc_trng_pm_fini(struct cctrng_drvdata *drvdata)
+{
+ struct device *dev = &(drvdata->pdev->dev);
+
+ pm_runtime_disable(dev);
+}
+
+
+static inline int cc_trng_parse_sampling_ratio(struct cctrng_drvdata *drvdata)
+{
+ struct device *dev = &(drvdata->pdev->dev);
+ struct device_node *np = drvdata->pdev->dev.of_node;
+ int rc;
+ int i;
+ /* ret will be set to 0 if at least one rosc has (sampling ratio > 0) */
+ int ret = -EINVAL;
+
+ rc = of_property_read_u32_array(np, "arm,rosc-ratio",
+ drvdata->smpl_ratio,
+ CC_TRNG_NUM_OF_ROSCS);
+ if (rc) {
+ /* arm,rosc-ratio was not found in device tree */
+ return rc;
+ }
+
+ /* verify that at least one rosc has (sampling ratio > 0) */
+ for (i = 0; i < CC_TRNG_NUM_OF_ROSCS; ++i) {
+ dev_dbg(dev, "rosc %d sampling ratio %u",
+ i, drvdata->smpl_ratio[i]);
+
+ if (drvdata->smpl_ratio[i] > 0)
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static int cc_trng_change_rosc(struct cctrng_drvdata *drvdata)
+{
+ struct device *dev = &(drvdata->pdev->dev);
+
+ dev_dbg(dev, "cctrng change rosc (was %d)\n", drvdata->active_rosc);
+ drvdata->active_rosc += 1;
+
+ while (drvdata->active_rosc < CC_TRNG_NUM_OF_ROSCS) {
+ if (drvdata->smpl_ratio[drvdata->active_rosc] > 0)
+ return 0;
+
+ drvdata->active_rosc += 1;
+ }
+ return -EINVAL;
+}
+
+
+static void cc_trng_enable_rnd_source(struct cctrng_drvdata *drvdata)
+{
+ u32 max_cycles;
+
+ /* Set watchdog threshold to maximal allowed time (in CPU cycles) */
+ max_cycles = CCTRNG_TIMEOUT(drvdata->smpl_ratio[drvdata->active_rosc]);
+ cc_iowrite(drvdata, CC_RNG_WATCHDOG_VAL_REG_OFFSET, max_cycles);
+
+ /* enable the RND source */
+ cc_iowrite(drvdata, CC_RND_SOURCE_ENABLE_REG_OFFSET, 0x1);
+
+ /* unmask RNG interrupts */
+ cc_iowrite(drvdata, CC_RNG_IMR_REG_OFFSET, (u32)~CC_RNG_INT_MASK);
+}
+
+
+/* increase circular data buffer index (head/tail) */
+static inline void circ_idx_inc(int *idx, int bytes)
+{
+ *idx += (bytes + 3) >> 2;
+ *idx &= (CCTRNG_DATA_BUF_WORDS - 1);
+}
+
+static inline size_t circ_buf_space(struct cctrng_drvdata *drvdata)
+{
+ return CIRC_SPACE(drvdata->circ.head,
+ drvdata->circ.tail, CCTRNG_DATA_BUF_WORDS);
+
+}
+
+static int cctrng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+ /* current implementation ignores "wait" */
+
+ struct cctrng_drvdata *drvdata = (struct cctrng_drvdata *)rng->priv;
+ struct device *dev = &(drvdata->pdev->dev);
+ u32 *buf = (u32 *)drvdata->circ.buf;
+ size_t copied = 0;
+ size_t cnt_w;
+ size_t size;
+ size_t left;
+
+ if (!spin_trylock(&drvdata->read_lock)) {
+ /* concurrent consumers from data_buf cannot be served */
+ dev_dbg_ratelimited(dev, "unable to hold lock\n");
+ return 0;
+ }
+
+ /* copy till end of data buffer (without wrap back) */
+ cnt_w = CIRC_CNT_TO_END(drvdata->circ.head,
+ drvdata->circ.tail, CCTRNG_DATA_BUF_WORDS);
+ size = min((cnt_w<<2), max);
+ memcpy(data, &(buf[drvdata->circ.tail]), size);
+ copied = size;
+ circ_idx_inc(&drvdata->circ.tail, size);
+ /* copy rest of data in data buffer */
+ left = max - copied;
+ if (left > 0) {
+ cnt_w = CIRC_CNT(drvdata->circ.head,
+ drvdata->circ.tail, CCTRNG_DATA_BUF_WORDS);
+ size = min((cnt_w<<2), left);
+ memcpy(data, &(buf[drvdata->circ.tail]), size);
+ copied += size;
+ circ_idx_inc(&drvdata->circ.tail, size);
+ }
+
+ spin_unlock(&drvdata->read_lock);
+
+ if (circ_buf_space(drvdata) >= CC_TRNG_EHR_IN_WORDS) {
+ if (atomic_cmpxchg(&drvdata->pending_hw, 0, 1) == 0) {
+ /* re-check space in buffer to avoid potential race */
+ if (circ_buf_space(drvdata) >= CC_TRNG_EHR_IN_WORDS) {
+ /* increment device's usage counter */
+ int rc = cc_trng_pm_get(dev);
+
+ if (rc) {
+ dev_err(dev,
+ "cc_trng_pm_get returned %x\n",
+ rc);
+ return rc;
+ }
+
+ /* schedule execution of deferred work handler
+ * for filling of data buffer
+ */
+ schedule_work(&drvdata->startwork);
+ } else {
+ atomic_set(&drvdata->pending_hw, 0);
+ }
+ }
+ }
+
+ return copied;
+}
+
+static void cc_trng_hw_trigger(struct cctrng_drvdata *drvdata)
+{
+ u32 tmp_smpl_cnt = 0;
+ struct device *dev = &(drvdata->pdev->dev);
+
+ dev_dbg(dev, "cctrng hw trigger.\n");
+
+ /* enable the HW RND clock */
+ cc_iowrite(drvdata, CC_RNG_CLK_ENABLE_REG_OFFSET, 0x1);
+
+ /* do software reset */
+ cc_iowrite(drvdata, CC_RNG_SW_RESET_REG_OFFSET, 0x1);
+ /* in order to verify that the reset has completed,
+	 * the sample count needs to be verified
+ */
+ do {
+ /* enable the HW RND clock */
+ cc_iowrite(drvdata, CC_RNG_CLK_ENABLE_REG_OFFSET, 0x1);
+
+ /* set sampling ratio (rng_clocks) between consecutive bits */
+ cc_iowrite(drvdata, CC_SAMPLE_CNT1_REG_OFFSET,
+ drvdata->smpl_ratio[drvdata->active_rosc]);
+
+ /* read the sampling ratio */
+ tmp_smpl_cnt = cc_ioread(drvdata, CC_SAMPLE_CNT1_REG_OFFSET);
+
+ } while (tmp_smpl_cnt != drvdata->smpl_ratio[drvdata->active_rosc]);
+
+ /* disable the RND source for setting new parameters in HW */
+ cc_iowrite(drvdata, CC_RND_SOURCE_ENABLE_REG_OFFSET, 0);
+
+ cc_iowrite(drvdata, CC_RNG_ICR_REG_OFFSET, 0xFFFFFFFF);
+
+ cc_iowrite(drvdata, CC_TRNG_CONFIG_REG_OFFSET, drvdata->active_rosc);
+
+ /* Debug Control register: set to 0 - no bypasses */
+ cc_iowrite(drvdata, CC_TRNG_DEBUG_CONTROL_REG_OFFSET, 0);
+
+ cc_trng_enable_rnd_source(drvdata);
+}
+
+static void cc_trng_compwork_handler(struct work_struct *w)
+{
+ u32 isr = 0;
+ u32 ehr_valid = 0;
+ struct cctrng_drvdata *drvdata =
+ container_of(w, struct cctrng_drvdata, compwork);
+ struct device *dev = &(drvdata->pdev->dev);
+ int i;
+
+ /* stop DMA and the RNG source */
+ cc_iowrite(drvdata, CC_RNG_DMA_ENABLE_REG_OFFSET, 0);
+ cc_iowrite(drvdata, CC_RND_SOURCE_ENABLE_REG_OFFSET, 0);
+
+ /* read RNG_ISR and check for errors */
+ isr = cc_ioread(drvdata, CC_RNG_ISR_REG_OFFSET);
+ ehr_valid = CC_REG_FLD_GET(RNG_ISR, EHR_VALID, isr);
+ dev_dbg(dev, "Got RNG_ISR=0x%08X (EHR_VALID=%u)\n", isr, ehr_valid);
+
+ if (fips_enabled && CC_REG_FLD_GET(RNG_ISR, CRNGT_ERR, isr)) {
+ fips_fail_notify();
+ /* FIPS error is fatal */
+ panic("Got HW CRNGT error while fips is enabled!\n");
+ }
+
+ /* Clear all pending RNG interrupts */
+ cc_iowrite(drvdata, CC_RNG_ICR_REG_OFFSET, isr);
+
+
+ if (!ehr_valid) {
+ /* in case of AUTOCORR/TIMEOUT error, try the next ROSC */
+ if (CC_REG_FLD_GET(RNG_ISR, AUTOCORR_ERR, isr) ||
+ CC_REG_FLD_GET(RNG_ISR, WATCHDOG, isr)) {
+ dev_dbg(dev, "cctrng autocorr/timeout error.\n");
+ goto next_rosc;
+ }
+
+ /* in case of VN error, ignore it */
+ }
+
+ /* read EHR data from registers */
+ for (i = 0; i < CC_TRNG_EHR_IN_WORDS; i++) {
+ /* calc word ptr in data_buf */
+ u32 *buf = (u32 *)drvdata->circ.buf;
+
+ buf[drvdata->circ.head] = cc_ioread(drvdata,
+ CC_EHR_DATA_0_REG_OFFSET + (i*sizeof(u32)));
+
+ /* EHR_DATA registers are cleared on read. In case 0 value was
+ * returned, restart the entropy collection.
+ */
+ if (buf[drvdata->circ.head] == 0) {
+ dev_dbg(dev, "Got 0 value in EHR. active_rosc %u\n",
+ drvdata->active_rosc);
+ goto next_rosc;
+ }
+
+ circ_idx_inc(&drvdata->circ.head, 1<<2);
+ }
+
+ atomic_set(&drvdata->pending_hw, 0);
+
+ /* continue to fill data buffer if needed */
+ if (circ_buf_space(drvdata) >= CC_TRNG_EHR_IN_WORDS) {
+ if (atomic_cmpxchg(&drvdata->pending_hw, 0, 1) == 0) {
+ /* Re-enable rnd source */
+ cc_trng_enable_rnd_source(drvdata);
+ return;
+ }
+ }
+
+ cc_trng_pm_put_suspend(dev);
+
+ dev_dbg(dev, "compwork handler done\n");
+ return;
+
+next_rosc:
+ if ((circ_buf_space(drvdata) >= CC_TRNG_EHR_IN_WORDS) &&
+ (cc_trng_change_rosc(drvdata) == 0)) {
+ /* trigger trng hw with next rosc */
+ cc_trng_hw_trigger(drvdata);
+ } else {
+ atomic_set(&drvdata->pending_hw, 0);
+ cc_trng_pm_put_suspend(dev);
+ }
+}
+
+static irqreturn_t cc_isr(int irq, void *dev_id)
+{
+ struct cctrng_drvdata *drvdata = (struct cctrng_drvdata *)dev_id;
+ struct device *dev = &(drvdata->pdev->dev);
+ u32 irr;
+
+ /* if driver suspended return, probably shared interrupt */
+ if (pm_runtime_suspended(dev))
+ return IRQ_NONE;
+
+ /* read the interrupt status */
+ irr = cc_ioread(drvdata, CC_HOST_RGF_IRR_REG_OFFSET);
+ dev_dbg(dev, "Got IRR=0x%08X\n", irr);
+
+ if (irr == 0) /* Probably shared interrupt line */
+ return IRQ_NONE;
+
+ /* clear interrupt - must be before processing events */
+ cc_iowrite(drvdata, CC_HOST_RGF_ICR_REG_OFFSET, irr);
+
+ /* RNG interrupt - most probable */
+ if (irr & CC_HOST_RNG_IRQ_MASK) {
+ /* Mask RNG interrupts - will be unmasked in deferred work */
+ cc_iowrite(drvdata, CC_RNG_IMR_REG_OFFSET, 0xFFFFFFFF);
+
+		/* We clear the RNG interrupt here,
+		 * to avoid it firing again once we unmask RNG interrupts.
+ */
+ cc_iowrite(drvdata, CC_HOST_RGF_ICR_REG_OFFSET,
+ CC_HOST_RNG_IRQ_MASK);
+
+ irr &= ~CC_HOST_RNG_IRQ_MASK;
+
+ /* schedule execution of deferred work handler */
+ schedule_work(&drvdata->compwork);
+ }
+
+ if (irr) {
+ dev_dbg_ratelimited(dev,
+ "IRR includes unknown cause bits (0x%08X)\n",
+ irr);
+ /* Just warning */
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void cc_trng_startwork_handler(struct work_struct *w)
+{
+ struct cctrng_drvdata *drvdata =
+ container_of(w, struct cctrng_drvdata, startwork);
+
+ drvdata->active_rosc = 0;
+ cc_trng_hw_trigger(drvdata);
+}
+
+
+static int cc_trng_clk_init(struct cctrng_drvdata *drvdata)
+{
+ struct clk *clk;
+ struct device *dev = &(drvdata->pdev->dev);
+ int rc = 0;
+
+ clk = devm_clk_get_optional(dev, NULL);
+ if (IS_ERR(clk)) {
+ if (PTR_ERR(clk) != -EPROBE_DEFER)
+ dev_err(dev, "Error getting clock: %pe\n", clk);
+ return PTR_ERR(clk);
+ }
+ drvdata->clk = clk;
+
+ rc = clk_prepare_enable(drvdata->clk);
+ if (rc) {
+ dev_err(dev, "Failed to enable clock\n");
+ return rc;
+ }
+
+ return 0;
+}
+
+static void cc_trng_clk_fini(struct cctrng_drvdata *drvdata)
+{
+ clk_disable_unprepare(drvdata->clk);
+}
+
+
+static int cctrng_probe(struct platform_device *pdev)
+{
+ struct resource *req_mem_cc_regs = NULL;
+ struct cctrng_drvdata *drvdata;
+ struct device *dev = &pdev->dev;
+ int rc = 0;
+ u32 val;
+ int irq;
+
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ drvdata->rng.name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
+ if (!drvdata->rng.name)
+ return -ENOMEM;
+
+ drvdata->rng.read = cctrng_read;
+ drvdata->rng.priv = (unsigned long)drvdata;
+ drvdata->rng.quality = CC_TRNG_QUALITY;
+
+ platform_set_drvdata(pdev, drvdata);
+ drvdata->pdev = pdev;
+
+ drvdata->circ.buf = (char *)drvdata->data_buf;
+
+ /* Get device resources */
+ /* First CC registers space */
+ req_mem_cc_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ /* Map registers space */
+ drvdata->cc_base = devm_ioremap_resource(dev, req_mem_cc_regs);
+ if (IS_ERR(drvdata->cc_base)) {
+ dev_err(dev, "Failed to ioremap registers");
+ return PTR_ERR(drvdata->cc_base);
+ }
+
+ dev_dbg(dev, "Got MEM resource (%s): %pR\n", req_mem_cc_regs->name,
+ req_mem_cc_regs);
+ dev_dbg(dev, "CC registers mapped from %pa to 0x%p\n",
+ &req_mem_cc_regs->start, drvdata->cc_base);
+
+ /* Then IRQ */
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "Failed getting IRQ resource\n");
+ return irq;
+ }
+
+ /* parse sampling rate from device tree */
+ rc = cc_trng_parse_sampling_ratio(drvdata);
+ if (rc) {
+ dev_err(dev, "Failed to get legal sampling ratio for rosc\n");
+ return rc;
+ }
+
+ rc = cc_trng_clk_init(drvdata);
+ if (rc) {
+ dev_err(dev, "cc_trng_clk_init failed\n");
+ return rc;
+ }
+
+ INIT_WORK(&drvdata->compwork, cc_trng_compwork_handler);
+ INIT_WORK(&drvdata->startwork, cc_trng_startwork_handler);
+ spin_lock_init(&drvdata->read_lock);
+
+ /* register the driver isr function */
+ rc = devm_request_irq(dev, irq, cc_isr, IRQF_SHARED, "cctrng", drvdata);
+ if (rc) {
+ dev_err(dev, "Could not register to interrupt %d\n", irq);
+ goto post_clk_err;
+ }
+ dev_dbg(dev, "Registered to IRQ: %d\n", irq);
+
+ /* Clear all pending interrupts */
+ val = cc_ioread(drvdata, CC_HOST_RGF_IRR_REG_OFFSET);
+ dev_dbg(dev, "IRR=0x%08X\n", val);
+ cc_iowrite(drvdata, CC_HOST_RGF_ICR_REG_OFFSET, val);
+
+ /* unmask HOST RNG interrupt */
+ cc_iowrite(drvdata, CC_HOST_RGF_IMR_REG_OFFSET,
+ cc_ioread(drvdata, CC_HOST_RGF_IMR_REG_OFFSET) &
+ ~CC_HOST_RNG_IRQ_MASK);
+
+ /* init PM */
+ rc = cc_trng_pm_init(drvdata);
+ if (rc) {
+ dev_err(dev, "cc_trng_pm_init failed\n");
+ goto post_clk_err;
+ }
+
+ /* increment device's usage counter */
+ rc = cc_trng_pm_get(dev);
+ if (rc) {
+ dev_err(dev, "cc_trng_pm_get returned %x\n", rc);
+ goto post_pm_err;
+ }
+
+ /* set pending_hw to verify that HW won't be triggered from read */
+ atomic_set(&drvdata->pending_hw, 1);
+
+ /* registration of the hwrng device */
+ rc = hwrng_register(&drvdata->rng);
+ if (rc) {
+ dev_err(dev, "Could not register hwrng device.\n");
+ goto post_pm_err;
+ }
+
+ /* trigger HW to start generate data */
+ drvdata->active_rosc = 0;
+ cc_trng_hw_trigger(drvdata);
+
+ /* All set, we can allow auto-suspend */
+ cc_trng_pm_go(drvdata);
+
+ dev_info(dev, "ARM cctrng device initialized\n");
+
+ return 0;
+
+post_pm_err:
+ cc_trng_pm_fini(drvdata);
+
+post_clk_err:
+ cc_trng_clk_fini(drvdata);
+
+ return rc;
+}
+
+static int cctrng_remove(struct platform_device *pdev)
+{
+ struct cctrng_drvdata *drvdata = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+
+ dev_dbg(dev, "Releasing cctrng resources...\n");
+
+ hwrng_unregister(&drvdata->rng);
+
+ cc_trng_pm_fini(drvdata);
+
+ cc_trng_clk_fini(drvdata);
+
+ dev_info(dev, "ARM cctrng device terminated\n");
+
+ return 0;
+}
+
+static int __maybe_unused cctrng_suspend(struct device *dev)
+{
+ struct cctrng_drvdata *drvdata = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "set HOST_POWER_DOWN_EN\n");
+ cc_iowrite(drvdata, CC_HOST_POWER_DOWN_EN_REG_OFFSET,
+ POWER_DOWN_ENABLE);
+
+ clk_disable_unprepare(drvdata->clk);
+
+ return 0;
+}
+
+static bool cctrng_wait_for_reset_completion(struct cctrng_drvdata *drvdata)
+{
+ unsigned int val;
+ unsigned int i;
+
+ for (i = 0; i < CC_HW_RESET_LOOP_COUNT; i++) {
+ /* in cc7x3 NVM_IS_IDLE indicates that CC reset is
+ * completed and device is fully functional
+ */
+ val = cc_ioread(drvdata, CC_NVM_IS_IDLE_REG_OFFSET);
+ if (val & BIT(CC_NVM_IS_IDLE_VALUE_BIT_SHIFT)) {
+ /* hw indicate reset completed */
+ return true;
+ }
+ /* allow scheduling other process on the processor */
+ schedule();
+ }
+ /* reset not completed */
+ return false;
+}
+
+static int __maybe_unused cctrng_resume(struct device *dev)
+{
+ struct cctrng_drvdata *drvdata = dev_get_drvdata(dev);
+ int rc;
+
+ dev_dbg(dev, "unset HOST_POWER_DOWN_EN\n");
+ /* Enables the device source clk */
+ rc = clk_prepare_enable(drvdata->clk);
+ if (rc) {
+ dev_err(dev, "failed getting clock back on. We're toast.\n");
+ return rc;
+ }
+
+ /* wait for Cryptocell reset completion */
+ if (!cctrng_wait_for_reset_completion(drvdata)) {
+ dev_err(dev, "Cryptocell reset not completed");
+ return -EBUSY;
+ }
+
+ /* unmask HOST RNG interrupt */
+ cc_iowrite(drvdata, CC_HOST_RGF_IMR_REG_OFFSET,
+ cc_ioread(drvdata, CC_HOST_RGF_IMR_REG_OFFSET) &
+ ~CC_HOST_RNG_IRQ_MASK);
+
+ cc_iowrite(drvdata, CC_HOST_POWER_DOWN_EN_REG_OFFSET,
+ POWER_DOWN_DISABLE);
+
+ return 0;
+}
+
+static UNIVERSAL_DEV_PM_OPS(cctrng_pm, cctrng_suspend, cctrng_resume, NULL);
+
+static const struct of_device_id arm_cctrng_dt_match[] = {
+ { .compatible = "arm,cryptocell-713-trng", },
+ { .compatible = "arm,cryptocell-703-trng", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, arm_cctrng_dt_match);
+
+static struct platform_driver cctrng_driver = {
+ .driver = {
+ .name = "cctrng",
+ .of_match_table = arm_cctrng_dt_match,
+ .pm = &cctrng_pm,
+ },
+ .probe = cctrng_probe,
+ .remove = cctrng_remove,
+};
+
+static int __init cctrng_mod_init(void)
+{
+ /* Compile time assertion checks */
+ BUILD_BUG_ON(CCTRNG_DATA_BUF_WORDS < 6);
+ BUILD_BUG_ON((CCTRNG_DATA_BUF_WORDS & (CCTRNG_DATA_BUF_WORDS-1)) != 0);
+
+ return platform_driver_register(&cctrng_driver);
+}
+module_init(cctrng_mod_init);
+
+static void __exit cctrng_mod_exit(void)
+{
+ platform_driver_unregister(&cctrng_driver);
+}
+module_exit(cctrng_mod_exit);
+
+/* Module description */
+MODULE_DESCRIPTION("ARM CryptoCell TRNG Driver");
+MODULE_AUTHOR("ARM");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/hw_random/cctrng.h b/drivers/char/hw_random/cctrng.h
new file mode 100644
index 000000000000..1f2fde95adcb
--- /dev/null
+++ b/drivers/char/hw_random/cctrng.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2019-2020 ARM Limited or its affiliates. */
+
+#include <linux/bitops.h>
+
+#define POWER_DOWN_ENABLE 0x01
+#define POWER_DOWN_DISABLE 0x00
+
+/* hwrng quality: bits of true entropy per 1024 bits of input */
+#define CC_TRNG_QUALITY 1024
+
+/* CryptoCell TRNG HW definitions */
+#define CC_TRNG_NUM_OF_ROSCS 4
+/* The number of words generated in the entropy holding register (EHR)
+ * 6 words (192 bit) according to HW implementation
+ */
+#define CC_TRNG_EHR_IN_WORDS 6
+#define CC_TRNG_EHR_IN_BITS (CC_TRNG_EHR_IN_WORDS * BITS_PER_TYPE(u32))
+
+#define CC_HOST_RNG_IRQ_MASK BIT(CC_HOST_RGF_IRR_RNG_INT_BIT_SHIFT)
+
+/* RNG interrupt mask */
+#define CC_RNG_INT_MASK (BIT(CC_RNG_IMR_EHR_VALID_INT_MASK_BIT_SHIFT) | \
+ BIT(CC_RNG_IMR_AUTOCORR_ERR_INT_MASK_BIT_SHIFT) | \
+ BIT(CC_RNG_IMR_CRNGT_ERR_INT_MASK_BIT_SHIFT) | \
+ BIT(CC_RNG_IMR_VN_ERR_INT_MASK_BIT_SHIFT) | \
+ BIT(CC_RNG_IMR_WATCHDOG_INT_MASK_BIT_SHIFT))
+
+// --------------------------------------
+// BLOCK: RNG
+// --------------------------------------
+#define CC_RNG_IMR_REG_OFFSET 0x0100UL
+#define CC_RNG_IMR_EHR_VALID_INT_MASK_BIT_SHIFT 0x0UL
+#define CC_RNG_IMR_AUTOCORR_ERR_INT_MASK_BIT_SHIFT 0x1UL
+#define CC_RNG_IMR_CRNGT_ERR_INT_MASK_BIT_SHIFT 0x2UL
+#define CC_RNG_IMR_VN_ERR_INT_MASK_BIT_SHIFT 0x3UL
+#define CC_RNG_IMR_WATCHDOG_INT_MASK_BIT_SHIFT 0x4UL
+#define CC_RNG_ISR_REG_OFFSET 0x0104UL
+#define CC_RNG_ISR_EHR_VALID_BIT_SHIFT 0x0UL
+#define CC_RNG_ISR_EHR_VALID_BIT_SIZE 0x1UL
+#define CC_RNG_ISR_AUTOCORR_ERR_BIT_SHIFT 0x1UL
+#define CC_RNG_ISR_AUTOCORR_ERR_BIT_SIZE 0x1UL
+#define CC_RNG_ISR_CRNGT_ERR_BIT_SHIFT 0x2UL
+#define CC_RNG_ISR_CRNGT_ERR_BIT_SIZE 0x1UL
+#define CC_RNG_ISR_WATCHDOG_BIT_SHIFT 0x4UL
+#define CC_RNG_ISR_WATCHDOG_BIT_SIZE 0x1UL
+#define CC_RNG_ICR_REG_OFFSET 0x0108UL
+#define CC_TRNG_CONFIG_REG_OFFSET 0x010CUL
+#define CC_EHR_DATA_0_REG_OFFSET 0x0114UL
+#define CC_RND_SOURCE_ENABLE_REG_OFFSET 0x012CUL
+#define CC_SAMPLE_CNT1_REG_OFFSET 0x0130UL
+#define CC_TRNG_DEBUG_CONTROL_REG_OFFSET 0x0138UL
+#define CC_RNG_SW_RESET_REG_OFFSET 0x0140UL
+#define CC_RNG_CLK_ENABLE_REG_OFFSET 0x01C4UL
+#define CC_RNG_DMA_ENABLE_REG_OFFSET 0x01C8UL
+#define CC_RNG_WATCHDOG_VAL_REG_OFFSET 0x01D8UL
+// --------------------------------------
+// BLOCK: SEC_HOST_RGF
+// --------------------------------------
+#define CC_HOST_RGF_IRR_REG_OFFSET 0x0A00UL
+#define CC_HOST_RGF_IRR_RNG_INT_BIT_SHIFT 0xAUL
+#define CC_HOST_RGF_IMR_REG_OFFSET 0x0A04UL
+#define CC_HOST_RGF_ICR_REG_OFFSET 0x0A08UL
+
+#define CC_HOST_POWER_DOWN_EN_REG_OFFSET 0x0A78UL
+
+// --------------------------------------
+// BLOCK: NVM
+// --------------------------------------
+#define CC_NVM_IS_IDLE_REG_OFFSET 0x0F10UL
+#define CC_NVM_IS_IDLE_VALUE_BIT_SHIFT 0x0UL
+#define CC_NVM_IS_IDLE_VALUE_BIT_SIZE 0x1UL
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index 65952393e1bb..7290c603fcb8 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -392,11 +392,8 @@ static int of_get_omap_rng_device_details(struct omap_rng_dev *priv,
if (of_device_is_compatible(dev->of_node, "ti,omap4-rng") ||
of_device_is_compatible(dev->of_node, "inside-secure,safexcel-eip76")) {
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "%s: error getting IRQ resource - %d\n",
- __func__, irq);
+ if (irq < 0)
return irq;
- }
err = devm_request_irq(dev, irq, omap4_rng_irq,
IRQF_TRIGGER_NONE, dev_name(dev), priv);
diff --git a/drivers/char/hw_random/optee-rng.c b/drivers/char/hw_random/optee-rng.c
index ddfbabaa5f8f..49b2e02537dd 100644
--- a/drivers/char/hw_random/optee-rng.c
+++ b/drivers/char/hw_random/optee-rng.c
@@ -226,7 +226,7 @@ static int optee_rng_probe(struct device *dev)
return -ENODEV;
/* Open session with hwrng Trusted App */
- memcpy(sess_arg.uuid, rng_device->id.uuid.b, TEE_IOCTL_UUID_LEN);
+ export_uuid(sess_arg.uuid, &rng_device->id.uuid);
sess_arg.clnt_login = TEE_IOCTL_LOGIN_PUBLIC;
sess_arg.num_params = 0;
diff --git a/drivers/char/hw_random/xgene-rng.c b/drivers/char/hw_random/xgene-rng.c
index d7516a446987..008e6db9ce01 100644
--- a/drivers/char/hw_random/xgene-rng.c
+++ b/drivers/char/hw_random/xgene-rng.c
@@ -328,10 +328,8 @@ static int xgene_rng_probe(struct platform_device *pdev)
return PTR_ERR(ctx->csr_base);
rc = platform_get_irq(pdev, 0);
- if (rc < 0) {
- dev_err(&pdev->dev, "No IRQ resource\n");
+ if (rc < 0)
return rc;
- }
ctx->irq = rc;
dev_dbg(&pdev->dev, "APM X-Gene RNG BASE %p ALARM IRQ %d",
diff --git a/drivers/char/ipmi/Kconfig b/drivers/char/ipmi/Kconfig
index 7dc2c3ec4051..07847d9a459a 100644
--- a/drivers/char/ipmi/Kconfig
+++ b/drivers/char/ipmi/Kconfig
@@ -14,7 +14,7 @@ menuconfig IPMI_HANDLER
IPMI is a standard for managing sensors (temperature,
voltage, etc.) in a system.
- See <file:Documentation/IPMI.txt> for more details on the driver.
+ See <file:Documentation/driver-api/ipmi.rst> for more details on the driver.
If unsure, say N.
diff --git a/drivers/char/ipmi/bt-bmc.c b/drivers/char/ipmi/bt-bmc.c
index d36aeacb290e..a395e2e70dc5 100644
--- a/drivers/char/ipmi/bt-bmc.c
+++ b/drivers/char/ipmi/bt-bmc.c
@@ -399,15 +399,15 @@ static int bt_bmc_config_irq(struct bt_bmc *bt_bmc,
struct device *dev = &pdev->dev;
int rc;
- bt_bmc->irq = platform_get_irq(pdev, 0);
- if (!bt_bmc->irq)
- return -ENODEV;
+ bt_bmc->irq = platform_get_irq_optional(pdev, 0);
+ if (bt_bmc->irq < 0)
+ return bt_bmc->irq;
rc = devm_request_irq(dev, bt_bmc->irq, bt_bmc_irq, IRQF_SHARED,
DEVICE_NAME, bt_bmc);
if (rc < 0) {
dev_warn(dev, "Unable to request IRQ %d\n", bt_bmc->irq);
- bt_bmc->irq = 0;
+ bt_bmc->irq = rc;
return rc;
}
@@ -430,9 +430,6 @@ static int bt_bmc_probe(struct platform_device *pdev)
struct device *dev;
int rc;
- if (!pdev || !pdev->dev.of_node)
- return -ENODEV;
-
dev = &pdev->dev;
dev_info(dev, "Found bt bmc device\n");
@@ -466,9 +463,9 @@ static int bt_bmc_probe(struct platform_device *pdev)
init_waitqueue_head(&bt_bmc->queue);
bt_bmc->miscdev.minor = MISC_DYNAMIC_MINOR,
- bt_bmc->miscdev.name = DEVICE_NAME,
- bt_bmc->miscdev.fops = &bt_bmc_fops,
- bt_bmc->miscdev.parent = dev;
+ bt_bmc->miscdev.name = DEVICE_NAME,
+ bt_bmc->miscdev.fops = &bt_bmc_fops,
+ bt_bmc->miscdev.parent = dev;
rc = misc_register(&bt_bmc->miscdev);
if (rc) {
dev_err(dev, "Unable to register misc device\n");
@@ -477,7 +474,7 @@ static int bt_bmc_probe(struct platform_device *pdev)
bt_bmc_config_irq(bt_bmc, pdev);
- if (bt_bmc->irq) {
+ if (bt_bmc->irq >= 0) {
dev_info(dev, "Using IRQ %d\n", bt_bmc->irq);
} else {
dev_info(dev, "No IRQ; using timer\n");
@@ -503,7 +500,7 @@ static int bt_bmc_remove(struct platform_device *pdev)
struct bt_bmc *bt_bmc = dev_get_drvdata(&pdev->dev);
misc_deregister(&bt_bmc->miscdev);
- if (!bt_bmc->irq)
+ if (bt_bmc->irq < 0)
del_timer_sync(&bt_bmc->poll_timer);
return 0;
}
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index c48d8f086382..e1b22fe0916c 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -33,6 +33,7 @@
#include <linux/workqueue.h>
#include <linux/uuid.h>
#include <linux/nospec.h>
+#include <linux/vmalloc.h>
#define IPMI_DRIVER_VERSION "39.2"
@@ -1153,7 +1154,7 @@ static void free_user_work(struct work_struct *work)
remove_work);
cleanup_srcu_struct(&user->release_barrier);
- kfree(user);
+ vfree(user);
}
int ipmi_create_user(unsigned int if_num,
@@ -1185,7 +1186,7 @@ int ipmi_create_user(unsigned int if_num,
if (rv)
return rv;
- new_user = kmalloc(sizeof(*new_user), GFP_KERNEL);
+ new_user = vzalloc(sizeof(*new_user));
if (!new_user)
return -ENOMEM;
@@ -1232,7 +1233,7 @@ int ipmi_create_user(unsigned int if_num,
out_kfree:
srcu_read_unlock(&ipmi_interfaces_srcu, index);
- kfree(new_user);
+ vfree(new_user);
return rv;
}
EXPORT_SYMBOL(ipmi_create_user);
@@ -3171,7 +3172,7 @@ static void guid_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
goto out;
}
- guid_copy(&bmc->fetch_guid, (guid_t *)(msg->msg.data + 1));
+ import_guid(&bmc->fetch_guid, msg->msg.data + 1);
/*
* Make sure the guid data is available before setting
* dyn_guid_set.
diff --git a/drivers/char/ipmi/ipmi_si_hotmod.c b/drivers/char/ipmi/ipmi_si_hotmod.c
index 42a925f8cf69..4fbb4e18bae2 100644
--- a/drivers/char/ipmi/ipmi_si_hotmod.c
+++ b/drivers/char/ipmi/ipmi_si_hotmod.c
@@ -18,7 +18,7 @@ static int hotmod_handler(const char *val, const struct kernel_param *kp);
module_param_call(hotmod, hotmod_handler, NULL, NULL, 0200);
MODULE_PARM_DESC(hotmod, "Add and remove interfaces. See"
- " Documentation/IPMI.txt in the kernel sources for the"
+ " Documentation/driver-api/ipmi.rst in the kernel sources for the"
" gory details.");
/*
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index c7cc8538b84a..77b8d551ae7f 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -968,7 +968,7 @@ static inline bool ipmi_thread_busy_wait(enum si_sm_result smi_result,
* that are not BT and do not have interrupts. It starts spinning
* when an operation is complete or until max_busy tells it to stop
* (if that is enabled). See the paragraph on kimid_max_busy_us in
- * Documentation/IPMI.txt for details.
+ * Documentation/driver-api/ipmi.rst for details.
*/
static int ipmi_thread(void *data)
{
diff --git a/drivers/char/ipmi/ipmi_si_platform.c b/drivers/char/ipmi/ipmi_si_platform.c
index 638c693e17ad..129b5713f187 100644
--- a/drivers/char/ipmi/ipmi_si_platform.c
+++ b/drivers/char/ipmi/ipmi_si_platform.c
@@ -393,6 +393,8 @@ static int acpi_ipmi_probe(struct platform_device *pdev)
dev_info(io.dev, "%pR regsize %d spacing %d irq %d\n",
res, io.regsize, io.regspacing, io.irq);
+ request_module("acpi_ipmi");
+
return ipmi_si_add_smi(&io);
err_free:
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 2704470e021d..198b65d45c5e 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -189,8 +189,6 @@ struct ssif_addr_info {
struct device *dev;
struct i2c_client *client;
- struct i2c_client *added_client;
-
struct mutex clients_mutex;
struct list_head clients;
@@ -1472,6 +1470,7 @@ static bool check_acpi(struct ssif_info *ssif_info, struct device *dev)
if (acpi_handle) {
ssif_info->addr_source = SI_ACPI;
ssif_info->addr_info.acpi_info.acpi_handle = acpi_handle;
+ request_module("acpi_ipmi");
return true;
}
#endif
@@ -1940,21 +1939,6 @@ out_remove_attr:
goto out;
}
-static int ssif_adapter_handler(struct device *adev, void *opaque)
-{
- struct ssif_addr_info *addr_info = opaque;
-
- if (adev->type != &i2c_adapter_type)
- return 0;
-
- addr_info->added_client = i2c_new_client_device(to_i2c_adapter(adev),
- &addr_info->binfo);
-
- if (!addr_info->adapter_name)
- return 1; /* Only try the first I2C adapter by default. */
- return 0;
-}
-
static int new_ssif_client(int addr, char *adapter_name,
int debug, int slave_addr,
enum ipmi_addr_src addr_src,
@@ -1998,9 +1982,7 @@ static int new_ssif_client(int addr, char *adapter_name,
list_add_tail(&addr_info->link, &ssif_infos);
- if (initialized)
- i2c_for_each_dev(addr_info, ssif_adapter_handler);
- /* Otherwise address list will get it */
+ /* Address list will get it */
out_unlock:
mutex_unlock(&ssif_infos_mutex);
@@ -2120,8 +2102,6 @@ static int ssif_platform_remove(struct platform_device *dev)
return 0;
mutex_lock(&ssif_infos_mutex);
- i2c_unregister_device(addr_info->added_client);
-
list_del(&addr_info->link);
kfree(addr_info);
mutex_unlock(&ssif_infos_mutex);
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 43dd0891ca1e..31cae88a730b 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -31,11 +31,15 @@
#include <linux/uio.h>
#include <linux/uaccess.h>
#include <linux/security.h>
+#include <linux/pseudo_fs.h>
+#include <uapi/linux/magic.h>
+#include <linux/mount.h>
#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif
+#define DEVMEM_MINOR 1
#define DEVPORT_MINOR 4
static inline unsigned long size_inside_page(unsigned long start,
@@ -805,12 +809,64 @@ static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
return ret;
}
+static struct inode *devmem_inode;
+
+#ifdef CONFIG_IO_STRICT_DEVMEM
+void revoke_devmem(struct resource *res)
+{
+ struct inode *inode = READ_ONCE(devmem_inode);
+
+ /*
+ * Check that the initialization has completed. Losing the race
+ * is ok because it means drivers are claiming resources before
+ * the fs_initcall level of init and prevent /dev/mem from
+ * establishing mappings.
+ */
+ if (!inode)
+ return;
+
+ /*
+ * The expectation is that the driver has successfully marked
+ * the resource busy by this point, so devmem_is_allowed()
+ * should start returning false, however for performance this
+ * does not iterate the entire resource range.
+ */
+ if (devmem_is_allowed(PHYS_PFN(res->start)) &&
+ devmem_is_allowed(PHYS_PFN(res->end))) {
+ /*
+ * *cringe* iomem=relaxed says "go ahead, what's the
+ * worst that can happen?"
+ */
+ return;
+ }
+
+ unmap_mapping_range(inode->i_mapping, res->start, resource_size(res), 1);
+}
+#endif
+
static int open_port(struct inode *inode, struct file *filp)
{
+ int rc;
+
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
- return security_locked_down(LOCKDOWN_DEV_MEM);
+ rc = security_locked_down(LOCKDOWN_DEV_MEM);
+ if (rc)
+ return rc;
+
+ if (iminor(inode) != DEVMEM_MINOR)
+ return 0;
+
+ /*
+ * Use a unified address space to have a single point to manage
+ * revocations when drivers want to take over a /dev/mem mapped
+ * range.
+ */
+ inode->i_mapping = devmem_inode->i_mapping;
+ filp->f_mapping = inode->i_mapping;
+
+ return 0;
}
#define zero_lseek null_lseek
@@ -885,7 +941,7 @@ static const struct memdev {
fmode_t fmode;
} devlist[] = {
#ifdef CONFIG_DEVMEM
- [1] = { "mem", 0, &mem_fops, FMODE_UNSIGNED_OFFSET },
+ [DEVMEM_MINOR] = { "mem", 0, &mem_fops, FMODE_UNSIGNED_OFFSET },
#endif
#ifdef CONFIG_DEVKMEM
[2] = { "kmem", 0, &kmem_fops, FMODE_UNSIGNED_OFFSET },
@@ -939,6 +995,45 @@ static char *mem_devnode(struct device *dev, umode_t *mode)
static struct class *mem_class;
+static int devmem_fs_init_fs_context(struct fs_context *fc)
+{
+ return init_pseudo(fc, DEVMEM_MAGIC) ? 0 : -ENOMEM;
+}
+
+static struct file_system_type devmem_fs_type = {
+ .name = "devmem",
+ .owner = THIS_MODULE,
+ .init_fs_context = devmem_fs_init_fs_context,
+ .kill_sb = kill_anon_super,
+};
+
+static int devmem_init_inode(void)
+{
+ static struct vfsmount *devmem_vfs_mount;
+ static int devmem_fs_cnt;
+ struct inode *inode;
+ int rc;
+
+ rc = simple_pin_fs(&devmem_fs_type, &devmem_vfs_mount, &devmem_fs_cnt);
+ if (rc < 0) {
+ pr_err("Cannot mount /dev/mem pseudo filesystem: %d\n", rc);
+ return rc;
+ }
+
+ inode = alloc_anon_inode(devmem_vfs_mount->mnt_sb);
+ if (IS_ERR(inode)) {
+ rc = PTR_ERR(inode);
+ pr_err("Cannot allocate inode for /dev/mem: %d\n", rc);
+ simple_release_fs(&devmem_vfs_mount, &devmem_fs_cnt);
+ return rc;
+ }
+
+ /* publish /dev/mem initialized */
+ WRITE_ONCE(devmem_inode, inode);
+
+ return 0;
+}
+
static int __init chr_dev_init(void)
{
int minor;
@@ -960,6 +1055,8 @@ static int __init chr_dev_init(void)
*/
if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
continue;
+ if ((minor == DEVMEM_MINOR) && devmem_init_inode() != 0)
+ continue;
device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
NULL, devlist[minor].name);
diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
index 7d583222e8fa..0fae33319d2e 100644
--- a/drivers/char/mspec.c
+++ b/drivers/char/mspec.c
@@ -39,7 +39,6 @@
#include <linux/numa.h>
#include <linux/refcount.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <linux/atomic.h>
#include <asm/tlbflush.h>
#include <asm/uncached.h>
@@ -65,7 +64,7 @@ enum mspec_page_type {
* This structure is shared by all vma's that are split off from the
* original vma when split_vma()'s are done.
*
- * The refcnt is incremented atomically because mm->mmap_sem does not
+ * The refcnt is incremented atomically because mm->mmap_lock does not
* protect in fork case where multiple tasks share the vma_data.
*/
struct vma_data {
diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
index 4667844eee69..8206412d25ba 100644
--- a/drivers/char/nvram.c
+++ b/drivers/char/nvram.c
@@ -232,8 +232,6 @@ static ssize_t nvram_misc_read(struct file *file, char __user *buf,
ssize_t ret;
- if (!access_ok(buf, count))
- return -EFAULT;
if (*ppos >= nvram_size)
return 0;
@@ -264,8 +262,6 @@ static ssize_t nvram_misc_write(struct file *file, const char __user *buf,
char *tmp;
ssize_t ret;
- if (!access_ok(buf, count))
- return -EFAULT;
if (*ppos >= nvram_size)
return 0;
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index 4edb4174a1e2..89681f07bc78 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -1404,7 +1404,6 @@ static long cmm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
unsigned int iobase = dev->p_dev->resource[0]->start;
struct inode *inode = file_inode(filp);
struct pcmcia_device *link;
- int size;
int rc;
void __user *argp = (void __user *)arg;
#ifdef CM4000_DEBUG
@@ -1441,19 +1440,6 @@ static long cmm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
DEBUGP(4, dev, "iocnr mismatch\n");
goto out;
}
- size = _IOC_SIZE(cmd);
- rc = -EFAULT;
- DEBUGP(4, dev, "iocdir=%.4x iocr=%.4x iocw=%.4x iocsize=%d cmd=%.4x\n",
- _IOC_DIR(cmd), _IOC_READ, _IOC_WRITE, size, cmd);
-
- if (_IOC_DIR(cmd) & _IOC_READ) {
- if (!access_ok(argp, size))
- goto out;
- }
- if (_IOC_DIR(cmd) & _IOC_WRITE) {
- if (!access_ok(argp, size))
- goto out;
- }
rc = 0;
switch (cmd) {
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 0d10e31fd342..2a41b21623ae 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -327,7 +327,6 @@
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/percpu.h>
-#include <linux/cryptohash.h>
#include <linux/fips.h>
#include <linux/ptrace.h>
#include <linux/workqueue.h>
@@ -337,6 +336,7 @@
#include <linux/completion.h>
#include <linux/uuid.h>
#include <crypto/chacha.h>
+#include <crypto/sha.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
@@ -1397,14 +1397,14 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
__u32 w[5];
unsigned long l[LONGS(20)];
} hash;
- __u32 workspace[SHA_WORKSPACE_WORDS];
+ __u32 workspace[SHA1_WORKSPACE_WORDS];
unsigned long flags;
/*
* If we have an architectural hardware random number
* generator, use it for SHA's initial vector
*/
- sha_init(hash.w);
+ sha1_init(hash.w);
for (i = 0; i < LONGS(20); i++) {
unsigned long v;
if (!arch_get_random_long(&v))
@@ -1415,7 +1415,7 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
/* Generate a hash across the pool, 16 words (512 bits) at a time */
spin_lock_irqsave(&r->lock, flags);
for (i = 0; i < r->poolinfo->poolwords; i += 16)
- sha_transform(hash.w, (__u8 *)(r->pool + i), workspace);
+ sha1_transform(hash.w, (__u8 *)(r->pool + i), workspace);
/*
* We mix the hash back into the pool to prevent backtracking
@@ -2057,7 +2057,7 @@ static char sysctl_bootid[16];
* sysctl system call, as 16 bytes of binary data.
*/
static int proc_do_uuid(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void *buffer, size_t *lenp, loff_t *ppos)
{
struct ctl_table fake_table;
unsigned char buf[64], tmp_uuid[16], *uuid;
@@ -2087,7 +2087,7 @@ static int proc_do_uuid(struct ctl_table *table, int write,
* Return entropy available scaled to integral bits
*/
static int proc_do_entropy(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void *buffer, size_t *lenp, loff_t *ppos)
{
struct ctl_table fake_table;
int entropy_count;
diff --git a/drivers/char/tlclk.c b/drivers/char/tlclk.c
index 6d81bb3bb503..896a3550fba9 100644
--- a/drivers/char/tlclk.c
+++ b/drivers/char/tlclk.c
@@ -777,17 +777,21 @@ static int __init tlclk_init(void)
{
int ret;
+ telclk_interrupt = (inb(TLCLK_REG7) & 0x0f);
+
+ alarm_events = kzalloc( sizeof(struct tlclk_alarms), GFP_KERNEL);
+ if (!alarm_events) {
+ ret = -ENOMEM;
+ goto out1;
+ }
+
ret = register_chrdev(tlclk_major, "telco_clock", &tlclk_fops);
if (ret < 0) {
printk(KERN_ERR "tlclk: can't get major %d.\n", tlclk_major);
+ kfree(alarm_events);
return ret;
}
tlclk_major = ret;
- alarm_events = kzalloc( sizeof(struct tlclk_alarms), GFP_KERNEL);
- if (!alarm_events) {
- ret = -ENOMEM;
- goto out1;
- }
/* Read telecom clock IRQ number (Set by BIOS) */
if (!request_region(TLCLK_BASE, 8, "telco_clock")) {
@@ -796,7 +800,6 @@ static int __init tlclk_init(void)
ret = -EBUSY;
goto out2;
}
- telclk_interrupt = (inb(TLCLK_REG7) & 0x0f);
if (0x0F == telclk_interrupt ) { /* not MCPBL0010 ? */
printk(KERN_ERR "telclk_interrupt = 0x%x non-mcpbl0010 hw.\n",
@@ -837,8 +840,8 @@ out3:
release_region(TLCLK_BASE, 8);
out2:
kfree(alarm_events);
-out1:
unregister_chrdev(tlclk_major, "telco_clock");
+out1:
return ret;
}
diff --git a/drivers/char/tpm/eventlog/tpm2.c b/drivers/char/tpm/eventlog/tpm2.c
index e741b1157525..37a05800980c 100644
--- a/drivers/char/tpm/eventlog/tpm2.c
+++ b/drivers/char/tpm/eventlog/tpm2.c
@@ -51,8 +51,7 @@ static void *tpm2_bios_measurements_start(struct seq_file *m, loff_t *pos)
int i;
event_header = addr;
- size = sizeof(struct tcg_pcr_event) - sizeof(event_header->event)
- + event_header->event_size;
+ size = struct_size(event_header, event, event_header->event_size);
if (*pos == 0) {
if (addr + size < limit) {
@@ -98,8 +97,8 @@ static void *tpm2_bios_measurements_next(struct seq_file *m, void *v,
event_header = log->bios_event_log;
if (v == SEQ_START_TOKEN) {
- event_size = sizeof(struct tcg_pcr_event) -
- sizeof(event_header->event) + event_header->event_size;
+ event_size = struct_size(event_header, event,
+ event_header->event_size);
marker = event_header;
} else {
event = v;
@@ -136,9 +135,8 @@ static int tpm2_binary_bios_measurements_show(struct seq_file *m, void *v)
size_t size;
if (v == SEQ_START_TOKEN) {
- size = sizeof(struct tcg_pcr_event) -
- sizeof(event_header->event) + event_header->event_size;
-
+ size = struct_size(event_header, event,
+ event_header->event_size);
temp_ptr = event_header;
if (size > 0)
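The struct_size() conversions above use the helper from <linux/overflow.h>, which computes sizeof(*ptr) plus count trailing array elements and saturates on overflow. A minimal sketch with a hypothetical structure (not the tcg_pcr_event layout):

#include <linux/overflow.h>
#include <linux/slab.h>

struct example_event {
	u32 count;
	u8 payload[];		/* flexible array member */
};

static struct example_event *example_alloc(u32 count)
{
	struct example_event *ev;

	/* sizeof(*ev) + count * sizeof(ev->payload[0]), saturating on overflow */
	ev = kzalloc(struct_size(ev, payload, count), GFP_KERNEL);
	if (ev)
		ev->count = count;
	return ev;
}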
diff --git a/drivers/char/tpm/tpm_ftpm_tee.c b/drivers/char/tpm/tpm_ftpm_tee.c
index 22bf553ccf9d..2491a2cb54a2 100644
--- a/drivers/char/tpm/tpm_ftpm_tee.c
+++ b/drivers/char/tpm/tpm_ftpm_tee.c
@@ -241,7 +241,7 @@ static int ftpm_tee_probe(struct platform_device *pdev)
/* Open a session with fTPM TA */
memset(&sess_arg, 0, sizeof(sess_arg));
- memcpy(sess_arg.uuid, ftpm_ta_uuid.b, TEE_IOCTL_UUID_LEN);
+ export_uuid(sess_arg.uuid, &ftpm_ta_uuid);
sess_arg.clnt_login = TEE_IOCTL_LOGIN_PUBLIC;
sess_arg.num_params = 0;
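export_uuid() from <linux/uuid.h> serialises a uuid_t into a raw byte buffer, which is what the hunk above switches to instead of copying the internal .b array. A small sketch with a made-up UUID value:

#include <linux/uuid.h>

static void example_fill_uuid(u8 dst[UUID_SIZE])
{
	/* Illustrative UUID only; not the fTPM TA UUID. */
	const uuid_t example = UUID_INIT(0x12345678, 0x9abc, 0xdef0,
					 0x11, 0x22, 0x33, 0x44,
					 0x55, 0x66, 0x77, 0x88);

	export_uuid(dst, &example);	/* writes UUID_SIZE (16) bytes */
}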
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 3cbaec925606..00c5e3acee46 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -871,7 +871,7 @@ static int pipe_to_sg(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
return 0;
/* Try lock this page */
- if (pipe_buf_steal(pipe, buf) == 0) {
+ if (pipe_buf_try_steal(pipe, buf)) {
/* Get reference and unlock page for moving */
get_page(buf->page);
unlock_page(buf->page);
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index bcb257baed06..8f50a1caecba 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -1,5 +1,11 @@
# SPDX-License-Identifier: GPL-2.0
+config HAVE_CLK
+ bool
+ help
+ The <linux/clk.h> calls support software clock gating and
+ thus are a key power management tool on many systems.
+
config CLKDEV_LOOKUP
bool
select HAVE_CLK
@@ -7,8 +13,18 @@ config CLKDEV_LOOKUP
config HAVE_CLK_PREPARE
bool
-config COMMON_CLK
+config HAVE_LEGACY_CLK # TODO: Remove once all legacy users are migrated
bool
+ select HAVE_CLK
+ help
+ Select this option when the clock API in <linux/clk.h> is implemented
+ by platform/architecture code. This method is deprecated. Modern
+ code should select COMMON_CLK instead and not define a custom
+ 'struct clk'.
+
+menuconfig COMMON_CLK
+ bool "Common Clock Framework"
+ depends on !HAVE_LEGACY_CLK
select HAVE_CLK_PREPARE
select CLKDEV_LOOKUP
select SRCU
@@ -20,8 +36,7 @@ config COMMON_CLK
Architectures utilizing the common struct clk should select
this option.
-menu "Common Clock Framework"
- depends on COMMON_CLK
+if COMMON_CLK
config COMMON_CLK_WM831X
tristate "Clock driver for WM831x/2x PMICs"
@@ -252,7 +267,7 @@ config COMMON_CLK_XGENE
default ARCH_XGENE
depends on ARM64 || COMPILE_TEST
---help---
- Sypport for the APM X-Gene SoC reference, PLL, and device clocks.
+ Support for the APM X-Gene SoC reference, PLL, and device clocks.
config COMMON_CLK_LOCHNAGAR
tristate "Cirrus Logic Lochnagar clock driver"
@@ -326,6 +341,12 @@ config COMMON_CLK_MMP2
help
Support for Marvell MMP2 and MMP3 SoC clocks
+config COMMON_CLK_MMP2_AUDIO
+ tristate "Clock driver for MMP2 Audio subsystem"
+ depends on COMMON_CLK_MMP2 || COMPILE_TEST
+ help
+ This driver supports the clocks for the audio subsystem on the MMP2 SoC.
+
config COMMON_CLK_BD718XX
tristate "Clock driver for 32K clk gates on ROHM PMICs"
depends on MFD_ROHM_BD718XX || MFD_ROHM_BD70528 || MFD_ROHM_BD71828
@@ -341,6 +362,7 @@ config COMMON_CLK_FIXED_MMIO
source "drivers/clk/actions/Kconfig"
source "drivers/clk/analogbits/Kconfig"
+source "drivers/clk/baikal-t1/Kconfig"
source "drivers/clk/bcm/Kconfig"
source "drivers/clk/hisilicon/Kconfig"
source "drivers/clk/imgtec/Kconfig"
@@ -360,6 +382,7 @@ source "drivers/clk/sunxi-ng/Kconfig"
source "drivers/clk/tegra/Kconfig"
source "drivers/clk/ti/Kconfig"
source "drivers/clk/uniphier/Kconfig"
+source "drivers/clk/x86/Kconfig"
source "drivers/clk/zynqmp/Kconfig"
-endmenu
+endif
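For reference, the <linux/clk.h> interface the help texts above refer to is consumed roughly as in the following hedged sketch (device and clock connection names are illustrative):

#include <linux/clk.h>
#include <linux/device.h>

static int example_enable_bus_clock(struct device *dev)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get(dev, "bus");		/* lookup by connection id */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);		/* ungate the clock */
	if (ret)
		return ret;

	dev_info(dev, "bus clock at %lu Hz\n", clk_get_rate(clk));

	return 0;
}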
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index f4169cc2fd31..ca9af11d3391 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -75,6 +75,7 @@ obj-y += analogbits/
obj-$(CONFIG_COMMON_CLK_AT91) += at91/
obj-$(CONFIG_ARCH_ARTPEC) += axis/
obj-$(CONFIG_ARC_PLAT_AXS10X) += axs10x/
+obj-$(CONFIG_CLK_BAIKAL_T1) += baikal-t1/
obj-y += bcm/
obj-$(CONFIG_ARCH_BERLIN) += berlin/
obj-$(CONFIG_ARCH_DAVINCI) += davinci/
@@ -104,17 +105,18 @@ obj-$(CONFIG_COMMON_CLK_SAMSUNG) += samsung/
obj-$(CONFIG_CLK_SIFIVE) += sifive/
obj-$(CONFIG_ARCH_SIRF) += sirf/
obj-$(CONFIG_ARCH_SOCFPGA) += socfpga/
+obj-$(CONFIG_ARCH_AGILEX) += socfpga/
+obj-$(CONFIG_ARCH_STRATIX10) += socfpga/
obj-$(CONFIG_PLAT_SPEAR) += spear/
-obj-$(CONFIG_ARCH_SPRD) += sprd/
+obj-y += sprd/
obj-$(CONFIG_ARCH_STI) += st/
-obj-$(CONFIG_ARCH_STRATIX10) += socfpga/
obj-$(CONFIG_ARCH_SUNXI) += sunxi/
obj-$(CONFIG_SUNXI_CCU) += sunxi-ng/
obj-$(CONFIG_ARCH_TEGRA) += tegra/
obj-y += ti/
obj-$(CONFIG_CLK_UNIPHIER) += uniphier/
obj-$(CONFIG_ARCH_U8500) += ux500/
-obj-$(CONFIG_COMMON_CLK_VERSATILE) += versatile/
+obj-y += versatile/
ifeq ($(CONFIG_COMMON_CLK), y)
obj-$(CONFIG_X86) += x86/
endif
diff --git a/drivers/clk/at91/at91rm9200.c b/drivers/clk/at91/at91rm9200.c
index c44a431b6c97..38bdb4981315 100644
--- a/drivers/clk/at91/at91rm9200.c
+++ b/drivers/clk/at91/at91rm9200.c
@@ -98,9 +98,9 @@ static void __init at91rm9200_pmc_setup(struct device_node *np)
if (IS_ERR(regmap))
return;
- at91rm9200_pmc = pmc_data_allocate(PMC_MAIN + 1,
+ at91rm9200_pmc = pmc_data_allocate(PMC_PLLBCK + 1,
nck(at91rm9200_systemck),
- nck(at91rm9200_periphck), 0);
+ nck(at91rm9200_periphck), 0, 4);
if (!at91rm9200_pmc)
return;
@@ -123,12 +123,16 @@ static void __init at91rm9200_pmc_setup(struct device_node *np)
if (IS_ERR(hw))
goto err_free;
+ at91rm9200_pmc->chws[PMC_PLLACK] = hw;
+
hw = at91_clk_register_pll(regmap, "pllbck", "mainck", 1,
&at91rm9200_pll_layout,
&rm9200_pll_characteristics);
if (IS_ERR(hw))
goto err_free;
+ at91rm9200_pmc->chws[PMC_PLLBCK] = hw;
+
parent_names[0] = slowxtal_name;
parent_names[1] = "mainck";
parent_names[2] = "pllack";
@@ -159,6 +163,8 @@ static void __init at91rm9200_pmc_setup(struct device_node *np)
&at91rm9200_programmable_layout);
if (IS_ERR(hw))
goto err_free;
+
+ at91rm9200_pmc->pchws[i] = hw;
}
for (i = 0; i < ARRAY_SIZE(at91rm9200_systemck); i++) {
@@ -187,7 +193,7 @@ static void __init at91rm9200_pmc_setup(struct device_node *np)
return;
err_free:
- pmc_data_free(at91rm9200_pmc);
+ kfree(at91rm9200_pmc);
}
/*
* While the TCB can be used as the clocksource, the system timer is most likely
diff --git a/drivers/clk/at91/at91sam9260.c b/drivers/clk/at91/at91sam9260.c
index a9d4234758d7..6d0723aa8b13 100644
--- a/drivers/clk/at91/at91sam9260.c
+++ b/drivers/clk/at91/at91sam9260.c
@@ -352,9 +352,10 @@ static void __init at91sam926x_pmc_setup(struct device_node *np,
if (IS_ERR(regmap))
return;
- at91sam9260_pmc = pmc_data_allocate(PMC_MAIN + 1,
+ at91sam9260_pmc = pmc_data_allocate(PMC_PLLBCK + 1,
ndck(data->sck, data->num_sck),
- ndck(data->pck, data->num_pck), 0);
+ ndck(data->pck, data->num_pck),
+ 0, data->num_progck);
if (!at91sam9260_pmc)
return;
@@ -398,12 +399,16 @@ static void __init at91sam926x_pmc_setup(struct device_node *np,
if (IS_ERR(hw))
goto err_free;
+ at91sam9260_pmc->chws[PMC_PLLACK] = hw;
+
hw = at91_clk_register_pll(regmap, "pllbck", "mainck", 1,
data->pllb_layout,
data->pllb_characteristics);
if (IS_ERR(hw))
goto err_free;
+ at91sam9260_pmc->chws[PMC_PLLBCK] = hw;
+
parent_names[0] = slck_name;
parent_names[1] = "mainck";
parent_names[2] = "pllack";
@@ -434,6 +439,8 @@ static void __init at91sam926x_pmc_setup(struct device_node *np,
&at91rm9200_programmable_layout);
if (IS_ERR(hw))
goto err_free;
+
+ at91sam9260_pmc->pchws[i] = hw;
}
for (i = 0; i < data->num_sck; i++) {
@@ -462,7 +469,7 @@ static void __init at91sam926x_pmc_setup(struct device_node *np,
return;
err_free:
- pmc_data_free(at91sam9260_pmc);
+ kfree(at91sam9260_pmc);
}
static void __init at91sam9260_pmc_setup(struct device_node *np)
diff --git a/drivers/clk/at91/at91sam9g45.c b/drivers/clk/at91/at91sam9g45.c
index 38a7d2d2df0c..9873b583c260 100644
--- a/drivers/clk/at91/at91sam9g45.c
+++ b/drivers/clk/at91/at91sam9g45.c
@@ -115,9 +115,9 @@ static void __init at91sam9g45_pmc_setup(struct device_node *np)
if (IS_ERR(regmap))
return;
- at91sam9g45_pmc = pmc_data_allocate(PMC_MAIN + 1,
+ at91sam9g45_pmc = pmc_data_allocate(PMC_PLLACK + 1,
nck(at91sam9g45_systemck),
- nck(at91sam9g45_periphck), 0);
+ nck(at91sam9g45_periphck), 0, 2);
if (!at91sam9g45_pmc)
return;
@@ -143,6 +143,8 @@ static void __init at91sam9g45_pmc_setup(struct device_node *np)
if (IS_ERR(hw))
goto err_free;
+ at91sam9g45_pmc->chws[PMC_PLLACK] = hw;
+
hw = at91_clk_register_utmi(regmap, NULL, "utmick", "mainck");
if (IS_ERR(hw))
goto err_free;
@@ -182,6 +184,8 @@ static void __init at91sam9g45_pmc_setup(struct device_node *np)
&at91sam9g45_programmable_layout);
if (IS_ERR(hw))
goto err_free;
+
+ at91sam9g45_pmc->pchws[i] = hw;
}
for (i = 0; i < ARRAY_SIZE(at91sam9g45_systemck); i++) {
@@ -210,7 +214,7 @@ static void __init at91sam9g45_pmc_setup(struct device_node *np)
return;
err_free:
- pmc_data_free(at91sam9g45_pmc);
+ kfree(at91sam9g45_pmc);
}
/*
* The TCB is used as the clocksource so its clock is needed early. This means
diff --git a/drivers/clk/at91/at91sam9n12.c b/drivers/clk/at91/at91sam9n12.c
index 8bb39d2ba84b..630dc5d87171 100644
--- a/drivers/clk/at91/at91sam9n12.c
+++ b/drivers/clk/at91/at91sam9n12.c
@@ -128,8 +128,8 @@ static void __init at91sam9n12_pmc_setup(struct device_node *np)
if (IS_ERR(regmap))
return;
- at91sam9n12_pmc = pmc_data_allocate(PMC_MAIN + 1,
- nck(at91sam9n12_systemck), 31, 0);
+ at91sam9n12_pmc = pmc_data_allocate(PMC_PLLBCK + 1,
+ nck(at91sam9n12_systemck), 31, 0, 2);
if (!at91sam9n12_pmc)
return;
@@ -162,11 +162,15 @@ static void __init at91sam9n12_pmc_setup(struct device_node *np)
if (IS_ERR(hw))
goto err_free;
+ at91sam9n12_pmc->chws[PMC_PLLACK] = hw;
+
hw = at91_clk_register_pll(regmap, "pllbck", "mainck", 1,
&at91rm9200_pll_layout, &pllb_characteristics);
if (IS_ERR(hw))
goto err_free;
+ at91sam9n12_pmc->chws[PMC_PLLBCK] = hw;
+
parent_names[0] = slck_name;
parent_names[1] = "mainck";
parent_names[2] = "plladivck";
@@ -198,6 +202,8 @@ static void __init at91sam9n12_pmc_setup(struct device_node *np)
&at91sam9x5_programmable_layout);
if (IS_ERR(hw))
goto err_free;
+
+ at91sam9n12_pmc->pchws[i] = hw;
}
for (i = 0; i < ARRAY_SIZE(at91sam9n12_systemck); i++) {
@@ -228,7 +234,7 @@ static void __init at91sam9n12_pmc_setup(struct device_node *np)
return;
err_free:
- pmc_data_free(at91sam9n12_pmc);
+ kfree(at91sam9n12_pmc);
}
/*
* The TCB is used as the clocksource so its clock is needed early. This means
diff --git a/drivers/clk/at91/at91sam9rl.c b/drivers/clk/at91/at91sam9rl.c
index 77fe83a73bf4..0d1cc44b056f 100644
--- a/drivers/clk/at91/at91sam9rl.c
+++ b/drivers/clk/at91/at91sam9rl.c
@@ -87,9 +87,9 @@ static void __init at91sam9rl_pmc_setup(struct device_node *np)
if (IS_ERR(regmap))
return;
- at91sam9rl_pmc = pmc_data_allocate(PMC_MAIN + 1,
+ at91sam9rl_pmc = pmc_data_allocate(PMC_PLLACK + 1,
nck(at91sam9rl_systemck),
- nck(at91sam9rl_periphck), 0);
+ nck(at91sam9rl_periphck), 0, 2);
if (!at91sam9rl_pmc)
return;
@@ -105,6 +105,8 @@ static void __init at91sam9rl_pmc_setup(struct device_node *np)
if (IS_ERR(hw))
goto err_free;
+ at91sam9rl_pmc->chws[PMC_PLLACK] = hw;
+
hw = at91_clk_register_utmi(regmap, NULL, "utmick", "mainck");
if (IS_ERR(hw))
goto err_free;
@@ -138,6 +140,8 @@ static void __init at91sam9rl_pmc_setup(struct device_node *np)
&at91rm9200_programmable_layout);
if (IS_ERR(hw))
goto err_free;
+
+ at91sam9rl_pmc->pchws[i] = hw;
}
for (i = 0; i < ARRAY_SIZE(at91sam9rl_systemck); i++) {
@@ -166,6 +170,6 @@ static void __init at91sam9rl_pmc_setup(struct device_node *np)
return;
err_free:
- pmc_data_free(at91sam9rl_pmc);
+ kfree(at91sam9rl_pmc);
}
CLK_OF_DECLARE_DRIVER(at91sam9rl_pmc, "atmel,at91sam9rl-pmc", at91sam9rl_pmc_setup);
diff --git a/drivers/clk/at91/at91sam9x5.c b/drivers/clk/at91/at91sam9x5.c
index 086cf0b4955c..0ce3da080287 100644
--- a/drivers/clk/at91/at91sam9x5.c
+++ b/drivers/clk/at91/at91sam9x5.c
@@ -150,8 +150,8 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np,
if (IS_ERR(regmap))
return;
- at91sam9x5_pmc = pmc_data_allocate(PMC_MAIN + 1,
- nck(at91sam9x5_systemck), 31, 0);
+ at91sam9x5_pmc = pmc_data_allocate(PMC_PLLACK + 1,
+ nck(at91sam9x5_systemck), 31, 0, 2);
if (!at91sam9x5_pmc)
return;
@@ -184,6 +184,8 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np,
if (IS_ERR(hw))
goto err_free;
+ at91sam9x5_pmc->chws[PMC_PLLACK] = hw;
+
hw = at91_clk_register_utmi(regmap, NULL, "utmick", "mainck");
if (IS_ERR(hw))
goto err_free;
@@ -227,6 +229,8 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np,
&at91sam9x5_programmable_layout);
if (IS_ERR(hw))
goto err_free;
+
+ at91sam9x5_pmc->pchws[i] = hw;
}
for (i = 0; i < ARRAY_SIZE(at91sam9x5_systemck); i++) {
@@ -278,7 +282,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np,
return;
err_free:
- pmc_data_free(at91sam9x5_pmc);
+ kfree(at91sam9x5_pmc);
}
static void __init at91sam9g15_pmc_setup(struct device_node *np)
diff --git a/drivers/clk/at91/pmc.c b/drivers/clk/at91/pmc.c
index b71515acdec1..20ee9dccee78 100644
--- a/drivers/clk/at91/pmc.c
+++ b/drivers/clk/at91/pmc.c
@@ -67,6 +67,10 @@ struct clk_hw *of_clk_hw_pmc_get(struct of_phandle_args *clkspec, void *data)
if (idx < pmc_data->ngck)
return pmc_data->ghws[idx];
break;
+ case PMC_TYPE_PROGRAMMABLE:
+ if (idx < pmc_data->npck)
+ return pmc_data->pchws[idx];
+ break;
default:
break;
}
@@ -76,48 +80,34 @@ struct clk_hw *of_clk_hw_pmc_get(struct of_phandle_args *clkspec, void *data)
return ERR_PTR(-EINVAL);
}
-void pmc_data_free(struct pmc_data *pmc_data)
-{
- kfree(pmc_data->chws);
- kfree(pmc_data->shws);
- kfree(pmc_data->phws);
- kfree(pmc_data->ghws);
-}
-
struct pmc_data *pmc_data_allocate(unsigned int ncore, unsigned int nsystem,
- unsigned int nperiph, unsigned int ngck)
+ unsigned int nperiph, unsigned int ngck,
+ unsigned int npck)
{
- struct pmc_data *pmc_data = kzalloc(sizeof(*pmc_data), GFP_KERNEL);
+ unsigned int num_clks = ncore + nsystem + nperiph + ngck + npck;
+ struct pmc_data *pmc_data;
+ pmc_data = kzalloc(struct_size(pmc_data, hwtable, num_clks),
+ GFP_KERNEL);
if (!pmc_data)
return NULL;
pmc_data->ncore = ncore;
- pmc_data->chws = kcalloc(ncore, sizeof(struct clk_hw *), GFP_KERNEL);
- if (!pmc_data->chws)
- goto err;
+ pmc_data->chws = pmc_data->hwtable;
pmc_data->nsystem = nsystem;
- pmc_data->shws = kcalloc(nsystem, sizeof(struct clk_hw *), GFP_KERNEL);
- if (!pmc_data->shws)
- goto err;
+ pmc_data->shws = pmc_data->chws + ncore;
pmc_data->nperiph = nperiph;
- pmc_data->phws = kcalloc(nperiph, sizeof(struct clk_hw *), GFP_KERNEL);
- if (!pmc_data->phws)
- goto err;
+ pmc_data->phws = pmc_data->shws + nsystem;
pmc_data->ngck = ngck;
- pmc_data->ghws = kcalloc(ngck, sizeof(struct clk_hw *), GFP_KERNEL);
- if (!pmc_data->ghws)
- goto err;
+ pmc_data->ghws = pmc_data->phws + nperiph;
- return pmc_data;
-
-err:
- pmc_data_free(pmc_data);
+ pmc_data->npck = npck;
+ pmc_data->pchws = pmc_data->ghws + ngck;
- return NULL;
+ return pmc_data;
}
#ifdef CONFIG_PM
@@ -274,8 +264,11 @@ static int __init pmc_register_ops(void)
struct device_node *np;
np = of_find_matching_node(NULL, sama5d2_pmc_dt_ids);
+ if (!np)
+ return -ENODEV;
pmcreg = device_node_to_regmap(np);
+ of_node_put(np);
if (IS_ERR(pmcreg))
return PTR_ERR(pmcreg);
diff --git a/drivers/clk/at91/pmc.h b/drivers/clk/at91/pmc.h
index 9b8db9cdcda5..df616f2937e7 100644
--- a/drivers/clk/at91/pmc.h
+++ b/drivers/clk/at91/pmc.h
@@ -24,6 +24,10 @@ struct pmc_data {
struct clk_hw **phws;
unsigned int ngck;
struct clk_hw **ghws;
+ unsigned int npck;
+ struct clk_hw **pchws;
+
+ struct clk_hw *hwtable[];
};
struct clk_range {
@@ -94,8 +98,8 @@ struct clk_pcr_layout {
#define ndck(a, s) (a[s - 1].id + 1)
#define nck(a) (a[ARRAY_SIZE(a) - 1].id + 1)
struct pmc_data *pmc_data_allocate(unsigned int ncore, unsigned int nsystem,
- unsigned int nperiph, unsigned int ngck);
-void pmc_data_free(struct pmc_data *pmc_data);
+ unsigned int nperiph, unsigned int ngck,
+ unsigned int npck);
int of_at91_get_clk_range(struct device_node *np, const char *propname,
struct clk_range *range);
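The pmc_data_allocate() rework above replaces the per-array kcalloc() calls with a single allocation whose trailing hwtable[] is carved into the chws/shws/phws/ghws/pchws windows, so one kfree() releases everything. A simplified sketch of the same pattern with hypothetical names:

#include <linux/clk-provider.h>
#include <linux/overflow.h>
#include <linux/slab.h>

struct example_table {
	unsigned int na, nb;
	struct clk_hw **a;		/* points at hwtable[0 .. na) */
	struct clk_hw **b;		/* points at hwtable[na .. na + nb) */
	struct clk_hw *hwtable[];	/* single backing storage */
};

static struct example_table *example_table_alloc(unsigned int na, unsigned int nb)
{
	struct example_table *t;

	t = kzalloc(struct_size(t, hwtable, na + nb), GFP_KERNEL);
	if (!t)
		return NULL;

	t->na = na;
	t->a = t->hwtable;
	t->nb = nb;
	t->b = t->a + na;

	return t;			/* a single kfree(t) frees all of it */
}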
diff --git a/drivers/clk/at91/sam9x60.c b/drivers/clk/at91/sam9x60.c
index cc19e8fb83be..3e20aa68259f 100644
--- a/drivers/clk/at91/sam9x60.c
+++ b/drivers/clk/at91/sam9x60.c
@@ -182,10 +182,10 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
if (IS_ERR(regmap))
return;
- sam9x60_pmc = pmc_data_allocate(PMC_MAIN + 1,
+ sam9x60_pmc = pmc_data_allocate(PMC_PLLACK + 1,
nck(sam9x60_systemck),
nck(sam9x60_periphck),
- nck(sam9x60_gck));
+ nck(sam9x60_gck), 8);
if (!sam9x60_pmc)
return;
@@ -214,6 +214,8 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
if (IS_ERR(hw))
goto err_free;
+ sam9x60_pmc->chws[PMC_PLLACK] = hw;
+
hw = sam9x60_clk_register_pll(regmap, &pmc_pll_lock, "upllck",
"main_osc", 1, &upll_characteristics);
if (IS_ERR(hw))
@@ -255,6 +257,8 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
&sam9x60_programmable_layout);
if (IS_ERR(hw))
goto err_free;
+
+ sam9x60_pmc->pchws[i] = hw;
}
for (i = 0; i < ARRAY_SIZE(sam9x60_systemck); i++) {
@@ -299,7 +303,7 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
return;
err_free:
- pmc_data_free(sam9x60_pmc);
+ kfree(sam9x60_pmc);
}
/* Some clks are used for a clocksource */
CLK_OF_DECLARE(sam9x60_pmc, "microchip,sam9x60-pmc", sam9x60_pmc_setup);
diff --git a/drivers/clk/at91/sama5d2.c b/drivers/clk/at91/sama5d2.c
index ff7e3f727082..d69421d71daf 100644
--- a/drivers/clk/at91/sama5d2.c
+++ b/drivers/clk/at91/sama5d2.c
@@ -89,6 +89,7 @@ static const struct {
{ .n = "i2s1_clk", .id = 55, .r = { .min = 0, .max = 83000000 }, },
{ .n = "can0_clk", .id = 56, .r = { .min = 0, .max = 83000000 }, },
{ .n = "can1_clk", .id = 57, .r = { .min = 0, .max = 83000000 }, },
+ { .n = "ptc_clk", .id = 58, .r = { .min = 0, .max = 83000000 }, },
{ .n = "classd_clk", .id = 59, .r = { .min = 0, .max = 83000000 }, },
};
@@ -166,10 +167,10 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
if (IS_ERR(regmap))
return;
- sama5d2_pmc = pmc_data_allocate(PMC_I2S1_MUX + 1,
+ sama5d2_pmc = pmc_data_allocate(PMC_AUDIOPLLCK + 1,
nck(sama5d2_systemck),
nck(sama5d2_periph32ck),
- nck(sama5d2_gck));
+ nck(sama5d2_gck), 3);
if (!sama5d2_pmc)
return;
@@ -202,6 +203,8 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
if (IS_ERR(hw))
goto err_free;
+ sama5d2_pmc->chws[PMC_PLLACK] = hw;
+
hw = at91_clk_register_audio_pll_frac(regmap, "audiopll_fracck",
"mainck");
if (IS_ERR(hw))
@@ -217,6 +220,8 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
if (IS_ERR(hw))
goto err_free;
+ sama5d2_pmc->chws[PMC_AUDIOPLLCK] = hw;
+
regmap_sfr = syscon_regmap_lookup_by_compatible("atmel,sama5d2-sfr");
if (IS_ERR(regmap_sfr))
regmap_sfr = NULL;
@@ -267,6 +272,8 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
&sama5d2_programmable_layout);
if (IS_ERR(hw))
goto err_free;
+
+ sama5d2_pmc->pchws[i] = hw;
}
for (i = 0; i < ARRAY_SIZE(sama5d2_systemck); i++) {
@@ -350,6 +357,6 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
return;
err_free:
- pmc_data_free(sama5d2_pmc);
+ kfree(sama5d2_pmc);
}
CLK_OF_DECLARE_DRIVER(sama5d2_pmc, "atmel,sama5d2-pmc", sama5d2_pmc_setup);
diff --git a/drivers/clk/at91/sama5d3.c b/drivers/clk/at91/sama5d3.c
index 88506f909c08..5e4e44dd4c37 100644
--- a/drivers/clk/at91/sama5d3.c
+++ b/drivers/clk/at91/sama5d3.c
@@ -125,9 +125,9 @@ static void __init sama5d3_pmc_setup(struct device_node *np)
if (IS_ERR(regmap))
return;
- sama5d3_pmc = pmc_data_allocate(PMC_MAIN + 1,
+ sama5d3_pmc = pmc_data_allocate(PMC_PLLACK + 1,
nck(sama5d3_systemck),
- nck(sama5d3_periphck), 0);
+ nck(sama5d3_periphck), 0, 3);
if (!sama5d3_pmc)
return;
@@ -158,6 +158,8 @@ static void __init sama5d3_pmc_setup(struct device_node *np)
if (IS_ERR(hw))
goto err_free;
+ sama5d3_pmc->chws[PMC_PLLACK] = hw;
+
hw = at91_clk_register_utmi(regmap, NULL, "utmick", "mainck");
if (IS_ERR(hw))
goto err_free;
@@ -201,6 +203,8 @@ static void __init sama5d3_pmc_setup(struct device_node *np)
&at91sam9x5_programmable_layout);
if (IS_ERR(hw))
goto err_free;
+
+ sama5d3_pmc->pchws[i] = hw;
}
for (i = 0; i < ARRAY_SIZE(sama5d3_systemck); i++) {
@@ -231,7 +235,7 @@ static void __init sama5d3_pmc_setup(struct device_node *np)
return;
err_free:
- pmc_data_free(sama5d3_pmc);
+ kfree(sama5d3_pmc);
}
/*
* The TCB is used as the clocksource so its clock is needed early. This means
diff --git a/drivers/clk/at91/sama5d4.c b/drivers/clk/at91/sama5d4.c
index a6dee4a3b6e4..662ff5fa6e98 100644
--- a/drivers/clk/at91/sama5d4.c
+++ b/drivers/clk/at91/sama5d4.c
@@ -140,9 +140,9 @@ static void __init sama5d4_pmc_setup(struct device_node *np)
if (IS_ERR(regmap))
return;
- sama5d4_pmc = pmc_data_allocate(PMC_MCK2 + 1,
+ sama5d4_pmc = pmc_data_allocate(PMC_PLLACK + 1,
nck(sama5d4_systemck),
- nck(sama5d4_periph32ck), 0);
+ nck(sama5d4_periph32ck), 0, 3);
if (!sama5d4_pmc)
return;
@@ -173,6 +173,8 @@ static void __init sama5d4_pmc_setup(struct device_node *np)
if (IS_ERR(hw))
goto err_free;
+ sama5d4_pmc->chws[PMC_PLLACK] = hw;
+
hw = at91_clk_register_utmi(regmap, NULL, "utmick", "mainck");
if (IS_ERR(hw))
goto err_free;
@@ -224,6 +226,8 @@ static void __init sama5d4_pmc_setup(struct device_node *np)
&at91sam9x5_programmable_layout);
if (IS_ERR(hw))
goto err_free;
+
+ sama5d4_pmc->pchws[i] = hw;
}
for (i = 0; i < ARRAY_SIZE(sama5d4_systemck); i++) {
@@ -267,6 +271,6 @@ static void __init sama5d4_pmc_setup(struct device_node *np)
return;
err_free:
- pmc_data_free(sama5d4_pmc);
+ kfree(sama5d4_pmc);
}
CLK_OF_DECLARE_DRIVER(sama5d4_pmc, "atmel,sama5d4-pmc", sama5d4_pmc_setup);
diff --git a/drivers/clk/baikal-t1/Kconfig b/drivers/clk/baikal-t1/Kconfig
new file mode 100644
index 000000000000..03102f1094bc
--- /dev/null
+++ b/drivers/clk/baikal-t1/Kconfig
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config CLK_BAIKAL_T1
+ bool "Baikal-T1 Clocks Control Unit interface"
+ depends on (MIPS_BAIKAL_T1 && OF) || COMPILE_TEST
+ default MIPS_BAIKAL_T1
+ help
+ Clocks Control Unit is the core of Baikal-T1 SoC System Controller
+ responsible for the chip subsystems clocking and resetting. It
+ consists of multiple global clock domains, which can be reset by
+ means of the CCU control registers. These domains and devices placed
+ in them are fed with clocks generated by a hierarchy of PLLs,
+ configurable and fixed clock dividers. Enable this option to be able
+ to select the Baikal-T1 CCU PLL and Divider drivers.
+
+if CLK_BAIKAL_T1
+
+config CLK_BT1_CCU_PLL
+ bool "Baikal-T1 CCU PLLs support"
+ select MFD_SYSCON
+ default MIPS_BAIKAL_T1
+ help
+ Enable this to support the PLLs embedded into the Baikal-T1 SoC
+ System Controller. These are five PLLs placed at the root of the
+ clocks hierarchy, right after an external reference oscillator
+ (normally of 25MHz). They are used to generate high frequency
+ signals, which are either directly wired to the consumers (like
+ CPUs, DDR, etc.) or passed over the clock dividers to be only
+ then used as an individual reference clock of a target device.
+
+config CLK_BT1_CCU_DIV
+ bool "Baikal-T1 CCU Dividers support"
+ select RESET_CONTROLLER
+ select MFD_SYSCON
+ default MIPS_BAIKAL_T1
+ help
+ Enable this to support the CCU dividers used to distribute clocks
+ between AXI-bus and system devices coming from CCU PLLs of Baikal-T1
+ SoC. CCU dividers can be either configurable or fixed, and either
+ gateable or ungateable. Some of the CCU dividers can also be used
+ to reset the domains they supply clocks to.
+
+endif
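To make the hierarchy described in the help texts concrete: each PLL multiplies the reference clock by a feedback factor and divides it by reference and output dividers, roughly Fout = Fref / NR * NF / OD. A tiny sketch with made-up factors (the real register layout follows in ccu-pll.c below):

/* E.g. a 25 MHz reference with NR = 1, NF = 80, OD = 2 gives 1 GHz. */
static unsigned long example_pll_fout(unsigned long fref, unsigned long nr,
				      unsigned long nf, unsigned long od)
{
	return fref / nr * nf / od;
}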
diff --git a/drivers/clk/baikal-t1/Makefile b/drivers/clk/baikal-t1/Makefile
new file mode 100644
index 000000000000..b3b9590b95ed
--- /dev/null
+++ b/drivers/clk/baikal-t1/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_CLK_BT1_CCU_PLL) += ccu-pll.o clk-ccu-pll.o
+obj-$(CONFIG_CLK_BT1_CCU_DIV) += ccu-div.o clk-ccu-div.o
diff --git a/drivers/clk/baikal-t1/ccu-div.c b/drivers/clk/baikal-t1/ccu-div.c
new file mode 100644
index 000000000000..4062092d67f9
--- /dev/null
+++ b/drivers/clk/baikal-t1/ccu-div.c
@@ -0,0 +1,602 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+ *
+ * Authors:
+ * Serge Semin <Sergey.Semin@baikalelectronics.ru>
+ * Dmitry Dunaev <dmitry.dunaev@baikalelectronics.ru>
+ *
+ * Baikal-T1 CCU Dividers interface driver
+ */
+
+#define pr_fmt(fmt) "bt1-ccu-div: " fmt
+
+#include <linux/kernel.h>
+#include <linux/printk.h>
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/slab.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/spinlock.h>
+#include <linux/regmap.h>
+#include <linux/delay.h>
+#include <linux/time64.h>
+#include <linux/debugfs.h>
+
+#include "ccu-div.h"
+
+#define CCU_DIV_CTL 0x00
+#define CCU_DIV_CTL_EN BIT(0)
+#define CCU_DIV_CTL_RST BIT(1)
+#define CCU_DIV_CTL_SET_CLKDIV BIT(2)
+#define CCU_DIV_CTL_CLKDIV_FLD 4
+#define CCU_DIV_CTL_CLKDIV_MASK(_width) \
+ GENMASK((_width) + CCU_DIV_CTL_CLKDIV_FLD - 1, CCU_DIV_CTL_CLKDIV_FLD)
+#define CCU_DIV_CTL_LOCK_SHIFTED BIT(27)
+#define CCU_DIV_CTL_LOCK_NORMAL BIT(31)
+
+#define CCU_DIV_RST_DELAY_US 1
+#define CCU_DIV_LOCK_CHECK_RETRIES 50
+
+#define CCU_DIV_CLKDIV_MIN 0
+#define CCU_DIV_CLKDIV_MAX(_mask) \
+ ((_mask) >> CCU_DIV_CTL_CLKDIV_FLD)
+
+/*
+ * Use the next two helpers until generic field setter and getter
+ * macros with non-constant mask support become available.
+ */
+static inline u32 ccu_div_get(u32 mask, u32 val)
+{
+ return (val & mask) >> CCU_DIV_CTL_CLKDIV_FLD;
+}
+
+static inline u32 ccu_div_prep(u32 mask, u32 val)
+{
+ return (val << CCU_DIV_CTL_CLKDIV_FLD) & mask;
+}
+
+static inline unsigned long ccu_div_lock_delay_ns(unsigned long ref_clk,
+ unsigned long div)
+{
+ u64 ns = 4ULL * (div ?: 1) * NSEC_PER_SEC;
+
+ do_div(ns, ref_clk);
+
+ return ns;
+}
+
+static inline unsigned long ccu_div_calc_freq(unsigned long ref_clk,
+ unsigned long div)
+{
+ return ref_clk / (div ?: 1);
+}
+
+static int ccu_div_var_update_clkdiv(struct ccu_div *div,
+ unsigned long parent_rate,
+ unsigned long divider)
+{
+ unsigned long nd;
+ u32 val = 0;
+ u32 lock;
+ int count;
+
+ nd = ccu_div_lock_delay_ns(parent_rate, divider);
+
+ if (div->features & CCU_DIV_LOCK_SHIFTED)
+ lock = CCU_DIV_CTL_LOCK_SHIFTED;
+ else
+ lock = CCU_DIV_CTL_LOCK_NORMAL;
+
+ regmap_update_bits(div->sys_regs, div->reg_ctl,
+ CCU_DIV_CTL_SET_CLKDIV, CCU_DIV_CTL_SET_CLKDIV);
+
+ /*
+ * Until an nsec-resolution version of readl_poll_timeout() is
+ * available, we have to implement the polling loop below ourselves.
+ */
+ count = CCU_DIV_LOCK_CHECK_RETRIES;
+ do {
+ ndelay(nd);
+ regmap_read(div->sys_regs, div->reg_ctl, &val);
+ if (val & lock)
+ return 0;
+ } while (--count);
+
+ return -ETIMEDOUT;
+}
+
+static int ccu_div_var_enable(struct clk_hw *hw)
+{
+ struct clk_hw *parent_hw = clk_hw_get_parent(hw);
+ struct ccu_div *div = to_ccu_div(hw);
+ unsigned long flags;
+ u32 val = 0;
+ int ret;
+
+ if (!parent_hw) {
+ pr_err("Can't enable '%s' with no parent", clk_hw_get_name(hw));
+ return -EINVAL;
+ }
+
+ regmap_read(div->sys_regs, div->reg_ctl, &val);
+ if (val & CCU_DIV_CTL_EN)
+ return 0;
+
+ spin_lock_irqsave(&div->lock, flags);
+ ret = ccu_div_var_update_clkdiv(div, clk_hw_get_rate(parent_hw),
+ ccu_div_get(div->mask, val));
+ if (!ret)
+ regmap_update_bits(div->sys_regs, div->reg_ctl,
+ CCU_DIV_CTL_EN, CCU_DIV_CTL_EN);
+ spin_unlock_irqrestore(&div->lock, flags);
+ if (ret)
+ pr_err("Divider '%s' lock timed out\n", clk_hw_get_name(hw));
+
+ return ret;
+}
+
+static int ccu_div_gate_enable(struct clk_hw *hw)
+{
+ struct ccu_div *div = to_ccu_div(hw);
+ unsigned long flags;
+
+ spin_lock_irqsave(&div->lock, flags);
+ regmap_update_bits(div->sys_regs, div->reg_ctl,
+ CCU_DIV_CTL_EN, CCU_DIV_CTL_EN);
+ spin_unlock_irqrestore(&div->lock, flags);
+
+ return 0;
+}
+
+static void ccu_div_gate_disable(struct clk_hw *hw)
+{
+ struct ccu_div *div = to_ccu_div(hw);
+ unsigned long flags;
+
+ spin_lock_irqsave(&div->lock, flags);
+ regmap_update_bits(div->sys_regs, div->reg_ctl, CCU_DIV_CTL_EN, 0);
+ spin_unlock_irqrestore(&div->lock, flags);
+}
+
+static int ccu_div_gate_is_enabled(struct clk_hw *hw)
+{
+ struct ccu_div *div = to_ccu_div(hw);
+ u32 val = 0;
+
+ regmap_read(div->sys_regs, div->reg_ctl, &val);
+
+ return !!(val & CCU_DIV_CTL_EN);
+}
+
+static unsigned long ccu_div_var_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct ccu_div *div = to_ccu_div(hw);
+ unsigned long divider;
+ u32 val = 0;
+
+ regmap_read(div->sys_regs, div->reg_ctl, &val);
+ divider = ccu_div_get(div->mask, val);
+
+ return ccu_div_calc_freq(parent_rate, divider);
+}
+
+static inline unsigned long ccu_div_var_calc_divider(unsigned long rate,
+ unsigned long parent_rate,
+ unsigned int mask)
+{
+ unsigned long divider;
+
+ divider = parent_rate / rate;
+ return clamp_t(unsigned long, divider, CCU_DIV_CLKDIV_MIN,
+ CCU_DIV_CLKDIV_MAX(mask));
+}
+
+static long ccu_div_var_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct ccu_div *div = to_ccu_div(hw);
+ unsigned long divider;
+
+ divider = ccu_div_var_calc_divider(rate, *parent_rate, div->mask);
+
+ return ccu_div_calc_freq(*parent_rate, divider);
+}
+
+/*
+ * This method is used for the clock divider blocks which support the
+ * on-the-fly rate change. Since they lack the EN bit functionality,
+ * they can't be gated before the rate adjustment.
+ */
+static int ccu_div_var_set_rate_slow(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct ccu_div *div = to_ccu_div(hw);
+ unsigned long flags, divider;
+ u32 val;
+ int ret;
+
+ divider = ccu_div_var_calc_divider(rate, parent_rate, div->mask);
+ if (divider == 1 && div->features & CCU_DIV_SKIP_ONE) {
+ divider = 0;
+ } else if (div->features & CCU_DIV_SKIP_ONE_TO_THREE) {
+ if (divider == 1 || divider == 2)
+ divider = 0;
+ else if (divider == 3)
+ divider = 4;
+ }
+
+ val = ccu_div_prep(div->mask, divider);
+
+ spin_lock_irqsave(&div->lock, flags);
+ regmap_update_bits(div->sys_regs, div->reg_ctl, div->mask, val);
+ ret = ccu_div_var_update_clkdiv(div, parent_rate, divider);
+ spin_unlock_irqrestore(&div->lock, flags);
+ if (ret)
+ pr_err("Divider '%s' lock timed out\n", clk_hw_get_name(hw));
+
+ return ret;
+}
+
+/*
+ * This method is used for the clock divider blocks, which don't support
+ * the on-the-fly rate change.
+ */
+static int ccu_div_var_set_rate_fast(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct ccu_div *div = to_ccu_div(hw);
+ unsigned long flags, divider;
+ u32 val;
+
+ divider = ccu_div_var_calc_divider(rate, parent_rate, div->mask);
+ val = ccu_div_prep(div->mask, divider);
+
+ /*
+ * Also disable the clock divider block if it was enabled by default
+ * or by the bootloader.
+ */
+ spin_lock_irqsave(&div->lock, flags);
+ regmap_update_bits(div->sys_regs, div->reg_ctl,
+ div->mask | CCU_DIV_CTL_EN, val);
+ spin_unlock_irqrestore(&div->lock, flags);
+
+ return 0;
+}
+
+static unsigned long ccu_div_fixed_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct ccu_div *div = to_ccu_div(hw);
+
+ return ccu_div_calc_freq(parent_rate, div->divider);
+}
+
+static long ccu_div_fixed_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct ccu_div *div = to_ccu_div(hw);
+
+ return ccu_div_calc_freq(*parent_rate, div->divider);
+}
+
+static int ccu_div_fixed_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ return 0;
+}
+
+int ccu_div_reset_domain(struct ccu_div *div)
+{
+ unsigned long flags;
+
+ if (!div || !(div->features & CCU_DIV_RESET_DOMAIN))
+ return -EINVAL;
+
+ spin_lock_irqsave(&div->lock, flags);
+ regmap_update_bits(div->sys_regs, div->reg_ctl,
+ CCU_DIV_CTL_RST, CCU_DIV_CTL_RST);
+ spin_unlock_irqrestore(&div->lock, flags);
+
+ /* The next delay must be enough to cover all the resets. */
+ udelay(CCU_DIV_RST_DELAY_US);
+
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+struct ccu_div_dbgfs_bit {
+ struct ccu_div *div;
+ const char *name;
+ u32 mask;
+};
+
+#define CCU_DIV_DBGFS_BIT_ATTR(_name, _mask) { \
+ .name = _name, \
+ .mask = _mask \
+ }
+
+static const struct ccu_div_dbgfs_bit ccu_div_bits[] = {
+ CCU_DIV_DBGFS_BIT_ATTR("div_en", CCU_DIV_CTL_EN),
+ CCU_DIV_DBGFS_BIT_ATTR("div_rst", CCU_DIV_CTL_RST),
+ CCU_DIV_DBGFS_BIT_ATTR("div_bypass", CCU_DIV_CTL_SET_CLKDIV),
+ CCU_DIV_DBGFS_BIT_ATTR("div_lock", CCU_DIV_CTL_LOCK_NORMAL)
+};
+
+#define CCU_DIV_DBGFS_BIT_NUM ARRAY_SIZE(ccu_div_bits)
+
+/*
+ * It can be dangerous to change the divider settings behind the clock
+ * framework's back, so we don't provide any Kconfig-based compile-time
+ * option to enable this feature.
+ */
+#undef CCU_DIV_ALLOW_WRITE_DEBUGFS
+#ifdef CCU_DIV_ALLOW_WRITE_DEBUGFS
+
+static int ccu_div_dbgfs_bit_set(void *priv, u64 val)
+{
+ const struct ccu_div_dbgfs_bit *bit = priv;
+ struct ccu_div *div = bit->div;
+ unsigned long flags;
+
+ spin_lock_irqsave(&div->lock, flags);
+ regmap_update_bits(div->sys_regs, div->reg_ctl,
+ bit->mask, val ? bit->mask : 0);
+ spin_unlock_irqrestore(&div->lock, flags);
+
+ return 0;
+}
+
+static int ccu_div_dbgfs_var_clkdiv_set(void *priv, u64 val)
+{
+ struct ccu_div *div = priv;
+ unsigned long flags;
+ u32 data;
+
+ val = clamp_t(u64, val, CCU_DIV_CLKDIV_MIN,
+ CCU_DIV_CLKDIV_MAX(div->mask));
+ data = ccu_div_prep(div->mask, val);
+
+ spin_lock_irqsave(&div->lock, flags);
+ regmap_update_bits(div->sys_regs, div->reg_ctl, div->mask, data);
+ spin_unlock_irqrestore(&div->lock, flags);
+
+ return 0;
+}
+
+#define ccu_div_dbgfs_mode 0644
+
+#else /* !CCU_DIV_ALLOW_WRITE_DEBUGFS */
+
+#define ccu_div_dbgfs_bit_set NULL
+#define ccu_div_dbgfs_var_clkdiv_set NULL
+#define ccu_div_dbgfs_mode 0444
+
+#endif /* !CCU_DIV_ALLOW_WRITE_DEBUGFS */
+
+static int ccu_div_dbgfs_bit_get(void *priv, u64 *val)
+{
+ const struct ccu_div_dbgfs_bit *bit = priv;
+ struct ccu_div *div = bit->div;
+ u32 data = 0;
+
+ regmap_read(div->sys_regs, div->reg_ctl, &data);
+ *val = !!(data & bit->mask);
+
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(ccu_div_dbgfs_bit_fops,
+ ccu_div_dbgfs_bit_get, ccu_div_dbgfs_bit_set, "%llu\n");
+
+static int ccu_div_dbgfs_var_clkdiv_get(void *priv, u64 *val)
+{
+ struct ccu_div *div = priv;
+ u32 data = 0;
+
+ regmap_read(div->sys_regs, div->reg_ctl, &data);
+ *val = ccu_div_get(div->mask, data);
+
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(ccu_div_dbgfs_var_clkdiv_fops,
+ ccu_div_dbgfs_var_clkdiv_get, ccu_div_dbgfs_var_clkdiv_set, "%llu\n");
+
+static int ccu_div_dbgfs_fixed_clkdiv_get(void *priv, u64 *val)
+{
+ struct ccu_div *div = priv;
+
+ *val = div->divider;
+
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(ccu_div_dbgfs_fixed_clkdiv_fops,
+ ccu_div_dbgfs_fixed_clkdiv_get, NULL, "%llu\n");
+
+static void ccu_div_var_debug_init(struct clk_hw *hw, struct dentry *dentry)
+{
+ struct ccu_div *div = to_ccu_div(hw);
+ struct ccu_div_dbgfs_bit *bits;
+ int didx, bidx, num = 2;
+ const char *name;
+
+ num += !!(div->flags & CLK_SET_RATE_GATE) +
+ !!(div->features & CCU_DIV_RESET_DOMAIN);
+
+ bits = kcalloc(num, sizeof(*bits), GFP_KERNEL);
+ if (!bits)
+ return;
+
+ for (didx = 0, bidx = 0; bidx < CCU_DIV_DBGFS_BIT_NUM; ++bidx) {
+ name = ccu_div_bits[bidx].name;
+ if (!(div->flags & CLK_SET_RATE_GATE) &&
+ !strcmp("div_en", name)) {
+ continue;
+ }
+
+ if (!(div->features & CCU_DIV_RESET_DOMAIN) &&
+ !strcmp("div_rst", name)) {
+ continue;
+ }
+
+ bits[didx] = ccu_div_bits[bidx];
+ bits[didx].div = div;
+
+ if (div->features & CCU_DIV_LOCK_SHIFTED &&
+ !strcmp("div_lock", name)) {
+ bits[didx].mask = CCU_DIV_CTL_LOCK_SHIFTED;
+ }
+
+ debugfs_create_file_unsafe(bits[didx].name, ccu_div_dbgfs_mode,
+ dentry, &bits[didx],
+ &ccu_div_dbgfs_bit_fops);
+ ++didx;
+ }
+
+ debugfs_create_file_unsafe("div_clkdiv", ccu_div_dbgfs_mode, dentry,
+ div, &ccu_div_dbgfs_var_clkdiv_fops);
+}
+
+static void ccu_div_gate_debug_init(struct clk_hw *hw, struct dentry *dentry)
+{
+ struct ccu_div *div = to_ccu_div(hw);
+ struct ccu_div_dbgfs_bit *bit;
+
+ bit = kmalloc(sizeof(*bit), GFP_KERNEL);
+ if (!bit)
+ return;
+
+ *bit = ccu_div_bits[0];
+ bit->div = div;
+ debugfs_create_file_unsafe(bit->name, ccu_div_dbgfs_mode, dentry, bit,
+ &ccu_div_dbgfs_bit_fops);
+
+ debugfs_create_file_unsafe("div_clkdiv", 0400, dentry, div,
+ &ccu_div_dbgfs_fixed_clkdiv_fops);
+}
+
+static void ccu_div_fixed_debug_init(struct clk_hw *hw, struct dentry *dentry)
+{
+ struct ccu_div *div = to_ccu_div(hw);
+
+ debugfs_create_file_unsafe("div_clkdiv", 0400, dentry, div,
+ &ccu_div_dbgfs_fixed_clkdiv_fops);
+}
+
+#else /* !CONFIG_DEBUG_FS */
+
+#define ccu_div_var_debug_init NULL
+#define ccu_div_gate_debug_init NULL
+#define ccu_div_fixed_debug_init NULL
+
+#endif /* !CONFIG_DEBUG_FS */
+
+static const struct clk_ops ccu_div_var_gate_to_set_ops = {
+ .enable = ccu_div_var_enable,
+ .disable = ccu_div_gate_disable,
+ .is_enabled = ccu_div_gate_is_enabled,
+ .recalc_rate = ccu_div_var_recalc_rate,
+ .round_rate = ccu_div_var_round_rate,
+ .set_rate = ccu_div_var_set_rate_fast,
+ .debug_init = ccu_div_var_debug_init
+};
+
+static const struct clk_ops ccu_div_var_nogate_ops = {
+ .recalc_rate = ccu_div_var_recalc_rate,
+ .round_rate = ccu_div_var_round_rate,
+ .set_rate = ccu_div_var_set_rate_slow,
+ .debug_init = ccu_div_var_debug_init
+};
+
+static const struct clk_ops ccu_div_gate_ops = {
+ .enable = ccu_div_gate_enable,
+ .disable = ccu_div_gate_disable,
+ .is_enabled = ccu_div_gate_is_enabled,
+ .recalc_rate = ccu_div_fixed_recalc_rate,
+ .round_rate = ccu_div_fixed_round_rate,
+ .set_rate = ccu_div_fixed_set_rate,
+ .debug_init = ccu_div_gate_debug_init
+};
+
+static const struct clk_ops ccu_div_fixed_ops = {
+ .recalc_rate = ccu_div_fixed_recalc_rate,
+ .round_rate = ccu_div_fixed_round_rate,
+ .set_rate = ccu_div_fixed_set_rate,
+ .debug_init = ccu_div_fixed_debug_init
+};
+
+struct ccu_div *ccu_div_hw_register(const struct ccu_div_init_data *div_init)
+{
+ struct clk_parent_data parent_data = { };
+ struct clk_init_data hw_init = { };
+ struct ccu_div *div;
+ int ret;
+
+ if (!div_init)
+ return ERR_PTR(-EINVAL);
+
+ div = kzalloc(sizeof(*div), GFP_KERNEL);
+ if (!div)
+ return ERR_PTR(-ENOMEM);
+
+ /*
+ * Note since Baikal-T1 System Controller registers are MMIO-backed
+ * we won't check the regmap IO operations return status, because it
+ * must be zero anyway.
+ */
+ div->hw.init = &hw_init;
+ div->id = div_init->id;
+ div->reg_ctl = div_init->base + CCU_DIV_CTL;
+ div->sys_regs = div_init->sys_regs;
+ div->flags = div_init->flags;
+ div->features = div_init->features;
+ spin_lock_init(&div->lock);
+
+ hw_init.name = div_init->name;
+ hw_init.flags = div_init->flags;
+
+ if (div_init->type == CCU_DIV_VAR) {
+ if (hw_init.flags & CLK_SET_RATE_GATE)
+ hw_init.ops = &ccu_div_var_gate_to_set_ops;
+ else
+ hw_init.ops = &ccu_div_var_nogate_ops;
+ div->mask = CCU_DIV_CTL_CLKDIV_MASK(div_init->width);
+ } else if (div_init->type == CCU_DIV_GATE) {
+ hw_init.ops = &ccu_div_gate_ops;
+ div->divider = div_init->divider;
+ } else if (div_init->type == CCU_DIV_FIXED) {
+ hw_init.ops = &ccu_div_fixed_ops;
+ div->divider = div_init->divider;
+ } else {
+ ret = -EINVAL;
+ goto err_free_div;
+ }
+
+ if (!div_init->parent_name) {
+ ret = -EINVAL;
+ goto err_free_div;
+ }
+ parent_data.fw_name = div_init->parent_name;
+ hw_init.parent_data = &parent_data;
+ hw_init.num_parents = 1;
+
+ ret = of_clk_hw_register(div_init->np, &div->hw);
+ if (ret)
+ goto err_free_div;
+
+ return div;
+
+err_free_div:
+ kfree(div);
+
+ return ERR_PTR(ret);
+}
+
+void ccu_div_hw_unregister(struct ccu_div *div)
+{
+ clk_hw_unregister(&div->hw);
+
+ kfree(div);
+}
diff --git a/drivers/clk/baikal-t1/ccu-div.h b/drivers/clk/baikal-t1/ccu-div.h
new file mode 100644
index 000000000000..795665caefbd
--- /dev/null
+++ b/drivers/clk/baikal-t1/ccu-div.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+ *
+ * Baikal-T1 CCU Dividers interface driver
+ */
+#ifndef __CLK_BT1_CCU_DIV_H__
+#define __CLK_BT1_CCU_DIV_H__
+
+#include <linux/clk-provider.h>
+#include <linux/spinlock.h>
+#include <linux/regmap.h>
+#include <linux/bits.h>
+#include <linux/of.h>
+
+/*
+ * CCU Divider private flags
+ * @CCU_DIV_SKIP_ONE: For some reason the divider can't be set to 1.
+ * It can be 0 though, which is functionally the same.
+ * @CCU_DIV_SKIP_ONE_TO_THREE: For some reason the divider can't be within [1,3].
+ * It can be either 0 or greater than 3.
+ * @CCU_DIV_LOCK_SHIFTED: Find lock-bit at non-standard position.
+ * @CCU_DIV_RESET_DOMAIN: Provide reset clock domain method.
+ */
+#define CCU_DIV_SKIP_ONE BIT(1)
+#define CCU_DIV_SKIP_ONE_TO_THREE BIT(2)
+#define CCU_DIV_LOCK_SHIFTED BIT(3)
+#define CCU_DIV_RESET_DOMAIN BIT(4)
+
+/*
+ * enum ccu_div_type - CCU Divider types
+ * @CCU_DIV_VAR: Clocks gate with variable divider.
+ * @CCU_DIV_GATE: Clocks gate with fixed divider.
+ * @CCU_DIV_FIXED: Ungateable clock with fixed divider.
+ */
+enum ccu_div_type {
+ CCU_DIV_VAR,
+ CCU_DIV_GATE,
+ CCU_DIV_FIXED
+};
+
+/*
+ * struct ccu_div_init_data - CCU Divider initialization data
+ * @id: Clocks private identifier.
+ * @name: Clocks name.
+ * @parent_name: Parent clocks name in a fw node.
+ * @base: Divider register base address with respect to the sys_regs base.
+ * @sys_regs: Baikal-T1 System Controller registers map.
+ * @np: Pointer to the node describing the CCU Dividers.
+ * @type: CCU divider type (variable, fixed with and without gate).
+ * @width: Divider width if it's variable.
+ * @divider: Divider fixed value.
+ * @flags: CCU Divider clock flags.
+ * @features: CCU Divider private features.
+ */
+struct ccu_div_init_data {
+ unsigned int id;
+ const char *name;
+ const char *parent_name;
+ unsigned int base;
+ struct regmap *sys_regs;
+ struct device_node *np;
+ enum ccu_div_type type;
+ union {
+ unsigned int width;
+ unsigned int divider;
+ };
+ unsigned long flags;
+ unsigned long features;
+};
+
+/*
+ * struct ccu_div - CCU Divider descriptor
+ * @hw: clk_hw of the divider.
+ * @id: Clock private identifier.
+ * @reg_ctl: Divider control register base address.
+ * @sys_regs: Baikal-T1 System Controller registers map.
+ * @lock: Divider state change spin-lock.
+ * @mask: Divider field mask.
+ * @divider: Divider fixed value.
+ * @flags: Divider clock flags.
+ * @features: CCU Divider private features.
+ */
+struct ccu_div {
+ struct clk_hw hw;
+ unsigned int id;
+ unsigned int reg_ctl;
+ struct regmap *sys_regs;
+ spinlock_t lock;
+ union {
+ u32 mask;
+ unsigned int divider;
+ };
+ unsigned long flags;
+ unsigned long features;
+};
+#define to_ccu_div(_hw) container_of(_hw, struct ccu_div, hw)
+
+static inline struct clk_hw *ccu_div_get_clk_hw(struct ccu_div *div)
+{
+ return div ? &div->hw : NULL;
+}
+
+struct ccu_div *ccu_div_hw_register(const struct ccu_div_init_data *init);
+
+void ccu_div_hw_unregister(struct ccu_div *div);
+
+int ccu_div_reset_domain(struct ccu_div *div);
+
+#endif /* __CLK_BT1_CCU_DIV_H__ */
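A hedged usage sketch of the interface declared above: how a clock provider might describe and register one variable divider. The identifiers, offsets and field width are illustrative and not taken from the actual clk-ccu-div.c tables:

#include "ccu-div.h"

static struct ccu_div *example_register_div(struct device_node *np,
					    struct regmap *sys_regs)
{
	struct ccu_div_init_data init = {
		.id = 1,			/* provider-private clock id */
		.name = "example_div",
		.parent_name = "example_pll",	/* fw_name of the parent clock */
		.base = 0x100,			/* CTL register offset in sys_regs */
		.sys_regs = sys_regs,
		.np = np,
		.type = CCU_DIV_VAR,
		.width = 4,			/* CLKDIV field width */
		.features = CCU_DIV_RESET_DOMAIN,
	};

	return ccu_div_hw_register(&init);	/* returns ERR_PTR() on failure */
}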
diff --git a/drivers/clk/baikal-t1/ccu-pll.c b/drivers/clk/baikal-t1/ccu-pll.c
new file mode 100644
index 000000000000..13ef28001439
--- /dev/null
+++ b/drivers/clk/baikal-t1/ccu-pll.c
@@ -0,0 +1,558 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+ *
+ * Authors:
+ * Serge Semin <Sergey.Semin@baikalelectronics.ru>
+ * Dmitry Dunaev <dmitry.dunaev@baikalelectronics.ru>
+ *
+ * Baikal-T1 CCU PLL interface driver
+ */
+
+#define pr_fmt(fmt) "bt1-ccu-pll: " fmt
+
+#include <linux/kernel.h>
+#include <linux/printk.h>
+#include <linux/limits.h>
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/slab.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/spinlock.h>
+#include <linux/regmap.h>
+#include <linux/iopoll.h>
+#include <linux/time64.h>
+#include <linux/rational.h>
+#include <linux/debugfs.h>
+
+#include "ccu-pll.h"
+
+#define CCU_PLL_CTL 0x000
+#define CCU_PLL_CTL_EN BIT(0)
+#define CCU_PLL_CTL_RST BIT(1)
+#define CCU_PLL_CTL_CLKR_FLD 2
+#define CCU_PLL_CTL_CLKR_MASK GENMASK(7, CCU_PLL_CTL_CLKR_FLD)
+#define CCU_PLL_CTL_CLKF_FLD 8
+#define CCU_PLL_CTL_CLKF_MASK GENMASK(20, CCU_PLL_CTL_CLKF_FLD)
+#define CCU_PLL_CTL_CLKOD_FLD 21
+#define CCU_PLL_CTL_CLKOD_MASK GENMASK(24, CCU_PLL_CTL_CLKOD_FLD)
+#define CCU_PLL_CTL_BYPASS BIT(30)
+#define CCU_PLL_CTL_LOCK BIT(31)
+#define CCU_PLL_CTL1 0x004
+#define CCU_PLL_CTL1_BWADJ_FLD 3
+#define CCU_PLL_CTL1_BWADJ_MASK GENMASK(14, CCU_PLL_CTL1_BWADJ_FLD)
+
+#define CCU_PLL_LOCK_CHECK_RETRIES 50
+
+#define CCU_PLL_NR_MAX \
+ ((CCU_PLL_CTL_CLKR_MASK >> CCU_PLL_CTL_CLKR_FLD) + 1)
+#define CCU_PLL_NF_MAX \
+ ((CCU_PLL_CTL_CLKF_MASK >> (CCU_PLL_CTL_CLKF_FLD + 1)) + 1)
+#define CCU_PLL_OD_MAX \
+ ((CCU_PLL_CTL_CLKOD_MASK >> CCU_PLL_CTL_CLKOD_FLD) + 1)
+#define CCU_PLL_NB_MAX \
+ ((CCU_PLL_CTL1_BWADJ_MASK >> CCU_PLL_CTL1_BWADJ_FLD) + 1)
+#define CCU_PLL_FDIV_MIN 427000UL
+#define CCU_PLL_FDIV_MAX 3500000000UL
+#define CCU_PLL_FOUT_MIN 200000000UL
+#define CCU_PLL_FOUT_MAX 2500000000UL
+#define CCU_PLL_FVCO_MIN 700000000UL
+#define CCU_PLL_FVCO_MAX 3500000000UL
+#define CCU_PLL_CLKOD_FACTOR 2
+
+static inline unsigned long ccu_pll_lock_delay_us(unsigned long ref_clk,
+ unsigned long nr)
+{
+ u64 us = 500ULL * nr * USEC_PER_SEC;
+
+ do_div(us, ref_clk);
+
+ return us;
+}
+
+static inline unsigned long ccu_pll_calc_freq(unsigned long ref_clk,
+ unsigned long nr,
+ unsigned long nf,
+ unsigned long od)
+{
+ u64 tmp = ref_clk;
+
+ do_div(tmp, nr);
+ tmp *= nf;
+ do_div(tmp, od);
+
+ return tmp;
+}
+
+static int ccu_pll_reset(struct ccu_pll *pll, unsigned long ref_clk,
+ unsigned long nr)
+{
+ unsigned long ud, ut;
+ u32 val;
+
+ ud = ccu_pll_lock_delay_us(ref_clk, nr);
+ ut = ud * CCU_PLL_LOCK_CHECK_RETRIES;
+
+ regmap_update_bits(pll->sys_regs, pll->reg_ctl,
+ CCU_PLL_CTL_RST, CCU_PLL_CTL_RST);
+
+ return regmap_read_poll_timeout_atomic(pll->sys_regs, pll->reg_ctl, val,
+ val & CCU_PLL_CTL_LOCK, ud, ut);
+}
+
+static int ccu_pll_enable(struct clk_hw *hw)
+{
+ struct clk_hw *parent_hw = clk_hw_get_parent(hw);
+ struct ccu_pll *pll = to_ccu_pll(hw);
+ unsigned long flags;
+ u32 val = 0;
+ int ret;
+
+ if (!parent_hw) {
+ pr_err("Can't enable '%s' with no parent", clk_hw_get_name(hw));
+ return -EINVAL;
+ }
+
+ regmap_read(pll->sys_regs, pll->reg_ctl, &val);
+ if (val & CCU_PLL_CTL_EN)
+ return 0;
+
+ spin_lock_irqsave(&pll->lock, flags);
+ regmap_write(pll->sys_regs, pll->reg_ctl, val | CCU_PLL_CTL_EN);
+ ret = ccu_pll_reset(pll, clk_hw_get_rate(parent_hw),
+ FIELD_GET(CCU_PLL_CTL_CLKR_MASK, val) + 1);
+ spin_unlock_irqrestore(&pll->lock, flags);
+ if (ret)
+ pr_err("PLL '%s' reset timed out\n", clk_hw_get_name(hw));
+
+ return ret;
+}
+
+static void ccu_pll_disable(struct clk_hw *hw)
+{
+ struct ccu_pll *pll = to_ccu_pll(hw);
+ unsigned long flags;
+
+ spin_lock_irqsave(&pll->lock, flags);
+ regmap_update_bits(pll->sys_regs, pll->reg_ctl, CCU_PLL_CTL_EN, 0);
+ spin_unlock_irqrestore(&pll->lock, flags);
+}
+
+static int ccu_pll_is_enabled(struct clk_hw *hw)
+{
+ struct ccu_pll *pll = to_ccu_pll(hw);
+ u32 val = 0;
+
+ regmap_read(pll->sys_regs, pll->reg_ctl, &val);
+
+ return !!(val & CCU_PLL_CTL_EN);
+}
+
+static unsigned long ccu_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct ccu_pll *pll = to_ccu_pll(hw);
+ unsigned long nr, nf, od;
+ u32 val = 0;
+
+ regmap_read(pll->sys_regs, pll->reg_ctl, &val);
+ nr = FIELD_GET(CCU_PLL_CTL_CLKR_MASK, val) + 1;
+ nf = FIELD_GET(CCU_PLL_CTL_CLKF_MASK, val) + 1;
+ od = FIELD_GET(CCU_PLL_CTL_CLKOD_MASK, val) + 1;
+
+ return ccu_pll_calc_freq(parent_rate, nr, nf, od);
+}
+
+static void ccu_pll_calc_factors(unsigned long rate, unsigned long parent_rate,
+ unsigned long *nr, unsigned long *nf,
+ unsigned long *od)
+{
+ unsigned long err, freq, min_err = ULONG_MAX;
+ unsigned long num, denom, n1, d1, nri;
+ unsigned long nr_max, nf_max, od_max;
+
+ /*
+ * Make sure PLL is working with valid input signal (Fdiv). If
+ * you want to speed the function up just reduce CCU_PLL_NR_MAX.
+ * This will cause a worse approximation though.
+ */
+ nri = (parent_rate / CCU_PLL_FDIV_MAX) + 1;
+ nr_max = min(parent_rate / CCU_PLL_FDIV_MIN, CCU_PLL_NR_MAX);
+
+ /*
+ * Find the closest [nr;nf;od] vector, taking into account the
+ * limitations: 1) 700MHz <= Fvco <= 3.5GHz, 2) PLL Od is
+ * either 1 or an even number within the acceptable range (alas 1
+ * is also excluded by the next loop).
+ */
+ for (; nri <= nr_max; ++nri) {
+ /* Use Od factor to fulfill the limitation 2). */
+ num = CCU_PLL_CLKOD_FACTOR * rate;
+ denom = parent_rate / nri;
+
+ /*
+ * Make sure Fvco is within the acceptable range to fulfill
+ * condition 1). Note that due to the CCU_PLL_CLKOD_FACTOR value
+ * the actual upper limit is also divided by that factor.
+ * That's not a big problem for us, since in practice there is no
+ * need for clocks at such a high frequency.
+ */
+ nf_max = min(CCU_PLL_FVCO_MAX / denom, CCU_PLL_NF_MAX);
+ od_max = CCU_PLL_OD_MAX / CCU_PLL_CLKOD_FACTOR;
+
+ /*
+ * Bypass the out-of-bound values, which can't be properly
+ * handled by the rational fraction approximation algorithm.
+ */
+ if (num / denom >= nf_max) {
+ n1 = nf_max;
+ d1 = 1;
+ } else if (denom / num >= od_max) {
+ n1 = 1;
+ d1 = od_max;
+ } else {
+ rational_best_approximation(num, denom, nf_max, od_max,
+ &n1, &d1);
+ }
+
+ /* Select the best approximation of the target rate. */
+ freq = ccu_pll_calc_freq(parent_rate, nri, n1, d1);
+ err = abs((int64_t)freq - num);
+ if (err < min_err) {
+ min_err = err;
+ *nr = nri;
+ *nf = n1;
+ *od = CCU_PLL_CLKOD_FACTOR * d1;
+ }
+ }
+}
+
+static long ccu_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ unsigned long nr = 1, nf = 1, od = 1;
+
+ ccu_pll_calc_factors(rate, *parent_rate, &nr, &nf, &od);
+
+ return ccu_pll_calc_freq(*parent_rate, nr, nf, od);
+}
+
+/*
+ * This method is used for PLLs which support on-the-fly divider
+ * adjustment, so there is no need to gate such clocks.
+ */
+static int ccu_pll_set_rate_reset(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct ccu_pll *pll = to_ccu_pll(hw);
+ unsigned long nr, nf, od;
+ unsigned long flags;
+ u32 mask, val;
+ int ret;
+
+ ccu_pll_calc_factors(rate, parent_rate, &nr, &nf, &od);
+
+ mask = CCU_PLL_CTL_CLKR_MASK | CCU_PLL_CTL_CLKF_MASK |
+ CCU_PLL_CTL_CLKOD_MASK;
+ val = FIELD_PREP(CCU_PLL_CTL_CLKR_MASK, nr - 1) |
+ FIELD_PREP(CCU_PLL_CTL_CLKF_MASK, nf - 1) |
+ FIELD_PREP(CCU_PLL_CTL_CLKOD_MASK, od - 1);
+
+ spin_lock_irqsave(&pll->lock, flags);
+ regmap_update_bits(pll->sys_regs, pll->reg_ctl, mask, val);
+ ret = ccu_pll_reset(pll, parent_rate, nr);
+ spin_unlock_irqrestore(&pll->lock, flags);
+ if (ret)
+ pr_err("PLL '%s' reset timed out\n", clk_hw_get_name(hw));
+
+ return ret;
+}
+
+/*
+ * This method is used for PLLs, which don't support the on-the-fly dividers
+ * adjustment. So the corresponding clocks are supposed to be gated first.
+ */
+static int ccu_pll_set_rate_norst(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct ccu_pll *pll = to_ccu_pll(hw);
+ unsigned long nr, nf, od;
+ unsigned long flags;
+ u32 mask, val;
+
+ ccu_pll_calc_factors(rate, parent_rate, &nr, &nf, &od);
+
+ /*
+ * Disable PLL if it was enabled by default or left enabled by the
+ * system bootloader.
+ */
+ mask = CCU_PLL_CTL_CLKR_MASK | CCU_PLL_CTL_CLKF_MASK |
+ CCU_PLL_CTL_CLKOD_MASK | CCU_PLL_CTL_EN;
+ val = FIELD_PREP(CCU_PLL_CTL_CLKR_MASK, nr - 1) |
+ FIELD_PREP(CCU_PLL_CTL_CLKF_MASK, nf - 1) |
+ FIELD_PREP(CCU_PLL_CTL_CLKOD_MASK, od - 1);
+
+ spin_lock_irqsave(&pll->lock, flags);
+ regmap_update_bits(pll->sys_regs, pll->reg_ctl, mask, val);
+ spin_unlock_irqrestore(&pll->lock, flags);
+
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+struct ccu_pll_dbgfs_bit {
+ struct ccu_pll *pll;
+ const char *name;
+ unsigned int reg;
+ u32 mask;
+};
+
+struct ccu_pll_dbgfs_fld {
+ struct ccu_pll *pll;
+ const char *name;
+ unsigned int reg;
+ unsigned int lsb;
+ u32 mask;
+ u32 min;
+ u32 max;
+};
+
+#define CCU_PLL_DBGFS_BIT_ATTR(_name, _reg, _mask) \
+ { \
+ .name = _name, \
+ .reg = _reg, \
+ .mask = _mask \
+ }
+
+#define CCU_PLL_DBGFS_FLD_ATTR(_name, _reg, _lsb, _mask, _min, _max) \
+ { \
+ .name = _name, \
+ .reg = _reg, \
+ .lsb = _lsb, \
+ .mask = _mask, \
+ .min = _min, \
+ .max = _max \
+ }
+
+static const struct ccu_pll_dbgfs_bit ccu_pll_bits[] = {
+ CCU_PLL_DBGFS_BIT_ATTR("pll_en", CCU_PLL_CTL, CCU_PLL_CTL_EN),
+ CCU_PLL_DBGFS_BIT_ATTR("pll_rst", CCU_PLL_CTL, CCU_PLL_CTL_RST),
+ CCU_PLL_DBGFS_BIT_ATTR("pll_bypass", CCU_PLL_CTL, CCU_PLL_CTL_BYPASS),
+ CCU_PLL_DBGFS_BIT_ATTR("pll_lock", CCU_PLL_CTL, CCU_PLL_CTL_LOCK)
+};
+
+#define CCU_PLL_DBGFS_BIT_NUM ARRAY_SIZE(ccu_pll_bits)
+
+static const struct ccu_pll_dbgfs_fld ccu_pll_flds[] = {
+ CCU_PLL_DBGFS_FLD_ATTR("pll_nr", CCU_PLL_CTL, CCU_PLL_CTL_CLKR_FLD,
+ CCU_PLL_CTL_CLKR_MASK, 1, CCU_PLL_NR_MAX),
+ CCU_PLL_DBGFS_FLD_ATTR("pll_nf", CCU_PLL_CTL, CCU_PLL_CTL_CLKF_FLD,
+ CCU_PLL_CTL_CLKF_MASK, 1, CCU_PLL_NF_MAX),
+ CCU_PLL_DBGFS_FLD_ATTR("pll_od", CCU_PLL_CTL, CCU_PLL_CTL_CLKOD_FLD,
+ CCU_PLL_CTL_CLKOD_MASK, 1, CCU_PLL_OD_MAX),
+ CCU_PLL_DBGFS_FLD_ATTR("pll_nb", CCU_PLL_CTL1, CCU_PLL_CTL1_BWADJ_FLD,
+ CCU_PLL_CTL1_BWADJ_MASK, 1, CCU_PLL_NB_MAX)
+};
+
+#define CCU_PLL_DBGFS_FLD_NUM ARRAY_SIZE(ccu_pll_flds)
+
+/*
+ * It can be dangerous to change the PLL settings behind the clock framework's
+ * back, so we don't provide any Kconfig-based compile-time option to enable
+ * this feature.
+ */
+#undef CCU_PLL_ALLOW_WRITE_DEBUGFS
+#ifdef CCU_PLL_ALLOW_WRITE_DEBUGFS
+
+static int ccu_pll_dbgfs_bit_set(void *priv, u64 val)
+{
+ const struct ccu_pll_dbgfs_bit *bit = priv;
+ struct ccu_pll *pll = bit->pll;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pll->lock, flags);
+ regmap_update_bits(pll->sys_regs, pll->reg_ctl + bit->reg,
+ bit->mask, val ? bit->mask : 0);
+ spin_unlock_irqrestore(&pll->lock, flags);
+
+ return 0;
+}
+
+static int ccu_pll_dbgfs_fld_set(void *priv, u64 val)
+{
+ struct ccu_pll_dbgfs_fld *fld = priv;
+ struct ccu_pll *pll = fld->pll;
+ unsigned long flags;
+ u32 data;
+
+ val = clamp_t(u64, val, fld->min, fld->max);
+ data = ((val - 1) << fld->lsb) & fld->mask;
+
+ spin_lock_irqsave(&pll->lock, flags);
+ regmap_update_bits(pll->sys_regs, pll->reg_ctl + fld->reg, fld->mask,
+ data);
+ spin_unlock_irqrestore(&pll->lock, flags);
+
+ return 0;
+}
+
+#define ccu_pll_dbgfs_mode 0644
+
+#else /* !CCU_PLL_ALLOW_WRITE_DEBUGFS */
+
+#define ccu_pll_dbgfs_bit_set NULL
+#define ccu_pll_dbgfs_fld_set NULL
+#define ccu_pll_dbgfs_mode 0444
+
+#endif /* !CCU_PLL_ALLOW_WRITE_DEBUGFS */
+
+static int ccu_pll_dbgfs_bit_get(void *priv, u64 *val)
+{
+ struct ccu_pll_dbgfs_bit *bit = priv;
+ struct ccu_pll *pll = bit->pll;
+ u32 data = 0;
+
+ regmap_read(pll->sys_regs, pll->reg_ctl + bit->reg, &data);
+ *val = !!(data & bit->mask);
+
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(ccu_pll_dbgfs_bit_fops,
+ ccu_pll_dbgfs_bit_get, ccu_pll_dbgfs_bit_set, "%llu\n");
+
+static int ccu_pll_dbgfs_fld_get(void *priv, u64 *val)
+{
+ struct ccu_pll_dbgfs_fld *fld = priv;
+ struct ccu_pll *pll = fld->pll;
+ u32 data = 0;
+
+ regmap_read(pll->sys_regs, pll->reg_ctl + fld->reg, &data);
+ *val = ((data & fld->mask) >> fld->lsb) + 1;
+
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(ccu_pll_dbgfs_fld_fops,
+ ccu_pll_dbgfs_fld_get, ccu_pll_dbgfs_fld_set, "%llu\n");
+
+static void ccu_pll_debug_init(struct clk_hw *hw, struct dentry *dentry)
+{
+ struct ccu_pll *pll = to_ccu_pll(hw);
+ struct ccu_pll_dbgfs_bit *bits;
+ struct ccu_pll_dbgfs_fld *flds;
+ int idx;
+
+ bits = kcalloc(CCU_PLL_DBGFS_BIT_NUM, sizeof(*bits), GFP_KERNEL);
+ if (!bits)
+ return;
+
+ for (idx = 0; idx < CCU_PLL_DBGFS_BIT_NUM; ++idx) {
+ bits[idx] = ccu_pll_bits[idx];
+ bits[idx].pll = pll;
+
+ debugfs_create_file_unsafe(bits[idx].name, ccu_pll_dbgfs_mode,
+ dentry, &bits[idx],
+ &ccu_pll_dbgfs_bit_fops);
+ }
+
+ flds = kcalloc(CCU_PLL_DBGFS_FLD_NUM, sizeof(*flds), GFP_KERNEL);
+ if (!flds)
+ return;
+
+ for (idx = 0; idx < CCU_PLL_DBGFS_FLD_NUM; ++idx) {
+ flds[idx] = ccu_pll_flds[idx];
+ flds[idx].pll = pll;
+
+ debugfs_create_file_unsafe(flds[idx].name, ccu_pll_dbgfs_mode,
+ dentry, &flds[idx],
+ &ccu_pll_dbgfs_fld_fops);
+ }
+}
+
+#else /* !CONFIG_DEBUG_FS */
+
+#define ccu_pll_debug_init NULL
+
+#endif /* !CONFIG_DEBUG_FS */
+
+static const struct clk_ops ccu_pll_gate_to_set_ops = {
+ .enable = ccu_pll_enable,
+ .disable = ccu_pll_disable,
+ .is_enabled = ccu_pll_is_enabled,
+ .recalc_rate = ccu_pll_recalc_rate,
+ .round_rate = ccu_pll_round_rate,
+ .set_rate = ccu_pll_set_rate_norst,
+ .debug_init = ccu_pll_debug_init
+};
+
+static const struct clk_ops ccu_pll_straight_set_ops = {
+ .enable = ccu_pll_enable,
+ .disable = ccu_pll_disable,
+ .is_enabled = ccu_pll_is_enabled,
+ .recalc_rate = ccu_pll_recalc_rate,
+ .round_rate = ccu_pll_round_rate,
+ .set_rate = ccu_pll_set_rate_reset,
+ .debug_init = ccu_pll_debug_init
+};
+
+struct ccu_pll *ccu_pll_hw_register(const struct ccu_pll_init_data *pll_init)
+{
+ struct clk_parent_data parent_data = { };
+ struct clk_init_data hw_init = { };
+ struct ccu_pll *pll;
+ int ret;
+
+ if (!pll_init)
+ return ERR_PTR(-EINVAL);
+
+ pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+ if (!pll)
+ return ERR_PTR(-ENOMEM);
+
+ /*
+	 * Note: since the Baikal-T1 System Controller registers are MMIO-backed,
+	 * we don't check the regmap IO operations' return status, because it
+	 * must be zero anyway.
+ */
+ pll->hw.init = &hw_init;
+ pll->reg_ctl = pll_init->base + CCU_PLL_CTL;
+ pll->reg_ctl1 = pll_init->base + CCU_PLL_CTL1;
+ pll->sys_regs = pll_init->sys_regs;
+ pll->id = pll_init->id;
+ spin_lock_init(&pll->lock);
+
+ hw_init.name = pll_init->name;
+ hw_init.flags = pll_init->flags;
+
+ if (hw_init.flags & CLK_SET_RATE_GATE)
+ hw_init.ops = &ccu_pll_gate_to_set_ops;
+ else
+ hw_init.ops = &ccu_pll_straight_set_ops;
+
+ if (!pll_init->parent_name) {
+ ret = -EINVAL;
+ goto err_free_pll;
+ }
+ parent_data.fw_name = pll_init->parent_name;
+ hw_init.parent_data = &parent_data;
+ hw_init.num_parents = 1;
+
+ ret = of_clk_hw_register(pll_init->np, &pll->hw);
+ if (ret)
+ goto err_free_pll;
+
+ return pll;
+
+err_free_pll:
+ kfree(pll);
+
+ return ERR_PTR(ret);
+}
+
+void ccu_pll_hw_unregister(struct ccu_pll *pll)
+{
+ clk_hw_unregister(&pll->hw);
+
+ kfree(pll);
+}
diff --git a/drivers/clk/baikal-t1/ccu-pll.h b/drivers/clk/baikal-t1/ccu-pll.h
new file mode 100644
index 000000000000..76cd9132a219
--- /dev/null
+++ b/drivers/clk/baikal-t1/ccu-pll.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+ *
+ * Baikal-T1 CCU PLL interface driver
+ */
+#ifndef __CLK_BT1_CCU_PLL_H__
+#define __CLK_BT1_CCU_PLL_H__
+
+#include <linux/clk-provider.h>
+#include <linux/spinlock.h>
+#include <linux/regmap.h>
+#include <linux/bits.h>
+#include <linux/of.h>
+
+/*
+ * struct ccu_pll_init_data - CCU PLL initialization data
+ * @id: Clock private identifier.
+ * @name: Clock name.
+ * @parent_name: Clock's parent name in a fw node.
+ * @base: PLL registers base address with respect to the sys_regs base.
+ * @sys_regs: Baikal-T1 System Controller registers map.
+ * @np: Pointer to the node describing the CCU PLLs.
+ * @flags: PLL clock flags.
+ */
+struct ccu_pll_init_data {
+ unsigned int id;
+ const char *name;
+ const char *parent_name;
+ unsigned int base;
+ struct regmap *sys_regs;
+ struct device_node *np;
+ unsigned long flags;
+};
+
+/*
+ * struct ccu_pll - CCU PLL descriptor
+ * @hw: clk_hw of the PLL.
+ * @id: Clock private identifier.
+ * @reg_ctl: PLL control register base.
+ * @reg_ctl1: PLL control1 register base.
+ * @sys_regs: Baikal-T1 System Controller registers map.
+ * @lock: PLL state change spin-lock.
+ */
+struct ccu_pll {
+ struct clk_hw hw;
+ unsigned int id;
+ unsigned int reg_ctl;
+ unsigned int reg_ctl1;
+ struct regmap *sys_regs;
+ spinlock_t lock;
+};
+#define to_ccu_pll(_hw) container_of(_hw, struct ccu_pll, hw)
+
+static inline struct clk_hw *ccu_pll_get_clk_hw(struct ccu_pll *pll)
+{
+ return pll ? &pll->hw : NULL;
+}
+
+struct ccu_pll *ccu_pll_hw_register(const struct ccu_pll_init_data *init);
+
+void ccu_pll_hw_unregister(struct ccu_pll *pll);
+
+#endif /* __CLK_BT1_CCU_PLL_H__ */
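For reference, a minimal consumer-side sketch of this interface (the real user is clk-ccu-pll.c further down in this patch; the id, name, register offset and flags below are made-up placeholders, not values taken from the patch):

	#include <linux/err.h>
	#include "ccu-pll.h"

	static struct ccu_pll *example_pll;

	static int example_register_pll(struct device_node *np, struct regmap *sys_regs)
	{
		/* Hypothetical init data - the real tables live in clk-ccu-pll.c */
		const struct ccu_pll_init_data init = {
			.id = 0,
			.name = "example_pll",
			.parent_name = "ref_clk",
			.base = 0x000,
			.sys_regs = sys_regs,
			.np = np,
			.flags = CLK_SET_RATE_GATE,
		};

		example_pll = ccu_pll_hw_register(&init);
		if (IS_ERR(example_pll))
			return PTR_ERR(example_pll);

		return 0;
	}

A matching teardown path would simply call ccu_pll_hw_unregister(example_pll).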
diff --git a/drivers/clk/baikal-t1/clk-ccu-div.c b/drivers/clk/baikal-t1/clk-ccu-div.c
new file mode 100644
index 000000000000..f141fda12b09
--- /dev/null
+++ b/drivers/clk/baikal-t1/clk-ccu-div.c
@@ -0,0 +1,485 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+ *
+ * Authors:
+ * Serge Semin <Sergey.Semin@baikalelectronics.ru>
+ * Dmitry Dunaev <dmitry.dunaev@baikalelectronics.ru>
+ *
+ * Baikal-T1 CCU Dividers clock driver
+ */
+
+#define pr_fmt(fmt) "bt1-ccu-div: " fmt
+
+#include <linux/kernel.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/clk-provider.h>
+#include <linux/reset-controller.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/ioport.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/bt1-ccu.h>
+#include <dt-bindings/reset/bt1-ccu.h>
+
+#include "ccu-div.h"
+
+#define CCU_AXI_MAIN_BASE 0x030
+#define CCU_AXI_DDR_BASE 0x034
+#define CCU_AXI_SATA_BASE 0x038
+#define CCU_AXI_GMAC0_BASE 0x03C
+#define CCU_AXI_GMAC1_BASE 0x040
+#define CCU_AXI_XGMAC_BASE 0x044
+#define CCU_AXI_PCIE_M_BASE 0x048
+#define CCU_AXI_PCIE_S_BASE 0x04C
+#define CCU_AXI_USB_BASE 0x050
+#define CCU_AXI_HWA_BASE 0x054
+#define CCU_AXI_SRAM_BASE 0x058
+
+#define CCU_SYS_SATA_REF_BASE 0x060
+#define CCU_SYS_APB_BASE 0x064
+#define CCU_SYS_GMAC0_BASE 0x068
+#define CCU_SYS_GMAC1_BASE 0x06C
+#define CCU_SYS_XGMAC_BASE 0x070
+#define CCU_SYS_USB_BASE 0x074
+#define CCU_SYS_PVT_BASE 0x078
+#define CCU_SYS_HWA_BASE 0x07C
+#define CCU_SYS_UART_BASE 0x084
+#define CCU_SYS_TIMER0_BASE 0x088
+#define CCU_SYS_TIMER1_BASE 0x08C
+#define CCU_SYS_TIMER2_BASE 0x090
+#define CCU_SYS_WDT_BASE 0x150
+
+#define CCU_DIV_VAR_INFO(_id, _name, _pname, _base, _width, _flags, _features) \
+ { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _pname, \
+ .base = _base, \
+ .type = CCU_DIV_VAR, \
+ .width = _width, \
+ .flags = _flags, \
+ .features = _features \
+ }
+
+#define CCU_DIV_GATE_INFO(_id, _name, _pname, _base, _divider) \
+ { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _pname, \
+ .base = _base, \
+ .type = CCU_DIV_GATE, \
+ .divider = _divider \
+ }
+
+#define CCU_DIV_FIXED_INFO(_id, _name, _pname, _divider) \
+ { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _pname, \
+ .type = CCU_DIV_FIXED, \
+ .divider = _divider \
+ }
+
+#define CCU_DIV_RST_MAP(_rst_id, _clk_id) \
+ { \
+ .rst_id = _rst_id, \
+ .clk_id = _clk_id \
+ }
+
+struct ccu_div_info {
+ unsigned int id;
+ const char *name;
+ const char *parent_name;
+ unsigned int base;
+ enum ccu_div_type type;
+ union {
+ unsigned int width;
+ unsigned int divider;
+ };
+ unsigned long flags;
+ unsigned long features;
+};
+
+struct ccu_div_rst_map {
+ unsigned int rst_id;
+ unsigned int clk_id;
+};
+
+struct ccu_div_data {
+ struct device_node *np;
+ struct regmap *sys_regs;
+
+ unsigned int divs_num;
+ const struct ccu_div_info *divs_info;
+ struct ccu_div **divs;
+
+ unsigned int rst_num;
+ const struct ccu_div_rst_map *rst_map;
+ struct reset_controller_dev rcdev;
+};
+#define to_ccu_div_data(_rcdev) container_of(_rcdev, struct ccu_div_data, rcdev)
+
+/*
+ * AXI Main Interconnect (axi_main_clk) and DDR AXI-bus (axi_ddr_clk) clocks
+ * must be left enabled in any case: the former clocks the bus between the
+ * CPU cores and the rest of the SoC components, while the latter clocks the
+ * AXI-bus between the DDR controller and the Main Interconnect. If either of
+ * them were gated, the system would simply stop working, which is why both
+ * are marked as critical.
+ */
+static const struct ccu_div_info axi_info[] = {
+ CCU_DIV_VAR_INFO(CCU_AXI_MAIN_CLK, "axi_main_clk", "pcie_clk",
+ CCU_AXI_MAIN_BASE, 4,
+ CLK_IS_CRITICAL, CCU_DIV_RESET_DOMAIN),
+ CCU_DIV_VAR_INFO(CCU_AXI_DDR_CLK, "axi_ddr_clk", "sata_clk",
+ CCU_AXI_DDR_BASE, 4,
+ CLK_IS_CRITICAL | CLK_SET_RATE_GATE,
+ CCU_DIV_RESET_DOMAIN),
+ CCU_DIV_VAR_INFO(CCU_AXI_SATA_CLK, "axi_sata_clk", "sata_clk",
+ CCU_AXI_SATA_BASE, 4,
+ CLK_SET_RATE_GATE, CCU_DIV_RESET_DOMAIN),
+ CCU_DIV_VAR_INFO(CCU_AXI_GMAC0_CLK, "axi_gmac0_clk", "eth_clk",
+ CCU_AXI_GMAC0_BASE, 4,
+ CLK_SET_RATE_GATE, CCU_DIV_RESET_DOMAIN),
+ CCU_DIV_VAR_INFO(CCU_AXI_GMAC1_CLK, "axi_gmac1_clk", "eth_clk",
+ CCU_AXI_GMAC1_BASE, 4,
+ CLK_SET_RATE_GATE, CCU_DIV_RESET_DOMAIN),
+ CCU_DIV_VAR_INFO(CCU_AXI_XGMAC_CLK, "axi_xgmac_clk", "eth_clk",
+ CCU_AXI_XGMAC_BASE, 4,
+ CLK_SET_RATE_GATE, CCU_DIV_RESET_DOMAIN),
+ CCU_DIV_VAR_INFO(CCU_AXI_PCIE_M_CLK, "axi_pcie_m_clk", "pcie_clk",
+ CCU_AXI_PCIE_M_BASE, 4,
+ CLK_SET_RATE_GATE, CCU_DIV_RESET_DOMAIN),
+ CCU_DIV_VAR_INFO(CCU_AXI_PCIE_S_CLK, "axi_pcie_s_clk", "pcie_clk",
+ CCU_AXI_PCIE_S_BASE, 4,
+ CLK_SET_RATE_GATE, CCU_DIV_RESET_DOMAIN),
+ CCU_DIV_VAR_INFO(CCU_AXI_USB_CLK, "axi_usb_clk", "sata_clk",
+ CCU_AXI_USB_BASE, 4,
+ CLK_SET_RATE_GATE, CCU_DIV_RESET_DOMAIN),
+ CCU_DIV_VAR_INFO(CCU_AXI_HWA_CLK, "axi_hwa_clk", "sata_clk",
+ CCU_AXI_HWA_BASE, 4,
+ CLK_SET_RATE_GATE, CCU_DIV_RESET_DOMAIN),
+ CCU_DIV_VAR_INFO(CCU_AXI_SRAM_CLK, "axi_sram_clk", "eth_clk",
+ CCU_AXI_SRAM_BASE, 4,
+ CLK_SET_RATE_GATE, CCU_DIV_RESET_DOMAIN)
+};
+
+static const struct ccu_div_rst_map axi_rst_map[] = {
+ CCU_DIV_RST_MAP(CCU_AXI_MAIN_RST, CCU_AXI_MAIN_CLK),
+ CCU_DIV_RST_MAP(CCU_AXI_DDR_RST, CCU_AXI_DDR_CLK),
+ CCU_DIV_RST_MAP(CCU_AXI_SATA_RST, CCU_AXI_SATA_CLK),
+ CCU_DIV_RST_MAP(CCU_AXI_GMAC0_RST, CCU_AXI_GMAC0_CLK),
+ CCU_DIV_RST_MAP(CCU_AXI_GMAC1_RST, CCU_AXI_GMAC1_CLK),
+ CCU_DIV_RST_MAP(CCU_AXI_XGMAC_RST, CCU_AXI_XGMAC_CLK),
+ CCU_DIV_RST_MAP(CCU_AXI_PCIE_M_RST, CCU_AXI_PCIE_M_CLK),
+ CCU_DIV_RST_MAP(CCU_AXI_PCIE_S_RST, CCU_AXI_PCIE_S_CLK),
+ CCU_DIV_RST_MAP(CCU_AXI_USB_RST, CCU_AXI_USB_CLK),
+ CCU_DIV_RST_MAP(CCU_AXI_HWA_RST, CCU_AXI_HWA_CLK),
+ CCU_DIV_RST_MAP(CCU_AXI_SRAM_RST, CCU_AXI_SRAM_CLK)
+};
+
+/*
+ * The APB-bus clock is marked as critical since the APB bus is the main
+ * communication path for IO operations on the SoC devices' registers.
+ */
+static const struct ccu_div_info sys_info[] = {
+ CCU_DIV_VAR_INFO(CCU_SYS_SATA_REF_CLK, "sys_sata_ref_clk",
+ "sata_clk", CCU_SYS_SATA_REF_BASE, 4,
+ CLK_SET_RATE_GATE,
+ CCU_DIV_SKIP_ONE | CCU_DIV_LOCK_SHIFTED |
+ CCU_DIV_RESET_DOMAIN),
+ CCU_DIV_VAR_INFO(CCU_SYS_APB_CLK, "sys_apb_clk",
+ "pcie_clk", CCU_SYS_APB_BASE, 5,
+ CLK_IS_CRITICAL, CCU_DIV_RESET_DOMAIN),
+ CCU_DIV_GATE_INFO(CCU_SYS_GMAC0_TX_CLK, "sys_gmac0_tx_clk",
+ "eth_clk", CCU_SYS_GMAC0_BASE, 5),
+ CCU_DIV_FIXED_INFO(CCU_SYS_GMAC0_PTP_CLK, "sys_gmac0_ptp_clk",
+ "eth_clk", 10),
+ CCU_DIV_GATE_INFO(CCU_SYS_GMAC1_TX_CLK, "sys_gmac1_tx_clk",
+ "eth_clk", CCU_SYS_GMAC1_BASE, 5),
+ CCU_DIV_FIXED_INFO(CCU_SYS_GMAC1_PTP_CLK, "sys_gmac1_ptp_clk",
+ "eth_clk", 10),
+ CCU_DIV_GATE_INFO(CCU_SYS_XGMAC_REF_CLK, "sys_xgmac_ref_clk",
+ "eth_clk", CCU_SYS_XGMAC_BASE, 8),
+ CCU_DIV_FIXED_INFO(CCU_SYS_XGMAC_PTP_CLK, "sys_xgmac_ptp_clk",
+ "eth_clk", 10),
+ CCU_DIV_GATE_INFO(CCU_SYS_USB_CLK, "sys_usb_clk",
+ "eth_clk", CCU_SYS_USB_BASE, 10),
+ CCU_DIV_VAR_INFO(CCU_SYS_PVT_CLK, "sys_pvt_clk",
+ "ref_clk", CCU_SYS_PVT_BASE, 5,
+ CLK_SET_RATE_GATE, 0),
+ CCU_DIV_VAR_INFO(CCU_SYS_HWA_CLK, "sys_hwa_clk",
+ "sata_clk", CCU_SYS_HWA_BASE, 4,
+ CLK_SET_RATE_GATE, 0),
+ CCU_DIV_VAR_INFO(CCU_SYS_UART_CLK, "sys_uart_clk",
+ "eth_clk", CCU_SYS_UART_BASE, 17,
+ CLK_SET_RATE_GATE, 0),
+ CCU_DIV_FIXED_INFO(CCU_SYS_I2C1_CLK, "sys_i2c1_clk",
+ "eth_clk", 10),
+ CCU_DIV_FIXED_INFO(CCU_SYS_I2C2_CLK, "sys_i2c2_clk",
+ "eth_clk", 10),
+ CCU_DIV_FIXED_INFO(CCU_SYS_GPIO_CLK, "sys_gpio_clk",
+ "ref_clk", 25),
+ CCU_DIV_VAR_INFO(CCU_SYS_TIMER0_CLK, "sys_timer0_clk",
+ "ref_clk", CCU_SYS_TIMER0_BASE, 17,
+ CLK_SET_RATE_GATE, 0),
+ CCU_DIV_VAR_INFO(CCU_SYS_TIMER1_CLK, "sys_timer1_clk",
+ "ref_clk", CCU_SYS_TIMER1_BASE, 17,
+ CLK_SET_RATE_GATE, 0),
+ CCU_DIV_VAR_INFO(CCU_SYS_TIMER2_CLK, "sys_timer2_clk",
+ "ref_clk", CCU_SYS_TIMER2_BASE, 17,
+ CLK_SET_RATE_GATE, 0),
+ CCU_DIV_VAR_INFO(CCU_SYS_WDT_CLK, "sys_wdt_clk",
+ "eth_clk", CCU_SYS_WDT_BASE, 17,
+ CLK_SET_RATE_GATE, CCU_DIV_SKIP_ONE_TO_THREE)
+};
+
+static const struct ccu_div_rst_map sys_rst_map[] = {
+ CCU_DIV_RST_MAP(CCU_SYS_SATA_REF_RST, CCU_SYS_SATA_REF_CLK),
+ CCU_DIV_RST_MAP(CCU_SYS_APB_RST, CCU_SYS_APB_CLK),
+};
+
+static struct ccu_div *ccu_div_find_desc(struct ccu_div_data *data,
+ unsigned int clk_id)
+{
+ struct ccu_div *div;
+ int idx;
+
+ for (idx = 0; idx < data->divs_num; ++idx) {
+ div = data->divs[idx];
+ if (div && div->id == clk_id)
+ return div;
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static int ccu_div_reset(struct reset_controller_dev *rcdev,
+ unsigned long rst_id)
+{
+ struct ccu_div_data *data = to_ccu_div_data(rcdev);
+ const struct ccu_div_rst_map *map;
+ struct ccu_div *div;
+ int idx, ret;
+
+ for (idx = 0, map = data->rst_map; idx < data->rst_num; ++idx, ++map) {
+ if (map->rst_id == rst_id)
+ break;
+ }
+ if (idx == data->rst_num) {
+ pr_err("Invalid reset ID %lu specified\n", rst_id);
+ return -EINVAL;
+ }
+
+ div = ccu_div_find_desc(data, map->clk_id);
+ if (IS_ERR(div)) {
+ pr_err("Invalid clock ID %d in mapping\n", map->clk_id);
+ return PTR_ERR(div);
+ }
+
+ ret = ccu_div_reset_domain(div);
+ if (ret) {
+ pr_err("Reset isn't supported by divider %s\n",
+ clk_hw_get_name(ccu_div_get_clk_hw(div)));
+ }
+
+ return ret;
+}
+
+static const struct reset_control_ops ccu_div_rst_ops = {
+ .reset = ccu_div_reset,
+};
+
+static struct ccu_div_data *ccu_div_create_data(struct device_node *np)
+{
+ struct ccu_div_data *data;
+ int ret;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return ERR_PTR(-ENOMEM);
+
+ data->np = np;
+ if (of_device_is_compatible(np, "baikal,bt1-ccu-axi")) {
+ data->divs_num = ARRAY_SIZE(axi_info);
+ data->divs_info = axi_info;
+ data->rst_num = ARRAY_SIZE(axi_rst_map);
+ data->rst_map = axi_rst_map;
+ } else if (of_device_is_compatible(np, "baikal,bt1-ccu-sys")) {
+ data->divs_num = ARRAY_SIZE(sys_info);
+ data->divs_info = sys_info;
+ data->rst_num = ARRAY_SIZE(sys_rst_map);
+ data->rst_map = sys_rst_map;
+ } else {
+ pr_err("Incompatible DT node '%s' specified\n",
+ of_node_full_name(np));
+ ret = -EINVAL;
+ goto err_kfree_data;
+ }
+
+ data->divs = kcalloc(data->divs_num, sizeof(*data->divs), GFP_KERNEL);
+ if (!data->divs) {
+ ret = -ENOMEM;
+ goto err_kfree_data;
+ }
+
+ return data;
+
+err_kfree_data:
+ kfree(data);
+
+ return ERR_PTR(ret);
+}
+
+static void ccu_div_free_data(struct ccu_div_data *data)
+{
+ kfree(data->divs);
+
+ kfree(data);
+}
+
+static int ccu_div_find_sys_regs(struct ccu_div_data *data)
+{
+ data->sys_regs = syscon_node_to_regmap(data->np->parent);
+ if (IS_ERR(data->sys_regs)) {
+ pr_err("Failed to find syscon regs for '%s'\n",
+ of_node_full_name(data->np));
+ return PTR_ERR(data->sys_regs);
+ }
+
+ return 0;
+}
+
+static struct clk_hw *ccu_div_of_clk_hw_get(struct of_phandle_args *clkspec,
+ void *priv)
+{
+ struct ccu_div_data *data = priv;
+ struct ccu_div *div;
+ unsigned int clk_id;
+
+ clk_id = clkspec->args[0];
+ div = ccu_div_find_desc(data, clk_id);
+ if (IS_ERR(div)) {
+ pr_info("Invalid clock ID %d specified\n", clk_id);
+ return ERR_CAST(div);
+ }
+
+ return ccu_div_get_clk_hw(div);
+}
+
+static int ccu_div_clk_register(struct ccu_div_data *data)
+{
+ int idx, ret;
+
+ for (idx = 0; idx < data->divs_num; ++idx) {
+ const struct ccu_div_info *info = &data->divs_info[idx];
+ struct ccu_div_init_data init = {0};
+
+ init.id = info->id;
+ init.name = info->name;
+ init.parent_name = info->parent_name;
+ init.np = data->np;
+ init.type = info->type;
+ init.flags = info->flags;
+ init.features = info->features;
+
+ if (init.type == CCU_DIV_VAR) {
+ init.base = info->base;
+ init.sys_regs = data->sys_regs;
+ init.width = info->width;
+ } else if (init.type == CCU_DIV_GATE) {
+ init.base = info->base;
+ init.sys_regs = data->sys_regs;
+ init.divider = info->divider;
+ } else {
+ init.divider = info->divider;
+ }
+
+ data->divs[idx] = ccu_div_hw_register(&init);
+ if (IS_ERR(data->divs[idx])) {
+ ret = PTR_ERR(data->divs[idx]);
+ pr_err("Couldn't register divider '%s' hw\n",
+ init.name);
+ goto err_hw_unregister;
+ }
+ }
+
+ ret = of_clk_add_hw_provider(data->np, ccu_div_of_clk_hw_get, data);
+ if (ret) {
+ pr_err("Couldn't register dividers '%s' clock provider\n",
+ of_node_full_name(data->np));
+ goto err_hw_unregister;
+ }
+
+ return 0;
+
+err_hw_unregister:
+ for (--idx; idx >= 0; --idx)
+ ccu_div_hw_unregister(data->divs[idx]);
+
+ return ret;
+}
+
+static void ccu_div_clk_unregister(struct ccu_div_data *data)
+{
+ int idx;
+
+ of_clk_del_provider(data->np);
+
+ for (idx = 0; idx < data->divs_num; ++idx)
+ ccu_div_hw_unregister(data->divs[idx]);
+}
+
+static int ccu_div_rst_register(struct ccu_div_data *data)
+{
+ int ret;
+
+ data->rcdev.ops = &ccu_div_rst_ops;
+ data->rcdev.of_node = data->np;
+ data->rcdev.nr_resets = data->rst_num;
+
+ ret = reset_controller_register(&data->rcdev);
+ if (ret)
+ pr_err("Couldn't register divider '%s' reset controller\n",
+ of_node_full_name(data->np));
+
+ return ret;
+}
+
+static void ccu_div_init(struct device_node *np)
+{
+ struct ccu_div_data *data;
+ int ret;
+
+ data = ccu_div_create_data(np);
+ if (IS_ERR(data))
+ return;
+
+ ret = ccu_div_find_sys_regs(data);
+ if (ret)
+ goto err_free_data;
+
+ ret = ccu_div_clk_register(data);
+ if (ret)
+ goto err_free_data;
+
+ ret = ccu_div_rst_register(data);
+ if (ret)
+ goto err_clk_unregister;
+
+ return;
+
+err_clk_unregister:
+ ccu_div_clk_unregister(data);
+
+err_free_data:
+ ccu_div_free_data(data);
+}
+
+CLK_OF_DECLARE(ccu_axi, "baikal,bt1-ccu-axi", ccu_div_init);
+CLK_OF_DECLARE(ccu_sys, "baikal,bt1-ccu-sys", ccu_div_init);
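The reset controller half of this driver simply maps each exported reset line onto the corresponding divider's reset domain. A rough consumer-side sketch, assuming a peripheral whose DT node references one of these reset lines (the device pointer and error handling are illustrative only, not part of the patch):

	#include <linux/err.h>
	#include <linux/reset.h>

	static int example_reset_peripheral(struct device *dev)
	{
		struct reset_control *rst;

		/* Resolved via the 'resets' phandle in the consumer's DT node */
		rst = devm_reset_control_get_exclusive(dev, NULL);
		if (IS_ERR(rst))
			return PTR_ERR(rst);

		/* Ends up in ccu_div_reset() and then ccu_div_reset_domain() */
		return reset_control_reset(rst);
	}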
diff --git a/drivers/clk/baikal-t1/clk-ccu-pll.c b/drivers/clk/baikal-t1/clk-ccu-pll.c
new file mode 100644
index 000000000000..1eec8c0b8f50
--- /dev/null
+++ b/drivers/clk/baikal-t1/clk-ccu-pll.c
@@ -0,0 +1,204 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+ *
+ * Authors:
+ * Serge Semin <Sergey.Semin@baikalelectronics.ru>
+ * Dmitry Dunaev <dmitry.dunaev@baikalelectronics.ru>
+ *
+ * Baikal-T1 CCU PLL clocks driver
+ */
+
+#define pr_fmt(fmt) "bt1-ccu-pll: " fmt
+
+#include <linux/kernel.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/clk-provider.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/ioport.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/bt1-ccu.h>
+
+#include "ccu-pll.h"
+
+#define CCU_CPU_PLL_BASE 0x000
+#define CCU_SATA_PLL_BASE 0x008
+#define CCU_DDR_PLL_BASE 0x010
+#define CCU_PCIE_PLL_BASE 0x018
+#define CCU_ETH_PLL_BASE 0x020
+
+#define CCU_PLL_INFO(_id, _name, _pname, _base, _flags) \
+ { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _pname, \
+ .base = _base, \
+ .flags = _flags \
+ }
+
+#define CCU_PLL_NUM ARRAY_SIZE(pll_info)
+
+struct ccu_pll_info {
+ unsigned int id;
+ const char *name;
+ const char *parent_name;
+ unsigned int base;
+ unsigned long flags;
+};
+
+/*
+ * Mark all PLLs as critical except the Ethernet one. The CPU and DDR PLLs
+ * source the CPU-core and DDR-controller reference clocks, so they obviously
+ * must never be gated. The SATA and PCIe PLLs are the parents of the APB-bus
+ * and DDR-controller AXI-bus clocks; if they were gated, the system would be
+ * unusable.
+ */
+static const struct ccu_pll_info pll_info[] = {
+ CCU_PLL_INFO(CCU_CPU_PLL, "cpu_pll", "ref_clk", CCU_CPU_PLL_BASE,
+ CLK_IS_CRITICAL),
+ CCU_PLL_INFO(CCU_SATA_PLL, "sata_pll", "ref_clk", CCU_SATA_PLL_BASE,
+ CLK_IS_CRITICAL | CLK_SET_RATE_GATE),
+ CCU_PLL_INFO(CCU_DDR_PLL, "ddr_pll", "ref_clk", CCU_DDR_PLL_BASE,
+ CLK_IS_CRITICAL | CLK_SET_RATE_GATE),
+ CCU_PLL_INFO(CCU_PCIE_PLL, "pcie_pll", "ref_clk", CCU_PCIE_PLL_BASE,
+ CLK_IS_CRITICAL),
+ CCU_PLL_INFO(CCU_ETH_PLL, "eth_pll", "ref_clk", CCU_ETH_PLL_BASE,
+ CLK_SET_RATE_GATE)
+};
+
+struct ccu_pll_data {
+ struct device_node *np;
+ struct regmap *sys_regs;
+ struct ccu_pll *plls[CCU_PLL_NUM];
+};
+
+static struct ccu_pll *ccu_pll_find_desc(struct ccu_pll_data *data,
+ unsigned int clk_id)
+{
+ struct ccu_pll *pll;
+ int idx;
+
+ for (idx = 0; idx < CCU_PLL_NUM; ++idx) {
+ pll = data->plls[idx];
+ if (pll && pll->id == clk_id)
+ return pll;
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static struct ccu_pll_data *ccu_pll_create_data(struct device_node *np)
+{
+ struct ccu_pll_data *data;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return ERR_PTR(-ENOMEM);
+
+ data->np = np;
+
+ return data;
+}
+
+static void ccu_pll_free_data(struct ccu_pll_data *data)
+{
+ kfree(data);
+}
+
+static int ccu_pll_find_sys_regs(struct ccu_pll_data *data)
+{
+ data->sys_regs = syscon_node_to_regmap(data->np->parent);
+ if (IS_ERR(data->sys_regs)) {
+ pr_err("Failed to find syscon regs for '%s'\n",
+ of_node_full_name(data->np));
+ return PTR_ERR(data->sys_regs);
+ }
+
+ return 0;
+}
+
+static struct clk_hw *ccu_pll_of_clk_hw_get(struct of_phandle_args *clkspec,
+ void *priv)
+{
+ struct ccu_pll_data *data = priv;
+ struct ccu_pll *pll;
+ unsigned int clk_id;
+
+ clk_id = clkspec->args[0];
+ pll = ccu_pll_find_desc(data, clk_id);
+ if (IS_ERR(pll)) {
+ pr_info("Invalid PLL clock ID %d specified\n", clk_id);
+ return ERR_CAST(pll);
+ }
+
+ return ccu_pll_get_clk_hw(pll);
+}
+
+static int ccu_pll_clk_register(struct ccu_pll_data *data)
+{
+ int idx, ret;
+
+ for (idx = 0; idx < CCU_PLL_NUM; ++idx) {
+ const struct ccu_pll_info *info = &pll_info[idx];
+ struct ccu_pll_init_data init = {0};
+
+ init.id = info->id;
+ init.name = info->name;
+ init.parent_name = info->parent_name;
+ init.base = info->base;
+ init.sys_regs = data->sys_regs;
+ init.np = data->np;
+ init.flags = info->flags;
+
+ data->plls[idx] = ccu_pll_hw_register(&init);
+ if (IS_ERR(data->plls[idx])) {
+ ret = PTR_ERR(data->plls[idx]);
+ pr_err("Couldn't register PLL hw '%s'\n",
+ init.name);
+ goto err_hw_unregister;
+ }
+ }
+
+ ret = of_clk_add_hw_provider(data->np, ccu_pll_of_clk_hw_get, data);
+ if (ret) {
+ pr_err("Couldn't register PLL provider of '%s'\n",
+ of_node_full_name(data->np));
+ goto err_hw_unregister;
+ }
+
+ return 0;
+
+err_hw_unregister:
+ for (--idx; idx >= 0; --idx)
+ ccu_pll_hw_unregister(data->plls[idx]);
+
+ return ret;
+}
+
+static __init void ccu_pll_init(struct device_node *np)
+{
+ struct ccu_pll_data *data;
+ int ret;
+
+ data = ccu_pll_create_data(np);
+ if (IS_ERR(data))
+ return;
+
+ ret = ccu_pll_find_sys_regs(data);
+ if (ret)
+ goto err_free_data;
+
+ ret = ccu_pll_clk_register(data);
+ if (ret)
+ goto err_free_data;
+
+ return;
+
+err_free_data:
+ ccu_pll_free_data(data);
+}
+CLK_OF_DECLARE(ccu_pll, "baikal,bt1-ccu-pll", ccu_pll_init);
diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
index ded13ccf768e..6bb7efa12037 100644
--- a/drivers/clk/bcm/clk-bcm2835.c
+++ b/drivers/clk/bcm/clk-bcm2835.c
@@ -396,8 +396,8 @@ out:
}
static void bcm2835_debugfs_regset(struct bcm2835_cprman *cprman, u32 base,
- struct debugfs_reg32 *regs, size_t nregs,
- struct dentry *dentry)
+ const struct debugfs_reg32 *regs,
+ size_t nregs, struct dentry *dentry)
{
struct debugfs_regset32 *regset;
@@ -1240,7 +1240,7 @@ static u8 bcm2835_clock_get_parent(struct clk_hw *hw)
return (src & CM_SRC_MASK) >> CM_SRC_SHIFT;
}
-static struct debugfs_reg32 bcm2835_debugfs_clock_reg32[] = {
+static const struct debugfs_reg32 bcm2835_debugfs_clock_reg32[] = {
{
.name = "ctl",
.offset = 0,
@@ -1296,8 +1296,9 @@ static const struct clk_ops bcm2835_vpu_clock_clk_ops = {
};
static struct clk_hw *bcm2835_register_pll(struct bcm2835_cprman *cprman,
- const struct bcm2835_pll_data *data)
+ const void *data)
{
+ const struct bcm2835_pll_data *pll_data = data;
struct bcm2835_pll *pll;
struct clk_init_data init;
int ret;
@@ -1307,7 +1308,7 @@ static struct clk_hw *bcm2835_register_pll(struct bcm2835_cprman *cprman,
/* All of the PLLs derive from the external oscillator. */
init.parent_names = &cprman->real_parent_names[0];
init.num_parents = 1;
- init.name = data->name;
+ init.name = pll_data->name;
init.ops = &bcm2835_pll_clk_ops;
init.flags = CLK_IGNORE_UNUSED;
@@ -1316,7 +1317,7 @@ static struct clk_hw *bcm2835_register_pll(struct bcm2835_cprman *cprman,
return NULL;
pll->cprman = cprman;
- pll->data = data;
+ pll->data = pll_data;
pll->hw.init = &init;
ret = devm_clk_hw_register(cprman->dev, &pll->hw);
@@ -1327,35 +1328,36 @@ static struct clk_hw *bcm2835_register_pll(struct bcm2835_cprman *cprman,
static struct clk_hw *
bcm2835_register_pll_divider(struct bcm2835_cprman *cprman,
- const struct bcm2835_pll_divider_data *data)
+ const void *data)
{
+ const struct bcm2835_pll_divider_data *divider_data = data;
struct bcm2835_pll_divider *divider;
struct clk_init_data init;
const char *divider_name;
int ret;
- if (data->fixed_divider != 1) {
+ if (divider_data->fixed_divider != 1) {
divider_name = devm_kasprintf(cprman->dev, GFP_KERNEL,
- "%s_prediv", data->name);
+ "%s_prediv", divider_data->name);
if (!divider_name)
return NULL;
} else {
- divider_name = data->name;
+ divider_name = divider_data->name;
}
memset(&init, 0, sizeof(init));
- init.parent_names = &data->source_pll;
+ init.parent_names = &divider_data->source_pll;
init.num_parents = 1;
init.name = divider_name;
init.ops = &bcm2835_pll_divider_clk_ops;
- init.flags = data->flags | CLK_IGNORE_UNUSED;
+ init.flags = divider_data->flags | CLK_IGNORE_UNUSED;
divider = devm_kzalloc(cprman->dev, sizeof(*divider), GFP_KERNEL);
if (!divider)
return NULL;
- divider->div.reg = cprman->regs + data->a2w_reg;
+ divider->div.reg = cprman->regs + divider_data->a2w_reg;
divider->div.shift = A2W_PLL_DIV_SHIFT;
divider->div.width = A2W_PLL_DIV_BITS;
divider->div.flags = CLK_DIVIDER_MAX_AT_ZERO;
@@ -1364,7 +1366,7 @@ bcm2835_register_pll_divider(struct bcm2835_cprman *cprman,
divider->div.table = NULL;
divider->cprman = cprman;
- divider->data = data;
+ divider->data = divider_data;
ret = devm_clk_hw_register(cprman->dev, &divider->div.hw);
if (ret)
@@ -1374,20 +1376,22 @@ bcm2835_register_pll_divider(struct bcm2835_cprman *cprman,
* PLLH's channels have a fixed divide by 10 afterwards, which
* is what our consumers are actually using.
*/
- if (data->fixed_divider != 1) {
- return clk_hw_register_fixed_factor(cprman->dev, data->name,
+ if (divider_data->fixed_divider != 1) {
+ return clk_hw_register_fixed_factor(cprman->dev,
+ divider_data->name,
divider_name,
CLK_SET_RATE_PARENT,
1,
- data->fixed_divider);
+ divider_data->fixed_divider);
}
return &divider->div.hw;
}
static struct clk_hw *bcm2835_register_clock(struct bcm2835_cprman *cprman,
- const struct bcm2835_clock_data *data)
+ const void *data)
{
+ const struct bcm2835_clock_data *clock_data = data;
struct bcm2835_clock *clock;
struct clk_init_data init;
const char *parents[1 << CM_SRC_BITS];
@@ -1398,8 +1402,8 @@ static struct clk_hw *bcm2835_register_clock(struct bcm2835_cprman *cprman,
* Replace our strings referencing parent clocks with the
* actual clock-output-name of the parent.
*/
- for (i = 0; i < data->num_mux_parents; i++) {
- parents[i] = data->parents[i];
+ for (i = 0; i < clock_data->num_mux_parents; i++) {
+ parents[i] = clock_data->parents[i];
ret = match_string(cprman_parent_names,
ARRAY_SIZE(cprman_parent_names),
@@ -1410,18 +1414,18 @@ static struct clk_hw *bcm2835_register_clock(struct bcm2835_cprman *cprman,
memset(&init, 0, sizeof(init));
init.parent_names = parents;
- init.num_parents = data->num_mux_parents;
- init.name = data->name;
- init.flags = data->flags | CLK_IGNORE_UNUSED;
+ init.num_parents = clock_data->num_mux_parents;
+ init.name = clock_data->name;
+ init.flags = clock_data->flags | CLK_IGNORE_UNUSED;
/*
* Pass the CLK_SET_RATE_PARENT flag if we are allowed to propagate
* rate changes on at least of the parents.
*/
- if (data->set_rate_parent)
+ if (clock_data->set_rate_parent)
init.flags |= CLK_SET_RATE_PARENT;
- if (data->is_vpu_clock) {
+ if (clock_data->is_vpu_clock) {
init.ops = &bcm2835_vpu_clock_clk_ops;
} else {
init.ops = &bcm2835_clock_clk_ops;
@@ -1430,7 +1434,7 @@ static struct clk_hw *bcm2835_register_clock(struct bcm2835_cprman *cprman,
/* If the clock wasn't actually enabled at boot, it's not
* critical.
*/
- if (!(cprman_read(cprman, data->ctl_reg) & CM_ENABLE))
+ if (!(cprman_read(cprman, clock_data->ctl_reg) & CM_ENABLE))
init.flags &= ~CLK_IS_CRITICAL;
}
@@ -1439,7 +1443,7 @@ static struct clk_hw *bcm2835_register_clock(struct bcm2835_cprman *cprman,
return NULL;
clock->cprman = cprman;
- clock->data = data;
+ clock->data = clock_data;
clock->hw.init = &init;
ret = devm_clk_hw_register(cprman->dev, &clock->hw);
@@ -1448,25 +1452,27 @@ static struct clk_hw *bcm2835_register_clock(struct bcm2835_cprman *cprman,
return &clock->hw;
}
-static struct clk *bcm2835_register_gate(struct bcm2835_cprman *cprman,
- const struct bcm2835_gate_data *data)
+static struct clk_hw *bcm2835_register_gate(struct bcm2835_cprman *cprman,
+ const void *data)
{
- return clk_register_gate(cprman->dev, data->name, data->parent,
- CLK_IGNORE_UNUSED | CLK_SET_RATE_GATE,
- cprman->regs + data->ctl_reg,
- CM_GATE_BIT, 0, &cprman->regs_lock);
+ const struct bcm2835_gate_data *gate_data = data;
+
+ return clk_hw_register_gate(cprman->dev, gate_data->name,
+ gate_data->parent,
+ CLK_IGNORE_UNUSED | CLK_SET_RATE_GATE,
+ cprman->regs + gate_data->ctl_reg,
+ CM_GATE_BIT, 0, &cprman->regs_lock);
}
-typedef struct clk_hw *(*bcm2835_clk_register)(struct bcm2835_cprman *cprman,
- const void *data);
struct bcm2835_clk_desc {
- bcm2835_clk_register clk_register;
+ struct clk_hw *(*clk_register)(struct bcm2835_cprman *cprman,
+ const void *data);
unsigned int supported;
const void *data;
};
/* assignment helper macros for different clock types */
-#define _REGISTER(f, s, ...) { .clk_register = (bcm2835_clk_register)f, \
+#define _REGISTER(f, s, ...) { .clk_register = f, \
.supported = s, \
.data = __VA_ARGS__ }
#define REGISTER_PLL(s, ...) _REGISTER(&bcm2835_register_pll, \
diff --git a/drivers/clk/clk-ast2600.c b/drivers/clk/clk-ast2600.c
index 392d01705b97..99afc949925f 100644
--- a/drivers/clk/clk-ast2600.c
+++ b/drivers/clk/clk-ast2600.c
@@ -642,14 +642,22 @@ static const u32 ast2600_a0_axi_ahb_div_table[] = {
2, 2, 3, 5,
};
-static const u32 ast2600_a1_axi_ahb_div_table[] = {
- 4, 6, 2, 4,
+static const u32 ast2600_a1_axi_ahb_div0_tbl[] = {
+ 3, 2, 3, 4,
+};
+
+static const u32 ast2600_a1_axi_ahb_div1_tbl[] = {
+ 3, 4, 6, 8,
+};
+
+static const u32 ast2600_a1_axi_ahb200_tbl[] = {
+ 3, 4, 3, 4, 2, 2, 2, 2,
};
static void __init aspeed_g6_cc(struct regmap *map)
{
struct clk_hw *hw;
- u32 val, div, chip_id, axi_div, ahb_div;
+ u32 val, div, divbits, chip_id, axi_div, ahb_div;
clk_hw_register_fixed_rate(NULL, "clkin", NULL, 0, 25000000);
@@ -679,11 +687,22 @@ static void __init aspeed_g6_cc(struct regmap *map)
else
axi_div = 2;
+ divbits = (val >> 11) & 0x3;
regmap_read(map, ASPEED_G6_SILICON_REV, &chip_id);
- if (chip_id & BIT(16))
- ahb_div = ast2600_a1_axi_ahb_div_table[(val >> 11) & 0x3];
- else
+ if (chip_id & BIT(16)) {
+ if (!divbits) {
+ ahb_div = ast2600_a1_axi_ahb200_tbl[(val >> 8) & 0x3];
+ if (val & BIT(16))
+ ahb_div *= 2;
+ } else {
+ if (val & BIT(16))
+ ahb_div = ast2600_a1_axi_ahb_div1_tbl[divbits];
+ else
+ ahb_div = ast2600_a1_axi_ahb_div0_tbl[divbits];
+ }
+ } else {
ahb_div = ast2600_a0_axi_ahb_div_table[(val >> 11) & 0x3];
+ }
hw = clk_hw_register_fixed_factor(NULL, "ahb", "hpll", 0, 1, axi_div * ahb_div);
aspeed_g6_clk_data->hws[ASPEED_CLK_AHB] = hw;
diff --git a/drivers/clk/clk-hsdk-pll.c b/drivers/clk/clk-hsdk-pll.c
index 97d1e8c35b71..b4f8852201cb 100644
--- a/drivers/clk/clk-hsdk-pll.c
+++ b/drivers/clk/clk-hsdk-pll.c
@@ -53,35 +53,38 @@ struct hsdk_pll_cfg {
u32 fbdiv;
u32 odiv;
u32 band;
+ u32 bypass;
};
static const struct hsdk_pll_cfg asdt_pll_cfg[] = {
- { 100000000, 0, 11, 3, 0 },
- { 133000000, 0, 15, 3, 0 },
- { 200000000, 1, 47, 3, 0 },
- { 233000000, 1, 27, 2, 0 },
- { 300000000, 1, 35, 2, 0 },
- { 333000000, 1, 39, 2, 0 },
- { 400000000, 1, 47, 2, 0 },
- { 500000000, 0, 14, 1, 0 },
- { 600000000, 0, 17, 1, 0 },
- { 700000000, 0, 20, 1, 0 },
- { 800000000, 0, 23, 1, 0 },
- { 900000000, 1, 26, 0, 0 },
- { 1000000000, 1, 29, 0, 0 },
- { 1100000000, 1, 32, 0, 0 },
- { 1200000000, 1, 35, 0, 0 },
- { 1300000000, 1, 38, 0, 0 },
- { 1400000000, 1, 41, 0, 0 },
- { 1500000000, 1, 44, 0, 0 },
- { 1600000000, 1, 47, 0, 0 },
+ { 100000000, 0, 11, 3, 0, 0 },
+ { 133000000, 0, 15, 3, 0, 0 },
+ { 200000000, 1, 47, 3, 0, 0 },
+ { 233000000, 1, 27, 2, 0, 0 },
+ { 300000000, 1, 35, 2, 0, 0 },
+ { 333000000, 1, 39, 2, 0, 0 },
+ { 400000000, 1, 47, 2, 0, 0 },
+ { 500000000, 0, 14, 1, 0, 0 },
+ { 600000000, 0, 17, 1, 0, 0 },
+ { 700000000, 0, 20, 1, 0, 0 },
+ { 800000000, 0, 23, 1, 0, 0 },
+ { 900000000, 1, 26, 0, 0, 0 },
+ { 1000000000, 1, 29, 0, 0, 0 },
+ { 1100000000, 1, 32, 0, 0, 0 },
+ { 1200000000, 1, 35, 0, 0, 0 },
+ { 1300000000, 1, 38, 0, 0, 0 },
+ { 1400000000, 1, 41, 0, 0, 0 },
+ { 1500000000, 1, 44, 0, 0, 0 },
+ { 1600000000, 1, 47, 0, 0, 0 },
{}
};
static const struct hsdk_pll_cfg hdmi_pll_cfg[] = {
- { 297000000, 0, 21, 2, 0 },
- { 540000000, 0, 19, 1, 0 },
- { 594000000, 0, 21, 1, 0 },
+ { 27000000, 0, 0, 0, 0, 1 },
+ { 148500000, 0, 21, 3, 0, 0 },
+ { 297000000, 0, 21, 2, 0, 0 },
+ { 540000000, 0, 19, 1, 0, 0 },
+ { 594000000, 0, 21, 1, 0, 0 },
{}
};
@@ -134,11 +137,16 @@ static inline void hsdk_pll_set_cfg(struct hsdk_pll_clk *clk,
{
u32 val = 0;
- /* Powerdown and Bypass bits should be cleared */
- val |= cfg->idiv << CGU_PLL_CTRL_IDIV_SHIFT;
- val |= cfg->fbdiv << CGU_PLL_CTRL_FBDIV_SHIFT;
- val |= cfg->odiv << CGU_PLL_CTRL_ODIV_SHIFT;
- val |= cfg->band << CGU_PLL_CTRL_BAND_SHIFT;
+ if (cfg->bypass) {
+ val = hsdk_pll_read(clk, CGU_PLL_CTRL);
+ val |= CGU_PLL_CTRL_BYPASS;
+ } else {
+ /* Powerdown and Bypass bits should be cleared */
+ val |= cfg->idiv << CGU_PLL_CTRL_IDIV_SHIFT;
+ val |= cfg->fbdiv << CGU_PLL_CTRL_FBDIV_SHIFT;
+ val |= cfg->odiv << CGU_PLL_CTRL_ODIV_SHIFT;
+ val |= cfg->band << CGU_PLL_CTRL_BAND_SHIFT;
+ }
dev_dbg(clk->dev, "write configuration: %#x\n", val);
@@ -172,14 +180,14 @@ static unsigned long hsdk_pll_recalc_rate(struct clk_hw *hw,
dev_dbg(clk->dev, "current configuration: %#x\n", val);
- /* Check if PLL is disabled */
- if (val & CGU_PLL_CTRL_PD)
- return 0;
-
/* Check if PLL is bypassed */
if (val & CGU_PLL_CTRL_BYPASS)
return parent_rate;
+ /* Check if PLL is disabled */
+ if (val & CGU_PLL_CTRL_PD)
+ return 0;
+
/* input divider = reg.idiv + 1 */
idiv = 1 + ((val & CGU_PLL_CTRL_IDIV_MASK) >> CGU_PLL_CTRL_IDIV_SHIFT);
/* fb divider = 2*(reg.fbdiv + 1) */
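As a sanity check on the encoding above, the recalc path computes rate = parent_rate * 2 * (fbdiv + 1) / ((idiv + 1) * 2^odiv), unless the new bypass flag is set. A worked example for the HDMI table, assuming the 27 MHz reference that typically feeds that PLL (an illustration, not a value stated in the patch):

	/*
	 * { 297000000, 0, 21, 2, 0, 0 }:
	 *   idiv  = 0 + 1        = 1
	 *   fbdiv = 2 * (21 + 1) = 44
	 *   odiv  = 1 << 2       = 4
	 *   rate  = 27 MHz * 44 / (1 * 4) = 297 MHz
	 *
	 * { 27000000, 0, 0, 0, 0, 1 } instead sets CGU_PLL_CTRL_BYPASS, so the
	 * output is the 27 MHz reference itself - which is why the divider
	 * fields can all stay zero for that entry.
	 */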
diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c
index d5946f7486d6..374afcab89af 100644
--- a/drivers/clk/clk-qoriq.c
+++ b/drivers/clk/clk-qoriq.c
@@ -95,6 +95,7 @@ struct clockgen {
};
static struct clockgen clockgen;
+static bool add_cpufreq_dev __initdata;
static void cg_out(struct clockgen *cg, u32 val, u32 __iomem *reg)
{
@@ -1019,7 +1020,7 @@ static void __init create_muxes(struct clockgen *cg)
}
}
-static void __init clockgen_init(struct device_node *np);
+static void __init _clockgen_init(struct device_node *np, bool legacy);
/*
* Legacy nodes may get probed before the parent clockgen node.
@@ -1030,7 +1031,7 @@ static void __init clockgen_init(struct device_node *np);
static void __init legacy_init_clockgen(struct device_node *np)
{
if (!clockgen.node)
- clockgen_init(of_get_parent(np));
+ _clockgen_init(of_get_parent(np), true);
}
/* Legacy node */
@@ -1447,7 +1448,7 @@ static bool __init has_erratum_a4510(void)
}
#endif
-static void __init clockgen_init(struct device_node *np)
+static void __init _clockgen_init(struct device_node *np, bool legacy)
{
int i, ret;
bool is_old_ls1021a = false;
@@ -1516,12 +1517,35 @@ static void __init clockgen_init(struct device_node *np)
__func__, np, ret);
}
+ /* Don't create cpufreq device for legacy clockgen blocks */
+ add_cpufreq_dev = !legacy;
+
return;
err:
iounmap(clockgen.regs);
clockgen.regs = NULL;
}
+static void __init clockgen_init(struct device_node *np)
+{
+ _clockgen_init(np, false);
+}
+
+static int __init clockgen_cpufreq_init(void)
+{
+ struct platform_device *pdev;
+
+ if (add_cpufreq_dev) {
+ pdev = platform_device_register_simple("qoriq-cpufreq", -1,
+ NULL, 0);
+ if (IS_ERR(pdev))
+ pr_err("Couldn't register qoriq-cpufreq err=%ld\n",
+ PTR_ERR(pdev));
+ }
+ return 0;
+}
+device_initcall(clockgen_cpufreq_init);
+
CLK_OF_DECLARE(qoriq_clockgen_1, "fsl,qoriq-clockgen-1.0", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_2, "fsl,qoriq-clockgen-2.0", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_b4420, "fsl,b4420-clockgen", clockgen_init);
diff --git a/drivers/clk/clk-si5341.c b/drivers/clk/clk-si5341.c
index 3c228b018116..3d7acab9d280 100644
--- a/drivers/clk/clk-si5341.c
+++ b/drivers/clk/clk-si5341.c
@@ -1,8 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Driver for Silicon Labs Si5341/Si5340 Clock generator
+ * Driver for Silicon Labs Si5340, Si5341, Si5342, Si5344 and Si5345
* Copyright (C) 2019 Topic Embedded Products
* Author: Mike Looijmans <mike.looijmans@topic.nl>
+ *
+ * The Si5341 has 10 outputs and 5 synthesizers.
+ * The Si5340 is a smaller version of the Si5341 with only 4 outputs.
+ * The Si5345 is similar to the Si5341, with the addition of fractional input
+ * dividers and automatic input selection.
+ * The Si5342 and Si5344 are smaller versions of the Si5345.
*/
#include <linux/clk.h>
@@ -18,11 +24,17 @@
#define SI5341_NUM_INPUTS 4
-#define SI5341_MAX_NUM_OUTPUTS 10
#define SI5340_MAX_NUM_OUTPUTS 4
+#define SI5341_MAX_NUM_OUTPUTS 10
+#define SI5342_MAX_NUM_OUTPUTS 2
+#define SI5344_MAX_NUM_OUTPUTS 4
+#define SI5345_MAX_NUM_OUTPUTS 10
-#define SI5341_NUM_SYNTH 5
#define SI5340_NUM_SYNTH 4
+#define SI5341_NUM_SYNTH 5
+#define SI5342_NUM_SYNTH 2
+#define SI5344_NUM_SYNTH 4
+#define SI5345_NUM_SYNTH 5
/* Range of the synthesizer fractional divider */
#define SI5341_SYNTH_N_MIN 10
@@ -65,6 +77,7 @@ struct clk_si5341 {
u64 freq_vco; /* 13500–14256 MHz */
u8 num_outputs;
u8 num_synth;
+ u16 chip_id;
};
#define to_clk_si5341(_hw) container_of(_hw, struct clk_si5341, hw)
@@ -142,6 +155,7 @@ static const char * const si5341_input_clock_names[] = {
};
/* Output configuration registers 0..9 are not quite logically organized */
+/* Also used by the si5345 */
static const u16 si5341_reg_output_offset[] = {
0x0108,
0x010D,
@@ -155,6 +169,7 @@ static const u16 si5341_reg_output_offset[] = {
0x013A,
};
+/* for si5340, si5342 and si5344 */
static const u16 si5340_reg_output_offset[] = {
0x0112,
0x0117,
@@ -974,12 +989,32 @@ static int si5341_probe_chip_id(struct clk_si5341 *data)
data->reg_output_offset = si5341_reg_output_offset;
data->reg_rdiv_offset = si5341_reg_rdiv_offset;
break;
+ case 0x5342:
+ data->num_outputs = SI5342_MAX_NUM_OUTPUTS;
+ data->num_synth = SI5342_NUM_SYNTH;
+ data->reg_output_offset = si5340_reg_output_offset;
+ data->reg_rdiv_offset = si5340_reg_rdiv_offset;
+ break;
+ case 0x5344:
+ data->num_outputs = SI5344_MAX_NUM_OUTPUTS;
+ data->num_synth = SI5344_NUM_SYNTH;
+ data->reg_output_offset = si5340_reg_output_offset;
+ data->reg_rdiv_offset = si5340_reg_rdiv_offset;
+ break;
+ case 0x5345:
+ data->num_outputs = SI5345_MAX_NUM_OUTPUTS;
+ data->num_synth = SI5345_NUM_SYNTH;
+ data->reg_output_offset = si5341_reg_output_offset;
+ data->reg_rdiv_offset = si5341_reg_rdiv_offset;
+ break;
default:
dev_err(&data->i2c_client->dev, "Model '%x' not supported\n",
model);
return -EINVAL;
}
+ data->chip_id = model;
+
return 0;
}
@@ -1054,6 +1089,11 @@ static const struct si5341_reg_default si5341_preamble[] = {
{ 0x0B4E, 0x1A },
};
+static const struct si5341_reg_default si5345_preamble[] = {
+ { 0x0B25, 0x00 },
+ { 0x0540, 0x01 },
+};
+
static int si5341_send_preamble(struct clk_si5341 *data)
{
int res;
@@ -1068,8 +1108,14 @@ static int si5341_send_preamble(struct clk_si5341 *data)
res = regmap_write(data->regmap, 0xB24, revision < 2 ? 0xD8 : 0xC0);
if (res < 0)
return res;
- res = si5341_write_multiple(data,
- si5341_preamble, ARRAY_SIZE(si5341_preamble));
+
+ /* The si5342..si5345 require a different preamble */
+ if (data->chip_id > 0x5341)
+ res = si5341_write_multiple(data,
+ si5345_preamble, ARRAY_SIZE(si5345_preamble));
+ else
+ res = si5341_write_multiple(data,
+ si5341_preamble, ARRAY_SIZE(si5341_preamble));
if (res < 0)
return res;
@@ -1095,6 +1141,13 @@ static int si5341_finalize_defaults(struct clk_si5341 *data)
if (res < 0)
return res;
+ /* The si5342..si5345 have an additional post-amble */
+ if (data->chip_id > 0x5341) {
+ res = regmap_write(data->regmap, 0x540, 0x0);
+ if (res < 0)
+ return res;
+ }
+
/* Datasheet does not explain these nameless registers */
res = regmap_write(data->regmap, 0xB24, revision < 2 ? 0xDB : 0xC3);
if (res < 0)
@@ -1499,6 +1552,9 @@ static int si5341_probe(struct i2c_client *client,
static const struct i2c_device_id si5341_id[] = {
{ "si5340", 0 },
{ "si5341", 1 },
+ { "si5342", 2 },
+ { "si5344", 4 },
+ { "si5345", 5 },
{ }
};
MODULE_DEVICE_TABLE(i2c, si5341_id);
@@ -1506,6 +1562,9 @@ MODULE_DEVICE_TABLE(i2c, si5341_id);
static const struct of_device_id clk_si5341_of_match[] = {
{ .compatible = "silabs,si5340" },
{ .compatible = "silabs,si5341" },
+ { .compatible = "silabs,si5342" },
+ { .compatible = "silabs,si5344" },
+ { .compatible = "silabs,si5345" },
{ }
};
MODULE_DEVICE_TABLE(of, clk_si5341_of_match);
diff --git a/drivers/clk/clk-versaclock5.c b/drivers/clk/clk-versaclock5.c
index 24fef51fbcb5..fa96659f8023 100644
--- a/drivers/clk/clk-versaclock5.c
+++ b/drivers/clk/clk-versaclock5.c
@@ -124,6 +124,7 @@ enum vc5_model {
IDT_VC5_5P49V5933,
IDT_VC5_5P49V5935,
IDT_VC6_5P49V6901,
+ IDT_VC6_5P49V6965,
};
/* Structure to describe features of a particular VC5 model */
@@ -683,6 +684,7 @@ static int vc5_map_index_to_output(const enum vc5_model model,
case IDT_VC5_5P49V5925:
case IDT_VC5_5P49V5935:
case IDT_VC6_5P49V6901:
+ case IDT_VC6_5P49V6965:
default:
return n;
}
@@ -956,12 +958,20 @@ static const struct vc5_chip_info idt_5p49v6901_info = {
.flags = VC5_HAS_PFD_FREQ_DBL,
};
+static const struct vc5_chip_info idt_5p49v6965_info = {
+ .model = IDT_VC6_5P49V6965,
+ .clk_fod_cnt = 4,
+ .clk_out_cnt = 5,
+ .flags = 0,
+};
+
static const struct i2c_device_id vc5_id[] = {
{ "5p49v5923", .driver_data = IDT_VC5_5P49V5923 },
{ "5p49v5925", .driver_data = IDT_VC5_5P49V5925 },
{ "5p49v5933", .driver_data = IDT_VC5_5P49V5933 },
{ "5p49v5935", .driver_data = IDT_VC5_5P49V5935 },
{ "5p49v6901", .driver_data = IDT_VC6_5P49V6901 },
+ { "5p49v6965", .driver_data = IDT_VC6_5P49V6965 },
{ }
};
MODULE_DEVICE_TABLE(i2c, vc5_id);
@@ -972,6 +982,7 @@ static const struct of_device_id clk_vc5_of_match[] = {
{ .compatible = "idt,5p49v5933", .data = &idt_5p49v5933_info },
{ .compatible = "idt,5p49v5935", .data = &idt_5p49v5935_info },
{ .compatible = "idt,5p49v6901", .data = &idt_5p49v6901_info },
+ { .compatible = "idt,5p49v6965", .data = &idt_5p49v6965_info },
{ },
};
MODULE_DEVICE_TABLE(of, clk_vc5_of_match);
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 2dfb30b963c4..3f588ed06ce3 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -114,7 +114,11 @@ static int clk_pm_runtime_get(struct clk_core *core)
return 0;
ret = pm_runtime_get_sync(core->dev);
- return ret < 0 ? ret : 0;
+ if (ret < 0) {
+ pm_runtime_put_noidle(core->dev);
+ return ret;
+ }
+ return 0;
}
static void clk_pm_runtime_put(struct clk_core *core)
@@ -3295,10 +3299,6 @@ static int __init clk_debug_init(void)
late_initcall(clk_debug_init);
#else
static inline void clk_debug_register(struct clk_core *core) { }
-static inline void clk_debug_reparent(struct clk_core *core,
- struct clk_core *new_parent)
-{
-}
static inline void clk_debug_unregister(struct clk_core *core)
{
}
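The clk_pm_runtime_get() fix above follows the usual pm_runtime_get_sync() convention: the call raises the device usage counter even when it fails, so a failing call has to be balanced with pm_runtime_put_noidle() or the counter leaks. A generic sketch of the pattern (not tied to this file):

	#include <linux/pm_runtime.h>

	static int example_use_device(struct device *dev)
	{
		int ret;

		ret = pm_runtime_get_sync(dev);
		if (ret < 0) {
			/* Usage count was raised anyway - drop it again */
			pm_runtime_put_noidle(dev);
			return ret;
		}

		/* ... the device is guaranteed runtime-active here ... */

		pm_runtime_put(dev);
		return 0;
	}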
diff --git a/drivers/clk/imx/Kconfig b/drivers/clk/imx/Kconfig
index 01eadee88d66..db0253fa3d64 100644
--- a/drivers/clk/imx/Kconfig
+++ b/drivers/clk/imx/Kconfig
@@ -10,25 +10,25 @@ config MXC_CLK_SCU
config CLK_IMX8MM
bool "IMX8MM CCM Clock Driver"
- depends on ARCH_MXC && ARM64
+ depends on ARCH_MXC
help
Build the driver for i.MX8MM CCM Clock Driver
config CLK_IMX8MN
bool "IMX8MN CCM Clock Driver"
- depends on ARCH_MXC && ARM64
+ depends on ARCH_MXC
help
Build the driver for i.MX8MN CCM Clock Driver
config CLK_IMX8MP
bool "IMX8MP CCM Clock Driver"
- depends on ARCH_MXC && ARM64
+ depends on ARCH_MXC
help
Build the driver for i.MX8MP CCM Clock Driver
config CLK_IMX8MQ
bool "IMX8MQ CCM Clock Driver"
- depends on ARCH_MXC && ARM64
+ depends on ARCH_MXC
help
Build the driver for i.MX8MQ CCM Clock Driver
diff --git a/drivers/clk/imx/clk-composite-8m.c b/drivers/clk/imx/clk-composite-8m.c
index 99773519b5a5..d2b5af826f2c 100644
--- a/drivers/clk/imx/clk-composite-8m.c
+++ b/drivers/clk/imx/clk-composite-8m.c
@@ -124,6 +124,52 @@ static const struct clk_ops imx8m_clk_composite_divider_ops = {
.set_rate = imx8m_clk_composite_divider_set_rate,
};
+static u8 imx8m_clk_composite_mux_get_parent(struct clk_hw *hw)
+{
+ return clk_mux_ops.get_parent(hw);
+}
+
+static int imx8m_clk_composite_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_mux *mux = to_clk_mux(hw);
+ u32 val = clk_mux_index_to_val(mux->table, mux->flags, index);
+ unsigned long flags = 0;
+ u32 reg;
+
+ if (mux->lock)
+ spin_lock_irqsave(mux->lock, flags);
+
+ reg = readl(mux->reg);
+ reg &= ~(mux->mask << mux->shift);
+ val = val << mux->shift;
+ reg |= val;
+ /*
+	 * Write twice to make sure the non-target interface
+	 * SEL_A/B points to the same clock input.
+ */
+ writel(reg, mux->reg);
+ writel(reg, mux->reg);
+
+ if (mux->lock)
+ spin_unlock_irqrestore(mux->lock, flags);
+
+ return 0;
+}
+
+static int
+imx8m_clk_composite_mux_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ return clk_mux_ops.determine_rate(hw, req);
+}
+
+
+static const struct clk_ops imx8m_clk_composite_mux_ops = {
+ .get_parent = imx8m_clk_composite_mux_get_parent,
+ .set_parent = imx8m_clk_composite_mux_set_parent,
+ .determine_rate = imx8m_clk_composite_mux_determine_rate,
+};
+
struct clk_hw *imx8m_clk_hw_composite_flags(const char *name,
const char * const *parent_names,
int num_parents, void __iomem *reg,
@@ -136,6 +182,7 @@ struct clk_hw *imx8m_clk_hw_composite_flags(const char *name,
struct clk_gate *gate = NULL;
struct clk_mux *mux = NULL;
const struct clk_ops *divider_ops;
+ const struct clk_ops *mux_ops;
mux = kzalloc(sizeof(*mux), GFP_KERNEL);
if (!mux)
@@ -157,10 +204,17 @@ struct clk_hw *imx8m_clk_hw_composite_flags(const char *name,
div->shift = PCG_DIV_SHIFT;
div->width = PCG_CORE_DIV_WIDTH;
divider_ops = &clk_divider_ops;
+ mux_ops = &imx8m_clk_composite_mux_ops;
+ } else if (composite_flags & IMX_COMPOSITE_BUS) {
+ div->shift = PCG_PREDIV_SHIFT;
+ div->width = PCG_PREDIV_WIDTH;
+ divider_ops = &imx8m_clk_composite_divider_ops;
+ mux_ops = &imx8m_clk_composite_mux_ops;
} else {
div->shift = PCG_PREDIV_SHIFT;
div->width = PCG_PREDIV_WIDTH;
divider_ops = &imx8m_clk_composite_divider_ops;
+ mux_ops = &clk_mux_ops;
}
div->lock = &imx_ccm_lock;
@@ -176,7 +230,7 @@ struct clk_hw *imx8m_clk_hw_composite_flags(const char *name,
gate->lock = &imx_ccm_lock;
hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
- mux_hw, &clk_mux_ops, div_hw,
+ mux_hw, mux_ops, div_hw,
divider_ops, gate_hw, &clk_gate_ops, flags);
if (IS_ERR(hw))
goto fail;
diff --git a/drivers/clk/imx/clk-gate2.c b/drivers/clk/imx/clk-gate2.c
index ce0060e8873e..b87ab3c3ba1e 100644
--- a/drivers/clk/imx/clk-gate2.c
+++ b/drivers/clk/imx/clk-gate2.c
@@ -41,21 +41,26 @@ static int clk_gate2_enable(struct clk_hw *hw)
struct clk_gate2 *gate = to_clk_gate2(hw);
u32 reg;
unsigned long flags;
+ int ret = 0;
spin_lock_irqsave(gate->lock, flags);
if (gate->share_count && (*gate->share_count)++ > 0)
goto out;
- reg = readl(gate->reg);
- reg &= ~(3 << gate->bit_idx);
- reg |= gate->cgr_val << gate->bit_idx;
- writel(reg, gate->reg);
+ if (gate->flags & IMX_CLK_GATE2_SINGLE_BIT) {
+ ret = clk_gate_ops.enable(hw);
+ } else {
+ reg = readl(gate->reg);
+ reg &= ~(3 << gate->bit_idx);
+ reg |= gate->cgr_val << gate->bit_idx;
+ writel(reg, gate->reg);
+ }
out:
spin_unlock_irqrestore(gate->lock, flags);
- return 0;
+ return ret;
}
static void clk_gate2_disable(struct clk_hw *hw)
@@ -73,9 +78,13 @@ static void clk_gate2_disable(struct clk_hw *hw)
goto out;
}
- reg = readl(gate->reg);
- reg &= ~(3 << gate->bit_idx);
- writel(reg, gate->reg);
+ if (gate->flags & IMX_CLK_GATE2_SINGLE_BIT) {
+ clk_gate_ops.disable(hw);
+ } else {
+ reg = readl(gate->reg);
+ reg &= ~(3 << gate->bit_idx);
+ writel(reg, gate->reg);
+ }
out:
spin_unlock_irqrestore(gate->lock, flags);
@@ -95,6 +104,9 @@ static int clk_gate2_is_enabled(struct clk_hw *hw)
{
struct clk_gate2 *gate = to_clk_gate2(hw);
+ if (gate->flags & IMX_CLK_GATE2_SINGLE_BIT)
+ return clk_gate_ops.is_enabled(hw);
+
return clk_gate2_reg_is_enabled(gate->reg, gate->bit_idx);
}
@@ -104,6 +116,9 @@ static void clk_gate2_disable_unused(struct clk_hw *hw)
unsigned long flags;
u32 reg;
+ if (gate->flags & IMX_CLK_GATE2_SINGLE_BIT)
+ return;
+
spin_lock_irqsave(gate->lock, flags);
if (!gate->share_count || *gate->share_count == 0) {
diff --git a/drivers/clk/imx/clk-imx6ul.c b/drivers/clk/imx/clk-imx6ul.c
index dafc8806b03e..5dbb6a937732 100644
--- a/drivers/clk/imx/clk-imx6ul.c
+++ b/drivers/clk/imx/clk-imx6ul.c
@@ -503,7 +503,7 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clk_prepare_enable(hws[IMX6UL_CLK_USBPHY2_GATE]->clk);
}
- clk_set_parent(hws[IMX6UL_CLK_CAN_SEL]->clk, hws[IMX6UL_CLK_PLL3_60M]->clk);
+ clk_set_parent(hws[IMX6UL_CLK_CAN_SEL]->clk, hws[IMX6UL_CLK_PLL3_80M]->clk);
if (clk_on_imx6ul())
clk_set_parent(hws[IMX6UL_CLK_SIM_PRE_SEL]->clk, hws[IMX6UL_CLK_PLL3_USB_OTG]->clk);
else if (clk_on_imx6ull())
diff --git a/drivers/clk/imx/clk-imx7ulp.c b/drivers/clk/imx/clk-imx7ulp.c
index 3710aa0dee9b..634c0b6636b0 100644
--- a/drivers/clk/imx/clk-imx7ulp.c
+++ b/drivers/clk/imx/clk-imx7ulp.c
@@ -29,7 +29,7 @@ static const char * const ddr_sels[] = { "apll_pfd_sel", "dummy", "dummy", "dum
static const char * const nic_sels[] = { "firc", "ddr_clk", };
static const char * const periph_plat_sels[] = { "dummy", "nic1_bus_clk", "nic1_clk", "ddr_clk", "apll_pfd2", "apll_pfd1", "apll_pfd0", "upll", };
static const char * const periph_bus_sels[] = { "dummy", "sosc_bus_clk", "dummy", "firc_bus_clk", "rosc", "nic1_bus_clk", "nic1_clk", "spll_bus_clk", };
-static const char * const arm_sels[] = { "divcore", "dummy", "dummy", "hsrun_divcore", };
+static const char * const arm_sels[] = { "core", "dummy", "dummy", "hsrun_core", };
/* used by sosc/sirc/firc/ddr/spll/apll dividers */
static const struct clk_div_table ulp_div_table[] = {
@@ -121,7 +121,9 @@ static void __init imx7ulp_clk_scg1_init(struct device_node *np)
hws[IMX7ULP_CLK_DDR_SEL] = imx_clk_hw_mux_flags("ddr_sel", base + 0x30, 24, 2, ddr_sels, ARRAY_SIZE(ddr_sels), CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE);
hws[IMX7ULP_CLK_CORE_DIV] = imx_clk_hw_divider_flags("divcore", "scs_sel", base + 0x14, 16, 4, CLK_SET_RATE_PARENT);
+ hws[IMX7ULP_CLK_CORE] = imx_clk_hw_cpu("core", "divcore", hws[IMX7ULP_CLK_CORE_DIV]->clk, hws[IMX7ULP_CLK_SYS_SEL]->clk, hws[IMX7ULP_CLK_SPLL_SEL]->clk, hws[IMX7ULP_CLK_FIRC]->clk);
hws[IMX7ULP_CLK_HSRUN_CORE_DIV] = imx_clk_hw_divider_flags("hsrun_divcore", "hsrun_scs_sel", base + 0x1c, 16, 4, CLK_SET_RATE_PARENT);
+ hws[IMX7ULP_CLK_HSRUN_CORE] = imx_clk_hw_cpu("hsrun_core", "hsrun_divcore", hws[IMX7ULP_CLK_HSRUN_CORE_DIV]->clk, hws[IMX7ULP_CLK_HSRUN_SYS_SEL]->clk, hws[IMX7ULP_CLK_SPLL_SEL]->clk, hws[IMX7ULP_CLK_FIRC]->clk);
hws[IMX7ULP_CLK_DDR_DIV] = imx_clk_hw_divider_gate("ddr_clk", "ddr_sel", CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, base + 0x30, 0, 3,
0, ulp_div_table, &imx_ccm_lock);
@@ -270,7 +272,7 @@ static void __init imx7ulp_clk_smc1_init(struct device_node *np)
base = of_iomap(np, 0);
WARN_ON(!base);
- hws[IMX7ULP_CLK_ARM] = imx_clk_hw_mux_flags("arm", base + 0x10, 8, 2, arm_sels, ARRAY_SIZE(arm_sels), CLK_IS_CRITICAL);
+ hws[IMX7ULP_CLK_ARM] = imx_clk_hw_mux_flags("arm", base + 0x10, 8, 2, arm_sels, ARRAY_SIZE(arm_sels), CLK_SET_RATE_PARENT);
imx_check_clk_hws(hws, clk_data->num);
diff --git a/drivers/clk/imx/clk-imx8mm.c b/drivers/clk/imx/clk-imx8mm.c
index 925670438f23..b793264c21c6 100644
--- a/drivers/clk/imx/clk-imx8mm.c
+++ b/drivers/clk/imx/clk-imx8mm.c
@@ -416,9 +416,9 @@ static int imx8mm_clocks_probe(struct platform_device *pdev)
return PTR_ERR(base);
/* Core Slice */
- hws[IMX8MM_CLK_A53_SRC] = imx_clk_hw_mux2("arm_a53_src", base + 0x8000, 24, 3, imx8mm_a53_sels, ARRAY_SIZE(imx8mm_a53_sels));
- hws[IMX8MM_CLK_A53_CG] = imx_clk_hw_gate3("arm_a53_cg", "arm_a53_src", base + 0x8000, 28);
- hws[IMX8MM_CLK_A53_DIV] = imx_clk_hw_divider2("arm_a53_div", "arm_a53_cg", base + 0x8000, 0, 3);
+ hws[IMX8MM_CLK_A53_DIV] = imx8m_clk_hw_composite_core("arm_a53_div", imx8mm_a53_sels, base + 0x8000);
+ hws[IMX8MM_CLK_A53_CG] = hws[IMX8MM_CLK_A53_DIV];
+ hws[IMX8MM_CLK_A53_SRC] = hws[IMX8MM_CLK_A53_DIV];
hws[IMX8MM_CLK_M4_CORE] = imx8m_clk_hw_composite_core("arm_m4_core", imx8mm_m4_sels, base + 0x8080);
hws[IMX8MM_CLK_VPU_CORE] = imx8m_clk_hw_composite_core("vpu_core", imx8mm_vpu_sels, base + 0x8100);
@@ -444,21 +444,21 @@ static int imx8mm_clocks_probe(struct platform_device *pdev)
/* BUS */
hws[IMX8MM_CLK_MAIN_AXI] = imx8m_clk_hw_composite_critical("main_axi", imx8mm_main_axi_sels, base + 0x8800);
- hws[IMX8MM_CLK_ENET_AXI] = imx8m_clk_hw_composite("enet_axi", imx8mm_enet_axi_sels, base + 0x8880);
+ hws[IMX8MM_CLK_ENET_AXI] = imx8m_clk_hw_composite_bus("enet_axi", imx8mm_enet_axi_sels, base + 0x8880);
hws[IMX8MM_CLK_NAND_USDHC_BUS] = imx8m_clk_hw_composite_critical("nand_usdhc_bus", imx8mm_nand_usdhc_sels, base + 0x8900);
- hws[IMX8MM_CLK_VPU_BUS] = imx8m_clk_hw_composite("vpu_bus", imx8mm_vpu_bus_sels, base + 0x8980);
- hws[IMX8MM_CLK_DISP_AXI] = imx8m_clk_hw_composite("disp_axi", imx8mm_disp_axi_sels, base + 0x8a00);
- hws[IMX8MM_CLK_DISP_APB] = imx8m_clk_hw_composite("disp_apb", imx8mm_disp_apb_sels, base + 0x8a80);
- hws[IMX8MM_CLK_DISP_RTRM] = imx8m_clk_hw_composite("disp_rtrm", imx8mm_disp_rtrm_sels, base + 0x8b00);
- hws[IMX8MM_CLK_USB_BUS] = imx8m_clk_hw_composite("usb_bus", imx8mm_usb_bus_sels, base + 0x8b80);
- hws[IMX8MM_CLK_GPU_AXI] = imx8m_clk_hw_composite("gpu_axi", imx8mm_gpu_axi_sels, base + 0x8c00);
- hws[IMX8MM_CLK_GPU_AHB] = imx8m_clk_hw_composite("gpu_ahb", imx8mm_gpu_ahb_sels, base + 0x8c80);
+ hws[IMX8MM_CLK_VPU_BUS] = imx8m_clk_hw_composite_bus("vpu_bus", imx8mm_vpu_bus_sels, base + 0x8980);
+ hws[IMX8MM_CLK_DISP_AXI] = imx8m_clk_hw_composite_bus("disp_axi", imx8mm_disp_axi_sels, base + 0x8a00);
+ hws[IMX8MM_CLK_DISP_APB] = imx8m_clk_hw_composite_bus("disp_apb", imx8mm_disp_apb_sels, base + 0x8a80);
+ hws[IMX8MM_CLK_DISP_RTRM] = imx8m_clk_hw_composite_bus("disp_rtrm", imx8mm_disp_rtrm_sels, base + 0x8b00);
+ hws[IMX8MM_CLK_USB_BUS] = imx8m_clk_hw_composite_bus("usb_bus", imx8mm_usb_bus_sels, base + 0x8b80);
+ hws[IMX8MM_CLK_GPU_AXI] = imx8m_clk_hw_composite_bus("gpu_axi", imx8mm_gpu_axi_sels, base + 0x8c00);
+ hws[IMX8MM_CLK_GPU_AHB] = imx8m_clk_hw_composite_bus("gpu_ahb", imx8mm_gpu_ahb_sels, base + 0x8c80);
hws[IMX8MM_CLK_NOC] = imx8m_clk_hw_composite_critical("noc", imx8mm_noc_sels, base + 0x8d00);
hws[IMX8MM_CLK_NOC_APB] = imx8m_clk_hw_composite_critical("noc_apb", imx8mm_noc_apb_sels, base + 0x8d80);
/* AHB */
hws[IMX8MM_CLK_AHB] = imx8m_clk_hw_composite_critical("ahb", imx8mm_ahb_sels, base + 0x9000);
- hws[IMX8MM_CLK_AUDIO_AHB] = imx8m_clk_hw_composite("audio_ahb", imx8mm_audio_ahb_sels, base + 0x9100);
+ hws[IMX8MM_CLK_AUDIO_AHB] = imx8m_clk_hw_composite_bus("audio_ahb", imx8mm_audio_ahb_sels, base + 0x9100);
/* IPG */
hws[IMX8MM_CLK_IPG_ROOT] = imx_clk_hw_divider2("ipg_root", "ahb", base + 0x9080, 0, 1);
@@ -614,9 +614,6 @@ static int imx8mm_clocks_probe(struct platform_device *pdev)
hws[IMX8MM_ARM_PLL_OUT]->clk,
hws[IMX8MM_CLK_A53_DIV]->clk);
- clk_hw_set_parent(hws[IMX8MM_CLK_A53_SRC], hws[IMX8MM_SYS_PLL1_800M]);
- clk_hw_set_parent(hws[IMX8MM_CLK_A53_CORE], hws[IMX8MM_ARM_PLL_OUT]);
-
imx_check_clk_hws(hws, IMX8MM_CLK_END);
ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data);
diff --git a/drivers/clk/imx/clk-imx8mn.c b/drivers/clk/imx/clk-imx8mn.c
index 0bc7070235bd..213cc37b3173 100644
--- a/drivers/clk/imx/clk-imx8mn.c
+++ b/drivers/clk/imx/clk-imx8mn.c
@@ -413,9 +413,9 @@ static int imx8mn_clocks_probe(struct platform_device *pdev)
}
/* CORE */
- hws[IMX8MN_CLK_A53_SRC] = imx_clk_hw_mux2("arm_a53_src", base + 0x8000, 24, 3, imx8mn_a53_sels, ARRAY_SIZE(imx8mn_a53_sels));
- hws[IMX8MN_CLK_A53_CG] = imx_clk_hw_gate3("arm_a53_cg", "arm_a53_src", base + 0x8000, 28);
- hws[IMX8MN_CLK_A53_DIV] = imx_clk_hw_divider2("arm_a53_div", "arm_a53_cg", base + 0x8000, 0, 3);
+ hws[IMX8MN_CLK_A53_DIV] = imx8m_clk_hw_composite_core("arm_a53_div", imx8mn_a53_sels, base + 0x8000);
+ hws[IMX8MN_CLK_A53_SRC] = hws[IMX8MN_CLK_A53_DIV];
+ hws[IMX8MN_CLK_A53_CG] = hws[IMX8MN_CLK_A53_DIV];
hws[IMX8MN_CLK_GPU_CORE] = imx8m_clk_hw_composite_core("gpu_core", imx8mn_gpu_core_sels, base + 0x8180);
hws[IMX8MN_CLK_GPU_SHADER] = imx8m_clk_hw_composite_core("gpu_shader", imx8mn_gpu_shader_sels, base + 0x8200);
@@ -432,17 +432,17 @@ static int imx8mn_clocks_probe(struct platform_device *pdev)
/* BUS */
hws[IMX8MN_CLK_MAIN_AXI] = imx8m_clk_hw_composite_critical("main_axi", imx8mn_main_axi_sels, base + 0x8800);
- hws[IMX8MN_CLK_ENET_AXI] = imx8m_clk_hw_composite("enet_axi", imx8mn_enet_axi_sels, base + 0x8880);
- hws[IMX8MN_CLK_NAND_USDHC_BUS] = imx8m_clk_hw_composite("nand_usdhc_bus", imx8mn_nand_usdhc_sels, base + 0x8900);
- hws[IMX8MN_CLK_DISP_AXI] = imx8m_clk_hw_composite("disp_axi", imx8mn_disp_axi_sels, base + 0x8a00);
- hws[IMX8MN_CLK_DISP_APB] = imx8m_clk_hw_composite("disp_apb", imx8mn_disp_apb_sels, base + 0x8a80);
- hws[IMX8MN_CLK_USB_BUS] = imx8m_clk_hw_composite("usb_bus", imx8mn_usb_bus_sels, base + 0x8b80);
- hws[IMX8MN_CLK_GPU_AXI] = imx8m_clk_hw_composite("gpu_axi", imx8mn_gpu_axi_sels, base + 0x8c00);
- hws[IMX8MN_CLK_GPU_AHB] = imx8m_clk_hw_composite("gpu_ahb", imx8mn_gpu_ahb_sels, base + 0x8c80);
+ hws[IMX8MN_CLK_ENET_AXI] = imx8m_clk_hw_composite_bus("enet_axi", imx8mn_enet_axi_sels, base + 0x8880);
+ hws[IMX8MN_CLK_NAND_USDHC_BUS] = imx8m_clk_hw_composite_bus("nand_usdhc_bus", imx8mn_nand_usdhc_sels, base + 0x8900);
+ hws[IMX8MN_CLK_DISP_AXI] = imx8m_clk_hw_composite_bus("disp_axi", imx8mn_disp_axi_sels, base + 0x8a00);
+ hws[IMX8MN_CLK_DISP_APB] = imx8m_clk_hw_composite_bus("disp_apb", imx8mn_disp_apb_sels, base + 0x8a80);
+ hws[IMX8MN_CLK_USB_BUS] = imx8m_clk_hw_composite_bus("usb_bus", imx8mn_usb_bus_sels, base + 0x8b80);
+ hws[IMX8MN_CLK_GPU_AXI] = imx8m_clk_hw_composite_bus("gpu_axi", imx8mn_gpu_axi_sels, base + 0x8c00);
+ hws[IMX8MN_CLK_GPU_AHB] = imx8m_clk_hw_composite_bus("gpu_ahb", imx8mn_gpu_ahb_sels, base + 0x8c80);
hws[IMX8MN_CLK_NOC] = imx8m_clk_hw_composite_critical("noc", imx8mn_noc_sels, base + 0x8d00);
hws[IMX8MN_CLK_AHB] = imx8m_clk_hw_composite_critical("ahb", imx8mn_ahb_sels, base + 0x9000);
- hws[IMX8MN_CLK_AUDIO_AHB] = imx8m_clk_hw_composite("audio_ahb", imx8mn_audio_ahb_sels, base + 0x9100);
+ hws[IMX8MN_CLK_AUDIO_AHB] = imx8m_clk_hw_composite_bus("audio_ahb", imx8mn_audio_ahb_sels, base + 0x9100);
hws[IMX8MN_CLK_IPG_ROOT] = imx_clk_hw_divider2("ipg_root", "ahb", base + 0x9080, 0, 1);
hws[IMX8MN_CLK_IPG_AUDIO_ROOT] = imx_clk_hw_divider2("ipg_audio_root", "audio_ahb", base + 0x9180, 0, 1);
hws[IMX8MN_CLK_DRAM_CORE] = imx_clk_hw_mux2_flags("dram_core_clk", base + 0x9800, 24, 1, imx8mn_dram_core_sels, ARRAY_SIZE(imx8mn_dram_core_sels), CLK_IS_CRITICAL);
@@ -565,9 +565,6 @@ static int imx8mn_clocks_probe(struct platform_device *pdev)
hws[IMX8MN_ARM_PLL_OUT]->clk,
hws[IMX8MN_CLK_A53_DIV]->clk);
- clk_hw_set_parent(hws[IMX8MN_CLK_A53_SRC], hws[IMX8MN_SYS_PLL1_800M]);
- clk_hw_set_parent(hws[IMX8MN_CLK_A53_CORE], hws[IMX8MN_ARM_PLL_OUT]);
-
imx_check_clk_hws(hws, IMX8MN_CLK_END);
ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data);
diff --git a/drivers/clk/imx/clk-imx8mp.c b/drivers/clk/imx/clk-imx8mp.c
index 41469e2cc3de..b4d9db9d5bf1 100644
--- a/drivers/clk/imx/clk-imx8mp.c
+++ b/drivers/clk/imx/clk-imx8mp.c
@@ -486,16 +486,16 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
hws[IMX8MP_SYS_PLL2] = imx_clk_hw_pll14xx("sys_pll2", "sys_pll2_ref_sel", anatop_base + 0x104, &imx_1416x_pll);
hws[IMX8MP_SYS_PLL3] = imx_clk_hw_pll14xx("sys_pll3", "sys_pll3_ref_sel", anatop_base + 0x114, &imx_1416x_pll);
- hws[IMX8MP_AUDIO_PLL1_BYPASS] = imx_clk_hw_mux_flags("audio_pll1_bypass", anatop_base, 4, 1, audio_pll1_bypass_sels, ARRAY_SIZE(audio_pll1_bypass_sels), CLK_SET_RATE_PARENT);
- hws[IMX8MP_AUDIO_PLL2_BYPASS] = imx_clk_hw_mux_flags("audio_pll2_bypass", anatop_base + 0x14, 4, 1, audio_pll2_bypass_sels, ARRAY_SIZE(audio_pll2_bypass_sels), CLK_SET_RATE_PARENT);
- hws[IMX8MP_VIDEO_PLL1_BYPASS] = imx_clk_hw_mux_flags("video_pll1_bypass", anatop_base + 0x28, 4, 1, video_pll1_bypass_sels, ARRAY_SIZE(video_pll1_bypass_sels), CLK_SET_RATE_PARENT);
- hws[IMX8MP_DRAM_PLL_BYPASS] = imx_clk_hw_mux_flags("dram_pll_bypass", anatop_base + 0x50, 4, 1, dram_pll_bypass_sels, ARRAY_SIZE(dram_pll_bypass_sels), CLK_SET_RATE_PARENT);
- hws[IMX8MP_GPU_PLL_BYPASS] = imx_clk_hw_mux_flags("gpu_pll_bypass", anatop_base + 0x64, 4, 1, gpu_pll_bypass_sels, ARRAY_SIZE(gpu_pll_bypass_sels), CLK_SET_RATE_PARENT);
- hws[IMX8MP_VPU_PLL_BYPASS] = imx_clk_hw_mux_flags("vpu_pll_bypass", anatop_base + 0x74, 4, 1, vpu_pll_bypass_sels, ARRAY_SIZE(vpu_pll_bypass_sels), CLK_SET_RATE_PARENT);
- hws[IMX8MP_ARM_PLL_BYPASS] = imx_clk_hw_mux_flags("arm_pll_bypass", anatop_base + 0x84, 4, 1, arm_pll_bypass_sels, ARRAY_SIZE(arm_pll_bypass_sels), CLK_SET_RATE_PARENT);
- hws[IMX8MP_SYS_PLL1_BYPASS] = imx_clk_hw_mux_flags("sys_pll1_bypass", anatop_base + 0x94, 4, 1, sys_pll1_bypass_sels, ARRAY_SIZE(sys_pll1_bypass_sels), CLK_SET_RATE_PARENT);
- hws[IMX8MP_SYS_PLL2_BYPASS] = imx_clk_hw_mux_flags("sys_pll2_bypass", anatop_base + 0x104, 4, 1, sys_pll2_bypass_sels, ARRAY_SIZE(sys_pll2_bypass_sels), CLK_SET_RATE_PARENT);
- hws[IMX8MP_SYS_PLL3_BYPASS] = imx_clk_hw_mux_flags("sys_pll3_bypass", anatop_base + 0x114, 4, 1, sys_pll3_bypass_sels, ARRAY_SIZE(sys_pll3_bypass_sels), CLK_SET_RATE_PARENT);
+ hws[IMX8MP_AUDIO_PLL1_BYPASS] = imx_clk_hw_mux_flags("audio_pll1_bypass", anatop_base, 16, 1, audio_pll1_bypass_sels, ARRAY_SIZE(audio_pll1_bypass_sels), CLK_SET_RATE_PARENT);
+ hws[IMX8MP_AUDIO_PLL2_BYPASS] = imx_clk_hw_mux_flags("audio_pll2_bypass", anatop_base + 0x14, 16, 1, audio_pll2_bypass_sels, ARRAY_SIZE(audio_pll2_bypass_sels), CLK_SET_RATE_PARENT);
+ hws[IMX8MP_VIDEO_PLL1_BYPASS] = imx_clk_hw_mux_flags("video_pll1_bypass", anatop_base + 0x28, 16, 1, video_pll1_bypass_sels, ARRAY_SIZE(video_pll1_bypass_sels), CLK_SET_RATE_PARENT);
+ hws[IMX8MP_DRAM_PLL_BYPASS] = imx_clk_hw_mux_flags("dram_pll_bypass", anatop_base + 0x50, 16, 1, dram_pll_bypass_sels, ARRAY_SIZE(dram_pll_bypass_sels), CLK_SET_RATE_PARENT);
+ hws[IMX8MP_GPU_PLL_BYPASS] = imx_clk_hw_mux_flags("gpu_pll_bypass", anatop_base + 0x64, 28, 1, gpu_pll_bypass_sels, ARRAY_SIZE(gpu_pll_bypass_sels), CLK_SET_RATE_PARENT);
+ hws[IMX8MP_VPU_PLL_BYPASS] = imx_clk_hw_mux_flags("vpu_pll_bypass", anatop_base + 0x74, 28, 1, vpu_pll_bypass_sels, ARRAY_SIZE(vpu_pll_bypass_sels), CLK_SET_RATE_PARENT);
+ hws[IMX8MP_ARM_PLL_BYPASS] = imx_clk_hw_mux_flags("arm_pll_bypass", anatop_base + 0x84, 28, 1, arm_pll_bypass_sels, ARRAY_SIZE(arm_pll_bypass_sels), CLK_SET_RATE_PARENT);
+ hws[IMX8MP_SYS_PLL1_BYPASS] = imx_clk_hw_mux_flags("sys_pll1_bypass", anatop_base + 0x94, 28, 1, sys_pll1_bypass_sels, ARRAY_SIZE(sys_pll1_bypass_sels), CLK_SET_RATE_PARENT);
+ hws[IMX8MP_SYS_PLL2_BYPASS] = imx_clk_hw_mux_flags("sys_pll2_bypass", anatop_base + 0x104, 28, 1, sys_pll2_bypass_sels, ARRAY_SIZE(sys_pll2_bypass_sels), CLK_SET_RATE_PARENT);
+ hws[IMX8MP_SYS_PLL3_BYPASS] = imx_clk_hw_mux_flags("sys_pll3_bypass", anatop_base + 0x114, 28, 1, sys_pll3_bypass_sels, ARRAY_SIZE(sys_pll3_bypass_sels), CLK_SET_RATE_PARENT);
hws[IMX8MP_AUDIO_PLL1_OUT] = imx_clk_hw_gate("audio_pll1_out", "audio_pll1_bypass", anatop_base, 13);
hws[IMX8MP_AUDIO_PLL2_OUT] = imx_clk_hw_gate("audio_pll2_out", "audio_pll2_bypass", anatop_base + 0x14, 13);
@@ -504,79 +504,82 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
hws[IMX8MP_GPU_PLL_OUT] = imx_clk_hw_gate("gpu_pll_out", "gpu_pll_bypass", anatop_base + 0x64, 11);
hws[IMX8MP_VPU_PLL_OUT] = imx_clk_hw_gate("vpu_pll_out", "vpu_pll_bypass", anatop_base + 0x74, 11);
hws[IMX8MP_ARM_PLL_OUT] = imx_clk_hw_gate("arm_pll_out", "arm_pll_bypass", anatop_base + 0x84, 11);
- hws[IMX8MP_SYS_PLL1_OUT] = imx_clk_hw_gate("sys_pll1_out", "sys_pll1_bypass", anatop_base + 0x94, 11);
- hws[IMX8MP_SYS_PLL2_OUT] = imx_clk_hw_gate("sys_pll2_out", "sys_pll2_bypass", anatop_base + 0x104, 11);
hws[IMX8MP_SYS_PLL3_OUT] = imx_clk_hw_gate("sys_pll3_out", "sys_pll3_bypass", anatop_base + 0x114, 11);
- hws[IMX8MP_SYS_PLL1_40M] = imx_clk_hw_fixed_factor("sys_pll1_40m", "sys_pll1_out", 1, 20);
- hws[IMX8MP_SYS_PLL1_80M] = imx_clk_hw_fixed_factor("sys_pll1_80m", "sys_pll1_out", 1, 10);
- hws[IMX8MP_SYS_PLL1_100M] = imx_clk_hw_fixed_factor("sys_pll1_100m", "sys_pll1_out", 1, 8);
- hws[IMX8MP_SYS_PLL1_133M] = imx_clk_hw_fixed_factor("sys_pll1_133m", "sys_pll1_out", 1, 6);
- hws[IMX8MP_SYS_PLL1_160M] = imx_clk_hw_fixed_factor("sys_pll1_160m", "sys_pll1_out", 1, 5);
- hws[IMX8MP_SYS_PLL1_200M] = imx_clk_hw_fixed_factor("sys_pll1_200m", "sys_pll1_out", 1, 4);
- hws[IMX8MP_SYS_PLL1_266M] = imx_clk_hw_fixed_factor("sys_pll1_266m", "sys_pll1_out", 1, 3);
- hws[IMX8MP_SYS_PLL1_400M] = imx_clk_hw_fixed_factor("sys_pll1_400m", "sys_pll1_out", 1, 2);
+ hws[IMX8MP_SYS_PLL1_40M_CG] = imx_clk_hw_gate("sys_pll1_40m_cg", "sys_pll1_bypass", anatop_base + 0x94, 27);
+ hws[IMX8MP_SYS_PLL1_80M_CG] = imx_clk_hw_gate("sys_pll1_80m_cg", "sys_pll1_bypass", anatop_base + 0x94, 25);
+ hws[IMX8MP_SYS_PLL1_100M_CG] = imx_clk_hw_gate("sys_pll1_100m_cg", "sys_pll1_bypass", anatop_base + 0x94, 23);
+ hws[IMX8MP_SYS_PLL1_133M_CG] = imx_clk_hw_gate("sys_pll1_133m_cg", "sys_pll1_bypass", anatop_base + 0x94, 21);
+ hws[IMX8MP_SYS_PLL1_160M_CG] = imx_clk_hw_gate("sys_pll1_160m_cg", "sys_pll1_bypass", anatop_base + 0x94, 19);
+ hws[IMX8MP_SYS_PLL1_200M_CG] = imx_clk_hw_gate("sys_pll1_200m_cg", "sys_pll1_bypass", anatop_base + 0x94, 17);
+ hws[IMX8MP_SYS_PLL1_266M_CG] = imx_clk_hw_gate("sys_pll1_266m_cg", "sys_pll1_bypass", anatop_base + 0x94, 15);
+ hws[IMX8MP_SYS_PLL1_400M_CG] = imx_clk_hw_gate("sys_pll1_400m_cg", "sys_pll1_bypass", anatop_base + 0x94, 13);
+ hws[IMX8MP_SYS_PLL1_OUT] = imx_clk_hw_gate("sys_pll1_out", "sys_pll1_bypass", anatop_base + 0x94, 11);
+
+ hws[IMX8MP_SYS_PLL1_40M] = imx_clk_hw_fixed_factor("sys_pll1_40m", "sys_pll1_40m_cg", 1, 20);
+ hws[IMX8MP_SYS_PLL1_80M] = imx_clk_hw_fixed_factor("sys_pll1_80m", "sys_pll1_80m_cg", 1, 10);
+ hws[IMX8MP_SYS_PLL1_100M] = imx_clk_hw_fixed_factor("sys_pll1_100m", "sys_pll1_100m_cg", 1, 8);
+ hws[IMX8MP_SYS_PLL1_133M] = imx_clk_hw_fixed_factor("sys_pll1_133m", "sys_pll1_133m_cg", 1, 6);
+ hws[IMX8MP_SYS_PLL1_160M] = imx_clk_hw_fixed_factor("sys_pll1_160m", "sys_pll1_160m_cg", 1, 5);
+ hws[IMX8MP_SYS_PLL1_200M] = imx_clk_hw_fixed_factor("sys_pll1_200m", "sys_pll1_200m_cg", 1, 4);
+ hws[IMX8MP_SYS_PLL1_266M] = imx_clk_hw_fixed_factor("sys_pll1_266m", "sys_pll1_266m_cg", 1, 3);
+ hws[IMX8MP_SYS_PLL1_400M] = imx_clk_hw_fixed_factor("sys_pll1_400m", "sys_pll1_400m_cg", 1, 2);
hws[IMX8MP_SYS_PLL1_800M] = imx_clk_hw_fixed_factor("sys_pll1_800m", "sys_pll1_out", 1, 1);
- hws[IMX8MP_SYS_PLL2_50M] = imx_clk_hw_fixed_factor("sys_pll2_50m", "sys_pll2_out", 1, 20);
- hws[IMX8MP_SYS_PLL2_100M] = imx_clk_hw_fixed_factor("sys_pll2_100m", "sys_pll2_out", 1, 10);
- hws[IMX8MP_SYS_PLL2_125M] = imx_clk_hw_fixed_factor("sys_pll2_125m", "sys_pll2_out", 1, 8);
- hws[IMX8MP_SYS_PLL2_166M] = imx_clk_hw_fixed_factor("sys_pll2_166m", "sys_pll2_out", 1, 6);
- hws[IMX8MP_SYS_PLL2_200M] = imx_clk_hw_fixed_factor("sys_pll2_200m", "sys_pll2_out", 1, 5);
- hws[IMX8MP_SYS_PLL2_250M] = imx_clk_hw_fixed_factor("sys_pll2_250m", "sys_pll2_out", 1, 4);
- hws[IMX8MP_SYS_PLL2_333M] = imx_clk_hw_fixed_factor("sys_pll2_333m", "sys_pll2_out", 1, 3);
- hws[IMX8MP_SYS_PLL2_500M] = imx_clk_hw_fixed_factor("sys_pll2_500m", "sys_pll2_out", 1, 2);
+ hws[IMX8MP_SYS_PLL2_50M_CG] = imx_clk_hw_gate("sys_pll2_50m_cg", "sys_pll2_bypass", anatop_base + 0x104, 27);
+ hws[IMX8MP_SYS_PLL2_100M_CG] = imx_clk_hw_gate("sys_pll2_100m_cg", "sys_pll2_bypass", anatop_base + 0x104, 25);
+ hws[IMX8MP_SYS_PLL2_125M_CG] = imx_clk_hw_gate("sys_pll2_125m_cg", "sys_pll2_bypass", anatop_base + 0x104, 23);
+ hws[IMX8MP_SYS_PLL2_166M_CG] = imx_clk_hw_gate("sys_pll2_166m_cg", "sys_pll2_bypass", anatop_base + 0x104, 21);
+ hws[IMX8MP_SYS_PLL2_200M_CG] = imx_clk_hw_gate("sys_pll2_200m_cg", "sys_pll2_bypass", anatop_base + 0x104, 19);
+ hws[IMX8MP_SYS_PLL2_250M_CG] = imx_clk_hw_gate("sys_pll2_250m_cg", "sys_pll2_bypass", anatop_base + 0x104, 17);
+ hws[IMX8MP_SYS_PLL2_333M_CG] = imx_clk_hw_gate("sys_pll2_333m_cg", "sys_pll2_bypass", anatop_base + 0x104, 15);
+ hws[IMX8MP_SYS_PLL2_500M_CG] = imx_clk_hw_gate("sys_pll2_500m_cg", "sys_pll2_bypass", anatop_base + 0x104, 13);
+ hws[IMX8MP_SYS_PLL2_OUT] = imx_clk_hw_gate("sys_pll2_out", "sys_pll2_bypass", anatop_base + 0x104, 11);
+
+ hws[IMX8MP_SYS_PLL2_50M] = imx_clk_hw_fixed_factor("sys_pll2_50m", "sys_pll2_50m_cg", 1, 20);
+ hws[IMX8MP_SYS_PLL2_100M] = imx_clk_hw_fixed_factor("sys_pll2_100m", "sys_pll2_100m_cg", 1, 10);
+ hws[IMX8MP_SYS_PLL2_125M] = imx_clk_hw_fixed_factor("sys_pll2_125m", "sys_pll2_125m_cg", 1, 8);
+ hws[IMX8MP_SYS_PLL2_166M] = imx_clk_hw_fixed_factor("sys_pll2_166m", "sys_pll2_166m_cg", 1, 6);
+ hws[IMX8MP_SYS_PLL2_200M] = imx_clk_hw_fixed_factor("sys_pll2_200m", "sys_pll2_200m_cg", 1, 5);
+ hws[IMX8MP_SYS_PLL2_250M] = imx_clk_hw_fixed_factor("sys_pll2_250m", "sys_pll2_250m_cg", 1, 4);
+ hws[IMX8MP_SYS_PLL2_333M] = imx_clk_hw_fixed_factor("sys_pll2_333m", "sys_pll2_333m_cg", 1, 3);
+ hws[IMX8MP_SYS_PLL2_500M] = imx_clk_hw_fixed_factor("sys_pll2_500m", "sys_pll2_500m_cg", 1, 2);
hws[IMX8MP_SYS_PLL2_1000M] = imx_clk_hw_fixed_factor("sys_pll2_1000m", "sys_pll2_out", 1, 1);
- hws[IMX8MP_CLK_A53_SRC] = imx_clk_hw_mux2("arm_a53_src", ccm_base + 0x8000, 24, 3, imx8mp_a53_sels, ARRAY_SIZE(imx8mp_a53_sels));
- hws[IMX8MP_CLK_M7_SRC] = imx_clk_hw_mux2("arm_m7_src", ccm_base + 0x8080, 24, 3, imx8mp_m7_sels, ARRAY_SIZE(imx8mp_m7_sels));
- hws[IMX8MP_CLK_ML_SRC] = imx_clk_hw_mux2("ml_src", ccm_base + 0x8100, 24, 3, imx8mp_ml_sels, ARRAY_SIZE(imx8mp_ml_sels));
- hws[IMX8MP_CLK_GPU3D_CORE_SRC] = imx_clk_hw_mux2("gpu3d_core_src", ccm_base + 0x8180, 24, 3, imx8mp_gpu3d_core_sels, ARRAY_SIZE(imx8mp_gpu3d_core_sels));
- hws[IMX8MP_CLK_GPU3D_SHADER_SRC] = imx_clk_hw_mux2("gpu3d_shader_src", ccm_base + 0x8200, 24, 3, imx8mp_gpu3d_shader_sels, ARRAY_SIZE(imx8mp_gpu3d_shader_sels));
- hws[IMX8MP_CLK_GPU2D_SRC] = imx_clk_hw_mux2("gpu2d_src", ccm_base + 0x8280, 24, 3, imx8mp_gpu2d_sels, ARRAY_SIZE(imx8mp_gpu2d_sels));
- hws[IMX8MP_CLK_AUDIO_AXI_SRC] = imx_clk_hw_mux2("audio_axi_src", ccm_base + 0x8300, 24, 3, imx8mp_audio_axi_sels, ARRAY_SIZE(imx8mp_audio_axi_sels));
- hws[IMX8MP_CLK_HSIO_AXI_SRC] = imx_clk_hw_mux2("hsio_axi_src", ccm_base + 0x8380, 24, 3, imx8mp_hsio_axi_sels, ARRAY_SIZE(imx8mp_hsio_axi_sels));
- hws[IMX8MP_CLK_MEDIA_ISP_SRC] = imx_clk_hw_mux2("media_isp_src", ccm_base + 0x8400, 24, 3, imx8mp_media_isp_sels, ARRAY_SIZE(imx8mp_media_isp_sels));
- hws[IMX8MP_CLK_A53_CG] = imx_clk_hw_gate3("arm_a53_cg", "arm_a53_src", ccm_base + 0x8000, 28);
- hws[IMX8MP_CLK_M4_CG] = imx_clk_hw_gate3("arm_m7_cg", "arm_m7_src", ccm_base + 0x8080, 28);
- hws[IMX8MP_CLK_ML_CG] = imx_clk_hw_gate3("ml_cg", "ml_src", ccm_base + 0x8100, 28);
- hws[IMX8MP_CLK_GPU3D_CORE_CG] = imx_clk_hw_gate3("gpu3d_core_cg", "gpu3d_core_src", ccm_base + 0x8180, 28);
- hws[IMX8MP_CLK_GPU3D_SHADER_CG] = imx_clk_hw_gate3("gpu3d_shader_cg", "gpu3d_shader_src", ccm_base + 0x8200, 28);
- hws[IMX8MP_CLK_GPU2D_CG] = imx_clk_hw_gate3("gpu2d_cg", "gpu2d_src", ccm_base + 0x8280, 28);
- hws[IMX8MP_CLK_AUDIO_AXI_CG] = imx_clk_hw_gate3("audio_axi_cg", "audio_axi_src", ccm_base + 0x8300, 28);
- hws[IMX8MP_CLK_HSIO_AXI_CG] = imx_clk_hw_gate3("hsio_axi_cg", "hsio_axi_src", ccm_base + 0x8380, 28);
- hws[IMX8MP_CLK_MEDIA_ISP_CG] = imx_clk_hw_gate3("media_isp_cg", "media_isp_src", ccm_base + 0x8400, 28);
- hws[IMX8MP_CLK_A53_DIV] = imx_clk_hw_divider2("arm_a53_div", "arm_a53_cg", ccm_base + 0x8000, 0, 3);
- hws[IMX8MP_CLK_M7_DIV] = imx_clk_hw_divider2("arm_m7_div", "arm_m7_cg", ccm_base + 0x8080, 0, 3);
- hws[IMX8MP_CLK_ML_DIV] = imx_clk_hw_divider2("ml_div", "ml_cg", ccm_base + 0x8100, 0, 3);
- hws[IMX8MP_CLK_GPU3D_CORE_DIV] = imx_clk_hw_divider2("gpu3d_core_div", "gpu3d_core_cg", ccm_base + 0x8180, 0, 3);
- hws[IMX8MP_CLK_GPU3D_SHADER_DIV] = imx_clk_hw_divider2("gpu3d_shader_div", "gpu3d_shader_cg", ccm_base + 0x8200, 0, 3);
- hws[IMX8MP_CLK_GPU2D_DIV] = imx_clk_hw_divider2("gpu2d_div", "gpu2d_cg", ccm_base + 0x8280, 0, 3);
- hws[IMX8MP_CLK_AUDIO_AXI_DIV] = imx_clk_hw_divider2("audio_axi_div", "audio_axi_cg", ccm_base + 0x8300, 0, 3);
- hws[IMX8MP_CLK_HSIO_AXI_DIV] = imx_clk_hw_divider2("hsio_axi_div", "hsio_axi_cg", ccm_base + 0x8380, 0, 3);
- hws[IMX8MP_CLK_MEDIA_ISP_DIV] = imx_clk_hw_divider2("media_isp_div", "media_isp_cg", ccm_base + 0x8400, 0, 3);
+ hws[IMX8MP_CLK_A53_DIV] = imx8m_clk_hw_composite_core("arm_a53_div", imx8mp_a53_sels, ccm_base + 0x8000);
+ hws[IMX8MP_CLK_A53_SRC] = hws[IMX8MP_CLK_A53_DIV];
+ hws[IMX8MP_CLK_A53_CG] = hws[IMX8MP_CLK_A53_DIV];
+ hws[IMX8MP_CLK_M7_CORE] = imx8m_clk_hw_composite_core("m7_core", imx8mp_m7_sels, ccm_base + 0x8080);
+ hws[IMX8MP_CLK_ML_CORE] = imx8m_clk_hw_composite_core("ml_core", imx8mp_ml_sels, ccm_base + 0x8100);
+ hws[IMX8MP_CLK_GPU3D_CORE] = imx8m_clk_hw_composite_core("gpu3d_core", imx8mp_gpu3d_core_sels, ccm_base + 0x8180);
+ hws[IMX8MP_CLK_GPU3D_SHADER_CORE] = imx8m_clk_hw_composite("gpu3d_shader_core", imx8mp_gpu3d_shader_sels, ccm_base + 0x8200);
+ hws[IMX8MP_CLK_GPU2D_CORE] = imx8m_clk_hw_composite("gpu2d_core", imx8mp_gpu2d_sels, ccm_base + 0x8280);
+ hws[IMX8MP_CLK_AUDIO_AXI] = imx8m_clk_hw_composite("audio_axi", imx8mp_audio_axi_sels, ccm_base + 0x8300);
+ hws[IMX8MP_CLK_AUDIO_AXI_SRC] = hws[IMX8MP_CLK_AUDIO_AXI];
+ hws[IMX8MP_CLK_HSIO_AXI] = imx8m_clk_hw_composite("hsio_axi", imx8mp_hsio_axi_sels, ccm_base + 0x8380);
+ hws[IMX8MP_CLK_MEDIA_ISP] = imx8m_clk_hw_composite("media_isp", imx8mp_media_isp_sels, ccm_base + 0x8400);
/* CORE SEL */
hws[IMX8MP_CLK_A53_CORE] = imx_clk_hw_mux2("arm_a53_core", ccm_base + 0x9880, 24, 1, imx8mp_a53_core_sels, ARRAY_SIZE(imx8mp_a53_core_sels));
hws[IMX8MP_CLK_MAIN_AXI] = imx8m_clk_hw_composite_critical("main_axi", imx8mp_main_axi_sels, ccm_base + 0x8800);
- hws[IMX8MP_CLK_ENET_AXI] = imx8m_clk_hw_composite("enet_axi", imx8mp_enet_axi_sels, ccm_base + 0x8880);
+ hws[IMX8MP_CLK_ENET_AXI] = imx8m_clk_hw_composite_bus("enet_axi", imx8mp_enet_axi_sels, ccm_base + 0x8880);
hws[IMX8MP_CLK_NAND_USDHC_BUS] = imx8m_clk_hw_composite_critical("nand_usdhc_bus", imx8mp_nand_usdhc_sels, ccm_base + 0x8900);
- hws[IMX8MP_CLK_VPU_BUS] = imx8m_clk_hw_composite("vpu_bus", imx8mp_vpu_bus_sels, ccm_base + 0x8980);
- hws[IMX8MP_CLK_MEDIA_AXI] = imx8m_clk_hw_composite("media_axi", imx8mp_media_axi_sels, ccm_base + 0x8a00);
- hws[IMX8MP_CLK_MEDIA_APB] = imx8m_clk_hw_composite("media_apb", imx8mp_media_apb_sels, ccm_base + 0x8a80);
- hws[IMX8MP_CLK_HDMI_APB] = imx8m_clk_hw_composite("hdmi_apb", imx8mp_media_apb_sels, ccm_base + 0x8b00);
- hws[IMX8MP_CLK_HDMI_AXI] = imx8m_clk_hw_composite("hdmi_axi", imx8mp_media_axi_sels, ccm_base + 0x8b80);
- hws[IMX8MP_CLK_GPU_AXI] = imx8m_clk_hw_composite("gpu_axi", imx8mp_gpu_axi_sels, ccm_base + 0x8c00);
- hws[IMX8MP_CLK_GPU_AHB] = imx8m_clk_hw_composite("gpu_ahb", imx8mp_gpu_ahb_sels, ccm_base + 0x8c80);
+ hws[IMX8MP_CLK_VPU_BUS] = imx8m_clk_hw_composite_bus("vpu_bus", imx8mp_vpu_bus_sels, ccm_base + 0x8980);
+ hws[IMX8MP_CLK_MEDIA_AXI] = imx8m_clk_hw_composite_bus("media_axi", imx8mp_media_axi_sels, ccm_base + 0x8a00);
+ hws[IMX8MP_CLK_MEDIA_APB] = imx8m_clk_hw_composite_bus("media_apb", imx8mp_media_apb_sels, ccm_base + 0x8a80);
+ hws[IMX8MP_CLK_HDMI_APB] = imx8m_clk_hw_composite_bus("hdmi_apb", imx8mp_media_apb_sels, ccm_base + 0x8b00);
+ hws[IMX8MP_CLK_HDMI_AXI] = imx8m_clk_hw_composite_bus("hdmi_axi", imx8mp_media_axi_sels, ccm_base + 0x8b80);
+ hws[IMX8MP_CLK_GPU_AXI] = imx8m_clk_hw_composite_bus("gpu_axi", imx8mp_gpu_axi_sels, ccm_base + 0x8c00);
+ hws[IMX8MP_CLK_GPU_AHB] = imx8m_clk_hw_composite_bus("gpu_ahb", imx8mp_gpu_ahb_sels, ccm_base + 0x8c80);
hws[IMX8MP_CLK_NOC] = imx8m_clk_hw_composite_critical("noc", imx8mp_noc_sels, ccm_base + 0x8d00);
hws[IMX8MP_CLK_NOC_IO] = imx8m_clk_hw_composite_critical("noc_io", imx8mp_noc_io_sels, ccm_base + 0x8d80);
- hws[IMX8MP_CLK_ML_AXI] = imx8m_clk_hw_composite("ml_axi", imx8mp_ml_axi_sels, ccm_base + 0x8e00);
- hws[IMX8MP_CLK_ML_AHB] = imx8m_clk_hw_composite("ml_ahb", imx8mp_ml_ahb_sels, ccm_base + 0x8e80);
+ hws[IMX8MP_CLK_ML_AXI] = imx8m_clk_hw_composite_bus("ml_axi", imx8mp_ml_axi_sels, ccm_base + 0x8e00);
+ hws[IMX8MP_CLK_ML_AHB] = imx8m_clk_hw_composite_bus("ml_ahb", imx8mp_ml_ahb_sels, ccm_base + 0x8e80);
hws[IMX8MP_CLK_AHB] = imx8m_clk_hw_composite_critical("ahb_root", imx8mp_ahb_sels, ccm_base + 0x9000);
- hws[IMX8MP_CLK_AUDIO_AHB] = imx8m_clk_hw_composite("audio_ahb", imx8mp_audio_ahb_sels, ccm_base + 0x9100);
- hws[IMX8MP_CLK_MIPI_DSI_ESC_RX] = imx8m_clk_hw_composite("mipi_dsi_esc_rx", imx8mp_mipi_dsi_esc_rx_sels, ccm_base + 0x9200);
+ hws[IMX8MP_CLK_AUDIO_AHB] = imx8m_clk_hw_composite_bus("audio_ahb", imx8mp_audio_ahb_sels, ccm_base + 0x9100);
+ hws[IMX8MP_CLK_MIPI_DSI_ESC_RX] = imx8m_clk_hw_composite_bus("mipi_dsi_esc_rx", imx8mp_mipi_dsi_esc_rx_sels, ccm_base + 0x9200);
hws[IMX8MP_CLK_IPG_ROOT] = imx_clk_hw_divider2("ipg_root", "ahb_root", ccm_base + 0x9080, 0, 1);
hws[IMX8MP_CLK_IPG_AUDIO_ROOT] = imx_clk_hw_divider2("ipg_audio_root", "audio_ahb", ccm_base + 0x9180, 0, 1);
@@ -695,8 +698,8 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
hws[IMX8MP_CLK_SDMA1_ROOT] = imx_clk_hw_gate4("sdma1_root_clk", "ipg_root", ccm_base + 0x43a0, 0);
hws[IMX8MP_CLK_ENET_QOS_ROOT] = imx_clk_hw_gate4("enet_qos_root_clk", "sim_enet_root_clk", ccm_base + 0x43b0, 0);
hws[IMX8MP_CLK_SIM_ENET_ROOT] = imx_clk_hw_gate4("sim_enet_root_clk", "enet_axi", ccm_base + 0x4400, 0);
- hws[IMX8MP_CLK_GPU2D_ROOT] = imx_clk_hw_gate4("gpu2d_root_clk", "gpu2d_div", ccm_base + 0x4450, 0);
- hws[IMX8MP_CLK_GPU3D_ROOT] = imx_clk_hw_gate4("gpu3d_root_clk", "gpu3d_core_div", ccm_base + 0x4460, 0);
+ hws[IMX8MP_CLK_GPU2D_ROOT] = imx_clk_hw_gate4("gpu2d_root_clk", "gpu2d_core", ccm_base + 0x4450, 0);
+ hws[IMX8MP_CLK_GPU3D_ROOT] = imx_clk_hw_gate4("gpu3d_root_clk", "gpu3d_core", ccm_base + 0x4460, 0);
hws[IMX8MP_CLK_SNVS_ROOT] = imx_clk_hw_gate4("snvs_root_clk", "ipg_root", ccm_base + 0x4470, 0);
hws[IMX8MP_CLK_UART1_ROOT] = imx_clk_hw_gate4("uart1_root_clk", "uart1", ccm_base + 0x4490, 0);
hws[IMX8MP_CLK_UART2_ROOT] = imx_clk_hw_gate4("uart2_root_clk", "uart2", ccm_base + 0x44a0, 0);
@@ -713,7 +716,7 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
hws[IMX8MP_CLK_GPU_ROOT] = imx_clk_hw_gate4("gpu_root_clk", "gpu_axi", ccm_base + 0x4570, 0);
hws[IMX8MP_CLK_VPU_VC8KE_ROOT] = imx_clk_hw_gate4("vpu_vc8ke_root_clk", "vpu_vc8000e", ccm_base + 0x4590, 0);
hws[IMX8MP_CLK_VPU_G2_ROOT] = imx_clk_hw_gate4("vpu_g2_root_clk", "vpu_g2", ccm_base + 0x45a0, 0);
- hws[IMX8MP_CLK_NPU_ROOT] = imx_clk_hw_gate4("npu_root_clk", "ml_div", ccm_base + 0x45b0, 0);
+ hws[IMX8MP_CLK_NPU_ROOT] = imx_clk_hw_gate4("npu_root_clk", "ml_core", ccm_base + 0x45b0, 0);
hws[IMX8MP_CLK_HSIO_ROOT] = imx_clk_hw_gate4("hsio_root_clk", "ipg_root", ccm_base + 0x45c0, 0);
hws[IMX8MP_CLK_MEDIA_APB_ROOT] = imx_clk_hw_gate2_shared2("media_apb_root_clk", "media_apb", ccm_base + 0x45d0, 0, &share_count_media);
hws[IMX8MP_CLK_MEDIA_AXI_ROOT] = imx_clk_hw_gate2_shared2("media_axi_root_clk", "media_axi", ccm_base + 0x45d0, 0, &share_count_media);
@@ -721,7 +724,7 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
hws[IMX8MP_CLK_MEDIA_CAM2_PIX_ROOT] = imx_clk_hw_gate2_shared2("media_cam2_pix_root_clk", "media_cam2_pix", ccm_base + 0x45d0, 0, &share_count_media);
hws[IMX8MP_CLK_MEDIA_DISP1_PIX_ROOT] = imx_clk_hw_gate2_shared2("media_disp1_pix_root_clk", "media_disp1_pix", ccm_base + 0x45d0, 0, &share_count_media);
hws[IMX8MP_CLK_MEDIA_DISP2_PIX_ROOT] = imx_clk_hw_gate2_shared2("media_disp2_pix_root_clk", "media_disp2_pix", ccm_base + 0x45d0, 0, &share_count_media);
- hws[IMX8MP_CLK_MEDIA_ISP_ROOT] = imx_clk_hw_gate2_shared2("media_isp_root_clk", "media_isp_div", ccm_base + 0x45d0, 0, &share_count_media);
+ hws[IMX8MP_CLK_MEDIA_ISP_ROOT] = imx_clk_hw_gate2_shared2("media_isp_root_clk", "media_isp", ccm_base + 0x45d0, 0, &share_count_media);
hws[IMX8MP_CLK_USDHC3_ROOT] = imx_clk_hw_gate4("usdhc3_root_clk", "usdhc3", ccm_base + 0x45e0, 0);
hws[IMX8MP_CLK_HDMI_ROOT] = imx_clk_hw_gate4("hdmi_root_clk", "hdmi_axi", ccm_base + 0x45f0, 0);
@@ -735,9 +738,6 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
hws[IMX8MP_ARM_PLL_OUT]->clk,
hws[IMX8MP_CLK_A53_DIV]->clk);
- clk_hw_set_parent(hws[IMX8MP_CLK_A53_SRC], hws[IMX8MP_SYS_PLL1_800M]);
- clk_hw_set_parent(hws[IMX8MP_CLK_A53_CORE], hws[IMX8MP_ARM_PLL_OUT]);
-
imx_check_clk_hws(hws, IMX8MP_CLK_END);
of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data);
diff --git a/drivers/clk/imx/clk-imx8mq.c b/drivers/clk/imx/clk-imx8mq.c
index fdc68db68de5..a64aace213c2 100644
--- a/drivers/clk/imx/clk-imx8mq.c
+++ b/drivers/clk/imx/clk-imx8mq.c
@@ -405,9 +405,9 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
return PTR_ERR(base);
/* CORE */
- hws[IMX8MQ_CLK_A53_SRC] = imx_clk_hw_mux2("arm_a53_src", base + 0x8000, 24, 3, imx8mq_a53_sels, ARRAY_SIZE(imx8mq_a53_sels));
- hws[IMX8MQ_CLK_A53_CG] = imx_clk_hw_gate3_flags("arm_a53_cg", "arm_a53_src", base + 0x8000, 28, CLK_IS_CRITICAL);
- hws[IMX8MQ_CLK_A53_DIV] = imx_clk_hw_divider2("arm_a53_div", "arm_a53_cg", base + 0x8000, 0, 3);
+ hws[IMX8MQ_CLK_A53_DIV] = imx8m_clk_hw_composite_core("arm_a53_div", imx8mq_a53_sels, base + 0x8000);
+ hws[IMX8MQ_CLK_A53_CG] = hws[IMX8MQ_CLK_A53_DIV];
+ hws[IMX8MQ_CLK_A53_SRC] = hws[IMX8MQ_CLK_A53_DIV];
hws[IMX8MQ_CLK_M4_CORE] = imx8m_clk_hw_composite_core("arm_m4_core", imx8mq_arm_m4_sels, base + 0x8080);
hws[IMX8MQ_CLK_VPU_CORE] = imx8m_clk_hw_composite_core("vpu_core", imx8mq_vpu_sels, base + 0x8100);
@@ -432,22 +432,22 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
/* BUS */
hws[IMX8MQ_CLK_MAIN_AXI] = imx8m_clk_hw_composite_critical("main_axi", imx8mq_main_axi_sels, base + 0x8800);
- hws[IMX8MQ_CLK_ENET_AXI] = imx8m_clk_hw_composite("enet_axi", imx8mq_enet_axi_sels, base + 0x8880);
- hws[IMX8MQ_CLK_NAND_USDHC_BUS] = imx8m_clk_hw_composite("nand_usdhc_bus", imx8mq_nand_usdhc_sels, base + 0x8900);
- hws[IMX8MQ_CLK_VPU_BUS] = imx8m_clk_hw_composite("vpu_bus", imx8mq_vpu_bus_sels, base + 0x8980);
- hws[IMX8MQ_CLK_DISP_AXI] = imx8m_clk_hw_composite("disp_axi", imx8mq_disp_axi_sels, base + 0x8a00);
- hws[IMX8MQ_CLK_DISP_APB] = imx8m_clk_hw_composite("disp_apb", imx8mq_disp_apb_sels, base + 0x8a80);
- hws[IMX8MQ_CLK_DISP_RTRM] = imx8m_clk_hw_composite("disp_rtrm", imx8mq_disp_rtrm_sels, base + 0x8b00);
- hws[IMX8MQ_CLK_USB_BUS] = imx8m_clk_hw_composite("usb_bus", imx8mq_usb_bus_sels, base + 0x8b80);
- hws[IMX8MQ_CLK_GPU_AXI] = imx8m_clk_hw_composite("gpu_axi", imx8mq_gpu_axi_sels, base + 0x8c00);
- hws[IMX8MQ_CLK_GPU_AHB] = imx8m_clk_hw_composite("gpu_ahb", imx8mq_gpu_ahb_sels, base + 0x8c80);
+ hws[IMX8MQ_CLK_ENET_AXI] = imx8m_clk_hw_composite_bus("enet_axi", imx8mq_enet_axi_sels, base + 0x8880);
+ hws[IMX8MQ_CLK_NAND_USDHC_BUS] = imx8m_clk_hw_composite_bus("nand_usdhc_bus", imx8mq_nand_usdhc_sels, base + 0x8900);
+ hws[IMX8MQ_CLK_VPU_BUS] = imx8m_clk_hw_composite_bus("vpu_bus", imx8mq_vpu_bus_sels, base + 0x8980);
+ hws[IMX8MQ_CLK_DISP_AXI] = imx8m_clk_hw_composite_bus("disp_axi", imx8mq_disp_axi_sels, base + 0x8a00);
+ hws[IMX8MQ_CLK_DISP_APB] = imx8m_clk_hw_composite_bus("disp_apb", imx8mq_disp_apb_sels, base + 0x8a80);
+ hws[IMX8MQ_CLK_DISP_RTRM] = imx8m_clk_hw_composite_bus("disp_rtrm", imx8mq_disp_rtrm_sels, base + 0x8b00);
+ hws[IMX8MQ_CLK_USB_BUS] = imx8m_clk_hw_composite_bus("usb_bus", imx8mq_usb_bus_sels, base + 0x8b80);
+ hws[IMX8MQ_CLK_GPU_AXI] = imx8m_clk_hw_composite_bus("gpu_axi", imx8mq_gpu_axi_sels, base + 0x8c00);
+ hws[IMX8MQ_CLK_GPU_AHB] = imx8m_clk_hw_composite_bus("gpu_ahb", imx8mq_gpu_ahb_sels, base + 0x8c80);
hws[IMX8MQ_CLK_NOC] = imx8m_clk_hw_composite_critical("noc", imx8mq_noc_sels, base + 0x8d00);
hws[IMX8MQ_CLK_NOC_APB] = imx8m_clk_hw_composite_critical("noc_apb", imx8mq_noc_apb_sels, base + 0x8d80);
/* AHB */
/* AHB clock is used by the AHB bus therefore marked as critical */
hws[IMX8MQ_CLK_AHB] = imx8m_clk_hw_composite_critical("ahb", imx8mq_ahb_sels, base + 0x9000);
- hws[IMX8MQ_CLK_AUDIO_AHB] = imx8m_clk_hw_composite("audio_ahb", imx8mq_audio_ahb_sels, base + 0x9100);
+ hws[IMX8MQ_CLK_AUDIO_AHB] = imx8m_clk_hw_composite_bus("audio_ahb", imx8mq_audio_ahb_sels, base + 0x9100);
/* IPG */
hws[IMX8MQ_CLK_IPG_ROOT] = imx_clk_hw_divider2("ipg_root", "ahb", base + 0x9080, 0, 1);
@@ -599,9 +599,6 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
hws[IMX8MQ_ARM_PLL_OUT]->clk,
hws[IMX8MQ_CLK_A53_DIV]->clk);
- clk_hw_set_parent(hws[IMX8MQ_CLK_A53_SRC], hws[IMX8MQ_SYS1_PLL_800M]);
- clk_hw_set_parent(hws[IMX8MQ_CLK_A53_CORE], hws[IMX8MQ_ARM_PLL_OUT]);
-
imx_check_clk_hws(hws, IMX8MQ_CLK_END);
err = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data);
diff --git a/drivers/clk/imx/clk-pll14xx.c b/drivers/clk/imx/clk-pll14xx.c
index a83bbbee77d9..f9eb189b93c0 100644
--- a/drivers/clk/imx/clk-pll14xx.c
+++ b/drivers/clk/imx/clk-pll14xx.c
@@ -378,9 +378,9 @@ static const struct clk_ops clk_pll1443x_ops = {
.set_rate = clk_pll1443x_set_rate,
};
-struct clk_hw *imx_clk_hw_pll14xx(const char *name, const char *parent_name,
- void __iomem *base,
- const struct imx_pll14xx_clk *pll_clk)
+struct clk_hw *imx_dev_clk_hw_pll14xx(struct device *dev, const char *name,
+ const char *parent_name, void __iomem *base,
+ const struct imx_pll14xx_clk *pll_clk)
{
struct clk_pll14xx *pll;
struct clk_hw *hw;
@@ -426,7 +426,7 @@ struct clk_hw *imx_clk_hw_pll14xx(const char *name, const char *parent_name,
hw = &pll->hw;
- ret = clk_hw_register(NULL, hw);
+ ret = clk_hw_register(dev, hw);
if (ret) {
pr_err("%s: failed to register pll %s %d\n",
__func__, name, ret);
diff --git a/drivers/clk/imx/clk-pllv3.c b/drivers/clk/imx/clk-pllv3.c
index df91a8244fb4..a7db93030e02 100644
--- a/drivers/clk/imx/clk-pllv3.c
+++ b/drivers/clk/imx/clk-pllv3.c
@@ -7,6 +7,7 @@
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/err.h>
@@ -25,6 +26,8 @@
#define IMX7_ENET_PLL_POWER (0x1 << 5)
#define IMX7_DDR_PLL_POWER (0x1 << 20)
+#define PLL_LOCK_TIMEOUT 10000
+
/**
* struct clk_pllv3 - IMX PLL clock version 3
* @clk_hw: clock source
@@ -53,23 +56,14 @@ struct clk_pllv3 {
static int clk_pllv3_wait_lock(struct clk_pllv3 *pll)
{
- unsigned long timeout = jiffies + msecs_to_jiffies(10);
u32 val = readl_relaxed(pll->base) & pll->power_bit;
/* No need to wait for lock when pll is not powered up */
if ((pll->powerup_set && !val) || (!pll->powerup_set && val))
return 0;
- /* Wait for PLL to lock */
- do {
- if (readl_relaxed(pll->base) & BM_PLL_LOCK)
- break;
- if (time_after(jiffies, timeout))
- break;
- usleep_range(50, 500);
- } while (1);
-
- return readl_relaxed(pll->base) & BM_PLL_LOCK ? 0 : -ETIMEDOUT;
+ return readl_relaxed_poll_timeout(pll->base, val, val & BM_PLL_LOCK,
+ 500, PLL_LOCK_TIMEOUT);
}
static int clk_pllv3_prepare(struct clk_hw *hw)
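For reference, readl_relaxed_poll_timeout() from <linux/iopoll.h> (the include added above) replaces the hand-rolled jiffies loop; a simplified, approximate sketch of its behaviour with the arguments used here (500 us sleep, PLL_LOCK_TIMEOUT = 10000 us) is:

	static int pllv3_wait_lock_sketch(void __iomem *base)
	{
		u32 val;
		unsigned int waited_us = 0;

		for (;;) {
			val = readl_relaxed(base);
			if (val & BM_PLL_LOCK)
				return 0;
			if (waited_us >= PLL_LOCK_TIMEOUT)
				return (readl_relaxed(base) & BM_PLL_LOCK) ?
				       0 : -ETIMEDOUT;
			usleep_range((500 >> 2) + 1, 500);
			waited_us += 500;
		}
	}

The real helper keeps time with ktime rather than a counter, but the net effect matches the removed loop: a sleeping poll of BM_PLL_LOCK bounded at roughly 10 ms.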
diff --git a/drivers/clk/imx/clk-sscg-pll.c b/drivers/clk/imx/clk-sscg-pll.c
index d4a2be16d132..773d8a545cdf 100644
--- a/drivers/clk/imx/clk-sscg-pll.c
+++ b/drivers/clk/imx/clk-sscg-pll.c
@@ -72,7 +72,6 @@ struct clk_sscg_pll_setup {
int divr2, divf2;
int divq;
int bypass;
-
uint64_t vco1;
uint64_t vco2;
uint64_t fout;
@@ -86,11 +85,8 @@ struct clk_sscg_pll_setup {
struct clk_sscg_pll {
struct clk_hw hw;
const struct clk_ops ops;
-
void __iomem *base;
-
struct clk_sscg_pll_setup setup;
-
u8 parent;
u8 bypass1;
u8 bypass2;
@@ -194,7 +190,6 @@ static int clk_sscg_pll2_find_setup(struct clk_sscg_pll_setup *setup,
struct clk_sscg_pll_setup *temp_setup,
uint64_t ref)
{
-
int ret;
if (ref < PLL_STAGE1_MIN_FREQ || ref > PLL_STAGE1_MAX_FREQ)
@@ -253,7 +248,6 @@ static int clk_sscg_pll1_find_setup(struct clk_sscg_pll_setup *setup,
struct clk_sscg_pll_setup *temp_setup,
uint64_t ref)
{
-
int ret;
if (ref < PLL_REF_MIN_FREQ || ref > PLL_REF_MAX_FREQ)
@@ -280,7 +274,6 @@ static int clk_sscg_pll_find_setup(struct clk_sscg_pll_setup *setup,
temp_setup.fout_request = rate;
switch (try_bypass) {
-
case PLL_BYPASS2:
if (prate == rate) {
setup->bypass = PLL_BYPASS2;
@@ -288,11 +281,9 @@ static int clk_sscg_pll_find_setup(struct clk_sscg_pll_setup *setup,
ret = 0;
}
break;
-
case PLL_BYPASS1:
ret = clk_sscg_pll2_find_setup(setup, &temp_setup, prate);
break;
-
case PLL_BYPASS_NONE:
ret = clk_sscg_pll1_find_setup(setup, &temp_setup, prate);
break;
@@ -301,7 +292,6 @@ static int clk_sscg_pll_find_setup(struct clk_sscg_pll_setup *setup,
return ret;
}
-
static int clk_sscg_pll_is_prepared(struct clk_hw *hw)
{
struct clk_sscg_pll *pll = to_clk_sscg_pll(hw);
diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h
index f074dd8ec42e..16adbc34e05f 100644
--- a/drivers/clk/imx/clk.h
+++ b/drivers/clk/imx/clk.h
@@ -5,6 +5,8 @@
#include <linux/spinlock.h>
#include <linux/clk-provider.h>
+#define IMX_CLK_GATE2_SINGLE_BIT 1
+
extern spinlock_t imx_ccm_lock;
void imx_check_clocks(struct clk *clks[], unsigned int count);
@@ -131,9 +133,9 @@ struct clk *imx_clk_pll14xx(const char *name, const char *parent_name,
#define imx_clk_pll14xx(name, parent_name, base, pll_clk) \
to_clk(imx_clk_hw_pll14xx(name, parent_name, base, pll_clk))
-struct clk_hw *imx_clk_hw_pll14xx(const char *name, const char *parent_name,
- void __iomem *base,
- const struct imx_pll14xx_clk *pll_clk);
+struct clk_hw *imx_dev_clk_hw_pll14xx(struct device *dev, const char *name,
+ const char *parent_name, void __iomem *base,
+ const struct imx_pll14xx_clk *pll_clk);
struct clk_hw *imx_clk_hw_pllv1(enum imx_pllv1_type type, const char *name,
const char *parent, void __iomem *base);
@@ -240,6 +242,13 @@ static inline struct clk *to_clk(struct clk_hw *hw)
return hw->clk;
}
+static inline struct clk_hw *imx_clk_hw_pll14xx(const char *name, const char *parent_name,
+ void __iomem *base,
+ const struct imx_pll14xx_clk *pll_clk)
+{
+ return imx_dev_clk_hw_pll14xx(NULL, name, parent_name, base, pll_clk);
+}
+
static inline struct clk_hw *imx_clk_hw_fixed(const char *name, int rate)
{
return clk_hw_register_fixed_rate(NULL, name, NULL, 0, rate);
@@ -310,6 +319,13 @@ static inline struct clk_hw *imx_clk_hw_gate(const char *name, const char *paren
shift, 0, &imx_ccm_lock);
}
+static inline struct clk_hw *imx_dev_clk_hw_gate(struct device *dev, const char *name,
+ const char *parent, void __iomem *reg, u8 shift)
+{
+ return clk_hw_register_gate(dev, name, parent, CLK_SET_RATE_PARENT, reg,
+ shift, 0, &imx_ccm_lock);
+}
+
static inline struct clk_hw *imx_clk_hw_gate_dis(const char *name, const char *parent,
void __iomem *reg, u8 shift)
{
@@ -355,6 +371,17 @@ static inline struct clk_hw *imx_clk_hw_gate2_shared2(const char *name,
&imx_ccm_lock, share_count);
}
+static inline struct clk_hw *imx_dev_clk_hw_gate_shared(struct device *dev,
+ const char *name, const char *parent,
+ void __iomem *reg, u8 shift,
+ unsigned int *share_count)
+{
+ return clk_hw_register_gate2(NULL, name, parent, CLK_SET_RATE_PARENT |
+ CLK_OPS_PARENT_ENABLE, reg, shift, 0x3,
+ IMX_CLK_GATE2_SINGLE_BIT,
+ &imx_ccm_lock, share_count);
+}
+
static inline struct clk *imx_clk_gate2_cgr(const char *name,
const char *parent, void __iomem *reg, u8 shift, u8 cgr_val)
{
@@ -411,6 +438,15 @@ static inline struct clk_hw *imx_clk_hw_mux(const char *name, void __iomem *reg,
width, 0, &imx_ccm_lock);
}
+static inline struct clk_hw *imx_dev_clk_hw_mux(struct device *dev,
+ const char *name, void __iomem *reg, u8 shift,
+ u8 width, const char * const *parents, int num_parents)
+{
+ return clk_hw_register_mux(dev, name, parents, num_parents,
+ CLK_SET_RATE_NO_REPARENT | CLK_SET_PARENT_GATE,
+ reg, shift, width, 0, &imx_ccm_lock);
+}
+
static inline struct clk *imx_clk_mux2(const char *name, void __iomem *reg,
u8 shift, u8 width, const char * const *parents,
int num_parents)
@@ -473,11 +509,25 @@ static inline struct clk_hw *imx_clk_hw_mux_flags(const char *name,
reg, shift, width, 0, &imx_ccm_lock);
}
+static inline struct clk_hw *imx_dev_clk_hw_mux_flags(struct device *dev,
+ const char *name,
+ void __iomem *reg, u8 shift,
+ u8 width,
+ const char * const *parents,
+ int num_parents,
+ unsigned long flags)
+{
+ return clk_hw_register_mux(dev, name, parents, num_parents,
+ flags | CLK_SET_RATE_NO_REPARENT,
+ reg, shift, width, 0, &imx_ccm_lock);
+}
+
struct clk_hw *imx_clk_hw_cpu(const char *name, const char *parent_name,
struct clk *div, struct clk *mux, struct clk *pll,
struct clk *step);
#define IMX_COMPOSITE_CORE BIT(0)
+#define IMX_COMPOSITE_BUS BIT(1)
struct clk_hw *imx8m_clk_hw_composite_flags(const char *name,
const char * const *parent_names,
@@ -486,6 +536,12 @@ struct clk_hw *imx8m_clk_hw_composite_flags(const char *name,
u32 composite_flags,
unsigned long flags);
+#define imx8m_clk_hw_composite_bus(name, parent_names, reg) \
+ imx8m_clk_hw_composite_flags(name, parent_names, \
+ ARRAY_SIZE(parent_names), reg, \
+ IMX_COMPOSITE_BUS, \
+ CLK_SET_RATE_NO_REPARENT | CLK_OPS_PARENT_ENABLE)
+
#define imx8m_clk_hw_composite_core(name, parent_names, reg) \
imx8m_clk_hw_composite_flags(name, parent_names, \
ARRAY_SIZE(parent_names), reg, \
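Together with the clk-pll14xx.c change above, the clk.h hunks introduce imx_dev_clk_hw_*() variants that take an explicit struct device (imx_dev_clk_hw_pll14xx(), imx_dev_clk_hw_gate(), imx_dev_clk_hw_mux(), imx_dev_clk_hw_mux_flags(), imx_dev_clk_hw_gate_shared()), and imx_clk_hw_pll14xx() becomes a static inline NULL-device wrapper so existing callers keep working. A hypothetical caller, with invented names purely for illustration, might look like:

	static const char * const example_sels[] = { "osc_24m", "sys_pll1_800m" };

	static int example_ccm_probe(struct platform_device *pdev)
	{
		struct device *dev = &pdev->dev;
		void __iomem *base;

		base = devm_platform_ioremap_resource(pdev, 0);
		if (IS_ERR(base))
			return PTR_ERR(base);

		/* these clk_hw's are registered against 'dev' rather than NULL */
		imx_dev_clk_hw_gate(dev, "example_gate", "osc_24m", base + 0x10, 3);
		imx_dev_clk_hw_mux(dev, "example_mux", base + 0x20, 0, 1,
				   example_sels, ARRAY_SIZE(example_sels));
		return 0;
	}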
diff --git a/drivers/clk/ingenic/Kconfig b/drivers/clk/ingenic/Kconfig
index b4555b465ea6..580b0cf69ed5 100644
--- a/drivers/clk/ingenic/Kconfig
+++ b/drivers/clk/ingenic/Kconfig
@@ -55,6 +55,16 @@ config INGENIC_CGU_X1000
If building for a X1000 SoC, you want to say Y here.
+config INGENIC_CGU_X1830
+ bool "Ingenic X1830 CGU driver"
+ default MACH_X1830
+ select INGENIC_CGU_COMMON
+ help
+ Support the clocks provided by the CGU hardware on Ingenic X1830
+ and compatible SoCs.
+
+ If building for a X1830 SoC, you want to say Y here.
+
config INGENIC_TCU_CLK
bool "Ingenic JZ47xx TCU clocks driver"
default MACH_INGENIC
diff --git a/drivers/clk/ingenic/Makefile b/drivers/clk/ingenic/Makefile
index 8b1dad9b74a7..aaa4bffe03c6 100644
--- a/drivers/clk/ingenic/Makefile
+++ b/drivers/clk/ingenic/Makefile
@@ -5,4 +5,5 @@ obj-$(CONFIG_INGENIC_CGU_JZ4725B) += jz4725b-cgu.o
obj-$(CONFIG_INGENIC_CGU_JZ4770) += jz4770-cgu.o
obj-$(CONFIG_INGENIC_CGU_JZ4780) += jz4780-cgu.o
obj-$(CONFIG_INGENIC_CGU_X1000) += x1000-cgu.o
+obj-$(CONFIG_INGENIC_CGU_X1830) += x1830-cgu.o
obj-$(CONFIG_INGENIC_TCU_CLK) += tcu.o
diff --git a/drivers/clk/ingenic/cgu.c b/drivers/clk/ingenic/cgu.c
index 6e963031cd87..d7981b670221 100644
--- a/drivers/clk/ingenic/cgu.c
+++ b/drivers/clk/ingenic/cgu.c
@@ -76,16 +76,13 @@ ingenic_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
const struct ingenic_cgu_pll_info *pll_info;
unsigned m, n, od_enc, od;
bool bypass;
- unsigned long flags;
u32 ctl;
clk_info = &cgu->clock_info[ingenic_clk->idx];
BUG_ON(clk_info->type != CGU_CLK_PLL);
pll_info = &clk_info->pll;
- spin_lock_irqsave(&cgu->lock, flags);
ctl = readl(cgu->base + pll_info->reg);
- spin_unlock_irqrestore(&cgu->lock, flags);
m = (ctl >> pll_info->m_shift) & GENMASK(pll_info->m_bits - 1, 0);
m += pll_info->m_offset;
@@ -93,6 +90,9 @@ ingenic_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
n += pll_info->n_offset;
od_enc = ctl >> pll_info->od_shift;
od_enc &= GENMASK(pll_info->od_bits - 1, 0);
+
+ ctl = readl(cgu->base + pll_info->bypass_reg);
+
bypass = !pll_info->no_bypass_bit &&
!!(ctl & BIT(pll_info->bypass_bit));
@@ -106,7 +106,8 @@ ingenic_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
BUG_ON(od == pll_info->od_max);
od++;
- return div_u64((u64)parent_rate * m, n * od);
+ return div_u64((u64)parent_rate * m * pll_info->rate_multiplier,
+ n * od);
}
static unsigned long
@@ -139,7 +140,8 @@ ingenic_pll_calc(const struct ingenic_cgu_clk_info *clk_info,
if (pod)
*pod = od;
- return div_u64((u64)parent_rate * m, n * od);
+ return div_u64((u64)parent_rate * m * pll_info->rate_multiplier,
+ n * od);
}
static inline const struct ingenic_cgu_clk_info *to_clk_info(
@@ -212,9 +214,14 @@ static int ingenic_pll_enable(struct clk_hw *hw)
u32 ctl;
spin_lock_irqsave(&cgu->lock, flags);
- ctl = readl(cgu->base + pll_info->reg);
+ ctl = readl(cgu->base + pll_info->bypass_reg);
ctl &= ~BIT(pll_info->bypass_bit);
+
+ writel(ctl, cgu->base + pll_info->bypass_reg);
+
+ ctl = readl(cgu->base + pll_info->reg);
+
ctl |= BIT(pll_info->enable_bit);
writel(ctl, cgu->base + pll_info->reg);
@@ -259,12 +266,9 @@ static int ingenic_pll_is_enabled(struct clk_hw *hw)
struct ingenic_cgu *cgu = ingenic_clk->cgu;
const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
- unsigned long flags;
u32 ctl;
- spin_lock_irqsave(&cgu->lock, flags);
ctl = readl(cgu->base + pll_info->reg);
- spin_unlock_irqrestore(&cgu->lock, flags);
return !!(ctl & BIT(pll_info->enable_bit));
}
@@ -562,16 +566,12 @@ static int ingenic_clk_is_enabled(struct clk_hw *hw)
struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
struct ingenic_cgu *cgu = ingenic_clk->cgu;
const struct ingenic_cgu_clk_info *clk_info;
- unsigned long flags;
int enabled = 1;
clk_info = &cgu->clock_info[ingenic_clk->idx];
- if (clk_info->type & CGU_CLK_GATE) {
- spin_lock_irqsave(&cgu->lock, flags);
+ if (clk_info->type & CGU_CLK_GATE)
enabled = !ingenic_cgu_gate_get(cgu, &clk_info->gate);
- spin_unlock_irqrestore(&cgu->lock, flags);
- }
return enabled;
}
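Two behavioural changes in the cgu.c hunks above: the bypass bit is now read from a dedicated pll_info->bypass_reg (so it may live in a different register than the M/N/OD fields), and the computed rate is scaled by a per-PLL rate_multiplier. The resulting formula, as a self-contained sketch:

	/* rate = parent_rate * m * rate_multiplier / (n * od) */
	static unsigned long ingenic_pll_rate_sketch(unsigned long parent_rate,
						     unsigned int m, unsigned int n,
						     unsigned int od,
						     unsigned int rate_multiplier)
	{
		return div_u64((u64)parent_rate * m * rate_multiplier, n * od);
	}

The existing jz47xx/x1000 descriptors below set rate_multiplier to 1 and keep bypass_reg equal to the main PLL register, so their behaviour is unchanged; the lock-free readl()s in recalc_rate() and is_enabled() simply drop a spinlock that previously guarded single register reads.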
diff --git a/drivers/clk/ingenic/cgu.h b/drivers/clk/ingenic/cgu.h
index 0dc8004079ee..2c75ef4a36f5 100644
--- a/drivers/clk/ingenic/cgu.h
+++ b/drivers/clk/ingenic/cgu.h
@@ -17,6 +17,7 @@
/**
* struct ingenic_cgu_pll_info - information about a PLL
* @reg: the offset of the PLL's control register within the CGU
+ * @rate_multiplier: the multiplier needed by pll rate calculation
* @m_shift: the number of bits to shift the multiplier value by (ie. the
* index of the lowest bit of the multiplier value in the PLL's
* control register)
@@ -37,6 +38,7 @@
* @od_encoding: a pointer to an array mapping post-VCO divider values to
* their encoded values in the PLL control register, or -1 for
* unsupported values
+ * @bypass_reg: the offset of the bypass control register within the CGU
* @bypass_bit: the index of the bypass bit in the PLL control register
* @enable_bit: the index of the enable bit in the PLL control register
* @stable_bit: the index of the stable bit in the PLL control register
@@ -44,10 +46,12 @@
*/
struct ingenic_cgu_pll_info {
unsigned reg;
+ unsigned rate_multiplier;
const s8 *od_encoding;
u8 m_shift, m_bits, m_offset;
u8 n_shift, n_bits, n_offset;
u8 od_shift, od_bits, od_max;
+ unsigned bypass_reg;
u8 bypass_bit;
u8 enable_bit;
u8 stable_bit;
diff --git a/drivers/clk/ingenic/jz4725b-cgu.c b/drivers/clk/ingenic/jz4725b-cgu.c
index a3b4635f6278..8c38e72d14a7 100644
--- a/drivers/clk/ingenic/jz4725b-cgu.c
+++ b/drivers/clk/ingenic/jz4725b-cgu.c
@@ -9,7 +9,9 @@
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/of.h>
+
#include <dt-bindings/clock/jz4725b-cgu.h>
+
#include "cgu.h"
#include "pm.h"
@@ -54,6 +56,7 @@ static const struct ingenic_cgu_clk_info jz4725b_cgu_clocks[] = {
.parents = { JZ4725B_CLK_EXT, -1, -1, -1 },
.pll = {
.reg = CGU_REG_CPPCR,
+ .rate_multiplier = 1,
.m_shift = 23,
.m_bits = 9,
.m_offset = 2,
@@ -65,6 +68,7 @@ static const struct ingenic_cgu_clk_info jz4725b_cgu_clocks[] = {
.od_max = 4,
.od_encoding = pll_od_encoding,
.stable_bit = 10,
+ .bypass_reg = CGU_REG_CPPCR,
.bypass_bit = 9,
.enable_bit = 8,
},
diff --git a/drivers/clk/ingenic/jz4740-cgu.c b/drivers/clk/ingenic/jz4740-cgu.c
index 4f0e92c877d6..c0ac9196a581 100644
--- a/drivers/clk/ingenic/jz4740-cgu.c
+++ b/drivers/clk/ingenic/jz4740-cgu.c
@@ -10,7 +10,9 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/of.h>
+
#include <dt-bindings/clock/jz4740-cgu.h>
+
#include "cgu.h"
#include "pm.h"
@@ -69,6 +71,7 @@ static const struct ingenic_cgu_clk_info jz4740_cgu_clocks[] = {
.parents = { JZ4740_CLK_EXT, -1, -1, -1 },
.pll = {
.reg = CGU_REG_CPPCR,
+ .rate_multiplier = 1,
.m_shift = 23,
.m_bits = 9,
.m_offset = 2,
@@ -80,6 +83,7 @@ static const struct ingenic_cgu_clk_info jz4740_cgu_clocks[] = {
.od_max = 4,
.od_encoding = pll_od_encoding,
.stable_bit = 10,
+ .bypass_reg = CGU_REG_CPPCR,
.bypass_bit = 9,
.enable_bit = 8,
},
diff --git a/drivers/clk/ingenic/jz4770-cgu.c b/drivers/clk/ingenic/jz4770-cgu.c
index c051ecba5cf8..9ea4490ecb7f 100644
--- a/drivers/clk/ingenic/jz4770-cgu.c
+++ b/drivers/clk/ingenic/jz4770-cgu.c
@@ -9,7 +9,9 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/of.h>
+
#include <dt-bindings/clock/jz4770-cgu.h>
+
#include "cgu.h"
#include "pm.h"
@@ -102,6 +104,7 @@ static const struct ingenic_cgu_clk_info jz4770_cgu_clocks[] = {
.parents = { JZ4770_CLK_EXT },
.pll = {
.reg = CGU_REG_CPPCR0,
+ .rate_multiplier = 1,
.m_shift = 24,
.m_bits = 7,
.m_offset = 1,
@@ -112,6 +115,7 @@ static const struct ingenic_cgu_clk_info jz4770_cgu_clocks[] = {
.od_bits = 2,
.od_max = 8,
.od_encoding = pll_od_encoding,
+ .bypass_reg = CGU_REG_CPPCR0,
.bypass_bit = 9,
.enable_bit = 8,
.stable_bit = 10,
@@ -124,6 +128,7 @@ static const struct ingenic_cgu_clk_info jz4770_cgu_clocks[] = {
.parents = { JZ4770_CLK_EXT },
.pll = {
.reg = CGU_REG_CPPCR1,
+ .rate_multiplier = 1,
.m_shift = 24,
.m_bits = 7,
.m_offset = 1,
@@ -134,9 +139,10 @@ static const struct ingenic_cgu_clk_info jz4770_cgu_clocks[] = {
.od_bits = 2,
.od_max = 8,
.od_encoding = pll_od_encoding,
+ .bypass_reg = CGU_REG_CPPCR1,
+ .no_bypass_bit = true,
.enable_bit = 7,
.stable_bit = 6,
- .no_bypass_bit = true,
},
},
diff --git a/drivers/clk/ingenic/jz4780-cgu.c b/drivers/clk/ingenic/jz4780-cgu.c
index c758f1643067..6c5b8029cc8a 100644
--- a/drivers/clk/ingenic/jz4780-cgu.c
+++ b/drivers/clk/ingenic/jz4780-cgu.c
@@ -13,6 +13,7 @@
#include <linux/of.h>
#include <dt-bindings/clock/jz4780-cgu.h>
+
#include "cgu.h"
#include "pm.h"
@@ -266,6 +267,7 @@ static const struct ingenic_cgu_clk_info jz4780_cgu_clocks[] = {
#define DEF_PLL(name) { \
.reg = CGU_REG_ ## name, \
+ .rate_multiplier = 1, \
.m_shift = 19, \
.m_bits = 13, \
.m_offset = 1, \
@@ -277,6 +279,7 @@ static const struct ingenic_cgu_clk_info jz4780_cgu_clocks[] = {
.od_max = 16, \
.od_encoding = pll_od_encoding, \
.stable_bit = 6, \
+ .bypass_reg = CGU_REG_ ## name, \
.bypass_bit = 1, \
.enable_bit = 0, \
}
diff --git a/drivers/clk/ingenic/tcu.c b/drivers/clk/ingenic/tcu.c
index 153a954b0d2f..9382dc3aa27e 100644
--- a/drivers/clk/ingenic/tcu.c
+++ b/drivers/clk/ingenic/tcu.c
@@ -323,7 +323,7 @@ static const struct ingenic_soc_info x1000_soc_info = {
.has_tcu_clk = false,
};
-static const struct of_device_id ingenic_tcu_of_match[] __initconst = {
+static const struct of_device_id __maybe_unused ingenic_tcu_of_match[] __initconst = {
{ .compatible = "ingenic,jz4740-tcu", .data = &jz4740_soc_info, },
{ .compatible = "ingenic,jz4725b-tcu", .data = &jz4725b_soc_info, },
{ .compatible = "ingenic,jz4770-tcu", .data = &jz4770_soc_info, },
diff --git a/drivers/clk/ingenic/x1000-cgu.c b/drivers/clk/ingenic/x1000-cgu.c
index b22d87b3f555..453f3323cb99 100644
--- a/drivers/clk/ingenic/x1000-cgu.c
+++ b/drivers/clk/ingenic/x1000-cgu.c
@@ -1,13 +1,16 @@
// SPDX-License-Identifier: GPL-2.0
/*
* X1000 SoC CGU driver
- * Copyright (c) 2019 Zhou Yanjie <zhouyanjie@zoho.com>
+ * Copyright (c) 2019 周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>
*/
#include <linux/clk-provider.h>
#include <linux/delay.h>
+#include <linux/io.h>
#include <linux/of.h>
+
#include <dt-bindings/clock/x1000-cgu.h>
+
#include "cgu.h"
#include "pm.h"
@@ -18,6 +21,9 @@
#define CGU_REG_CLKGR 0x20
#define CGU_REG_OPCR 0x24
#define CGU_REG_DDRCDR 0x2c
+#define CGU_REG_USBPCR 0x3c
+#define CGU_REG_USBPCR1 0x48
+#define CGU_REG_USBCDR 0x50
#define CGU_REG_MACCDR 0x54
#define CGU_REG_I2SCDR 0x60
#define CGU_REG_LPCDR 0x64
@@ -38,8 +44,47 @@
#define OPCR_SPENDN0 BIT(7)
#define OPCR_SPENDN1 BIT(6)
+/* bits within the USBPCR register */
+#define USBPCR_SIDDQ BIT(21)
+#define USBPCR_OTG_DISABLE BIT(20)
+
static struct ingenic_cgu *cgu;
+static int x1000_usb_phy_enable(struct clk_hw *hw)
+{
+ void __iomem *reg_opcr = cgu->base + CGU_REG_OPCR;
+ void __iomem *reg_usbpcr = cgu->base + CGU_REG_USBPCR;
+
+ writel(readl(reg_opcr) | OPCR_SPENDN0, reg_opcr);
+ writel(readl(reg_usbpcr) & ~USBPCR_OTG_DISABLE & ~USBPCR_SIDDQ, reg_usbpcr);
+ return 0;
+}
+
+static void x1000_usb_phy_disable(struct clk_hw *hw)
+{
+ void __iomem *reg_opcr = cgu->base + CGU_REG_OPCR;
+ void __iomem *reg_usbpcr = cgu->base + CGU_REG_USBPCR;
+
+ writel(readl(reg_opcr) & ~OPCR_SPENDN0, reg_opcr);
+ writel(readl(reg_usbpcr) | USBPCR_OTG_DISABLE | USBPCR_SIDDQ, reg_usbpcr);
+}
+
+static int x1000_usb_phy_is_enabled(struct clk_hw *hw)
+{
+ void __iomem *reg_opcr = cgu->base + CGU_REG_OPCR;
+ void __iomem *reg_usbpcr = cgu->base + CGU_REG_USBPCR;
+
+ return (readl(reg_opcr) & OPCR_SPENDN0) &&
+ !(readl(reg_usbpcr) & USBPCR_SIDDQ) &&
+ !(readl(reg_usbpcr) & USBPCR_OTG_DISABLE);
+}
+
+static const struct clk_ops x1000_otg_phy_ops = {
+ .enable = x1000_usb_phy_enable,
+ .disable = x1000_usb_phy_disable,
+ .is_enabled = x1000_usb_phy_is_enabled,
+};
+
static const s8 pll_od_encoding[8] = {
0x0, 0x1, -1, 0x2, -1, -1, -1, 0x3,
};
@@ -58,6 +103,7 @@ static const struct ingenic_cgu_clk_info x1000_cgu_clocks[] = {
.parents = { X1000_CLK_EXCLK, -1, -1, -1 },
.pll = {
.reg = CGU_REG_APLL,
+ .rate_multiplier = 1,
.m_shift = 24,
.m_bits = 7,
.m_offset = 1,
@@ -68,6 +114,7 @@ static const struct ingenic_cgu_clk_info x1000_cgu_clocks[] = {
.od_bits = 2,
.od_max = 8,
.od_encoding = pll_od_encoding,
+ .bypass_reg = CGU_REG_APLL,
.bypass_bit = 9,
.enable_bit = 8,
.stable_bit = 10,
@@ -79,6 +126,7 @@ static const struct ingenic_cgu_clk_info x1000_cgu_clocks[] = {
.parents = { X1000_CLK_EXCLK, -1, -1, -1 },
.pll = {
.reg = CGU_REG_MPLL,
+ .rate_multiplier = 1,
.m_shift = 24,
.m_bits = 7,
.m_offset = 1,
@@ -89,12 +137,22 @@ static const struct ingenic_cgu_clk_info x1000_cgu_clocks[] = {
.od_bits = 2,
.od_max = 8,
.od_encoding = pll_od_encoding,
+ .bypass_reg = CGU_REG_MPLL,
.bypass_bit = 6,
.enable_bit = 7,
.stable_bit = 0,
},
},
+
+ /* Custom (SoC-specific) OTG PHY */
+
+ [X1000_CLK_OTGPHY] = {
+ "otg_phy", CGU_CLK_CUSTOM,
+ .parents = { -1, -1, X1000_CLK_EXCLK, -1 },
+ .custom = { &x1000_otg_phy_ops },
+ },
+
/* Muxes & dividers */
[X1000_CLK_SCLKA] = {
@@ -110,9 +168,10 @@ static const struct ingenic_cgu_clk_info x1000_cgu_clocks[] = {
},
[X1000_CLK_CPU] = {
- "cpu", CGU_CLK_DIV,
+ "cpu", CGU_CLK_DIV | CGU_CLK_GATE,
.parents = { X1000_CLK_CPUMUX, -1, -1, -1 },
.div = { CGU_REG_CPCCR, 0, 1, 4, 22, -1, -1 },
+ .gate = { CGU_REG_CLKGR, 30 },
},
[X1000_CLK_L2CACHE] = {
@@ -141,9 +200,10 @@ static const struct ingenic_cgu_clk_info x1000_cgu_clocks[] = {
},
[X1000_CLK_PCLK] = {
- "pclk", CGU_CLK_DIV,
+ "pclk", CGU_CLK_DIV | CGU_CLK_GATE,
.parents = { X1000_CLK_AHB2PMUX, -1, -1, -1 },
.div = { CGU_REG_CPCCR, 16, 1, 4, 20, -1, -1 },
+ .gate = { CGU_REG_CLKGR, 28 },
},
[X1000_CLK_DDR] = {
@@ -156,12 +216,20 @@ static const struct ingenic_cgu_clk_info x1000_cgu_clocks[] = {
[X1000_CLK_MAC] = {
"mac", CGU_CLK_MUX | CGU_CLK_DIV | CGU_CLK_GATE,
- .parents = { X1000_CLK_SCLKA, X1000_CLK_MPLL},
+ .parents = { X1000_CLK_SCLKA, X1000_CLK_MPLL },
.mux = { CGU_REG_MACCDR, 31, 1 },
.div = { CGU_REG_MACCDR, 0, 1, 8, 29, 28, 27 },
.gate = { CGU_REG_CLKGR, 25 },
},
+ [X1000_CLK_LCD] = {
+ "lcd", CGU_CLK_MUX | CGU_CLK_DIV | CGU_CLK_GATE,
+ .parents = { X1000_CLK_SCLKA, X1000_CLK_MPLL },
+ .mux = { CGU_REG_LPCDR, 31, 1 },
+ .div = { CGU_REG_LPCDR, 0, 1, 8, 28, 27, 26 },
+ .gate = { CGU_REG_CLKGR, 23 },
+ },
+
[X1000_CLK_MSCMUX] = {
"msc_mux", CGU_CLK_MUX,
.parents = { X1000_CLK_SCLKA, X1000_CLK_MPLL},
@@ -182,6 +250,15 @@ static const struct ingenic_cgu_clk_info x1000_cgu_clocks[] = {
.gate = { CGU_REG_CLKGR, 5 },
},
+ [X1000_CLK_OTG] = {
+ "otg", CGU_CLK_DIV | CGU_CLK_GATE | CGU_CLK_MUX,
+ .parents = { X1000_CLK_EXCLK, -1,
+ X1000_CLK_APLL, X1000_CLK_MPLL },
+ .mux = { CGU_REG_USBCDR, 30, 2 },
+ .div = { CGU_REG_USBCDR, 0, 1, 8, 29, 28, 27 },
+ .gate = { CGU_REG_CLKGR, 3 },
+ },
+
[X1000_CLK_SSIPLL] = {
"ssi_pll", CGU_CLK_MUX | CGU_CLK_DIV,
.parents = { X1000_CLK_SCLKA, X1000_CLK_MPLL, -1, -1 },
@@ -189,14 +266,32 @@ static const struct ingenic_cgu_clk_info x1000_cgu_clocks[] = {
.div = { CGU_REG_SSICDR, 0, 1, 8, 29, 28, 27 },
},
+ [X1000_CLK_SSIPLL_DIV2] = {
+ "ssi_pll_div2", CGU_CLK_FIXDIV,
+ .parents = { X1000_CLK_SSIPLL },
+ .fixdiv = { 2 },
+ },
+
[X1000_CLK_SSIMUX] = {
"ssi_mux", CGU_CLK_MUX,
- .parents = { X1000_CLK_EXCLK, X1000_CLK_SSIPLL, -1, -1 },
+ .parents = { X1000_CLK_EXCLK, X1000_CLK_SSIPLL_DIV2, -1, -1 },
.mux = { CGU_REG_SSICDR, 30, 1 },
},
/* Gate-only clocks */
+ [X1000_CLK_EMC] = {
+ "emc", CGU_CLK_GATE,
+ .parents = { X1000_CLK_AHB2, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR, 0 },
+ },
+
+ [X1000_CLK_EFUSE] = {
+ "efuse", CGU_CLK_GATE,
+ .parents = { X1000_CLK_AHB2, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR, 1 },
+ },
+
[X1000_CLK_SFC] = {
"sfc", CGU_CLK_GATE,
.parents = { X1000_CLK_SSIPLL, -1, -1, -1 },
@@ -239,12 +334,24 @@ static const struct ingenic_cgu_clk_info x1000_cgu_clocks[] = {
.gate = { CGU_REG_CLKGR, 16 },
},
+ [X1000_CLK_TCU] = {
+ "tcu", CGU_CLK_GATE,
+ .parents = { X1000_CLK_EXCLK, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR, 18 },
+ },
+
[X1000_CLK_SSI] = {
"ssi", CGU_CLK_GATE,
.parents = { X1000_CLK_SSIMUX, -1, -1, -1 },
.gate = { CGU_REG_CLKGR, 19 },
},
+ [X1000_CLK_OST] = {
+ "ost", CGU_CLK_GATE,
+ .parents = { X1000_CLK_EXCLK, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR, 20 },
+ },
+
[X1000_CLK_PDMA] = {
"pdma", CGU_CLK_GATE,
.parents = { X1000_CLK_EXCLK, -1, -1, -1 },
@@ -271,4 +378,8 @@ static void __init x1000_cgu_init(struct device_node *np)
ingenic_cgu_register_syscore_ops(cgu);
}
-CLK_OF_DECLARE(x1000_cgu, "ingenic,x1000-cgu", x1000_cgu_init);
+/*
+ * CGU has some children devices, this is useful for probing children devices
+ * in the case where the device node is compatible with "simple-mfd".
+ */
+CLK_OF_DECLARE_DRIVER(x1000_cgu, "ingenic,x1000-cgu", x1000_cgu_init);
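The new x1830-cgu.c below is the first user of both additions: its APLL/MPLL/EPLL/VPLL descriptors keep the M/N/OD fields in the per-PLL registers but point bypass_reg at the shared CGU_REG_CPPCR (bypass bits 30, 28, 24 and 26 respectively), and they set rate_multiplier = 2. As an illustrative calculation with made-up field values (not taken from a datasheet): a 24 MHz EXCLK with m = 50, n = 1, od = 2 and the X1830 multiplier of 2 gives 24 MHz * 50 * 2 / (1 * 2) = 1200 MHz.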
diff --git a/drivers/clk/ingenic/x1830-cgu.c b/drivers/clk/ingenic/x1830-cgu.c
new file mode 100644
index 000000000000..a1b2ff0ee487
--- /dev/null
+++ b/drivers/clk/ingenic/x1830-cgu.c
@@ -0,0 +1,448 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * X1830 SoC CGU driver
+ * Copyright (c) 2019 周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+
+#include <dt-bindings/clock/x1830-cgu.h>
+
+#include "cgu.h"
+#include "pm.h"
+
+/* CGU register offsets */
+#define CGU_REG_CPCCR 0x00
+#define CGU_REG_CPPCR 0x0c
+#define CGU_REG_APLL 0x10
+#define CGU_REG_MPLL 0x14
+#define CGU_REG_CLKGR0 0x20
+#define CGU_REG_OPCR 0x24
+#define CGU_REG_CLKGR1 0x28
+#define CGU_REG_DDRCDR 0x2c
+#define CGU_REG_USBPCR 0x3c
+#define CGU_REG_USBRDT 0x40
+#define CGU_REG_USBVBFIL 0x44
+#define CGU_REG_USBPCR1 0x48
+#define CGU_REG_MACCDR 0x54
+#define CGU_REG_EPLL 0x58
+#define CGU_REG_I2SCDR 0x60
+#define CGU_REG_LPCDR 0x64
+#define CGU_REG_MSC0CDR 0x68
+#define CGU_REG_I2SCDR1 0x70
+#define CGU_REG_SSICDR 0x74
+#define CGU_REG_CIMCDR 0x7c
+#define CGU_REG_MSC1CDR 0xa4
+#define CGU_REG_CMP_INTR 0xb0
+#define CGU_REG_CMP_INTRE 0xb4
+#define CGU_REG_DRCG 0xd0
+#define CGU_REG_CPCSR 0xd4
+#define CGU_REG_VPLL 0xe0
+#define CGU_REG_MACPHYC 0xe8
+
+/* bits within the OPCR register */
+#define OPCR_GATE_USBPHYCLK BIT(23)
+#define OPCR_SPENDN0 BIT(7)
+#define OPCR_SPENDN1 BIT(6)
+
+/* bits within the USBPCR register */
+#define USBPCR_SIDDQ BIT(21)
+#define USBPCR_OTG_DISABLE BIT(20)
+
+static struct ingenic_cgu *cgu;
+
+static int x1830_usb_phy_enable(struct clk_hw *hw)
+{
+ void __iomem *reg_opcr = cgu->base + CGU_REG_OPCR;
+ void __iomem *reg_usbpcr = cgu->base + CGU_REG_USBPCR;
+
+ writel((readl(reg_opcr) | OPCR_SPENDN0) & ~OPCR_GATE_USBPHYCLK, reg_opcr);
+ writel(readl(reg_usbpcr) & ~USBPCR_OTG_DISABLE & ~USBPCR_SIDDQ, reg_usbpcr);
+ return 0;
+}
+
+static void x1830_usb_phy_disable(struct clk_hw *hw)
+{
+ void __iomem *reg_opcr = cgu->base + CGU_REG_OPCR;
+ void __iomem *reg_usbpcr = cgu->base + CGU_REG_USBPCR;
+
+ writel((readl(reg_opcr) & ~OPCR_SPENDN0) | OPCR_GATE_USBPHYCLK, reg_opcr);
+ writel(readl(reg_usbpcr) | USBPCR_OTG_DISABLE | USBPCR_SIDDQ, reg_usbpcr);
+}
+
+static int x1830_usb_phy_is_enabled(struct clk_hw *hw)
+{
+ void __iomem *reg_opcr = cgu->base + CGU_REG_OPCR;
+ void __iomem *reg_usbpcr = cgu->base + CGU_REG_USBPCR;
+
+ return (readl(reg_opcr) & OPCR_SPENDN0) &&
+ !(readl(reg_usbpcr) & USBPCR_SIDDQ) &&
+ !(readl(reg_usbpcr) & USBPCR_OTG_DISABLE);
+}
+
+static const struct clk_ops x1830_otg_phy_ops = {
+ .enable = x1830_usb_phy_enable,
+ .disable = x1830_usb_phy_disable,
+ .is_enabled = x1830_usb_phy_is_enabled,
+};
+
+static const s8 pll_od_encoding[64] = {
+ 0x0, 0x1, -1, 0x2, -1, -1, -1, 0x3,
+ -1, -1, -1, -1, -1, -1, -1, 0x4,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, 0x5,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, 0x6,
+};
+
+static const struct ingenic_cgu_clk_info x1830_cgu_clocks[] = {
+
+ /* External clocks */
+
+ [X1830_CLK_EXCLK] = { "ext", CGU_CLK_EXT },
+ [X1830_CLK_RTCLK] = { "rtc", CGU_CLK_EXT },
+
+ /* PLLs */
+
+ [X1830_CLK_APLL] = {
+ "apll", CGU_CLK_PLL,
+ .parents = { X1830_CLK_EXCLK, -1, -1, -1 },
+ .pll = {
+ .reg = CGU_REG_APLL,
+ .rate_multiplier = 2,
+ .m_shift = 20,
+ .m_bits = 9,
+ .m_offset = 1,
+ .n_shift = 14,
+ .n_bits = 6,
+ .n_offset = 1,
+ .od_shift = 11,
+ .od_bits = 3,
+ .od_max = 64,
+ .od_encoding = pll_od_encoding,
+ .bypass_reg = CGU_REG_CPPCR,
+ .bypass_bit = 30,
+ .enable_bit = 0,
+ .stable_bit = 3,
+ },
+ },
+
+ [X1830_CLK_MPLL] = {
+ "mpll", CGU_CLK_PLL,
+ .parents = { X1830_CLK_EXCLK, -1, -1, -1 },
+ .pll = {
+ .reg = CGU_REG_MPLL,
+ .rate_multiplier = 2,
+ .m_shift = 20,
+ .m_bits = 9,
+ .m_offset = 1,
+ .n_shift = 14,
+ .n_bits = 6,
+ .n_offset = 1,
+ .od_shift = 11,
+ .od_bits = 3,
+ .od_max = 64,
+ .od_encoding = pll_od_encoding,
+ .bypass_reg = CGU_REG_CPPCR,
+ .bypass_bit = 28,
+ .enable_bit = 0,
+ .stable_bit = 3,
+ },
+ },
+
+ [X1830_CLK_EPLL] = {
+ "epll", CGU_CLK_PLL,
+ .parents = { X1830_CLK_EXCLK, -1, -1, -1 },
+ .pll = {
+ .reg = CGU_REG_EPLL,
+ .rate_multiplier = 2,
+ .m_shift = 20,
+ .m_bits = 9,
+ .m_offset = 1,
+ .n_shift = 14,
+ .n_bits = 6,
+ .n_offset = 1,
+ .od_shift = 11,
+ .od_bits = 3,
+ .od_max = 64,
+ .od_encoding = pll_od_encoding,
+ .bypass_reg = CGU_REG_CPPCR,
+ .bypass_bit = 24,
+ .enable_bit = 0,
+ .stable_bit = 3,
+ },
+ },
+
+ [X1830_CLK_VPLL] = {
+ "vpll", CGU_CLK_PLL,
+ .parents = { X1830_CLK_EXCLK, -1, -1, -1 },
+ .pll = {
+ .reg = CGU_REG_VPLL,
+ .rate_multiplier = 2,
+ .m_shift = 20,
+ .m_bits = 9,
+ .m_offset = 1,
+ .n_shift = 14,
+ .n_bits = 6,
+ .n_offset = 1,
+ .od_shift = 11,
+ .od_bits = 3,
+ .od_max = 64,
+ .od_encoding = pll_od_encoding,
+ .bypass_reg = CGU_REG_CPPCR,
+ .bypass_bit = 26,
+ .enable_bit = 0,
+ .stable_bit = 3,
+ },
+ },
+
+ /* Custom (SoC-specific) OTG PHY */
+
+ [X1830_CLK_OTGPHY] = {
+ "otg_phy", CGU_CLK_CUSTOM,
+ .parents = { X1830_CLK_EXCLK, -1, -1, -1 },
+ .custom = { &x1830_otg_phy_ops },
+ },
+
+ /* Muxes & dividers */
+
+ [X1830_CLK_SCLKA] = {
+ "sclk_a", CGU_CLK_MUX,
+ .parents = { -1, X1830_CLK_EXCLK, X1830_CLK_APLL, -1 },
+ .mux = { CGU_REG_CPCCR, 30, 2 },
+ },
+
+ [X1830_CLK_CPUMUX] = {
+ "cpu_mux", CGU_CLK_MUX,
+ .parents = { -1, X1830_CLK_SCLKA, X1830_CLK_MPLL, -1 },
+ .mux = { CGU_REG_CPCCR, 28, 2 },
+ },
+
+ [X1830_CLK_CPU] = {
+ "cpu", CGU_CLK_DIV | CGU_CLK_GATE,
+ .parents = { X1830_CLK_CPUMUX, -1, -1, -1 },
+ .div = { CGU_REG_CPCCR, 0, 1, 4, 22, -1, -1 },
+ .gate = { CGU_REG_CLKGR1, 15 },
+ },
+
+ [X1830_CLK_L2CACHE] = {
+ "l2cache", CGU_CLK_DIV,
+ .parents = { X1830_CLK_CPUMUX, -1, -1, -1 },
+ .div = { CGU_REG_CPCCR, 4, 1, 4, 22, -1, -1 },
+ },
+
+ [X1830_CLK_AHB0] = {
+ "ahb0", CGU_CLK_MUX | CGU_CLK_DIV,
+ .parents = { -1, X1830_CLK_SCLKA, X1830_CLK_MPLL, -1 },
+ .mux = { CGU_REG_CPCCR, 26, 2 },
+ .div = { CGU_REG_CPCCR, 8, 1, 4, 21, -1, -1 },
+ },
+
+ [X1830_CLK_AHB2PMUX] = {
+ "ahb2_apb_mux", CGU_CLK_MUX,
+ .parents = { -1, X1830_CLK_SCLKA, X1830_CLK_MPLL, -1 },
+ .mux = { CGU_REG_CPCCR, 24, 2 },
+ },
+
+ [X1830_CLK_AHB2] = {
+ "ahb2", CGU_CLK_DIV,
+ .parents = { X1830_CLK_AHB2PMUX, -1, -1, -1 },
+ .div = { CGU_REG_CPCCR, 12, 1, 4, 20, -1, -1 },
+ },
+
+ [X1830_CLK_PCLK] = {
+ "pclk", CGU_CLK_DIV | CGU_CLK_GATE,
+ .parents = { X1830_CLK_AHB2PMUX, -1, -1, -1 },
+ .div = { CGU_REG_CPCCR, 16, 1, 4, 20, -1, -1 },
+ .gate = { CGU_REG_CLKGR1, 14 },
+ },
+
+ [X1830_CLK_DDR] = {
+ "ddr", CGU_CLK_MUX | CGU_CLK_DIV | CGU_CLK_GATE,
+ .parents = { -1, X1830_CLK_SCLKA, X1830_CLK_MPLL, -1 },
+ .mux = { CGU_REG_DDRCDR, 30, 2 },
+ .div = { CGU_REG_DDRCDR, 0, 1, 4, 29, 28, 27 },
+ .gate = { CGU_REG_CLKGR0, 31 },
+ },
+
+ [X1830_CLK_MAC] = {
+ "mac", CGU_CLK_MUX | CGU_CLK_DIV | CGU_CLK_GATE,
+ .parents = { X1830_CLK_SCLKA, X1830_CLK_MPLL,
+ X1830_CLK_VPLL, X1830_CLK_EPLL },
+ .mux = { CGU_REG_MACCDR, 30, 2 },
+ .div = { CGU_REG_MACCDR, 0, 1, 8, 29, 28, 27 },
+ .gate = { CGU_REG_CLKGR1, 4 },
+ },
+
+ [X1830_CLK_LCD] = {
+ "lcd", CGU_CLK_MUX | CGU_CLK_DIV | CGU_CLK_GATE,
+ .parents = { X1830_CLK_SCLKA, X1830_CLK_MPLL,
+ X1830_CLK_VPLL, X1830_CLK_EPLL },
+ .mux = { CGU_REG_LPCDR, 30, 2 },
+ .div = { CGU_REG_LPCDR, 0, 1, 8, 28, 27, 26 },
+ .gate = { CGU_REG_CLKGR1, 9 },
+ },
+
+ [X1830_CLK_MSCMUX] = {
+ "msc_mux", CGU_CLK_MUX,
+ .parents = { X1830_CLK_SCLKA, X1830_CLK_MPLL,
+ X1830_CLK_VPLL, X1830_CLK_EPLL },
+ .mux = { CGU_REG_MSC0CDR, 30, 2 },
+ },
+
+ [X1830_CLK_MSC0] = {
+ "msc0", CGU_CLK_DIV | CGU_CLK_GATE,
+ .parents = { X1830_CLK_MSCMUX, -1, -1, -1 },
+ .div = { CGU_REG_MSC0CDR, 0, 2, 8, 29, 28, 27 },
+ .gate = { CGU_REG_CLKGR0, 4 },
+ },
+
+ [X1830_CLK_MSC1] = {
+ "msc1", CGU_CLK_DIV | CGU_CLK_GATE,
+ .parents = { X1830_CLK_MSCMUX, -1, -1, -1 },
+ .div = { CGU_REG_MSC1CDR, 0, 2, 8, 29, 28, 27 },
+ .gate = { CGU_REG_CLKGR0, 5 },
+ },
+
+ [X1830_CLK_SSIPLL] = {
+ "ssi_pll", CGU_CLK_MUX | CGU_CLK_DIV,
+ .parents = { X1830_CLK_SCLKA, X1830_CLK_MPLL,
+ X1830_CLK_VPLL, X1830_CLK_EPLL },
+ .mux = { CGU_REG_SSICDR, 30, 2 },
+ .div = { CGU_REG_SSICDR, 0, 1, 8, 28, 27, 26 },
+ },
+
+ [X1830_CLK_SSIPLL_DIV2] = {
+ "ssi_pll_div2", CGU_CLK_FIXDIV,
+ .parents = { X1830_CLK_SSIPLL },
+ .fixdiv = { 2 },
+ },
+
+ [X1830_CLK_SSIMUX] = {
+ "ssi_mux", CGU_CLK_MUX,
+ .parents = { X1830_CLK_EXCLK, X1830_CLK_SSIPLL_DIV2, -1, -1 },
+ .mux = { CGU_REG_SSICDR, 29, 1 },
+ },
+
+ /* Gate-only clocks */
+
+ [X1830_CLK_EMC] = {
+ "emc", CGU_CLK_GATE,
+ .parents = { X1830_CLK_AHB2, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR0, 0 },
+ },
+
+ [X1830_CLK_EFUSE] = {
+ "efuse", CGU_CLK_GATE,
+ .parents = { X1830_CLK_AHB2, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR0, 1 },
+ },
+
+ [X1830_CLK_OTG] = {
+ "otg", CGU_CLK_GATE,
+ .parents = { X1830_CLK_EXCLK, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR0, 3 },
+ },
+
+ [X1830_CLK_SSI0] = {
+ "ssi0", CGU_CLK_GATE,
+ .parents = { X1830_CLK_SSIMUX, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR0, 6 },
+ },
+
+ [X1830_CLK_SMB0] = {
+ "smb0", CGU_CLK_GATE,
+ .parents = { X1830_CLK_PCLK, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR0, 7 },
+ },
+
+ [X1830_CLK_SMB1] = {
+ "smb1", CGU_CLK_GATE,
+ .parents = { X1830_CLK_PCLK, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR0, 8 },
+ },
+
+ [X1830_CLK_SMB2] = {
+ "smb2", CGU_CLK_GATE,
+ .parents = { X1830_CLK_PCLK, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR0, 9 },
+ },
+
+ [X1830_CLK_UART0] = {
+ "uart0", CGU_CLK_GATE,
+ .parents = { X1830_CLK_EXCLK, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR0, 14 },
+ },
+
+ [X1830_CLK_UART1] = {
+ "uart1", CGU_CLK_GATE,
+ .parents = { X1830_CLK_EXCLK, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR0, 15 },
+ },
+
+ [X1830_CLK_SSI1] = {
+ "ssi1", CGU_CLK_GATE,
+ .parents = { X1830_CLK_SSIMUX, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR0, 19 },
+ },
+
+ [X1830_CLK_SFC] = {
+ "sfc", CGU_CLK_GATE,
+ .parents = { X1830_CLK_SSIPLL, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR0, 20 },
+ },
+
+ [X1830_CLK_PDMA] = {
+ "pdma", CGU_CLK_GATE,
+ .parents = { X1830_CLK_EXCLK, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR0, 21 },
+ },
+
+ [X1830_CLK_TCU] = {
+ "tcu", CGU_CLK_GATE,
+ .parents = { X1830_CLK_EXCLK, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR0, 30 },
+ },
+
+ [X1830_CLK_DTRNG] = {
+ "dtrng", CGU_CLK_GATE,
+ .parents = { X1830_CLK_PCLK, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR1, 1 },
+ },
+
+ [X1830_CLK_OST] = {
+ "ost", CGU_CLK_GATE,
+ .parents = { X1830_CLK_EXCLK, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR1, 11 },
+ },
+};
+
+static void __init x1830_cgu_init(struct device_node *np)
+{
+ int retval;
+
+ cgu = ingenic_cgu_new(x1830_cgu_clocks,
+ ARRAY_SIZE(x1830_cgu_clocks), np);
+ if (!cgu) {
+ pr_err("%s: failed to initialise CGU\n", __func__);
+ return;
+ }
+
+ retval = ingenic_cgu_register_clocks(cgu);
+ if (retval) {
+ pr_err("%s: failed to register CGU Clocks\n", __func__);
+ return;
+ }
+
+ ingenic_cgu_register_syscore_ops(cgu);
+}
+/*
+ * The CGU has some child devices; using CLK_OF_DECLARE_DRIVER here lets those
+ * children be probed when the node is also compatible with "simple-mfd".
+ */
+CLK_OF_DECLARE_DRIVER(x1830_cgu, "ingenic,x1830-cgu", x1830_cgu_init);
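The pll_od_encoding[] table in the new x1830-cgu.c maps a PLL post-divider value to its
register encoding, indexed by (divider - 1); the -1 entries mark dividers the hardware
cannot produce, so only 1, 2, 4, 8, 16, 32 and 64 are usable. A hedged sketch of the
lookup the shared Ingenic CGU core is assumed to perform when it reads the OD field
back (field names taken from the pll blocks above):

/*
 * Sketch only: an assumption about how drivers/clk/ingenic/cgu.c recovers the
 * post-divider from the OD bits it reads out of the PLL register.
 */
static unsigned int pll_od_from_encoding(const struct ingenic_cgu_pll_info *pll_info,
					 u8 od_enc)
{
	unsigned int od;

	for (od = 0; od < pll_info->od_max; od++)
		if (pll_info->od_encoding[od] == od_enc)
			break;

	return od + 1;	/* index N - 1 holds the encoding for divider N */
}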
diff --git a/drivers/clk/mediatek/Kconfig b/drivers/clk/mediatek/Kconfig
index ea3c70d1307e..1d2b7d717541 100644
--- a/drivers/clk/mediatek/Kconfig
+++ b/drivers/clk/mediatek/Kconfig
@@ -117,6 +117,92 @@ config COMMON_CLK_MT2712_VENCSYS
---help---
This driver supports MediaTek MT2712 vencsys clocks.
+config COMMON_CLK_MT6765
+ bool "Clock driver for MediaTek MT6765"
+ depends on (ARCH_MEDIATEK && ARM64) || COMPILE_TEST
+ select COMMON_CLK_MEDIATEK
+ default ARCH_MEDIATEK && ARM64
+ help
+ This driver supports MediaTek MT6765 basic clocks.
+
+config COMMON_CLK_MT6765_AUDIOSYS
+ bool "Clock driver for MediaTek MT6765 audiosys"
+ depends on COMMON_CLK_MT6765
+ help
+ This driver supports MediaTek MT6765 audiosys clocks.
+
+config COMMON_CLK_MT6765_CAMSYS
+ bool "Clock driver for MediaTek MT6765 camsys"
+ depends on COMMON_CLK_MT6765
+ help
+ This driver supports MediaTek MT6765 camsys clocks.
+
+config COMMON_CLK_MT6765_GCESYS
+ bool "Clock driver for MediaTek MT6765 gcesys"
+ depends on COMMON_CLK_MT6765
+ help
+ This driver supports MediaTek MT6765 gcesys clocks.
+
+config COMMON_CLK_MT6765_MMSYS
+ bool "Clock driver for MediaTek MT6765 mmsys"
+ depends on COMMON_CLK_MT6765
+ help
+ This driver supports MediaTek MT6765 mmsys clocks.
+
+config COMMON_CLK_MT6765_IMGSYS
+ bool "Clock driver for MediaTek MT6765 imgsys"
+ depends on COMMON_CLK_MT6765
+ help
+ This driver supports MediaTek MT6765 imgsys clocks.
+
+config COMMON_CLK_MT6765_VCODECSYS
+ bool "Clock driver for MediaTek MT6765 vcodecsys"
+ depends on COMMON_CLK_MT6765
+ help
+ This driver supports MediaTek MT6765 vcodecsys clocks.
+
+config COMMON_CLK_MT6765_MFGSYS
+ bool "Clock driver for MediaTek MT6765 mfgsys"
+ depends on COMMON_CLK_MT6765
+ help
+ This driver supports MediaTek MT6765 mfgsys clocks.
+
+config COMMON_CLK_MT6765_MIPI0ASYS
+ bool "Clock driver for MediaTek MT6765 mipi0asys"
+ depends on COMMON_CLK_MT6765
+ help
+ This driver supports MediaTek MT6765 mipi0asys clocks.
+
+config COMMON_CLK_MT6765_MIPI0BSYS
+ bool "Clock driver for MediaTek MT6765 mipi0bsys"
+ depends on COMMON_CLK_MT6765
+ help
+ This driver supports MediaTek MT6765 mipi0bsys clocks.
+
+config COMMON_CLK_MT6765_MIPI1ASYS
+ bool "Clock driver for MediaTek MT6765 mipi1asys"
+ depends on COMMON_CLK_MT6765
+ help
+ This driver supports MediaTek MT6765 mipi1asys clocks.
+
+config COMMON_CLK_MT6765_MIPI1BSYS
+ bool "Clock driver for MediaTek MT6765 mipi1bsys"
+ depends on COMMON_CLK_MT6765
+ help
+ This driver supports MediaTek MT6765 mipi1bsys clocks.
+
+config COMMON_CLK_MT6765_MIPI2ASYS
+ bool "Clock driver for MediaTek MT6765 mipi2asys"
+ depends on COMMON_CLK_MT6765
+ help
+ This driver supports MediaTek MT6765 mipi2asys clocks.
+
+config COMMON_CLK_MT6765_MIPI2BSYS
+ bool "Clock driver for MediaTek MT6765 mipi2bsys"
+ depends on COMMON_CLK_MT6765
+ help
+ This driver supports MediaTek MT6765 mipi2bsys clocks.
+
config COMMON_CLK_MT6779
bool "Clock driver for MediaTek MT6779"
depends on (ARCH_MEDIATEK && ARM64) || COMPILE_TEST
@@ -274,6 +360,13 @@ config COMMON_CLK_MT8173
---help---
This driver supports MediaTek MT8173 clocks.
+config COMMON_CLK_MT8173_MMSYS
+ bool "Clock driver for MediaTek MT8173 mmsys"
+ depends on COMMON_CLK_MT8173
+ default COMMON_CLK_MT8173
+ help
+ This driver supports MediaTek MT8173 mmsys clocks.
+
config COMMON_CLK_MT8183
bool "Clock driver for MediaTek MT8183"
depends on (ARCH_MEDIATEK && ARM64) || COMPILE_TEST
diff --git a/drivers/clk/mediatek/Makefile b/drivers/clk/mediatek/Makefile
index 8cdb76a5cd71..959b556d32ea 100644
--- a/drivers/clk/mediatek/Makefile
+++ b/drivers/clk/mediatek/Makefile
@@ -1,6 +1,13 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_COMMON_CLK_MEDIATEK) += clk-mtk.o clk-pll.o clk-gate.o clk-apmixed.o clk-cpumux.o reset.o clk-mux.o
+obj-$(CONFIG_COMMON_CLK_MT6765) += clk-mt6765.o
+obj-$(CONFIG_COMMON_CLK_MT6765_AUDIOSYS) += clk-mt6765-audio.o
+obj-$(CONFIG_COMMON_CLK_MT6765_CAMSYS) += clk-mt6765-cam.o
+obj-$(CONFIG_COMMON_CLK_MT6765_IMGSYS) += clk-mt6765-img.o
+obj-$(CONFIG_COMMON_CLK_MT6765_MIPI0ASYS) += clk-mt6765-mipi0a.o
+obj-$(CONFIG_COMMON_CLK_MT6765_MMSYS) += clk-mt6765-mm.o
+obj-$(CONFIG_COMMON_CLK_MT6765_VCODECSYS) += clk-mt6765-vcodec.o
obj-$(CONFIG_COMMON_CLK_MT6779) += clk-mt6779.o
obj-$(CONFIG_COMMON_CLK_MT6779_MMSYS) += clk-mt6779-mm.o
obj-$(CONFIG_COMMON_CLK_MT6779_IMGSYS) += clk-mt6779-img.o
@@ -41,6 +48,7 @@ obj-$(CONFIG_COMMON_CLK_MT7629_ETHSYS) += clk-mt7629-eth.o
obj-$(CONFIG_COMMON_CLK_MT7629_HIFSYS) += clk-mt7629-hif.o
obj-$(CONFIG_COMMON_CLK_MT8135) += clk-mt8135.o
obj-$(CONFIG_COMMON_CLK_MT8173) += clk-mt8173.o
+obj-$(CONFIG_COMMON_CLK_MT8173_MMSYS) += clk-mt8173-mm.o
obj-$(CONFIG_COMMON_CLK_MT8183) += clk-mt8183.o
obj-$(CONFIG_COMMON_CLK_MT8183_AUDIOSYS) += clk-mt8183-audio.o
obj-$(CONFIG_COMMON_CLK_MT8183_CAMSYS) += clk-mt8183-cam.o
diff --git a/drivers/clk/mediatek/clk-mt2701-mm.c b/drivers/clk/mediatek/clk-mt2701-mm.c
index 054b597d4a73..cb18e1849492 100644
--- a/drivers/clk/mediatek/clk-mt2701-mm.c
+++ b/drivers/clk/mediatek/clk-mt2701-mm.c
@@ -79,16 +79,12 @@ static const struct mtk_gate mm_clks[] = {
GATE_DISP1(CLK_MM_TVE_FMM, "mm_tve_fmm", "mm_sel", 14),
};
-static const struct of_device_id of_match_clk_mt2701_mm[] = {
- { .compatible = "mediatek,mt2701-mmsys", },
- {}
-};
-
static int clk_mt2701_mm_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->parent->of_node;
struct clk_onecell_data *clk_data;
int r;
- struct device_node *node = pdev->dev.of_node;
clk_data = mtk_alloc_clk_data(CLK_MM_NR);
@@ -108,7 +104,6 @@ static struct platform_driver clk_mt2701_mm_drv = {
.probe = clk_mt2701_mm_probe,
.driver = {
.name = "clk-mt2701-mm",
- .of_match_table = of_match_clk_mt2701_mm,
},
};
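The clk-mt2701-mm change drops of_match_clk_mt2701_mm and reads the OF node from
dev->parent: the mm clock device is no longer bound directly from the device tree but
is expected to be created as a child by the mmsys driver. A hedged sketch of that
parent side follows; it is an assumption about the companion
drivers/soc/mediatek/mtk-mmsys.c change, which is not part of this hunk. The mt2712,
mt6779 and mt6797 mm drivers below receive the same conversion.

#include <linux/err.h>
#include <linux/platform_device.h>

/* Sketch: the mmsys driver spawning the clock sub-device as its own child. */
static int mtk_mmsys_probe_sketch(struct platform_device *pdev)
{
	struct platform_device *clks;

	clks = platform_device_register_data(&pdev->dev, "clk-mt2701-mm",
					     PLATFORM_DEVID_AUTO, NULL, 0);
	if (IS_ERR(clks))
		return PTR_ERR(clks);

	return 0;
}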
diff --git a/drivers/clk/mediatek/clk-mt2712-mm.c b/drivers/clk/mediatek/clk-mt2712-mm.c
index 1c5948be35f3..5519c3d68c1f 100644
--- a/drivers/clk/mediatek/clk-mt2712-mm.c
+++ b/drivers/clk/mediatek/clk-mt2712-mm.c
@@ -128,9 +128,10 @@ static const struct mtk_gate mm_clks[] = {
static int clk_mt2712_mm_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->parent->of_node;
struct clk_onecell_data *clk_data;
int r;
- struct device_node *node = pdev->dev.of_node;
clk_data = mtk_alloc_clk_data(CLK_MM_NR_CLK);
@@ -146,16 +147,10 @@ static int clk_mt2712_mm_probe(struct platform_device *pdev)
return r;
}
-static const struct of_device_id of_match_clk_mt2712_mm[] = {
- { .compatible = "mediatek,mt2712-mmsys", },
- {}
-};
-
static struct platform_driver clk_mt2712_mm_drv = {
.probe = clk_mt2712_mm_probe,
.driver = {
.name = "clk-mt2712-mm",
- .of_match_table = of_match_clk_mt2712_mm,
},
};
diff --git a/drivers/clk/mediatek/clk-mt6765-audio.c b/drivers/clk/mediatek/clk-mt6765-audio.c
new file mode 100644
index 000000000000..4c989165d795
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt6765-audio.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: Owen Chen <owen.chen@mediatek.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt6765-clk.h>
+
+static const struct mtk_gate_regs audio0_cg_regs = {
+ .set_ofs = 0x0,
+ .clr_ofs = 0x0,
+ .sta_ofs = 0x0,
+};
+
+static const struct mtk_gate_regs audio1_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x4,
+ .sta_ofs = 0x4,
+};
+
+#define GATE_AUDIO0(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &audio0_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr, \
+ }
+
+#define GATE_AUDIO1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &audio1_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr, \
+ }
+
+static const struct mtk_gate audio_clks[] = {
+ /* AUDIO0 */
+ GATE_AUDIO0(CLK_AUDIO_AFE, "aud_afe", "audio_ck", 2),
+ GATE_AUDIO0(CLK_AUDIO_22M, "aud_22m", "aud_engen1_ck", 8),
+ GATE_AUDIO0(CLK_AUDIO_APLL_TUNER, "aud_apll_tuner",
+ "aud_engen1_ck", 19),
+ GATE_AUDIO0(CLK_AUDIO_ADC, "aud_adc", "audio_ck", 24),
+ GATE_AUDIO0(CLK_AUDIO_DAC, "aud_dac", "audio_ck", 25),
+ GATE_AUDIO0(CLK_AUDIO_DAC_PREDIS, "aud_dac_predis",
+ "audio_ck", 26),
+ GATE_AUDIO0(CLK_AUDIO_TML, "aud_tml", "audio_ck", 27),
+ /* AUDIO1 */
+ GATE_AUDIO1(CLK_AUDIO_I2S1_BCLK, "aud_i2s1_bclk",
+ "audio_ck", 4),
+ GATE_AUDIO1(CLK_AUDIO_I2S2_BCLK, "aud_i2s2_bclk",
+ "audio_ck", 5),
+ GATE_AUDIO1(CLK_AUDIO_I2S3_BCLK, "aud_i2s3_bclk",
+ "audio_ck", 6),
+ GATE_AUDIO1(CLK_AUDIO_I2S4_BCLK, "aud_i2s4_bclk",
+ "audio_ck", 7),
+};
+
+static int clk_mt6765_audio_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_AUDIO_NR_CLK);
+
+ mtk_clk_register_gates(node, audio_clks,
+ ARRAY_SIZE(audio_clks), clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+ if (r)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+
+ return r;
+}
+
+static const struct of_device_id of_match_clk_mt6765_audio[] = {
+ { .compatible = "mediatek,mt6765-audsys", },
+ {}
+};
+
+static struct platform_driver clk_mt6765_audio_drv = {
+ .probe = clk_mt6765_audio_probe,
+ .driver = {
+ .name = "clk-mt6765-audio",
+ .of_match_table = of_match_clk_mt6765_audio,
+ },
+};
+
+builtin_platform_driver(clk_mt6765_audio_drv);
diff --git a/drivers/clk/mediatek/clk-mt6765-cam.c b/drivers/clk/mediatek/clk-mt6765-cam.c
new file mode 100644
index 000000000000..c96394893bcf
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt6765-cam.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: Owen Chen <owen.chen@mediatek.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt6765-clk.h>
+
+static const struct mtk_gate_regs cam_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_CAM(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &cam_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+static const struct mtk_gate cam_clks[] = {
+ GATE_CAM(CLK_CAM_LARB3, "cam_larb3", "mm_ck", 0),
+ GATE_CAM(CLK_CAM_DFP_VAD, "cam_dfp_vad", "mm_ck", 1),
+ GATE_CAM(CLK_CAM, "cam", "mm_ck", 6),
+ GATE_CAM(CLK_CAMTG, "camtg", "mm_ck", 7),
+ GATE_CAM(CLK_CAM_SENINF, "cam_seninf", "mm_ck", 8),
+ GATE_CAM(CLK_CAMSV0, "camsv0", "mm_ck", 9),
+ GATE_CAM(CLK_CAMSV1, "camsv1", "mm_ck", 10),
+ GATE_CAM(CLK_CAMSV2, "camsv2", "mm_ck", 11),
+ GATE_CAM(CLK_CAM_CCU, "cam_ccu", "mm_ck", 12),
+};
+
+static int clk_mt6765_cam_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_CAM_NR_CLK);
+
+ mtk_clk_register_gates(node, cam_clks, ARRAY_SIZE(cam_clks), clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+ if (r)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+
+ return r;
+}
+
+static const struct of_device_id of_match_clk_mt6765_cam[] = {
+ { .compatible = "mediatek,mt6765-camsys", },
+ {}
+};
+
+static struct platform_driver clk_mt6765_cam_drv = {
+ .probe = clk_mt6765_cam_probe,
+ .driver = {
+ .name = "clk-mt6765-cam",
+ .of_match_table = of_match_clk_mt6765_cam,
+ },
+};
+
+builtin_platform_driver(clk_mt6765_cam_drv);
diff --git a/drivers/clk/mediatek/clk-mt6765-img.c b/drivers/clk/mediatek/clk-mt6765-img.c
new file mode 100644
index 000000000000..6fd8bf8030fc
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt6765-img.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: Owen Chen <owen.chen@mediatek.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt6765-clk.h>
+
+static const struct mtk_gate_regs img_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_IMG(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &img_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+static const struct mtk_gate img_clks[] = {
+ GATE_IMG(CLK_IMG_LARB2, "img_larb2", "mm_ck", 0),
+ GATE_IMG(CLK_IMG_DIP, "img_dip", "mm_ck", 2),
+ GATE_IMG(CLK_IMG_FDVT, "img_fdvt", "mm_ck", 3),
+ GATE_IMG(CLK_IMG_DPE, "img_dpe", "mm_ck", 4),
+ GATE_IMG(CLK_IMG_RSC, "img_rsc", "mm_ck", 5),
+};
+
+static int clk_mt6765_img_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_IMG_NR_CLK);
+
+ mtk_clk_register_gates(node, img_clks, ARRAY_SIZE(img_clks), clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+ if (r)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+
+ return r;
+}
+
+static const struct of_device_id of_match_clk_mt6765_img[] = {
+ { .compatible = "mediatek,mt6765-imgsys", },
+ {}
+};
+
+static struct platform_driver clk_mt6765_img_drv = {
+ .probe = clk_mt6765_img_probe,
+ .driver = {
+ .name = "clk-mt6765-img",
+ .of_match_table = of_match_clk_mt6765_img,
+ },
+};
+
+builtin_platform_driver(clk_mt6765_img_drv);
diff --git a/drivers/clk/mediatek/clk-mt6765-mipi0a.c b/drivers/clk/mediatek/clk-mt6765-mipi0a.c
new file mode 100644
index 000000000000..81744d0f95a0
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt6765-mipi0a.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: Owen Chen <owen.chen@mediatek.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt6765-clk.h>
+
+static const struct mtk_gate_regs mipi0a_cg_regs = {
+ .set_ofs = 0x80,
+ .clr_ofs = 0x80,
+ .sta_ofs = 0x80,
+};
+
+#define GATE_MIPI0A(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mipi0a_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr_inv, \
+ }
+
+static const struct mtk_gate mipi0a_clks[] = {
+ GATE_MIPI0A(CLK_MIPI0A_CSR_CSI_EN_0A,
+ "mipi0a_csr_0a", "f_fseninf_ck", 1),
+};
+
+static int clk_mt6765_mipi0a_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_MIPI0A_NR_CLK);
+
+ mtk_clk_register_gates(node, mipi0a_clks,
+ ARRAY_SIZE(mipi0a_clks), clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+ if (r)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+
+ return r;
+}
+
+static const struct of_device_id of_match_clk_mt6765_mipi0a[] = {
+ { .compatible = "mediatek,mt6765-mipi0a", },
+ {}
+};
+
+static struct platform_driver clk_mt6765_mipi0a_drv = {
+ .probe = clk_mt6765_mipi0a_probe,
+ .driver = {
+ .name = "clk-mt6765-mipi0a",
+ .of_match_table = of_match_clk_mt6765_mipi0a,
+ },
+};
+
+builtin_platform_driver(clk_mt6765_mipi0a_drv);
diff --git a/drivers/clk/mediatek/clk-mt6765-mm.c b/drivers/clk/mediatek/clk-mt6765-mm.c
new file mode 100644
index 000000000000..6d8214c51684
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt6765-mm.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: Owen Chen <owen.chen@mediatek.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt6765-clk.h>
+
+static const struct mtk_gate_regs mm_cg_regs = {
+ .set_ofs = 0x104,
+ .clr_ofs = 0x108,
+ .sta_ofs = 0x100,
+};
+
+#define GATE_MM(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+static const struct mtk_gate mm_clks[] = {
+ /* MM */
+ GATE_MM(CLK_MM_MDP_RDMA0, "mm_mdp_rdma0", "mm_ck", 0),
+ GATE_MM(CLK_MM_MDP_CCORR0, "mm_mdp_ccorr0", "mm_ck", 1),
+ GATE_MM(CLK_MM_MDP_RSZ0, "mm_mdp_rsz0", "mm_ck", 2),
+ GATE_MM(CLK_MM_MDP_RSZ1, "mm_mdp_rsz1", "mm_ck", 3),
+ GATE_MM(CLK_MM_MDP_TDSHP0, "mm_mdp_tdshp0", "mm_ck", 4),
+ GATE_MM(CLK_MM_MDP_WROT0, "mm_mdp_wrot0", "mm_ck", 5),
+ GATE_MM(CLK_MM_MDP_WDMA0, "mm_mdp_wdma0", "mm_ck", 6),
+ GATE_MM(CLK_MM_DISP_OVL0, "mm_disp_ovl0", "mm_ck", 7),
+ GATE_MM(CLK_MM_DISP_OVL0_2L, "mm_disp_ovl0_2l", "mm_ck", 8),
+ GATE_MM(CLK_MM_DISP_RSZ0, "mm_disp_rsz0", "mm_ck", 9),
+ GATE_MM(CLK_MM_DISP_RDMA0, "mm_disp_rdma0", "mm_ck", 10),
+ GATE_MM(CLK_MM_DISP_WDMA0, "mm_disp_wdma0", "mm_ck", 11),
+ GATE_MM(CLK_MM_DISP_COLOR0, "mm_disp_color0", "mm_ck", 12),
+ GATE_MM(CLK_MM_DISP_CCORR0, "mm_disp_ccorr0", "mm_ck", 13),
+ GATE_MM(CLK_MM_DISP_AAL0, "mm_disp_aal0", "mm_ck", 14),
+ GATE_MM(CLK_MM_DISP_GAMMA0, "mm_disp_gamma0", "mm_ck", 15),
+ GATE_MM(CLK_MM_DISP_DITHER0, "mm_disp_dither0", "mm_ck", 16),
+ GATE_MM(CLK_MM_DSI0, "mm_dsi0", "mm_ck", 17),
+ GATE_MM(CLK_MM_FAKE_ENG, "mm_fake_eng", "mm_ck", 18),
+ GATE_MM(CLK_MM_SMI_COMMON, "mm_smi_common", "mm_ck", 19),
+ GATE_MM(CLK_MM_SMI_LARB0, "mm_smi_larb0", "mm_ck", 20),
+ GATE_MM(CLK_MM_SMI_COMM0, "mm_smi_comm0", "mm_ck", 21),
+ GATE_MM(CLK_MM_SMI_COMM1, "mm_smi_comm1", "mm_ck", 22),
+ GATE_MM(CLK_MM_CAM_MDP, "mm_cam_mdp_ck", "mm_ck", 23),
+ GATE_MM(CLK_MM_SMI_IMG, "mm_smi_img_ck", "mm_ck", 24),
+ GATE_MM(CLK_MM_SMI_CAM, "mm_smi_cam_ck", "mm_ck", 25),
+ GATE_MM(CLK_MM_IMG_DL_RELAY, "mm_img_dl_relay", "mm_ck", 26),
+ GATE_MM(CLK_MM_IMG_DL_ASYNC_TOP, "mm_imgdl_async", "mm_ck", 27),
+ GATE_MM(CLK_MM_DIG_DSI, "mm_dig_dsi_ck", "mm_ck", 28),
+ GATE_MM(CLK_MM_F26M_HRTWT, "mm_hrtwt", "f_f26m_ck", 29),
+};
+
+static int clk_mt6765_mm_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_MM_NR_CLK);
+
+ mtk_clk_register_gates(node, mm_clks, ARRAY_SIZE(mm_clks), clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+ if (r)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+
+ return r;
+}
+
+static const struct of_device_id of_match_clk_mt6765_mm[] = {
+ { .compatible = "mediatek,mt6765-mmsys", },
+ {}
+};
+
+static struct platform_driver clk_mt6765_mm_drv = {
+ .probe = clk_mt6765_mm_probe,
+ .driver = {
+ .name = "clk-mt6765-mm",
+ .of_match_table = of_match_clk_mt6765_mm,
+ },
+};
+
+builtin_platform_driver(clk_mt6765_mm_drv);
diff --git a/drivers/clk/mediatek/clk-mt6765-vcodec.c b/drivers/clk/mediatek/clk-mt6765-vcodec.c
new file mode 100644
index 000000000000..baae665fab31
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt6765-vcodec.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: Owen Chen <owen.chen@mediatek.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt6765-clk.h>
+
+static const struct mtk_gate_regs venc_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_VENC(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &venc_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr_inv, \
+ }
+
+static const struct mtk_gate venc_clks[] = {
+ GATE_VENC(CLK_VENC_SET0_LARB, "venc_set0_larb", "mm_ck", 0),
+ GATE_VENC(CLK_VENC_SET1_VENC, "venc_set1_venc", "mm_ck", 4),
+ GATE_VENC(CLK_VENC_SET2_JPGENC, "jpgenc", "mm_ck", 8),
+ GATE_VENC(CLK_VENC_SET3_VDEC, "venc_set3_vdec", "mm_ck", 12),
+};
+
+static int clk_mt6765_vcodec_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_VENC_NR_CLK);
+
+ mtk_clk_register_gates(node, venc_clks,
+ ARRAY_SIZE(venc_clks), clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+ if (r)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+
+ return r;
+}
+
+static const struct of_device_id of_match_clk_mt6765_vcodec[] = {
+ { .compatible = "mediatek,mt6765-vcodecsys", },
+ {}
+};
+
+static struct platform_driver clk_mt6765_vcodec_drv = {
+ .probe = clk_mt6765_vcodec_probe,
+ .driver = {
+ .name = "clk-mt6765-vcodec",
+ .of_match_table = of_match_clk_mt6765_vcodec,
+ },
+};
+
+builtin_platform_driver(clk_mt6765_vcodec_drv);
diff --git a/drivers/clk/mediatek/clk-mt6765.c b/drivers/clk/mediatek/clk-mt6765.c
new file mode 100644
index 000000000000..db8db1b3b79d
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt6765.c
@@ -0,0 +1,922 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: Owen Chen <owen.chen@mediatek.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+#include "clk-mux.h"
+
+#include <dt-bindings/clock/mt6765-clk.h>
+
+/* fmeter div select 4 */
+#define _DIV4_ 1
+
+static DEFINE_SPINLOCK(mt6765_clk_lock);
+
+/* Total 12 subsys */
+static void __iomem *cksys_base;
+static void __iomem *apmixed_base;
+
+/* CKSYS */
+#define CLK_SCP_CFG_0 (cksys_base + 0x200)
+#define CLK_SCP_CFG_1 (cksys_base + 0x204)
+
+/* CG */
+#define AP_PLL_CON3 (apmixed_base + 0x0C)
+#define PLLON_CON0 (apmixed_base + 0x44)
+#define PLLON_CON1 (apmixed_base + 0x48)
+
+/* clk cfg update */
+#define CLK_CFG_0 0x40
+#define CLK_CFG_0_SET 0x44
+#define CLK_CFG_0_CLR 0x48
+#define CLK_CFG_1 0x50
+#define CLK_CFG_1_SET 0x54
+#define CLK_CFG_1_CLR 0x58
+#define CLK_CFG_2 0x60
+#define CLK_CFG_2_SET 0x64
+#define CLK_CFG_2_CLR 0x68
+#define CLK_CFG_3 0x70
+#define CLK_CFG_3_SET 0x74
+#define CLK_CFG_3_CLR 0x78
+#define CLK_CFG_4 0x80
+#define CLK_CFG_4_SET 0x84
+#define CLK_CFG_4_CLR 0x88
+#define CLK_CFG_5 0x90
+#define CLK_CFG_5_SET 0x94
+#define CLK_CFG_5_CLR 0x98
+#define CLK_CFG_6 0xa0
+#define CLK_CFG_6_SET 0xa4
+#define CLK_CFG_6_CLR 0xa8
+#define CLK_CFG_7 0xb0
+#define CLK_CFG_7_SET 0xb4
+#define CLK_CFG_7_CLR 0xb8
+#define CLK_CFG_8 0xc0
+#define CLK_CFG_8_SET 0xc4
+#define CLK_CFG_8_CLR 0xc8
+#define CLK_CFG_9 0xd0
+#define CLK_CFG_9_SET 0xd4
+#define CLK_CFG_9_CLR 0xd8
+#define CLK_CFG_10 0xe0
+#define CLK_CFG_10_SET 0xe4
+#define CLK_CFG_10_CLR 0xe8
+#define CLK_CFG_UPDATE 0x004
+
+static const struct mtk_fixed_clk fixed_clks[] = {
+ FIXED_CLK(CLK_TOP_F_FRTC, "f_frtc_ck", "clk32k", 32768),
+ FIXED_CLK(CLK_TOP_CLK26M, "clk_26m_ck", "clk26m", 26000000),
+ FIXED_CLK(CLK_TOP_DMPLL, "dmpll_ck", NULL, 466000000),
+};
+
+static const struct mtk_fixed_factor top_divs[] = {
+ FACTOR(CLK_TOP_SYSPLL, "syspll_ck", "mainpll", 1, 1),
+ FACTOR(CLK_TOP_SYSPLL_D2, "syspll_d2", "mainpll", 1, 2),
+ FACTOR(CLK_TOP_SYSPLL1_D2, "syspll1_d2", "syspll_d2", 1, 2),
+ FACTOR(CLK_TOP_SYSPLL1_D4, "syspll1_d4", "syspll_d2", 1, 4),
+ FACTOR(CLK_TOP_SYSPLL1_D8, "syspll1_d8", "syspll_d2", 1, 8),
+ FACTOR(CLK_TOP_SYSPLL1_D16, "syspll1_d16", "syspll_d2", 1, 16),
+ FACTOR(CLK_TOP_SYSPLL_D3, "syspll_d3", "mainpll", 1, 3),
+ FACTOR(CLK_TOP_SYSPLL2_D2, "syspll2_d2", "syspll_d3", 1, 2),
+ FACTOR(CLK_TOP_SYSPLL2_D4, "syspll2_d4", "syspll_d3", 1, 4),
+ FACTOR(CLK_TOP_SYSPLL2_D8, "syspll2_d8", "syspll_d3", 1, 8),
+ FACTOR(CLK_TOP_SYSPLL_D5, "syspll_d5", "mainpll", 1, 5),
+ FACTOR(CLK_TOP_SYSPLL3_D2, "syspll3_d2", "syspll_d5", 1, 2),
+ FACTOR(CLK_TOP_SYSPLL3_D4, "syspll3_d4", "syspll_d5", 1, 4),
+ FACTOR(CLK_TOP_SYSPLL_D7, "syspll_d7", "mainpll", 1, 7),
+ FACTOR(CLK_TOP_SYSPLL4_D2, "syspll4_d2", "syspll_d7", 1, 2),
+ FACTOR(CLK_TOP_SYSPLL4_D4, "syspll4_d4", "syspll_d7", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL, "univpll", "univ2pll", 1, 2),
+ FACTOR(CLK_TOP_USB20_192M, "usb20_192m_ck", "univpll", 2, 13),
+ FACTOR(CLK_TOP_USB20_192M_D4, "usb20_192m_d4", "usb20_192m_ck", 1, 4),
+ FACTOR(CLK_TOP_USB20_192M_D8, "usb20_192m_d8", "usb20_192m_ck", 1, 8),
+ FACTOR(CLK_TOP_USB20_192M_D16,
+ "usb20_192m_d16", "usb20_192m_ck", 1, 16),
+ FACTOR(CLK_TOP_USB20_192M_D32,
+ "usb20_192m_d32", "usb20_192m_ck", 1, 32),
+ FACTOR(CLK_TOP_UNIVPLL_D2, "univpll_d2", "univpll", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL1_D2, "univpll1_d2", "univpll_d2", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL1_D4, "univpll1_d4", "univpll_d2", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL_D3, "univpll_d3", "univpll", 1, 3),
+ FACTOR(CLK_TOP_UNIVPLL2_D2, "univpll2_d2", "univpll_d3", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL2_D4, "univpll2_d4", "univpll_d3", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL2_D8, "univpll2_d8", "univpll_d3", 1, 8),
+ FACTOR(CLK_TOP_UNIVPLL2_D32, "univpll2_d32", "univpll_d3", 1, 32),
+ FACTOR(CLK_TOP_UNIVPLL_D5, "univpll_d5", "univpll", 1, 5),
+ FACTOR(CLK_TOP_UNIVPLL3_D2, "univpll3_d2", "univpll_d5", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL3_D4, "univpll3_d4", "univpll_d5", 1, 4),
+ FACTOR(CLK_TOP_MMPLL, "mmpll_ck", "mmpll", 1, 1),
+ FACTOR(CLK_TOP_MMPLL_D2, "mmpll_d2", "mmpll_ck", 1, 2),
+ FACTOR(CLK_TOP_MPLL, "mpll_ck", "mpll", 1, 1),
+ FACTOR(CLK_TOP_DA_MPLL_104M_DIV, "mpll_104m_div", "mpll_ck", 1, 2),
+ FACTOR(CLK_TOP_DA_MPLL_52M_DIV, "mpll_52m_div", "mpll_ck", 1, 4),
+ FACTOR(CLK_TOP_MFGPLL, "mfgpll_ck", "mfgpll", 1, 1),
+ FACTOR(CLK_TOP_MSDCPLL, "msdcpll_ck", "msdcpll", 1, 1),
+ FACTOR(CLK_TOP_MSDCPLL_D2, "msdcpll_d2", "msdcpll_ck", 1, 2),
+ FACTOR(CLK_TOP_APLL1, "apll1_ck", "apll1", 1, 1),
+ FACTOR(CLK_TOP_APLL1_D2, "apll1_d2", "apll1_ck", 1, 2),
+ FACTOR(CLK_TOP_APLL1_D4, "apll1_d4", "apll1_ck", 1, 4),
+ FACTOR(CLK_TOP_APLL1_D8, "apll1_d8", "apll1_ck", 1, 8),
+ FACTOR(CLK_TOP_ULPOSC1, "ulposc1_ck", "ulposc1", 1, 1),
+ FACTOR(CLK_TOP_ULPOSC1_D2, "ulposc1_d2", "ulposc1_ck", 1, 2),
+ FACTOR(CLK_TOP_ULPOSC1_D4, "ulposc1_d4", "ulposc1_ck", 1, 4),
+ FACTOR(CLK_TOP_ULPOSC1_D8, "ulposc1_d8", "ulposc1_ck", 1, 8),
+ FACTOR(CLK_TOP_ULPOSC1_D16, "ulposc1_d16", "ulposc1_ck", 1, 16),
+ FACTOR(CLK_TOP_ULPOSC1_D32, "ulposc1_d32", "ulposc1_ck", 1, 32),
+ FACTOR(CLK_TOP_F_F26M, "f_f26m_ck", "clk_26m_ck", 1, 1),
+ FACTOR(CLK_TOP_AXI, "axi_ck", "axi_sel", 1, 1),
+ FACTOR(CLK_TOP_MM, "mm_ck", "mm_sel", 1, 1),
+ FACTOR(CLK_TOP_SCP, "scp_ck", "scp_sel", 1, 1),
+ FACTOR(CLK_TOP_MFG, "mfg_ck", "mfg_sel", 1, 1),
+ FACTOR(CLK_TOP_F_FUART, "f_fuart_ck", "uart_sel", 1, 1),
+ FACTOR(CLK_TOP_SPI, "spi_ck", "spi_sel", 1, 1),
+ FACTOR(CLK_TOP_MSDC50_0, "msdc50_0_ck", "msdc50_0_sel", 1, 1),
+ FACTOR(CLK_TOP_MSDC30_1, "msdc30_1_ck", "msdc30_1_sel", 1, 1),
+ FACTOR(CLK_TOP_AUDIO, "audio_ck", "audio_sel", 1, 1),
+ FACTOR(CLK_TOP_AUD_1, "aud_1_ck", "aud_1_sel", 1, 1),
+ FACTOR(CLK_TOP_AUD_ENGEN1, "aud_engen1_ck", "aud_engen1_sel", 1, 1),
+ FACTOR(CLK_TOP_F_FDISP_PWM, "f_fdisp_pwm_ck", "disp_pwm_sel", 1, 1),
+ FACTOR(CLK_TOP_SSPM, "sspm_ck", "sspm_sel", 1, 1),
+ FACTOR(CLK_TOP_DXCC, "dxcc_ck", "dxcc_sel", 1, 1),
+ FACTOR(CLK_TOP_I2C, "i2c_ck", "i2c_sel", 1, 1),
+ FACTOR(CLK_TOP_F_FPWM, "f_fpwm_ck", "pwm_sel", 1, 1),
+ FACTOR(CLK_TOP_F_FSENINF, "f_fseninf_ck", "seninf_sel", 1, 1),
+ FACTOR(CLK_TOP_AES_FDE, "aes_fde_ck", "aes_fde_sel", 1, 1),
+ FACTOR(CLK_TOP_F_BIST2FPC, "f_bist2fpc_ck", "univpll2_d2", 1, 1),
+ FACTOR(CLK_TOP_ARMPLL_DIVIDER_PLL0, "arm_div_pll0", "syspll_d2", 1, 1),
+ FACTOR(CLK_TOP_ARMPLL_DIVIDER_PLL1, "arm_div_pll1", "syspll_ck", 1, 1),
+ FACTOR(CLK_TOP_ARMPLL_DIVIDER_PLL2, "arm_div_pll2", "univpll_d2", 1, 1),
+ FACTOR(CLK_TOP_DA_USB20_48M_DIV,
+ "usb20_48m_div", "usb20_192m_d4", 1, 1),
+ FACTOR(CLK_TOP_DA_UNIV_48M_DIV, "univ_48m_div", "usb20_192m_d4", 1, 1),
+};
+
+static const char * const axi_parents[] = {
+ "clk26m",
+ "syspll_d7",
+ "syspll1_d4",
+ "syspll3_d2"
+};
+
+static const char * const mem_parents[] = {
+ "clk26m",
+ "dmpll_ck",
+ "apll1_ck"
+};
+
+static const char * const mm_parents[] = {
+ "clk26m",
+ "mmpll_ck",
+ "syspll1_d2",
+ "syspll_d5",
+ "syspll1_d4",
+ "univpll_d5",
+ "univpll1_d2",
+ "mmpll_d2"
+};
+
+static const char * const scp_parents[] = {
+ "clk26m",
+ "syspll4_d2",
+ "univpll2_d2",
+ "syspll1_d2",
+ "univpll1_d2",
+ "syspll_d3",
+ "univpll_d3"
+};
+
+static const char * const mfg_parents[] = {
+ "clk26m",
+ "mfgpll_ck",
+ "syspll_d3",
+ "univpll_d3"
+};
+
+static const char * const atb_parents[] = {
+ "clk26m",
+ "syspll1_d4",
+ "syspll1_d2"
+};
+
+static const char * const camtg_parents[] = {
+ "clk26m",
+ "usb20_192m_d8",
+ "univpll2_d8",
+ "usb20_192m_d4",
+ "univpll2_d32",
+ "usb20_192m_d16",
+ "usb20_192m_d32"
+};
+
+static const char * const uart_parents[] = {
+ "clk26m",
+ "univpll2_d8"
+};
+
+static const char * const spi_parents[] = {
+ "clk26m",
+ "syspll3_d2",
+ "syspll4_d2",
+ "syspll2_d4"
+};
+
+static const char * const msdc5hclk_parents[] = {
+ "clk26m",
+ "syspll1_d2",
+ "univpll1_d4",
+ "syspll2_d2"
+};
+
+static const char * const msdc50_0_parents[] = {
+ "clk26m",
+ "msdcpll_ck",
+ "syspll2_d2",
+ "syspll4_d2",
+ "univpll1_d2",
+ "syspll1_d2",
+ "univpll_d5",
+ "univpll1_d4"
+};
+
+static const char * const msdc30_1_parents[] = {
+ "clk26m",
+ "msdcpll_d2",
+ "univpll2_d2",
+ "syspll2_d2",
+ "syspll1_d4",
+ "univpll1_d4",
+ "usb20_192m_d4",
+ "syspll2_d4"
+};
+
+static const char * const audio_parents[] = {
+ "clk26m",
+ "syspll3_d4",
+ "syspll4_d4",
+ "syspll1_d16"
+};
+
+static const char * const aud_intbus_parents[] = {
+ "clk26m",
+ "syspll1_d4",
+ "syspll4_d2"
+};
+
+static const char * const aud_1_parents[] = {
+ "clk26m",
+ "apll1_ck"
+};
+
+static const char * const aud_engen1_parents[] = {
+ "clk26m",
+ "apll1_d2",
+ "apll1_d4",
+ "apll1_d8"
+};
+
+static const char * const disp_pwm_parents[] = {
+ "clk26m",
+ "univpll2_d4",
+ "ulposc1_d2",
+ "ulposc1_d8"
+};
+
+static const char * const sspm_parents[] = {
+ "clk26m",
+ "syspll1_d2",
+ "syspll_d3"
+};
+
+static const char * const dxcc_parents[] = {
+ "clk26m",
+ "syspll1_d2",
+ "syspll1_d4",
+ "syspll1_d8"
+};
+
+static const char * const usb_top_parents[] = {
+ "clk26m",
+ "univpll3_d4"
+};
+
+static const char * const spm_parents[] = {
+ "clk26m",
+ "syspll1_d8"
+};
+
+static const char * const i2c_parents[] = {
+ "clk26m",
+ "univpll3_d4",
+ "univpll3_d2",
+ "syspll1_d8",
+ "syspll2_d8"
+};
+
+static const char * const pwm_parents[] = {
+ "clk26m",
+ "univpll3_d4",
+ "syspll1_d8"
+};
+
+static const char * const seninf_parents[] = {
+ "clk26m",
+ "univpll1_d4",
+ "univpll1_d2",
+ "univpll2_d2"
+};
+
+static const char * const aes_fde_parents[] = {
+ "clk26m",
+ "msdcpll_ck",
+ "univpll_d3",
+ "univpll2_d2",
+ "univpll1_d2",
+ "syspll1_d2"
+};
+
+static const char * const ulposc_parents[] = {
+ "clk26m",
+ "ulposc1_d4",
+ "ulposc1_d8",
+ "ulposc1_d16",
+ "ulposc1_d32"
+};
+
+static const char * const camtm_parents[] = {
+ "clk26m",
+ "univpll1_d4",
+ "univpll1_d2",
+ "univpll2_d2"
+};
+
+#define INVALID_UPDATE_REG 0xFFFFFFFF
+#define INVALID_UPDATE_SHIFT -1
+#define INVALID_MUX_GATE -1
+
+static const struct mtk_mux top_muxes[] = {
+ /* CLK_CFG_0 */
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_AXI_SEL, "axi_sel", axi_parents,
+ CLK_CFG_0, CLK_CFG_0_SET, CLK_CFG_0_CLR,
+ 0, 2, 7, CLK_CFG_UPDATE, 0, CLK_IS_CRITICAL),
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_MEM_SEL, "mem_sel", mem_parents,
+ CLK_CFG_0, CLK_CFG_0_SET, CLK_CFG_0_CLR,
+ 8, 2, 15, CLK_CFG_UPDATE, 1, CLK_IS_CRITICAL),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MM_SEL, "mm_sel", mm_parents, CLK_CFG_0,
+ CLK_CFG_0_SET, CLK_CFG_0_CLR, 16, 3, 23,
+ CLK_CFG_UPDATE, 2),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SCP_SEL, "scp_sel", scp_parents, CLK_CFG_0,
+ CLK_CFG_0_SET, CLK_CFG_0_CLR, 24, 3, 31,
+ CLK_CFG_UPDATE, 3),
+ /* CLK_CFG_1 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MFG_SEL, "mfg_sel", mfg_parents, CLK_CFG_1,
+ CLK_CFG_1_SET, CLK_CFG_1_CLR, 0, 2, 7,
+ CLK_CFG_UPDATE, 4),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_ATB_SEL, "atb_sel", atb_parents, CLK_CFG_1,
+ CLK_CFG_1_SET, CLK_CFG_1_CLR, 8, 2, 15,
+ CLK_CFG_UPDATE, 5),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG_SEL, "camtg_sel",
+ camtg_parents, CLK_CFG_1, CLK_CFG_1_SET,
+ CLK_CFG_1_CLR, 16, 3, 23, CLK_CFG_UPDATE, 6),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG1_SEL, "camtg1_sel", camtg_parents,
+ CLK_CFG_1, CLK_CFG_1_SET, CLK_CFG_1_CLR,
+ 24, 3, 31, CLK_CFG_UPDATE, 7),
+ /* CLK_CFG_2 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG2_SEL, "camtg2_sel",
+ camtg_parents, CLK_CFG_2, CLK_CFG_2_SET,
+ CLK_CFG_2_CLR, 0, 3, 7, CLK_CFG_UPDATE, 8),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG3_SEL, "camtg3_sel", camtg_parents,
+ CLK_CFG_2, CLK_CFG_2_SET, CLK_CFG_2_CLR,
+ 8, 3, 15, CLK_CFG_UPDATE, 9),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_UART_SEL, "uart_sel", uart_parents,
+ CLK_CFG_2, CLK_CFG_2_SET, CLK_CFG_2_CLR, 16, 1, 23,
+ CLK_CFG_UPDATE, 10),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SPI_SEL, "spi_sel", spi_parents, CLK_CFG_2,
+ CLK_CFG_2_SET, CLK_CFG_2_CLR, 24, 2, 31,
+ CLK_CFG_UPDATE, 11),
+ /* CLK_CFG_3 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC50_0_HCLK_SEL, "msdc5hclk",
+ msdc5hclk_parents, CLK_CFG_3, CLK_CFG_3_SET,
+ CLK_CFG_3_CLR, 0, 2, 7, CLK_CFG_UPDATE, 12),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC50_0_SEL, "msdc50_0_sel",
+ msdc50_0_parents, CLK_CFG_3, CLK_CFG_3_SET,
+ CLK_CFG_3_CLR, 8, 3, 15, CLK_CFG_UPDATE, 13),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC30_1_SEL, "msdc30_1_sel",
+ msdc30_1_parents, CLK_CFG_3, CLK_CFG_3_SET,
+ CLK_CFG_3_CLR, 16, 3, 23, CLK_CFG_UPDATE, 14),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AUDIO_SEL, "audio_sel", audio_parents,
+ CLK_CFG_3, CLK_CFG_3_SET, CLK_CFG_3_CLR,
+ 24, 2, 31, CLK_CFG_UPDATE, 15),
+ /* CLK_CFG_4 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AUD_INTBUS_SEL, "aud_intbus_sel",
+ aud_intbus_parents, CLK_CFG_4, CLK_CFG_4_SET,
+ CLK_CFG_4_CLR, 0, 2, 7, CLK_CFG_UPDATE, 16),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AUD_1_SEL, "aud_1_sel", aud_1_parents,
+ CLK_CFG_4, CLK_CFG_4_SET, CLK_CFG_4_CLR,
+ 8, 1, 15, CLK_CFG_UPDATE, 17),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AUD_ENGEN1_SEL, "aud_engen1_sel",
+ aud_engen1_parents, CLK_CFG_4, CLK_CFG_4_SET,
+ CLK_CFG_4_CLR, 16, 2, 23, CLK_CFG_UPDATE, 18),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DISP_PWM_SEL, "disp_pwm_sel",
+ disp_pwm_parents, CLK_CFG_4, CLK_CFG_4_SET,
+ CLK_CFG_4_CLR, 24, 2, 31, CLK_CFG_UPDATE, 19),
+ /* CLK_CFG_5 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SSPM_SEL, "sspm_sel", sspm_parents,
+ CLK_CFG_5, CLK_CFG_5_SET, CLK_CFG_5_CLR, 0, 2, 7,
+ CLK_CFG_UPDATE, 20),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DXCC_SEL, "dxcc_sel", dxcc_parents,
+ CLK_CFG_5, CLK_CFG_5_SET, CLK_CFG_5_CLR, 8, 2, 15,
+ CLK_CFG_UPDATE, 21),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_USB_TOP_SEL, "usb_top_sel",
+ usb_top_parents, CLK_CFG_5, CLK_CFG_5_SET,
+ CLK_CFG_5_CLR, 16, 1, 23, CLK_CFG_UPDATE, 22),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SPM_SEL, "spm_sel", spm_parents, CLK_CFG_5,
+ CLK_CFG_5_SET, CLK_CFG_5_CLR, 24, 1, 31,
+ CLK_CFG_UPDATE, 23),
+ /* CLK_CFG_6 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_I2C_SEL, "i2c_sel", i2c_parents, CLK_CFG_6,
+ CLK_CFG_6_SET, CLK_CFG_6_CLR, 0, 3, 7, CLK_CFG_UPDATE,
+ 24),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_PWM_SEL, "pwm_sel", pwm_parents, CLK_CFG_6,
+ CLK_CFG_6_SET, CLK_CFG_6_CLR, 8, 2, 15, CLK_CFG_UPDATE,
+ 25),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SENINF_SEL, "seninf_sel", seninf_parents,
+ CLK_CFG_6, CLK_CFG_6_SET, CLK_CFG_6_CLR, 16, 2, 23,
+ CLK_CFG_UPDATE, 26),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AES_FDE_SEL, "aes_fde_sel",
+ aes_fde_parents, CLK_CFG_6, CLK_CFG_6_SET,
+ CLK_CFG_6_CLR, 24, 3, 31, CLK_CFG_UPDATE, 27),
+ /* CLK_CFG_7 */
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_PWRAP_ULPOSC_SEL, "ulposc_sel",
+ ulposc_parents, CLK_CFG_7, CLK_CFG_7_SET,
+ CLK_CFG_7_CLR, 0, 3, 7, CLK_CFG_UPDATE, 28,
+ CLK_IS_CRITICAL),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTM_SEL, "camtm_sel", camtm_parents,
+ CLK_CFG_7, CLK_CFG_7_SET, CLK_CFG_7_CLR, 8, 2, 15,
+ CLK_CFG_UPDATE, 29),
+};
+
+static const struct mtk_gate_regs top0_cg_regs = {
+ .set_ofs = 0x0,
+ .clr_ofs = 0x0,
+ .sta_ofs = 0x0,
+};
+
+static const struct mtk_gate_regs top1_cg_regs = {
+ .set_ofs = 0x104,
+ .clr_ofs = 0x104,
+ .sta_ofs = 0x104,
+};
+
+static const struct mtk_gate_regs top2_cg_regs = {
+ .set_ofs = 0x320,
+ .clr_ofs = 0x320,
+ .sta_ofs = 0x320,
+};
+
+#define GATE_TOP0(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &top0_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr, \
+ }
+
+#define GATE_TOP1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &top1_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr_inv, \
+ }
+
+#define GATE_TOP2(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &top2_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr, \
+ }
+
+static const struct mtk_gate top_clks[] = {
+ /* TOP0 */
+ GATE_TOP0(CLK_TOP_MD_32K, "md_32k", "f_frtc_ck", 8),
+ GATE_TOP0(CLK_TOP_MD_26M, "md_26m", "f_f26m_ck", 9),
+ GATE_TOP0(CLK_TOP_MD2_32K, "md2_32k", "f_frtc_ck", 10),
+ GATE_TOP0(CLK_TOP_MD2_26M, "md2_26m", "f_f26m_ck", 11),
+ /* TOP1 */
+ GATE_TOP1(CLK_TOP_ARMPLL_DIVIDER_PLL0_EN,
+ "arm_div_pll0_en", "arm_div_pll0", 3),
+ GATE_TOP1(CLK_TOP_ARMPLL_DIVIDER_PLL1_EN,
+ "arm_div_pll1_en", "arm_div_pll1", 4),
+ GATE_TOP1(CLK_TOP_ARMPLL_DIVIDER_PLL2_EN,
+ "arm_div_pll2_en", "arm_div_pll2", 5),
+ GATE_TOP1(CLK_TOP_FMEM_OCC_DRC_EN, "drc_en", "univpll2_d2", 6),
+ GATE_TOP1(CLK_TOP_USB20_48M_EN, "usb20_48m_en", "usb20_48m_div", 8),
+ GATE_TOP1(CLK_TOP_UNIVPLL_48M_EN, "univpll_48m_en", "univ_48m_div", 9),
+ GATE_TOP1(CLK_TOP_F_UFS_MP_SAP_CFG_EN, "ufs_sap", "f_f26m_ck", 12),
+ GATE_TOP1(CLK_TOP_F_BIST2FPC_EN, "bist2fpc", "f_bist2fpc_ck", 16),
+ /* TOP2 */
+ GATE_TOP2(CLK_TOP_APLL12_DIV0, "apll12_div0", "aud_1_ck", 2),
+ GATE_TOP2(CLK_TOP_APLL12_DIV1, "apll12_div1", "aud_1_ck", 3),
+ GATE_TOP2(CLK_TOP_APLL12_DIV2, "apll12_div2", "aud_1_ck", 4),
+ GATE_TOP2(CLK_TOP_APLL12_DIV3, "apll12_div3", "aud_1_ck", 5),
+};
+
+static const struct mtk_gate_regs ifr2_cg_regs = {
+ .set_ofs = 0x80,
+ .clr_ofs = 0x84,
+ .sta_ofs = 0x90,
+};
+
+static const struct mtk_gate_regs ifr3_cg_regs = {
+ .set_ofs = 0x88,
+ .clr_ofs = 0x8c,
+ .sta_ofs = 0x94,
+};
+
+static const struct mtk_gate_regs ifr4_cg_regs = {
+ .set_ofs = 0xa4,
+ .clr_ofs = 0xa8,
+ .sta_ofs = 0xac,
+};
+
+static const struct mtk_gate_regs ifr5_cg_regs = {
+ .set_ofs = 0xc0,
+ .clr_ofs = 0xc4,
+ .sta_ofs = 0xc8,
+};
+
+#define GATE_IFR2(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ifr2_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_IFR3(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ifr3_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_IFR4(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ifr4_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_IFR5(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ifr5_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+static const struct mtk_gate ifr_clks[] = {
+ /* INFRA_TOPAXI */
+ /* INFRA PERI */
+ /* INFRA mode 0 */
+ GATE_IFR2(CLK_IFR_ICUSB, "ifr_icusb", "axi_ck", 8),
+ GATE_IFR2(CLK_IFR_GCE, "ifr_gce", "axi_ck", 9),
+ GATE_IFR2(CLK_IFR_THERM, "ifr_therm", "axi_ck", 10),
+ GATE_IFR2(CLK_IFR_I2C_AP, "ifr_i2c_ap", "i2c_ck", 11),
+ GATE_IFR2(CLK_IFR_I2C_CCU, "ifr_i2c_ccu", "i2c_ck", 12),
+ GATE_IFR2(CLK_IFR_I2C_SSPM, "ifr_i2c_sspm", "i2c_ck", 13),
+ GATE_IFR2(CLK_IFR_I2C_RSV, "ifr_i2c_rsv", "i2c_ck", 14),
+ GATE_IFR2(CLK_IFR_PWM_HCLK, "ifr_pwm_hclk", "axi_ck", 15),
+ GATE_IFR2(CLK_IFR_PWM1, "ifr_pwm1", "f_fpwm_ck", 16),
+ GATE_IFR2(CLK_IFR_PWM2, "ifr_pwm2", "f_fpwm_ck", 17),
+ GATE_IFR2(CLK_IFR_PWM3, "ifr_pwm3", "f_fpwm_ck", 18),
+ GATE_IFR2(CLK_IFR_PWM4, "ifr_pwm4", "f_fpwm_ck", 19),
+ GATE_IFR2(CLK_IFR_PWM5, "ifr_pwm5", "f_fpwm_ck", 20),
+ GATE_IFR2(CLK_IFR_PWM, "ifr_pwm", "f_fpwm_ck", 21),
+ GATE_IFR2(CLK_IFR_UART0, "ifr_uart0", "f_fuart_ck", 22),
+ GATE_IFR2(CLK_IFR_UART1, "ifr_uart1", "f_fuart_ck", 23),
+ GATE_IFR2(CLK_IFR_GCE_26M, "ifr_gce_26m", "f_f26m_ck", 27),
+ GATE_IFR2(CLK_IFR_CQ_DMA_FPC, "ifr_dma", "axi_ck", 28),
+ GATE_IFR2(CLK_IFR_BTIF, "ifr_btif", "axi_ck", 31),
+ /* INFRA mode 1 */
+ GATE_IFR3(CLK_IFR_SPI0, "ifr_spi0", "spi_ck", 1),
+ GATE_IFR3(CLK_IFR_MSDC0, "ifr_msdc0", "msdc5hclk", 2),
+ GATE_IFR3(CLK_IFR_MSDC1, "ifr_msdc1", "axi_ck", 4),
+ GATE_IFR3(CLK_IFR_TRNG, "ifr_trng", "axi_ck", 9),
+ GATE_IFR3(CLK_IFR_AUXADC, "ifr_auxadc", "f_f26m_ck", 10),
+ GATE_IFR3(CLK_IFR_CCIF1_AP, "ifr_ccif1_ap", "axi_ck", 12),
+ GATE_IFR3(CLK_IFR_CCIF1_MD, "ifr_ccif1_md", "axi_ck", 13),
+ GATE_IFR3(CLK_IFR_AUXADC_MD, "ifr_auxadc_md", "f_f26m_ck", 14),
+ GATE_IFR3(CLK_IFR_AP_DMA, "ifr_ap_dma", "axi_ck", 18),
+ GATE_IFR3(CLK_IFR_DEVICE_APC, "ifr_dapc", "axi_ck", 20),
+ GATE_IFR3(CLK_IFR_CCIF_AP, "ifr_ccif_ap", "axi_ck", 23),
+ GATE_IFR3(CLK_IFR_AUDIO, "ifr_audio", "axi_ck", 25),
+ GATE_IFR3(CLK_IFR_CCIF_MD, "ifr_ccif_md", "axi_ck", 26),
+ /* INFRA mode 2 */
+ GATE_IFR4(CLK_IFR_RG_PWM_FBCLK6, "ifr_pwmfb", "f_f26m_ck", 0),
+ GATE_IFR4(CLK_IFR_DISP_PWM, "ifr_disp_pwm", "f_fdisp_pwm_ck", 2),
+ GATE_IFR4(CLK_IFR_CLDMA_BCLK, "ifr_cldmabclk", "axi_ck", 3),
+ GATE_IFR4(CLK_IFR_AUDIO_26M_BCLK, "ifr_audio26m", "f_f26m_ck", 4),
+ GATE_IFR4(CLK_IFR_SPI1, "ifr_spi1", "spi_ck", 6),
+ GATE_IFR4(CLK_IFR_I2C4, "ifr_i2c4", "i2c_ck", 7),
+ GATE_IFR4(CLK_IFR_SPI2, "ifr_spi2", "spi_ck", 9),
+ GATE_IFR4(CLK_IFR_SPI3, "ifr_spi3", "spi_ck", 10),
+ GATE_IFR4(CLK_IFR_I2C5, "ifr_i2c5", "i2c_ck", 18),
+ GATE_IFR4(CLK_IFR_I2C5_ARBITER, "ifr_i2c5a", "i2c_ck", 19),
+ GATE_IFR4(CLK_IFR_I2C5_IMM, "ifr_i2c5_imm", "i2c_ck", 20),
+ GATE_IFR4(CLK_IFR_I2C1_ARBITER, "ifr_i2c1a", "i2c_ck", 21),
+ GATE_IFR4(CLK_IFR_I2C1_IMM, "ifr_i2c1_imm", "i2c_ck", 22),
+ GATE_IFR4(CLK_IFR_I2C2_ARBITER, "ifr_i2c2a", "i2c_ck", 23),
+ GATE_IFR4(CLK_IFR_I2C2_IMM, "ifr_i2c2_imm", "i2c_ck", 24),
+ GATE_IFR4(CLK_IFR_SPI4, "ifr_spi4", "spi_ck", 25),
+ GATE_IFR4(CLK_IFR_SPI5, "ifr_spi5", "spi_ck", 26),
+ GATE_IFR4(CLK_IFR_CQ_DMA, "ifr_cq_dma", "axi_ck", 27),
+ GATE_IFR4(CLK_IFR_FAES_FDE, "ifr_faes_fde_ck", "aes_fde_ck", 29),
+ /* INFRA mode 3 */
+ GATE_IFR5(CLK_IFR_MSDC0_SELF, "ifr_msdc0sf", "msdc50_0_ck", 0),
+ GATE_IFR5(CLK_IFR_MSDC1_SELF, "ifr_msdc1sf", "msdc50_0_ck", 1),
+ GATE_IFR5(CLK_IFR_I2C6, "ifr_i2c6", "i2c_ck", 6),
+ GATE_IFR5(CLK_IFR_AP_MSDC0, "ifr_ap_msdc0", "msdc50_0_ck", 7),
+ GATE_IFR5(CLK_IFR_MD_MSDC0, "ifr_md_msdc0", "msdc50_0_ck", 8),
+ GATE_IFR5(CLK_IFR_MSDC0_SRC, "ifr_msdc0_clk", "msdc50_0_ck", 9),
+ GATE_IFR5(CLK_IFR_MSDC1_SRC, "ifr_msdc1_clk", "msdc30_1_ck", 10),
+ GATE_IFR5(CLK_IFR_MCU_PM_BCLK, "ifr_mcu_pm_bclk", "axi_ck", 17),
+ GATE_IFR5(CLK_IFR_CCIF2_AP, "ifr_ccif2_ap", "axi_ck", 18),
+ GATE_IFR5(CLK_IFR_CCIF2_MD, "ifr_ccif2_md", "axi_ck", 19),
+ GATE_IFR5(CLK_IFR_CCIF3_AP, "ifr_ccif3_ap", "axi_ck", 20),
+ GATE_IFR5(CLK_IFR_CCIF3_MD, "ifr_ccif3_md", "axi_ck", 21),
+};
+
+/* additional CCF control for mipi26M race condition (disp/camera) */
+static const struct mtk_gate_regs apmixed_cg_regs = {
+ .set_ofs = 0x14,
+ .clr_ofs = 0x14,
+ .sta_ofs = 0x14,
+};
+
+#define GATE_APMIXED(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &apmixed_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr_inv, \
+ }
+
+static const struct mtk_gate apmixed_clks[] = {
+ /* AUDIO0 */
+ GATE_APMIXED(CLK_APMIXED_SSUSB26M, "apmixed_ssusb26m", "f_f26m_ck",
+ 4),
+ GATE_APMIXED(CLK_APMIXED_APPLL26M, "apmixed_appll26m", "f_f26m_ck",
+ 5),
+ GATE_APMIXED(CLK_APMIXED_MIPIC0_26M, "apmixed_mipic026m", "f_f26m_ck",
+ 6),
+ GATE_APMIXED(CLK_APMIXED_MDPLLGP26M, "apmixed_mdpll26m", "f_f26m_ck",
+ 7),
+ GATE_APMIXED(CLK_APMIXED_MMSYS_F26M, "apmixed_mmsys26m", "f_f26m_ck",
+ 8),
+ GATE_APMIXED(CLK_APMIXED_UFS26M, "apmixed_ufs26m", "f_f26m_ck",
+ 9),
+ GATE_APMIXED(CLK_APMIXED_MIPIC1_26M, "apmixed_mipic126m", "f_f26m_ck",
+ 11),
+ GATE_APMIXED(CLK_APMIXED_MEMPLL26M, "apmixed_mempll26m", "f_f26m_ck",
+ 13),
+ GATE_APMIXED(CLK_APMIXED_CLKSQ_LVPLL_26M, "apmixed_lvpll26m",
+ "f_f26m_ck", 14),
+ GATE_APMIXED(CLK_APMIXED_MIPID0_26M, "apmixed_mipid026m", "f_f26m_ck",
+ 16),
+};
+
+#define MT6765_PLL_FMAX (3800UL * MHZ)
+#define MT6765_PLL_FMIN (1500UL * MHZ)
+
+#define CON0_MT6765_RST_BAR BIT(23)
+
+#define PLL_INFO_NULL (0xFF)
+
+#define PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
+ _pcwibits, _pd_reg, _pd_shift, _tuner_reg, _tuner_en_reg,\
+ _tuner_en_bit, _pcw_reg, _pcw_shift, _div_table) {\
+ .id = _id, \
+ .name = _name, \
+ .reg = _reg, \
+ .pwr_reg = _pwr_reg, \
+ .en_mask = _en_mask, \
+ .flags = _flags, \
+ .rst_bar_mask = CON0_MT6765_RST_BAR, \
+ .fmax = MT6765_PLL_FMAX, \
+ .fmin = MT6765_PLL_FMIN, \
+ .pcwbits = _pcwbits, \
+ .pcwibits = _pcwibits, \
+ .pd_reg = _pd_reg, \
+ .pd_shift = _pd_shift, \
+ .tuner_reg = _tuner_reg, \
+ .tuner_en_reg = _tuner_en_reg, \
+ .tuner_en_bit = _tuner_en_bit, \
+ .pcw_reg = _pcw_reg, \
+ .pcw_shift = _pcw_shift, \
+ .div_table = _div_table, \
+ }
+
+#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
+ _pcwibits, _pd_reg, _pd_shift, _tuner_reg, \
+ _tuner_en_reg, _tuner_en_bit, _pcw_reg, \
+ _pcw_shift) \
+ PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, \
+ _pcwbits, _pcwibits, _pd_reg, _pd_shift, \
+ _tuner_reg, _tuner_en_reg, _tuner_en_bit, \
+ _pcw_reg, _pcw_shift, NULL) \
+
+static const struct mtk_pll_data plls[] = {
+ PLL(CLK_APMIXED_ARMPLL_L, "armpll_l", 0x021C, 0x0228, BIT(0),
+ PLL_AO, 22, 8, 0x0220, 24, 0, 0, 0, 0x0220, 0),
+ PLL(CLK_APMIXED_ARMPLL, "armpll", 0x020C, 0x0218, BIT(0),
+ PLL_AO, 22, 8, 0x0210, 24, 0, 0, 0, 0x0210, 0),
+ PLL(CLK_APMIXED_CCIPLL, "ccipll", 0x022C, 0x0238, BIT(0),
+ PLL_AO, 22, 8, 0x0230, 24, 0, 0, 0, 0x0230, 0),
+ PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x023C, 0x0248, BIT(0),
+ (HAVE_RST_BAR | PLL_AO), 22, 8, 0x0240, 24, 0, 0, 0, 0x0240,
+ 0),
+ PLL(CLK_APMIXED_MFGPLL, "mfgpll", 0x024C, 0x0258, BIT(0),
+ 0, 22, 8, 0x0250, 24, 0, 0, 0, 0x0250, 0),
+ PLL(CLK_APMIXED_MMPLL, "mmpll", 0x025C, 0x0268, BIT(0),
+ 0, 22, 8, 0x0260, 24, 0, 0, 0, 0x0260, 0),
+ PLL(CLK_APMIXED_UNIV2PLL, "univ2pll", 0x026C, 0x0278, BIT(0),
+ HAVE_RST_BAR, 22, 8, 0x0270, 24, 0, 0, 0, 0x0270, 0),
+ PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x027C, 0x0288, BIT(0),
+ 0, 22, 8, 0x0280, 24, 0, 0, 0, 0x0280, 0),
+ PLL(CLK_APMIXED_APLL1, "apll1", 0x028C, 0x029C, BIT(0),
+ 0, 32, 8, 0x0290, 24, 0x0040, 0x000C, 0, 0x0294, 0),
+ PLL(CLK_APMIXED_MPLL, "mpll", 0x02A0, 0x02AC, BIT(0),
+ PLL_AO, 22, 8, 0x02A4, 24, 0, 0, 0, 0x02A4, 0),
+};
+
+static int clk_mt6765_apmixed_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+ struct device_node *node = pdev->dev.of_node;
+ void __iomem *base;
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base)) {
+ pr_err("%s(): ioremap failed\n", __func__);
+ return PTR_ERR(base);
+ }
+
+ clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
+
+ mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
+
+ mtk_clk_register_gates(node, apmixed_clks,
+ ARRAY_SIZE(apmixed_clks), clk_data);
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+ if (r)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+
+ apmixed_base = base;
+ /* MPLL, CCIPLL, MAINPLL set HW mode, TDCLKSQ, CLKSQ1 */
+ writel(readl(AP_PLL_CON3) & 0xFFFFFFE1, AP_PLL_CON3);
+ writel(readl(PLLON_CON0) & 0x01041041, PLLON_CON0);
+ writel(readl(PLLON_CON1) & 0x01041041, PLLON_CON1);
+
+ return r;
+}
+
+static int clk_mt6765_top_probe(struct platform_device *pdev)
+{
+ int r;
+ struct device_node *node = pdev->dev.of_node;
+ void __iomem *base;
+ struct clk_onecell_data *clk_data;
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base)) {
+ pr_err("%s(): ioremap failed\n", __func__);
+ return PTR_ERR(base);
+ }
+
+ clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
+
+ mtk_clk_register_fixed_clks(fixed_clks, ARRAY_SIZE(fixed_clks),
+ clk_data);
+ mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs),
+ clk_data);
+ mtk_clk_register_muxes(top_muxes, ARRAY_SIZE(top_muxes), node,
+ &mt6765_clk_lock, clk_data);
+ mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks),
+ clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+ if (r)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+
+ cksys_base = base;
+	/* [4]: no need */
+ writel(readl(CLK_SCP_CFG_0) | 0x3EF, CLK_SCP_CFG_0);
+ /* [1,2,3,8]: no need */
+ writel(readl(CLK_SCP_CFG_1) | 0x1, CLK_SCP_CFG_1);
+
+ return r;
+}
+
+static int clk_mt6765_ifr_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+ struct device_node *node = pdev->dev.of_node;
+ void __iomem *base;
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base)) {
+ pr_err("%s(): ioremap failed\n", __func__);
+ return PTR_ERR(base);
+ }
+
+ clk_data = mtk_alloc_clk_data(CLK_IFR_NR_CLK);
+
+ mtk_clk_register_gates(node, ifr_clks, ARRAY_SIZE(ifr_clks),
+ clk_data);
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+ if (r)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+
+ return r;
+}
+
+static const struct of_device_id of_match_clk_mt6765[] = {
+ {
+ .compatible = "mediatek,mt6765-apmixedsys",
+ .data = clk_mt6765_apmixed_probe,
+ }, {
+ .compatible = "mediatek,mt6765-topckgen",
+ .data = clk_mt6765_top_probe,
+ }, {
+ .compatible = "mediatek,mt6765-infracfg",
+ .data = clk_mt6765_ifr_probe,
+ }, {
+ /* sentinel */
+ }
+};
+
+static int clk_mt6765_probe(struct platform_device *pdev)
+{
+ int (*clk_probe)(struct platform_device *d);
+ int r;
+
+ clk_probe = of_device_get_match_data(&pdev->dev);
+ if (!clk_probe)
+ return -EINVAL;
+
+ r = clk_probe(pdev);
+ if (r)
+ dev_err(&pdev->dev,
+ "could not register clock provider: %s: %d\n",
+ pdev->name, r);
+
+ return r;
+}
+
+static struct platform_driver clk_mt6765_drv = {
+ .probe = clk_mt6765_probe,
+ .driver = {
+ .name = "clk-mt6765",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_clk_mt6765,
+ },
+};
+
+static int __init clk_mt6765_init(void)
+{
+ return platform_driver_register(&clk_mt6765_drv);
+}
+
+arch_initcall(clk_mt6765_init);
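
The three block-specific probe functions above are tied together through the of_device_id table: each entry's .data field carries the probe callback for that block, and the shared clk_mt6765_probe() simply fetches it with of_device_get_match_data() and calls it, so one platform driver covers apmixedsys, topckgen and infracfg. A rough standalone sketch of that dispatch pattern (plain userspace C with invented names, not the kernel API):

/*
 * Standalone sketch of the match-data dispatch used by clk_mt6765_probe()
 * above. All names here are made up for the demo.
 */
#include <stdio.h>
#include <string.h>

struct demo_pdev { const char *compatible; };

static int demo_apmixed_probe(struct demo_pdev *pdev)
{
	printf("probing apmixedsys block (%s)\n", pdev->compatible);
	return 0;
}

static int demo_top_probe(struct demo_pdev *pdev)
{
	printf("probing topckgen block (%s)\n", pdev->compatible);
	return 0;
}

struct demo_match {
	const char *compatible;
	int (*probe)(struct demo_pdev *pdev);	/* plays the role of .data */
};

static const struct demo_match demo_match_table[] = {
	{ "demo,apmixedsys", demo_apmixed_probe },
	{ "demo,topckgen", demo_top_probe },
	{ NULL, NULL },	/* sentinel */
};

/* Mirrors clk_mt6765_probe(): look up the callback for this device, run it. */
static int demo_probe(struct demo_pdev *pdev)
{
	const struct demo_match *m;

	for (m = demo_match_table; m->compatible; m++)
		if (!strcmp(m->compatible, pdev->compatible))
			return m->probe(pdev);

	return -1;	/* the real driver returns -EINVAL here */
}

int main(void)
{
	struct demo_pdev pdev = { .compatible = "demo,topckgen" };

	return demo_probe(&pdev);
}
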
diff --git a/drivers/clk/mediatek/clk-mt6779-mm.c b/drivers/clk/mediatek/clk-mt6779-mm.c
index fb5fbb8e3e41..059c1a41ac7a 100644
--- a/drivers/clk/mediatek/clk-mt6779-mm.c
+++ b/drivers/clk/mediatek/clk-mt6779-mm.c
@@ -84,15 +84,11 @@ static const struct mtk_gate mm_clks[] = {
GATE_MM1(CLK_MM_DISP_OVL_FBDC, "mm_disp_ovl_fbdc", "mm_sel", 16),
};
-static const struct of_device_id of_match_clk_mt6779_mm[] = {
- { .compatible = "mediatek,mt6779-mmsys", },
- {}
-};
-
static int clk_mt6779_mm_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->parent->of_node;
struct clk_onecell_data *clk_data;
- struct device_node *node = pdev->dev.of_node;
clk_data = mtk_alloc_clk_data(CLK_MM_NR_CLK);
@@ -106,7 +102,6 @@ static struct platform_driver clk_mt6779_mm_drv = {
.probe = clk_mt6779_mm_probe,
.driver = {
.name = "clk-mt6779-mm",
- .of_match_table = of_match_clk_mt6779_mm,
},
};
diff --git a/drivers/clk/mediatek/clk-mt6797-mm.c b/drivers/clk/mediatek/clk-mt6797-mm.c
index 8f05653b387d..01fdce287247 100644
--- a/drivers/clk/mediatek/clk-mt6797-mm.c
+++ b/drivers/clk/mediatek/clk-mt6797-mm.c
@@ -92,16 +92,12 @@ static const struct mtk_gate mm_clks[] = {
"clk26m", 3),
};
-static const struct of_device_id of_match_clk_mt6797_mm[] = {
- { .compatible = "mediatek,mt6797-mmsys", },
- {}
-};
-
static int clk_mt6797_mm_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->parent->of_node;
struct clk_onecell_data *clk_data;
int r;
- struct device_node *node = pdev->dev.of_node;
clk_data = mtk_alloc_clk_data(CLK_MM_NR);
@@ -121,7 +117,6 @@ static struct platform_driver clk_mt6797_mm_drv = {
.probe = clk_mt6797_mm_probe,
.driver = {
.name = "clk-mt6797-mm",
- .of_match_table = of_match_clk_mt6797_mm,
},
};
diff --git a/drivers/clk/mediatek/clk-mt8173-mm.c b/drivers/clk/mediatek/clk-mt8173-mm.c
new file mode 100644
index 000000000000..36fa20be77b6
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8173-mm.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: James Liao <jamesjj.liao@mediatek.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+#include <dt-bindings/clock/mt8173-clk.h>
+
+static const struct mtk_gate_regs mm0_cg_regs = {
+ .set_ofs = 0x0104,
+ .clr_ofs = 0x0108,
+ .sta_ofs = 0x0100,
+};
+
+static const struct mtk_gate_regs mm1_cg_regs = {
+ .set_ofs = 0x0114,
+ .clr_ofs = 0x0118,
+ .sta_ofs = 0x0110,
+};
+
+#define GATE_MM0(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm0_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_MM1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm1_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+static const struct mtk_gate mt8173_mm_clks[] = {
+ /* MM0 */
+ GATE_MM0(CLK_MM_SMI_COMMON, "mm_smi_common", "mm_sel", 0),
+ GATE_MM0(CLK_MM_SMI_LARB0, "mm_smi_larb0", "mm_sel", 1),
+ GATE_MM0(CLK_MM_CAM_MDP, "mm_cam_mdp", "mm_sel", 2),
+ GATE_MM0(CLK_MM_MDP_RDMA0, "mm_mdp_rdma0", "mm_sel", 3),
+ GATE_MM0(CLK_MM_MDP_RDMA1, "mm_mdp_rdma1", "mm_sel", 4),
+ GATE_MM0(CLK_MM_MDP_RSZ0, "mm_mdp_rsz0", "mm_sel", 5),
+ GATE_MM0(CLK_MM_MDP_RSZ1, "mm_mdp_rsz1", "mm_sel", 6),
+ GATE_MM0(CLK_MM_MDP_RSZ2, "mm_mdp_rsz2", "mm_sel", 7),
+ GATE_MM0(CLK_MM_MDP_TDSHP0, "mm_mdp_tdshp0", "mm_sel", 8),
+ GATE_MM0(CLK_MM_MDP_TDSHP1, "mm_mdp_tdshp1", "mm_sel", 9),
+ GATE_MM0(CLK_MM_MDP_WDMA, "mm_mdp_wdma", "mm_sel", 11),
+ GATE_MM0(CLK_MM_MDP_WROT0, "mm_mdp_wrot0", "mm_sel", 12),
+ GATE_MM0(CLK_MM_MDP_WROT1, "mm_mdp_wrot1", "mm_sel", 13),
+ GATE_MM0(CLK_MM_FAKE_ENG, "mm_fake_eng", "mm_sel", 14),
+ GATE_MM0(CLK_MM_MUTEX_32K, "mm_mutex_32k", "rtc_sel", 15),
+ GATE_MM0(CLK_MM_DISP_OVL0, "mm_disp_ovl0", "mm_sel", 16),
+ GATE_MM0(CLK_MM_DISP_OVL1, "mm_disp_ovl1", "mm_sel", 17),
+ GATE_MM0(CLK_MM_DISP_RDMA0, "mm_disp_rdma0", "mm_sel", 18),
+ GATE_MM0(CLK_MM_DISP_RDMA1, "mm_disp_rdma1", "mm_sel", 19),
+ GATE_MM0(CLK_MM_DISP_RDMA2, "mm_disp_rdma2", "mm_sel", 20),
+ GATE_MM0(CLK_MM_DISP_WDMA0, "mm_disp_wdma0", "mm_sel", 21),
+ GATE_MM0(CLK_MM_DISP_WDMA1, "mm_disp_wdma1", "mm_sel", 22),
+ GATE_MM0(CLK_MM_DISP_COLOR0, "mm_disp_color0", "mm_sel", 23),
+ GATE_MM0(CLK_MM_DISP_COLOR1, "mm_disp_color1", "mm_sel", 24),
+ GATE_MM0(CLK_MM_DISP_AAL, "mm_disp_aal", "mm_sel", 25),
+ GATE_MM0(CLK_MM_DISP_GAMMA, "mm_disp_gamma", "mm_sel", 26),
+ GATE_MM0(CLK_MM_DISP_UFOE, "mm_disp_ufoe", "mm_sel", 27),
+ GATE_MM0(CLK_MM_DISP_SPLIT0, "mm_disp_split0", "mm_sel", 28),
+ GATE_MM0(CLK_MM_DISP_SPLIT1, "mm_disp_split1", "mm_sel", 29),
+ GATE_MM0(CLK_MM_DISP_MERGE, "mm_disp_merge", "mm_sel", 30),
+ GATE_MM0(CLK_MM_DISP_OD, "mm_disp_od", "mm_sel", 31),
+ /* MM1 */
+ GATE_MM1(CLK_MM_DISP_PWM0MM, "mm_disp_pwm0mm", "mm_sel", 0),
+ GATE_MM1(CLK_MM_DISP_PWM026M, "mm_disp_pwm026m", "pwm_sel", 1),
+ GATE_MM1(CLK_MM_DISP_PWM1MM, "mm_disp_pwm1mm", "mm_sel", 2),
+ GATE_MM1(CLK_MM_DISP_PWM126M, "mm_disp_pwm126m", "pwm_sel", 3),
+ GATE_MM1(CLK_MM_DSI0_ENGINE, "mm_dsi0_engine", "mm_sel", 4),
+ GATE_MM1(CLK_MM_DSI0_DIGITAL, "mm_dsi0_digital", "dsi0_dig", 5),
+ GATE_MM1(CLK_MM_DSI1_ENGINE, "mm_dsi1_engine", "mm_sel", 6),
+ GATE_MM1(CLK_MM_DSI1_DIGITAL, "mm_dsi1_digital", "dsi1_dig", 7),
+ GATE_MM1(CLK_MM_DPI_PIXEL, "mm_dpi_pixel", "dpi0_sel", 8),
+ GATE_MM1(CLK_MM_DPI_ENGINE, "mm_dpi_engine", "mm_sel", 9),
+ GATE_MM1(CLK_MM_DPI1_PIXEL, "mm_dpi1_pixel", "lvds_pxl", 10),
+ GATE_MM1(CLK_MM_DPI1_ENGINE, "mm_dpi1_engine", "mm_sel", 11),
+ GATE_MM1(CLK_MM_HDMI_PIXEL, "mm_hdmi_pixel", "dpi0_sel", 12),
+ GATE_MM1(CLK_MM_HDMI_PLLCK, "mm_hdmi_pllck", "hdmi_sel", 13),
+ GATE_MM1(CLK_MM_HDMI_AUDIO, "mm_hdmi_audio", "apll1", 14),
+ GATE_MM1(CLK_MM_HDMI_SPDIF, "mm_hdmi_spdif", "apll2", 15),
+ GATE_MM1(CLK_MM_LVDS_PIXEL, "mm_lvds_pixel", "lvds_pxl", 16),
+ GATE_MM1(CLK_MM_LVDS_CTS, "mm_lvds_cts", "lvds_cts", 17),
+ GATE_MM1(CLK_MM_SMI_LARB4, "mm_smi_larb4", "mm_sel", 18),
+ GATE_MM1(CLK_MM_HDMI_HDCP, "mm_hdmi_hdcp", "hdcp_sel", 19),
+ GATE_MM1(CLK_MM_HDMI_HDCP24M, "mm_hdmi_hdcp24m", "hdcp_24m_sel", 20),
+};
+
+struct clk_mt8173_mm_driver_data {
+ const struct mtk_gate *gates_clk;
+ int gates_num;
+};
+
+static const struct clk_mt8173_mm_driver_data mt8173_mmsys_driver_data = {
+ .gates_clk = mt8173_mm_clks,
+ .gates_num = ARRAY_SIZE(mt8173_mm_clks),
+};
+
+static int clk_mt8173_mm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->parent->of_node;
+ const struct clk_mt8173_mm_driver_data *data;
+ struct clk_onecell_data *clk_data;
+ int ret;
+
+ clk_data = mtk_alloc_clk_data(CLK_MM_NR_CLK);
+ if (!clk_data)
+ return -ENOMEM;
+
+ data = &mt8173_mmsys_driver_data;
+
+ ret = mtk_clk_register_gates(node, data->gates_clk, data->gates_num,
+ clk_data);
+ if (ret)
+ return ret;
+
+ ret = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static struct platform_driver clk_mt8173_mm_drv = {
+ .driver = {
+ .name = "clk-mt8173-mm",
+ },
+ .probe = clk_mt8173_mm_probe,
+};
+
+builtin_platform_driver(clk_mt8173_mm_drv);
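
clk_mt8173_mm_probe() above takes its gate list from a driver-data struct and its device node from the parent device, which suggests the clock device is created by the mmsys driver rather than matched directly from DT; bundling the table with its length also means a future SoC variant only has to supply a new table. A minimal standalone sketch of that bundled-table pattern (plain C, invented names, not the kernel registration calls):

/*
 * Standalone sketch of the driver-data pattern used by
 * clk_mt8173_mm_probe() above. Names are invented for the demo.
 */
#include <stdio.h>

struct demo_gate { const char *name; int shift; };

struct demo_driver_data {
	const struct demo_gate *gates;
	int num_gates;
};

static const struct demo_gate demo_mm_gates[] = {
	{ "mm_smi_common", 0 },
	{ "mm_smi_larb0", 1 },
};

static const struct demo_driver_data demo_mmsys_data = {
	.gates = demo_mm_gates,
	.num_gates = sizeof(demo_mm_gates) / sizeof(demo_mm_gates[0]),
};

static int demo_register_gates(const struct demo_driver_data *data)
{
	int i;

	for (i = 0; i < data->num_gates; i++)
		printf("registering gate %s (bit %d)\n",
		       data->gates[i].name, data->gates[i].shift);

	return 0;
}

int main(void)
{
	return demo_register_gates(&demo_mmsys_data);
}
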
diff --git a/drivers/clk/mediatek/clk-mt8173.c b/drivers/clk/mediatek/clk-mt8173.c
index 537a7f49b0f7..8f898ac476c0 100644
--- a/drivers/clk/mediatek/clk-mt8173.c
+++ b/drivers/clk/mediatek/clk-mt8173.c
@@ -753,93 +753,6 @@ static const struct mtk_gate img_clks[] __initconst = {
GATE_IMG(CLK_IMG_FD, "img_fd", "mm_sel", 11),
};
-static const struct mtk_gate_regs mm0_cg_regs __initconst = {
- .set_ofs = 0x0104,
- .clr_ofs = 0x0108,
- .sta_ofs = 0x0100,
-};
-
-static const struct mtk_gate_regs mm1_cg_regs __initconst = {
- .set_ofs = 0x0114,
- .clr_ofs = 0x0118,
- .sta_ofs = 0x0110,
-};
-
-#define GATE_MM0(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &mm0_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
-
-#define GATE_MM1(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &mm1_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
-
-static const struct mtk_gate mm_clks[] __initconst = {
- /* MM0 */
- GATE_MM0(CLK_MM_SMI_COMMON, "mm_smi_common", "mm_sel", 0),
- GATE_MM0(CLK_MM_SMI_LARB0, "mm_smi_larb0", "mm_sel", 1),
- GATE_MM0(CLK_MM_CAM_MDP, "mm_cam_mdp", "mm_sel", 2),
- GATE_MM0(CLK_MM_MDP_RDMA0, "mm_mdp_rdma0", "mm_sel", 3),
- GATE_MM0(CLK_MM_MDP_RDMA1, "mm_mdp_rdma1", "mm_sel", 4),
- GATE_MM0(CLK_MM_MDP_RSZ0, "mm_mdp_rsz0", "mm_sel", 5),
- GATE_MM0(CLK_MM_MDP_RSZ1, "mm_mdp_rsz1", "mm_sel", 6),
- GATE_MM0(CLK_MM_MDP_RSZ2, "mm_mdp_rsz2", "mm_sel", 7),
- GATE_MM0(CLK_MM_MDP_TDSHP0, "mm_mdp_tdshp0", "mm_sel", 8),
- GATE_MM0(CLK_MM_MDP_TDSHP1, "mm_mdp_tdshp1", "mm_sel", 9),
- GATE_MM0(CLK_MM_MDP_WDMA, "mm_mdp_wdma", "mm_sel", 11),
- GATE_MM0(CLK_MM_MDP_WROT0, "mm_mdp_wrot0", "mm_sel", 12),
- GATE_MM0(CLK_MM_MDP_WROT1, "mm_mdp_wrot1", "mm_sel", 13),
- GATE_MM0(CLK_MM_FAKE_ENG, "mm_fake_eng", "mm_sel", 14),
- GATE_MM0(CLK_MM_MUTEX_32K, "mm_mutex_32k", "rtc_sel", 15),
- GATE_MM0(CLK_MM_DISP_OVL0, "mm_disp_ovl0", "mm_sel", 16),
- GATE_MM0(CLK_MM_DISP_OVL1, "mm_disp_ovl1", "mm_sel", 17),
- GATE_MM0(CLK_MM_DISP_RDMA0, "mm_disp_rdma0", "mm_sel", 18),
- GATE_MM0(CLK_MM_DISP_RDMA1, "mm_disp_rdma1", "mm_sel", 19),
- GATE_MM0(CLK_MM_DISP_RDMA2, "mm_disp_rdma2", "mm_sel", 20),
- GATE_MM0(CLK_MM_DISP_WDMA0, "mm_disp_wdma0", "mm_sel", 21),
- GATE_MM0(CLK_MM_DISP_WDMA1, "mm_disp_wdma1", "mm_sel", 22),
- GATE_MM0(CLK_MM_DISP_COLOR0, "mm_disp_color0", "mm_sel", 23),
- GATE_MM0(CLK_MM_DISP_COLOR1, "mm_disp_color1", "mm_sel", 24),
- GATE_MM0(CLK_MM_DISP_AAL, "mm_disp_aal", "mm_sel", 25),
- GATE_MM0(CLK_MM_DISP_GAMMA, "mm_disp_gamma", "mm_sel", 26),
- GATE_MM0(CLK_MM_DISP_UFOE, "mm_disp_ufoe", "mm_sel", 27),
- GATE_MM0(CLK_MM_DISP_SPLIT0, "mm_disp_split0", "mm_sel", 28),
- GATE_MM0(CLK_MM_DISP_SPLIT1, "mm_disp_split1", "mm_sel", 29),
- GATE_MM0(CLK_MM_DISP_MERGE, "mm_disp_merge", "mm_sel", 30),
- GATE_MM0(CLK_MM_DISP_OD, "mm_disp_od", "mm_sel", 31),
- /* MM1 */
- GATE_MM1(CLK_MM_DISP_PWM0MM, "mm_disp_pwm0mm", "mm_sel", 0),
- GATE_MM1(CLK_MM_DISP_PWM026M, "mm_disp_pwm026m", "pwm_sel", 1),
- GATE_MM1(CLK_MM_DISP_PWM1MM, "mm_disp_pwm1mm", "mm_sel", 2),
- GATE_MM1(CLK_MM_DISP_PWM126M, "mm_disp_pwm126m", "pwm_sel", 3),
- GATE_MM1(CLK_MM_DSI0_ENGINE, "mm_dsi0_engine", "mm_sel", 4),
- GATE_MM1(CLK_MM_DSI0_DIGITAL, "mm_dsi0_digital", "dsi0_dig", 5),
- GATE_MM1(CLK_MM_DSI1_ENGINE, "mm_dsi1_engine", "mm_sel", 6),
- GATE_MM1(CLK_MM_DSI1_DIGITAL, "mm_dsi1_digital", "dsi1_dig", 7),
- GATE_MM1(CLK_MM_DPI_PIXEL, "mm_dpi_pixel", "dpi0_sel", 8),
- GATE_MM1(CLK_MM_DPI_ENGINE, "mm_dpi_engine", "mm_sel", 9),
- GATE_MM1(CLK_MM_DPI1_PIXEL, "mm_dpi1_pixel", "lvds_pxl", 10),
- GATE_MM1(CLK_MM_DPI1_ENGINE, "mm_dpi1_engine", "mm_sel", 11),
- GATE_MM1(CLK_MM_HDMI_PIXEL, "mm_hdmi_pixel", "dpi0_sel", 12),
- GATE_MM1(CLK_MM_HDMI_PLLCK, "mm_hdmi_pllck", "hdmi_sel", 13),
- GATE_MM1(CLK_MM_HDMI_AUDIO, "mm_hdmi_audio", "apll1", 14),
- GATE_MM1(CLK_MM_HDMI_SPDIF, "mm_hdmi_spdif", "apll2", 15),
- GATE_MM1(CLK_MM_LVDS_PIXEL, "mm_lvds_pixel", "lvds_pxl", 16),
- GATE_MM1(CLK_MM_LVDS_CTS, "mm_lvds_cts", "lvds_cts", 17),
- GATE_MM1(CLK_MM_SMI_LARB4, "mm_smi_larb4", "mm_sel", 18),
- GATE_MM1(CLK_MM_HDMI_HDCP, "mm_hdmi_hdcp", "hdcp_sel", 19),
- GATE_MM1(CLK_MM_HDMI_HDCP24M, "mm_hdmi_hdcp24m", "hdcp_24m_sel", 20),
-};
-
static const struct mtk_gate_regs vdec0_cg_regs __initconst = {
.set_ofs = 0x0000,
.clr_ofs = 0x0004,
@@ -1144,23 +1057,6 @@ static void __init mtk_imgsys_init(struct device_node *node)
}
CLK_OF_DECLARE(mtk_imgsys, "mediatek,mt8173-imgsys", mtk_imgsys_init);
-static void __init mtk_mmsys_init(struct device_node *node)
-{
- struct clk_onecell_data *clk_data;
- int r;
-
- clk_data = mtk_alloc_clk_data(CLK_MM_NR_CLK);
-
- mtk_clk_register_gates(node, mm_clks, ARRAY_SIZE(mm_clks),
- clk_data);
-
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
- if (r)
- pr_err("%s(): could not register clock provider: %d\n",
- __func__, r);
-}
-CLK_OF_DECLARE(mtk_mmsys, "mediatek,mt8173-mmsys", mtk_mmsys_init);
-
static void __init mtk_vdecsys_init(struct device_node *node)
{
struct clk_onecell_data *clk_data;
diff --git a/drivers/clk/mediatek/clk-mt8183-mm.c b/drivers/clk/mediatek/clk-mt8183-mm.c
index 720c696b506d..9d60e09619c1 100644
--- a/drivers/clk/mediatek/clk-mt8183-mm.c
+++ b/drivers/clk/mediatek/clk-mt8183-mm.c
@@ -84,8 +84,9 @@ static const struct mtk_gate mm_clks[] = {
static int clk_mt8183_mm_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->parent->of_node;
struct clk_onecell_data *clk_data;
- struct device_node *node = pdev->dev.of_node;
clk_data = mtk_alloc_clk_data(CLK_MM_NR_CLK);
@@ -95,16 +96,10 @@ static int clk_mt8183_mm_probe(struct platform_device *pdev)
return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
}
-static const struct of_device_id of_match_clk_mt8183_mm[] = {
- { .compatible = "mediatek,mt8183-mmsys", },
- {}
-};
-
static struct platform_driver clk_mt8183_mm_drv = {
.probe = clk_mt8183_mm_probe,
.driver = {
.name = "clk-mt8183-mm",
- .of_match_table = of_match_clk_mt8183_mm,
},
};
diff --git a/drivers/clk/mediatek/clk-mux.c b/drivers/clk/mediatek/clk-mux.c
index 76f9cd039195..14e127e9a740 100644
--- a/drivers/clk/mediatek/clk-mux.c
+++ b/drivers/clk/mediatek/clk-mux.c
@@ -160,7 +160,7 @@ struct clk *mtk_clk_register_mux(const struct mtk_mux *mux,
spinlock_t *lock)
{
struct mtk_clk_mux *clk_mux;
- struct clk_init_data init;
+ struct clk_init_data init = {};
struct clk *clk;
clk_mux = kzalloc(sizeof(*clk_mux), GFP_KERNEL);
diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c
index fad616cac01e..30c15766ebb1 100644
--- a/drivers/clk/meson/g12a.c
+++ b/drivers/clk/meson/g12a.c
@@ -3702,7 +3702,9 @@ static struct clk_regmap g12a_hdmi = {
/*
* The MALI IP is clocked by two identical clocks (mali_0 and mali_1)
- * muxed by a glitch-free switch.
+ * muxed by a glitch-free switch. The CCF can manage this glitch-free
+ * mux because it does top-to-bottom updates of each clock tree and
+ * switches to the "inactive" one when CLK_SET_RATE_GATE is set.
*/
static const struct clk_parent_data g12a_mali_0_1_parent_data[] = {
{ .fw_name = "xtal", },
@@ -3726,7 +3728,13 @@ static struct clk_regmap g12a_mali_0_sel = {
.ops = &clk_regmap_mux_ops,
.parent_data = g12a_mali_0_1_parent_data,
.num_parents = 8,
- .flags = CLK_SET_RATE_NO_REPARENT,
+ /*
+ * Don't request the parent to change the rate because
+ * all GPU frequencies can be derived from the fclk_*
+ * clocks and one special GP0_PLL setting. This is
+ * important because we need the MPLL clocks for audio.
+ */
+ .flags = 0,
},
};
@@ -3743,7 +3751,7 @@ static struct clk_regmap g12a_mali_0_div = {
&g12a_mali_0_sel.hw
},
.num_parents = 1,
- .flags = CLK_SET_RATE_NO_REPARENT,
+ .flags = CLK_SET_RATE_PARENT,
},
};
@@ -3759,7 +3767,7 @@ static struct clk_regmap g12a_mali_0 = {
&g12a_mali_0_div.hw
},
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
+ .flags = CLK_SET_RATE_GATE | CLK_SET_RATE_PARENT,
},
};
@@ -3774,7 +3782,13 @@ static struct clk_regmap g12a_mali_1_sel = {
.ops = &clk_regmap_mux_ops,
.parent_data = g12a_mali_0_1_parent_data,
.num_parents = 8,
- .flags = CLK_SET_RATE_NO_REPARENT,
+ /*
+ * Don't request the parent to change the rate because
+ * all GPU frequencies can be derived from the fclk_*
+ * clocks and one special GP0_PLL setting. This is
+ * important because we need the MPLL clocks for audio.
+ */
+ .flags = 0,
},
};
@@ -3791,7 +3805,7 @@ static struct clk_regmap g12a_mali_1_div = {
&g12a_mali_1_sel.hw
},
.num_parents = 1,
- .flags = CLK_SET_RATE_NO_REPARENT,
+ .flags = CLK_SET_RATE_PARENT,
},
};
@@ -3807,7 +3821,7 @@ static struct clk_regmap g12a_mali_1 = {
&g12a_mali_1_div.hw
},
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
+ .flags = CLK_SET_RATE_GATE | CLK_SET_RATE_PARENT,
},
};
@@ -3827,7 +3841,7 @@ static struct clk_regmap g12a_mali = {
.ops = &clk_regmap_mux_ops,
.parent_hws = g12a_mali_parent_hws,
.num_parents = 2,
- .flags = CLK_SET_RATE_NO_REPARENT,
+ .flags = CLK_SET_RATE_PARENT,
},
};
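
Taken together, the flag changes in this file let the framework retune whichever of mali_0/mali_1 is currently parked and only then switch the glitch-free mux over to it: as the comment above puts it, CLK_SET_RATE_GATE forbids retuning a leg while it is running, so the rate change lands on the inactive leg. A toy model of that switch sequence (standalone C with invented names, not actual CCF code):

/*
 * Toy model of a glitch-free rate change across two legs and a mux.
 * The two-leg structure and all names are illustrative only.
 */
#include <stdio.h>

struct leg { const char *name; unsigned long rate_hz; int enabled; };

struct gfmux {
	struct leg legs[2];
	int active;		/* index of the leg driving the output */
};

static unsigned long gfmux_set_rate(struct gfmux *m, unsigned long rate_hz)
{
	int spare = !m->active;

	/* CLK_SET_RATE_GATE semantics: only a gated leg may change rate. */
	m->legs[spare].rate_hz = rate_hz;
	m->legs[spare].enabled = 1;

	/* The hardware mux switches without a glitch on the output. */
	m->active = spare;
	m->legs[!spare].enabled = 0;

	printf("output now on %s at %lu Hz\n",
	       m->legs[m->active].name, m->legs[m->active].rate_hz);
	return m->legs[m->active].rate_hz;
}

int main(void)
{
	struct gfmux mali = {
		.legs = { { "mali_0", 500000000, 1 }, { "mali_1", 0, 0 } },
		.active = 0,
	};

	gfmux_set_rate(&mali, 666666666);	/* reprograms mali_1, then switches */
	gfmux_set_rate(&mali, 800000000);	/* reprograms mali_0, then switches */
	return 0;
}
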
diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c
index 5fd6a574f8c3..0a68af6eec3d 100644
--- a/drivers/clk/meson/gxbb.c
+++ b/drivers/clk/meson/gxbb.c
@@ -957,7 +957,9 @@ static struct clk_regmap gxbb_sar_adc_clk = {
/*
* The MALI IP is clocked by two identical clocks (mali_0 and mali_1)
- * muxed by a glitch-free switch.
+ * muxed by a glitch-free switch. The CCF can manage this glitch-free
+ * mux because it does top-to-bottom updates of each clock tree and
+ * switches to the "inactive" one when CLK_SET_RATE_GATE is set.
*/
static const struct clk_parent_data gxbb_mali_0_1_parent_data[] = {
@@ -980,14 +982,15 @@ static struct clk_regmap gxbb_mali_0_sel = {
.hw.init = &(struct clk_init_data){
.name = "mali_0_sel",
.ops = &clk_regmap_mux_ops,
- /*
- * bits 10:9 selects from 8 possible parents:
- * xtal, gp0_pll, mpll2, mpll1, fclk_div7,
- * fclk_div4, fclk_div3, fclk_div5
- */
.parent_data = gxbb_mali_0_1_parent_data,
.num_parents = 8,
- .flags = CLK_SET_RATE_NO_REPARENT,
+ /*
+ * Don't request the parent to change the rate because
+ * all GPU frequencies can be derived from the fclk_*
+ * clocks and one special GP0_PLL setting. This is
+ * important because we need the MPLL clocks for audio.
+ */
+ .flags = 0,
},
};
@@ -1004,7 +1007,7 @@ static struct clk_regmap gxbb_mali_0_div = {
&gxbb_mali_0_sel.hw
},
.num_parents = 1,
- .flags = CLK_SET_RATE_NO_REPARENT,
+ .flags = CLK_SET_RATE_PARENT,
},
};
@@ -1020,7 +1023,7 @@ static struct clk_regmap gxbb_mali_0 = {
&gxbb_mali_0_div.hw
},
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
+ .flags = CLK_SET_RATE_GATE | CLK_SET_RATE_PARENT,
},
};
@@ -1033,14 +1036,15 @@ static struct clk_regmap gxbb_mali_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "mali_1_sel",
.ops = &clk_regmap_mux_ops,
- /*
- * bits 10:9 selects from 8 possible parents:
- * xtal, gp0_pll, mpll2, mpll1, fclk_div7,
- * fclk_div4, fclk_div3, fclk_div5
- */
.parent_data = gxbb_mali_0_1_parent_data,
.num_parents = 8,
- .flags = CLK_SET_RATE_NO_REPARENT,
+ /*
+ * Don't request the parent to change the rate because
+ * all GPU frequencies can be derived from the fclk_*
+ * clocks and one special GP0_PLL setting. This is
+ * important because we need the MPLL clocks for audio.
+ */
+ .flags = 0,
},
};
@@ -1057,7 +1061,7 @@ static struct clk_regmap gxbb_mali_1_div = {
&gxbb_mali_1_sel.hw
},
.num_parents = 1,
- .flags = CLK_SET_RATE_NO_REPARENT,
+ .flags = CLK_SET_RATE_PARENT,
},
};
@@ -1073,7 +1077,7 @@ static struct clk_regmap gxbb_mali_1 = {
&gxbb_mali_1_div.hw
},
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
+ .flags = CLK_SET_RATE_GATE | CLK_SET_RATE_PARENT,
},
};
@@ -1093,7 +1097,7 @@ static struct clk_regmap gxbb_mali = {
.ops = &clk_regmap_mux_ops,
.parent_hws = gxbb_mali_parent_hws,
.num_parents = 2,
- .flags = CLK_SET_RATE_NO_REPARENT,
+ .flags = CLK_SET_RATE_PARENT,
},
};
diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c
index 34a70c4b4899..edc09d050ecf 100644
--- a/drivers/clk/meson/meson8b.c
+++ b/drivers/clk/meson/meson8b.c
@@ -1077,7 +1077,7 @@ static struct clk_regmap meson8b_vid_pll_in_sel = {
* Meson8m2: vid2_pll
*/
.parent_hws = (const struct clk_hw *[]) {
- &meson8b_hdmi_pll_dco.hw
+ &meson8b_hdmi_pll_lvds_out.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1213,7 +1213,7 @@ static struct clk_regmap meson8b_vclk_in_en = {
static struct clk_regmap meson8b_vclk_div1_gate = {
.data = &(struct clk_regmap_gate_data){
- .offset = HHI_VID_CLK_DIV,
+ .offset = HHI_VID_CLK_CNTL,
.bit_idx = 0,
},
.hw.init = &(struct clk_init_data){
@@ -1243,7 +1243,7 @@ static struct clk_fixed_factor meson8b_vclk_div2_div = {
static struct clk_regmap meson8b_vclk_div2_div_gate = {
.data = &(struct clk_regmap_gate_data){
- .offset = HHI_VID_CLK_DIV,
+ .offset = HHI_VID_CLK_CNTL,
.bit_idx = 1,
},
.hw.init = &(struct clk_init_data){
@@ -1273,7 +1273,7 @@ static struct clk_fixed_factor meson8b_vclk_div4_div = {
static struct clk_regmap meson8b_vclk_div4_div_gate = {
.data = &(struct clk_regmap_gate_data){
- .offset = HHI_VID_CLK_DIV,
+ .offset = HHI_VID_CLK_CNTL,
.bit_idx = 2,
},
.hw.init = &(struct clk_init_data){
@@ -1303,7 +1303,7 @@ static struct clk_fixed_factor meson8b_vclk_div6_div = {
static struct clk_regmap meson8b_vclk_div6_div_gate = {
.data = &(struct clk_regmap_gate_data){
- .offset = HHI_VID_CLK_DIV,
+ .offset = HHI_VID_CLK_CNTL,
.bit_idx = 3,
},
.hw.init = &(struct clk_init_data){
@@ -1333,7 +1333,7 @@ static struct clk_fixed_factor meson8b_vclk_div12_div = {
static struct clk_regmap meson8b_vclk_div12_div_gate = {
.data = &(struct clk_regmap_gate_data){
- .offset = HHI_VID_CLK_DIV,
+ .offset = HHI_VID_CLK_CNTL,
.bit_idx = 4,
},
.hw.init = &(struct clk_init_data){
@@ -1725,7 +1725,7 @@ static struct clk_regmap meson8b_hdmi_sys_sel = {
},
.hw.init = &(struct clk_init_data){
.name = "hdmi_sys_sel",
- .ops = &clk_regmap_mux_ro_ops,
+ .ops = &clk_regmap_mux_ops,
/* FIXME: all other parents are unknown */
.parent_data = &(const struct clk_parent_data) {
.fw_name = "xtal",
@@ -1745,7 +1745,7 @@ static struct clk_regmap meson8b_hdmi_sys_div = {
},
.hw.init = &(struct clk_init_data){
.name = "hdmi_sys_div",
- .ops = &clk_regmap_divider_ro_ops,
+ .ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_hdmi_sys_sel.hw
},
@@ -1761,7 +1761,7 @@ static struct clk_regmap meson8b_hdmi_sys = {
},
.hw.init = &(struct clk_init_data) {
.name = "hdmi_sys",
- .ops = &clk_regmap_gate_ro_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_hdmi_sys_div.hw
},
@@ -1918,6 +1918,13 @@ static struct clk_regmap meson8b_mali = {
},
};
+static const struct reg_sequence meson8m2_gp_pll_init_regs[] = {
+ { .reg = HHI_GP_PLL_CNTL2, .def = 0x59c88000 },
+ { .reg = HHI_GP_PLL_CNTL3, .def = 0xca463823 },
+ { .reg = HHI_GP_PLL_CNTL4, .def = 0x0286a027 },
+ { .reg = HHI_GP_PLL_CNTL5, .def = 0x00003000 },
+};
+
static const struct pll_params_table meson8m2_gp_pll_params_table[] = {
PLL_PARAMS(182, 3),
{ /* sentinel */ },
@@ -1951,6 +1958,8 @@ static struct clk_regmap meson8m2_gp_pll_dco = {
.width = 1,
},
.table = meson8m2_gp_pll_params_table,
+ .init_regs = meson8m2_gp_pll_init_regs,
+ .init_count = ARRAY_SIZE(meson8m2_gp_pll_init_regs),
},
.hw.init = &(struct clk_init_data){
.name = "gp_pll_dco",
@@ -2063,7 +2072,7 @@ static struct clk_regmap meson8b_vpu_0 = {
&meson8b_vpu_0_div.hw
},
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
+ .flags = CLK_SET_RATE_GATE | CLK_SET_RATE_PARENT,
},
};
@@ -2134,10 +2143,18 @@ static struct clk_regmap meson8b_vpu_1 = {
&meson8b_vpu_1_div.hw
},
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
+ .flags = CLK_SET_RATE_GATE | CLK_SET_RATE_PARENT,
},
};
+/*
+ * The VPU clock has two identical clock trees (vpu_0 and vpu_1)
+ * muxed by a glitch-free switch on Meson8b and Meson8m2. The CCF can
+ * actually manage this glitch-free mux because it does top-to-bottom
+ * updates of each clock tree and switches to the "inactive" one when
+ * CLK_SET_RATE_GATE is set.
+ * Meson8 only has vpu_0 and no glitch-free mux.
+ */
static struct clk_regmap meson8b_vpu = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_VPU_CLK_CNTL,
@@ -2152,7 +2169,7 @@ static struct clk_regmap meson8b_vpu = {
&meson8b_vpu_1.hw,
},
.num_parents = 2,
- .flags = CLK_SET_RATE_NO_REPARENT,
+ .flags = CLK_SET_RATE_PARENT,
},
};
@@ -3506,54 +3523,87 @@ static struct clk_regmap *const meson8b_clk_regmaps[] = {
static const struct meson8b_clk_reset_line {
u32 reg;
u8 bit_idx;
+ bool active_low;
} meson8b_clk_reset_bits[] = {
[CLKC_RESET_L2_CACHE_SOFT_RESET] = {
- .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 30
+ .reg = HHI_SYS_CPU_CLK_CNTL0,
+ .bit_idx = 30,
+ .active_low = false,
},
[CLKC_RESET_AXI_64_TO_128_BRIDGE_A5_SOFT_RESET] = {
- .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 29
+ .reg = HHI_SYS_CPU_CLK_CNTL0,
+ .bit_idx = 29,
+ .active_low = false,
},
[CLKC_RESET_SCU_SOFT_RESET] = {
- .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 28
+ .reg = HHI_SYS_CPU_CLK_CNTL0,
+ .bit_idx = 28,
+ .active_low = false,
},
[CLKC_RESET_CPU3_SOFT_RESET] = {
- .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 27
+ .reg = HHI_SYS_CPU_CLK_CNTL0,
+ .bit_idx = 27,
+ .active_low = false,
},
[CLKC_RESET_CPU2_SOFT_RESET] = {
- .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 26
+ .reg = HHI_SYS_CPU_CLK_CNTL0,
+ .bit_idx = 26,
+ .active_low = false,
},
[CLKC_RESET_CPU1_SOFT_RESET] = {
- .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 25
+ .reg = HHI_SYS_CPU_CLK_CNTL0,
+ .bit_idx = 25,
+ .active_low = false,
},
[CLKC_RESET_CPU0_SOFT_RESET] = {
- .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 24
+ .reg = HHI_SYS_CPU_CLK_CNTL0,
+ .bit_idx = 24,
+ .active_low = false,
},
[CLKC_RESET_A5_GLOBAL_RESET] = {
- .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 18
+ .reg = HHI_SYS_CPU_CLK_CNTL0,
+ .bit_idx = 18,
+ .active_low = false,
},
[CLKC_RESET_A5_AXI_SOFT_RESET] = {
- .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 17
+ .reg = HHI_SYS_CPU_CLK_CNTL0,
+ .bit_idx = 17,
+ .active_low = false,
},
[CLKC_RESET_A5_ABP_SOFT_RESET] = {
- .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 16
+ .reg = HHI_SYS_CPU_CLK_CNTL0,
+ .bit_idx = 16,
+ .active_low = false,
},
[CLKC_RESET_AXI_64_TO_128_BRIDGE_MMC_SOFT_RESET] = {
- .reg = HHI_SYS_CPU_CLK_CNTL1, .bit_idx = 30
+ .reg = HHI_SYS_CPU_CLK_CNTL1,
+ .bit_idx = 30,
+ .active_low = false,
},
[CLKC_RESET_VID_CLK_CNTL_SOFT_RESET] = {
- .reg = HHI_VID_CLK_CNTL, .bit_idx = 15
+ .reg = HHI_VID_CLK_CNTL,
+ .bit_idx = 15,
+ .active_low = false,
},
[CLKC_RESET_VID_DIVIDER_CNTL_SOFT_RESET_POST] = {
- .reg = HHI_VID_DIVIDER_CNTL, .bit_idx = 7
+ .reg = HHI_VID_DIVIDER_CNTL,
+ .bit_idx = 7,
+ .active_low = false,
},
[CLKC_RESET_VID_DIVIDER_CNTL_SOFT_RESET_PRE] = {
- .reg = HHI_VID_DIVIDER_CNTL, .bit_idx = 3
+ .reg = HHI_VID_DIVIDER_CNTL,
+ .bit_idx = 3,
+ .active_low = false,
},
[CLKC_RESET_VID_DIVIDER_CNTL_RESET_N_POST] = {
- .reg = HHI_VID_DIVIDER_CNTL, .bit_idx = 1
+ .reg = HHI_VID_DIVIDER_CNTL,
+ .bit_idx = 1,
+ .active_low = true,
},
[CLKC_RESET_VID_DIVIDER_CNTL_RESET_N_PRE] = {
- .reg = HHI_VID_DIVIDER_CNTL, .bit_idx = 0
+ .reg = HHI_VID_DIVIDER_CNTL,
+ .bit_idx = 0,
+ .active_low = true,
},
};
@@ -3562,22 +3612,22 @@ static int meson8b_clk_reset_update(struct reset_controller_dev *rcdev,
{
struct meson8b_clk_reset *meson8b_clk_reset =
container_of(rcdev, struct meson8b_clk_reset, reset);
- unsigned long flags;
const struct meson8b_clk_reset_line *reset;
+ unsigned int value = 0;
+ unsigned long flags;
if (id >= ARRAY_SIZE(meson8b_clk_reset_bits))
return -EINVAL;
reset = &meson8b_clk_reset_bits[id];
+ if (assert != reset->active_low)
+ value = BIT(reset->bit_idx);
+
spin_lock_irqsave(&meson_clk_lock, flags);
- if (assert)
- regmap_update_bits(meson8b_clk_reset->regmap, reset->reg,
- BIT(reset->bit_idx), BIT(reset->bit_idx));
- else
- regmap_update_bits(meson8b_clk_reset->regmap, reset->reg,
- BIT(reset->bit_idx), 0);
+ regmap_update_bits(meson8b_clk_reset->regmap, reset->reg,
+ BIT(reset->bit_idx), value);
spin_unlock_irqrestore(&meson_clk_lock, flags);
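
The new active_low field lets both reset polarities share a single regmap_update_bits() call: for the *_RESET_N lines, asserting the reset means clearing the bit, so the value written is BIT(bit_idx) exactly when the requested state differs from the line's polarity. A small standalone illustration of that truth table (plain C, not the kernel helpers):

/*
 * Standalone illustration of the active_low handling added above: the bit
 * is written as 1 whenever the requested state differs from the line's
 * active-low polarity.
 */
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define BIT(n) (1U << (n))

static uint32_t reset_bit_value(bool assert, bool active_low, unsigned int bit_idx)
{
	uint32_t value = 0;

	if (assert != active_low)
		value = BIT(bit_idx);

	return value;
}

int main(void)
{
	/* Active-high line (e.g. a *_SOFT_RESET bit): assert => bit set. */
	printf("active-high assert:   0x%08x\n", reset_bit_value(true, false, 7));
	printf("active-high deassert: 0x%08x\n", reset_bit_value(false, false, 7));

	/* Active-low line (e.g. a *_RESET_N bit): assert => bit cleared. */
	printf("active-low assert:    0x%08x\n", reset_bit_value(true, true, 1));
	printf("active-low deassert:  0x%08x\n", reset_bit_value(false, true, 1));

	return 0;
}
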
diff --git a/drivers/clk/meson/meson8b.h b/drivers/clk/meson/meson8b.h
index c889fbeec30f..cd38ae2a9cb5 100644
--- a/drivers/clk/meson/meson8b.h
+++ b/drivers/clk/meson/meson8b.h
@@ -20,6 +20,10 @@
* [0] http://dn.odroid.com/S805/Datasheet/S805_Datasheet%20V0.8%2020150126.pdf
*/
#define HHI_GP_PLL_CNTL 0x40 /* 0x10 offset in data sheet */
+#define HHI_GP_PLL_CNTL2 0x44 /* 0x11 offset in data sheet */
+#define HHI_GP_PLL_CNTL3 0x48 /* 0x12 offset in data sheet */
+#define HHI_GP_PLL_CNTL4 0x4C /* 0x13 offset in data sheet */
+#define HHI_GP_PLL_CNTL5 0x50 /* 0x14 offset in data sheet */
#define HHI_VIID_CLK_DIV 0x128 /* 0x4a offset in data sheet */
#define HHI_VIID_CLK_CNTL 0x12c /* 0x4b offset in data sheet */
#define HHI_GCLK_MPEG0 0x140 /* 0x50 offset in data sheet */
@@ -146,7 +150,6 @@
#define CLKID_CTS_VDAC0 171
#define CLKID_HDMI_SYS_SEL 172
#define CLKID_HDMI_SYS_DIV 173
-#define CLKID_HDMI_SYS 174
#define CLKID_MALI_0_SEL 175
#define CLKID_MALI_0_DIV 176
#define CLKID_MALI_0 177
diff --git a/drivers/clk/mmp/Makefile b/drivers/clk/mmp/Makefile
index 14dc8a8a9d08..cbcc2f8430a2 100644
--- a/drivers/clk/mmp/Makefile
+++ b/drivers/clk/mmp/Makefile
@@ -8,7 +8,8 @@ obj-y += clk-apbc.o clk-apmu.o clk-frac.o clk-mix.o clk-gate.o clk.o
obj-$(CONFIG_RESET_CONTROLLER) += reset.o
obj-$(CONFIG_MACH_MMP_DT) += clk-of-pxa168.o clk-of-pxa910.o
-obj-$(CONFIG_COMMON_CLK_MMP2) += clk-of-mmp2.o clk-pll.o
+obj-$(CONFIG_COMMON_CLK_MMP2) += clk-of-mmp2.o clk-pll.o pwr-island.o
+obj-$(CONFIG_COMMON_CLK_MMP2_AUDIO) += clk-audio.o
obj-$(CONFIG_CPU_PXA168) += clk-pxa168.o
obj-$(CONFIG_CPU_PXA910) += clk-pxa910.o
diff --git a/drivers/clk/mmp/clk-audio.c b/drivers/clk/mmp/clk-audio.c
new file mode 100644
index 000000000000..eea69d498bd2
--- /dev/null
+++ b/drivers/clk/mmp/clk-audio.c
@@ -0,0 +1,443 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MMP Audio Clock Controller driver
+ *
+ * Copyright (C) 2020 Lubomir Rintel <lkundrak@v3.sk>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_clock.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <dt-bindings/clock/marvell,mmp2-audio.h>
+
+/* Audio Controller Registers */
+#define SSPA_AUD_CTRL 0x04
+#define SSPA_AUD_PLL_CTRL0 0x08
+#define SSPA_AUD_PLL_CTRL1 0x0c
+
+/* SSPA Audio Control Register */
+#define SSPA_AUD_CTRL_SYSCLK_SHIFT 0
+#define SSPA_AUD_CTRL_SYSCLK_DIV_SHIFT 1
+#define SSPA_AUD_CTRL_SSPA0_MUX_SHIFT 7
+#define SSPA_AUD_CTRL_SSPA0_SHIFT 8
+#define SSPA_AUD_CTRL_SSPA0_DIV_SHIFT 9
+#define SSPA_AUD_CTRL_SSPA1_SHIFT 16
+#define SSPA_AUD_CTRL_SSPA1_DIV_SHIFT 17
+#define SSPA_AUD_CTRL_SSPA1_MUX_SHIFT 23
+#define SSPA_AUD_CTRL_DIV_MASK 0x7e
+
+/* SSPA Audio PLL Control 0 Register */
+#define SSPA_AUD_PLL_CTRL0_DIV_OCLK_MODULO_MASK (0x7 << 28)
+#define SSPA_AUD_PLL_CTRL0_DIV_OCLK_MODULO(x) ((x) << 28)
+#define SSPA_AUD_PLL_CTRL0_FRACT_MASK (0xfffff << 8)
+#define SSPA_AUD_PLL_CTRL0_FRACT(x) ((x) << 8)
+#define SSPA_AUD_PLL_CTRL0_ENA_DITHER (1 << 7)
+#define SSPA_AUD_PLL_CTRL0_ICP_2UA (0 << 5)
+#define SSPA_AUD_PLL_CTRL0_ICP_5UA (1 << 5)
+#define SSPA_AUD_PLL_CTRL0_ICP_7UA (2 << 5)
+#define SSPA_AUD_PLL_CTRL0_ICP_10UA (3 << 5)
+#define SSPA_AUD_PLL_CTRL0_DIV_FBCCLK_MASK (0x3 << 3)
+#define SSPA_AUD_PLL_CTRL0_DIV_FBCCLK(x) ((x) << 3)
+#define SSPA_AUD_PLL_CTRL0_DIV_MCLK_MASK (0x1 << 2)
+#define SSPA_AUD_PLL_CTRL0_DIV_MCLK(x) ((x) << 2)
+#define SSPA_AUD_PLL_CTRL0_PD_OVPROT_DIS (1 << 1)
+#define SSPA_AUD_PLL_CTRL0_PU (1 << 0)
+
+/* SSPA Audio PLL Control 1 Register */
+#define SSPA_AUD_PLL_CTRL1_SEL_FAST_CLK (1 << 24)
+#define SSPA_AUD_PLL_CTRL1_CLK_SEL_MASK (1 << 11)
+#define SSPA_AUD_PLL_CTRL1_CLK_SEL_AUDIO_PLL (1 << 11)
+#define SSPA_AUD_PLL_CTRL1_CLK_SEL_VCXO (0 << 11)
+#define SSPA_AUD_PLL_CTRL1_DIV_OCLK_PATTERN_MASK (0x7ff << 0)
+#define SSPA_AUD_PLL_CTRL1_DIV_OCLK_PATTERN(x) ((x) << 0)
+
+struct mmp2_audio_clk {
+ void __iomem *mmio_base;
+
+ struct clk_hw audio_pll_hw;
+ struct clk_mux sspa_mux;
+ struct clk_mux sspa1_mux;
+ struct clk_divider sysclk_div;
+ struct clk_divider sspa0_div;
+ struct clk_divider sspa1_div;
+ struct clk_gate sysclk_gate;
+ struct clk_gate sspa0_gate;
+ struct clk_gate sspa1_gate;
+
+ u32 aud_ctrl;
+ u32 aud_pll_ctrl0;
+ u32 aud_pll_ctrl1;
+
+ spinlock_t lock;
+
+ /* Must be last */
+ struct clk_hw_onecell_data clk_data;
+};
+
+static const struct {
+ unsigned long parent_rate;
+ unsigned long freq_vco;
+ unsigned char mclk;
+ unsigned char fbcclk;
+ unsigned short fract;
+} predivs[] = {
+ { 26000000, 135475200, 0, 0, 0x8a18 },
+ { 26000000, 147456000, 0, 1, 0x0da1 },
+ { 38400000, 135475200, 1, 2, 0x8208 },
+ { 38400000, 147456000, 1, 3, 0xaaaa },
+};
+
+static const struct {
+ unsigned char divisor;
+ unsigned char modulo;
+ unsigned char pattern;
+} postdivs[] = {
+ { 1, 3, 0, },
+ { 2, 5, 0, },
+ { 4, 0, 0, },
+ { 6, 1, 1, },
+ { 8, 1, 0, },
+ { 9, 1, 2, },
+ { 12, 2, 1, },
+ { 16, 2, 0, },
+ { 18, 2, 2, },
+ { 24, 4, 1, },
+ { 36, 4, 2, },
+ { 48, 6, 1, },
+ { 72, 6, 2, },
+};
+
+static unsigned long audio_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct mmp2_audio_clk *priv = container_of(hw, struct mmp2_audio_clk, audio_pll_hw);
+ unsigned int prediv;
+ unsigned int postdiv;
+ u32 aud_pll_ctrl0;
+ u32 aud_pll_ctrl1;
+
+ aud_pll_ctrl0 = readl(priv->mmio_base + SSPA_AUD_PLL_CTRL0);
+ aud_pll_ctrl0 &= SSPA_AUD_PLL_CTRL0_DIV_OCLK_MODULO_MASK |
+ SSPA_AUD_PLL_CTRL0_FRACT_MASK |
+ SSPA_AUD_PLL_CTRL0_ENA_DITHER |
+ SSPA_AUD_PLL_CTRL0_DIV_FBCCLK_MASK |
+ SSPA_AUD_PLL_CTRL0_DIV_MCLK_MASK |
+ SSPA_AUD_PLL_CTRL0_PU;
+
+ aud_pll_ctrl1 = readl(priv->mmio_base + SSPA_AUD_PLL_CTRL1);
+ aud_pll_ctrl1 &= SSPA_AUD_PLL_CTRL1_CLK_SEL_MASK |
+ SSPA_AUD_PLL_CTRL1_DIV_OCLK_PATTERN_MASK;
+
+ for (prediv = 0; prediv < ARRAY_SIZE(predivs); prediv++) {
+ if (predivs[prediv].parent_rate != parent_rate)
+ continue;
+ for (postdiv = 0; postdiv < ARRAY_SIZE(postdivs); postdiv++) {
+ unsigned long freq;
+ u32 val;
+
+ val = SSPA_AUD_PLL_CTRL0_ENA_DITHER;
+ val |= SSPA_AUD_PLL_CTRL0_PU;
+ val |= SSPA_AUD_PLL_CTRL0_DIV_OCLK_MODULO(postdivs[postdiv].modulo);
+ val |= SSPA_AUD_PLL_CTRL0_FRACT(predivs[prediv].fract);
+ val |= SSPA_AUD_PLL_CTRL0_DIV_FBCCLK(predivs[prediv].fbcclk);
+ val |= SSPA_AUD_PLL_CTRL0_DIV_MCLK(predivs[prediv].mclk);
+ if (val != aud_pll_ctrl0)
+ continue;
+
+ val = SSPA_AUD_PLL_CTRL1_CLK_SEL_AUDIO_PLL;
+ val |= SSPA_AUD_PLL_CTRL1_DIV_OCLK_PATTERN(postdivs[postdiv].pattern);
+ if (val != aud_pll_ctrl1)
+ continue;
+
+ freq = predivs[prediv].freq_vco;
+ freq /= postdivs[postdiv].divisor;
+ return freq;
+ }
+ }
+
+ return 0;
+}
+
+static long audio_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ unsigned int prediv;
+ unsigned int postdiv;
+ long rounded = 0;
+
+ for (prediv = 0; prediv < ARRAY_SIZE(predivs); prediv++) {
+ if (predivs[prediv].parent_rate != *parent_rate)
+ continue;
+ for (postdiv = 0; postdiv < ARRAY_SIZE(postdivs); postdiv++) {
+ long freq = predivs[prediv].freq_vco;
+
+ freq /= postdivs[postdiv].divisor;
+ if (freq == rate)
+ return rate;
+ if (freq < rate)
+ continue;
+ if (rounded && freq > rounded)
+ continue;
+ rounded = freq;
+ }
+ }
+
+ return rounded;
+}
+
+static int audio_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct mmp2_audio_clk *priv = container_of(hw, struct mmp2_audio_clk, audio_pll_hw);
+ unsigned int prediv;
+ unsigned int postdiv;
+ unsigned long val;
+
+ for (prediv = 0; prediv < ARRAY_SIZE(predivs); prediv++) {
+ if (predivs[prediv].parent_rate != parent_rate)
+ continue;
+
+ for (postdiv = 0; postdiv < ARRAY_SIZE(postdivs); postdiv++) {
+ if (rate * postdivs[postdiv].divisor != predivs[prediv].freq_vco)
+ continue;
+
+ val = SSPA_AUD_PLL_CTRL0_ENA_DITHER;
+ val |= SSPA_AUD_PLL_CTRL0_PU;
+ val |= SSPA_AUD_PLL_CTRL0_DIV_OCLK_MODULO(postdivs[postdiv].modulo);
+ val |= SSPA_AUD_PLL_CTRL0_FRACT(predivs[prediv].fract);
+ val |= SSPA_AUD_PLL_CTRL0_DIV_FBCCLK(predivs[prediv].fbcclk);
+ val |= SSPA_AUD_PLL_CTRL0_DIV_MCLK(predivs[prediv].mclk);
+ writel(val, priv->mmio_base + SSPA_AUD_PLL_CTRL0);
+
+ val = SSPA_AUD_PLL_CTRL1_CLK_SEL_AUDIO_PLL;
+ val |= SSPA_AUD_PLL_CTRL1_DIV_OCLK_PATTERN(postdivs[postdiv].pattern);
+ writel(val, priv->mmio_base + SSPA_AUD_PLL_CTRL1);
+
+ return 0;
+ }
+ }
+
+ return -ERANGE;
+}
+
+static const struct clk_ops audio_pll_ops = {
+ .recalc_rate = audio_pll_recalc_rate,
+ .round_rate = audio_pll_round_rate,
+ .set_rate = audio_pll_set_rate,
+};
+
+static int register_clocks(struct mmp2_audio_clk *priv, struct device *dev)
+{
+ const struct clk_parent_data sspa_mux_parents[] = {
+ { .hw = &priv->audio_pll_hw },
+ { .fw_name = "i2s0" },
+ };
+ const struct clk_parent_data sspa1_mux_parents[] = {
+ { .hw = &priv->audio_pll_hw },
+ { .fw_name = "i2s1" },
+ };
+ int ret;
+
+ priv->audio_pll_hw.init = CLK_HW_INIT_FW_NAME("audio_pll",
+ "vctcxo", &audio_pll_ops,
+ CLK_SET_RATE_PARENT);
+ ret = devm_clk_hw_register(dev, &priv->audio_pll_hw);
+ if (ret)
+ return ret;
+
+ priv->sspa_mux.hw.init = CLK_HW_INIT_PARENTS_DATA("sspa_mux",
+ sspa_mux_parents, &clk_mux_ops,
+ CLK_SET_RATE_PARENT);
+ priv->sspa_mux.reg = priv->mmio_base + SSPA_AUD_CTRL;
+ priv->sspa_mux.mask = 1;
+ priv->sspa_mux.shift = SSPA_AUD_CTRL_SSPA0_MUX_SHIFT;
+ ret = devm_clk_hw_register(dev, &priv->sspa_mux.hw);
+ if (ret)
+ return ret;
+
+ priv->sysclk_div.hw.init = CLK_HW_INIT_HW("sys_div",
+ &priv->sspa_mux.hw, &clk_divider_ops,
+ CLK_SET_RATE_PARENT);
+ priv->sysclk_div.reg = priv->mmio_base + SSPA_AUD_CTRL;
+ priv->sysclk_div.shift = SSPA_AUD_CTRL_SYSCLK_DIV_SHIFT;
+ priv->sysclk_div.width = 6;
+ priv->sysclk_div.flags = CLK_DIVIDER_ONE_BASED;
+ priv->sysclk_div.flags |= CLK_DIVIDER_ROUND_CLOSEST;
+ priv->sysclk_div.flags |= CLK_DIVIDER_ALLOW_ZERO;
+ ret = devm_clk_hw_register(dev, &priv->sysclk_div.hw);
+ if (ret)
+ return ret;
+
+ priv->sysclk_gate.hw.init = CLK_HW_INIT_HW("sys_clk",
+ &priv->sysclk_div.hw, &clk_gate_ops,
+ CLK_SET_RATE_PARENT);
+ priv->sysclk_gate.reg = priv->mmio_base + SSPA_AUD_CTRL;
+ priv->sysclk_gate.bit_idx = SSPA_AUD_CTRL_SYSCLK_SHIFT;
+ ret = devm_clk_hw_register(dev, &priv->sysclk_gate.hw);
+ if (ret)
+ return ret;
+
+ priv->sspa0_div.hw.init = CLK_HW_INIT_HW("sspa0_div",
+ &priv->sspa_mux.hw, &clk_divider_ops, 0);
+ priv->sspa0_div.reg = priv->mmio_base + SSPA_AUD_CTRL;
+ priv->sspa0_div.shift = SSPA_AUD_CTRL_SSPA0_DIV_SHIFT;
+ priv->sspa0_div.width = 6;
+ priv->sspa0_div.flags = CLK_DIVIDER_ONE_BASED;
+ priv->sspa0_div.flags |= CLK_DIVIDER_ROUND_CLOSEST;
+ priv->sspa0_div.flags |= CLK_DIVIDER_ALLOW_ZERO;
+ ret = devm_clk_hw_register(dev, &priv->sspa0_div.hw);
+ if (ret)
+ return ret;
+
+ priv->sspa0_gate.hw.init = CLK_HW_INIT_HW("sspa0_clk",
+ &priv->sspa0_div.hw, &clk_gate_ops,
+ CLK_SET_RATE_PARENT);
+ priv->sspa0_gate.reg = priv->mmio_base + SSPA_AUD_CTRL;
+ priv->sspa0_gate.bit_idx = SSPA_AUD_CTRL_SSPA0_SHIFT;
+ ret = devm_clk_hw_register(dev, &priv->sspa0_gate.hw);
+ if (ret)
+ return ret;
+
+ priv->sspa1_mux.hw.init = CLK_HW_INIT_PARENTS_DATA("sspa1_mux",
+ sspa1_mux_parents, &clk_mux_ops,
+ CLK_SET_RATE_PARENT);
+ priv->sspa1_mux.reg = priv->mmio_base + SSPA_AUD_CTRL;
+ priv->sspa1_mux.mask = 1;
+ priv->sspa1_mux.shift = SSPA_AUD_CTRL_SSPA1_MUX_SHIFT;
+ ret = devm_clk_hw_register(dev, &priv->sspa1_mux.hw);
+ if (ret)
+ return ret;
+
+ priv->sspa1_div.hw.init = CLK_HW_INIT_HW("sspa1_div",
+ &priv->sspa1_mux.hw, &clk_divider_ops, 0);
+ priv->sspa1_div.reg = priv->mmio_base + SSPA_AUD_CTRL;
+ priv->sspa1_div.shift = SSPA_AUD_CTRL_SSPA1_DIV_SHIFT;
+ priv->sspa1_div.width = 6;
+ priv->sspa1_div.flags = CLK_DIVIDER_ONE_BASED;
+ priv->sspa1_div.flags |= CLK_DIVIDER_ROUND_CLOSEST;
+ priv->sspa1_div.flags |= CLK_DIVIDER_ALLOW_ZERO;
+ ret = devm_clk_hw_register(dev, &priv->sspa1_div.hw);
+ if (ret)
+ return ret;
+
+ priv->sspa1_gate.hw.init = CLK_HW_INIT_HW("sspa1_clk",
+ &priv->sspa1_div.hw, &clk_gate_ops,
+ CLK_SET_RATE_PARENT);
+ priv->sspa1_gate.reg = priv->mmio_base + SSPA_AUD_CTRL;
+ priv->sspa1_gate.bit_idx = SSPA_AUD_CTRL_SSPA1_SHIFT;
+ ret = devm_clk_hw_register(dev, &priv->sspa1_gate.hw);
+ if (ret)
+ return ret;
+
+ priv->clk_data.hws[MMP2_CLK_AUDIO_SYSCLK] = &priv->sysclk_gate.hw;
+ priv->clk_data.hws[MMP2_CLK_AUDIO_SSPA0] = &priv->sspa0_gate.hw;
+ priv->clk_data.hws[MMP2_CLK_AUDIO_SSPA1] = &priv->sspa1_gate.hw;
+ priv->clk_data.num = MMP2_CLK_AUDIO_NR_CLKS;
+
+ return of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
+ &priv->clk_data);
+}
+
+static int mmp2_audio_clk_probe(struct platform_device *pdev)
+{
+ struct mmp2_audio_clk *priv;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev,
+ struct_size(priv, clk_data.hws,
+ MMP2_CLK_AUDIO_NR_CLKS),
+ GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ spin_lock_init(&priv->lock);
+ platform_set_drvdata(pdev, priv);
+
+ priv->mmio_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->mmio_base))
+ return PTR_ERR(priv->mmio_base);
+
+ pm_runtime_enable(&pdev->dev);
+ ret = pm_clk_create(&pdev->dev);
+ if (ret)
+ goto disable_pm_runtime;
+
+ ret = pm_clk_add(&pdev->dev, "audio");
+ if (ret)
+ goto destroy_pm_clk;
+
+ ret = register_clocks(priv, &pdev->dev);
+ if (ret)
+ goto destroy_pm_clk;
+
+ return 0;
+
+destroy_pm_clk:
+ pm_clk_destroy(&pdev->dev);
+disable_pm_runtime:
+ pm_runtime_disable(&pdev->dev);
+
+ return ret;
+}
+
+static int mmp2_audio_clk_remove(struct platform_device *pdev)
+{
+ pm_clk_destroy(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static int __maybe_unused mmp2_audio_clk_suspend(struct device *dev)
+{
+ struct mmp2_audio_clk *priv = dev_get_drvdata(dev);
+
+ priv->aud_ctrl = readl(priv->mmio_base + SSPA_AUD_CTRL);
+ priv->aud_pll_ctrl0 = readl(priv->mmio_base + SSPA_AUD_PLL_CTRL0);
+ priv->aud_pll_ctrl1 = readl(priv->mmio_base + SSPA_AUD_PLL_CTRL1);
+ pm_clk_suspend(dev);
+
+ return 0;
+}
+
+static int __maybe_unused mmp2_audio_clk_resume(struct device *dev)
+{
+ struct mmp2_audio_clk *priv = dev_get_drvdata(dev);
+
+ pm_clk_resume(dev);
+ writel(priv->aud_ctrl, priv->mmio_base + SSPA_AUD_CTRL);
+ writel(priv->aud_pll_ctrl0, priv->mmio_base + SSPA_AUD_PLL_CTRL0);
+ writel(priv->aud_pll_ctrl1, priv->mmio_base + SSPA_AUD_PLL_CTRL1);
+
+ return 0;
+}
+
+static const struct dev_pm_ops mmp2_audio_clk_pm_ops = {
+ SET_RUNTIME_PM_OPS(mmp2_audio_clk_suspend, mmp2_audio_clk_resume, NULL)
+};
+
+static const struct of_device_id mmp2_audio_clk_of_match[] = {
+ { .compatible = "marvell,mmp2-audio-clock" },
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, mmp2_audio_clk_of_match);
+
+static struct platform_driver mmp2_audio_clk_driver = {
+ .driver = {
+ .name = "mmp2-audio-clock",
+ .of_match_table = of_match_ptr(mmp2_audio_clk_of_match),
+ .pm = &mmp2_audio_clk_pm_ops,
+ },
+ .probe = mmp2_audio_clk_probe,
+ .remove = mmp2_audio_clk_remove,
+};
+module_platform_driver(mmp2_audio_clk_driver);
+
+MODULE_AUTHOR("Lubomir Rintel <lkundrak@v3.sk>");
+MODULE_DESCRIPTION("Clock driver for MMP2 Audio subsystem");
+MODULE_LICENSE("GPL");
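
audio_pll_round_rate() above works purely from the two lookup tables: the prediv entry fixes the VCO frequency for the given parent rate, and the postdiv entry divides it down, so the rounded rate is the smallest achievable frequency at or above the request. A standalone sketch of that selection, reusing the table values from this patch but none of the kernel plumbing:

/*
 * Standalone sketch of the rate selection done by audio_pll_round_rate()
 * above: pick the VCO entry matching the parent rate, then the smallest
 * post-divided rate that is >= the request.
 */
#include <stdio.h>

static const struct { unsigned long parent_rate; unsigned long freq_vco; } predivs[] = {
	{ 26000000, 135475200 },
	{ 26000000, 147456000 },
	{ 38400000, 135475200 },
	{ 38400000, 147456000 },
};

static const unsigned char postdivs[] = { 1, 2, 4, 6, 8, 9, 12, 16, 18, 24, 36, 48, 72 };

static long round_audio_rate(unsigned long rate, unsigned long parent_rate)
{
	long rounded = 0;
	unsigned int i, j;

	for (i = 0; i < sizeof(predivs) / sizeof(predivs[0]); i++) {
		if (predivs[i].parent_rate != parent_rate)
			continue;
		for (j = 0; j < sizeof(postdivs); j++) {
			long freq = predivs[i].freq_vco / postdivs[j];

			if (freq == (long)rate)
				return freq;	/* exact hit */
			if (freq < (long)rate)
				continue;	/* too slow, skip */
			if (rounded && freq > rounded)
				continue;	/* keep the closest one above */
			rounded = freq;
		}
	}

	return rounded;
}

int main(void)
{
	/* 147456000 / 12 = 12288000, a common audio sysclk (256 * 48 kHz). */
	printf("round(12288000, 26 MHz) = %ld\n", round_audio_rate(12288000, 26000000));
	/* No exact hit: rounds up to the nearest achievable rate. */
	printf("round(12000000, 26 MHz) = %ld\n", round_audio_rate(12000000, 26000000));
	return 0;
}
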
diff --git a/drivers/clk/mmp/clk-frac.c b/drivers/clk/mmp/clk-frac.c
index fabc09aca6c4..48f592bd633d 100644
--- a/drivers/clk/mmp/clk-frac.c
+++ b/drivers/clk/mmp/clk-frac.c
@@ -28,13 +28,15 @@ static long clk_factor_round_rate(struct clk_hw *hw, unsigned long drate,
unsigned long *prate)
{
struct mmp_clk_factor *factor = to_clk_factor(hw);
- unsigned long rate = 0, prev_rate;
+ u64 rate = 0, prev_rate;
int i;
for (i = 0; i < factor->ftbl_cnt; i++) {
prev_rate = rate;
- rate = (((*prate / 10000) * factor->ftbl[i].den) /
- (factor->ftbl[i].num * factor->masks->factor)) * 10000;
+ rate = *prate;
+ rate *= factor->ftbl[i].den;
+ do_div(rate, factor->ftbl[i].num * factor->masks->factor);
+
if (rate > drate)
break;
}
@@ -54,6 +56,7 @@ static unsigned long clk_factor_recalc_rate(struct clk_hw *hw,
struct mmp_clk_factor *factor = to_clk_factor(hw);
struct mmp_clk_factor_masks *masks = factor->masks;
unsigned int val, num, den;
+ u64 rate;
val = readl_relaxed(factor->base);
@@ -66,8 +69,11 @@ static unsigned long clk_factor_recalc_rate(struct clk_hw *hw,
if (!den)
return 0;
- return (((parent_rate / 10000) * den) /
- (num * factor->masks->factor)) * 10000;
+ rate = parent_rate;
+ rate *= den;
+ do_div(rate, num * factor->masks->factor);
+
+ return rate;
}
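
The arithmetic change running through recalc_rate, round_rate and set_rate in this file is about precision: the old expressions pre-divided the parent rate by 10000 so the 32-bit multiply could not overflow, which quantized every result to 10 kHz, while the new code multiplies in 64 bits and divides with do_div(). A standalone before/after comparison (plain C, using the first i2s_factor_tbl entry added later in this patch and an example parent rate):

/*
 * Standalone before/after comparison of the factor arithmetic, using
 * num = 24868, den = 511, factor = 2 (nominally 2.0480 MHz) and an
 * example parent rate.
 */
#include <stdio.h>

int main(void)
{
	unsigned long prate = 199333333;	/* example parent rate in Hz */
	unsigned long num = 24868, den = 511, factor = 2;

	/* Old formula: pre-dividing by 10000 truncates to 10 kHz steps. */
	unsigned long old_rate = (((prate / 10000) * den) /
				  (num * factor)) * 10000;

	/* New formula: 64-bit multiply, then divide (do_div() in the kernel). */
	unsigned long long new_rate = prate;
	new_rate *= den;
	new_rate /= num * factor;

	printf("old: %lu Hz\n", old_rate);	/* prints 2040000 */
	printf("new: %llu Hz\n", new_rate);	/* prints 2048000 */
	return 0;
}
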
/* Configures new clock rate*/
@@ -78,12 +84,14 @@ static int clk_factor_set_rate(struct clk_hw *hw, unsigned long drate,
struct mmp_clk_factor_masks *masks = factor->masks;
int i;
unsigned long val;
- unsigned long rate = 0;
unsigned long flags = 0;
+ u64 rate = 0;
for (i = 0; i < factor->ftbl_cnt; i++) {
- rate = (((prate / 10000) * factor->ftbl[i].den) /
- (factor->ftbl[i].num * factor->masks->factor)) * 10000;
+ rate = prate;
+ rate *= factor->ftbl[i].den;
+ do_div(rate, factor->ftbl[i].num * factor->masks->factor);
+
if (rate > drate)
break;
}
@@ -140,7 +148,10 @@ static int clk_factor_init(struct clk_hw *hw)
val &= ~(masks->den_mask << masks->den_shift);
val |= (factor->ftbl[0].den & masks->den_mask) <<
masks->den_shift;
+ }
+ if (!(val & masks->enable_mask) || i >= factor->ftbl_cnt) {
+ val |= masks->enable_mask;
writel(val, factor->base);
}
diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c
index 52dc8b43acd9..67208aea94c5 100644
--- a/drivers/clk/mmp/clk-of-mmp2.c
+++ b/drivers/clk/mmp/clk-of-mmp2.c
@@ -17,8 +17,10 @@
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/of_address.h>
+#include <linux/clk.h>
#include <dt-bindings/clock/marvell,mmp2.h>
+#include <dt-bindings/power/marvell,mmp2.h>
#include "clk.h"
#include "reset.h"
@@ -45,6 +47,10 @@
#define APBC_SSP1 0x54
#define APBC_SSP2 0x58
#define APBC_SSP3 0x5c
+#define APBC_THERMAL0 0x90
+#define APBC_THERMAL1 0x98
+#define APBC_THERMAL2 0x9c
+#define APBC_THERMAL3 0xa0
#define APMU_SDH0 0x54
#define APMU_SDH1 0x58
#define APMU_SDH2 0xe8
@@ -55,18 +61,19 @@
#define APMU_DISP1 0x110
#define APMU_CCIC0 0x50
#define APMU_CCIC1 0xf4
-#define APBC_THERMAL0 0x90
-#define APBC_THERMAL1 0x98
-#define APBC_THERMAL2 0x9c
-#define APBC_THERMAL3 0xa0
#define APMU_USBHSIC0 0xf8
#define APMU_USBHSIC1 0xfc
#define APMU_GPU 0xcc
+#define APMU_AUDIO 0x10c
+#define APMU_CAMERA 0x1fc
#define MPMU_FCCR 0x8
#define MPMU_POSR 0x10
#define MPMU_UART_PLL 0x14
#define MPMU_PLL2_CR 0x34
+#define MPMU_I2S0_PLL 0x40
+#define MPMU_I2S1_PLL 0x44
+#define MPMU_ACGR 0x1024
/* MMP3 specific below */
#define MPMU_PLL3_CR 0x50
#define MPMU_PLL3_CTRL1 0x58
@@ -82,6 +89,8 @@ enum mmp2_clk_model {
struct mmp2_clk_unit {
struct mmp_clk_unit unit;
enum mmp2_clk_model model;
+ struct genpd_onecell_data pd_data;
+ struct generic_pm_domain *pm_domains[MMP2_NR_POWER_DOMAINS];
void __iomem *mpmu_base;
void __iomem *apmu_base;
void __iomem *apbc_base;
@@ -91,6 +100,7 @@ static struct mmp_param_fixed_rate_clk fixed_rate_clks[] = {
{MMP2_CLK_CLK32, "clk32", NULL, 0, 32768},
{MMP2_CLK_VCTCXO, "vctcxo", NULL, 0, 26000000},
{MMP2_CLK_USB_PLL, "usb_pll", NULL, 0, 480000000},
+ {0, "i2s_pll", NULL, 0, 99666667},
};
static struct mmp_param_pll_clk pll_clks[] = {
@@ -139,7 +149,35 @@ static struct mmp_clk_factor_tbl uart_factor_tbl[] = {
{.num = 3521, .den = 689}, /*19.23MHZ */
};
-static void mmp2_pll_init(struct mmp2_clk_unit *pxa_unit)
+static struct mmp_clk_factor_masks i2s_factor_masks = {
+ .factor = 2,
+ .num_mask = 0x7fff,
+ .den_mask = 0x1fff,
+ .num_shift = 0,
+ .den_shift = 15,
+ .enable_mask = 0xd0000000,
+};
+
+static struct mmp_clk_factor_tbl i2s_factor_tbl[] = {
+ {.num = 24868, .den = 511}, /* 2.0480 MHz */
+ {.num = 28003, .den = 793}, /* 2.8224 MHz */
+ {.num = 24941, .den = 1025}, /* 4.0960 MHz */
+ {.num = 28003, .den = 1586}, /* 5.6448 MHz */
+ {.num = 31158, .den = 2561}, /* 8.1920 MHz */
+ {.num = 16288, .den = 1845}, /* 11.2896 MHz */
+ {.num = 20772, .den = 2561}, /* 12.2880 MHz */
+ {.num = 8144, .den = 1845}, /* 22.5792 MHz */
+ {.num = 10386, .den = 2561}, /* 24.5760 MHz */
+};
+
+static DEFINE_SPINLOCK(acgr_lock);
+
+static struct mmp_param_gate_clk mpmu_gate_clks[] = {
+ {MMP2_CLK_I2S0, "i2s0_clk", "i2s0_pll", CLK_SET_RATE_PARENT, MPMU_ACGR, 0x200000, 0x200000, 0x0, 0, &acgr_lock},
+ {MMP2_CLK_I2S1, "i2s1_clk", "i2s1_pll", CLK_SET_RATE_PARENT, MPMU_ACGR, 0x100000, 0x100000, 0x0, 0, &acgr_lock},
+};
+
+static void mmp2_main_clk_init(struct mmp2_clk_unit *pxa_unit)
{
struct clk *clk;
struct mmp_clk_unit *unit = &pxa_unit->unit;
@@ -166,6 +204,20 @@ static void mmp2_pll_init(struct mmp2_clk_unit *pxa_unit)
&uart_factor_masks, uart_factor_tbl,
ARRAY_SIZE(uart_factor_tbl), NULL);
mmp_clk_add(unit, MMP2_CLK_UART_PLL, clk);
+
+ mmp_clk_register_factor("i2s0_pll", "pll1_4",
+ CLK_SET_RATE_PARENT,
+ pxa_unit->mpmu_base + MPMU_I2S0_PLL,
+ &i2s_factor_masks, i2s_factor_tbl,
+ ARRAY_SIZE(i2s_factor_tbl), NULL);
+ mmp_clk_register_factor("i2s1_pll", "pll1_4",
+ CLK_SET_RATE_PARENT,
+ pxa_unit->mpmu_base + MPMU_I2S1_PLL,
+ &i2s_factor_masks, i2s_factor_tbl,
+ ARRAY_SIZE(i2s_factor_tbl), NULL);
+
+ mmp_register_gate_clks(unit, mpmu_gate_clks, pxa_unit->mpmu_base,
+ ARRAY_SIZE(mpmu_gate_clks));
}
static DEFINE_SPINLOCK(uart0_lock);
@@ -271,6 +323,8 @@ static u32 mmp2_gpu_bus_parent_table[] = { 0x0000, 0x0020, 0x0030,
static const char * const mmp3_gpu_bus_parent_names[] = {"pll1_4", "pll1_6", "pll1_2", "pll2_2"};
static const char * const mmp3_gpu_gc_parent_names[] = {"pll1", "pll2", "pll1_p", "pll2_p"};
+static DEFINE_SPINLOCK(audio_lock);
+
static struct mmp_clk_mix_config ccic0_mix_config = {
.reg_info = DEFINE_MIX_REG_INFO(4, 17, 2, 6, 32),
};
@@ -326,6 +380,7 @@ static struct mmp_param_gate_clk apmu_gate_clks[] = {
{MMP2_CLK_CCIC1_PHY, "ccic1_phy_clk", "ccic1_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x24, 0x24, 0x0, 0, &ccic1_lock},
{MMP2_CLK_CCIC1_SPHY, "ccic1_sphy_clk", "ccic1_sphy_div", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x300, 0x300, 0x0, 0, &ccic1_lock},
{MMP2_CLK_GPU_BUS, "gpu_bus_clk", "gpu_bus_mux", CLK_SET_RATE_PARENT, APMU_GPU, 0xa, 0xa, 0x0, MMP_CLK_GATE_NEED_DELAY, &gpu_lock},
+ {MMP2_CLK_AUDIO, "audio_clk", "audio_mix_clk", CLK_SET_RATE_PARENT, APMU_AUDIO, 0x12, 0x12, 0x0, 0, &audio_lock},
};
static struct mmp_param_gate_clk mmp2_apmu_gate_clks[] = {
@@ -423,6 +478,41 @@ static void mmp2_clk_reset_init(struct device_node *np,
mmp_clk_reset_register(np, cells, nr_resets);
}
+static void mmp2_pm_domain_init(struct device_node *np,
+ struct mmp2_clk_unit *pxa_unit)
+{
+ if (pxa_unit->model == CLK_MODEL_MMP3) {
+ pxa_unit->pm_domains[MMP2_POWER_DOMAIN_GPU]
+ = mmp_pm_domain_register("gpu",
+ pxa_unit->apmu_base + APMU_GPU,
+ 0x0600, 0x40003, 0x18000c, 0, &gpu_lock);
+ } else {
+ pxa_unit->pm_domains[MMP2_POWER_DOMAIN_GPU]
+ = mmp_pm_domain_register("gpu",
+ pxa_unit->apmu_base + APMU_GPU,
+ 0x8600, 0x00003, 0x00000c,
+ MMP_PM_DOMAIN_NO_DISABLE, &gpu_lock);
+ }
+ pxa_unit->pd_data.num_domains++;
+
+ pxa_unit->pm_domains[MMP2_POWER_DOMAIN_AUDIO]
+ = mmp_pm_domain_register("audio",
+ pxa_unit->apmu_base + APMU_AUDIO,
+ 0x600, 0x2, 0, 0, &audio_lock);
+ pxa_unit->pd_data.num_domains++;
+
+ if (pxa_unit->model == CLK_MODEL_MMP3) {
+ pxa_unit->pm_domains[MMP3_POWER_DOMAIN_CAMERA]
+ = mmp_pm_domain_register("camera",
+ pxa_unit->apmu_base + APMU_CAMERA,
+ 0x600, 0, 0, 0, NULL);
+ pxa_unit->pd_data.num_domains++;
+ }
+
+ pxa_unit->pd_data.domains = pxa_unit->pm_domains;
+ of_genpd_add_provider_onecell(np, &pxa_unit->pd_data);
+}
+
static void __init mmp2_clk_init(struct device_node *np)
{
struct mmp2_clk_unit *pxa_unit;
@@ -454,9 +544,11 @@ static void __init mmp2_clk_init(struct device_node *np)
goto unmap_apmu_region;
}
+ mmp2_pm_domain_init(np, pxa_unit);
+
mmp_clk_init(np, &pxa_unit->unit, MMP2_NR_CLKS);
- mmp2_pll_init(pxa_unit);
+ mmp2_main_clk_init(pxa_unit);
mmp2_apb_periph_clk_init(pxa_unit);
diff --git a/drivers/clk/mmp/clk.h b/drivers/clk/mmp/clk.h
index 20dc1e5dd756..55ac05379781 100644
--- a/drivers/clk/mmp/clk.h
+++ b/drivers/clk/mmp/clk.h
@@ -3,6 +3,7 @@
#define __MACH_MMP_CLK_H
#include <linux/clk-provider.h>
+#include <linux/pm_domain.h>
#include <linux/clkdev.h>
#define APBC_NO_BUS_CTRL BIT(0)
@@ -16,6 +17,7 @@ struct mmp_clk_factor_masks {
unsigned int den_mask;
unsigned int num_shift;
unsigned int den_shift;
+ unsigned int enable_mask;
};
struct mmp_clk_factor_tbl {
@@ -251,4 +253,13 @@ void mmp_clk_init(struct device_node *np, struct mmp_clk_unit *unit,
int nr_clks);
void mmp_clk_add(struct mmp_clk_unit *unit, unsigned int id,
struct clk *clk);
+
+/* Power islands */
+#define MMP_PM_DOMAIN_NO_DISABLE BIT(0)
+
+struct generic_pm_domain *mmp_pm_domain_register(const char *name,
+ void __iomem *reg,
+ u32 power_on, u32 reset, u32 clock_enable,
+ unsigned int flags, spinlock_t *lock);
+
#endif
diff --git a/drivers/clk/mmp/pwr-island.c b/drivers/clk/mmp/pwr-island.c
new file mode 100644
index 000000000000..ab57c0e995c1
--- /dev/null
+++ b/drivers/clk/mmp/pwr-island.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MMP PMU power island support
+ *
+ * Copyright (C) 2020 Lubomir Rintel <lkundrak@v3.sk>
+ */
+
+#include <linux/pm_domain.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+
+#include "clk.h"
+
+#define to_mmp_pm_domain(genpd) container_of(genpd, struct mmp_pm_domain, genpd)
+
+struct mmp_pm_domain {
+ struct generic_pm_domain genpd;
+ void __iomem *reg;
+ spinlock_t *lock;
+ u32 power_on;
+ u32 reset;
+ u32 clock_enable;
+ unsigned int flags;
+};
+
+static int mmp_pm_domain_power_on(struct generic_pm_domain *genpd)
+{
+ struct mmp_pm_domain *pm_domain = to_mmp_pm_domain(genpd);
+ unsigned long flags = 0;
+ u32 val;
+
+ if (pm_domain->lock)
+ spin_lock_irqsave(pm_domain->lock, flags);
+
+ val = readl(pm_domain->reg);
+
+ /* Turn on the power island */
+ val |= pm_domain->power_on;
+ writel(val, pm_domain->reg);
+
+ /* Disable isolation */
+ val |= 0x100;
+ writel(val, pm_domain->reg);
+
+ /* Some blocks need to be reset after a power up */
+ if (pm_domain->reset || pm_domain->clock_enable) {
+ u32 after_power_on = val;
+
+ val &= ~pm_domain->reset;
+ writel(val, pm_domain->reg);
+
+ val |= pm_domain->clock_enable;
+ writel(val, pm_domain->reg);
+
+ val |= pm_domain->reset;
+ writel(val, pm_domain->reg);
+
+ writel(after_power_on, pm_domain->reg);
+ }
+
+ if (pm_domain->lock)
+ spin_unlock_irqrestore(pm_domain->lock, flags);
+
+ return 0;
+}
+
+static int mmp_pm_domain_power_off(struct generic_pm_domain *genpd)
+{
+ struct mmp_pm_domain *pm_domain = to_mmp_pm_domain(genpd);
+ unsigned long flags = 0;
+ u32 val;
+
+ if (pm_domain->flags & MMP_PM_DOMAIN_NO_DISABLE)
+ return 0;
+
+ if (pm_domain->lock)
+ spin_lock_irqsave(pm_domain->lock, flags);
+
+ /* Turn off and isolate the power island. */
+ val = readl(pm_domain->reg);
+ val &= ~pm_domain->power_on;
+ val &= ~0x100;
+ writel(val, pm_domain->reg);
+
+ if (pm_domain->lock)
+ spin_unlock_irqrestore(pm_domain->lock, flags);
+
+ return 0;
+}
+
+struct generic_pm_domain *mmp_pm_domain_register(const char *name,
+ void __iomem *reg,
+ u32 power_on, u32 reset, u32 clock_enable,
+ unsigned int flags, spinlock_t *lock)
+{
+ struct mmp_pm_domain *pm_domain;
+
+ pm_domain = kzalloc(sizeof(*pm_domain), GFP_KERNEL);
+ if (!pm_domain)
+ return ERR_PTR(-ENOMEM);
+
+ pm_domain->reg = reg;
+ pm_domain->power_on = power_on;
+ pm_domain->reset = reset;
+ pm_domain->clock_enable = clock_enable;
+ pm_domain->flags = flags;
+ pm_domain->lock = lock;
+
+ pm_genpd_init(&pm_domain->genpd, NULL, true);
+ pm_domain->genpd.name = name;
+ pm_domain->genpd.power_on = mmp_pm_domain_power_on;
+ pm_domain->genpd.power_off = mmp_pm_domain_power_off;
+
+ return &pm_domain->genpd;
+}
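
mmp_pm_domain_power_on() drives the island through a fixed sequence on a single PMU register: switch the power on, lift isolation, then (when the domain has reset or clock-enable bits) briefly clear the resets, enable the clocks, set the resets again and restore the earlier value. A standalone trace of those writes, using the MMP2 GPU island values passed to mmp_pm_domain_register() earlier in this patch; the step labels are an interpretation, not datasheet text:

/*
 * Standalone walk-through of the write sequence in mmp_pm_domain_power_on()
 * above, applied to a fake 32-bit register with power_on = 0x8600,
 * reset = 0x3 and clock_enable = 0xc (the MMP2 GPU values in this patch).
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t island_reg;	/* stands in for the APMU register */

static void reg_write(uint32_t val, const char *step)
{
	island_reg = val;
	printf("%-32s -> 0x%08x\n", step, island_reg);
}

int main(void)
{
	const uint32_t power_on = 0x8600;
	const uint32_t reset = 0x3;
	const uint32_t clock_enable = 0xc;
	uint32_t val = island_reg;
	uint32_t after_power_on;

	val |= power_on;
	reg_write(val, "turn on the power island");

	val |= 0x100;
	reg_write(val, "disable isolation");

	/* Reset/clock dance, mirroring the conditional block above. */
	after_power_on = val;

	val &= ~reset;
	reg_write(val, "clear the reset bits");

	val |= clock_enable;
	reg_write(val, "set the clock-enable bits");

	val |= reset;
	reg_write(val, "set the reset bits again");

	reg_write(after_power_on, "restore pre-reset value");

	return 0;
}
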
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index abb121f8de52..cde6ca90a06b 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -142,6 +142,14 @@ config MSM_GCC_8916
Say Y if you want to use devices such as UART, SPI i2c, USB,
SD/eMMC, display, graphics, camera etc.
+config MSM_GCC_8939
+ tristate "MSM8939 Global Clock Controller"
+ select QCOM_GDSC
+ help
+ Support for the global clock controller on msm8939 devices.
+ Say Y if you want to use devices such as UART, SPI i2c, USB,
+ SD/eMMC, display, graphics, camera etc.
+
config MSM_GCC_8960
tristate "APQ8064/MSM8960 Global Clock Controller"
help
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 691efbf7e81f..7ec8561a1270 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_MDM_GCC_9615) += gcc-mdm9615.o
obj-$(CONFIG_MDM_LCC_9615) += lcc-mdm9615.o
obj-$(CONFIG_MSM_GCC_8660) += gcc-msm8660.o
obj-$(CONFIG_MSM_GCC_8916) += gcc-msm8916.o
+obj-$(CONFIG_MSM_GCC_8939) += gcc-msm8939.o
obj-$(CONFIG_MSM_GCC_8960) += gcc-msm8960.o
obj-$(CONFIG_MSM_GCC_8974) += gcc-msm8974.o
obj-$(CONFIG_MSM_GCC_8994) += gcc-msm8994.o
diff --git a/drivers/clk/qcom/gcc-msm8916.c b/drivers/clk/qcom/gcc-msm8916.c
index 4e329a7baf2b..17e4a5a2a9fd 100644
--- a/drivers/clk/qcom/gcc-msm8916.c
+++ b/drivers/clk/qcom/gcc-msm8916.c
@@ -260,7 +260,7 @@ static struct clk_pll gpll0 = {
.l_reg = 0x21004,
.m_reg = 0x21008,
.n_reg = 0x2100c,
- .config_reg = 0x21014,
+ .config_reg = 0x21010,
.mode_reg = 0x21000,
.status_reg = 0x2101c,
.status_bit = 17,
@@ -287,7 +287,7 @@ static struct clk_pll gpll1 = {
.l_reg = 0x20004,
.m_reg = 0x20008,
.n_reg = 0x2000c,
- .config_reg = 0x20014,
+ .config_reg = 0x20010,
.mode_reg = 0x20000,
.status_reg = 0x2001c,
.status_bit = 17,
@@ -314,7 +314,7 @@ static struct clk_pll gpll2 = {
.l_reg = 0x4a004,
.m_reg = 0x4a008,
.n_reg = 0x4a00c,
- .config_reg = 0x4a014,
+ .config_reg = 0x4a010,
.mode_reg = 0x4a000,
.status_reg = 0x4a01c,
.status_bit = 17,
@@ -341,7 +341,7 @@ static struct clk_pll bimc_pll = {
.l_reg = 0x23004,
.m_reg = 0x23008,
.n_reg = 0x2300c,
- .config_reg = 0x23014,
+ .config_reg = 0x23010,
.mode_reg = 0x23000,
.status_reg = 0x2301c,
.status_bit = 17,
diff --git a/drivers/clk/qcom/gcc-msm8939.c b/drivers/clk/qcom/gcc-msm8939.c
new file mode 100644
index 000000000000..778354f82b1e
--- /dev/null
+++ b/drivers/clk/qcom/gcc-msm8939.c
@@ -0,0 +1,3988 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2020 Linaro Limited
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/clock/qcom,gcc-msm8939.h>
+#include <dt-bindings/reset/qcom,gcc-msm8939.h>
+
+#include "common.h"
+#include "clk-regmap.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "reset.h"
+#include "gdsc.h"
+
+enum {
+ P_XO,
+ P_GPLL0,
+ P_GPLL0_AUX,
+ P_BIMC,
+ P_GPLL1,
+ P_GPLL1_AUX,
+ P_GPLL2,
+ P_GPLL2_AUX,
+ P_GPLL3,
+ P_GPLL3_AUX,
+ P_GPLL4,
+ P_GPLL5,
+ P_GPLL5_AUX,
+ P_GPLL5_EARLY,
+ P_GPLL6,
+ P_GPLL6_AUX,
+ P_SLEEP_CLK,
+ P_DSI0_PHYPLL_BYTE,
+ P_DSI0_PHYPLL_DSI,
+ P_EXT_PRI_I2S,
+ P_EXT_SEC_I2S,
+ P_EXT_MCLK,
+};
+
+static struct clk_pll gpll0 = {
+ .l_reg = 0x21004,
+ .m_reg = 0x21008,
+ .n_reg = 0x2100c,
+ .config_reg = 0x21010,
+ .mode_reg = 0x21000,
+ .status_reg = 0x2101c,
+ .status_bit = 17,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
+static struct clk_regmap gpll0_vote = {
+ .enable_reg = 0x45000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll0_vote",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_vote_ops,
+ },
+};
+
+static struct clk_pll gpll1 = {
+ .l_reg = 0x20004,
+ .m_reg = 0x20008,
+ .n_reg = 0x2000c,
+ .config_reg = 0x20010,
+ .mode_reg = 0x20000,
+ .status_reg = 0x2001c,
+ .status_bit = 17,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
+static struct clk_regmap gpll1_vote = {
+ .enable_reg = 0x45000,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll1_vote",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &gpll1.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_vote_ops,
+ },
+};
+
+static struct clk_pll gpll2 = {
+ .l_reg = 0x4a004,
+ .m_reg = 0x4a008,
+ .n_reg = 0x4a00c,
+ .config_reg = 0x4a010,
+ .mode_reg = 0x4a000,
+ .status_reg = 0x4a01c,
+ .status_bit = 17,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll2",
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
+static struct clk_regmap gpll2_vote = {
+ .enable_reg = 0x45000,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll2_vote",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &gpll2.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_vote_ops,
+ },
+};
+
+static struct clk_pll bimc_pll = {
+ .l_reg = 0x23004,
+ .m_reg = 0x23008,
+ .n_reg = 0x2300c,
+ .config_reg = 0x23010,
+ .mode_reg = 0x23000,
+ .status_reg = 0x2301c,
+ .status_bit = 17,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "bimc_pll",
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
+static struct clk_regmap bimc_pll_vote = {
+ .enable_reg = 0x45000,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "bimc_pll_vote",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &bimc_pll.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_vote_ops,
+ },
+};
+
+static struct clk_pll gpll3 = {
+ .l_reg = 0x22004,
+ .m_reg = 0x22008,
+ .n_reg = 0x2200c,
+ .config_reg = 0x22010,
+ .mode_reg = 0x22000,
+ .status_reg = 0x2201c,
+ .status_bit = 17,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll3",
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
+static struct clk_regmap gpll3_vote = {
+ .enable_reg = 0x45000,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll3_vote",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &gpll3.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_vote_ops,
+ },
+};
+
+/* GPLL3 at 1100 MHz, main output enabled. */
+static const struct pll_config gpll3_config = {
+ .l = 57,
+ .m = 7,
+ .n = 24,
+ .vco_val = 0x0,
+ .vco_mask = BIT(20),
+ .pre_div_val = 0x0,
+ .pre_div_mask = BIT(12),
+ .post_div_val = 0x0,
+ .post_div_mask = BIT(9) | BIT(8),
+ .mn_ena_mask = BIT(24),
+ .main_output_mask = BIT(0),
+ .aux_output_mask = BIT(1),
+};
+
+static struct clk_pll gpll4 = {
+ .l_reg = 0x24004,
+ .m_reg = 0x24008,
+ .n_reg = 0x2400c,
+ .config_reg = 0x24010,
+ .mode_reg = 0x24000,
+ .status_reg = 0x2401c,
+ .status_bit = 17,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll4",
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
+static struct clk_regmap gpll4_vote = {
+ .enable_reg = 0x45000,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll4_vote",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &gpll4.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_vote_ops,
+ },
+};
+
+/* GPLL4 at 1200 MHz, main output enabled. */
+static struct pll_config gpll4_config = {
+ .l = 62,
+ .m = 1,
+ .n = 2,
+ .vco_val = 0x0,
+ .vco_mask = BIT(20),
+ .pre_div_val = 0x0,
+ .pre_div_mask = BIT(12),
+ .post_div_val = 0x0,
+ .post_div_mask = BIT(9) | BIT(8),
+ .mn_ena_mask = BIT(24),
+ .main_output_mask = BIT(0),
+};
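
A quick consistency check on the two PLL configurations above, assuming the customary 19.2 MHz XO reference for this SoC family: with the SR PLL rate given by xo * (L + M/N), GPLL3 works out to 19.2 MHz * (57 + 7/24) = 1100.0 MHz and GPLL4 to 19.2 MHz * (62 + 1/2) = 1200.0 MHz, matching the "1100 MHz" and "1200 MHz" comments.
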
+
+static struct clk_pll gpll5 = {
+ .l_reg = 0x25004,
+ .m_reg = 0x25008,
+ .n_reg = 0x2500c,
+ .config_reg = 0x25010,
+ .mode_reg = 0x25000,
+ .status_reg = 0x2501c,
+ .status_bit = 17,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll5",
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
+static struct clk_regmap gpll5_vote = {
+ .enable_reg = 0x45000,
+ .enable_mask = BIT(6),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll5_vote",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &gpll5.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_vote_ops,
+ },
+};
+
+static struct clk_pll gpll6 = {
+ .l_reg = 0x37004,
+ .m_reg = 0x37008,
+ .n_reg = 0x3700c,
+ .config_reg = 0x37010,
+ .mode_reg = 0x37000,
+ .status_reg = 0x3701c,
+ .status_bit = 17,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll6",
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
+static struct clk_regmap gpll6_vote = {
+ .enable_reg = 0x45000,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll6_vote",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &gpll6.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_vote_ops,
+ },
+};
+
+static const struct parent_map gcc_xo_gpll0_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_parent_data[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0_vote.hw },
+};
+
+static const struct parent_map gcc_xo_gpll0_bimc_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_BIMC, 2 },
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_bimc_parent_data[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0_vote.hw },
+ { .hw = &bimc_pll_vote.hw },
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll6a_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL6_AUX, 2 },
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_gpll6a_parent_data[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0_vote.hw },
+ { .hw = &gpll6_vote.hw },
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll2a_gpll3_gpll6a_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL2_AUX, 4 },
+ { P_GPLL3, 2 },
+ { P_GPLL6_AUX, 3 },
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_gpll2a_gpll3_gpll6a_parent_data[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0_vote.hw },
+ { .hw = &gpll2_vote.hw },
+ { .hw = &gpll3_vote.hw },
+ { .hw = &gpll6_vote.hw },
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll2_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL2, 2 },
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_gpll2_parent_data[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0_vote.hw },
+ { .hw = &gpll2_vote.hw },
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll2_gpll4_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL2, 3 },
+ { P_GPLL4, 2 },
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_gpll2_gpll4_parent_data[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0_vote.hw },
+ { .hw = &gpll2_vote.hw },
+ { .hw = &gpll4_vote.hw },
+};
+
+static const struct parent_map gcc_xo_gpll0a_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0_AUX, 2 },
+};
+
+static const struct clk_parent_data gcc_xo_gpll0a_parent_data[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0_vote.hw },
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll1a_sleep_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL1_AUX, 2 },
+ { P_SLEEP_CLK, 6 },
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_gpll1a_sleep_parent_data[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0_vote.hw },
+ { .hw = &gpll1_vote.hw },
+ { .fw_name = "sleep_clk", .name = "sleep_clk" },
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll1a_gpll6_sleep_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL1_AUX, 2 },
+ { P_GPLL6, 2 },
+ { P_SLEEP_CLK, 6 },
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_gpll1a_gpll6_sleep_parent_data[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0_vote.hw },
+ { .hw = &gpll1_vote.hw },
+ { .hw = &gpll6_vote.hw },
+ { .fw_name = "sleep_clk", .name = "sleep_clk" },
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll1a_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL1_AUX, 2 },
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_gpll1a_parent_data[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0_vote.hw },
+ { .hw = &gpll1_vote.hw },
+};
+
+static const struct parent_map gcc_xo_dsibyte_map[] = {
+ { P_XO, 0, },
+ { P_DSI0_PHYPLL_BYTE, 2 },
+};
+
+static const struct clk_parent_data gcc_xo_dsibyte_parent_data[] = {
+ { .fw_name = "xo" },
+ { .fw_name = "dsi0pllbyte", .name = "dsi0pllbyte" },
+};
+
+static const struct parent_map gcc_xo_gpll0a_dsibyte_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0_AUX, 2 },
+ { P_DSI0_PHYPLL_BYTE, 1 },
+};
+
+static const struct clk_parent_data gcc_xo_gpll0a_dsibyte_parent_data[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0_vote.hw },
+ { .fw_name = "dsi0pllbyte", .name = "dsi0pllbyte" },
+};
+
+static const struct parent_map gcc_xo_gpll1_dsiphy_gpll6_gpll3a_gpll0a_map[] = {
+ { P_XO, 0 },
+ { P_GPLL1, 1 },
+ { P_DSI0_PHYPLL_DSI, 2 },
+ { P_GPLL6, 3 },
+ { P_GPLL3_AUX, 4 },
+ { P_GPLL0_AUX, 5 },
+};
+
+static const struct clk_parent_data gcc_xo_gpll1_dsiphy_gpll6_gpll3a_gpll0a_parent_data[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll1_vote.hw },
+ { .fw_name = "dsi0pll", .name = "dsi0pll" },
+ { .hw = &gpll6_vote.hw },
+ { .hw = &gpll3_vote.hw },
+ { .hw = &gpll0_vote.hw },
+};
+
+static const struct parent_map gcc_xo_gpll0a_dsiphy_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0_AUX, 2 },
+ { P_DSI0_PHYPLL_DSI, 1 },
+};
+
+static const struct clk_parent_data gcc_xo_gpll0a_dsiphy_parent_data[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0_vote.hw },
+ { .fw_name = "dsi0pll", .name = "dsi0pll" },
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll5a_gpll6_bimc_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL5_AUX, 3 },
+ { P_GPLL6, 2 },
+ { P_BIMC, 4 },
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_gpll5a_gpll6_bimc_parent_data[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0_vote.hw },
+ { .hw = &gpll5_vote.hw },
+ { .hw = &gpll6_vote.hw },
+ { .hw = &bimc_pll_vote.hw },
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll1_sleep_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL1, 2 },
+ { P_SLEEP_CLK, 6 }
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_gpll1_sleep_parent_data[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0_vote.hw },
+ { .hw = &gpll1_vote.hw },
+ { .fw_name = "sleep_clk", .name = "sleep_clk" },
+};
+
+static const struct parent_map gcc_xo_gpll1_epi2s_emclk_sleep_map[] = {
+ { P_XO, 0 },
+ { P_GPLL1, 1 },
+ { P_EXT_PRI_I2S, 2 },
+ { P_EXT_MCLK, 3 },
+ { P_SLEEP_CLK, 6 }
+};
+
+static const struct clk_parent_data gcc_xo_gpll1_epi2s_emclk_sleep_parent_data[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0_vote.hw },
+ { .fw_name = "ext_pri_i2s", .name = "ext_pri_i2s" },
+ { .fw_name = "ext_mclk", .name = "ext_mclk" },
+ { .fw_name = "sleep_clk", .name = "sleep_clk" },
+};
+
+static const struct parent_map gcc_xo_gpll1_esi2s_emclk_sleep_map[] = {
+ { P_XO, 0 },
+ { P_GPLL1, 1 },
+ { P_EXT_SEC_I2S, 2 },
+ { P_EXT_MCLK, 3 },
+ { P_SLEEP_CLK, 6 }
+};
+
+static const struct clk_parent_data gcc_xo_gpll1_esi2s_emclk_sleep_parent_data[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll1_vote.hw },
+ { .fw_name = "ext_sec_i2s", .name = "ext_sec_i2s" },
+ { .fw_name = "ext_mclk", .name = "ext_mclk" },
+ { .fw_name = "sleep_clk", .name = "sleep_clk" },
+};
+
+static const struct parent_map gcc_xo_sleep_map[] = {
+ { P_XO, 0 },
+ { P_SLEEP_CLK, 6 }
+};
+
+static const struct clk_parent_data gcc_xo_sleep_parent_data[] = {
+ { .fw_name = "xo" },
+ { .fw_name = "sleep_clk", .name = "sleep_clk" },
+};
+
+static const struct parent_map gcc_xo_gpll1_emclk_sleep_map[] = {
+ { P_XO, 0 },
+ { P_GPLL1, 1 },
+ { P_EXT_MCLK, 2 },
+ { P_SLEEP_CLK, 6 }
+};
+
+static const struct clk_parent_data gcc_xo_gpll1_emclk_sleep_parent_data[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll1_vote.hw },
+ { .fw_name = "ext_mclk", .name = "ext_mclk" },
+ { .fw_name = "sleep_clk", .name = "sleep_clk" },
+};
+
+static const struct parent_map gcc_xo_gpll6_gpll0_map[] = {
+ { P_XO, 0 },
+ { P_GPLL6, 1 },
+ { P_GPLL0, 2 },
+};
+
+static const struct clk_parent_data gcc_xo_gpll6_gpll0_parent_data[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll6_vote.hw },
+ { .hw = &gpll0_vote.hw },
+};
+
+static const struct parent_map gcc_xo_gpll6_gpll0a_map[] = {
+ { P_XO, 0 },
+ { P_GPLL6, 1 },
+ { P_GPLL0_AUX, 2 },
+};
+
+static const struct clk_parent_data gcc_xo_gpll6_gpll0a_parent_data[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll6_vote.hw },
+ { .hw = &gpll0_vote.hw },
+};
+
+static struct clk_rcg2 pcnoc_bfdcd_clk_src = {
+ .cmd_rcgr = 0x27000,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pcnoc_bfdcd_clk_src",
+ .parent_data = gcc_xo_gpll0_parent_data,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 system_noc_bfdcd_clk_src = {
+ .cmd_rcgr = 0x26004,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll6a_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "system_noc_bfdcd_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll6a_parent_data,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 bimc_ddr_clk_src = {
+ .cmd_rcgr = 0x32004,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_bimc_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "bimc_ddr_clk_src",
+ .parent_data = gcc_xo_gpll0_bimc_parent_data,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ .flags = CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_ahb_clk[] = {
+ F(40000000, P_GPLL0, 10, 1, 2),
+ F(80000000, P_GPLL0, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 camss_ahb_clk_src = {
+ .cmd_rcgr = 0x5a000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_camss_ahb_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camss_ahb_clk_src",
+ .parent_data = gcc_xo_gpll0_parent_data,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_apss_ahb_clk[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(50000000, P_GPLL0, 16, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(133330000, P_GPLL0, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 apss_ahb_clk_src = {
+ .cmd_rcgr = 0x46000,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_apss_ahb_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "apss_ahb_clk_src",
+ .parent_data = gcc_xo_gpll0_parent_data,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_csi0_1_clk[] = {
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 csi0_clk_src = {
+ .cmd_rcgr = 0x4e020,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_camss_csi0_1_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi0_clk_src",
+ .parent_data = gcc_xo_gpll0_parent_data,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 csi1_clk_src = {
+ .cmd_rcgr = 0x4f020,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_camss_csi0_1_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi1_clk_src",
+ .parent_data = gcc_xo_gpll0_parent_data,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_oxili_gfx3d_clk[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(50000000, P_GPLL0, 16, 0, 0),
+ F(80000000, P_GPLL0, 10, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(160000000, P_GPLL0, 5, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ F(220000000, P_GPLL3, 5, 0, 0),
+ F(266670000, P_GPLL0, 3, 0, 0),
+ F(310000000, P_GPLL2_AUX, 3, 0, 0),
+ F(400000000, P_GPLL0, 2, 0, 0),
+ F(465000000, P_GPLL2_AUX, 2, 0, 0),
+ F(550000000, P_GPLL3, 2, 0, 0),
+ { }
+};
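
Reading the non-GPLL0 rows backwards through their dividers, the P_GPLL2_AUX entries (310 MHz * 3, 465 MHz * 2) imply a GPLL2 rate of about 930 MHz, and the P_GPLL3 entries (220 MHz * 5, 550 MHz * 2) imply 1100 MHz, consistent with the gpll3_config above; the GPLL2 figure is inferred from this table rather than stated anywhere in the patch.
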
+
+static struct clk_rcg2 gfx3d_clk_src = {
+ .cmd_rcgr = 0x59000,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll2a_gpll3_gpll6a_map,
+ .freq_tbl = ftbl_gcc_oxili_gfx3d_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gfx3d_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll2a_gpll3_gpll6a_parent_data,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_vfe0_clk[] = {
+ F(50000000, P_GPLL0, 16, 0, 0),
+ F(80000000, P_GPLL0, 10, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(160000000, P_GPLL0, 5, 0, 0),
+ F(177780000, P_GPLL0, 4.5, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ F(266670000, P_GPLL0, 3, 0, 0),
+ F(320000000, P_GPLL0, 2.5, 0, 0),
+ F(400000000, P_GPLL0, 2, 0, 0),
+ F(465000000, P_GPLL2, 2, 0, 0),
+ F(480000000, P_GPLL4, 2.5, 0, 0),
+ F(600000000, P_GPLL4, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 vfe0_clk_src = {
+ .cmd_rcgr = 0x58000,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll2_gpll4_map,
+ .freq_tbl = ftbl_gcc_camss_vfe0_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "vfe0_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll2_gpll4_parent_data,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_blsp1_qup1_6_i2c_apps_clk[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(50000000, P_GPLL0, 16, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 blsp1_qup1_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x0200c,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_6_i2c_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup1_i2c_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_parent_data,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_blsp1_qup1_6_spi_apps_clk[] = {
+ F(960000, P_XO, 10, 1, 2),
+ F(4800000, P_XO, 4, 0, 0),
+ F(9600000, P_XO, 2, 0, 0),
+ F(16000000, P_GPLL0, 10, 1, 5),
+ F(19200000, P_XO, 1, 0, 0),
+ F(25000000, P_GPLL0, 16, 1, 2),
+ F(50000000, P_GPLL0, 16, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 blsp1_qup1_spi_apps_clk_src = {
+ .cmd_rcgr = 0x02024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_6_spi_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup1_spi_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_parent_data,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup2_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x03000,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_6_i2c_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup2_i2c_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_parent_data,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup2_spi_apps_clk_src = {
+ .cmd_rcgr = 0x03014,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_6_spi_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup2_spi_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_parent_data,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup3_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x04000,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_6_i2c_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup3_i2c_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_parent_data,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup3_spi_apps_clk_src = {
+ .cmd_rcgr = 0x04024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_6_spi_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup3_spi_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_parent_data,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup4_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x05000,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_6_i2c_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup4_i2c_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_parent_data,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup4_spi_apps_clk_src = {
+ .cmd_rcgr = 0x05024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_6_spi_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup4_spi_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_parent_data,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup5_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x06000,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_6_i2c_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup5_i2c_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_parent_data,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup5_spi_apps_clk_src = {
+ .cmd_rcgr = 0x06024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_6_spi_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup5_spi_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_parent_data,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup6_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x07000,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_6_i2c_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup6_i2c_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_parent_data,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup6_spi_apps_clk_src = {
+ .cmd_rcgr = 0x07024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_6_spi_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup6_spi_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_parent_data,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_blsp1_uart1_6_apps_clk[] = {
+ F(3686400, P_GPLL0, 1, 72, 15625),
+ F(7372800, P_GPLL0, 1, 144, 15625),
+ F(14745600, P_GPLL0, 1, 288, 15625),
+ F(16000000, P_GPLL0, 10, 1, 5),
+ F(19200000, P_XO, 1, 0, 0),
+ F(24000000, P_GPLL0, 1, 3, 100),
+ F(25000000, P_GPLL0, 16, 1, 2),
+ F(32000000, P_GPLL0, 1, 1, 25),
+ F(40000000, P_GPLL0, 1, 1, 20),
+ F(46400000, P_GPLL0, 1, 29, 500),
+ F(48000000, P_GPLL0, 1, 3, 50),
+ F(51200000, P_GPLL0, 1, 8, 125),
+ F(56000000, P_GPLL0, 1, 7, 100),
+ F(58982400, P_GPLL0, 1, 1152, 15625),
+ F(60000000, P_GPLL0, 1, 3, 40),
+ { }
+};
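
To read one row of the table above: each F(rate, source, div, m, n) entry encodes rate = (source / div) * m / n, with m = n = 0 meaning the MN counter is bypassed. Assuming GPLL0 runs at its customary 800 MHz on this family (an assumption, not stated in the patch), F(3686400, P_GPLL0, 1, 72, 15625) gives 800 MHz * 72 / 15625 = 3.6864 MHz, the familiar UART baud-reference rate, and F(16000000, P_GPLL0, 10, 1, 5) gives 800 MHz / 10 * 1/5 = 16 MHz.
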
+
+static struct clk_rcg2 blsp1_uart1_apps_clk_src = {
+ .cmd_rcgr = 0x02044,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_uart1_6_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_uart1_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_parent_data,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_uart2_apps_clk_src = {
+ .cmd_rcgr = 0x03034,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_uart1_6_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_uart2_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_parent_data,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_cci_clk[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cci_clk_src = {
+ .cmd_rcgr = 0x51000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0a_map,
+ .freq_tbl = ftbl_gcc_camss_cci_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cci_clk_src",
+ .parent_data = gcc_xo_gpll0a_parent_data,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_gp0_1_clk[] = {
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 camss_gp0_clk_src = {
+ .cmd_rcgr = 0x54000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll1a_sleep_map,
+ .freq_tbl = ftbl_gcc_camss_gp0_1_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camss_gp0_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll1a_sleep_parent_data,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 camss_gp1_clk_src = {
+ .cmd_rcgr = 0x55000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll1a_sleep_map,
+ .freq_tbl = ftbl_gcc_camss_gp0_1_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camss_gp1_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll1a_sleep_parent_data,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_jpeg0_clk[] = {
+ F(133330000, P_GPLL0, 6, 0, 0),
+ F(266670000, P_GPLL0, 3, 0, 0),
+ F(320000000, P_GPLL0, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 jpeg0_clk_src = {
+ .cmd_rcgr = 0x57000,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_camss_jpeg0_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "jpeg0_clk_src",
+ .parent_data = gcc_xo_gpll0_parent_data,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_mclk0_1_clk[] = {
+ F(24000000, P_GPLL0, 1, 1, 45),
+ F(66670000, P_GPLL0, 12, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 mclk0_clk_src = {
+ .cmd_rcgr = 0x52000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll1a_gpll6_sleep_map,
+ .freq_tbl = ftbl_gcc_camss_mclk0_1_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mclk0_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll1a_gpll6_sleep_parent_data,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 mclk1_clk_src = {
+ .cmd_rcgr = 0x53000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll1a_gpll6_sleep_map,
+ .freq_tbl = ftbl_gcc_camss_mclk0_1_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mclk1_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll1a_gpll6_sleep_parent_data,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_csi0_1phytimer_clk[] = {
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 csi0phytimer_clk_src = {
+ .cmd_rcgr = 0x4e000,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll1a_map,
+ .freq_tbl = ftbl_gcc_camss_csi0_1phytimer_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi0phytimer_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll1a_parent_data,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 csi1phytimer_clk_src = {
+ .cmd_rcgr = 0x4f000,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll1a_map,
+ .freq_tbl = ftbl_gcc_camss_csi0_1phytimer_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi1phytimer_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll1a_parent_data,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_cpp_clk[] = {
+ F(160000000, P_GPLL0, 5, 0, 0),
+ F(320000000, P_GPLL0, 2.5, 0, 0),
+ F(465000000, P_GPLL2, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cpp_clk_src = {
+ .cmd_rcgr = 0x58018,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll2_map,
+ .freq_tbl = ftbl_gcc_camss_cpp_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cpp_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll2_parent_data,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_crypto_clk[] = {
+ F(50000000, P_GPLL0, 16, 0, 0),
+ F(80000000, P_GPLL0, 10, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(160000000, P_GPLL0, 5, 0, 0),
+ { }
+};
+
+/* This is not in the documentation but is in the downstream driver */
+static struct clk_rcg2 crypto_clk_src = {
+ .cmd_rcgr = 0x16004,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_crypto_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "crypto_clk_src",
+ .parent_data = gcc_xo_gpll0_parent_data,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_gp1_3_clk[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gp1_clk_src = {
+ .cmd_rcgr = 0x08004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll1a_sleep_map,
+ .freq_tbl = ftbl_gcc_gp1_3_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gp1_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll1a_sleep_parent_data,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gp2_clk_src = {
+ .cmd_rcgr = 0x09004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll1a_sleep_map,
+ .freq_tbl = ftbl_gcc_gp1_3_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gp2_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll1a_sleep_parent_data,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gp3_clk_src = {
+ .cmd_rcgr = 0x0a004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll1a_sleep_map,
+ .freq_tbl = ftbl_gcc_gp1_3_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gp3_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll1a_sleep_parent_data,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 byte0_clk_src = {
+ .cmd_rcgr = 0x4d044,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0a_dsibyte_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "byte0_clk_src",
+ .parent_data = gcc_xo_gpll0a_dsibyte_parent_data,
+ .num_parents = 3,
+ .ops = &clk_byte2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_rcg2 byte1_clk_src = {
+ .cmd_rcgr = 0x4d0b0,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0a_dsibyte_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "byte1_clk_src",
+ .parent_data = gcc_xo_gpll0a_dsibyte_parent_data,
+ .num_parents = 3,
+ .ops = &clk_byte2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_mdss_esc_clk[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 esc0_clk_src = {
+ .cmd_rcgr = 0x4d060,
+ .hid_width = 5,
+ .parent_map = gcc_xo_dsibyte_map,
+ .freq_tbl = ftbl_gcc_mdss_esc_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "esc0_clk_src",
+ .parent_data = gcc_xo_dsibyte_parent_data,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 esc1_clk_src = {
+ .cmd_rcgr = 0x4d0a8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_dsibyte_map,
+ .freq_tbl = ftbl_gcc_mdss_esc_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "esc1_clk_src",
+ .parent_data = gcc_xo_dsibyte_parent_data,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_mdss_mdp_clk[] = {
+ F(50000000, P_GPLL0_AUX, 16, 0, 0),
+ F(80000000, P_GPLL0_AUX, 10, 0, 0),
+ F(100000000, P_GPLL0_AUX, 8, 0, 0),
+ F(160000000, P_GPLL0_AUX, 5, 0, 0),
+ F(177780000, P_GPLL0_AUX, 4.5, 0, 0),
+ F(200000000, P_GPLL0_AUX, 4, 0, 0),
+ F(266670000, P_GPLL0_AUX, 3, 0, 0),
+ F(307200000, P_GPLL1, 2, 0, 0),
+ F(366670000, P_GPLL3_AUX, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 mdp_clk_src = {
+ .cmd_rcgr = 0x4d014,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll1_dsiphy_gpll6_gpll3a_gpll0a_map,
+ .freq_tbl = ftbl_gcc_mdss_mdp_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mdp_clk_src",
+ .parent_data = gcc_xo_gpll1_dsiphy_gpll6_gpll3a_gpll0a_parent_data,
+ .num_parents = 6,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 pclk0_clk_src = {
+ .cmd_rcgr = 0x4d000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0a_dsiphy_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pclk0_clk_src",
+ .parent_data = gcc_xo_gpll0a_dsiphy_parent_data,
+ .num_parents = 3,
+ .ops = &clk_pixel_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_rcg2 pclk1_clk_src = {
+ .cmd_rcgr = 0x4d0b8,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0a_dsiphy_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pclk1_clk_src",
+ .parent_data = gcc_xo_gpll0a_dsiphy_parent_data,
+ .num_parents = 3,
+ .ops = &clk_pixel_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_mdss_vsync_clk[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 vsync_clk_src = {
+ .cmd_rcgr = 0x4d02c,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0a_map,
+ .freq_tbl = ftbl_gcc_mdss_vsync_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "vsync_clk_src",
+ .parent_data = gcc_xo_gpll0a_parent_data,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pdm2_clk[] = {
+ F(64000000, P_GPLL0, 12.5, 0, 0),
+ { }
+};
+
+/* This is not in the documentation but is in the downstream driver */
+static struct clk_rcg2 pdm2_clk_src = {
+ .cmd_rcgr = 0x44010,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_pdm2_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pdm2_clk_src",
+ .parent_data = gcc_xo_gpll0_parent_data,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc_apps_clk[] = {
+ F(144000, P_XO, 16, 3, 25),
+ F(400000, P_XO, 12, 1, 4),
+ F(20000000, P_GPLL0, 10, 1, 4),
+ F(25000000, P_GPLL0, 16, 1, 2),
+ F(50000000, P_GPLL0, 16, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(177770000, P_GPLL0, 4.5, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 sdcc1_apps_clk_src = {
+ .cmd_rcgr = 0x42004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_sdcc_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "sdcc1_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_parent_data,
+ .num_parents = 2,
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static struct clk_rcg2 sdcc2_apps_clk_src = {
+ .cmd_rcgr = 0x43004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_sdcc_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "sdcc2_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_parent_data,
+ .num_parents = 2,
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_apss_tcu_clk[] = {
+ F(154285000, P_GPLL6, 7, 0, 0),
+ F(320000000, P_GPLL0, 2.5, 0, 0),
+ F(400000000, P_GPLL0, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 apss_tcu_clk_src = {
+ .cmd_rcgr = 0x1207c,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll5a_gpll6_bimc_map,
+ .freq_tbl = ftbl_gcc_apss_tcu_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "apss_tcu_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll5a_gpll6_bimc_parent_data,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_bimc_gpu_clk[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ F(266500000, P_BIMC, 4, 0, 0),
+ F(400000000, P_GPLL0, 2, 0, 0),
+ F(533000000, P_BIMC, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 bimc_gpu_clk_src = {
+ .cmd_rcgr = 0x31028,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll5a_gpll6_bimc_map,
+ .freq_tbl = ftbl_gcc_bimc_gpu_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "bimc_gpu_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll5a_gpll6_bimc_parent_data,
+ .num_parents = 5,
+ .flags = CLK_GET_RATE_NOCACHE,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb_hs_system_clk[] = {
+ F(80000000, P_GPLL0, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 usb_hs_system_clk_src = {
+ .cmd_rcgr = 0x41010,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_usb_hs_system_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "usb_hs_system_clk_src",
+ .parent_data = gcc_xo_gpll0_parent_data,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb_fs_system_clk[] = {
+ F(64000000, P_GPLL0, 12.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 usb_fs_system_clk_src = {
+ .cmd_rcgr = 0x3f010,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_usb_fs_system_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "usb_fs_system_clk_src",
+ .parent_data = gcc_xo_gpll6_gpll0_parent_data,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb_fs_ic_clk[] = {
+ F(60000000, P_GPLL6, 1, 1, 18),
+ { }
+};
+
+static struct clk_rcg2 usb_fs_ic_clk_src = {
+ .cmd_rcgr = 0x3f034,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_usb_fs_ic_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "usb_fs_ic_clk_src",
+ .parent_data = gcc_xo_gpll6_gpll0a_parent_data,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ultaudio_ahb_clk[] = {
+ F(3200000, P_XO, 6, 0, 0),
+ F(6400000, P_XO, 3, 0, 0),
+ F(9600000, P_XO, 2, 0, 0),
+ F(19200000, P_XO, 1, 0, 0),
+ F(40000000, P_GPLL0, 10, 1, 2),
+ F(66670000, P_GPLL0, 12, 0, 0),
+ F(80000000, P_GPLL0, 10, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 ultaudio_ahbfabric_clk_src = {
+ .cmd_rcgr = 0x1c010,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .parent_map = gcc_xo_gpll0_gpll1_sleep_map,
+ .freq_tbl = ftbl_gcc_ultaudio_ahb_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "ultaudio_ahbfabric_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll1_sleep_parent_data,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_ultaudio_ahbfabric_ixfabric_clk = {
+ .halt_reg = 0x1c028,
+ .clkr = {
+ .enable_reg = 0x1c028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ultaudio_ahbfabric_ixfabric_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &ultaudio_ahbfabric_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ultaudio_ahbfabric_ixfabric_lpm_clk = {
+ .halt_reg = 0x1c024,
+ .clkr = {
+ .enable_reg = 0x1c024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ultaudio_ahbfabric_ixfabric_lpm_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &ultaudio_ahbfabric_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ultaudio_lpaif_i2s_clk[] = {
+ F(128000, P_XO, 10, 1, 15),
+ F(256000, P_XO, 5, 1, 15),
+ F(384000, P_XO, 5, 1, 10),
+ F(512000, P_XO, 5, 2, 15),
+ F(576000, P_XO, 5, 3, 20),
+ F(705600, P_GPLL1, 16, 1, 80),
+ F(768000, P_XO, 5, 1, 5),
+ F(800000, P_XO, 5, 5, 24),
+ F(1024000, P_XO, 5, 4, 15),
+ F(1152000, P_XO, 1, 3, 50),
+ F(1411200, P_GPLL1, 16, 1, 40),
+ F(1536000, P_XO, 1, 2, 25),
+ F(1600000, P_XO, 12, 0, 0),
+ F(1728000, P_XO, 5, 9, 20),
+ F(2048000, P_XO, 5, 8, 15),
+ F(2304000, P_XO, 5, 3, 5),
+ F(2400000, P_XO, 8, 0, 0),
+ F(2822400, P_GPLL1, 16, 1, 20),
+ F(3072000, P_XO, 5, 4, 5),
+ F(4096000, P_GPLL1, 9, 2, 49),
+ F(4800000, P_XO, 4, 0, 0),
+ F(5644800, P_GPLL1, 16, 1, 10),
+ F(6144000, P_GPLL1, 7, 1, 21),
+ F(8192000, P_GPLL1, 9, 4, 49),
+ F(9600000, P_XO, 2, 0, 0),
+ F(11289600, P_GPLL1, 16, 1, 5),
+ F(12288000, P_GPLL1, 7, 2, 21),
+ { }
+};
+
+static struct clk_rcg2 ultaudio_lpaif_pri_i2s_clk_src = {
+ .cmd_rcgr = 0x1c054,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .parent_map = gcc_xo_gpll1_epi2s_emclk_sleep_map,
+ .freq_tbl = ftbl_gcc_ultaudio_lpaif_i2s_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "ultaudio_lpaif_pri_i2s_clk_src",
+ .parent_data = gcc_xo_gpll1_epi2s_emclk_sleep_parent_data,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_ultaudio_lpaif_pri_i2s_clk = {
+ .halt_reg = 0x1c068,
+ .clkr = {
+ .enable_reg = 0x1c068,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ultaudio_lpaif_pri_i2s_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &ultaudio_lpaif_pri_i2s_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_rcg2 ultaudio_lpaif_sec_i2s_clk_src = {
+ .cmd_rcgr = 0x1c06c,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .parent_map = gcc_xo_gpll1_esi2s_emclk_sleep_map,
+ .freq_tbl = ftbl_gcc_ultaudio_lpaif_i2s_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "ultaudio_lpaif_sec_i2s_clk_src",
+ .parent_data = gcc_xo_gpll1_esi2s_emclk_sleep_parent_data,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_ultaudio_lpaif_sec_i2s_clk = {
+ .halt_reg = 0x1c080,
+ .clkr = {
+ .enable_reg = 0x1c080,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ultaudio_lpaif_sec_i2s_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &ultaudio_lpaif_sec_i2s_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_rcg2 ultaudio_lpaif_aux_i2s_clk_src = {
+ .cmd_rcgr = 0x1c084,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .parent_map = gcc_xo_gpll1_emclk_sleep_map,
+ .freq_tbl = ftbl_gcc_ultaudio_lpaif_i2s_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "ultaudio_lpaif_aux_i2s_clk_src",
+ .parent_data = gcc_xo_gpll1_esi2s_emclk_sleep_parent_data,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_ultaudio_lpaif_aux_i2s_clk = {
+ .halt_reg = 0x1c098,
+ .clkr = {
+ .enable_reg = 0x1c098,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ultaudio_lpaif_aux_i2s_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &ultaudio_lpaif_aux_i2s_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ultaudio_xo_clk[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 ultaudio_xo_clk_src = {
+ .cmd_rcgr = 0x1c034,
+ .hid_width = 5,
+ .parent_map = gcc_xo_sleep_map,
+ .freq_tbl = ftbl_gcc_ultaudio_xo_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "ultaudio_xo_clk_src",
+ .parent_data = gcc_xo_sleep_parent_data,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_ultaudio_avsync_xo_clk = {
+ .halt_reg = 0x1c04c,
+ .clkr = {
+ .enable_reg = 0x1c04c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ultaudio_avsync_xo_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &ultaudio_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ultaudio_stc_xo_clk = {
+ .halt_reg = 0x1c050,
+ .clkr = {
+ .enable_reg = 0x1c050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ultaudio_stc_xo_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &ultaudio_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_codec_clk[] = {
+ F(9600000, P_XO, 2, 0, 0),
+ F(12288000, P_XO, 1, 16, 25),
+ F(19200000, P_XO, 1, 0, 0),
+ F(11289600, P_EXT_MCLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 codec_digcodec_clk_src = {
+ .cmd_rcgr = 0x1c09c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll1_emclk_sleep_map,
+ .freq_tbl = ftbl_codec_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "codec_digcodec_clk_src",
+ .parent_data = gcc_xo_gpll1_emclk_sleep_parent_data,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_codec_digcodec_clk = {
+ .halt_reg = 0x1c0b0,
+ .clkr = {
+ .enable_reg = 0x1c0b0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ultaudio_codec_digcodec_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &codec_digcodec_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ultaudio_pcnoc_mport_clk = {
+ .halt_reg = 0x1c000,
+ .clkr = {
+ .enable_reg = 0x1c000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ultaudio_pcnoc_mport_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ultaudio_pcnoc_sway_clk = {
+ .halt_reg = 0x1c004,
+ .clkr = {
+ .enable_reg = 0x1c004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ultaudio_pcnoc_sway_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_venus0_vcodec0_clk[] = {
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(160000000, P_GPLL0, 5, 0, 0),
+ F(228570000, P_GPLL0, 3.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 vcodec0_clk_src = {
+ .cmd_rcgr = 0x4C000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_venus0_vcodec0_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "vcodec0_clk_src",
+ .parent_data = gcc_xo_gpll0_parent_data,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_blsp1_ahb_clk = {
+ .halt_reg = 0x01008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_sleep_clk = {
+ .halt_reg = 0x01004,
+ .clkr = {
+ .enable_reg = 0x01004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup1_i2c_apps_clk = {
+ .halt_reg = 0x02008,
+ .clkr = {
+ .enable_reg = 0x02008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup1_i2c_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &blsp1_qup1_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup1_spi_apps_clk = {
+ .halt_reg = 0x02004,
+ .clkr = {
+ .enable_reg = 0x02004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup1_spi_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &blsp1_qup1_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup2_i2c_apps_clk = {
+ .halt_reg = 0x03010,
+ .clkr = {
+ .enable_reg = 0x03010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup2_i2c_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &blsp1_qup2_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = {
+ .halt_reg = 0x0300c,
+ .clkr = {
+ .enable_reg = 0x0300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup2_spi_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &blsp1_qup2_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup3_i2c_apps_clk = {
+ .halt_reg = 0x04020,
+ .clkr = {
+ .enable_reg = 0x04020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup3_i2c_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &blsp1_qup3_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup3_spi_apps_clk = {
+ .halt_reg = 0x0401c,
+ .clkr = {
+ .enable_reg = 0x0401c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup3_spi_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &blsp1_qup3_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup4_i2c_apps_clk = {
+ .halt_reg = 0x05020,
+ .clkr = {
+ .enable_reg = 0x05020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup4_i2c_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &blsp1_qup4_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup4_spi_apps_clk = {
+ .halt_reg = 0x0501c,
+ .clkr = {
+ .enable_reg = 0x0501c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup4_spi_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &blsp1_qup4_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup5_i2c_apps_clk = {
+ .halt_reg = 0x06020,
+ .clkr = {
+ .enable_reg = 0x06020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup5_i2c_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &blsp1_qup5_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup5_spi_apps_clk = {
+ .halt_reg = 0x0601c,
+ .clkr = {
+ .enable_reg = 0x0601c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup5_spi_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &blsp1_qup5_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup6_i2c_apps_clk = {
+ .halt_reg = 0x07020,
+ .clkr = {
+ .enable_reg = 0x07020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup6_i2c_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &blsp1_qup6_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup6_spi_apps_clk = {
+ .halt_reg = 0x0701c,
+ .clkr = {
+ .enable_reg = 0x0701c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup6_spi_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &blsp1_qup6_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart1_apps_clk = {
+ .halt_reg = 0x0203c,
+ .clkr = {
+ .enable_reg = 0x0203c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart1_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &blsp1_uart1_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart2_apps_clk = {
+ .halt_reg = 0x0302c,
+ .clkr = {
+ .enable_reg = 0x0302c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart2_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &blsp1_uart2_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x1300c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_boot_rom_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_cci_ahb_clk = {
+ .halt_reg = 0x5101c,
+ .clkr = {
+ .enable_reg = 0x5101c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_cci_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &camss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_cci_clk = {
+ .halt_reg = 0x51018,
+ .clkr = {
+ .enable_reg = 0x51018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_cci_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cci_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi0_ahb_clk = {
+ .halt_reg = 0x4e040,
+ .clkr = {
+ .enable_reg = 0x4e040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi0_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &camss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi0_clk = {
+ .halt_reg = 0x4e03c,
+ .clkr = {
+ .enable_reg = 0x4e03c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &csi0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi0phy_clk = {
+ .halt_reg = 0x4e048,
+ .clkr = {
+ .enable_reg = 0x4e048,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi0phy_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &csi0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi0pix_clk = {
+ .halt_reg = 0x4e058,
+ .clkr = {
+ .enable_reg = 0x4e058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi0pix_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &csi0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi0rdi_clk = {
+ .halt_reg = 0x4e050,
+ .clkr = {
+ .enable_reg = 0x4e050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi0rdi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &csi0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi1_ahb_clk = {
+ .halt_reg = 0x4f040,
+ .clkr = {
+ .enable_reg = 0x4f040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi1_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &camss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi1_clk = {
+ .halt_reg = 0x4f03c,
+ .clkr = {
+ .enable_reg = 0x4f03c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &csi1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi1phy_clk = {
+ .halt_reg = 0x4f048,
+ .clkr = {
+ .enable_reg = 0x4f048,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi1phy_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &csi1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi1pix_clk = {
+ .halt_reg = 0x4f058,
+ .clkr = {
+ .enable_reg = 0x4f058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi1pix_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &csi1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi1rdi_clk = {
+ .halt_reg = 0x4f050,
+ .clkr = {
+ .enable_reg = 0x4f050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi1rdi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &csi1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi_vfe0_clk = {
+ .halt_reg = 0x58050,
+ .clkr = {
+ .enable_reg = 0x58050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi_vfe0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &vfe0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_gp0_clk = {
+ .halt_reg = 0x54018,
+ .clkr = {
+ .enable_reg = 0x54018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_gp0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &camss_gp0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_gp1_clk = {
+ .halt_reg = 0x55018,
+ .clkr = {
+ .enable_reg = 0x55018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_gp1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &camss_gp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_ispif_ahb_clk = {
+ .halt_reg = 0x50004,
+ .clkr = {
+ .enable_reg = 0x50004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_ispif_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &camss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_jpeg0_clk = {
+ .halt_reg = 0x57020,
+ .clkr = {
+ .enable_reg = 0x57020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_jpeg0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &jpeg0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_jpeg_ahb_clk = {
+ .halt_reg = 0x57024,
+ .clkr = {
+ .enable_reg = 0x57024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_jpeg_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &camss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_jpeg_axi_clk = {
+ .halt_reg = 0x57028,
+ .clkr = {
+ .enable_reg = 0x57028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_jpeg_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &system_noc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_mclk0_clk = {
+ .halt_reg = 0x52018,
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_mclk0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &mclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_mclk1_clk = {
+ .halt_reg = 0x53018,
+ .clkr = {
+ .enable_reg = 0x53018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_mclk1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &mclk1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_micro_ahb_clk = {
+ .halt_reg = 0x5600c,
+ .clkr = {
+ .enable_reg = 0x5600c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_micro_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &camss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi0phytimer_clk = {
+ .halt_reg = 0x4e01c,
+ .clkr = {
+ .enable_reg = 0x4e01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi0phytimer_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &csi0phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi1phytimer_clk = {
+ .halt_reg = 0x4f01c,
+ .clkr = {
+ .enable_reg = 0x4f01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi1phytimer_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &csi1phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_ahb_clk = {
+ .halt_reg = 0x5a014,
+ .clkr = {
+ .enable_reg = 0x5a014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &camss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_top_ahb_clk = {
+ .halt_reg = 0x56004,
+ .clkr = {
+ .enable_reg = 0x56004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_top_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_cpp_ahb_clk = {
+ .halt_reg = 0x58040,
+ .clkr = {
+ .enable_reg = 0x58040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_cpp_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &camss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_cpp_clk = {
+ .halt_reg = 0x5803c,
+ .clkr = {
+ .enable_reg = 0x5803c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_cpp_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cpp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_vfe0_clk = {
+ .halt_reg = 0x58038,
+ .clkr = {
+ .enable_reg = 0x58038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_vfe0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &vfe0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_vfe_ahb_clk = {
+ .halt_reg = 0x58044,
+ .clkr = {
+ .enable_reg = 0x58044,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_vfe_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &camss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_vfe_axi_clk = {
+ .halt_reg = 0x58048,
+ .clkr = {
+ .enable_reg = 0x58048,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_vfe_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &system_noc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_crypto_ahb_clk = {
+ .halt_reg = 0x16024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_crypto_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_crypto_axi_clk = {
+ .halt_reg = 0x16020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_crypto_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_crypto_clk = {
+ .halt_reg = 0x1601c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_crypto_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &crypto_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_oxili_gmem_clk = {
+ .halt_reg = 0x59024,
+ .clkr = {
+ .enable_reg = 0x59024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_oxili_gmem_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gfx3d_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x08000,
+ .clkr = {
+ .enable_reg = 0x08000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x09000,
+ .clkr = {
+ .enable_reg = 0x09000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gp2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x0a000,
+ .clkr = {
+ .enable_reg = 0x0a000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gp3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdss_ahb_clk = {
+ .halt_reg = 0x4d07c,
+ .clkr = {
+ .enable_reg = 0x4d07c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mdss_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdss_axi_clk = {
+ .halt_reg = 0x4d080,
+ .clkr = {
+ .enable_reg = 0x4d080,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mdss_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &system_noc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdss_byte0_clk = {
+ .halt_reg = 0x4d094,
+ .clkr = {
+ .enable_reg = 0x4d094,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mdss_byte0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdss_byte1_clk = {
+ .halt_reg = 0x4d0a0,
+ .clkr = {
+ .enable_reg = 0x4d0a0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mdss_byte1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &byte1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdss_esc0_clk = {
+ .halt_reg = 0x4d098,
+ .clkr = {
+ .enable_reg = 0x4d098,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mdss_esc0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &esc0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdss_esc1_clk = {
+ .halt_reg = 0x4d09c,
+ .clkr = {
+ .enable_reg = 0x4d09c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mdss_esc1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &esc1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdss_mdp_clk = {
+	.halt_reg = 0x4d088,
+	.clkr = {
+		.enable_reg = 0x4d088,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mdss_mdp_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdss_pclk0_clk = {
+ .halt_reg = 0x4d084,
+ .clkr = {
+ .enable_reg = 0x4d084,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mdss_pclk0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &pclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdss_pclk1_clk = {
+ .halt_reg = 0x4d0a4,
+ .clkr = {
+ .enable_reg = 0x4d0a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mdss_pclk1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &pclk1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdss_vsync_clk = {
+ .halt_reg = 0x4d090,
+ .clkr = {
+ .enable_reg = 0x4d090,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mdss_vsync_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_cfg_ahb_clk = {
+ .halt_reg = 0x49000,
+ .clkr = {
+ .enable_reg = 0x49000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_cfg_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_q6_bimc_axi_clk = {
+ .halt_reg = 0x49004,
+ .clkr = {
+ .enable_reg = 0x49004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_q6_bimc_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &bimc_ddr_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_oxili_ahb_clk = {
+ .halt_reg = 0x59028,
+ .clkr = {
+ .enable_reg = 0x59028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_oxili_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_oxili_gfx3d_clk = {
+ .halt_reg = 0x59020,
+ .clkr = {
+ .enable_reg = 0x59020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_oxili_gfx3d_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gfx3d_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x4400c,
+ .clkr = {
+ .enable_reg = 0x4400c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &pdm2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x44004,
+ .clkr = {
+ .enable_reg = 0x44004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_prng_ahb_clk = {
+ .halt_reg = 0x13004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_prng_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ahb_clk = {
+ .halt_reg = 0x4201c,
+ .clkr = {
+ .enable_reg = 0x4201c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_apps_clk = {
+ .halt_reg = 0x42018,
+ .clkr = {
+ .enable_reg = 0x42018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &sdcc1_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_ahb_clk = {
+ .halt_reg = 0x4301c,
+ .clkr = {
+ .enable_reg = 0x4301c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_apps_clk = {
+ .halt_reg = 0x43018,
+ .clkr = {
+ .enable_reg = 0x43018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &sdcc2_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_apss_tcu_clk = {
+ .halt_reg = 0x12018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_apss_tcu_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &bimc_ddr_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gfx_tcu_clk = {
+ .halt_reg = 0x12020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gfx_tcu_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &bimc_ddr_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gfx_tbu_clk = {
+ .halt_reg = 0x12010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gfx_tbu_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &bimc_ddr_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdp_tbu_clk = {
+ .halt_reg = 0x1201c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mdp_tbu_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &system_noc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_venus_tbu_clk = {
+ .halt_reg = 0x12014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_venus_tbu_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &system_noc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_vfe_tbu_clk = {
+ .halt_reg = 0x1203c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_vfe_tbu_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &system_noc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_jpeg_tbu_clk = {
+ .halt_reg = 0x12034,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_jpeg_tbu_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &system_noc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_smmu_cfg_clk = {
+ .halt_reg = 0x12038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(12),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_smmu_cfg_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gtcu_ahb_clk = {
+ .halt_reg = 0x12044,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gtcu_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cpp_tbu_clk = {
+ .halt_reg = 0x12040,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(14),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cpp_tbu_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdp_rt_tbu_clk = {
+ .halt_reg = 0x1201c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mdp_rt_tbu_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_bimc_gfx_clk = {
+ .halt_reg = 0x31024,
+ .clkr = {
+ .enable_reg = 0x31024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_bimc_gfx_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &bimc_gpu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_bimc_gpu_clk = {
+ .halt_reg = 0x31040,
+ .clkr = {
+ .enable_reg = 0x31040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_bimc_gpu_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &bimc_gpu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb2a_phy_sleep_clk = {
+ .halt_reg = 0x4102c,
+ .clkr = {
+ .enable_reg = 0x4102c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb2a_phy_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb_fs_ahb_clk = {
+ .halt_reg = 0x3f008,
+ .clkr = {
+ .enable_reg = 0x3f008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb_fs_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb_fs_ic_clk = {
+ .halt_reg = 0x3f030,
+ .clkr = {
+ .enable_reg = 0x3f030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb_fs_ic_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &usb_fs_ic_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb_fs_system_clk = {
+ .halt_reg = 0x3f004,
+ .clkr = {
+ .enable_reg = 0x3f004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb_fs_system_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &usb_fs_system_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb_hs_ahb_clk = {
+ .halt_reg = 0x41008,
+ .clkr = {
+ .enable_reg = 0x41008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb_hs_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb_hs_system_clk = {
+ .halt_reg = 0x41004,
+ .clkr = {
+ .enable_reg = 0x41004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb_hs_system_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &usb_hs_system_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_venus0_ahb_clk = {
+ .halt_reg = 0x4c020,
+ .clkr = {
+ .enable_reg = 0x4c020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_venus0_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_venus0_axi_clk = {
+ .halt_reg = 0x4c024,
+ .clkr = {
+ .enable_reg = 0x4c024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_venus0_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &system_noc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_venus0_vcodec0_clk = {
+ .halt_reg = 0x4c01c,
+ .clkr = {
+ .enable_reg = 0x4c01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_venus0_vcodec0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &vcodec0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_venus0_core0_vcodec0_clk = {
+ .halt_reg = 0x4c02c,
+ .clkr = {
+ .enable_reg = 0x4c02c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_venus0_core0_vcodec0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &vcodec0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_venus0_core1_vcodec0_clk = {
+ .halt_reg = 0x4c034,
+ .clkr = {
+ .enable_reg = 0x4c034,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_venus0_core1_vcodec0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &vcodec0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_oxili_timer_clk = {
+ .halt_reg = 0x59040,
+ .clkr = {
+ .enable_reg = 0x59040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_oxili_timer_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc venus_gdsc = {
+ .gdscr = 0x4c018,
+ .pd = {
+ .name = "venus",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc mdss_gdsc = {
+ .gdscr = 0x4d078,
+ .pd = {
+ .name = "mdss",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc jpeg_gdsc = {
+ .gdscr = 0x5701c,
+ .pd = {
+ .name = "jpeg",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc vfe_gdsc = {
+ .gdscr = 0x58034,
+ .pd = {
+ .name = "vfe",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc oxili_gdsc = {
+ .gdscr = 0x5901c,
+ .pd = {
+ .name = "oxili",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc venus_core0_gdsc = {
+ .gdscr = 0x4c028,
+ .pd = {
+ .name = "venus_core0",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc venus_core1_gdsc = {
+ .gdscr = 0x4c030,
+ .pd = {
+ .name = "venus_core1",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct clk_regmap *gcc_msm8939_clocks[] = {
+ [GPLL0] = &gpll0.clkr,
+ [GPLL0_VOTE] = &gpll0_vote,
+ [BIMC_PLL] = &bimc_pll.clkr,
+ [BIMC_PLL_VOTE] = &bimc_pll_vote,
+ [GPLL1] = &gpll1.clkr,
+ [GPLL1_VOTE] = &gpll1_vote,
+ [GPLL2] = &gpll2.clkr,
+ [GPLL2_VOTE] = &gpll2_vote,
+ [PCNOC_BFDCD_CLK_SRC] = &pcnoc_bfdcd_clk_src.clkr,
+ [SYSTEM_NOC_BFDCD_CLK_SRC] = &system_noc_bfdcd_clk_src.clkr,
+ [CAMSS_AHB_CLK_SRC] = &camss_ahb_clk_src.clkr,
+ [APSS_AHB_CLK_SRC] = &apss_ahb_clk_src.clkr,
+ [CSI0_CLK_SRC] = &csi0_clk_src.clkr,
+ [CSI1_CLK_SRC] = &csi1_clk_src.clkr,
+ [GFX3D_CLK_SRC] = &gfx3d_clk_src.clkr,
+ [VFE0_CLK_SRC] = &vfe0_clk_src.clkr,
+ [BLSP1_QUP1_I2C_APPS_CLK_SRC] = &blsp1_qup1_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP1_SPI_APPS_CLK_SRC] = &blsp1_qup1_spi_apps_clk_src.clkr,
+ [BLSP1_QUP2_I2C_APPS_CLK_SRC] = &blsp1_qup2_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP2_SPI_APPS_CLK_SRC] = &blsp1_qup2_spi_apps_clk_src.clkr,
+ [BLSP1_QUP3_I2C_APPS_CLK_SRC] = &blsp1_qup3_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP3_SPI_APPS_CLK_SRC] = &blsp1_qup3_spi_apps_clk_src.clkr,
+ [BLSP1_QUP4_I2C_APPS_CLK_SRC] = &blsp1_qup4_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP4_SPI_APPS_CLK_SRC] = &blsp1_qup4_spi_apps_clk_src.clkr,
+ [BLSP1_QUP5_I2C_APPS_CLK_SRC] = &blsp1_qup5_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP5_SPI_APPS_CLK_SRC] = &blsp1_qup5_spi_apps_clk_src.clkr,
+ [BLSP1_QUP6_I2C_APPS_CLK_SRC] = &blsp1_qup6_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP6_SPI_APPS_CLK_SRC] = &blsp1_qup6_spi_apps_clk_src.clkr,
+ [BLSP1_UART1_APPS_CLK_SRC] = &blsp1_uart1_apps_clk_src.clkr,
+ [BLSP1_UART2_APPS_CLK_SRC] = &blsp1_uart2_apps_clk_src.clkr,
+ [CCI_CLK_SRC] = &cci_clk_src.clkr,
+ [CAMSS_GP0_CLK_SRC] = &camss_gp0_clk_src.clkr,
+ [CAMSS_GP1_CLK_SRC] = &camss_gp1_clk_src.clkr,
+ [JPEG0_CLK_SRC] = &jpeg0_clk_src.clkr,
+ [MCLK0_CLK_SRC] = &mclk0_clk_src.clkr,
+ [MCLK1_CLK_SRC] = &mclk1_clk_src.clkr,
+ [CSI0PHYTIMER_CLK_SRC] = &csi0phytimer_clk_src.clkr,
+ [CSI1PHYTIMER_CLK_SRC] = &csi1phytimer_clk_src.clkr,
+ [CPP_CLK_SRC] = &cpp_clk_src.clkr,
+ [CRYPTO_CLK_SRC] = &crypto_clk_src.clkr,
+ [GP1_CLK_SRC] = &gp1_clk_src.clkr,
+ [GP2_CLK_SRC] = &gp2_clk_src.clkr,
+ [GP3_CLK_SRC] = &gp3_clk_src.clkr,
+ [BYTE0_CLK_SRC] = &byte0_clk_src.clkr,
+ [ESC0_CLK_SRC] = &esc0_clk_src.clkr,
+ [MDP_CLK_SRC] = &mdp_clk_src.clkr,
+ [PCLK0_CLK_SRC] = &pclk0_clk_src.clkr,
+ [VSYNC_CLK_SRC] = &vsync_clk_src.clkr,
+ [PDM2_CLK_SRC] = &pdm2_clk_src.clkr,
+ [SDCC1_APPS_CLK_SRC] = &sdcc1_apps_clk_src.clkr,
+ [SDCC2_APPS_CLK_SRC] = &sdcc2_apps_clk_src.clkr,
+ [APSS_TCU_CLK_SRC] = &apss_tcu_clk_src.clkr,
+ [USB_HS_SYSTEM_CLK_SRC] = &usb_hs_system_clk_src.clkr,
+ [VCODEC0_CLK_SRC] = &vcodec0_clk_src.clkr,
+ [GCC_BLSP1_AHB_CLK] = &gcc_blsp1_ahb_clk.clkr,
+ [GCC_BLSP1_SLEEP_CLK] = &gcc_blsp1_sleep_clk.clkr,
+ [GCC_BLSP1_QUP1_I2C_APPS_CLK] = &gcc_blsp1_qup1_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP1_SPI_APPS_CLK] = &gcc_blsp1_qup1_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP2_I2C_APPS_CLK] = &gcc_blsp1_qup2_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP2_SPI_APPS_CLK] = &gcc_blsp1_qup2_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP3_I2C_APPS_CLK] = &gcc_blsp1_qup3_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP3_SPI_APPS_CLK] = &gcc_blsp1_qup3_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP4_I2C_APPS_CLK] = &gcc_blsp1_qup4_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP4_SPI_APPS_CLK] = &gcc_blsp1_qup4_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP5_I2C_APPS_CLK] = &gcc_blsp1_qup5_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP5_SPI_APPS_CLK] = &gcc_blsp1_qup5_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP6_I2C_APPS_CLK] = &gcc_blsp1_qup6_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP6_SPI_APPS_CLK] = &gcc_blsp1_qup6_spi_apps_clk.clkr,
+ [GCC_BLSP1_UART1_APPS_CLK] = &gcc_blsp1_uart1_apps_clk.clkr,
+ [GCC_BLSP1_UART2_APPS_CLK] = &gcc_blsp1_uart2_apps_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CAMSS_CCI_AHB_CLK] = &gcc_camss_cci_ahb_clk.clkr,
+ [GCC_CAMSS_CCI_CLK] = &gcc_camss_cci_clk.clkr,
+ [GCC_CAMSS_CSI0_AHB_CLK] = &gcc_camss_csi0_ahb_clk.clkr,
+ [GCC_CAMSS_CSI0_CLK] = &gcc_camss_csi0_clk.clkr,
+ [GCC_CAMSS_CSI0PHY_CLK] = &gcc_camss_csi0phy_clk.clkr,
+ [GCC_CAMSS_CSI0PIX_CLK] = &gcc_camss_csi0pix_clk.clkr,
+ [GCC_CAMSS_CSI0RDI_CLK] = &gcc_camss_csi0rdi_clk.clkr,
+ [GCC_CAMSS_CSI1_AHB_CLK] = &gcc_camss_csi1_ahb_clk.clkr,
+ [GCC_CAMSS_CSI1_CLK] = &gcc_camss_csi1_clk.clkr,
+ [GCC_CAMSS_CSI1PHY_CLK] = &gcc_camss_csi1phy_clk.clkr,
+ [GCC_CAMSS_CSI1PIX_CLK] = &gcc_camss_csi1pix_clk.clkr,
+ [GCC_CAMSS_CSI1RDI_CLK] = &gcc_camss_csi1rdi_clk.clkr,
+ [GCC_CAMSS_CSI_VFE0_CLK] = &gcc_camss_csi_vfe0_clk.clkr,
+ [GCC_CAMSS_GP0_CLK] = &gcc_camss_gp0_clk.clkr,
+ [GCC_CAMSS_GP1_CLK] = &gcc_camss_gp1_clk.clkr,
+ [GCC_CAMSS_ISPIF_AHB_CLK] = &gcc_camss_ispif_ahb_clk.clkr,
+ [GCC_CAMSS_JPEG0_CLK] = &gcc_camss_jpeg0_clk.clkr,
+ [GCC_CAMSS_JPEG_AHB_CLK] = &gcc_camss_jpeg_ahb_clk.clkr,
+ [GCC_CAMSS_JPEG_AXI_CLK] = &gcc_camss_jpeg_axi_clk.clkr,
+ [GCC_CAMSS_MCLK0_CLK] = &gcc_camss_mclk0_clk.clkr,
+ [GCC_CAMSS_MCLK1_CLK] = &gcc_camss_mclk1_clk.clkr,
+ [GCC_CAMSS_MICRO_AHB_CLK] = &gcc_camss_micro_ahb_clk.clkr,
+ [GCC_CAMSS_CSI0PHYTIMER_CLK] = &gcc_camss_csi0phytimer_clk.clkr,
+ [GCC_CAMSS_CSI1PHYTIMER_CLK] = &gcc_camss_csi1phytimer_clk.clkr,
+ [GCC_CAMSS_AHB_CLK] = &gcc_camss_ahb_clk.clkr,
+ [GCC_CAMSS_TOP_AHB_CLK] = &gcc_camss_top_ahb_clk.clkr,
+ [GCC_CAMSS_CPP_AHB_CLK] = &gcc_camss_cpp_ahb_clk.clkr,
+ [GCC_CAMSS_CPP_CLK] = &gcc_camss_cpp_clk.clkr,
+ [GCC_CAMSS_VFE0_CLK] = &gcc_camss_vfe0_clk.clkr,
+ [GCC_CAMSS_VFE_AHB_CLK] = &gcc_camss_vfe_ahb_clk.clkr,
+ [GCC_CAMSS_VFE_AXI_CLK] = &gcc_camss_vfe_axi_clk.clkr,
+ [GCC_CRYPTO_AHB_CLK] = &gcc_crypto_ahb_clk.clkr,
+ [GCC_CRYPTO_AXI_CLK] = &gcc_crypto_axi_clk.clkr,
+ [GCC_CRYPTO_CLK] = &gcc_crypto_clk.clkr,
+ [GCC_OXILI_GMEM_CLK] = &gcc_oxili_gmem_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_MDSS_AHB_CLK] = &gcc_mdss_ahb_clk.clkr,
+ [GCC_MDSS_AXI_CLK] = &gcc_mdss_axi_clk.clkr,
+ [GCC_MDSS_BYTE0_CLK] = &gcc_mdss_byte0_clk.clkr,
+ [GCC_MDSS_ESC0_CLK] = &gcc_mdss_esc0_clk.clkr,
+ [GCC_MDSS_MDP_CLK] = &gcc_mdss_mdp_clk.clkr,
+ [GCC_MDSS_PCLK0_CLK] = &gcc_mdss_pclk0_clk.clkr,
+ [GCC_MDSS_VSYNC_CLK] = &gcc_mdss_vsync_clk.clkr,
+ [GCC_MSS_CFG_AHB_CLK] = &gcc_mss_cfg_ahb_clk.clkr,
+ [GCC_OXILI_AHB_CLK] = &gcc_oxili_ahb_clk.clkr,
+ [GCC_OXILI_GFX3D_CLK] = &gcc_oxili_gfx3d_clk.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
+ [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
+ [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+ [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_GTCU_AHB_CLK] = &gcc_gtcu_ahb_clk.clkr,
+ [GCC_JPEG_TBU_CLK] = &gcc_jpeg_tbu_clk.clkr,
+ [GCC_MDP_TBU_CLK] = &gcc_mdp_tbu_clk.clkr,
+ [GCC_SMMU_CFG_CLK] = &gcc_smmu_cfg_clk.clkr,
+ [GCC_VENUS_TBU_CLK] = &gcc_venus_tbu_clk.clkr,
+ [GCC_VFE_TBU_CLK] = &gcc_vfe_tbu_clk.clkr,
+ [GCC_USB2A_PHY_SLEEP_CLK] = &gcc_usb2a_phy_sleep_clk.clkr,
+ [GCC_USB_HS_AHB_CLK] = &gcc_usb_hs_ahb_clk.clkr,
+ [GCC_USB_HS_SYSTEM_CLK] = &gcc_usb_hs_system_clk.clkr,
+ [GCC_VENUS0_AHB_CLK] = &gcc_venus0_ahb_clk.clkr,
+ [GCC_VENUS0_AXI_CLK] = &gcc_venus0_axi_clk.clkr,
+ [GCC_VENUS0_VCODEC0_CLK] = &gcc_venus0_vcodec0_clk.clkr,
+ [BIMC_DDR_CLK_SRC] = &bimc_ddr_clk_src.clkr,
+ [GCC_APSS_TCU_CLK] = &gcc_apss_tcu_clk.clkr,
+ [GCC_GFX_TCU_CLK] = &gcc_gfx_tcu_clk.clkr,
+ [BIMC_GPU_CLK_SRC] = &bimc_gpu_clk_src.clkr,
+ [GCC_BIMC_GFX_CLK] = &gcc_bimc_gfx_clk.clkr,
+ [GCC_BIMC_GPU_CLK] = &gcc_bimc_gpu_clk.clkr,
+ [ULTAUDIO_AHBFABRIC_CLK_SRC] = &ultaudio_ahbfabric_clk_src.clkr,
+ [ULTAUDIO_LPAIF_PRI_I2S_CLK_SRC] = &ultaudio_lpaif_pri_i2s_clk_src.clkr,
+ [ULTAUDIO_LPAIF_SEC_I2S_CLK_SRC] = &ultaudio_lpaif_sec_i2s_clk_src.clkr,
+ [ULTAUDIO_LPAIF_AUX_I2S_CLK_SRC] = &ultaudio_lpaif_aux_i2s_clk_src.clkr,
+ [ULTAUDIO_XO_CLK_SRC] = &ultaudio_xo_clk_src.clkr,
+ [CODEC_DIGCODEC_CLK_SRC] = &codec_digcodec_clk_src.clkr,
+ [GCC_ULTAUDIO_PCNOC_MPORT_CLK] = &gcc_ultaudio_pcnoc_mport_clk.clkr,
+ [GCC_ULTAUDIO_PCNOC_SWAY_CLK] = &gcc_ultaudio_pcnoc_sway_clk.clkr,
+ [GCC_ULTAUDIO_AVSYNC_XO_CLK] = &gcc_ultaudio_avsync_xo_clk.clkr,
+ [GCC_ULTAUDIO_STC_XO_CLK] = &gcc_ultaudio_stc_xo_clk.clkr,
+ [GCC_ULTAUDIO_AHBFABRIC_IXFABRIC_CLK] = &gcc_ultaudio_ahbfabric_ixfabric_clk.clkr,
+ [GCC_ULTAUDIO_AHBFABRIC_IXFABRIC_LPM_CLK] = &gcc_ultaudio_ahbfabric_ixfabric_lpm_clk.clkr,
+ [GCC_ULTAUDIO_LPAIF_PRI_I2S_CLK] = &gcc_ultaudio_lpaif_pri_i2s_clk.clkr,
+ [GCC_ULTAUDIO_LPAIF_SEC_I2S_CLK] = &gcc_ultaudio_lpaif_sec_i2s_clk.clkr,
+ [GCC_ULTAUDIO_LPAIF_AUX_I2S_CLK] = &gcc_ultaudio_lpaif_aux_i2s_clk.clkr,
+ [GCC_CODEC_DIGCODEC_CLK] = &gcc_codec_digcodec_clk.clkr,
+ [GCC_MSS_Q6_BIMC_AXI_CLK] = &gcc_mss_q6_bimc_axi_clk.clkr,
+ [GPLL3] = &gpll3.clkr,
+ [GPLL3_VOTE] = &gpll3_vote,
+ [GPLL4] = &gpll4.clkr,
+ [GPLL4_VOTE] = &gpll4_vote,
+ [GPLL5] = &gpll5.clkr,
+ [GPLL5_VOTE] = &gpll5_vote,
+ [GPLL6] = &gpll6.clkr,
+ [GPLL6_VOTE] = &gpll6_vote,
+ [BYTE1_CLK_SRC] = &byte1_clk_src.clkr,
+ [GCC_MDSS_BYTE1_CLK] = &gcc_mdss_byte1_clk.clkr,
+ [ESC1_CLK_SRC] = &esc1_clk_src.clkr,
+ [GCC_MDSS_ESC1_CLK] = &gcc_mdss_esc1_clk.clkr,
+ [PCLK1_CLK_SRC] = &pclk1_clk_src.clkr,
+ [GCC_MDSS_PCLK1_CLK] = &gcc_mdss_pclk1_clk.clkr,
+ [GCC_GFX_TBU_CLK] = &gcc_gfx_tbu_clk.clkr,
+ [GCC_CPP_TBU_CLK] = &gcc_cpp_tbu_clk.clkr,
+ [GCC_MDP_RT_TBU_CLK] = &gcc_mdp_rt_tbu_clk.clkr,
+ [USB_FS_SYSTEM_CLK_SRC] = &usb_fs_system_clk_src.clkr,
+ [USB_FS_IC_CLK_SRC] = &usb_fs_ic_clk_src.clkr,
+ [GCC_USB_FS_AHB_CLK] = &gcc_usb_fs_ahb_clk.clkr,
+ [GCC_USB_FS_IC_CLK] = &gcc_usb_fs_ic_clk.clkr,
+ [GCC_USB_FS_SYSTEM_CLK] = &gcc_usb_fs_system_clk.clkr,
+ [GCC_VENUS0_CORE0_VCODEC0_CLK] = &gcc_venus0_core0_vcodec0_clk.clkr,
+ [GCC_VENUS0_CORE1_VCODEC0_CLK] = &gcc_venus0_core1_vcodec0_clk.clkr,
+ [GCC_OXILI_TIMER_CLK] = &gcc_oxili_timer_clk.clkr,
+};
+
+static struct gdsc *gcc_msm8939_gdscs[] = {
+ [VENUS_GDSC] = &venus_gdsc,
+ [MDSS_GDSC] = &mdss_gdsc,
+ [JPEG_GDSC] = &jpeg_gdsc,
+ [VFE_GDSC] = &vfe_gdsc,
+ [OXILI_GDSC] = &oxili_gdsc,
+ [VENUS_CORE0_GDSC] = &venus_core0_gdsc,
+ [VENUS_CORE1_GDSC] = &venus_core1_gdsc,
+};
+
+static const struct qcom_reset_map gcc_msm8939_resets[] = {
+ [GCC_BLSP1_BCR] = { 0x01000 },
+ [GCC_BLSP1_QUP1_BCR] = { 0x02000 },
+ [GCC_BLSP1_UART1_BCR] = { 0x02038 },
+ [GCC_BLSP1_QUP2_BCR] = { 0x03008 },
+ [GCC_BLSP1_UART2_BCR] = { 0x03028 },
+ [GCC_BLSP1_QUP3_BCR] = { 0x04018 },
+ [GCC_BLSP1_UART3_BCR] = { 0x04038 },
+ [GCC_BLSP1_QUP4_BCR] = { 0x05018 },
+ [GCC_BLSP1_QUP5_BCR] = { 0x06018 },
+ [GCC_BLSP1_QUP6_BCR] = { 0x07018 },
+ [GCC_IMEM_BCR] = { 0x0e000 },
+ [GCC_SMMU_BCR] = { 0x12000 },
+ [GCC_APSS_TCU_BCR] = { 0x12050 },
+ [GCC_SMMU_XPU_BCR] = { 0x12054 },
+ [GCC_PCNOC_TBU_BCR] = { 0x12058 },
+ [GCC_PRNG_BCR] = { 0x13000 },
+ [GCC_BOOT_ROM_BCR] = { 0x13008 },
+ [GCC_CRYPTO_BCR] = { 0x16000 },
+ [GCC_SEC_CTRL_BCR] = { 0x1a000 },
+ [GCC_AUDIO_CORE_BCR] = { 0x1c008 },
+ [GCC_ULT_AUDIO_BCR] = { 0x1c0b4 },
+ [GCC_DEHR_BCR] = { 0x1f000 },
+ [GCC_SYSTEM_NOC_BCR] = { 0x26000 },
+ [GCC_PCNOC_BCR] = { 0x27018 },
+ [GCC_TCSR_BCR] = { 0x28000 },
+ [GCC_QDSS_BCR] = { 0x29000 },
+ [GCC_DCD_BCR] = { 0x2a000 },
+ [GCC_MSG_RAM_BCR] = { 0x2b000 },
+ [GCC_MPM_BCR] = { 0x2c000 },
+ [GCC_SPMI_BCR] = { 0x2e000 },
+ [GCC_SPDM_BCR] = { 0x2f000 },
+ [GCC_MM_SPDM_BCR] = { 0x2f024 },
+ [GCC_BIMC_BCR] = { 0x31000 },
+ [GCC_RBCPR_BCR] = { 0x33000 },
+ [GCC_TLMM_BCR] = { 0x34000 },
+ [GCC_CAMSS_CSI2_BCR] = { 0x3c038 },
+ [GCC_CAMSS_CSI2PHY_BCR] = { 0x3c044 },
+ [GCC_CAMSS_CSI2RDI_BCR] = { 0x3c04c },
+ [GCC_CAMSS_CSI2PIX_BCR] = { 0x3c054 },
+ [GCC_USB_FS_BCR] = { 0x3f000 },
+ [GCC_USB_HS_BCR] = { 0x41000 },
+ [GCC_USB2A_PHY_BCR] = { 0x41028 },
+ [GCC_SDCC1_BCR] = { 0x42000 },
+ [GCC_SDCC2_BCR] = { 0x43000 },
+ [GCC_PDM_BCR] = { 0x44000 },
+ [GCC_SNOC_BUS_TIMEOUT0_BCR] = { 0x47000 },
+ [GCC_PCNOC_BUS_TIMEOUT0_BCR] = { 0x48000 },
+ [GCC_PCNOC_BUS_TIMEOUT1_BCR] = { 0x48008 },
+ [GCC_PCNOC_BUS_TIMEOUT2_BCR] = { 0x48010 },
+ [GCC_PCNOC_BUS_TIMEOUT3_BCR] = { 0x48018 },
+ [GCC_PCNOC_BUS_TIMEOUT4_BCR] = { 0x48020 },
+ [GCC_PCNOC_BUS_TIMEOUT5_BCR] = { 0x48028 },
+ [GCC_PCNOC_BUS_TIMEOUT6_BCR] = { 0x48030 },
+ [GCC_PCNOC_BUS_TIMEOUT7_BCR] = { 0x48038 },
+ [GCC_PCNOC_BUS_TIMEOUT8_BCR] = { 0x48040 },
+ [GCC_PCNOC_BUS_TIMEOUT9_BCR] = { 0x48048 },
+ [GCC_MMSS_BCR] = { 0x4b000 },
+ [GCC_VENUS0_BCR] = { 0x4c014 },
+ [GCC_MDSS_BCR] = { 0x4d074 },
+ [GCC_CAMSS_PHY0_BCR] = { 0x4e018 },
+ [GCC_CAMSS_CSI0_BCR] = { 0x4e038 },
+ [GCC_CAMSS_CSI0PHY_BCR] = { 0x4e044 },
+ [GCC_CAMSS_CSI0RDI_BCR] = { 0x4e04c },
+ [GCC_CAMSS_CSI0PIX_BCR] = { 0x4e054 },
+ [GCC_CAMSS_PHY1_BCR] = { 0x4f018 },
+ [GCC_CAMSS_CSI1_BCR] = { 0x4f038 },
+ [GCC_CAMSS_CSI1PHY_BCR] = { 0x4f044 },
+ [GCC_CAMSS_CSI1RDI_BCR] = { 0x4f04c },
+ [GCC_CAMSS_CSI1PIX_BCR] = { 0x4f054 },
+ [GCC_CAMSS_ISPIF_BCR] = { 0x50000 },
+ [GCC_BLSP1_QUP4_SPI_APPS_CBCR] = { 0x0501c },
+ [GCC_CAMSS_CCI_BCR] = { 0x51014 },
+ [GCC_CAMSS_MCLK0_BCR] = { 0x52014 },
+ [GCC_CAMSS_MCLK1_BCR] = { 0x53014 },
+ [GCC_CAMSS_GP0_BCR] = { 0x54014 },
+ [GCC_CAMSS_GP1_BCR] = { 0x55014 },
+ [GCC_CAMSS_TOP_BCR] = { 0x56000 },
+ [GCC_CAMSS_MICRO_BCR] = { 0x56008 },
+ [GCC_CAMSS_JPEG_BCR] = { 0x57018 },
+ [GCC_CAMSS_VFE_BCR] = { 0x58030 },
+ [GCC_CAMSS_CSI_VFE0_BCR] = { 0x5804c },
+ [GCC_OXILI_BCR] = { 0x59018 },
+ [GCC_GMEM_BCR] = { 0x5902c },
+ [GCC_CAMSS_AHB_BCR] = { 0x5a018 },
+ [GCC_CAMSS_MCLK2_BCR] = { 0x5c014 },
+ [GCC_MDP_TBU_BCR] = { 0x62000 },
+ [GCC_GFX_TBU_BCR] = { 0x63000 },
+ [GCC_GFX_TCU_BCR] = { 0x64000 },
+ [GCC_MSS_TBU_AXI_BCR] = { 0x65000 },
+ [GCC_MSS_TBU_GSS_AXI_BCR] = { 0x66000 },
+ [GCC_MSS_TBU_Q6_AXI_BCR] = { 0x67000 },
+ [GCC_GTCU_AHB_BCR] = { 0x68000 },
+ [GCC_SMMU_CFG_BCR] = { 0x69000 },
+ [GCC_VFE_TBU_BCR] = { 0x6a000 },
+ [GCC_VENUS_TBU_BCR] = { 0x6b000 },
+ [GCC_JPEG_TBU_BCR] = { 0x6c000 },
+ [GCC_PRONTO_TBU_BCR] = { 0x6d000 },
+ [GCC_CPP_TBU_BCR] = { 0x6e000 },
+ [GCC_MDP_RT_TBU_BCR] = { 0x6f000 },
+ [GCC_SMMU_CATS_BCR] = { 0x7c000 },
+};
+
+static const struct regmap_config gcc_msm8939_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x80000,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gcc_msm8939_desc = {
+ .config = &gcc_msm8939_regmap_config,
+ .clks = gcc_msm8939_clocks,
+ .num_clks = ARRAY_SIZE(gcc_msm8939_clocks),
+ .resets = gcc_msm8939_resets,
+ .num_resets = ARRAY_SIZE(gcc_msm8939_resets),
+ .gdscs = gcc_msm8939_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_msm8939_gdscs),
+};
+
+static const struct of_device_id gcc_msm8939_match_table[] = {
+ { .compatible = "qcom,gcc-msm8939" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_msm8939_match_table);
+
+static int gcc_msm8939_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+
+ regmap = qcom_cc_map(pdev, &gcc_msm8939_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ clk_pll_configure_sr_hpm_lp(&gpll3, regmap, &gpll3_config, true);
+ clk_pll_configure_sr_hpm_lp(&gpll4, regmap, &gpll4_config, true);
+
+ return qcom_cc_really_probe(pdev, &gcc_msm8939_desc, regmap);
+}
+
+static struct platform_driver gcc_msm8939_driver = {
+ .probe = gcc_msm8939_probe,
+ .driver = {
+ .name = "gcc-msm8939",
+ .of_match_table = gcc_msm8939_match_table,
+ },
+};
+
+static int __init gcc_msm8939_init(void)
+{
+ return platform_driver_register(&gcc_msm8939_driver);
+}
+core_initcall(gcc_msm8939_init);
+
+static void __exit gcc_msm8939_exit(void)
+{
+ platform_driver_unregister(&gcc_msm8939_driver);
+}
+module_exit(gcc_msm8939_exit);
+
+MODULE_DESCRIPTION("Qualcomm GCC MSM8939 Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/gcc-msm8998.c b/drivers/clk/qcom/gcc-msm8998.c
index df1d7056436c..9d7016bcd680 100644
--- a/drivers/clk/qcom/gcc-msm8998.c
+++ b/drivers/clk/qcom/gcc-msm8998.c
@@ -1110,6 +1110,27 @@ static struct clk_rcg2 ufs_axi_clk_src = {
},
};
+static const struct freq_tbl ftbl_ufs_unipro_core_clk_src[] = {
+ F(37500000, P_GPLL0_OUT_MAIN, 16, 0, 0),
+ F(75000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 ufs_unipro_core_clk_src = {
+ .cmd_rcgr = 0x76028,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_ufs_unipro_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "ufs_unipro_core_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
static const struct freq_tbl ftbl_usb30_master_clk_src[] = {
F(19200000, P_XO, 1, 0, 0),
F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
@@ -2549,6 +2570,11 @@ static struct clk_branch gcc_ufs_unipro_core_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_unipro_core_clk",
+ .parent_names = (const char *[]){
+ "ufs_unipro_core_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2904,6 +2930,7 @@ static struct clk_regmap *gcc_msm8998_clocks[] = {
[SDCC4_APPS_CLK_SRC] = &sdcc4_apps_clk_src.clkr,
[TSIF_REF_CLK_SRC] = &tsif_ref_clk_src.clkr,
[UFS_AXI_CLK_SRC] = &ufs_axi_clk_src.clkr,
+ [UFS_UNIPRO_CORE_CLK_SRC] = &ufs_unipro_core_clk_src.clkr,
[USB30_MASTER_CLK_SRC] = &usb30_master_clk_src.clkr,
[USB30_MOCK_UTMI_CLK_SRC] = &usb30_mock_utmi_clk_src.clkr,
[USB3_PHY_AUX_CLK_SRC] = &usb3_phy_aux_clk_src.clkr,
diff --git a/drivers/clk/qcom/gcc-sc7180.c b/drivers/clk/qcom/gcc-sc7180.c
index 6a51b5b5fc19..ca4383e3a02a 100644
--- a/drivers/clk/qcom/gcc-sc7180.c
+++ b/drivers/clk/qcom/gcc-sc7180.c
@@ -390,6 +390,7 @@ static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = {
F(29491200, P_GPLL0_OUT_EVEN, 1, 1536, 15625),
F(32000000, P_GPLL0_OUT_EVEN, 1, 8, 75),
F(48000000, P_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(51200000, P_GPLL6_OUT_MAIN, 7.5, 0, 0),
F(64000000, P_GPLL0_OUT_EVEN, 1, 16, 75),
F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0),
F(80000000, P_GPLL0_OUT_EVEN, 1, 4, 15),
@@ -405,8 +406,8 @@ static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = {
static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = {
.name = "gcc_qupv3_wrap0_s0_clk_src",
- .parent_data = gcc_parent_data_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_rcg2_ops,
};
@@ -414,15 +415,15 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
.cmd_rcgr = 0x17034,
.mnd_width = 16,
.hid_width = 5,
- .parent_map = gcc_parent_map_0,
+ .parent_map = gcc_parent_map_1,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
.clkr.hw.init = &gcc_qupv3_wrap0_s0_clk_src_init,
};
static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = {
.name = "gcc_qupv3_wrap0_s1_clk_src",
- .parent_data = gcc_parent_data_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_rcg2_ops,
};
@@ -430,15 +431,15 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
.cmd_rcgr = 0x17164,
.mnd_width = 16,
.hid_width = 5,
- .parent_map = gcc_parent_map_0,
+ .parent_map = gcc_parent_map_1,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
.clkr.hw.init = &gcc_qupv3_wrap0_s1_clk_src_init,
};
static struct clk_init_data gcc_qupv3_wrap0_s2_clk_src_init = {
.name = "gcc_qupv3_wrap0_s2_clk_src",
- .parent_data = gcc_parent_data_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_rcg2_ops,
};
@@ -446,15 +447,15 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
.cmd_rcgr = 0x17294,
.mnd_width = 16,
.hid_width = 5,
- .parent_map = gcc_parent_map_0,
+ .parent_map = gcc_parent_map_1,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
.clkr.hw.init = &gcc_qupv3_wrap0_s2_clk_src_init,
};
static struct clk_init_data gcc_qupv3_wrap0_s3_clk_src_init = {
.name = "gcc_qupv3_wrap0_s3_clk_src",
- .parent_data = gcc_parent_data_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_rcg2_ops,
};
@@ -462,15 +463,15 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
.cmd_rcgr = 0x173c4,
.mnd_width = 16,
.hid_width = 5,
- .parent_map = gcc_parent_map_0,
+ .parent_map = gcc_parent_map_1,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
.clkr.hw.init = &gcc_qupv3_wrap0_s3_clk_src_init,
};
static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = {
.name = "gcc_qupv3_wrap0_s4_clk_src",
- .parent_data = gcc_parent_data_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_rcg2_ops,
};
@@ -478,15 +479,15 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
.cmd_rcgr = 0x174f4,
.mnd_width = 16,
.hid_width = 5,
- .parent_map = gcc_parent_map_0,
+ .parent_map = gcc_parent_map_1,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
.clkr.hw.init = &gcc_qupv3_wrap0_s4_clk_src_init,
};
static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = {
.name = "gcc_qupv3_wrap0_s5_clk_src",
- .parent_data = gcc_parent_data_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_rcg2_ops,
};
@@ -494,15 +495,15 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
.cmd_rcgr = 0x17624,
.mnd_width = 16,
.hid_width = 5,
- .parent_map = gcc_parent_map_0,
+ .parent_map = gcc_parent_map_1,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
.clkr.hw.init = &gcc_qupv3_wrap0_s5_clk_src_init,
};
static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = {
.name = "gcc_qupv3_wrap1_s0_clk_src",
- .parent_data = gcc_parent_data_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_rcg2_ops,
};
@@ -510,15 +511,15 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
.cmd_rcgr = 0x18018,
.mnd_width = 16,
.hid_width = 5,
- .parent_map = gcc_parent_map_0,
+ .parent_map = gcc_parent_map_1,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
.clkr.hw.init = &gcc_qupv3_wrap1_s0_clk_src_init,
};
static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = {
.name = "gcc_qupv3_wrap1_s1_clk_src",
- .parent_data = gcc_parent_data_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_rcg2_ops,
};
@@ -526,15 +527,15 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
.cmd_rcgr = 0x18148,
.mnd_width = 16,
.hid_width = 5,
- .parent_map = gcc_parent_map_0,
+ .parent_map = gcc_parent_map_1,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
.clkr.hw.init = &gcc_qupv3_wrap1_s1_clk_src_init,
};
static struct clk_init_data gcc_qupv3_wrap1_s2_clk_src_init = {
.name = "gcc_qupv3_wrap1_s2_clk_src",
- .parent_data = gcc_parent_data_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_rcg2_ops,
};
@@ -542,15 +543,15 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = {
.cmd_rcgr = 0x18278,
.mnd_width = 16,
.hid_width = 5,
- .parent_map = gcc_parent_map_0,
+ .parent_map = gcc_parent_map_1,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
.clkr.hw.init = &gcc_qupv3_wrap1_s2_clk_src_init,
};
static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = {
.name = "gcc_qupv3_wrap1_s3_clk_src",
- .parent_data = gcc_parent_data_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_rcg2_ops,
};
@@ -558,15 +559,15 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
.cmd_rcgr = 0x183a8,
.mnd_width = 16,
.hid_width = 5,
- .parent_map = gcc_parent_map_0,
+ .parent_map = gcc_parent_map_1,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
.clkr.hw.init = &gcc_qupv3_wrap1_s3_clk_src_init,
};
static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = {
.name = "gcc_qupv3_wrap1_s4_clk_src",
- .parent_data = gcc_parent_data_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_rcg2_ops,
};
@@ -574,15 +575,15 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
.cmd_rcgr = 0x184d8,
.mnd_width = 16,
.hid_width = 5,
- .parent_map = gcc_parent_map_0,
+ .parent_map = gcc_parent_map_1,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
.clkr.hw.init = &gcc_qupv3_wrap1_s4_clk_src_init,
};
static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = {
.name = "gcc_qupv3_wrap1_s5_clk_src",
- .parent_data = gcc_parent_data_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_rcg2_ops,
};
@@ -590,7 +591,7 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
.cmd_rcgr = 0x18608,
.mnd_width = 16,
.hid_width = 5,
- .parent_map = gcc_parent_map_0,
+ .parent_map = gcc_parent_map_1,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
.clkr.hw.init = &gcc_qupv3_wrap1_s5_clk_src_init,
};
@@ -816,6 +817,26 @@ static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = {
},
};
+static const struct freq_tbl ftbl_gcc_sec_ctrl_clk_src[] = {
+ F(4800000, P_BI_TCXO, 4, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sec_ctrl_clk_src = {
+ .cmd_rcgr = 0x3d030,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_sec_ctrl_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sec_ctrl_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
static struct clk_branch gcc_aggre_ufs_phy_axi_clk = {
.halt_reg = 0x82024,
.halt_check = BRANCH_HALT_DELAY,
@@ -2406,6 +2427,7 @@ static struct clk_regmap *gcc_sc7180_clocks[] = {
[GCC_MSS_NAV_AXI_CLK] = &gcc_mss_nav_axi_clk.clkr,
[GCC_MSS_Q6_MEMNOC_AXI_CLK] = &gcc_mss_q6_memnoc_axi_clk.clkr,
[GCC_MSS_SNOC_AXI_CLK] = &gcc_mss_snoc_axi_clk.clkr,
+ [GCC_SEC_CTRL_CLK_SRC] = &gcc_sec_ctrl_clk_src.clkr,
};
static const struct qcom_reset_map gcc_sc7180_resets[] = {
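A side note on the sc7180 hunks above: they replace the hard-coded .num_parents = 4 with ARRAY_SIZE() of the parent table, so the count can never drift out of sync with gcc_parent_data_1. A minimal userspace sketch of that idiom, with the macro, struct and table entries redefined locally purely for illustration:

#include <stdio.h>

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

struct parent { const char *name; };

static const struct parent gcc_parent_data_1[] = {
	{ "bi_tcxo" },		/* placeholder entries, not the real table */
	{ "gpll0" },
	{ "gpll0_out_even" },
};

int main(void)
{
	/* stays correct if entries are added or removed, unlike a literal 4 */
	printf("num_parents = %zu\n", ARRAY_SIZE(gcc_parent_data_1));
	return 0;
}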
diff --git a/drivers/clk/qcom/gcc-sm8150.c b/drivers/clk/qcom/gcc-sm8150.c
index 732bc7c937e6..72524cf11048 100644
--- a/drivers/clk/qcom/gcc-sm8150.c
+++ b/drivers/clk/qcom/gcc-sm8150.c
@@ -1616,6 +1616,36 @@ static struct clk_branch gcc_gpu_cfg_ahb_clk = {
},
};
+static struct clk_branch gcc_gpu_gpll0_clk_src = {
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_gpll0_clk_src",
+ .parent_hws = (const struct clk_hw *[]){
+ &gpll0.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_div_clk_src = {
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(16),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_gpll0_div_clk_src",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_gpu_gpll0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_gpu_iref_clk = {
.halt_reg = 0x8c010,
.halt_check = BRANCH_HALT,
@@ -1698,6 +1728,36 @@ static struct clk_branch gcc_npu_cfg_ahb_clk = {
},
};
+static struct clk_branch gcc_npu_gpll0_clk_src = {
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(18),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_npu_gpll0_clk_src",
+ .parent_hws = (const struct clk_hw *[]){
+ &gpll0.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_npu_gpll0_div_clk_src = {
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(19),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_npu_gpll0_div_clk_src",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_npu_gpll0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_npu_trig_clk = {
.halt_reg = 0x4d00c,
.halt_check = BRANCH_VOTED,
@@ -2812,6 +2872,45 @@ static struct clk_branch gcc_ufs_card_phy_aux_hw_ctl_clk = {
},
};
+/* external clocks so add BRANCH_HALT_SKIP */
+static struct clk_branch gcc_ufs_card_rx_symbol_0_clk = {
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x7501c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_rx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* external clocks so add BRANCH_HALT_SKIP */
+static struct clk_branch gcc_ufs_card_rx_symbol_1_clk = {
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x750ac,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_rx_symbol_1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* external clocks so add BRANCH_HALT_SKIP */
+static struct clk_branch gcc_ufs_card_tx_symbol_0_clk = {
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x75018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_tx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_ufs_card_unipro_core_clk = {
.halt_reg = 0x75058,
.halt_check = BRANCH_HALT,
@@ -2992,6 +3091,45 @@ static struct clk_branch gcc_ufs_phy_phy_aux_hw_ctl_clk = {
},
};
+/* external clocks so add BRANCH_HALT_SKIP */
+static struct clk_branch gcc_ufs_phy_rx_symbol_0_clk = {
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x7701c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_rx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* external clocks so add BRANCH_HALT_SKIP */
+static struct clk_branch gcc_ufs_phy_rx_symbol_1_clk = {
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x770ac,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_rx_symbol_1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* external clocks so add BRANCH_HALT_SKIP */
+static struct clk_branch gcc_ufs_phy_tx_symbol_0_clk = {
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x77018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_tx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_ufs_phy_unipro_core_clk = {
.halt_reg = 0x77058,
.halt_check = BRANCH_HALT,
@@ -3374,12 +3512,16 @@ static struct clk_regmap *gcc_sm8150_clocks[] = {
[GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
[GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr,
[GCC_GPU_CFG_AHB_CLK] = &gcc_gpu_cfg_ahb_clk.clkr,
+ [GCC_GPU_GPLL0_CLK_SRC] = &gcc_gpu_gpll0_clk_src.clkr,
+ [GCC_GPU_GPLL0_DIV_CLK_SRC] = &gcc_gpu_gpll0_div_clk_src.clkr,
[GCC_GPU_IREF_CLK] = &gcc_gpu_iref_clk.clkr,
[GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr,
[GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr,
[GCC_NPU_AT_CLK] = &gcc_npu_at_clk.clkr,
[GCC_NPU_AXI_CLK] = &gcc_npu_axi_clk.clkr,
[GCC_NPU_CFG_AHB_CLK] = &gcc_npu_cfg_ahb_clk.clkr,
+ [GCC_NPU_GPLL0_CLK_SRC] = &gcc_npu_gpll0_clk_src.clkr,
+ [GCC_NPU_GPLL0_DIV_CLK_SRC] = &gcc_npu_gpll0_div_clk_src.clkr,
[GCC_NPU_TRIG_CLK] = &gcc_npu_trig_clk.clkr,
[GCC_PCIE0_PHY_REFGEN_CLK] = &gcc_pcie0_phy_refgen_clk.clkr,
[GCC_PCIE1_PHY_REFGEN_CLK] = &gcc_pcie1_phy_refgen_clk.clkr,
@@ -3484,6 +3626,9 @@ static struct clk_regmap *gcc_sm8150_clocks[] = {
[GCC_UFS_CARD_PHY_AUX_CLK_SRC] = &gcc_ufs_card_phy_aux_clk_src.clkr,
[GCC_UFS_CARD_PHY_AUX_HW_CTL_CLK] =
&gcc_ufs_card_phy_aux_hw_ctl_clk.clkr,
+ [GCC_UFS_CARD_RX_SYMBOL_0_CLK] = &gcc_ufs_card_rx_symbol_0_clk.clkr,
+ [GCC_UFS_CARD_RX_SYMBOL_1_CLK] = &gcc_ufs_card_rx_symbol_1_clk.clkr,
+ [GCC_UFS_CARD_TX_SYMBOL_0_CLK] = &gcc_ufs_card_tx_symbol_0_clk.clkr,
[GCC_UFS_CARD_UNIPRO_CORE_CLK] = &gcc_ufs_card_unipro_core_clk.clkr,
[GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC] =
&gcc_ufs_card_unipro_core_clk_src.clkr,
@@ -3501,6 +3646,9 @@ static struct clk_regmap *gcc_sm8150_clocks[] = {
[GCC_UFS_PHY_PHY_AUX_CLK] = &gcc_ufs_phy_phy_aux_clk.clkr,
[GCC_UFS_PHY_PHY_AUX_CLK_SRC] = &gcc_ufs_phy_phy_aux_clk_src.clkr,
[GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK] = &gcc_ufs_phy_phy_aux_hw_ctl_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK] = &gcc_ufs_phy_rx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_1_CLK] = &gcc_ufs_phy_rx_symbol_1_clk.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK] = &gcc_ufs_phy_tx_symbol_0_clk.clkr,
[GCC_UFS_PHY_UNIPRO_CORE_CLK] = &gcc_ufs_phy_unipro_core_clk.clkr,
[GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC] =
&gcc_ufs_phy_unipro_core_clk_src.clkr,
diff --git a/drivers/clk/qcom/gdsc.c b/drivers/clk/qcom/gdsc.c
index a250f59708d8..04944f11659b 100644
--- a/drivers/clk/qcom/gdsc.c
+++ b/drivers/clk/qcom/gdsc.c
@@ -11,6 +11,7 @@
#include <linux/ktime.h>
#include <linux/pm_domain.h>
#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>
#include "gdsc.h"
@@ -112,6 +113,12 @@ static int gdsc_toggle_logic(struct gdsc *sc, enum gdsc_status status)
int ret;
u32 val = (status == GDSC_ON) ? 0 : SW_COLLAPSE_MASK;
+ if (status == GDSC_ON && sc->rsupply) {
+ ret = regulator_enable(sc->rsupply);
+ if (ret < 0)
+ return ret;
+ }
+
ret = regmap_update_bits(sc->regmap, sc->gdscr, SW_COLLAPSE_MASK, val);
if (ret)
return ret;
@@ -143,6 +150,13 @@ static int gdsc_toggle_logic(struct gdsc *sc, enum gdsc_status status)
ret = gdsc_poll_status(sc, status);
WARN(ret, "%s status stuck at 'o%s'", sc->pd.name, status ? "ff" : "n");
+
+ if (!ret && status == GDSC_OFF && sc->rsupply) {
+ ret = regulator_disable(sc->rsupply);
+ if (ret < 0)
+ return ret;
+ }
+
return ret;
}
@@ -371,6 +385,15 @@ int gdsc_register(struct gdsc_desc *desc,
if (!data->domains)
return -ENOMEM;
+ for (i = 0; i < num; i++) {
+ if (!scs[i] || !scs[i]->supply)
+ continue;
+
+ scs[i]->rsupply = devm_regulator_get(dev, scs[i]->supply);
+ if (IS_ERR(scs[i]->rsupply))
+ return PTR_ERR(scs[i]->rsupply);
+ }
+
data->num_domains = num;
for (i = 0; i < num; i++) {
if (!scs[i])
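The gdsc.c change above wires an optional regulator into the power-switch sequence: the supply is enabled before the GDSC is switched on, and released only after a successful switch-off. A standalone sketch of that ordering, with the regulator and register accesses stubbed out so only the control flow mirrors the patch:

#include <stdio.h>

enum gdsc_status { GDSC_OFF, GDSC_ON };

static int regulator_enable(void)  { puts("supply on");  return 0; }
static int regulator_disable(void) { puts("supply off"); return 0; }
static int toggle_hw(enum gdsc_status s)
{
	printf("gdsc %s\n", s == GDSC_ON ? "on" : "off");
	return 0;
}

static int gdsc_toggle(enum gdsc_status status, int has_supply)
{
	int ret;

	if (status == GDSC_ON && has_supply) {
		ret = regulator_enable();
		if (ret < 0)
			return ret;
	}

	ret = toggle_hw(status);

	/* drop the supply only once the GDSC has actually gone down */
	if (!ret && status == GDSC_OFF && has_supply)
		ret = regulator_disable();

	return ret;
}

int main(void)
{
	gdsc_toggle(GDSC_ON, 1);
	gdsc_toggle(GDSC_OFF, 1);
	return 0;
}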
diff --git a/drivers/clk/qcom/gdsc.h b/drivers/clk/qcom/gdsc.h
index 64cdc8cf0d4d..c36fc26dcdff 100644
--- a/drivers/clk/qcom/gdsc.h
+++ b/drivers/clk/qcom/gdsc.h
@@ -10,6 +10,7 @@
#include <linux/pm_domain.h>
struct regmap;
+struct regulator;
struct reset_controller_dev;
/**
@@ -52,6 +53,9 @@ struct gdsc {
struct reset_controller_dev *rcdev;
unsigned int *resets;
unsigned int reset_count;
+
+ const char *supply;
+ struct regulator *rsupply;
};
struct gdsc_desc {
diff --git a/drivers/clk/qcom/mmcc-msm8996.c b/drivers/clk/qcom/mmcc-msm8996.c
index 6c7592ddf8bb..3b3aac07fb2d 100644
--- a/drivers/clk/qcom/mmcc-msm8996.c
+++ b/drivers/clk/qcom/mmcc-msm8996.c
@@ -3064,7 +3064,9 @@ static struct gdsc gpu_gx_gdsc = {
.name = "gpu_gx",
},
.pwrsts = PWRSTS_OFF_ON,
+ .parent = &gpu_gdsc.pd,
.flags = CLAMP_IO,
+ .supply = "vdd-gfx",
};
static struct clk_regmap *mmcc_msm8996_clocks[] = {
diff --git a/drivers/clk/renesas/Kconfig b/drivers/clk/renesas/Kconfig
index ac2dd92ce2ef..9eb79bf90643 100644
--- a/drivers/clk/renesas/Kconfig
+++ b/drivers/clk/renesas/Kconfig
@@ -8,6 +8,7 @@ config CLK_RENESAS
select CLK_R7S9210 if ARCH_R7S9210
select CLK_R8A73A4 if ARCH_R8A73A4
select CLK_R8A7740 if ARCH_R8A7740
+ select CLK_R8A7742 if ARCH_R8A7742
select CLK_R8A7743 if ARCH_R8A7743 || ARCH_R8A7744
select CLK_R8A7745 if ARCH_R8A7745
select CLK_R8A77470 if ARCH_R8A77470
@@ -55,6 +56,10 @@ config CLK_R8A7740
select CLK_RENESAS_CPG_MSTP
select CLK_RENESAS_DIV6
+config CLK_R8A7742
+ bool "RZ/G1H clock support" if COMPILE_TEST
+ select CLK_RCAR_GEN2_CPG
+
config CLK_R8A7743
bool "RZ/G1M clock support" if COMPILE_TEST
select CLK_RCAR_GEN2_CPG
@@ -90,12 +95,10 @@ config CLK_R8A7779
config CLK_R8A7790
bool "R-Car H2 clock support" if COMPILE_TEST
select CLK_RCAR_GEN2_CPG
- select CLK_RENESAS_DIV6
config CLK_R8A7791
bool "R-Car M2-W/N clock support" if COMPILE_TEST
select CLK_RCAR_GEN2_CPG
- select CLK_RENESAS_DIV6
config CLK_R8A7792
bool "R-Car V2H clock support" if COMPILE_TEST
@@ -104,7 +107,6 @@ config CLK_R8A7792
config CLK_R8A7794
bool "R-Car E2 clock support" if COMPILE_TEST
select CLK_RCAR_GEN2_CPG
- select CLK_RENESAS_DIV6
config CLK_R8A7795
bool "R-Car H3 clock support" if COMPILE_TEST
diff --git a/drivers/clk/renesas/Makefile b/drivers/clk/renesas/Makefile
index 4a722bc5aac7..a4066f9b34ef 100644
--- a/drivers/clk/renesas/Makefile
+++ b/drivers/clk/renesas/Makefile
@@ -5,6 +5,7 @@ obj-$(CONFIG_CLK_RZA1) += clk-rz.o
obj-$(CONFIG_CLK_R7S9210) += r7s9210-cpg-mssr.o
obj-$(CONFIG_CLK_R8A73A4) += clk-r8a73a4.o
obj-$(CONFIG_CLK_R8A7740) += clk-r8a7740.o
+obj-$(CONFIG_CLK_R8A7742) += r8a7742-cpg-mssr.o
obj-$(CONFIG_CLK_R8A7743) += r8a7743-cpg-mssr.o
obj-$(CONFIG_CLK_R8A7745) += r8a7745-cpg-mssr.o
obj-$(CONFIG_CLK_R8A77470) += r8a77470-cpg-mssr.o
diff --git a/drivers/clk/renesas/r8a7742-cpg-mssr.c b/drivers/clk/renesas/r8a7742-cpg-mssr.c
new file mode 100644
index 000000000000..e919828668a4
--- /dev/null
+++ b/drivers/clk/renesas/r8a7742-cpg-mssr.c
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * r8a7742 Clock Pulse Generator / Module Standby and Software Reset
+ *
+ * Copyright (C) 2020 Renesas Electronics Corp.
+ */
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/soc/renesas/rcar-rst.h>
+
+#include <dt-bindings/clock/r8a7742-cpg-mssr.h>
+
+#include "renesas-cpg-mssr.h"
+#include "rcar-gen2-cpg.h"
+
+enum clk_ids {
+ /* Core Clock Outputs exported to DT */
+ LAST_DT_CORE_CLK = R8A7742_CLK_OSC,
+
+ /* External Input Clocks */
+ CLK_EXTAL,
+ CLK_USB_EXTAL,
+
+ /* Internal Core Clocks */
+ CLK_MAIN,
+ CLK_PLL0,
+ CLK_PLL1,
+ CLK_PLL3,
+ CLK_PLL1_DIV2,
+
+ /* Module Clocks */
+ MOD_CLK_BASE
+};
+
+static const struct cpg_core_clk r8a7742_core_clks[] __initconst = {
+ /* External Clock Inputs */
+ DEF_INPUT("extal", CLK_EXTAL),
+ DEF_INPUT("usb_extal", CLK_USB_EXTAL),
+
+ /* Internal Core Clocks */
+ DEF_BASE(".main", CLK_MAIN, CLK_TYPE_GEN2_MAIN, CLK_EXTAL),
+ DEF_BASE(".pll0", CLK_PLL0, CLK_TYPE_GEN2_PLL0, CLK_MAIN),
+ DEF_BASE(".pll1", CLK_PLL1, CLK_TYPE_GEN2_PLL1, CLK_MAIN),
+ DEF_BASE(".pll3", CLK_PLL3, CLK_TYPE_GEN2_PLL3, CLK_MAIN),
+
+ DEF_FIXED(".pll1_div2", CLK_PLL1_DIV2, CLK_PLL1, 2, 1),
+
+ /* Core Clock Outputs */
+ DEF_BASE("z", R8A7742_CLK_Z, CLK_TYPE_GEN2_Z, CLK_PLL0),
+ DEF_BASE("lb", R8A7742_CLK_LB, CLK_TYPE_GEN2_LB, CLK_PLL1),
+ DEF_BASE("sdh", R8A7742_CLK_SDH, CLK_TYPE_GEN2_SDH, CLK_PLL1),
+ DEF_BASE("sd0", R8A7742_CLK_SD0, CLK_TYPE_GEN2_SD0, CLK_PLL1),
+ DEF_BASE("sd1", R8A7742_CLK_SD1, CLK_TYPE_GEN2_SD1, CLK_PLL1),
+ DEF_BASE("qspi", R8A7742_CLK_QSPI, CLK_TYPE_GEN2_QSPI, CLK_PLL1_DIV2),
+ DEF_BASE("rcan", R8A7742_CLK_RCAN, CLK_TYPE_GEN2_RCAN, CLK_USB_EXTAL),
+
+ DEF_FIXED("z2", R8A7742_CLK_Z2, CLK_PLL1, 2, 1),
+ DEF_FIXED("zg", R8A7742_CLK_ZG, CLK_PLL1, 3, 1),
+ DEF_FIXED("zx", R8A7742_CLK_ZX, CLK_PLL1, 3, 1),
+ DEF_FIXED("zs", R8A7742_CLK_ZS, CLK_PLL1, 6, 1),
+ DEF_FIXED("hp", R8A7742_CLK_HP, CLK_PLL1, 12, 1),
+ DEF_FIXED("b", R8A7742_CLK_B, CLK_PLL1, 12, 1),
+ DEF_FIXED("p", R8A7742_CLK_P, CLK_PLL1, 24, 1),
+ DEF_FIXED("cl", R8A7742_CLK_CL, CLK_PLL1, 48, 1),
+ DEF_FIXED("m2", R8A7742_CLK_M2, CLK_PLL1, 8, 1),
+ DEF_FIXED("zb3", R8A7742_CLK_ZB3, CLK_PLL3, 4, 1),
+ DEF_FIXED("zb3d2", R8A7742_CLK_ZB3D2, CLK_PLL3, 8, 1),
+ DEF_FIXED("ddr", R8A7742_CLK_DDR, CLK_PLL3, 8, 1),
+ DEF_FIXED("mp", R8A7742_CLK_MP, CLK_PLL1_DIV2, 15, 1),
+ DEF_FIXED("cp", R8A7742_CLK_CP, CLK_EXTAL, 2, 1),
+ DEF_FIXED("r", R8A7742_CLK_R, CLK_PLL1, 49152, 1),
+ DEF_FIXED("osc", R8A7742_CLK_OSC, CLK_PLL1, 12288, 1),
+
+ DEF_DIV6P1("sd2", R8A7742_CLK_SD2, CLK_PLL1_DIV2, 0x078),
+ DEF_DIV6P1("sd3", R8A7742_CLK_SD3, CLK_PLL1_DIV2, 0x26c),
+ DEF_DIV6P1("mmc0", R8A7742_CLK_MMC0, CLK_PLL1_DIV2, 0x240),
+ DEF_DIV6P1("mmc1", R8A7742_CLK_MMC1, CLK_PLL1_DIV2, 0x244),
+};
+
+static const struct mssr_mod_clk r8a7742_mod_clks[] __initconst = {
+ DEF_MOD("msiof0", 0, R8A7742_CLK_MP),
+ DEF_MOD("vcp1", 100, R8A7742_CLK_ZS),
+ DEF_MOD("vcp0", 101, R8A7742_CLK_ZS),
+ DEF_MOD("vpc1", 102, R8A7742_CLK_ZS),
+ DEF_MOD("vpc0", 103, R8A7742_CLK_ZS),
+ DEF_MOD("tmu1", 111, R8A7742_CLK_P),
+ DEF_MOD("3dg", 112, R8A7742_CLK_ZG),
+ DEF_MOD("2d-dmac", 115, R8A7742_CLK_ZS),
+ DEF_MOD("fdp1-2", 117, R8A7742_CLK_ZS),
+ DEF_MOD("fdp1-1", 118, R8A7742_CLK_ZS),
+ DEF_MOD("fdp1-0", 119, R8A7742_CLK_ZS),
+ DEF_MOD("tmu3", 121, R8A7742_CLK_P),
+ DEF_MOD("tmu2", 122, R8A7742_CLK_P),
+ DEF_MOD("cmt0", 124, R8A7742_CLK_R),
+ DEF_MOD("tmu0", 125, R8A7742_CLK_CP),
+ DEF_MOD("vsp1du1", 127, R8A7742_CLK_ZS),
+ DEF_MOD("vsp1du0", 128, R8A7742_CLK_ZS),
+ DEF_MOD("vsp1-sy", 131, R8A7742_CLK_ZS),
+ DEF_MOD("scifa2", 202, R8A7742_CLK_MP),
+ DEF_MOD("scifa1", 203, R8A7742_CLK_MP),
+ DEF_MOD("scifa0", 204, R8A7742_CLK_MP),
+ DEF_MOD("msiof2", 205, R8A7742_CLK_MP),
+ DEF_MOD("scifb0", 206, R8A7742_CLK_MP),
+ DEF_MOD("scifb1", 207, R8A7742_CLK_MP),
+ DEF_MOD("msiof1", 208, R8A7742_CLK_MP),
+ DEF_MOD("msiof3", 215, R8A7742_CLK_MP),
+ DEF_MOD("scifb2", 216, R8A7742_CLK_MP),
+ DEF_MOD("sys-dmac1", 218, R8A7742_CLK_ZS),
+ DEF_MOD("sys-dmac0", 219, R8A7742_CLK_ZS),
+ DEF_MOD("iic2", 300, R8A7742_CLK_HP),
+ DEF_MOD("tpu0", 304, R8A7742_CLK_CP),
+ DEF_MOD("mmcif1", 305, R8A7742_CLK_MMC1),
+ DEF_MOD("scif2", 310, R8A7742_CLK_P),
+ DEF_MOD("sdhi3", 311, R8A7742_CLK_SD3),
+ DEF_MOD("sdhi2", 312, R8A7742_CLK_SD2),
+ DEF_MOD("sdhi1", 313, R8A7742_CLK_SD1),
+ DEF_MOD("sdhi0", 314, R8A7742_CLK_SD0),
+ DEF_MOD("mmcif0", 315, R8A7742_CLK_MMC0),
+ DEF_MOD("iic0", 318, R8A7742_CLK_HP),
+ DEF_MOD("pciec", 319, R8A7742_CLK_MP),
+ DEF_MOD("iic1", 323, R8A7742_CLK_HP),
+ DEF_MOD("usb3.0", 328, R8A7742_CLK_MP),
+ DEF_MOD("cmt1", 329, R8A7742_CLK_R),
+ DEF_MOD("usbhs-dmac0", 330, R8A7742_CLK_HP),
+ DEF_MOD("usbhs-dmac1", 331, R8A7742_CLK_HP),
+ DEF_MOD("rwdt", 402, R8A7742_CLK_R),
+ DEF_MOD("irqc", 407, R8A7742_CLK_CP),
+ DEF_MOD("intc-sys", 408, R8A7742_CLK_ZS),
+ DEF_MOD("audio-dmac1", 501, R8A7742_CLK_HP),
+ DEF_MOD("audio-dmac0", 502, R8A7742_CLK_HP),
+ DEF_MOD("thermal", 522, CLK_EXTAL),
+ DEF_MOD("pwm", 523, R8A7742_CLK_P),
+ DEF_MOD("usb-ehci", 703, R8A7742_CLK_MP),
+ DEF_MOD("usbhs", 704, R8A7742_CLK_HP),
+ DEF_MOD("hscif1", 716, R8A7742_CLK_ZS),
+ DEF_MOD("hscif0", 717, R8A7742_CLK_ZS),
+ DEF_MOD("scif1", 720, R8A7742_CLK_P),
+ DEF_MOD("scif0", 721, R8A7742_CLK_P),
+ DEF_MOD("du2", 722, R8A7742_CLK_ZX),
+ DEF_MOD("du1", 723, R8A7742_CLK_ZX),
+ DEF_MOD("du0", 724, R8A7742_CLK_ZX),
+ DEF_MOD("lvds1", 725, R8A7742_CLK_ZX),
+ DEF_MOD("lvds0", 726, R8A7742_CLK_ZX),
+ DEF_MOD("r-gp2d", 807, R8A7742_CLK_ZX),
+ DEF_MOD("vin3", 808, R8A7742_CLK_ZG),
+ DEF_MOD("vin2", 809, R8A7742_CLK_ZG),
+ DEF_MOD("vin1", 810, R8A7742_CLK_ZG),
+ DEF_MOD("vin0", 811, R8A7742_CLK_ZG),
+ DEF_MOD("etheravb", 812, R8A7742_CLK_HP),
+ DEF_MOD("ether", 813, R8A7742_CLK_P),
+ DEF_MOD("sata1", 814, R8A7742_CLK_ZS),
+ DEF_MOD("sata0", 815, R8A7742_CLK_ZS),
+ DEF_MOD("imr-x2-1", 820, R8A7742_CLK_ZG),
+ DEF_MOD("imr-x2-0", 821, R8A7742_CLK_HP),
+ DEF_MOD("imr-lsx2-1", 822, R8A7742_CLK_P),
+ DEF_MOD("imr-lsx2-0", 823, R8A7742_CLK_ZS),
+ DEF_MOD("gpio5", 907, R8A7742_CLK_CP),
+ DEF_MOD("gpio4", 908, R8A7742_CLK_CP),
+ DEF_MOD("gpio3", 909, R8A7742_CLK_CP),
+ DEF_MOD("gpio2", 910, R8A7742_CLK_CP),
+ DEF_MOD("gpio1", 911, R8A7742_CLK_CP),
+ DEF_MOD("gpio0", 912, R8A7742_CLK_CP),
+ DEF_MOD("can1", 915, R8A7742_CLK_P),
+ DEF_MOD("can0", 916, R8A7742_CLK_P),
+ DEF_MOD("qspi_mod", 917, R8A7742_CLK_QSPI),
+ DEF_MOD("iicdvfs", 926, R8A7742_CLK_CP),
+ DEF_MOD("i2c3", 928, R8A7742_CLK_HP),
+ DEF_MOD("i2c2", 929, R8A7742_CLK_HP),
+ DEF_MOD("i2c1", 930, R8A7742_CLK_HP),
+ DEF_MOD("i2c0", 931, R8A7742_CLK_HP),
+ DEF_MOD("ssi-all", 1005, R8A7742_CLK_P),
+ DEF_MOD("ssi9", 1006, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi8", 1007, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi7", 1008, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi6", 1009, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi5", 1010, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi4", 1011, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi3", 1012, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi2", 1013, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi1", 1014, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi0", 1015, MOD_CLK_ID(1005)),
+ DEF_MOD("scu-all", 1017, R8A7742_CLK_P),
+ DEF_MOD("scu-dvc1", 1018, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-dvc0", 1019, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-ctu1-mix1", 1020, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-ctu0-mix0", 1021, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src9", 1022, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src8", 1023, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src7", 1024, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src6", 1025, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src5", 1026, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src4", 1027, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src3", 1028, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src2", 1029, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src1", 1030, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src0", 1031, MOD_CLK_ID(1017)),
+};
+
+static const unsigned int r8a7742_crit_mod_clks[] __initconst = {
+ MOD_CLK_ID(402), /* RWDT */
+ MOD_CLK_ID(408), /* INTC-SYS (GIC) */
+};
+
+/*
+ * CPG Clock Data
+ */
+
+/*
+ * MD EXTAL PLL0 PLL1 PLL3
+ * 14 13 19 (MHz) *1 *1
+ *---------------------------------------------------
+ * 0 0 0 15 x172/2 x208/2 x106
+ * 0 0 1 15 x172/2 x208/2 x88
+ * 0 1 0 20 x130/2 x156/2 x80
+ * 0 1 1 20 x130/2 x156/2 x66
+ * 1 0 0 26 / 2 x200/2 x240/2 x122
+ * 1 0 1 26 / 2 x200/2 x240/2 x102
+ * 1 1 0 30 / 2 x172/2 x208/2 x106
+ * 1 1 1 30 / 2 x172/2 x208/2 x88
+ *
+ * *1 : Table 7.5a indicates VCO output (PLLx = VCO/2)
+ */
+#define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 12) | \
+ (((md) & BIT(13)) >> 12) | \
+ (((md) & BIT(19)) >> 19))
+
+static const struct rcar_gen2_cpg_pll_config cpg_pll_configs[8] __initconst = {
+ /* EXTAL div PLL1 mult PLL3 mult */
+ { 1, 208, 106, },
+ { 1, 208, 88, },
+ { 1, 156, 80, },
+ { 1, 156, 66, },
+ { 2, 240, 122, },
+ { 2, 240, 102, },
+ { 2, 208, 106, },
+ { 2, 208, 88, },
+};
+
+static int __init r8a7742_cpg_mssr_init(struct device *dev)
+{
+ const struct rcar_gen2_cpg_pll_config *cpg_pll_config;
+ u32 cpg_mode;
+ int error;
+
+ error = rcar_rst_read_mode_pins(&cpg_mode);
+ if (error)
+ return error;
+
+ cpg_pll_config = &cpg_pll_configs[CPG_PLL_CONFIG_INDEX(cpg_mode)];
+
+ return rcar_gen2_cpg_init(cpg_pll_config, 2, cpg_mode);
+}
+
+const struct cpg_mssr_info r8a7742_cpg_mssr_info __initconst = {
+ /* Core Clocks */
+ .core_clks = r8a7742_core_clks,
+ .num_core_clks = ARRAY_SIZE(r8a7742_core_clks),
+ .last_dt_core_clk = LAST_DT_CORE_CLK,
+ .num_total_core_clks = MOD_CLK_BASE,
+
+ /* Module Clocks */
+ .mod_clks = r8a7742_mod_clks,
+ .num_mod_clks = ARRAY_SIZE(r8a7742_mod_clks),
+ .num_hw_mod_clks = 12 * 32,
+
+ /* Critical Module Clocks */
+ .crit_mod_clks = r8a7742_crit_mod_clks,
+ .num_crit_mod_clks = ARRAY_SIZE(r8a7742_crit_mod_clks),
+
+ /* Callbacks */
+ .init = r8a7742_cpg_mssr_init,
+ .cpg_clk_register = rcar_gen2_cpg_clk_register,
+};
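The CPG_PLL_CONFIG_INDEX() macro in the new file above packs mode pins MD14, MD13 and MD19 into a three-bit index (MD14 as bit 2, MD13 as bit 1, MD19 as bit 0) that selects one of the eight cpg_pll_configs entries. A quick userspace check of that decoding, with BIT() redefined locally for the example:

#include <stdio.h>

#define BIT(n) (1u << (n))
#define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 12) | \
				  (((md) & BIT(13)) >> 12) | \
				  (((md) & BIT(19)) >> 19))

int main(void)
{
	unsigned int md = BIT(14) | BIT(19);	/* MD14=1, MD13=0, MD19=1 */

	/* prints 5, i.e. the { 2, 240, 102 } row of cpg_pll_configs */
	printf("index = %u\n", CPG_PLL_CONFIG_INDEX(md));
	return 0;
}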
diff --git a/drivers/clk/renesas/r9a06g032-clocks.c b/drivers/clk/renesas/r9a06g032-clocks.c
index 1907ee195a08..d900f6bf53d0 100644
--- a/drivers/clk/renesas/r9a06g032-clocks.c
+++ b/drivers/clk/renesas/r9a06g032-clocks.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * R9A09G032 clock driver
+ * R9A06G032 clock driver
*
* Copyright (C) 2018 Renesas Electronics Europe Limited
*
@@ -338,8 +338,8 @@ clk_rdesc_get(struct r9a06g032_priv *clocks,
}
/*
- * This implements the R9A09G032 clock gate 'driver'. We cannot use the system's
- * clock gate framework as the gates on the R9A09G032 have a special enabling
+ * This implements the R9A06G032 clock gate 'driver'. We cannot use the system's
+ * clock gate framework as the gates on the R9A06G032 have a special enabling
* sequence, therefore we use this little proxy.
*/
struct r9a06g032_clk_gate {
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
index a2663fbbd7a5..dcb6e2706d37 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.c
+++ b/drivers/clk/renesas/renesas-cpg-mssr.c
@@ -673,6 +673,12 @@ static const struct of_device_id cpg_mssr_match[] = {
.data = &r7s9210_cpg_mssr_info,
},
#endif
+#ifdef CONFIG_CLK_R8A7742
+ {
+ .compatible = "renesas,r8a7742-cpg-mssr",
+ .data = &r8a7742_cpg_mssr_info,
+ },
+#endif
#ifdef CONFIG_CLK_R8A7743
{
.compatible = "renesas,r8a7743-cpg-mssr",
@@ -812,7 +818,8 @@ static int cpg_mssr_suspend_noirq(struct device *dev)
/* Save module registers with bits under our control */
for (reg = 0; reg < ARRAY_SIZE(priv->smstpcr_saved); reg++) {
if (priv->smstpcr_saved[reg].mask)
- priv->smstpcr_saved[reg].val =
+ priv->smstpcr_saved[reg].val = priv->stbyctrl ?
+ readb(priv->base + STBCR(reg)) :
readl(priv->base + SMSTPCR(reg));
}
@@ -872,8 +879,9 @@ static int cpg_mssr_resume_noirq(struct device *dev)
}
if (!i)
- dev_warn(dev, "Failed to enable SMSTP %p[0x%x]\n",
- priv->base + SMSTPCR(reg), oldval & mask);
+ dev_warn(dev, "Failed to enable %s%u[0x%x]\n",
+ priv->stbyctrl ? "STB" : "SMSTP", reg,
+ oldval & mask);
}
return 0;
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.h b/drivers/clk/renesas/renesas-cpg-mssr.h
index 3b852ba0ecec..55a18ef0efaf 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.h
+++ b/drivers/clk/renesas/renesas-cpg-mssr.h
@@ -155,6 +155,7 @@ struct cpg_mssr_info {
};
extern const struct cpg_mssr_info r7s9210_cpg_mssr_info;
+extern const struct cpg_mssr_info r8a7742_cpg_mssr_info;
extern const struct cpg_mssr_info r8a7743_cpg_mssr_info;
extern const struct cpg_mssr_info r8a7745_cpg_mssr_info;
extern const struct cpg_mssr_info r8a77470_cpg_mssr_info;
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index c9e5a1fb6653..fea33399a632 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -540,7 +540,7 @@ static const struct samsung_div_clock exynos5800_div_clks[] __initconst = {
static const struct samsung_gate_clock exynos5800_gate_clks[] __initconst = {
GATE(CLK_ACLK550_CAM, "aclk550_cam", "mout_user_aclk550_cam",
- GATE_BUS_TOP, 24, 0, 0),
+ GATE_BUS_TOP, 24, CLK_IS_CRITICAL, 0),
GATE(CLK_ACLK432_SCALER, "aclk432_scaler", "mout_user_aclk432_scaler",
GATE_BUS_TOP, 27, CLK_IS_CRITICAL, 0),
};
@@ -943,25 +943,25 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
GATE(0, "aclk300_jpeg", "mout_user_aclk300_jpeg",
GATE_BUS_TOP, 4, CLK_IGNORE_UNUSED, 0),
GATE(0, "aclk333_432_isp0", "mout_user_aclk333_432_isp0",
- GATE_BUS_TOP, 5, 0, 0),
+ GATE_BUS_TOP, 5, CLK_IS_CRITICAL, 0),
GATE(0, "aclk300_gscl", "mout_user_aclk300_gscl",
GATE_BUS_TOP, 6, CLK_IS_CRITICAL, 0),
GATE(0, "aclk333_432_gscl", "mout_user_aclk333_432_gscl",
GATE_BUS_TOP, 7, CLK_IGNORE_UNUSED, 0),
GATE(0, "aclk333_432_isp", "mout_user_aclk333_432_isp",
- GATE_BUS_TOP, 8, 0, 0),
+ GATE_BUS_TOP, 8, CLK_IS_CRITICAL, 0),
GATE(CLK_PCLK66_GPIO, "pclk66_gpio", "mout_user_pclk66_gpio",
GATE_BUS_TOP, 9, CLK_IGNORE_UNUSED, 0),
GATE(0, "aclk66_psgen", "mout_user_aclk66_psgen",
GATE_BUS_TOP, 10, CLK_IGNORE_UNUSED, 0),
GATE(0, "aclk266_isp", "mout_user_aclk266_isp",
- GATE_BUS_TOP, 13, 0, 0),
+ GATE_BUS_TOP, 13, CLK_IS_CRITICAL, 0),
GATE(0, "aclk166", "mout_user_aclk166",
GATE_BUS_TOP, 14, CLK_IGNORE_UNUSED, 0),
GATE(CLK_ACLK333, "aclk333", "mout_user_aclk333",
GATE_BUS_TOP, 15, CLK_IS_CRITICAL, 0),
GATE(0, "aclk400_isp", "mout_user_aclk400_isp",
- GATE_BUS_TOP, 16, 0, 0),
+ GATE_BUS_TOP, 16, CLK_IS_CRITICAL, 0),
GATE(0, "aclk400_mscl", "mout_user_aclk400_mscl",
GATE_BUS_TOP, 17, CLK_IS_CRITICAL, 0),
GATE(0, "aclk200_disp1", "mout_user_aclk200_disp1",
@@ -1161,9 +1161,11 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
GATE_IP_GSCL1, 3, 0, 0),
GATE(CLK_SMMU_FIMCL1, "smmu_fimcl1", "dout_gscl_blk_333",
GATE_IP_GSCL1, 4, 0, 0),
- GATE(CLK_GSCL_WA, "gscl_wa", "sclk_gscl_wa", GATE_IP_GSCL1, 12, 0, 0),
- GATE(CLK_GSCL_WB, "gscl_wb", "sclk_gscl_wb", GATE_IP_GSCL1, 13, 0, 0),
- GATE(CLK_SMMU_FIMCL3, "smmu_fimcl3,", "dout_gscl_blk_333",
+ GATE(CLK_GSCL_WA, "gscl_wa", "sclk_gscl_wa", GATE_IP_GSCL1, 12,
+ CLK_IS_CRITICAL, 0),
+ GATE(CLK_GSCL_WB, "gscl_wb", "sclk_gscl_wb", GATE_IP_GSCL1, 13,
+ CLK_IS_CRITICAL, 0),
+ GATE(CLK_SMMU_FIMCL3, "smmu_fimcl3", "dout_gscl_blk_333",
GATE_IP_GSCL1, 16, 0, 0),
GATE(CLK_FIMC_LITE3, "fimc_lite3", "aclk333_432_gscl",
GATE_IP_GSCL1, 17, 0, 0),
diff --git a/drivers/clk/samsung/clk-exynos5433.c b/drivers/clk/samsung/clk-exynos5433.c
index 4b1aa9382ad2..6f29ecd0442e 100644
--- a/drivers/clk/samsung/clk-exynos5433.c
+++ b/drivers/clk/samsung/clk-exynos5433.c
@@ -1706,7 +1706,8 @@ static const struct samsung_gate_clock peric_gate_clks[] __initconst = {
GATE(CLK_SCLK_PCM1, "sclk_pcm1", "sclk_pcm1_peric",
ENABLE_SCLK_PERIC, 7, CLK_SET_RATE_PARENT, 0),
GATE(CLK_SCLK_I2S1, "sclk_i2s1", "sclk_i2s1_peric",
- ENABLE_SCLK_PERIC, 6, CLK_SET_RATE_PARENT, 0),
+ ENABLE_SCLK_PERIC, 6,
+ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
GATE(CLK_SCLK_SPI2, "sclk_spi2", "sclk_spi2_peric", ENABLE_SCLK_PERIC,
5, CLK_SET_RATE_PARENT, 0),
GATE(CLK_SCLK_SPI1, "sclk_spi1", "sclk_spi1_peric", ENABLE_SCLK_PERIC,
diff --git a/drivers/clk/samsung/clk-s3c2443.c b/drivers/clk/samsung/clk-s3c2443.c
index 5f30fe72cd51..c7aba1e1af70 100644
--- a/drivers/clk/samsung/clk-s3c2443.c
+++ b/drivers/clk/samsung/clk-s3c2443.c
@@ -387,7 +387,7 @@ void __init s3c2443_common_clk_init(struct device_node *np, unsigned long xti_f,
ARRAY_SIZE(s3c2450_gates));
samsung_clk_register_alias(ctx, s3c2450_aliases,
ARRAY_SIZE(s3c2450_aliases));
- /* fall through - as s3c2450 extends the s3c2416 clocks */
+ fallthrough; /* as s3c2450 extends the s3c2416 clocks */
case S3C2416:
samsung_clk_register_div(ctx, s3c2416_dividers,
ARRAY_SIZE(s3c2416_dividers));
diff --git a/drivers/clk/socfpga/Makefile b/drivers/clk/socfpga/Makefile
index ce5aa7802eb8..bf736f8d201a 100644
--- a/drivers/clk/socfpga/Makefile
+++ b/drivers/clk/socfpga/Makefile
@@ -3,3 +3,5 @@ obj-$(CONFIG_ARCH_SOCFPGA) += clk.o clk-gate.o clk-pll.o clk-periph.o
obj-$(CONFIG_ARCH_SOCFPGA) += clk-pll-a10.o clk-periph-a10.o clk-gate-a10.o
obj-$(CONFIG_ARCH_STRATIX10) += clk-s10.o
obj-$(CONFIG_ARCH_STRATIX10) += clk-pll-s10.o clk-periph-s10.o clk-gate-s10.o
+obj-$(CONFIG_ARCH_AGILEX) += clk-agilex.o
+obj-$(CONFIG_ARCH_AGILEX) += clk-pll-s10.o clk-periph-s10.o clk-gate-s10.o
diff --git a/drivers/clk/socfpga/clk-agilex.c b/drivers/clk/socfpga/clk-agilex.c
new file mode 100644
index 000000000000..699527f7e764
--- /dev/null
+++ b/drivers/clk/socfpga/clk-agilex.c
@@ -0,0 +1,454 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019, Intel Corporation
+ */
+#include <linux/slab.h>
+#include <linux/clk-provider.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/clock/agilex-clock.h>
+
+#include "stratix10-clk.h"
+
+static const struct clk_parent_data pll_mux[] = {
+ { .fw_name = "osc1",
+ .name = "osc1", },
+ { .fw_name = "cb-intosc-hs-div2-clk",
+ .name = "cb-intosc-hs-div2-clk", },
+ { .fw_name = "f2s-free-clk",
+ .name = "f2s-free-clk", },
+};
+
+static const struct clk_parent_data cntr_mux[] = {
+ { .fw_name = "main_pll",
+ .name = "main_pll", },
+ { .fw_name = "periph_pll",
+ .name = "periph_pll", },
+ { .fw_name = "osc1",
+ .name = "osc1", },
+ { .fw_name = "cb-intosc-hs-div2-clk",
+ .name = "cb-intosc-hs-div2-clk", },
+ { .fw_name = "f2s-free-clk",
+ .name = "f2s-free-clk", },
+};
+
+static const struct clk_parent_data boot_mux[] = {
+ { .fw_name = "osc1",
+ .name = "osc1", },
+ { .fw_name = "cb-intosc-hs-div2-clk",
+ .name = "cb-intosc-hs-div2-clk", },
+};
+
+static const struct clk_parent_data mpu_free_mux[] = {
+ { .fw_name = "main_pll_c0",
+ .name = "main_pll_c0", },
+ { .fw_name = "peri_pll_c0",
+ .name = "peri_pll_c0", },
+ { .fw_name = "osc1",
+ .name = "osc1", },
+ { .fw_name = "cb-intosc-hs-div2-clk",
+ .name = "cb-intosc-hs-div2-clk", },
+ { .fw_name = "f2s-free-clk",
+ .name = "f2s-free-clk", },
+};
+
+static const struct clk_parent_data noc_free_mux[] = {
+ { .fw_name = "main_pll_c1",
+ .name = "main_pll_c1", },
+ { .fw_name = "peri_pll_c1",
+ .name = "peri_pll_c1", },
+ { .fw_name = "osc1",
+ .name = "osc1", },
+ { .fw_name = "cb-intosc-hs-div2-clk",
+ .name = "cb-intosc-hs-div2-clk", },
+ { .fw_name = "f2s-free-clk",
+ .name = "f2s-free-clk", },
+};
+
+static const struct clk_parent_data emaca_free_mux[] = {
+ { .fw_name = "main_pll_c2",
+ .name = "main_pll_c2", },
+ { .fw_name = "peri_pll_c2",
+ .name = "peri_pll_c2", },
+ { .fw_name = "osc1",
+ .name = "osc1", },
+ { .fw_name = "cb-intosc-hs-div2-clk",
+ .name = "cb-intosc-hs-div2-clk", },
+ { .fw_name = "f2s-free-clk",
+ .name = "f2s-free-clk", },
+};
+
+static const struct clk_parent_data emacb_free_mux[] = {
+ { .fw_name = "main_pll_c3",
+ .name = "main_pll_c3", },
+ { .fw_name = "peri_pll_c3",
+ .name = "peri_pll_c3", },
+ { .fw_name = "osc1",
+ .name = "osc1", },
+ { .fw_name = "cb-intosc-hs-div2-clk",
+ .name = "cb-intosc-hs-div2-clk", },
+ { .fw_name = "f2s-free-clk",
+ .name = "f2s-free-clk", },
+};
+
+static const struct clk_parent_data emac_ptp_free_mux[] = {
+ { .fw_name = "main_pll_c3",
+ .name = "main_pll_c3", },
+ { .fw_name = "peri_pll_c3",
+ .name = "peri_pll_c3", },
+ { .fw_name = "osc1",
+ .name = "osc1", },
+ { .fw_name = "cb-intosc-hs-div2-clk",
+ .name = "cb-intosc-hs-div2-clk", },
+ { .fw_name = "f2s-free-clk",
+ .name = "f2s-free-clk", },
+};
+
+static const struct clk_parent_data gpio_db_free_mux[] = {
+ { .fw_name = "main_pll_c3",
+ .name = "main_pll_c3", },
+ { .fw_name = "peri_pll_c3",
+ .name = "peri_pll_c3", },
+ { .fw_name = "osc1",
+ .name = "osc1", },
+ { .fw_name = "cb-intosc-hs-div2-clk",
+ .name = "cb-intosc-hs-div2-clk", },
+ { .fw_name = "f2s-free-clk",
+ .name = "f2s-free-clk", },
+};
+
+static const struct clk_parent_data psi_ref_free_mux[] = {
+ { .fw_name = "main_pll_c3",
+ .name = "main_pll_c3", },
+ { .fw_name = "peri_pll_c3",
+ .name = "peri_pll_c3", },
+ { .fw_name = "osc1",
+ .name = "osc1", },
+ { .fw_name = "cb-intosc-hs-div2-clk",
+ .name = "cb-intosc-hs-div2-clk", },
+ { .fw_name = "f2s-free-clk",
+ .name = "f2s-free-clk", },
+};
+
+static const struct clk_parent_data sdmmc_free_mux[] = {
+ { .fw_name = "main_pll_c3",
+ .name = "main_pll_c3", },
+ { .fw_name = "peri_pll_c3",
+ .name = "peri_pll_c3", },
+ { .fw_name = "osc1",
+ .name = "osc1", },
+ { .fw_name = "cb-intosc-hs-div2-clk",
+ .name = "cb-intosc-hs-div2-clk", },
+ { .fw_name = "f2s-free-clk",
+ .name = "f2s-free-clk", },
+};
+
+static const struct clk_parent_data s2f_usr0_free_mux[] = {
+ { .fw_name = "main_pll_c2",
+ .name = "main_pll_c2", },
+ { .fw_name = "peri_pll_c2",
+ .name = "peri_pll_c2", },
+ { .fw_name = "osc1",
+ .name = "osc1", },
+ { .fw_name = "cb-intosc-hs-div2-clk",
+ .name = "cb-intosc-hs-div2-clk", },
+ { .fw_name = "f2s-free-clk",
+ .name = "f2s-free-clk", },
+};
+
+static const struct clk_parent_data s2f_usr1_free_mux[] = {
+ { .fw_name = "main_pll_c2",
+ .name = "main_pll_c2", },
+ { .fw_name = "peri_pll_c2",
+ .name = "peri_pll_c2", },
+ { .fw_name = "osc1",
+ .name = "osc1", },
+ { .fw_name = "cb-intosc-hs-div2-clk",
+ .name = "cb-intosc-hs-div2-clk", },
+ { .fw_name = "f2s-free-clk",
+ .name = "f2s-free-clk", },
+};
+
+static const struct clk_parent_data mpu_mux[] = {
+ { .fw_name = "mpu_free_clk",
+ .name = "mpu_free_clk", },
+ { .fw_name = "boot_clk",
+ .name = "boot_clk", },
+};
+
+static const struct clk_parent_data s2f_usr0_mux[] = {
+ { .fw_name = "f2s-free-clk",
+ .name = "f2s-free-clk", },
+ { .fw_name = "boot_clk",
+ .name = "boot_clk", },
+};
+
+static const struct clk_parent_data emac_mux[] = {
+ { .fw_name = "emaca_free_clk",
+ .name = "emaca_free_clk", },
+ { .fw_name = "emacb_free_clk",
+ .name = "emacb_free_clk", },
+};
+
+static const struct clk_parent_data noc_mux[] = {
+ { .fw_name = "noc_free_clk",
+ .name = "noc_free_clk", },
+ { .fw_name = "boot_clk",
+ .name = "boot_clk", },
+};
+
+/* clocks in AO (always on) controller */
+static const struct stratix10_pll_clock agilex_pll_clks[] = {
+ { AGILEX_BOOT_CLK, "boot_clk", boot_mux, ARRAY_SIZE(boot_mux), 0,
+ 0x0},
+ { AGILEX_MAIN_PLL_CLK, "main_pll", pll_mux, ARRAY_SIZE(pll_mux),
+ 0, 0x48},
+ { AGILEX_PERIPH_PLL_CLK, "periph_pll", pll_mux, ARRAY_SIZE(pll_mux),
+ 0, 0x9c},
+};
+
+static const struct stratix10_perip_c_clock agilex_main_perip_c_clks[] = {
+ { AGILEX_MAIN_PLL_C0_CLK, "main_pll_c0", "main_pll", NULL, 1, 0, 0x58},
+ { AGILEX_MAIN_PLL_C1_CLK, "main_pll_c1", "main_pll", NULL, 1, 0, 0x5C},
+ { AGILEX_MAIN_PLL_C2_CLK, "main_pll_c2", "main_pll", NULL, 1, 0, 0x64},
+ { AGILEX_MAIN_PLL_C3_CLK, "main_pll_c3", "main_pll", NULL, 1, 0, 0x68},
+ { AGILEX_PERIPH_PLL_C0_CLK, "peri_pll_c0", "periph_pll", NULL, 1, 0, 0xAC},
+ { AGILEX_PERIPH_PLL_C1_CLK, "peri_pll_c1", "periph_pll", NULL, 1, 0, 0xB0},
+ { AGILEX_PERIPH_PLL_C2_CLK, "peri_pll_c2", "periph_pll", NULL, 1, 0, 0xB8},
+ { AGILEX_PERIPH_PLL_C3_CLK, "peri_pll_c3", "periph_pll", NULL, 1, 0, 0xBC},
+};
+
+static const struct stratix10_perip_cnt_clock agilex_main_perip_cnt_clks[] = {
+ { AGILEX_MPU_FREE_CLK, "mpu_free_clk", NULL, mpu_free_mux, ARRAY_SIZE(mpu_free_mux),
+ 0, 0x3C, 0, 0, 0},
+ { AGILEX_NOC_FREE_CLK, "noc_free_clk", NULL, noc_free_mux, ARRAY_SIZE(noc_free_mux),
+ 0, 0x40, 0, 0, 1},
+ { AGILEX_L4_SYS_FREE_CLK, "l4_sys_free_clk", "noc_free_clk", NULL, 1, 0,
+ 0, 4, 0, 0},
+ { AGILEX_NOC_CLK, "noc_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux),
+ 0, 0, 0, 0x30, 1},
+ { AGILEX_EMAC_A_FREE_CLK, "emaca_free_clk", NULL, emaca_free_mux, ARRAY_SIZE(emaca_free_mux),
+ 0, 0xD4, 0, 0x88, 0},
+ { AGILEX_EMAC_B_FREE_CLK, "emacb_free_clk", NULL, emacb_free_mux, ARRAY_SIZE(emacb_free_mux),
+ 0, 0xD8, 0, 0x88, 1},
+ { AGILEX_EMAC_PTP_FREE_CLK, "emac_ptp_free_clk", NULL, emac_ptp_free_mux,
+ ARRAY_SIZE(emac_ptp_free_mux), 0, 0xDC, 0, 0x88, 2},
+ { AGILEX_GPIO_DB_FREE_CLK, "gpio_db_free_clk", NULL, gpio_db_free_mux,
+ ARRAY_SIZE(gpio_db_free_mux), 0, 0xE0, 0, 0x88, 3},
+ { AGILEX_SDMMC_FREE_CLK, "sdmmc_free_clk", NULL, sdmmc_free_mux,
+ ARRAY_SIZE(sdmmc_free_mux), 0, 0xE4, 0, 0x88, 4},
+ { AGILEX_S2F_USER0_FREE_CLK, "s2f_user0_free_clk", NULL, s2f_usr0_free_mux,
+ ARRAY_SIZE(s2f_usr0_free_mux), 0, 0xE8, 0, 0, 0},
+ { AGILEX_S2F_USER1_FREE_CLK, "s2f_user1_free_clk", NULL, s2f_usr1_free_mux,
+ ARRAY_SIZE(s2f_usr1_free_mux), 0, 0xEC, 0, 0x88, 5},
+ { AGILEX_PSI_REF_FREE_CLK, "psi_ref_free_clk", NULL, psi_ref_free_mux,
+ ARRAY_SIZE(psi_ref_free_mux), 0, 0xF0, 0, 0x88, 6},
+};
+
+static const struct stratix10_gate_clock agilex_gate_clks[] = {
+ { AGILEX_MPU_CLK, "mpu_clk", NULL, mpu_mux, ARRAY_SIZE(mpu_mux), 0, 0x24,
+ 0, 0, 0, 0, 0x30, 0, 0},
+ { AGILEX_MPU_PERIPH_CLK, "mpu_periph_clk", "mpu_clk", NULL, 1, 0, 0x24,
+ 0, 0, 0, 0, 0, 0, 4},
+ { AGILEX_MPU_L2RAM_CLK, "mpu_l2ram_clk", "mpu_clk", NULL, 1, 0, 0x24,
+ 0, 0, 0, 0, 0, 0, 2},
+ { AGILEX_L4_MAIN_CLK, "l4_main_clk", "noc_clk", NULL, 1, 0, 0x24,
+ 1, 0x44, 0, 2, 0, 0, 0},
+ { AGILEX_L4_MP_CLK, "l4_mp_clk", "noc_clk", NULL, 1, 0, 0x24,
+ 2, 0x44, 8, 2, 0, 0, 0},
+ /*
+ * The l4_sp_clk feeds a 100 MHz clock to various peripherals, including
+ * the SP timers, so it must not be gated.
+ */
+ { AGILEX_L4_SP_CLK, "l4_sp_clk", "noc_clk", NULL, 1, CLK_IS_CRITICAL, 0x24,
+ 3, 0x44, 16, 2, 0, 0, 0},
+ { AGILEX_CS_AT_CLK, "cs_at_clk", "noc_clk", NULL, 1, 0, 0x24,
+ 4, 0x44, 24, 2, 0, 0, 0},
+ { AGILEX_CS_TRACE_CLK, "cs_trace_clk", "noc_clk", NULL, 1, 0, 0x24,
+ 4, 0x44, 26, 2, 0, 0, 0},
+ { AGILEX_CS_PDBG_CLK, "cs_pdbg_clk", "cs_at_clk", NULL, 1, 0, 0x24,
+ 4, 0x44, 28, 1, 0, 0, 0},
+ { AGILEX_CS_TIMER_CLK, "cs_timer_clk", "noc_clk", NULL, 1, 0, 0x24,
+ 5, 0, 0, 0, 0, 0, 0},
+ { AGILEX_S2F_USER0_CLK, "s2f_user0_clk", NULL, s2f_usr0_mux, ARRAY_SIZE(s2f_usr0_mux), 0, 0x24,
+ 6, 0, 0, 0, 0, 0, 0},
+ { AGILEX_EMAC0_CLK, "emac0_clk", NULL, emac_mux, ARRAY_SIZE(emac_mux), 0, 0x7C,
+ 0, 0, 0, 0, 0x94, 26, 0},
+ { AGILEX_EMAC1_CLK, "emac1_clk", NULL, emac_mux, ARRAY_SIZE(emac_mux), 0, 0x7C,
+ 1, 0, 0, 0, 0x94, 27, 0},
+ { AGILEX_EMAC2_CLK, "emac2_clk", NULL, emac_mux, ARRAY_SIZE(emac_mux), 0, 0x7C,
+ 2, 0, 0, 0, 0x94, 28, 0},
+ { AGILEX_EMAC_PTP_CLK, "emac_ptp_clk", "emac_ptp_free_clk", NULL, 1, 0, 0x7C,
+ 3, 0, 0, 0, 0, 0, 0},
+ { AGILEX_GPIO_DB_CLK, "gpio_db_clk", "gpio_db_free_clk", NULL, 1, 0, 0x7C,
+ 4, 0x98, 0, 16, 0, 0, 0},
+ { AGILEX_SDMMC_CLK, "sdmmc_clk", "sdmmc_free_clk", NULL, 1, 0, 0x7C,
+ 5, 0, 0, 0, 0, 0, 4},
+ { AGILEX_S2F_USER1_CLK, "s2f_user1_clk", "s2f_user1_free_clk", NULL, 1, 0, 0x7C,
+ 6, 0, 0, 0, 0, 0, 0},
+ { AGILEX_PSI_REF_CLK, "psi_ref_clk", "psi_ref_free_clk", NULL, 1, 0, 0x7C,
+ 7, 0, 0, 0, 0, 0, 0},
+ { AGILEX_USB_CLK, "usb_clk", "l4_mp_clk", NULL, 1, 0, 0x7C,
+ 8, 0, 0, 0, 0, 0, 0},
+ { AGILEX_SPI_M_CLK, "spi_m_clk", "l4_mp_clk", NULL, 1, 0, 0x7C,
+ 9, 0, 0, 0, 0, 0, 0},
+ { AGILEX_NAND_CLK, "nand_clk", "l4_main_clk", NULL, 1, 0, 0x7C,
+ 10, 0, 0, 0, 0, 0, 0},
+};
+
+static int agilex_clk_register_c_perip(const struct stratix10_perip_c_clock *clks,
+ int nums, struct stratix10_clock_data *data)
+{
+ struct clk *clk;
+ void __iomem *base = data->base;
+ int i;
+
+ for (i = 0; i < nums; i++) {
+ clk = s10_register_periph(&clks[i], base);
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to register clock %s\n",
+ __func__, clks[i].name);
+ continue;
+ }
+ data->clk_data.clks[clks[i].id] = clk;
+ }
+ return 0;
+}
+
+static int agilex_clk_register_cnt_perip(const struct stratix10_perip_cnt_clock *clks,
+ int nums, struct stratix10_clock_data *data)
+{
+ struct clk *clk;
+ void __iomem *base = data->base;
+ int i;
+
+ for (i = 0; i < nums; i++) {
+ clk = s10_register_cnt_periph(&clks[i], base);
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to register clock %s\n",
+ __func__, clks[i].name);
+ continue;
+ }
+ data->clk_data.clks[clks[i].id] = clk;
+ }
+
+ return 0;
+}
+
+static int agilex_clk_register_gate(const struct stratix10_gate_clock *clks, int nums, struct stratix10_clock_data *data)
+{
+ struct clk *clk;
+ void __iomem *base = data->base;
+ int i;
+
+ for (i = 0; i < nums; i++) {
+ clk = s10_register_gate(&clks[i], base);
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to register clock %s\n",
+ __func__, clks[i].name);
+ continue;
+ }
+ data->clk_data.clks[clks[i].id] = clk;
+ }
+
+ return 0;
+}
+
+static int agilex_clk_register_pll(const struct stratix10_pll_clock *clks,
+ int nums, struct stratix10_clock_data *data)
+{
+ struct clk *clk;
+ void __iomem *base = data->base;
+ int i;
+
+ for (i = 0; i < nums; i++) {
+ clk = agilex_register_pll(&clks[i], base);
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to register clock %s\n",
+ __func__, clks[i].name);
+ continue;
+ }
+ data->clk_data.clks[clks[i].id] = clk;
+ }
+
+ return 0;
+}
+
+static struct stratix10_clock_data *__socfpga_agilex_clk_init(struct platform_device *pdev,
+ int nr_clks)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct stratix10_clock_data *clk_data;
+ struct clk **clk_table;
+ struct resource *res;
+ void __iomem *base;
+ int ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return ERR_CAST(base);
+
+ clk_data = devm_kzalloc(dev, sizeof(*clk_data), GFP_KERNEL);
+ if (!clk_data)
+ return ERR_PTR(-ENOMEM);
+
+ clk_data->base = base;
+ clk_table = devm_kcalloc(dev, nr_clks, sizeof(*clk_table), GFP_KERNEL);
+ if (!clk_table)
+ return ERR_PTR(-ENOMEM);
+
+ clk_data->clk_data.clks = clk_table;
+ clk_data->clk_data.clk_num = nr_clks;
+ ret = of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data->clk_data);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return clk_data;
+}
+
+static int agilex_clkmgr_probe(struct platform_device *pdev)
+{
+ struct stratix10_clock_data *clk_data;
+
+ clk_data = __socfpga_agilex_clk_init(pdev, AGILEX_NUM_CLKS);
+ if (IS_ERR(clk_data))
+ return PTR_ERR(clk_data);
+
+ agilex_clk_register_pll(agilex_pll_clks, ARRAY_SIZE(agilex_pll_clks), clk_data);
+
+ agilex_clk_register_c_perip(agilex_main_perip_c_clks,
+ ARRAY_SIZE(agilex_main_perip_c_clks), clk_data);
+
+ agilex_clk_register_cnt_perip(agilex_main_perip_cnt_clks,
+ ARRAY_SIZE(agilex_main_perip_cnt_clks),
+ clk_data);
+
+ agilex_clk_register_gate(agilex_gate_clks, ARRAY_SIZE(agilex_gate_clks),
+ clk_data);
+ return 0;
+}
+
+static const struct of_device_id agilex_clkmgr_match_table[] = {
+ { .compatible = "intel,agilex-clkmgr",
+ .data = agilex_clkmgr_probe },
+ { }
+};
+
+static struct platform_driver agilex_clkmgr_driver = {
+ .probe = agilex_clkmgr_probe,
+ .driver = {
+ .name = "agilex-clkmgr",
+ .suppress_bind_attrs = true,
+ .of_match_table = agilex_clkmgr_match_table,
+ },
+};
+
+static int __init agilex_clk_init(void)
+{
+ return platform_driver_register(&agilex_clkmgr_driver);
+}
+core_initcall(agilex_clk_init);
diff --git a/drivers/clk/socfpga/clk-gate-s10.c b/drivers/clk/socfpga/clk-gate-s10.c
index 8be4722f6064..083b2ec21fdd 100644
--- a/drivers/clk/socfpga/clk-gate-s10.c
+++ b/drivers/clk/socfpga/clk-gate-s10.c
@@ -70,7 +70,6 @@ struct clk *s10_register_gate(const struct stratix10_gate_clock *clks, void __io
struct clk *clk;
struct socfpga_gate_clk *socfpga_clk;
struct clk_init_data init;
- const char * const *parent_names = clks->parent_names;
const char *parent_name = clks->parent_name;
socfpga_clk = kzalloc(sizeof(*socfpga_clk), GFP_KERNEL);
@@ -108,7 +107,9 @@ struct clk *s10_register_gate(const struct stratix10_gate_clock *clks, void __io
init.flags = clks->flags;
init.num_parents = clks->num_parents;
- init.parent_names = parent_names ? parent_names : &parent_name;
+ init.parent_names = parent_name ? &parent_name : NULL;
+ if (init.parent_names == NULL)
+ init.parent_data = clks->parent_data;
socfpga_clk->hw.hw.init = &init;
clk = clk_register(NULL, &socfpga_clk->hw.hw);
diff --git a/drivers/clk/socfpga/clk-periph-s10.c b/drivers/clk/socfpga/clk-periph-s10.c
index dd6d4056e9de..397b77b89b16 100644
--- a/drivers/clk/socfpga/clk-periph-s10.c
+++ b/drivers/clk/socfpga/clk-periph-s10.c
@@ -81,7 +81,6 @@ struct clk *s10_register_periph(const struct stratix10_perip_c_clock *clks,
struct clk_init_data init;
const char *name = clks->name;
const char *parent_name = clks->parent_name;
- const char * const *parent_names = clks->parent_names;
periph_clk = kzalloc(sizeof(*periph_clk), GFP_KERNEL);
if (WARN_ON(!periph_clk))
@@ -94,7 +93,9 @@ struct clk *s10_register_periph(const struct stratix10_perip_c_clock *clks,
init.flags = clks->flags;
init.num_parents = clks->num_parents;
- init.parent_names = parent_names ? parent_names : &parent_name;
+ init.parent_names = parent_name ? &parent_name : NULL;
+ if (init.parent_names == NULL)
+ init.parent_data = clks->parent_data;
periph_clk->hw.hw.init = &init;
@@ -114,7 +115,6 @@ struct clk *s10_register_cnt_periph(const struct stratix10_perip_cnt_clock *clks
struct clk_init_data init;
const char *name = clks->name;
const char *parent_name = clks->parent_name;
- const char * const *parent_names = clks->parent_names;
periph_clk = kzalloc(sizeof(*periph_clk), GFP_KERNEL);
if (WARN_ON(!periph_clk))
@@ -137,7 +137,9 @@ struct clk *s10_register_cnt_periph(const struct stratix10_perip_cnt_clock *clks
init.flags = clks->flags;
init.num_parents = clks->num_parents;
- init.parent_names = parent_names ? parent_names : &parent_name;
+ init.parent_names = parent_name ? &parent_name : NULL;
+ if (init.parent_names == NULL)
+ init.parent_data = clks->parent_data;
periph_clk->hw.hw.init = &init;
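The gate and periph registration helpers above now fall back to parent_data whenever no single legacy parent_name string is set, so multi-parent muxes can be described with struct clk_parent_data (a DT fw_name plus a global-name fallback). A small sketch of that selection logic using stand-in types, not the kernel's clk_init_data:

#include <stdio.h>
#include <stddef.h>

struct parent_data { const char *fw_name; const char *name; };

struct init_data {
	const char *const *parent_names;	/* legacy single-string path */
	const struct parent_data *parent_data;	/* multi-parent path */
};

static void pick_parents(struct init_data *init, const char *const *single,
			 const struct parent_data *table)
{
	init->parent_names = *single ? single : NULL;
	if (init->parent_names == NULL)
		init->parent_data = table;
}

int main(void)
{
	static const struct parent_data boot_mux[] = {
		{ .fw_name = "osc1", .name = "osc1" },
		{ .fw_name = "cb-intosc-hs-div2-clk", .name = "cb-intosc-hs-div2-clk" },
	};
	const char *parent_name = NULL;		/* mux clock: no single parent */
	struct init_data init = { 0 };

	pick_parents(&init, &parent_name, boot_mux);
	printf("uses parent_data: %s\n", init.parent_data ? "yes" : "no");
	return 0;
}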
diff --git a/drivers/clk/socfpga/clk-pll-a10.c b/drivers/clk/socfpga/clk-pll-a10.c
index 3816fc04b274..db54f7d806a0 100644
--- a/drivers/clk/socfpga/clk-pll-a10.c
+++ b/drivers/clk/socfpga/clk-pll-a10.c
@@ -58,7 +58,7 @@ static u8 clk_pll_get_parent(struct clk_hw *hwclk)
CLK_MGR_PLL_CLK_SRC_MASK;
}
-static struct clk_ops clk_pll_ops = {
+static const struct clk_ops clk_pll_ops = {
.recalc_rate = clk_pll_recalc_rate,
.get_parent = clk_pll_get_parent,
};
@@ -102,8 +102,6 @@ static struct clk * __init __socfpga_pll_init(struct device_node *node,
pll_clk->hw.hw.init = &init;
pll_clk->hw.bit_idx = SOCFPGA_PLL_EXT_ENA;
- clk_pll_ops.enable = clk_gate_ops.enable;
- clk_pll_ops.disable = clk_gate_ops.disable;
clk = clk_register(NULL, &pll_clk->hw.hw);
if (WARN_ON(IS_ERR(clk))) {
diff --git a/drivers/clk/socfpga/clk-pll-s10.c b/drivers/clk/socfpga/clk-pll-s10.c
index a301bb22f36c..4e268953b7da 100644
--- a/drivers/clk/socfpga/clk-pll-s10.c
+++ b/drivers/clk/socfpga/clk-pll-s10.c
@@ -18,8 +18,12 @@
#define SOCFPGA_PLL_RESET_MASK 0x2
#define SOCFPGA_PLL_REFDIV_MASK 0x00003F00
#define SOCFPGA_PLL_REFDIV_SHIFT 8
+#define SOCFPGA_PLL_AREFDIV_MASK 0x00000F00
+#define SOCFPGA_PLL_DREFDIV_MASK 0x00003000
+#define SOCFPGA_PLL_DREFDIV_SHIFT 12
#define SOCFPGA_PLL_MDIV_MASK 0xFF000000
#define SOCFPGA_PLL_MDIV_SHIFT 24
+#define SOCFPGA_AGILEX_PLL_MDIV_MASK 0x000003FF
#define SWCTRLBTCLKSEL_MASK 0x200
#define SWCTRLBTCLKSEL_SHIFT 9
@@ -27,6 +31,27 @@
#define to_socfpga_clk(p) container_of(p, struct socfpga_pll, hw.hw)
+static unsigned long agilex_clk_pll_recalc_rate(struct clk_hw *hwclk,
+ unsigned long parent_rate)
+{
+ struct socfpga_pll *socfpgaclk = to_socfpga_clk(hwclk);
+ unsigned long arefdiv, reg, mdiv;
+ unsigned long long vco_freq;
+
+ /* read VCO1 reg for numerator and denominator */
+ reg = readl(socfpgaclk->hw.reg);
+ arefdiv = (reg & SOCFPGA_PLL_AREFDIV_MASK) >> SOCFPGA_PLL_REFDIV_SHIFT;
+
+ vco_freq = (unsigned long long)parent_rate / arefdiv;
+
+ /* Read mdiv and fdiv from the fdbck register */
+ reg = readl(socfpgaclk->hw.reg + 0x24);
+ mdiv = reg & SOCFPGA_AGILEX_PLL_MDIV_MASK;
+
+ vco_freq = (unsigned long long)vco_freq * mdiv;
+ return (unsigned long)vco_freq;
+}
+
static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk,
unsigned long parent_rate)
{
@@ -98,13 +123,19 @@ static int clk_pll_prepare(struct clk_hw *hwclk)
return 0;
}
-static struct clk_ops clk_pll_ops = {
+static const struct clk_ops agilex_clk_pll_ops = {
+ .recalc_rate = agilex_clk_pll_recalc_rate,
+ .get_parent = clk_pll_get_parent,
+ .prepare = clk_pll_prepare,
+};
+
+static const struct clk_ops clk_pll_ops = {
.recalc_rate = clk_pll_recalc_rate,
.get_parent = clk_pll_get_parent,
.prepare = clk_pll_prepare,
};
-static struct clk_ops clk_boot_ops = {
+static const struct clk_ops clk_boot_ops = {
.recalc_rate = clk_boot_clk_recalc_rate,
.get_parent = clk_boot_get_parent,
.prepare = clk_pll_prepare,
@@ -117,7 +148,6 @@ struct clk *s10_register_pll(const struct stratix10_pll_clock *clks,
struct socfpga_pll *pll_clk;
struct clk_init_data init;
const char *name = clks->name;
- const char * const *parent_names = clks->parent_names;
pll_clk = kzalloc(sizeof(*pll_clk), GFP_KERNEL);
if (WARN_ON(!pll_clk))
@@ -134,12 +164,48 @@ struct clk *s10_register_pll(const struct stratix10_pll_clock *clks,
init.flags = clks->flags;
init.num_parents = clks->num_parents;
- init.parent_names = parent_names;
+ init.parent_names = NULL;
+ init.parent_data = clks->parent_data;
+ pll_clk->hw.hw.init = &init;
+
+ pll_clk->hw.bit_idx = SOCFPGA_PLL_POWER;
+
+ clk = clk_register(NULL, &pll_clk->hw.hw);
+ if (WARN_ON(IS_ERR(clk))) {
+ kfree(pll_clk);
+ return NULL;
+ }
+ return clk;
+}
+
+struct clk *agilex_register_pll(const struct stratix10_pll_clock *clks,
+ void __iomem *reg)
+{
+ struct clk *clk;
+ struct socfpga_pll *pll_clk;
+ struct clk_init_data init;
+ const char *name = clks->name;
+
+ pll_clk = kzalloc(sizeof(*pll_clk), GFP_KERNEL);
+ if (WARN_ON(!pll_clk))
+ return NULL;
+
+ pll_clk->hw.reg = reg + clks->offset;
+
+ if (streq(name, SOCFPGA_BOOT_CLK))
+ init.ops = &clk_boot_ops;
+ else
+ init.ops = &agilex_clk_pll_ops;
+
+ init.name = name;
+ init.flags = clks->flags;
+
+ init.num_parents = clks->num_parents;
+ init.parent_names = NULL;
+ init.parent_data = clks->parent_data;
pll_clk->hw.hw.init = &init;
pll_clk->hw.bit_idx = SOCFPGA_PLL_POWER;
- clk_pll_ops.enable = clk_gate_ops.enable;
- clk_pll_ops.disable = clk_gate_ops.disable;
clk = clk_register(NULL, &pll_clk->hw.hw);
if (WARN_ON(IS_ERR(clk))) {
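For reference, the agilex_clk_pll_recalc_rate() helper added above computes the VCO rate as (parent_rate / arefdiv) * mdiv, with arefdiv taken from bits [11:8] of the VCO1 register and mdiv from bits [9:0] of the feedback register. A userspace check of that arithmetic with made-up register values (only the mask and shift definitions are copied from the patch):

#include <stdio.h>

#define SOCFPGA_PLL_AREFDIV_MASK	0x00000F00
#define SOCFPGA_PLL_REFDIV_SHIFT	8
#define SOCFPGA_AGILEX_PLL_MDIV_MASK	0x000003FF

int main(void)
{
	unsigned long parent_rate = 25000000UL;	/* e.g. a 25 MHz osc1 */
	unsigned long vco1 = 0x100;		/* arefdiv = 1 */
	unsigned long fdbck = 96;		/* mdiv = 96 */
	unsigned long arefdiv = (vco1 & SOCFPGA_PLL_AREFDIV_MASK) >> SOCFPGA_PLL_REFDIV_SHIFT;
	unsigned long mdiv = fdbck & SOCFPGA_AGILEX_PLL_MDIV_MASK;
	unsigned long long vco = (unsigned long long)(parent_rate / arefdiv) * mdiv;

	printf("vco = %llu Hz\n", vco);		/* 2400000000 Hz */
	return 0;
}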
diff --git a/drivers/clk/socfpga/clk-pll.c b/drivers/clk/socfpga/clk-pll.c
index dc65cc0fd3bd..e5fb786843f3 100644
--- a/drivers/clk/socfpga/clk-pll.c
+++ b/drivers/clk/socfpga/clk-pll.c
@@ -65,7 +65,7 @@ static u8 clk_pll_get_parent(struct clk_hw *hwclk)
CLK_MGR_PLL_CLK_SRC_MASK;
}
-static struct clk_ops clk_pll_ops = {
+static const struct clk_ops clk_pll_ops = {
.recalc_rate = clk_pll_recalc_rate,
.get_parent = clk_pll_get_parent,
};
@@ -105,8 +105,6 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node,
pll_clk->hw.hw.init = &init;
pll_clk->hw.bit_idx = SOCFPGA_PLL_EXT_ENA;
- clk_pll_ops.enable = clk_gate_ops.enable;
- clk_pll_ops.disable = clk_gate_ops.disable;
clk = clk_register(NULL, &pll_clk->hw.hw);
if (WARN_ON(IS_ERR(clk))) {
diff --git a/drivers/clk/socfpga/clk-s10.c b/drivers/clk/socfpga/clk-s10.c
index dea7c6c7d269..c1dfc9b34e4e 100644
--- a/drivers/clk/socfpga/clk-s10.c
+++ b/drivers/clk/socfpga/clk-s10.c
@@ -12,35 +12,137 @@
#include "stratix10-clk.h"
-static const char * const pll_mux[] = { "osc1", "cb-intosc-hs-div2-clk",
- "f2s-free-clk",};
-static const char * const cntr_mux[] = { "main_pll", "periph_pll",
- "osc1", "cb-intosc-hs-div2-clk",
- "f2s-free-clk"};
-static const char * const boot_mux[] = { "osc1", "cb-intosc-hs-div2-clk",};
-
-static const char * const noc_free_mux[] = {"main_noc_base_clk",
- "peri_noc_base_clk",
- "osc1", "cb-intosc-hs-div2-clk",
- "f2s-free-clk"};
-
-static const char * const emaca_free_mux[] = {"peri_emaca_clk", "boot_clk"};
-static const char * const emacb_free_mux[] = {"peri_emacb_clk", "boot_clk"};
-static const char * const emac_ptp_free_mux[] = {"peri_emac_ptp_clk", "boot_clk"};
-static const char * const gpio_db_free_mux[] = {"peri_gpio_db_clk", "boot_clk"};
-static const char * const sdmmc_free_mux[] = {"main_sdmmc_clk", "boot_clk"};
-static const char * const s2f_usr1_free_mux[] = {"peri_s2f_usr1_clk", "boot_clk"};
-static const char * const psi_ref_free_mux[] = {"peri_psi_ref_clk", "boot_clk"};
-static const char * const mpu_mux[] = { "mpu_free_clk", "boot_clk",};
-
-static const char * const s2f_usr0_mux[] = {"f2s-free-clk", "boot_clk"};
-static const char * const emac_mux[] = {"emaca_free_clk", "emacb_free_clk"};
-static const char * const noc_mux[] = {"noc_free_clk", "boot_clk"};
-
-static const char * const mpu_free_mux[] = {"main_mpu_base_clk",
- "peri_mpu_base_clk",
- "osc1", "cb-intosc-hs-div2-clk",
- "f2s-free-clk"};
+static const struct clk_parent_data pll_mux[] = {
+ { .fw_name = "osc1",
+ .name = "osc1" },
+ { .fw_name = "cb-intosc-hs-div2-clk",
+ .name = "cb-intosc-hs-div2-clk" },
+ { .fw_name = "f2s-free-clk",
+ .name = "f2s-free-clk" },
+};
+
+static const struct clk_parent_data cntr_mux[] = {
+ { .fw_name = "main_pll",
+ .name = "main_pll", },
+ { .fw_name = "periph_pll",
+ .name = "periph_pll", },
+ { .fw_name = "osc1",
+ .name = "osc1", },
+ { .fw_name = "cb-intosc-hs-div2-clk",
+ .name = "cb-intosc-hs-div2-clk", },
+ { .fw_name = "f2s-free-clk",
+ .name = "f2s-free-clk", },
+};
+
+static const struct clk_parent_data boot_mux[] = {
+ { .fw_name = "osc1",
+ .name = "osc1" },
+ { .fw_name = "cb-intosc-hs-div2-clk",
+ .name = "cb-intosc-hs-div2-clk" },
+};
+
+static const struct clk_parent_data noc_free_mux[] = {
+ { .fw_name = "main_noc_base_clk",
+ .name = "main_noc_base_clk", },
+ { .fw_name = "peri_noc_base_clk",
+ .name = "peri_noc_base_clk", },
+ { .fw_name = "osc1",
+ .name = "osc1", },
+ { .fw_name = "cb-intosc-hs-div2-clk",
+ .name = "cb-intosc-hs-div2-clk", },
+ { .fw_name = "f2s-free-clk",
+ .name = "f2s-free-clk", },
+};
+
+static const struct clk_parent_data emaca_free_mux[] = {
+ { .fw_name = "peri_emaca_clk",
+ .name = "peri_emaca_clk", },
+ { .fw_name = "boot_clk",
+ .name = "boot_clk", },
+};
+
+static const struct clk_parent_data emacb_free_mux[] = {
+ { .fw_name = "peri_emacb_clk",
+ .name = "peri_emacb_clk", },
+ { .fw_name = "boot_clk",
+ .name = "boot_clk", },
+};
+
+static const struct clk_parent_data emac_ptp_free_mux[] = {
+ { .fw_name = "peri_emac_ptp_clk",
+ .name = "peri_emac_ptp_clk", },
+ { .fw_name = "boot_clk",
+ .name = "boot_clk", },
+};
+
+static const struct clk_parent_data gpio_db_free_mux[] = {
+ { .fw_name = "peri_gpio_db_clk",
+ .name = "peri_gpio_db_clk", },
+ { .fw_name = "boot_clk",
+ .name = "boot_clk", },
+};
+
+static const struct clk_parent_data sdmmc_free_mux[] = {
+ { .fw_name = "main_sdmmc_clk",
+ .name = "main_sdmmc_clk", },
+ { .fw_name = "boot_clk",
+ .name = "boot_clk", },
+};
+
+static const struct clk_parent_data s2f_usr1_free_mux[] = {
+ { .fw_name = "peri_s2f_usr1_clk",
+ .name = "peri_s2f_usr1_clk", },
+ { .fw_name = "boot_clk",
+ .name = "boot_clk", },
+};
+
+static const struct clk_parent_data psi_ref_free_mux[] = {
+ { .fw_name = "peri_psi_ref_clk",
+ .name = "peri_psi_ref_clk", },
+ { .fw_name = "boot_clk",
+ .name = "boot_clk", },
+};
+
+static const struct clk_parent_data mpu_mux[] = {
+ { .fw_name = "mpu_free_clk",
+ .name = "mpu_free_clk", },
+ { .fw_name = "boot_clk",
+ .name = "boot_clk", },
+};
+
+static const struct clk_parent_data s2f_usr0_mux[] = {
+ { .fw_name = "f2s-free-clk",
+ .name = "f2s-free-clk", },
+ { .fw_name = "boot_clk",
+ .name = "boot_clk", },
+};
+
+static const struct clk_parent_data emac_mux[] = {
+ { .fw_name = "emaca_free_clk",
+ .name = "emaca_free_clk", },
+ { .fw_name = "emacb_free_clk",
+ .name = "emacb_free_clk", },
+};
+
+static const struct clk_parent_data noc_mux[] = {
+ { .fw_name = "noc_free_clk",
+ .name = "noc_free_clk", },
+ { .fw_name = "boot_clk",
+ .name = "boot_clk", },
+};
+
+static const struct clk_parent_data mpu_free_mux[] = {
+ { .fw_name = "main_mpu_base_clk",
+ .name = "main_mpu_base_clk", },
+ { .fw_name = "peri_mpu_base_clk",
+ .name = "peri_mpu_base_clk", },
+ { .fw_name = "osc1",
+ .name = "osc1", },
+ { .fw_name = "cb-intosc-hs-div2-clk",
+ .name = "cb-intosc-hs-div2-clk", },
+ { .fw_name = "f2s-free-clk",
+ .name = "f2s-free-clk", },
+};
/* clocks in AO (always on) controller */
static const struct stratix10_pll_clock s10_pll_clks[] = {
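
The tables above replace the old parent-name string arrays with struct clk_parent_data, so the clock framework can resolve each parent either through the consumer's DT "clocks"/"clock-names" entry (.fw_name) or, as a fallback, through the legacy global clock name (.name). For illustration only, a minimal sketch of how such a table is fed through clk_init_data (hypothetical clock name, placeholder ops; the driver's own registration helpers do the equivalent internally):

	struct clk_init_data init = {
		.name        = "noc_clk",           /* hypothetical */
		.ops         = &clk_mux_ops,        /* placeholder ops */
		.parent_data = noc_mux,             /* table defined above */
		.num_parents = ARRAY_SIZE(noc_mux),
	};
	/* the helper then embeds this in a clk_hw and calls clk_hw_register() */
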
diff --git a/drivers/clk/socfpga/stratix10-clk.h b/drivers/clk/socfpga/stratix10-clk.h
index fcabef42249c..f9d5d724c694 100644
--- a/drivers/clk/socfpga/stratix10-clk.h
+++ b/drivers/clk/socfpga/stratix10-clk.h
@@ -14,7 +14,7 @@ struct stratix10_clock_data {
struct stratix10_pll_clock {
unsigned int id;
const char *name;
- const char *const *parent_names;
+ const struct clk_parent_data *parent_data;
u8 num_parents;
unsigned long flags;
unsigned long offset;
@@ -24,7 +24,7 @@ struct stratix10_perip_c_clock {
unsigned int id;
const char *name;
const char *parent_name;
- const char *const *parent_names;
+ const struct clk_parent_data *parent_data;
u8 num_parents;
unsigned long flags;
unsigned long offset;
@@ -34,7 +34,7 @@ struct stratix10_perip_cnt_clock {
unsigned int id;
const char *name;
const char *parent_name;
- const char *const *parent_names;
+ const struct clk_parent_data *parent_data;
u8 num_parents;
unsigned long flags;
unsigned long offset;
@@ -47,7 +47,7 @@ struct stratix10_gate_clock {
unsigned int id;
const char *name;
const char *parent_name;
- const char *const *parent_names;
+ const struct clk_parent_data *parent_data;
u8 num_parents;
unsigned long flags;
unsigned long gate_reg;
@@ -62,6 +62,8 @@ struct stratix10_gate_clock {
struct clk *s10_register_pll(const struct stratix10_pll_clock *,
void __iomem *);
+struct clk *agilex_register_pll(const struct stratix10_pll_clock *,
+ void __iomem *);
struct clk *s10_register_periph(const struct stratix10_perip_c_clock *,
void __iomem *);
struct clk *s10_register_cnt_periph(const struct stratix10_perip_cnt_clock *,
diff --git a/drivers/clk/sprd/gate.c b/drivers/clk/sprd/gate.c
index 574cfc116bbc..56e1714b541e 100644
--- a/drivers/clk/sprd/gate.c
+++ b/drivers/clk/sprd/gate.c
@@ -94,8 +94,15 @@ static int sprd_gate_is_enabled(struct clk_hw *hw)
{
struct sprd_gate *sg = hw_to_sprd_gate(hw);
struct sprd_clk_common *common = &sg->common;
+ struct clk_hw *parent;
unsigned int reg;
+ if (sg->flags & SPRD_GATE_NON_AON) {
+ parent = clk_hw_get_parent(hw);
+ if (!parent || !clk_hw_is_enabled(parent))
+ return 0;
+ }
+
regmap_read(common->regmap, common->reg, &reg);
if (sg->flags & CLK_GATE_SET_TO_DISABLE)
diff --git a/drivers/clk/sprd/gate.h b/drivers/clk/sprd/gate.h
index b55817869367..e738dafa4fe9 100644
--- a/drivers/clk/sprd/gate.h
+++ b/drivers/clk/sprd/gate.h
@@ -19,6 +19,15 @@ struct sprd_gate {
struct sprd_clk_common common;
};
+/*
+ * sprd_gate->flags is used for:
+ * CLK_GATE_SET_TO_DISABLE BIT(0)
+ * CLK_GATE_HIWORD_MASK BIT(1)
+ * CLK_GATE_BIG_ENDIAN BIT(2)
+ * so we define new flags from BIT(3)
+ */
+#define SPRD_GATE_NON_AON BIT(3) /* not always powered on, check before reading */
+
#define SPRD_SC_GATE_CLK_HW_INIT_FN(_struct, _name, _parent, _reg, \
_sc_offset, _enable_mask, _flags, \
_gate_flags, _udelay, _ops, _fn) \
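
SPRD_GATE_NON_AON marks gate clocks whose registers sit outside the always-on domain, so the sprd_gate_is_enabled() change above skips the register read while the parent (and therefore the power domain feeding it) is off. A hedged sketch of declaring such a gate, using a hypothetical clock name but the same macro arguments as the camera-sensor gates added below:

	static SPRD_GATE_CLK_HW(example_csi_clk, "example-csi-clk",
				&mahb_ckg_eb.common.hw, /* parent inside the MM domain */
				0x20, BIT(16), 0, SPRD_GATE_NON_AON);
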
diff --git a/drivers/clk/sprd/pll.c b/drivers/clk/sprd/pll.c
index 15791484388f..13a322b2535a 100644
--- a/drivers/clk/sprd/pll.c
+++ b/drivers/clk/sprd/pll.c
@@ -106,7 +106,7 @@ static unsigned long _sprd_pll_recalc_rate(const struct sprd_pll *pll,
cfg = kcalloc(regs_num, sizeof(*cfg), GFP_KERNEL);
if (!cfg)
- return -ENOMEM;
+ return parent_rate;
for (i = 0; i < regs_num; i++)
cfg[i] = sprd_pll_read(pll, i);
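
The early-return fix above matters because .recalc_rate() returns an unsigned long, so an errno would be reinterpreted as an absurd rate. A quick worked illustration:

	/*
	 * (unsigned long)-ENOMEM == (unsigned long)-12 == 4294967284 on a
	 * 32-bit build, i.e. the core would believe the PLL runs at ~4.29 GHz.
	 * Returning parent_rate instead keeps the reported rate sane when the
	 * temporary register buffer cannot be allocated.
	 */
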
diff --git a/drivers/clk/sprd/sc9863a-clk.c b/drivers/clk/sprd/sc9863a-clk.c
index 2e2dfb2d48ff..ad2e0f9f8563 100644
--- a/drivers/clk/sprd/sc9863a-clk.c
+++ b/drivers/clk/sprd/sc9863a-clk.c
@@ -23,22 +23,22 @@
#include "pll.h"
/* mpll*_gate clocks control the CPU cores; they are enabled by default */
-SPRD_PLL_SC_GATE_CLK_FW_NAME(mpll0_gate, "mpll0-gate", "ext-26m", 0x94,
- 0x1000, BIT(0), CLK_IGNORE_UNUSED, 0, 240);
-SPRD_PLL_SC_GATE_CLK_FW_NAME(dpll0_gate, "dpll0-gate", "ext-26m", 0x98,
- 0x1000, BIT(0), 0, 0, 240);
-SPRD_PLL_SC_GATE_CLK_FW_NAME(lpll_gate, "lpll-gate", "ext-26m", 0x9c,
- 0x1000, BIT(0), 0, 0, 240);
-SPRD_PLL_SC_GATE_CLK_FW_NAME(gpll_gate, "gpll-gate", "ext-26m", 0xa8,
- 0x1000, BIT(0), 0, 0, 240);
-SPRD_PLL_SC_GATE_CLK_FW_NAME(dpll1_gate, "dpll1-gate", "ext-26m", 0x1dc,
- 0x1000, BIT(0), 0, 0, 240);
-SPRD_PLL_SC_GATE_CLK_FW_NAME(mpll1_gate, "mpll1-gate", "ext-26m", 0x1e0,
- 0x1000, BIT(0), CLK_IGNORE_UNUSED, 0, 240);
-SPRD_PLL_SC_GATE_CLK_FW_NAME(mpll2_gate, "mpll2-gate", "ext-26m", 0x1e4,
- 0x1000, BIT(0), CLK_IGNORE_UNUSED, 0, 240);
-SPRD_PLL_SC_GATE_CLK_FW_NAME(isppll_gate, "isppll-gate", "ext-26m", 0x1e8,
- 0x1000, BIT(0), 0, 0, 240);
+static SPRD_PLL_SC_GATE_CLK_FW_NAME(mpll0_gate, "mpll0-gate", "ext-26m", 0x94,
+ 0x1000, BIT(0), CLK_IGNORE_UNUSED, 0, 240);
+static SPRD_PLL_SC_GATE_CLK_FW_NAME(dpll0_gate, "dpll0-gate", "ext-26m", 0x98,
+ 0x1000, BIT(0), 0, 0, 240);
+static SPRD_PLL_SC_GATE_CLK_FW_NAME(lpll_gate, "lpll-gate", "ext-26m", 0x9c,
+ 0x1000, BIT(0), 0, 0, 240);
+static SPRD_PLL_SC_GATE_CLK_FW_NAME(gpll_gate, "gpll-gate", "ext-26m", 0xa8,
+ 0x1000, BIT(0), 0, 0, 240);
+static SPRD_PLL_SC_GATE_CLK_FW_NAME(dpll1_gate, "dpll1-gate", "ext-26m", 0x1dc,
+ 0x1000, BIT(0), 0, 0, 240);
+static SPRD_PLL_SC_GATE_CLK_FW_NAME(mpll1_gate, "mpll1-gate", "ext-26m", 0x1e0,
+ 0x1000, BIT(0), CLK_IGNORE_UNUSED, 0, 240);
+static SPRD_PLL_SC_GATE_CLK_FW_NAME(mpll2_gate, "mpll2-gate", "ext-26m", 0x1e4,
+ 0x1000, BIT(0), CLK_IGNORE_UNUSED, 0, 240);
+static SPRD_PLL_SC_GATE_CLK_FW_NAME(isppll_gate, "isppll-gate", "ext-26m",
+ 0x1e8, 0x1000, BIT(0), 0, 0, 240);
static struct sprd_clk_common *sc9863a_pmu_gate_clks[] = {
/* address base is 0x402b0000 */
@@ -1615,6 +1615,36 @@ static const struct sprd_clk_desc sc9863a_mm_gate_desc = {
.hw_clks = &sc9863a_mm_gate_hws,
};
+/* camera sensor clocks */
+static SPRD_GATE_CLK_HW(mipi_csi_clk, "mipi-csi-clk", &mahb_ckg_eb.common.hw,
+ 0x20, BIT(16), 0, SPRD_GATE_NON_AON);
+static SPRD_GATE_CLK_HW(mipi_csi_s_clk, "mipi-csi-s-clk", &mahb_ckg_eb.common.hw,
+ 0x24, BIT(16), 0, SPRD_GATE_NON_AON);
+static SPRD_GATE_CLK_HW(mipi_csi_m_clk, "mipi-csi-m-clk", &mahb_ckg_eb.common.hw,
+ 0x28, BIT(16), 0, SPRD_GATE_NON_AON);
+
+static struct sprd_clk_common *sc9863a_mm_clk_clks[] = {
+ /* address base is 0x60900000 */
+ &mipi_csi_clk.common,
+ &mipi_csi_s_clk.common,
+ &mipi_csi_m_clk.common,
+};
+
+static struct clk_hw_onecell_data sc9863a_mm_clk_hws = {
+ .hws = {
+ [CLK_MIPI_CSI] = &mipi_csi_clk.common.hw,
+ [CLK_MIPI_CSI_S] = &mipi_csi_s_clk.common.hw,
+ [CLK_MIPI_CSI_M] = &mipi_csi_m_clk.common.hw,
+ },
+ .num = CLK_MM_CLK_NUM,
+};
+
+static const struct sprd_clk_desc sc9863a_mm_clk_desc = {
+ .clk_clks = sc9863a_mm_clk_clks,
+ .num_clk_clks = ARRAY_SIZE(sc9863a_mm_clk_clks),
+ .hw_clks = &sc9863a_mm_clk_hws,
+};
+
static SPRD_SC_GATE_CLK_FW_NAME(sim0_eb, "sim0-eb", "ext-26m", 0x0,
0x1000, BIT(0), 0, 0);
static SPRD_SC_GATE_CLK_FW_NAME(iis0_eb, "iis0-eb", "ext-26m", 0x0,
@@ -1738,6 +1768,8 @@ static const struct of_device_id sprd_sc9863a_clk_ids[] = {
.data = &sc9863a_aonapb_gate_desc },
{ .compatible = "sprd,sc9863a-mm-gate", /* 0x60800000 */
.data = &sc9863a_mm_gate_desc },
+ { .compatible = "sprd,sc9863a-mm-clk", /* 0x60900000 */
+ .data = &sc9863a_mm_clk_desc },
{ .compatible = "sprd,sc9863a-apapb-gate", /* 0x71300000 */
.data = &sc9863a_apapb_gate_desc },
{ }
diff --git a/drivers/clk/st/clk-flexgen.c b/drivers/clk/st/clk-flexgen.c
index 4413b6e04a8e..55873d4b7603 100644
--- a/drivers/clk/st/clk-flexgen.c
+++ b/drivers/clk/st/clk-flexgen.c
@@ -375,6 +375,7 @@ static void __init st_of_flexgen_setup(struct device_node *np)
break;
}
+ flex_flags &= ~CLK_IS_CRITICAL;
of_clk_detect_critical(np, i, &flex_flags);
/*
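
flex_flags is reused for every clock in the surrounding registration loop, so without the clearing added above a CLK_IS_CRITICAL bit detected for one clock would leak into all clocks registered after it. An illustration of the failure mode (hypothetical indices):

	/*
	 * i = 3: of_clk_detect_critical() sets CLK_IS_CRITICAL in flex_flags
	 * i = 4..n: the stale bit is still set, so clocks that are not listed
	 *           in "clock-critical" are registered as critical anyway and
	 *           can never be gated.
	 */
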
diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c
index 27201fd26e44..e1aa1fbac48a 100644
--- a/drivers/clk/sunxi/clk-sunxi.c
+++ b/drivers/clk/sunxi/clk-sunxi.c
@@ -90,7 +90,7 @@ static void sun6i_a31_get_pll1_factors(struct factors_request *req)
* Round down the frequency to the closest multiple of either
* 6 or 16
*/
- u32 round_freq_6 = round_down(freq_mhz, 6);
+ u32 round_freq_6 = rounddown(freq_mhz, 6);
u32 round_freq_16 = round_down(freq_mhz, 16);
if (round_freq_6 > round_freq_16)
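
round_down() only handles power-of-two alignments (it masks with the alignment minus one), which 6 is not, whereas rounddown() performs a true modulo. A worked example, assuming freq_mhz = 1001:

	/*
	 * round_down(1001, 6) = 1001 & ~(6 - 1) = 1000  (not a multiple of 6)
	 * rounddown(1001, 6)  = 1001 - (1001 % 6) = 996 (correct)
	 */
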
diff --git a/drivers/clk/tegra/Kconfig b/drivers/clk/tegra/Kconfig
index 4d99a8770485..deaa4605824c 100644
--- a/drivers/clk/tegra/Kconfig
+++ b/drivers/clk/tegra/Kconfig
@@ -1,8 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-config TEGRA_CLK_EMC
- def_bool y
- depends on TEGRA124_EMC
-
config CLK_TEGRA_BPMP
def_bool y
depends on TEGRA_BPMP
diff --git a/drivers/clk/tegra/Makefile b/drivers/clk/tegra/Makefile
index 1f7c30f87ece..eec2313fd37e 100644
--- a/drivers/clk/tegra/Makefile
+++ b/drivers/clk/tegra/Makefile
@@ -13,8 +13,8 @@ obj-y += clk-super.o
obj-y += clk-tegra-audio.o
obj-y += clk-tegra-periph.o
obj-y += clk-tegra-fixed.o
+obj-y += clk-tegra-super-cclk.o
obj-y += clk-tegra-super-gen4.o
-obj-$(CONFIG_TEGRA_CLK_EMC) += clk-emc.o
obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += clk-tegra20.o
obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += clk-tegra20-emc.o
obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += clk-tegra30.o
@@ -22,8 +22,10 @@ obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += clk-tegra20-emc.o
obj-$(CONFIG_ARCH_TEGRA_114_SOC) += clk-tegra114.o
obj-$(CONFIG_ARCH_TEGRA_124_SOC) += clk-tegra124.o
obj-$(CONFIG_TEGRA_CLK_DFLL) += clk-tegra124-dfll-fcpu.o
+obj-$(CONFIG_TEGRA124_EMC) += clk-tegra124-emc.o
obj-$(CONFIG_ARCH_TEGRA_132_SOC) += clk-tegra124.o
obj-y += cvb.o
obj-$(CONFIG_ARCH_TEGRA_210_SOC) += clk-tegra210.o
+obj-$(CONFIG_ARCH_TEGRA_210_SOC) += clk-tegra210-emc.o
obj-$(CONFIG_CLK_TEGRA_BPMP) += clk-bpmp.o
obj-y += clk-utils.o
diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c
index 531c2b3d814e..0b212cf2e794 100644
--- a/drivers/clk/tegra/clk-pll.c
+++ b/drivers/clk/tegra/clk-pll.c
@@ -744,13 +744,19 @@ static int _program_pll(struct clk_hw *hw, struct tegra_clk_pll_freq_table *cfg,
state = clk_pll_is_enabled(hw);
+ if (state && pll->params->pre_rate_change) {
+ ret = pll->params->pre_rate_change();
+ if (WARN_ON(ret))
+ return ret;
+ }
+
_get_pll_mnp(pll, &old_cfg);
if (state && pll->params->defaults_set && pll->params->dyn_ramp &&
(cfg->m == old_cfg.m) && (cfg->p == old_cfg.p)) {
ret = pll->params->dyn_ramp(pll, cfg);
if (!ret)
- return 0;
+ goto done;
}
if (state) {
@@ -772,6 +778,10 @@ static int _program_pll(struct clk_hw *hw, struct tegra_clk_pll_freq_table *cfg,
pll_clk_start_ss(pll);
}
+done:
+ if (state && pll->params->post_rate_change)
+ pll->params->post_rate_change();
+
return ret;
}
diff --git a/drivers/clk/tegra/clk-tegra-super-cclk.c b/drivers/clk/tegra/clk-tegra-super-cclk.c
new file mode 100644
index 000000000000..a03119c30456
--- /dev/null
+++ b/drivers/clk/tegra/clk-tegra-super-cclk.c
@@ -0,0 +1,212 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Based on clk-super.c
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Based on older tegra20-cpufreq driver by Colin Cross <ccross@google.com>
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author: Dmitry Osipenko <digetx@gmail.com>
+ * Copyright (C) 2019 GRATE-DRIVER project
+ */
+
+#include <linux/bits.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "clk.h"
+
+#define PLLP_INDEX 4
+#define PLLX_INDEX 8
+
+#define SUPER_CDIV_ENB BIT(31)
+
+static struct tegra_clk_super_mux *cclk_super;
+static bool cclk_on_pllx;
+
+static u8 cclk_super_get_parent(struct clk_hw *hw)
+{
+ return tegra_clk_super_ops.get_parent(hw);
+}
+
+static int cclk_super_set_parent(struct clk_hw *hw, u8 index)
+{
+ return tegra_clk_super_ops.set_parent(hw, index);
+}
+
+static int cclk_super_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ return tegra_clk_super_ops.set_rate(hw, rate, parent_rate);
+}
+
+static unsigned long cclk_super_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ if (cclk_super_get_parent(hw) == PLLX_INDEX)
+ return parent_rate;
+
+ return tegra_clk_super_ops.recalc_rate(hw, parent_rate);
+}
+
+static int cclk_super_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_hw *pllp_hw = clk_hw_get_parent_by_index(hw, PLLP_INDEX);
+ struct clk_hw *pllx_hw = clk_hw_get_parent_by_index(hw, PLLX_INDEX);
+ struct tegra_clk_super_mux *super = to_clk_super_mux(hw);
+ unsigned long pllp_rate;
+ long rate = req->rate;
+
+ if (WARN_ON_ONCE(!pllp_hw || !pllx_hw))
+ return -EINVAL;
+
+ /*
+ * Switch parent to PLLP for all CCLK rates that are suitable for PLLP.
+ * PLLX will be disabled in this case, saving some power.
+ */
+ pllp_rate = clk_hw_get_rate(pllp_hw);
+
+ if (rate <= pllp_rate) {
+ if (super->flags & TEGRA20_SUPER_CLK)
+ rate = pllp_rate;
+ else
+ rate = tegra_clk_super_ops.round_rate(hw, rate,
+ &pllp_rate);
+
+ req->best_parent_rate = pllp_rate;
+ req->best_parent_hw = pllp_hw;
+ req->rate = rate;
+ } else {
+ rate = clk_hw_round_rate(pllx_hw, rate);
+ req->best_parent_rate = rate;
+ req->best_parent_hw = pllx_hw;
+ req->rate = rate;
+ }
+
+ if (WARN_ON_ONCE(rate <= 0))
+ return -EINVAL;
+
+ return 0;
+}
+
+static const struct clk_ops tegra_cclk_super_ops = {
+ .get_parent = cclk_super_get_parent,
+ .set_parent = cclk_super_set_parent,
+ .set_rate = cclk_super_set_rate,
+ .recalc_rate = cclk_super_recalc_rate,
+ .determine_rate = cclk_super_determine_rate,
+};
+
+static const struct clk_ops tegra_cclk_super_mux_ops = {
+ .get_parent = cclk_super_get_parent,
+ .set_parent = cclk_super_set_parent,
+ .determine_rate = cclk_super_determine_rate,
+};
+
+struct clk *tegra_clk_register_super_cclk(const char *name,
+ const char * const *parent_names, u8 num_parents,
+ unsigned long flags, void __iomem *reg, u8 clk_super_flags,
+ spinlock_t *lock)
+{
+ struct tegra_clk_super_mux *super;
+ struct clk *clk;
+ struct clk_init_data init;
+ u32 val;
+
+ if (WARN_ON(cclk_super))
+ return ERR_PTR(-EBUSY);
+
+ super = kzalloc(sizeof(*super), GFP_KERNEL);
+ if (!super)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.flags = flags;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+
+ super->reg = reg;
+ super->lock = lock;
+ super->width = 4;
+ super->flags = clk_super_flags;
+ super->hw.init = &init;
+
+ if (super->flags & TEGRA20_SUPER_CLK) {
+ init.ops = &tegra_cclk_super_mux_ops;
+ } else {
+ init.ops = &tegra_cclk_super_ops;
+
+ super->frac_div.reg = reg + 4;
+ super->frac_div.shift = 16;
+ super->frac_div.width = 8;
+ super->frac_div.frac_width = 1;
+ super->frac_div.lock = lock;
+ super->div_ops = &tegra_clk_frac_div_ops;
+ }
+
+ /*
+ * Tegra30+ has the following CPUG clock topology:
+ *
+ * +---+ +-------+ +-+ +-+ +-+
+ * PLLP+->+ +->+DIVIDER+->+0| +-------->+0| ------------->+0|
+ * | | +-------+ | | | +---+ | | | | |
+ * PLLC+->+MUX| | +->+ | S | | +->+ | +->+CPU
+ * ... | | | | | | K | | | | +-------+ | |
+ * PLLX+->+-->+------------>+1| +->+ I +->+1| +->+ DIV2 +->+1|
+ * +---+ +++ | P | +++ |SKIPPER| +++
+ * ^ | P | ^ +-------+ ^
+ * | | E | | |
+ * PLLX_SEL+--+ | R | | OVERHEAT+--+
+ * +---+ |
+ * |
+ * SUPER_CDIV_ENB+--+
+ *
+ * Tegra20 is similar, but simpler. It doesn't have the divider and
+ * thermal DIV2 skipper.
+ *
+ * At least for now we're not going to use the clock-skipper, hence
+ * let's ensure that it is disabled.
+ */
+ val = readl_relaxed(reg + 4);
+ val &= ~SUPER_CDIV_ENB;
+ writel_relaxed(val, reg + 4);
+
+ clk = clk_register(NULL, &super->hw);
+ if (IS_ERR(clk))
+ kfree(super);
+ else
+ cclk_super = super;
+
+ return clk;
+}
+
+int tegra_cclk_pre_pllx_rate_change(void)
+{
+ if (IS_ERR_OR_NULL(cclk_super))
+ return -EINVAL;
+
+ if (cclk_super_get_parent(&cclk_super->hw) == PLLX_INDEX)
+ cclk_on_pllx = true;
+ else
+ cclk_on_pllx = false;
+
+ /*
+ * CPU needs to be temporarily re-parented away from PLLX if PLLX
+ * changes its rate. PLLP is a safe parent for CPU on all Tegra SoCs.
+ */
+ if (cclk_on_pllx)
+ cclk_super_set_parent(&cclk_super->hw, PLLP_INDEX);
+
+ return 0;
+}
+
+void tegra_cclk_post_pllx_rate_change(void)
+{
+ if (cclk_on_pllx)
+ cclk_super_set_parent(&cclk_super->hw, PLLX_INDEX);
+}
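
Together with the pre/post hooks wired into pll_x_params further down, the new cclk driver implements a small rate policy. An illustrative walk-through, with assumed example rates:

	/*
	 * - clk_set_rate(cclk, 300 MHz) with PLLP at, say, 408 MHz:
	 *   determine_rate() sees the request fits PLLP, so PLLP becomes the
	 *   parent; with TEGRA20_SUPER_CLK set the rate is raised to the full
	 *   PLLP rate because there is no divider, only the (disabled) skipper.
	 * - clk_set_rate(cclk, 1200 MHz): the request exceeds PLLP, so PLLX is
	 *   selected and the rate is rounded via clk_hw_round_rate(pllx_hw, ...).
	 * - When PLLX itself changes rate, tegra_cclk_pre_pllx_rate_change()
	 *   temporarily parks the CPU on PLLP and the post hook switches it
	 *   back once PLLX is stable.
	 */
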
diff --git a/drivers/clk/tegra/clk-emc.c b/drivers/clk/tegra/clk-tegra124-emc.c
index 745f9faa98d8..745f9faa98d8 100644
--- a/drivers/clk/tegra/clk-emc.c
+++ b/drivers/clk/tegra/clk-tegra124-emc.c
diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c
index 085feb04e913..3efc651b42e3 100644
--- a/drivers/clk/tegra/clk-tegra20.c
+++ b/drivers/clk/tegra/clk-tegra20.c
@@ -391,6 +391,8 @@ static struct tegra_clk_pll_params pll_x_params = {
.lock_delay = 300,
.freq_table = pll_x_freq_table,
.flags = TEGRA_PLL_HAS_CPCON | TEGRA_PLL_HAS_LOCK_ENABLE,
+ .pre_rate_change = tegra_cclk_pre_pllx_rate_change,
+ .post_rate_change = tegra_cclk_post_pllx_rate_change,
};
static struct tegra_clk_pll_params pll_e_params = {
@@ -702,9 +704,10 @@ static void tegra20_super_clk_init(void)
struct clk *clk;
/* CCLK */
- clk = tegra_clk_register_super_mux("cclk", cclk_parents,
+ clk = tegra_clk_register_super_cclk("cclk", cclk_parents,
ARRAY_SIZE(cclk_parents), CLK_SET_RATE_PARENT,
- clk_base + CCLK_BURST_POLICY, 0, 4, 0, 0, NULL);
+ clk_base + CCLK_BURST_POLICY, TEGRA20_SUPER_CLK,
+ NULL);
clks[TEGRA20_CLK_CCLK] = clk;
/* SCLK */
diff --git a/drivers/clk/tegra/clk-tegra210-emc.c b/drivers/clk/tegra/clk-tegra210-emc.c
new file mode 100644
index 000000000000..352a2c3fc374
--- /dev/null
+++ b/drivers/clk/tegra/clk-tegra210-emc.c
@@ -0,0 +1,369 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2015-2020, NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/clk/tegra.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+
+#define CLK_SOURCE_EMC 0x19c
+#define CLK_SOURCE_EMC_2X_CLK_SRC GENMASK(31, 29)
+#define CLK_SOURCE_EMC_MC_EMC_SAME_FREQ BIT(16)
+#define CLK_SOURCE_EMC_2X_CLK_DIVISOR GENMASK(7, 0)
+
+#define CLK_SRC_PLLM 0
+#define CLK_SRC_PLLC 1
+#define CLK_SRC_PLLP 2
+#define CLK_SRC_CLK_M 3
+#define CLK_SRC_PLLM_UD 4
+#define CLK_SRC_PLLMB_UD 5
+#define CLK_SRC_PLLMB 6
+#define CLK_SRC_PLLP_UD 7
+
+struct tegra210_clk_emc {
+ struct clk_hw hw;
+ void __iomem *regs;
+
+ struct tegra210_clk_emc_provider *provider;
+
+ struct clk *parents[8];
+};
+
+static inline struct tegra210_clk_emc *
+to_tegra210_clk_emc(struct clk_hw *hw)
+{
+ return container_of(hw, struct tegra210_clk_emc, hw);
+}
+
+static const char *tegra210_clk_emc_parents[] = {
+ "pll_m", "pll_c", "pll_p", "clk_m", "pll_m_ud", "pll_mb_ud",
+ "pll_mb", "pll_p_ud",
+};
+
+static u8 tegra210_clk_emc_get_parent(struct clk_hw *hw)
+{
+ struct tegra210_clk_emc *emc = to_tegra210_clk_emc(hw);
+ u32 value;
+ u8 src;
+
+ value = readl_relaxed(emc->regs + CLK_SOURCE_EMC);
+ src = FIELD_GET(CLK_SOURCE_EMC_2X_CLK_SRC, value);
+
+ return src;
+}
+
+static unsigned long tegra210_clk_emc_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct tegra210_clk_emc *emc = to_tegra210_clk_emc(hw);
+ u32 value, div;
+
+ /*
+ * CCF assumes that neither the parent nor its rate will change during
+ * ->set_rate(), so the parent rate passed in here was cached from the
+ * parent before the ->set_rate() call.
+ *
+ * This can lead to wrong results being reported for the EMC clock if
+ * the parent and/or parent rate have changed as part of the EMC rate
+ * change sequence. Fix this by overriding the parent clock with what
+ * we know to be the correct value after the rate change.
+ */
+ parent_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
+
+ value = readl_relaxed(emc->regs + CLK_SOURCE_EMC);
+
+ div = FIELD_GET(CLK_SOURCE_EMC_2X_CLK_DIVISOR, value);
+ div += 2;
+
+ return DIV_ROUND_UP(parent_rate * 2, div);
+}
+
+static long tegra210_clk_emc_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct tegra210_clk_emc *emc = to_tegra210_clk_emc(hw);
+ struct tegra210_clk_emc_provider *provider = emc->provider;
+ unsigned int i;
+
+ if (!provider || !provider->configs || provider->num_configs == 0)
+ return clk_hw_get_rate(hw);
+
+ for (i = 0; i < provider->num_configs; i++) {
+ if (provider->configs[i].rate >= rate)
+ return provider->configs[i].rate;
+ }
+
+ return provider->configs[i - 1].rate;
+}
+
+static struct clk *tegra210_clk_emc_find_parent(struct tegra210_clk_emc *emc,
+ u8 index)
+{
+ struct clk_hw *parent = clk_hw_get_parent_by_index(&emc->hw, index);
+ const char *name = clk_hw_get_name(parent);
+
+ /* XXX implement cache? */
+
+ return __clk_lookup(name);
+}
+
+static int tegra210_clk_emc_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct tegra210_clk_emc *emc = to_tegra210_clk_emc(hw);
+ struct tegra210_clk_emc_provider *provider = emc->provider;
+ struct tegra210_clk_emc_config *config;
+ struct device *dev = provider->dev;
+ struct clk_hw *old, *new, *parent;
+ u8 old_idx, new_idx, index;
+ struct clk *clk;
+ unsigned int i;
+ int err;
+
+ if (!provider || !provider->configs || provider->num_configs == 0)
+ return -EINVAL;
+
+ for (i = 0; i < provider->num_configs; i++) {
+ if (provider->configs[i].rate >= rate) {
+ config = &provider->configs[i];
+ break;
+ }
+ }
+
+ if (i == provider->num_configs)
+ config = &provider->configs[i - 1];
+
+ old_idx = tegra210_clk_emc_get_parent(hw);
+ new_idx = FIELD_GET(CLK_SOURCE_EMC_2X_CLK_SRC, config->value);
+
+ old = clk_hw_get_parent_by_index(hw, old_idx);
+ new = clk_hw_get_parent_by_index(hw, new_idx);
+
+ /* if the rate has changed... */
+ if (config->parent_rate != clk_hw_get_rate(old)) {
+ /* ... but the clock source remains the same ... */
+ if (new_idx == old_idx) {
+ /* ... switch to the alternative clock source. */
+ switch (new_idx) {
+ case CLK_SRC_PLLM:
+ new_idx = CLK_SRC_PLLMB;
+ break;
+
+ case CLK_SRC_PLLM_UD:
+ new_idx = CLK_SRC_PLLMB_UD;
+ break;
+
+ case CLK_SRC_PLLMB_UD:
+ new_idx = CLK_SRC_PLLM_UD;
+ break;
+
+ case CLK_SRC_PLLMB:
+ new_idx = CLK_SRC_PLLM;
+ break;
+ }
+
+ /*
+ * This should never happen; if it does, there is no way
+ * to recover from it.
+ */
+ if (WARN_ON(new_idx == old_idx))
+ return -EINVAL;
+
+ new = clk_hw_get_parent_by_index(hw, new_idx);
+ }
+
+ index = new_idx;
+ parent = new;
+ } else {
+ index = old_idx;
+ parent = old;
+ }
+
+ clk = tegra210_clk_emc_find_parent(emc, index);
+ if (IS_ERR(clk)) {
+ err = PTR_ERR(clk);
+ dev_err(dev, "failed to get parent clock for index %u: %d\n",
+ index, err);
+ return err;
+ }
+
+ /* set the new parent clock to the required rate */
+ if (clk_get_rate(clk) != config->parent_rate) {
+ err = clk_set_rate(clk, config->parent_rate);
+ if (err < 0) {
+ dev_err(dev, "failed to set rate %lu Hz for %pC: %d\n",
+ config->parent_rate, clk, err);
+ return err;
+ }
+ }
+
+ /* enable the new parent clock */
+ if (parent != old) {
+ err = clk_prepare_enable(clk);
+ if (err < 0) {
+ dev_err(dev, "failed to enable parent clock %pC: %d\n",
+ clk, err);
+ return err;
+ }
+ }
+
+ /* update the EMC source configuration to reflect the new parent */
+ config->value &= ~CLK_SOURCE_EMC_2X_CLK_SRC;
+ config->value |= FIELD_PREP(CLK_SOURCE_EMC_2X_CLK_SRC, index);
+
+ /*
+ * Finally, switch the EMC programming with both old and new parent
+ * clocks enabled.
+ */
+ err = provider->set_rate(dev, config);
+ if (err < 0) {
+ dev_err(dev, "failed to set EMC rate to %lu Hz: %d\n", rate,
+ err);
+
+ /*
+ * If we're unable to switch to the new EMC frequency, we no
+ * longer need the new parent to be enabled.
+ */
+ if (parent != old)
+ clk_disable_unprepare(clk);
+
+ return err;
+ }
+
+ /* reparent to new parent clock and disable the old parent clock */
+ if (parent != old) {
+ clk = tegra210_clk_emc_find_parent(emc, old_idx);
+ if (IS_ERR(clk)) {
+ err = PTR_ERR(clk);
+ dev_err(dev,
+ "failed to get parent clock for index %u: %d\n",
+ old_idx, err);
+ return err;
+ }
+
+ clk_hw_reparent(hw, parent);
+ clk_disable_unprepare(clk);
+ }
+
+ return err;
+}
+
+static const struct clk_ops tegra210_clk_emc_ops = {
+ .get_parent = tegra210_clk_emc_get_parent,
+ .recalc_rate = tegra210_clk_emc_recalc_rate,
+ .round_rate = tegra210_clk_emc_round_rate,
+ .set_rate = tegra210_clk_emc_set_rate,
+};
+
+struct clk *tegra210_clk_register_emc(struct device_node *np,
+ void __iomem *regs)
+{
+ struct tegra210_clk_emc *emc;
+ struct clk_init_data init;
+ struct clk *clk;
+
+ emc = kzalloc(sizeof(*emc), GFP_KERNEL);
+ if (!emc)
+ return ERR_PTR(-ENOMEM);
+
+ emc->regs = regs;
+
+ init.name = "emc";
+ init.ops = &tegra210_clk_emc_ops;
+ init.flags = CLK_IS_CRITICAL | CLK_GET_RATE_NOCACHE;
+ init.parent_names = tegra210_clk_emc_parents;
+ init.num_parents = ARRAY_SIZE(tegra210_clk_emc_parents);
+ emc->hw.init = &init;
+
+ clk = clk_register(NULL, &emc->hw);
+ if (IS_ERR(clk)) {
+ kfree(emc);
+ return clk;
+ }
+
+ return clk;
+}
+
+int tegra210_clk_emc_attach(struct clk *clk,
+ struct tegra210_clk_emc_provider *provider)
+{
+ struct clk_hw *hw = __clk_get_hw(clk);
+ struct tegra210_clk_emc *emc = to_tegra210_clk_emc(hw);
+ struct device *dev = provider->dev;
+ unsigned int i;
+ int err;
+
+ if (!try_module_get(provider->owner))
+ return -ENODEV;
+
+ for (i = 0; i < provider->num_configs; i++) {
+ struct tegra210_clk_emc_config *config = &provider->configs[i];
+ struct clk_hw *parent;
+ bool same_freq;
+ u8 div, src;
+
+ div = FIELD_GET(CLK_SOURCE_EMC_2X_CLK_DIVISOR, config->value);
+ src = FIELD_GET(CLK_SOURCE_EMC_2X_CLK_SRC, config->value);
+
+ /* do basic sanity checking on the EMC timings */
+ if (div & 0x1) {
+ dev_err(dev, "invalid odd divider %u for rate %lu Hz\n",
+ div, config->rate);
+ err = -EINVAL;
+ goto put;
+ }
+
+ same_freq = config->value & CLK_SOURCE_EMC_MC_EMC_SAME_FREQ;
+
+ if (same_freq != config->same_freq) {
+ dev_err(dev,
+ "ambiguous EMC to MC ratio for rate %lu Hz\n",
+ config->rate);
+ err = -EINVAL;
+ goto put;
+ }
+
+ parent = clk_hw_get_parent_by_index(hw, src);
+ config->parent = src;
+
+ if (src == CLK_SRC_PLLM || src == CLK_SRC_PLLM_UD) {
+ config->parent_rate = config->rate * (1 + div / 2);
+ } else {
+ unsigned long rate = config->rate * (1 + div / 2);
+
+ config->parent_rate = clk_hw_get_rate(parent);
+
+ if (config->parent_rate != rate) {
+ dev_err(dev,
+ "rate %lu Hz does not match input\n",
+ config->rate);
+ err = -EINVAL;
+ goto put;
+ }
+ }
+ }
+
+ emc->provider = provider;
+
+ return 0;
+
+put:
+ module_put(provider->owner);
+ return err;
+}
+EXPORT_SYMBOL_GPL(tegra210_clk_emc_attach);
+
+void tegra210_clk_emc_detach(struct clk *clk)
+{
+ struct tegra210_clk_emc *emc = to_tegra210_clk_emc(__clk_get_hw(clk));
+
+ module_put(emc->provider->owner);
+ emc->provider = NULL;
+}
+EXPORT_SYMBOL_GPL(tegra210_clk_emc_detach);
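
Two details of the new EMC clock are easiest to see with numbers. The source register holds a 7.1-style divisor, and recalc_rate() computes rate = parent_rate * 2 / (divisor_field + 2); the set_rate() path also keeps both parents alive across a switch, using the PLLMB spare when a new timing needs the same PLL at a different rate. An illustrative example with assumed values:

	/*
	 * With pll_m at 1600 MHz:
	 *	divisor_field = 0  ->  rate = 1600 * 2 / 2 = 1600 MHz
	 *	divisor_field = 2  ->  rate = 1600 * 2 / 4 =  800 MHz
	 *
	 * If a new timing keeps CLK_SRC_PLLM but at a different parent rate,
	 * the new rate is programmed on CLK_SRC_PLLMB instead, the EMC is
	 * switched over with both PLLs running, and only then is the old
	 * parent disabled.
	 */
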
diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c
index defe3b7ebfa4..68cbb98af567 100644
--- a/drivers/clk/tegra/clk-tegra210.c
+++ b/drivers/clk/tegra/clk-tegra210.c
@@ -37,6 +37,7 @@
#define CLK_SOURCE_LA 0x1f8
#define CLK_SOURCE_SDMMC2 0x154
#define CLK_SOURCE_SDMMC4 0x164
+#define CLK_SOURCE_EMC_DLL 0x664
#define PLLC_BASE 0x80
#define PLLC_OUT 0x84
@@ -227,6 +228,10 @@
#define RST_DFLL_DVCO 0x2f4
#define DVFS_DFLL_RESET_SHIFT 0
+#define CLK_RST_CONTROLLER_CLK_OUT_ENB_X_SET 0x284
+#define CLK_RST_CONTROLLER_CLK_OUT_ENB_X_CLR 0x288
+#define CLK_OUT_ENB_X_CLK_ENB_EMC_DLL BIT(14)
+
#define CLK_RST_CONTROLLER_RST_DEV_Y_SET 0x2a8
#define CLK_RST_CONTROLLER_RST_DEV_Y_CLR 0x2ac
#define CPU_SOFTRST_CTRL 0x380
@@ -314,12 +319,6 @@ static unsigned long tegra210_input_freq[] = {
[8] = 12000000,
};
-static const char *mux_pllmcp_clkm[] = {
- "pll_m", "pll_c", "pll_p", "clk_m", "pll_m_ud", "pll_mb", "pll_mb",
- "pll_p",
-};
-#define mux_pllmcp_clkm_idx NULL
-
#define PLL_ENABLE (1 << 30)
#define PLLCX_MISC1_IDDQ (1 << 27)
@@ -555,6 +554,27 @@ void tegra210_set_sata_pll_seq_sw(bool state)
}
EXPORT_SYMBOL_GPL(tegra210_set_sata_pll_seq_sw);
+void tegra210_clk_emc_dll_enable(bool flag)
+{
+ u32 offset = flag ? CLK_RST_CONTROLLER_CLK_OUT_ENB_X_SET :
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_X_CLR;
+
+ writel_relaxed(CLK_OUT_ENB_X_CLK_ENB_EMC_DLL, clk_base + offset);
+}
+EXPORT_SYMBOL_GPL(tegra210_clk_emc_dll_enable);
+
+void tegra210_clk_emc_dll_update_setting(u32 emc_dll_src_value)
+{
+ writel_relaxed(emc_dll_src_value, clk_base + CLK_SOURCE_EMC_DLL);
+}
+EXPORT_SYMBOL_GPL(tegra210_clk_emc_dll_update_setting);
+
+void tegra210_clk_emc_update_setting(u32 emc_src_value)
+{
+ writel_relaxed(emc_src_value, clk_base + CLK_SOURCE_EMC);
+}
+EXPORT_SYMBOL_GPL(tegra210_clk_emc_update_setting);
+
static void tegra210_generic_mbist_war(struct tegra210_domain_mbist_war *mbist)
{
u32 val;
@@ -2310,7 +2330,6 @@ static struct tegra_clk tegra210_clks[tegra_clk_max] __initdata = {
[tegra_clk_i2c2] = { .dt_id = TEGRA210_CLK_I2C2, .present = true },
[tegra_clk_uartc_8] = { .dt_id = TEGRA210_CLK_UARTC, .present = true },
[tegra_clk_mipi_cal] = { .dt_id = TEGRA210_CLK_MIPI_CAL, .present = true },
- [tegra_clk_emc] = { .dt_id = TEGRA210_CLK_EMC, .present = true },
[tegra_clk_usb2] = { .dt_id = TEGRA210_CLK_USB2, .present = true },
[tegra_clk_bsev] = { .dt_id = TEGRA210_CLK_BSEV, .present = true },
[tegra_clk_uartd_8] = { .dt_id = TEGRA210_CLK_UARTD, .present = true },
@@ -2953,6 +2972,27 @@ static const char * const sor1_parents[] = {
static u32 sor1_parents_idx[] = { 0, 2, 5, 6 };
+static const struct clk_div_table mc_div_table_tegra210[] = {
+ { .val = 0, .div = 2 },
+ { .val = 1, .div = 4 },
+ { .val = 2, .div = 1 },
+ { .val = 3, .div = 2 },
+ { .val = 0, .div = 0 },
+};
+
+static void tegra210_clk_register_mc(const char *name,
+ const char *parent_name)
+{
+ struct clk *clk;
+
+ clk = clk_register_divider_table(NULL, name, parent_name,
+ CLK_IS_CRITICAL,
+ clk_base + CLK_SOURCE_EMC,
+ 15, 2, CLK_DIVIDER_READ_ONLY,
+ mc_div_table_tegra210, &emc_lock);
+ clks[TEGRA210_CLK_MC] = clk;
+}
+
static const char * const sor1_out_parents[] = {
/*
* Bit 0 of the mux selects sor1_pad_clkout, irrespective of bit 1, so
@@ -2995,7 +3035,8 @@ static const char * const la_parents[] = {
static struct tegra_clk_periph tegra210_la =
TEGRA_CLK_PERIPH(29, 7, 9, 0, 8, 1, TEGRA_DIVIDER_ROUND_UP, 76, 0, NULL, NULL);
-static __init void tegra210_periph_clk_init(void __iomem *clk_base,
+static __init void tegra210_periph_clk_init(struct device_node *np,
+ void __iomem *clk_base,
void __iomem *pmc_base)
{
struct clk *clk;
@@ -3035,22 +3076,19 @@ static __init void tegra210_periph_clk_init(void __iomem *clk_base,
periph_clk_enb_refcnt);
clks[TEGRA210_CLK_DSIB] = clk;
+ /* csi_tpg */
+ clk = clk_register_gate(NULL, "csi_tpg", "pll_d",
+ CLK_SET_RATE_PARENT, clk_base + PLLD_BASE,
+ 23, 0, &pll_d_lock);
+ clk_register_clkdev(clk, "csi_tpg", NULL);
+ clks[TEGRA210_CLK_CSI_TPG] = clk;
+
/* la */
clk = tegra_clk_register_periph("la", la_parents,
ARRAY_SIZE(la_parents), &tegra210_la, clk_base,
CLK_SOURCE_LA, 0);
clks[TEGRA210_CLK_LA] = clk;
- /* emc mux */
- clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm,
- ARRAY_SIZE(mux_pllmcp_clkm), 0,
- clk_base + CLK_SOURCE_EMC,
- 29, 3, 0, &emc_lock);
-
- clk = tegra_clk_register_mc("mc", "emc_mux", clk_base + CLK_SOURCE_EMC,
- &emc_lock);
- clks[TEGRA210_CLK_MC] = clk;
-
/* cml0 */
clk = clk_register_gate(NULL, "cml0", "pll_e", 0, clk_base + PLLE_AUX,
0, 0, &pll_e_lock);
@@ -3093,6 +3131,13 @@ static __init void tegra210_periph_clk_init(void __iomem *clk_base,
}
tegra_periph_clk_init(clk_base, pmc_base, tegra210_clks, &pll_p_params);
+
+ /* emc */
+ clk = tegra210_clk_register_emc(np, clk_base);
+ clks[TEGRA210_CLK_EMC] = clk;
+
+ /* mc */
+ tegra210_clk_register_mc("mc", "emc");
}
static void __init tegra210_pll_init(void __iomem *clk_base,
@@ -3153,6 +3198,17 @@ static void __init tegra210_pll_init(void __iomem *clk_base,
clk_register_clkdev(clk, "pll_m_ud", NULL);
clks[TEGRA210_CLK_PLL_M_UD] = clk;
+ /* PLLMB_UD */
+ clk = clk_register_fixed_factor(NULL, "pll_mb_ud", "pll_mb",
+ CLK_SET_RATE_PARENT, 1, 1);
+ clk_register_clkdev(clk, "pll_mb_ud", NULL);
+ clks[TEGRA210_CLK_PLL_MB_UD] = clk;
+
+ /* PLLP_UD */
+ clk = clk_register_fixed_factor(NULL, "pll_p_ud", "pll_p",
+ 0, 1, 1);
+ clks[TEGRA210_CLK_PLL_P_UD] = clk;
+
/* PLLU_VCO */
if (!tegra210_init_pllu()) {
clk = clk_register_fixed_rate(NULL, "pll_u_vco", "pll_ref", 0,
@@ -3680,7 +3736,7 @@ static void __init tegra210_clock_init(struct device_node *np)
tegra_fixed_clk_init(tegra210_clks);
tegra210_pll_init(clk_base, pmc_base);
- tegra210_periph_clk_init(clk_base, pmc_base);
+ tegra210_periph_clk_init(np, clk_base, pmc_base);
tegra_audio_clk_init(clk_base, pmc_base, tegra210_clks,
tegra210_audio_plls,
ARRAY_SIZE(tegra210_audio_plls), 24576000);
diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
index 3255f82e61b5..37244a7e68c2 100644
--- a/drivers/clk/tegra/clk-tegra30.c
+++ b/drivers/clk/tegra/clk-tegra30.c
@@ -499,6 +499,8 @@ static struct tegra_clk_pll_params pll_x_params __ro_after_init = {
.freq_table = pll_x_freq_table,
.flags = TEGRA_PLL_HAS_CPCON | TEGRA_PLL_SET_DCCON |
TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE,
+ .pre_rate_change = tegra_cclk_pre_pllx_rate_change,
+ .post_rate_change = tegra_cclk_post_pllx_rate_change,
};
static struct tegra_clk_pll_params pll_e_params __ro_after_init = {
@@ -926,11 +928,11 @@ static void __init tegra30_super_clk_init(void)
clk_register_clkdev(clk, "pll_p_out4_cclkg", NULL);
/* CCLKG */
- clk = tegra_clk_register_super_mux("cclk_g", cclk_g_parents,
+ clk = tegra_clk_register_super_cclk("cclk_g", cclk_g_parents,
ARRAY_SIZE(cclk_g_parents),
CLK_SET_RATE_PARENT,
clk_base + CCLKG_BURST_POLICY,
- 0, 4, 0, 0, NULL);
+ 0, NULL);
clks[TEGRA30_CLK_CCLK_G] = clk;
/*
diff --git a/drivers/clk/tegra/clk.h b/drivers/clk/tegra/clk.h
index 2c9a68302e02..6b565f6b5f66 100644
--- a/drivers/clk/tegra/clk.h
+++ b/drivers/clk/tegra/clk.h
@@ -266,6 +266,10 @@ struct tegra_clk_pll;
* disabled.
* @dyn_ramp: Callback which can be used to define a custom
* dynamic ramp function for a given PLL.
+ * @pre_rate_change: Callback which is invoked just before changing
+ * PLL's rate.
+ * @post_rate_change: Callback which is invoked right after changing
+ * PLL's rate.
*
* Flags:
* TEGRA_PLL_USE_LOCK - This flag indicated to use lock bits for
@@ -342,6 +346,8 @@ struct tegra_clk_pll_params {
void (*set_defaults)(struct tegra_clk_pll *pll);
int (*dyn_ramp)(struct tegra_clk_pll *pll,
struct tegra_clk_pll_freq_table *cfg);
+ int (*pre_rate_change)(void);
+ void (*post_rate_change)(void);
};
#define TEGRA_PLL_USE_LOCK BIT(0)
@@ -729,8 +735,10 @@ struct clk *tegra_clk_register_periph_data(void __iomem *clk_base,
* TEGRA_DIVIDER_2 - LP cluster has additional divider. This flag indicates
* that this is LP cluster clock.
* TEGRA210_CPU_CLK - This flag is used to identify CPU cluster for gen5
- * super mux parent using PLLP branches. To use PLLP branches to CPU, need
- * to configure additional bit PLLP_OUT_CPU in the clock registers.
+ * super mux parent using PLLP branches. To use PLLP branches to CPU, need
+ * to configure additional bit PLLP_OUT_CPU in the clock registers.
+ * TEGRA20_SUPER_CLK - Tegra20 doesn't have a dedicated divider for Super
+ * clocks; it only has a clock-skipper.
*/
struct tegra_clk_super_mux {
struct clk_hw hw;
@@ -748,6 +756,7 @@ struct tegra_clk_super_mux {
#define TEGRA_DIVIDER_2 BIT(0)
#define TEGRA210_CPU_CLK BIT(1)
+#define TEGRA20_SUPER_CLK BIT(2)
extern const struct clk_ops tegra_clk_super_ops;
struct clk *tegra_clk_register_super_mux(const char *name,
@@ -758,6 +767,12 @@ struct clk *tegra_clk_register_super_clk(const char *name,
const char * const *parent_names, u8 num_parents,
unsigned long flags, void __iomem *reg, u8 clk_super_flags,
spinlock_t *lock);
+struct clk *tegra_clk_register_super_cclk(const char *name,
+ const char * const *parent_names, u8 num_parents,
+ unsigned long flags, void __iomem *reg, u8 clk_super_flags,
+ spinlock_t *lock);
+int tegra_cclk_pre_pllx_rate_change(void);
+void tegra_cclk_post_pllx_rate_change(void);
/**
* struct tegra_sdmmc_mux - switch divider with Low Jitter inputs for SDMMC
@@ -866,7 +881,7 @@ void tegra_super_clk_gen5_init(void __iomem *clk_base,
void __iomem *pmc_base, struct tegra_clk *tegra_clks,
struct tegra_clk_pll_params *pll_params);
-#ifdef CONFIG_TEGRA_CLK_EMC
+#ifdef CONFIG_TEGRA124_EMC
struct clk *tegra_clk_register_emc(void __iomem *base, struct device_node *np,
spinlock_t *lock);
#else
@@ -907,4 +922,7 @@ void tegra_clk_periph_resume(void);
bool tegra20_clk_emc_driver_available(struct clk_hw *emc_hw);
struct clk *tegra20_clk_register_emc(void __iomem *ioaddr, bool low_jitter);
+struct clk *tegra210_clk_register_emc(struct device_node *np,
+ void __iomem *regs);
+
#endif /* TEGRA_CLK_H */
diff --git a/drivers/clk/ti/clk-44xx.c b/drivers/clk/ti/clk-44xx.c
index 312a20f8ec0e..a38c92153979 100644
--- a/drivers/clk/ti/clk-44xx.c
+++ b/drivers/clk/ti/clk-44xx.c
@@ -606,13 +606,13 @@ static const struct omap_clkctrl_reg_data omap4_l4_per_clkctrl_regs[] __initcons
static const struct
omap_clkctrl_reg_data omap4_l4_secure_clkctrl_regs[] __initconst = {
- { OMAP4_AES1_CLKCTRL, NULL, CLKF_SW_SUP, "" },
- { OMAP4_AES2_CLKCTRL, NULL, CLKF_SW_SUP, "" },
- { OMAP4_DES3DES_CLKCTRL, NULL, CLKF_SW_SUP, "" },
- { OMAP4_PKA_CLKCTRL, NULL, CLKF_SW_SUP, "" },
- { OMAP4_RNG_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_SOC_NONSEC, "" },
- { OMAP4_SHA2MD5_CLKCTRL, NULL, CLKF_SW_SUP, "" },
- { OMAP4_CRYPTODMA_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_SOC_NONSEC, "" },
+ { OMAP4_AES1_CLKCTRL, NULL, CLKF_SW_SUP, "l3_div_ck" },
+ { OMAP4_AES2_CLKCTRL, NULL, CLKF_SW_SUP, "l3_div_ck" },
+ { OMAP4_DES3DES_CLKCTRL, NULL, CLKF_SW_SUP, "l4_div_ck" },
+ { OMAP4_PKA_CLKCTRL, NULL, CLKF_SW_SUP, "l4_div_ck" },
+ { OMAP4_RNG_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_SOC_NONSEC, "l4_div_ck" },
+ { OMAP4_SHA2MD5_CLKCTRL, NULL, CLKF_SW_SUP, "l3_div_ck" },
+ { OMAP4_CRYPTODMA_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_SOC_NONSEC, "l3_div_ck" },
{ 0 },
};
diff --git a/drivers/clk/ti/clk-54xx.c b/drivers/clk/ti/clk-54xx.c
index 92bf2dda95b9..8694bc9f5fc7 100644
--- a/drivers/clk/ti/clk-54xx.c
+++ b/drivers/clk/ti/clk-54xx.c
@@ -303,13 +303,13 @@ static const struct omap_clkctrl_reg_data omap5_l4per_clkctrl_regs[] __initconst
static const struct
omap_clkctrl_reg_data omap5_l4_secure_clkctrl_regs[] __initconst = {
- { OMAP5_AES1_CLKCTRL, NULL, CLKF_HW_SUP, "" },
- { OMAP5_AES2_CLKCTRL, NULL, CLKF_HW_SUP, "" },
- { OMAP5_DES3DES_CLKCTRL, NULL, CLKF_HW_SUP, "" },
- { OMAP5_FPKA_CLKCTRL, NULL, CLKF_SW_SUP, "" },
- { OMAP5_RNG_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_SOC_NONSEC, "" },
- { OMAP5_SHA2MD5_CLKCTRL, NULL, CLKF_HW_SUP, "" },
- { OMAP5_DMA_CRYPTO_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_SOC_NONSEC, "" },
+ { OMAP5_AES1_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
+ { OMAP5_AES2_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
+ { OMAP5_DES3DES_CLKCTRL, NULL, CLKF_HW_SUP, "l4_root_clk_div" },
+ { OMAP5_FPKA_CLKCTRL, NULL, CLKF_SW_SUP, "l4_root_clk_div" },
+ { OMAP5_RNG_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_SOC_NONSEC, "l4_root_clk_div" },
+ { OMAP5_SHA2MD5_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
+ { OMAP5_DMA_CRYPTO_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_SOC_NONSEC, "l3_iclk_div" },
{ 0 },
};
diff --git a/drivers/clk/ti/clk-7xx.c b/drivers/clk/ti/clk-7xx.c
index 14b645093107..b4cf578a69e1 100644
--- a/drivers/clk/ti/clk-7xx.c
+++ b/drivers/clk/ti/clk-7xx.c
@@ -312,15 +312,6 @@ static const char * const dra7_gpu_hyd_mux_parents[] __initconst = {
NULL,
};
-static const char * const dra7_gpu_sys_clk_parents[] __initconst = {
- "sys_clkin",
- NULL,
-};
-
-static const struct omap_clkctrl_div_data dra7_gpu_sys_clk_data __initconst = {
- .max_div = 2,
-};
-
static const struct omap_clkctrl_bit_data dra7_gpu_core_bit_data[] __initconst = {
{ 24, TI_CLK_MUX, dra7_gpu_core_mux_parents, NULL, },
{ 26, TI_CLK_MUX, dra7_gpu_hyd_mux_parents, NULL, },
@@ -328,7 +319,7 @@ static const struct omap_clkctrl_bit_data dra7_gpu_core_bit_data[] __initconst =
};
static const struct omap_clkctrl_reg_data dra7_gpu_clkctrl_regs[] __initconst = {
- { DRA7_GPU_CLKCTRL, dra7_gpu_core_bit_data, CLKF_SW_SUP, "gpu_cm:clk:0000:24", },
+ { DRA7_GPU_CLKCTRL, dra7_gpu_core_bit_data, CLKF_SW_SUP, "gpu-clkctrl:0000:24", },
{ 0 },
};
@@ -644,7 +635,7 @@ static const struct omap_clkctrl_reg_data dra7_l4sec_clkctrl_regs[] __initconst
{ DRA7_L4SEC_AES1_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
{ DRA7_L4SEC_AES2_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
{ DRA7_L4SEC_DES_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
- { DRA7_L4SEC_RNG_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_SOC_NONSEC, "" },
+ { DRA7_L4SEC_RNG_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_SOC_NONSEC, "l4_root_clk_div" },
{ DRA7_L4SEC_SHAM_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
{ 0 },
};
@@ -815,7 +806,7 @@ static const struct omap_clkctrl_reg_data dra7_wkupaon_clkctrl_regs[] __initcons
{ DRA7_WKUPAON_COUNTER_32K_CLKCTRL, NULL, 0, "wkupaon_iclk_mux" },
{ DRA7_WKUPAON_UART10_CLKCTRL, dra7_uart10_bit_data, CLKF_SW_SUP, "wkupaon-clkctrl:0060:24" },
{ DRA7_WKUPAON_DCAN1_CLKCTRL, dra7_dcan1_bit_data, CLKF_SW_SUP, "wkupaon-clkctrl:0068:24" },
- { DRA7_WKUPAON_ADC_CLKCTRL, NULL, CLKF_SW_SUP, "mcan_clk" },
+ { DRA7_WKUPAON_ADC_CLKCTRL, NULL, CLKF_SW_SUP | CLKF_SOC_DRA76, "mcan_clk" },
{ 0 },
};
diff --git a/drivers/clk/ti/clk-816x.c b/drivers/clk/ti/clk-816x.c
index 7d215cdf9dda..9daf3825f289 100644
--- a/drivers/clk/ti/clk-816x.c
+++ b/drivers/clk/ti/clk-816x.c
@@ -73,6 +73,7 @@ static const char *enable_init_clks[] = {
"ddr_pll_clk1",
"ddr_pll_clk2",
"ddr_pll_clk3",
+ "sysclk6_ck",
};
int __init dm816x_dt_clk_init(void)
diff --git a/drivers/clk/ti/composite.c b/drivers/clk/ti/composite.c
index 6a89936ba03a..eaa43575cfa5 100644
--- a/drivers/clk/ti/composite.c
+++ b/drivers/clk/ti/composite.c
@@ -196,6 +196,7 @@ cleanup:
if (!cclk->comp_clks[i])
continue;
list_del(&cclk->comp_clks[i]->link);
+ kfree(cclk->comp_clks[i]->parent_names);
kfree(cclk->comp_clks[i]);
}
diff --git a/drivers/clk/versatile/Kconfig b/drivers/clk/versatile/Kconfig
index c2618f1477a2..8c1b0e8e8d32 100644
--- a/drivers/clk/versatile/Kconfig
+++ b/drivers/clk/versatile/Kconfig
@@ -1,33 +1,32 @@
# SPDX-License-Identifier: GPL-2.0-only
-config ICST
- bool
-config COMMON_CLK_VERSATILE
- bool "Clock driver for ARM Reference designs"
+menu "Clock driver for ARM Reference designs"
depends on ARCH_INTEGRATOR || ARCH_REALVIEW || \
- ARCH_VERSATILE || ARCH_VEXPRESS || ARM64 || \
- COMPILE_TEST
+ ARCH_VERSATILE || ARCH_VEXPRESS || COMPILE_TEST
+
+config ICST
+ bool "Clock driver for ARM Reference designs ICST"
select REGMAP_MMIO
---help---
Supports clocking on ARM Reference designs:
- Integrator/AP and Integrator/CP
- RealView PB1176, EB, PB11MP and PBX
- - Versatile Express
config CLK_SP810
bool "Clock driver for ARM SP810 System Controller"
- depends on COMMON_CLK_VERSATILE
- default y if ARCH_VEXPRESS
+ default y if (ARCH_VEXPRESS && ARM)
---help---
Supports clock muxing (REFCLK/TIMCLK to TIMERCLKEN0-3) capabilities
of the ARM SP810 System Controller cell.
config CLK_VEXPRESS_OSC
- bool "Clock driver for Versatile Express OSC clock generators"
- depends on COMMON_CLK_VERSATILE
+ tristate "Clock driver for Versatile Express OSC clock generators"
depends on VEXPRESS_CONFIG
+ select REGMAP_MMIO
default y if ARCH_VEXPRESS
---help---
Simple regmap-based driver driving clock generators on Versatile
Express platforms hidden behind its configuration infrastructure,
commonly known as OSCs.
+
+endmenu
diff --git a/drivers/clk/versatile/clk-impd1.c b/drivers/clk/versatile/clk-impd1.c
index f9f4babe3ca6..ca798249544d 100644
--- a/drivers/clk/versatile/clk-impd1.c
+++ b/drivers/clk/versatile/clk-impd1.c
@@ -8,7 +8,6 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>
-#include <linux/platform_data/clk-integrator.h>
#include <linux/module.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
@@ -20,26 +19,6 @@
#define IMPD1_OSC2 0x04
#define IMPD1_LOCK 0x08
-struct impd1_clk {
- char *pclkname;
- struct clk *pclk;
- char *vco1name;
- struct clk *vco1clk;
- char *vco2name;
- struct clk *vco2clk;
- struct clk *mmciclk;
- char *uartname;
- struct clk *uartclk;
- char *spiname;
- struct clk *spiclk;
- char *scname;
- struct clk *scclk;
- struct clk_lookup *clks[15];
-};
-
-/* One entry for each connected IM-PD1 LM */
-static struct impd1_clk impd1_clks[4];
-
/*
* There are two VCO's on the IM-PD1
*/
@@ -80,106 +59,6 @@ static const struct clk_icst_desc impd1_icst2_desc = {
.lock_offset = IMPD1_LOCK,
};
-/**
- * integrator_impd1_clk_init() - set up the integrator clock tree
- * @base: base address of the logic module (LM)
- * @id: the ID of this LM
- */
-void integrator_impd1_clk_init(void __iomem *base, unsigned int id)
-{
- struct impd1_clk *imc;
- struct clk *clk;
- struct clk *pclk;
- int i;
-
- if (id > 3) {
- pr_crit("no more than 4 LMs can be attached\n");
- return;
- }
- imc = &impd1_clks[id];
-
- /* Register the fixed rate PCLK */
- imc->pclkname = kasprintf(GFP_KERNEL, "lm%x-pclk", id);
- pclk = clk_register_fixed_rate(NULL, imc->pclkname, NULL, 0, 0);
- imc->pclk = pclk;
-
- imc->vco1name = kasprintf(GFP_KERNEL, "lm%x-vco1", id);
- clk = icst_clk_register(NULL, &impd1_icst1_desc, imc->vco1name, NULL,
- base);
- imc->vco1clk = clk;
- imc->clks[0] = clkdev_alloc(pclk, "apb_pclk", "lm%x:01000", id);
- imc->clks[1] = clkdev_alloc(clk, NULL, "lm%x:01000", id);
-
- /* VCO2 is also called "CLK2" */
- imc->vco2name = kasprintf(GFP_KERNEL, "lm%x-vco2", id);
- clk = icst_clk_register(NULL, &impd1_icst2_desc, imc->vco2name, NULL,
- base);
- imc->vco2clk = clk;
-
- /* MMCI uses CLK2 right off */
- imc->clks[2] = clkdev_alloc(pclk, "apb_pclk", "lm%x:00700", id);
- imc->clks[3] = clkdev_alloc(clk, NULL, "lm%x:00700", id);
-
- /* UART reference clock divides CLK2 by a fixed factor 4 */
- imc->uartname = kasprintf(GFP_KERNEL, "lm%x-uartclk", id);
- clk = clk_register_fixed_factor(NULL, imc->uartname, imc->vco2name,
- CLK_IGNORE_UNUSED, 1, 4);
- imc->uartclk = clk;
- imc->clks[4] = clkdev_alloc(pclk, "apb_pclk", "lm%x:00100", id);
- imc->clks[5] = clkdev_alloc(clk, NULL, "lm%x:00100", id);
- imc->clks[6] = clkdev_alloc(pclk, "apb_pclk", "lm%x:00200", id);
- imc->clks[7] = clkdev_alloc(clk, NULL, "lm%x:00200", id);
-
- /* SPI PL022 clock divides CLK2 by a fixed factor 64 */
- imc->spiname = kasprintf(GFP_KERNEL, "lm%x-spiclk", id);
- clk = clk_register_fixed_factor(NULL, imc->spiname, imc->vco2name,
- CLK_IGNORE_UNUSED, 1, 64);
- imc->clks[8] = clkdev_alloc(pclk, "apb_pclk", "lm%x:00300", id);
- imc->clks[9] = clkdev_alloc(clk, NULL, "lm%x:00300", id);
-
- /* The GPIO blocks and AACI have only PCLK */
- imc->clks[10] = clkdev_alloc(pclk, "apb_pclk", "lm%x:00400", id);
- imc->clks[11] = clkdev_alloc(pclk, "apb_pclk", "lm%x:00500", id);
- imc->clks[12] = clkdev_alloc(pclk, "apb_pclk", "lm%x:00800", id);
-
- /* Smart Card clock divides CLK2 by a fixed factor 4 */
- imc->scname = kasprintf(GFP_KERNEL, "lm%x-scclk", id);
- clk = clk_register_fixed_factor(NULL, imc->scname, imc->vco2name,
- CLK_IGNORE_UNUSED, 1, 4);
- imc->scclk = clk;
- imc->clks[13] = clkdev_alloc(pclk, "apb_pclk", "lm%x:00600", id);
- imc->clks[14] = clkdev_alloc(clk, NULL, "lm%x:00600", id);
-
- for (i = 0; i < ARRAY_SIZE(imc->clks); i++)
- clkdev_add(imc->clks[i]);
-}
-EXPORT_SYMBOL_GPL(integrator_impd1_clk_init);
-
-void integrator_impd1_clk_exit(unsigned int id)
-{
- int i;
- struct impd1_clk *imc;
-
- if (id > 3)
- return;
- imc = &impd1_clks[id];
-
- for (i = 0; i < ARRAY_SIZE(imc->clks); i++)
- clkdev_drop(imc->clks[i]);
- clk_unregister(imc->spiclk);
- clk_unregister(imc->uartclk);
- clk_unregister(imc->vco2clk);
- clk_unregister(imc->vco1clk);
- clk_unregister(imc->pclk);
- kfree(imc->scname);
- kfree(imc->spiname);
- kfree(imc->uartname);
- kfree(imc->vco2name);
- kfree(imc->vco1name);
- kfree(imc->pclkname);
-}
-EXPORT_SYMBOL_GPL(integrator_impd1_clk_exit);
-
static int integrator_impd1_clk_spawn(struct device *dev,
struct device_node *parent,
struct device_node *np)
diff --git a/drivers/clk/versatile/clk-versatile.c b/drivers/clk/versatile/clk-versatile.c
index fd54d5c0251c..8ed7a179f651 100644
--- a/drivers/clk/versatile/clk-versatile.c
+++ b/drivers/clk/versatile/clk-versatile.c
@@ -56,7 +56,7 @@ static const struct clk_icst_desc versatile_auxosc_desc __initconst = {
static void __init cm_osc_setup(struct device_node *np,
const struct clk_icst_desc *desc)
{
- struct clk *clk = ERR_PTR(-EINVAL);
+ struct clk *clk;
const char *clk_name = np->name;
const char *parent_name;
diff --git a/drivers/clk/versatile/clk-vexpress-osc.c b/drivers/clk/versatile/clk-vexpress-osc.c
index 7ade146a3ea9..b2b32fa2d7c3 100644
--- a/drivers/clk/versatile/clk-vexpress-osc.c
+++ b/drivers/clk/versatile/clk-vexpress-osc.c
@@ -7,6 +7,7 @@
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -65,8 +66,8 @@ static int vexpress_osc_probe(struct platform_device *pdev)
{
struct clk_init_data init;
struct vexpress_osc *osc;
- struct clk *clk;
u32 range[2];
+ int ret;
osc = devm_kzalloc(&pdev->dev, sizeof(*osc), GFP_KERNEL);
if (!osc)
@@ -92,11 +93,11 @@ static int vexpress_osc_probe(struct platform_device *pdev)
osc->hw.init = &init;
- clk = clk_register(NULL, &osc->hw);
- if (IS_ERR(clk))
- return PTR_ERR(clk);
+ ret = devm_clk_hw_register(&pdev->dev, &osc->hw);
+ if (ret < 0)
+ return ret;
- of_clk_add_provider(pdev->dev.of_node, of_clk_src_simple_get, clk);
+ devm_of_clk_add_hw_provider(&pdev->dev, of_clk_hw_simple_get, &osc->hw);
clk_hw_set_rate_range(&osc->hw, osc->rate_min, osc->rate_max);
dev_dbg(&pdev->dev, "Registered clock '%s'\n", init.name);
@@ -108,6 +109,7 @@ static const struct of_device_id vexpress_osc_of_match[] = {
{ .compatible = "arm,vexpress-osc", },
{}
};
+MODULE_DEVICE_TABLE(of, vexpress_osc_of_match);
static struct platform_driver vexpress_osc_driver = {
.driver = {
@@ -116,9 +118,5 @@ static struct platform_driver vexpress_osc_driver = {
},
.probe = vexpress_osc_probe,
};
-
-static int __init vexpress_osc_init(void)
-{
- return platform_driver_register(&vexpress_osc_driver);
-}
-core_initcall(vexpress_osc_init);
+module_platform_driver(vexpress_osc_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/x86/Kconfig b/drivers/clk/x86/Kconfig
new file mode 100644
index 000000000000..69642e15fcc1
--- /dev/null
+++ b/drivers/clk/x86/Kconfig
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config CLK_LGM_CGU
+ depends on OF && HAS_IOMEM && (X86 || COMPILE_TEST)
+ select OF_EARLY_FLATTREE
+ bool "Clock driver for Lightning Mountain(LGM) platform"
+ help
+ Clock Generation Unit (CGU) driver for Intel Lightning Mountain (LGM)
+ network processor SoC.
diff --git a/drivers/clk/x86/Makefile b/drivers/clk/x86/Makefile
index e3ec81e2a1c2..7c774ea7ddeb 100644
--- a/drivers/clk/x86/Makefile
+++ b/drivers/clk/x86/Makefile
@@ -3,3 +3,4 @@ obj-$(CONFIG_PMC_ATOM) += clk-pmc-atom.o
obj-$(CONFIG_X86_AMD_PLATFORM_DEVICE) += clk-st.o
clk-x86-lpss-objs := clk-lpt.o
obj-$(CONFIG_X86_INTEL_LPSS) += clk-x86-lpss.o
+obj-$(CONFIG_CLK_LGM_CGU) += clk-cgu.o clk-cgu-pll.o clk-lgm.o
diff --git a/drivers/clk/x86/clk-cgu-pll.c b/drivers/clk/x86/clk-cgu-pll.c
new file mode 100644
index 000000000000..c03cc6b85b9f
--- /dev/null
+++ b/drivers/clk/x86/clk-cgu-pll.c
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Intel Corporation.
+ * Zhu YiXin <yixin.zhu@intel.com>
+ * Rahul Tanwar <rahul.tanwar@intel.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/iopoll.h>
+#include <linux/of.h>
+
+#include "clk-cgu.h"
+
+#define to_lgm_clk_pll(_hw) container_of(_hw, struct lgm_clk_pll, hw)
+#define PLL_REF_DIV(x) ((x) + 0x08)
+
+/*
+ * Calculate formula:
+ * rate = (prate * mult + (prate * frac) / frac_div) / div
+ */
+static unsigned long
+lgm_pll_calc_rate(unsigned long prate, unsigned int mult,
+ unsigned int div, unsigned int frac, unsigned int frac_div)
+{
+ u64 crate, frate, rate64;
+
+ rate64 = prate;
+ crate = rate64 * mult;
+ frate = rate64 * frac;
+ do_div(frate, frac_div);
+ crate += frate;
+ do_div(crate, div);
+
+ return crate;
+}
+
+static unsigned long lgm_pll_recalc_rate(struct clk_hw *hw, unsigned long prate)
+{
+ struct lgm_clk_pll *pll = to_lgm_clk_pll(hw);
+ unsigned int div, mult, frac;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pll->lock, flags);
+ mult = lgm_get_clk_val(pll->membase, PLL_REF_DIV(pll->reg), 0, 12);
+ div = lgm_get_clk_val(pll->membase, PLL_REF_DIV(pll->reg), 18, 6);
+ frac = lgm_get_clk_val(pll->membase, pll->reg, 2, 24);
+ spin_unlock_irqrestore(&pll->lock, flags);
+
+ if (pll->type == TYPE_LJPLL)
+ div *= 4;
+
+ return lgm_pll_calc_rate(prate, mult, div, frac, BIT(24));
+}
+
+static int lgm_pll_is_enabled(struct clk_hw *hw)
+{
+ struct lgm_clk_pll *pll = to_lgm_clk_pll(hw);
+ unsigned long flags;
+ unsigned int ret;
+
+ spin_lock_irqsave(&pll->lock, flags);
+ ret = lgm_get_clk_val(pll->membase, pll->reg, 0, 1);
+ spin_unlock_irqrestore(&pll->lock, flags);
+
+ return ret;
+}
+
+static int lgm_pll_enable(struct clk_hw *hw)
+{
+ struct lgm_clk_pll *pll = to_lgm_clk_pll(hw);
+ unsigned long flags;
+ u32 val;
+ int ret;
+
+ spin_lock_irqsave(&pll->lock, flags);
+ lgm_set_clk_val(pll->membase, pll->reg, 0, 1, 1);
+ ret = readl_poll_timeout_atomic(pll->membase + pll->reg,
+ val, (val & 0x1), 1, 100);
+ spin_unlock_irqrestore(&pll->lock, flags);
+
+ return ret;
+}
+
+static void lgm_pll_disable(struct clk_hw *hw)
+{
+ struct lgm_clk_pll *pll = to_lgm_clk_pll(hw);
+ unsigned long flags;
+
+ spin_lock_irqsave(&pll->lock, flags);
+ lgm_set_clk_val(pll->membase, pll->reg, 0, 1, 0);
+ spin_unlock_irqrestore(&pll->lock, flags);
+}
+
+static const struct clk_ops lgm_pll_ops = {
+ .recalc_rate = lgm_pll_recalc_rate,
+ .is_enabled = lgm_pll_is_enabled,
+ .enable = lgm_pll_enable,
+ .disable = lgm_pll_disable,
+};
+
+static struct clk_hw *
+lgm_clk_register_pll(struct lgm_clk_provider *ctx,
+ const struct lgm_pll_clk_data *list)
+{
+ struct clk_init_data init = {};
+ struct lgm_clk_pll *pll;
+ struct device *dev = ctx->dev;
+ struct clk_hw *hw;
+ int ret;
+
+ init.ops = &lgm_pll_ops;
+ init.name = list->name;
+ init.flags = list->flags;
+ init.parent_data = list->parent_data;
+ init.num_parents = list->num_parents;
+
+ pll = devm_kzalloc(dev, sizeof(*pll), GFP_KERNEL);
+ if (!pll)
+ return ERR_PTR(-ENOMEM);
+
+ pll->membase = ctx->membase;
+ pll->lock = ctx->lock;
+ pll->reg = list->reg;
+ pll->flags = list->flags;
+ pll->type = list->type;
+ pll->hw.init = &init;
+
+ hw = &pll->hw;
+ ret = clk_hw_register(dev, hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return hw;
+}
+
+int lgm_clk_register_plls(struct lgm_clk_provider *ctx,
+ const struct lgm_pll_clk_data *list,
+ unsigned int nr_clk)
+{
+ struct clk_hw *hw;
+ int i;
+
+ for (i = 0; i < nr_clk; i++, list++) {
+ hw = lgm_clk_register_pll(ctx, list);
+ if (IS_ERR(hw)) {
+ dev_err(ctx->dev, "failed to register pll: %s\n",
+ list->name);
+ return PTR_ERR(hw);
+ }
+ ctx->clk_data.hws[list->id] = hw;
+ }
+
+ return 0;
+}
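
A worked example of the PLL rate formula implemented by lgm_pll_calc_rate(), with illustrative register values: prate = 40 MHz, mult = 100, div = 2, frac = 0x800000 and frac_div = 2^24 (so frac/frac_div = 0.5):

	/*
	 * rate = (40 MHz * 100 + 40 MHz * 0.5) / 2
	 *      = (4000 MHz + 20 MHz) / 2
	 *      = 2010 MHz
	 *
	 * For a low-jitter PLL (TYPE_LJPLL) the divider read from PLL_REF_DIV
	 * is additionally multiplied by 4 before this calculation.
	 */
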
diff --git a/drivers/clk/x86/clk-cgu.c b/drivers/clk/x86/clk-cgu.c
new file mode 100644
index 000000000000..56af0e04ec1e
--- /dev/null
+++ b/drivers/clk/x86/clk-cgu.c
@@ -0,0 +1,636 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Intel Corporation.
+ * Zhu YiXin <yixin.zhu@intel.com>
+ * Rahul Tanwar <rahul.tanwar@intel.com>
+ */
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/of.h>
+
+#include "clk-cgu.h"
+
+#define GATE_HW_REG_STAT(reg) ((reg) + 0x0)
+#define GATE_HW_REG_EN(reg) ((reg) + 0x4)
+#define GATE_HW_REG_DIS(reg) ((reg) + 0x8)
+#define MAX_DDIV_REG 8
+#define MAX_DIVIDER_VAL 64
+
+#define to_lgm_clk_mux(_hw) container_of(_hw, struct lgm_clk_mux, hw)
+#define to_lgm_clk_divider(_hw) container_of(_hw, struct lgm_clk_divider, hw)
+#define to_lgm_clk_gate(_hw) container_of(_hw, struct lgm_clk_gate, hw)
+#define to_lgm_clk_ddiv(_hw) container_of(_hw, struct lgm_clk_ddiv, hw)
+
+static struct clk_hw *lgm_clk_register_fixed(struct lgm_clk_provider *ctx,
+ const struct lgm_clk_branch *list)
+{
+ unsigned long flags;
+
+ if (list->div_flags & CLOCK_FLAG_VAL_INIT) {
+ spin_lock_irqsave(&ctx->lock, flags);
+ lgm_set_clk_val(ctx->membase, list->div_off, list->div_shift,
+ list->div_width, list->div_val);
+ spin_unlock_irqrestore(&ctx->lock, flags);
+ }
+
+ return clk_hw_register_fixed_rate(NULL, list->name,
+ list->parent_data[0].name,
+ list->flags, list->mux_flags);
+}
+
+static u8 lgm_clk_mux_get_parent(struct clk_hw *hw)
+{
+ struct lgm_clk_mux *mux = to_lgm_clk_mux(hw);
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&mux->lock, flags);
+ if (mux->flags & MUX_CLK_SW)
+ val = mux->reg;
+ else
+ val = lgm_get_clk_val(mux->membase, mux->reg, mux->shift,
+ mux->width);
+ spin_unlock_irqrestore(&mux->lock, flags);
+ return clk_mux_val_to_index(hw, NULL, mux->flags, val);
+}
+
+static int lgm_clk_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct lgm_clk_mux *mux = to_lgm_clk_mux(hw);
+ unsigned long flags;
+ u32 val;
+
+ val = clk_mux_index_to_val(NULL, mux->flags, index);
+ spin_lock_irqsave(&mux->lock, flags);
+ if (mux->flags & MUX_CLK_SW)
+ mux->reg = val;
+ else
+ lgm_set_clk_val(mux->membase, mux->reg, mux->shift,
+ mux->width, val);
+ spin_unlock_irqrestore(&mux->lock, flags);
+
+ return 0;
+}
+
+static int lgm_clk_mux_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct lgm_clk_mux *mux = to_lgm_clk_mux(hw);
+
+ return clk_mux_determine_rate_flags(hw, req, mux->flags);
+}
+
+static const struct clk_ops lgm_clk_mux_ops = {
+ .get_parent = lgm_clk_mux_get_parent,
+ .set_parent = lgm_clk_mux_set_parent,
+ .determine_rate = lgm_clk_mux_determine_rate,
+};
+
+static struct clk_hw *
+lgm_clk_register_mux(struct lgm_clk_provider *ctx,
+ const struct lgm_clk_branch *list)
+{
+ unsigned long flags, cflags = list->mux_flags;
+ struct device *dev = ctx->dev;
+ u8 shift = list->mux_shift;
+ u8 width = list->mux_width;
+ struct clk_init_data init = {};
+ struct lgm_clk_mux *mux;
+ u32 reg = list->mux_off;
+ struct clk_hw *hw;
+ int ret;
+
+ mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = list->name;
+ init.ops = &lgm_clk_mux_ops;
+ init.flags = list->flags;
+ init.parent_data = list->parent_data;
+ init.num_parents = list->num_parents;
+
+ mux->membase = ctx->membase;
+ mux->lock = ctx->lock;
+ mux->reg = reg;
+ mux->shift = shift;
+ mux->width = width;
+ mux->flags = cflags;
+ mux->hw.init = &init;
+
+ hw = &mux->hw;
+ ret = clk_hw_register(dev, hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (cflags & CLOCK_FLAG_VAL_INIT) {
+ spin_lock_irqsave(&mux->lock, flags);
+ lgm_set_clk_val(mux->membase, reg, shift, width, list->mux_val);
+ spin_unlock_irqrestore(&mux->lock, flags);
+ }
+
+ return hw;
+}
+
+static unsigned long
+lgm_clk_divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ struct lgm_clk_divider *divider = to_lgm_clk_divider(hw);
+ unsigned long flags;
+ unsigned int val;
+
+ spin_lock_irqsave(&divider->lock, flags);
+ val = lgm_get_clk_val(divider->membase, divider->reg,
+ divider->shift, divider->width);
+ spin_unlock_irqrestore(&divider->lock, flags);
+
+ return divider_recalc_rate(hw, parent_rate, val, divider->table,
+ divider->flags, divider->width);
+}
+
+static long
+lgm_clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct lgm_clk_divider *divider = to_lgm_clk_divider(hw);
+
+ return divider_round_rate(hw, rate, prate, divider->table,
+ divider->width, divider->flags);
+}
+
+static int
+lgm_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate)
+{
+ struct lgm_clk_divider *divider = to_lgm_clk_divider(hw);
+ unsigned long flags;
+ int value;
+
+ value = divider_get_val(rate, prate, divider->table,
+ divider->width, divider->flags);
+ if (value < 0)
+ return value;
+
+ spin_lock_irqsave(&divider->lock, flags);
+ lgm_set_clk_val(divider->membase, divider->reg,
+ divider->shift, divider->width, value);
+ spin_unlock_irqrestore(&divider->lock, flags);
+
+ return 0;
+}
+
+static int lgm_clk_divider_enable_disable(struct clk_hw *hw, int enable)
+{
+ struct lgm_clk_divider *div = to_lgm_clk_divider(hw);
+ unsigned long flags;
+
+ spin_lock_irqsave(&div->lock, flags);
+ lgm_set_clk_val(div->membase, div->reg, div->shift_gate,
+ div->width_gate, enable);
+ spin_unlock_irqrestore(&div->lock, flags);
+ return 0;
+}
+
+static int lgm_clk_divider_enable(struct clk_hw *hw)
+{
+ return lgm_clk_divider_enable_disable(hw, 1);
+}
+
+static void lgm_clk_divider_disable(struct clk_hw *hw)
+{
+ lgm_clk_divider_enable_disable(hw, 0);
+}
+
+static const struct clk_ops lgm_clk_divider_ops = {
+ .recalc_rate = lgm_clk_divider_recalc_rate,
+ .round_rate = lgm_clk_divider_round_rate,
+ .set_rate = lgm_clk_divider_set_rate,
+ .enable = lgm_clk_divider_enable,
+ .disable = lgm_clk_divider_disable,
+};
+
+static struct clk_hw *
+lgm_clk_register_divider(struct lgm_clk_provider *ctx,
+ const struct lgm_clk_branch *list)
+{
+ unsigned long flags, cflags = list->div_flags;
+ struct device *dev = ctx->dev;
+ struct lgm_clk_divider *div;
+ struct clk_init_data init = {};
+ u8 shift = list->div_shift;
+ u8 width = list->div_width;
+ u8 shift_gate = list->div_shift_gate;
+ u8 width_gate = list->div_width_gate;
+ u32 reg = list->div_off;
+ struct clk_hw *hw;
+ int ret;
+
+ div = devm_kzalloc(dev, sizeof(*div), GFP_KERNEL);
+ if (!div)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = list->name;
+ init.ops = &lgm_clk_divider_ops;
+ init.flags = list->flags;
+ init.parent_data = list->parent_data;
+ init.num_parents = 1;
+
+ div->membase = ctx->membase;
+ div->lock = ctx->lock;
+ div->reg = reg;
+ div->shift = shift;
+ div->width = width;
+ div->shift_gate = shift_gate;
+ div->width_gate = width_gate;
+ div->flags = cflags;
+ div->table = list->div_table;
+ div->hw.init = &init;
+
+ hw = &div->hw;
+ ret = clk_hw_register(dev, hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (cflags & CLOCK_FLAG_VAL_INIT) {
+ spin_lock_irqsave(&div->lock, flags);
+ lgm_set_clk_val(div->membase, reg, shift, width, list->div_val);
+ spin_unlock_irqrestore(&div->lock, flags);
+ }
+
+ return hw;
+}
+
+static struct clk_hw *
+lgm_clk_register_fixed_factor(struct lgm_clk_provider *ctx,
+ const struct lgm_clk_branch *list)
+{
+ unsigned long flags;
+ struct clk_hw *hw;
+
+ hw = clk_hw_register_fixed_factor(ctx->dev, list->name,
+ list->parent_data[0].name, list->flags,
+ list->mult, list->div);
+ if (IS_ERR(hw))
+ return ERR_CAST(hw);
+
+ if (list->div_flags & CLOCK_FLAG_VAL_INIT) {
+ spin_lock_irqsave(&ctx->lock, flags);
+ lgm_set_clk_val(ctx->membase, list->div_off, list->div_shift,
+ list->div_width, list->div_val);
+ spin_unlock_irqrestore(&ctx->lock, flags);
+ }
+
+ return hw;
+}
+
+static int lgm_clk_gate_enable(struct clk_hw *hw)
+{
+ struct lgm_clk_gate *gate = to_lgm_clk_gate(hw);
+ unsigned long flags;
+ unsigned int reg;
+
+ spin_lock_irqsave(&gate->lock, flags);
+ reg = GATE_HW_REG_EN(gate->reg);
+ lgm_set_clk_val(gate->membase, reg, gate->shift, 1, 1);
+ spin_unlock_irqrestore(&gate->lock, flags);
+
+ return 0;
+}
+
+static void lgm_clk_gate_disable(struct clk_hw *hw)
+{
+ struct lgm_clk_gate *gate = to_lgm_clk_gate(hw);
+ unsigned long flags;
+ unsigned int reg;
+
+ spin_lock_irqsave(&gate->lock, flags);
+ reg = GATE_HW_REG_DIS(gate->reg);
+ lgm_set_clk_val(gate->membase, reg, gate->shift, 1, 1);
+ spin_unlock_irqrestore(&gate->lock, flags);
+}
+
+static int lgm_clk_gate_is_enabled(struct clk_hw *hw)
+{
+ struct lgm_clk_gate *gate = to_lgm_clk_gate(hw);
+ unsigned int reg, ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&gate->lock, flags);
+ reg = GATE_HW_REG_STAT(gate->reg);
+ ret = lgm_get_clk_val(gate->membase, reg, gate->shift, 1);
+ spin_unlock_irqrestore(&gate->lock, flags);
+
+ return ret;
+}
+
+static const struct clk_ops lgm_clk_gate_ops = {
+ .enable = lgm_clk_gate_enable,
+ .disable = lgm_clk_gate_disable,
+ .is_enabled = lgm_clk_gate_is_enabled,
+};
+
+static struct clk_hw *
+lgm_clk_register_gate(struct lgm_clk_provider *ctx,
+ const struct lgm_clk_branch *list)
+{
+ unsigned long flags, cflags = list->gate_flags;
+ const char *pname = list->parent_data[0].name;
+ struct device *dev = ctx->dev;
+ u8 shift = list->gate_shift;
+ struct clk_init_data init = {};
+ struct lgm_clk_gate *gate;
+ u32 reg = list->gate_off;
+ struct clk_hw *hw;
+ int ret;
+
+ gate = devm_kzalloc(dev, sizeof(*gate), GFP_KERNEL);
+ if (!gate)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = list->name;
+ init.ops = &lgm_clk_gate_ops;
+ init.flags = list->flags;
+ init.parent_names = pname ? &pname : NULL;
+ init.num_parents = pname ? 1 : 0;
+
+ gate->membase = ctx->membase;
+ gate->lock = ctx->lock;
+ gate->reg = reg;
+ gate->shift = shift;
+ gate->flags = cflags;
+ gate->hw.init = &init;
+
+ hw = &gate->hw;
+ ret = clk_hw_register(dev, hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (cflags & CLOCK_FLAG_VAL_INIT) {
+ spin_lock_irqsave(&gate->lock, flags);
+ lgm_set_clk_val(gate->membase, reg, shift, 1, list->gate_val);
+ spin_unlock_irqrestore(&gate->lock, flags);
+ }
+
+ return hw;
+}
+
+int lgm_clk_register_branches(struct lgm_clk_provider *ctx,
+ const struct lgm_clk_branch *list,
+ unsigned int nr_clk)
+{
+ struct clk_hw *hw;
+ unsigned int idx;
+
+ for (idx = 0; idx < nr_clk; idx++, list++) {
+ switch (list->type) {
+ case CLK_TYPE_FIXED:
+ hw = lgm_clk_register_fixed(ctx, list);
+ break;
+ case CLK_TYPE_MUX:
+ hw = lgm_clk_register_mux(ctx, list);
+ break;
+ case CLK_TYPE_DIVIDER:
+ hw = lgm_clk_register_divider(ctx, list);
+ break;
+ case CLK_TYPE_FIXED_FACTOR:
+ hw = lgm_clk_register_fixed_factor(ctx, list);
+ break;
+ case CLK_TYPE_GATE:
+ hw = lgm_clk_register_gate(ctx, list);
+ break;
+ default:
+ dev_err(ctx->dev, "invalid clk type\n");
+ return -EINVAL;
+ }
+
+ if (IS_ERR(hw)) {
+ dev_err(ctx->dev,
+ "register clk: %s, type: %u failed!\n",
+ list->name, list->type);
+ return -EIO;
+ }
+ ctx->clk_data.hws[list->id] = hw;
+ }
+
+ return 0;
+}
+
+static unsigned long
+lgm_clk_ddiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
+ unsigned int div0, div1, exdiv;
+ unsigned long flags;
+ u64 prate;
+
+ spin_lock_irqsave(&ddiv->lock, flags);
+ div0 = lgm_get_clk_val(ddiv->membase, ddiv->reg,
+ ddiv->shift0, ddiv->width0) + 1;
+ div1 = lgm_get_clk_val(ddiv->membase, ddiv->reg,
+ ddiv->shift1, ddiv->width1) + 1;
+ exdiv = lgm_get_clk_val(ddiv->membase, ddiv->reg,
+ ddiv->shift2, ddiv->width2);
+ spin_unlock_irqrestore(&ddiv->lock, flags);
+
+ prate = (u64)parent_rate;
+ do_div(prate, div0);
+ do_div(prate, div1);
+
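+ /* the exdiv bit adds a further mult/div = 2/5 scaling, i.e. a divide by 2.5 */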
+ if (exdiv) {
+ do_div(prate, ddiv->div);
+ prate *= ddiv->mult;
+ }
+
+ return prate;
+}
+
+static int lgm_clk_ddiv_enable(struct clk_hw *hw)
+{
+ struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ddiv->lock, flags);
+ lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift_gate,
+ ddiv->width_gate, 1);
+ spin_unlock_irqrestore(&ddiv->lock, flags);
+ return 0;
+}
+
+static void lgm_clk_ddiv_disable(struct clk_hw *hw)
+{
+ struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ddiv->lock, flags);
+ lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift_gate,
+ ddiv->width_gate, 0);
+ spin_unlock_irqrestore(&ddiv->lock, flags);
+}
+
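+/*
+ * Split the requested divider into the two register fields ddiv1 * ddiv2,
+ * each factor limited to MAX_DDIV_REG, e.g. div = 48 gives ddiv1 = 8, ddiv2 = 6.
+ */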
+static int
+lgm_clk_get_ddiv_val(u32 div, u32 *ddiv1, u32 *ddiv2)
+{
+ u32 idx, temp;
+
+ *ddiv1 = 1;
+ *ddiv2 = 1;
+
+ if (div > MAX_DIVIDER_VAL)
+ div = MAX_DIVIDER_VAL;
+
+ if (div > 1) {
+ for (idx = 2; idx <= MAX_DDIV_REG; idx++) {
+ temp = DIV_ROUND_UP_ULL((u64)div, idx);
+ if (div % idx == 0 && temp <= MAX_DDIV_REG)
+ break;
+ }
+
+ if (idx > MAX_DDIV_REG)
+ return -EINVAL;
+
+ *ddiv1 = temp;
+ *ddiv2 = idx;
+ }
+
+ return 0;
+}
+
+static int
+lgm_clk_ddiv_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate)
+{
+ struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
+ u32 div, ddiv1, ddiv2;
+ unsigned long flags;
+
+ div = DIV_ROUND_CLOSEST_ULL((u64)prate, rate);
+
+ spin_lock_irqsave(&ddiv->lock, flags);
+ if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) {
+ div = DIV_ROUND_CLOSEST_ULL((u64)div, 5);
+ div = div * 2;
+ }
+
+ if (div <= 0) {
+ spin_unlock_irqrestore(&ddiv->lock, flags);
+ return -EINVAL;
+ }
+
+ if (lgm_clk_get_ddiv_val(div, &ddiv1, &ddiv2)) {
+ spin_unlock_irqrestore(&ddiv->lock, flags);
+ return -EINVAL;
+ }
+
+ lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift0, ddiv->width0,
+ ddiv1 - 1);
+
+ lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift1, ddiv->width1,
+ ddiv2 - 1);
+ spin_unlock_irqrestore(&ddiv->lock, flags);
+
+ return 0;
+}
+
+static long
+lgm_clk_ddiv_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
+ u32 div, ddiv1, ddiv2;
+ unsigned long flags;
+ u64 rate64;
+
+ div = DIV_ROUND_CLOSEST_ULL((u64)*prate, rate);
+
+ /* if predivide bit is enabled, modify div by factor of 2.5 */
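+ /* e.g. *prate = 1 GHz, rate = 100 MHz -> div = 10, scaled to 10 * 2 / 5 = 4 */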
+ spin_lock_irqsave(&ddiv->lock, flags);
+ if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) {
+ div = div * 2;
+ div = DIV_ROUND_CLOSEST_ULL((u64)div, 5);
+ }
+
+ if (div <= 0) {
+ spin_unlock_irqrestore(&ddiv->lock, flags);
+ return *prate;
+ }
+
+ if (lgm_clk_get_ddiv_val(div, &ddiv1, &ddiv2) != 0) {
+ if (lgm_clk_get_ddiv_val(div + 1, &ddiv1, &ddiv2) != 0) {
+ spin_unlock_irqrestore(&ddiv->lock, flags);
+ return -EINVAL;
+ }
+ }
+
+ rate64 = *prate;
+ do_div(rate64, ddiv1);
+ do_div(rate64, ddiv2);
+
+ /* if predivide bit is enabled, modify rounded rate by factor of 2.5 */
+ if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) {
+ rate64 = rate64 * 2;
+ rate64 = DIV_ROUND_CLOSEST_ULL(rate64, 5);
+ }
+ spin_unlock_irqrestore(&ddiv->lock, flags);
+
+ return rate64;
+}
+
+static const struct clk_ops lgm_clk_ddiv_ops = {
+ .recalc_rate = lgm_clk_ddiv_recalc_rate,
+ .enable = lgm_clk_ddiv_enable,
+ .disable = lgm_clk_ddiv_disable,
+ .set_rate = lgm_clk_ddiv_set_rate,
+ .round_rate = lgm_clk_ddiv_round_rate,
+};
+
+int lgm_clk_register_ddiv(struct lgm_clk_provider *ctx,
+ const struct lgm_clk_ddiv_data *list,
+ unsigned int nr_clk)
+{
+ struct device *dev = ctx->dev;
+ struct clk_init_data init = {};
+ struct lgm_clk_ddiv *ddiv;
+ struct clk_hw *hw;
+ unsigned int idx;
+ int ret;
+
+ for (idx = 0; idx < nr_clk; idx++, list++) {
+ ddiv = NULL;
+ ddiv = devm_kzalloc(dev, sizeof(*ddiv), GFP_KERNEL);
+ if (!ddiv)
+ return -ENOMEM;
+
+ memset(&init, 0, sizeof(init));
+ init.name = list->name;
+ init.ops = &lgm_clk_ddiv_ops;
+ init.flags = list->flags;
+ init.parent_data = list->parent_data;
+ init.num_parents = 1;
+
+ ddiv->membase = ctx->membase;
+ ddiv->lock = ctx->lock;
+ ddiv->reg = list->reg;
+ ddiv->shift0 = list->shift0;
+ ddiv->width0 = list->width0;
+ ddiv->shift1 = list->shift1;
+ ddiv->width1 = list->width1;
+ ddiv->shift_gate = list->shift_gate;
+ ddiv->width_gate = list->width_gate;
+ ddiv->shift2 = list->ex_shift;
+ ddiv->width2 = list->ex_width;
+ ddiv->flags = list->div_flags;
+ ddiv->mult = 2;
+ ddiv->div = 5;
+ ddiv->hw.init = &init;
+
+ hw = &ddiv->hw;
+ ret = clk_hw_register(dev, hw);
+ if (ret) {
+ dev_err(dev, "register clk: %s failed!\n", list->name);
+ return ret;
+ }
+ ctx->clk_data.hws[list->id] = hw;
+ }
+
+ return 0;
+}
diff --git a/drivers/clk/x86/clk-cgu.h b/drivers/clk/x86/clk-cgu.h
new file mode 100644
index 000000000000..4e22bfb22312
--- /dev/null
+++ b/drivers/clk/x86/clk-cgu.h
@@ -0,0 +1,335 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright(c) 2020 Intel Corporation.
+ * Zhu YiXin <yixin.zhu@intel.com>
+ * Rahul Tanwar <rahul.tanwar@intel.com>
+ */
+
+#ifndef __CLK_CGU_H
+#define __CLK_CGU_H
+
+#include <linux/io.h>
+
+struct lgm_clk_mux {
+ struct clk_hw hw;
+ void __iomem *membase;
+ unsigned int reg;
+ u8 shift;
+ u8 width;
+ unsigned long flags;
+ spinlock_t lock;
+};
+
+struct lgm_clk_divider {
+ struct clk_hw hw;
+ void __iomem *membase;
+ unsigned int reg;
+ u8 shift;
+ u8 width;
+ u8 shift_gate;
+ u8 width_gate;
+ unsigned long flags;
+ const struct clk_div_table *table;
+ spinlock_t lock;
+};
+
+struct lgm_clk_ddiv {
+ struct clk_hw hw;
+ void __iomem *membase;
+ unsigned int reg;
+ u8 shift0;
+ u8 width0;
+ u8 shift1;
+ u8 width1;
+ u8 shift2;
+ u8 width2;
+ u8 shift_gate;
+ u8 width_gate;
+ unsigned int mult;
+ unsigned int div;
+ unsigned long flags;
+ spinlock_t lock;
+};
+
+struct lgm_clk_gate {
+ struct clk_hw hw;
+ void __iomem *membase;
+ unsigned int reg;
+ u8 shift;
+ unsigned long flags;
+ spinlock_t lock;
+};
+
+enum lgm_clk_type {
+ CLK_TYPE_FIXED,
+ CLK_TYPE_MUX,
+ CLK_TYPE_DIVIDER,
+ CLK_TYPE_FIXED_FACTOR,
+ CLK_TYPE_GATE,
+ CLK_TYPE_NONE,
+};
+
+/**
+ * struct lgm_clk_provider
+ * @membase: IO mem base address for CGU.
+ * @np: device node
+ * @dev: device
+ * @clk_data: array of hw clocks and clk number.
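+ * @lock: spinlock to serialize CGU register access.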
+ */
+struct lgm_clk_provider {
+ void __iomem *membase;
+ struct device_node *np;
+ struct device *dev;
+ struct clk_hw_onecell_data clk_data;
+ spinlock_t lock;
+};
+
+enum pll_type {
+ TYPE_ROPLL,
+ TYPE_LJPLL,
+ TYPE_NONE,
+};
+
+struct lgm_clk_pll {
+ struct clk_hw hw;
+ void __iomem *membase;
+ unsigned int reg;
+ unsigned long flags;
+ enum pll_type type;
+ spinlock_t lock;
+};
+
+/**
+ * struct lgm_pll_clk_data
+ * @id: platform specific id of the clock.
+ * @name: name of this pll clock.
+ * @parent_data: parent clock data.
+ * @num_parents: number of parents.
+ * @flags: optional flags for basic clock.
+ * @type: platform type of pll.
+ * @reg: offset of the register.
+ */
+struct lgm_pll_clk_data {
+ unsigned int id;
+ const char *name;
+ const struct clk_parent_data *parent_data;
+ u8 num_parents;
+ unsigned long flags;
+ enum pll_type type;
+ int reg;
+};
+
+#define LGM_PLL(_id, _name, _pdata, _flags, \
+ _reg, _type) \
+ { \
+ .id = _id, \
+ .name = _name, \
+ .parent_data = _pdata, \
+ .num_parents = ARRAY_SIZE(_pdata), \
+ .flags = _flags, \
+ .reg = _reg, \
+ .type = _type, \
+ }
+
+struct lgm_clk_ddiv_data {
+ unsigned int id;
+ const char *name;
+ const struct clk_parent_data *parent_data;
+ u8 flags;
+ unsigned long div_flags;
+ unsigned int reg;
+ u8 shift0;
+ u8 width0;
+ u8 shift1;
+ u8 width1;
+ u8 shift_gate;
+ u8 width_gate;
+ u8 ex_shift;
+ u8 ex_width;
+};
+
+#define LGM_DDIV(_id, _name, _pname, _flags, _reg, \
+ _shft0, _wdth0, _shft1, _wdth1, \
+ _shft_gate, _wdth_gate, _xshft, _df) \
+ { \
+ .id = _id, \
+ .name = _name, \
+ .parent_data = &(const struct clk_parent_data){ \
+ .fw_name = _pname, \
+ .name = _pname, \
+ }, \
+ .flags = _flags, \
+ .reg = _reg, \
+ .shift0 = _shft0, \
+ .width0 = _wdth0, \
+ .shift1 = _shft1, \
+ .width1 = _wdth1, \
+ .shift_gate = _shft_gate, \
+ .width_gate = _wdth_gate, \
+ .ex_shift = _xshft, \
+ .ex_width = 1, \
+ .div_flags = _df, \
+ }
+
+struct lgm_clk_branch {
+ unsigned int id;
+ enum lgm_clk_type type;
+ const char *name;
+ const struct clk_parent_data *parent_data;
+ u8 num_parents;
+ unsigned long flags;
+ unsigned int mux_off;
+ u8 mux_shift;
+ u8 mux_width;
+ unsigned long mux_flags;
+ unsigned int mux_val;
+ unsigned int div_off;
+ u8 div_shift;
+ u8 div_width;
+ u8 div_shift_gate;
+ u8 div_width_gate;
+ unsigned long div_flags;
+ unsigned int div_val;
+ const struct clk_div_table *div_table;
+ unsigned int gate_off;
+ u8 gate_shift;
+ unsigned long gate_flags;
+ unsigned int gate_val;
+ unsigned int mult;
+ unsigned int div;
+};
+
+/* clock flags definition */
+#define CLOCK_FLAG_VAL_INIT BIT(16)
+#define MUX_CLK_SW BIT(17)
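+/* CLOCK_FLAG_VAL_INIT: program div/mux/gate_val into the register at registration */
+/* MUX_CLK_SW: software-only mux, the selection is cached in mux->reg instead of hardware */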
+
+#define LGM_MUX(_id, _name, _pdata, _f, _reg, \
+ _shift, _width, _cf, _v) \
+ { \
+ .id = _id, \
+ .type = CLK_TYPE_MUX, \
+ .name = _name, \
+ .parent_data = _pdata, \
+ .num_parents = ARRAY_SIZE(_pdata), \
+ .flags = _f, \
+ .mux_off = _reg, \
+ .mux_shift = _shift, \
+ .mux_width = _width, \
+ .mux_flags = _cf, \
+ .mux_val = _v, \
+ }
+
+#define LGM_DIV(_id, _name, _pname, _f, _reg, _shift, _width, \
+ _shift_gate, _width_gate, _cf, _v, _dtable) \
+ { \
+ .id = _id, \
+ .type = CLK_TYPE_DIVIDER, \
+ .name = _name, \
+ .parent_data = &(const struct clk_parent_data){ \
+ .fw_name = _pname, \
+ .name = _pname, \
+ }, \
+ .num_parents = 1, \
+ .flags = _f, \
+ .div_off = _reg, \
+ .div_shift = _shift, \
+ .div_width = _width, \
+ .div_shift_gate = _shift_gate, \
+ .div_width_gate = _width_gate, \
+ .div_flags = _cf, \
+ .div_val = _v, \
+ .div_table = _dtable, \
+ }
+
+#define LGM_GATE(_id, _name, _pname, _f, _reg, \
+ _shift, _cf, _v) \
+ { \
+ .id = _id, \
+ .type = CLK_TYPE_GATE, \
+ .name = _name, \
+ .parent_data = &(const struct clk_parent_data){ \
+ .fw_name = _pname, \
+ .name = _pname, \
+ }, \
+ .num_parents = !_pname ? 0 : 1, \
+ .flags = _f, \
+ .gate_off = _reg, \
+ .gate_shift = _shift, \
+ .gate_flags = _cf, \
+ .gate_val = _v, \
+ }
+
+#define LGM_FIXED(_id, _name, _pname, _f, _reg, \
+ _shift, _width, _cf, _freq, _v) \
+ { \
+ .id = _id, \
+ .type = CLK_TYPE_FIXED, \
+ .name = _name, \
+ .parent_data = &(const struct clk_parent_data){ \
+ .fw_name = _pname, \
+ .name = _pname, \
+ }, \
+ .num_parents = !_pname ? 0 : 1, \
+ .flags = _f, \
+ .div_off = _reg, \
+ .div_shift = _shift, \
+ .div_width = _width, \
+ .div_flags = _cf, \
+ .div_val = _v, \
+ .mux_flags = _freq, \
+ }
+
+#define LGM_FIXED_FACTOR(_id, _name, _pname, _f, _reg, \
+ _shift, _width, _cf, _v, _m, _d) \
+ { \
+ .id = _id, \
+ .type = CLK_TYPE_FIXED_FACTOR, \
+ .name = _name, \
+ .parent_data = &(const struct clk_parent_data){ \
+ .fw_name = _pname, \
+ .name = _pname, \
+ }, \
+ .num_parents = 1, \
+ .flags = _f, \
+ .div_off = _reg, \
+ .div_shift = _shift, \
+ .div_width = _width, \
+ .div_flags = _cf, \
+ .div_val = _v, \
+ .mult = _m, \
+ .div = _d, \
+ }
+
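+/* field accessors: read, or read-modify-write, a 'width'-bit field at bit 'shift' of register 'reg' */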
+static inline void lgm_set_clk_val(void __iomem *membase, u32 reg,
+ u8 shift, u8 width, u32 set_val)
+{
+ u32 mask = (GENMASK(width - 1, 0) << shift);
+ u32 regval;
+
+ regval = readl(membase + reg);
+ regval = (regval & ~mask) | ((set_val << shift) & mask);
+ writel(regval, membase + reg);
+}
+
+static inline u32 lgm_get_clk_val(void __iomem *membase, u32 reg,
+ u8 shift, u8 width)
+{
+ u32 mask = (GENMASK(width - 1, 0) << shift);
+ u32 val;
+
+ val = readl(membase + reg);
+ val = (val & mask) >> shift;
+
+ return val;
+}
+
+int lgm_clk_register_branches(struct lgm_clk_provider *ctx,
+ const struct lgm_clk_branch *list,
+ unsigned int nr_clk);
+int lgm_clk_register_plls(struct lgm_clk_provider *ctx,
+ const struct lgm_pll_clk_data *list,
+ unsigned int nr_clk);
+int lgm_clk_register_ddiv(struct lgm_clk_provider *ctx,
+ const struct lgm_clk_ddiv_data *list,
+ unsigned int nr_clk);
+#endif /* __CLK_CGU_H */
diff --git a/drivers/clk/x86/clk-lgm.c b/drivers/clk/x86/clk-lgm.c
new file mode 100644
index 000000000000..020f4e83a5cc
--- /dev/null
+++ b/drivers/clk/x86/clk-lgm.c
@@ -0,0 +1,475 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Intel Corporation.
+ * Zhu YiXin <yixin.zhu@intel.com>
+ * Rahul Tanwar <rahul.tanwar@intel.com>
+ */
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <dt-bindings/clock/intel,lgm-clk.h>
+#include "clk-cgu.h"
+
+#define PLL_DIV_WIDTH 4
+#define PLL_DDIV_WIDTH 3
+
+/* Gate0 clock shift */
+#define G_C55_SHIFT 7
+#define G_QSPI_SHIFT 9
+#define G_EIP197_SHIFT 11
+#define G_VAULT130_SHIFT 12
+#define G_TOE_SHIFT 13
+#define G_SDXC_SHIFT 14
+#define G_EMMC_SHIFT 15
+#define G_SPIDBG_SHIFT 17
+#define G_DMA3_SHIFT 28
+
+/* Gate1 clock shift */
+#define G_DMA0_SHIFT 0
+#define G_LEDC0_SHIFT 1
+#define G_LEDC1_SHIFT 2
+#define G_I2S0_SHIFT 3
+#define G_I2S1_SHIFT 4
+#define G_EBU_SHIFT 5
+#define G_PWM_SHIFT 6
+#define G_I2C0_SHIFT 7
+#define G_I2C1_SHIFT 8
+#define G_I2C2_SHIFT 9
+#define G_I2C3_SHIFT 10
+
+#define G_SSC0_SHIFT 12
+#define G_SSC1_SHIFT 13
+#define G_SSC2_SHIFT 14
+#define G_SSC3_SHIFT 15
+
+#define G_GPTC0_SHIFT 17
+#define G_GPTC1_SHIFT 18
+#define G_GPTC2_SHIFT 19
+#define G_GPTC3_SHIFT 20
+
+#define G_ASC0_SHIFT 22
+#define G_ASC1_SHIFT 23
+#define G_ASC2_SHIFT 24
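+ /* the fixed rate is carried in the otherwise unused mux_flags field (see LGM_FIXED) */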
+#define G_ASC3_SHIFT 25
+
+#define G_PCM0_SHIFT 27
+#define G_PCM1_SHIFT 28
+#define G_PCM2_SHIFT 29
+
+/* Gate2 clock shift */
+#define G_PCIE10_SHIFT 1
+#define G_PCIE11_SHIFT 2
+#define G_PCIE30_SHIFT 3
+#define G_PCIE31_SHIFT 4
+#define G_PCIE20_SHIFT 5
+#define G_PCIE21_SHIFT 6
+#define G_PCIE40_SHIFT 7
+#define G_PCIE41_SHIFT 8
+
+#define G_XPCS0_SHIFT 10
+#define G_XPCS1_SHIFT 11
+#define G_XPCS2_SHIFT 12
+#define G_XPCS3_SHIFT 13
+#define G_SATA0_SHIFT 14
+#define G_SATA1_SHIFT 15
+#define G_SATA2_SHIFT 16
+#define G_SATA3_SHIFT 17
+
+/* Gate3 clock shift */
+#define G_ARCEM4_SHIFT 0
+#define G_IDMAR1_SHIFT 2
+#define G_IDMAT0_SHIFT 3
+#define G_IDMAT1_SHIFT 4
+#define G_IDMAT2_SHIFT 5
+
+#define G_PPV4_SHIFT 8
+#define G_GSWIPO_SHIFT 9
+#define G_CQEM_SHIFT 10
+#define G_XPCS5_SHIFT 14
+#define G_USB1_SHIFT 25
+#define G_USB2_SHIFT 26
+
+/* Register definition */
+#define CGU_PLL0CZ_CFG0 0x000
+#define CGU_PLL0CM0_CFG0 0x020
+#define CGU_PLL0CM1_CFG0 0x040
+#define CGU_PLL0B_CFG0 0x060
+#define CGU_PLL1_CFG0 0x080
+#define CGU_PLL2_CFG0 0x0A0
+#define CGU_PLLPP_CFG0 0x0C0
+#define CGU_LJPLL3_CFG0 0x0E0
+#define CGU_LJPLL4_CFG0 0x100
+#define CGU_C55_PCMCR 0x18C
+#define CGU_PCMCR 0x190
+#define CGU_IF_CLK1 0x1A0
+#define CGU_IF_CLK2 0x1A4
+#define CGU_GATE0 0x300
+#define CGU_GATE1 0x310
+#define CGU_GATE2 0x320
+#define CGU_GATE3 0x330
+
+#define PLL_DIV(x) ((x) + 0x04)
+#define PLL_SSC(x) ((x) + 0x10)
+
+#define CLK_NR_CLKS (LGM_GCLK_USB2 + 1)
+
+/*
+ * The table below defines the pairs of register value and effective divider.
+ * It is more efficient to provide an explicit table due to the non-linear
+ * relation between the values.
+ */
+static const struct clk_div_table pll_div[] = {
+ { .val = 0, .div = 1 },
+ { .val = 1, .div = 2 },
+ { .val = 2, .div = 3 },
+ { .val = 3, .div = 4 },
+ { .val = 4, .div = 5 },
+ { .val = 5, .div = 6 },
+ { .val = 6, .div = 8 },
+ { .val = 7, .div = 10 },
+ { .val = 8, .div = 12 },
+ { .val = 9, .div = 16 },
+ { .val = 10, .div = 20 },
+ { .val = 11, .div = 24 },
+ { .val = 12, .div = 32 },
+ { .val = 13, .div = 40 },
+ { .val = 14, .div = 48 },
+ { .val = 15, .div = 64 },
+ {}
+};
+
+static const struct clk_div_table dcl_div[] = {
+ { .val = 0, .div = 6 },
+ { .val = 1, .div = 12 },
+ { .val = 2, .div = 24 },
+ { .val = 3, .div = 32 },
+ { .val = 4, .div = 48 },
+ { .val = 5, .div = 96 },
+ {}
+};
+
+static const struct clk_parent_data pll_p[] = {
+ { .fw_name = "osc", .name = "osc" },
+};
+static const struct clk_parent_data pllcm_p[] = {
+ { .fw_name = "cpu_cm", .name = "cpu_cm" },
+};
+static const struct clk_parent_data emmc_p[] = {
+ { .fw_name = "emmc4", .name = "emmc4" },
+ { .fw_name = "noc4", .name = "noc4" },
+};
+static const struct clk_parent_data sdxc_p[] = {
+ { .fw_name = "sdxc3", .name = "sdxc3" },
+ { .fw_name = "sdxc2", .name = "sdxc2" },
+};
+static const struct clk_parent_data pcm_p[] = {
+ { .fw_name = "v_docsis", .name = "v_docsis" },
+ { .fw_name = "dcl", .name = "dcl" },
+};
+static const struct clk_parent_data cbphy_p[] = {
+ { .fw_name = "dd_serdes", .name = "dd_serdes" },
+ { .fw_name = "dd_pcie", .name = "dd_pcie" },
+};
+
+static const struct lgm_pll_clk_data lgm_pll_clks[] = {
+ LGM_PLL(LGM_CLK_PLL0CZ, "pll0cz", pll_p, CLK_IGNORE_UNUSED,
+ CGU_PLL0CZ_CFG0, TYPE_ROPLL),
+ LGM_PLL(LGM_CLK_PLL0CM0, "pll0cm0", pllcm_p, CLK_IGNORE_UNUSED,
+ CGU_PLL0CM0_CFG0, TYPE_ROPLL),
+ LGM_PLL(LGM_CLK_PLL0CM1, "pll0cm1", pllcm_p, CLK_IGNORE_UNUSED,
+ CGU_PLL0CM1_CFG0, TYPE_ROPLL),
+ LGM_PLL(LGM_CLK_PLL0B, "pll0b", pll_p, CLK_IGNORE_UNUSED,
+ CGU_PLL0B_CFG0, TYPE_ROPLL),
+ LGM_PLL(LGM_CLK_PLL1, "pll1", pll_p, 0, CGU_PLL1_CFG0, TYPE_ROPLL),
+ LGM_PLL(LGM_CLK_PLL2, "pll2", pll_p, CLK_IGNORE_UNUSED,
+ CGU_PLL2_CFG0, TYPE_ROPLL),
+ LGM_PLL(LGM_CLK_PLLPP, "pllpp", pll_p, 0, CGU_PLLPP_CFG0, TYPE_ROPLL),
+ LGM_PLL(LGM_CLK_LJPLL3, "ljpll3", pll_p, 0, CGU_LJPLL3_CFG0, TYPE_LJPLL),
+ LGM_PLL(LGM_CLK_LJPLL4, "ljpll4", pll_p, 0, CGU_LJPLL4_CFG0, TYPE_LJPLL),
+};
+
+static const struct lgm_clk_branch lgm_branch_clks[] = {
+ LGM_DIV(LGM_CLK_PP_HW, "pp_hw", "pllpp", 0, PLL_DIV(CGU_PLLPP_CFG0),
+ 0, PLL_DIV_WIDTH, 24, 1, 0, 0, pll_div),
+ LGM_DIV(LGM_CLK_PP_UC, "pp_uc", "pllpp", 0, PLL_DIV(CGU_PLLPP_CFG0),
+ 4, PLL_DIV_WIDTH, 25, 1, 0, 0, pll_div),
+ LGM_DIV(LGM_CLK_PP_FXD, "pp_fxd", "pllpp", 0, PLL_DIV(CGU_PLLPP_CFG0),
+ 8, PLL_DIV_WIDTH, 26, 1, 0, 0, pll_div),
+ LGM_DIV(LGM_CLK_PP_TBM, "pp_tbm", "pllpp", 0, PLL_DIV(CGU_PLLPP_CFG0),
+ 12, PLL_DIV_WIDTH, 27, 1, 0, 0, pll_div),
+ LGM_DIV(LGM_CLK_DDR, "ddr", "pll2", CLK_IGNORE_UNUSED,
+ PLL_DIV(CGU_PLL2_CFG0), 0, PLL_DIV_WIDTH, 24, 1, 0, 0,
+ pll_div),
+ LGM_DIV(LGM_CLK_CM, "cpu_cm", "pll0cz", 0, PLL_DIV(CGU_PLL0CZ_CFG0),
+ 0, PLL_DIV_WIDTH, 24, 1, 0, 0, pll_div),
+
+ LGM_DIV(LGM_CLK_IC, "cpu_ic", "pll0cz", CLK_IGNORE_UNUSED,
+ PLL_DIV(CGU_PLL0CZ_CFG0), 4, PLL_DIV_WIDTH, 25,
+ 1, 0, 0, pll_div),
+
+ LGM_DIV(LGM_CLK_SDXC3, "sdxc3", "pll0cz", 0, PLL_DIV(CGU_PLL0CZ_CFG0),
+ 8, PLL_DIV_WIDTH, 26, 1, 0, 0, pll_div),
+
+ LGM_DIV(LGM_CLK_CPU0, "cm0", "pll0cm0",
+ CLK_IGNORE_UNUSED, PLL_DIV(CGU_PLL0CM0_CFG0),
+ 0, PLL_DIV_WIDTH, 24, 1, 0, 0, pll_div),
+ LGM_DIV(LGM_CLK_CPU1, "cm1", "pll0cm1",
+ CLK_IGNORE_UNUSED, PLL_DIV(CGU_PLL0CM1_CFG0),
+ 0, PLL_DIV_WIDTH, 24, 1, 0, 0, pll_div),
+
+ /*
+ * ngi_clk (next generation interconnect) and noc_clk (network on
+ * chip peripheral clk) are marked as critical clocks because they
+ * are shared parent clock sources for many different peripherals.
+ */
+ LGM_DIV(LGM_CLK_NGI, "ngi", "pll0b",
+ (CLK_IGNORE_UNUSED|CLK_IS_CRITICAL), PLL_DIV(CGU_PLL0B_CFG0),
+ 0, PLL_DIV_WIDTH, 24, 1, 0, 0, pll_div),
+ LGM_DIV(LGM_CLK_NOC4, "noc4", "pll0b",
+ (CLK_IGNORE_UNUSED|CLK_IS_CRITICAL), PLL_DIV(CGU_PLL0B_CFG0),
+ 4, PLL_DIV_WIDTH, 25, 1, 0, 0, pll_div),
+ LGM_DIV(LGM_CLK_SW, "switch", "pll0b", 0, PLL_DIV(CGU_PLL0B_CFG0),
+ 8, PLL_DIV_WIDTH, 26, 1, 0, 0, pll_div),
+ LGM_DIV(LGM_CLK_QSPI, "qspi", "pll0b", 0, PLL_DIV(CGU_PLL0B_CFG0),
+ 12, PLL_DIV_WIDTH, 27, 1, 0, 0, pll_div),
+ LGM_DIV(LGM_CLK_CT, "v_ct", "pll1", 0, PLL_DIV(CGU_PLL1_CFG0),
+ 0, PLL_DIV_WIDTH, 24, 1, 0, 0, pll_div),
+ LGM_DIV(LGM_CLK_DSP, "v_dsp", "pll1", 0, PLL_DIV(CGU_PLL1_CFG0),
+ 8, PLL_DIV_WIDTH, 26, 1, 0, 0, pll_div),
+ LGM_DIV(LGM_CLK_VIF, "v_ifclk", "pll1", 0, PLL_DIV(CGU_PLL1_CFG0),
+ 12, PLL_DIV_WIDTH, 27, 1, 0, 0, pll_div),
+
+ LGM_FIXED_FACTOR(LGM_CLK_EMMC4, "emmc4", "sdxc3", 0, 0,
+ 0, 0, 0, 0, 1, 4),
+ LGM_FIXED_FACTOR(LGM_CLK_SDXC2, "sdxc2", "noc4", 0, 0,
+ 0, 0, 0, 0, 1, 4),
+ LGM_MUX(LGM_CLK_EMMC, "emmc", emmc_p, 0, CGU_IF_CLK1,
+ 0, 1, CLK_MUX_ROUND_CLOSEST, 0),
+ LGM_MUX(LGM_CLK_SDXC, "sdxc", sdxc_p, 0, CGU_IF_CLK1,
+ 1, 1, CLK_MUX_ROUND_CLOSEST, 0),
+ LGM_FIXED(LGM_CLK_OSC, "osc", NULL, 0, 0, 0, 0, 0, 40000000, 0),
+ LGM_FIXED(LGM_CLK_SLIC, "slic", NULL, 0, CGU_IF_CLK1,
+ 8, 2, CLOCK_FLAG_VAL_INIT, 8192000, 2),
+ LGM_FIXED(LGM_CLK_DOCSIS, "v_docsis", NULL, 0, 0, 0, 0, 0, 16000000, 0),
+ LGM_DIV(LGM_CLK_DCL, "dcl", "v_ifclk", 0, CGU_PCMCR,
+ 25, 3, 0, 0, 0, 0, dcl_div),
+ LGM_MUX(LGM_CLK_PCM, "pcm", pcm_p, 0, CGU_C55_PCMCR,
+ 0, 1, CLK_MUX_ROUND_CLOSEST, 0),
+ LGM_FIXED_FACTOR(LGM_CLK_DDR_PHY, "ddr_phy", "ddr",
+ CLK_IGNORE_UNUSED, 0,
+ 0, 0, 0, 0, 2, 1),
+ LGM_FIXED_FACTOR(LGM_CLK_PONDEF, "pondef", "dd_pool",
+ CLK_SET_RATE_PARENT, 0, 0, 0, 0, 0, 1, 2),
+ LGM_MUX(LGM_CLK_CBPHY0, "cbphy0", cbphy_p, 0, 0,
+ 0, 0, MUX_CLK_SW | CLK_MUX_ROUND_CLOSEST, 0),
+ LGM_MUX(LGM_CLK_CBPHY1, "cbphy1", cbphy_p, 0, 0,
+ 0, 0, MUX_CLK_SW | CLK_MUX_ROUND_CLOSEST, 0),
+ LGM_MUX(LGM_CLK_CBPHY2, "cbphy2", cbphy_p, 0, 0,
+ 0, 0, MUX_CLK_SW | CLK_MUX_ROUND_CLOSEST, 0),
+ LGM_MUX(LGM_CLK_CBPHY3, "cbphy3", cbphy_p, 0, 0,
+ 0, 0, MUX_CLK_SW | CLK_MUX_ROUND_CLOSEST, 0),
+
+ LGM_GATE(LGM_GCLK_C55, "g_c55", NULL, 0, CGU_GATE0,
+ G_C55_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_QSPI, "g_qspi", "qspi", 0, CGU_GATE0,
+ G_QSPI_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_EIP197, "g_eip197", NULL, 0, CGU_GATE0,
+ G_EIP197_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_VAULT, "g_vault130", NULL, 0, CGU_GATE0,
+ G_VAULT130_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_TOE, "g_toe", NULL, 0, CGU_GATE0,
+ G_TOE_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_SDXC, "g_sdxc", "sdxc", 0, CGU_GATE0,
+ G_SDXC_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_EMMC, "g_emmc", "emmc", 0, CGU_GATE0,
+ G_EMMC_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_SPI_DBG, "g_spidbg", NULL, 0, CGU_GATE0,
+ G_SPIDBG_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_DMA3, "g_dma3", NULL, 0, CGU_GATE0,
+ G_DMA3_SHIFT, 0, 0),
+
+ LGM_GATE(LGM_GCLK_DMA0, "g_dma0", NULL, 0, CGU_GATE1,
+ G_DMA0_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_LEDC0, "g_ledc0", NULL, 0, CGU_GATE1,
+ G_LEDC0_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_LEDC1, "g_ledc1", NULL, 0, CGU_GATE1,
+ G_LEDC1_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_I2S0, "g_i2s0", NULL, 0, CGU_GATE1,
+ G_I2S0_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_I2S1, "g_i2s1", NULL, 0, CGU_GATE1,
+ G_I2S1_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_EBU, "g_ebu", NULL, 0, CGU_GATE1,
+ G_EBU_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_PWM, "g_pwm", NULL, 0, CGU_GATE1,
+ G_PWM_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_I2C0, "g_i2c0", NULL, 0, CGU_GATE1,
+ G_I2C0_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_I2C1, "g_i2c1", NULL, 0, CGU_GATE1,
+ G_I2C1_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_I2C2, "g_i2c2", NULL, 0, CGU_GATE1,
+ G_I2C2_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_I2C3, "g_i2c3", NULL, 0, CGU_GATE1,
+ G_I2C3_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_SSC0, "g_ssc0", "noc4", 0, CGU_GATE1,
+ G_SSC0_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_SSC1, "g_ssc1", "noc4", 0, CGU_GATE1,
+ G_SSC1_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_SSC2, "g_ssc2", "noc4", 0, CGU_GATE1,
+ G_SSC2_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_SSC3, "g_ssc3", "noc4", 0, CGU_GATE1,
+ G_SSC3_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_GPTC0, "g_gptc0", "noc4", 0, CGU_GATE1,
+ G_GPTC0_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_GPTC1, "g_gptc1", "noc4", 0, CGU_GATE1,
+ G_GPTC1_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_GPTC2, "g_gptc2", "noc4", 0, CGU_GATE1,
+ G_GPTC2_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_GPTC3, "g_gptc3", "osc", 0, CGU_GATE1,
+ G_GPTC3_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_ASC0, "g_asc0", "noc4", 0, CGU_GATE1,
+ G_ASC0_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_ASC1, "g_asc1", "noc4", 0, CGU_GATE1,
+ G_ASC1_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_ASC2, "g_asc2", "noc4", 0, CGU_GATE1,
+ G_ASC2_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_ASC3, "g_asc3", "osc", 0, CGU_GATE1,
+ G_ASC3_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_PCM0, "g_pcm0", NULL, 0, CGU_GATE1,
+ G_PCM0_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_PCM1, "g_pcm1", NULL, 0, CGU_GATE1,
+ G_PCM1_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_PCM2, "g_pcm2", NULL, 0, CGU_GATE1,
+ G_PCM2_SHIFT, 0, 0),
+
+ LGM_GATE(LGM_GCLK_PCIE10, "g_pcie10", NULL, 0, CGU_GATE2,
+ G_PCIE10_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_PCIE11, "g_pcie11", NULL, 0, CGU_GATE2,
+ G_PCIE11_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_PCIE30, "g_pcie30", NULL, 0, CGU_GATE2,
+ G_PCIE30_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_PCIE31, "g_pcie31", NULL, 0, CGU_GATE2,
+ G_PCIE31_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_PCIE20, "g_pcie20", NULL, 0, CGU_GATE2,
+ G_PCIE20_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_PCIE21, "g_pcie21", NULL, 0, CGU_GATE2,
+ G_PCIE21_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_PCIE40, "g_pcie40", NULL, 0, CGU_GATE2,
+ G_PCIE40_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_PCIE41, "g_pcie41", NULL, 0, CGU_GATE2,
+ G_PCIE41_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_XPCS0, "g_xpcs0", NULL, 0, CGU_GATE2,
+ G_XPCS0_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_XPCS1, "g_xpcs1", NULL, 0, CGU_GATE2,
+ G_XPCS1_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_XPCS2, "g_xpcs2", NULL, 0, CGU_GATE2,
+ G_XPCS2_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_XPCS3, "g_xpcs3", NULL, 0, CGU_GATE2,
+ G_XPCS3_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_SATA0, "g_sata0", NULL, 0, CGU_GATE2,
+ G_SATA0_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_SATA1, "g_sata1", NULL, 0, CGU_GATE2,
+ G_SATA1_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_SATA2, "g_sata2", NULL, 0, CGU_GATE2,
+ G_SATA2_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_SATA3, "g_sata3", NULL, 0, CGU_GATE2,
+ G_SATA3_SHIFT, 0, 0),
+
+ LGM_GATE(LGM_GCLK_ARCEM4, "g_arcem4", NULL, 0, CGU_GATE3,
+ G_ARCEM4_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_IDMAR1, "g_idmar1", NULL, 0, CGU_GATE3,
+ G_IDMAR1_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_IDMAT0, "g_idmat0", NULL, 0, CGU_GATE3,
+ G_IDMAT0_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_IDMAT1, "g_idmat1", NULL, 0, CGU_GATE3,
+ G_IDMAT1_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_IDMAT2, "g_idmat2", NULL, 0, CGU_GATE3,
+ G_IDMAT2_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_PPV4, "g_ppv4", NULL, 0, CGU_GATE3,
+ G_PPV4_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_GSWIPO, "g_gswipo", "switch", 0, CGU_GATE3,
+ G_GSWIPO_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_CQEM, "g_cqem", "switch", 0, CGU_GATE3,
+ G_CQEM_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_XPCS5, "g_xpcs5", NULL, 0, CGU_GATE3,
+ G_XPCS5_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_USB1, "g_usb1", NULL, 0, CGU_GATE3,
+ G_USB1_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_USB2, "g_usb2", NULL, 0, CGU_GATE3,
+ G_USB2_SHIFT, 0, 0),
+};
+
+static const struct lgm_clk_ddiv_data lgm_ddiv_clks[] = {
+ LGM_DDIV(LGM_CLK_CML, "dd_cml", "ljpll3", 0,
+ PLL_DIV(CGU_LJPLL3_CFG0), 0, PLL_DDIV_WIDTH,
+ 3, PLL_DDIV_WIDTH, 24, 1, 29, 0),
+ LGM_DDIV(LGM_CLK_SERDES, "dd_serdes", "ljpll3", 0,
+ PLL_DIV(CGU_LJPLL3_CFG0), 6, PLL_DDIV_WIDTH,
+ 9, PLL_DDIV_WIDTH, 25, 1, 28, 0),
+ LGM_DDIV(LGM_CLK_POOL, "dd_pool", "ljpll3", 0,
+ PLL_DIV(CGU_LJPLL3_CFG0), 12, PLL_DDIV_WIDTH,
+ 15, PLL_DDIV_WIDTH, 26, 1, 28, 0),
+ LGM_DDIV(LGM_CLK_PTP, "dd_ptp", "ljpll3", 0,
+ PLL_DIV(CGU_LJPLL3_CFG0), 18, PLL_DDIV_WIDTH,
+ 21, PLL_DDIV_WIDTH, 27, 1, 28, 0),
+ LGM_DDIV(LGM_CLK_PCIE, "dd_pcie", "ljpll4", 0,
+ PLL_DIV(CGU_LJPLL4_CFG0), 0, PLL_DDIV_WIDTH,
+ 3, PLL_DDIV_WIDTH, 24, 1, 29, 0),
+};
+
+static int lgm_cgu_probe(struct platform_device *pdev)
+{
+ struct lgm_clk_provider *ctx;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ int ret;
+
+ ctx = devm_kzalloc(dev, struct_size(ctx, clk_data.hws, CLK_NR_CLKS),
+ GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->clk_data.num = CLK_NR_CLKS;
+
+ ctx->membase = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ctx->membase))
+ return PTR_ERR(ctx->membase);
+
+ ctx->np = np;
+ ctx->dev = dev;
+ spin_lock_init(&ctx->lock);
+
+ ret = lgm_clk_register_plls(ctx, lgm_pll_clks,
+ ARRAY_SIZE(lgm_pll_clks));
+ if (ret)
+ return ret;
+
+ ret = lgm_clk_register_branches(ctx, lgm_branch_clks,
+ ARRAY_SIZE(lgm_branch_clks));
+ if (ret)
+ return ret;
+
+ ret = lgm_clk_register_ddiv(ctx, lgm_ddiv_clks,
+ ARRAY_SIZE(lgm_ddiv_clks));
+ if (ret)
+ return ret;
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
+ &ctx->clk_data);
+}
+
+static const struct of_device_id of_lgm_cgu_match[] = {
+ { .compatible = "intel,cgu-lgm" },
+ {}
+};
+
+static struct platform_driver lgm_cgu_driver = {
+ .probe = lgm_cgu_probe,
+ .driver = {
+ .name = "cgu-lgm",
+ .of_match_table = of_lgm_cgu_match,
+ },
+};
+builtin_platform_driver(lgm_cgu_driver);
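For context, peripherals on this SoC consume the clocks registered above through the generic clk consumer API. The sketch below is illustrative only and is not part of the patch: lgm_consumer_probe and the consuming device are hypothetical, while devm_clk_get(), clk_prepare_enable() and clk_get_rate() are the standard kernel consumer calls; the clock itself would be referenced from the consumer's DT node via a phandle and an index from dt-bindings/clock/intel,lgm-clk.h.

#include <linux/clk.h>
#include <linux/platform_device.h>

/* hypothetical consumer probe, shown only to illustrate how the provider is used */
static int lgm_consumer_probe(struct platform_device *pdev)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* for a gate clock this ends up in lgm_clk_gate_enable() */
	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	dev_info(&pdev->dev, "clock running at %lu Hz\n", clk_get_rate(clk));
	return 0;
}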
diff --git a/drivers/clk/zynqmp/clk-gate-zynqmp.c b/drivers/clk/zynqmp/clk-gate-zynqmp.c
index 83b236f20fff..10c9b889324f 100644
--- a/drivers/clk/zynqmp/clk-gate-zynqmp.c
+++ b/drivers/clk/zynqmp/clk-gate-zynqmp.c
@@ -37,9 +37,8 @@ static int zynqmp_clk_gate_enable(struct clk_hw *hw)
const char *clk_name = clk_hw_get_name(hw);
u32 clk_id = gate->clk_id;
int ret;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
- ret = eemi_ops->clock_enable(clk_id);
+ ret = zynqmp_pm_clock_enable(clk_id);
if (ret)
pr_warn_once("%s() clock enabled failed for %s, ret = %d\n",
@@ -58,9 +57,8 @@ static void zynqmp_clk_gate_disable(struct clk_hw *hw)
const char *clk_name = clk_hw_get_name(hw);
u32 clk_id = gate->clk_id;
int ret;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
- ret = eemi_ops->clock_disable(clk_id);
+ ret = zynqmp_pm_clock_disable(clk_id);
if (ret)
pr_warn_once("%s() clock disable failed for %s, ret = %d\n",
@@ -79,9 +77,8 @@ static int zynqmp_clk_gate_is_enabled(struct clk_hw *hw)
const char *clk_name = clk_hw_get_name(hw);
u32 clk_id = gate->clk_id;
int state, ret;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
- ret = eemi_ops->clock_getstate(clk_id, &state);
+ ret = zynqmp_pm_clock_getstate(clk_id, &state);
if (ret) {
pr_warn_once("%s() clock get state failed for %s, ret = %d\n",
__func__, clk_name, ret);
diff --git a/drivers/clk/zynqmp/clk-mux-zynqmp.c b/drivers/clk/zynqmp/clk-mux-zynqmp.c
index 0af8f74c5fa5..06194149be83 100644
--- a/drivers/clk/zynqmp/clk-mux-zynqmp.c
+++ b/drivers/clk/zynqmp/clk-mux-zynqmp.c
@@ -47,9 +47,8 @@ static u8 zynqmp_clk_mux_get_parent(struct clk_hw *hw)
u32 clk_id = mux->clk_id;
u32 val;
int ret;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
- ret = eemi_ops->clock_getparent(clk_id, &val);
+ ret = zynqmp_pm_clock_getparent(clk_id, &val);
if (ret)
pr_warn_once("%s() getparent failed for clock: %s, ret = %d\n",
@@ -71,9 +70,8 @@ static int zynqmp_clk_mux_set_parent(struct clk_hw *hw, u8 index)
const char *clk_name = clk_hw_get_name(hw);
u32 clk_id = mux->clk_id;
int ret;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
- ret = eemi_ops->clock_setparent(clk_id, index);
+ ret = zynqmp_pm_clock_setparent(clk_id, index);
if (ret)
pr_warn_once("%s() set parent failed for clock: %s, ret = %d\n",
diff --git a/drivers/clk/zynqmp/clk-zynqmp.h b/drivers/clk/zynqmp/clk-zynqmp.h
index fec9a15c8786..5beeb41b29fa 100644
--- a/drivers/clk/zynqmp/clk-zynqmp.h
+++ b/drivers/clk/zynqmp/clk-zynqmp.h
@@ -30,6 +30,7 @@ struct clock_topology {
u32 type;
u32 flag;
u32 type_flag;
+ u8 custom_type_flag;
};
struct clk_hw *zynqmp_clk_register_pll(const char *name, u32 clk_id,
diff --git a/drivers/clk/zynqmp/clkc.c b/drivers/clk/zynqmp/clkc.c
index 10e89f23880b..db8d0d7161ce 100644
--- a/drivers/clk/zynqmp/clkc.c
+++ b/drivers/clk/zynqmp/clkc.c
@@ -84,6 +84,7 @@ struct name_resp {
struct topology_resp {
#define CLK_TOPOLOGY_TYPE GENMASK(3, 0)
+#define CLK_TOPOLOGY_CUSTOM_TYPE_FLAGS GENMASK(7, 4)
#define CLK_TOPOLOGY_FLAGS GENMASK(23, 8)
#define CLK_TOPOLOGY_TYPE_FLAGS GENMASK(31, 24)
u32 topology[CLK_GET_TOPOLOGY_RESP_WORDS];
@@ -134,7 +135,6 @@ static struct clk_hw *(* const clk_topology[]) (const char *name, u32 clk_id,
static struct zynqmp_clock *clock;
static struct clk_hw_onecell_data *zynqmp_data;
static unsigned int clock_max_idx;
-static const struct zynqmp_eemi_ops *eemi_ops;
/**
* zynqmp_is_valid_clock() - Check whether clock is valid or not
@@ -206,7 +206,7 @@ static int zynqmp_pm_clock_get_num_clocks(u32 *nclocks)
qdata.qid = PM_QID_CLOCK_GET_NUM_CLOCKS;
- ret = eemi_ops->query_data(qdata, ret_payload);
+ ret = zynqmp_pm_query_data(qdata, ret_payload);
*nclocks = ret_payload[1];
return ret;
@@ -231,7 +231,7 @@ static int zynqmp_pm_clock_get_name(u32 clock_id,
qdata.qid = PM_QID_CLOCK_GET_NAME;
qdata.arg1 = clock_id;
- eemi_ops->query_data(qdata, ret_payload);
+ zynqmp_pm_query_data(qdata, ret_payload);
memcpy(response, ret_payload, sizeof(*response));
return 0;
@@ -265,7 +265,7 @@ static int zynqmp_pm_clock_get_topology(u32 clock_id, u32 index,
qdata.arg1 = clock_id;
qdata.arg2 = index;
- ret = eemi_ops->query_data(qdata, ret_payload);
+ ret = zynqmp_pm_query_data(qdata, ret_payload);
memcpy(response, &ret_payload[1], sizeof(*response));
return ret;
@@ -296,7 +296,7 @@ struct clk_hw *zynqmp_clk_register_fixed_factor(const char *name, u32 clk_id,
qdata.qid = PM_QID_CLOCK_GET_FIXEDFACTOR_PARAMS;
qdata.arg1 = clk_id;
- ret = eemi_ops->query_data(qdata, ret_payload);
+ ret = zynqmp_pm_query_data(qdata, ret_payload);
if (ret)
return ERR_PTR(ret);
@@ -339,7 +339,7 @@ static int zynqmp_pm_clock_get_parents(u32 clock_id, u32 index,
qdata.arg1 = clock_id;
qdata.arg2 = index;
- ret = eemi_ops->query_data(qdata, ret_payload);
+ ret = zynqmp_pm_query_data(qdata, ret_payload);
memcpy(response, &ret_payload[1], sizeof(*response));
return ret;
@@ -364,7 +364,7 @@ static int zynqmp_pm_clock_get_attributes(u32 clock_id,
qdata.qid = PM_QID_CLOCK_GET_ATTRIBUTES;
qdata.arg1 = clock_id;
- ret = eemi_ops->query_data(qdata, ret_payload);
+ ret = zynqmp_pm_query_data(qdata, ret_payload);
memcpy(response, &ret_payload[1], sizeof(*response));
return ret;
@@ -396,6 +396,9 @@ static int __zynqmp_clock_get_topology(struct clock_topology *topology,
topology[*nnodes].type_flag =
FIELD_GET(CLK_TOPOLOGY_TYPE_FLAGS,
response->topology[i]);
+ topology[*nnodes].custom_type_flag =
+ FIELD_GET(CLK_TOPOLOGY_CUSTOM_TYPE_FLAGS,
+ response->topology[i]);
(*nnodes)++;
}
@@ -558,7 +561,7 @@ static struct clk_hw *zynqmp_register_clk_topology(int clk_id, char *clk_name,
{
int j;
u32 num_nodes, clk_dev_id;
- char *clk_out = NULL;
+ char *clk_out[MAX_NODES];
struct clock_topology *nodes;
struct clk_hw *hw = NULL;
@@ -572,16 +575,16 @@ static struct clk_hw *zynqmp_register_clk_topology(int clk_id, char *clk_name,
* Intermediate clock names are postfixed with type of clock.
*/
if (j != (num_nodes - 1)) {
- clk_out = kasprintf(GFP_KERNEL, "%s%s", clk_name,
+ clk_out[j] = kasprintf(GFP_KERNEL, "%s%s", clk_name,
clk_type_postfix[nodes[j].type]);
} else {
- clk_out = kasprintf(GFP_KERNEL, "%s", clk_name);
+ clk_out[j] = kasprintf(GFP_KERNEL, "%s", clk_name);
}
if (!clk_topology[nodes[j].type])
continue;
- hw = (*clk_topology[nodes[j].type])(clk_out, clk_dev_id,
+ hw = (*clk_topology[nodes[j].type])(clk_out[j], clk_dev_id,
parent_names,
num_parents,
&nodes[j]);
@@ -590,9 +593,12 @@ static struct clk_hw *zynqmp_register_clk_topology(int clk_id, char *clk_name,
__func__, clk_dev_id, clk_name,
PTR_ERR(hw));
- parent_names[0] = clk_out;
+ parent_names[0] = clk_out[j];
}
- kfree(clk_out);
+
+ for (j = 0; j < num_nodes; j++)
+ kfree(clk_out[j]);
+
return hw;
}
@@ -663,6 +669,11 @@ static void zynqmp_get_clock_info(void)
continue;
clock[i].valid = FIELD_GET(CLK_ATTR_VALID, attr.attr[0]);
+ /* skip query for Invalid clock */
+ ret = zynqmp_is_valid_clock(i);
+ if (ret != CLK_ATTR_VALID)
+ continue;
+
clock[i].type = FIELD_GET(CLK_ATTR_TYPE, attr.attr[0]) ?
CLK_TYPE_EXTERNAL : CLK_TYPE_OUTPUT;
@@ -738,10 +749,6 @@ static int zynqmp_clock_probe(struct platform_device *pdev)
int ret;
struct device *dev = &pdev->dev;
- eemi_ops = zynqmp_pm_get_eemi_ops();
- if (IS_ERR(eemi_ops))
- return PTR_ERR(eemi_ops);
-
ret = zynqmp_clk_setup(dev->of_node);
return ret;
diff --git a/drivers/clk/zynqmp/divider.c b/drivers/clk/zynqmp/divider.c
index 4be2cc76aa2e..66da02b83d39 100644
--- a/drivers/clk/zynqmp/divider.c
+++ b/drivers/clk/zynqmp/divider.c
@@ -25,7 +25,8 @@
#define to_zynqmp_clk_divider(_hw) \
container_of(_hw, struct zynqmp_clk_divider, hw)
-#define CLK_FRAC BIT(13) /* has a fractional parent */
+#define CLK_FRAC BIT(13) /* has a fractional parent */
+#define CUSTOM_FLAG_CLK_FRAC BIT(0) /* has a fractional parent in custom type flag */
/**
* struct zynqmp_clk_divider - adjustable divider clock
@@ -83,9 +84,8 @@ static unsigned long zynqmp_clk_divider_recalc_rate(struct clk_hw *hw,
u32 div_type = divider->div_type;
u32 div, value;
int ret;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
- ret = eemi_ops->clock_getdivider(clk_id, &div);
+ ret = zynqmp_pm_clock_getdivider(clk_id, &div);
if (ret)
pr_warn_once("%s() get divider failed for %s, ret = %d\n",
@@ -111,23 +111,30 @@ static unsigned long zynqmp_clk_divider_recalc_rate(struct clk_hw *hw,
static void zynqmp_get_divider2_val(struct clk_hw *hw,
unsigned long rate,
- unsigned long parent_rate,
struct zynqmp_clk_divider *divider,
int *bestdiv)
{
int div1;
int div2;
long error = LONG_MAX;
- struct clk_hw *parent_hw = clk_hw_get_parent(hw);
- struct zynqmp_clk_divider *pdivider = to_zynqmp_clk_divider(parent_hw);
+ unsigned long div1_prate;
+ struct clk_hw *div1_parent_hw;
+ struct clk_hw *div2_parent_hw = clk_hw_get_parent(hw);
+ struct zynqmp_clk_divider *pdivider =
+ to_zynqmp_clk_divider(div2_parent_hw);
if (!pdivider)
return;
+ div1_parent_hw = clk_hw_get_parent(div2_parent_hw);
+ if (!div1_parent_hw)
+ return;
+
+ div1_prate = clk_hw_get_rate(div1_parent_hw);
*bestdiv = 1;
for (div1 = 1; div1 <= pdivider->max_div;) {
for (div2 = 1; div2 <= divider->max_div;) {
- long new_error = ((parent_rate / div1) / div2) - rate;
+ long new_error = ((div1_prate / div1) / div2) - rate;
if (abs(new_error) < abs(error)) {
*bestdiv = div2;
@@ -163,11 +170,10 @@ static long zynqmp_clk_divider_round_rate(struct clk_hw *hw,
u32 div_type = divider->div_type;
u32 bestdiv;
int ret;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
/* if read only, just return current value */
if (divider->flags & CLK_DIVIDER_READ_ONLY) {
- ret = eemi_ops->clock_getdivider(clk_id, &bestdiv);
+ ret = zynqmp_pm_clock_getdivider(clk_id, &bestdiv);
if (ret)
pr_warn_once("%s() get divider failed for %s, ret = %d\n",
@@ -192,11 +198,13 @@ static long zynqmp_clk_divider_round_rate(struct clk_hw *hw,
*/
if (div_type == TYPE_DIV2 &&
(clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)) {
- zynqmp_get_divider2_val(hw, rate, *prate, divider, &bestdiv);
+ zynqmp_get_divider2_val(hw, rate, divider, &bestdiv);
}
if ((clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) && divider->is_frac)
bestdiv = rate % *prate ? 1 : bestdiv;
+
+ bestdiv = min_t(u32, bestdiv, divider->max_div);
*prate = rate * bestdiv;
return rate;
@@ -219,7 +227,6 @@ static int zynqmp_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
u32 div_type = divider->div_type;
u32 value, div;
int ret;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
value = zynqmp_divider_get_val(parent_rate, rate, divider->flags);
if (div_type == TYPE_DIV1) {
@@ -233,7 +240,7 @@ static int zynqmp_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
div = __ffs(div);
- ret = eemi_ops->clock_setdivider(clk_id, div);
+ ret = zynqmp_pm_clock_setdivider(clk_id, div);
if (ret)
pr_warn_once("%s() set divider failed for %s, ret = %d\n",
@@ -256,9 +263,8 @@ static const struct clk_ops zynqmp_clk_divider_ops = {
* Return: Maximum divisor of a clock if query data is successful
* U16_MAX in case of query data is not success
*/
-u32 zynqmp_clk_get_max_divisor(u32 clk_id, u32 type)
+static u32 zynqmp_clk_get_max_divisor(u32 clk_id, u32 type)
{
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
struct zynqmp_pm_query_data qdata = {0};
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
@@ -266,7 +272,7 @@ u32 zynqmp_clk_get_max_divisor(u32 clk_id, u32 type)
qdata.qid = PM_QID_CLOCK_GET_MAX_DIVISOR;
qdata.arg1 = clk_id;
qdata.arg2 = type;
- ret = eemi_ops->query_data(qdata, ret_payload);
+ ret = zynqmp_pm_query_data(qdata, ret_payload);
/*
* To maintain backward compatibility return maximum possible value
* (0xFFFF) if query for max divisor is not successful.
@@ -311,7 +317,8 @@ struct clk_hw *zynqmp_clk_register_divider(const char *name,
init.num_parents = 1;
/* struct clk_divider assignments */
- div->is_frac = !!(nodes->flag & CLK_FRAC);
+ div->is_frac = !!((nodes->flag & CLK_FRAC) |
+ (nodes->custom_type_flag & CUSTOM_FLAG_CLK_FRAC));
div->flags = nodes->type_flag;
div->hw.init = &init;
div->clk_id = clk_id;
diff --git a/drivers/clk/zynqmp/pll.c b/drivers/clk/zynqmp/pll.c
index 89b599530105..92f449ed38e5 100644
--- a/drivers/clk/zynqmp/pll.c
+++ b/drivers/clk/zynqmp/pll.c
@@ -50,10 +50,8 @@ static inline enum pll_mode zynqmp_pll_get_mode(struct clk_hw *hw)
const char *clk_name = clk_hw_get_name(hw);
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
- ret = eemi_ops->ioctl(0, IOCTL_GET_PLL_FRAC_MODE, clk_id, 0,
- ret_payload);
+ ret = zynqmp_pm_get_pll_frac_mode(clk_id, ret_payload);
if (ret)
pr_warn_once("%s() PLL get frac mode failed for %s, ret = %d\n",
__func__, clk_name, ret);
@@ -73,14 +71,13 @@ static inline void zynqmp_pll_set_mode(struct clk_hw *hw, bool on)
const char *clk_name = clk_hw_get_name(hw);
int ret;
u32 mode;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
if (on)
mode = PLL_MODE_FRAC;
else
mode = PLL_MODE_INT;
- ret = eemi_ops->ioctl(0, IOCTL_SET_PLL_FRAC_MODE, clk_id, mode, NULL);
+ ret = zynqmp_pm_set_pll_frac_mode(clk_id, mode);
if (ret)
pr_warn_once("%s() PLL set frac mode failed for %s, ret = %d\n",
__func__, clk_name, ret);
@@ -139,17 +136,15 @@ static unsigned long zynqmp_pll_recalc_rate(struct clk_hw *hw,
unsigned long rate, frac;
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
- ret = eemi_ops->clock_getdivider(clk_id, &fbdiv);
+ ret = zynqmp_pm_clock_getdivider(clk_id, &fbdiv);
if (ret)
pr_warn_once("%s() get divider failed for %s, ret = %d\n",
__func__, clk_name, ret);
rate = parent_rate * fbdiv;
if (zynqmp_pll_get_mode(hw) == PLL_MODE_FRAC) {
- eemi_ops->ioctl(0, IOCTL_GET_PLL_FRAC_DATA, clk_id, 0,
- ret_payload);
+ zynqmp_pm_get_pll_frac_data(clk_id, ret_payload);
data = ret_payload[1];
frac = (parent_rate * data) / FRAC_DIV;
rate = rate + frac;
@@ -177,7 +172,6 @@ static int zynqmp_pll_set_rate(struct clk_hw *hw, unsigned long rate,
u32 fbdiv;
long rate_div, frac, m, f;
int ret;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
if (zynqmp_pll_get_mode(hw) == PLL_MODE_FRAC) {
rate_div = (rate * FRAC_DIV) / parent_rate;
@@ -187,21 +181,21 @@ static int zynqmp_pll_set_rate(struct clk_hw *hw, unsigned long rate,
rate = parent_rate * m;
frac = (parent_rate * f) / FRAC_DIV;
- ret = eemi_ops->clock_setdivider(clk_id, m);
+ ret = zynqmp_pm_clock_setdivider(clk_id, m);
if (ret == -EUSERS)
WARN(1, "More than allowed devices are using the %s, which is forbidden\n",
clk_name);
else if (ret)
pr_warn_once("%s() set divider failed for %s, ret = %d\n",
__func__, clk_name, ret);
- eemi_ops->ioctl(0, IOCTL_SET_PLL_FRAC_DATA, clk_id, f, NULL);
+ zynqmp_pm_set_pll_frac_data(clk_id, f);
return rate + frac;
}
fbdiv = DIV_ROUND_CLOSEST(rate, parent_rate);
fbdiv = clamp_t(u32, fbdiv, PLL_FBDIV_MIN, PLL_FBDIV_MAX);
- ret = eemi_ops->clock_setdivider(clk_id, fbdiv);
+ ret = zynqmp_pm_clock_setdivider(clk_id, fbdiv);
if (ret)
pr_warn_once("%s() set divider failed for %s, ret = %d\n",
__func__, clk_name, ret);
@@ -222,9 +216,8 @@ static int zynqmp_pll_is_enabled(struct clk_hw *hw)
u32 clk_id = clk->clk_id;
unsigned int state;
int ret;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
- ret = eemi_ops->clock_getstate(clk_id, &state);
+ ret = zynqmp_pm_clock_getstate(clk_id, &state);
if (ret) {
pr_warn_once("%s() clock get state failed for %s, ret = %d\n",
__func__, clk_name, ret);
@@ -246,12 +239,11 @@ static int zynqmp_pll_enable(struct clk_hw *hw)
const char *clk_name = clk_hw_get_name(hw);
u32 clk_id = clk->clk_id;
int ret;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
if (zynqmp_pll_is_enabled(hw))
return 0;
- ret = eemi_ops->clock_enable(clk_id);
+ ret = zynqmp_pm_clock_enable(clk_id);
if (ret)
pr_warn_once("%s() clock enable failed for %s, ret = %d\n",
__func__, clk_name, ret);
@@ -269,12 +261,11 @@ static void zynqmp_pll_disable(struct clk_hw *hw)
const char *clk_name = clk_hw_get_name(hw);
u32 clk_id = clk->clk_id;
int ret;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
if (!zynqmp_pll_is_enabled(hw))
return;
- ret = eemi_ops->clock_disable(clk_id);
+ ret = zynqmp_pm_clock_disable(clk_id);
if (ret)
pr_warn_once("%s() clock disable failed for %s, ret = %d\n",
__func__, clk_name, ret);
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index f2142e6bbea3..91418381fcd4 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -120,7 +120,6 @@ config OWL_TIMER
config RDA_TIMER
bool "RDA timer driver" if COMPILE_TEST
- depends on GENERIC_CLOCKEVENTS
select CLKSRC_MMIO
select TIMER_OF
help
@@ -562,16 +561,16 @@ config CLKSRC_VERSATILE
bool "ARM Versatile (Express) reference platforms clock source" if COMPILE_TEST
depends on GENERIC_SCHED_CLOCK && !ARCH_USES_GETTIMEOFFSET
select TIMER_OF
- default y if MFD_VEXPRESS_SYSREG
+ default y if (ARCH_VEXPRESS || ARCH_VERSATILE) && ARM
help
This option enables clock source based on free running
counter available in the "System Registers" block of
- ARM Versatile, RealView and Versatile Express reference
- platforms.
+ ARM Versatile and Versatile Express reference platforms.
config CLKSRC_MIPS_GIC
bool
depends on MIPS_GIC
+ select CLOCKSOURCE_WATCHDOG
select TIMER_OF
config CLKSRC_TANGO_XTAL
@@ -709,6 +708,7 @@ config MICROCHIP_PIT64B
bool "Microchip PIT64B support"
depends on OF || COMPILE_TEST
select CLKSRC_MMIO
+ select TIMER_OF
help
This option enables Microchip PIT64B timer for Atmel
based system. It supports the oneshot, the periodic
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 641ba5383ab5..bdda1a2e4097 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_CLKSRC_MMIO) += mmio.o
obj-$(CONFIG_DAVINCI_TIMER) += timer-davinci.o
obj-$(CONFIG_DIGICOLOR_TIMER) += timer-digicolor.o
obj-$(CONFIG_OMAP_DM_TIMER) += timer-ti-dm.o
+obj-$(CONFIG_OMAP_DM_TIMER) += timer-ti-dm-systimer.o
obj-$(CONFIG_DW_APB_TIMER) += dw_apb_timer.o
obj-$(CONFIG_DW_APB_TIMER_OF) += dw_apb_timer_of.o
obj-$(CONFIG_FTTMR010_TIMER) += timer-fttmr010.o
diff --git a/drivers/clocksource/arc_timer.c b/drivers/clocksource/arc_timer.c
index b29b5a75333e..de93dd1a8c7b 100644
--- a/drivers/clocksource/arc_timer.c
+++ b/drivers/clocksource/arc_timer.c
@@ -334,10 +334,8 @@ static int __init arc_clockevent_setup(struct device_node *node)
}
ret = arc_get_timer_clk(node);
- if (ret) {
- pr_err("clockevent: missing clk\n");
+ if (ret)
return ret;
- }
/* Needs apriori irq_set_percpu_devid() done in intc map function */
ret = request_percpu_irq(arc_timer_irq, timer_irq_handler,
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 2204a444e801..ecf7b7db2d05 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -1588,10 +1588,8 @@ static int __init arch_timer_acpi_init(struct acpi_table_header *table)
arch_timers_present |= ARCH_TIMER_TYPE_CP15;
ret = acpi_gtdt_init(table, &platform_timer_count);
- if (ret) {
- pr_err("Failed to init GTDT table.\n");
+ if (ret)
return ret;
- }
arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI] =
acpi_gtdt_map_ppi(ARCH_TIMER_PHYS_NONSECURE_PPI);
diff --git a/drivers/clocksource/dw_apb_timer.c b/drivers/clocksource/dw_apb_timer.c
index b207a77b0831..f5f24a95ee82 100644
--- a/drivers/clocksource/dw_apb_timer.c
+++ b/drivers/clocksource/dw_apb_timer.c
@@ -222,7 +222,8 @@ static int apbt_next_event(unsigned long delta,
/**
* dw_apb_clockevent_init() - use an APB timer as a clock_event_device
*
- * @cpu: The CPU the events will be targeted at.
+ * @cpu:	The CPU the events will be targeted at, or -1 if CPU affinity
+ * isn't required.
* @name: The name used for the timer and the IRQ for it.
* @rating: The rating to give the timer.
* @base: I/O base for the timer registers.
@@ -257,7 +258,7 @@ dw_apb_clockevent_init(int cpu, const char *name, unsigned rating,
dw_ced->ced.max_delta_ticks = 0x7fffffff;
dw_ced->ced.min_delta_ns = clockevent_delta2ns(5000, &dw_ced->ced);
dw_ced->ced.min_delta_ticks = 5000;
- dw_ced->ced.cpumask = cpumask_of(cpu);
+ dw_ced->ced.cpumask = cpu < 0 ? cpu_possible_mask : cpumask_of(cpu);
dw_ced->ced.features = CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_DYNIRQ;
dw_ced->ced.set_state_shutdown = apbt_shutdown;
diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c
index 8c28b127759f..ab3ddebe8344 100644
--- a/drivers/clocksource/dw_apb_timer_of.c
+++ b/drivers/clocksource/dw_apb_timer_of.c
@@ -73,7 +73,7 @@ static void __init add_clockevent(struct device_node *event_timer)
timer_get_base_and_rate(event_timer, &iobase, &rate);
- ced = dw_apb_clockevent_init(0, event_timer->name, 300, iobase, irq,
+ ced = dw_apb_clockevent_init(-1, event_timer->name, 300, iobase, irq,
rate);
if (!ced)
panic("Unable to initialise clockevent device");
@@ -147,10 +147,6 @@ static int num_called;
static int __init dw_apb_timer_init(struct device_node *timer)
{
switch (num_called) {
- case 0:
- pr_debug("%s: found clockevent timer\n", __func__);
- add_clockevent(timer);
- break;
case 1:
pr_debug("%s: found clocksource timer\n", __func__);
add_clocksource(timer);
@@ -161,6 +157,8 @@ static int __init dw_apb_timer_init(struct device_node *timer)
#endif
break;
default:
+ pr_debug("%s: found clockevent timer\n", __func__);
+ add_clockevent(timer);
break;
}
diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c
index 8b5f8ae723cb..be4175f415ba 100644
--- a/drivers/clocksource/mips-gic-timer.c
+++ b/drivers/clocksource/mips-gic-timer.c
@@ -16,6 +16,7 @@
#include <linux/notifier.h>
#include <linux/of_irq.h>
#include <linux/percpu.h>
+#include <linux/sched_clock.h>
#include <linux/smp.h>
#include <linux/time.h>
#include <asm/mips-cps.h>
@@ -23,14 +24,14 @@
static DEFINE_PER_CPU(struct clock_event_device, gic_clockevent_device);
static int gic_timer_irq;
static unsigned int gic_frequency;
+static bool __read_mostly gic_clock_unstable;
-static u64 notrace gic_read_count(void)
+static void gic_clocksource_unstable(char *reason);
+
+static u64 notrace gic_read_count_2x32(void)
{
unsigned int hi, hi2, lo;
- if (mips_cm_is64)
- return read_gic_counter();
-
do {
hi = read_gic_counter_32h();
lo = read_gic_counter_32l();
@@ -40,6 +41,19 @@ static u64 notrace gic_read_count(void)
return (((u64) hi) << 32) + lo;
}
+static u64 notrace gic_read_count_64(void)
+{
+ return read_gic_counter();
+}
+
+static u64 notrace gic_read_count(void)
+{
+ if (mips_cm_is64)
+ return gic_read_count_64();
+
+ return gic_read_count_2x32();
+}
+
static int gic_next_event(unsigned long delta, struct clock_event_device *evt)
{
int cpu = cpumask_first(evt->cpumask);
@@ -114,8 +128,10 @@ static int gic_clk_notifier(struct notifier_block *nb, unsigned long action,
{
struct clk_notifier_data *cnd = data;
- if (action == POST_RATE_CHANGE)
+ if (action == POST_RATE_CHANGE) {
+ gic_clocksource_unstable("ref clock rate change");
on_each_cpu(gic_update_frequency, (void *)cnd->new_rate, 1);
+ }
return NOTIFY_OK;
}
@@ -161,6 +177,18 @@ static struct clocksource gic_clocksource = {
.vdso_clock_mode = VDSO_CLOCKMODE_GIC,
};
+static void gic_clocksource_unstable(char *reason)
+{
+ if (gic_clock_unstable)
+ return;
+
+ gic_clock_unstable = true;
+
+ pr_info("GIC timer is unstable due to %s\n", reason);
+
+ clocksource_mark_unstable(&gic_clocksource);
+}
+
static int __init __gic_clocksource_init(void)
{
unsigned int count_width;
@@ -228,6 +256,18 @@ static int __init gic_clocksource_of_init(struct device_node *node)
/* And finally start the counter */
clear_gic_config(GIC_CONFIG_COUNTSTOP);
+ /*
+ * It's safe to use the MIPS GIC timer as a sched clock source only if
+	 * its ticks are stable, which is the case either on platforms with a
+	 * stable CPU frequency or on CM3-based platforms where CPU frequency
+	 * changes are performed by the CPC core clock divider.
+ */
+ if (mips_cm_revision() >= CM_REV_CM3 || !IS_ENABLED(CONFIG_CPU_FREQ)) {
+ sched_clock_register(mips_cm_is64 ?
+ gic_read_count_64 : gic_read_count_2x32,
+ 64, gic_frequency);
+ }
+
return 0;
}
TIMER_OF_DECLARE(mips_gic_timer, "mti,gic-timer",
diff --git a/drivers/clocksource/timer-atmel-st.c b/drivers/clocksource/timer-atmel-st.c
index ab0aabfae5f0..73e8aee445da 100644
--- a/drivers/clocksource/timer-atmel-st.c
+++ b/drivers/clocksource/timer-atmel-st.c
@@ -139,7 +139,6 @@ static int
clkevt32k_next_event(unsigned long delta, struct clock_event_device *dev)
{
u32 alm;
- int status = 0;
unsigned int val;
BUG_ON(delta < 2);
@@ -163,7 +162,7 @@ clkevt32k_next_event(unsigned long delta, struct clock_event_device *dev)
alm += delta;
regmap_write(regmap_st, AT91_ST_RTAR, alm);
- return status;
+ return 0;
}
static struct clock_event_device clkevt = {
diff --git a/drivers/clocksource/timer-davinci.c b/drivers/clocksource/timer-davinci.c
index e421946a91c5..bb4eee31ae08 100644
--- a/drivers/clocksource/timer-davinci.c
+++ b/drivers/clocksource/timer-davinci.c
@@ -18,7 +18,7 @@
#include <clocksource/timer-davinci.h>
#undef pr_fmt
-#define pr_fmt(fmt) "%s: " fmt "\n", __func__
+#define pr_fmt(fmt) "%s: " fmt, __func__
#define DAVINCI_TIMER_REG_TIM12 0x10
#define DAVINCI_TIMER_REG_TIM34 0x14
@@ -250,31 +250,29 @@ int __init davinci_timer_register(struct clk *clk,
rv = clk_prepare_enable(clk);
if (rv) {
- pr_err("Unable to prepare and enable the timer clock");
+ pr_err("Unable to prepare and enable the timer clock\n");
return rv;
}
if (!request_mem_region(timer_cfg->reg.start,
resource_size(&timer_cfg->reg),
"davinci-timer")) {
- pr_err("Unable to request memory region");
+ pr_err("Unable to request memory region\n");
return -EBUSY;
}
base = ioremap(timer_cfg->reg.start, resource_size(&timer_cfg->reg));
if (!base) {
- pr_err("Unable to map the register range");
+ pr_err("Unable to map the register range\n");
return -ENOMEM;
}
davinci_timer_init(base);
tick_rate = clk_get_rate(clk);
- clockevent = kzalloc(sizeof(*clockevent), GFP_KERNEL | __GFP_NOFAIL);
- if (!clockevent) {
- pr_err("Error allocating memory for clockevent data");
+ clockevent = kzalloc(sizeof(*clockevent), GFP_KERNEL);
+ if (!clockevent)
return -ENOMEM;
- }
clockevent->dev.name = "tim12";
clockevent->dev.features = CLOCK_EVT_FEAT_ONESHOT;
@@ -298,7 +296,7 @@ int __init davinci_timer_register(struct clk *clk,
davinci_timer_irq_timer, IRQF_TIMER,
"clockevent/tim12", clockevent);
if (rv) {
- pr_err("Unable to request the clockevent interrupt");
+ pr_err("Unable to request the clockevent interrupt\n");
return rv;
}
@@ -325,7 +323,7 @@ int __init davinci_timer_register(struct clk *clk,
rv = clocksource_register_hz(&davinci_clocksource.dev, tick_rate);
if (rv) {
- pr_err("Unable to register clocksource");
+ pr_err("Unable to register clocksource\n");
return rv;
}
@@ -343,20 +341,20 @@ static int __init of_davinci_timer_register(struct device_node *np)
rv = of_address_to_resource(np, 0, &timer_cfg.reg);
if (rv) {
- pr_err("Unable to get the register range for timer");
+ pr_err("Unable to get the register range for timer\n");
return rv;
}
rv = of_irq_to_resource_table(np, timer_cfg.irq,
DAVINCI_TIMER_NUM_IRQS);
if (rv != DAVINCI_TIMER_NUM_IRQS) {
- pr_err("Unable to get the interrupts for timer");
+ pr_err("Unable to get the interrupts for timer\n");
return rv;
}
clk = of_clk_get(np, 0);
if (IS_ERR(clk)) {
- pr_err("Unable to get the timer clock");
+ pr_err("Unable to get the timer clock\n");
return PTR_ERR(clk);
}
diff --git a/drivers/clocksource/timer-imx-tpm.c b/drivers/clocksource/timer-imx-tpm.c
index 6334a35fdc2f..2cdc077a39f5 100644
--- a/drivers/clocksource/timer-imx-tpm.c
+++ b/drivers/clocksource/timer-imx-tpm.c
@@ -61,17 +61,19 @@ static inline void tpm_irq_acknowledge(void)
writel(TPM_STATUS_CH0F, timer_base + TPM_STATUS);
}
-static struct delay_timer tpm_delay_timer;
-
static inline unsigned long tpm_read_counter(void)
{
return readl(timer_base + TPM_CNT);
}
+#if defined(CONFIG_ARM)
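+/* register_current_timer_delay() is only available on 32-bit ARM */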
+static struct delay_timer tpm_delay_timer;
+
static unsigned long tpm_read_current_timer(void)
{
return tpm_read_counter();
}
+#endif
static u64 notrace tpm_read_sched_clock(void)
{
@@ -144,9 +146,11 @@ static struct timer_of to_tpm = {
static int __init tpm_clocksource_init(void)
{
+#if defined(CONFIG_ARM)
tpm_delay_timer.read_current_timer = &tpm_read_current_timer;
tpm_delay_timer.freq = timer_of_rate(&to_tpm) >> 3;
register_current_timer_delay(&tpm_delay_timer);
+#endif
sched_clock_register(tpm_read_sched_clock, counter_width,
timer_of_rate(&to_tpm) >> 3);
diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c
index c4f15c4068c0..9de1dabfb126 100644
--- a/drivers/clocksource/timer-riscv.c
+++ b/drivers/clocksource/timer-riscv.c
@@ -12,8 +12,11 @@
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/sched_clock.h>
#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/interrupt.h>
+#include <linux/of_irq.h>
#include <asm/smp.h>
#include <asm/sbi.h>
@@ -39,6 +42,7 @@ static int riscv_clock_next_event(unsigned long delta,
return 0;
}
+static unsigned int riscv_clock_event_irq;
static DEFINE_PER_CPU(struct clock_event_device, riscv_clock_event) = {
.name = "riscv_timer_clockevent",
.features = CLOCK_EVT_FEAT_ONESHOT,
@@ -74,30 +78,36 @@ static int riscv_timer_starting_cpu(unsigned int cpu)
struct clock_event_device *ce = per_cpu_ptr(&riscv_clock_event, cpu);
ce->cpumask = cpumask_of(cpu);
+ ce->irq = riscv_clock_event_irq;
clockevents_config_and_register(ce, riscv_timebase, 100, 0x7fffffff);
- csr_set(CSR_IE, IE_TIE);
+ enable_percpu_irq(riscv_clock_event_irq,
+ irq_get_trigger_type(riscv_clock_event_irq));
return 0;
}
static int riscv_timer_dying_cpu(unsigned int cpu)
{
- csr_clear(CSR_IE, IE_TIE);
+ disable_percpu_irq(riscv_clock_event_irq);
return 0;
}
/* called directly from the low-level interrupt handler */
-void riscv_timer_interrupt(void)
+static irqreturn_t riscv_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evdev = this_cpu_ptr(&riscv_clock_event);
csr_clear(CSR_IE, IE_TIE);
evdev->event_handler(evdev);
+
+ return IRQ_HANDLED;
}
static int __init riscv_timer_init_dt(struct device_node *n)
{
int cpuid, hartid, error;
+ struct device_node *child;
+ struct irq_domain *domain;
hartid = riscv_of_processor_hartid(n);
if (hartid < 0) {
@@ -115,6 +125,25 @@ static int __init riscv_timer_init_dt(struct device_node *n)
if (cpuid != smp_processor_id())
return 0;
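+	/*
+	 * The timer interrupt is a hart-local interrupt: look up this CPU's
+	 * INTC node and map RV_IRQ_TIMER through its irqdomain so it can be
+	 * requested as a regular percpu IRQ further below.
+	 */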
+ domain = NULL;
+ child = of_get_compatible_child(n, "riscv,cpu-intc");
+ if (!child) {
+ pr_err("Failed to find INTC node [%pOF]\n", n);
+ return -ENODEV;
+ }
+ domain = irq_find_host(child);
+ of_node_put(child);
+ if (!domain) {
+ pr_err("Failed to find IRQ domain for node [%pOF]\n", n);
+ return -ENODEV;
+ }
+
+ riscv_clock_event_irq = irq_create_mapping(domain, RV_IRQ_TIMER);
+ if (!riscv_clock_event_irq) {
+ pr_err("Failed to map timer interrupt for node [%pOF]\n", n);
+ return -ENODEV;
+ }
+
pr_info("%s: Registering clocksource cpuid [%d] hartid [%d]\n",
__func__, cpuid, hartid);
error = clocksource_register_hz(&riscv_clocksource, riscv_timebase);
@@ -126,6 +155,14 @@ static int __init riscv_timer_init_dt(struct device_node *n)
sched_clock_register(riscv_sched_clock, 64, riscv_timebase);
+ error = request_percpu_irq(riscv_clock_event_irq,
+ riscv_timer_interrupt,
+ "riscv-timer", &riscv_clock_event);
+ if (error) {
+ pr_err("registering percpu irq failed [%d]\n", error);
+ return error;
+ }
+
error = cpuhp_setup_state(CPUHP_AP_RISCV_TIMER_STARTING,
"clockevents/riscv/timer:starting",
riscv_timer_starting_cpu, riscv_timer_dying_cpu);
diff --git a/drivers/clocksource/timer-ti-32k.c b/drivers/clocksource/timer-ti-32k.c
index abd5f158d6e2..ae12bbf3d68c 100644
--- a/drivers/clocksource/timer-ti-32k.c
+++ b/drivers/clocksource/timer-ti-32k.c
@@ -24,6 +24,7 @@
* Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com
*/
+#include <linux/clk.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/sched_clock.h>
@@ -76,6 +77,49 @@ static u64 notrace omap_32k_read_sched_clock(void)
return ti_32k_read_cycles(&ti_32k_timer.cs);
}
+static void __init ti_32k_timer_enable_clock(struct device_node *np,
+ const char *name)
+{
+ struct clk *clock;
+ int error;
+
+ clock = of_clk_get_by_name(np->parent, name);
+ if (IS_ERR(clock)) {
+ /* Only some SoCs have a separate interface clock */
+ if (PTR_ERR(clock) == -EINVAL && !strncmp("ick", name, 3))
+ return;
+
+ pr_warn("%s: could not get clock %s %li\n",
+ __func__, name, PTR_ERR(clock));
+ return;
+ }
+
+ error = clk_prepare_enable(clock);
+ if (error) {
+ pr_warn("%s: could not enable %s: %i\n",
+ __func__, name, error);
+ return;
+ }
+}
+
+static void __init ti_32k_timer_module_init(struct device_node *np,
+ void __iomem *base)
+{
+ void __iomem *sysc = base + 4;
+
+ if (!of_device_is_compatible(np->parent, "ti,sysc"))
+ return;
+
+ ti_32k_timer_enable_clock(np, "fck");
+ ti_32k_timer_enable_clock(np, "ick");
+
+ /*
+ * Force idle module as wkup domain is active with MPU.
+ * No need to tag the module disabled for ti-sysc probe.
+ */
+ writel_relaxed(0, sysc);
+}
+
static int __init ti_32k_timer_init(struct device_node *np)
{
int ret;
@@ -90,6 +134,7 @@ static int __init ti_32k_timer_init(struct device_node *np)
ti_32k_timer.cs.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
ti_32k_timer.counter = ti_32k_timer.base;
+ ti_32k_timer_module_init(np, ti_32k_timer.base);
/*
* 32k sync Counter IP register offsets vary between the highlander
@@ -104,6 +149,8 @@ static int __init ti_32k_timer_init(struct device_node *np)
else
ti_32k_timer.counter += OMAP2_32KSYNCNT_CR_OFF_LOW;
+ pr_info("OMAP clocksource: 32k_counter at 32768 Hz\n");
+
ret = clocksource_register_hz(&ti_32k_timer.cs, 32768);
if (ret) {
pr_err("32k_counter: can't register clocksource\n");
@@ -111,7 +158,6 @@ static int __init ti_32k_timer_init(struct device_node *np)
}
sched_clock_register(omap_32k_read_sched_clock, 32, 32768);
- pr_info("OMAP clocksource: 32k_counter at 32768 Hz\n");
return 0;
}
diff --git a/drivers/clocksource/timer-ti-dm-systimer.c b/drivers/clocksource/timer-ti-dm-systimer.c
new file mode 100644
index 000000000000..6fd1f219a512
--- /dev/null
+++ b/drivers/clocksource/timer-ti-dm-systimer.c
@@ -0,0 +1,727 @@
+// SPDX-License-Identifier: GPL-2.0+
+#include <linux/clk.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/sched_clock.h>
+
+#include <linux/clk/clk-conf.h>
+
+#include <clocksource/timer-ti-dm.h>
+#include <dt-bindings/bus/ti-sysc.h>
+
+/* For type1, set SYSC_OMAP2_CLOCKACTIVITY for fck off on idle, l4 clock on */
+#define DMTIMER_TYPE1_ENABLE ((1 << 9) | (SYSC_IDLE_SMART << 3) | \
+ SYSC_OMAP2_ENAWAKEUP | SYSC_OMAP2_AUTOIDLE)
+
+#define DMTIMER_TYPE2_ENABLE (SYSC_IDLE_SMART_WKUP << 2)
+#define DMTIMER_RESET_WAIT 100000
+
+#define DMTIMER_INST_DONT_CARE ~0U
+
+static int counter_32k;
+static u32 clocksource;
+static u32 clockevent;
+
+/*
+ * Subset of the timer registers we use. Note that the register offsets
+ * depend on the timer revision detected.
+ */
+struct dmtimer_systimer {
+ void __iomem *base;
+ u8 sysc;
+ u8 irq_stat;
+ u8 irq_ena;
+ u8 pend;
+ u8 load;
+ u8 counter;
+ u8 ctrl;
+ u8 wakeup;
+ u8 ifctrl;
+ unsigned long rate;
+};
+
+struct dmtimer_clockevent {
+ struct clock_event_device dev;
+ struct dmtimer_systimer t;
+ u32 period;
+};
+
+struct dmtimer_clocksource {
+ struct clocksource dev;
+ struct dmtimer_systimer t;
+ unsigned int loadval;
+};
+
+/* Assumes v1 ip if bits [31:16] are zero */
+static bool dmtimer_systimer_revision1(struct dmtimer_systimer *t)
+{
+ u32 tidr = readl_relaxed(t->base);
+
+ return !(tidr >> 16);
+}
+
+static int __init dmtimer_systimer_type1_reset(struct dmtimer_systimer *t)
+{
+ void __iomem *syss = t->base + OMAP_TIMER_V1_SYS_STAT_OFFSET;
+ int ret;
+ u32 l;
+
+ writel_relaxed(BIT(1) | BIT(2), t->base + t->ifctrl);
+ ret = readl_poll_timeout_atomic(syss, l, l & BIT(0), 100,
+ DMTIMER_RESET_WAIT);
+
+ return ret;
+}
+
+/* Note we must use io_base instead of func_base for type2 OCP regs */
+static int __init dmtimer_systimer_type2_reset(struct dmtimer_systimer *t)
+{
+ void __iomem *sysc = t->base + t->sysc;
+ u32 l;
+
+ l = readl_relaxed(sysc);
+ l |= BIT(0);
+ writel_relaxed(l, sysc);
+
+ return readl_poll_timeout_atomic(sysc, l, !(l & BIT(0)), 100,
+ DMTIMER_RESET_WAIT);
+}
+
+static int __init dmtimer_systimer_reset(struct dmtimer_systimer *t)
+{
+ int ret;
+
+ if (dmtimer_systimer_revision1(t))
+ ret = dmtimer_systimer_type1_reset(t);
+ else
+ ret = dmtimer_systimer_type2_reset(t);
+ if (ret < 0) {
+ pr_err("%s failed with %i\n", __func__, ret);
+
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id counter_match_table[] = {
+ { .compatible = "ti,omap-counter32k" },
+ { /* Sentinel */ },
+};
+
+/*
+ * Check if the SoC also has a usable 32 KiHz counter. The 32 KiHz
+ * counter is handled by timer-ti-32k, but we need to detect it as it
+ * affects the preferred dmtimer system timer configuration. There is
+ * typically no use for a dmtimer clocksource if the 32 KiHz counter is
+ * present, except on am437x as described below.
+ */
+static void __init dmtimer_systimer_check_counter32k(void)
+{
+ struct device_node *np;
+
+ if (counter_32k)
+ return;
+
+ np = of_find_matching_node(NULL, counter_match_table);
+ if (!np) {
+ counter_32k = -ENODEV;
+
+ return;
+ }
+
+ if (of_device_is_available(np))
+ counter_32k = 1;
+ else
+ counter_32k = -ENODEV;
+
+ of_node_put(np);
+}
+
+static const struct of_device_id dmtimer_match_table[] = {
+ { .compatible = "ti,omap2420-timer", },
+ { .compatible = "ti,omap3430-timer", },
+ { .compatible = "ti,omap4430-timer", },
+ { .compatible = "ti,omap5430-timer", },
+ { .compatible = "ti,am335x-timer", },
+ { .compatible = "ti,am335x-timer-1ms", },
+ { .compatible = "ti,dm814-timer", },
+ { .compatible = "ti,dm816-timer", },
+ { /* Sentinel */ },
+};
+
+/*
+ * Checks that system timers are configured to not reset and idle during
+ * the generic timer-ti-dm device driver probe, and that the system timer
+ * source clocks are properly configured. Also, let's not hog any DSP and
+ * PWM capable timers unnecessarily as system timers.
+ */
+static bool __init dmtimer_is_preferred(struct device_node *np)
+{
+ if (!of_device_is_available(np))
+ return false;
+
+ if (!of_property_read_bool(np->parent,
+ "ti,no-reset-on-init"))
+ return false;
+
+ if (!of_property_read_bool(np->parent, "ti,no-idle"))
+ return false;
+
+ /* Secure gptimer12 is always clocked with a fixed source */
+ if (!of_property_read_bool(np, "ti,timer-secure")) {
+ if (!of_property_read_bool(np, "assigned-clocks"))
+ return false;
+
+ if (!of_property_read_bool(np, "assigned-clock-parents"))
+ return false;
+ }
+
+ if (of_property_read_bool(np, "ti,timer-dsp"))
+ return false;
+
+ if (of_property_read_bool(np, "ti,timer-pwm"))
+ return false;
+
+ return true;
+}
+
+/*
+ * Finds the first available usable always-on timer, and assigns it to either
+ * clockevent or clocksource depending on whether the counter_32k is available
+ * on the SoC.
+ *
+ * Some omap3 boards with an unreliable oscillator must not use the counter_32k
+ * or dmtimer1 with the 32 KiHz source. Additionally, boards with an unreliable
+ * oscillator should really set counter_32k as disabled and delete the dmtimer1
+ * ti,always-on property, but let's not count on it. For these quirky cases,
+ * we prefer using the always-on secure dmtimer12 with the internal 32 KiHz
+ * clock as the clocksource, and any available dmtimer as clockevent.
+ *
+ * For am437x, we are using am335x style dmtimer clocksource. It is unclear
+ * if this quirk handling is really needed, but let's change it separately
+ * based on testing as it might cause side effects.
+ */
+static void __init dmtimer_systimer_assign_alwon(void)
+{
+ struct device_node *np;
+ u32 pa = 0;
+ bool quirk_unreliable_oscillator = false;
+
+ /* Quirk unreliable 32 KiHz oscillator with incomplete dts */
+ if (of_machine_is_compatible("ti,omap3-beagle") ||
+ of_machine_is_compatible("timll,omap3-devkit8000")) {
+ quirk_unreliable_oscillator = true;
+ counter_32k = -ENODEV;
+ }
+
+ /* Quirk am437x using am335x style dmtimer clocksource */
+ if (of_machine_is_compatible("ti,am43"))
+ counter_32k = -ENODEV;
+
+ for_each_matching_node(np, dmtimer_match_table) {
+ if (!dmtimer_is_preferred(np))
+ continue;
+
+ if (of_property_read_bool(np, "ti,timer-alwon")) {
+ const __be32 *addr;
+
+ addr = of_get_address(np, 0, NULL, NULL);
+ pa = of_translate_address(np, addr);
+ if (pa) {
+ /* Quirky omap3 boards must use dmtimer12 */
+ if (quirk_unreliable_oscillator &&
+ pa == 0x48318000)
+ continue;
+
+ of_node_put(np);
+ break;
+ }
+ }
+ }
+
+ /* Usually no need for dmtimer clocksource if we have counter32 */
+ if (counter_32k >= 0) {
+ clockevent = pa;
+ clocksource = 0;
+ } else {
+ clocksource = pa;
+ clockevent = DMTIMER_INST_DONT_CARE;
+ }
+}
+
+/* Finds the first usable dmtimer, used for the don't care case */
+static u32 __init dmtimer_systimer_find_first_available(void)
+{
+ struct device_node *np;
+ const __be32 *addr;
+ u32 pa = 0;
+
+ for_each_matching_node(np, dmtimer_match_table) {
+ if (!dmtimer_is_preferred(np))
+ continue;
+
+ addr = of_get_address(np, 0, NULL, NULL);
+ pa = of_translate_address(np, addr);
+ if (pa) {
+ if (pa == clocksource || pa == clockevent) {
+ pa = 0;
+ continue;
+ }
+
+ of_node_put(np);
+ break;
+ }
+ }
+
+ return pa;
+}
+
+/* Selects the best clocksource and clockevent to use */
+static void __init dmtimer_systimer_select_best(void)
+{
+ dmtimer_systimer_check_counter32k();
+ dmtimer_systimer_assign_alwon();
+
+ if (clockevent == DMTIMER_INST_DONT_CARE)
+ clockevent = dmtimer_systimer_find_first_available();
+
+ pr_debug("%s: counter_32k: %i clocksource: %08x clockevent: %08x\n",
+ __func__, counter_32k, clocksource, clockevent);
+}
+
+/* Interface clocks are only available on some SoC variants */
+static int __init dmtimer_systimer_init_clock(struct device_node *np,
+ const char *name,
+ unsigned long *rate)
+{
+ struct clk *clock;
+ unsigned long r;
+ int error;
+
+ clock = of_clk_get_by_name(np, name);
+ if ((PTR_ERR(clock) == -EINVAL) && !strncmp(name, "ick", 3))
+ return 0;
+ else if (IS_ERR(clock))
+ return PTR_ERR(clock);
+
+ error = clk_prepare_enable(clock);
+ if (error)
+ return error;
+
+ r = clk_get_rate(clock);
+ if (!r)
+ return -ENODEV;
+
+ *rate = r;
+
+ return 0;
+}
+
+static void dmtimer_systimer_enable(struct dmtimer_systimer *t)
+{
+ u32 val;
+
+ if (dmtimer_systimer_revision1(t))
+ val = DMTIMER_TYPE1_ENABLE;
+ else
+ val = DMTIMER_TYPE2_ENABLE;
+
+ writel_relaxed(val, t->base + t->sysc);
+}
+
+static void dmtimer_systimer_disable(struct dmtimer_systimer *t)
+{
+ writel_relaxed(0, t->base + t->sysc);
+}
+
+static int __init dmtimer_systimer_setup(struct device_node *np,
+ struct dmtimer_systimer *t)
+{
+ unsigned long rate;
+ u8 regbase;
+ int error;
+
+ if (!of_device_is_compatible(np->parent, "ti,sysc"))
+ return -EINVAL;
+
+ t->base = of_iomap(np, 0);
+ if (!t->base)
+ return -ENXIO;
+
+ /*
+ * Enable optional assigned-clock-parents configured at the timer
+ * node level. For regular device drivers, this is done automatically
+ * by bus related code such as platform_drv_probe().
+ */
+ error = of_clk_set_defaults(np, false);
+ if (error < 0)
+ pr_err("%s: clock source init failed: %i\n", __func__, error);
+
+ /* For ti-sysc, we have timer clocks at the parent module level */
+ error = dmtimer_systimer_init_clock(np->parent, "fck", &rate);
+ if (error)
+ goto err_unmap;
+
+ t->rate = rate;
+
+ error = dmtimer_systimer_init_clock(np->parent, "ick", &rate);
+ if (error)
+ goto err_unmap;
+
+ if (dmtimer_systimer_revision1(t)) {
+ t->irq_stat = OMAP_TIMER_V1_STAT_OFFSET;
+ t->irq_ena = OMAP_TIMER_V1_INT_EN_OFFSET;
+ t->pend = _OMAP_TIMER_WRITE_PEND_OFFSET;
+ regbase = 0;
+ } else {
+ t->irq_stat = OMAP_TIMER_V2_IRQSTATUS;
+ t->irq_ena = OMAP_TIMER_V2_IRQENABLE_SET;
+ regbase = OMAP_TIMER_V2_FUNC_OFFSET;
+ t->pend = regbase + _OMAP_TIMER_WRITE_PEND_OFFSET;
+ }
+
+ t->sysc = OMAP_TIMER_OCP_CFG_OFFSET;
+ t->load = regbase + _OMAP_TIMER_LOAD_OFFSET;
+ t->counter = regbase + _OMAP_TIMER_COUNTER_OFFSET;
+ t->ctrl = regbase + _OMAP_TIMER_CTRL_OFFSET;
+ t->wakeup = regbase + _OMAP_TIMER_WAKEUP_EN_OFFSET;
+ t->ifctrl = regbase + _OMAP_TIMER_IF_CTRL_OFFSET;
+
+ dmtimer_systimer_enable(t);
+ dmtimer_systimer_reset(t);
+ pr_debug("dmtimer rev %08x sysc %08x\n", readl_relaxed(t->base),
+ readl_relaxed(t->base + t->sysc));
+
+ return 0;
+
+err_unmap:
+ iounmap(t->base);
+
+ return error;
+}
+
+/* Clockevent */
+static struct dmtimer_clockevent *
+to_dmtimer_clockevent(struct clock_event_device *clockevent)
+{
+ return container_of(clockevent, struct dmtimer_clockevent, dev);
+}
+
+static irqreturn_t dmtimer_clockevent_interrupt(int irq, void *data)
+{
+ struct dmtimer_clockevent *clkevt = data;
+ struct dmtimer_systimer *t = &clkevt->t;
+
+ writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->irq_stat);
+ clkevt->dev.event_handler(&clkevt->dev);
+
+ return IRQ_HANDLED;
+}
+
+static int dmtimer_set_next_event(unsigned long cycles,
+ struct clock_event_device *evt)
+{
+ struct dmtimer_clockevent *clkevt = to_dmtimer_clockevent(evt);
+ struct dmtimer_systimer *t = &clkevt->t;
+ void __iomem *pend = t->base + t->pend;
+
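+	/*
+	 * The timer counts up and fires its overflow interrupt when wrapping
+	 * past 0xffffffff, so load 0xffffffff - cycles to expire after the
+	 * requested number of ticks. The write-posting status polls below
+	 * wait for each posted register write to reach the timer before the
+	 * next write is issued.
+	 */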
+ writel_relaxed(0xffffffff - cycles, t->base + t->counter);
+ while (readl_relaxed(pend) & WP_TCRR)
+ cpu_relax();
+
+ writel_relaxed(OMAP_TIMER_CTRL_ST, t->base + t->ctrl);
+ while (readl_relaxed(pend) & WP_TCLR)
+ cpu_relax();
+
+ return 0;
+}
+
+static int dmtimer_clockevent_shutdown(struct clock_event_device *evt)
+{
+ struct dmtimer_clockevent *clkevt = to_dmtimer_clockevent(evt);
+ struct dmtimer_systimer *t = &clkevt->t;
+ void __iomem *ctrl = t->base + t->ctrl;
+ u32 l;
+
+ l = readl_relaxed(ctrl);
+ if (l & OMAP_TIMER_CTRL_ST) {
+ l &= ~BIT(0);
+ writel_relaxed(l, ctrl);
+ /* Flush posted write */
+ l = readl_relaxed(ctrl);
+ /* Wait for functional clock period x 3.5 */
+ udelay(3500000 / t->rate + 1);
+ }
+ writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->irq_stat);
+
+ return 0;
+}
+
+static int dmtimer_set_periodic(struct clock_event_device *evt)
+{
+ struct dmtimer_clockevent *clkevt = to_dmtimer_clockevent(evt);
+ struct dmtimer_systimer *t = &clkevt->t;
+ void __iomem *pend = t->base + t->pend;
+
+ dmtimer_clockevent_shutdown(evt);
+
+ /* Looks like we need to first set the load value separately */
+ writel_relaxed(clkevt->period, t->base + t->load);
+ while (readl_relaxed(pend) & WP_TLDR)
+ cpu_relax();
+
+ writel_relaxed(clkevt->period, t->base + t->counter);
+ while (readl_relaxed(pend) & WP_TCRR)
+ cpu_relax();
+
+ writel_relaxed(OMAP_TIMER_CTRL_AR | OMAP_TIMER_CTRL_ST,
+ t->base + t->ctrl);
+ while (readl_relaxed(pend) & WP_TCLR)
+ cpu_relax();
+
+ return 0;
+}
+
+static void omap_clockevent_idle(struct clock_event_device *evt)
+{
+ struct dmtimer_clockevent *clkevt = to_dmtimer_clockevent(evt);
+ struct dmtimer_systimer *t = &clkevt->t;
+
+ dmtimer_systimer_disable(t);
+}
+
+static void omap_clockevent_unidle(struct clock_event_device *evt)
+{
+ struct dmtimer_clockevent *clkevt = to_dmtimer_clockevent(evt);
+ struct dmtimer_systimer *t = &clkevt->t;
+
+ dmtimer_systimer_enable(t);
+ writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->irq_ena);
+ writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->wakeup);
+}
+
+static int __init dmtimer_clockevent_init(struct device_node *np)
+{
+ struct dmtimer_clockevent *clkevt;
+ struct clock_event_device *dev;
+ struct dmtimer_systimer *t;
+ int error;
+
+ clkevt = kzalloc(sizeof(*clkevt), GFP_KERNEL);
+ if (!clkevt)
+ return -ENOMEM;
+
+ t = &clkevt->t;
+ dev = &clkevt->dev;
+
+ /*
+ * We mostly use cpuidle_coupled with ARM local timers for runtime,
+ * so there's probably no use for CLOCK_EVT_FEAT_DYNIRQ here.
+ */
+ dev->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+ dev->rating = 300;
+ dev->set_next_event = dmtimer_set_next_event;
+ dev->set_state_shutdown = dmtimer_clockevent_shutdown;
+ dev->set_state_periodic = dmtimer_set_periodic;
+ dev->set_state_oneshot = dmtimer_clockevent_shutdown;
+ dev->tick_resume = dmtimer_clockevent_shutdown;
+ dev->cpumask = cpu_possible_mask;
+
+ dev->irq = irq_of_parse_and_map(np, 0);
+ if (!dev->irq) {
+ error = -ENXIO;
+ goto err_out_free;
+ }
+
+ error = dmtimer_systimer_setup(np, &clkevt->t);
+ if (error)
+ goto err_out_free;
+
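+	/*
+	 * Periodic mode reloads 0xffffffff - (rate / HZ), so the counter
+	 * overflows and interrupts HZ times per second.
+	 */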
+ clkevt->period = 0xffffffff - DIV_ROUND_CLOSEST(t->rate, HZ);
+
+ /*
+ * For clock-event timers we never read the timer counter and
+ * so we are not impacted by errata i103 and i767. Therefore,
+	 * we can safely ignore these errata for clock-event timers.
+ */
+ writel_relaxed(OMAP_TIMER_CTRL_POSTED, t->base + t->ifctrl);
+
+ error = request_irq(dev->irq, dmtimer_clockevent_interrupt,
+ IRQF_TIMER, "clockevent", clkevt);
+ if (error)
+ goto err_out_unmap;
+
+ writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->irq_ena);
+ writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->wakeup);
+
+ pr_info("TI gptimer clockevent: %s%lu Hz at %pOF\n",
+ of_find_property(np, "ti,timer-alwon", NULL) ?
+ "always-on " : "", t->rate, np->parent);
+
+ clockevents_config_and_register(dev, t->rate,
+ 3, /* Timer internal resynch latency */
+ 0xffffffff);
+
+ if (of_device_is_compatible(np, "ti,am33xx") ||
+ of_device_is_compatible(np, "ti,am43")) {
+ dev->suspend = omap_clockevent_idle;
+ dev->resume = omap_clockevent_unidle;
+ }
+
+ return 0;
+
+err_out_unmap:
+ iounmap(t->base);
+
+err_out_free:
+ kfree(clkevt);
+
+ return error;
+}
+
+/* Clocksource */
+static struct dmtimer_clocksource *
+to_dmtimer_clocksource(struct clocksource *cs)
+{
+ return container_of(cs, struct dmtimer_clocksource, dev);
+}
+
+static u64 dmtimer_clocksource_read_cycles(struct clocksource *cs)
+{
+ struct dmtimer_clocksource *clksrc = to_dmtimer_clocksource(cs);
+ struct dmtimer_systimer *t = &clksrc->t;
+
+ return (u64)readl_relaxed(t->base + t->counter);
+}
+
+static void __iomem *dmtimer_sched_clock_counter;
+
+static u64 notrace dmtimer_read_sched_clock(void)
+{
+ return readl_relaxed(dmtimer_sched_clock_counter);
+}
+
+static void dmtimer_clocksource_suspend(struct clocksource *cs)
+{
+ struct dmtimer_clocksource *clksrc = to_dmtimer_clocksource(cs);
+ struct dmtimer_systimer *t = &clksrc->t;
+
+ clksrc->loadval = readl_relaxed(t->base + t->counter);
+ dmtimer_systimer_disable(t);
+}
+
+static void dmtimer_clocksource_resume(struct clocksource *cs)
+{
+ struct dmtimer_clocksource *clksrc = to_dmtimer_clocksource(cs);
+ struct dmtimer_systimer *t = &clksrc->t;
+
+ dmtimer_systimer_enable(t);
+ writel_relaxed(clksrc->loadval, t->base + t->counter);
+ writel_relaxed(OMAP_TIMER_CTRL_ST | OMAP_TIMER_CTRL_AR,
+ t->base + t->ctrl);
+}
+
+static int __init dmtimer_clocksource_init(struct device_node *np)
+{
+ struct dmtimer_clocksource *clksrc;
+ struct dmtimer_systimer *t;
+ struct clocksource *dev;
+ int error;
+
+ clksrc = kzalloc(sizeof(*clksrc), GFP_KERNEL);
+ if (!clksrc)
+ return -ENOMEM;
+
+ dev = &clksrc->dev;
+ t = &clksrc->t;
+
+ error = dmtimer_systimer_setup(np, t);
+ if (error)
+ goto err_out_free;
+
+ dev->name = "dmtimer";
+ dev->rating = 300;
+ dev->read = dmtimer_clocksource_read_cycles;
+ dev->mask = CLOCKSOURCE_MASK(32);
+ dev->flags = CLOCK_SOURCE_IS_CONTINUOUS;
+
+ if (of_device_is_compatible(np, "ti,am33xx") ||
+ of_device_is_compatible(np, "ti,am43")) {
+ dev->suspend = dmtimer_clocksource_suspend;
+ dev->resume = dmtimer_clocksource_resume;
+ }
+
+ writel_relaxed(0, t->base + t->counter);
+ writel_relaxed(OMAP_TIMER_CTRL_ST | OMAP_TIMER_CTRL_AR,
+ t->base + t->ctrl);
+
+ pr_info("TI gptimer clocksource: %s%pOF\n",
+ of_find_property(np, "ti,timer-alwon", NULL) ?
+ "always-on " : "", np->parent);
+
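+	/* Only the first registered dmtimer clocksource backs sched_clock */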
+ if (!dmtimer_sched_clock_counter) {
+ dmtimer_sched_clock_counter = t->base + t->counter;
+ sched_clock_register(dmtimer_read_sched_clock, 32, t->rate);
+ }
+
+ if (clocksource_register_hz(dev, t->rate))
+ pr_err("Could not register clocksource %pOF\n", np);
+
+ return 0;
+
+err_out_free:
+ kfree(clksrc);
+
+ return -ENODEV;
+}
+
+/*
+ * To distinguish a clocksource from a clockevent, we assume the device tree
+ * has no interrupts configured for a clocksource timer.
+ */
+static int __init dmtimer_systimer_init(struct device_node *np)
+{
+ const __be32 *addr;
+ u32 pa;
+
+ /* One time init for the preferred timer configuration */
+ if (!clocksource && !clockevent)
+ dmtimer_systimer_select_best();
+
+ if (!clocksource && !clockevent) {
+ pr_err("%s: unable to detect system timers, update dtb?\n",
+ __func__);
+
+ return -EINVAL;
+ }
+
+ addr = of_get_address(np, 0, NULL, NULL);
+ pa = of_translate_address(np, addr);
+ if (!pa)
+ return -EINVAL;
+
+ if (counter_32k <= 0 && clocksource == pa)
+ return dmtimer_clocksource_init(np);
+
+ if (clockevent == pa)
+ return dmtimer_clockevent_init(np);
+
+ return 0;
+}
+
+TIMER_OF_DECLARE(systimer_omap2, "ti,omap2420-timer", dmtimer_systimer_init);
+TIMER_OF_DECLARE(systimer_omap3, "ti,omap3430-timer", dmtimer_systimer_init);
+TIMER_OF_DECLARE(systimer_omap4, "ti,omap4430-timer", dmtimer_systimer_init);
+TIMER_OF_DECLARE(systimer_omap5, "ti,omap5430-timer", dmtimer_systimer_init);
+TIMER_OF_DECLARE(systimer_am33x, "ti,am335x-timer", dmtimer_systimer_init);
+TIMER_OF_DECLARE(systimer_am3ms, "ti,am335x-timer-1ms", dmtimer_systimer_init);
+TIMER_OF_DECLARE(systimer_dm814, "ti,dm814-timer", dmtimer_systimer_init);
+TIMER_OF_DECLARE(systimer_dm816, "ti,dm816-timer", dmtimer_systimer_init);
diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c
index 2531eab3d6d7..60aff087947a 100644
--- a/drivers/clocksource/timer-ti-dm.c
+++ b/drivers/clocksource/timer-ti-dm.c
@@ -258,9 +258,7 @@ static int omap_dm_timer_prepare(struct omap_dm_timer *timer)
__omap_dm_timer_enable_posted(timer);
omap_dm_timer_disable(timer);
- rc = omap_dm_timer_set_source(timer, OMAP_TIMER_SRC_32_KHZ);
-
- return rc;
+ return 0;
}
static inline u32 omap_dm_timer_reserved_systimer(int id)
diff --git a/drivers/clocksource/timer-versatile.c b/drivers/clocksource/timer-versatile.c
index e4ebb656d005..f5d017b31afa 100644
--- a/drivers/clocksource/timer-versatile.c
+++ b/drivers/clocksource/timer-versatile.c
@@ -6,6 +6,7 @@
#include <linux/clocksource.h>
#include <linux/io.h>
+#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/sched_clock.h>
@@ -22,6 +23,8 @@ static int __init versatile_sched_clock_init(struct device_node *node)
{
void __iomem *base = of_iomap(node, 0);
+ of_node_clear_flag(node, OF_POPULATED);
+
if (!base)
return -ENXIO;
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index d58ce664da84..646ad385e490 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -18,6 +18,7 @@
#include <linux/pid_namespace.h>
#include <linux/cn_proc.h>
+#include <linux/local_lock.h>
/*
* Size of a cn_msg followed by a proc_event structure. Since the
@@ -38,25 +39,31 @@ static inline struct cn_msg *buffer_to_cn_msg(__u8 *buffer)
static atomic_t proc_event_num_listeners = ATOMIC_INIT(0);
static struct cb_id cn_proc_event_id = { CN_IDX_PROC, CN_VAL_PROC };
-/* proc_event_counts is used as the sequence number of the netlink message */
-static DEFINE_PER_CPU(__u32, proc_event_counts) = { 0 };
+/* local_event.count is used as the sequence number of the netlink message */
+struct local_event {
+ local_lock_t lock;
+ __u32 count;
+};
+static DEFINE_PER_CPU(struct local_event, local_event) = {
+ .lock = INIT_LOCAL_LOCK(lock),
+};
static inline void send_msg(struct cn_msg *msg)
{
- preempt_disable();
+ local_lock(&local_event.lock);
- msg->seq = __this_cpu_inc_return(proc_event_counts) - 1;
+ msg->seq = __this_cpu_inc_return(local_event.count) - 1;
((struct proc_event *)msg->data)->cpu = smp_processor_id();
/*
- * Preemption remains disabled during send to ensure the messages are
- * ordered according to their sequence numbers.
+ * local_lock() disables preemption during send to ensure the messages
+ * are ordered according to their sequence numbers.
*
* If cn_netlink_send() fails, the data is not sent.
*/
cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_NOWAIT);
- preempt_enable();
+ local_unlock(&local_event.lock);
}
void proc_fork_connector(struct task_struct *task)
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index c3e6bd59e920..e91750132552 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -323,7 +323,8 @@ endif
config QORIQ_CPUFREQ
tristate "CPU frequency scaling driver for Freescale QorIQ SoCs"
- depends on OF && COMMON_CLK && (PPC_E500MC || ARM || ARM64)
+ depends on OF && COMMON_CLK
+ depends on PPC_E500MC || SOC_LS1021A || ARCH_LAYERSCAPE || COMPILE_TEST
select CLK_QORIQ
help
This adds the CPUFreq driver support for Freescale QorIQ SoCs
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 15c1a1231516..c6cbfc8baf72 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -295,11 +295,11 @@ config ARM_TANGO_CPUFREQ
default y
config ARM_TEGRA20_CPUFREQ
- tristate "Tegra20 CPUFreq support"
- depends on ARCH_TEGRA
+ tristate "Tegra20/30 CPUFreq support"
+ depends on ARCH_TEGRA && CPUFREQ_DT
default y
help
- This adds the CPUFreq driver support for Tegra20 SOCs.
+ This adds the CPUFreq driver support for Tegra20/30 SOCs.
config ARM_TEGRA124_CPUFREQ
bool "Tegra124 CPUFreq support"
@@ -317,6 +317,7 @@ config ARM_TEGRA186_CPUFREQ
config ARM_TI_CPUFREQ
bool "Texas Instruments CPUFreq support"
depends on ARCH_OMAP2PLUS
+ default ARCH_OMAP2PLUS
help
This driver enables valid OPPs on the running platform based on
values contained within the SoC in use. Enable this in order to
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 289e8ce3fd13..429e5a36c08a 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -126,12 +126,12 @@ static void boost_set_msr_each(void *p_en)
boost_set_msr(enable);
}
-static int set_boost(int val)
+static int set_boost(struct cpufreq_policy *policy, int val)
{
- get_online_cpus();
- on_each_cpu(boost_set_msr_each, (void *)(long)val, 1);
- put_online_cpus();
- pr_debug("Core Boosting %sabled.\n", val ? "en" : "dis");
+ on_each_cpu_mask(policy->cpus, boost_set_msr_each,
+ (void *)(long)val, 1);
+ pr_debug("CPU %*pbl: Core Boosting %sabled.\n",
+ cpumask_pr_args(policy->cpus), val ? "en" : "dis");
return 0;
}
@@ -162,7 +162,9 @@ static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf,
if (ret || val > 1)
return -EINVAL;
- set_boost(val);
+ get_online_cpus();
+ set_boost(policy, val);
+ put_online_cpus();
return count;
}
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index bda0b2406fba..257d726a4456 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -37,6 +37,7 @@
* requested etc.
*/
static struct cppc_cpudata **all_cpu_data;
+static bool boost_supported;
struct cppc_workaround_oem_info {
char oem_id[ACPI_OEM_ID_SIZE + 1];
@@ -310,7 +311,7 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
* Section 8.4.7.1.1.5 of ACPI 6.1 spec)
*/
policy->min = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.lowest_nonlinear_perf);
- policy->max = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.highest_perf);
+ policy->max = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.nominal_perf);
/*
* Set cpuinfo.min_freq to Lowest to make the full range of performance
@@ -318,7 +319,7 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
* nonlinear perf
*/
policy->cpuinfo.min_freq = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.lowest_perf);
- policy->cpuinfo.max_freq = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.highest_perf);
+ policy->cpuinfo.max_freq = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.nominal_perf);
policy->transition_delay_us = cppc_cpufreq_get_transition_delay_us(cpu_num);
policy->shared_type = cpu->shared_type;
@@ -343,6 +344,13 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
cpu->cur_policy = policy;
+ /*
+ * If 'highest_perf' is greater than 'nominal_perf', we assume CPU Boost
+ * is supported.
+ */
+ if (cpu->perf_caps.highest_perf > cpu->perf_caps.nominal_perf)
+ boost_supported = true;
+
/* Set policy->cur to max now. The governors will adjust later. */
policy->cur = cppc_cpufreq_perf_to_khz(cpu,
cpu->perf_caps.highest_perf);
@@ -410,6 +418,32 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpunum)
return cppc_get_rate_from_fbctrs(cpu, fb_ctrs_t0, fb_ctrs_t1);
}
+static int cppc_cpufreq_set_boost(struct cpufreq_policy *policy, int state)
+{
+ struct cppc_cpudata *cpudata;
+ int ret;
+
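+	/*
+	 * Boost only moves the policy maximum between the nominal and
+	 * highest performance levels; the freq QoS request below propagates
+	 * the new limit.
+	 */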
+ if (!boost_supported) {
+ pr_err("BOOST not supported by CPU or firmware\n");
+ return -EINVAL;
+ }
+
+ cpudata = all_cpu_data[policy->cpu];
+ if (state)
+ policy->max = cppc_cpufreq_perf_to_khz(cpudata,
+ cpudata->perf_caps.highest_perf);
+ else
+ policy->max = cppc_cpufreq_perf_to_khz(cpudata,
+ cpudata->perf_caps.nominal_perf);
+ policy->cpuinfo.max_freq = policy->max;
+
+ ret = freq_qos_update_request(policy->max_freq_req, policy->max);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
static struct cpufreq_driver cppc_cpufreq_driver = {
.flags = CPUFREQ_CONST_LOOPS,
.verify = cppc_verify_policy,
@@ -417,6 +451,7 @@ static struct cpufreq_driver cppc_cpufreq_driver = {
.get = cppc_cpufreq_get_rate,
.init = cppc_cpufreq_cpu_init,
.stop_cpu = cppc_cpufreq_stop_cpu,
+ .set_boost = cppc_cpufreq_set_boost,
.name = "cppc_cpufreq",
};
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index cb9db16bea61..e8e20fef400b 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -53,6 +53,7 @@ static const struct of_device_id whitelist[] __initconst = {
{ .compatible = "renesas,r7s72100", },
{ .compatible = "renesas,r8a73a4", },
{ .compatible = "renesas,r8a7740", },
+ { .compatible = "renesas,r8a7742", },
{ .compatible = "renesas,r8a7743", },
{ .compatible = "renesas,r8a7744", },
{ .compatible = "renesas,r8a7745", },
@@ -105,6 +106,7 @@ static const struct of_device_id blacklist[] __initconst = {
{ .compatible = "calxeda,highbank", },
{ .compatible = "calxeda,ecx-2000", },
+ { .compatible = "fsl,imx7ulp", },
{ .compatible = "fsl,imx7d", },
{ .compatible = "fsl,imx8mq", },
{ .compatible = "fsl,imx8mm", },
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 26fe8dfb9ce6..79742bbd221f 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -121,6 +121,10 @@ static int resources_available(void)
clk_put(cpu_clk);
+ ret = dev_pm_opp_of_find_icc_paths(cpu_dev, NULL);
+ if (ret)
+ return ret;
+
name = find_supply_name(cpu_dev);
/* Platform doesn't require regulator */
if (!name)
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 045f9fe157ce..0128de3603df 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -2532,33 +2532,29 @@ EXPORT_SYMBOL_GPL(cpufreq_update_limits);
/*********************************************************************
* BOOST *
*********************************************************************/
-static int cpufreq_boost_set_sw(int state)
+static int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state)
{
- struct cpufreq_policy *policy;
- int ret = -EINVAL;
-
- for_each_active_policy(policy) {
- if (!policy->freq_table)
- continue;
+ int ret;
- ret = cpufreq_frequency_table_cpuinfo(policy,
- policy->freq_table);
- if (ret) {
- pr_err("%s: Policy frequency update failed\n",
- __func__);
- break;
- }
+ if (!policy->freq_table)
+ return -ENXIO;
- ret = freq_qos_update_request(policy->max_freq_req, policy->max);
- if (ret < 0)
- break;
+ ret = cpufreq_frequency_table_cpuinfo(policy, policy->freq_table);
+ if (ret) {
+ pr_err("%s: Policy frequency update failed\n", __func__);
+ return ret;
}
- return ret;
+ ret = freq_qos_update_request(policy->max_freq_req, policy->max);
+ if (ret < 0)
+ return ret;
+
+ return 0;
}
int cpufreq_boost_trigger_state(int state)
{
+ struct cpufreq_policy *policy;
unsigned long flags;
int ret = 0;
@@ -2569,15 +2565,25 @@ int cpufreq_boost_trigger_state(int state)
cpufreq_driver->boost_enabled = state;
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
- ret = cpufreq_driver->set_boost(state);
- if (ret) {
- write_lock_irqsave(&cpufreq_driver_lock, flags);
- cpufreq_driver->boost_enabled = !state;
- write_unlock_irqrestore(&cpufreq_driver_lock, flags);
-
- pr_err("%s: Cannot %s BOOST\n",
- __func__, state ? "enable" : "disable");
+ get_online_cpus();
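+	/* Hold the CPU hotplug lock so the set of active policies stays stable */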
+ for_each_active_policy(policy) {
+ ret = cpufreq_driver->set_boost(policy, state);
+ if (ret)
+ goto err_reset_state;
}
+ put_online_cpus();
+
+ return 0;
+
+err_reset_state:
+ put_online_cpus();
+
+ write_lock_irqsave(&cpufreq_driver_lock, flags);
+ cpufreq_driver->boost_enabled = !state;
+ write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+ pr_err("%s: Cannot %s BOOST\n",
+ __func__, state ? "enable" : "disable");
return ret;
}
diff --git a/drivers/cpufreq/imx-cpufreq-dt.c b/drivers/cpufreq/imx-cpufreq-dt.c
index de206d2745fe..3fe9125156b4 100644
--- a/drivers/cpufreq/imx-cpufreq-dt.c
+++ b/drivers/cpufreq/imx-cpufreq-dt.c
@@ -3,7 +3,9 @@
* Copyright 2019 NXP
*/
+#include <linux/clk.h>
#include <linux/cpu.h>
+#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -12,8 +14,11 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
+#include <linux/regulator/consumer.h>
#include <linux/slab.h>
+#include "cpufreq-dt.h"
+
#define OCOTP_CFG3_SPEED_GRADE_SHIFT 8
#define OCOTP_CFG3_SPEED_GRADE_MASK (0x3 << 8)
#define IMX8MN_OCOTP_CFG3_SPEED_GRADE_MASK (0xf << 8)
@@ -22,20 +27,92 @@
#define IMX8MP_OCOTP_CFG3_MKT_SEGMENT_SHIFT 5
#define IMX8MP_OCOTP_CFG3_MKT_SEGMENT_MASK (0x3 << 5)
+#define IMX7ULP_MAX_RUN_FREQ 528000
+
/* cpufreq-dt device registered by imx-cpufreq-dt */
static struct platform_device *cpufreq_dt_pdev;
static struct opp_table *cpufreq_opp_table;
+static struct device *cpu_dev;
+
+enum IMX7ULP_CPUFREQ_CLKS {
+ ARM,
+ CORE,
+ SCS_SEL,
+ HSRUN_CORE,
+ HSRUN_SCS_SEL,
+ FIRC,
+};
+
+static struct clk_bulk_data imx7ulp_clks[] = {
+ { .id = "arm" },
+ { .id = "core" },
+ { .id = "scs_sel" },
+ { .id = "hsrun_core" },
+ { .id = "hsrun_scs_sel" },
+ { .id = "firc" },
+};
+
+static unsigned int imx7ulp_get_intermediate(struct cpufreq_policy *policy,
+ unsigned int index)
+{
+ return clk_get_rate(imx7ulp_clks[FIRC].clk);
+}
+
+static int imx7ulp_target_intermediate(struct cpufreq_policy *policy,
+ unsigned int index)
+{
+ unsigned int newfreq = policy->freq_table[index].frequency;
+
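+	/*
+	 * Park both the RUN and HSRUN clock selectors on the FIRC
+	 * intermediate clock, then source the ARM clock from the HSRUN path
+	 * only when the requested rate exceeds the normal RUN mode limit.
+	 */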
+ clk_set_parent(imx7ulp_clks[SCS_SEL].clk, imx7ulp_clks[FIRC].clk);
+ clk_set_parent(imx7ulp_clks[HSRUN_SCS_SEL].clk, imx7ulp_clks[FIRC].clk);
+
+ if (newfreq > IMX7ULP_MAX_RUN_FREQ)
+ clk_set_parent(imx7ulp_clks[ARM].clk,
+ imx7ulp_clks[HSRUN_CORE].clk);
+ else
+ clk_set_parent(imx7ulp_clks[ARM].clk, imx7ulp_clks[CORE].clk);
+
+ return 0;
+}
+
+static struct cpufreq_dt_platform_data imx7ulp_data = {
+ .target_intermediate = imx7ulp_target_intermediate,
+ .get_intermediate = imx7ulp_get_intermediate,
+};
static int imx_cpufreq_dt_probe(struct platform_device *pdev)
{
- struct device *cpu_dev = get_cpu_device(0);
+ struct platform_device *dt_pdev;
u32 cell_value, supported_hw[2];
int speed_grade, mkt_segment;
int ret;
+ cpu_dev = get_cpu_device(0);
+
if (!of_find_property(cpu_dev->of_node, "cpu-supply", NULL))
return -ENODEV;
+ if (of_machine_is_compatible("fsl,imx7ulp")) {
+ ret = clk_bulk_get(cpu_dev, ARRAY_SIZE(imx7ulp_clks),
+ imx7ulp_clks);
+ if (ret)
+ return ret;
+
+ dt_pdev = platform_device_register_data(NULL, "cpufreq-dt",
+ -1, &imx7ulp_data,
+ sizeof(imx7ulp_data));
+ if (IS_ERR(dt_pdev)) {
+ clk_bulk_put(ARRAY_SIZE(imx7ulp_clks), imx7ulp_clks);
+ ret = PTR_ERR(dt_pdev);
+ dev_err(&pdev->dev, "Failed to register cpufreq-dt: %d\n", ret);
+ return ret;
+ }
+
+ cpufreq_dt_pdev = dt_pdev;
+
+ return 0;
+ }
+
ret = nvmem_cell_read_u32(cpu_dev, "speed_grade", &cell_value);
if (ret)
return ret;
@@ -98,7 +175,10 @@ static int imx_cpufreq_dt_probe(struct platform_device *pdev)
static int imx_cpufreq_dt_remove(struct platform_device *pdev)
{
platform_device_unregister(cpufreq_dt_pdev);
- dev_pm_opp_put_supported_hw(cpufreq_opp_table);
+ if (!of_machine_is_compatible("fsl,imx7ulp"))
+ dev_pm_opp_put_supported_hw(cpufreq_opp_table);
+ else
+ clk_bulk_put(ARRAY_SIZE(imx7ulp_clks), imx7ulp_clks);
return 0;
}
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 4d3429b2058f..8e23a698ce04 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -2771,6 +2771,8 @@ static int __init intel_pstate_init(void)
pr_info("Invalid MSRs\n");
return -ENODEV;
}
+	/* Without HWP, start in the passive mode. */
+ default_driver = &intel_cpufreq;
hwp_cpu_matched:
/*
@@ -2816,7 +2818,6 @@ static int __init intel_pstate_setup(char *str)
if (!strcmp(str, "disable")) {
no_load = 1;
} else if (!strcmp(str, "passive")) {
- pr_info("Passive mode enabled\n");
default_driver = &intel_cpufreq;
no_hwp = 1;
}
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c
index 909f40fbcde2..d05e761d9572 100644
--- a/drivers/cpufreq/loongson2_cpufreq.c
+++ b/drivers/cpufreq/loongson2_cpufreq.c
@@ -20,7 +20,6 @@
#include <linux/delay.h>
#include <linux/platform_device.h>
-#include <asm/clock.h>
#include <asm/idle.h>
#include <asm/mach-loongson2ef/loongson.h>
@@ -58,29 +57,20 @@ static int loongson2_cpufreq_target(struct cpufreq_policy *policy,
loongson2_clockmod_table[index].driver_data) / 8;
/* setting the cpu frequency */
- clk_set_rate(policy->clk, freq * 1000);
+ loongson2_cpu_set_rate(freq);
return 0;
}
static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
- struct clk *cpuclk;
int i;
unsigned long rate;
int ret;
- cpuclk = clk_get(NULL, "cpu_clk");
- if (IS_ERR(cpuclk)) {
- pr_err("couldn't get CPU clk\n");
- return PTR_ERR(cpuclk);
- }
-
rate = cpu_clock_freq / 1000;
- if (!rate) {
- clk_put(cpuclk);
+ if (!rate)
return -EINVAL;
- }
/* clock table init */
for (i = 2;
@@ -88,20 +78,16 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
i++)
loongson2_clockmod_table[i].frequency = (rate * i) / 8;
- ret = clk_set_rate(cpuclk, rate * 1000);
- if (ret) {
- clk_put(cpuclk);
+ ret = loongson2_cpu_set_rate(rate);
+ if (ret)
return ret;
- }
- policy->clk = cpuclk;
cpufreq_generic_init(policy, &loongson2_clockmod_table[0], 0);
return 0;
}
static int loongson2_cpufreq_exit(struct cpufreq_policy *policy)
{
- clk_put(policy->clk);
return 0;
}
diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c
index a1b8238872a2..d06b37822c3d 100644
--- a/drivers/cpufreq/qcom-cpufreq-nvmem.c
+++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c
@@ -277,7 +277,7 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
if (!np)
return -ENOENT;
- ret = of_device_is_compatible(np, "operating-points-v2-qcom-cpu");
+ ret = of_device_is_compatible(np, "operating-points-v2-kryo-cpu");
if (!ret) {
of_node_put(np);
return -ENOENT;
diff --git a/drivers/cpufreq/qoriq-cpufreq.c b/drivers/cpufreq/qoriq-cpufreq.c
index 8e436dc75c8b..6b6b20da2bcf 100644
--- a/drivers/cpufreq/qoriq-cpufreq.c
+++ b/drivers/cpufreq/qoriq-cpufreq.c
@@ -18,6 +18,7 @@
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/smp.h>
+#include <linux/platform_device.h>
/**
* struct cpu_data
@@ -29,12 +30,6 @@ struct cpu_data {
struct cpufreq_frequency_table *table;
};
-/*
- * Don't use cpufreq on this SoC -- used when the SoC would have otherwise
- * matched a more generic compatible.
- */
-#define SOC_BLACKLIST 1
-
/**
* struct soc_data - SoC specific data
* @flags: SOC_xxx
@@ -264,64 +259,51 @@ static struct cpufreq_driver qoriq_cpufreq_driver = {
.attr = cpufreq_generic_attr,
};
-static const struct soc_data blacklist = {
- .flags = SOC_BLACKLIST,
-};
-
-static const struct of_device_id node_matches[] __initconst = {
+static const struct of_device_id qoriq_cpufreq_blacklist[] = {
/* e6500 cannot use cpufreq due to erratum A-008083 */
- { .compatible = "fsl,b4420-clockgen", &blacklist },
- { .compatible = "fsl,b4860-clockgen", &blacklist },
- { .compatible = "fsl,t2080-clockgen", &blacklist },
- { .compatible = "fsl,t4240-clockgen", &blacklist },
-
- { .compatible = "fsl,ls1012a-clockgen", },
- { .compatible = "fsl,ls1021a-clockgen", },
- { .compatible = "fsl,ls1028a-clockgen", },
- { .compatible = "fsl,ls1043a-clockgen", },
- { .compatible = "fsl,ls1046a-clockgen", },
- { .compatible = "fsl,ls1088a-clockgen", },
- { .compatible = "fsl,ls2080a-clockgen", },
- { .compatible = "fsl,lx2160a-clockgen", },
- { .compatible = "fsl,p4080-clockgen", },
- { .compatible = "fsl,qoriq-clockgen-1.0", },
- { .compatible = "fsl,qoriq-clockgen-2.0", },
+ { .compatible = "fsl,b4420-clockgen", },
+ { .compatible = "fsl,b4860-clockgen", },
+ { .compatible = "fsl,t2080-clockgen", },
+ { .compatible = "fsl,t4240-clockgen", },
{}
};
-static int __init qoriq_cpufreq_init(void)
+static int qoriq_cpufreq_probe(struct platform_device *pdev)
{
int ret;
- struct device_node *np;
- const struct of_device_id *match;
- const struct soc_data *data;
-
- np = of_find_matching_node(NULL, node_matches);
- if (!np)
- return -ENODEV;
-
- match = of_match_node(node_matches, np);
- data = match->data;
-
- of_node_put(np);
+ struct device_node *np;
- if (data && data->flags & SOC_BLACKLIST)
+ np = of_find_matching_node(NULL, qoriq_cpufreq_blacklist);
+ if (np) {
+		dev_info(&pdev->dev, "Disabling due to erratum A-008083\n");
return -ENODEV;
+ }
ret = cpufreq_register_driver(&qoriq_cpufreq_driver);
- if (!ret)
- pr_info("Freescale QorIQ CPU frequency scaling driver\n");
+ if (ret)
+ return ret;
- return ret;
+ dev_info(&pdev->dev, "Freescale QorIQ CPU frequency scaling driver\n");
+ return 0;
}
-module_init(qoriq_cpufreq_init);
-static void __exit qoriq_cpufreq_exit(void)
+static int qoriq_cpufreq_remove(struct platform_device *pdev)
{
cpufreq_unregister_driver(&qoriq_cpufreq_driver);
+
+ return 0;
}
-module_exit(qoriq_cpufreq_exit);
+static struct platform_driver qoriq_cpufreq_platform_driver = {
+ .driver = {
+ .name = "qoriq-cpufreq",
+ },
+ .probe = qoriq_cpufreq_probe,
+ .remove = qoriq_cpufreq_remove,
+};
+module_platform_driver(qoriq_cpufreq_platform_driver);
+
+MODULE_ALIAS("platform:qoriq-cpufreq");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Tang Yuantian <Yuantian.Tang@freescale.com>");
MODULE_DESCRIPTION("cpufreq driver for Freescale QorIQ series SoCs");
diff --git a/drivers/cpufreq/tegra186-cpufreq.c b/drivers/cpufreq/tegra186-cpufreq.c
index 2e233ad72758..3d2f143748ef 100644
--- a/drivers/cpufreq/tegra186-cpufreq.c
+++ b/drivers/cpufreq/tegra186-cpufreq.c
@@ -93,7 +93,8 @@ static int tegra186_cpufreq_set_target(struct cpufreq_policy *policy,
static struct cpufreq_driver tegra186_cpufreq_driver = {
.name = "tegra186",
- .flags = CPUFREQ_STICKY | CPUFREQ_HAVE_GOVERNOR_PER_POLICY,
+ .flags = CPUFREQ_STICKY | CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
+ CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = tegra186_cpufreq_set_target,
.init = tegra186_cpufreq_init,
diff --git a/drivers/cpufreq/tegra20-cpufreq.c b/drivers/cpufreq/tegra20-cpufreq.c
index f84ecd22f488..8c893043953e 100644
--- a/drivers/cpufreq/tegra20-cpufreq.c
+++ b/drivers/cpufreq/tegra20-cpufreq.c
@@ -7,201 +7,96 @@
* Based on arch/arm/plat-omap/cpu-omap.c, (C) 2005 Nokia Corporation
*/
-#include <linux/clk.h>
-#include <linux/cpufreq.h>
+#include <linux/bits.h>
+#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
#include <linux/types.h>
-static struct cpufreq_frequency_table freq_table[] = {
- { .frequency = 216000 },
- { .frequency = 312000 },
- { .frequency = 456000 },
- { .frequency = 608000 },
- { .frequency = 760000 },
- { .frequency = 816000 },
- { .frequency = 912000 },
- { .frequency = 1000000 },
- { .frequency = CPUFREQ_TABLE_END },
-};
-
-struct tegra20_cpufreq {
- struct device *dev;
- struct cpufreq_driver driver;
- struct clk *cpu_clk;
- struct clk *pll_x_clk;
- struct clk *pll_p_clk;
- bool pll_x_prepared;
-};
+#include <soc/tegra/common.h>
+#include <soc/tegra/fuse.h>
-static unsigned int tegra_get_intermediate(struct cpufreq_policy *policy,
- unsigned int index)
+static bool cpu0_node_has_opp_v2_prop(void)
{
- struct tegra20_cpufreq *cpufreq = cpufreq_get_driver_data();
- unsigned int ifreq = clk_get_rate(cpufreq->pll_p_clk) / 1000;
-
- /*
- * Don't switch to intermediate freq if:
- * - we are already at it, i.e. policy->cur == ifreq
- * - index corresponds to ifreq
- */
- if (freq_table[index].frequency == ifreq || policy->cur == ifreq)
- return 0;
-
- return ifreq;
-}
+ struct device_node *np = of_cpu_device_node_get(0);
+ bool ret = false;
-static int tegra_target_intermediate(struct cpufreq_policy *policy,
- unsigned int index)
-{
- struct tegra20_cpufreq *cpufreq = cpufreq_get_driver_data();
- int ret;
-
- /*
- * Take an extra reference to the main pll so it doesn't turn
- * off when we move the cpu off of it as enabling it again while we
- * switch to it from tegra_target() would take additional time.
- *
- * When target-freq is equal to intermediate freq we don't need to
- * switch to an intermediate freq and so this routine isn't called.
- * Also, we wouldn't be using pll_x anymore and must not take extra
- * reference to it, as it can be disabled now to save some power.
- */
- clk_prepare_enable(cpufreq->pll_x_clk);
-
- ret = clk_set_parent(cpufreq->cpu_clk, cpufreq->pll_p_clk);
- if (ret)
- clk_disable_unprepare(cpufreq->pll_x_clk);
- else
- cpufreq->pll_x_prepared = true;
+ if (of_get_property(np, "operating-points-v2", NULL))
+ ret = true;
+ of_node_put(np);
return ret;
}
-static int tegra_target(struct cpufreq_policy *policy, unsigned int index)
-{
- struct tegra20_cpufreq *cpufreq = cpufreq_get_driver_data();
- unsigned long rate = freq_table[index].frequency;
- unsigned int ifreq = clk_get_rate(cpufreq->pll_p_clk) / 1000;
- int ret;
-
- /*
- * target freq == pll_p, don't need to take extra reference to pll_x_clk
- * as it isn't used anymore.
- */
- if (rate == ifreq)
- return clk_set_parent(cpufreq->cpu_clk, cpufreq->pll_p_clk);
-
- ret = clk_set_rate(cpufreq->pll_x_clk, rate * 1000);
- /* Restore to earlier frequency on error, i.e. pll_x */
- if (ret)
- dev_err(cpufreq->dev, "Failed to change pll_x to %lu\n", rate);
-
- ret = clk_set_parent(cpufreq->cpu_clk, cpufreq->pll_x_clk);
- /* This shouldn't fail while changing or restoring */
- WARN_ON(ret);
-
- /*
- * Drop count to pll_x clock only if we switched to intermediate freq
- * earlier while transitioning to a target frequency.
- */
- if (cpufreq->pll_x_prepared) {
- clk_disable_unprepare(cpufreq->pll_x_clk);
- cpufreq->pll_x_prepared = false;
- }
-
- return ret;
-}
-
-static int tegra_cpu_init(struct cpufreq_policy *policy)
-{
- struct tegra20_cpufreq *cpufreq = cpufreq_get_driver_data();
-
- clk_prepare_enable(cpufreq->cpu_clk);
-
- /* FIXME: what's the actual transition time? */
- cpufreq_generic_init(policy, freq_table, 300 * 1000);
- policy->clk = cpufreq->cpu_clk;
- policy->suspend_freq = freq_table[0].frequency;
- return 0;
-}
-
-static int tegra_cpu_exit(struct cpufreq_policy *policy)
-{
- struct tegra20_cpufreq *cpufreq = cpufreq_get_driver_data();
-
- clk_disable_unprepare(cpufreq->cpu_clk);
- return 0;
-}
-
static int tegra20_cpufreq_probe(struct platform_device *pdev)
{
- struct tegra20_cpufreq *cpufreq;
+ struct platform_device *cpufreq_dt;
+ struct opp_table *opp_table;
+ struct device *cpu_dev;
+ u32 versions[2];
int err;
- cpufreq = devm_kzalloc(&pdev->dev, sizeof(*cpufreq), GFP_KERNEL);
- if (!cpufreq)
- return -ENOMEM;
+ if (!cpu0_node_has_opp_v2_prop()) {
+ dev_err(&pdev->dev, "operating points not found\n");
+ dev_err(&pdev->dev, "please update your device tree\n");
+ return -ENODEV;
+ }
+
+ if (of_machine_is_compatible("nvidia,tegra20")) {
+ versions[0] = BIT(tegra_sku_info.cpu_process_id);
+ versions[1] = BIT(tegra_sku_info.soc_speedo_id);
+ } else {
+ versions[0] = BIT(tegra_sku_info.cpu_process_id);
+ versions[1] = BIT(tegra_sku_info.cpu_speedo_id);
+ }
+
+ dev_info(&pdev->dev, "hardware version 0x%x 0x%x\n",
+ versions[0], versions[1]);
- cpufreq->cpu_clk = clk_get_sys(NULL, "cclk");
- if (IS_ERR(cpufreq->cpu_clk))
- return PTR_ERR(cpufreq->cpu_clk);
+ cpu_dev = get_cpu_device(0);
+ if (WARN_ON(!cpu_dev))
+ return -ENODEV;
- cpufreq->pll_x_clk = clk_get_sys(NULL, "pll_x");
- if (IS_ERR(cpufreq->pll_x_clk)) {
- err = PTR_ERR(cpufreq->pll_x_clk);
- goto put_cpu;
+ opp_table = dev_pm_opp_set_supported_hw(cpu_dev, versions, 2);
+ err = PTR_ERR_OR_ZERO(opp_table);
+ if (err) {
+ dev_err(&pdev->dev, "failed to set supported hw: %d\n", err);
+ return err;
}
- cpufreq->pll_p_clk = clk_get_sys(NULL, "pll_p");
- if (IS_ERR(cpufreq->pll_p_clk)) {
- err = PTR_ERR(cpufreq->pll_p_clk);
- goto put_pll_x;
+ cpufreq_dt = platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
+ err = PTR_ERR_OR_ZERO(cpufreq_dt);
+ if (err) {
+ dev_err(&pdev->dev,
+ "failed to create cpufreq-dt device: %d\n", err);
+ goto err_put_supported_hw;
}
- cpufreq->dev = &pdev->dev;
- cpufreq->driver.get = cpufreq_generic_get;
- cpufreq->driver.attr = cpufreq_generic_attr;
- cpufreq->driver.init = tegra_cpu_init;
- cpufreq->driver.exit = tegra_cpu_exit;
- cpufreq->driver.flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK;
- cpufreq->driver.verify = cpufreq_generic_frequency_table_verify;
- cpufreq->driver.suspend = cpufreq_generic_suspend;
- cpufreq->driver.driver_data = cpufreq;
- cpufreq->driver.target_index = tegra_target;
- cpufreq->driver.get_intermediate = tegra_get_intermediate;
- cpufreq->driver.target_intermediate = tegra_target_intermediate;
- snprintf(cpufreq->driver.name, CPUFREQ_NAME_LEN, "tegra");
-
- err = cpufreq_register_driver(&cpufreq->driver);
- if (err)
- goto put_pll_p;
-
- platform_set_drvdata(pdev, cpufreq);
+ platform_set_drvdata(pdev, cpufreq_dt);
return 0;
-put_pll_p:
- clk_put(cpufreq->pll_p_clk);
-put_pll_x:
- clk_put(cpufreq->pll_x_clk);
-put_cpu:
- clk_put(cpufreq->cpu_clk);
+err_put_supported_hw:
+ dev_pm_opp_put_supported_hw(opp_table);
return err;
}
static int tegra20_cpufreq_remove(struct platform_device *pdev)
{
- struct tegra20_cpufreq *cpufreq = platform_get_drvdata(pdev);
+ struct platform_device *cpufreq_dt;
+ struct opp_table *opp_table;
- cpufreq_unregister_driver(&cpufreq->driver);
+ cpufreq_dt = platform_get_drvdata(pdev);
+ platform_device_unregister(cpufreq_dt);
- clk_put(cpufreq->pll_p_clk);
- clk_put(cpufreq->pll_x_clk);
- clk_put(cpufreq->cpu_clk);
+ opp_table = dev_pm_opp_get_opp_table(get_cpu_device(0));
+ dev_pm_opp_put_supported_hw(opp_table);
+ dev_pm_opp_put_opp_table(opp_table);
return 0;
}
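
Note: the rewritten probe above no longer drives the clocks itself; it only tells the OPP core which opp-supported-hw bits this chip matches and spawns the generic cpufreq-dt device. A hedged, minimal sketch of that OPP handshake (illustrative helper name, assuming the CPU node's OPP table carries opp-supported-hw entries as in the hunk above):

    #include <linux/bits.h>
    #include <linux/cpu.h>
    #include <linux/err.h>
    #include <linux/pm_opp.h>

    static struct opp_table *limit_opps_to_this_chip(u32 process_id, u32 speedo_id)
    {
            u32 versions[2] = { BIT(process_id), BIT(speedo_id) };
            struct device *cpu_dev = get_cpu_device(0);

            if (!cpu_dev)
                    return ERR_PTR(-ENODEV);

            /* Keep only the OPPs whose opp-supported-hw bits match this chip;
             * cpufreq-dt then picks from what remains. */
            return dev_pm_opp_set_supported_hw(cpu_dev, versions, 2);
    }
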
diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm
index 99a2d72ac02b..51a7e89085c0 100644
--- a/drivers/cpuidle/Kconfig.arm
+++ b/drivers/cpuidle/Kconfig.arm
@@ -94,3 +94,16 @@ config ARM_TEGRA_CPUIDLE
select ARM_CPU_SUSPEND
help
Select this to enable cpuidle for NVIDIA Tegra20/30/114/124 SoCs.
+
+config ARM_QCOM_SPM_CPUIDLE
+ bool "CPU Idle Driver for Qualcomm Subsystem Power Manager (SPM)"
+ depends on (ARCH_QCOM || COMPILE_TEST) && !ARM64
+ select ARM_CPU_SUSPEND
+ select CPU_IDLE_MULTIPLE_DRIVERS
+ select DT_IDLE_STATES
+ select QCOM_SCM
+ help
+ Select this to enable cpuidle for Qualcomm processors.
+ The Subsystem Power Manager (SPM) controls low power modes for the
+ CPU and L2 cores. It interfaces with various system drivers to put
+ the cores in low power modes.
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index 55a464f6a78b..f07800cbb43f 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -25,6 +25,7 @@ obj-$(CONFIG_ARM_PSCI_CPUIDLE) += cpuidle_psci.o
cpuidle_psci-y := cpuidle-psci.o
cpuidle_psci-$(CONFIG_PM_GENERIC_DOMAINS_OF) += cpuidle-psci-domain.o
obj-$(CONFIG_ARM_TEGRA_CPUIDLE) += cpuidle-tegra.o
+obj-$(CONFIG_ARM_QCOM_SPM_CPUIDLE) += cpuidle-qcom-spm.o
###############################################################################
# MIPS drivers
diff --git a/drivers/cpuidle/cpuidle-psci.c b/drivers/cpuidle/cpuidle-psci.c
index bae9140a65a5..d0fb585073c6 100644
--- a/drivers/cpuidle/cpuidle-psci.c
+++ b/drivers/cpuidle/cpuidle-psci.c
@@ -58,6 +58,10 @@ static int psci_enter_domain_idle_state(struct cpuidle_device *dev,
u32 state;
int ret;
+ ret = cpu_pm_enter();
+ if (ret)
+ return -1;
+
/* Do runtime PM to manage a hierarchical CPU topology. */
pm_runtime_put_sync_suspend(pd_dev);
@@ -65,10 +69,12 @@ static int psci_enter_domain_idle_state(struct cpuidle_device *dev,
if (!state)
state = states[idx];
- ret = psci_enter_state(idx, state);
+ ret = psci_cpu_suspend_enter(state) ? -1 : idx;
pm_runtime_get_sync(pd_dev);
+ cpu_pm_exit();
+
/* Clear the domain state to start fresh when back from idle. */
psci_set_domain_state(0);
return ret;
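
Note: the hunk above brackets domain-idle entry with cpu_pm_enter()/cpu_pm_exit() and calls psci_cpu_suspend_enter() directly instead of psci_enter_state(). A minimal sketch of the bracketing pattern (not the driver's exact code; the function name is illustrative):

    #include <linux/cpu_pm.h>
    #include <linux/device.h>
    #include <linux/pm_runtime.h>
    #include <linux/psci.h>

    static int enter_domain_idle(struct device *pd_dev, u32 state)
    {
            int ret;

            ret = cpu_pm_enter();                   /* CPU PM notifiers may veto entry */
            if (ret)
                    return -1;

            pm_runtime_put_sync_suspend(pd_dev);    /* let genpd power the domain down */
            ret = psci_cpu_suspend_enter(state) ? -1 : 0;
            pm_runtime_get_sync(pd_dev);            /* power the domain back up */

            cpu_pm_exit();                          /* undo the CPU PM notification */

            return ret;
    }
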
diff --git a/drivers/cpuidle/cpuidle-pseries.c b/drivers/cpuidle/cpuidle-pseries.c
index 74c247972bb3..6513ef2af66a 100644
--- a/drivers/cpuidle/cpuidle-pseries.c
+++ b/drivers/cpuidle/cpuidle-pseries.c
@@ -19,6 +19,7 @@
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/runlatch.h>
+#include <asm/idle.h>
#include <asm/plpar_wrappers.h>
struct cpuidle_driver pseries_idle_driver = {
@@ -31,39 +32,15 @@ static struct cpuidle_state *cpuidle_state_table __read_mostly;
static u64 snooze_timeout __read_mostly;
static bool snooze_timeout_en __read_mostly;
-static inline void idle_loop_prolog(unsigned long *in_purr)
-{
- ppc64_runlatch_off();
- *in_purr = mfspr(SPRN_PURR);
- /*
- * Indicate to the HV that we are idle. Now would be
- * a good time to find other work to dispatch.
- */
- get_lppaca()->idle = 1;
-}
-
-static inline void idle_loop_epilog(unsigned long in_purr)
-{
- u64 wait_cycles;
-
- wait_cycles = be64_to_cpu(get_lppaca()->wait_state_cycles);
- wait_cycles += mfspr(SPRN_PURR) - in_purr;
- get_lppaca()->wait_state_cycles = cpu_to_be64(wait_cycles);
- get_lppaca()->idle = 0;
-
- ppc64_runlatch_on();
-}
-
static int snooze_loop(struct cpuidle_device *dev,
struct cpuidle_driver *drv,
int index)
{
- unsigned long in_purr;
u64 snooze_exit_time;
set_thread_flag(TIF_POLLING_NRFLAG);
- idle_loop_prolog(&in_purr);
+ pseries_idle_prolog();
local_irq_enable();
snooze_exit_time = get_tb() + snooze_timeout;
@@ -87,7 +64,7 @@ static int snooze_loop(struct cpuidle_device *dev,
local_irq_disable();
- idle_loop_epilog(in_purr);
+ pseries_idle_epilog();
return index;
}
@@ -113,9 +90,8 @@ static int dedicated_cede_loop(struct cpuidle_device *dev,
struct cpuidle_driver *drv,
int index)
{
- unsigned long in_purr;
- idle_loop_prolog(&in_purr);
+ pseries_idle_prolog();
get_lppaca()->donate_dedicated_cpu = 1;
HMT_medium();
@@ -124,7 +100,7 @@ static int dedicated_cede_loop(struct cpuidle_device *dev,
local_irq_disable();
get_lppaca()->donate_dedicated_cpu = 0;
- idle_loop_epilog(in_purr);
+ pseries_idle_epilog();
return index;
}
@@ -133,9 +109,8 @@ static int shared_cede_loop(struct cpuidle_device *dev,
struct cpuidle_driver *drv,
int index)
{
- unsigned long in_purr;
- idle_loop_prolog(&in_purr);
+ pseries_idle_prolog();
/*
* Yield the processor to the hypervisor. We return if
@@ -147,7 +122,7 @@ static int shared_cede_loop(struct cpuidle_device *dev,
check_and_cede_processor();
local_irq_disable();
- idle_loop_epilog(in_purr);
+ pseries_idle_epilog();
return index;
}
diff --git a/drivers/soc/qcom/spm.c b/drivers/cpuidle/cpuidle-qcom-spm.c
index 8e10e02c6aa5..adf91a6e4d7d 100644
--- a/drivers/soc/qcom/spm.c
+++ b/drivers/cpuidle/cpuidle-qcom-spm.c
@@ -19,10 +19,11 @@
#include <linux/cpu_pm.h>
#include <linux/qcom_scm.h>
-#include <asm/cpuidle.h>
#include <asm/proc-fns.h>
#include <asm/suspend.h>
+#include "dt_idle_states.h"
+
#define MAX_PMIC_DATA 2
#define MAX_SEQ_DATA 64
#define SPM_CTL_INDEX 0x7f
@@ -62,6 +63,7 @@ struct spm_reg_data {
};
struct spm_driver_data {
+ struct cpuidle_driver cpuidle_driver;
void __iomem *reg_base;
const struct spm_reg_data *reg_data;
};
@@ -107,11 +109,6 @@ static const struct spm_reg_data spm_reg_8064_cpu = {
.start_index[PM_SLEEP_MODE_SPC] = 2,
};
-static DEFINE_PER_CPU(struct spm_driver_data *, cpu_spm_drv);
-
-typedef int (*idle_fn)(void);
-static DEFINE_PER_CPU(idle_fn*, qcom_idle_ops);
-
static inline void spm_register_write(struct spm_driver_data *drv,
enum spm_reg reg, u32 val)
{
@@ -172,10 +169,9 @@ static int qcom_pm_collapse(unsigned long int unused)
return -1;
}
-static int qcom_cpu_spc(void)
+static int qcom_cpu_spc(struct spm_driver_data *drv)
{
int ret;
- struct spm_driver_data *drv = __this_cpu_read(cpu_spm_drv);
spm_set_low_power_mode(drv, PM_SLEEP_MODE_SPC);
ret = cpu_suspend(0, qcom_pm_collapse);
@@ -190,94 +186,49 @@ static int qcom_cpu_spc(void)
return ret;
}
-static int qcom_idle_enter(unsigned long index)
+static int spm_enter_idle_state(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int idx)
{
- return __this_cpu_read(qcom_idle_ops)[index]();
+ struct spm_driver_data *data = container_of(drv, struct spm_driver_data,
+ cpuidle_driver);
+
+ return CPU_PM_CPU_IDLE_ENTER_PARAM(qcom_cpu_spc, idx, data);
}
-static const struct of_device_id qcom_idle_state_match[] __initconst = {
- { .compatible = "qcom,idle-state-spc", .data = qcom_cpu_spc },
+static struct cpuidle_driver qcom_spm_idle_driver = {
+ .name = "qcom_spm",
+ .owner = THIS_MODULE,
+ .states[0] = {
+ .enter = spm_enter_idle_state,
+ .exit_latency = 1,
+ .target_residency = 1,
+ .power_usage = UINT_MAX,
+ .name = "WFI",
+ .desc = "ARM WFI",
+ }
+};
+
+static const struct of_device_id qcom_idle_state_match[] = {
+ { .compatible = "qcom,idle-state-spc", .data = spm_enter_idle_state },
{ },
};
-static int __init qcom_cpuidle_init(struct device_node *cpu_node, int cpu)
+static int spm_cpuidle_init(struct cpuidle_driver *drv, int cpu)
{
- const struct of_device_id *match_id;
- struct device_node *state_node;
- int i;
- int state_count = 1;
- idle_fn idle_fns[CPUIDLE_STATE_MAX];
- idle_fn *fns;
- cpumask_t mask;
- bool use_scm_power_down = false;
-
- if (!qcom_scm_is_available())
- return -EPROBE_DEFER;
-
- for (i = 0; ; i++) {
- state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
- if (!state_node)
- break;
-
- if (!of_device_is_available(state_node))
- continue;
-
- if (i == CPUIDLE_STATE_MAX) {
- pr_warn("%s: cpuidle states reached max possible\n",
- __func__);
- break;
- }
-
- match_id = of_match_node(qcom_idle_state_match, state_node);
- if (!match_id)
- return -ENODEV;
-
- idle_fns[state_count] = match_id->data;
-
- /* Check if any of the states allow power down */
- if (match_id->data == qcom_cpu_spc)
- use_scm_power_down = true;
-
- state_count++;
- }
-
- if (state_count == 1)
- goto check_spm;
-
- fns = devm_kcalloc(get_cpu_device(cpu), state_count, sizeof(*fns),
- GFP_KERNEL);
- if (!fns)
- return -ENOMEM;
-
- for (i = 1; i < state_count; i++)
- fns[i] = idle_fns[i];
+ int ret;
- if (use_scm_power_down) {
- /* We have atleast one power down mode */
- cpumask_clear(&mask);
- cpumask_set_cpu(cpu, &mask);
- qcom_scm_set_warm_boot_addr(cpu_resume_arm, &mask);
- }
+ memcpy(drv, &qcom_spm_idle_driver, sizeof(*drv));
+ drv->cpumask = (struct cpumask *)cpumask_of(cpu);
- per_cpu(qcom_idle_ops, cpu) = fns;
+ /* Parse idle states from device tree */
+ ret = dt_init_idle_driver(drv, qcom_idle_state_match, 1);
+ if (ret <= 0)
+ return ret ? : -ENODEV;
- /*
- * SPM probe for the cpu should have happened by now, if the
- * SPM device does not exist, return -ENXIO to indicate that the
- * cpu does not support idle states.
- */
-check_spm:
- return per_cpu(cpu_spm_drv, cpu) ? 0 : -ENXIO;
+ /* We have at least one power down mode */
+ return qcom_scm_set_warm_boot_addr(cpu_resume_arm, drv->cpumask);
}
-static const struct cpuidle_ops qcom_cpuidle_ops __initconst = {
- .suspend = qcom_idle_enter,
- .init = qcom_cpuidle_init,
-};
-
-CPUIDLE_METHOD_OF_DECLARE(qcom_idle_v1, "qcom,kpss-acc-v1", &qcom_cpuidle_ops);
-CPUIDLE_METHOD_OF_DECLARE(qcom_idle_v2, "qcom,kpss-acc-v2", &qcom_cpuidle_ops);
-
static struct spm_driver_data *spm_get_drv(struct platform_device *pdev,
int *spm_cpu)
{
@@ -323,11 +274,15 @@ static int spm_dev_probe(struct platform_device *pdev)
struct resource *res;
const struct of_device_id *match_id;
void __iomem *addr;
- int cpu;
+ int cpu, ret;
+
+ if (!qcom_scm_is_available())
+ return -EPROBE_DEFER;
drv = spm_get_drv(pdev, &cpu);
if (!drv)
return -EINVAL;
+ platform_set_drvdata(pdev, drv);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
drv->reg_base = devm_ioremap_resource(&pdev->dev, res);
@@ -340,6 +295,10 @@ static int spm_dev_probe(struct platform_device *pdev)
drv->reg_data = match_id->data;
+ ret = spm_cpuidle_init(&drv->cpuidle_driver, cpu);
+ if (ret)
+ return ret;
+
/* Write the SPM sequences first.. */
addr = drv->reg_base + drv->reg_data->reg_offset[SPM_REG_SEQ_ENTRY];
__iowrite32_copy(addr, drv->reg_data->seq,
@@ -362,13 +321,20 @@ static int spm_dev_probe(struct platform_device *pdev)
/* Set up Standby as the default low power mode */
spm_set_low_power_mode(drv, PM_SLEEP_MODE_STBY);
- per_cpu(cpu_spm_drv, cpu) = drv;
+ return cpuidle_register(&drv->cpuidle_driver, NULL);
+}
+
+static int spm_dev_remove(struct platform_device *pdev)
+{
+ struct spm_driver_data *drv = platform_get_drvdata(pdev);
+ cpuidle_unregister(&drv->cpuidle_driver);
return 0;
}
static struct platform_driver spm_driver = {
.probe = spm_dev_probe,
+ .remove = spm_dev_remove,
.driver = {
.name = "saw",
.of_match_table = spm_match_table,
diff --git a/drivers/cpuidle/cpuidle-tegra.c b/drivers/cpuidle/cpuidle-tegra.c
index 313b0290e97b..150045849d78 100644
--- a/drivers/cpuidle/cpuidle-tegra.c
+++ b/drivers/cpuidle/cpuidle-tegra.c
@@ -365,7 +365,6 @@ static int tegra_cpuidle_probe(struct platform_device *pdev)
break;
case TEGRA30:
- tegra_cpuidle_disable_state(TEGRA_CC6);
break;
case TEGRA114:
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index cdeedbf02646..091d1caceb41 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -18,14 +18,6 @@
#include "cpuidle.h"
-static unsigned int sysfs_switch;
-static int __init cpuidle_sysfs_setup(char *unused)
-{
- sysfs_switch = 1;
- return 1;
-}
-__setup("cpuidle_sysfs_switch", cpuidle_sysfs_setup);
-
static ssize_t show_available_governors(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -35,10 +27,10 @@ static ssize_t show_available_governors(struct device *dev,
mutex_lock(&cpuidle_lock);
list_for_each_entry(tmp, &cpuidle_governors, governor_list) {
- if (i >= (ssize_t) ((PAGE_SIZE/sizeof(char)) -
- CPUIDLE_NAME_LEN - 2))
+ if (i >= (ssize_t) (PAGE_SIZE - (CPUIDLE_NAME_LEN + 2)))
goto out;
- i += scnprintf(&buf[i], CPUIDLE_NAME_LEN, "%s ", tmp->name);
+
+ i += scnprintf(&buf[i], CPUIDLE_NAME_LEN + 1, "%s ", tmp->name);
}
out:
@@ -85,58 +77,43 @@ static ssize_t store_current_governor(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- char gov_name[CPUIDLE_NAME_LEN];
- int ret = -EINVAL;
- size_t len = count;
+ char gov_name[CPUIDLE_NAME_LEN + 1];
+ int ret;
struct cpuidle_governor *gov;
- if (!len || len >= sizeof(gov_name))
+ ret = sscanf(buf, "%" __stringify(CPUIDLE_NAME_LEN) "s", gov_name);
+ if (ret != 1)
return -EINVAL;
- memcpy(gov_name, buf, len);
- gov_name[len] = '\0';
- if (gov_name[len - 1] == '\n')
- gov_name[--len] = '\0';
-
mutex_lock(&cpuidle_lock);
-
+ ret = -EINVAL;
list_for_each_entry(gov, &cpuidle_governors, governor_list) {
- if (strlen(gov->name) == len && !strcmp(gov->name, gov_name)) {
+ if (!strncmp(gov->name, gov_name, CPUIDLE_NAME_LEN)) {
ret = cpuidle_switch_governor(gov);
break;
}
}
-
mutex_unlock(&cpuidle_lock);
- if (ret)
- return ret;
- else
- return count;
+ return ret ? ret : count;
}
-static DEVICE_ATTR(current_driver, 0444, show_current_driver, NULL);
-static DEVICE_ATTR(current_governor_ro, 0444, show_current_governor, NULL);
-
-static struct attribute *cpuidle_default_attrs[] = {
- &dev_attr_current_driver.attr,
- &dev_attr_current_governor_ro.attr,
- NULL
-};
-
static DEVICE_ATTR(available_governors, 0444, show_available_governors, NULL);
+static DEVICE_ATTR(current_driver, 0444, show_current_driver, NULL);
static DEVICE_ATTR(current_governor, 0644, show_current_governor,
- store_current_governor);
+ store_current_governor);
+static DEVICE_ATTR(current_governor_ro, 0444, show_current_governor, NULL);
-static struct attribute *cpuidle_switch_attrs[] = {
+static struct attribute *cpuidle_attrs[] = {
&dev_attr_available_governors.attr,
&dev_attr_current_driver.attr,
&dev_attr_current_governor.attr,
+ &dev_attr_current_governor_ro.attr,
NULL
};
static struct attribute_group cpuidle_attr_group = {
- .attrs = cpuidle_default_attrs,
+ .attrs = cpuidle_attrs,
.name = "cpuidle",
};
@@ -146,9 +123,6 @@ static struct attribute_group cpuidle_attr_group = {
*/
int cpuidle_add_interface(struct device *dev)
{
- if (sysfs_switch)
- cpuidle_attr_group.attrs = cpuidle_switch_attrs;
-
return sysfs_create_group(&dev->kobj, &cpuidle_attr_group);
}
@@ -167,11 +141,6 @@ struct cpuidle_attr {
ssize_t (*store)(struct cpuidle_device *, const char *, size_t count);
};
-#define define_one_ro(_name, show) \
- static struct cpuidle_attr attr_##_name = __ATTR(_name, 0444, show, NULL)
-#define define_one_rw(_name, show, store) \
- static struct cpuidle_attr attr_##_name = __ATTR(_name, 0644, show, store)
-
#define attr_to_cpuidleattr(a) container_of(a, struct cpuidle_attr, attr)
struct cpuidle_device_kobj {
@@ -431,12 +400,12 @@ static inline void cpuidle_remove_s2idle_attr_group(struct cpuidle_state_kobj *k
#define attr_to_stateattr(a) container_of(a, struct cpuidle_state_attr, attr)
static ssize_t cpuidle_state_show(struct kobject *kobj, struct attribute *attr,
- char * buf)
+ char *buf)
{
int ret = -EIO;
struct cpuidle_state *state = kobj_to_state(kobj);
struct cpuidle_state_usage *state_usage = kobj_to_state_usage(kobj);
- struct cpuidle_state_attr * cattr = attr_to_stateattr(attr);
+ struct cpuidle_state_attr *cattr = attr_to_stateattr(attr);
if (cattr->show)
ret = cattr->show(state, state_usage, buf);
@@ -515,7 +484,7 @@ static int cpuidle_add_state_sysfs(struct cpuidle_device *device)
ret = kobject_init_and_add(&kobj->kobj, &ktype_state_cpuidle,
&kdev->kobj, "state%d", i);
if (ret) {
- kfree(kobj);
+ kobject_put(&kobj->kobj);
goto error_state;
}
cpuidle_add_s2idle_attr_group(kobj);
@@ -646,7 +615,7 @@ static int cpuidle_add_driver_sysfs(struct cpuidle_device *dev)
ret = kobject_init_and_add(&kdrv->kobj, &ktype_driver_cpuidle,
&kdev->kobj, "driver");
if (ret) {
- kfree(kdrv);
+ kobject_put(&kdrv->kobj);
return ret;
}
@@ -740,7 +709,7 @@ int cpuidle_add_sysfs(struct cpuidle_device *dev)
error = kobject_init_and_add(&kdev->kobj, &ktype_cpuidle, &cpu_dev->kobj,
"cpuidle");
if (error) {
- kfree(kdev);
+ kobject_put(&kdev->kobj);
return error;
}
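
Note: the three sysfs hunks above fix the same error-path bug. Once kobject_init_and_add() has run, the kobject holds a reference and the memory must be released through kobject_put(), which invokes the ktype's ->release(), never through a bare kfree(). A minimal sketch of the corrected pattern, with illustrative names:

    #include <linux/kobject.h>
    #include <linux/slab.h>

    struct example_obj {
            struct kobject kobj;
    };

    static void example_release(struct kobject *kobj)
    {
            kfree(container_of(kobj, struct example_obj, kobj));
    }

    static struct kobj_type example_ktype = {
            .release = example_release,
    };

    static int example_add(struct kobject *parent)
    {
            struct example_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);
            int ret;

            if (!obj)
                    return -ENOMEM;

            ret = kobject_init_and_add(&obj->kobj, &example_ktype, parent, "example");
            if (ret)
                    /* Drop the reference; ->release() frees obj. */
                    kobject_put(&obj->kobj);

            return ret;
    }
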
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
index a5fd8975f3d3..a6abb701bfc6 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
@@ -8,7 +8,7 @@
* This file adds support for AES cipher with 128,192,256 bits keysize in
* CBC and ECB mode.
*
- * You could find a link for the datasheet in Documentation/arm/sunxi/README
+ * You could find a link for the datasheet in Documentation/arm/sunxi.rst
*/
#include <linux/crypto.h>
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
index 3e4e4bbda34c..b957061424a1 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
@@ -7,7 +7,7 @@
*
* Core file which registers crypto algorithms supported by the CryptoEngine.
*
- * You could find a link for the datasheet in Documentation/arm/sunxi/README
+ * You could find a link for the datasheet in Documentation/arm/sunxi.rst
*/
#include <linux/clk.h>
#include <linux/crypto.h>
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
index 84d52fc3a2da..c89cb2ee2496 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
@@ -8,7 +8,7 @@
* This file adds support for AES cipher with 128,192,256 bits keysize in
* CBC and ECB mode.
*
- * You could find a link for the datasheet in Documentation/arm/sunxi/README
+ * You could find a link for the datasheet in Documentation/arm/sunxi.rst
*/
#include <linux/crypto.h>
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
index 6b301afffd11..5d9d0fedcb06 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
@@ -7,7 +7,7 @@
*
* Core file which registers crypto algorithms supported by the SecuritySystem
*
- * You could find a link for the datasheet in Documentation/arm/sunxi/README
+ * You could find a link for the datasheet in Documentation/arm/sunxi.rst
*/
#include <linux/clk.h>
#include <linux/crypto.h>
@@ -537,10 +537,8 @@ static int sun8i_ss_probe(struct platform_device *pdev)
return err;
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(ss->dev, "Cannot get SecuritySystem IRQ\n");
+ if (irq < 0)
return irq;
- }
ss->reset = devm_reset_control_get(&pdev->dev, NULL);
if (IS_ERR(ss->reset)) {
diff --git a/drivers/crypto/amlogic/amlogic-gxl-core.c b/drivers/crypto/amlogic/amlogic-gxl-core.c
index 9d4ead2f7ebb..411857fad8ba 100644
--- a/drivers/crypto/amlogic/amlogic-gxl-core.c
+++ b/drivers/crypto/amlogic/amlogic-gxl-core.c
@@ -253,10 +253,8 @@ static int meson_crypto_probe(struct platform_device *pdev)
mc->irqs = devm_kcalloc(mc->dev, MAXFLOW, sizeof(int), GFP_KERNEL);
for (i = 0; i < MAXFLOW; i++) {
mc->irqs[i] = platform_get_irq(pdev, i);
- if (mc->irqs[i] < 0) {
- dev_err(mc->dev, "Cannot get IRQ for flow %d\n", i);
+ if (mc->irqs[i] < 0)
return mc->irqs[i];
- }
err = devm_request_irq(&pdev->dev, mc->irqs[i], meson_irq_handler, 0,
"gxl-crypto", mc);
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index e536e2a6bbd8..75ccf41a7cb9 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -31,7 +31,6 @@
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/crypto.h>
-#include <linux/cryptohash.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/sha.h>
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index fcf1effc7661..62ba0325a618 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -2239,16 +2239,12 @@ artpec6_crypto_hash_set_key(struct crypto_ahash *tfm,
blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
if (keylen > blocksize) {
- SHASH_DESC_ON_STACK(hdesc, tfm_ctx->child_hash);
-
- hdesc->tfm = tfm_ctx->child_hash;
-
tfm_ctx->hmac_key_length = blocksize;
- ret = crypto_shash_digest(hdesc, key, keylen,
- tfm_ctx->hmac_key);
+
+ ret = crypto_shash_tfm_digest(tfm_ctx->child_hash, key, keylen,
+ tfm_ctx->hmac_key);
if (ret)
return ret;
-
} else {
memcpy(tfm_ctx->hmac_key, key, keylen);
tfm_ctx->hmac_key_length = keylen;
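
Note: this hunk, like the matching ones in ccp and ccree below, replaces an on-stack SHASH_DESC_ON_STACK() descriptor plus crypto_shash_digest() with the crypto_shash_tfm_digest() helper, which manages the descriptor internally. A hedged sketch of hashing an over-long HMAC key with the helper (illustrative wrapper, assumes a previously allocated shash tfm):

    #include <crypto/hash.h>

    /* Sketch: digest a long key down to the hash output size in one call. */
    static int shrink_hmac_key(struct crypto_shash *tfm, const u8 *key,
                               unsigned int keylen, u8 *out)
    {
            return crypto_shash_tfm_digest(tfm, key, keylen, out);
    }
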
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index c8b9408541a9..a353217a0d33 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -308,9 +308,9 @@ static int handle_skcipher_req(struct iproc_reqctx_s *rctx)
container_of(areq, struct skcipher_request, base);
struct iproc_ctx_s *ctx = rctx->ctx;
struct spu_cipher_parms cipher_parms;
- int err = 0;
- unsigned int chunksize = 0; /* Num bytes of request to submit */
- int remaining = 0; /* Bytes of request still to process */
+ int err;
+ unsigned int chunksize; /* Num bytes of request to submit */
+ int remaining; /* Bytes of request still to process */
int chunk_start; /* Beginning of data for current SPU msg */
/* IV or ctr value to use in this SPU msg */
@@ -698,7 +698,7 @@ static int handle_ahash_req(struct iproc_reqctx_s *rctx)
/* number of bytes still to be hashed in this req */
unsigned int nbytes_to_hash = 0;
- int err = 0;
+ int err;
unsigned int chunksize = 0; /* length of hash carry + new data */
/*
* length of new data, not from hash carry, to be submitted in
@@ -1664,7 +1664,7 @@ static void spu_rx_callback(struct mbox_client *cl, void *msg)
struct spu_hw *spu = &iproc_priv.spu;
struct brcm_message *mssg = msg;
struct iproc_reqctx_s *rctx;
- int err = 0;
+ int err;
rctx = mssg->ctx;
if (unlikely(!rctx)) {
@@ -1967,7 +1967,7 @@ static int ahash_enqueue(struct ahash_request *req)
struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
- int err = 0;
+ int err;
const char *alg_name;
flow_log("ahash_enqueue() nbytes:%u\n", req->nbytes);
@@ -2299,7 +2299,7 @@ ahash_finup_exit:
static int ahash_digest(struct ahash_request *req)
{
- int err = 0;
+ int err;
flow_log("ahash_digest() nbytes:%u\n", req->nbytes);
@@ -4436,7 +4436,7 @@ static int spu_mb_init(struct device *dev)
for (i = 0; i < iproc_priv.spu.num_chan; i++) {
iproc_priv.mbox[i] = mbox_request_channel(mcl, i);
if (IS_ERR(iproc_priv.mbox[i])) {
- err = (int)PTR_ERR(iproc_priv.mbox[i]);
+ err = PTR_ERR(iproc_priv.mbox[i]);
dev_err(dev,
"Mbox channel %d request failed with err %d",
i, err);
@@ -4717,21 +4717,20 @@ static int spu_dt_read(struct platform_device *pdev)
matched_spu_type = of_device_get_match_data(dev);
if (!matched_spu_type) {
- dev_err(&pdev->dev, "Failed to match device\n");
+ dev_err(dev, "Failed to match device\n");
return -ENODEV;
}
spu->spu_type = matched_spu_type->type;
spu->spu_subtype = matched_spu_type->subtype;
- i = 0;
for (i = 0; (i < MAX_SPUS) && ((spu_ctrl_regs =
platform_get_resource(pdev, IORESOURCE_MEM, i)) != NULL); i++) {
spu->reg_vbase[i] = devm_ioremap_resource(dev, spu_ctrl_regs);
if (IS_ERR(spu->reg_vbase[i])) {
err = PTR_ERR(spu->reg_vbase[i]);
- dev_err(&pdev->dev, "Failed to map registers: %d\n",
+ dev_err(dev, "Failed to map registers: %d\n",
err);
spu->reg_vbase[i] = NULL;
return err;
@@ -4747,7 +4746,7 @@ static int bcm_spu_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct spu_hw *spu = &iproc_priv.spu;
- int err = 0;
+ int err;
iproc_priv.pdev = pdev;
platform_set_drvdata(iproc_priv.pdev,
@@ -4757,7 +4756,7 @@ static int bcm_spu_probe(struct platform_device *pdev)
if (err < 0)
goto failure;
- err = spu_mb_init(&pdev->dev);
+ err = spu_mb_init(dev);
if (err < 0)
goto failure;
@@ -4766,7 +4765,7 @@ static int bcm_spu_probe(struct platform_device *pdev)
else if (spu->spu_type == SPU_TYPE_SPU2)
iproc_priv.bcm_hdr_len = 0;
- spu_functions_register(&pdev->dev, spu->spu_type, spu->spu_subtype);
+ spu_functions_register(dev, spu->spu_type, spu->spu_subtype);
spu_counters_init();
diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c
index e91be9b8b083..cee2a2713038 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_main.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_main.c
@@ -278,7 +278,7 @@ static void nitrox_remove_from_devlist(struct nitrox_device *ndev)
struct nitrox_device *nitrox_get_first_device(void)
{
- struct nitrox_device *ndev = NULL;
+ struct nitrox_device *ndev;
mutex_lock(&devlist_lock);
list_for_each_entry(ndev, &ndevlist, list) {
@@ -286,7 +286,7 @@ struct nitrox_device *nitrox_get_first_device(void)
break;
}
mutex_unlock(&devlist_lock);
- if (!ndev)
+ if (&ndev->list == &ndevlist)
return NULL;
refcount_inc(&ndev->refcnt);
@@ -346,7 +346,7 @@ static void nitrox_pf_sw_cleanup(struct nitrox_device *ndev)
}
/**
- * nitrox_bist_check - Check NITORX BIST registers status
+ * nitrox_bist_check - Check NITROX BIST registers status
* @ndev: NITROX device
*/
static int nitrox_bist_check(struct nitrox_device *ndev)
diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig
index e0a8bd15aa74..32268e239bf1 100644
--- a/drivers/crypto/ccp/Kconfig
+++ b/drivers/crypto/ccp/Kconfig
@@ -10,10 +10,9 @@ config CRYPTO_DEV_CCP_DD
config CRYPTO_DEV_SP_CCP
bool "Cryptographic Coprocessor device"
default y
- depends on CRYPTO_DEV_CCP_DD
+ depends on CRYPTO_DEV_CCP_DD && DMADEVICES
select HW_RANDOM
select DMA_ENGINE
- select DMADEVICES
select CRYPTO_SHA1
select CRYPTO_SHA256
help
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index 474e6f1a6a84..b0cc2bd73af8 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -272,9 +272,6 @@ static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
{
struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
struct crypto_shash *shash = ctx->u.sha.hmac_tfm;
-
- SHASH_DESC_ON_STACK(sdesc, shash);
-
unsigned int block_size = crypto_shash_blocksize(shash);
unsigned int digest_size = crypto_shash_digestsize(shash);
int i, ret;
@@ -289,10 +286,8 @@ static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
if (key_len > block_size) {
/* Must hash the input key */
- sdesc->tfm = shash;
-
- ret = crypto_shash_digest(sdesc, key, key_len,
- ctx->u.sha.key);
+ ret = crypto_shash_tfm_digest(shash, key, key_len,
+ ctx->u.sha.key);
if (ret)
return -EINVAL;
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index 896f190b9a50..a2426334be61 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -20,6 +20,7 @@
#include <linux/hw_random.h>
#include <linux/ccp.h>
#include <linux/firmware.h>
+#include <linux/gfp.h>
#include <asm/smp.h>
@@ -44,6 +45,14 @@ MODULE_PARM_DESC(psp_probe_timeout, " default timeout value, in seconds, during
static bool psp_dead;
static int psp_timeout;
+/* Trusted Memory Region (TMR):
+ * The TMR is a 1MB area that must be 1MB aligned. Use the page allocator
+ * to allocate the memory, which will return aligned memory for the specified
+ * allocation order.
+ */
+#define SEV_ES_TMR_SIZE (1024 * 1024)
+static void *sev_es_tmr;
+
static inline bool sev_version_greater_or_equal(u8 maj, u8 min)
{
struct sev_device *sev = psp_master->sev_data;
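
Note: as the TMR comment above says, the buddy allocator returns blocks that are naturally aligned to their allocation order, so a 1 MB request is also 1 MB aligned. A minimal sketch of just the allocation step (the surrounding hunks show the real usage; the helper name is illustrative):

    #include <linux/gfp.h>
    #include <linux/mm.h>
    #include <linux/sizes.h>

    /* Sketch: an order-sized allocation is aligned to its own size,
     * which is exactly what the 1 MB TMR requires. */
    static void *alloc_tmr_area(void)
    {
            struct page *page = alloc_pages(GFP_KERNEL, get_order(SZ_1M));

            return page ? page_address(page) : NULL;
    }
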
@@ -214,6 +223,20 @@ static int __sev_platform_init_locked(int *error)
if (sev->state == SEV_STATE_INIT)
return 0;
+ if (sev_es_tmr) {
+ u64 tmr_pa;
+
+ /*
+ * Do not include the encryption mask on the physical
+ * address of the TMR (firmware should clear it anyway).
+ */
+ tmr_pa = __pa(sev_es_tmr);
+
+ sev->init_cmd_buf.flags |= SEV_INIT_FLAGS_SEV_ES;
+ sev->init_cmd_buf.tmr_address = tmr_pa;
+ sev->init_cmd_buf.tmr_len = SEV_ES_TMR_SIZE;
+ }
+
rc = __sev_do_cmd_locked(SEV_CMD_INIT, &sev->init_cmd_buf, error);
if (rc)
return rc;
@@ -371,8 +394,7 @@ static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp, bool writable)
goto cmd;
/* allocate a physically contiguous buffer to store the CSR blob */
- if (!access_ok(input.address, input.length) ||
- input.length > SEV_FW_BLOB_MAX_SIZE) {
+ if (input.length > SEV_FW_BLOB_MAX_SIZE) {
ret = -EFAULT;
goto e_free;
}
@@ -609,12 +631,6 @@ static int sev_ioctl_do_get_id2(struct sev_issue_cmd *argp)
if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
return -EFAULT;
- /* Check if we have write access to the userspace buffer */
- if (input.address &&
- input.length &&
- !access_ok(input.address, input.length))
- return -EFAULT;
-
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -730,15 +746,13 @@ static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp, bool writable)
goto cmd;
/* Allocate a physically contiguous buffer to store the PDH blob. */
- if ((input.pdh_cert_len > SEV_FW_BLOB_MAX_SIZE) ||
- !access_ok(input.pdh_cert_address, input.pdh_cert_len)) {
+ if (input.pdh_cert_len > SEV_FW_BLOB_MAX_SIZE) {
ret = -EFAULT;
goto e_free;
}
/* Allocate a physically contiguous buffer to store the cert chain blob. */
- if ((input.cert_chain_len > SEV_FW_BLOB_MAX_SIZE) ||
- !access_ok(input.cert_chain_address, input.cert_chain_len)) {
+ if (input.cert_chain_len > SEV_FW_BLOB_MAX_SIZE) {
ret = -EFAULT;
goto e_free;
}
@@ -1012,6 +1026,7 @@ EXPORT_SYMBOL_GPL(sev_issue_cmd_external_user);
void sev_pci_init(void)
{
struct sev_device *sev = psp_master->sev_data;
+ struct page *tmr_page;
int error, rc;
if (!sev)
@@ -1041,6 +1056,16 @@ void sev_pci_init(void)
sev_update_firmware(sev->dev) == 0)
sev_get_api_version();
+ /* Obtain the TMR memory area for SEV-ES use */
+ tmr_page = alloc_pages(GFP_KERNEL, get_order(SEV_ES_TMR_SIZE));
+ if (tmr_page) {
+ sev_es_tmr = page_address(tmr_page);
+ } else {
+ sev_es_tmr = NULL;
+ dev_warn(sev->dev,
+ "SEV: TMR allocation failed, SEV-ES support unavailable\n");
+ }
+
/* Initialize the platform */
rc = sev_platform_init(&error);
if (rc && (error == SEV_RET_SECURE_DATA_INVALID)) {
@@ -1075,4 +1100,13 @@ void sev_pci_exit(void)
return;
sev_platform_shutdown(NULL);
+
+ if (sev_es_tmr) {
+ /* The TMR area was encrypted, flush it from the cache */
+ wbinvd_on_all_cpus();
+
+ free_pages((unsigned long)sev_es_tmr,
+ get_order(SEV_ES_TMR_SIZE));
+ sev_es_tmr = NULL;
+ }
}
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index a84335328f37..872ea3ff1c6b 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -427,12 +427,9 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key,
int key_len = keylen >> 1;
int err;
- SHASH_DESC_ON_STACK(desc, ctx_p->shash_tfm);
-
- desc->tfm = ctx_p->shash_tfm;
-
- err = crypto_shash_digest(desc, ctx_p->user.key, key_len,
- ctx_p->user.key + key_len);
+ err = crypto_shash_tfm_digest(ctx_p->shash_tfm,
+ ctx_p->user.key, key_len,
+ ctx_p->user.key + key_len);
if (err) {
dev_err(dev, "Failed to hash ESSIV key.\n");
return err;
diff --git a/drivers/crypto/ccree/cc_debugfs.c b/drivers/crypto/ccree/cc_debugfs.c
index c454afce7781..7083767602fc 100644
--- a/drivers/crypto/ccree/cc_debugfs.c
+++ b/drivers/crypto/ccree/cc_debugfs.c
@@ -26,7 +26,7 @@ static struct debugfs_reg32 ver_sig_regs[] = {
{ .name = "VERSION" }, /* Must be 1st */
};
-static struct debugfs_reg32 pid_cid_regs[] = {
+static const struct debugfs_reg32 pid_cid_regs[] = {
CC_DEBUG_REG(PERIPHERAL_ID_0),
CC_DEBUG_REG(PERIPHERAL_ID_1),
CC_DEBUG_REG(PERIPHERAL_ID_2),
@@ -38,7 +38,7 @@ static struct debugfs_reg32 pid_cid_regs[] = {
CC_DEBUG_REG(COMPONENT_ID_3),
};
-static struct debugfs_reg32 debug_regs[] = {
+static const struct debugfs_reg32 debug_regs[] = {
CC_DEBUG_REG(HOST_IRR),
CC_DEBUG_REG(HOST_POWER_DOWN_EN),
CC_DEBUG_REG(AXIM_MON_ERR),
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index c29b80dd30d8..f26a7a15551a 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -44,7 +44,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>
-#include <linux/cryptohash.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/highmem.h>
@@ -256,7 +255,7 @@ static void get_aes_decrypt_key(unsigned char *dec_key,
return;
}
for (i = 0; i < nk; i++)
- w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);
+ w_ring[i] = get_unaligned_be32(&key[i * 4]);
i = 0;
temp = w_ring[nk - 1];
@@ -275,7 +274,7 @@ static void get_aes_decrypt_key(unsigned char *dec_key,
}
i--;
for (k = 0, j = i % nk; k < nk; k++) {
- *((u32 *)dec_key + k) = htonl(w_ring[j]);
+ put_unaligned_be32(w_ring[j], &dec_key[k * 4]);
j--;
if (j < 0)
j += nk;
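
Note: the two hunks above replace type-punned casts with the unaligned big-endian accessors, which are safe for byte buffers at any offset. A small illustrative sketch (hypothetical helpers, not the driver's code):

    #include <asm/unaligned.h>
    #include <linux/types.h>

    /* Sketch: load/store 32-bit big-endian words from a byte buffer without
     * assuming any particular alignment of the pointer. */
    static u32 read_word_be(const u8 *buf, unsigned int word)
    {
            return get_unaligned_be32(buf + 4 * word);
    }

    static void write_word_be(u8 *buf, unsigned int word, u32 val)
    {
            put_unaligned_be32(val, buf + 4 * word);
    }
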
@@ -1054,8 +1053,8 @@ static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
u32 temp = be32_to_cpu(*--b);
temp = ~temp;
- c = (u64)temp + 1; // No of block can processed withou overflow
- if ((bytes / AES_BLOCK_SIZE) > c)
+ c = (u64)temp + 1; // Number of blocks that can be processed without overflow
+ if ((bytes / AES_BLOCK_SIZE) >= c)
bytes = c * AES_BLOCK_SIZE;
return bytes;
}
@@ -1077,7 +1076,14 @@ static int chcr_update_tweak(struct skcipher_request *req, u8 *iv,
keylen = ablkctx->enckey_len / 2;
key = ablkctx->key + keylen;
- ret = aes_expandkey(&aes, key, keylen);
+ /* For a 192 bit key, remove the padded zeroes which were
+ * added in chcr_xts_setkey
+ */
+ if (KEY_CONTEXT_CK_SIZE_G(ntohl(ablkctx->key_ctx_hdr))
+ == CHCR_KEYCTX_CIPHER_KEY_SIZE_192)
+ ret = aes_expandkey(&aes, key, keylen - 8);
+ else
+ ret = aes_expandkey(&aes, key, keylen);
if (ret)
return ret;
aes_encrypt(&aes, iv, iv);
@@ -1158,15 +1164,16 @@ static int chcr_final_cipher_iv(struct skcipher_request *req,
static int chcr_handle_cipher_resp(struct skcipher_request *req,
unsigned char *input, int err)
{
+ struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct chcr_context *ctx = c_ctx(tfm);
- struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
- struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
- struct sk_buff *skb;
struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
- struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
- struct cipher_wr_param wrparam;
+ struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
+ struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
struct chcr_dev *dev = c_ctx(tfm)->dev;
+ struct chcr_context *ctx = c_ctx(tfm);
+ struct adapter *adap = padap(ctx->dev);
+ struct cipher_wr_param wrparam;
+ struct sk_buff *skb;
int bytes;
if (err)
@@ -1197,6 +1204,8 @@ static int chcr_handle_cipher_resp(struct skcipher_request *req,
if (unlikely(bytes == 0)) {
chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
req);
+ memcpy(req->iv, reqctx->init_iv, IV);
+ atomic_inc(&adap->chcr_stats.fallback);
err = chcr_cipher_fallback(ablkctx->sw_cipher,
req->base.flags,
req->src,
@@ -1248,20 +1257,28 @@ static int process_cipher(struct skcipher_request *req,
struct sk_buff **skb,
unsigned short op_type)
{
+ struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
unsigned int ivsize = crypto_skcipher_ivsize(tfm);
- struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
+ struct adapter *adap = padap(c_ctx(tfm)->dev);
struct cipher_wr_param wrparam;
int bytes, err = -EINVAL;
+ int subtype;
reqctx->processed = 0;
reqctx->partial_req = 0;
if (!req->iv)
goto error;
+ subtype = get_cryptoalg_subtype(tfm);
if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
(req->cryptlen == 0) ||
(req->cryptlen % crypto_skcipher_blocksize(tfm))) {
+ if (req->cryptlen == 0 && subtype != CRYPTO_ALG_SUB_TYPE_XTS)
+ goto fallback;
+ else if (req->cryptlen % crypto_skcipher_blocksize(tfm) &&
+ subtype == CRYPTO_ALG_SUB_TYPE_XTS)
+ goto fallback;
pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
ablkctx->enckey_len, req->cryptlen, ivsize);
goto error;
@@ -1302,12 +1319,10 @@ static int process_cipher(struct skcipher_request *req,
} else {
bytes = req->cryptlen;
}
- if (get_cryptoalg_subtype(tfm) ==
- CRYPTO_ALG_SUB_TYPE_CTR) {
+ if (subtype == CRYPTO_ALG_SUB_TYPE_CTR) {
bytes = adjust_ctr_overflow(req->iv, bytes);
}
- if (get_cryptoalg_subtype(tfm) ==
- CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
+ if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
CTR_RFC3686_IV_SIZE);
@@ -1315,20 +1330,25 @@ static int process_cipher(struct skcipher_request *req,
/* initialize counter portion of counter block */
*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
+ memcpy(reqctx->init_iv, reqctx->iv, IV);
} else {
memcpy(reqctx->iv, req->iv, IV);
+ memcpy(reqctx->init_iv, req->iv, IV);
}
if (unlikely(bytes == 0)) {
chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
req);
+fallback: atomic_inc(&adap->chcr_stats.fallback);
err = chcr_cipher_fallback(ablkctx->sw_cipher,
req->base.flags,
req->src,
req->dst,
req->cryptlen,
- reqctx->iv,
+ subtype ==
+ CRYPTO_ALG_SUB_TYPE_CTR_RFC3686 ?
+ reqctx->iv : req->iv,
op_type);
goto error;
}
@@ -1443,6 +1463,7 @@ static int chcr_device_init(struct chcr_context *ctx)
if (!ctx->dev) {
u_ctx = assign_chcr_device();
if (!u_ctx) {
+ err = -ENXIO;
pr_err("chcr device assignment fails\n");
goto out;
}
@@ -1757,7 +1778,7 @@ static int chcr_ahash_final(struct ahash_request *req)
struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
struct chcr_context *ctx = h_ctx(rtfm);
u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
- int error = -EINVAL;
+ int error;
unsigned int cpu;
cpu = get_cpu();
@@ -1984,7 +2005,7 @@ static int chcr_ahash_digest(struct ahash_request *req)
req_ctx->data_len += params.bfr_len + params.sg_len;
if (req->nbytes == 0) {
- create_last_hash_block(req_ctx->reqbfr, bs, 0);
+ create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
params.more = 1;
params.bfr_len = bs;
}
@@ -2250,12 +2271,28 @@ static int chcr_aes_xts_setkey(struct crypto_skcipher *cipher, const u8 *key,
ablkctx->enckey_len = key_len;
get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
- ablkctx->key_ctx_hdr =
+ /* Both keys for xts must be aligned to 16 byte boundary
+ * by padding with zeros. So for 24 byte keys, pad with 8 zeros.
+ */
+ if (key_len == 48) {
+ context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len
+ + 16) >> 4;
+ memmove(ablkctx->key + 32, ablkctx->key + 24, 24);
+ memset(ablkctx->key + 24, 0, 8);
+ memset(ablkctx->key + 56, 0, 8);
+ ablkctx->enckey_len = 64;
+ ablkctx->key_ctx_hdr =
+ FILL_KEY_CTX_HDR(CHCR_KEYCTX_CIPHER_KEY_SIZE_192,
+ CHCR_KEYCTX_NO_KEY, 1,
+ 0, context_size);
+ } else {
+ ablkctx->key_ctx_hdr =
FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
CHCR_KEYCTX_NO_KEY, 1,
0, context_size);
+ }
ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
return 0;
badkey_err:
@@ -2556,7 +2593,7 @@ int chcr_aead_dma_map(struct device *dev,
int dst_size;
dst_size = req->assoclen + req->cryptlen + (op_type ?
- -authsize : authsize);
+ 0 : authsize);
if (!req->cryptlen || !dst_size)
return 0;
reqctx->iv_dma = dma_map_single(dev, reqctx->iv, (IV + reqctx->b0_len),
@@ -2603,15 +2640,16 @@ void chcr_aead_dma_unmap(struct device *dev,
int dst_size;
dst_size = req->assoclen + req->cryptlen + (op_type ?
- -authsize : authsize);
+ 0 : authsize);
if (!req->cryptlen || !dst_size)
return;
dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len),
DMA_BIDIRECTIONAL);
if (req->src == req->dst) {
- dma_unmap_sg(dev, req->src, sg_nents(req->src),
- DMA_BIDIRECTIONAL);
+ dma_unmap_sg(dev, req->src,
+ sg_nents_for_len(req->src, dst_size),
+ DMA_BIDIRECTIONAL);
} else {
dma_unmap_sg(dev, req->src, sg_nents(req->src),
DMA_TO_DEVICE);
@@ -2888,8 +2926,7 @@ static int ccm_format_packet(struct aead_request *req,
memcpy(ivptr, req->iv, 16);
}
if (assoclen)
- *((unsigned short *)(reqctx->scratch_pad + 16)) =
- htons(assoclen);
+ put_unaligned_be16(assoclen, &reqctx->scratch_pad[16]);
rc = generate_b0(req, ivptr, op_type);
/* zero the ctr value */
@@ -2910,7 +2947,7 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
unsigned int ccm_xtra;
- unsigned char tag_offset = 0, auth_offset = 0;
+ unsigned int tag_offset = 0, auth_offset = 0;
unsigned int assoclen;
if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
@@ -3163,8 +3200,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
} else {
memcpy(ivptr, req->iv, GCM_AES_IV_SIZE);
}
- *((unsigned int *)(ivptr + 12)) = htonl(0x01);
-
+ put_unaligned_be32(0x01, &ivptr[12]);
ulptx = (struct ulptx_sgl *)(ivptr + 16);
chcr_add_aead_dst_ent(req, phys_cpl, qid);
@@ -3702,6 +3738,13 @@ static int chcr_aead_op(struct aead_request *req,
return -ENOSPC;
}
+ if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
+ crypto_ipsec_check_assoclen(req->assoclen) != 0) {
+ pr_err("RFC4106: Invalid value of assoclen %d\n",
+ req->assoclen);
+ return -EINVAL;
+ }
+
/* Form a WR from req */
skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx], size);
diff --git a/drivers/crypto/chelsio/chcr_algo.h b/drivers/crypto/chelsio/chcr_algo.h
index f58c2b5c7fc5..d4f6e010dc79 100644
--- a/drivers/crypto/chelsio/chcr_algo.h
+++ b/drivers/crypto/chelsio/chcr_algo.h
@@ -389,10 +389,6 @@ static inline void copy_hash_init_values(char *key, int digestsize)
}
}
-static const u8 sgl_lengths[20] = {
- 0, 1, 2, 3, 4, 4, 5, 6, 7, 7, 8, 9, 10, 10, 11, 12, 13, 13, 14, 15
-};
-
/* Number of len fields(8) * size of one addr field */
#define PHYSDSGL_MAX_LEN_SIZE 16
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
index ffd4ec0c7374..bd8dac806e7a 100644
--- a/drivers/crypto/chelsio/chcr_core.c
+++ b/drivers/crypto/chelsio/chcr_core.c
@@ -33,6 +33,13 @@ static int cpl_fw6_pld_handler(struct adapter *adap, unsigned char *input);
static void *chcr_uld_add(const struct cxgb4_lld_info *lld);
static int chcr_uld_state_change(void *handle, enum cxgb4_state state);
+#if defined(CONFIG_CHELSIO_TLS_DEVICE)
+static const struct tlsdev_ops chcr_ktls_ops = {
+ .tls_dev_add = chcr_ktls_dev_add,
+ .tls_dev_del = chcr_ktls_dev_del,
+};
+#endif
+
#ifdef CONFIG_CHELSIO_IPSEC_INLINE
static void update_netdev_features(void);
#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
@@ -56,6 +63,9 @@ static struct cxgb4_uld_info chcr_uld_info = {
#if defined(CONFIG_CHELSIO_IPSEC_INLINE) || defined(CONFIG_CHELSIO_TLS_DEVICE)
.tx_handler = chcr_uld_tx_handler,
#endif /* CONFIG_CHELSIO_IPSEC_INLINE || CONFIG_CHELSIO_TLS_DEVICE */
+#if defined(CONFIG_CHELSIO_TLS_DEVICE)
+ .tlsdev_ops = &chcr_ktls_ops,
+#endif
};
static void detach_work_fn(struct work_struct *work)
@@ -207,11 +217,6 @@ static void *chcr_uld_add(const struct cxgb4_lld_info *lld)
}
u_ctx->lldi = *lld;
chcr_dev_init(u_ctx);
-
-#ifdef CONFIG_CHELSIO_TLS_DEVICE
- if (lld->ulp_crypto & ULP_CRYPTO_KTLS_INLINE)
- chcr_enable_ktls(padap(&u_ctx->dev));
-#endif
out:
return u_ctx;
}
@@ -348,20 +353,12 @@ static void __exit chcr_crypto_exit(void)
list_for_each_entry_safe(u_ctx, tmp, &drv_data.act_dev, entry) {
adap = padap(&u_ctx->dev);
memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats));
-#ifdef CONFIG_CHELSIO_TLS_DEVICE
- if (u_ctx->lldi.ulp_crypto & ULP_CRYPTO_KTLS_INLINE)
- chcr_disable_ktls(adap);
-#endif
list_del(&u_ctx->entry);
kfree(u_ctx);
}
list_for_each_entry_safe(u_ctx, tmp, &drv_data.inact_dev, entry) {
adap = padap(&u_ctx->dev);
memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats));
-#ifdef CONFIG_CHELSIO_TLS_DEVICE
- if (u_ctx->lldi.ulp_crypto & ULP_CRYPTO_KTLS_INLINE)
- chcr_disable_ktls(adap);
-#endif
list_del(&u_ctx->entry);
kfree(u_ctx);
}
diff --git a/drivers/crypto/chelsio/chcr_core.h b/drivers/crypto/chelsio/chcr_core.h
index 2c09672e00a4..67d77abd6775 100644
--- a/drivers/crypto/chelsio/chcr_core.h
+++ b/drivers/crypto/chelsio/chcr_core.h
@@ -37,6 +37,7 @@
#define __CHCR_CORE_H__
#include <crypto/algapi.h>
+#include <net/tls.h>
#include "t4_hw.h"
#include "cxgb4.h"
#include "t4_msg.h"
@@ -223,10 +224,15 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
int chcr_ipsec_xmit(struct sk_buff *skb, struct net_device *dev);
void chcr_add_xfrmops(const struct cxgb4_lld_info *lld);
#ifdef CONFIG_CHELSIO_TLS_DEVICE
-void chcr_enable_ktls(struct adapter *adap);
-void chcr_disable_ktls(struct adapter *adap);
int chcr_ktls_cpl_act_open_rpl(struct adapter *adap, unsigned char *input);
int chcr_ktls_cpl_set_tcb_rpl(struct adapter *adap, unsigned char *input);
int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev);
+extern int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
+ enum tls_offload_ctx_dir direction,
+ struct tls_crypto_info *crypto_info,
+ u32 start_offload_tcp_sn);
+extern void chcr_ktls_dev_del(struct net_device *netdev,
+ struct tls_context *tls_ctx,
+ enum tls_offload_ctx_dir direction);
#endif
#endif /* __CHCR_CORE_H__ */
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index 542bebae001f..b3fdbdc25acb 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -302,6 +302,7 @@ struct chcr_skcipher_req_ctx {
unsigned int op;
u16 imm;
u8 iv[CHCR_MAX_CRYPTO_IV_LEN];
+ u8 init_iv[CHCR_MAX_CRYPTO_IV_LEN];
u16 txqidx;
u16 rxqidx;
};
diff --git a/drivers/crypto/chelsio/chcr_ipsec.c b/drivers/crypto/chelsio/chcr_ipsec.c
index 9fd3b9d1ec2f..967babd67a51 100644
--- a/drivers/crypto/chelsio/chcr_ipsec.c
+++ b/drivers/crypto/chelsio/chcr_ipsec.c
@@ -40,7 +40,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>
-#include <linux/cryptohash.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/highmem.h>
@@ -294,9 +293,6 @@ static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
return false;
}
- /* Inline single pdu */
- if (skb_shinfo(skb)->gso_size)
- return false;
return true;
}
@@ -406,7 +402,7 @@ inline void *copy_esn_pktxt(struct sk_buff *skb,
xo = xfrm_offload(skb);
aadiv->spi = (esphdr->spi);
- seqlo = htonl(esphdr->seq_no);
+ seqlo = ntohl(esphdr->seq_no);
seqno = cpu_to_be64(seqlo + ((u64)xo->seq.hi << 32));
memcpy(aadiv->seq_no, &seqno, 8);
iv = skb_transport_header(skb) + sizeof(struct ip_esp_hdr);
diff --git a/drivers/crypto/chelsio/chcr_ktls.c b/drivers/crypto/chelsio/chcr_ktls.c
index 43d9e2420110..91dee616d15e 100644
--- a/drivers/crypto/chelsio/chcr_ktls.c
+++ b/drivers/crypto/chelsio/chcr_ktls.c
@@ -221,6 +221,7 @@ static int chcr_ktls_act_open_req(struct sock *sk,
return cxgb4_l2t_send(tx_info->netdev, skb, tx_info->l2te);
}
+#if IS_ENABLED(CONFIG_IPV6)
/*
* chcr_ktls_act_open_req6: creates TCB entry for ipv6 connection.
* @sk - tcp socket.
@@ -270,6 +271,7 @@ static int chcr_ktls_act_open_req6(struct sock *sk,
return cxgb4_l2t_send(tx_info->netdev, skb, tx_info->l2te);
}
+#endif /* #if IS_ENABLED(CONFIG_IPV6) */
/*
* chcr_setup_connection: create a TCB entry so that TP will form tcp packets.
@@ -290,20 +292,26 @@ static int chcr_setup_connection(struct sock *sk,
tx_info->atid = atid;
tx_info->ip_family = sk->sk_family;
- if (sk->sk_family == AF_INET ||
- (sk->sk_family == AF_INET6 && !sk->sk_ipv6only &&
- ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED)) {
+ if (sk->sk_family == AF_INET) {
tx_info->ip_family = AF_INET;
ret = chcr_ktls_act_open_req(sk, tx_info, atid);
+#if IS_ENABLED(CONFIG_IPV6)
} else {
- tx_info->ip_family = AF_INET6;
- ret =
- cxgb4_clip_get(tx_info->netdev,
- (const u32 *)&sk->sk_v6_rcv_saddr.in6_u.u6_addr8,
- 1);
- if (ret)
- goto out;
- ret = chcr_ktls_act_open_req6(sk, tx_info, atid);
+ if (!sk->sk_ipv6only &&
+ ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED) {
+ tx_info->ip_family = AF_INET;
+ ret = chcr_ktls_act_open_req(sk, tx_info, atid);
+ } else {
+ tx_info->ip_family = AF_INET6;
+ ret = cxgb4_clip_get(tx_info->netdev,
+ (const u32 *)
+ &sk->sk_v6_rcv_saddr.s6_addr,
+ 1);
+ if (ret)
+ goto out;
+ ret = chcr_ktls_act_open_req6(sk, tx_info, atid);
+ }
+#endif
}
/* if return type is NET_XMIT_CN, msg will be sent but delayed, mark ret
@@ -373,9 +381,9 @@ static int chcr_ktls_mark_tcb_close(struct chcr_ktls_info *tx_info)
* @tls_cts - tls context.
* @direction - TX/RX crypto direction
*/
-static void chcr_ktls_dev_del(struct net_device *netdev,
- struct tls_context *tls_ctx,
- enum tls_offload_ctx_dir direction)
+void chcr_ktls_dev_del(struct net_device *netdev,
+ struct tls_context *tls_ctx,
+ enum tls_offload_ctx_dir direction)
{
struct chcr_ktls_ofld_ctx_tx *tx_ctx =
chcr_get_ktls_tx_context(tls_ctx);
@@ -394,11 +402,13 @@ static void chcr_ktls_dev_del(struct net_device *netdev,
if (tx_info->l2te)
cxgb4_l2t_release(tx_info->l2te);
+#if IS_ENABLED(CONFIG_IPV6)
/* clear clip entry */
if (tx_info->ip_family == AF_INET6)
cxgb4_clip_release(netdev,
(const u32 *)&sk->sk_v6_daddr.in6_u.u6_addr8,
1);
+#endif
/* clear tid */
if (tx_info->tid != -1) {
@@ -411,6 +421,8 @@ static void chcr_ktls_dev_del(struct net_device *netdev,
atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_connection_close);
kvfree(tx_info);
tx_ctx->chcr_info = NULL;
+ /* release module refcount */
+ module_put(THIS_MODULE);
}
/*
@@ -422,10 +434,10 @@ static void chcr_ktls_dev_del(struct net_device *netdev,
* @direction - TX/RX crypto direction
* return: SUCCESS/FAILURE.
*/
-static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
- enum tls_offload_ctx_dir direction,
- struct tls_crypto_info *crypto_info,
- u32 start_offload_tcp_sn)
+int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
+ enum tls_offload_ctx_dir direction,
+ struct tls_crypto_info *crypto_info,
+ u32 start_offload_tcp_sn)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct chcr_ktls_ofld_ctx_tx *tx_ctx;
@@ -489,12 +501,16 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
goto out2;
/* get peer ip */
- if (sk->sk_family == AF_INET ||
- (sk->sk_family == AF_INET6 && !sk->sk_ipv6only &&
- ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED)) {
+ if (sk->sk_family == AF_INET) {
memcpy(daaddr, &sk->sk_daddr, 4);
+#if IS_ENABLED(CONFIG_IPV6)
} else {
- memcpy(daaddr, sk->sk_v6_daddr.in6_u.u6_addr8, 16);
+ if (!sk->sk_ipv6only &&
+ ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED)
+ memcpy(daaddr, &sk->sk_daddr, 4);
+ else
+ memcpy(daaddr, sk->sk_v6_daddr.in6_u.u6_addr8, 16);
+#endif
}
/* get the l2t index */
@@ -528,6 +544,12 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
if (ret)
goto out2;
+ /* Driver shouldn't be removed until any single connection exists */
+ if (!try_module_get(THIS_MODULE)) {
+ ret = -EINVAL;
+ goto out2;
+ }
+
atomic64_inc(&adap->chcr_stats.ktls_tx_connection_open);
return 0;
out2:
@@ -537,43 +559,6 @@ out:
return ret;
}
-static const struct tlsdev_ops chcr_ktls_ops = {
- .tls_dev_add = chcr_ktls_dev_add,
- .tls_dev_del = chcr_ktls_dev_del,
-};
-
-/*
- * chcr_enable_ktls: add NETIF_F_HW_TLS_TX flag in all the ports.
- */
-void chcr_enable_ktls(struct adapter *adap)
-{
- struct net_device *netdev;
- int i;
-
- for_each_port(adap, i) {
- netdev = adap->port[i];
- netdev->features |= NETIF_F_HW_TLS_TX;
- netdev->hw_features |= NETIF_F_HW_TLS_TX;
- netdev->tlsdev_ops = &chcr_ktls_ops;
- }
-}
-
-/*
- * chcr_disable_ktls: remove NETIF_F_HW_TLS_TX flag from all the ports.
- */
-void chcr_disable_ktls(struct adapter *adap)
-{
- struct net_device *netdev;
- int i;
-
- for_each_port(adap, i) {
- netdev = adap->port[i];
- netdev->features &= ~NETIF_F_HW_TLS_TX;
- netdev->hw_features &= ~NETIF_F_HW_TLS_TX;
- netdev->tlsdev_ops = NULL;
- }
-}
-
/*
* chcr_init_tcb_fields: Initialize tcb fields to handle TCP seq number
* handling.
@@ -932,7 +917,9 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
struct fw_eth_tx_pkt_wr *wr;
struct cpl_tx_pkt_core *cpl;
u32 ctrl, iplen, maclen;
+#if IS_ENABLED(CONFIG_IPV6)
struct ipv6hdr *ip6;
+#endif
unsigned int ndesc;
struct tcphdr *tcp;
int len16, pktlen;
@@ -987,9 +974,11 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
/* we need to correct ip header len */
ip = (struct iphdr *)(buf + maclen);
ip->tot_len = htons(pktlen - maclen);
+#if IS_ENABLED(CONFIG_IPV6)
} else {
ip6 = (struct ipv6hdr *)(buf + maclen);
ip6->payload_len = htons(pktlen - maclen - iplen);
+#endif
}
/* now take care of the tcp header, if fin is not set then clear push
* bit as well, and if fin is set, it will be sent at the last so we
diff --git a/drivers/crypto/chelsio/chcr_ktls.h b/drivers/crypto/chelsio/chcr_ktls.h
index 5a7ae2ca446e..5cbd84b1da05 100644
--- a/drivers/crypto/chelsio/chcr_ktls.h
+++ b/drivers/crypto/chelsio/chcr_ktls.h
@@ -89,10 +89,15 @@ static inline int chcr_get_first_rx_qid(struct adapter *adap)
return u_ctx->lldi.rxq_ids[0];
}
-void chcr_enable_ktls(struct adapter *adap);
-void chcr_disable_ktls(struct adapter *adap);
int chcr_ktls_cpl_act_open_rpl(struct adapter *adap, unsigned char *input);
int chcr_ktls_cpl_set_tcb_rpl(struct adapter *adap, unsigned char *input);
int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev);
+int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
+ enum tls_offload_ctx_dir direction,
+ struct tls_crypto_info *crypto_info,
+ u32 start_offload_tcp_sn);
+void chcr_ktls_dev_del(struct net_device *netdev,
+ struct tls_context *tls_ctx,
+ enum tls_offload_ctx_dir direction);
#endif /* CONFIG_CHELSIO_TLS_DEVICE */
#endif /* __CHCR_KTLS_H__ */
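
[Editorial note, not part of the patch] One idiom worth calling out from the chcr_ktls.c hunks above: chcr_ktls_dev_add() now pins the driver with try_module_get() for every offloaded connection, and chcr_ktls_dev_del() releases that pin with module_put(), so the module cannot be unloaded while a kTLS connection is still live. A stripped-down sketch of the pattern, with illustrative function names only:

#include <linux/module.h>

/* Illustrative only: take one module reference per offloaded connection. */
static int example_conn_add(void)
{
	/* Refuse the offload rather than risk a connection outliving the module. */
	if (!try_module_get(THIS_MODULE))
		return -EINVAL;
	return 0;
}

/* Illustrative only: drop the reference taken in example_conn_add(). */
static void example_conn_del(void)
{
	module_put(THIS_MODULE);
}
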
diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c
index d5720a859443..f200fae6f7cb 100644
--- a/drivers/crypto/chelsio/chtls/chtls_cm.c
+++ b/drivers/crypto/chelsio/chtls/chtls_cm.c
@@ -18,13 +18,20 @@
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/if_vlan.h>
+#include <linux/ipv6.h>
+#include <net/ipv6.h>
+#include <net/transp_v6.h>
+#include <net/ip6_route.h>
#include <net/inet_common.h>
#include <net/tcp.h>
#include <net/dst.h>
#include <net/tls.h>
+#include <net/addrconf.h>
+#include <net/secure_seq.h>
#include "chtls.h"
#include "chtls_cm.h"
+#include "clip_tbl.h"
/*
* State transitions and actions for close. Note that if we are in SYN_SENT
@@ -82,15 +89,40 @@ static void chtls_sock_release(struct kref *ref)
kfree(csk);
}
-static struct net_device *chtls_ipv4_netdev(struct chtls_dev *cdev,
+static struct net_device *chtls_find_netdev(struct chtls_dev *cdev,
struct sock *sk)
{
struct net_device *ndev = cdev->ports[0];
+#if IS_ENABLED(CONFIG_IPV6)
+ struct net_device *temp;
+ int addr_type;
+#endif
+
+ switch (sk->sk_family) {
+ case PF_INET:
+ if (likely(!inet_sk(sk)->inet_rcv_saddr))
+ return ndev;
+ ndev = ip_dev_find(&init_net, inet_sk(sk)->inet_rcv_saddr);
+ break;
+#if IS_ENABLED(CONFIG_IPV6)
+ case PF_INET6:
+ addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr);
+ if (likely(addr_type == IPV6_ADDR_ANY))
+ return ndev;
+
+ for_each_netdev_rcu(&init_net, temp) {
+ if (ipv6_chk_addr(&init_net, (struct in6_addr *)
+ &sk->sk_v6_rcv_saddr, temp, 1)) {
+ ndev = temp;
+ break;
+ }
+ }
+ break;
+#endif
+ default:
+ return NULL;
+ }
- if (likely(!inet_sk(sk)->inet_rcv_saddr))
- return ndev;
-
- ndev = ip_dev_find(&init_net, inet_sk(sk)->inet_rcv_saddr);
if (!ndev)
return NULL;
@@ -446,7 +478,12 @@ void chtls_destroy_sock(struct sock *sk)
free_tls_keyid(sk);
kref_put(&csk->kref, chtls_sock_release);
csk->cdev = NULL;
- sk->sk_prot = &tcp_prot;
+ if (sk->sk_family == AF_INET)
+ sk->sk_prot = &tcp_prot;
+#if IS_ENABLED(CONFIG_IPV6)
+ else
+ sk->sk_prot = &tcpv6_prot;
+#endif
sk->sk_prot->destroy(sk);
}
@@ -473,7 +510,8 @@ static void chtls_disconnect_acceptq(struct sock *listen_sk)
while (*pprev) {
struct request_sock *req = *pprev;
- if (req->rsk_ops == &chtls_rsk_ops) {
+ if (req->rsk_ops == &chtls_rsk_ops ||
+ req->rsk_ops == &chtls_rsk_opsv6) {
struct sock *child = req->sk;
*pprev = req->dl_next;
@@ -597,17 +635,17 @@ static void chtls_reset_synq(struct listen_ctx *listen_ctx)
int chtls_listen_start(struct chtls_dev *cdev, struct sock *sk)
{
struct net_device *ndev;
+#if IS_ENABLED(CONFIG_IPV6)
+ bool clip_valid = false;
+#endif
struct listen_ctx *ctx;
struct adapter *adap;
struct port_info *pi;
+ int ret = 0;
int stid;
- int ret;
-
- if (sk->sk_family != PF_INET)
- return -EAGAIN;
rcu_read_lock();
- ndev = chtls_ipv4_netdev(cdev, sk);
+ ndev = chtls_find_netdev(cdev, sk);
rcu_read_unlock();
if (!ndev)
return -EBADF;
@@ -638,16 +676,39 @@ int chtls_listen_start(struct chtls_dev *cdev, struct sock *sk)
if (!listen_hash_add(cdev, sk, stid))
goto free_stid;
- ret = cxgb4_create_server(ndev, stid,
- inet_sk(sk)->inet_rcv_saddr,
- inet_sk(sk)->inet_sport, 0,
- cdev->lldi->rxq_ids[0]);
+ if (sk->sk_family == PF_INET) {
+ ret = cxgb4_create_server(ndev, stid,
+ inet_sk(sk)->inet_rcv_saddr,
+ inet_sk(sk)->inet_sport, 0,
+ cdev->lldi->rxq_ids[0]);
+#if IS_ENABLED(CONFIG_IPV6)
+ } else {
+ int addr_type;
+
+ addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr);
+ if (addr_type != IPV6_ADDR_ANY) {
+ ret = cxgb4_clip_get(ndev, (const u32 *)
+ &sk->sk_v6_rcv_saddr, 1);
+ if (ret)
+ goto del_hash;
+ clip_valid = true;
+ }
+ ret = cxgb4_create_server6(ndev, stid,
+ &sk->sk_v6_rcv_saddr,
+ inet_sk(sk)->inet_sport,
+ cdev->lldi->rxq_ids[0]);
+#endif
+ }
if (ret > 0)
ret = net_xmit_errno(ret);
if (ret)
goto del_hash;
return 0;
del_hash:
+#if IS_ENABLED(CONFIG_IPV6)
+ if (clip_valid)
+ cxgb4_clip_release(ndev, (const u32 *)&sk->sk_v6_rcv_saddr, 1);
+#endif
listen_hash_del(cdev, sk);
free_stid:
cxgb4_free_stid(cdev->tids, stid, sk->sk_family);
@@ -671,7 +732,21 @@ void chtls_listen_stop(struct chtls_dev *cdev, struct sock *sk)
chtls_reset_synq(listen_ctx);
cxgb4_remove_server(cdev->lldi->ports[0], stid,
- cdev->lldi->rxq_ids[0], 0);
+ cdev->lldi->rxq_ids[0], sk->sk_family == PF_INET6);
+
+#if IS_ENABLED(CONFIG_IPV6)
+ if (sk->sk_family == PF_INET6) {
+ struct chtls_sock *csk;
+ int addr_type = 0;
+
+ csk = rcu_dereference_sk_user_data(sk);
+ addr_type = ipv6_addr_type((const struct in6_addr *)
+ &sk->sk_v6_rcv_saddr);
+ if (addr_type != IPV6_ADDR_ANY)
+ cxgb4_clip_release(csk->egress_dev, (const u32 *)
+ &sk->sk_v6_rcv_saddr, 1);
+ }
+#endif
chtls_disconnect_acceptq(sk);
}
@@ -880,7 +955,12 @@ static unsigned int chtls_select_mss(const struct chtls_sock *csk,
tp = tcp_sk(sk);
tcpoptsz = 0;
- iphdrsz = sizeof(struct iphdr) + sizeof(struct tcphdr);
+#if IS_ENABLED(CONFIG_IPV6)
+ if (sk->sk_family == AF_INET6)
+ iphdrsz = sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
+ else
+#endif
+ iphdrsz = sizeof(struct iphdr) + sizeof(struct tcphdr);
if (req->tcpopt.tstamp)
tcpoptsz += round_up(TCPOLEN_TIMESTAMP, 4);
@@ -1027,13 +1107,13 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
const struct cpl_pass_accept_req *req,
struct chtls_dev *cdev)
{
+ struct neighbour *n = NULL;
struct inet_sock *newinet;
const struct iphdr *iph;
struct tls_context *ctx;
struct net_device *ndev;
struct chtls_sock *csk;
struct dst_entry *dst;
- struct neighbour *n;
struct tcp_sock *tp;
struct sock *newsk;
u16 port_id;
@@ -1045,11 +1125,31 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
if (!newsk)
goto free_oreq;
- dst = inet_csk_route_child_sock(lsk, newsk, oreq);
- if (!dst)
- goto free_sk;
+ if (lsk->sk_family == AF_INET) {
+ dst = inet_csk_route_child_sock(lsk, newsk, oreq);
+ if (!dst)
+ goto free_sk;
- n = dst_neigh_lookup(dst, &iph->saddr);
+ n = dst_neigh_lookup(dst, &iph->saddr);
+#if IS_ENABLED(CONFIG_IPV6)
+ } else {
+ const struct ipv6hdr *ip6h;
+ struct flowi6 fl6;
+
+ ip6h = (const struct ipv6hdr *)network_hdr;
+ memset(&fl6, 0, sizeof(fl6));
+ fl6.flowi6_proto = IPPROTO_TCP;
+ fl6.saddr = ip6h->daddr;
+ fl6.daddr = ip6h->saddr;
+ fl6.fl6_dport = inet_rsk(oreq)->ir_rmt_port;
+ fl6.fl6_sport = htons(inet_rsk(oreq)->ir_num);
+ security_req_classify_flow(oreq, flowi6_to_flowi(&fl6));
+ dst = ip6_dst_lookup_flow(sock_net(lsk), lsk, &fl6, NULL);
+ if (IS_ERR(dst))
+ goto free_sk;
+ n = dst_neigh_lookup(dst, &ip6h->saddr);
+#endif
+ }
if (!n)
goto free_sk;
@@ -1072,9 +1172,30 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
tp = tcp_sk(newsk);
newinet = inet_sk(newsk);
- newinet->inet_daddr = iph->saddr;
- newinet->inet_rcv_saddr = iph->daddr;
- newinet->inet_saddr = iph->daddr;
+ if (iph->version == 0x4) {
+ newinet->inet_daddr = iph->saddr;
+ newinet->inet_rcv_saddr = iph->daddr;
+ newinet->inet_saddr = iph->daddr;
+#if IS_ENABLED(CONFIG_IPV6)
+ } else {
+ struct tcp6_sock *newtcp6sk = (struct tcp6_sock *)newsk;
+ struct inet_request_sock *treq = inet_rsk(oreq);
+ struct ipv6_pinfo *newnp = inet6_sk(newsk);
+ struct ipv6_pinfo *np = inet6_sk(lsk);
+
+ inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
+ memcpy(newnp, np, sizeof(struct ipv6_pinfo));
+ newsk->sk_v6_daddr = treq->ir_v6_rmt_addr;
+ newsk->sk_v6_rcv_saddr = treq->ir_v6_loc_addr;
+ inet6_sk(newsk)->saddr = treq->ir_v6_loc_addr;
+ newnp->ipv6_fl_list = NULL;
+ newnp->pktoptions = NULL;
+ newsk->sk_bound_dev_if = treq->ir_iif;
+ newinet->inet_opt = NULL;
+ newinet->inet_daddr = LOOPBACK4_IPV6;
+ newinet->inet_saddr = LOOPBACK4_IPV6;
+#endif
+ }
oreq->ts_recent = PASS_OPEN_TID_G(ntohl(req->tos_stid));
sk_setup_caps(newsk, dst);
@@ -1156,6 +1277,7 @@ static void chtls_pass_accept_request(struct sock *sk,
struct sk_buff *reply_skb;
struct chtls_sock *csk;
struct chtls_dev *cdev;
+ struct ipv6hdr *ip6h;
struct tcphdr *tcph;
struct sock *newsk;
struct ethhdr *eh;
@@ -1196,37 +1318,52 @@ static void chtls_pass_accept_request(struct sock *sk,
if (sk_acceptq_is_full(sk))
goto reject;
- oreq = inet_reqsk_alloc(&chtls_rsk_ops, sk, true);
- if (!oreq)
- goto reject;
-
- oreq->rsk_rcv_wnd = 0;
- oreq->rsk_window_clamp = 0;
- oreq->cookie_ts = 0;
- oreq->mss = 0;
- oreq->ts_recent = 0;
eth_hdr_len = T6_ETH_HDR_LEN_G(ntohl(req->hdr_len));
if (eth_hdr_len == ETH_HLEN) {
eh = (struct ethhdr *)(req + 1);
iph = (struct iphdr *)(eh + 1);
+ ip6h = (struct ipv6hdr *)(eh + 1);
network_hdr = (void *)(eh + 1);
} else {
vlan_eh = (struct vlan_ethhdr *)(req + 1);
iph = (struct iphdr *)(vlan_eh + 1);
+ ip6h = (struct ipv6hdr *)(vlan_eh + 1);
network_hdr = (void *)(vlan_eh + 1);
}
- if (iph->version != 0x4)
- goto free_oreq;
- tcph = (struct tcphdr *)(iph + 1);
- skb_set_network_header(skb, (void *)iph - (void *)req);
+ if (iph->version == 0x4) {
+ tcph = (struct tcphdr *)(iph + 1);
+ skb_set_network_header(skb, (void *)iph - (void *)req);
+ oreq = inet_reqsk_alloc(&chtls_rsk_ops, sk, true);
+ } else {
+ tcph = (struct tcphdr *)(ip6h + 1);
+ skb_set_network_header(skb, (void *)ip6h - (void *)req);
+ oreq = inet_reqsk_alloc(&chtls_rsk_opsv6, sk, false);
+ }
+
+ if (!oreq)
+ goto reject;
+
+ oreq->rsk_rcv_wnd = 0;
+ oreq->rsk_window_clamp = 0;
+ oreq->cookie_ts = 0;
+ oreq->mss = 0;
+ oreq->ts_recent = 0;
tcp_rsk(oreq)->tfo_listener = false;
tcp_rsk(oreq)->rcv_isn = ntohl(tcph->seq);
chtls_set_req_port(oreq, tcph->source, tcph->dest);
- chtls_set_req_addr(oreq, iph->daddr, iph->saddr);
- ip_dsfield = ipv4_get_dsfield(iph);
+ if (iph->version == 0x4) {
+ chtls_set_req_addr(oreq, iph->daddr, iph->saddr);
+ ip_dsfield = ipv4_get_dsfield(iph);
+#if IS_ENABLED(CONFIG_IPV6)
+ } else {
+ inet_rsk(oreq)->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
+ inet_rsk(oreq)->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
+ ip_dsfield = ipv6_get_dsfield(ipv6_hdr(skb));
+#endif
+ }
if (req->tcpopt.wsf <= 14 &&
sock_net(sk)->ipv4.sysctl_tcp_window_scaling) {
inet_rsk(oreq)->wscale_ok = 1;
@@ -1243,7 +1380,7 @@ static void chtls_pass_accept_request(struct sock *sk,
newsk = chtls_recv_sock(sk, oreq, network_hdr, req, cdev);
if (!newsk)
- goto reject;
+ goto free_oreq;
if (chtls_get_module(newsk))
goto reject;
diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.h b/drivers/crypto/chelsio/chtls/chtls_cm.h
index 3fac0c74a41f..47ba81e42f5d 100644
--- a/drivers/crypto/chelsio/chtls/chtls_cm.h
+++ b/drivers/crypto/chelsio/chtls/chtls_cm.h
@@ -79,6 +79,7 @@ enum {
typedef void (*defer_handler_t)(struct chtls_dev *dev, struct sk_buff *skb);
extern struct request_sock_ops chtls_rsk_ops;
+extern struct request_sock_ops chtls_rsk_opsv6;
struct deferred_skb_cb {
defer_handler_t handler;
diff --git a/drivers/crypto/chelsio/chtls/chtls_main.c b/drivers/crypto/chelsio/chtls/chtls_main.c
index 2110d0893bc7..d98b89d0fa6e 100644
--- a/drivers/crypto/chelsio/chtls/chtls_main.c
+++ b/drivers/crypto/chelsio/chtls/chtls_main.c
@@ -13,6 +13,8 @@
#include <linux/net.h>
#include <linux/ip.h>
#include <linux/tcp.h>
+#include <net/ipv6.h>
+#include <net/transp_v6.h>
#include <net/tcp.h>
#include <net/tls.h>
@@ -30,8 +32,8 @@ static DEFINE_MUTEX(cdev_mutex);
static DEFINE_MUTEX(notify_mutex);
static RAW_NOTIFIER_HEAD(listen_notify_list);
-static struct proto chtls_cpl_prot;
-struct request_sock_ops chtls_rsk_ops;
+static struct proto chtls_cpl_prot, chtls_cpl_protv6;
+struct request_sock_ops chtls_rsk_ops, chtls_rsk_opsv6;
static uint send_page_order = (14 - PAGE_SHIFT < 0) ? 0 : 14 - PAGE_SHIFT;
static void register_listen_notifier(struct notifier_block *nb)
@@ -586,7 +588,10 @@ static struct cxgb4_uld_info chtls_uld_info = {
void chtls_install_cpl_ops(struct sock *sk)
{
- sk->sk_prot = &chtls_cpl_prot;
+ if (sk->sk_family == AF_INET)
+ sk->sk_prot = &chtls_cpl_prot;
+ else
+ sk->sk_prot = &chtls_cpl_protv6;
}
static void __init chtls_init_ulp_ops(void)
@@ -603,6 +608,11 @@ static void __init chtls_init_ulp_ops(void)
chtls_cpl_prot.recvmsg = chtls_recvmsg;
chtls_cpl_prot.setsockopt = chtls_setsockopt;
chtls_cpl_prot.getsockopt = chtls_getsockopt;
+#if IS_ENABLED(CONFIG_IPV6)
+ chtls_cpl_protv6 = chtls_cpl_prot;
+ chtls_init_rsk_ops(&chtls_cpl_protv6, &chtls_rsk_opsv6,
+ &tcpv6_prot, PF_INET6);
+#endif
}
static int __init chtls_register(void)
diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig
index f09c6cf7823e..9c3b3ca815e6 100644
--- a/drivers/crypto/hisilicon/Kconfig
+++ b/drivers/crypto/hisilicon/Kconfig
@@ -29,6 +29,7 @@ config CRYPTO_DEV_HISI_SEC2
depends on PCI && PCI_MSI
depends on UACCE || UACCE=n
depends on ARM64 || (COMPILE_TEST && 64BIT)
+ depends on ACPI
help
Support for HiSilicon SEC Engine of version 2 in crypto subsystem.
It provides AES, SM4, and 3DES algorithms with ECB
@@ -42,6 +43,7 @@ config CRYPTO_DEV_HISI_QM
depends on ARM64 || COMPILE_TEST
depends on PCI && PCI_MSI
depends on UACCE || UACCE=n
+ depends on ACPI
help
HiSilicon accelerator engines use a common queue management
interface. Specific engine driver may use this module.
@@ -52,6 +54,7 @@ config CRYPTO_DEV_HISI_ZIP
depends on ARM64 || (COMPILE_TEST && 64BIT)
depends on !CPU_BIG_ENDIAN || COMPILE_TEST
depends on UACCE || UACCE=n
+ depends on ACPI
select CRYPTO_DEV_HISI_QM
help
Support for HiSilicon ZIP Driver
@@ -61,6 +64,7 @@ config CRYPTO_DEV_HISI_HPRE
depends on PCI && PCI_MSI
depends on UACCE || UACCE=n
depends on ARM64 || (COMPILE_TEST && 64BIT)
+ depends on ACPI
select CRYPTO_DEV_HISI_QM
select CRYPTO_DH
select CRYPTO_RSA
diff --git a/drivers/crypto/hisilicon/hpre/hpre.h b/drivers/crypto/hisilicon/hpre/hpre.h
index 03d512ec6336..ed730d173e95 100644
--- a/drivers/crypto/hisilicon/hpre/hpre.h
+++ b/drivers/crypto/hisilicon/hpre/hpre.h
@@ -25,6 +25,17 @@ enum hpre_ctrl_dbgfs_file {
HPRE_DEBUG_FILE_NUM,
};
+enum hpre_dfx_dbgfs_file {
+ HPRE_SEND_CNT,
+ HPRE_RECV_CNT,
+ HPRE_SEND_FAIL_CNT,
+ HPRE_SEND_BUSY_CNT,
+ HPRE_OVER_THRHLD_CNT,
+ HPRE_OVERTIME_THRHLD,
+ HPRE_INVALID_REQ_CNT,
+ HPRE_DFX_FILE_NUM
+};
+
#define HPRE_DEBUGFS_FILE_NUM (HPRE_DEBUG_FILE_NUM + HPRE_CLUSTERS_NUM - 1)
struct hpre_debugfs_file {
@@ -34,6 +45,11 @@ struct hpre_debugfs_file {
struct hpre_debug *debug;
};
+struct hpre_dfx {
+ atomic64_t value;
+ enum hpre_dfx_dbgfs_file type;
+};
+
/*
* One HPRE controller has one PF and multiple VFs, some global configurations
* which PF has need this structure.
@@ -41,13 +57,13 @@ struct hpre_debugfs_file {
*/
struct hpre_debug {
struct dentry *debug_root;
+ struct hpre_dfx dfx[HPRE_DFX_FILE_NUM];
struct hpre_debugfs_file files[HPRE_DEBUGFS_FILE_NUM];
};
struct hpre {
struct hisi_qm qm;
struct hpre_debug debug;
- u32 num_vfs;
unsigned long status;
};
diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
index 65425250b2e9..7b5cb27d473d 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
@@ -10,6 +10,7 @@
#include <linux/dma-mapping.h>
#include <linux/fips.h>
#include <linux/module.h>
+#include <linux/time.h>
#include "hpre.h"
struct hpre_ctx;
@@ -32,6 +33,9 @@ struct hpre_ctx;
#define HPRE_SQE_DONE_SHIFT 30
#define HPRE_DH_MAX_P_SZ 512
+#define HPRE_DFX_SEC_TO_US 1000000
+#define HPRE_DFX_US_TO_NS 1000
+
typedef void (*hpre_cb)(struct hpre_ctx *ctx, void *sqe);
struct hpre_rsa_ctx {
@@ -68,6 +72,7 @@ struct hpre_dh_ctx {
struct hpre_ctx {
struct hisi_qp *qp;
struct hpre_asym_request **req_list;
+ struct hpre *hpre;
spinlock_t req_lock;
unsigned int key_sz;
bool crt_g2_mode;
@@ -90,6 +95,7 @@ struct hpre_asym_request {
int err;
int req_id;
hpre_cb cb;
+ struct timespec64 req_time;
};
static DEFINE_MUTEX(hpre_alg_lock);
@@ -119,6 +125,7 @@ static void hpre_free_req_id(struct hpre_ctx *ctx, int req_id)
static int hpre_add_req_to_ctx(struct hpre_asym_request *hpre_req)
{
struct hpre_ctx *ctx;
+ struct hpre_dfx *dfx;
int id;
ctx = hpre_req->ctx;
@@ -129,6 +136,10 @@ static int hpre_add_req_to_ctx(struct hpre_asym_request *hpre_req)
ctx->req_list[id] = hpre_req;
hpre_req->req_id = id;
+ dfx = ctx->hpre->debug.dfx;
+ if (atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value))
+ ktime_get_ts64(&hpre_req->req_time);
+
return id;
}
@@ -309,12 +320,16 @@ static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe,
static int hpre_ctx_set(struct hpre_ctx *ctx, struct hisi_qp *qp, int qlen)
{
+ struct hpre *hpre;
+
if (!ctx || !qp || qlen < 0)
return -EINVAL;
spin_lock_init(&ctx->req_lock);
ctx->qp = qp;
+ hpre = container_of(ctx->qp->qm, struct hpre, qm);
+ ctx->hpre = hpre;
ctx->req_list = kcalloc(qlen, sizeof(void *), GFP_KERNEL);
if (!ctx->req_list)
return -ENOMEM;
@@ -337,38 +352,80 @@ static void hpre_ctx_clear(struct hpre_ctx *ctx, bool is_clear_all)
ctx->key_sz = 0;
}
+static bool hpre_is_bd_timeout(struct hpre_asym_request *req,
+ u64 overtime_thrhld)
+{
+ struct timespec64 reply_time;
+ u64 time_use_us;
+
+ ktime_get_ts64(&reply_time);
+ time_use_us = (reply_time.tv_sec - req->req_time.tv_sec) *
+ HPRE_DFX_SEC_TO_US +
+ (reply_time.tv_nsec - req->req_time.tv_nsec) /
+ HPRE_DFX_US_TO_NS;
+
+ if (time_use_us <= overtime_thrhld)
+ return false;
+
+ return true;
+}
+
static void hpre_dh_cb(struct hpre_ctx *ctx, void *resp)
{
+ struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
struct hpre_asym_request *req;
struct kpp_request *areq;
+ u64 overtime_thrhld;
int ret;
ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
areq = req->areq.dh;
areq->dst_len = ctx->key_sz;
+
+ overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
+ if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
+ atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
+
hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
kpp_request_complete(areq, ret);
+ atomic64_inc(&dfx[HPRE_RECV_CNT].value);
}
static void hpre_rsa_cb(struct hpre_ctx *ctx, void *resp)
{
+ struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
struct hpre_asym_request *req;
struct akcipher_request *areq;
+ u64 overtime_thrhld;
int ret;
ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
+
+ overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
+ if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
+ atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
+
areq = req->areq.rsa;
areq->dst_len = ctx->key_sz;
hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
akcipher_request_complete(areq, ret);
+ atomic64_inc(&dfx[HPRE_RECV_CNT].value);
}
static void hpre_alg_cb(struct hisi_qp *qp, void *resp)
{
struct hpre_ctx *ctx = qp->qp_ctx;
+ struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
struct hpre_sqe *sqe = resp;
+ struct hpre_asym_request *req = ctx->req_list[le16_to_cpu(sqe->tag)];
- ctx->req_list[le16_to_cpu(sqe->tag)]->cb(ctx, resp);
+
+ if (unlikely(!req)) {
+ atomic64_inc(&dfx[HPRE_INVALID_REQ_CNT].value);
+ return;
+ }
+
+ req->cb(ctx, resp);
}
static int hpre_ctx_init(struct hpre_ctx *ctx)
@@ -436,6 +493,29 @@ static int hpre_msg_request_set(struct hpre_ctx *ctx, void *req, bool is_rsa)
return 0;
}
+static int hpre_send(struct hpre_ctx *ctx, struct hpre_sqe *msg)
+{
+ struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
+ int ctr = 0;
+ int ret;
+
+ do {
+ atomic64_inc(&dfx[HPRE_SEND_CNT].value);
+ ret = hisi_qp_send(ctx->qp, msg);
+ if (ret != -EBUSY)
+ break;
+ atomic64_inc(&dfx[HPRE_SEND_BUSY_CNT].value);
+ } while (ctr++ < HPRE_TRY_SEND_TIMES);
+
+ if (likely(!ret))
+ return ret;
+
+ if (ret != -EBUSY)
+ atomic64_inc(&dfx[HPRE_SEND_FAIL_CNT].value);
+
+ return ret;
+}
+
#ifdef CONFIG_CRYPTO_DH
static int hpre_dh_compute_value(struct kpp_request *req)
{
@@ -444,7 +524,6 @@ static int hpre_dh_compute_value(struct kpp_request *req)
void *tmp = kpp_request_ctx(req);
struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
struct hpre_sqe *msg = &hpre_req->req;
- int ctr = 0;
int ret;
ret = hpre_msg_request_set(ctx, req, false);
@@ -465,11 +544,9 @@ static int hpre_dh_compute_value(struct kpp_request *req)
msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH_G2);
else
msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH);
- do {
- ret = hisi_qp_send(ctx->qp, msg);
- } while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES);
/* success */
+ ret = hpre_send(ctx, msg);
if (likely(!ret))
return -EINPROGRESS;
@@ -647,7 +724,6 @@ static int hpre_rsa_enc(struct akcipher_request *req)
void *tmp = akcipher_request_ctx(req);
struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
struct hpre_sqe *msg = &hpre_req->req;
- int ctr = 0;
int ret;
/* For 512 and 1536 bits key size, use soft tfm instead */
@@ -677,11 +753,8 @@ static int hpre_rsa_enc(struct akcipher_request *req)
if (unlikely(ret))
goto clear_all;
- do {
- ret = hisi_qp_send(ctx->qp, msg);
- } while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES);
-
/* success */
+ ret = hpre_send(ctx, msg);
if (likely(!ret))
return -EINPROGRESS;
@@ -699,7 +772,6 @@ static int hpre_rsa_dec(struct akcipher_request *req)
void *tmp = akcipher_request_ctx(req);
struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
struct hpre_sqe *msg = &hpre_req->req;
- int ctr = 0;
int ret;
/* For 512 and 1536 bits key size, use soft tfm instead */
@@ -736,11 +808,8 @@ static int hpre_rsa_dec(struct akcipher_request *req)
if (unlikely(ret))
goto clear_all;
- do {
- ret = hisi_qp_send(ctx->qp, msg);
- } while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES);
-
/* success */
+ ret = hpre_send(ctx, msg);
if (likely(!ret))
return -EINPROGRESS;
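
[Editorial note, not part of the patch] The DFX plumbing added above timestamps each request with ktime_get_ts64() when an overtime threshold is configured, and hpre_is_bd_timeout() later folds the difference of two timespec64 samples into microseconds (HPRE_DFX_SEC_TO_US for the seconds part, HPRE_DFX_US_TO_NS as the nanoseconds-to-microseconds divisor) before comparing against the threshold. A small sketch of the same arithmetic using the generic time64 constants, purely for reference:

#include <linux/time64.h>
#include <linux/types.h>

/* Illustrative only: microseconds elapsed between two timespec64 samples,
 * mirroring the calculation in hpre_is_bd_timeout().
 */
static u64 example_elapsed_us(const struct timespec64 *start,
			      const struct timespec64 *end)
{
	return (end->tv_sec - start->tv_sec) * USEC_PER_SEC +
	       (end->tv_nsec - start->tv_nsec) / NSEC_PER_USEC;
}
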
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index 88be53bf4a38..a3ee127a70e3 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -59,10 +59,6 @@
#define HPRE_HAC_ECC2_CNT 0x301a08
#define HPRE_HAC_INT_STATUS 0x301800
#define HPRE_HAC_SOURCE_INT 0x301600
-#define MASTER_GLOBAL_CTRL_SHUTDOWN 1
-#define MASTER_TRANS_RETURN_RW 3
-#define HPRE_MASTER_TRANS_RETURN 0x300150
-#define HPRE_MASTER_GLOBAL_CTRL 0x300000
#define HPRE_CLSTR_ADDR_INTRVL 0x1000
#define HPRE_CLUSTER_INQURY 0x100
#define HPRE_CLSTR_ADDR_INQRY_RSLT 0x104
@@ -80,7 +76,16 @@
#define HPRE_BD_USR_MASK 0x3
#define HPRE_CLUSTER_CORE_MASK 0xf
+#define HPRE_AM_OOO_SHUTDOWN_ENB 0x301044
+#define HPRE_AM_OOO_SHUTDOWN_ENABLE BIT(0)
+#define HPRE_WR_MSI_PORT BIT(2)
+
+#define HPRE_CORE_ECC_2BIT_ERR BIT(1)
+#define HPRE_OOO_ECC_2BIT_ERR BIT(5)
+
#define HPRE_VIA_MSI_DSM 1
+#define HPRE_SQE_MASK_OFFSET 8
+#define HPRE_SQE_MASK_LEN 24
static struct hisi_qm_list hpre_devices;
static const char hpre_name[] = "hisi_hpre";
@@ -131,7 +136,7 @@ static const u64 hpre_cluster_offsets[] = {
HPRE_CLSTR_BASE + HPRE_CLUSTER3 * HPRE_CLSTR_ADDR_INTRVL,
};
-static struct debugfs_reg32 hpre_cluster_dfx_regs[] = {
+static const struct debugfs_reg32 hpre_cluster_dfx_regs[] = {
{"CORES_EN_STATUS ", HPRE_CORE_EN_OFFSET},
{"CORES_INI_CFG ", HPRE_CORE_INI_CFG_OFFSET},
{"CORES_INI_STATUS ", HPRE_CORE_INI_STATUS_OFFSET},
@@ -139,7 +144,7 @@ static struct debugfs_reg32 hpre_cluster_dfx_regs[] = {
{"CORES_IS_SCHD ", HPRE_CORE_IS_SCHD_OFFSET},
};
-static struct debugfs_reg32 hpre_com_dfx_regs[] = {
+static const struct debugfs_reg32 hpre_com_dfx_regs[] = {
{"READ_CLR_EN ", HPRE_CTRL_CNT_CLR_CE},
{"AXQOS ", HPRE_VFG_AXQOS},
{"AWUSR_CFG ", HPRE_AWUSR_FP_CFG},
@@ -156,44 +161,38 @@ static struct debugfs_reg32 hpre_com_dfx_regs[] = {
{"INT_STATUS ", HPRE_INT_STATUS},
};
-static int hpre_pf_q_num_set(const char *val, const struct kernel_param *kp)
-{
- struct pci_dev *pdev;
- u32 n, q_num;
- u8 rev_id;
- int ret;
-
- if (!val)
- return -EINVAL;
-
- pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI, HPRE_PCI_DEVICE_ID, NULL);
- if (!pdev) {
- q_num = HPRE_QUEUE_NUM_V2;
- pr_info("No device found currently, suppose queue number is %d\n",
- q_num);
- } else {
- rev_id = pdev->revision;
- if (rev_id != QM_HW_V2)
- return -EINVAL;
-
- q_num = HPRE_QUEUE_NUM_V2;
- }
-
- ret = kstrtou32(val, 10, &n);
- if (ret != 0 || n == 0 || n > q_num)
- return -EINVAL;
+static const char *hpre_dfx_files[HPRE_DFX_FILE_NUM] = {
+ "send_cnt",
+ "recv_cnt",
+ "send_fail_cnt",
+ "send_busy_cnt",
+ "over_thrhld_cnt",
+ "overtime_thrhld",
+ "invalid_req_cnt"
+};
- return param_set_int(val, kp);
+static int pf_q_num_set(const char *val, const struct kernel_param *kp)
+{
+ return q_num_set(val, kp, HPRE_PCI_DEVICE_ID);
}
static const struct kernel_param_ops hpre_pf_q_num_ops = {
- .set = hpre_pf_q_num_set,
+ .set = pf_q_num_set,
.get = param_get_int,
};
-static u32 hpre_pf_q_num = HPRE_PF_DEF_Q_NUM;
-module_param_cb(hpre_pf_q_num, &hpre_pf_q_num_ops, &hpre_pf_q_num, 0444);
-MODULE_PARM_DESC(hpre_pf_q_num, "Number of queues in PF of CS(1-1024)");
+static u32 pf_q_num = HPRE_PF_DEF_Q_NUM;
+module_param_cb(pf_q_num, &hpre_pf_q_num_ops, &pf_q_num, 0444);
+MODULE_PARM_DESC(pf_q_num, "Number of queues in PF of CS(1-1024)");
+
+static const struct kernel_param_ops vfs_num_ops = {
+ .set = vfs_num_set,
+ .get = param_get_int,
+};
+
+static u32 vfs_num;
+module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
+MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)");
struct hisi_qp *hpre_create_qp(void)
{
@@ -232,9 +231,8 @@ static int hpre_cfg_by_dsm(struct hisi_qm *qm)
return 0;
}
-static int hpre_set_user_domain_and_cache(struct hpre *hpre)
+static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hpre->qm;
struct device *dev = &qm->pdev->dev;
unsigned long offset;
int ret, i;
@@ -324,17 +322,34 @@ static void hpre_cnt_regs_clear(struct hisi_qm *qm)
static void hpre_hw_error_disable(struct hisi_qm *qm)
{
+ u32 val;
+
/* disable hpre hw error interrupts */
writel(HPRE_CORE_INT_DISABLE, qm->io_base + HPRE_INT_MASK);
+
+ /* disable HPRE block master OOO when m-bit error occur */
+ val = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
+ val &= ~HPRE_AM_OOO_SHUTDOWN_ENABLE;
+ writel(val, qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
}
static void hpre_hw_error_enable(struct hisi_qm *qm)
{
+ u32 val;
+
+ /* clear HPRE hw error source if having */
+ writel(HPRE_CORE_INT_DISABLE, qm->io_base + HPRE_HAC_SOURCE_INT);
+
/* enable hpre hw error interrupts */
writel(HPRE_CORE_INT_ENABLE, qm->io_base + HPRE_INT_MASK);
writel(HPRE_HAC_RAS_CE_ENABLE, qm->io_base + HPRE_RAS_CE_ENB);
writel(HPRE_HAC_RAS_NFE_ENABLE, qm->io_base + HPRE_RAS_NFE_ENB);
writel(HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_RAS_FE_ENB);
+
+ /* enable HPRE block master OOO when m-bit error occur */
+ val = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
+ val |= HPRE_AM_OOO_SHUTDOWN_ENABLE;
+ writel(val, qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
}
static inline struct hisi_qm *hpre_file_to_qm(struct hpre_debugfs_file *file)
@@ -354,9 +369,7 @@ static u32 hpre_current_qm_read(struct hpre_debugfs_file *file)
static int hpre_current_qm_write(struct hpre_debugfs_file *file, u32 val)
{
struct hisi_qm *qm = hpre_file_to_qm(file);
- struct hpre_debug *debug = file->debug;
- struct hpre *hpre = container_of(debug, struct hpre, debug);
- u32 num_vfs = hpre->num_vfs;
+ u32 num_vfs = qm->vfs_num;
u32 vfq_num, tmp;
@@ -523,6 +536,33 @@ static const struct file_operations hpre_ctrl_debug_fops = {
.write = hpre_ctrl_debug_write,
};
+static int hpre_debugfs_atomic64_get(void *data, u64 *val)
+{
+ struct hpre_dfx *dfx_item = data;
+
+ *val = atomic64_read(&dfx_item->value);
+
+ return 0;
+}
+
+static int hpre_debugfs_atomic64_set(void *data, u64 val)
+{
+ struct hpre_dfx *dfx_item = data;
+ struct hpre_dfx *hpre_dfx = dfx_item - HPRE_OVERTIME_THRHLD;
+
+ if (val)
+ return -EINVAL;
+
+ if (dfx_item->type == HPRE_OVERTIME_THRHLD)
+ atomic64_set(&hpre_dfx[HPRE_OVER_THRHLD_CNT].value, 0);
+ atomic64_set(&dfx_item->value, val);
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(hpre_atomic64_ops, hpre_debugfs_atomic64_get,
+ hpre_debugfs_atomic64_set, "%llu\n");
+
static int hpre_create_debugfs_file(struct hpre_debug *dbg, struct dentry *dir,
enum hpre_ctrl_dbgfs_file type, int indx)
{
@@ -620,6 +660,22 @@ static int hpre_ctrl_debug_init(struct hpre_debug *debug)
return hpre_cluster_debugfs_init(debug);
}
+static void hpre_dfx_debug_init(struct hpre_debug *debug)
+{
+ struct hpre *hpre = container_of(debug, struct hpre, debug);
+ struct hpre_dfx *dfx = hpre->debug.dfx;
+ struct hisi_qm *qm = &hpre->qm;
+ struct dentry *parent;
+ int i;
+
+ parent = debugfs_create_dir("hpre_dfx", qm->debug.debug_root);
+ for (i = 0; i < HPRE_DFX_FILE_NUM; i++) {
+ dfx[i].type = i;
+ debugfs_create_file(hpre_dfx_files[i], 0644, parent, &dfx[i],
+ &hpre_atomic64_ops);
+ }
+}
+
static int hpre_debugfs_init(struct hpre *hpre)
{
struct hisi_qm *qm = &hpre->qm;
@@ -629,6 +685,8 @@ static int hpre_debugfs_init(struct hpre *hpre)
dir = debugfs_create_dir(dev_name(dev), hpre_debugfs_root);
qm->debug.debug_root = dir;
+ qm->debug.sqe_mask_offset = HPRE_SQE_MASK_OFFSET;
+ qm->debug.sqe_mask_len = HPRE_SQE_MASK_LEN;
ret = hisi_qm_debug_init(qm);
if (ret)
@@ -640,6 +698,9 @@ static int hpre_debugfs_init(struct hpre *hpre)
if (ret)
goto failed_to_create;
}
+
+ hpre_dfx_debug_init(&hpre->debug);
+
return 0;
failed_to_create:
@@ -654,32 +715,27 @@ static void hpre_debugfs_exit(struct hpre *hpre)
debugfs_remove_recursive(qm->debug.debug_root);
}
-static int hpre_qm_pre_init(struct hisi_qm *qm, struct pci_dev *pdev)
+static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
- enum qm_hw_ver rev_id;
-
- rev_id = hisi_qm_get_hw_version(pdev);
- if (rev_id < 0)
- return -ENODEV;
-
- if (rev_id == QM_HW_V1) {
+ if (pdev->revision == QM_HW_V1) {
pci_warn(pdev, "HPRE version 1 is not supported!\n");
return -EINVAL;
}
qm->pdev = pdev;
- qm->ver = rev_id;
+ qm->ver = pdev->revision;
qm->sqe_size = HPRE_SQE_SIZE;
qm->dev_name = hpre_name;
+
qm->fun_type = (pdev->device == HPRE_PCI_DEVICE_ID) ?
- QM_HW_PF : QM_HW_VF;
- if (pdev->is_physfn) {
+ QM_HW_PF : QM_HW_VF;
+ if (qm->fun_type == QM_HW_PF) {
qm->qp_base = HPRE_PF_DEF_Q_BASE;
- qm->qp_num = hpre_pf_q_num;
+ qm->qp_num = pf_q_num;
+ qm->qm_list = &hpre_devices;
}
- qm->use_dma_api = true;
- return 0;
+ return hisi_qm_init(qm);
}
static void hpre_log_hw_error(struct hisi_qm *qm, u32 err_sts)
@@ -693,8 +749,6 @@ static void hpre_log_hw_error(struct hisi_qm *qm, u32 err_sts)
err->msg, err->int_msk);
err++;
}
-
- writel(err_sts, qm->io_base + HPRE_HAC_SOURCE_INT);
}
static u32 hpre_get_hw_err_status(struct hisi_qm *qm)
@@ -702,16 +756,38 @@ static u32 hpre_get_hw_err_status(struct hisi_qm *qm)
return readl(qm->io_base + HPRE_HAC_INT_STATUS);
}
+static void hpre_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
+{
+ writel(err_sts, qm->io_base + HPRE_HAC_SOURCE_INT);
+}
+
+static void hpre_open_axi_master_ooo(struct hisi_qm *qm)
+{
+ u32 value;
+
+ value = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
+ writel(value & ~HPRE_AM_OOO_SHUTDOWN_ENABLE,
+ HPRE_ADDR(qm, HPRE_AM_OOO_SHUTDOWN_ENB));
+ writel(value | HPRE_AM_OOO_SHUTDOWN_ENABLE,
+ HPRE_ADDR(qm, HPRE_AM_OOO_SHUTDOWN_ENB));
+}
+
static const struct hisi_qm_err_ini hpre_err_ini = {
+ .hw_init = hpre_set_user_domain_and_cache,
.hw_err_enable = hpre_hw_error_enable,
.hw_err_disable = hpre_hw_error_disable,
.get_dev_hw_err_status = hpre_get_hw_err_status,
+ .clear_dev_hw_err_status = hpre_clear_hw_err_status,
.log_dev_hw_err = hpre_log_hw_error,
+ .open_axi_master_ooo = hpre_open_axi_master_ooo,
.err_info = {
.ce = QM_BASE_CE,
.nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT,
.fe = 0,
- .msi = QM_DB_RANDOM_INVALID,
+ .ecc_2bits_mask = HPRE_CORE_ECC_2BIT_ERR |
+ HPRE_OOO_ECC_2BIT_ERR,
+ .msi_wr_port = HPRE_WR_MSI_PORT,
+ .acpi_rst = "HRST",
}
};
@@ -722,7 +798,7 @@ static int hpre_pf_probe_init(struct hpre *hpre)
qm->ctrl_qp_num = HPRE_QUEUE_NUM_V2;
- ret = hpre_set_user_domain_and_cache(hpre);
+ ret = hpre_set_user_domain_and_cache(qm);
if (ret)
return ret;
@@ -732,6 +808,20 @@ static int hpre_pf_probe_init(struct hpre *hpre)
return 0;
}
+static int hpre_probe_init(struct hpre *hpre)
+{
+ struct hisi_qm *qm = &hpre->qm;
+ int ret;
+
+ if (qm->fun_type == QM_HW_PF) {
+ ret = hpre_pf_probe_init(hpre);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct hisi_qm *qm;
@@ -742,26 +832,17 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (!hpre)
return -ENOMEM;
- pci_set_drvdata(pdev, hpre);
-
qm = &hpre->qm;
- ret = hpre_qm_pre_init(qm, pdev);
- if (ret)
- return ret;
-
- ret = hisi_qm_init(qm);
- if (ret)
+ ret = hpre_qm_init(qm, pdev);
+ if (ret) {
+ pci_err(pdev, "Failed to init HPRE QM (%d)!\n", ret);
return ret;
+ }
- if (pdev->is_physfn) {
- ret = hpre_pf_probe_init(hpre);
- if (ret)
- goto err_with_qm_init;
- } else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V2) {
- /* v2 starts to support get vft by mailbox */
- ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
- if (ret)
- goto err_with_qm_init;
+ ret = hpre_probe_init(hpre);
+ if (ret) {
+ pci_err(pdev, "Failed to probe (%d)!\n", ret);
+ goto err_with_qm_init;
}
ret = hisi_qm_start(qm);
@@ -779,8 +860,18 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_err(pdev, "fail to register algs to crypto!\n");
goto err_with_qm_start;
}
+
+ if (qm->fun_type == QM_HW_PF && vfs_num) {
+ ret = hisi_qm_sriov_enable(pdev, vfs_num);
+ if (ret < 0)
+ goto err_with_crypto_register;
+ }
+
return 0;
+err_with_crypto_register:
+ hpre_algs_unregister();
+
err_with_qm_start:
hisi_qm_del_from_list(qm, &hpre_devices);
hisi_qm_stop(qm);
@@ -794,107 +885,6 @@ err_with_qm_init:
return ret;
}
-static int hpre_vf_q_assign(struct hpre *hpre, int num_vfs)
-{
- struct hisi_qm *qm = &hpre->qm;
- u32 qp_num = qm->qp_num;
- int q_num, remain_q_num, i;
- u32 q_base = qp_num;
- int ret;
-
- if (!num_vfs)
- return -EINVAL;
-
- remain_q_num = qm->ctrl_qp_num - qp_num;
-
- /* If remaining queues are not enough, return error. */
- if (remain_q_num < num_vfs)
- return -EINVAL;
-
- q_num = remain_q_num / num_vfs;
- for (i = 1; i <= num_vfs; i++) {
- if (i == num_vfs)
- q_num += remain_q_num % num_vfs;
- ret = hisi_qm_set_vft(qm, i, q_base, (u32)q_num);
- if (ret)
- return ret;
- q_base += q_num;
- }
-
- return 0;
-}
-
-static int hpre_clear_vft_config(struct hpre *hpre)
-{
- struct hisi_qm *qm = &hpre->qm;
- u32 num_vfs = hpre->num_vfs;
- int ret;
- u32 i;
-
- for (i = 1; i <= num_vfs; i++) {
- ret = hisi_qm_set_vft(qm, i, 0, 0);
- if (ret)
- return ret;
- }
- hpre->num_vfs = 0;
-
- return 0;
-}
-
-static int hpre_sriov_enable(struct pci_dev *pdev, int max_vfs)
-{
- struct hpre *hpre = pci_get_drvdata(pdev);
- int pre_existing_vfs, num_vfs, ret;
-
- pre_existing_vfs = pci_num_vf(pdev);
- if (pre_existing_vfs) {
- pci_err(pdev,
- "Can't enable VF. Please disable pre-enabled VFs!\n");
- return 0;
- }
-
- num_vfs = min_t(int, max_vfs, HPRE_VF_NUM);
- ret = hpre_vf_q_assign(hpre, num_vfs);
- if (ret) {
- pci_err(pdev, "Can't assign queues for VF!\n");
- return ret;
- }
-
- hpre->num_vfs = num_vfs;
-
- ret = pci_enable_sriov(pdev, num_vfs);
- if (ret) {
- pci_err(pdev, "Can't enable VF!\n");
- hpre_clear_vft_config(hpre);
- return ret;
- }
-
- return num_vfs;
-}
-
-static int hpre_sriov_disable(struct pci_dev *pdev)
-{
- struct hpre *hpre = pci_get_drvdata(pdev);
-
- if (pci_vfs_assigned(pdev)) {
- pci_err(pdev, "Failed to disable VFs while VFs are assigned!\n");
- return -EPERM;
- }
-
- /* remove in hpre_pci_driver will be called to free VF resources */
- pci_disable_sriov(pdev);
-
- return hpre_clear_vft_config(hpre);
-}
-
-static int hpre_sriov_configure(struct pci_dev *pdev, int num_vfs)
-{
- if (num_vfs)
- return hpre_sriov_enable(pdev, num_vfs);
- else
- return hpre_sriov_disable(pdev);
-}
-
static void hpre_remove(struct pci_dev *pdev)
{
struct hpre *hpre = pci_get_drvdata(pdev);
@@ -903,8 +893,8 @@ static void hpre_remove(struct pci_dev *pdev)
hpre_algs_unregister();
hisi_qm_del_from_list(qm, &hpre_devices);
- if (qm->fun_type == QM_HW_PF && hpre->num_vfs != 0) {
- ret = hpre_sriov_disable(pdev);
+ if (qm->fun_type == QM_HW_PF && qm->vfs_num) {
+ ret = hisi_qm_sriov_disable(pdev);
if (ret) {
pci_err(pdev, "Disable SRIOV fail!\n");
return;
@@ -924,6 +914,9 @@ static void hpre_remove(struct pci_dev *pdev)
static const struct pci_error_handlers hpre_err_handler = {
.error_detected = hisi_qm_dev_err_detected,
+ .slot_reset = hisi_qm_dev_slot_reset,
+ .reset_prepare = hisi_qm_reset_prepare,
+ .reset_done = hisi_qm_reset_done,
};
static struct pci_driver hpre_pci_driver = {
@@ -931,7 +924,7 @@ static struct pci_driver hpre_pci_driver = {
.id_table = hpre_dev_ids,
.probe = hpre_probe,
.remove = hpre_remove,
- .sriov_configure = hpre_sriov_configure,
+ .sriov_configure = hisi_qm_sriov_configure,
.err_handler = &hpre_err_handler,
};
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index f795fb557630..9bb263cec6c3 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -1,9 +1,12 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */
#include <asm/page.h>
+#include <linux/acpi.h>
+#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
+#include <linux/idr.h>
#include <linux/io.h>
#include <linux/irqreturn.h>
#include <linux/log2.h>
@@ -53,6 +56,7 @@
#define QM_SQ_TYPE_SHIFT 8
#define QM_SQ_TYPE_MASK GENMASK(3, 0)
+#define QM_SQ_TAIL_IDX(sqc) ((le16_to_cpu((sqc)->w11) >> 6) & 0x1)
/* cqc shift */
#define QM_CQ_HOP_NUM_SHIFT 0
@@ -64,6 +68,7 @@
#define QM_CQE_PHASE(cqe) (le16_to_cpu((cqe)->w7) & 0x1)
#define QM_QC_CQE_SIZE 4
+#define QM_CQ_TAIL_IDX(cqc) ((le16_to_cpu((cqc)->w11) >> 6) & 0x1)
/* eqc shift */
#define QM_EQE_AEQE_SIZE (2UL << 12)
@@ -122,9 +127,11 @@
#define QM_DFX_CNT_CLR_CE 0x100118
#define QM_ABNORMAL_INT_SOURCE 0x100000
+#define QM_ABNORMAL_INT_SOURCE_CLR GENMASK(12, 0)
#define QM_ABNORMAL_INT_MASK 0x100004
#define QM_ABNORMAL_INT_MASK_VALUE 0x1fff
#define QM_ABNORMAL_INT_STATUS 0x100008
+#define QM_ABNORMAL_INT_SET 0x10000c
#define QM_ABNORMAL_INF00 0x100010
#define QM_FIFO_OVERFLOW_TYPE 0xc0
#define QM_FIFO_OVERFLOW_TYPE_SHIFT 6
@@ -140,6 +147,27 @@
#define QM_RAS_CE_TIMES_PER_IRQ 1
#define QM_RAS_MSI_INT_SEL 0x1040f4
+#define QM_DEV_RESET_FLAG 0
+#define QM_RESET_WAIT_TIMEOUT 400
+#define QM_PEH_VENDOR_ID 0x1000d8
+#define ACC_VENDOR_ID_VALUE 0x5a5a
+#define QM_PEH_DFX_INFO0 0x1000fc
+#define ACC_PEH_SRIOV_CTRL_VF_MSE_SHIFT 3
+#define ACC_PEH_MSI_DISABLE GENMASK(31, 0)
+#define ACC_MASTER_GLOBAL_CTRL_SHUTDOWN 0x1
+#define ACC_MASTER_TRANS_RETURN_RW 3
+#define ACC_MASTER_TRANS_RETURN 0x300150
+#define ACC_MASTER_GLOBAL_CTRL 0x300000
+#define ACC_AM_CFG_PORT_WR_EN 0x30001c
+#define QM_RAS_NFE_MBIT_DISABLE ~QM_ECC_MBIT
+#define ACC_AM_ROB_ECC_INT_STS 0x300104
+#define ACC_ROB_ECC_ERR_MULTPL BIT(1)
+
+#define POLL_PERIOD 10
+#define POLL_TIMEOUT 1000
+#define WAIT_PERIOD_US_MAX 200
+#define WAIT_PERIOD_US_MIN 100
+#define MAX_WAIT_COUNTS 1000
#define QM_CACHE_WB_START 0x204
#define QM_CACHE_WB_DONE 0x208
@@ -147,7 +175,12 @@
#define QM_SQE_DATA_ALIGN_MASK GENMASK(6, 0)
#define QMC_ALIGN(sz) ALIGN(sz, 32)
+#define QM_DBG_READ_LEN 256
+#define QM_DBG_WRITE_LEN 1024
#define QM_DBG_TMP_BUF_LEN 22
+#define QM_PCI_COMMAND_INVALID ~0
+
+#define QM_SQE_ADDR_MASK GENMASK(7, 0)
#define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \
(((hop_num) << QM_CQ_HOP_NUM_SHIFT) | \
@@ -190,6 +223,12 @@ enum vft_type {
CQC_VFT,
};
+enum acc_err_result {
+ ACC_ERR_NONE,
+ ACC_ERR_NEED_RESET,
+ ACC_ERR_RECOVERED,
+};
+
struct qm_cqe {
__le32 rsvd0;
__le16 cmd_id;
@@ -284,10 +323,22 @@ struct hisi_qm_hw_ops {
u8 cmd, u16 index, u8 priority);
u32 (*get_irq_num)(struct hisi_qm *qm);
int (*debug_init)(struct hisi_qm *qm);
- void (*hw_error_init)(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe,
- u32 msi);
+ void (*hw_error_init)(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe);
void (*hw_error_uninit)(struct hisi_qm *qm);
- pci_ers_result_t (*hw_error_handle)(struct hisi_qm *qm);
+ enum acc_err_result (*hw_error_handle)(struct hisi_qm *qm);
+};
+
+struct qm_dfx_item {
+ const char *name;
+ u32 offset;
+};
+
+static struct qm_dfx_item qm_dfx_files[] = {
+ {"err_irq", offsetof(struct qm_dfx, err_irq_cnt)},
+ {"aeq_irq", offsetof(struct qm_dfx, aeq_irq_cnt)},
+ {"abnormal_irq", offsetof(struct qm_dfx, abnormal_irq_cnt)},
+ {"create_qp_err", offsetof(struct qm_dfx, create_qp_err_cnt)},
+ {"mb_err", offsetof(struct qm_dfx, mb_err_cnt)},
};
static const char * const qm_debug_file_name[] = {
@@ -325,6 +376,93 @@ static const char * const qm_fifo_overflow[] = {
"cq", "eq", "aeq",
};
+static const char * const qm_s[] = {
+ "init", "start", "close", "stop",
+};
+
+static const char * const qp_s[] = {
+ "none", "init", "start", "stop", "close",
+};
+
+static bool qm_avail_state(struct hisi_qm *qm, enum qm_state new)
+{
+ enum qm_state curr = atomic_read(&qm->status.flags);
+ bool avail = false;
+
+ switch (curr) {
+ case QM_INIT:
+ if (new == QM_START || new == QM_CLOSE)
+ avail = true;
+ break;
+ case QM_START:
+ if (new == QM_STOP)
+ avail = true;
+ break;
+ case QM_STOP:
+ if (new == QM_CLOSE || new == QM_START)
+ avail = true;
+ break;
+ default:
+ break;
+ }
+
+ dev_dbg(&qm->pdev->dev, "change qm state from %s to %s\n",
+ qm_s[curr], qm_s[new]);
+
+ if (!avail)
+ dev_warn(&qm->pdev->dev, "Can not change qm state from %s to %s\n",
+ qm_s[curr], qm_s[new]);
+
+ return avail;
+}
+
+static bool qm_qp_avail_state(struct hisi_qm *qm, struct hisi_qp *qp,
+ enum qp_state new)
+{
+ enum qm_state qm_curr = atomic_read(&qm->status.flags);
+ enum qp_state qp_curr = 0;
+ bool avail = false;
+
+ if (qp)
+ qp_curr = atomic_read(&qp->qp_status.flags);
+
+ switch (new) {
+ case QP_INIT:
+ if (qm_curr == QM_START || qm_curr == QM_INIT)
+ avail = true;
+ break;
+ case QP_START:
+ if ((qm_curr == QM_START && qp_curr == QP_INIT) ||
+ (qm_curr == QM_START && qp_curr == QP_STOP))
+ avail = true;
+ break;
+ case QP_STOP:
+ if ((qm_curr == QM_START && qp_curr == QP_START) ||
+ (qp_curr == QP_INIT))
+ avail = true;
+ break;
+ case QP_CLOSE:
+ if ((qm_curr == QM_START && qp_curr == QP_INIT) ||
+ (qm_curr == QM_START && qp_curr == QP_STOP) ||
+ (qm_curr == QM_STOP && qp_curr == QP_STOP) ||
+ (qm_curr == QM_STOP && qp_curr == QP_INIT))
+ avail = true;
+ break;
+ default:
+ break;
+ }
+
+ dev_dbg(&qm->pdev->dev, "change qp state from %s to %s in QM %s\n",
+ qp_s[qp_curr], qp_s[new], qm_s[qm_curr]);
+
+ if (!avail)
+ dev_warn(&qm->pdev->dev,
+ "Can not change qp state from %s to %s in QM %s\n",
+ qp_s[qp_curr], qp_s[new], qm_s[qm_curr]);
+
+ return avail;
+}
+
/* return 0 mailbox ready, -ETIMEDOUT hardware timeout */
static int qm_wait_mb_ready(struct hisi_qm *qm)
{
@@ -393,6 +531,8 @@ static int qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
busy_unlock:
mutex_unlock(&qm->mailbox_lock);
+ if (ret)
+ atomic64_inc(&qm->debug.dfx.mb_err_cnt);
return ret;
}
@@ -460,7 +600,7 @@ static struct hisi_qp *qm_to_hisi_qp(struct hisi_qm *qm, struct qm_eqe *eqe)
{
u16 cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK;
- return qm->qp_array[cqn];
+ return &qm->qp_array[cqn];
}
static void qm_cq_head_update(struct hisi_qp *qp)
@@ -510,8 +650,7 @@ static void qm_work_process(struct work_struct *work)
while (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) {
eqe_num++;
qp = qm_to_hisi_qp(qm, eqe);
- if (qp)
- qm_poll_qp(qp, qm);
+ qm_poll_qp(qp, qm);
if (qm->status.eq_head == QM_Q_DEPTH - 1) {
qm->status.eqc_phase = !qm->status.eqc_phase;
@@ -551,6 +690,7 @@ static irqreturn_t qm_irq(int irq, void *data)
if (readl(qm->io_base + QM_VF_EQ_INT_SOURCE))
return do_qm_irq(irq, data);
+ atomic64_inc(&qm->debug.dfx.err_irq_cnt);
dev_err(&qm->pdev->dev, "invalid int source\n");
qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
@@ -563,6 +703,7 @@ static irqreturn_t qm_aeq_irq(int irq, void *data)
struct qm_aeqe *aeqe = qm->aeqe + qm->status.aeq_head;
u32 type;
+ atomic64_inc(&qm->debug.dfx.aeq_irq_cnt);
if (!readl(qm->io_base + QM_VF_AEQ_INT_SOURCE))
return IRQ_NONE;
@@ -590,79 +731,20 @@ static irqreturn_t qm_aeq_irq(int irq, void *data)
return IRQ_HANDLED;
}
-static irqreturn_t qm_abnormal_irq(int irq, void *data)
-{
- const struct hisi_qm_hw_error *err = qm_hw_error;
- struct hisi_qm *qm = data;
- struct device *dev = &qm->pdev->dev;
- u32 error_status, tmp;
-
- /* read err sts */
- tmp = readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
- error_status = qm->msi_mask & tmp;
-
- while (err->msg) {
- if (err->int_msk & error_status)
- dev_err(dev, "%s [error status=0x%x] found\n",
- err->msg, err->int_msk);
-
- err++;
- }
-
- /* clear err sts */
- writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE);
-
- return IRQ_HANDLED;
-}
-
-static int qm_irq_register(struct hisi_qm *qm)
-{
- struct pci_dev *pdev = qm->pdev;
- int ret;
-
- ret = request_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR),
- qm_irq, IRQF_SHARED, qm->dev_name, qm);
- if (ret)
- return ret;
-
- if (qm->ver == QM_HW_V2) {
- ret = request_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR),
- qm_aeq_irq, IRQF_SHARED, qm->dev_name, qm);
- if (ret)
- goto err_aeq_irq;
-
- if (qm->fun_type == QM_HW_PF) {
- ret = request_irq(pci_irq_vector(pdev,
- QM_ABNORMAL_EVENT_IRQ_VECTOR),
- qm_abnormal_irq, IRQF_SHARED,
- qm->dev_name, qm);
- if (ret)
- goto err_abonormal_irq;
- }
- }
-
- return 0;
-
-err_abonormal_irq:
- free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm);
-err_aeq_irq:
- free_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), qm);
- return ret;
-}
-
static void qm_irq_unregister(struct hisi_qm *qm)
{
struct pci_dev *pdev = qm->pdev;
free_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), qm);
- if (qm->ver == QM_HW_V2) {
- free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm);
+ if (qm->ver == QM_HW_V1)
+ return;
+
+ free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm);
- if (qm->fun_type == QM_HW_PF)
- free_irq(pci_irq_vector(pdev,
- QM_ABNORMAL_EVENT_IRQ_VECTOR), qm);
- }
+ if (qm->fun_type == QM_HW_PF)
+ free_irq(pci_irq_vector(pdev,
+ QM_ABNORMAL_EVENT_IRQ_VECTOR), qm);
}
static void qm_init_qp_status(struct hisi_qp *qp)
@@ -672,7 +754,7 @@ static void qm_init_qp_status(struct hisi_qp *qp)
qp_status->sq_tail = 0;
qp_status->cq_head = 0;
qp_status->cqc_phase = true;
- qp_status->flags = 0;
+ atomic_set(&qp_status->flags, 0);
}
static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base,
@@ -683,36 +765,26 @@ static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base,
if (number > 0) {
switch (type) {
case SQC_VFT:
- switch (qm->ver) {
- case QM_HW_V1:
+ if (qm->ver == QM_HW_V1) {
tmp = QM_SQC_VFT_BUF_SIZE |
QM_SQC_VFT_SQC_SIZE |
QM_SQC_VFT_INDEX_NUMBER |
QM_SQC_VFT_VALID |
(u64)base << QM_SQC_VFT_START_SQN_SHIFT;
- break;
- case QM_HW_V2:
+ } else {
tmp = (u64)base << QM_SQC_VFT_START_SQN_SHIFT |
QM_SQC_VFT_VALID |
(u64)(number - 1) << QM_SQC_VFT_SQN_SHIFT;
- break;
- case QM_HW_UNKNOWN:
- break;
}
break;
case CQC_VFT:
- switch (qm->ver) {
- case QM_HW_V1:
+ if (qm->ver == QM_HW_V1) {
tmp = QM_CQC_VFT_BUF_SIZE |
QM_CQC_VFT_SQC_SIZE |
QM_CQC_VFT_INDEX_NUMBER |
QM_CQC_VFT_VALID;
- break;
- case QM_HW_V2:
+ } else {
tmp = QM_CQC_VFT_VALID;
- break;
- case QM_HW_UNKNOWN:
- break;
}
break;
}
@@ -986,6 +1058,473 @@ static const struct file_operations qm_regs_fops = {
.release = single_release,
};
+static ssize_t qm_cmd_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *pos)
+{
+ char buf[QM_DBG_READ_LEN];
+ int len;
+
+ if (*pos)
+ return 0;
+
+ if (count < QM_DBG_READ_LEN)
+ return -ENOSPC;
+
+ len = snprintf(buf, QM_DBG_READ_LEN, "%s\n",
+ "Please echo help to cmd to get help information");
+
+ if (copy_to_user(buffer, buf, len))
+ return -EFAULT;
+
+ return (*pos = len);
+}
+
+static void *qm_ctx_alloc(struct hisi_qm *qm, size_t ctx_size,
+ dma_addr_t *dma_addr)
+{
+ struct device *dev = &qm->pdev->dev;
+ void *ctx_addr;
+
+ ctx_addr = kzalloc(ctx_size, GFP_KERNEL);
+ if (!ctx_addr)
+ return ERR_PTR(-ENOMEM);
+
+ *dma_addr = dma_map_single(dev, ctx_addr, ctx_size, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, *dma_addr)) {
+ dev_err(dev, "DMA mapping error!\n");
+ kfree(ctx_addr);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return ctx_addr;
+}
+
+static void qm_ctx_free(struct hisi_qm *qm, size_t ctx_size,
+ const void *ctx_addr, dma_addr_t *dma_addr)
+{
+ struct device *dev = &qm->pdev->dev;
+
+ dma_unmap_single(dev, *dma_addr, ctx_size, DMA_FROM_DEVICE);
+ kfree(ctx_addr);
+}
+
+static int dump_show(struct hisi_qm *qm, void *info,
+ unsigned int info_size, char *info_name)
+{
+ struct device *dev = &qm->pdev->dev;
+ u8 *info_buf, *info_curr = info;
+ u32 i;
+#define BYTE_PER_DW 4
+
+ info_buf = kzalloc(info_size, GFP_KERNEL);
+ if (!info_buf)
+ return -ENOMEM;
+
+ for (i = 0; i < info_size; i++, info_curr++) {
+ if (i % BYTE_PER_DW == 0)
+ info_buf[i + 3UL] = *info_curr;
+ else if (i % BYTE_PER_DW == 1)
+ info_buf[i + 1UL] = *info_curr;
+ else if (i % BYTE_PER_DW == 2)
+ info_buf[i - 1] = *info_curr;
+ else if (i % BYTE_PER_DW == 3)
+ info_buf[i - 3] = *info_curr;
+ }
+
+ dev_info(dev, "%s DUMP\n", info_name);
+ for (i = 0; i < info_size; i += BYTE_PER_DW) {
+ pr_info("DW%d: %02X%02X %02X%02X\n", i / BYTE_PER_DW,
+ info_buf[i], info_buf[i + 1UL],
+ info_buf[i + 2UL], info_buf[i + 3UL]);
+ }
+
+ kfree(info_buf);
+
+ return 0;
+}
+
+static int qm_dump_sqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id)
+{
+ return qm_mb(qm, QM_MB_CMD_SQC, dma_addr, qp_id, 1);
+}
+
+static int qm_dump_cqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id)
+{
+ return qm_mb(qm, QM_MB_CMD_CQC, dma_addr, qp_id, 1);
+}
+
+static int qm_sqc_dump(struct hisi_qm *qm, const char *s)
+{
+ struct device *dev = &qm->pdev->dev;
+ struct qm_sqc *sqc, *sqc_curr;
+ dma_addr_t sqc_dma;
+ u32 qp_id;
+ int ret;
+
+ if (!s)
+ return -EINVAL;
+
+ ret = kstrtou32(s, 0, &qp_id);
+ if (ret || qp_id >= qm->qp_num) {
+ dev_err(dev, "Please input qp num (0-%d)", qm->qp_num - 1);
+ return -EINVAL;
+ }
+
+ sqc = qm_ctx_alloc(qm, sizeof(*sqc), &sqc_dma);
+ if (IS_ERR(sqc))
+ return PTR_ERR(sqc);
+
+ ret = qm_dump_sqc_raw(qm, sqc_dma, qp_id);
+ if (ret) {
+ down_read(&qm->qps_lock);
+ if (qm->sqc) {
+ sqc_curr = qm->sqc + qp_id;
+
+ ret = dump_show(qm, sqc_curr, sizeof(*sqc),
+ "SOFT SQC");
+ if (ret)
+ dev_info(dev, "Show soft sqc failed!\n");
+ }
+ up_read(&qm->qps_lock);
+
+ goto err_free_ctx;
+ }
+
+ ret = dump_show(qm, sqc, sizeof(*sqc), "SQC");
+ if (ret)
+ dev_info(dev, "Show hw sqc failed!\n");
+
+err_free_ctx:
+ qm_ctx_free(qm, sizeof(*sqc), sqc, &sqc_dma);
+ return ret;
+}
+
+static int qm_cqc_dump(struct hisi_qm *qm, const char *s)
+{
+ struct device *dev = &qm->pdev->dev;
+ struct qm_cqc *cqc, *cqc_curr;
+ dma_addr_t cqc_dma;
+ u32 qp_id;
+ int ret;
+
+ if (!s)
+ return -EINVAL;
+
+ ret = kstrtou32(s, 0, &qp_id);
+ if (ret || qp_id >= qm->qp_num) {
+ dev_err(dev, "Please input qp num (0-%d)", qm->qp_num - 1);
+ return -EINVAL;
+ }
+
+ cqc = qm_ctx_alloc(qm, sizeof(*cqc), &cqc_dma);
+ if (IS_ERR(cqc))
+ return PTR_ERR(cqc);
+
+ ret = qm_dump_cqc_raw(qm, cqc_dma, qp_id);
+ if (ret) {
+ down_read(&qm->qps_lock);
+ if (qm->cqc) {
+ cqc_curr = qm->cqc + qp_id;
+
+ ret = dump_show(qm, cqc_curr, sizeof(*cqc),
+ "SOFT CQC");
+ if (ret)
+ dev_info(dev, "Show soft cqc failed!\n");
+ }
+ up_read(&qm->qps_lock);
+
+ goto err_free_ctx;
+ }
+
+ ret = dump_show(qm, cqc, sizeof(*cqc), "CQC");
+ if (ret)
+ dev_info(dev, "Show hw cqc failed!\n");
+
+err_free_ctx:
+ qm_ctx_free(qm, sizeof(*cqc), cqc, &cqc_dma);
+ return ret;
+}
+
+static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, size_t size,
+ int cmd, char *name)
+{
+ struct device *dev = &qm->pdev->dev;
+ dma_addr_t xeqc_dma;
+ void *xeqc;
+ int ret;
+
+ if (strsep(&s, " ")) {
+ dev_err(dev, "Please do not input extra characters!\n");
+ return -EINVAL;
+ }
+
+ xeqc = qm_ctx_alloc(qm, size, &xeqc_dma);
+ if (IS_ERR(xeqc))
+ return PTR_ERR(xeqc);
+
+ ret = qm_mb(qm, cmd, xeqc_dma, 0, 1);
+ if (ret)
+ goto err_free_ctx;
+
+ ret = dump_show(qm, xeqc, size, name);
+ if (ret)
+ dev_info(dev, "Show hw %s failed!\n", name);
+
+err_free_ctx:
+ qm_ctx_free(qm, size, xeqc, &xeqc_dma);
+ return ret;
+}
+
+static int q_dump_param_parse(struct hisi_qm *qm, char *s,
+ u32 *e_id, u32 *q_id)
+{
+ struct device *dev = &qm->pdev->dev;
+ unsigned int qp_num = qm->qp_num;
+ char *presult;
+ int ret;
+
+ presult = strsep(&s, " ");
+ if (!presult) {
+ dev_err(dev, "Please input qp number!\n");
+ return -EINVAL;
+ }
+
+ ret = kstrtou32(presult, 0, q_id);
+ if (ret || *q_id >= qp_num) {
+		dev_err(dev, "Please input qp num (0-%d)\n", qp_num - 1);
+ return -EINVAL;
+ }
+
+ presult = strsep(&s, " ");
+ if (!presult) {
+ dev_err(dev, "Please input sqe number!\n");
+ return -EINVAL;
+ }
+
+ ret = kstrtou32(presult, 0, e_id);
+ if (ret || *e_id >= QM_Q_DEPTH) {
+		dev_err(dev, "Please input sqe num (0-%d)\n", QM_Q_DEPTH - 1);
+ return -EINVAL;
+ }
+
+ if (strsep(&s, " ")) {
+ dev_err(dev, "Please do not input extra characters!\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int qm_sq_dump(struct hisi_qm *qm, char *s)
+{
+ struct device *dev = &qm->pdev->dev;
+ void *sqe, *sqe_curr;
+ struct hisi_qp *qp;
+ u32 qp_id, sqe_id;
+ int ret;
+
+ ret = q_dump_param_parse(qm, s, &sqe_id, &qp_id);
+ if (ret)
+ return ret;
+
+ sqe = kzalloc(qm->sqe_size * QM_Q_DEPTH, GFP_KERNEL);
+ if (!sqe)
+ return -ENOMEM;
+
+ qp = &qm->qp_array[qp_id];
+ memcpy(sqe, qp->sqe, qm->sqe_size * QM_Q_DEPTH);
+ sqe_curr = sqe + (u32)(sqe_id * qm->sqe_size);
+ memset(sqe_curr + qm->debug.sqe_mask_offset, QM_SQE_ADDR_MASK,
+ qm->debug.sqe_mask_len);
+
+ ret = dump_show(qm, sqe_curr, qm->sqe_size, "SQE");
+ if (ret)
+ dev_info(dev, "Show sqe failed!\n");
+
+ kfree(sqe);
+
+ return ret;
+}
+
+static int qm_cq_dump(struct hisi_qm *qm, char *s)
+{
+ struct device *dev = &qm->pdev->dev;
+ struct qm_cqe *cqe_curr;
+ struct hisi_qp *qp;
+ u32 qp_id, cqe_id;
+ int ret;
+
+ ret = q_dump_param_parse(qm, s, &cqe_id, &qp_id);
+ if (ret)
+ return ret;
+
+ qp = &qm->qp_array[qp_id];
+ cqe_curr = qp->cqe + cqe_id;
+ ret = dump_show(qm, cqe_curr, sizeof(struct qm_cqe), "CQE");
+ if (ret)
+ dev_info(dev, "Show cqe failed!\n");
+
+ return ret;
+}
+
+static int qm_eq_aeq_dump(struct hisi_qm *qm, const char *s,
+ size_t size, char *name)
+{
+ struct device *dev = &qm->pdev->dev;
+ void *xeqe;
+ u32 xeqe_id;
+ int ret;
+
+ if (!s)
+ return -EINVAL;
+
+ ret = kstrtou32(s, 0, &xeqe_id);
+ if (ret || xeqe_id >= QM_Q_DEPTH) {
+		dev_err(dev, "Please input %s num (0-%d)\n", name, QM_Q_DEPTH - 1);
+ return -EINVAL;
+ }
+
+ down_read(&qm->qps_lock);
+
+ if (qm->eqe && !strcmp(name, "EQE")) {
+ xeqe = qm->eqe + xeqe_id;
+ } else if (qm->aeqe && !strcmp(name, "AEQE")) {
+ xeqe = qm->aeqe + xeqe_id;
+ } else {
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+
+ ret = dump_show(qm, xeqe, size, name);
+ if (ret)
+ dev_info(dev, "Show %s failed!\n", name);
+
+err_unlock:
+ up_read(&qm->qps_lock);
+ return ret;
+}
+
+static int qm_dbg_help(struct hisi_qm *qm, char *s)
+{
+ struct device *dev = &qm->pdev->dev;
+
+ if (strsep(&s, " ")) {
+ dev_err(dev, "Please do not input extra characters!\n");
+ return -EINVAL;
+ }
+
+ dev_info(dev, "available commands:\n");
+ dev_info(dev, "sqc <num>\n");
+ dev_info(dev, "cqc <num>\n");
+ dev_info(dev, "eqc\n");
+ dev_info(dev, "aeqc\n");
+ dev_info(dev, "sq <num> <e>\n");
+ dev_info(dev, "cq <num> <e>\n");
+ dev_info(dev, "eq <e>\n");
+ dev_info(dev, "aeq <e>\n");
+
+ return 0;
+}
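
As an illustration of the command interface parsed above: assuming the "cmd"
debugfs file created later in this patch lives under the per-device qm
directory, a user writes one of the listed commands to it, for example
echo "sqc 0" > cmd to dump the SQC of queue 0, echo "sq 0 3" > cmd to dump
SQE 3 of queue 0, or echo "help" > cmd to print the command list; the dump
itself is emitted to the kernel log by dump_show().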
+
+static int qm_cmd_write_dump(struct hisi_qm *qm, const char *cmd_buf)
+{
+ struct device *dev = &qm->pdev->dev;
+ char *presult, *s;
+ int ret;
+
+ s = kstrdup(cmd_buf, GFP_KERNEL);
+ if (!s)
+ return -ENOMEM;
+
+ presult = strsep(&s, " ");
+ if (!presult) {
+ kfree(s);
+ return -EINVAL;
+ }
+
+ if (!strcmp(presult, "sqc"))
+ ret = qm_sqc_dump(qm, s);
+ else if (!strcmp(presult, "cqc"))
+ ret = qm_cqc_dump(qm, s);
+ else if (!strcmp(presult, "eqc"))
+ ret = qm_eqc_aeqc_dump(qm, s, sizeof(struct qm_eqc),
+ QM_MB_CMD_EQC, "EQC");
+ else if (!strcmp(presult, "aeqc"))
+ ret = qm_eqc_aeqc_dump(qm, s, sizeof(struct qm_aeqc),
+ QM_MB_CMD_AEQC, "AEQC");
+ else if (!strcmp(presult, "sq"))
+ ret = qm_sq_dump(qm, s);
+ else if (!strcmp(presult, "cq"))
+ ret = qm_cq_dump(qm, s);
+ else if (!strcmp(presult, "eq"))
+ ret = qm_eq_aeq_dump(qm, s, sizeof(struct qm_eqe), "EQE");
+ else if (!strcmp(presult, "aeq"))
+ ret = qm_eq_aeq_dump(qm, s, sizeof(struct qm_aeqe), "AEQE");
+ else if (!strcmp(presult, "help"))
+ ret = qm_dbg_help(qm, s);
+ else
+ ret = -EINVAL;
+
+ if (ret)
+ dev_info(dev, "Please echo help\n");
+
+ kfree(s);
+
+ return ret;
+}
+
+static ssize_t qm_cmd_write(struct file *filp, const char __user *buffer,
+ size_t count, loff_t *pos)
+{
+ struct hisi_qm *qm = filp->private_data;
+ char *cmd_buf, *cmd_buf_tmp;
+ int ret;
+
+ if (*pos)
+ return 0;
+
+	/* Check whether the instance is being reset. */
+ if (unlikely(atomic_read(&qm->status.flags) == QM_STOP))
+ return 0;
+
+ if (count > QM_DBG_WRITE_LEN)
+ return -ENOSPC;
+
+ cmd_buf = kzalloc(count + 1, GFP_KERNEL);
+ if (!cmd_buf)
+ return -ENOMEM;
+
+ if (copy_from_user(cmd_buf, buffer, count)) {
+ kfree(cmd_buf);
+ return -EFAULT;
+ }
+
+ cmd_buf[count] = '\0';
+
+ cmd_buf_tmp = strchr(cmd_buf, '\n');
+ if (cmd_buf_tmp) {
+ *cmd_buf_tmp = '\0';
+ count = cmd_buf_tmp - cmd_buf + 1;
+ }
+
+ ret = qm_cmd_write_dump(qm, cmd_buf);
+ if (ret) {
+ kfree(cmd_buf);
+ return ret;
+ }
+
+ kfree(cmd_buf);
+
+ return count;
+}
+
+static const struct file_operations qm_cmd_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = qm_cmd_read,
+ .write = qm_cmd_write,
+};
+
static int qm_create_debugfs_file(struct hisi_qm *qm, enum qm_debug_file index)
{
struct dentry *qm_d = qm->debug.qm_d;
@@ -1001,20 +1540,21 @@ static int qm_create_debugfs_file(struct hisi_qm *qm, enum qm_debug_file index)
return 0;
}
-static void qm_hw_error_init_v1(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe,
- u32 msi)
+static void qm_hw_error_init_v1(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
{
writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
}
-static void qm_hw_error_init_v2(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe,
- u32 msi)
+static void qm_hw_error_init_v2(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
{
- u32 irq_enable = ce | nfe | fe | msi;
+ u32 irq_enable = ce | nfe | fe;
u32 irq_unmask = ~irq_enable;
qm->error_mask = ce | nfe | fe;
- qm->msi_mask = msi;
+
+ /* clear QM hw residual error source */
+ writel(QM_ABNORMAL_INT_SOURCE_CLR,
+ qm->io_base + QM_ABNORMAL_INT_SOURCE);
/* configure error type */
writel(ce, qm->io_base + QM_RAS_CE_ENABLE);
@@ -1022,9 +1562,6 @@ static void qm_hw_error_init_v2(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe,
writel(nfe, qm->io_base + QM_RAS_NFE_ENABLE);
writel(fe, qm->io_base + QM_RAS_FE_ENABLE);
- /* use RAS irq default, so only set QM_RAS_MSI_INT_SEL for MSI */
- writel(msi, qm->io_base + QM_RAS_MSI_INT_SEL);
-
irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK);
}
@@ -1071,7 +1608,7 @@ static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status)
}
}
-static pci_ers_result_t qm_hw_error_handle_v2(struct hisi_qm *qm)
+static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
{
u32 error_status, tmp;
@@ -1080,15 +1617,20 @@ static pci_ers_result_t qm_hw_error_handle_v2(struct hisi_qm *qm)
error_status = qm->error_mask & tmp;
if (error_status) {
- qm_log_hw_error(qm, error_status);
+ if (error_status & QM_ECC_MBIT)
+ qm->err_status.is_qm_ecc_mbit = true;
- /* clear err sts */
- writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE);
+ qm_log_hw_error(qm, error_status);
+ if (error_status == QM_DB_RANDOM_INVALID) {
+ writel(error_status, qm->io_base +
+ QM_ABNORMAL_INT_SOURCE);
+ return ACC_ERR_RECOVERED;
+ }
- return PCI_ERS_RESULT_NEED_RESET;
+ return ACC_ERR_NEED_RESET;
}
- return PCI_ERS_RESULT_RECOVERED;
+ return ACC_ERR_RECOVERED;
}
static const struct hisi_qm_hw_ops qm_hw_ops_v1 = {
@@ -1117,68 +1659,61 @@ static void *qm_get_avail_sqe(struct hisi_qp *qp)
return qp->sqe + sq_tail * qp->qm->sqe_size;
}
-/**
- * hisi_qm_create_qp() - Create a queue pair from qm.
- * @qm: The qm we create a qp from.
- * @alg_type: Accelerator specific algorithm type in sqc.
- *
- * return created qp, -EBUSY if all qps in qm allocated, -ENOMEM if allocating
- * qp memory fails.
- */
-struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type)
+static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type)
{
struct device *dev = &qm->pdev->dev;
struct hisi_qp *qp;
- int qp_id, ret;
-
- qp = kzalloc(sizeof(*qp), GFP_KERNEL);
- if (!qp)
- return ERR_PTR(-ENOMEM);
+ int qp_id;
- write_lock(&qm->qps_lock);
+ if (!qm_qp_avail_state(qm, NULL, QP_INIT))
+ return ERR_PTR(-EPERM);
- qp_id = find_first_zero_bit(qm->qp_bitmap, qm->qp_num);
- if (qp_id >= qm->qp_num) {
- write_unlock(&qm->qps_lock);
- dev_info(&qm->pdev->dev, "QM all queues are busy!\n");
- ret = -EBUSY;
- goto err_free_qp;
+ if (qm->qp_in_used == qm->qp_num) {
+ dev_info_ratelimited(dev, "All %u queues of QM are busy!\n",
+ qm->qp_num);
+ atomic64_inc(&qm->debug.dfx.create_qp_err_cnt);
+ return ERR_PTR(-EBUSY);
}
- set_bit(qp_id, qm->qp_bitmap);
- qm->qp_array[qp_id] = qp;
- qm->qp_in_used++;
- write_unlock(&qm->qps_lock);
-
- qp->qm = qm;
+ qp_id = idr_alloc_cyclic(&qm->qp_idr, NULL, 0, qm->qp_num, GFP_ATOMIC);
+ if (qp_id < 0) {
+ dev_info_ratelimited(dev, "All %u queues of QM are busy!\n",
+ qm->qp_num);
+ atomic64_inc(&qm->debug.dfx.create_qp_err_cnt);
+ return ERR_PTR(-EBUSY);
+ }
- if (qm->use_dma_api) {
- qp->qdma.size = qm->sqe_size * QM_Q_DEPTH +
- sizeof(struct qm_cqe) * QM_Q_DEPTH;
- qp->qdma.va = dma_alloc_coherent(dev, qp->qdma.size,
- &qp->qdma.dma, GFP_KERNEL);
- if (!qp->qdma.va) {
- ret = -ENOMEM;
- goto err_clear_bit;
- }
+ qp = &qm->qp_array[qp_id];
- dev_dbg(dev, "allocate qp dma buf(va=%pK, dma=%pad, size=%zx)\n",
- qp->qdma.va, &qp->qdma.dma, qp->qdma.size);
- }
+ memset(qp->cqe, 0, sizeof(struct qm_cqe) * QM_Q_DEPTH);
+ qp->event_cb = NULL;
+ qp->req_cb = NULL;
qp->qp_id = qp_id;
qp->alg_type = alg_type;
+ qm->qp_in_used++;
+ atomic_set(&qp->qp_status.flags, QP_INIT);
return qp;
+}
+
+/**
+ * hisi_qm_create_qp() - Create a queue pair from qm.
+ * @qm: The qm we create a qp from.
+ * @alg_type: Accelerator specific algorithm type in sqc.
+ *
+ * Return the created qp, -EBUSY if all qps in the qm are allocated, or
+ * -ENOMEM if allocating qp memory fails.
+ */
+struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type)
+{
+ struct hisi_qp *qp;
-err_clear_bit:
- write_lock(&qm->qps_lock);
- qm->qp_array[qp_id] = NULL;
- clear_bit(qp_id, qm->qp_bitmap);
- write_unlock(&qm->qps_lock);
-err_free_qp:
- kfree(qp);
- return ERR_PTR(ret);
+ down_write(&qm->qps_lock);
+ qp = qm_create_qp_nolock(qm, alg_type);
+ up_write(&qm->qps_lock);
+
+ return qp;
}
EXPORT_SYMBOL_GPL(hisi_qm_create_qp);
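
To show how this API is meant to be used, here is a minimal, illustrative
sketch of an accelerator driver allocating and starting a queue pair on top
of the QM layer; the example_* name and the alg_type/pasid values are
placeholders, not part of this patch:

	static int example_setup_qp(struct hisi_qm *qm, struct hisi_qp **out)
	{
		struct hisi_qp *qp;
		int ret;

		qp = hisi_qm_create_qp(qm, 0);	/* alg_type is device specific */
		if (IS_ERR(qp))
			return PTR_ERR(qp);

		ret = hisi_qm_start_qp(qp, 0);	/* pasid 0: no SVA in this sketch */
		if (ret) {
			hisi_qm_release_qp(qp);
			return ret;
		}

		*out = qp;
		return 0;
	}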
@@ -1191,19 +1726,18 @@ EXPORT_SYMBOL_GPL(hisi_qm_create_qp);
void hisi_qm_release_qp(struct hisi_qp *qp)
{
struct hisi_qm *qm = qp->qm;
- struct qm_dma *qdma = &qp->qdma;
- struct device *dev = &qm->pdev->dev;
- if (qm->use_dma_api && qdma->va)
- dma_free_coherent(dev, qdma->size, qdma->va, qdma->dma);
+ down_write(&qm->qps_lock);
+
+ if (!qm_qp_avail_state(qm, qp, QP_CLOSE)) {
+ up_write(&qm->qps_lock);
+ return;
+ }
- write_lock(&qm->qps_lock);
- qm->qp_array[qp->qp_id] = NULL;
- clear_bit(qp->qp_id, qm->qp_bitmap);
qm->qp_in_used--;
- write_unlock(&qm->qps_lock);
+ idr_remove(&qm->qp_idr, qp->qp_id);
- kfree(qp);
+ up_write(&qm->qps_lock);
}
EXPORT_SYMBOL_GPL(hisi_qm_release_qp);
@@ -1234,7 +1768,7 @@ static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, int pasid)
if (ver == QM_HW_V1) {
sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size));
sqc->w8 = cpu_to_le16(QM_Q_DEPTH - 1);
- } else if (ver == QM_HW_V2) {
+ } else {
sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size));
sqc->w8 = 0; /* rand_qc */
}
@@ -1261,7 +1795,7 @@ static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, int pasid)
if (ver == QM_HW_V1) {
cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V1(0, 0, 0, 4));
cqc->w8 = cpu_to_le16(QM_Q_DEPTH - 1);
- } else if (ver == QM_HW_V2) {
+ } else {
cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(4));
cqc->w8 = 0;
}
@@ -1274,6 +1808,27 @@ static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, int pasid)
return ret;
}
+static int qm_start_qp_nolock(struct hisi_qp *qp, unsigned long arg)
+{
+ struct hisi_qm *qm = qp->qm;
+ struct device *dev = &qm->pdev->dev;
+ int qp_id = qp->qp_id;
+ int pasid = arg;
+ int ret;
+
+ if (!qm_qp_avail_state(qm, qp, QP_START))
+ return -EPERM;
+
+ ret = qm_qp_ctx_cfg(qp, qp_id, pasid);
+ if (ret)
+ return ret;
+
+ atomic_set(&qp->qp_status.flags, QP_START);
+ dev_dbg(dev, "queue %d started\n", qp_id);
+
+ return 0;
+}
+
/**
* hisi_qm_start_qp() - Start a qp into running.
* @qp: The qp we want to start to run.
@@ -1285,48 +1840,112 @@ static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, int pasid)
int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg)
{
struct hisi_qm *qm = qp->qm;
- struct device *dev = &qm->pdev->dev;
- enum qm_hw_ver ver = qm->ver;
- int qp_id = qp->qp_id;
- int pasid = arg;
- size_t off = 0;
int ret;
-#define QP_INIT_BUF(qp, type, size) do { \
- (qp)->type = ((qp)->qdma.va + (off)); \
- (qp)->type##_dma = (qp)->qdma.dma + (off); \
- off += (size); \
-} while (0)
+ down_write(&qm->qps_lock);
+ ret = qm_start_qp_nolock(qp, arg);
+ up_write(&qm->qps_lock);
- if (!qp->qdma.dma) {
- dev_err(dev, "cannot get qm dma buffer\n");
- return -EINVAL;
+ return ret;
+}
+EXPORT_SYMBOL_GPL(hisi_qm_start_qp);
+
+/**
+ * Check whether the queue has been drained by comparing the tail pointers
+ * of the sq and cq.
+ */
+static int qm_drain_qp(struct hisi_qp *qp)
+{
+ size_t size = sizeof(struct qm_sqc) + sizeof(struct qm_cqc);
+ struct hisi_qm *qm = qp->qm;
+ struct device *dev = &qm->pdev->dev;
+ struct qm_sqc *sqc;
+ struct qm_cqc *cqc;
+ dma_addr_t dma_addr;
+ int ret = 0, i = 0;
+ void *addr;
+
+ /*
+	 * There is no need to check here if an ECC multi-bit error has
+	 * occurred, because the master OOO path is already blocked.
+ */
+ if (qm->err_status.is_qm_ecc_mbit || qm->err_status.is_dev_ecc_mbit)
+ return 0;
+
+ addr = qm_ctx_alloc(qm, size, &dma_addr);
+ if (IS_ERR(addr)) {
+ dev_err(dev, "Failed to alloc ctx for sqc and cqc!\n");
+ return -ENOMEM;
}
- /* sq need 128 bytes alignment */
- if (qp->qdma.dma & QM_SQE_DATA_ALIGN_MASK) {
- dev_err(dev, "qm sq is not aligned to 128 byte\n");
- return -EINVAL;
+ while (++i) {
+ ret = qm_dump_sqc_raw(qm, dma_addr, qp->qp_id);
+ if (ret) {
+ dev_err_ratelimited(dev, "Failed to dump sqc!\n");
+ break;
+ }
+ sqc = addr;
+
+ ret = qm_dump_cqc_raw(qm, (dma_addr + sizeof(struct qm_sqc)),
+ qp->qp_id);
+ if (ret) {
+ dev_err_ratelimited(dev, "Failed to dump cqc!\n");
+ break;
+ }
+ cqc = addr + sizeof(struct qm_sqc);
+
+ if ((sqc->tail == cqc->tail) &&
+ (QM_SQ_TAIL_IDX(sqc) == QM_CQ_TAIL_IDX(cqc)))
+ break;
+
+ if (i == MAX_WAIT_COUNTS) {
+			dev_err(dev, "Failed to empty queue %u!\n", qp->qp_id);
+ ret = -EBUSY;
+ break;
+ }
+
+ usleep_range(WAIT_PERIOD_US_MIN, WAIT_PERIOD_US_MAX);
}
- QP_INIT_BUF(qp, sqe, qm->sqe_size * QM_Q_DEPTH);
- QP_INIT_BUF(qp, cqe, sizeof(struct qm_cqe) * QM_Q_DEPTH);
+ qm_ctx_free(qm, size, addr, &dma_addr);
- dev_dbg(dev, "init qp buffer(v%d):\n"
- " sqe (%pK, %lx)\n"
- " cqe (%pK, %lx)\n",
- ver, qp->sqe, (unsigned long)qp->sqe_dma,
- qp->cqe, (unsigned long)qp->cqe_dma);
+ return ret;
+}
- ret = qm_qp_ctx_cfg(qp, qp_id, pasid);
+static int qm_stop_qp_nolock(struct hisi_qp *qp)
+{
+ struct device *dev = &qp->qm->pdev->dev;
+ int ret;
+
+ /*
+	 * Stopping and releasing a qp during reset is allowed. If the qp was
+	 * already stopped by the reset but is now being released, clear the
+	 * is_resetting flag so that this qp will not be restarted after the
+	 * reset completes.
+ */
+ if (atomic_read(&qp->qp_status.flags) == QP_STOP) {
+ qp->is_resetting = false;
+ return 0;
+ }
+
+ if (!qm_qp_avail_state(qp->qm, qp, QP_STOP))
+ return -EPERM;
+
+ atomic_set(&qp->qp_status.flags, QP_STOP);
+
+ ret = qm_drain_qp(qp);
if (ret)
- return ret;
+ dev_err(dev, "Failed to drain out data for stopping!\n");
- dev_dbg(dev, "queue %d started\n", qp_id);
+ if (qp->qm->wq)
+ flush_workqueue(qp->qm->wq);
+ else
+ flush_work(&qp->qm->work);
+
+ dev_dbg(dev, "stop queue %u!", qp->qp_id);
return 0;
}
-EXPORT_SYMBOL_GPL(hisi_qm_start_qp);
/**
* hisi_qm_stop_qp() - Stop a qp in qm.
@@ -1336,27 +1955,13 @@ EXPORT_SYMBOL_GPL(hisi_qm_start_qp);
*/
int hisi_qm_stop_qp(struct hisi_qp *qp)
{
- struct device *dev = &qp->qm->pdev->dev;
- int i = 0;
-
- /* it is stopped */
- if (test_bit(QP_STOP, &qp->qp_status.flags))
- return 0;
-
- while (atomic_read(&qp->qp_status.used)) {
- i++;
- msleep(20);
- if (i == 10) {
- dev_err(dev, "Cannot drain out data for stopping, Force to stop!\n");
- return 0;
- }
- }
-
- set_bit(QP_STOP, &qp->qp_status.flags);
+ int ret;
- dev_dbg(dev, "stop queue %u!", qp->qp_id);
+ down_write(&qp->qm->qps_lock);
+ ret = qm_stop_qp_nolock(qp);
+ up_write(&qp->qm->qps_lock);
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_stop_qp);
@@ -1367,6 +1972,13 @@ EXPORT_SYMBOL_GPL(hisi_qm_stop_qp);
*
* This function will return -EBUSY if qp is currently full, and -EAGAIN
* if qp related qm is resetting.
+ *
+ * Note: This function may run concurrently with qm_irq_thread and an ACC
+ *       reset. It has no race with qm_irq_thread. However, an ACC reset may
+ *       happen during hisi_qp_send; for performance reasons no lock is taken
+ *       here, so the current qm_db write may fail or the sent sqe may never
+ *       be received. The QM sync/async receive functions should handle the
+ *       error sqe, and the ACC reset-done path should clear used sqes to 0.
*/
int hisi_qp_send(struct hisi_qp *qp, const void *msg)
{
@@ -1375,7 +1987,9 @@ int hisi_qp_send(struct hisi_qp *qp, const void *msg)
u16 sq_tail_next = (sq_tail + 1) % QM_Q_DEPTH;
void *sqe = qm_get_avail_sqe(qp);
- if (unlikely(test_bit(QP_STOP, &qp->qp_status.flags))) {
+ if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP ||
+ atomic_read(&qp->qm->status.flags) == QM_STOP ||
+ qp->is_resetting)) {
dev_info(&qp->qm->pdev->dev, "QP is stopped or resetting\n");
return -EAGAIN;
}
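
Given those return conventions (-EBUSY when the queue is full, -EAGAIN while
the qp or qm is stopped or resetting), a caller that may sleep could retry as
in this hedged sketch; the retry count and delay are arbitrary placeholders:

	static int example_send(struct hisi_qp *qp, const void *msg)
	{
		int ret, retries = 10;

		do {
			ret = hisi_qp_send(qp, msg);
			if (ret != -EBUSY)
				break;
			usleep_range(100, 200);	/* queue full, back off briefly */
		} while (--retries);

		return ret;	/* -EAGAIN: qp/qm stopped or resetting */
	}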
@@ -1397,12 +2011,13 @@ static void hisi_qm_cache_wb(struct hisi_qm *qm)
{
unsigned int val;
- if (qm->ver == QM_HW_V2) {
- writel(0x1, qm->io_base + QM_CACHE_WB_START);
- if (readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE,
- val, val & BIT(0), 10, 1000))
- dev_err(&qm->pdev->dev, "QM writeback sqc cache fail!\n");
- }
+ if (qm->ver == QM_HW_V1)
+ return;
+
+ writel(0x1, qm->io_base + QM_CACHE_WB_START);
+ if (readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE,
+ val, val & BIT(0), 10, 1000))
+ dev_err(&qm->pdev->dev, "QM writeback sqc cache fail!\n");
}
static void qm_qp_event_notifier(struct hisi_qp *qp)
@@ -1412,16 +2027,7 @@ static void qm_qp_event_notifier(struct hisi_qp *qp)
static int hisi_qm_get_available_instances(struct uacce_device *uacce)
{
- int i, ret;
- struct hisi_qm *qm = uacce->priv;
-
- read_lock(&qm->qps_lock);
- for (i = 0, ret = 0; i < qm->qp_num; i++)
- if (!qm->qp_array[i])
- ret++;
- read_unlock(&qm->qps_lock);
-
- return ret;
+ return hisi_qm_get_free_qp_num(uacce->priv);
}
static int hisi_qm_uacce_get_queue(struct uacce_device *uacce,
@@ -1468,12 +2074,12 @@ static int hisi_qm_uacce_mmap(struct uacce_queue *q,
switch (qfr->type) {
case UACCE_QFRT_MMIO:
- if (qm->ver == QM_HW_V2) {
- if (sz > PAGE_SIZE * (QM_DOORBELL_PAGE_NR +
- QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE))
+ if (qm->ver == QM_HW_V1) {
+ if (sz > PAGE_SIZE * QM_DOORBELL_PAGE_NR)
return -EINVAL;
} else {
- if (sz > PAGE_SIZE * QM_DOORBELL_PAGE_NR)
+ if (sz > PAGE_SIZE * (QM_DOORBELL_PAGE_NR +
+ QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE))
return -EINVAL;
}
@@ -1519,9 +2125,9 @@ static int qm_set_sqctype(struct uacce_queue *q, u16 type)
struct hisi_qm *qm = q->uacce->priv;
struct hisi_qp *qp = q->priv;
- write_lock(&qm->qps_lock);
+ down_write(&qm->qps_lock);
qp->alg_type = type;
- write_unlock(&qm->qps_lock);
+ up_write(&qm->qps_lock);
return 0;
}
@@ -1623,107 +2229,121 @@ int hisi_qm_get_free_qp_num(struct hisi_qm *qm)
{
int ret;
- read_lock(&qm->qps_lock);
+ down_read(&qm->qps_lock);
ret = qm->qp_num - qm->qp_in_used;
- read_unlock(&qm->qps_lock);
+ up_read(&qm->qps_lock);
return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_get_free_qp_num);
-/**
- * hisi_qm_init() - Initialize configures about qm.
- * @qm: The qm needing init.
- *
- * This function init qm, then we can call hisi_qm_start to put qm into work.
- */
-int hisi_qm_init(struct hisi_qm *qm)
+static void hisi_qp_memory_uninit(struct hisi_qm *qm, int num)
{
- struct pci_dev *pdev = qm->pdev;
- struct device *dev = &pdev->dev;
- unsigned int num_vec;
- int ret;
+ struct device *dev = &qm->pdev->dev;
+ struct qm_dma *qdma;
+ int i;
- switch (qm->ver) {
- case QM_HW_V1:
- qm->ops = &qm_hw_ops_v1;
- break;
- case QM_HW_V2:
- qm->ops = &qm_hw_ops_v2;
- break;
- default:
- return -EINVAL;
+ for (i = num - 1; i >= 0; i--) {
+ qdma = &qm->qp_array[i].qdma;
+ dma_free_coherent(dev, qdma->size, qdma->va, qdma->dma);
}
- ret = qm_alloc_uacce(qm);
- if (ret < 0)
- dev_warn(&pdev->dev, "fail to alloc uacce (%d)\n", ret);
+ kfree(qm->qp_array);
+}
- ret = pci_enable_device_mem(pdev);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to enable device mem!\n");
- goto err_remove_uacce;
- }
+static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id)
+{
+ struct device *dev = &qm->pdev->dev;
+ size_t off = qm->sqe_size * QM_Q_DEPTH;
+ struct hisi_qp *qp;
- ret = pci_request_mem_regions(pdev, qm->dev_name);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to request mem regions!\n");
- goto err_disable_pcidev;
- }
+ qp = &qm->qp_array[id];
+ qp->qdma.va = dma_alloc_coherent(dev, dma_size, &qp->qdma.dma,
+ GFP_KERNEL);
+ if (!qp->qdma.va)
+ return -ENOMEM;
- qm->phys_base = pci_resource_start(pdev, PCI_BAR_2);
- qm->phys_size = pci_resource_len(qm->pdev, PCI_BAR_2);
- qm->io_base = ioremap(qm->phys_base, qm->phys_size);
- if (!qm->io_base) {
- ret = -EIO;
- goto err_release_mem_regions;
- }
+ qp->sqe = qp->qdma.va;
+ qp->sqe_dma = qp->qdma.dma;
+ qp->cqe = qp->qdma.va + off;
+ qp->cqe_dma = qp->qdma.dma + off;
+ qp->qdma.size = dma_size;
+ qp->qm = qm;
+ qp->qp_id = id;
- ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
- if (ret < 0)
- goto err_iounmap;
- pci_set_master(pdev);
+ return 0;
+}
- if (!qm->ops->get_irq_num) {
- ret = -EOPNOTSUPP;
- goto err_iounmap;
+static int hisi_qm_memory_init(struct hisi_qm *qm)
+{
+ struct device *dev = &qm->pdev->dev;
+ size_t qp_dma_size, off = 0;
+ int i, ret = 0;
+
+#define QM_INIT_BUF(qm, type, num) do { \
+ (qm)->type = ((qm)->qdma.va + (off)); \
+ (qm)->type##_dma = (qm)->qdma.dma + (off); \
+ off += QMC_ALIGN(sizeof(struct qm_##type) * (num)); \
+} while (0)
+
+ idr_init(&qm->qp_idr);
+ qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * QM_Q_DEPTH) +
+ QMC_ALIGN(sizeof(struct qm_aeqe) * QM_Q_DEPTH) +
+ QMC_ALIGN(sizeof(struct qm_sqc) * qm->qp_num) +
+ QMC_ALIGN(sizeof(struct qm_cqc) * qm->qp_num);
+ qm->qdma.va = dma_alloc_coherent(dev, qm->qdma.size, &qm->qdma.dma,
+ GFP_ATOMIC);
+	dev_dbg(dev, "allocate qm dma buf size=%zx\n", qm->qdma.size);
+ if (!qm->qdma.va)
+ return -ENOMEM;
+
+ QM_INIT_BUF(qm, eqe, QM_Q_DEPTH);
+ QM_INIT_BUF(qm, aeqe, QM_Q_DEPTH);
+ QM_INIT_BUF(qm, sqc, qm->qp_num);
+ QM_INIT_BUF(qm, cqc, qm->qp_num);
+
+ qm->qp_array = kcalloc(qm->qp_num, sizeof(struct hisi_qp), GFP_KERNEL);
+ if (!qm->qp_array) {
+ ret = -ENOMEM;
+ goto err_alloc_qp_array;
}
- num_vec = qm->ops->get_irq_num(qm);
- ret = pci_alloc_irq_vectors(pdev, num_vec, num_vec, PCI_IRQ_MSI);
- if (ret < 0) {
- dev_err(dev, "Failed to enable MSI vectors!\n");
- goto err_iounmap;
+
+ /* one more page for device or qp statuses */
+ qp_dma_size = qm->sqe_size * QM_Q_DEPTH +
+ sizeof(struct qm_cqe) * QM_Q_DEPTH;
+ qp_dma_size = PAGE_ALIGN(qp_dma_size);
+ for (i = 0; i < qm->qp_num; i++) {
+ ret = hisi_qp_memory_init(qm, qp_dma_size, i);
+ if (ret)
+ goto err_init_qp_mem;
+
+		dev_dbg(dev, "allocate qp dma buf size=%zx\n", qp_dma_size);
}
- ret = qm_irq_register(qm);
- if (ret)
- goto err_free_irq_vectors;
+ return ret;
- qm->qp_in_used = 0;
- mutex_init(&qm->mailbox_lock);
- rwlock_init(&qm->qps_lock);
- INIT_WORK(&qm->work, qm_work_process);
+err_init_qp_mem:
+ hisi_qp_memory_uninit(qm, i);
+err_alloc_qp_array:
+ dma_free_coherent(dev, qm->qdma.size, qm->qdma.va, qm->qdma.dma);
- dev_dbg(dev, "init qm %s with %s\n", pdev->is_physfn ? "pf" : "vf",
- qm->use_dma_api ? "dma api" : "iommu api");
+ return ret;
+}
- return 0;
+static void hisi_qm_pre_init(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
-err_free_irq_vectors:
- pci_free_irq_vectors(pdev);
-err_iounmap:
- iounmap(qm->io_base);
-err_release_mem_regions:
- pci_release_mem_regions(pdev);
-err_disable_pcidev:
- pci_disable_device(pdev);
-err_remove_uacce:
- uacce_remove(qm->uacce);
- qm->uacce = NULL;
+ if (qm->ver == QM_HW_V1)
+ qm->ops = &qm_hw_ops_v1;
+ else
+ qm->ops = &qm_hw_ops_v2;
- return ret;
+ pci_set_drvdata(pdev, qm);
+ mutex_init(&qm->mailbox_lock);
+ init_rwsem(&qm->qps_lock);
+ qm->qp_in_used = 0;
}
-EXPORT_SYMBOL_GPL(hisi_qm_init);
/**
* hisi_qm_uninit() - Uninitialize qm.
@@ -1736,10 +2356,20 @@ void hisi_qm_uninit(struct hisi_qm *qm)
struct pci_dev *pdev = qm->pdev;
struct device *dev = &pdev->dev;
+ down_write(&qm->qps_lock);
+
+ if (!qm_avail_state(qm, QM_CLOSE)) {
+ up_write(&qm->qps_lock);
+ return;
+ }
+
uacce_remove(qm->uacce);
qm->uacce = NULL;
- if (qm->use_dma_api && qm->qdma.va) {
+ hisi_qp_memory_uninit(qm, qm->qp_num);
+ idr_destroy(&qm->qp_idr);
+
+ if (qm->qdma.va) {
hisi_qm_cache_wb(qm);
dma_free_coherent(dev, qm->qdma.size,
qm->qdma.va, qm->qdma.dma);
@@ -1751,6 +2381,8 @@ void hisi_qm_uninit(struct hisi_qm *qm)
iounmap(qm->io_base);
pci_release_mem_regions(pdev);
pci_disable_device(pdev);
+
+ up_write(&qm->qps_lock);
}
EXPORT_SYMBOL_GPL(hisi_qm_uninit);
@@ -1781,12 +2413,6 @@ int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number)
EXPORT_SYMBOL_GPL(hisi_qm_get_vft);
/**
- * hisi_qm_set_vft() - Set "virtual function table" for a qm.
- * @fun_num: Number of operated function.
- * @qm: The qm in which to set vft, alway in a PF.
- * @base: The base number of queue in vft.
- * @number: The number of queues in vft. 0 means invalid vft.
- *
 * This function is always called by the PF driver; it is used to assign queues
* among PF and VFs.
*
@@ -1794,7 +2420,7 @@ EXPORT_SYMBOL_GPL(hisi_qm_get_vft);
* Assign queues A~B to VF: hisi_qm_set_vft(qm, 2, A, B - A + 1)
* (VF function number 0x2)
*/
-int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
+static int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
u32 number)
{
u32 max_q_num = qm->ctrl_qp_num;
@@ -1805,7 +2431,6 @@ int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
return qm_set_sqc_cqc_vft(qm, fun_num, base, number);
}
-EXPORT_SYMBOL_GPL(hisi_qm_set_vft);
static void qm_init_eq_aeq_status(struct hisi_qm *qm)
{
@@ -1872,22 +2497,10 @@ static int qm_eq_ctx_cfg(struct hisi_qm *qm)
static int __hisi_qm_start(struct hisi_qm *qm)
{
- struct pci_dev *pdev = qm->pdev;
- struct device *dev = &pdev->dev;
- size_t off = 0;
int ret;
-#define QM_INIT_BUF(qm, type, num) do { \
- (qm)->type = ((qm)->qdma.va + (off)); \
- (qm)->type##_dma = (qm)->qdma.dma + (off); \
- off += QMC_ALIGN(sizeof(struct qm_##type) * (num)); \
-} while (0)
-
WARN_ON(!qm->qdma.dma);
- if (qm->qp_num == 0)
- return -EINVAL;
-
if (qm->fun_type == QM_HW_PF) {
ret = qm_dev_mem_reset(qm);
if (ret)
@@ -1898,21 +2511,6 @@ static int __hisi_qm_start(struct hisi_qm *qm)
return ret;
}
- QM_INIT_BUF(qm, eqe, QM_Q_DEPTH);
- QM_INIT_BUF(qm, aeqe, QM_Q_DEPTH);
- QM_INIT_BUF(qm, sqc, qm->qp_num);
- QM_INIT_BUF(qm, cqc, qm->qp_num);
-
- dev_dbg(dev, "init qm buffer:\n"
- " eqe (%pK, %lx)\n"
- " aeqe (%pK, %lx)\n"
- " sqc (%pK, %lx)\n"
- " cqc (%pK, %lx)\n",
- qm->eqe, (unsigned long)qm->eqe_dma,
- qm->aeqe, (unsigned long)qm->aeqe_dma,
- qm->sqc, (unsigned long)qm->sqc_dma,
- qm->cqc, (unsigned long)qm->cqc_dma);
-
ret = qm_eq_ctx_cfg(qm);
if (ret)
return ret;
@@ -1940,43 +2538,102 @@ static int __hisi_qm_start(struct hisi_qm *qm)
int hisi_qm_start(struct hisi_qm *qm)
{
struct device *dev = &qm->pdev->dev;
+ int ret = 0;
+
+ down_write(&qm->qps_lock);
+
+ if (!qm_avail_state(qm, QM_START)) {
+ up_write(&qm->qps_lock);
+ return -EPERM;
+ }
dev_dbg(dev, "qm start with %d queue pairs\n", qm->qp_num);
if (!qm->qp_num) {
dev_err(dev, "qp_num should not be 0\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_unlock;
}
- if (!qm->qp_bitmap) {
- qm->qp_bitmap = devm_kcalloc(dev, BITS_TO_LONGS(qm->qp_num),
- sizeof(long), GFP_KERNEL);
- qm->qp_array = devm_kcalloc(dev, qm->qp_num,
- sizeof(struct hisi_qp *),
- GFP_KERNEL);
- if (!qm->qp_bitmap || !qm->qp_array)
- return -ENOMEM;
+ ret = __hisi_qm_start(qm);
+ if (!ret)
+ atomic_set(&qm->status.flags, QM_START);
+
+err_unlock:
+ up_write(&qm->qps_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(hisi_qm_start);
+
+static int qm_restart(struct hisi_qm *qm)
+{
+ struct device *dev = &qm->pdev->dev;
+ struct hisi_qp *qp;
+ int ret, i;
+
+ ret = hisi_qm_start(qm);
+ if (ret < 0)
+ return ret;
+
+ down_write(&qm->qps_lock);
+ for (i = 0; i < qm->qp_num; i++) {
+ qp = &qm->qp_array[i];
+ if (atomic_read(&qp->qp_status.flags) == QP_STOP &&
+ qp->is_resetting == true) {
+ ret = qm_start_qp_nolock(qp, 0);
+ if (ret < 0) {
+ dev_err(dev, "Failed to start qp%d!\n", i);
+
+ up_write(&qm->qps_lock);
+ return ret;
+ }
+ qp->is_resetting = false;
+ }
}
+ up_write(&qm->qps_lock);
- if (!qm->use_dma_api) {
- dev_dbg(&qm->pdev->dev, "qm delay start\n");
- return 0;
- } else if (!qm->qdma.va) {
- qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * QM_Q_DEPTH) +
- QMC_ALIGN(sizeof(struct qm_aeqe) * QM_Q_DEPTH) +
- QMC_ALIGN(sizeof(struct qm_sqc) * qm->qp_num) +
- QMC_ALIGN(sizeof(struct qm_cqc) * qm->qp_num);
- qm->qdma.va = dma_alloc_coherent(dev, qm->qdma.size,
- &qm->qdma.dma, GFP_KERNEL);
- dev_dbg(dev, "allocate qm dma buf(va=%pK, dma=%pad, size=%zx)\n",
- qm->qdma.va, &qm->qdma.dma, qm->qdma.size);
- if (!qm->qdma.va)
- return -ENOMEM;
+ return 0;
+}
+
+/* Stop started qps in reset flow */
+static int qm_stop_started_qp(struct hisi_qm *qm)
+{
+ struct device *dev = &qm->pdev->dev;
+ struct hisi_qp *qp;
+ int i, ret;
+
+ for (i = 0; i < qm->qp_num; i++) {
+ qp = &qm->qp_array[i];
+ if (qp && atomic_read(&qp->qp_status.flags) == QP_START) {
+ qp->is_resetting = true;
+ ret = qm_stop_qp_nolock(qp);
+ if (ret < 0) {
+ dev_err(dev, "Failed to stop qp%d!\n", i);
+ return ret;
+ }
+ }
}
- return __hisi_qm_start(qm);
+ return 0;
+}
+
+/**
+ * Clear the memory of all queues in a qm. The accelerator reset flow can
+ * use this to clear the queues.
+ */
+static void qm_clear_queues(struct hisi_qm *qm)
+{
+ struct hisi_qp *qp;
+ int i;
+
+ for (i = 0; i < qm->qp_num; i++) {
+ qp = &qm->qp_array[i];
+ if (qp->is_resetting)
+ memset(qp->qdma.va, 0, qp->qdma.size);
+ }
+
+ memset(qm->qdma.va, 0, qm->qdma.size);
}
-EXPORT_SYMBOL_GPL(hisi_qm_start);
/**
* hisi_qm_stop() - Stop a qm.
@@ -1988,43 +2645,98 @@ EXPORT_SYMBOL_GPL(hisi_qm_start);
*/
int hisi_qm_stop(struct hisi_qm *qm)
{
- struct device *dev;
- struct hisi_qp *qp;
- int ret = 0, i;
+ struct device *dev = &qm->pdev->dev;
+ int ret = 0;
- if (!qm || !qm->pdev) {
- WARN_ON(1);
- return -EINVAL;
+ down_write(&qm->qps_lock);
+
+ if (!qm_avail_state(qm, QM_STOP)) {
+ ret = -EPERM;
+ goto err_unlock;
}
- dev = &qm->pdev->dev;
+ if (qm->status.stop_reason == QM_SOFT_RESET ||
+ qm->status.stop_reason == QM_FLR) {
+ ret = qm_stop_started_qp(qm);
+ if (ret < 0) {
+ dev_err(dev, "Failed to stop started qp!\n");
+ goto err_unlock;
+ }
+ }
/* Mask eq and aeq irq */
writel(0x1, qm->io_base + QM_VF_EQ_INT_MASK);
writel(0x1, qm->io_base + QM_VF_AEQ_INT_MASK);
- /* Stop all qps belong to this qm */
- for (i = 0; i < qm->qp_num; i++) {
- qp = qm->qp_array[i];
- if (qp) {
- ret = hisi_qm_stop_qp(qp);
- if (ret < 0) {
- dev_err(dev, "Failed to stop qp%d!\n", i);
- return -EBUSY;
- }
- }
- }
-
if (qm->fun_type == QM_HW_PF) {
ret = hisi_qm_set_vft(qm, 0, 0, 0);
- if (ret < 0)
+ if (ret < 0) {
dev_err(dev, "Failed to set vft!\n");
+ ret = -EBUSY;
+ goto err_unlock;
+ }
}
+ qm_clear_queues(qm);
+ atomic_set(&qm->status.flags, QM_STOP);
+
+err_unlock:
+ up_write(&qm->qps_lock);
return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_stop);
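
As a usage sketch, a PCI remove() callback built on hisi_qm_stop() and
hisi_qm_uninit() might look like the following; example_remove is a
placeholder and error handling is abbreviated:

	static void example_remove(struct pci_dev *pdev)
	{
		struct hisi_qm *qm = pci_get_drvdata(pdev);

		hisi_qm_stop(qm);	/* a real driver would check the return value */
		hisi_qm_uninit(qm);
	}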
+static ssize_t qm_status_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *pos)
+{
+ struct hisi_qm *qm = filp->private_data;
+ char buf[QM_DBG_READ_LEN];
+ int val, cp_len, len;
+
+ if (*pos)
+ return 0;
+
+ if (count < QM_DBG_READ_LEN)
+ return -ENOSPC;
+
+ val = atomic_read(&qm->status.flags);
+ len = snprintf(buf, QM_DBG_READ_LEN, "%s\n", qm_s[val]);
+ if (!len)
+ return -EFAULT;
+
+ cp_len = copy_to_user(buffer, buf, len);
+ if (cp_len)
+ return -EFAULT;
+
+ return (*pos = len);
+}
+
+static const struct file_operations qm_status_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = qm_status_read,
+};
+
+static int qm_debugfs_atomic64_set(void *data, u64 val)
+{
+ if (val)
+ return -EINVAL;
+
+ atomic64_set((atomic64_t *)data, 0);
+
+ return 0;
+}
+
+static int qm_debugfs_atomic64_get(void *data, u64 *val)
+{
+ *val = atomic64_read((atomic64_t *)data);
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(qm_atomic64_ops, qm_debugfs_atomic64_get,
+ qm_debugfs_atomic64_set, "%llu\n");
+
/**
* hisi_qm_debug_init() - Initialize qm related debugfs files.
* @qm: The qm for which we want to add debugfs files.
@@ -2033,7 +2745,9 @@ EXPORT_SYMBOL_GPL(hisi_qm_stop);
*/
int hisi_qm_debug_init(struct hisi_qm *qm)
{
+ struct qm_dfx *dfx = &qm->debug.dfx;
struct dentry *qm_d;
+ void *data;
int i, ret;
qm_d = debugfs_create_dir("qm", qm->debug.debug_root);
@@ -2047,7 +2761,20 @@ int hisi_qm_debug_init(struct hisi_qm *qm)
goto failed_to_create;
}
- debugfs_create_file("qm_regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops);
+ debugfs_create_file("regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops);
+
+ debugfs_create_file("cmd", 0444, qm->debug.qm_d, qm, &qm_cmd_fops);
+
+ debugfs_create_file("status", 0444, qm->debug.qm_d, qm,
+ &qm_status_fops);
+ for (i = 0; i < ARRAY_SIZE(qm_dfx_files); i++) {
+ data = (atomic64_t *)((uintptr_t)dfx + qm_dfx_files[i].offset);
+ debugfs_create_file(qm_dfx_files[i].name,
+ 0644,
+ qm_d,
+ data,
+ &qm_atomic64_ops);
+ }
return 0;
@@ -2095,8 +2822,7 @@ static void qm_hw_error_init(struct hisi_qm *qm)
return;
}
- qm->ops->hw_error_init(qm, err_info->ce, err_info->nfe,
- err_info->fe, err_info->msi);
+ qm->ops->hw_error_init(qm, err_info->ce, err_info->nfe, err_info->fe);
}
static void qm_hw_error_uninit(struct hisi_qm *qm)
@@ -2109,36 +2835,17 @@ static void qm_hw_error_uninit(struct hisi_qm *qm)
qm->ops->hw_error_uninit(qm);
}
-static pci_ers_result_t qm_hw_error_handle(struct hisi_qm *qm)
+static enum acc_err_result qm_hw_error_handle(struct hisi_qm *qm)
{
if (!qm->ops->hw_error_handle) {
dev_err(&qm->pdev->dev, "QM doesn't support hw error report!\n");
- return PCI_ERS_RESULT_NONE;
+ return ACC_ERR_NONE;
}
return qm->ops->hw_error_handle(qm);
}
/**
- * hisi_qm_get_hw_version() - Get hardware version of a qm.
- * @pdev: The device which hardware version we want to get.
- *
- * This function gets the hardware version of a qm. Return QM_HW_UNKNOWN
- * if the hardware version is not supported.
- */
-enum qm_hw_ver hisi_qm_get_hw_version(struct pci_dev *pdev)
-{
- switch (pdev->revision) {
- case QM_HW_V1:
- case QM_HW_V2:
- return pdev->revision;
- default:
- return QM_HW_UNKNOWN;
- }
-}
-EXPORT_SYMBOL_GPL(hisi_qm_get_hw_version);
-
-/**
* hisi_qm_dev_err_init() - Initialize device error configuration.
* @qm: The qm for which we want to do error initialization.
*
@@ -2299,34 +3006,163 @@ err:
}
EXPORT_SYMBOL_GPL(hisi_qm_alloc_qps_node);
-static pci_ers_result_t qm_dev_err_handle(struct hisi_qm *qm)
+static int qm_vf_q_assign(struct hisi_qm *qm, u32 num_vfs)
+{
+ u32 remain_q_num, q_num, i, j;
+ u32 q_base = qm->qp_num;
+ int ret;
+
+ if (!num_vfs)
+ return -EINVAL;
+
+ remain_q_num = qm->ctrl_qp_num - qm->qp_num;
+
+ /* If remain queues not enough, return error. */
+ if (qm->ctrl_qp_num < qm->qp_num || remain_q_num < num_vfs)
+ return -EINVAL;
+
+ q_num = remain_q_num / num_vfs;
+ for (i = 1; i <= num_vfs; i++) {
+ if (i == num_vfs)
+ q_num += remain_q_num % num_vfs;
+ ret = hisi_qm_set_vft(qm, i, q_base, q_num);
+ if (ret) {
+ for (j = i; j > 0; j--)
+ hisi_qm_set_vft(qm, j, 0, 0);
+ return ret;
+ }
+ q_base += q_num;
+ }
+
+ return 0;
+}
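
As a worked example of this distribution: with ctrl_qp_num = 1024 and a PF
keeping qp_num = 256, remain_q_num is 768; for num_vfs = 3 each VF gets 256
queues starting from q_base = 256, while for num_vfs = 5 the first four VFs
get 153 queues each and the last VF gets 153 + 3 = 156.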
+
+static int qm_clear_vft_config(struct hisi_qm *qm)
+{
+ int ret;
+ u32 i;
+
+ for (i = 1; i <= qm->vfs_num; i++) {
+ ret = hisi_qm_set_vft(qm, i, 0, 0);
+ if (ret)
+ return ret;
+ }
+ qm->vfs_num = 0;
+
+ return 0;
+}
+
+/**
+ * hisi_qm_sriov_enable() - enable virtual functions
+ * @pdev: the PCIe device
+ * @max_vfs: the number of virtual functions to enable
+ *
+ * Returns the number of enabled VFs. If VFs are already enabled, or if
+ * max_vfs exceeds the total number of VFs the device supports, this
+ * returns failure.
+ */
+int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs)
+{
+ struct hisi_qm *qm = pci_get_drvdata(pdev);
+ int pre_existing_vfs, num_vfs, total_vfs, ret;
+
+ total_vfs = pci_sriov_get_totalvfs(pdev);
+ pre_existing_vfs = pci_num_vf(pdev);
+ if (pre_existing_vfs) {
+ pci_err(pdev, "%d VFs already enabled. Please disable pre-enabled VFs!\n",
+ pre_existing_vfs);
+ return 0;
+ }
+
+ num_vfs = min_t(int, max_vfs, total_vfs);
+ ret = qm_vf_q_assign(qm, num_vfs);
+ if (ret) {
+ pci_err(pdev, "Can't assign queues for VF!\n");
+ return ret;
+ }
+
+ qm->vfs_num = num_vfs;
+
+ ret = pci_enable_sriov(pdev, num_vfs);
+ if (ret) {
+ pci_err(pdev, "Can't enable VF!\n");
+ qm_clear_vft_config(qm);
+ return ret;
+ }
+
+ pci_info(pdev, "VF enabled, vfs_num(=%d)!\n", num_vfs);
+
+ return num_vfs;
+}
+EXPORT_SYMBOL_GPL(hisi_qm_sriov_enable);
+
+/**
+ * hisi_qm_sriov_disable - disable virtual functions
+ * @pdev: the PCI device
+ *
+ * Return failure if there are VFs assigned already.
+ */
+int hisi_qm_sriov_disable(struct pci_dev *pdev)
+{
+ struct hisi_qm *qm = pci_get_drvdata(pdev);
+
+ if (pci_vfs_assigned(pdev)) {
+ pci_err(pdev, "Failed to disable VFs as VFs are assigned!\n");
+ return -EPERM;
+ }
+
+	/* remove() in hpre_pci_driver will be called to free VF resources */
+ pci_disable_sriov(pdev);
+ return qm_clear_vft_config(qm);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_sriov_disable);
+
+/**
+ * hisi_qm_sriov_configure - configure the number of VFs
+ * @pdev: The PCI device
+ * @num_vfs: The number of VFs to enable
+ *
+ * Enable SR-IOV according to num_vfs; 0 means disable.
+ */
+int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ if (num_vfs == 0)
+ return hisi_qm_sriov_disable(pdev);
+ else
+ return hisi_qm_sriov_enable(pdev, num_vfs);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_sriov_configure);
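
In practice a driver built on QM can point its PCI driver directly at this
helper; a hedged sketch, where the example_* fields stand in for the driver's
own callbacks and ID table:

	static struct pci_driver example_pci_driver = {
		.name		 = "example_acc",
		.id_table	 = example_dev_ids,
		.probe		 = example_probe,
		.remove		 = example_remove,
		.sriov_configure = hisi_qm_sriov_configure,
	};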
+
+static enum acc_err_result qm_dev_err_handle(struct hisi_qm *qm)
{
u32 err_sts;
if (!qm->err_ini->get_dev_hw_err_status) {
dev_err(&qm->pdev->dev, "Device doesn't support get hw error status!\n");
- return PCI_ERS_RESULT_NONE;
+ return ACC_ERR_NONE;
}
/* get device hardware error status */
err_sts = qm->err_ini->get_dev_hw_err_status(qm);
if (err_sts) {
+ if (err_sts & qm->err_ini->err_info.ecc_2bits_mask)
+ qm->err_status.is_dev_ecc_mbit = true;
+
if (!qm->err_ini->log_dev_hw_err) {
dev_err(&qm->pdev->dev, "Device doesn't support log hw error!\n");
- return PCI_ERS_RESULT_NEED_RESET;
+ return ACC_ERR_NEED_RESET;
}
qm->err_ini->log_dev_hw_err(qm, err_sts);
- return PCI_ERS_RESULT_NEED_RESET;
+ return ACC_ERR_NEED_RESET;
}
- return PCI_ERS_RESULT_RECOVERED;
+ return ACC_ERR_RECOVERED;
}
-static pci_ers_result_t qm_process_dev_error(struct pci_dev *pdev)
+static enum acc_err_result qm_process_dev_error(struct hisi_qm *qm)
{
- struct hisi_qm *qm = pci_get_drvdata(pdev);
- pci_ers_result_t qm_ret, dev_ret;
+ enum acc_err_result qm_ret, dev_ret;
/* log qm error */
qm_ret = qm_hw_error_handle(qm);
@@ -2334,9 +3170,9 @@ static pci_ers_result_t qm_process_dev_error(struct pci_dev *pdev)
/* log device error */
dev_ret = qm_dev_err_handle(qm);
- return (qm_ret == PCI_ERS_RESULT_NEED_RESET ||
- dev_ret == PCI_ERS_RESULT_NEED_RESET) ?
- PCI_ERS_RESULT_NEED_RESET : PCI_ERS_RESULT_RECOVERED;
+ return (qm_ret == ACC_ERR_NEED_RESET ||
+ dev_ret == ACC_ERR_NEED_RESET) ?
+ ACC_ERR_NEED_RESET : ACC_ERR_RECOVERED;
}
/**
@@ -2350,6 +3186,9 @@ static pci_ers_result_t qm_process_dev_error(struct pci_dev *pdev)
pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
+ struct hisi_qm *qm = pci_get_drvdata(pdev);
+ enum acc_err_result ret;
+
if (pdev->is_virtfn)
return PCI_ERS_RESULT_NONE;
@@ -2357,10 +3196,756 @@ pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev,
if (state == pci_channel_io_perm_failure)
return PCI_ERS_RESULT_DISCONNECT;
- return qm_process_dev_error(pdev);
+ ret = qm_process_dev_error(qm);
+ if (ret == ACC_ERR_NEED_RESET)
+ return PCI_ERS_RESULT_NEED_RESET;
+
+ return PCI_ERS_RESULT_RECOVERED;
}
EXPORT_SYMBOL_GPL(hisi_qm_dev_err_detected);
+static int qm_get_hw_error_status(struct hisi_qm *qm)
+{
+ return readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
+}
+
+static int qm_check_req_recv(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ int ret;
+ u32 val;
+
+ writel(ACC_VENDOR_ID_VALUE, qm->io_base + QM_PEH_VENDOR_ID);
+ ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val,
+ (val == ACC_VENDOR_ID_VALUE),
+ POLL_PERIOD, POLL_TIMEOUT);
+ if (ret) {
+		dev_err(&pdev->dev, "Failed to read QM reg!\n");
+ return ret;
+ }
+
+ writel(PCI_VENDOR_ID_HUAWEI, qm->io_base + QM_PEH_VENDOR_ID);
+ ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val,
+ (val == PCI_VENDOR_ID_HUAWEI),
+ POLL_PERIOD, POLL_TIMEOUT);
+ if (ret)
+		dev_err(&pdev->dev, "Failed to read QM reg the second time!\n");
+
+ return ret;
+}
+
+static int qm_set_pf_mse(struct hisi_qm *qm, bool set)
+{
+ struct pci_dev *pdev = qm->pdev;
+ u16 cmd;
+ int i;
+
+ pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+ if (set)
+ cmd |= PCI_COMMAND_MEMORY;
+ else
+ cmd &= ~PCI_COMMAND_MEMORY;
+
+ pci_write_config_word(pdev, PCI_COMMAND, cmd);
+ for (i = 0; i < MAX_WAIT_COUNTS; i++) {
+ pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+ if (set == ((cmd & PCI_COMMAND_MEMORY) >> 1))
+ return 0;
+
+ udelay(1);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int qm_set_vf_mse(struct hisi_qm *qm, bool set)
+{
+ struct pci_dev *pdev = qm->pdev;
+ u16 sriov_ctrl;
+ int pos;
+ int i;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
+ pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl);
+ if (set)
+ sriov_ctrl |= PCI_SRIOV_CTRL_MSE;
+ else
+ sriov_ctrl &= ~PCI_SRIOV_CTRL_MSE;
+ pci_write_config_word(pdev, pos + PCI_SRIOV_CTRL, sriov_ctrl);
+
+ for (i = 0; i < MAX_WAIT_COUNTS; i++) {
+ pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl);
+ if (set == (sriov_ctrl & PCI_SRIOV_CTRL_MSE) >>
+ ACC_PEH_SRIOV_CTRL_VF_MSE_SHIFT)
+ return 0;
+
+ udelay(1);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int qm_set_msi(struct hisi_qm *qm, bool set)
+{
+ struct pci_dev *pdev = qm->pdev;
+
+ if (set) {
+ pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64,
+ 0);
+ } else {
+ pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64,
+ ACC_PEH_MSI_DISABLE);
+ if (qm->err_status.is_qm_ecc_mbit ||
+ qm->err_status.is_dev_ecc_mbit)
+ return 0;
+
+ mdelay(1);
+ if (readl(qm->io_base + QM_PEH_DFX_INFO0))
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int qm_vf_reset_prepare(struct hisi_qm *qm)
+{
+ struct hisi_qm_list *qm_list = qm->qm_list;
+ int stop_reason = qm->status.stop_reason;
+ struct pci_dev *pdev = qm->pdev;
+ struct pci_dev *virtfn;
+ struct hisi_qm *vf_qm;
+ int ret = 0;
+
+ mutex_lock(&qm_list->lock);
+ list_for_each_entry(vf_qm, &qm_list->list, list) {
+ virtfn = vf_qm->pdev;
+ if (virtfn == pdev)
+ continue;
+
+ if (pci_physfn(virtfn) == pdev) {
+ vf_qm->status.stop_reason = stop_reason;
+ ret = hisi_qm_stop(vf_qm);
+ if (ret)
+ goto stop_fail;
+ }
+ }
+
+stop_fail:
+ mutex_unlock(&qm_list->lock);
+ return ret;
+}
+
+static int qm_reset_prepare_ready(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
+ int delay = 0;
+
+ /* All reset requests need to be queued for processing */
+ while (test_and_set_bit(QM_DEV_RESET_FLAG, &pf_qm->reset_flag)) {
+ msleep(++delay);
+ if (delay > QM_RESET_WAIT_TIMEOUT)
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int qm_controller_reset_prepare(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ int ret;
+
+ ret = qm_reset_prepare_ready(qm);
+ if (ret) {
+ pci_err(pdev, "Controller reset not ready!\n");
+ return ret;
+ }
+
+ if (qm->vfs_num) {
+ ret = qm_vf_reset_prepare(qm);
+ if (ret) {
+			pci_err(pdev, "Failed to stop VFs!\n");
+ return ret;
+ }
+ }
+
+ qm->status.stop_reason = QM_SOFT_RESET;
+ ret = hisi_qm_stop(qm);
+ if (ret) {
+		pci_err(pdev, "Failed to stop QM!\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm)
+{
+ u32 nfe_enb = 0;
+
+ if (!qm->err_status.is_dev_ecc_mbit &&
+ qm->err_status.is_qm_ecc_mbit &&
+ qm->err_ini->close_axi_master_ooo) {
+
+ qm->err_ini->close_axi_master_ooo(qm);
+
+ } else if (qm->err_status.is_dev_ecc_mbit &&
+ !qm->err_status.is_qm_ecc_mbit &&
+ !qm->err_ini->close_axi_master_ooo) {
+
+ nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE);
+ writel(nfe_enb & QM_RAS_NFE_MBIT_DISABLE,
+ qm->io_base + QM_RAS_NFE_ENABLE);
+ writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SET);
+ }
+}
+
+static int qm_soft_reset(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ int ret;
+ u32 val;
+
+ /* Ensure all doorbells and mailboxes received by QM */
+ ret = qm_check_req_recv(qm);
+ if (ret)
+ return ret;
+
+ if (qm->vfs_num) {
+ ret = qm_set_vf_mse(qm, false);
+ if (ret) {
+			pci_err(pdev, "Failed to disable vf MSE bit.\n");
+ return ret;
+ }
+ }
+
+ ret = qm_set_msi(qm, false);
+ if (ret) {
+		pci_err(pdev, "Failed to disable PEH MSI bit.\n");
+ return ret;
+ }
+
+ qm_dev_ecc_mbit_handle(qm);
+
+ /* OOO register set and check */
+ writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN,
+ qm->io_base + ACC_MASTER_GLOBAL_CTRL);
+
+ /* If bus lock, reset chip */
+ ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN,
+ val,
+ (val == ACC_MASTER_TRANS_RETURN_RW),
+ POLL_PERIOD, POLL_TIMEOUT);
+ if (ret) {
+ pci_emerg(pdev, "Bus lock! Please reset system.\n");
+ return ret;
+ }
+
+ ret = qm_set_pf_mse(qm, false);
+ if (ret) {
+		pci_err(pdev, "Failed to disable pf MSE bit.\n");
+ return ret;
+ }
+
+	/* The reset-related sub-control registers are not in the PCI BAR */
+ if (ACPI_HANDLE(&pdev->dev)) {
+ unsigned long long value = 0;
+ acpi_status s;
+
+ s = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev),
+ qm->err_ini->err_info.acpi_rst,
+ NULL, &value);
+ if (ACPI_FAILURE(s)) {
+ pci_err(pdev, "NO controller reset method!\n");
+ return -EIO;
+ }
+
+ if (value) {
+ pci_err(pdev, "Reset step %llu failed!\n", value);
+ return -EIO;
+ }
+ } else {
+ pci_err(pdev, "No reset method!\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int qm_vf_reset_done(struct hisi_qm *qm)
+{
+ struct hisi_qm_list *qm_list = qm->qm_list;
+ struct pci_dev *pdev = qm->pdev;
+ struct pci_dev *virtfn;
+ struct hisi_qm *vf_qm;
+ int ret = 0;
+
+ mutex_lock(&qm_list->lock);
+ list_for_each_entry(vf_qm, &qm_list->list, list) {
+ virtfn = vf_qm->pdev;
+ if (virtfn == pdev)
+ continue;
+
+ if (pci_physfn(virtfn) == pdev) {
+ ret = qm_restart(vf_qm);
+ if (ret)
+ goto restart_fail;
+ }
+ }
+
+restart_fail:
+ mutex_unlock(&qm_list->lock);
+ return ret;
+}
+
+static int qm_get_dev_err_status(struct hisi_qm *qm)
+{
+ return qm->err_ini->get_dev_hw_err_status(qm);
+}
+
+static int qm_dev_hw_init(struct hisi_qm *qm)
+{
+ return qm->err_ini->hw_init(qm);
+}
+
+static void qm_restart_prepare(struct hisi_qm *qm)
+{
+ u32 value;
+
+ if (!qm->err_status.is_qm_ecc_mbit &&
+ !qm->err_status.is_dev_ecc_mbit)
+ return;
+
+ /* temporarily close the OOO port used for PEH to write out MSI */
+ value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN);
+ writel(value & ~qm->err_ini->err_info.msi_wr_port,
+ qm->io_base + ACC_AM_CFG_PORT_WR_EN);
+
+	/* clear dev ecc 2bit error source if present */
+ value = qm_get_dev_err_status(qm) &
+ qm->err_ini->err_info.ecc_2bits_mask;
+ if (value && qm->err_ini->clear_dev_hw_err_status)
+ qm->err_ini->clear_dev_hw_err_status(qm, value);
+
+ /* clear QM ecc mbit error source */
+ writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SOURCE);
+
+ /* clear AM Reorder Buffer ecc mbit source */
+ writel(ACC_ROB_ECC_ERR_MULTPL, qm->io_base + ACC_AM_ROB_ECC_INT_STS);
+
+ if (qm->err_ini->open_axi_master_ooo)
+ qm->err_ini->open_axi_master_ooo(qm);
+}
+
+static void qm_restart_done(struct hisi_qm *qm)
+{
+ u32 value;
+
+ if (!qm->err_status.is_qm_ecc_mbit &&
+ !qm->err_status.is_dev_ecc_mbit)
+ return;
+
+ /* open the OOO port for PEH to write out MSI */
+ value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN);
+ value |= qm->err_ini->err_info.msi_wr_port;
+ writel(value, qm->io_base + ACC_AM_CFG_PORT_WR_EN);
+
+ qm->err_status.is_qm_ecc_mbit = false;
+ qm->err_status.is_dev_ecc_mbit = false;
+}
+
+static int qm_controller_reset_done(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ int ret;
+
+ ret = qm_set_msi(qm, true);
+ if (ret) {
+		pci_err(pdev, "Failed to enable PEH MSI bit!\n");
+ return ret;
+ }
+
+ ret = qm_set_pf_mse(qm, true);
+ if (ret) {
+		pci_err(pdev, "Failed to enable pf MSE bit!\n");
+ return ret;
+ }
+
+ if (qm->vfs_num) {
+ ret = qm_set_vf_mse(qm, true);
+ if (ret) {
+			pci_err(pdev, "Failed to enable vf MSE bit!\n");
+ return ret;
+ }
+ }
+
+ ret = qm_dev_hw_init(qm);
+ if (ret) {
+ pci_err(pdev, "Failed to init device\n");
+ return ret;
+ }
+
+ qm_restart_prepare(qm);
+
+ ret = qm_restart(qm);
+ if (ret) {
+ pci_err(pdev, "Failed to start QM!\n");
+ return ret;
+ }
+
+ if (qm->vfs_num) {
+ ret = qm_vf_q_assign(qm, qm->vfs_num);
+ if (ret) {
+ pci_err(pdev, "Failed to assign queue!\n");
+ return ret;
+ }
+ }
+
+ ret = qm_vf_reset_done(qm);
+ if (ret) {
+ pci_err(pdev, "Failed to start VFs!\n");
+ return -EPERM;
+ }
+
+ hisi_qm_dev_err_init(qm);
+ qm_restart_done(qm);
+
+ clear_bit(QM_DEV_RESET_FLAG, &qm->reset_flag);
+
+ return 0;
+}
+
+static int qm_controller_reset(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ int ret;
+
+ pci_info(pdev, "Controller resetting...\n");
+
+ ret = qm_controller_reset_prepare(qm);
+ if (ret)
+ return ret;
+
+ ret = qm_soft_reset(qm);
+ if (ret) {
+ pci_err(pdev, "Controller reset failed (%d)\n", ret);
+ return ret;
+ }
+
+ ret = qm_controller_reset_done(qm);
+ if (ret)
+ return ret;
+
+ pci_info(pdev, "Controller reset complete\n");
+
+ return 0;
+}
+
+/**
+ * hisi_qm_dev_slot_reset() - slot reset
+ * @pdev: the PCIe device
+ *
+ * This function offers a QM-related PCIe device reset interface. Drivers that
+ * use QM can use this function as slot_reset in their struct pci_error_handlers.
+ */
+pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev)
+{
+ struct hisi_qm *qm = pci_get_drvdata(pdev);
+ int ret;
+
+ if (pdev->is_virtfn)
+ return PCI_ERS_RESULT_RECOVERED;
+
+ pci_aer_clear_nonfatal_status(pdev);
+
+ /* reset pcie device controller */
+ ret = qm_controller_reset(qm);
+ if (ret) {
+ pci_err(pdev, "Controller reset failed (%d)\n", ret);
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+EXPORT_SYMBOL_GPL(hisi_qm_dev_slot_reset);
+
+/* check the interrupt is ecc-mbit error or not */
+static int qm_check_dev_error(struct hisi_qm *qm)
+{
+ int ret;
+
+ if (qm->fun_type == QM_HW_VF)
+ return 0;
+
+ ret = qm_get_hw_error_status(qm) & QM_ECC_MBIT;
+ if (ret)
+ return ret;
+
+ return (qm_get_dev_err_status(qm) &
+ qm->err_ini->err_info.ecc_2bits_mask);
+}
+
+void hisi_qm_reset_prepare(struct pci_dev *pdev)
+{
+ struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
+ struct hisi_qm *qm = pci_get_drvdata(pdev);
+ u32 delay = 0;
+ int ret;
+
+ hisi_qm_dev_err_uninit(pf_qm);
+
+ /*
+	 * Check whether an ECC mbit error has occurred. If so, wait for the
+	 * soft reset to fix it.
+ */
+ while (qm_check_dev_error(pf_qm)) {
+ msleep(++delay);
+ if (delay > QM_RESET_WAIT_TIMEOUT)
+ return;
+ }
+
+ ret = qm_reset_prepare_ready(qm);
+ if (ret) {
+ pci_err(pdev, "FLR not ready!\n");
+ return;
+ }
+
+ if (qm->vfs_num) {
+ ret = qm_vf_reset_prepare(qm);
+ if (ret) {
+ pci_err(pdev, "Failed to prepare reset, ret = %d.\n",
+ ret);
+ return;
+ }
+ }
+
+ ret = hisi_qm_stop(qm);
+ if (ret) {
+ pci_err(pdev, "Failed to stop QM, ret = %d.\n", ret);
+ return;
+ }
+
+ pci_info(pdev, "FLR resetting...\n");
+}
+EXPORT_SYMBOL_GPL(hisi_qm_reset_prepare);
+
+static bool qm_flr_reset_complete(struct pci_dev *pdev)
+{
+ struct pci_dev *pf_pdev = pci_physfn(pdev);
+ struct hisi_qm *qm = pci_get_drvdata(pf_pdev);
+ u32 id;
+
+ pci_read_config_dword(qm->pdev, PCI_COMMAND, &id);
+ if (id == QM_PCI_COMMAND_INVALID) {
+ pci_err(pdev, "Device can not be used!\n");
+ return false;
+ }
+
+ clear_bit(QM_DEV_RESET_FLAG, &qm->reset_flag);
+
+ return true;
+}
+
+void hisi_qm_reset_done(struct pci_dev *pdev)
+{
+ struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
+ struct hisi_qm *qm = pci_get_drvdata(pdev);
+ int ret;
+
+ hisi_qm_dev_err_init(pf_qm);
+
+ ret = qm_restart(qm);
+ if (ret) {
+ pci_err(pdev, "Failed to start QM, ret = %d.\n", ret);
+ goto flr_done;
+ }
+
+ if (qm->fun_type == QM_HW_PF) {
+ ret = qm_dev_hw_init(qm);
+ if (ret) {
+ pci_err(pdev, "Failed to init PF, ret = %d.\n", ret);
+ goto flr_done;
+ }
+
+ if (!qm->vfs_num)
+ goto flr_done;
+
+ ret = qm_vf_q_assign(qm, qm->vfs_num);
+ if (ret) {
+ pci_err(pdev, "Failed to assign VFs, ret = %d.\n", ret);
+ goto flr_done;
+ }
+
+ ret = qm_vf_reset_done(qm);
+ if (ret) {
+ pci_err(pdev, "Failed to start VFs, ret = %d.\n", ret);
+ goto flr_done;
+ }
+ }
+
+flr_done:
+ if (qm_flr_reset_complete(pdev))
+ pci_info(pdev, "FLR reset complete\n");
+}
+EXPORT_SYMBOL_GPL(hisi_qm_reset_done);
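
Taken together, the exported hooks above are intended to be wired into a
driver's PCI error handling; a hedged sketch of such a table (the name is a
placeholder):

	static const struct pci_error_handlers example_err_handler = {
		.error_detected	= hisi_qm_dev_err_detected,
		.slot_reset	= hisi_qm_dev_slot_reset,
		.reset_prepare	= hisi_qm_reset_prepare,
		.reset_done	= hisi_qm_reset_done,
	};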
+
+static irqreturn_t qm_abnormal_irq(int irq, void *data)
+{
+ struct hisi_qm *qm = data;
+ enum acc_err_result ret;
+
+ atomic64_inc(&qm->debug.dfx.abnormal_irq_cnt);
+ ret = qm_process_dev_error(qm);
+ if (ret == ACC_ERR_NEED_RESET)
+ schedule_work(&qm->rst_work);
+
+ return IRQ_HANDLED;
+}
+
+static int qm_irq_register(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ int ret;
+
+ ret = request_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR),
+ qm_irq, IRQF_SHARED, qm->dev_name, qm);
+ if (ret)
+ return ret;
+
+ if (qm->ver != QM_HW_V1) {
+ ret = request_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR),
+ qm_aeq_irq, IRQF_SHARED, qm->dev_name, qm);
+ if (ret)
+ goto err_aeq_irq;
+
+ if (qm->fun_type == QM_HW_PF) {
+ ret = request_irq(pci_irq_vector(pdev,
+ QM_ABNORMAL_EVENT_IRQ_VECTOR),
+ qm_abnormal_irq, IRQF_SHARED,
+ qm->dev_name, qm);
+ if (ret)
+				goto err_abnormal_irq;
+ }
+ }
+
+ return 0;
+
+err_abnormal_irq:
+ free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm);
+err_aeq_irq:
+ free_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), qm);
+ return ret;
+}
+
+static void hisi_qm_controller_reset(struct work_struct *rst_work)
+{
+ struct hisi_qm *qm = container_of(rst_work, struct hisi_qm, rst_work);
+ int ret;
+
+ /* reset pcie device controller */
+ ret = qm_controller_reset(qm);
+ if (ret)
+ dev_err(&qm->pdev->dev, "controller reset failed (%d)\n", ret);
+}
+
+/**
+ * hisi_qm_init() - Initialize the configuration of a qm.
+ * @qm: The qm to be initialized.
+ *
+ * This function initializes the qm; hisi_qm_start() can then be called to put
+ * the qm to work.
+ */
+int hisi_qm_init(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ struct device *dev = &pdev->dev;
+ unsigned int num_vec;
+ int ret;
+
+ hisi_qm_pre_init(qm);
+
+ ret = qm_alloc_uacce(qm);
+ if (ret < 0)
+ dev_warn(&pdev->dev, "fail to alloc uacce (%d)\n", ret);
+
+ ret = pci_enable_device_mem(pdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to enable device mem!\n");
+ goto err_remove_uacce;
+ }
+
+ ret = pci_request_mem_regions(pdev, qm->dev_name);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to request mem regions!\n");
+ goto err_disable_pcidev;
+ }
+
+ qm->phys_base = pci_resource_start(pdev, PCI_BAR_2);
+ qm->phys_size = pci_resource_len(qm->pdev, PCI_BAR_2);
+ qm->io_base = ioremap(qm->phys_base, qm->phys_size);
+ if (!qm->io_base) {
+ ret = -EIO;
+ goto err_release_mem_regions;
+ }
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ if (ret < 0)
+ goto err_iounmap;
+ pci_set_master(pdev);
+
+ if (!qm->ops->get_irq_num) {
+ ret = -EOPNOTSUPP;
+ goto err_iounmap;
+ }
+ num_vec = qm->ops->get_irq_num(qm);
+ ret = pci_alloc_irq_vectors(pdev, num_vec, num_vec, PCI_IRQ_MSI);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enable MSI vectors!\n");
+ goto err_iounmap;
+ }
+
+ ret = qm_irq_register(qm);
+ if (ret)
+ goto err_free_irq_vectors;
+
+ if (qm->fun_type == QM_HW_VF && qm->ver != QM_HW_V1) {
+ /* v2 starts to support get vft by mailbox */
+ ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
+ if (ret)
+ goto err_irq_unregister;
+ }
+
+ ret = hisi_qm_memory_init(qm);
+ if (ret)
+ goto err_irq_unregister;
+
+ INIT_WORK(&qm->work, qm_work_process);
+ if (qm->fun_type == QM_HW_PF)
+ INIT_WORK(&qm->rst_work, hisi_qm_controller_reset);
+
+ atomic_set(&qm->status.flags, QM_INIT);
+
+ return 0;
+
+err_irq_unregister:
+ qm_irq_unregister(qm);
+err_free_irq_vectors:
+ pci_free_irq_vectors(pdev);
+err_iounmap:
+ iounmap(qm->io_base);
+err_release_mem_regions:
+ pci_release_mem_regions(pdev);
+err_disable_pcidev:
+ pci_disable_device(pdev);
+err_remove_uacce:
+ uacce_remove(qm->uacce);
+ qm->uacce = NULL;
+ return ret;
+}
+EXPORT_SYMBOL_GPL(hisi_qm_init);
+
+
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>");
MODULE_DESCRIPTION("HiSilicon Accelerator queue manager driver");
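
For orientation, a minimal probe skeleton built on the QM helpers exported above might look as follows. The my_acc_*/MY_* names are placeholders, and the real users of this pattern are the sec and zip probe paths further down in this diff.

/* Illustrative sketch only; mirrors the sec/zip probe flow below. */
#define MY_SQE_SIZE		128
#define MY_PF_DEF_Q_BASE	0
#define MY_PF_DEF_Q_NUM		64

static const char my_acc_name[] = "my_acc";
static u32 vfs_num;	/* module parameter, validated by vfs_num_set() */

static int my_acc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct hisi_qm *qm;
	int ret;

	qm = devm_kzalloc(&pdev->dev, sizeof(*qm), GFP_KERNEL);
	if (!qm)
		return -ENOMEM;

	qm->pdev = pdev;
	qm->ver = pdev->revision;	/* QM_HW_V1/V2/V3 from the PCI revision */
	qm->sqe_size = MY_SQE_SIZE;
	qm->dev_name = my_acc_name;
	qm->fun_type = QM_HW_PF;	/* QM_HW_VF when probing the VF device ID */
	qm->qp_base = MY_PF_DEF_Q_BASE;
	qm->qp_num = MY_PF_DEF_Q_NUM;

	ret = hisi_qm_init(qm);		/* BAR mapping, IRQs, QM_INIT state */
	if (ret)
		return ret;

	ret = hisi_qm_start(qm);
	if (ret)
		goto err_qm_uninit;

	if (vfs_num) {			/* optionally bring up VFs at probe time */
		ret = hisi_qm_sriov_enable(pdev, vfs_num);
		if (ret < 0)
			goto err_qm_stop;
	}

	return 0;

err_qm_stop:
	hisi_qm_stop(qm);
err_qm_uninit:
	hisi_qm_uninit(qm);
	return ret;
}
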
diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h
index ec5b6f48db6c..0a351de8d838 100644
--- a/drivers/crypto/hisilicon/qm.h
+++ b/drivers/crypto/hisilicon/qm.h
@@ -8,6 +8,10 @@
#include <linux/module.h>
#include <linux/pci.h>
+#define QM_QNUM_V1 4096
+#define QM_QNUM_V2 1024
+#define QM_MAX_VFS_NUM_V2 63
+
/* qm user domain */
#define QM_ARUSER_M_CFG_1 0x100088
#define AXUSER_SNOOP_ENABLE BIT(30)
@@ -70,7 +74,7 @@
#define QM_BASE_NFE (QM_AXI_RRESP | QM_AXI_BRESP | QM_ECC_MBIT | \
QM_ACC_GET_TASK_TIMEOUT | QM_DB_TIMEOUT | \
- QM_OF_FIFO_OF)
+ QM_OF_FIFO_OF | QM_DB_RANDOM_INVALID)
#define QM_BASE_CE QM_ECC_1BIT
#define QM_Q_DEPTH 1024
@@ -80,14 +84,31 @@
/* page number for queue file region */
#define QM_DOORBELL_PAGE_NR 1
+enum qm_stop_reason {
+ QM_NORMAL,
+ QM_SOFT_RESET,
+ QM_FLR,
+};
+
+enum qm_state {
+ QM_INIT = 0,
+ QM_START,
+ QM_CLOSE,
+ QM_STOP,
+};
+
enum qp_state {
+ QP_INIT = 1,
+ QP_START,
QP_STOP,
+ QP_CLOSE,
};
enum qm_hw_ver {
QM_HW_UNKNOWN = -1,
QM_HW_V1 = 0x20,
QM_HW_V2 = 0x21,
+ QM_HW_V3 = 0x30,
};
enum qm_fun_type {
@@ -101,6 +122,14 @@ enum qm_debug_file {
DEBUG_FILE_NUM,
};
+struct qm_dfx {
+ atomic64_t err_irq_cnt;
+ atomic64_t aeq_irq_cnt;
+ atomic64_t abnormal_irq_cnt;
+ atomic64_t create_qp_err_cnt;
+ atomic64_t mb_err_cnt;
+};
+
struct debugfs_file {
enum qm_debug_file index;
struct mutex lock;
@@ -109,6 +138,9 @@ struct debugfs_file {
struct qm_debug {
u32 curr_qm_qp_num;
+ u32 sqe_mask_offset;
+ u32 sqe_mask_len;
+ struct qm_dfx dfx;
struct dentry *debug_root;
struct dentry *qm_d;
struct debugfs_file files[DEBUG_FILE_NUM];
@@ -125,22 +157,34 @@ struct hisi_qm_status {
bool eqc_phase;
u32 aeq_head;
bool aeqc_phase;
- unsigned long flags;
+ atomic_t flags;
+ int stop_reason;
};
struct hisi_qm;
struct hisi_qm_err_info {
+ char *acpi_rst;
+ u32 msi_wr_port;
+ u32 ecc_2bits_mask;
u32 ce;
u32 nfe;
u32 fe;
- u32 msi;
+};
+
+struct hisi_qm_err_status {
+ u32 is_qm_ecc_mbit;
+ u32 is_dev_ecc_mbit;
};
struct hisi_qm_err_ini {
+ int (*hw_init)(struct hisi_qm *qm);
void (*hw_err_enable)(struct hisi_qm *qm);
void (*hw_err_disable)(struct hisi_qm *qm);
u32 (*get_dev_hw_err_status)(struct hisi_qm *qm);
+ void (*clear_dev_hw_err_status)(struct hisi_qm *qm, u32 err_sts);
+ void (*open_axi_master_ooo)(struct hisi_qm *qm);
+ void (*close_axi_master_ooo)(struct hisi_qm *qm);
void (*log_dev_hw_err)(struct hisi_qm *qm, u32 err_sts);
struct hisi_qm_err_info err_info;
};
@@ -161,7 +205,9 @@ struct hisi_qm {
u32 qp_num;
u32 qp_in_used;
u32 ctrl_qp_num;
+ u32 vfs_num;
struct list_head list;
+ struct hisi_qm_list *qm_list;
struct qm_dma qdma;
struct qm_sqc *sqc;
@@ -175,10 +221,12 @@ struct hisi_qm {
struct hisi_qm_status status;
const struct hisi_qm_err_ini *err_ini;
+ struct hisi_qm_err_status err_status;
+ unsigned long reset_flag;
- rwlock_t qps_lock;
- unsigned long *qp_bitmap;
- struct hisi_qp **qp_array;
+ struct rw_semaphore qps_lock;
+ struct idr qp_idr;
+ struct hisi_qp *qp_array;
struct mutex mailbox_lock;
@@ -187,13 +235,12 @@ struct hisi_qm {
struct qm_debug debug;
u32 error_mask;
- u32 msi_mask;
struct workqueue_struct *wq;
struct work_struct work;
+ struct work_struct rst_work;
const char *algs;
- bool use_dma_api;
bool use_sva;
resource_size_t phys_base;
resource_size_t phys_size;
@@ -205,7 +252,7 @@ struct hisi_qp_status {
u16 sq_tail;
u16 cq_head;
bool cqc_phase;
- unsigned long flags;
+ atomic_t flags;
};
struct hisi_qp_ops {
@@ -230,10 +277,58 @@ struct hisi_qp {
void (*event_cb)(struct hisi_qp *qp);
struct hisi_qm *qm;
+ bool is_resetting;
u16 pasid;
struct uacce_queue *uacce_q;
};
+static inline int q_num_set(const char *val, const struct kernel_param *kp,
+ unsigned int device)
+{
+ struct pci_dev *pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI,
+ device, NULL);
+ u32 n, q_num;
+ int ret;
+
+ if (!val)
+ return -EINVAL;
+
+ if (!pdev) {
+ q_num = min_t(u32, QM_QNUM_V1, QM_QNUM_V2);
+		pr_info("No device found, assuming queue number is %d\n",
+ q_num);
+ } else {
+ if (pdev->revision == QM_HW_V1)
+ q_num = QM_QNUM_V1;
+ else
+ q_num = QM_QNUM_V2;
+ }
+
+ ret = kstrtou32(val, 10, &n);
+ if (ret || !n || n > q_num)
+ return -EINVAL;
+
+ return param_set_int(val, kp);
+}
+
+static inline int vfs_num_set(const char *val, const struct kernel_param *kp)
+{
+ u32 n;
+ int ret;
+
+ if (!val)
+ return -EINVAL;
+
+ ret = kstrtou32(val, 10, &n);
+ if (ret < 0)
+ return ret;
+
+ if (n > QM_MAX_VFS_NUM_V2)
+ return -EINVAL;
+
+ return param_set_int(val, kp);
+}
+
static inline void hisi_qm_init_list(struct hisi_qm_list *qm_list)
{
INIT_LIST_HEAD(&qm_list->list);
@@ -267,14 +362,19 @@ void hisi_qm_release_qp(struct hisi_qp *qp);
int hisi_qp_send(struct hisi_qp *qp, const void *msg);
int hisi_qm_get_free_qp_num(struct hisi_qm *qm);
int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number);
-int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base, u32 number);
int hisi_qm_debug_init(struct hisi_qm *qm);
enum qm_hw_ver hisi_qm_get_hw_version(struct pci_dev *pdev);
void hisi_qm_debug_regs_clear(struct hisi_qm *qm);
+int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs);
+int hisi_qm_sriov_disable(struct pci_dev *pdev);
+int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs);
void hisi_qm_dev_err_init(struct hisi_qm *qm);
void hisi_qm_dev_err_uninit(struct hisi_qm *qm);
pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev,
pci_channel_state_t state);
+pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev);
+void hisi_qm_reset_prepare(struct pci_dev *pdev);
+void hisi_qm_reset_done(struct pci_dev *pdev);
struct hisi_acc_sgl_pool;
struct hisi_acc_hw_sgl *hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
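
The new q_num_set() and vfs_num_set() inline helpers above are intended to back per-driver module parameters through module_param_cb(). A minimal illustrative wiring (the device ID and default value are placeholders) matches the versions added to sec_main.c and zip_main.c later in this diff:

/* Illustrative wiring only; the real versions are in sec_main.c / zip_main.c. */
static int my_pf_q_num_set(const char *val, const struct kernel_param *kp)
{
	return q_num_set(val, kp, MY_PF_PCI_DEVICE_ID);
}

static const struct kernel_param_ops my_pf_q_num_ops = {
	.set = my_pf_q_num_set,
	.get = param_get_int,
};

static u32 pf_q_num = MY_PF_DEF_Q_NUM;
module_param_cb(pf_q_num, &my_pf_q_num_ops, &pf_q_num, 0444);

static const struct kernel_param_ops my_vfs_num_ops = {
	.set = vfs_num_set,
	.get = param_get_int,
};

static u32 vfs_num;
module_param_cb(vfs_num, &my_vfs_num_ops, &vfs_num, 0444);
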
diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
index 3598fa17beb2..7b64aca704d6 100644
--- a/drivers/crypto/hisilicon/sec2/sec.h
+++ b/drivers/crypto/hisilicon/sec2/sec.h
@@ -160,6 +160,10 @@ struct sec_debug_file {
struct sec_dfx {
atomic64_t send_cnt;
atomic64_t recv_cnt;
+ atomic64_t send_busy_cnt;
+ atomic64_t err_bd_cnt;
+ atomic64_t invalid_req_cnt;
+ atomic64_t done_flag_cnt;
};
struct sec_debug {
@@ -172,7 +176,6 @@ struct sec_dev {
struct sec_debug debug;
u32 ctx_q_num;
bool iommu_used;
- u32 num_vfs;
unsigned long status;
};
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index 7f1c6a31b82f..64614a9bdf21 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -148,6 +148,7 @@ static int sec_aead_verify(struct sec_req *req)
static void sec_req_cb(struct hisi_qp *qp, void *resp)
{
struct sec_qp_ctx *qp_ctx = qp->qp_ctx;
+ struct sec_dfx *dfx = &qp_ctx->ctx->sec->debug.dfx;
struct sec_sqe *bd = resp;
struct sec_ctx *ctx;
struct sec_req *req;
@@ -157,11 +158,16 @@ static void sec_req_cb(struct hisi_qp *qp, void *resp)
type = bd->type_cipher_auth & SEC_TYPE_MASK;
if (unlikely(type != SEC_BD_TYPE2)) {
+ atomic64_inc(&dfx->err_bd_cnt);
pr_err("err bd type [%d]\n", type);
return;
}
req = qp_ctx->req_list[le16_to_cpu(bd->type2.tag)];
+ if (unlikely(!req)) {
+ atomic64_inc(&dfx->invalid_req_cnt);
+ return;
+ }
req->err_type = bd->type2.error_type;
ctx = req->ctx;
done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK;
@@ -174,12 +180,13 @@ static void sec_req_cb(struct hisi_qp *qp, void *resp)
"err_type[%d],done[%d],flag[%d]\n",
req->err_type, done, flag);
err = -EIO;
+ atomic64_inc(&dfx->done_flag_cnt);
}
if (ctx->alg_type == SEC_AEAD && !req->c_req.encrypt)
err = sec_aead_verify(req);
- atomic64_inc(&ctx->sec->debug.dfx.recv_cnt);
+ atomic64_inc(&dfx->recv_cnt);
ctx->req_op->buf_unmap(ctx, req);
@@ -200,10 +207,12 @@ static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
return -ENOBUFS;
if (!ret) {
- if (req->fake_busy)
+ if (req->fake_busy) {
+ atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt);
ret = -EBUSY;
- else
+ } else {
ret = -EINPROGRESS;
+ }
}
return ret;
@@ -832,7 +841,6 @@ static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx,
struct crypto_authenc_keys *keys)
{
struct crypto_shash *hash_tfm = ctx->hash_tfm;
- SHASH_DESC_ON_STACK(shash, hash_tfm);
int blocksize, ret;
if (!keys->authkeylen) {
@@ -842,8 +850,8 @@ static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx,
blocksize = crypto_shash_blocksize(hash_tfm);
if (keys->authkeylen > blocksize) {
- ret = crypto_shash_digest(shash, keys->authkey,
- keys->authkeylen, ctx->a_key);
+ ret = crypto_shash_tfm_digest(hash_tfm, keys->authkey,
+ keys->authkeylen, ctx->a_key);
if (ret) {
pr_err("hisi_sec2: aead auth digest error!\n");
return -EINVAL;
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index 1f54ebe164b6..a4cb58b54b25 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -80,6 +80,9 @@
#define SEC_VF_CNT_MASK 0xffffffc0
#define SEC_DBGFS_VAL_MAX_LEN 20
+#define SEC_SQE_MASK_OFFSET 64
+#define SEC_SQE_MASK_LEN 48
+
#define SEC_ADDR(qm, offset) ((qm)->io_base + (offset) + \
SEC_ENGINE_PF_CFG_OFF + SEC_ACC_COMMON_REG_OFF)
@@ -88,6 +91,11 @@ struct sec_hw_error {
const char *msg;
};
+struct sec_dfx_item {
+ const char *name;
+ u32 offset;
+};
+
static const char sec_name[] = "hisi_sec2";
static struct dentry *sec_debugfs_root;
static struct hisi_qm_list sec_devices;
@@ -110,7 +118,16 @@ static const char * const sec_dbg_file_name[] = {
[SEC_CLEAR_ENABLE] = "clear_enable",
};
-static struct debugfs_reg32 sec_dfx_regs[] = {
+static struct sec_dfx_item sec_dfx_labels[] = {
+ {"send_cnt", offsetof(struct sec_dfx, send_cnt)},
+ {"recv_cnt", offsetof(struct sec_dfx, recv_cnt)},
+ {"send_busy_cnt", offsetof(struct sec_dfx, send_busy_cnt)},
+ {"err_bd_cnt", offsetof(struct sec_dfx, err_bd_cnt)},
+ {"invalid_req_cnt", offsetof(struct sec_dfx, invalid_req_cnt)},
+ {"done_flag_cnt", offsetof(struct sec_dfx, done_flag_cnt)},
+};
+
+static const struct debugfs_reg32 sec_dfx_regs[] = {
{"SEC_PF_ABNORMAL_INT_SOURCE ", 0x301010},
{"SEC_SAA_EN ", 0x301270},
{"SEC_BD_LATENCY_MIN ", 0x301600},
@@ -136,45 +153,14 @@ static struct debugfs_reg32 sec_dfx_regs[] = {
static int sec_pf_q_num_set(const char *val, const struct kernel_param *kp)
{
- struct pci_dev *pdev;
- u32 n, q_num;
- u8 rev_id;
- int ret;
-
- if (!val)
- return -EINVAL;
-
- pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI,
- SEC_PF_PCI_DEVICE_ID, NULL);
- if (!pdev) {
- q_num = min_t(u32, SEC_QUEUE_NUM_V1, SEC_QUEUE_NUM_V2);
- pr_info("No device, suppose queue number is %d!\n", q_num);
- } else {
- rev_id = pdev->revision;
-
- switch (rev_id) {
- case QM_HW_V1:
- q_num = SEC_QUEUE_NUM_V1;
- break;
- case QM_HW_V2:
- q_num = SEC_QUEUE_NUM_V2;
- break;
- default:
- return -EINVAL;
- }
- }
-
- ret = kstrtou32(val, 10, &n);
- if (ret || !n || n > q_num)
- return -EINVAL;
-
- return param_set_int(val, kp);
+ return q_num_set(val, kp, SEC_PF_PCI_DEVICE_ID);
}
static const struct kernel_param_ops sec_pf_q_num_ops = {
.set = sec_pf_q_num_set,
.get = param_get_int,
};
+
static u32 pf_q_num = SEC_PF_DEF_Q_NUM;
module_param_cb(pf_q_num, &sec_pf_q_num_ops, &pf_q_num, 0444);
MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 0-4096, v2 0-1024)");
@@ -207,6 +193,15 @@ static u32 ctx_q_num = SEC_CTX_Q_NUM_DEF;
module_param_cb(ctx_q_num, &sec_ctx_q_num_ops, &ctx_q_num, 0444);
MODULE_PARM_DESC(ctx_q_num, "Queue num in ctx (24 default, 2, 4, ..., 32)");
+static const struct kernel_param_ops vfs_num_ops = {
+ .set = vfs_num_set,
+ .get = param_get_int,
+};
+
+static u32 vfs_num;
+module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
+MODULE_PARM_DESC(vfs_num, "Number of VFs to enable (1-63), 0 (default)");
+
void sec_destroy_qps(struct hisi_qp **qps, int qp_num)
{
hisi_qm_free_qps(qps, qp_num);
@@ -240,9 +235,8 @@ static const struct pci_device_id sec_dev_ids[] = {
};
MODULE_DEVICE_TABLE(pci, sec_dev_ids);
-static u8 sec_get_endian(struct sec_dev *sec)
+static u8 sec_get_endian(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &sec->qm;
u32 reg;
/*
@@ -270,9 +264,8 @@ static u8 sec_get_endian(struct sec_dev *sec)
return SEC_64BE;
}
-static int sec_engine_init(struct sec_dev *sec)
+static int sec_engine_init(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &sec->qm;
int ret;
u32 reg;
@@ -315,7 +308,7 @@ static int sec_engine_init(struct sec_dev *sec)
/* config endian */
reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG));
- reg |= sec_get_endian(sec);
+ reg |= sec_get_endian(qm);
writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG));
/* Enable sm4 xts mode multiple iv */
@@ -325,10 +318,8 @@ static int sec_engine_init(struct sec_dev *sec)
return 0;
}
-static int sec_set_user_domain_and_cache(struct sec_dev *sec)
+static int sec_set_user_domain_and_cache(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &sec->qm;
-
/* qm user domain */
writel(AXUSER_BASE, qm->io_base + QM_ARUSER_M_CFG_1);
writel(ARUSER_M_CFG_ENABLE, qm->io_base + QM_ARUSER_M_CFG_ENABLE);
@@ -349,7 +340,7 @@ static int sec_set_user_domain_and_cache(struct sec_dev *sec)
CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) |
FIELD_PREP(CQC_CACHE_WB_THRD, 1), qm->io_base + QM_CACHE_CTL);
- return sec_engine_init(sec);
+ return sec_engine_init(qm);
}
/* sec_debug_regs_clear() - clear the sec debug regs */
@@ -424,23 +415,22 @@ static u32 sec_current_qm_read(struct sec_debug_file *file)
static int sec_current_qm_write(struct sec_debug_file *file, u32 val)
{
struct hisi_qm *qm = file->qm;
- struct sec_dev *sec = container_of(qm, struct sec_dev, qm);
u32 vfq_num;
u32 tmp;
- if (val > sec->num_vfs)
+ if (val > qm->vfs_num)
return -EINVAL;
/* According PF or VF Dev ID to calculation curr_qm_qp_num and store */
if (!val) {
qm->debug.curr_qm_qp_num = qm->qp_num;
} else {
- vfq_num = (qm->ctrl_qp_num - qm->qp_num) / sec->num_vfs;
+ vfq_num = (qm->ctrl_qp_num - qm->qp_num) / qm->vfs_num;
- if (val == sec->num_vfs)
+ if (val == qm->vfs_num)
qm->debug.curr_qm_qp_num =
qm->ctrl_qp_num - qm->qp_num -
- (sec->num_vfs - 1) * vfq_num;
+ (qm->vfs_num - 1) * vfq_num;
else
qm->debug.curr_qm_qp_num = vfq_num;
}
@@ -570,10 +560,22 @@ static const struct file_operations sec_dbg_fops = {
static int sec_debugfs_atomic64_get(void *data, u64 *val)
{
*val = atomic64_read((atomic64_t *)data);
+
+ return 0;
+}
+
+static int sec_debugfs_atomic64_set(void *data, u64 val)
+{
+ if (val)
+ return -EINVAL;
+
+ atomic64_set((atomic64_t *)data, 0);
+
return 0;
}
+
DEFINE_DEBUGFS_ATTRIBUTE(sec_atomic64_ops, sec_debugfs_atomic64_get,
- NULL, "%lld\n");
+ sec_debugfs_atomic64_set, "%lld\n");
static int sec_core_debug_init(struct sec_dev *sec)
{
@@ -582,6 +584,7 @@ static int sec_core_debug_init(struct sec_dev *sec)
struct sec_dfx *dfx = &sec->debug.dfx;
struct debugfs_regset32 *regset;
struct dentry *tmp_d;
+ int i;
tmp_d = debugfs_create_dir("sec_dfx", sec->qm.debug.debug_root);
@@ -593,13 +596,15 @@ static int sec_core_debug_init(struct sec_dev *sec)
regset->nregs = ARRAY_SIZE(sec_dfx_regs);
regset->base = qm->io_base;
- debugfs_create_regset32("regs", 0444, tmp_d, regset);
-
- debugfs_create_file("send_cnt", 0444, tmp_d,
- &dfx->send_cnt, &sec_atomic64_ops);
+ if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID)
+ debugfs_create_regset32("regs", 0444, tmp_d, regset);
- debugfs_create_file("recv_cnt", 0444, tmp_d,
- &dfx->recv_cnt, &sec_atomic64_ops);
+ for (i = 0; i < ARRAY_SIZE(sec_dfx_labels); i++) {
+ atomic64_t *data = (atomic64_t *)((uintptr_t)dfx +
+ sec_dfx_labels[i].offset);
+ debugfs_create_file(sec_dfx_labels[i].name, 0644,
+ tmp_d, data, &sec_atomic64_ops);
+ }
return 0;
}
@@ -630,6 +635,9 @@ static int sec_debugfs_init(struct sec_dev *sec)
qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
sec_debugfs_root);
+
+ qm->debug.sqe_mask_offset = SEC_SQE_MASK_OFFSET;
+ qm->debug.sqe_mask_len = SEC_SQE_MASK_LEN;
ret = hisi_qm_debug_init(qm);
if (ret)
goto failed_to_create;
@@ -675,8 +683,6 @@ static void sec_log_hw_error(struct hisi_qm *qm, u32 err_sts)
}
errs++;
}
-
- writel(err_sts, qm->io_base + SEC_CORE_INT_SOURCE);
}
static u32 sec_get_hw_err_status(struct hisi_qm *qm)
@@ -684,17 +690,36 @@ static u32 sec_get_hw_err_status(struct hisi_qm *qm)
return readl(qm->io_base + SEC_CORE_INT_STATUS);
}
+static void sec_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
+{
+ writel(err_sts, qm->io_base + SEC_CORE_INT_SOURCE);
+}
+
+static void sec_open_axi_master_ooo(struct hisi_qm *qm)
+{
+ u32 val;
+
+ val = readl(SEC_ADDR(qm, SEC_CONTROL_REG));
+ writel(val & SEC_AXI_SHUTDOWN_DISABLE, SEC_ADDR(qm, SEC_CONTROL_REG));
+ writel(val | SEC_AXI_SHUTDOWN_ENABLE, SEC_ADDR(qm, SEC_CONTROL_REG));
+}
+
static const struct hisi_qm_err_ini sec_err_ini = {
+ .hw_init = sec_set_user_domain_and_cache,
.hw_err_enable = sec_hw_error_enable,
.hw_err_disable = sec_hw_error_disable,
.get_dev_hw_err_status = sec_get_hw_err_status,
+ .clear_dev_hw_err_status = sec_clear_hw_err_status,
.log_dev_hw_err = sec_log_hw_error,
+ .open_axi_master_ooo = sec_open_axi_master_ooo,
.err_info = {
.ce = QM_BASE_CE,
.nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT |
QM_ACC_WB_NOT_READY_TIMEOUT,
.fe = 0,
- .msi = QM_DB_RANDOM_INVALID,
+ .ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC,
+ .msi_wr_port = BIT(0),
+ .acpi_rst = "SRST",
}
};
@@ -703,22 +728,14 @@ static int sec_pf_probe_init(struct sec_dev *sec)
struct hisi_qm *qm = &sec->qm;
int ret;
- switch (qm->ver) {
- case QM_HW_V1:
+ if (qm->ver == QM_HW_V1)
qm->ctrl_qp_num = SEC_QUEUE_NUM_V1;
- break;
-
- case QM_HW_V2:
+ else
qm->ctrl_qp_num = SEC_QUEUE_NUM_V2;
- break;
-
- default:
- return -EINVAL;
- }
qm->err_ini = &sec_err_ini;
- ret = sec_set_user_domain_and_cache(sec);
+ ret = sec_set_user_domain_and_cache(qm);
if (ret)
return ret;
@@ -730,32 +747,30 @@ static int sec_pf_probe_init(struct sec_dev *sec)
static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
- enum qm_hw_ver rev_id;
-
- rev_id = hisi_qm_get_hw_version(pdev);
- if (rev_id == QM_HW_UNKNOWN)
- return -ENODEV;
+ int ret;
qm->pdev = pdev;
- qm->ver = rev_id;
-
+ qm->ver = pdev->revision;
qm->sqe_size = SEC_SQE_SIZE;
qm->dev_name = sec_name;
+
qm->fun_type = (pdev->device == SEC_PF_PCI_DEVICE_ID) ?
QM_HW_PF : QM_HW_VF;
- qm->use_dma_api = true;
-
- return hisi_qm_init(qm);
-}
-
-static void sec_qm_uninit(struct hisi_qm *qm)
-{
- hisi_qm_uninit(qm);
-}
-
-static int sec_probe_init(struct hisi_qm *qm, struct sec_dev *sec)
-{
- int ret;
+ if (qm->fun_type == QM_HW_PF) {
+ qm->qp_base = SEC_PF_DEF_Q_BASE;
+ qm->qp_num = pf_q_num;
+ qm->debug.curr_qm_qp_num = pf_q_num;
+ qm->qm_list = &sec_devices;
+ } else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) {
+ /*
+		 * There is no way to get the qm configuration from within a VM
+		 * on v1 hardware, so force the PF to use SEC_PF_DEF_Q_NUM and
+		 * allow only one VF on v1 hardware.
+ * v2 hardware has no such problem.
+ */
+ qm->qp_base = SEC_PF_DEF_Q_NUM;
+ qm->qp_num = SEC_QUEUE_NUM_V1 - SEC_PF_DEF_Q_NUM;
+ }
/*
* WQ_HIGHPRI: SEC request must be low delayed,
@@ -763,47 +778,38 @@ static int sec_probe_init(struct hisi_qm *qm, struct sec_dev *sec)
* WQ_UNBOUND: SEC task is likely with long
* running CPU intensive workloads.
*/
- qm->wq = alloc_workqueue("%s", WQ_HIGHPRI |
- WQ_MEM_RECLAIM | WQ_UNBOUND, num_online_cpus(),
- pci_name(qm->pdev));
+ qm->wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_MEM_RECLAIM |
+ WQ_UNBOUND, num_online_cpus(),
+ pci_name(qm->pdev));
if (!qm->wq) {
pci_err(qm->pdev, "fail to alloc workqueue\n");
return -ENOMEM;
}
- if (qm->fun_type == QM_HW_PF) {
- qm->qp_base = SEC_PF_DEF_Q_BASE;
- qm->qp_num = pf_q_num;
- qm->debug.curr_qm_qp_num = pf_q_num;
+ ret = hisi_qm_init(qm);
+ if (ret)
+ destroy_workqueue(qm->wq);
+
+ return ret;
+}
+static void sec_qm_uninit(struct hisi_qm *qm)
+{
+ hisi_qm_uninit(qm);
+}
+
+static int sec_probe_init(struct sec_dev *sec)
+{
+ struct hisi_qm *qm = &sec->qm;
+ int ret;
+
+ if (qm->fun_type == QM_HW_PF) {
ret = sec_pf_probe_init(sec);
if (ret)
- goto err_probe_uninit;
- } else if (qm->fun_type == QM_HW_VF) {
- /*
- * have no way to get qm configure in VM in v1 hardware,
- * so currently force PF to uses SEC_PF_DEF_Q_NUM, and force
- * to trigger only one VF in v1 hardware.
- * v2 hardware has no such problem.
- */
- if (qm->ver == QM_HW_V1) {
- qm->qp_base = SEC_PF_DEF_Q_NUM;
- qm->qp_num = SEC_QUEUE_NUM_V1 - SEC_PF_DEF_Q_NUM;
- } else if (qm->ver == QM_HW_V2) {
- /* v2 starts to support get vft by mailbox */
- ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
- if (ret)
- goto err_probe_uninit;
- }
- } else {
- ret = -ENODEV;
- goto err_probe_uninit;
+ return ret;
}
return 0;
-err_probe_uninit:
- destroy_workqueue(qm->wq);
- return ret;
}
static void sec_probe_uninit(struct hisi_qm *qm)
@@ -840,20 +846,17 @@ static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (!sec)
return -ENOMEM;
- pci_set_drvdata(pdev, sec);
-
- sec->ctx_q_num = ctx_q_num;
- sec_iommu_used_check(sec);
-
qm = &sec->qm;
-
ret = sec_qm_init(qm, pdev);
if (ret) {
- pci_err(pdev, "Failed to pre init qm!\n");
+ pci_err(pdev, "Failed to init SEC QM (%d)!\n", ret);
return ret;
}
- ret = sec_probe_init(qm, sec);
+ sec->ctx_q_num = ctx_q_num;
+ sec_iommu_used_check(sec);
+
+ ret = sec_probe_init(sec);
if (ret) {
pci_err(pdev, "Failed to probe!\n");
goto err_qm_uninit;
@@ -877,8 +880,17 @@ static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_remove_from_list;
}
+ if (qm->fun_type == QM_HW_PF && vfs_num) {
+ ret = hisi_qm_sriov_enable(pdev, vfs_num);
+ if (ret < 0)
+ goto err_crypto_unregister;
+ }
+
return 0;
+err_crypto_unregister:
+ sec_unregister_from_crypto();
+
err_remove_from_list:
hisi_qm_del_from_list(qm, &sec_devices);
sec_debugfs_exit(sec);
@@ -893,110 +905,6 @@ err_qm_uninit:
return ret;
}
-/* now we only support equal assignment */
-static int sec_vf_q_assign(struct sec_dev *sec, u32 num_vfs)
-{
- struct hisi_qm *qm = &sec->qm;
- u32 qp_num = qm->qp_num;
- u32 q_base = qp_num;
- u32 q_num, remain_q_num;
- int i, j, ret;
-
- if (!num_vfs)
- return -EINVAL;
-
- remain_q_num = qm->ctrl_qp_num - qp_num;
- q_num = remain_q_num / num_vfs;
-
- for (i = 1; i <= num_vfs; i++) {
- if (i == num_vfs)
- q_num += remain_q_num % num_vfs;
- ret = hisi_qm_set_vft(qm, i, q_base, q_num);
- if (ret) {
- for (j = i; j > 0; j--)
- hisi_qm_set_vft(qm, j, 0, 0);
- return ret;
- }
- q_base += q_num;
- }
-
- return 0;
-}
-
-static int sec_clear_vft_config(struct sec_dev *sec)
-{
- struct hisi_qm *qm = &sec->qm;
- u32 num_vfs = sec->num_vfs;
- int ret;
- u32 i;
-
- for (i = 1; i <= num_vfs; i++) {
- ret = hisi_qm_set_vft(qm, i, 0, 0);
- if (ret)
- return ret;
- }
-
- sec->num_vfs = 0;
-
- return 0;
-}
-
-static int sec_sriov_enable(struct pci_dev *pdev, int max_vfs)
-{
- struct sec_dev *sec = pci_get_drvdata(pdev);
- int pre_existing_vfs, ret;
- u32 num_vfs;
-
- pre_existing_vfs = pci_num_vf(pdev);
-
- if (pre_existing_vfs) {
- pci_err(pdev, "Can't enable VF. Please disable at first!\n");
- return 0;
- }
-
- num_vfs = min_t(u32, max_vfs, SEC_VF_NUM);
-
- ret = sec_vf_q_assign(sec, num_vfs);
- if (ret) {
- pci_err(pdev, "Can't assign queues for VF!\n");
- return ret;
- }
-
- sec->num_vfs = num_vfs;
-
- ret = pci_enable_sriov(pdev, num_vfs);
- if (ret) {
- pci_err(pdev, "Can't enable VF!\n");
- sec_clear_vft_config(sec);
- return ret;
- }
-
- return num_vfs;
-}
-
-static int sec_sriov_disable(struct pci_dev *pdev)
-{
- struct sec_dev *sec = pci_get_drvdata(pdev);
-
- if (pci_vfs_assigned(pdev)) {
- pci_err(pdev, "Can't disable VFs while VFs are assigned!\n");
- return -EPERM;
- }
-
- /* remove in sec_pci_driver will be called to free VF resources */
- pci_disable_sriov(pdev);
-
- return sec_clear_vft_config(sec);
-}
-
-static int sec_sriov_configure(struct pci_dev *pdev, int num_vfs)
-{
- if (num_vfs)
- return sec_sriov_enable(pdev, num_vfs);
- else
- return sec_sriov_disable(pdev);
-}
-
static void sec_remove(struct pci_dev *pdev)
{
struct sec_dev *sec = pci_get_drvdata(pdev);
@@ -1006,8 +914,8 @@ static void sec_remove(struct pci_dev *pdev)
hisi_qm_del_from_list(qm, &sec_devices);
- if (qm->fun_type == QM_HW_PF && sec->num_vfs)
- (void)sec_sriov_disable(pdev);
+ if (qm->fun_type == QM_HW_PF && qm->vfs_num)
+ hisi_qm_sriov_disable(pdev);
sec_debugfs_exit(sec);
@@ -1023,6 +931,9 @@ static void sec_remove(struct pci_dev *pdev)
static const struct pci_error_handlers sec_err_handler = {
.error_detected = hisi_qm_dev_err_detected,
+ .slot_reset = hisi_qm_dev_slot_reset,
+ .reset_prepare = hisi_qm_reset_prepare,
+ .reset_done = hisi_qm_reset_done,
};
static struct pci_driver sec_pci_driver = {
@@ -1031,7 +942,7 @@ static struct pci_driver sec_pci_driver = {
.probe = sec_probe,
.remove = sec_remove,
.err_handler = &sec_err_handler,
- .sriov_configure = sec_sriov_configure,
+ .sriov_configure = hisi_qm_sriov_configure,
};
static void sec_register_debugfs(void)
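
Both the sec changes above and the zip changes below expose their new atomic64 counters through the same debugfs pattern: an offsetof() table over the dfx struct plus a DEFINE_DEBUGFS_ATTRIBUTE pair whose setter accepts only zero (write-to-clear). A condensed illustrative sketch of that shared pattern, with placeholder names:

/* Illustrative only: the counter-to-debugfs pattern shared by sec and zip. */
struct my_dfx {
	atomic64_t send_cnt;
	atomic64_t recv_cnt;
};

struct my_dfx_item {
	const char *name;
	u32 offset;
};

static const struct my_dfx_item my_dfx_items[] = {
	{"send_cnt", offsetof(struct my_dfx, send_cnt)},
	{"recv_cnt", offsetof(struct my_dfx, recv_cnt)},
};

static int my_dfx_get(void *data, u64 *val)
{
	*val = atomic64_read((atomic64_t *)data);
	return 0;
}

static int my_dfx_set(void *data, u64 val)
{
	if (val)
		return -EINVAL;		/* only "echo 0 > file" clears a counter */

	atomic64_set((atomic64_t *)data, 0);
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(my_dfx_ops, my_dfx_get, my_dfx_set, "%lld\n");

static void my_dfx_debugfs_init(struct my_dfx *dfx, struct dentry *dir)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(my_dfx_items); i++)
		debugfs_create_file(my_dfx_items[i].name, 0644, dir,
				    (atomic64_t *)((uintptr_t)dfx +
						   my_dfx_items[i].offset),
				    &my_dfx_ops);
}
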
diff --git a/drivers/crypto/hisilicon/zip/zip.h b/drivers/crypto/hisilicon/zip/zip.h
index 82dc6f867171..f3ed4c0e5493 100644
--- a/drivers/crypto/hisilicon/zip/zip.h
+++ b/drivers/crypto/hisilicon/zip/zip.h
@@ -28,12 +28,20 @@ enum hisi_zip_error_type {
HZIP_NC_ERR = 0x0d,
};
+struct hisi_zip_dfx {
+ atomic64_t send_cnt;
+ atomic64_t recv_cnt;
+ atomic64_t send_busy_cnt;
+ atomic64_t err_bd_cnt;
+};
+
struct hisi_zip_ctrl;
struct hisi_zip {
struct hisi_qm qm;
struct list_head list;
struct hisi_zip_ctrl *ctrl;
+ struct hisi_zip_dfx dfx;
};
struct hisi_zip_sqe {
diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c
index 369ec3220574..c73707c2e539 100644
--- a/drivers/crypto/hisilicon/zip/zip_crypto.c
+++ b/drivers/crypto/hisilicon/zip/zip_crypto.c
@@ -64,7 +64,6 @@ struct hisi_zip_req_q {
struct hisi_zip_qp_ctx {
struct hisi_qp *qp;
- struct hisi_zip_sqe zip_sqe;
struct hisi_zip_req_q req_q;
struct hisi_acc_sgl_pool *sgl_pool;
struct hisi_zip *zip_dev;
@@ -333,6 +332,7 @@ static void hisi_zip_acomp_cb(struct hisi_qp *qp, void *data)
{
struct hisi_zip_sqe *sqe = data;
struct hisi_zip_qp_ctx *qp_ctx = qp->qp_ctx;
+ struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx;
struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
struct hisi_zip_req *req = req_q->q + sqe->tag;
struct acomp_req *acomp_req = req->req;
@@ -340,12 +340,14 @@ static void hisi_zip_acomp_cb(struct hisi_qp *qp, void *data)
u32 status, dlen, head_size;
int err = 0;
+ atomic64_inc(&dfx->recv_cnt);
status = sqe->dw3 & HZIP_BD_STATUS_M;
if (status != 0 && status != HZIP_NC_ERR) {
dev_err(dev, "%scompress fail in qp%u: %u, output: %u\n",
(qp->alg_type == 0) ? "" : "de", qp->qp_id, status,
sqe->produced);
+ atomic64_inc(&dfx->err_bd_cnt);
err = -EIO;
}
dlen = sqe->produced;
@@ -484,11 +486,12 @@ static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req,
static int hisi_zip_do_work(struct hisi_zip_req *req,
struct hisi_zip_qp_ctx *qp_ctx)
{
- struct hisi_zip_sqe *zip_sqe = &qp_ctx->zip_sqe;
struct acomp_req *a_req = req->req;
struct hisi_qp *qp = qp_ctx->qp;
struct device *dev = &qp->qm->pdev->dev;
struct hisi_acc_sgl_pool *pool = qp_ctx->sgl_pool;
+ struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx;
+ struct hisi_zip_sqe zip_sqe;
dma_addr_t input;
dma_addr_t output;
int ret;
@@ -511,15 +514,18 @@ static int hisi_zip_do_work(struct hisi_zip_req *req,
}
req->dma_dst = output;
- hisi_zip_fill_sqe(zip_sqe, qp->req_type, input, output, a_req->slen,
+ hisi_zip_fill_sqe(&zip_sqe, qp->req_type, input, output, a_req->slen,
a_req->dlen, req->sskip, req->dskip);
- hisi_zip_config_buf_type(zip_sqe, HZIP_SGL);
- hisi_zip_config_tag(zip_sqe, req->req_id);
+ hisi_zip_config_buf_type(&zip_sqe, HZIP_SGL);
+ hisi_zip_config_tag(&zip_sqe, req->req_id);
/* send command to start a task */
- ret = hisi_qp_send(qp, zip_sqe);
- if (ret < 0)
+ atomic64_inc(&dfx->send_cnt);
+ ret = hisi_qp_send(qp, &zip_sqe);
+ if (ret < 0) {
+ atomic64_inc(&dfx->send_busy_cnt);
goto err_unmap_output;
+ }
return -EINPROGRESS;
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index fcc85d2dbd07..2229a21ae7c8 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -62,6 +62,7 @@
#define HZIP_CORE_INT_SOURCE 0x3010A0
#define HZIP_CORE_INT_MASK_REG 0x3010A4
+#define HZIP_CORE_INT_SET 0x3010A8
#define HZIP_CORE_INT_STATUS 0x3010AC
#define HZIP_CORE_INT_STATUS_M_ECC BIT(1)
#define HZIP_CORE_SRAM_ECC_ERR_INFO 0x301148
@@ -83,8 +84,13 @@
#define HZIP_SOFT_CTRL_CNT_CLR_CE 0x301000
#define SOFT_CTRL_CNT_CLR_CE_BIT BIT(0)
+#define HZIP_SOFT_CTRL_ZIP_CONTROL 0x30100C
+#define HZIP_AXI_SHUTDOWN_ENABLE BIT(14)
+#define HZIP_WR_PORT BIT(11)
#define HZIP_BUF_SIZE 22
+#define HZIP_SQE_MASK_OFFSET 64
+#define HZIP_SQE_MASK_LEN 48
static const char hisi_zip_name[] = "hisi_zip";
static struct dentry *hzip_debugfs_root;
@@ -95,6 +101,18 @@ struct hisi_zip_hw_error {
const char *msg;
};
+struct zip_dfx_item {
+ const char *name;
+ u32 offset;
+};
+
+static struct zip_dfx_item zip_dfx_files[] = {
+ {"send_cnt", offsetof(struct hisi_zip_dfx, send_cnt)},
+ {"recv_cnt", offsetof(struct hisi_zip_dfx, recv_cnt)},
+ {"send_busy_cnt", offsetof(struct hisi_zip_dfx, send_busy_cnt)},
+ {"err_bd_cnt", offsetof(struct hisi_zip_dfx, err_bd_cnt)},
+};
+
static const struct hisi_zip_hw_error zip_hw_error[] = {
{ .int_msk = BIT(0), .msg = "zip_ecc_1bitt_err" },
{ .int_msk = BIT(1), .msg = "zip_ecc_2bit_err" },
@@ -134,7 +152,6 @@ struct ctrl_debug_file {
* Just relevant for PF.
*/
struct hisi_zip_ctrl {
- u32 num_vfs;
struct hisi_zip *hisi_zip;
struct dentry *debug_root;
struct ctrl_debug_file files[HZIP_DEBUG_FILE_NUM];
@@ -162,7 +179,7 @@ static const u64 core_offsets[] = {
[HZIP_DECOMP_CORE5] = 0x309000,
};
-static struct debugfs_reg32 hzip_dfx_regs[] = {
+static const struct debugfs_reg32 hzip_dfx_regs[] = {
{"HZIP_GET_BD_NUM ", 0x00ull},
{"HZIP_GET_RIGHT_BD ", 0x04ull},
{"HZIP_GET_ERROR_BD ", 0x08ull},
@@ -189,38 +206,7 @@ static struct debugfs_reg32 hzip_dfx_regs[] = {
static int pf_q_num_set(const char *val, const struct kernel_param *kp)
{
- struct pci_dev *pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI,
- PCI_DEVICE_ID_ZIP_PF, NULL);
- u32 n, q_num;
- u8 rev_id;
- int ret;
-
- if (!val)
- return -EINVAL;
-
- if (!pdev) {
- q_num = min_t(u32, HZIP_QUEUE_NUM_V1, HZIP_QUEUE_NUM_V2);
- pr_info("No device found currently, suppose queue number is %d\n",
- q_num);
- } else {
- rev_id = pdev->revision;
- switch (rev_id) {
- case QM_HW_V1:
- q_num = HZIP_QUEUE_NUM_V1;
- break;
- case QM_HW_V2:
- q_num = HZIP_QUEUE_NUM_V2;
- break;
- default:
- return -EINVAL;
- }
- }
-
- ret = kstrtou32(val, 10, &n);
- if (ret != 0 || n > q_num || n == 0)
- return -EINVAL;
-
- return param_set_int(val, kp);
+ return q_num_set(val, kp, PCI_DEVICE_ID_ZIP_PF);
}
static const struct kernel_param_ops pf_q_num_ops = {
@@ -232,9 +218,14 @@ static u32 pf_q_num = HZIP_PF_DEF_Q_NUM;
module_param_cb(pf_q_num, &pf_q_num_ops, &pf_q_num, 0444);
MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 1-4096, v2 1-1024)");
+static const struct kernel_param_ops vfs_num_ops = {
+ .set = vfs_num_set,
+ .get = param_get_int,
+};
+
static u32 vfs_num;
-module_param(vfs_num, uint, 0444);
-MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63)");
+module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
+MODULE_PARM_DESC(vfs_num, "Number of VFs to enable (1-63), 0 (default)");
static const struct pci_device_id hisi_zip_dev_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_ZIP_PF) },
@@ -250,9 +241,9 @@ int zip_create_qps(struct hisi_qp **qps, int qp_num)
return hisi_qm_alloc_qps_node(&zip_devices, qp_num, 0, node, qps);
}
-static void hisi_zip_set_user_domain_and_cache(struct hisi_zip *hisi_zip)
+static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
{
- void __iomem *base = hisi_zip->qm.io_base;
+ void __iomem *base = qm->io_base;
/* qm user domain */
writel(AXUSER_BASE, base + QM_ARUSER_M_CFG_1);
@@ -279,7 +270,7 @@ static void hisi_zip_set_user_domain_and_cache(struct hisi_zip *hisi_zip)
writel(AXUSER_BASE, base + HZIP_SGL_RUSER_32_63);
writel(AXUSER_BASE, base + HZIP_BD_WUSER_32_63);
- if (hisi_zip->qm.use_sva) {
+ if (qm->use_sva) {
writel(AXUSER_BASE | AXUSER_SSV, base + HZIP_DATA_RUSER_32_63);
writel(AXUSER_BASE | AXUSER_SSV, base + HZIP_DATA_WUSER_32_63);
} else {
@@ -295,10 +286,14 @@ static void hisi_zip_set_user_domain_and_cache(struct hisi_zip *hisi_zip)
writel(SQC_CACHE_ENABLE | CQC_CACHE_ENABLE | SQC_CACHE_WB_ENABLE |
CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) |
FIELD_PREP(CQC_CACHE_WB_THRD, 1), base + QM_CACHE_CTL);
+
+ return 0;
}
static void hisi_zip_hw_error_enable(struct hisi_qm *qm)
{
+ u32 val;
+
if (qm->ver == QM_HW_V1) {
writel(HZIP_CORE_INT_MASK_ALL,
qm->io_base + HZIP_CORE_INT_MASK_REG);
@@ -317,12 +312,24 @@ static void hisi_zip_hw_error_enable(struct hisi_qm *qm)
/* enable ZIP hw error interrupts */
writel(0, qm->io_base + HZIP_CORE_INT_MASK_REG);
+
+ /* enable ZIP block master OOO when m-bit error occur */
+ val = readl(qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
+ val = val | HZIP_AXI_SHUTDOWN_ENABLE;
+ writel(val, qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
}
static void hisi_zip_hw_error_disable(struct hisi_qm *qm)
{
+ u32 val;
+
/* disable ZIP hw error interrupts */
writel(HZIP_CORE_INT_MASK_ALL, qm->io_base + HZIP_CORE_INT_MASK_REG);
+
+ /* disable ZIP block master OOO when m-bit error occur */
+ val = readl(qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
+ val = val & ~HZIP_AXI_SHUTDOWN_ENABLE;
+ writel(val, qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
}
static inline struct hisi_qm *file_to_qm(struct ctrl_debug_file *file)
@@ -342,21 +349,20 @@ static u32 current_qm_read(struct ctrl_debug_file *file)
static int current_qm_write(struct ctrl_debug_file *file, u32 val)
{
struct hisi_qm *qm = file_to_qm(file);
- struct hisi_zip_ctrl *ctrl = file->ctrl;
u32 vfq_num;
u32 tmp;
- if (val > ctrl->num_vfs)
+ if (val > qm->vfs_num)
return -EINVAL;
/* Calculate curr_qm_qp_num and store */
if (val == 0) {
qm->debug.curr_qm_qp_num = qm->qp_num;
} else {
- vfq_num = (qm->ctrl_qp_num - qm->qp_num) / ctrl->num_vfs;
- if (val == ctrl->num_vfs)
+ vfq_num = (qm->ctrl_qp_num - qm->qp_num) / qm->vfs_num;
+ if (val == qm->vfs_num)
qm->debug.curr_qm_qp_num = qm->ctrl_qp_num -
- qm->qp_num - (ctrl->num_vfs - 1) * vfq_num;
+ qm->qp_num - (qm->vfs_num - 1) * vfq_num;
else
qm->debug.curr_qm_qp_num = vfq_num;
}
@@ -477,6 +483,27 @@ static const struct file_operations ctrl_debug_fops = {
.write = ctrl_debug_write,
};
+
+static int zip_debugfs_atomic64_set(void *data, u64 val)
+{
+ if (val)
+ return -EINVAL;
+
+ atomic64_set((atomic64_t *)data, 0);
+
+ return 0;
+}
+
+static int zip_debugfs_atomic64_get(void *data, u64 *val)
+{
+ *val = atomic64_read((atomic64_t *)data);
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(zip_atomic64_ops, zip_debugfs_atomic64_get,
+ zip_debugfs_atomic64_set, "%llu\n");
+
static int hisi_zip_core_debug_init(struct hisi_zip_ctrl *ctrl)
{
struct hisi_zip *hisi_zip = ctrl->hisi_zip;
@@ -508,6 +535,25 @@ static int hisi_zip_core_debug_init(struct hisi_zip_ctrl *ctrl)
return 0;
}
+static void hisi_zip_dfx_debug_init(struct hisi_qm *qm)
+{
+ struct hisi_zip *zip = container_of(qm, struct hisi_zip, qm);
+ struct hisi_zip_dfx *dfx = &zip->dfx;
+ struct dentry *tmp_dir;
+ void *data;
+ int i;
+
+ tmp_dir = debugfs_create_dir("zip_dfx", qm->debug.debug_root);
+ for (i = 0; i < ARRAY_SIZE(zip_dfx_files); i++) {
+ data = (atomic64_t *)((uintptr_t)dfx + zip_dfx_files[i].offset);
+ debugfs_create_file(zip_dfx_files[i].name,
+ 0644,
+ tmp_dir,
+ data,
+ &zip_atomic64_ops);
+ }
+}
+
static int hisi_zip_ctrl_debug_init(struct hisi_zip_ctrl *ctrl)
{
int i;
@@ -534,6 +580,8 @@ static int hisi_zip_debugfs_init(struct hisi_zip *hisi_zip)
dev_d = debugfs_create_dir(dev_name(dev), hzip_debugfs_root);
+ qm->debug.sqe_mask_offset = HZIP_SQE_MASK_OFFSET;
+ qm->debug.sqe_mask_len = HZIP_SQE_MASK_LEN;
qm->debug.debug_root = dev_d;
ret = hisi_qm_debug_init(qm);
if (ret)
@@ -546,6 +594,8 @@ static int hisi_zip_debugfs_init(struct hisi_zip *hisi_zip)
goto failed_to_create;
}
+ hisi_zip_dfx_debug_init(qm);
+
return 0;
failed_to_create:
@@ -598,8 +648,6 @@ static void hisi_zip_log_hw_error(struct hisi_qm *qm, u32 err_sts)
}
err++;
}
-
- writel(err_sts, qm->io_base + HZIP_CORE_INT_SOURCE);
}
static u32 hisi_zip_get_hw_err_status(struct hisi_qm *qm)
@@ -607,17 +655,55 @@ static u32 hisi_zip_get_hw_err_status(struct hisi_qm *qm)
return readl(qm->io_base + HZIP_CORE_INT_STATUS);
}
+static void hisi_zip_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
+{
+ writel(err_sts, qm->io_base + HZIP_CORE_INT_SOURCE);
+}
+
+static void hisi_zip_open_axi_master_ooo(struct hisi_qm *qm)
+{
+ u32 val;
+
+ val = readl(qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
+
+ writel(val & ~HZIP_AXI_SHUTDOWN_ENABLE,
+ qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
+
+ writel(val | HZIP_AXI_SHUTDOWN_ENABLE,
+ qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
+}
+
+static void hisi_zip_close_axi_master_ooo(struct hisi_qm *qm)
+{
+ u32 nfe_enb;
+
+ /* Disable ECC Mbit error report. */
+ nfe_enb = readl(qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
+ writel(nfe_enb & ~HZIP_CORE_INT_STATUS_M_ECC,
+ qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
+
+ /* Inject zip ECC Mbit error to block master ooo. */
+ writel(HZIP_CORE_INT_STATUS_M_ECC,
+ qm->io_base + HZIP_CORE_INT_SET);
+}
+
static const struct hisi_qm_err_ini hisi_zip_err_ini = {
+ .hw_init = hisi_zip_set_user_domain_and_cache,
.hw_err_enable = hisi_zip_hw_error_enable,
.hw_err_disable = hisi_zip_hw_error_disable,
.get_dev_hw_err_status = hisi_zip_get_hw_err_status,
+ .clear_dev_hw_err_status = hisi_zip_clear_hw_err_status,
.log_dev_hw_err = hisi_zip_log_hw_error,
+ .open_axi_master_ooo = hisi_zip_open_axi_master_ooo,
+ .close_axi_master_ooo = hisi_zip_close_axi_master_ooo,
.err_info = {
.ce = QM_BASE_CE,
.nfe = QM_BASE_NFE |
QM_ACC_WB_NOT_READY_TIMEOUT,
.fe = 0,
- .msi = QM_DB_RANDOM_INVALID,
+ .ecc_2bits_mask = HZIP_CORE_INT_STATUS_M_ECC,
+ .msi_wr_port = HZIP_WR_PORT,
+ .acpi_rst = "ZRST",
}
};
@@ -633,177 +719,85 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
hisi_zip->ctrl = ctrl;
ctrl->hisi_zip = hisi_zip;
- switch (qm->ver) {
- case QM_HW_V1:
+ if (qm->ver == QM_HW_V1)
qm->ctrl_qp_num = HZIP_QUEUE_NUM_V1;
- break;
-
- case QM_HW_V2:
+ else
qm->ctrl_qp_num = HZIP_QUEUE_NUM_V2;
- break;
-
- default:
- return -EINVAL;
- }
qm->err_ini = &hisi_zip_err_ini;
- hisi_zip_set_user_domain_and_cache(hisi_zip);
+ hisi_zip_set_user_domain_and_cache(qm);
hisi_qm_dev_err_init(qm);
hisi_zip_debug_regs_clear(hisi_zip);
return 0;
}
-/* Currently we only support equal assignment */
-static int hisi_zip_vf_q_assign(struct hisi_zip *hisi_zip, int num_vfs)
+static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
- struct hisi_qm *qm = &hisi_zip->qm;
- u32 qp_num = qm->qp_num;
- u32 q_base = qp_num;
- u32 q_num, remain_q_num, i;
- int ret;
-
- if (!num_vfs)
- return -EINVAL;
-
- remain_q_num = qm->ctrl_qp_num - qp_num;
- if (remain_q_num < num_vfs)
- return -EINVAL;
+ qm->pdev = pdev;
+ qm->ver = pdev->revision;
+ qm->algs = "zlib\ngzip";
+ qm->sqe_size = HZIP_SQE_SIZE;
+ qm->dev_name = hisi_zip_name;
- q_num = remain_q_num / num_vfs;
- for (i = 1; i <= num_vfs; i++) {
- if (i == num_vfs)
- q_num += remain_q_num % num_vfs;
- ret = hisi_qm_set_vft(qm, i, q_base, q_num);
- if (ret)
- return ret;
- q_base += q_num;
+ qm->fun_type = (pdev->device == PCI_DEVICE_ID_ZIP_PF) ?
+ QM_HW_PF : QM_HW_VF;
+ if (qm->fun_type == QM_HW_PF) {
+ qm->qp_base = HZIP_PF_DEF_Q_BASE;
+ qm->qp_num = pf_q_num;
+ qm->qm_list = &zip_devices;
+ } else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) {
+ /*
+	 * There is no way to get the qm configuration from within a VM
+	 * on v1 hardware, so force the PF to use HZIP_PF_DEF_Q_NUM and
+	 * allow only one VF on v1 hardware.
+ *
+ * v2 hardware has no such problem.
+ */
+ qm->qp_base = HZIP_PF_DEF_Q_NUM;
+ qm->qp_num = HZIP_QUEUE_NUM_V1 - HZIP_PF_DEF_Q_NUM;
}
- return 0;
+ return hisi_qm_init(qm);
}
-static int hisi_zip_clear_vft_config(struct hisi_zip *hisi_zip)
+static int hisi_zip_probe_init(struct hisi_zip *hisi_zip)
{
- struct hisi_zip_ctrl *ctrl = hisi_zip->ctrl;
struct hisi_qm *qm = &hisi_zip->qm;
- u32 i, num_vfs = ctrl->num_vfs;
int ret;
- for (i = 1; i <= num_vfs; i++) {
- ret = hisi_qm_set_vft(qm, i, 0, 0);
+ if (qm->fun_type == QM_HW_PF) {
+ ret = hisi_zip_pf_probe_init(hisi_zip);
if (ret)
return ret;
}
- ctrl->num_vfs = 0;
-
return 0;
}
-static int hisi_zip_sriov_enable(struct pci_dev *pdev, int max_vfs)
-{
- struct hisi_zip *hisi_zip = pci_get_drvdata(pdev);
- int pre_existing_vfs, num_vfs, ret;
-
- pre_existing_vfs = pci_num_vf(pdev);
-
- if (pre_existing_vfs) {
- dev_err(&pdev->dev,
- "Can't enable VF. Please disable pre-enabled VFs!\n");
- return 0;
- }
-
- num_vfs = min_t(int, max_vfs, HZIP_VF_NUM);
-
- ret = hisi_zip_vf_q_assign(hisi_zip, num_vfs);
- if (ret) {
- dev_err(&pdev->dev, "Can't assign queues for VF!\n");
- return ret;
- }
-
- hisi_zip->ctrl->num_vfs = num_vfs;
-
- ret = pci_enable_sriov(pdev, num_vfs);
- if (ret) {
- dev_err(&pdev->dev, "Can't enable VF!\n");
- hisi_zip_clear_vft_config(hisi_zip);
- return ret;
- }
-
- return num_vfs;
-}
-
-static int hisi_zip_sriov_disable(struct pci_dev *pdev)
-{
- struct hisi_zip *hisi_zip = pci_get_drvdata(pdev);
-
- if (pci_vfs_assigned(pdev)) {
- dev_err(&pdev->dev,
- "Can't disable VFs while VFs are assigned!\n");
- return -EPERM;
- }
-
- /* remove in hisi_zip_pci_driver will be called to free VF resources */
- pci_disable_sriov(pdev);
-
- return hisi_zip_clear_vft_config(hisi_zip);
-}
-
static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct hisi_zip *hisi_zip;
- enum qm_hw_ver rev_id;
struct hisi_qm *qm;
int ret;
- rev_id = hisi_qm_get_hw_version(pdev);
- if (rev_id == QM_HW_UNKNOWN)
- return -EINVAL;
-
hisi_zip = devm_kzalloc(&pdev->dev, sizeof(*hisi_zip), GFP_KERNEL);
if (!hisi_zip)
return -ENOMEM;
- pci_set_drvdata(pdev, hisi_zip);
qm = &hisi_zip->qm;
- qm->use_dma_api = true;
- qm->pdev = pdev;
- qm->ver = rev_id;
- qm->algs = "zlib\ngzip";
- qm->sqe_size = HZIP_SQE_SIZE;
- qm->dev_name = hisi_zip_name;
- qm->fun_type = (pdev->device == PCI_DEVICE_ID_ZIP_PF) ? QM_HW_PF :
- QM_HW_VF;
- ret = hisi_qm_init(qm);
+ ret = hisi_zip_qm_init(qm, pdev);
if (ret) {
- dev_err(&pdev->dev, "Failed to init qm!\n");
+ pci_err(pdev, "Failed to init ZIP QM (%d)!\n", ret);
return ret;
}
- if (qm->fun_type == QM_HW_PF) {
- ret = hisi_zip_pf_probe_init(hisi_zip);
- if (ret)
- return ret;
-
- qm->qp_base = HZIP_PF_DEF_Q_BASE;
- qm->qp_num = pf_q_num;
- } else if (qm->fun_type == QM_HW_VF) {
- /*
- * have no way to get qm configure in VM in v1 hardware,
- * so currently force PF to uses HZIP_PF_DEF_Q_NUM, and force
- * to trigger only one VF in v1 hardware.
- *
- * v2 hardware has no such problem.
- */
- if (qm->ver == QM_HW_V1) {
- qm->qp_base = HZIP_PF_DEF_Q_NUM;
- qm->qp_num = HZIP_QUEUE_NUM_V1 - HZIP_PF_DEF_Q_NUM;
- } else if (qm->ver == QM_HW_V2)
- /* v2 starts to support get vft by mailbox */
- hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
+ ret = hisi_zip_probe_init(hisi_zip);
+ if (ret) {
+ pci_err(pdev, "Failed to probe (%d)!\n", ret);
+ goto err_qm_uninit;
}
ret = hisi_qm_start(qm);
@@ -823,7 +817,7 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
if (qm->fun_type == QM_HW_PF && vfs_num > 0) {
- ret = hisi_zip_sriov_enable(pdev, vfs_num);
+ ret = hisi_qm_sriov_enable(pdev, vfs_num);
if (ret < 0)
goto err_remove_from_list;
}
@@ -836,15 +830,8 @@ err_remove_from_list:
hisi_qm_stop(qm);
err_qm_uninit:
hisi_qm_uninit(qm);
- return ret;
-}
-static int hisi_zip_sriov_configure(struct pci_dev *pdev, int num_vfs)
-{
- if (num_vfs == 0)
- return hisi_zip_sriov_disable(pdev);
- else
- return hisi_zip_sriov_enable(pdev, num_vfs);
+ return ret;
}
static void hisi_zip_remove(struct pci_dev *pdev)
@@ -852,8 +839,8 @@ static void hisi_zip_remove(struct pci_dev *pdev)
struct hisi_zip *hisi_zip = pci_get_drvdata(pdev);
struct hisi_qm *qm = &hisi_zip->qm;
- if (qm->fun_type == QM_HW_PF && hisi_zip->ctrl->num_vfs != 0)
- hisi_zip_sriov_disable(pdev);
+ if (qm->fun_type == QM_HW_PF && qm->vfs_num)
+ hisi_qm_sriov_disable(pdev);
hisi_zip_debugfs_exit(hisi_zip);
hisi_qm_stop(qm);
@@ -865,6 +852,9 @@ static void hisi_zip_remove(struct pci_dev *pdev)
static const struct pci_error_handlers hisi_zip_err_handler = {
.error_detected = hisi_qm_dev_err_detected,
+ .slot_reset = hisi_qm_dev_slot_reset,
+ .reset_prepare = hisi_qm_reset_prepare,
+ .reset_done = hisi_qm_reset_done,
};
static struct pci_driver hisi_zip_pci_driver = {
@@ -873,7 +863,7 @@ static struct pci_driver hisi_zip_pci_driver = {
.probe = hisi_zip_probe,
.remove = hisi_zip_remove,
.sriov_configure = IS_ENABLED(CONFIG_PCI_IOV) ?
- hisi_zip_sriov_configure : NULL,
+ hisi_qm_sriov_configure : NULL,
.err_handler = &hisi_zip_err_handler,
};
diff --git a/drivers/crypto/marvell/octeontx/otx_cptpf_main.c b/drivers/crypto/marvell/octeontx/otx_cptpf_main.c
index 200fb3303db0..34bb3063eb70 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptpf_main.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptpf_main.c
@@ -79,13 +79,13 @@ static int otx_cpt_device_init(struct otx_cpt_device *cpt)
/* Check BIST status */
bist = (u64)otx_cpt_check_bist_status(cpt);
if (bist) {
- dev_err(dev, "RAM BIST failed with code 0x%llx", bist);
+ dev_err(dev, "RAM BIST failed with code 0x%llx\n", bist);
return -ENODEV;
}
bist = otx_cpt_check_exe_bist_status(cpt);
if (bist) {
- dev_err(dev, "Engine BIST failed with code 0x%llx", bist);
+ dev_err(dev, "Engine BIST failed with code 0x%llx\n", bist);
return -ENODEV;
}
diff --git a/drivers/crypto/marvell/octeontx/otx_cptpf_mbox.c b/drivers/crypto/marvell/octeontx/otx_cptpf_mbox.c
index a6774232e9a3..a9e3de65875a 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptpf_mbox.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptpf_mbox.c
@@ -63,11 +63,11 @@ static void dump_mbox_msg(struct otx_cpt_mbox *mbox_msg, int vf_id)
hex_dump_to_buffer(mbox_msg, sizeof(struct otx_cpt_mbox), 16, 8,
raw_data_str, OTX_CPT_MAX_MBOX_DATA_STR_SIZE, false);
if (vf_id >= 0)
- pr_debug("MBOX opcode %s received from VF%d raw_data %s",
+ pr_debug("MBOX opcode %s received from VF%d raw_data %s\n",
get_mbox_opcode_str(mbox_msg->msg), vf_id,
raw_data_str);
else
- pr_debug("MBOX opcode %s received from PF raw_data %s",
+ pr_debug("MBOX opcode %s received from PF raw_data %s\n",
get_mbox_opcode_str(mbox_msg->msg), raw_data_str);
}
@@ -140,20 +140,20 @@ static int otx_cpt_bind_vq_to_grp(struct otx_cpt_device *cpt, u8 q, u8 grp)
struct otx_cpt_ucode *ucode;
if (q >= cpt->max_vfs) {
- dev_err(dev, "Requested queue %d is > than maximum avail %d",
+ dev_err(dev, "Requested queue %d is > than maximum avail %d\n",
q, cpt->max_vfs);
return -EINVAL;
}
if (grp >= OTX_CPT_MAX_ENGINE_GROUPS) {
- dev_err(dev, "Requested group %d is > than maximum avail %d",
+ dev_err(dev, "Requested group %d is > than maximum avail %d\n",
grp, OTX_CPT_MAX_ENGINE_GROUPS);
return -EINVAL;
}
eng_grp = &cpt->eng_grps.grp[grp];
if (!eng_grp->is_enabled) {
- dev_err(dev, "Requested engine group %d is disabled", grp);
+ dev_err(dev, "Requested engine group %d is disabled\n", grp);
return -EINVAL;
}
@@ -212,7 +212,7 @@ static void otx_cpt_handle_mbox_intr(struct otx_cpt_device *cpt, int vf)
vftype = otx_cpt_bind_vq_to_grp(cpt, vf, (u8)mbx.data);
if ((vftype != OTX_CPT_AE_TYPES) &&
(vftype != OTX_CPT_SE_TYPES)) {
- dev_err(dev, "VF%d binding to eng group %llu failed",
+ dev_err(dev, "VF%d binding to eng group %llu failed\n",
vf, mbx.data);
otx_cptpf_mbox_send_nack(cpt, vf, &mbx);
} else {
diff --git a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
index d04baa319592..fec8f3b9b112 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
@@ -62,7 +62,7 @@ static struct otx_cpt_bitmap get_cores_bmap(struct device *dev,
int i;
if (eng_grp->g->engs_num > OTX_CPT_MAX_ENGINES) {
- dev_err(dev, "unsupported number of engines %d on octeontx",
+ dev_err(dev, "unsupported number of engines %d on octeontx\n",
eng_grp->g->engs_num);
return bmap;
}
@@ -78,7 +78,7 @@ static struct otx_cpt_bitmap get_cores_bmap(struct device *dev,
}
if (!found)
- dev_err(dev, "No engines reserved for engine group %d",
+ dev_err(dev, "No engines reserved for engine group %d\n",
eng_grp->idx);
return bmap;
}
@@ -306,7 +306,7 @@ static int process_tar_file(struct device *dev,
ucode_size = ntohl(ucode_hdr->code_length) * 2;
if (!ucode_size || (size < round_up(ucode_size, 16) +
sizeof(struct otx_cpt_ucode_hdr) + OTX_CPT_UCODE_SIGN_LEN)) {
- dev_err(dev, "Ucode %s invalid size", filename);
+ dev_err(dev, "Ucode %s invalid size\n", filename);
return -EINVAL;
}
@@ -379,18 +379,18 @@ static void print_tar_dbg_info(struct tar_arch_info_t *tar_arch,
{
struct tar_ucode_info_t *curr;
- pr_debug("Tar archive filename %s", tar_filename);
- pr_debug("Tar archive pointer %p, size %ld", tar_arch->fw->data,
+ pr_debug("Tar archive filename %s\n", tar_filename);
+ pr_debug("Tar archive pointer %p, size %ld\n", tar_arch->fw->data,
tar_arch->fw->size);
list_for_each_entry(curr, &tar_arch->ucodes, list) {
- pr_debug("Ucode filename %s", curr->ucode.filename);
- pr_debug("Ucode version string %s", curr->ucode.ver_str);
- pr_debug("Ucode version %d.%d.%d.%d",
+ pr_debug("Ucode filename %s\n", curr->ucode.filename);
+ pr_debug("Ucode version string %s\n", curr->ucode.ver_str);
+ pr_debug("Ucode version %d.%d.%d.%d\n",
curr->ucode.ver_num.nn, curr->ucode.ver_num.xx,
curr->ucode.ver_num.yy, curr->ucode.ver_num.zz);
- pr_debug("Ucode type (%d) %s", curr->ucode.type,
+ pr_debug("Ucode type (%d) %s\n", curr->ucode.type,
get_ucode_type_str(curr->ucode.type));
- pr_debug("Ucode size %d", curr->ucode.size);
+ pr_debug("Ucode size %d\n", curr->ucode.size);
pr_debug("Ucode ptr %p\n", curr->ucode_ptr);
}
}
@@ -417,14 +417,14 @@ static struct tar_arch_info_t *load_tar_archive(struct device *dev,
goto release_tar_arch;
if (tar_arch->fw->size < TAR_BLOCK_LEN) {
- dev_err(dev, "Invalid tar archive %s ", tar_filename);
+ dev_err(dev, "Invalid tar archive %s\n", tar_filename);
goto release_tar_arch;
}
tar_size = tar_arch->fw->size;
tar_blk = (struct tar_blk_t *) tar_arch->fw->data;
if (strncmp(tar_blk->hdr.magic, TAR_MAGIC, TAR_MAGIC_LEN - 1)) {
- dev_err(dev, "Unsupported format of tar archive %s",
+ dev_err(dev, "Unsupported format of tar archive %s\n",
tar_filename);
goto release_tar_arch;
}
@@ -437,7 +437,7 @@ static struct tar_arch_info_t *load_tar_archive(struct device *dev,
if (tar_offs + cur_size > tar_size ||
tar_offs + 2*TAR_BLOCK_LEN > tar_size) {
- dev_err(dev, "Invalid tar archive %s ", tar_filename);
+ dev_err(dev, "Invalid tar archive %s\n", tar_filename);
goto release_tar_arch;
}
@@ -458,7 +458,7 @@ static struct tar_arch_info_t *load_tar_archive(struct device *dev,
/* Check for the end of the archive */
if (tar_offs + 2*TAR_BLOCK_LEN > tar_size) {
- dev_err(dev, "Invalid tar archive %s ", tar_filename);
+ dev_err(dev, "Invalid tar archive %s\n", tar_filename);
goto release_tar_arch;
}
@@ -563,13 +563,13 @@ static void print_engs_info(struct otx_cpt_eng_grp_info *eng_grp,
static void print_ucode_dbg_info(struct otx_cpt_ucode *ucode)
{
- pr_debug("Ucode info");
- pr_debug("Ucode version string %s", ucode->ver_str);
- pr_debug("Ucode version %d.%d.%d.%d", ucode->ver_num.nn,
+ pr_debug("Ucode info\n");
+ pr_debug("Ucode version string %s\n", ucode->ver_str);
+ pr_debug("Ucode version %d.%d.%d.%d\n", ucode->ver_num.nn,
ucode->ver_num.xx, ucode->ver_num.yy, ucode->ver_num.zz);
- pr_debug("Ucode type %s", get_ucode_type_str(ucode->type));
- pr_debug("Ucode size %d", ucode->size);
- pr_debug("Ucode virt address %16.16llx", (u64)ucode->align_va);
+ pr_debug("Ucode type %s\n", get_ucode_type_str(ucode->type));
+ pr_debug("Ucode size %d\n", ucode->size);
+ pr_debug("Ucode virt address %16.16llx\n", (u64)ucode->align_va);
pr_debug("Ucode phys address %16.16llx\n", ucode->align_dma);
}
@@ -600,19 +600,19 @@ static void print_dbg_info(struct device *dev,
u32 mask[4];
int i, j;
- pr_debug("Engine groups global info");
- pr_debug("max SE %d, max AE %d",
+ pr_debug("Engine groups global info\n");
+ pr_debug("max SE %d, max AE %d\n",
eng_grps->avail.max_se_cnt, eng_grps->avail.max_ae_cnt);
- pr_debug("free SE %d", eng_grps->avail.se_cnt);
- pr_debug("free AE %d", eng_grps->avail.ae_cnt);
+ pr_debug("free SE %d\n", eng_grps->avail.se_cnt);
+ pr_debug("free AE %d\n", eng_grps->avail.ae_cnt);
for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
grp = &eng_grps->grp[i];
- pr_debug("engine_group%d, state %s", i, grp->is_enabled ?
+ pr_debug("engine_group%d, state %s\n", i, grp->is_enabled ?
"enabled" : "disabled");
if (grp->is_enabled) {
mirrored_grp = &eng_grps->grp[grp->mirror.idx];
- pr_debug("Ucode0 filename %s, version %s",
+ pr_debug("Ucode0 filename %s, version %s\n",
grp->mirror.is_ena ?
mirrored_grp->ucode[0].filename :
grp->ucode[0].filename,
@@ -626,18 +626,18 @@ static void print_dbg_info(struct device *dev,
if (engs->type) {
print_engs_info(grp, engs_info,
2*OTX_CPT_UCODE_NAME_LENGTH, j);
- pr_debug("Slot%d: %s", j, engs_info);
+ pr_debug("Slot%d: %s\n", j, engs_info);
bitmap_to_arr32(mask, engs->bmap,
eng_grps->engs_num);
- pr_debug("Mask: %8.8x %8.8x %8.8x %8.8x",
+ pr_debug("Mask: %8.8x %8.8x %8.8x %8.8x\n",
mask[3], mask[2], mask[1], mask[0]);
} else
- pr_debug("Slot%d not used", j);
+ pr_debug("Slot%d not used\n", j);
}
if (grp->is_enabled) {
cpt_print_engines_mask(grp, dev, engs_mask,
OTX_CPT_UCODE_NAME_LENGTH);
- pr_debug("Cmask: %s", engs_mask);
+ pr_debug("Cmask: %s\n", engs_mask);
}
}
}
@@ -766,7 +766,7 @@ static int check_engines_availability(struct device *dev,
if (avail_cnt < req_eng->count) {
dev_err(dev,
- "Error available %s engines %d < than requested %d",
+ "Error available %s engines %d < than requested %d\n",
get_eng_type_str(req_eng->type),
avail_cnt, req_eng->count);
return -EBUSY;
@@ -867,7 +867,7 @@ static int copy_ucode_to_dma_mem(struct device *dev,
OTX_CPT_UCODE_ALIGNMENT,
&ucode->dma, GFP_KERNEL);
if (!ucode->va) {
- dev_err(dev, "Unable to allocate space for microcode");
+ dev_err(dev, "Unable to allocate space for microcode\n");
return -ENOMEM;
}
ucode->align_va = PTR_ALIGN(ucode->va, OTX_CPT_UCODE_ALIGNMENT);
@@ -905,15 +905,15 @@ static int ucode_load(struct device *dev, struct otx_cpt_ucode *ucode,
ucode->size = ntohl(ucode_hdr->code_length) * 2;
if (!ucode->size || (fw->size < round_up(ucode->size, 16)
+ sizeof(struct otx_cpt_ucode_hdr) + OTX_CPT_UCODE_SIGN_LEN)) {
- dev_err(dev, "Ucode %s invalid size", ucode_filename);
+ dev_err(dev, "Ucode %s invalid size\n", ucode_filename);
ret = -EINVAL;
goto release_fw;
}
ret = get_ucode_type(ucode_hdr, &ucode->type);
if (ret) {
- dev_err(dev, "Microcode %s unknown type 0x%x", ucode->filename,
- ucode->type);
+ dev_err(dev, "Microcode %s unknown type 0x%x\n",
+ ucode->filename, ucode->type);
goto release_fw;
}
@@ -1083,7 +1083,7 @@ static int eng_grp_update_masks(struct device *dev,
break;
default:
- dev_err(dev, "Invalid engine type %d", engs->type);
+ dev_err(dev, "Invalid engine type %d\n", engs->type);
return -EINVAL;
}
@@ -1142,13 +1142,14 @@ static int delete_engine_group(struct device *dev,
return -EINVAL;
if (eng_grp->mirror.ref_count) {
- dev_err(dev, "Can't delete engine_group%d as it is used by:",
+ dev_err(dev, "Can't delete engine_group%d as it is used by engine_group(s):",
eng_grp->idx);
for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
if (eng_grp->g->grp[i].mirror.is_ena &&
eng_grp->g->grp[i].mirror.idx == eng_grp->idx)
- dev_err(dev, "engine_group%d", i);
+ pr_cont(" %d", i);
}
+ pr_cont("\n");
return -EINVAL;
}
@@ -1182,7 +1183,7 @@ static int validate_1_ucode_scenario(struct device *dev,
if (!otx_cpt_uc_supports_eng_type(&eng_grp->ucode[0],
engs[i].type)) {
dev_err(dev,
- "Microcode %s does not support %s engines",
+ "Microcode %s does not support %s engines\n",
eng_grp->ucode[0].filename,
get_eng_type_str(engs[i].type));
return -EINVAL;
@@ -1220,7 +1221,7 @@ static int create_engine_group(struct device *dev,
/* Validate if requested engine types are supported by this device */
for (i = 0; i < engs_cnt; i++)
if (!dev_supports_eng_type(eng_grps, engs[i].type)) {
- dev_err(dev, "Device does not support %s engines",
+ dev_err(dev, "Device does not support %s engines\n",
get_eng_type_str(engs[i].type));
return -EPERM;
}
@@ -1228,7 +1229,7 @@ static int create_engine_group(struct device *dev,
/* Find engine group which is not used */
eng_grp = find_unused_eng_grp(eng_grps);
if (!eng_grp) {
- dev_err(dev, "Error all engine groups are being used");
+ dev_err(dev, "Error all engine groups are being used\n");
return -ENOSPC;
}
@@ -1298,11 +1299,11 @@ static int create_engine_group(struct device *dev,
eng_grp->is_enabled = true;
if (eng_grp->mirror.is_ena)
dev_info(dev,
- "Engine_group%d: reuse microcode %s from group %d",
+ "Engine_group%d: reuse microcode %s from group %d\n",
eng_grp->idx, mirrored_eng_grp->ucode[0].ver_str,
mirrored_eng_grp->idx);
else
- dev_info(dev, "Engine_group%d: microcode loaded %s",
+ dev_info(dev, "Engine_group%d: microcode loaded %s\n",
eng_grp->idx, eng_grp->ucode[0].ver_str);
return 0;
@@ -1412,14 +1413,14 @@ static ssize_t ucode_load_store(struct device *dev,
} else {
if (del_grp_idx < 0 ||
del_grp_idx >= OTX_CPT_MAX_ENGINE_GROUPS) {
- dev_err(dev, "Invalid engine group index %d",
+ dev_err(dev, "Invalid engine group index %d\n",
del_grp_idx);
ret = -EINVAL;
return ret;
}
if (!eng_grps->grp[del_grp_idx].is_enabled) {
- dev_err(dev, "Error engine_group%d is not configured",
+ dev_err(dev, "Error engine_group%d is not configured\n",
del_grp_idx);
ret = -EINVAL;
return ret;
@@ -1568,7 +1569,7 @@ void otx_cpt_disable_all_cores(struct otx_cpt_device *cpt)
udelay(CSR_DELAY);
reg = readq(cpt->reg_base + OTX_CPT_PF_EXEC_BUSY);
if (timeout--) {
- dev_warn(&cpt->pdev->dev, "Cores still busy");
+ dev_warn(&cpt->pdev->dev, "Cores still busy\n");
break;
}
}
@@ -1626,7 +1627,7 @@ int otx_cpt_init_eng_grps(struct pci_dev *pdev,
eng_grps->avail.max_ae_cnt;
if (eng_grps->engs_num > OTX_CPT_MAX_ENGINES) {
dev_err(&pdev->dev,
- "Number of engines %d > than max supported %d",
+ "Number of engines %d > than max supported %d\n",
eng_grps->engs_num, OTX_CPT_MAX_ENGINES);
ret = -EINVAL;
goto err;
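
Most hunks above only append a trailing "\n" to the dev_err()/dev_info()/pr_debug() format strings; without it the next printk can be glued onto the same record, or the line can sit in the log buffer until another newline arrives. The engine-group deletion path additionally switches the per-group messages to pr_cont() so the list of users comes out as a single line. A minimal sketch of that continuation idiom (the loop bound and the mirror test are placeholders, not the driver's exact fields):

    dev_err(dev, "Can't delete engine_group%d as it is used by engine_group(s):",
            idx);
    for (i = 0; i < max_groups; i++)
        if (grp[i].mirror_idx == idx)   /* hypothetical check */
            pr_cont(" %d", i);
    pr_cont("\n");                      /* the newline closes the record */
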
diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c
index 06202bcffb33..60e744f680d3 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c
@@ -1660,7 +1660,7 @@ int otx_cpt_crypto_init(struct pci_dev *pdev, struct module *mod,
case OTX_CPT_SE_TYPES:
count = atomic_read(&se_devices.count);
if (count >= CPT_MAX_VF_NUM) {
- dev_err(&pdev->dev, "No space to add a new device");
+ dev_err(&pdev->dev, "No space to add a new device\n");
ret = -ENOSPC;
goto err;
}
@@ -1687,7 +1687,7 @@ int otx_cpt_crypto_init(struct pci_dev *pdev, struct module *mod,
case OTX_CPT_AE_TYPES:
count = atomic_read(&ae_devices.count);
if (count >= CPT_MAX_VF_NUM) {
- dev_err(&pdev->dev, "No space to a add new device");
+ dev_err(&pdev->dev, "No space to a add new device\n");
ret = -ENOSPC;
goto err;
}
@@ -1728,7 +1728,7 @@ void otx_cpt_crypto_exit(struct pci_dev *pdev, struct module *mod,
}
if (!dev_found) {
- dev_err(&pdev->dev, "%s device not found", __func__);
+ dev_err(&pdev->dev, "%s device not found\n", __func__);
goto exit;
}
diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_main.c b/drivers/crypto/marvell/octeontx/otx_cptvf_main.c
index a91860b5dc77..ce3168327a39 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptvf_main.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptvf_main.c
@@ -584,7 +584,7 @@ static irqreturn_t cptvf_done_intr_handler(int __always_unused irq,
cptvf_write_vq_done_ack(cptvf, intr);
wqe = get_cptvf_vq_wqe(cptvf, 0);
if (unlikely(!wqe)) {
- dev_err(&pdev->dev, "No work to schedule for VF (%d)",
+ dev_err(&pdev->dev, "No work to schedule for VF (%d)\n",
cptvf->vfid);
return IRQ_NONE;
}
@@ -602,7 +602,7 @@ static void cptvf_set_irq_affinity(struct otx_cptvf *cptvf, int vec)
if (!zalloc_cpumask_var(&cptvf->affinity_mask[vec],
GFP_KERNEL)) {
dev_err(&pdev->dev,
- "Allocation failed for affinity_mask for VF %d",
+ "Allocation failed for affinity_mask for VF %d\n",
cptvf->vfid);
return;
}
@@ -691,7 +691,7 @@ static ssize_t vf_engine_group_store(struct device *dev,
return -EINVAL;
if (val >= OTX_CPT_MAX_ENGINE_GROUPS) {
- dev_err(dev, "Engine group >= than max available groups %d",
+ dev_err(dev, "Engine group >= than max available groups %d\n",
OTX_CPT_MAX_ENGINE_GROUPS);
return -EINVAL;
}
@@ -837,7 +837,7 @@ static int otx_cptvf_probe(struct pci_dev *pdev,
cptvf_misc_intr_handler, 0, "CPT VF misc intr",
cptvf);
if (err) {
- dev_err(dev, "Failed to request misc irq");
+ dev_err(dev, "Failed to request misc irq\n");
goto free_vectors;
}
@@ -854,7 +854,7 @@ static int otx_cptvf_probe(struct pci_dev *pdev,
cptvf->cqinfo.qchunksize = OTX_CPT_CMD_QCHUNK_SIZE;
err = cptvf_sw_init(cptvf, OTX_CPT_CMD_QLEN, OTX_CPT_NUM_QS_PER_VF);
if (err) {
- dev_err(dev, "cptvf_sw_init() failed");
+ dev_err(dev, "cptvf_sw_init() failed\n");
goto free_misc_irq;
}
/* Convey VQ LEN to PF */
@@ -946,7 +946,7 @@ static void otx_cptvf_remove(struct pci_dev *pdev)
/* Convey DOWN to PF */
if (otx_cptvf_send_vf_down(cptvf)) {
- dev_err(&pdev->dev, "PF not responding to DOWN msg");
+ dev_err(&pdev->dev, "PF not responding to DOWN msg\n");
} else {
sysfs_remove_group(&pdev->dev.kobj, &otx_cptvf_sysfs_group);
otx_cpt_crypto_exit(pdev, THIS_MODULE, cptvf->vftype);
diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c b/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c
index df839b880354..239195cccf93 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c
@@ -314,7 +314,7 @@ static int process_request(struct pci_dev *pdev, struct otx_cpt_req_info *req,
GFP_ATOMIC;
ret = setup_sgio_list(pdev, &info, req, gfp);
if (unlikely(ret)) {
- dev_err(&pdev->dev, "Setting up SG list failed");
+ dev_err(&pdev->dev, "Setting up SG list failed\n");
goto request_cleanup;
}
cpt_req->dlen = info->dlen;
@@ -410,17 +410,17 @@ int otx_cpt_do_request(struct pci_dev *pdev, struct otx_cpt_req_info *req,
struct otx_cptvf *cptvf = pci_get_drvdata(pdev);
if (!otx_cpt_device_ready(cptvf)) {
- dev_err(&pdev->dev, "CPT Device is not ready");
+ dev_err(&pdev->dev, "CPT Device is not ready\n");
return -ENODEV;
}
if ((cptvf->vftype == OTX_CPT_SE_TYPES) && (!req->ctrl.s.se_req)) {
- dev_err(&pdev->dev, "CPTVF-%d of SE TYPE got AE request",
+ dev_err(&pdev->dev, "CPTVF-%d of SE TYPE got AE request\n",
cptvf->vfid);
return -EINVAL;
} else if ((cptvf->vftype == OTX_CPT_AE_TYPES) &&
(req->ctrl.s.se_req)) {
- dev_err(&pdev->dev, "CPTVF-%d of AE TYPE got SE request",
+ dev_err(&pdev->dev, "CPTVF-%d of AE TYPE got SE request\n",
cptvf->vfid);
return -EINVAL;
}
@@ -461,7 +461,7 @@ static int cpt_process_ccode(struct pci_dev *pdev,
/* check for timeout */
if (time_after_eq(jiffies, cpt_info->time_in +
OTX_CPT_COMMAND_TIMEOUT * HZ))
- dev_warn(&pdev->dev, "Request timed out 0x%p", req);
+ dev_warn(&pdev->dev, "Request timed out 0x%p\n", req);
else if (cpt_info->extra_time < OTX_CPT_TIME_IN_RESET_COUNT) {
cpt_info->time_in = jiffies;
cpt_info->extra_time++;
diff --git a/drivers/crypto/mediatek/mtk-sha.c b/drivers/crypto/mediatek/mtk-sha.c
index bd6309e57ab8..da3f0b8814aa 100644
--- a/drivers/crypto/mediatek/mtk-sha.c
+++ b/drivers/crypto/mediatek/mtk-sha.c
@@ -805,12 +805,9 @@ static int mtk_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
size_t ds = crypto_shash_digestsize(bctx->shash);
int err, i;
- SHASH_DESC_ON_STACK(shash, bctx->shash);
-
- shash->tfm = bctx->shash;
-
if (keylen > bs) {
- err = crypto_shash_digest(shash, key, keylen, bctx->ipad);
+ err = crypto_shash_tfm_digest(bctx->shash, key, keylen,
+ bctx->ipad);
if (err)
return err;
keylen = ds;
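
The mtk-sha hunk is one of several in this series that replace an on-stack shash descriptor with the crypto_shash_tfm_digest() helper, which sets up the descriptor and performs the digest in one call. A sketch of the two equivalent forms, assuming tfm, key, keylen and out are already set up:

    /* before: the caller owns a stack descriptor */
    SHASH_DESC_ON_STACK(desc, tfm);
    desc->tfm = tfm;
    err = crypto_shash_digest(desc, key, keylen, out);

    /* after: no descriptor handling in the driver */
    err = crypto_shash_tfm_digest(tfm, key, keylen, out);

The same substitution appears below in n2_core.c, omap-sham.c and s5p-sss.c.
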
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index f5c468f2cc82..6a828bbecea4 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -462,7 +462,6 @@ static int n2_hmac_async_setkey(struct crypto_ahash *tfm, const u8 *key,
struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
struct crypto_shash *child_shash = ctx->child_shash;
struct crypto_ahash *fallback_tfm;
- SHASH_DESC_ON_STACK(shash, child_shash);
int err, bs, ds;
fallback_tfm = ctx->base.fallback_tfm;
@@ -470,14 +469,12 @@ static int n2_hmac_async_setkey(struct crypto_ahash *tfm, const u8 *key,
if (err)
return err;
- shash->tfm = child_shash;
-
bs = crypto_shash_blocksize(child_shash);
ds = crypto_shash_digestsize(child_shash);
BUG_ON(ds > N2_HASH_KEY_MAX);
if (keylen > bs) {
- err = crypto_shash_digest(shash, key, keylen,
- ctx->hash_key);
+ err = crypto_shash_tfm_digest(child_shash, key, keylen,
+ ctx->hash_key);
if (err)
return err;
keylen = ds;
diff --git a/drivers/crypto/nx/Makefile b/drivers/crypto/nx/Makefile
index 015155da59c2..bc89a20e5d9d 100644
--- a/drivers/crypto/nx/Makefile
+++ b/drivers/crypto/nx/Makefile
@@ -15,4 +15,4 @@ obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_PSERIES) += nx-compress-pseries.o nx-compres
obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_POWERNV) += nx-compress-powernv.o nx-compress.o
nx-compress-objs := nx-842.o
nx-compress-pseries-objs := nx-842-pseries.o
-nx-compress-powernv-objs := nx-842-powernv.o
+nx-compress-powernv-objs := nx-common-powernv.o
diff --git a/drivers/crypto/nx/nx-842-powernv.c b/drivers/crypto/nx/nx-common-powernv.c
index c037a2403b82..13c65deda8e9 100644
--- a/drivers/crypto/nx/nx-842-powernv.c
+++ b/drivers/crypto/nx/nx-common-powernv.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * Driver for IBM PowerNV 842 compression accelerator
+ * Driver for IBM PowerNV compression accelerator
*
* Copyright (C) 2015 Dan Streetman, IBM Corp
*/
@@ -20,7 +20,7 @@
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");
-MODULE_DESCRIPTION("842 H/W Compression driver for IBM PowerNV processors");
+MODULE_DESCRIPTION("H/W Compression driver for IBM PowerNV processors");
MODULE_ALIAS_CRYPTO("842");
MODULE_ALIAS_CRYPTO("842-nx");
@@ -40,9 +40,9 @@ struct nx842_workmem {
char padding[WORKMEM_ALIGN]; /* unused, to allow alignment */
} __packed __aligned(WORKMEM_ALIGN);
-struct nx842_coproc {
+struct nx_coproc {
unsigned int chip_id;
- unsigned int ct;
+ unsigned int ct; /* Can be 842 or GZIP high/normal*/
unsigned int ci; /* Coprocessor instance, used with icswx */
struct {
struct vas_window *rxwin;
@@ -58,9 +58,16 @@ struct nx842_coproc {
static DEFINE_PER_CPU(struct vas_window *, cpu_txwin);
/* no cpu hotplug on powernv, so this list never changes after init */
-static LIST_HEAD(nx842_coprocs);
+static LIST_HEAD(nx_coprocs);
static unsigned int nx842_ct; /* used in icswx function */
+/*
+ * Using same values as in skiboot or coprocessor type representing
+ * in NX workbook.
+ */
+#define NX_CT_GZIP (2) /* on P9 and later */
+#define NX_CT_842 (3)
+
static int (*nx842_powernv_exec)(const unsigned char *in,
unsigned int inlen, unsigned char *out,
unsigned int *outlenp, void *workmem, int fc);
@@ -666,15 +673,15 @@ static int nx842_powernv_decompress(const unsigned char *in, unsigned int inlen,
wmem, CCW_FC_842_DECOMP_CRC);
}
-static inline void nx842_add_coprocs_list(struct nx842_coproc *coproc,
+static inline void nx_add_coprocs_list(struct nx_coproc *coproc,
int chipid)
{
coproc->chip_id = chipid;
INIT_LIST_HEAD(&coproc->list);
- list_add(&coproc->list, &nx842_coprocs);
+ list_add(&coproc->list, &nx_coprocs);
}
-static struct vas_window *nx842_alloc_txwin(struct nx842_coproc *coproc)
+static struct vas_window *nx_alloc_txwin(struct nx_coproc *coproc)
{
struct vas_window *txwin = NULL;
struct vas_tx_win_attr txattr;
@@ -685,7 +692,6 @@ static struct vas_window *nx842_alloc_txwin(struct nx842_coproc *coproc)
*/
vas_init_tx_win_attr(&txattr, coproc->ct);
txattr.lpid = 0; /* lpid is 0 for kernel requests */
- txattr.pid = 0; /* pid is 0 for kernel requests */
/*
* Open a VAS send window which is used to send request to NX.
@@ -704,9 +710,9 @@ static struct vas_window *nx842_alloc_txwin(struct nx842_coproc *coproc)
* cpu_txwin is used in copy/paste operation for each compression /
* decompression request.
*/
-static int nx842_open_percpu_txwins(void)
+static int nx_open_percpu_txwins(void)
{
- struct nx842_coproc *coproc, *n;
+ struct nx_coproc *coproc, *n;
unsigned int i, chip_id;
for_each_possible_cpu(i) {
@@ -714,17 +720,18 @@ static int nx842_open_percpu_txwins(void)
chip_id = cpu_to_chip_id(i);
- list_for_each_entry_safe(coproc, n, &nx842_coprocs, list) {
+ list_for_each_entry_safe(coproc, n, &nx_coprocs, list) {
/*
* Kernel requests use only high priority FIFOs. So
* open send windows for these FIFOs.
+ * GZIP is not supported in kernel right now.
*/
if (coproc->ct != VAS_COP_TYPE_842_HIPRI)
continue;
if (coproc->chip_id == chip_id) {
- txwin = nx842_alloc_txwin(coproc);
+ txwin = nx_alloc_txwin(coproc);
if (IS_ERR(txwin))
return PTR_ERR(txwin);
@@ -743,13 +750,28 @@ static int nx842_open_percpu_txwins(void)
return 0;
}
+static int __init nx_set_ct(struct nx_coproc *coproc, const char *priority,
+ int high, int normal)
+{
+ if (!strcmp(priority, "High"))
+ coproc->ct = high;
+ else if (!strcmp(priority, "Normal"))
+ coproc->ct = normal;
+ else {
+ pr_err("Invalid RxFIFO priority value\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int __init vas_cfg_coproc_info(struct device_node *dn, int chip_id,
- int vasid, int *ct)
+ int vasid, int type, int *ct)
{
struct vas_window *rxwin = NULL;
struct vas_rx_win_attr rxattr;
- struct nx842_coproc *coproc;
u32 lpid, pid, tid, fifo_size;
+ struct nx_coproc *coproc;
u64 rx_fifo;
const char *priority;
int ret;
@@ -794,15 +816,15 @@ static int __init vas_cfg_coproc_info(struct device_node *dn, int chip_id,
if (!coproc)
return -ENOMEM;
- if (!strcmp(priority, "High"))
- coproc->ct = VAS_COP_TYPE_842_HIPRI;
- else if (!strcmp(priority, "Normal"))
- coproc->ct = VAS_COP_TYPE_842;
- else {
- pr_err("Invalid RxFIFO priority value\n");
- ret = -EINVAL;
+ if (type == NX_CT_842)
+ ret = nx_set_ct(coproc, priority, VAS_COP_TYPE_842_HIPRI,
+ VAS_COP_TYPE_842);
+ else if (type == NX_CT_GZIP)
+ ret = nx_set_ct(coproc, priority, VAS_COP_TYPE_GZIP_HIPRI,
+ VAS_COP_TYPE_GZIP);
+
+ if (ret)
goto err_out;
- }
vas_init_rx_win_attr(&rxattr, coproc->ct);
rxattr.rx_fifo = (void *)rx_fifo;
@@ -830,7 +852,7 @@ static int __init vas_cfg_coproc_info(struct device_node *dn, int chip_id,
coproc->vas.rxwin = rxwin;
coproc->vas.id = vasid;
- nx842_add_coprocs_list(coproc, chip_id);
+ nx_add_coprocs_list(coproc, chip_id);
/*
* (lpid, pid, tid) combination has to be unique for each
@@ -848,13 +870,47 @@ err_out:
return ret;
}
+static int __init nx_coproc_init(int chip_id, int ct_842, int ct_gzip)
+{
+ int ret = 0;
+
+ if (opal_check_token(OPAL_NX_COPROC_INIT)) {
+ ret = opal_nx_coproc_init(chip_id, ct_842);
+
+ if (!ret)
+ ret = opal_nx_coproc_init(chip_id, ct_gzip);
+
+ if (ret) {
+ ret = opal_error_code(ret);
+ pr_err("Failed to initialize NX for chip(%d): %d\n",
+ chip_id, ret);
+ }
+ } else
+ pr_warn("Firmware doesn't support NX initialization\n");
+
+ return ret;
+}
+
+static int __init find_nx_device_tree(struct device_node *dn, int chip_id,
+ int vasid, int type, char *devname,
+ int *ct)
+{
+ int ret = 0;
+
+ if (of_device_is_compatible(dn, devname)) {
+ ret = vas_cfg_coproc_info(dn, chip_id, vasid, type, ct);
+ if (ret)
+ of_node_put(dn);
+ }
+
+ return ret;
+}
-static int __init nx842_powernv_probe_vas(struct device_node *pn)
+static int __init nx_powernv_probe_vas(struct device_node *pn)
{
- struct device_node *dn;
int chip_id, vasid, ret = 0;
- int nx_fifo_found = 0;
- int uninitialized_var(ct);
+ int ct_842 = 0, ct_gzip = 0;
+ struct device_node *dn;
chip_id = of_get_ibm_chip_id(pn);
if (chip_id < 0) {
@@ -869,40 +925,33 @@ static int __init nx842_powernv_probe_vas(struct device_node *pn)
}
for_each_child_of_node(pn, dn) {
- if (of_device_is_compatible(dn, "ibm,p9-nx-842")) {
- ret = vas_cfg_coproc_info(dn, chip_id, vasid, &ct);
- if (ret) {
- of_node_put(dn);
- return ret;
- }
- nx_fifo_found++;
- }
+ ret = find_nx_device_tree(dn, chip_id, vasid, NX_CT_842,
+ "ibm,p9-nx-842", &ct_842);
+
+ if (!ret)
+ ret = find_nx_device_tree(dn, chip_id, vasid,
+ NX_CT_GZIP, "ibm,p9-nx-gzip", &ct_gzip);
+
+ if (ret)
+ return ret;
}
- if (!nx_fifo_found) {
- pr_err("NX842 FIFO nodes are missing\n");
+ if (!ct_842 || !ct_gzip) {
+ pr_err("NX FIFO nodes are missing\n");
return -EINVAL;
}
/*
* Initialize NX instance for both high and normal priority FIFOs.
*/
- if (opal_check_token(OPAL_NX_COPROC_INIT)) {
- ret = opal_nx_coproc_init(chip_id, ct);
- if (ret) {
- pr_err("Failed to initialize NX for chip(%d): %d\n",
- chip_id, ret);
- ret = opal_error_code(ret);
- }
- } else
- pr_warn("Firmware doesn't support NX initialization\n");
+ ret = nx_coproc_init(chip_id, ct_842, ct_gzip);
return ret;
}
static int __init nx842_powernv_probe(struct device_node *dn)
{
- struct nx842_coproc *coproc;
+ struct nx_coproc *coproc;
unsigned int ct, ci;
int chip_id;
@@ -922,13 +971,13 @@ static int __init nx842_powernv_probe(struct device_node *dn)
return -EINVAL;
}
- coproc = kmalloc(sizeof(*coproc), GFP_KERNEL);
+ coproc = kzalloc(sizeof(*coproc), GFP_KERNEL);
if (!coproc)
return -ENOMEM;
coproc->ct = ct;
coproc->ci = ci;
- nx842_add_coprocs_list(coproc, chip_id);
+ nx_add_coprocs_list(coproc, chip_id);
pr_info("coprocessor found on chip %d, CT %d CI %d\n", chip_id, ct, ci);
@@ -941,9 +990,9 @@ static int __init nx842_powernv_probe(struct device_node *dn)
return 0;
}
-static void nx842_delete_coprocs(void)
+static void nx_delete_coprocs(void)
{
- struct nx842_coproc *coproc, *n;
+ struct nx_coproc *coproc, *n;
struct vas_window *txwin;
int i;
@@ -955,10 +1004,10 @@ static void nx842_delete_coprocs(void)
if (txwin)
vas_win_close(txwin);
- per_cpu(cpu_txwin, i) = 0;
+ per_cpu(cpu_txwin, i) = NULL;
}
- list_for_each_entry_safe(coproc, n, &nx842_coprocs, list) {
+ list_for_each_entry_safe(coproc, n, &nx_coprocs, list) {
if (coproc->vas.rxwin)
vas_win_close(coproc->vas.rxwin);
@@ -1002,7 +1051,7 @@ static struct crypto_alg nx842_powernv_alg = {
.coa_decompress = nx842_crypto_decompress } }
};
-static __init int nx842_powernv_init(void)
+static __init int nx_compress_powernv_init(void)
{
struct device_node *dn;
int ret;
@@ -1017,15 +1066,15 @@ static __init int nx842_powernv_init(void)
BUILD_BUG_ON(DDE_BUFFER_SIZE_MULT % DDE_BUFFER_LAST_MULT);
for_each_compatible_node(dn, NULL, "ibm,power9-nx") {
- ret = nx842_powernv_probe_vas(dn);
+ ret = nx_powernv_probe_vas(dn);
if (ret) {
- nx842_delete_coprocs();
+ nx_delete_coprocs();
of_node_put(dn);
return ret;
}
}
- if (list_empty(&nx842_coprocs)) {
+ if (list_empty(&nx_coprocs)) {
for_each_compatible_node(dn, NULL, "ibm,power-nx")
nx842_powernv_probe(dn);
@@ -1034,9 +1083,25 @@ static __init int nx842_powernv_init(void)
nx842_powernv_exec = nx842_exec_icswx;
} else {
- ret = nx842_open_percpu_txwins();
+ /*
+ * Register VAS user space API for NX GZIP so
+ * that user space can use GZIP engine.
+ * Using high FIFO priority for kernel requests and
+ * normal FIFO priority is assigned for userspace.
+ * 842 compression is supported only in kernel.
+ */
+ ret = vas_register_coproc_api(THIS_MODULE, VAS_COP_TYPE_GZIP,
+ "nx-gzip");
+
+ /*
+ * GZIP is not supported in kernel right now.
+ * So open tx windows only for 842.
+ */
+ if (!ret)
+ ret = nx_open_percpu_txwins();
+
if (ret) {
- nx842_delete_coprocs();
+ nx_delete_coprocs();
return ret;
}
@@ -1045,18 +1110,27 @@ static __init int nx842_powernv_init(void)
ret = crypto_register_alg(&nx842_powernv_alg);
if (ret) {
- nx842_delete_coprocs();
+ nx_delete_coprocs();
return ret;
}
return 0;
}
-module_init(nx842_powernv_init);
+module_init(nx_compress_powernv_init);
-static void __exit nx842_powernv_exit(void)
+static void __exit nx_compress_powernv_exit(void)
{
+ /*
+ * GZIP engine is supported only in power9 or later and nx842_ct
+ * is used on power8 (icswx).
+ * VAS API for NX GZIP is registered during init for user space
+ * use. So delete this API use for GZIP engine.
+ */
+ if (!nx842_ct)
+ vas_unregister_coproc_api();
+
crypto_unregister_alg(&nx842_powernv_alg);
- nx842_delete_coprocs();
+ nx_delete_coprocs();
}
-module_exit(nx842_powernv_exit);
+module_exit(nx_compress_powernv_exit);
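
Beyond the 842-to-nx renames, the teardown path keeps the usual safe-iteration idiom: nx_delete_coprocs() can unlink and free each node while walking the list because list_for_each_entry_safe() caches the next entry up front. A reduced sketch of that idiom, using the fields visible in the hunks above (teardown details beyond them are illustrative):

    struct nx_coproc *coproc, *n;

    list_for_each_entry_safe(coproc, n, &nx_coprocs, list) {
        if (coproc->vas.rxwin)
            vas_win_close(coproc->vas.rxwin);
        list_del(&coproc->list);   /* safe: 'n' already points past us */
        kfree(coproc);
    }
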
diff --git a/drivers/crypto/omap-aes-gcm.c b/drivers/crypto/omap-aes-gcm.c
index 32dc00dc570b..9f937bdc53a7 100644
--- a/drivers/crypto/omap-aes-gcm.c
+++ b/drivers/crypto/omap-aes-gcm.c
@@ -77,7 +77,6 @@ static void omap_aes_gcm_done_task(struct omap_aes_dev *dd)
tag = (u8 *)rctx->auth_tag;
for (i = 0; i < dd->authsize; i++) {
if (tag[i]) {
- dev_err(dd->dev, "GCM decryption: Tag Message is wrong\n");
ret = -EBADMSG;
}
}
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 824ddf2a66ff..b5aff20c5900 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -1269,13 +1269,17 @@ static int omap_aes_remove(struct platform_device *pdev)
spin_unlock(&list_lock);
for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
- for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
+ for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) {
crypto_unregister_skcipher(
&dd->pdata->algs_info[i].algs_list[j]);
+ dd->pdata->algs_info[i].registered--;
+ }
- for (i = dd->pdata->aead_algs_info->size - 1; i >= 0; i--) {
+ for (i = dd->pdata->aead_algs_info->registered - 1; i >= 0; i--) {
aalg = &dd->pdata->aead_algs_info->algs_list[i];
crypto_unregister_aead(aalg);
+ dd->pdata->aead_algs_info->registered--;
+
}
crypto_engine_exit(dd->engine);
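
The omap-aes change keeps the per-family "registered" counters in step while unregistering, so a second pass through the remove path (or an error path that re-enters it) cannot unregister an algorithm that is already gone. Distilled form of the bookkeeping, where info stands for one dd->pdata->algs_info[] entry:

    for (j = info->registered - 1; j >= 0; j--) {
        crypto_unregister_skcipher(&info->algs_list[j]);
        info->registered--;     /* nothing left to undo twice */
    }
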
diff --git a/drivers/crypto/omap-crypto.c b/drivers/crypto/omap-crypto.c
index cc88b7362bc2..94b2dba90f0d 100644
--- a/drivers/crypto/omap-crypto.c
+++ b/drivers/crypto/omap-crypto.c
@@ -178,11 +178,17 @@ static void omap_crypto_copy_data(struct scatterlist *src,
amt = min(src->length - srco, dst->length - dsto);
amt = min(len, amt);
- srcb = sg_virt(src) + srco;
- dstb = sg_virt(dst) + dsto;
+ srcb = kmap_atomic(sg_page(src)) + srco + src->offset;
+ dstb = kmap_atomic(sg_page(dst)) + dsto + dst->offset;
memcpy(dstb, srcb, amt);
+ if (!PageSlab(sg_page(dst)))
+ flush_kernel_dcache_page(sg_page(dst));
+
+ kunmap_atomic(srcb);
+ kunmap_atomic(dstb);
+
srco += amt;
dsto += amt;
len -= amt;
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index e4072cd38585..82691a057d2a 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -33,7 +33,6 @@
#include <linux/of_irq.h>
#include <linux/delay.h>
#include <linux/crypto.h>
-#include <linux/cryptohash.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/sha.h>
@@ -169,8 +168,6 @@ struct omap_sham_hmac_ctx {
};
struct omap_sham_ctx {
- struct omap_sham_dev *dd;
-
unsigned long flags;
/* fallback stuff */
@@ -751,8 +748,17 @@ static int omap_sham_align_sgs(struct scatterlist *sg,
int offset = rctx->offset;
int bufcnt = rctx->bufcnt;
- if (!sg || !sg->length || !nbytes)
+ if (!sg || !sg->length || !nbytes) {
+ if (bufcnt) {
+ bufcnt = DIV_ROUND_UP(bufcnt, bs) * bs;
+ sg_init_table(rctx->sgl, 1);
+ sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, bufcnt);
+ rctx->sg = rctx->sgl;
+ rctx->sg_len = 1;
+ }
+
return 0;
+ }
new_len = nbytes;
@@ -896,7 +902,7 @@ static int omap_sham_prepare_request(struct ahash_request *req, bool update)
if (hash_later < 0)
hash_later = 0;
- if (hash_later) {
+ if (hash_later && hash_later <= rctx->buflen) {
scatterwalk_map_and_copy(rctx->buffer,
req->src,
req->nbytes - hash_later,
@@ -926,27 +932,35 @@ static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
return 0;
}
+struct omap_sham_dev *omap_sham_find_dev(struct omap_sham_reqctx *ctx)
+{
+ struct omap_sham_dev *dd;
+
+ if (ctx->dd)
+ return ctx->dd;
+
+ spin_lock_bh(&sham.lock);
+ dd = list_first_entry(&sham.dev_list, struct omap_sham_dev, list);
+ list_move_tail(&dd->list, &sham.dev_list);
+ ctx->dd = dd;
+ spin_unlock_bh(&sham.lock);
+
+ return dd;
+}
+
static int omap_sham_init(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
- struct omap_sham_dev *dd = NULL, *tmp;
+ struct omap_sham_dev *dd;
int bs = 0;
- spin_lock_bh(&sham.lock);
- if (!tctx->dd) {
- list_for_each_entry(tmp, &sham.dev_list, list) {
- dd = tmp;
- break;
- }
- tctx->dd = dd;
- } else {
- dd = tctx->dd;
- }
- spin_unlock_bh(&sham.lock);
+ ctx->dd = NULL;
- ctx->dd = dd;
+ dd = omap_sham_find_dev(ctx);
+ if (!dd)
+ return -ENODEV;
ctx->flags = 0;
@@ -1216,8 +1230,7 @@ err1:
static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
{
struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
- struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
- struct omap_sham_dev *dd = tctx->dd;
+ struct omap_sham_dev *dd = ctx->dd;
ctx->op = op;
@@ -1227,7 +1240,7 @@ static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
static int omap_sham_update(struct ahash_request *req)
{
struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
- struct omap_sham_dev *dd = ctx->dd;
+ struct omap_sham_dev *dd = omap_sham_find_dev(ctx);
if (!req->nbytes)
return 0;
@@ -1245,16 +1258,6 @@ static int omap_sham_update(struct ahash_request *req)
return omap_sham_enqueue(req, OP_UPDATE);
}
-static int omap_sham_shash_digest(struct crypto_shash *tfm, u32 flags,
- const u8 *data, unsigned int len, u8 *out)
-{
- SHASH_DESC_ON_STACK(shash, tfm);
-
- shash->tfm = tfm;
-
- return crypto_shash_digest(shash, data, len, out);
-}
-
static int omap_sham_final_shash(struct ahash_request *req)
{
struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
@@ -1270,9 +1273,8 @@ static int omap_sham_final_shash(struct ahash_request *req)
!test_bit(FLAGS_AUTO_XOR, &ctx->dd->flags))
offset = get_block_size(ctx);
- return omap_sham_shash_digest(tctx->fallback, req->base.flags,
- ctx->buffer + offset,
- ctx->bufcnt - offset, req->result);
+ return crypto_shash_tfm_digest(tctx->fallback, ctx->buffer + offset,
+ ctx->bufcnt - offset, req->result);
}
static int omap_sham_final(struct ahash_request *req)
@@ -1331,29 +1333,15 @@ static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
struct omap_sham_hmac_ctx *bctx = tctx->base;
int bs = crypto_shash_blocksize(bctx->shash);
int ds = crypto_shash_digestsize(bctx->shash);
- struct omap_sham_dev *dd = NULL, *tmp;
int err, i;
- spin_lock_bh(&sham.lock);
- if (!tctx->dd) {
- list_for_each_entry(tmp, &sham.dev_list, list) {
- dd = tmp;
- break;
- }
- tctx->dd = dd;
- } else {
- dd = tctx->dd;
- }
- spin_unlock_bh(&sham.lock);
-
err = crypto_shash_setkey(tctx->fallback, key, keylen);
if (err)
return err;
if (keylen > bs) {
- err = omap_sham_shash_digest(bctx->shash,
- crypto_shash_get_flags(bctx->shash),
- key, keylen, bctx->ipad);
+ err = crypto_shash_tfm_digest(bctx->shash, key, keylen,
+ bctx->ipad);
if (err)
return err;
keylen = ds;
@@ -1363,7 +1351,7 @@ static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
memset(bctx->ipad + keylen, 0, bs - keylen);
- if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) {
+ if (!test_bit(FLAGS_AUTO_XOR, &sham.flags)) {
memcpy(bctx->opad, bctx->ipad, bs);
for (i = 0; i < bs; i++) {
@@ -1584,7 +1572,8 @@ static struct ahash_alg algs_sha224_sha256[] = {
.cra_name = "sha224",
.cra_driver_name = "omap-sha224",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx),
@@ -1605,7 +1594,8 @@ static struct ahash_alg algs_sha224_sha256[] = {
.cra_name = "sha256",
.cra_driver_name = "omap-sha256",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx),
@@ -1627,7 +1617,8 @@ static struct ahash_alg algs_sha224_sha256[] = {
.cra_name = "hmac(sha224)",
.cra_driver_name = "omap-hmac-sha224",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx) +
@@ -1650,7 +1641,8 @@ static struct ahash_alg algs_sha224_sha256[] = {
.cra_name = "hmac(sha256)",
.cra_driver_name = "omap-hmac-sha256",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx) +
@@ -1675,7 +1667,8 @@ static struct ahash_alg algs_sha384_sha512[] = {
.cra_name = "sha384",
.cra_driver_name = "omap-sha384",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx),
@@ -1696,7 +1689,8 @@ static struct ahash_alg algs_sha384_sha512[] = {
.cra_name = "sha512",
.cra_driver_name = "omap-sha512",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx),
@@ -1718,7 +1712,8 @@ static struct ahash_alg algs_sha384_sha512[] = {
.cra_name = "hmac(sha384)",
.cra_driver_name = "omap-hmac-sha384",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx) +
@@ -1741,7 +1736,8 @@ static struct ahash_alg algs_sha384_sha512[] = {
.cra_name = "hmac(sha512)",
.cra_driver_name = "omap-hmac-sha512",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx) +
@@ -2167,6 +2163,7 @@ static int omap_sham_probe(struct platform_device *pdev)
}
dd->flags |= dd->pdata->flags;
+ sham.flags |= dd->pdata->flags;
pm_runtime_use_autosuspend(dev);
pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY);
@@ -2194,6 +2191,9 @@ static int omap_sham_probe(struct platform_device *pdev)
spin_unlock(&sham.lock);
for (i = 0; i < dd->pdata->algs_info_size; i++) {
+ if (dd->pdata->algs_info[i].registered)
+ break;
+
for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
struct ahash_alg *alg;
@@ -2245,9 +2245,11 @@ static int omap_sham_remove(struct platform_device *pdev)
list_del(&dd->list);
spin_unlock(&sham.lock);
for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
- for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
+ for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) {
crypto_unregister_ahash(
&dd->pdata->algs_info[i].algs_list[j]);
+ dd->pdata->algs_info[i].registered--;
+ }
tasklet_kill(&dd->done_task);
pm_runtime_disable(&pdev->dev);
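
omap_sham_find_dev() centralises device selection and adds simple round-robin spreading: the device taken from the head of the global list is moved to the tail under the spinlock, so back-to-back requests land on different instances when more than one is present. The idiom, assuming the list is known to be non-empty:

    spin_lock_bh(&sham.lock);
    dd = list_first_entry(&sham.dev_list, struct omap_sham_dev, list);
    list_move_tail(&dd->list, &sham.dev_list);  /* rotate for the next caller */
    spin_unlock_bh(&sham.lock);

The setkey path likewise stops dereferencing a per-tfm device pointer and tests FLAGS_AUTO_XOR against the driver-wide sham.flags, which probe now populates.
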
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index 2a16800d2579..341433fbcc4a 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -1521,37 +1521,6 @@ static int s5p_hash_update(struct ahash_request *req)
}
/**
- * s5p_hash_shash_digest() - calculate shash digest
- * @tfm: crypto transformation
- * @flags: tfm flags
- * @data: input data
- * @len: length of data
- * @out: output buffer
- */
-static int s5p_hash_shash_digest(struct crypto_shash *tfm, u32 flags,
- const u8 *data, unsigned int len, u8 *out)
-{
- SHASH_DESC_ON_STACK(shash, tfm);
-
- shash->tfm = tfm;
-
- return crypto_shash_digest(shash, data, len, out);
-}
-
-/**
- * s5p_hash_final_shash() - calculate shash digest
- * @req: AHASH request
- */
-static int s5p_hash_final_shash(struct ahash_request *req)
-{
- struct s5p_hash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
- struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
-
- return s5p_hash_shash_digest(tctx->fallback, req->base.flags,
- ctx->buffer, ctx->bufcnt, req->result);
-}
-
-/**
* s5p_hash_final() - close up hash and calculate digest
* @req: AHASH request
*
@@ -1582,8 +1551,12 @@ static int s5p_hash_final(struct ahash_request *req)
if (ctx->error)
return -EINVAL; /* uncompleted hash is not needed */
- if (!ctx->digcnt && ctx->bufcnt < BUFLEN)
- return s5p_hash_final_shash(req);
+ if (!ctx->digcnt && ctx->bufcnt < BUFLEN) {
+ struct s5p_hash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
+
+ return crypto_shash_tfm_digest(tctx->fallback, ctx->buffer,
+ ctx->bufcnt, req->result);
+ }
return s5p_hash_enqueue(req, false); /* HASH_OP_FINAL */
}
diff --git a/drivers/crypto/stm32/stm32-crc32.c b/drivers/crypto/stm32/stm32-crc32.c
index 8e92e4ac79f1..3ba41148c2a4 100644
--- a/drivers/crypto/stm32/stm32-crc32.c
+++ b/drivers/crypto/stm32/stm32-crc32.c
@@ -28,18 +28,23 @@
/* Registers values */
#define CRC_CR_RESET BIT(0)
-#define CRC_CR_REVERSE (BIT(7) | BIT(6) | BIT(5))
-#define CRC_INIT_DEFAULT 0xFFFFFFFF
+#define CRC_CR_REV_IN_WORD (BIT(6) | BIT(5))
+#define CRC_CR_REV_IN_BYTE BIT(5)
+#define CRC_CR_REV_OUT BIT(7)
+#define CRC32C_INIT_DEFAULT 0xFFFFFFFF
#define CRC_AUTOSUSPEND_DELAY 50
+static unsigned int burst_size;
+module_param(burst_size, uint, 0644);
+MODULE_PARM_DESC(burst_size, "Select burst byte size (0 unlimited)");
+
struct stm32_crc {
struct list_head list;
struct device *dev;
void __iomem *regs;
struct clk *clk;
- u8 pending_data[sizeof(u32)];
- size_t nb_pending_bytes;
+ spinlock_t lock;
};
struct stm32_crc_list {
@@ -59,14 +64,13 @@ struct stm32_crc_ctx {
struct stm32_crc_desc_ctx {
u32 partial; /* crc32c: partial in first 4 bytes of that struct */
- struct stm32_crc *crc;
};
static int stm32_crc32_cra_init(struct crypto_tfm *tfm)
{
struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm);
- mctx->key = CRC_INIT_DEFAULT;
+ mctx->key = 0;
mctx->poly = CRC32_POLY_LE;
return 0;
}
@@ -75,7 +79,7 @@ static int stm32_crc32c_cra_init(struct crypto_tfm *tfm)
{
struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm);
- mctx->key = CRC_INIT_DEFAULT;
+ mctx->key = CRC32C_INIT_DEFAULT;
mctx->poly = CRC32C_POLY_LE;
return 0;
}
@@ -92,87 +96,135 @@ static int stm32_crc_setkey(struct crypto_shash *tfm, const u8 *key,
return 0;
}
+static struct stm32_crc *stm32_crc_get_next_crc(void)
+{
+ struct stm32_crc *crc;
+
+ spin_lock_bh(&crc_list.lock);
+ crc = list_first_entry(&crc_list.dev_list, struct stm32_crc, list);
+ if (crc)
+ list_move_tail(&crc->list, &crc_list.dev_list);
+ spin_unlock_bh(&crc_list.lock);
+
+ return crc;
+}
+
static int stm32_crc_init(struct shash_desc *desc)
{
struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
struct stm32_crc *crc;
+ unsigned long flags;
- spin_lock_bh(&crc_list.lock);
- list_for_each_entry(crc, &crc_list.dev_list, list) {
- ctx->crc = crc;
- break;
- }
- spin_unlock_bh(&crc_list.lock);
+ crc = stm32_crc_get_next_crc();
+ if (!crc)
+ return -ENODEV;
+
+ pm_runtime_get_sync(crc->dev);
- pm_runtime_get_sync(ctx->crc->dev);
+ spin_lock_irqsave(&crc->lock, flags);
/* Reset, set key, poly and configure in bit reverse mode */
- writel_relaxed(bitrev32(mctx->key), ctx->crc->regs + CRC_INIT);
- writel_relaxed(bitrev32(mctx->poly), ctx->crc->regs + CRC_POL);
- writel_relaxed(CRC_CR_RESET | CRC_CR_REVERSE, ctx->crc->regs + CRC_CR);
+ writel_relaxed(bitrev32(mctx->key), crc->regs + CRC_INIT);
+ writel_relaxed(bitrev32(mctx->poly), crc->regs + CRC_POL);
+ writel_relaxed(CRC_CR_RESET | CRC_CR_REV_IN_WORD | CRC_CR_REV_OUT,
+ crc->regs + CRC_CR);
/* Store partial result */
- ctx->partial = readl_relaxed(ctx->crc->regs + CRC_DR);
- ctx->crc->nb_pending_bytes = 0;
+ ctx->partial = readl_relaxed(crc->regs + CRC_DR);
- pm_runtime_mark_last_busy(ctx->crc->dev);
- pm_runtime_put_autosuspend(ctx->crc->dev);
+ spin_unlock_irqrestore(&crc->lock, flags);
+
+ pm_runtime_mark_last_busy(crc->dev);
+ pm_runtime_put_autosuspend(crc->dev);
return 0;
}
-static int stm32_crc_update(struct shash_desc *desc, const u8 *d8,
- unsigned int length)
+static int burst_update(struct shash_desc *desc, const u8 *d8,
+ size_t length)
{
struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
- struct stm32_crc *crc = ctx->crc;
- u32 *d32;
- unsigned int i;
+ struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
+ struct stm32_crc *crc;
+ unsigned long flags;
+
+ crc = stm32_crc_get_next_crc();
+ if (!crc)
+ return -ENODEV;
pm_runtime_get_sync(crc->dev);
- if (unlikely(crc->nb_pending_bytes)) {
- while (crc->nb_pending_bytes != sizeof(u32) && length) {
- /* Fill in pending data */
- crc->pending_data[crc->nb_pending_bytes++] = *(d8++);
+ spin_lock_irqsave(&crc->lock, flags);
+
+ /*
+ * Restore previously calculated CRC for this context as init value
+ * Restore polynomial configuration
+ * Configure in register for word input data,
+ * Configure out register in reversed bit mode data.
+ */
+ writel_relaxed(bitrev32(ctx->partial), crc->regs + CRC_INIT);
+ writel_relaxed(bitrev32(mctx->poly), crc->regs + CRC_POL);
+ writel_relaxed(CRC_CR_RESET | CRC_CR_REV_IN_WORD | CRC_CR_REV_OUT,
+ crc->regs + CRC_CR);
+
+ if (d8 != PTR_ALIGN(d8, sizeof(u32))) {
+ /* Configure for byte data */
+ writel_relaxed(CRC_CR_REV_IN_BYTE | CRC_CR_REV_OUT,
+ crc->regs + CRC_CR);
+ while (d8 != PTR_ALIGN(d8, sizeof(u32)) && length) {
+ writeb_relaxed(*d8++, crc->regs + CRC_DR);
length--;
}
-
- if (crc->nb_pending_bytes == sizeof(u32)) {
- /* Process completed pending data */
- writel_relaxed(*(u32 *)crc->pending_data,
- crc->regs + CRC_DR);
- crc->nb_pending_bytes = 0;
- }
+ /* Configure for word data */
+ writel_relaxed(CRC_CR_REV_IN_WORD | CRC_CR_REV_OUT,
+ crc->regs + CRC_CR);
}
- d32 = (u32 *)d8;
- for (i = 0; i < length >> 2; i++)
- /* Process 32 bits data */
- writel_relaxed(*(d32++), crc->regs + CRC_DR);
+ for (; length >= sizeof(u32); d8 += sizeof(u32), length -= sizeof(u32))
+ writel_relaxed(*((u32 *)d8), crc->regs + CRC_DR);
+
+ if (length) {
+ /* Configure for byte data */
+ writel_relaxed(CRC_CR_REV_IN_BYTE | CRC_CR_REV_OUT,
+ crc->regs + CRC_CR);
+ while (length--)
+ writeb_relaxed(*d8++, crc->regs + CRC_DR);
+ }
/* Store partial result */
ctx->partial = readl_relaxed(crc->regs + CRC_DR);
+ spin_unlock_irqrestore(&crc->lock, flags);
+
pm_runtime_mark_last_busy(crc->dev);
pm_runtime_put_autosuspend(crc->dev);
- /* Check for pending data (non 32 bits) */
- length &= 3;
- if (likely(!length))
- return 0;
+ return 0;
+}
- if ((crc->nb_pending_bytes + length) >= sizeof(u32)) {
- /* Shall not happen */
- dev_err(crc->dev, "Pending data overflow\n");
- return -EINVAL;
- }
+static int stm32_crc_update(struct shash_desc *desc, const u8 *d8,
+ unsigned int length)
+{
+ const unsigned int burst_sz = burst_size;
+ unsigned int rem_sz;
+ const u8 *cur;
+ size_t size;
+ int ret;
- d8 = (const u8 *)d32;
- for (i = 0; i < length; i++)
- /* Store pending data */
- crc->pending_data[crc->nb_pending_bytes++] = *(d8++);
+ if (!burst_sz)
+ return burst_update(desc, d8, length);
+
+ /* Digest first bytes not 32bit aligned at first pass in the loop */
+ size = min(length,
+ burst_sz + (unsigned int)d8 - ALIGN_DOWN((unsigned int)d8,
+ sizeof(u32)));
+ for (rem_sz = length, cur = d8; rem_sz;
+ rem_sz -= size, cur += size, size = min(rem_sz, burst_sz)) {
+ ret = burst_update(desc, cur, size);
+ if (ret)
+ return ret;
+ }
return 0;
}
@@ -202,6 +254,8 @@ static int stm32_crc_digest(struct shash_desc *desc, const u8 *data,
return stm32_crc_init(desc) ?: stm32_crc_finup(desc, data, length, out);
}
+static unsigned int refcnt;
+static DEFINE_MUTEX(refcnt_lock);
static struct shash_alg algs[] = {
/* CRC-32 */
{
@@ -284,20 +338,29 @@ static int stm32_crc_probe(struct platform_device *pdev)
pm_runtime_get_noresume(dev);
pm_runtime_set_active(dev);
+ pm_runtime_irq_safe(dev);
pm_runtime_enable(dev);
+ spin_lock_init(&crc->lock);
+
platform_set_drvdata(pdev, crc);
spin_lock(&crc_list.lock);
list_add(&crc->list, &crc_list.dev_list);
spin_unlock(&crc_list.lock);
- ret = crypto_register_shashes(algs, ARRAY_SIZE(algs));
- if (ret) {
- dev_err(dev, "Failed to register\n");
- clk_disable_unprepare(crc->clk);
- return ret;
+ mutex_lock(&refcnt_lock);
+ if (!refcnt) {
+ ret = crypto_register_shashes(algs, ARRAY_SIZE(algs));
+ if (ret) {
+ mutex_unlock(&refcnt_lock);
+ dev_err(dev, "Failed to register\n");
+ clk_disable_unprepare(crc->clk);
+ return ret;
+ }
}
+ refcnt++;
+ mutex_unlock(&refcnt_lock);
dev_info(dev, "Initialized\n");
@@ -318,7 +381,10 @@ static int stm32_crc_remove(struct platform_device *pdev)
list_del(&crc->list);
spin_unlock(&crc_list.lock);
- crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
+ mutex_lock(&refcnt_lock);
+ if (!--refcnt)
+ crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
+ mutex_unlock(&refcnt_lock);
pm_runtime_disable(crc->dev);
pm_runtime_put_noidle(crc->dev);
@@ -328,34 +394,60 @@ static int stm32_crc_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-static int stm32_crc_runtime_suspend(struct device *dev)
+static int __maybe_unused stm32_crc_suspend(struct device *dev)
{
struct stm32_crc *crc = dev_get_drvdata(dev);
+ int ret;
- clk_disable_unprepare(crc->clk);
+ ret = pm_runtime_force_suspend(dev);
+ if (ret)
+ return ret;
+
+ clk_unprepare(crc->clk);
return 0;
}
-static int stm32_crc_runtime_resume(struct device *dev)
+static int __maybe_unused stm32_crc_resume(struct device *dev)
{
struct stm32_crc *crc = dev_get_drvdata(dev);
int ret;
- ret = clk_prepare_enable(crc->clk);
+ ret = clk_prepare(crc->clk);
if (ret) {
- dev_err(crc->dev, "Failed to prepare_enable clock\n");
+ dev_err(crc->dev, "Failed to prepare clock\n");
+ return ret;
+ }
+
+ return pm_runtime_force_resume(dev);
+}
+
+static int __maybe_unused stm32_crc_runtime_suspend(struct device *dev)
+{
+ struct stm32_crc *crc = dev_get_drvdata(dev);
+
+ clk_disable(crc->clk);
+
+ return 0;
+}
+
+static int __maybe_unused stm32_crc_runtime_resume(struct device *dev)
+{
+ struct stm32_crc *crc = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_enable(crc->clk);
+ if (ret) {
+ dev_err(crc->dev, "Failed to enable clock\n");
return ret;
}
return 0;
}
-#endif
static const struct dev_pm_ops stm32_crc_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
+ SET_SYSTEM_SLEEP_PM_OPS(stm32_crc_suspend,
+ stm32_crc_resume)
SET_RUNTIME_PM_OPS(stm32_crc_runtime_suspend,
stm32_crc_runtime_resume, NULL)
};
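
burst_update() no longer buffers unaligned bytes between calls; instead each call pushes the misaligned head and tail of the buffer through the 8-bit data register and the aligned middle as 32-bit words. Reduced to its data-feeding core, with names as in the hunk (the CRC_CR mode switches between the phases are omitted here):

    /* head: byte writes until d8 reaches 32-bit alignment */
    while (d8 != PTR_ALIGN(d8, sizeof(u32)) && length) {
        writeb_relaxed(*d8++, crc->regs + CRC_DR);
        length--;
    }
    /* middle: whole words */
    for (; length >= sizeof(u32); d8 += sizeof(u32), length -= sizeof(u32))
        writel_relaxed(*((u32 *)d8), crc->regs + CRC_DR);
    /* tail: remaining bytes */
    while (length--)
        writeb_relaxed(*d8++, crc->regs + CRC_DR);

The probe/remove hunks also introduce a module-wide refcount so the shared shash algorithms are registered only once across multiple CRC instances.
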
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
index 167b80eec437..03c5e6683805 100644
--- a/drivers/crypto/stm32/stm32-hash.c
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -507,6 +507,7 @@ static int stm32_hash_hmac_dma_send(struct stm32_hash_dev *hdev)
static int stm32_hash_dma_init(struct stm32_hash_dev *hdev)
{
struct dma_slave_config dma_conf;
+ struct dma_chan *chan;
int err;
memset(&dma_conf, 0, sizeof(dma_conf));
@@ -518,11 +519,11 @@ static int stm32_hash_dma_init(struct stm32_hash_dev *hdev)
dma_conf.dst_maxburst = hdev->dma_maxburst;
dma_conf.device_fc = false;
- hdev->dma_lch = dma_request_chan(hdev->dev, "in");
- if (IS_ERR(hdev->dma_lch)) {
- dev_err(hdev->dev, "Couldn't acquire a slave DMA channel.\n");
- return PTR_ERR(hdev->dma_lch);
- }
+ chan = dma_request_chan(hdev->dev, "in");
+ if (IS_ERR(chan))
+ return PTR_ERR(chan);
+
+ hdev->dma_lch = chan;
err = dmaengine_slave_config(hdev->dma_lch, &dma_conf);
if (err) {
@@ -1463,8 +1464,11 @@ static int stm32_hash_probe(struct platform_device *pdev)
hdev->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(hdev->clk)) {
- dev_err(dev, "failed to get clock for hash (%lu)\n",
- PTR_ERR(hdev->clk));
+ if (PTR_ERR(hdev->clk) != -EPROBE_DEFER) {
+ dev_err(dev, "failed to get clock for hash (%lu)\n",
+ PTR_ERR(hdev->clk));
+ }
+
return PTR_ERR(hdev->clk);
}
@@ -1482,7 +1486,12 @@ static int stm32_hash_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
hdev->rst = devm_reset_control_get(&pdev->dev, NULL);
- if (!IS_ERR(hdev->rst)) {
+ if (IS_ERR(hdev->rst)) {
+ if (PTR_ERR(hdev->rst) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto err_reset;
+ }
+ } else {
reset_control_assert(hdev->rst);
udelay(2);
reset_control_deassert(hdev->rst);
@@ -1493,8 +1502,15 @@ static int stm32_hash_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, hdev);
ret = stm32_hash_dma_init(hdev);
- if (ret)
+ switch (ret) {
+ case 0:
+ break;
+ case -ENOENT:
dev_dbg(dev, "DMA mode not available\n");
+ break;
+ default:
+ goto err_dma;
+ }
spin_lock(&stm32_hash.lock);
list_add_tail(&hdev->list, &stm32_hash.dev_list);
@@ -1532,10 +1548,10 @@ err_engine:
spin_lock(&stm32_hash.lock);
list_del(&hdev->list);
spin_unlock(&stm32_hash.lock);
-
+err_dma:
if (hdev->dma_lch)
dma_release_channel(hdev->dma_lch);
-
+err_reset:
pm_runtime_disable(dev);
pm_runtime_put_noidle(dev);
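
The stm32-hash probe changes are mostly about -EPROBE_DEFER: resources that are not ready yet should not produce error spam, and deferral from the reset controller or the DMA channel must unwind through the new err_reset/err_dma labels instead of being ignored. The quiet-on-defer pattern for the clock, as a sketch:

    hdev->clk = devm_clk_get(&pdev->dev, NULL);
    if (IS_ERR(hdev->clk)) {
        /* the probe will be retried, so only report real failures */
        if (PTR_ERR(hdev->clk) != -EPROBE_DEFER)
            dev_err(dev, "failed to get clock for hash (%ld)\n",
                    PTR_ERR(hdev->clk));
        return PTR_ERR(hdev->clk);
    }
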
diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c
index fd045e64972a..cb8a6ea2a4bc 100644
--- a/drivers/crypto/virtio/virtio_crypto_algs.c
+++ b/drivers/crypto/virtio/virtio_crypto_algs.c
@@ -350,13 +350,18 @@ __virtio_crypto_skcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
int err;
unsigned long flags;
struct scatterlist outhdr, iv_sg, status_sg, **sgs;
- int i;
u64 dst_len;
unsigned int num_out = 0, num_in = 0;
int sg_total;
uint8_t *iv;
+ struct scatterlist *sg;
src_nents = sg_nents_for_len(req->src, req->cryptlen);
+ if (src_nents < 0) {
+ pr_err("Invalid number of src SG.\n");
+ return src_nents;
+ }
+
dst_nents = sg_nents(req->dst);
pr_debug("virtio_crypto: Number of sgs (src_nents: %d, dst_nents: %d)\n",
@@ -402,6 +407,7 @@ __virtio_crypto_skcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
goto free;
}
+ dst_len = min_t(unsigned int, req->cryptlen, dst_len);
pr_debug("virtio_crypto: src_len: %u, dst_len: %llu\n",
req->cryptlen, dst_len);
@@ -442,12 +448,12 @@ __virtio_crypto_skcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
vc_sym_req->iv = iv;
/* Source data */
- for (i = 0; i < src_nents; i++)
- sgs[num_out++] = &req->src[i];
+ for (sg = req->src; src_nents; sg = sg_next(sg), src_nents--)
+ sgs[num_out++] = sg;
/* Destination data */
- for (i = 0; i < dst_nents; i++)
- sgs[num_out + num_in++] = &req->dst[i];
+ for (sg = req->dst; sg; sg = sg_next(sg))
+ sgs[num_out + num_in++] = sg;
/* Status */
sg_init_one(&status_sg, &vc_req->status, sizeof(vc_req->status));
@@ -577,10 +583,11 @@ static void virtio_crypto_skcipher_finalize_req(
scatterwalk_map_and_copy(req->iv, req->dst,
req->cryptlen - AES_BLOCK_SIZE,
AES_BLOCK_SIZE, 0);
- crypto_finalize_skcipher_request(vc_sym_req->base.dataq->engine,
- req, err);
kzfree(vc_sym_req->iv);
virtcrypto_clear_request(&vc_sym_req->base);
+
+ crypto_finalize_skcipher_request(vc_sym_req->base.dataq->engine,
+ req, err);
}
static struct virtio_crypto_algo virtio_crypto_algs[] = { {
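
The virtio-crypto fix walks the source and destination scatterlists with sg_next() instead of array indexing; &req->src[i] is only valid while the list is a flat array, whereas sg_next() also follows chain links. It also checks sg_nents_for_len(), which returns a negative errno when the list is shorter than the requested length, and it finalizes the request only after the per-request state has been freed, since the completion it triggers may free or reuse the request. Sketch of the corrected walk:

    struct scatterlist *sg;
    int src_nents = sg_nents_for_len(req->src, req->cryptlen);

    if (src_nents < 0)
        return src_nents;   /* list shorter than cryptlen */

    for (sg = req->src; src_nents; sg = sg_next(sg), src_nents--)
        sgs[num_out++] = sg;
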
diff --git a/drivers/crypto/xilinx/zynqmp-aes-gcm.c b/drivers/crypto/xilinx/zynqmp-aes-gcm.c
index 09f7f468eef8..cd11558893cd 100644
--- a/drivers/crypto/xilinx/zynqmp-aes-gcm.c
+++ b/drivers/crypto/xilinx/zynqmp-aes-gcm.c
@@ -46,7 +46,6 @@ struct zynqmp_aead_drv_ctx {
} alg;
struct device *dev;
struct crypto_engine *engine;
- const struct zynqmp_eemi_ops *eemi_ops;
};
struct zynqmp_aead_hw_req {
@@ -80,21 +79,15 @@ static int zynqmp_aes_aead_cipher(struct aead_request *req)
struct zynqmp_aead_tfm_ctx *tfm_ctx = crypto_aead_ctx(aead);
struct zynqmp_aead_req_ctx *rq_ctx = aead_request_ctx(req);
struct device *dev = tfm_ctx->dev;
- struct aead_alg *alg = crypto_aead_alg(aead);
- struct zynqmp_aead_drv_ctx *drv_ctx;
struct zynqmp_aead_hw_req *hwreq;
dma_addr_t dma_addr_data, dma_addr_hw_req;
unsigned int data_size;
unsigned int status;
+ int ret;
size_t dma_size;
char *kbuf;
int err;
- drv_ctx = container_of(alg, struct zynqmp_aead_drv_ctx, alg.aead);
-
- if (!drv_ctx->eemi_ops->aes)
- return -ENOTSUPP;
-
if (tfm_ctx->keysrc == ZYNQMP_AES_KUP_KEY)
dma_size = req->cryptlen + ZYNQMP_AES_KEY_SIZE
+ GCM_AES_IV_SIZE;
@@ -136,9 +129,12 @@ static int zynqmp_aes_aead_cipher(struct aead_request *req)
hwreq->key = 0;
}
- drv_ctx->eemi_ops->aes(dma_addr_hw_req, &status);
+ ret = zynqmp_pm_aes_engine(dma_addr_hw_req, &status);
- if (status) {
+ if (ret) {
+ dev_err(dev, "ERROR: AES PM API failed\n");
+ err = ret;
+ } else if (status) {
switch (status) {
case ZYNQMP_AES_GCM_TAG_MISMATCH_ERR:
dev_err(dev, "ERROR: Gcm Tag mismatch\n");
@@ -388,12 +384,6 @@ static int zynqmp_aes_aead_probe(struct platform_device *pdev)
else
return -ENODEV;
- aes_drv_ctx.eemi_ops = zynqmp_pm_get_eemi_ops();
- if (IS_ERR(aes_drv_ctx.eemi_ops)) {
- dev_err(dev, "Failed to get ZynqMP EEMI interface\n");
- return PTR_ERR(aes_drv_ctx.eemi_ops);
- }
-
err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(ZYNQMP_DMA_BIT_MASK));
if (err < 0) {
dev_err(dev, "No usable DMA configuration\n");
diff --git a/drivers/dax/dax-private.h b/drivers/dax/dax-private.h
index 3107ce80e809..16850d5388ab 100644
--- a/drivers/dax/dax-private.h
+++ b/drivers/dax/dax-private.h
@@ -44,6 +44,7 @@ struct dax_region {
* @dev - device core
* @pgmap - pgmap for memmap setup / lifetime (driver owned)
* @dax_mem_res: physical address range of hotadded DAX memory
+ * @dax_mem_name: name for hotadded DAX memory via add_memory_driver_managed()
*/
struct dev_dax {
struct dax_region *region;
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index 1af823b2fe6b..4c0af2eb7e19 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -377,6 +377,7 @@ static int dax_open(struct inode *inode, struct file *filp)
inode->i_mapping->a_ops = &dev_dax_aops;
filp->f_mapping = inode->i_mapping;
filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping);
+ filp->f_sb_err = file_sample_sb_err(filp);
filp->private_data = dev_dax;
inode->i_flags = S_DAX;
diff --git a/drivers/dax/kmem.c b/drivers/dax/kmem.c
index 1e678bdf5aed..275aa5f87399 100644
--- a/drivers/dax/kmem.c
+++ b/drivers/dax/kmem.c
@@ -14,6 +14,11 @@
#include "dax-private.h"
#include "bus.h"
+/* Memory resource name used for add_memory_driver_managed(). */
+static const char *kmem_name;
+/* Set if any memory will remain added when the driver will be unloaded. */
+static bool any_hotremove_failed;
+
int dev_dax_kmem_probe(struct device *dev)
{
struct dev_dax *dev_dax = to_dev_dax(dev);
@@ -70,7 +75,12 @@ int dev_dax_kmem_probe(struct device *dev)
*/
new_res->flags = IORESOURCE_SYSTEM_RAM;
- rc = add_memory(numa_node, new_res->start, resource_size(new_res));
+ /*
+ * Ensure that future kexec'd kernels will not treat this as RAM
+ * automatically.
+ */
+ rc = add_memory_driver_managed(numa_node, new_res->start,
+ resource_size(new_res), kmem_name);
if (rc) {
release_resource(new_res);
kfree(new_res);
@@ -100,6 +110,7 @@ static int dev_dax_kmem_remove(struct device *dev)
*/
rc = remove_memory(dev_dax->target_node, kmem_start, kmem_size);
if (rc) {
+ any_hotremove_failed = true;
dev_err(dev,
"DAX region %pR cannot be hotremoved until the next reboot\n",
res);
@@ -124,6 +135,7 @@ static int dev_dax_kmem_remove(struct device *dev)
* permanently pinned as reserved by the unreleased
* request_mem_region().
*/
+ any_hotremove_failed = true;
return 0;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
@@ -137,12 +149,24 @@ static struct dax_device_driver device_dax_kmem_driver = {
static int __init dax_kmem_init(void)
{
- return dax_driver_register(&device_dax_kmem_driver);
+ int rc;
+
+ /* Resource name is permanently allocated if any hotremove fails. */
+ kmem_name = kstrdup_const("System RAM (kmem)", GFP_KERNEL);
+ if (!kmem_name)
+ return -ENOMEM;
+
+ rc = dax_driver_register(&device_dax_kmem_driver);
+ if (rc)
+ kfree_const(kmem_name);
+ return rc;
}
static void __exit dax_kmem_exit(void)
{
dax_driver_unregister(&device_dax_kmem_driver);
+ if (!any_hotremove_failed)
+ kfree_const(kmem_name);
}
MODULE_AUTHOR("Intel Corporation");
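
Two details carry the weight in the kmem change: add_memory_driver_managed() records the hot-added range under a driver-supplied resource name so that tooling (kexec in particular) can tell driver-managed memory from boot RAM, and that name must stay allocated for as long as the memory stays added. kstrdup_const()/kfree_const() make the lifetime explicit without copying the string literal. Sketch of the pairing, with the call arguments as in the hunks:

    kmem_name = kstrdup_const("System RAM (kmem)", GFP_KERNEL);
    if (!kmem_name)
        return -ENOMEM;

    rc = add_memory_driver_managed(numa_node, new_res->start,
                                   resource_size(new_res), kmem_name);

    /* on module exit, free the name only if every range was removed */
    if (!any_hotremove_failed)
        kfree_const(kmem_name);
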
diff --git a/drivers/dca/dca-sysfs.c b/drivers/dca/dca-sysfs.c
index eb25627b059d..21ebd0af268b 100644
--- a/drivers/dca/dca-sysfs.c
+++ b/drivers/dca/dca-sysfs.c
@@ -24,9 +24,7 @@ int dca_sysfs_add_req(struct dca_provider *dca, struct device *dev, int slot)
cd = device_create(dca_class, dca->cd, MKDEV(0, slot + 1), NULL,
"requester%d", req_count++);
- if (IS_ERR(cd))
- return PTR_ERR(cd);
- return 0;
+ return PTR_ERR_OR_ZERO(cd);
}
void dca_sysfs_remove_req(struct dca_provider *dca, int slot)
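
PTR_ERR_OR_ZERO() folds the usual IS_ERR() check into a single expression; the replaced lines and their one-line equivalent:

    if (IS_ERR(cd))
        return PTR_ERR(cd);
    return 0;

    /* becomes */
    return PTR_ERR_OR_ZERO(cd);
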
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index 0b1df12e0f21..37dc40d1fcfb 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -91,6 +91,14 @@ config ARM_EXYNOS_BUS_DEVFREQ
and adjusts the operating frequencies and voltages with OPP support.
This does not yet operate with optimal voltages.
+config ARM_IMX_BUS_DEVFREQ
+ tristate "i.MX Generic Bus DEVFREQ Driver"
+ depends on ARCH_MXC || COMPILE_TEST
+ select DEVFREQ_GOV_USERSPACE
+ help
+ This adds the generic DEVFREQ driver for i.MX interconnects. It
+ allows adjusting NIC/NOC frequency.
+
config ARM_IMX8M_DDRC_DEVFREQ
tristate "i.MX8M DDRC DEVFREQ Driver"
depends on (ARCH_MXC && HAVE_ARM_SMCCC) || \
diff --git a/drivers/devfreq/Makefile b/drivers/devfreq/Makefile
index 3eb4d5e6635c..3ca1ad0ecb97 100644
--- a/drivers/devfreq/Makefile
+++ b/drivers/devfreq/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_DEVFREQ_GOV_PASSIVE) += governor_passive.o
# DEVFREQ Drivers
obj-$(CONFIG_ARM_EXYNOS_BUS_DEVFREQ) += exynos-bus.o
+obj-$(CONFIG_ARM_IMX_BUS_DEVFREQ) += imx-bus.o
obj-$(CONFIG_ARM_IMX8M_DDRC_DEVFREQ) += imx8m-ddrc.o
obj-$(CONFIG_ARM_RK3399_DMC_DEVFREQ) += rk3399_dmc.o
obj-$(CONFIG_ARM_TEGRA_DEVFREQ) += tegra30-devfreq.o
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 6fecd11dafdd..52b9c3e141f3 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -60,12 +60,12 @@ static struct devfreq *find_device_devfreq(struct device *dev)
{
struct devfreq *tmp_devfreq;
+ lockdep_assert_held(&devfreq_list_lock);
+
if (IS_ERR_OR_NULL(dev)) {
pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
return ERR_PTR(-EINVAL);
}
- WARN(!mutex_is_locked(&devfreq_list_lock),
- "devfreq_list_lock must be locked.");
list_for_each_entry(tmp_devfreq, &devfreq_list, node) {
if (tmp_devfreq->dev.parent == dev)
@@ -258,12 +258,12 @@ static struct devfreq_governor *find_devfreq_governor(const char *name)
{
struct devfreq_governor *tmp_governor;
+ lockdep_assert_held(&devfreq_list_lock);
+
if (IS_ERR_OR_NULL(name)) {
pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
return ERR_PTR(-EINVAL);
}
- WARN(!mutex_is_locked(&devfreq_list_lock),
- "devfreq_list_lock must be locked.");
list_for_each_entry(tmp_governor, &devfreq_governor_list, node) {
if (!strncmp(tmp_governor->name, name, DEVFREQ_NAME_LEN))
@@ -289,12 +289,12 @@ static struct devfreq_governor *try_then_request_governor(const char *name)
struct devfreq_governor *governor;
int err = 0;
+ lockdep_assert_held(&devfreq_list_lock);
+
if (IS_ERR_OR_NULL(name)) {
pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
return ERR_PTR(-EINVAL);
}
- WARN(!mutex_is_locked(&devfreq_list_lock),
- "devfreq_list_lock must be locked.");
governor = find_devfreq_governor(name);
if (IS_ERR(governor)) {
@@ -392,10 +392,7 @@ int update_devfreq(struct devfreq *devfreq)
int err = 0;
u32 flags = 0;
- if (!mutex_is_locked(&devfreq->lock)) {
- WARN(true, "devfreq->lock must be locked by the caller.\n");
- return -EINVAL;
- }
+ lockdep_assert_held(&devfreq->lock);
if (!devfreq->governor)
return -EINVAL;
@@ -768,7 +765,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
devfreq->dev.release = devfreq_dev_release;
INIT_LIST_HEAD(&devfreq->node);
devfreq->profile = profile;
- strncpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN);
+ strscpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN);
devfreq->previous_freq = profile->initial_freq;
devfreq->last_status.current_frequency = profile->initial_freq;
devfreq->data = data;
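
The devfreq core swaps the open-coded WARN(!mutex_is_locked(...)) checks for lockdep_assert_held(), which documents the locking contract at the top of each helper, compiles away when lockdep is disabled, and produces a proper lockdep splat when a caller really does enter without the lock. strscpy() also replaces strncpy() for the governor name, since it always NUL-terminates within DEVFREQ_NAME_LEN. Minimal form of the assertion idiom:

    static struct devfreq *find_device_devfreq(struct device *dev)
    {
        lockdep_assert_held(&devfreq_list_lock);
        /* ... walk devfreq_list knowing the lock is held ... */
    }
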
diff --git a/drivers/devfreq/imx-bus.c b/drivers/devfreq/imx-bus.c
new file mode 100644
index 000000000000..4f38455ad742
--- /dev/null
+++ b/drivers/devfreq/imx-bus.c
@@ -0,0 +1,179 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 NXP
+ */
+
+#include <linux/clk.h>
+#include <linux/devfreq.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/pm_opp.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+struct imx_bus {
+ struct devfreq_dev_profile profile;
+ struct devfreq *devfreq;
+ struct clk *clk;
+ struct platform_device *icc_pdev;
+};
+
+static int imx_bus_target(struct device *dev,
+ unsigned long *freq, u32 flags)
+{
+ struct dev_pm_opp *new_opp;
+ int ret;
+
+ new_opp = devfreq_recommended_opp(dev, freq, flags);
+ if (IS_ERR(new_opp)) {
+ ret = PTR_ERR(new_opp);
+ dev_err(dev, "failed to get recommended opp: %d\n", ret);
+ return ret;
+ }
+ dev_pm_opp_put(new_opp);
+
+ return dev_pm_opp_set_rate(dev, *freq);
+}
+
+static int imx_bus_get_cur_freq(struct device *dev, unsigned long *freq)
+{
+ struct imx_bus *priv = dev_get_drvdata(dev);
+
+ *freq = clk_get_rate(priv->clk);
+
+ return 0;
+}
+
+static int imx_bus_get_dev_status(struct device *dev,
+ struct devfreq_dev_status *stat)
+{
+ struct imx_bus *priv = dev_get_drvdata(dev);
+
+ stat->busy_time = 0;
+ stat->total_time = 0;
+ stat->current_frequency = clk_get_rate(priv->clk);
+
+ return 0;
+}
+
+static void imx_bus_exit(struct device *dev)
+{
+ struct imx_bus *priv = dev_get_drvdata(dev);
+
+ dev_pm_opp_of_remove_table(dev);
+ platform_device_unregister(priv->icc_pdev);
+}
+
+/* imx_bus_init_icc() - register matching icc provider if required */
+static int imx_bus_init_icc(struct device *dev)
+{
+ struct imx_bus *priv = dev_get_drvdata(dev);
+ const char *icc_driver_name;
+
+ if (!of_get_property(dev->of_node, "#interconnect-cells", 0))
+ return 0;
+ if (!IS_ENABLED(CONFIG_INTERCONNECT_IMX)) {
+ dev_warn(dev, "imx interconnect drivers disabled\n");
+ return 0;
+ }
+
+ icc_driver_name = of_device_get_match_data(dev);
+ if (!icc_driver_name) {
+ dev_err(dev, "unknown interconnect driver\n");
+ return 0;
+ }
+
+ priv->icc_pdev = platform_device_register_data(
+ dev, icc_driver_name, -1, NULL, 0);
+ if (IS_ERR(priv->icc_pdev)) {
+ dev_err(dev, "failed to register icc provider %s: %ld\n",
+ icc_driver_name, PTR_ERR(priv->icc_pdev));
+ return PTR_ERR(priv->icc_pdev);
+ }
+
+ return 0;
+}
+
+static int imx_bus_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct imx_bus *priv;
+ const char *gov = DEVFREQ_GOV_USERSPACE;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /*
+ * Fetch the clock to adjust but don't explicitly enable.
+ *
+ * For imx bus clock clk_set_rate is safe no matter if the clock is on
+ * or off and some peripheral side-buses might be off unless enabled by
+ * drivers for devices on those specific buses.
+ *
+ * Rate adjustment on a disabled bus clock just takes effect later.
+ */
+ priv->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ ret = PTR_ERR(priv->clk);
+ dev_err(dev, "failed to fetch clk: %d\n", ret);
+ return ret;
+ }
+ platform_set_drvdata(pdev, priv);
+
+ ret = dev_pm_opp_of_add_table(dev);
+ if (ret < 0) {
+ dev_err(dev, "failed to get OPP table\n");
+ return ret;
+ }
+
+ priv->profile.polling_ms = 1000;
+ priv->profile.target = imx_bus_target;
+ priv->profile.get_dev_status = imx_bus_get_dev_status;
+ priv->profile.exit = imx_bus_exit;
+ priv->profile.get_cur_freq = imx_bus_get_cur_freq;
+ priv->profile.initial_freq = clk_get_rate(priv->clk);
+
+ priv->devfreq = devm_devfreq_add_device(dev, &priv->profile,
+ gov, NULL);
+ if (IS_ERR(priv->devfreq)) {
+ ret = PTR_ERR(priv->devfreq);
+ dev_err(dev, "failed to add devfreq device: %d\n", ret);
+ goto err;
+ }
+
+ ret = imx_bus_init_icc(dev);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ dev_pm_opp_of_remove_table(dev);
+ return ret;
+}
+
+static const struct of_device_id imx_bus_of_match[] = {
+ { .compatible = "fsl,imx8mq-noc", .data = "imx8mq-interconnect", },
+ { .compatible = "fsl,imx8mm-noc", .data = "imx8mm-interconnect", },
+ { .compatible = "fsl,imx8mn-noc", .data = "imx8mn-interconnect", },
+ { .compatible = "fsl,imx8m-noc", },
+ { .compatible = "fsl,imx8m-nic", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, imx_bus_of_match);
+
+static struct platform_driver imx_bus_platdrv = {
+ .probe = imx_bus_probe,
+ .driver = {
+ .name = "imx-bus-devfreq",
+ .of_match_table = of_match_ptr(imx_bus_of_match),
+ },
+};
+module_platform_driver(imx_bus_platdrv);
+
+MODULE_DESCRIPTION("Generic i.MX bus frequency scaling driver");
+MODULE_AUTHOR("Leonard Crestez <leonard.crestez@nxp.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/devfreq/tegra30-devfreq.c b/drivers/devfreq/tegra30-devfreq.c
index 28b2c7ca416e..e94a27804c20 100644
--- a/drivers/devfreq/tegra30-devfreq.c
+++ b/drivers/devfreq/tegra30-devfreq.c
@@ -420,7 +420,7 @@ tegra_actmon_cpufreq_contribution(struct tegra_devfreq *tegra,
static_cpu_emc_freq = actmon_cpu_to_emc_rate(tegra, cpu_freq);
- if (dev_freq >= static_cpu_emc_freq)
+ if (dev_freq + actmon_dev->boost_freq >= static_cpu_emc_freq)
return 0;
return static_cpu_emc_freq;
@@ -807,10 +807,9 @@ static int tegra_devfreq_probe(struct platform_device *pdev)
}
err = platform_get_irq(pdev, 0);
- if (err < 0) {
- dev_err(&pdev->dev, "Failed to get IRQ: %d\n", err);
+ if (err < 0)
return err;
- }
+
tegra->irq = err;
irq_set_status_flags(tegra->irq, IRQ_NOAUTOEN);
diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile
index 9c190026bfab..995e05f609ff 100644
--- a/drivers/dma-buf/Makefile
+++ b/drivers/dma-buf/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_UDMABUF) += udmabuf.o
dmabuf_selftests-y := \
selftest.o \
- st-dma-fence.o
+ st-dma-fence.o \
+ st-dma-fence-chain.o
obj-$(CONFIG_DMABUF_SELFTESTS) += dmabuf_selftests.o
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 07df88f2e305..01ce125f8e8d 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -691,6 +691,8 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
attach->dev = dev;
attach->dmabuf = dmabuf;
+ if (importer_ops)
+ attach->peer2peer = importer_ops->allow_peer2peer;
attach->importer_ops = importer_ops;
attach->importer_priv = importer_priv;
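
The new peer2peer flag is captured from the importer's ops at attach time: an importer that can consume bus addresses which are not CPU-accessible advertises allow_peer2peer, and the exporter may then leave the backing storage in device memory. A hedged sketch of how an importer might opt in (the ops name and callback below are illustrative, not part of this patch):

    #include <linux/dma-buf.h>

    static void example_move_notify(struct dma_buf_attachment *attach)
    {
            /* Drop cached mappings; they are rebuilt on the next map call. */
    }

    static const struct dma_buf_attach_ops example_importer_ops = {
            .allow_peer2peer = true,        /* importer copes with P2P bus addresses */
            .move_notify = example_move_notify,
    };

    /* Passed as importer_ops to dma_buf_dynamic_attach(dmabuf, dev, &example_importer_ops, priv). */
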
diff --git a/drivers/dma-buf/dma-fence-chain.c b/drivers/dma-buf/dma-fence-chain.c
index 44a741677d25..c435bbba851c 100644
--- a/drivers/dma-buf/dma-fence-chain.c
+++ b/drivers/dma-buf/dma-fence-chain.c
@@ -62,7 +62,8 @@ struct dma_fence *dma_fence_chain_walk(struct dma_fence *fence)
replacement = NULL;
}
- tmp = cmpxchg((void **)&chain->prev, (void *)prev, (void *)replacement);
+ tmp = cmpxchg((struct dma_fence __force **)&chain->prev,
+ prev, replacement);
if (tmp == prev)
dma_fence_put(tmp);
else
@@ -98,6 +99,12 @@ int dma_fence_chain_find_seqno(struct dma_fence **pfence, uint64_t seqno)
return -EINVAL;
dma_fence_chain_for_each(*pfence, &chain->base) {
+ if ((*pfence)->seqno < seqno) { /* already signaled */
+ dma_fence_put(*pfence);
+ *pfence = NULL;
+ break;
+ }
+
if ((*pfence)->context != chain->base.context ||
to_dma_fence_chain(*pfence)->prev_seqno < seqno)
break;
@@ -221,6 +228,7 @@ EXPORT_SYMBOL(dma_fence_chain_ops);
* @chain: the chain node to initialize
* @prev: the previous fence
* @fence: the current fence
+ * @seqno: the sequence number (syncpt) of the fence within the chain
*
* Initialize a new chain node and either start a new chain or add the node to
* the existing chain of the previous fence.
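
With the early-exit added above, dma_fence_chain_find_seqno() now returns with *pfence set to NULL when every fence up to the requested seqno has already signalled, so callers have three outcomes to handle: an error for a seqno not yet part of the chain, NULL for an already-completed point, or a chain node to wait on. A hedged usage sketch (the helper name is made up; the put-on-error mirrors what the new selftests do):

    #include <linux/dma-fence.h>
    #include <linux/dma-fence-chain.h>

    static int example_wait_for_point(struct dma_fence *tail, u64 seqno)
    {
            struct dma_fence *fence = dma_fence_get(tail);
            int err;

            err = dma_fence_chain_find_seqno(&fence, seqno);
            if (err) {
                    dma_fence_put(fence);   /* seqno lies in the future */
                    return err;
            }
            if (!fence)
                    return 0;               /* already signalled, nothing left to wait on */

            err = dma_fence_wait(fence, false);
            dma_fence_put(fence);
            return err;
    }
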
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 052a41e2451c..90edf2b281b0 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -106,7 +106,7 @@ EXPORT_SYMBOL(dma_fence_get_stub);
u64 dma_fence_context_alloc(unsigned num)
{
WARN_ON(!num);
- return atomic64_add_return(num, &dma_fence_context_counter) - num;
+ return atomic64_fetch_add(num, &dma_fence_context_counter);
}
EXPORT_SYMBOL(dma_fence_context_alloc);
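
atomic64_fetch_add() returns the counter value from before the addition, which is exactly what atomic64_add_return(num, ...) - num computed; the new form simply drops the redundant subtraction. As a purely illustrative check: with the counter at 10 and num = 4, both expressions yield 10 and leave the counter at 14, so the allocated context range is unchanged.
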
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index 4264e64788c4..b45f8514dc82 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -34,6 +34,7 @@
#include <linux/dma-resv.h>
#include <linux/export.h>
+#include <linux/mm.h>
#include <linux/sched/mm.h>
/**
@@ -109,7 +110,7 @@ static int __init dma_resv_lockdep(void)
dma_resv_init(&obj);
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
ww_acquire_init(&ctx, &reservation_ww_class);
ret = dma_resv_lock(&obj, &ctx);
if (ret == -EDEADLK)
@@ -118,7 +119,7 @@ static int __init dma_resv_lockdep(void)
fs_reclaim_release(GFP_KERNEL);
ww_mutex_unlock(&obj.lock);
ww_acquire_fini(&ctx);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
mmput(mm);
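
mmap_read_lock()/mmap_read_unlock() are the wrapper API replacing direct down_read()/up_read() on mm->mmap_sem, keeping callers independent of how the mmap lock is implemented. A minimal sketch of the pattern (the function is hypothetical):

    #include <linux/mm.h>
    #include <linux/mm_types.h>

    static void example_with_mmap_lock(struct mm_struct *mm)
    {
            mmap_read_lock(mm);
            /* ... walk VMAs or otherwise read mm state under the lock ... */
            mmap_read_unlock(mm);
    }
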
diff --git a/drivers/dma-buf/selftests.h b/drivers/dma-buf/selftests.h
index 5320386f02e5..55918ef9adab 100644
--- a/drivers/dma-buf/selftests.h
+++ b/drivers/dma-buf/selftests.h
@@ -11,3 +11,4 @@
*/
selftest(sanitycheck, __sanitycheck__) /* keep first (igt selfcheck) */
selftest(dma_fence, dma_fence)
+selftest(dma_fence_chain, dma_fence_chain)
diff --git a/drivers/dma-buf/st-dma-fence-chain.c b/drivers/dma-buf/st-dma-fence-chain.c
new file mode 100644
index 000000000000..5d45ba7ba3cd
--- /dev/null
+++ b/drivers/dma-buf/st-dma-fence-chain.c
@@ -0,0 +1,715 @@
+// SPDX-License-Identifier: MIT
+
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-fence.h>
+#include <linux/dma-fence-chain.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/mm.h>
+#include <linux/sched/signal.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/random.h>
+
+#include "selftest.h"
+
+#define CHAIN_SZ (4 << 10)
+
+static struct kmem_cache *slab_fences;
+
+static inline struct mock_fence {
+ struct dma_fence base;
+ spinlock_t lock;
+} *to_mock_fence(struct dma_fence *f) {
+ return container_of(f, struct mock_fence, base);
+}
+
+static const char *mock_name(struct dma_fence *f)
+{
+ return "mock";
+}
+
+static void mock_fence_release(struct dma_fence *f)
+{
+ kmem_cache_free(slab_fences, to_mock_fence(f));
+}
+
+static const struct dma_fence_ops mock_ops = {
+ .get_driver_name = mock_name,
+ .get_timeline_name = mock_name,
+ .release = mock_fence_release,
+};
+
+static struct dma_fence *mock_fence(void)
+{
+ struct mock_fence *f;
+
+ f = kmem_cache_alloc(slab_fences, GFP_KERNEL);
+ if (!f)
+ return NULL;
+
+ spin_lock_init(&f->lock);
+ dma_fence_init(&f->base, &mock_ops, &f->lock, 0, 0);
+
+ return &f->base;
+}
+
+static inline struct mock_chain {
+ struct dma_fence_chain base;
+} *to_mock_chain(struct dma_fence *f) {
+ return container_of(f, struct mock_chain, base.base);
+}
+
+static struct dma_fence *mock_chain(struct dma_fence *prev,
+ struct dma_fence *fence,
+ u64 seqno)
+{
+ struct mock_chain *f;
+
+ f = kmalloc(sizeof(*f), GFP_KERNEL);
+ if (!f)
+ return NULL;
+
+ dma_fence_chain_init(&f->base,
+ dma_fence_get(prev),
+ dma_fence_get(fence),
+ seqno);
+
+ return &f->base.base;
+}
+
+static int sanitycheck(void *arg)
+{
+ struct dma_fence *f, *chain;
+ int err = 0;
+
+ f = mock_fence();
+ if (!f)
+ return -ENOMEM;
+
+ chain = mock_chain(NULL, f, 1);
+ if (!chain)
+ err = -ENOMEM;
+
+ dma_fence_signal(f);
+ dma_fence_put(f);
+
+ dma_fence_put(chain);
+
+ return err;
+}
+
+struct fence_chains {
+ unsigned int chain_length;
+ struct dma_fence **fences;
+ struct dma_fence **chains;
+
+ struct dma_fence *tail;
+};
+
+static uint64_t seqno_inc(unsigned int i)
+{
+ return i + 1;
+}
+
+static int fence_chains_init(struct fence_chains *fc, unsigned int count,
+ uint64_t (*seqno_fn)(unsigned int))
+{
+ unsigned int i;
+ int err = 0;
+
+ fc->chains = kvmalloc_array(count, sizeof(*fc->chains),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!fc->chains)
+ return -ENOMEM;
+
+ fc->fences = kvmalloc_array(count, sizeof(*fc->fences),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!fc->fences) {
+ err = -ENOMEM;
+ goto err_chains;
+ }
+
+ fc->tail = NULL;
+ for (i = 0; i < count; i++) {
+ fc->fences[i] = mock_fence();
+ if (!fc->fences[i]) {
+ err = -ENOMEM;
+ goto unwind;
+ }
+
+ fc->chains[i] = mock_chain(fc->tail,
+ fc->fences[i],
+ seqno_fn(i));
+ if (!fc->chains[i]) {
+ err = -ENOMEM;
+ goto unwind;
+ }
+
+ fc->tail = fc->chains[i];
+ }
+
+ fc->chain_length = i;
+ return 0;
+
+unwind:
+ for (i = 0; i < count; i++) {
+ dma_fence_put(fc->fences[i]);
+ dma_fence_put(fc->chains[i]);
+ }
+ kvfree(fc->fences);
+err_chains:
+ kvfree(fc->chains);
+ return err;
+}
+
+static void fence_chains_fini(struct fence_chains *fc)
+{
+ unsigned int i;
+
+ for (i = 0; i < fc->chain_length; i++) {
+ dma_fence_signal(fc->fences[i]);
+ dma_fence_put(fc->fences[i]);
+ }
+ kvfree(fc->fences);
+
+ for (i = 0; i < fc->chain_length; i++)
+ dma_fence_put(fc->chains[i]);
+ kvfree(fc->chains);
+}
+
+static int find_seqno(void *arg)
+{
+ struct fence_chains fc;
+ struct dma_fence *fence;
+ int err;
+ int i;
+
+ err = fence_chains_init(&fc, 64, seqno_inc);
+ if (err)
+ return err;
+
+ fence = dma_fence_get(fc.tail);
+ err = dma_fence_chain_find_seqno(&fence, 0);
+ dma_fence_put(fence);
+ if (err) {
+ pr_err("Reported %d for find_seqno(0)!\n", err);
+ goto err;
+ }
+
+ for (i = 0; i < fc.chain_length; i++) {
+ fence = dma_fence_get(fc.tail);
+ err = dma_fence_chain_find_seqno(&fence, i + 1);
+ dma_fence_put(fence);
+ if (err) {
+ pr_err("Reported %d for find_seqno(%d:%d)!\n",
+ err, fc.chain_length + 1, i + 1);
+ goto err;
+ }
+ if (fence != fc.chains[i]) {
+ pr_err("Incorrect fence reported by find_seqno(%d:%d)\n",
+ fc.chain_length + 1, i + 1);
+ err = -EINVAL;
+ goto err;
+ }
+
+ dma_fence_get(fence);
+ err = dma_fence_chain_find_seqno(&fence, i + 1);
+ dma_fence_put(fence);
+ if (err) {
+ pr_err("Error reported for finding self\n");
+ goto err;
+ }
+ if (fence != fc.chains[i]) {
+ pr_err("Incorrect fence reported by find self\n");
+ err = -EINVAL;
+ goto err;
+ }
+
+ dma_fence_get(fence);
+ err = dma_fence_chain_find_seqno(&fence, i + 2);
+ dma_fence_put(fence);
+ if (!err) {
+ pr_err("Error not reported for future fence: find_seqno(%d:%d)!\n",
+ i + 1, i + 2);
+ err = -EINVAL;
+ goto err;
+ }
+
+ dma_fence_get(fence);
+ err = dma_fence_chain_find_seqno(&fence, i);
+ dma_fence_put(fence);
+ if (err) {
+ pr_err("Error reported for previous fence!\n");
+ goto err;
+ }
+ if (i > 0 && fence != fc.chains[i - 1]) {
+ pr_err("Incorrect fence reported by find_seqno(%d:%d)\n",
+ i + 1, i);
+ err = -EINVAL;
+ goto err;
+ }
+ }
+
+err:
+ fence_chains_fini(&fc);
+ return err;
+}
+
+static int find_signaled(void *arg)
+{
+ struct fence_chains fc;
+ struct dma_fence *fence;
+ int err;
+
+ err = fence_chains_init(&fc, 2, seqno_inc);
+ if (err)
+ return err;
+
+ dma_fence_signal(fc.fences[0]);
+
+ fence = dma_fence_get(fc.tail);
+ err = dma_fence_chain_find_seqno(&fence, 1);
+ dma_fence_put(fence);
+ if (err) {
+ pr_err("Reported %d for find_seqno()!\n", err);
+ goto err;
+ }
+
+ if (fence && fence != fc.chains[0]) {
+ pr_err("Incorrect chain-fence.seqno:%lld reported for completed seqno:1\n",
+ fence->seqno);
+
+ dma_fence_get(fence);
+ err = dma_fence_chain_find_seqno(&fence, 1);
+ dma_fence_put(fence);
+ if (err)
+ pr_err("Reported %d for finding self!\n", err);
+
+ err = -EINVAL;
+ }
+
+err:
+ fence_chains_fini(&fc);
+ return err;
+}
+
+static int find_out_of_order(void *arg)
+{
+ struct fence_chains fc;
+ struct dma_fence *fence;
+ int err;
+
+ err = fence_chains_init(&fc, 3, seqno_inc);
+ if (err)
+ return err;
+
+ dma_fence_signal(fc.fences[1]);
+
+ fence = dma_fence_get(fc.tail);
+ err = dma_fence_chain_find_seqno(&fence, 2);
+ dma_fence_put(fence);
+ if (err) {
+ pr_err("Reported %d for find_seqno()!\n", err);
+ goto err;
+ }
+
+ if (fence && fence != fc.chains[1]) {
+ pr_err("Incorrect chain-fence.seqno:%lld reported for completed seqno:2\n",
+ fence->seqno);
+
+ dma_fence_get(fence);
+ err = dma_fence_chain_find_seqno(&fence, 2);
+ dma_fence_put(fence);
+ if (err)
+ pr_err("Reported %d for finding self!\n", err);
+
+ err = -EINVAL;
+ }
+
+err:
+ fence_chains_fini(&fc);
+ return err;
+}
+
+static uint64_t seqno_inc2(unsigned int i)
+{
+ return 2 * i + 2;
+}
+
+static int find_gap(void *arg)
+{
+ struct fence_chains fc;
+ struct dma_fence *fence;
+ int err;
+ int i;
+
+ err = fence_chains_init(&fc, 64, seqno_inc2);
+ if (err)
+ return err;
+
+ for (i = 0; i < fc.chain_length; i++) {
+ fence = dma_fence_get(fc.tail);
+ err = dma_fence_chain_find_seqno(&fence, 2 * i + 1);
+ dma_fence_put(fence);
+ if (err) {
+ pr_err("Reported %d for find_seqno(%d:%d)!\n",
+ err, fc.chain_length + 1, 2 * i + 1);
+ goto err;
+ }
+ if (fence != fc.chains[i]) {
+ pr_err("Incorrect fence.seqno:%lld reported by find_seqno(%d:%d)\n",
+ fence->seqno,
+ fc.chain_length + 1,
+ 2 * i + 1);
+ err = -EINVAL;
+ goto err;
+ }
+
+ dma_fence_get(fence);
+ err = dma_fence_chain_find_seqno(&fence, 2 * i + 2);
+ dma_fence_put(fence);
+ if (err) {
+ pr_err("Error reported for finding self\n");
+ goto err;
+ }
+ if (fence != fc.chains[i]) {
+ pr_err("Incorrect fence reported by find self\n");
+ err = -EINVAL;
+ goto err;
+ }
+ }
+
+err:
+ fence_chains_fini(&fc);
+ return err;
+}
+
+struct find_race {
+ struct fence_chains fc;
+ atomic_t children;
+};
+
+static int __find_race(void *arg)
+{
+ struct find_race *data = arg;
+ int err = 0;
+
+ while (!kthread_should_stop()) {
+ struct dma_fence *fence = dma_fence_get(data->fc.tail);
+ int seqno;
+
+ seqno = prandom_u32_max(data->fc.chain_length) + 1;
+
+ err = dma_fence_chain_find_seqno(&fence, seqno);
+ if (err) {
+ pr_err("Failed to find fence seqno:%d\n",
+ seqno);
+ dma_fence_put(fence);
+ break;
+ }
+ if (!fence)
+ goto signal;
+
+ err = dma_fence_chain_find_seqno(&fence, seqno);
+ if (err) {
+ pr_err("Reported an invalid fence for find-self:%d\n",
+ seqno);
+ dma_fence_put(fence);
+ break;
+ }
+
+ if (fence->seqno < seqno) {
+ pr_err("Reported an earlier fence.seqno:%lld for seqno:%d\n",
+ fence->seqno, seqno);
+ err = -EINVAL;
+ dma_fence_put(fence);
+ break;
+ }
+
+ dma_fence_put(fence);
+
+signal:
+ seqno = prandom_u32_max(data->fc.chain_length - 1);
+ dma_fence_signal(data->fc.fences[seqno]);
+ cond_resched();
+ }
+
+ if (atomic_dec_and_test(&data->children))
+ wake_up_var(&data->children);
+ return err;
+}
+
+static int find_race(void *arg)
+{
+ struct find_race data;
+ int ncpus = num_online_cpus();
+ struct task_struct **threads;
+ unsigned long count;
+ int err;
+ int i;
+
+ err = fence_chains_init(&data.fc, CHAIN_SZ, seqno_inc);
+ if (err)
+ return err;
+
+ threads = kmalloc_array(ncpus, sizeof(*threads), GFP_KERNEL);
+ if (!threads) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ atomic_set(&data.children, 0);
+ for (i = 0; i < ncpus; i++) {
+ threads[i] = kthread_run(__find_race, &data, "dmabuf/%d", i);
+ if (IS_ERR(threads[i])) {
+ ncpus = i;
+ break;
+ }
+ atomic_inc(&data.children);
+ get_task_struct(threads[i]);
+ }
+
+ wait_var_event_timeout(&data.children,
+ !atomic_read(&data.children),
+ 5 * HZ);
+
+ for (i = 0; i < ncpus; i++) {
+ int ret;
+
+ ret = kthread_stop(threads[i]);
+ if (ret && !err)
+ err = ret;
+ put_task_struct(threads[i]);
+ }
+ kfree(threads);
+
+ count = 0;
+ for (i = 0; i < data.fc.chain_length; i++)
+ if (dma_fence_is_signaled(data.fc.fences[i]))
+ count++;
+ pr_info("Completed %lu cycles\n", count);
+
+err:
+ fence_chains_fini(&data.fc);
+ return err;
+}
+
+static int signal_forward(void *arg)
+{
+ struct fence_chains fc;
+ int err;
+ int i;
+
+ err = fence_chains_init(&fc, 64, seqno_inc);
+ if (err)
+ return err;
+
+ for (i = 0; i < fc.chain_length; i++) {
+ dma_fence_signal(fc.fences[i]);
+
+ if (!dma_fence_is_signaled(fc.chains[i])) {
+ pr_err("chain[%d] not signaled!\n", i);
+ err = -EINVAL;
+ goto err;
+ }
+
+ if (i + 1 < fc.chain_length &&
+ dma_fence_is_signaled(fc.chains[i + 1])) {
+ pr_err("chain[%d] is signaled!\n", i);
+ err = -EINVAL;
+ goto err;
+ }
+ }
+
+err:
+ fence_chains_fini(&fc);
+ return err;
+}
+
+static int signal_backward(void *arg)
+{
+ struct fence_chains fc;
+ int err;
+ int i;
+
+ err = fence_chains_init(&fc, 64, seqno_inc);
+ if (err)
+ return err;
+
+ for (i = fc.chain_length; i--; ) {
+ dma_fence_signal(fc.fences[i]);
+
+ if (i > 0 && dma_fence_is_signaled(fc.chains[i])) {
+ pr_err("chain[%d] is signaled!\n", i);
+ err = -EINVAL;
+ goto err;
+ }
+ }
+
+ for (i = 0; i < fc.chain_length; i++) {
+ if (!dma_fence_is_signaled(fc.chains[i])) {
+ pr_err("chain[%d] was not signaled!\n", i);
+ err = -EINVAL;
+ goto err;
+ }
+ }
+
+err:
+ fence_chains_fini(&fc);
+ return err;
+}
+
+static int __wait_fence_chains(void *arg)
+{
+ struct fence_chains *fc = arg;
+
+ if (dma_fence_wait(fc->tail, false))
+ return -EIO;
+
+ return 0;
+}
+
+static int wait_forward(void *arg)
+{
+ struct fence_chains fc;
+ struct task_struct *tsk;
+ int err;
+ int i;
+
+ err = fence_chains_init(&fc, CHAIN_SZ, seqno_inc);
+ if (err)
+ return err;
+
+ tsk = kthread_run(__wait_fence_chains, &fc, "dmabuf/wait");
+ if (IS_ERR(tsk)) {
+ err = PTR_ERR(tsk);
+ goto err;
+ }
+ get_task_struct(tsk);
+ yield_to(tsk, true);
+
+ for (i = 0; i < fc.chain_length; i++)
+ dma_fence_signal(fc.fences[i]);
+
+ err = kthread_stop(tsk);
+ put_task_struct(tsk);
+
+err:
+ fence_chains_fini(&fc);
+ return err;
+}
+
+static int wait_backward(void *arg)
+{
+ struct fence_chains fc;
+ struct task_struct *tsk;
+ int err;
+ int i;
+
+ err = fence_chains_init(&fc, CHAIN_SZ, seqno_inc);
+ if (err)
+ return err;
+
+ tsk = kthread_run(__wait_fence_chains, &fc, "dmabuf/wait");
+ if (IS_ERR(tsk)) {
+ err = PTR_ERR(tsk);
+ goto err;
+ }
+ get_task_struct(tsk);
+ yield_to(tsk, true);
+
+ for (i = fc.chain_length; i--; )
+ dma_fence_signal(fc.fences[i]);
+
+ err = kthread_stop(tsk);
+ put_task_struct(tsk);
+
+err:
+ fence_chains_fini(&fc);
+ return err;
+}
+
+static void randomise_fences(struct fence_chains *fc)
+{
+ unsigned int count = fc->chain_length;
+
+ /* Fisher-Yates shuffle courtesy of Knuth */
+ while (--count) {
+ unsigned int swp;
+
+ swp = prandom_u32_max(count + 1);
+ if (swp == count)
+ continue;
+
+ swap(fc->fences[count], fc->fences[swp]);
+ }
+}
+
+static int wait_random(void *arg)
+{
+ struct fence_chains fc;
+ struct task_struct *tsk;
+ int err;
+ int i;
+
+ err = fence_chains_init(&fc, CHAIN_SZ, seqno_inc);
+ if (err)
+ return err;
+
+ randomise_fences(&fc);
+
+ tsk = kthread_run(__wait_fence_chains, &fc, "dmabuf/wait");
+ if (IS_ERR(tsk)) {
+ err = PTR_ERR(tsk);
+ goto err;
+ }
+ get_task_struct(tsk);
+ yield_to(tsk, true);
+
+ for (i = 0; i < fc.chain_length; i++)
+ dma_fence_signal(fc.fences[i]);
+
+ err = kthread_stop(tsk);
+ put_task_struct(tsk);
+
+err:
+ fence_chains_fini(&fc);
+ return err;
+}
+
+int dma_fence_chain(void)
+{
+ static const struct subtest tests[] = {
+ SUBTEST(sanitycheck),
+ SUBTEST(find_seqno),
+ SUBTEST(find_signaled),
+ SUBTEST(find_out_of_order),
+ SUBTEST(find_gap),
+ SUBTEST(find_race),
+ SUBTEST(signal_forward),
+ SUBTEST(signal_backward),
+ SUBTEST(wait_forward),
+ SUBTEST(wait_backward),
+ SUBTEST(wait_random),
+ };
+ int ret;
+
+ pr_info("sizeof(dma_fence_chain)=%zu\n",
+ sizeof(struct dma_fence_chain));
+
+ slab_fences = KMEM_CACHE(mock_fence,
+ SLAB_TYPESAFE_BY_RCU |
+ SLAB_HWCACHE_ALIGN);
+ if (!slab_fences)
+ return -ENOMEM;
+
+ ret = subtests(tests, NULL);
+
+ kmem_cache_destroy(slab_fences);
+ return ret;
+}
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 023db6883d05..e9ed9165de40 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -106,7 +106,7 @@ config AXI_DMAC
select REGMAP_MMIO
help
Enable support for the Analog Devices AXI-DMAC peripheral. This DMA
- controller is often used in Analog Device's reference designs for FPGA
+ controller is often used in Analog Devices' reference designs for FPGA
platforms.
config BCM_SBA_RAID
@@ -395,12 +395,10 @@ config MMP_TDMA
bool "MMP Two-Channel DMA support"
depends on ARCH_MMP || COMPILE_TEST
select DMA_ENGINE
- select MMP_SRAM if ARCH_MMP
select GENERIC_ALLOCATOR
help
Support the MMP Two-Channel DMA engine.
This engine used for MMP Audio DMA and pxa910 SQU.
- It needs sram driver under mach-mmp.
config MOXART_DMA
tristate "MOXART DMA support"
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index 397692e937b3..80fc2fe8c77e 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -331,7 +331,7 @@ struct at_dma {
struct dma_pool *dma_desc_pool;
struct dma_pool *memset_pool;
/* AT THE END channels table */
- struct at_dma_chan chan[0];
+ struct at_dma_chan chan[];
};
#define dma_readl(atdma, name) \
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index bb0eaf38b594..fd92f048c491 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -212,7 +212,7 @@ struct at_xdmac {
struct clk *clk;
u32 save_gim;
struct dma_pool *at_xdmac_desc_pool;
- struct at_xdmac_chan chan[0];
+ struct at_xdmac_chan chan[];
};
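
Replacing the one-element chan[0] arrays with true flexible array members (here, in at_hdmac_regs.h above, and in bam_dma.c further down) lets the compiler and fortify checks see that the array has no fixed bound, and pairs naturally with struct_size() when allocating. A generic sketch with made-up types:

    #include <linux/overflow.h>
    #include <linux/slab.h>

    struct example_chan {
            int id;
    };

    struct example_dma {
            int nr_chan;
            struct example_chan chan[];     /* flexible array member, must be last */
    };

    static struct example_dma *example_alloc(int nr_chan)
    {
            struct example_dma *d;

            d = kzalloc(struct_size(d, chan, nr_chan), GFP_KERNEL);
            if (d)
                    d->nr_chan = nr_chan;
            return d;
    }
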
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index d31076d9ef25..2b06a7a8629d 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -53,6 +53,8 @@
#include <linux/mempool.h>
#include <linux/numa.h>
+#include "dmaengine.h"
+
static DEFINE_MUTEX(dma_list_mutex);
static DEFINE_IDA(dma_ida);
static LIST_HEAD(dma_device_list);
@@ -145,9 +147,9 @@ static inline void dmaengine_debug_unregister(struct dma_device *dma_dev) { }
/**
* dev_to_dma_chan - convert a device pointer to its sysfs container object
- * @dev - device node
+ * @dev: device node
*
- * Must be called under dma_list_mutex
+ * Must be called under dma_list_mutex.
*/
static struct dma_chan *dev_to_dma_chan(struct device *dev)
{
@@ -243,22 +245,18 @@ static struct class dma_devclass = {
/* --- client and device registration --- */
-/**
- * dma_cap_mask_all - enable iteration over all operation types
- */
+/* enable iteration over all operation types */
static dma_cap_mask_t dma_cap_mask_all;
/**
- * dma_chan_tbl_ent - tracks channel allocations per core/operation
- * @chan - associated channel for this entry
+ * struct dma_chan_tbl_ent - tracks channel allocations per core/operation
+ * @chan: associated channel for this entry
*/
struct dma_chan_tbl_ent {
struct dma_chan *chan;
};
-/**
- * channel_table - percpu lookup table for memory-to-memory offload providers
- */
+/* percpu lookup table for memory-to-memory offload providers */
static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];
static int __init dma_channel_table_init(void)
@@ -295,8 +293,11 @@ static int __init dma_channel_table_init(void)
arch_initcall(dma_channel_table_init);
/**
- * dma_chan_is_local - returns true if the channel is in the same numa-node as
- * the cpu
+ * dma_chan_is_local - checks if the channel is in the same NUMA-node as the CPU
+ * @chan: DMA channel to test
+ * @cpu: CPU index which the channel should be close to
+ *
+ * Returns true if the channel is in the same NUMA-node as the CPU.
*/
static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
{
@@ -306,14 +307,14 @@ static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
}
/**
- * min_chan - returns the channel with min count and in the same numa-node as
- * the cpu
- * @cap: capability to match
- * @cpu: cpu index which the channel should be close to
+ * min_chan - finds the channel with min count and in the same NUMA-node as the CPU
+ * @cap: capability to match
+ * @cpu: CPU index which the channel should be close to
*
- * If some channels are close to the given cpu, the one with the lowest
- * reference count is returned. Otherwise, cpu is ignored and only the
+ * If some channels are close to the given CPU, the one with the lowest
+ * reference count is returned. Otherwise, CPU is ignored and only the
* reference count is taken into account.
+ *
* Must be called under dma_list_mutex.
*/
static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
@@ -351,10 +352,11 @@ static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
/**
* dma_channel_rebalance - redistribute the available channels
*
- * Optimize for cpu isolation (each cpu gets a dedicated channel for an
- * operation type) in the SMP case, and operation isolation (avoid
- * multi-tasking channels) in the non-SMP case. Must be called under
- * dma_list_mutex.
+ * Optimize for CPU isolation (each CPU gets a dedicated channel for an
+ * operation type) in the SMP case, and operation isolation (avoid
+ * multi-tasking channels) in the non-SMP case.
+ *
+ * Must be called under dma_list_mutex.
*/
static void dma_channel_rebalance(void)
{
@@ -404,9 +406,9 @@ static struct module *dma_chan_to_owner(struct dma_chan *chan)
/**
* balance_ref_count - catch up the channel reference count
- * @chan - channel to balance ->client_count versus dmaengine_ref_count
+ * @chan: channel to balance ->client_count versus dmaengine_ref_count
*
- * balance_ref_count must be called under dma_list_mutex
+ * Must be called under dma_list_mutex.
*/
static void balance_ref_count(struct dma_chan *chan)
{
@@ -436,10 +438,10 @@ static void dma_device_put(struct dma_device *device)
}
/**
- * dma_chan_get - try to grab a dma channel's parent driver module
- * @chan - channel to grab
+ * dma_chan_get - try to grab a DMA channel's parent driver module
+ * @chan: channel to grab
*
- * Must be called under dma_list_mutex
+ * Must be called under dma_list_mutex.
*/
static int dma_chan_get(struct dma_chan *chan)
{
@@ -483,10 +485,10 @@ module_put_out:
}
/**
- * dma_chan_put - drop a reference to a dma channel's parent driver module
- * @chan - channel to release
+ * dma_chan_put - drop a reference to a DMA channel's parent driver module
+ * @chan: channel to release
*
- * Must be called under dma_list_mutex
+ * Must be called under dma_list_mutex.
*/
static void dma_chan_put(struct dma_chan *chan)
{
@@ -537,7 +539,7 @@ EXPORT_SYMBOL(dma_sync_wait);
/**
* dma_find_channel - find a channel to carry out the operation
- * @tx_type: transaction type
+ * @tx_type: transaction type
*/
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
@@ -677,7 +679,7 @@ static struct dma_chan *find_candidate(struct dma_device *device,
/**
* dma_get_slave_channel - try to get specific channel exclusively
- * @chan: target channel
+ * @chan: target channel
*/
struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
{
@@ -731,10 +733,10 @@ EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);
/**
* __dma_request_channel - try to allocate an exclusive channel
- * @mask: capabilities that the channel must satisfy
- * @fn: optional callback to disposition available channels
- * @fn_param: opaque parameter to pass to dma_filter_fn
- * @np: device node to look for DMA channels
+ * @mask: capabilities that the channel must satisfy
+ * @fn: optional callback to disposition available channels
+ * @fn_param: opaque parameter to pass to dma_filter_fn()
+ * @np: device node to look for DMA channels
*
* Returns pointer to appropriate DMA channel on success or NULL.
*/
@@ -877,7 +879,7 @@ EXPORT_SYMBOL_GPL(dma_request_slave_channel);
/**
* dma_request_chan_by_mask - allocate a channel satisfying certain capabilities
- * @mask: capabilities that the channel must satisfy
+ * @mask: capabilities that the channel must satisfy
*
* Returns pointer to appropriate DMA channel on success or an error pointer.
*/
@@ -968,7 +970,7 @@ void dmaengine_get(void)
EXPORT_SYMBOL(dmaengine_get);
/**
- * dmaengine_put - let dma drivers be removed when ref_count == 0
+ * dmaengine_put - let DMA drivers be removed when ref_count == 0
*/
void dmaengine_put(void)
{
@@ -1132,7 +1134,7 @@ EXPORT_SYMBOL_GPL(dma_async_device_channel_unregister);
/**
* dma_async_device_register - registers DMA devices found
- * @device: &dma_device
+ * @device: pointer to &struct dma_device
*
* After calling this routine the structure should not be freed except in the
* device_release() callback which will be called after
@@ -1304,7 +1306,7 @@ EXPORT_SYMBOL(dma_async_device_register);
/**
* dma_async_device_unregister - unregister a DMA device
- * @device: &dma_device
+ * @device: pointer to &struct dma_device
*
* This routine is called by dma driver exit routines, dmaengine holds module
* references to prevent it being called while channels are in use.
@@ -1341,7 +1343,7 @@ static void dmam_device_release(struct device *dev, void *res)
/**
* dmaenginem_async_device_register - registers DMA devices found
- * @device: &dma_device
+ * @device: pointer to &struct dma_device
*
* The operation is managed and will be undone on driver detach.
*/
@@ -1578,8 +1580,9 @@ int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc,
}
EXPORT_SYMBOL_GPL(dmaengine_desc_set_metadata_len);
-/* dma_wait_for_async_tx - spin wait for a transaction to complete
- * @tx: in-flight transaction to wait on
+/**
+ * dma_wait_for_async_tx - spin wait for a transaction to complete
+ * @tx: in-flight transaction to wait on
*/
enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
@@ -1602,9 +1605,12 @@ dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
}
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
-/* dma_run_dependencies - helper routine for dma drivers to process
- * (start) dependent operations on their target channel
- * @tx: transaction with dependencies
+/**
+ * dma_run_dependencies - process dependent operations on the target channel
+ * @tx: transaction with dependencies
+ *
+ * Helper routine for DMA drivers to process (start) dependent operations
+ * on their target channel.
*/
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 0425984db118..b175229a4b01 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -60,9 +60,9 @@ MODULE_PARM_DESC(pq_sources,
"Number of p+q source buffers (default: 3)");
static int timeout = 3000;
-module_param(timeout, uint, S_IRUGO | S_IWUSR);
+module_param(timeout, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
- "Pass 0xFFFFFFFF (4294967295) for maximum timeout");
+ "Pass -1 for infinite timeout");
static bool noverify;
module_param(noverify, bool, S_IRUGO | S_IWUSR);
@@ -72,10 +72,6 @@ static bool norandom;
module_param(norandom, bool, 0644);
MODULE_PARM_DESC(norandom, "Disable random offset setup (default: random)");
-static bool polled;
-module_param(polled, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(polled, "Use polling for completion instead of interrupts");
-
static bool verbose;
module_param(verbose, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(verbose, "Enable \"success\" result messages (default: off)");
@@ -88,6 +84,10 @@ static unsigned int transfer_size;
module_param(transfer_size, uint, 0644);
MODULE_PARM_DESC(transfer_size, "Optional custom transfer size in bytes (default: not used (0))");
+static bool polled;
+module_param(polled, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(polled, "Use polling for completion instead of interrupts");
+
/**
* struct dmatest_params - test parameters.
* @buf_size: size of the memcpy test buffer
@@ -98,7 +98,12 @@ MODULE_PARM_DESC(transfer_size, "Optional custom transfer size in bytes (default
* @iterations: iterations before stopping test
* @xor_sources: number of xor source buffers
* @pq_sources: number of p+q source buffers
- * @timeout: transfer timeout in msec, 0 - 0xFFFFFFFF (4294967295)
+ * @timeout: transfer timeout in msec, -1 for infinite timeout
+ * @noverify: disable data verification
+ * @norandom: disable random offset setup
+ * @alignment: custom data address alignment taken as 2^alignment
+ * @transfer_size: custom transfer size in bytes
+ * @polled: use polling for completion instead of interrupts
*/
struct dmatest_params {
unsigned int buf_size;
@@ -109,7 +114,7 @@ struct dmatest_params {
unsigned int iterations;
unsigned int xor_sources;
unsigned int pq_sources;
- unsigned int timeout;
+ int timeout;
bool noverify;
bool norandom;
int alignment;
@@ -120,7 +125,10 @@ struct dmatest_params {
/**
* struct dmatest_info - test information.
* @params: test parameters
+ * @channels: channels under test
+ * @nr_channels: number of channels under test
* @lock: access protection to the fields of this structure
+ * @did_init: module has been initialized completely
*/
static struct dmatest_info {
/* Test parameters */
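
Making the timeout parameter signed lets -1 stand for an effectively unbounded wait: msecs_to_jiffies() saturates negative input to MAX_JIFFY_OFFSET, so a wait-with-timeout built on it essentially never expires. A hedged sketch of the idiom (names are illustrative, not dmatest internals):

    #include <linux/completion.h>
    #include <linux/jiffies.h>

    static void example_wait(struct completion *done, int timeout_msec)
    {
            /* a negative timeout_msec saturates to MAX_JIFFY_OFFSET inside msecs_to_jiffies() */
            wait_for_completion_timeout(done, msecs_to_jiffies(timeout_msec));
    }
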
diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c
index ff392c01bad1..ed430ad9b3dd 100644
--- a/drivers/dma/dw-edma/dw-edma-core.c
+++ b/drivers/dma/dw-edma/dw-edma-core.c
@@ -13,8 +13,9 @@
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/dma/edma.h>
-#include <linux/pci.h>
+#include <linux/dma-mapping.h>
#include "dw-edma-core.h"
#include "dw-edma-v0-core.h"
@@ -322,7 +323,7 @@ static struct dma_async_tx_descriptor *
dw_edma_device_transfer(struct dw_edma_transfer *xfer)
{
struct dw_edma_chan *chan = dchan2dw_edma_chan(xfer->dchan);
- enum dma_transfer_direction direction = xfer->direction;
+ enum dma_transfer_direction dir = xfer->direction;
phys_addr_t src_addr, dst_addr;
struct scatterlist *sg = NULL;
struct dw_edma_chunk *chunk;
@@ -331,10 +332,26 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
u32 cnt;
int i;
- if ((direction == DMA_MEM_TO_DEV && chan->dir == EDMA_DIR_WRITE) ||
- (direction == DMA_DEV_TO_MEM && chan->dir == EDMA_DIR_READ))
+ if (!chan->configured)
return NULL;
+ switch (chan->config.direction) {
+ case DMA_DEV_TO_MEM: /* local dma */
+ if (dir == DMA_DEV_TO_MEM && chan->dir == EDMA_DIR_READ)
+ break;
+ return NULL;
+ case DMA_MEM_TO_DEV: /* local dma */
+ if (dir == DMA_MEM_TO_DEV && chan->dir == EDMA_DIR_WRITE)
+ break;
+ return NULL;
+ default: /* remote dma */
+ if (dir == DMA_MEM_TO_DEV && chan->dir == EDMA_DIR_READ)
+ break;
+ if (dir == DMA_DEV_TO_MEM && chan->dir == EDMA_DIR_WRITE)
+ break;
+ return NULL;
+ }
+
if (xfer->cyclic) {
if (!xfer->xfer.cyclic.len || !xfer->xfer.cyclic.cnt)
return NULL;
@@ -343,9 +360,6 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
return NULL;
}
- if (!chan->configured)
- return NULL;
-
desc = dw_edma_alloc_desc(chan);
if (unlikely(!desc))
goto err_alloc;
@@ -386,7 +400,7 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
chunk->ll_region.sz += burst->sz;
desc->alloc_sz += burst->sz;
- if (direction == DMA_DEV_TO_MEM) {
+ if (chan->dir == EDMA_DIR_WRITE) {
burst->sar = src_addr;
if (xfer->cyclic) {
burst->dar = xfer->xfer.cyclic.paddr;
@@ -773,6 +787,7 @@ static int dw_edma_irq_request(struct dw_edma_chip *chip,
u32 rd_mask = 1;
int i, err = 0;
u32 ch_cnt;
+ int irq;
ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt;
@@ -781,16 +796,16 @@ static int dw_edma_irq_request(struct dw_edma_chip *chip,
if (dw->nr_irqs == 1) {
/* Common IRQ shared among all channels */
- err = request_irq(pci_irq_vector(to_pci_dev(dev), 0),
- dw_edma_interrupt_common,
+ irq = dw->ops->irq_vector(dev, 0);
+ err = request_irq(irq, dw_edma_interrupt_common,
IRQF_SHARED, dw->name, &dw->irq[0]);
if (err) {
dw->nr_irqs = 0;
return err;
}
- get_cached_msi_msg(pci_irq_vector(to_pci_dev(dev), 0),
- &dw->irq[0].msi);
+ if (irq_get_msi_desc(irq))
+ get_cached_msi_msg(irq, &dw->irq[0].msi);
} else {
/* Distribute IRQs equally among all channels */
int tmp = dw->nr_irqs;
@@ -804,7 +819,8 @@ static int dw_edma_irq_request(struct dw_edma_chip *chip,
dw_edma_add_irq_mask(&rd_mask, *rd_alloc, dw->rd_ch_cnt);
for (i = 0; i < (*wr_alloc + *rd_alloc); i++) {
- err = request_irq(pci_irq_vector(to_pci_dev(dev), i),
+ irq = dw->ops->irq_vector(dev, i);
+ err = request_irq(irq,
i < *wr_alloc ?
dw_edma_interrupt_write :
dw_edma_interrupt_read,
@@ -815,8 +831,8 @@ static int dw_edma_irq_request(struct dw_edma_chip *chip,
return err;
}
- get_cached_msi_msg(pci_irq_vector(to_pci_dev(dev), i),
- &dw->irq[i].msi);
+ if (irq_get_msi_desc(irq))
+ get_cached_msi_msg(irq, &dw->irq[i].msi);
}
dw->nr_irqs = i;
@@ -827,12 +843,23 @@ static int dw_edma_irq_request(struct dw_edma_chip *chip,
int dw_edma_probe(struct dw_edma_chip *chip)
{
- struct device *dev = chip->dev;
- struct dw_edma *dw = chip->dw;
+ struct device *dev;
+ struct dw_edma *dw;
u32 wr_alloc = 0;
u32 rd_alloc = 0;
int i, err;
+ if (!chip)
+ return -EINVAL;
+
+ dev = chip->dev;
+ if (!dev)
+ return -EINVAL;
+
+ dw = chip->dw;
+ if (!dw || !dw->irq || !dw->ops || !dw->ops->irq_vector)
+ return -EINVAL;
+
raw_spin_lock_init(&dw->lock);
/* Find out how many write channels are supported by hardware */
@@ -884,7 +911,7 @@ int dw_edma_probe(struct dw_edma_chip *chip)
err_irq_free:
for (i = (dw->nr_irqs - 1); i >= 0; i--)
- free_irq(pci_irq_vector(to_pci_dev(dev), i), &dw->irq[i]);
+ free_irq(dw->ops->irq_vector(dev, i), &dw->irq[i]);
dw->nr_irqs = 0;
@@ -904,7 +931,7 @@ int dw_edma_remove(struct dw_edma_chip *chip)
/* Free irqs */
for (i = (dw->nr_irqs - 1); i >= 0; i--)
- free_irq(pci_irq_vector(to_pci_dev(dev), i), &dw->irq[i]);
+ free_irq(dw->ops->irq_vector(dev, i), &dw->irq[i]);
/* Power management */
pm_runtime_disable(dev);
diff --git a/drivers/dma/dw-edma/dw-edma-core.h b/drivers/dma/dw-edma/dw-edma-core.h
index 4e5f9f6e901b..31fc50d31792 100644
--- a/drivers/dma/dw-edma/dw-edma-core.h
+++ b/drivers/dma/dw-edma/dw-edma-core.h
@@ -103,6 +103,10 @@ struct dw_edma_irq {
struct dw_edma *dw;
};
+struct dw_edma_core_ops {
+ int (*irq_vector)(struct device *dev, unsigned int nr);
+};
+
struct dw_edma {
char name[20];
diff --git a/drivers/dma/dw-edma/dw-edma-pcie.c b/drivers/dma/dw-edma/dw-edma-pcie.c
index dc85f55e1bb8..1eafc602e17e 100644
--- a/drivers/dma/dw-edma/dw-edma-pcie.c
+++ b/drivers/dma/dw-edma/dw-edma-pcie.c
@@ -54,6 +54,15 @@ static const struct dw_edma_pcie_data snps_edda_data = {
.irqs = 1,
};
+static int dw_edma_pcie_irq_vector(struct device *dev, unsigned int nr)
+{
+ return pci_irq_vector(to_pci_dev(dev), nr);
+}
+
+static const struct dw_edma_core_ops dw_edma_pcie_core_ops = {
+ .irq_vector = dw_edma_pcie_irq_vector,
+};
+
static int dw_edma_pcie_probe(struct pci_dev *pdev,
const struct pci_device_id *pid)
{
@@ -151,6 +160,7 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
dw->version = pdata->version;
dw->mode = pdata->mode;
dw->nr_irqs = nr_irqs;
+ dw->ops = &dw_edma_pcie_core_ops;
/* Debug info */
pci_dbg(pdev, "Version:\t%u\n", dw->version);
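
Routing interrupt lookup through the new dw_edma_core_ops::irq_vector callback removes the core's hard dependency on pci_irq_vector(), so a non-PCI integration only has to supply its own IRQ mapping. A purely hypothetical sketch of what a platform-device glue could pass in (no such glue exists in this patch; it would also need access to dw-edma-core.h):

    #include <linux/platform_device.h>

    static int example_platform_irq_vector(struct device *dev, unsigned int nr)
    {
            return platform_get_irq(to_platform_device(dev), nr);
    }

    static const struct dw_edma_core_ops example_platform_ops = {
            .irq_vector = example_platform_irq_vector,
    };

    /* Assigned to dw->ops before dw_edma_probe(), just as the PCIe glue does above. */
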
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index 3999827970ab..052dae5d6ddd 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -1092,6 +1092,16 @@ static const struct attribute_group *idxd_wq_attribute_groups[] = {
};
/* IDXD device attribs */
+static ssize_t version_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+
+ return sprintf(buf, "%#x\n", idxd->hw.version);
+}
+static DEVICE_ATTR_RO(version);
+
static ssize_t max_work_queues_size_show(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -1313,6 +1323,7 @@ static ssize_t cdev_major_show(struct device *dev,
static DEVICE_ATTR_RO(cdev_major);
static struct attribute *idxd_device_attributes[] = {
+ &dev_attr_version.attr,
&dev_attr_max_groups.attr,
&dev_attr_max_work_queues.attr,
&dev_attr_max_work_queues_size.attr,
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 4d4477df4ede..91774039ae5d 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -2063,7 +2063,7 @@ static int sdma_probe(struct platform_device *pdev)
/* initially no scripts available */
saddr_arr = (s32 *)sdma->script_addrs;
- for (i = 0; i < SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; i++)
+ for (i = 0; i < sizeof(*sdma->script_addrs) / sizeof(s32); i++)
saddr_arr[i] = -EINVAL;
dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 18c011e57592..8ad0ad861c86 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -332,8 +332,8 @@ ioat_alloc_ring_ent(struct dma_chan *chan, int idx, gfp_t flags)
u8 *pos;
off_t offs;
- chunk = idx / IOAT_DESCS_PER_2M;
- idx &= (IOAT_DESCS_PER_2M - 1);
+ chunk = idx / IOAT_DESCS_PER_CHUNK;
+ idx &= (IOAT_DESCS_PER_CHUNK - 1);
offs = idx * IOAT_DESC_SZ;
pos = (u8 *)ioat_chan->descs[chunk].virt + offs;
phys = ioat_chan->descs[chunk].hw + offs;
@@ -370,7 +370,8 @@ ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
if (!ring)
return NULL;
- ioat_chan->desc_chunks = chunks = (total_descs * IOAT_DESC_SZ) / SZ_2M;
+ chunks = (total_descs * IOAT_DESC_SZ) / IOAT_CHUNK_SIZE;
+ ioat_chan->desc_chunks = chunks;
for (i = 0; i < chunks; i++) {
struct ioat_descs *descs = &ioat_chan->descs[i];
@@ -382,8 +383,9 @@ ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
for (idx = 0; idx < i; idx++) {
descs = &ioat_chan->descs[idx];
- dma_free_coherent(to_dev(ioat_chan), SZ_2M,
- descs->virt, descs->hw);
+ dma_free_coherent(to_dev(ioat_chan),
+ IOAT_CHUNK_SIZE,
+ descs->virt, descs->hw);
descs->virt = NULL;
descs->hw = 0;
}
@@ -404,7 +406,7 @@ ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
for (idx = 0; idx < ioat_chan->desc_chunks; idx++) {
dma_free_coherent(to_dev(ioat_chan),
- SZ_2M,
+ IOAT_CHUNK_SIZE,
ioat_chan->descs[idx].virt,
ioat_chan->descs[idx].hw);
ioat_chan->descs[idx].virt = NULL;
@@ -867,6 +869,23 @@ static void check_active(struct ioatdma_chan *ioat_chan)
mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
}
+static void ioat_reboot_chan(struct ioatdma_chan *ioat_chan)
+{
+ spin_lock_bh(&ioat_chan->prep_lock);
+ set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
+ spin_unlock_bh(&ioat_chan->prep_lock);
+
+ ioat_abort_descs(ioat_chan);
+ dev_warn(to_dev(ioat_chan), "Reset channel...\n");
+ ioat_reset_hw(ioat_chan);
+ dev_warn(to_dev(ioat_chan), "Restart channel...\n");
+ ioat_restart_channel(ioat_chan);
+
+ spin_lock_bh(&ioat_chan->prep_lock);
+ clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
+ spin_unlock_bh(&ioat_chan->prep_lock);
+}
+
void ioat_timer_event(struct timer_list *t)
{
struct ioatdma_chan *ioat_chan = from_timer(ioat_chan, t, timer);
@@ -889,19 +908,7 @@ void ioat_timer_event(struct timer_list *t)
if (test_bit(IOAT_RUN, &ioat_chan->state)) {
spin_lock_bh(&ioat_chan->cleanup_lock);
- spin_lock_bh(&ioat_chan->prep_lock);
- set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
- spin_unlock_bh(&ioat_chan->prep_lock);
-
- ioat_abort_descs(ioat_chan);
- dev_warn(to_dev(ioat_chan), "Reset channel...\n");
- ioat_reset_hw(ioat_chan);
- dev_warn(to_dev(ioat_chan), "Restart channel...\n");
- ioat_restart_channel(ioat_chan);
-
- spin_lock_bh(&ioat_chan->prep_lock);
- clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
- spin_unlock_bh(&ioat_chan->prep_lock);
+ ioat_reboot_chan(ioat_chan);
spin_unlock_bh(&ioat_chan->cleanup_lock);
}
@@ -915,17 +922,23 @@ void ioat_timer_event(struct timer_list *t)
spin_lock_bh(&ioat_chan->prep_lock);
check_active(ioat_chan);
spin_unlock_bh(&ioat_chan->prep_lock);
- spin_unlock_bh(&ioat_chan->cleanup_lock);
- return;
+ goto unlock_out;
+ }
+
+ /* handle the missed cleanup case */
+ if (ioat_cleanup_preamble(ioat_chan, &phys_complete)) {
+ /* timer restarted in ioat_cleanup_preamble
+ * and IOAT_COMPLETION_ACK cleared
+ */
+ __cleanup(ioat_chan, phys_complete);
+ goto unlock_out;
}
/* if we haven't made progress and we have already
* acknowledged a pending completion once, then be more
* forceful with a restart
*/
- if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
- __cleanup(ioat_chan, phys_complete);
- else if (test_bit(IOAT_COMPLETION_ACK, &ioat_chan->state)) {
+ if (test_bit(IOAT_COMPLETION_ACK, &ioat_chan->state)) {
u32 chanerr;
chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
@@ -937,25 +950,23 @@ void ioat_timer_event(struct timer_list *t)
dev_dbg(to_dev(ioat_chan), "Active descriptors: %d\n",
ioat_ring_active(ioat_chan));
- spin_lock_bh(&ioat_chan->prep_lock);
- set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
- spin_unlock_bh(&ioat_chan->prep_lock);
+ ioat_reboot_chan(ioat_chan);
- ioat_abort_descs(ioat_chan);
- dev_warn(to_dev(ioat_chan), "Resetting channel...\n");
- ioat_reset_hw(ioat_chan);
- dev_warn(to_dev(ioat_chan), "Restarting channel...\n");
- ioat_restart_channel(ioat_chan);
+ goto unlock_out;
+ }
+ /* handle missed issue pending case */
+ if (ioat_ring_pending(ioat_chan)) {
+ dev_warn(to_dev(ioat_chan),
+ "Completion timeout with pending descriptors\n");
spin_lock_bh(&ioat_chan->prep_lock);
- clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
+ __ioat_issue_pending(ioat_chan);
spin_unlock_bh(&ioat_chan->prep_lock);
- spin_unlock_bh(&ioat_chan->cleanup_lock);
- return;
- } else
- set_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
+ }
+ set_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
+unlock_out:
spin_unlock_bh(&ioat_chan->cleanup_lock);
}
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index b8e8e0b9693c..e6b622e1ba92 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -81,6 +81,11 @@ struct ioatdma_device {
u32 msixpba;
};
+#define IOAT_MAX_ORDER 16
+#define IOAT_MAX_DESCS (1 << IOAT_MAX_ORDER)
+#define IOAT_CHUNK_SIZE (SZ_512K)
+#define IOAT_DESCS_PER_CHUNK (IOAT_CHUNK_SIZE / IOAT_DESC_SZ)
+
struct ioat_descs {
void *virt;
dma_addr_t hw;
@@ -128,7 +133,7 @@ struct ioatdma_chan {
u16 produce;
struct ioat_ring_ent **ring;
spinlock_t prep_lock;
- struct ioat_descs descs[2];
+ struct ioat_descs descs[IOAT_MAX_DESCS / IOAT_DESCS_PER_CHUNK];
int desc_chunks;
int intr_coalesce;
int prev_intr_coalesce;
@@ -301,9 +306,6 @@ static inline bool is_ioat_bug(unsigned long err)
return !!err;
}
-#define IOAT_MAX_ORDER 16
-#define IOAT_MAX_DESCS 65536
-#define IOAT_DESCS_PER_2M 32768
static inline u32 ioat_ring_size(struct ioatdma_chan *ioat_chan)
{
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index 60e9afbb896c..58d13564f88b 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -651,7 +651,7 @@ static void ioat_free_chan_resources(struct dma_chan *c)
}
for (i = 0; i < ioat_chan->desc_chunks; i++) {
- dma_free_coherent(to_dev(ioat_chan), SZ_2M,
+ dma_free_coherent(to_dev(ioat_chan), IOAT_CHUNK_SIZE,
ioat_chan->descs[i].virt,
ioat_chan->descs[i].hw);
ioat_chan->descs[i].virt = NULL;
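
A quick consistency check on the new sizing, assuming IOAT_DESC_SZ stays at 64 bytes (its value elsewhere in dma.h, not visible in these hunks): the removed IOAT_DESCS_PER_2M was 2 MiB / 64 = 32768, matching the old constant, while IOAT_CHUNK_SIZE = 512 KiB gives IOAT_DESCS_PER_CHUNK = 512 KiB / 64 = 8192, so the descs[] array grows to IOAT_MAX_DESCS / IOAT_DESCS_PER_CHUNK = 65536 / 8192 = 8 entries in place of the old hard-coded 2, and every allocation and free now consistently uses IOAT_CHUNK_SIZE instead of SZ_2M.
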
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index d683232d7fea..dbc6a48424fa 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -235,7 +235,7 @@ static int mmp_tdma_config_chan(struct dma_chan *chan)
tdcr |= TDCR_BURSTSZ_128B;
break;
default:
- dev_err(tdmac->dev, "mmp_tdma: unknown burst size.\n");
+ dev_err(tdmac->dev, "unknown burst size.\n");
return -EINVAL;
}
@@ -250,7 +250,7 @@ static int mmp_tdma_config_chan(struct dma_chan *chan)
tdcr |= TDCR_SSZ_32_BITS;
break;
default:
- dev_err(tdmac->dev, "mmp_tdma: unknown bus size.\n");
+ dev_err(tdmac->dev, "unknown bus size.\n");
return -EINVAL;
}
} else if (tdmac->type == PXA910_SQU) {
@@ -276,7 +276,7 @@ static int mmp_tdma_config_chan(struct dma_chan *chan)
tdcr |= TDCR_BURSTSZ_SQU_32B;
break;
default:
- dev_err(tdmac->dev, "mmp_tdma: unknown burst size.\n");
+ dev_err(tdmac->dev, "unknown burst size.\n");
return -EINVAL;
}
}
@@ -429,8 +429,15 @@ static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic(
int num_periods = buf_len / period_len;
int i = 0, buf = 0;
- if (tdmac->status != DMA_COMPLETE)
+ if (!is_slave_direction(direction)) {
+ dev_err(tdmac->dev, "unsupported transfer direction\n");
return NULL;
+ }
+
+ if (tdmac->status != DMA_COMPLETE) {
+ dev_err(tdmac->dev, "controller busy");
+ return NULL;
+ }
if (period_len > TDMA_MAX_XFER_BYTES) {
dev_err(tdmac->dev,
@@ -704,6 +711,17 @@ static int mmp_tdma_probe(struct platform_device *pdev)
tdev->device.device_terminate_all = mmp_tdma_terminate_all;
tdev->device.copy_align = DMAENGINE_ALIGN_8_BYTES;
+ tdev->device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ if (type == MMP_AUD_TDMA) {
+ tdev->device.max_burst = SZ_128;
+ tdev->device.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+ tdev->device.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+ } else if (type == PXA910_SQU) {
+ tdev->device.max_burst = SZ_32;
+ }
+ tdev->device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+ tdev->device.descriptor_reuse = true;
+
dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
platform_set_drvdata(pdev, tdev);
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
index e04499c1f27f..4ab493d46375 100644
--- a/drivers/dma/moxart-dma.c
+++ b/drivers/dma/moxart-dma.c
@@ -568,7 +568,7 @@ static int moxart_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
struct resource *res;
- static void __iomem *dma_base_addr;
+ void __iomem *dma_base_addr;
int ret, i;
unsigned int irq;
struct moxart_chan *ch;
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
index ef73f65224b1..5a08dd0d3388 100644
--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -74,7 +74,7 @@ struct bam_async_desc {
struct list_head desc_node;
enum dma_transfer_direction dir;
size_t length;
- struct bam_desc_hw desc[0];
+ struct bam_desc_hw desc[];
};
enum bam_reg {
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
index 411f91fde734..0a6d3ea08c78 100644
--- a/drivers/dma/qcom/hidma.c
+++ b/drivers/dma/qcom/hidma.c
@@ -550,7 +550,7 @@ static void hidma_free_chan_resources(struct dma_chan *dmach)
kfree(mdesc);
}
- mchan->allocated = 0;
+ mchan->allocated = false;
spin_unlock_irqrestore(&mchan->lock, irqflags);
}
@@ -897,7 +897,6 @@ uninit:
if (msi)
hidma_free_msis(dmadev);
- hidma_debug_uninit(dmadev);
hidma_ll_uninit(dmadev->lldev);
dmafree:
if (dmadev)
diff --git a/drivers/dma/sf-pdma/sf-pdma.c b/drivers/dma/sf-pdma/sf-pdma.c
index 6d0bec947636..5c118c7e02bd 100644
--- a/drivers/dma/sf-pdma/sf-pdma.c
+++ b/drivers/dma/sf-pdma/sf-pdma.c
@@ -506,11 +506,11 @@ static int sf_pdma_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
pdma->membase = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(pdma->membase))
- goto ERR_MEMBASE;
+ return PTR_ERR(pdma->membase);
ret = sf_pdma_irq_init(pdev, pdma);
if (ret)
- goto ERR_INITIRQ;
+ return ret;
sf_pdma_setup_chans(pdma);
@@ -544,24 +544,13 @@ static int sf_pdma_probe(struct platform_device *pdev)
"Failed to set DMA mask. Fall back to default.\n");
ret = dma_async_device_register(&pdma->dma_dev);
- if (ret)
- goto ERR_REG_DMADEVICE;
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Can't register SiFive Platform DMA. (%d)\n", ret);
+ return ret;
+ }
return 0;
-
-ERR_MEMBASE:
- devm_kfree(&pdev->dev, pdma);
- return PTR_ERR(pdma->membase);
-
-ERR_INITIRQ:
- devm_kfree(&pdev->dev, pdma);
- return ret;
-
-ERR_REG_DMADEVICE:
- devm_kfree(&pdev->dev, pdma);
- dev_err(&pdev->dev,
- "Can't register SiFive Platform DMA. (%d)\n", ret);
- return ret;
}
static int sf_pdma_remove(struct platform_device *pdev)
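
The error-path simplification in sf_pdma_probe() leans on the devm model: memory from devm_kzalloc() and mappings from devm_ioremap_resource() are released automatically when probe fails or the device is unbound, so the explicit devm_kfree() on every error branch was redundant. A generic sketch of the idiom (driver names are made up):

    #include <linux/err.h>
    #include <linux/io.h>
    #include <linux/platform_device.h>
    #include <linux/slab.h>

    struct example_priv {
            void __iomem *base;
    };

    static int example_probe(struct platform_device *pdev)
    {
            struct example_priv *priv;

            priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
            if (!priv)
                    return -ENOMEM;

            priv->base = devm_platform_ioremap_resource(pdev, 0);
            if (IS_ERR(priv->base))
                    return PTR_ERR(priv->base);     /* no manual devm_kfree() needed */

            platform_set_drvdata(pdev, priv);
            return 0;
    }
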
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
index 0ddbaa4b4f0b..96ad1b3d24c6 100644
--- a/drivers/dma/stm32-dma.c
+++ b/drivers/dma/stm32-dma.c
@@ -117,6 +117,7 @@
#define STM32_DMA_FIFO_THRESHOLD_HALFFULL 0x01
#define STM32_DMA_FIFO_THRESHOLD_3QUARTERSFULL 0x02
#define STM32_DMA_FIFO_THRESHOLD_FULL 0x03
+#define STM32_DMA_FIFO_THRESHOLD_NONE 0x04
#define STM32_DMA_MAX_DATA_ITEMS 0xffff
/*
@@ -136,6 +137,9 @@
/* DMA Features */
#define STM32_DMA_THRESHOLD_FTR_MASK GENMASK(1, 0)
#define STM32_DMA_THRESHOLD_FTR_GET(n) ((n) & STM32_DMA_THRESHOLD_FTR_MASK)
+#define STM32_DMA_DIRECT_MODE_MASK BIT(2)
+#define STM32_DMA_DIRECT_MODE_GET(n) (((n) & STM32_DMA_DIRECT_MODE_MASK) \
+ >> 2)
enum stm32_dma_width {
STM32_DMA_BYTE,
@@ -281,6 +285,9 @@ static bool stm32_dma_fifo_threshold_is_allowed(u32 burst, u32 threshold,
{
u32 remaining;
+ if (threshold == STM32_DMA_FIFO_THRESHOLD_NONE)
+ return false;
+
if (width != DMA_SLAVE_BUSWIDTH_UNDEFINED) {
if (burst != 0) {
/*
@@ -302,6 +309,10 @@ static bool stm32_dma_fifo_threshold_is_allowed(u32 burst, u32 threshold,
static bool stm32_dma_is_burst_possible(u32 buf_len, u32 threshold)
{
+ /* If FIFO direct mode, burst is not possible */
+ if (threshold == STM32_DMA_FIFO_THRESHOLD_NONE)
+ return false;
+
/*
* Buffer or period length has to be aligned on FIFO depth.
* Otherwise bytes may be stuck within FIFO at buffer or period
@@ -657,6 +668,12 @@ static irqreturn_t stm32_dma_chan_irq(int irq, void *devid)
dev_dbg(chan2dev(chan), "FIFO over/underrun\n");
}
}
+ if (status & STM32_DMA_DMEI) {
+ stm32_dma_irq_clear(chan, STM32_DMA_DMEI);
+ status &= ~STM32_DMA_DMEI;
+ if (sfcr & STM32_DMA_SCR_DMEIE)
+ dev_dbg(chan2dev(chan), "Direct mode overrun\n");
+ }
if (status) {
stm32_dma_irq_clear(chan, status);
dev_err(chan2dev(chan), "DMA error: status=0x%08x\n", status);
@@ -692,13 +709,13 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
int src_bus_width, dst_bus_width;
int src_burst_size, dst_burst_size;
u32 src_maxburst, dst_maxburst, src_best_burst, dst_best_burst;
- u32 dma_scr, threshold;
+ u32 dma_scr, fifoth;
src_addr_width = chan->dma_sconfig.src_addr_width;
dst_addr_width = chan->dma_sconfig.dst_addr_width;
src_maxburst = chan->dma_sconfig.src_maxburst;
dst_maxburst = chan->dma_sconfig.dst_maxburst;
- threshold = chan->threshold;
+ fifoth = chan->threshold;
switch (direction) {
case DMA_MEM_TO_DEV:
@@ -710,7 +727,7 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
/* Set device burst size */
dst_best_burst = stm32_dma_get_best_burst(buf_len,
dst_maxburst,
- threshold,
+ fifoth,
dst_addr_width);
dst_burst_size = stm32_dma_get_burst(chan, dst_best_burst);
@@ -718,7 +735,7 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
return dst_burst_size;
/* Set memory data size */
- src_addr_width = stm32_dma_get_max_width(buf_len, threshold);
+ src_addr_width = stm32_dma_get_max_width(buf_len, fifoth);
chan->mem_width = src_addr_width;
src_bus_width = stm32_dma_get_width(chan, src_addr_width);
if (src_bus_width < 0)
@@ -728,7 +745,7 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
src_maxburst = STM32_DMA_MAX_BURST;
src_best_burst = stm32_dma_get_best_burst(buf_len,
src_maxburst,
- threshold,
+ fifoth,
src_addr_width);
src_burst_size = stm32_dma_get_burst(chan, src_best_burst);
if (src_burst_size < 0)
@@ -742,7 +759,8 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
/* Set FIFO threshold */
chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_FTH_MASK;
- chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_FTH(threshold);
+ if (fifoth != STM32_DMA_FIFO_THRESHOLD_NONE)
+ chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_FTH(fifoth);
/* Set peripheral address */
chan->chan_reg.dma_spar = chan->dma_sconfig.dst_addr;
@@ -758,7 +776,7 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
/* Set device burst size */
src_best_burst = stm32_dma_get_best_burst(buf_len,
src_maxburst,
- threshold,
+ fifoth,
src_addr_width);
chan->mem_burst = src_best_burst;
src_burst_size = stm32_dma_get_burst(chan, src_best_burst);
@@ -766,7 +784,7 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
return src_burst_size;
/* Set memory data size */
- dst_addr_width = stm32_dma_get_max_width(buf_len, threshold);
+ dst_addr_width = stm32_dma_get_max_width(buf_len, fifoth);
chan->mem_width = dst_addr_width;
dst_bus_width = stm32_dma_get_width(chan, dst_addr_width);
if (dst_bus_width < 0)
@@ -776,7 +794,7 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
dst_maxburst = STM32_DMA_MAX_BURST;
dst_best_burst = stm32_dma_get_best_burst(buf_len,
dst_maxburst,
- threshold,
+ fifoth,
dst_addr_width);
chan->mem_burst = dst_best_burst;
dst_burst_size = stm32_dma_get_burst(chan, dst_best_burst);
@@ -791,7 +809,8 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
/* Set FIFO threshold */
chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_FTH_MASK;
- chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_FTH(threshold);
+ if (fifoth != STM32_DMA_FIFO_THRESHOLD_NONE)
+ chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_FTH(fifoth);
/* Set peripheral address */
chan->chan_reg.dma_spar = chan->dma_sconfig.src_addr;
@@ -1216,6 +1235,8 @@ static void stm32_dma_set_config(struct stm32_dma_chan *chan,
chan->chan_reg.dma_scr |= STM32_DMA_SCR_TEIE | STM32_DMA_SCR_TCIE;
chan->threshold = STM32_DMA_THRESHOLD_FTR_GET(cfg->features);
+ if (STM32_DMA_DIRECT_MODE_GET(cfg->features))
+ chan->threshold = STM32_DMA_FIFO_THRESHOLD_NONE;
}
static struct dma_chan *stm32_dma_of_xlate(struct of_phandle_args *dma_spec,
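The stm32-dma hunks above decode bit 2 of the device-tree "features" cell to select FIFO direct mode, in which case no FIFO threshold is programmed. A minimal standalone sketch of that decoding, with illustrative names whose values mirror the new defines (not the driver's actual code):

#include <stdint.h>

#define FTR_THRESHOLD_MASK   0x3u       /* GENMASK(1, 0) */
#define FTR_DIRECT_MODE      (1u << 2)  /* BIT(2) */
#define FIFO_THRESHOLD_NONE  0x4u       /* direct mode: FIFO bypassed */

uint32_t decode_fifo_threshold(uint32_t features)
{
	if (features & FTR_DIRECT_MODE)
		return FIFO_THRESHOLD_NONE;
	return features & FTR_THRESHOLD_MASK;
}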
diff --git a/drivers/dma/ti/Kconfig b/drivers/dma/ti/Kconfig
index f76e06651f80..79618fac119a 100644
--- a/drivers/dma/ti/Kconfig
+++ b/drivers/dma/ti/Kconfig
@@ -36,7 +36,7 @@ config DMA_OMAP
config TI_K3_UDMA
bool "Texas Instruments UDMA support"
- depends on ARCH_K3 || COMPILE_TEST
+ depends on ARCH_K3
depends on TI_SCI_PROTOCOL
depends on TI_SCI_INTA_IRQCHIP
select DMA_ENGINE
@@ -49,7 +49,7 @@ config TI_K3_UDMA
config TI_K3_UDMA_GLUE_LAYER
bool "Texas Instruments UDMA Glue layer for non DMAengine users"
- depends on ARCH_K3 || COMPILE_TEST
+ depends on ARCH_K3
depends on TI_K3_UDMA
help
Say y here to support the K3 NAVSS DMA glue interface
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index a90e154b0ae0..945b7c604f91 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -231,7 +231,6 @@ struct udma_chan {
struct udma_tx_drain tx_drain;
u32 bcnt; /* number of bytes completed since the start of the channel */
- u32 in_ring_cnt; /* number of descriptors in flight */
/* Channel configuration parameters */
struct udma_chan_config config;
@@ -574,7 +573,6 @@ static int udma_push_to_ring(struct udma_chan *uc, int idx)
struct udma_desc *d = uc->desc;
struct k3_ring *ring = NULL;
dma_addr_t paddr;
- int ret;
switch (uc->config.dir) {
case DMA_DEV_TO_MEM:
@@ -598,11 +596,7 @@ static int udma_push_to_ring(struct udma_chan *uc, int idx)
udma_sync_for_device(uc, idx);
}
- ret = k3_ringacc_ring_push(ring, &paddr);
- if (!ret)
- uc->in_ring_cnt++;
-
- return ret;
+ return k3_ringacc_ring_push(ring, &paddr);
}
static bool udma_desc_is_rx_flush(struct udma_chan *uc, dma_addr_t addr)
@@ -655,9 +649,6 @@ static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
d->hwdesc[0].cppi5_desc_size,
DMA_FROM_DEVICE);
rmb(); /* Ensure that reads are not moved before this point */
-
- if (!ret)
- uc->in_ring_cnt--;
}
return ret;
@@ -697,8 +688,6 @@ static void udma_reset_rings(struct udma_chan *uc)
udma_desc_free(&uc->terminated_desc->vd);
uc->terminated_desc = NULL;
}
-
- uc->in_ring_cnt = 0;
}
static void udma_reset_counters(struct udma_chan *uc)
@@ -1073,9 +1062,6 @@ static irqreturn_t udma_ring_irq_handler(int irq, void *data)
/* Teardown completion message */
if (cppi5_desc_is_tdcm(paddr)) {
- /* Compensate our internal pop/push counter */
- uc->in_ring_cnt++;
-
complete_all(&uc->teardown_completed);
if (uc->terminated_desc) {
@@ -1291,10 +1277,8 @@ static int udma_get_tchan(struct udma_chan *uc)
}
uc->tchan = __udma_reserve_tchan(ud, uc->config.channel_tpl, -1);
- if (IS_ERR(uc->tchan))
- return PTR_ERR(uc->tchan);
- return 0;
+ return PTR_ERR_OR_ZERO(uc->tchan);
}
static int udma_get_rchan(struct udma_chan *uc)
@@ -1308,10 +1292,8 @@ static int udma_get_rchan(struct udma_chan *uc)
}
uc->rchan = __udma_reserve_rchan(ud, uc->config.channel_tpl, -1);
- if (IS_ERR(uc->rchan))
- return PTR_ERR(uc->rchan);
- return 0;
+ return PTR_ERR_OR_ZERO(uc->rchan);
}
static int udma_get_chan_pair(struct udma_chan *uc)
@@ -1373,10 +1355,8 @@ static int udma_get_rflow(struct udma_chan *uc, int flow_id)
}
uc->rflow = __udma_get_rflow(ud, flow_id);
- if (IS_ERR(uc->rflow))
- return PTR_ERR(uc->rflow);
- return 0;
+ return PTR_ERR_OR_ZERO(uc->rflow);
}
static void udma_put_rchan(struct udma_chan *uc)
@@ -1870,6 +1850,7 @@ static int udma_alloc_chan_resources(struct dma_chan *chan)
udma_stop(uc);
if (udma_is_chan_running(uc)) {
dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
+ ret = -EBUSY;
goto err_res_free;
}
}
@@ -3189,7 +3170,7 @@ static struct udma_match_data am654_main_data = {
static struct udma_match_data am654_mcu_data = {
.psil_base = 0x6000,
- .enable_memcpy_support = true, /* TEST: DMA domains */
+ .enable_memcpy_support = false,
.statictr_z_mask = GENMASK(11, 0),
.rchan_oes_offset = 0x2000,
.tpl_levels = 2,
@@ -3471,6 +3452,9 @@ static int udma_setup_rx_flush(struct udma_dev *ud)
tr_req->icnt0 = rx_flush->buffer_size;
tr_req->icnt1 = 1;
+ dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr,
+ hwdesc->cppi5_desc_size, DMA_TO_DEVICE);
+
/* Set up descriptor to be used for packet mode */
hwdesc = &rx_flush->hwdescs[1];
hwdesc->cppi5_desc_size = ALIGN(sizeof(struct cppi5_host_desc_t) +
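Several k3-udma hunks above replace an IS_ERR()/PTR_ERR()/return-0 sequence with PTR_ERR_OR_ZERO(). A hedged sketch of the equivalence, where struct example_chan and example_get() are made-up stand-ins for a helper that returns either a valid pointer or an ERR_PTR-encoded errno:

#include <linux/err.h>

static int example_reserve(struct example_chan *uc)
{
	uc->res = example_get(uc);	/* hypothetical: valid pointer or ERR_PTR(-errno) */
	/* PTR_ERR_OR_ZERO() yields 0 for a valid pointer and the encoded
	 * -errno otherwise, collapsing the explicit three-line pattern. */
	return PTR_ERR_OR_ZERO(uc->res);
}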
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index f91f3bc1e0b2..9cf7cc1f3f72 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -3403,7 +3403,7 @@ static const struct attribute_group *amd64_edac_attr_groups[] = {
static int hw_info_get(struct amd64_pvt *pvt)
{
u16 pci_id1, pci_id2;
- int ret = -EINVAL;
+ int ret;
if (pvt->fam >= 0x17) {
pvt->umc = kcalloc(fam_type->max_mcs, sizeof(struct amd64_umc), GFP_KERNEL);
diff --git a/drivers/edac/amd8131_edac.c b/drivers/edac/amd8131_edac.c
index 93c82bc17493..169353710982 100644
--- a/drivers/edac/amd8131_edac.c
+++ b/drivers/edac/amd8131_edac.c
@@ -44,14 +44,6 @@ static void edac_pci_write_dword(struct pci_dev *dev, int reg, u32 val32)
" PCI Access Write Error at 0x%x\n", reg);
}
-static char * const bridge_str[] = {
- [NORTH_A] = "NORTH A",
- [NORTH_B] = "NORTH B",
- [SOUTH_A] = "SOUTH A",
- [SOUTH_B] = "SOUTH B",
- [NO_BRIDGE] = "NO BRIDGE",
-};
-
/* Support up to two AMD8131 chipsets on a platform */
static struct amd8131_dev_info amd8131_devices[] = {
{
diff --git a/drivers/edac/armada_xp_edac.c b/drivers/edac/armada_xp_edac.c
index a7502ebe9bdc..e3e757513d1b 100644
--- a/drivers/edac/armada_xp_edac.c
+++ b/drivers/edac/armada_xp_edac.c
@@ -78,7 +78,7 @@ struct axp_mc_drvdata {
char msg[128];
};
-/* derived from "DRAM Address Multiplexing" in the ARAMDA XP Functional Spec */
+/* derived from "DRAM Address Multiplexing" in the ARMADA XP Functional Spec */
static uint32_t axp_mc_calc_address(struct axp_mc_drvdata *drvdata,
uint8_t cs, uint8_t bank, uint16_t row,
uint16_t col)
@@ -160,12 +160,12 @@ static void axp_mc_check(struct mem_ctl_info *mci)
if (cnt_sbe)
cnt_sbe--;
else
- dev_warn(mci->pdev, "inconsistent SBE count detected");
+ dev_warn(mci->pdev, "inconsistent SBE count detected\n");
} else {
if (cnt_dbe)
cnt_dbe--;
else
- dev_warn(mci->pdev, "inconsistent DBE count detected");
+ dev_warn(mci->pdev, "inconsistent DBE count detected\n");
}
/* report earlier errors */
@@ -304,7 +304,7 @@ static int axp_mc_probe(struct platform_device *pdev)
config = readl(base + SDRAM_CONFIG_REG);
if (!(config & SDRAM_CONFIG_ECC_MASK)) {
- dev_warn(&pdev->dev, "SDRAM ECC is not enabled");
+ dev_warn(&pdev->dev, "SDRAM ECC is not enabled\n");
return -EINVAL;
}
@@ -532,9 +532,9 @@ static int aurora_l2_probe(struct platform_device *pdev)
l2x0_aux_ctrl = readl(base + L2X0_AUX_CTRL);
if (!(l2x0_aux_ctrl & AURORA_ACR_PARITY_EN))
- dev_warn(&pdev->dev, "tag parity is not enabled");
+ dev_warn(&pdev->dev, "tag parity is not enabled\n");
if (!(l2x0_aux_ctrl & AURORA_ACR_ECC_EN))
- dev_warn(&pdev->dev, "data ECC is not enabled");
+ dev_warn(&pdev->dev, "data ECC is not enabled\n");
dci = edac_device_alloc_ctl_info(sizeof(*drvdata),
"cpu", 1, "L", 1, 2, NULL, 0, 0);
@@ -618,7 +618,7 @@ static int __init armada_xp_edac_init(void)
res = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
if (res)
- pr_warn("Aramda XP EDAC drivers fail to register\n");
+ pr_warn("Armada XP EDAC drivers fail to register\n");
return 0;
}
diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c
index df08de963d10..9b0044cd21cd 100644
--- a/drivers/edac/i10nm_base.c
+++ b/drivers/edac/i10nm_base.c
@@ -122,10 +122,22 @@ static int i10nm_get_all_munits(void)
return 0;
}
+static struct res_config i10nm_cfg0 = {
+ .type = I10NM,
+ .decs_did = 0x3452,
+ .busno_cfg_offset = 0xcc,
+};
+
+static struct res_config i10nm_cfg1 = {
+ .type = I10NM,
+ .decs_did = 0x3452,
+ .busno_cfg_offset = 0xd0,
+};
+
static const struct x86_cpu_id i10nm_cpuids[] = {
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, &i10nm_cfg0),
+ X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, &i10nm_cfg0),
+ X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, &i10nm_cfg1),
{}
};
MODULE_DEVICE_TABLE(x86cpu, i10nm_cpuids);
@@ -161,7 +173,7 @@ static int i10nm_get_dimm_config(struct mem_ctl_info *mci)
mtr, mcddrtcfg, imc->mc, i, j);
if (IS_DIMM_PRESENT(mtr))
- ndimms += skx_get_dimm_info(mtr, 0, dimm,
+ ndimms += skx_get_dimm_info(mtr, 0, 0, dimm,
imc, i, j);
else if (IS_NVDIMM_PRESENT(mcddrtcfg, j))
ndimms += skx_get_nvdimm_info(dimm, imc, i, j,
@@ -234,6 +246,7 @@ static int __init i10nm_init(void)
{
u8 mc = 0, src_id = 0, node_id = 0;
const struct x86_cpu_id *id;
+ struct res_config *cfg;
const char *owner;
struct skx_dev *d;
int rc, i, off[3] = {0xd0, 0xc8, 0xcc};
@@ -249,11 +262,17 @@ static int __init i10nm_init(void)
if (!id)
return -ENODEV;
+ cfg = (struct res_config *)id->driver_data;
+
+ /* Newer steppings have different offset for ATOM_TREMONT_D/ICELAKE_X */
+ if (boot_cpu_data.x86_stepping >= 4)
+ cfg->busno_cfg_offset = 0xd0;
+
rc = skx_get_hi_lo(0x09a2, off, &tolm, &tohm);
if (rc)
return rc;
- rc = skx_get_all_bus_mappings(0x3452, 0xcc, I10NM, &i10nm_edac_list);
+ rc = skx_get_all_bus_mappings(cfg, &i10nm_edac_list);
if (rc < 0)
goto fail;
if (rc == 0) {
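The i10nm change above moves the per-model difference (the bus-number config register offset) into res_config structures hung off x86_cpu_id::driver_data. A minimal sketch of that lookup pattern, using hypothetical config and table names:

#include <linux/init.h>
#include <linux/errno.h>
#include <asm/cpu_device_id.h>

struct example_cfg {
	int busno_cfg_offset;		/* per-model register offset */
};

static struct example_cfg cfg_a = { .busno_cfg_offset = 0xcc };
static struct example_cfg cfg_b = { .busno_cfg_offset = 0xd0 };

static const struct x86_cpu_id example_cpuids[] = {
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, &cfg_a),
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, &cfg_b),
	{}
};

static int __init example_init(void)
{
	const struct x86_cpu_id *id = x86_match_cpu(example_cpuids);

	if (!id)
		return -ENODEV;
	/* the matched entry carries the per-model configuration */
	return use_cfg((struct example_cfg *)id->driver_data);	/* use_cfg() is hypothetical */
}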
diff --git a/drivers/edac/skx_base.c b/drivers/edac/skx_base.c
index 46a3a3440f5e..b907a0f4ece6 100644
--- a/drivers/edac/skx_base.c
+++ b/drivers/edac/skx_base.c
@@ -157,33 +157,35 @@ fail:
return -ENODEV;
}
+static struct res_config skx_cfg = {
+ .type = SKX,
+ .decs_did = 0x2016,
+ .busno_cfg_offset = 0xcc,
+};
+
static const struct x86_cpu_id skx_cpuids[] = {
- X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, &skx_cfg),
{ }
};
MODULE_DEVICE_TABLE(x86cpu, skx_cpuids);
-#define SKX_GET_MTMTR(dev, reg) \
- pci_read_config_dword((dev), 0x87c, &(reg))
-
-static bool skx_check_ecc(struct pci_dev *pdev)
+static bool skx_check_ecc(u32 mcmtr)
{
- u32 mtmtr;
-
- SKX_GET_MTMTR(pdev, mtmtr);
-
- return !!GET_BITFIELD(mtmtr, 2, 2);
+ return !!GET_BITFIELD(mcmtr, 2, 2);
}
static int skx_get_dimm_config(struct mem_ctl_info *mci)
{
struct skx_pvt *pvt = mci->pvt_info;
+ u32 mtr, mcmtr, amap, mcddrtcfg;
struct skx_imc *imc = pvt->imc;
- u32 mtr, amap, mcddrtcfg;
struct dimm_info *dimm;
int i, j;
int ndimms;
+ /* Only the mcmtr on the first channel is effective */
+ pci_read_config_dword(imc->chan[0].cdev, 0x87c, &mcmtr);
+
for (i = 0; i < SKX_NUM_CHANNELS; i++) {
ndimms = 0;
pci_read_config_dword(imc->chan[i].cdev, 0x8C, &amap);
@@ -193,14 +195,14 @@ static int skx_get_dimm_config(struct mem_ctl_info *mci)
pci_read_config_dword(imc->chan[i].cdev,
0x80 + 4 * j, &mtr);
if (IS_DIMM_PRESENT(mtr)) {
- ndimms += skx_get_dimm_info(mtr, amap, dimm, imc, i, j);
+ ndimms += skx_get_dimm_info(mtr, mcmtr, amap, dimm, imc, i, j);
} else if (IS_NVDIMM_PRESENT(mcddrtcfg, j)) {
ndimms += skx_get_nvdimm_info(dimm, imc, i, j,
EDAC_MOD_STR);
nvdimm_count++;
}
}
- if (ndimms && !skx_check_ecc(imc->chan[0].cdev)) {
+ if (ndimms && !skx_check_ecc(mcmtr)) {
skx_printk(KERN_ERR, "ECC is disabled on imc %d\n", imc->mc);
return -ENODEV;
}
@@ -641,6 +643,7 @@ static inline void teardown_skx_debug(void) {}
static int __init skx_init(void)
{
const struct x86_cpu_id *id;
+ struct res_config *cfg;
const struct munit *m;
const char *owner;
int rc = 0, i, off[3] = {0xd0, 0xd4, 0xd8};
@@ -657,11 +660,13 @@ static int __init skx_init(void)
if (!id)
return -ENODEV;
+ cfg = (struct res_config *)id->driver_data;
+
rc = skx_get_hi_lo(0x2034, off, &skx_tolm, &skx_tohm);
if (rc)
return rc;
- rc = skx_get_all_bus_mappings(0x2016, 0xcc, SKX, &skx_edac_list);
+ rc = skx_get_all_bus_mappings(cfg, &skx_edac_list);
if (rc < 0)
goto fail;
if (rc == 0) {
diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c
index 99bbaf629b8d..46be1a77bd1d 100644
--- a/drivers/edac/skx_common.c
+++ b/drivers/edac/skx_common.c
@@ -197,12 +197,11 @@ static int get_width(u32 mtr)
}
/*
- * We use the per-socket device @did to count how many sockets are present,
+ * We use the per-socket device @cfg->did to count how many sockets are present,
* and to detemine which PCI buses are associated with each socket. Allocate
* and build the full list of all the skx_dev structures that we need here.
*/
-int skx_get_all_bus_mappings(unsigned int did, int off, enum type type,
- struct list_head **list)
+int skx_get_all_bus_mappings(struct res_config *cfg, struct list_head **list)
{
struct pci_dev *pdev, *prev;
struct skx_dev *d;
@@ -211,7 +210,7 @@ int skx_get_all_bus_mappings(unsigned int did, int off, enum type type,
prev = NULL;
for (;;) {
- pdev = pci_get_device(PCI_VENDOR_ID_INTEL, did, prev);
+ pdev = pci_get_device(PCI_VENDOR_ID_INTEL, cfg->decs_did, prev);
if (!pdev)
break;
ndev++;
@@ -221,7 +220,7 @@ int skx_get_all_bus_mappings(unsigned int did, int off, enum type type,
return -ENOMEM;
}
- if (pci_read_config_dword(pdev, off, &reg)) {
+ if (pci_read_config_dword(pdev, cfg->busno_cfg_offset, &reg)) {
kfree(d);
pci_dev_put(pdev);
skx_printk(KERN_ERR, "Failed to read bus idx\n");
@@ -230,7 +229,7 @@ int skx_get_all_bus_mappings(unsigned int did, int off, enum type type,
d->bus[0] = GET_BITFIELD(reg, 0, 7);
d->bus[1] = GET_BITFIELD(reg, 8, 15);
- if (type == SKX) {
+ if (cfg->type == SKX) {
d->seg = pci_domain_nr(pdev->bus);
d->bus[2] = GET_BITFIELD(reg, 16, 23);
d->bus[3] = GET_BITFIELD(reg, 24, 31);
@@ -304,7 +303,7 @@ static int skx_get_dimm_attr(u32 reg, int lobit, int hibit, int add,
#define numrow(reg) skx_get_dimm_attr(reg, 2, 4, 12, 1, 6, "rows")
#define numcol(reg) skx_get_dimm_attr(reg, 0, 1, 10, 0, 2, "cols")
-int skx_get_dimm_info(u32 mtr, u32 amap, struct dimm_info *dimm,
+int skx_get_dimm_info(u32 mtr, u32 mcmtr, u32 amap, struct dimm_info *dimm,
struct skx_imc *imc, int chan, int dimmno)
{
int banks = 16, ranks, rows, cols, npages;
@@ -324,8 +323,8 @@ int skx_get_dimm_info(u32 mtr, u32 amap, struct dimm_info *dimm,
imc->mc, chan, dimmno, size, npages,
banks, 1 << ranks, rows, cols);
- imc->chan[chan].dimms[dimmno].close_pg = GET_BITFIELD(mtr, 0, 0);
- imc->chan[chan].dimms[dimmno].bank_xor_enable = GET_BITFIELD(mtr, 9, 9);
+ imc->chan[chan].dimms[dimmno].close_pg = GET_BITFIELD(mcmtr, 0, 0);
+ imc->chan[chan].dimms[dimmno].bank_xor_enable = GET_BITFIELD(mcmtr, 9, 9);
imc->chan[chan].dimms[dimmno].fine_grain_bank = GET_BITFIELD(amap, 0, 0);
imc->chan[chan].dimms[dimmno].rowbits = rows;
imc->chan[chan].dimms[dimmno].colbits = cols;
diff --git a/drivers/edac/skx_common.h b/drivers/edac/skx_common.h
index 60d1ea669afd..78f8c1de0b71 100644
--- a/drivers/edac/skx_common.h
+++ b/drivers/edac/skx_common.h
@@ -112,6 +112,14 @@ struct decoded_addr {
int bank_group;
};
+struct res_config {
+ enum type type;
+ /* Configuration agent device ID */
+ unsigned int decs_did;
+ /* Default bus number configuration register offset */
+ int busno_cfg_offset;
+};
+
typedef int (*get_dimm_config_f)(struct mem_ctl_info *mci);
typedef bool (*skx_decode_f)(struct decoded_addr *res);
typedef void (*skx_show_retry_log_f)(struct decoded_addr *res, char *msg, int len);
@@ -123,12 +131,11 @@ void skx_set_decode(skx_decode_f decode, skx_show_retry_log_f show_retry_log);
int skx_get_src_id(struct skx_dev *d, int off, u8 *id);
int skx_get_node_id(struct skx_dev *d, u8 *id);
-int skx_get_all_bus_mappings(unsigned int did, int off, enum type,
- struct list_head **list);
+int skx_get_all_bus_mappings(struct res_config *cfg, struct list_head **list);
int skx_get_hi_lo(unsigned int did, int off[], u64 *tolm, u64 *tohm);
-int skx_get_dimm_info(u32 mtr, u32 amap, struct dimm_info *dimm,
+int skx_get_dimm_info(u32 mtr, u32 mcmtr, u32 amap, struct dimm_info *dimm,
struct skx_imc *imc, int chan, int dimmno);
int skx_get_nvdimm_info(struct dimm_info *dimm, struct skx_imc *imc,
diff --git a/drivers/edac/thunderx_edac.c b/drivers/edac/thunderx_edac.c
index 34be60fe6892..4af9744cc6d0 100644
--- a/drivers/edac/thunderx_edac.c
+++ b/drivers/edac/thunderx_edac.c
@@ -1278,7 +1278,7 @@ OCX_DEBUGFS_ATTR(lne23_badcnt, OCX_LNE_BAD_CNT(23));
OCX_DEBUGFS_ATTR(com_int, OCX_COM_INT_W1S);
-struct debugfs_entry *ocx_dfs_ents[] = {
+static struct debugfs_entry *ocx_dfs_ents[] = {
&debugfs_tlk0_ecc_ctl,
&debugfs_tlk1_ecc_ctl,
&debugfs_tlk2_ecc_ctl,
@@ -1919,19 +1919,19 @@ err_free:
L2C_DEBUGFS_ATTR(tad_int, L2C_TAD_INT_W1S);
-struct debugfs_entry *l2c_tad_dfs_ents[] = {
+static struct debugfs_entry *l2c_tad_dfs_ents[] = {
&debugfs_tad_int,
};
L2C_DEBUGFS_ATTR(cbc_int, L2C_CBC_INT_W1S);
-struct debugfs_entry *l2c_cbc_dfs_ents[] = {
+static struct debugfs_entry *l2c_cbc_dfs_ents[] = {
&debugfs_cbc_int,
};
L2C_DEBUGFS_ATTR(mci_int, L2C_MCI_INT_W1S);
-struct debugfs_entry *l2c_mci_dfs_ents[] = {
+static struct debugfs_entry *l2c_mci_dfs_ents[] = {
&debugfs_mci_int,
};
diff --git a/drivers/edac/xgene_edac.c b/drivers/edac/xgene_edac.c
index e4a1032ba0b5..1d2c27a00a4a 100644
--- a/drivers/edac/xgene_edac.c
+++ b/drivers/edac/xgene_edac.c
@@ -1349,7 +1349,6 @@ static int xgene_edac_l3_remove(struct xgene_edac_dev_ctx *l3)
#define WORD_ALIGNED_ERR_MASK BIT(28)
#define PAGE_ACCESS_ERR_MASK BIT(27)
#define WRITE_ACCESS_MASK BIT(26)
-#define RBERRADDR_RD(src) ((src) & 0x03FFFFFF)
static const char * const soc_mem_err_v1[] = {
"10GbE0",
@@ -1483,13 +1482,11 @@ static void xgene_edac_rb_report(struct edac_device_ctl_info *edac_dev)
return;
if (reg & STICKYERR_MASK) {
bool write;
- u32 address;
dev_err(edac_dev->dev, "IOB bus access error(s)\n");
if (regmap_read(ctx->edac->rb_map, RBEIR, &reg))
return;
write = reg & WRITE_ACCESS_MASK ? 1 : 0;
- address = RBERRADDR_RD(reg);
if (reg & AGENT_OFFLINE_ERR_MASK)
dev_err(edac_dev->dev,
"IOB bus %s access to offline agent error\n",
diff --git a/drivers/extcon/extcon-adc-jack.c b/drivers/extcon/extcon-adc-jack.c
index ad02dc6747a4..0317b614b680 100644
--- a/drivers/extcon/extcon-adc-jack.c
+++ b/drivers/extcon/extcon-adc-jack.c
@@ -124,7 +124,7 @@ static int adc_jack_probe(struct platform_device *pdev)
for (i = 0; data->adc_conditions[i].id != EXTCON_NONE; i++);
data->num_conditions = i;
- data->chan = iio_channel_get(&pdev->dev, pdata->consumer_channel);
+ data->chan = devm_iio_channel_get(&pdev->dev, pdata->consumer_channel);
if (IS_ERR(data->chan))
return PTR_ERR(data->chan);
@@ -164,7 +164,6 @@ static int adc_jack_remove(struct platform_device *pdev)
free_irq(data->irq, data);
cancel_work_sync(&data->handler.work);
- iio_channel_release(data->chan);
return 0;
}
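The extcon-adc-jack hunks above switch to the managed IIO channel getter: devm_iio_channel_get() ties the channel to the device's lifetime, so the explicit iio_channel_release() in the remove path can simply be dropped. A minimal sketch, where data stands in for the driver's private state:

	data->chan = devm_iio_channel_get(&pdev->dev, "consumer-channel");
	if (IS_ERR(data->chan))
		return PTR_ERR(data->chan);
	/* nothing to release on remove/unbind; devres handles it */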
diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
index 7401733db08b..aae82db542a5 100644
--- a/drivers/extcon/extcon-arizona.c
+++ b/drivers/extcon/extcon-arizona.c
@@ -1460,7 +1460,7 @@ static int arizona_extcon_probe(struct platform_device *pdev)
if (!info->input) {
dev_err(arizona->dev, "Can't allocate input dev\n");
ret = -ENOMEM;
- goto err_register;
+ return ret;
}
info->input->name = "Headset";
@@ -1492,7 +1492,7 @@ static int arizona_extcon_probe(struct platform_device *pdev)
if (ret != 0) {
dev_err(arizona->dev, "Failed to request GPIO%d: %d\n",
pdata->micd_pol_gpio, ret);
- goto err_register;
+ return ret;
}
info->micd_pol_gpio = gpio_to_desc(pdata->micd_pol_gpio);
@@ -1515,7 +1515,7 @@ static int arizona_extcon_probe(struct platform_device *pdev)
dev_err(arizona->dev,
"Failed to get microphone polarity GPIO: %d\n",
ret);
- goto err_register;
+ return ret;
}
}
@@ -1672,7 +1672,7 @@ static int arizona_extcon_probe(struct platform_device *pdev)
if (ret != 0) {
dev_err(&pdev->dev, "Failed to get JACKDET rise IRQ: %d\n",
ret);
- goto err_gpio;
+ goto err_pm;
}
ret = arizona_set_irq_wake(arizona, jack_irq_rise, 1);
@@ -1721,14 +1721,14 @@ static int arizona_extcon_probe(struct platform_device *pdev)
dev_warn(arizona->dev, "Failed to set MICVDD to bypass: %d\n",
ret);
- pm_runtime_put(&pdev->dev);
-
ret = input_register_device(info->input);
if (ret) {
dev_err(&pdev->dev, "Can't register input device: %d\n", ret);
goto err_hpdet;
}
+ pm_runtime_put(&pdev->dev);
+
return 0;
err_hpdet:
@@ -1743,10 +1743,11 @@ err_rise_wake:
arizona_set_irq_wake(arizona, jack_irq_rise, 0);
err_rise:
arizona_free_irq(arizona, jack_irq_rise, info);
+err_pm:
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
err_gpio:
gpiod_put(info->micd_pol_gpio);
-err_register:
- pm_runtime_disable(&pdev->dev);
return ret;
}
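The arizona probe rework above restores the usual unwind discipline: error labels undo earlier steps in reverse order, and the runtime-PM reference taken during probe is dropped only once nothing after it can fail. A skeleton of that shape with hypothetical step_a()/step_b()/undo_step_a() helpers:

static int example_probe(struct platform_device *pdev)
{
	int ret;

	ret = step_a(pdev);			/* hypothetical setup step */
	if (ret)
		return ret;

	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	ret = step_b(pdev);			/* hypothetical, may fail */
	if (ret)
		goto err_pm;

	pm_runtime_put(&pdev->dev);		/* drop only after the last failure point */
	return 0;

err_pm:
	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	undo_step_a(pdev);			/* hypothetical */
	return ret;
}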
diff --git a/drivers/extcon/extcon-max14577.c b/drivers/extcon/extcon-max14577.c
index 32f663436e6e..cc47d626095c 100644
--- a/drivers/extcon/extcon-max14577.c
+++ b/drivers/extcon/extcon-max14577.c
@@ -782,9 +782,19 @@ static const struct platform_device_id max14577_muic_id[] = {
};
MODULE_DEVICE_TABLE(platform, max14577_muic_id);
+static const struct of_device_id of_max14577_muic_dt_match[] = {
+ { .compatible = "maxim,max14577-muic",
+ .data = (void *)MAXIM_DEVICE_TYPE_MAX14577, },
+ { .compatible = "maxim,max77836-muic",
+ .data = (void *)MAXIM_DEVICE_TYPE_MAX77836, },
+ { },
+};
+MODULE_DEVICE_TABLE(of, of_max14577_muic_dt_match);
+
static struct platform_driver max14577_muic_driver = {
.driver = {
.name = "max14577-muic",
+ .of_match_table = of_max14577_muic_dt_match,
},
.probe = max14577_muic_probe,
.remove = max14577_muic_remove,
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
index 2dfbfec572f9..0a6438cbb3f3 100644
--- a/drivers/extcon/extcon.c
+++ b/drivers/extcon/extcon.c
@@ -900,7 +900,7 @@ int extcon_register_notifier(struct extcon_dev *edev, unsigned int id,
struct notifier_block *nb)
{
unsigned long flags;
- int ret, idx = -EINVAL;
+ int ret, idx;
if (!edev || !nb)
return -EINVAL;
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 6e291d8f3a27..c7ea4f2d5ca6 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -1081,8 +1081,6 @@ static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
return -EINVAL;
p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(a->packets);
- if (!access_ok(p, a->size))
- return -EFAULT;
end = (void __user *)p + a->size;
count = 0;
@@ -1120,7 +1118,7 @@ static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
&p->header[transmit_header_bytes / 4];
if (next > end)
return -EINVAL;
- if (__copy_from_user
+ if (copy_from_user
(u.packet.header, p->header, transmit_header_bytes))
return -EFAULT;
if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
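In the firewire ioctl path above, the standalone access_ok() check is dropped because copy_from_user() performs its own pointer validation; only the __copy_from_user() variant relied on a prior access_ok(). The checked copy therefore stands on its own:

	/* copy_from_user() returns the number of uncopied bytes; treat any
	 * non-zero result as -EFAULT. No separate access_ok() is required. */
	if (copy_from_user(u.packet.header, p->header, transmit_header_bytes))
		return -EFAULT;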
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 8007d4aa76dc..fbd785dd0513 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -178,8 +178,9 @@ config ISCSI_IBFT
Otherwise, say N.
config RASPBERRYPI_FIRMWARE
- tristate "Raspberry Pi Firmware Driver"
+ bool "Raspberry Pi Firmware Driver"
depends on BCM2835_MBOX
+ default USB_PCI
help
This option enables support for communicating with the firmware on the
Raspberry Pi.
@@ -295,15 +296,13 @@ config TURRIS_MOX_RWTM
other manufacturing data and also utilize the Entropy Bit Generator
for hardware random number generation.
-config HAVE_ARM_SMCCC
- bool
-
-source "drivers/firmware/psci/Kconfig"
source "drivers/firmware/broadcom/Kconfig"
source "drivers/firmware/google/Kconfig"
source "drivers/firmware/efi/Kconfig"
source "drivers/firmware/imx/Kconfig"
source "drivers/firmware/meson/Kconfig"
+source "drivers/firmware/psci/Kconfig"
+source "drivers/firmware/smccc/Kconfig"
source "drivers/firmware/tegra/Kconfig"
source "drivers/firmware/xilinx/Kconfig"
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index e9fb838af4df..99510be9f5ed 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -23,12 +23,13 @@ obj-$(CONFIG_TRUSTED_FOUNDATIONS) += trusted_foundations.o
obj-$(CONFIG_TURRIS_MOX_RWTM) += turris-mox-rwtm.o
obj-$(CONFIG_ARM_SCMI_PROTOCOL) += arm_scmi/
-obj-y += psci/
obj-y += broadcom/
obj-y += meson/
obj-$(CONFIG_GOOGLE_FIRMWARE) += google/
obj-$(CONFIG_EFI) += efi/
obj-$(CONFIG_UEFI_CPER) += efi/
obj-y += imx/
+obj-y += psci/
+obj-y += smccc/
obj-y += tegra/
obj-y += xilinx/
diff --git a/drivers/firmware/arm_scmi/Makefile b/drivers/firmware/arm_scmi/Makefile
index 6694d0d908d6..1cad32b38b29 100644
--- a/drivers/firmware/arm_scmi/Makefile
+++ b/drivers/firmware/arm_scmi/Makefile
@@ -2,6 +2,8 @@
obj-y = scmi-bus.o scmi-driver.o scmi-protocols.o scmi-transport.o
scmi-bus-y = bus.o
scmi-driver-y = driver.o
-scmi-transport-y = mailbox.o shmem.o
+scmi-transport-y = shmem.o
+scmi-transport-$(CONFIG_MAILBOX) += mailbox.o
+scmi-transport-$(CONFIG_ARM_PSCI_FW) += smc.o
scmi-protocols-y = base.o clock.o perf.o power.o reset.o sensors.o
obj-$(CONFIG_ARM_SCMI_POWER_DOMAIN) += scmi_pm_domain.o
diff --git a/drivers/firmware/arm_scmi/base.c b/drivers/firmware/arm_scmi/base.c
index f804e8af6521..ce7d9203e41b 100644
--- a/drivers/firmware/arm_scmi/base.c
+++ b/drivers/firmware/arm_scmi/base.c
@@ -14,6 +14,13 @@ enum scmi_base_protocol_cmd {
BASE_DISCOVER_LIST_PROTOCOLS = 0x6,
BASE_DISCOVER_AGENT = 0x7,
BASE_NOTIFY_ERRORS = 0x8,
+ BASE_SET_DEVICE_PERMISSIONS = 0x9,
+ BASE_SET_PROTOCOL_PERMISSIONS = 0xa,
+ BASE_RESET_AGENT_CONFIGURATION = 0xb,
+};
+
+enum scmi_base_protocol_notify {
+ BASE_ERROR_EVENT = 0x0,
};
struct scmi_msg_resp_base_attributes {
diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h
index 5ac06469b01c..31fe5a22a011 100644
--- a/drivers/firmware/arm_scmi/common.h
+++ b/drivers/firmware/arm_scmi/common.h
@@ -178,6 +178,8 @@ struct scmi_chan_info {
* @send_message: Callback to send a message
* @mark_txdone: Callback to mark tx as done
* @fetch_response: Callback to fetch response
+ * @fetch_notification: Callback to fetch notification
+ * @clear_channel: Callback to clear a channel
* @poll_done: Callback to poll transfer status
*/
struct scmi_transport_ops {
@@ -190,6 +192,9 @@ struct scmi_transport_ops {
void (*mark_txdone)(struct scmi_chan_info *cinfo, int ret);
void (*fetch_response)(struct scmi_chan_info *cinfo,
struct scmi_xfer *xfer);
+ void (*fetch_notification)(struct scmi_chan_info *cinfo,
+ size_t max_len, struct scmi_xfer *xfer);
+ void (*clear_channel)(struct scmi_chan_info *cinfo);
bool (*poll_done)(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer);
};
@@ -210,6 +215,9 @@ struct scmi_desc {
};
extern const struct scmi_desc scmi_mailbox_desc;
+#ifdef CONFIG_HAVE_ARM_SMCCC
+extern const struct scmi_desc scmi_smc_desc;
+#endif
void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr);
void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id);
@@ -222,5 +230,8 @@ void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem,
u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem);
void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem,
struct scmi_xfer *xfer);
+void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem,
+ size_t max_len, struct scmi_xfer *xfer);
+void shmem_clear_channel(struct scmi_shared_mem __iomem *shmem);
bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem,
struct scmi_xfer *xfer);
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index dbec767222e9..7483cacf63f9 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -76,6 +76,7 @@ struct scmi_xfers_info {
* implementation version and (sub-)vendor identification.
* @handle: Instance of SCMI handle to send to clients
* @tx_minfo: Universal Transmit Message management info
+ * @rx_minfo: Universal Receive Message management info
* @tx_idr: IDR object to map protocol id to Tx channel info pointer
* @rx_idr: IDR object to map protocol id to Rx channel info pointer
* @protocols_imp: List of protocols implemented, currently maximum of
@@ -89,6 +90,7 @@ struct scmi_info {
struct scmi_revision_info version;
struct scmi_handle handle;
struct scmi_xfers_info tx_minfo;
+ struct scmi_xfers_info rx_minfo;
struct idr tx_idr;
struct idr rx_idr;
u8 *protocols_imp;
@@ -200,37 +202,66 @@ __scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
spin_unlock_irqrestore(&minfo->xfer_lock, flags);
}
-/**
- * scmi_rx_callback() - callback for receiving messages
- *
- * @cinfo: SCMI channel info
- * @msg_hdr: Message header
- *
- * Processes one received message to appropriate transfer information and
- * signals completion of the transfer.
- *
- * NOTE: This function will be invoked in IRQ context, hence should be
- * as optimal as possible.
- */
-void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr)
+static void scmi_handle_notification(struct scmi_chan_info *cinfo, u32 msg_hdr)
{
- struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
- struct scmi_xfers_info *minfo = &info->tx_minfo;
- u16 xfer_id = MSG_XTRACT_TOKEN(msg_hdr);
- u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);
- struct device *dev = cinfo->dev;
struct scmi_xfer *xfer;
+ struct device *dev = cinfo->dev;
+ struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
+ struct scmi_xfers_info *minfo = &info->rx_minfo;
- if (msg_type == MSG_TYPE_NOTIFICATION)
- return; /* Notifications not yet supported */
+ xfer = scmi_xfer_get(cinfo->handle, minfo);
+ if (IS_ERR(xfer)) {
+ dev_err(dev, "failed to get free message slot (%ld)\n",
+ PTR_ERR(xfer));
+ info->desc->ops->clear_channel(cinfo);
+ return;
+ }
+
+ unpack_scmi_header(msg_hdr, &xfer->hdr);
+ scmi_dump_header_dbg(dev, &xfer->hdr);
+ info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size,
+ xfer);
+
+ trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
+ xfer->hdr.protocol_id, xfer->hdr.seq,
+ MSG_TYPE_NOTIFICATION);
+
+ __scmi_xfer_put(minfo, xfer);
+
+ info->desc->ops->clear_channel(cinfo);
+}
+
+static void scmi_handle_response(struct scmi_chan_info *cinfo,
+ u16 xfer_id, u8 msg_type)
+{
+ struct scmi_xfer *xfer;
+ struct device *dev = cinfo->dev;
+ struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
+ struct scmi_xfers_info *minfo = &info->tx_minfo;
/* Are we even expecting this? */
if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
dev_err(dev, "message for %d is not expected!\n", xfer_id);
+ info->desc->ops->clear_channel(cinfo);
return;
}
xfer = &minfo->xfer_block[xfer_id];
+ /*
+ * Even if a response was indeed expected on this slot at this point,
+ * a buggy platform could wrongly reply feeding us an unexpected
+ * delayed response we're not prepared to handle: bail-out safely
+ * blaming firmware.
+ */
+ if (unlikely(msg_type == MSG_TYPE_DELAYED_RESP && !xfer->async_done)) {
+ dev_err(dev,
+ "Delayed Response for %d not expected! Buggy F/W ?\n",
+ xfer_id);
+ info->desc->ops->clear_channel(cinfo);
+ /* It was unexpected, so nobody will clear the xfer if not us */
+ __scmi_xfer_put(minfo, xfer);
+ return;
+ }
scmi_dump_header_dbg(dev, &xfer->hdr);
@@ -240,10 +271,43 @@ void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr)
xfer->hdr.protocol_id, xfer->hdr.seq,
msg_type);
- if (msg_type == MSG_TYPE_DELAYED_RESP)
+ if (msg_type == MSG_TYPE_DELAYED_RESP) {
+ info->desc->ops->clear_channel(cinfo);
complete(xfer->async_done);
- else
+ } else {
complete(&xfer->done);
+ }
+}
+
+/**
+ * scmi_rx_callback() - callback for receiving messages
+ *
+ * @cinfo: SCMI channel info
+ * @msg_hdr: Message header
+ *
+ * Processes one received message to appropriate transfer information and
+ * signals completion of the transfer.
+ *
+ * NOTE: This function will be invoked in IRQ context, hence should be
+ * as optimal as possible.
+ */
+void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr)
+{
+ u16 xfer_id = MSG_XTRACT_TOKEN(msg_hdr);
+ u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);
+
+ switch (msg_type) {
+ case MSG_TYPE_NOTIFICATION:
+ scmi_handle_notification(cinfo, msg_hdr);
+ break;
+ case MSG_TYPE_COMMAND:
+ case MSG_TYPE_DELAYED_RESP:
+ scmi_handle_response(cinfo, xfer_id, msg_type);
+ break;
+ default:
+ WARN_ONCE(1, "received unknown msg_type:%d\n", msg_type);
+ break;
+ }
}
/**
@@ -525,13 +589,13 @@ int scmi_handle_put(const struct scmi_handle *handle)
return 0;
}
-static int scmi_xfer_info_init(struct scmi_info *sinfo)
+static int __scmi_xfer_info_init(struct scmi_info *sinfo,
+ struct scmi_xfers_info *info)
{
int i;
struct scmi_xfer *xfer;
struct device *dev = sinfo->dev;
const struct scmi_desc *desc = sinfo->desc;
- struct scmi_xfers_info *info = &sinfo->tx_minfo;
/* Pre-allocated messages, no more than what hdr.seq can support */
if (WARN_ON(desc->max_msg >= MSG_TOKEN_MAX)) {
@@ -566,6 +630,16 @@ static int scmi_xfer_info_init(struct scmi_info *sinfo)
return 0;
}
+static int scmi_xfer_info_init(struct scmi_info *sinfo)
+{
+ int ret = __scmi_xfer_info_init(sinfo, &sinfo->tx_minfo);
+
+ if (!ret && idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE))
+ ret = __scmi_xfer_info_init(sinfo, &sinfo->rx_minfo);
+
+ return ret;
+}
+
static int scmi_chan_setup(struct scmi_info *info, struct device *dev,
int prot_id, bool tx)
{
@@ -699,10 +773,6 @@ static int scmi_probe(struct platform_device *pdev)
info->desc = desc;
INIT_LIST_HEAD(&info->node);
- ret = scmi_xfer_info_init(info);
- if (ret)
- return ret;
-
platform_set_drvdata(pdev, info);
idr_init(&info->tx_idr);
idr_init(&info->rx_idr);
@@ -715,6 +785,10 @@ static int scmi_probe(struct platform_device *pdev)
if (ret)
return ret;
+ ret = scmi_xfer_info_init(info);
+ if (ret)
+ return ret;
+
ret = scmi_base_protocol_init(handle);
if (ret) {
dev_err(dev, "unable to communicate with SCMI(%d)\n", ret);
@@ -827,6 +901,9 @@ ATTRIBUTE_GROUPS(versions);
/* Each compatible listed below must have descriptor associated with it */
static const struct of_device_id scmi_of_match[] = {
{ .compatible = "arm,scmi", .data = &scmi_mailbox_desc },
+#ifdef CONFIG_ARM_PSCI_FW
+ { .compatible = "arm,scmi-smc", .data = &scmi_smc_desc},
+#endif
{ /* Sentinel */ },
};
diff --git a/drivers/firmware/arm_scmi/mailbox.c b/drivers/firmware/arm_scmi/mailbox.c
index 73077bbc4ad9..6998dc86b5ce 100644
--- a/drivers/firmware/arm_scmi/mailbox.c
+++ b/drivers/firmware/arm_scmi/mailbox.c
@@ -158,6 +158,21 @@ static void mailbox_fetch_response(struct scmi_chan_info *cinfo,
shmem_fetch_response(smbox->shmem, xfer);
}
+static void mailbox_fetch_notification(struct scmi_chan_info *cinfo,
+ size_t max_len, struct scmi_xfer *xfer)
+{
+ struct scmi_mailbox *smbox = cinfo->transport_info;
+
+ shmem_fetch_notification(smbox->shmem, max_len, xfer);
+}
+
+static void mailbox_clear_channel(struct scmi_chan_info *cinfo)
+{
+ struct scmi_mailbox *smbox = cinfo->transport_info;
+
+ shmem_clear_channel(smbox->shmem);
+}
+
static bool
mailbox_poll_done(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer)
{
@@ -173,6 +188,8 @@ static struct scmi_transport_ops scmi_mailbox_ops = {
.send_message = mailbox_send_message,
.mark_txdone = mailbox_mark_txdone,
.fetch_response = mailbox_fetch_response,
+ .fetch_notification = mailbox_fetch_notification,
+ .clear_channel = mailbox_clear_channel,
.poll_done = mailbox_poll_done,
};
diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c
index 34f3a917dd8d..eadc171e254b 100644
--- a/drivers/firmware/arm_scmi/perf.c
+++ b/drivers/firmware/arm_scmi/perf.c
@@ -27,6 +27,11 @@ enum scmi_performance_protocol_cmd {
PERF_DESCRIBE_FASTCHANNEL = 0xb,
};
+enum scmi_performance_protocol_notify {
+ PERFORMANCE_LIMITS_CHANGED = 0x0,
+ PERFORMANCE_LEVEL_CHANGED = 0x1,
+};
+
struct scmi_opp {
u32 perf;
u32 power;
diff --git a/drivers/firmware/arm_scmi/power.c b/drivers/firmware/arm_scmi/power.c
index 214886ce84f1..cf7f0312381b 100644
--- a/drivers/firmware/arm_scmi/power.c
+++ b/drivers/firmware/arm_scmi/power.c
@@ -12,6 +12,12 @@ enum scmi_power_protocol_cmd {
POWER_STATE_SET = 0x4,
POWER_STATE_GET = 0x5,
POWER_STATE_NOTIFY = 0x6,
+ POWER_STATE_CHANGE_REQUESTED_NOTIFY = 0x7,
+};
+
+enum scmi_power_protocol_notify {
+ POWER_STATE_CHANGED = 0x0,
+ POWER_STATE_CHANGE_REQUESTED = 0x1,
};
struct scmi_msg_resp_power_attributes {
diff --git a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c
index eba61b9c1f53..db1b1ab303da 100644
--- a/drivers/firmware/arm_scmi/sensors.c
+++ b/drivers/firmware/arm_scmi/sensors.c
@@ -14,6 +14,10 @@ enum scmi_sensor_protocol_cmd {
SENSOR_READING_GET = 0x6,
};
+enum scmi_sensor_protocol_notify {
+ SENSOR_TRIP_POINT_EVENT = 0x0,
+};
+
struct scmi_msg_resp_sensor_attributes {
__le16 num_sensors;
u8 max_requests;
diff --git a/drivers/firmware/arm_scmi/shmem.c b/drivers/firmware/arm_scmi/shmem.c
index e1e816e0018c..0e3eaea5d852 100644
--- a/drivers/firmware/arm_scmi/shmem.c
+++ b/drivers/firmware/arm_scmi/shmem.c
@@ -67,6 +67,21 @@ void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem,
memcpy_fromio(xfer->rx.buf, shmem->msg_payload + 4, xfer->rx.len);
}
+void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem,
+ size_t max_len, struct scmi_xfer *xfer)
+{
+ /* Skip only the length of header in shmem area i.e 4 bytes */
+ xfer->rx.len = min_t(size_t, max_len, ioread32(&shmem->length) - 4);
+
+ /* Take a copy to the rx buffer.. */
+ memcpy_fromio(xfer->rx.buf, shmem->msg_payload, xfer->rx.len);
+}
+
+void shmem_clear_channel(struct scmi_shared_mem __iomem *shmem)
+{
+ iowrite32(SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE, &shmem->channel_status);
+}
+
bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem,
struct scmi_xfer *xfer)
{
diff --git a/drivers/firmware/arm_scmi/smc.c b/drivers/firmware/arm_scmi/smc.c
new file mode 100644
index 000000000000..49bc4b0e8428
--- /dev/null
+++ b/drivers/firmware/arm_scmi/smc.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Message SMC/HVC
+ * Transport driver
+ *
+ * Copyright 2020 NXP
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+
+#include "common.h"
+
+/**
+ * struct scmi_smc - Structure representing a SCMI smc transport
+ *
+ * @cinfo: SCMI channel info
+ * @shmem: Transmit/Receive shared memory area
+ * @func_id: smc/hvc call function id
+ */
+
+struct scmi_smc {
+ struct scmi_chan_info *cinfo;
+ struct scmi_shared_mem __iomem *shmem;
+ struct mutex shmem_lock;
+ u32 func_id;
+};
+
+static bool smc_chan_available(struct device *dev, int idx)
+{
+ struct device_node *np = of_parse_phandle(dev->of_node, "shmem", 0);
+ if (!np)
+ return false;
+
+ of_node_put(np);
+ return true;
+}
+
+static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
+ bool tx)
+{
+ struct device *cdev = cinfo->dev;
+ struct scmi_smc *scmi_info;
+ resource_size_t size;
+ struct resource res;
+ struct device_node *np;
+ u32 func_id;
+ int ret;
+
+ if (!tx)
+ return -ENODEV;
+
+ scmi_info = devm_kzalloc(dev, sizeof(*scmi_info), GFP_KERNEL);
+ if (!scmi_info)
+ return -ENOMEM;
+
+ np = of_parse_phandle(cdev->of_node, "shmem", 0);
+ ret = of_address_to_resource(np, 0, &res);
+ of_node_put(np);
+ if (ret) {
+ dev_err(cdev, "failed to get SCMI Tx shared memory\n");
+ return ret;
+ }
+
+ size = resource_size(&res);
+ scmi_info->shmem = devm_ioremap(dev, res.start, size);
+ if (!scmi_info->shmem) {
+ dev_err(dev, "failed to ioremap SCMI Tx shared memory\n");
+ return -EADDRNOTAVAIL;
+ }
+
+ ret = of_property_read_u32(dev->of_node, "arm,smc-id", &func_id);
+ if (ret < 0)
+ return ret;
+
+ scmi_info->func_id = func_id;
+ scmi_info->cinfo = cinfo;
+ mutex_init(&scmi_info->shmem_lock);
+ cinfo->transport_info = scmi_info;
+
+ return 0;
+}
+
+static int smc_chan_free(int id, void *p, void *data)
+{
+ struct scmi_chan_info *cinfo = p;
+ struct scmi_smc *scmi_info = cinfo->transport_info;
+
+ cinfo->transport_info = NULL;
+ scmi_info->cinfo = NULL;
+
+ scmi_free_channel(cinfo, data, id);
+
+ return 0;
+}
+
+static int smc_send_message(struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer)
+{
+ struct scmi_smc *scmi_info = cinfo->transport_info;
+ struct arm_smccc_res res;
+
+ mutex_lock(&scmi_info->shmem_lock);
+
+ shmem_tx_prepare(scmi_info->shmem, xfer);
+
+ arm_smccc_1_1_invoke(scmi_info->func_id, 0, 0, 0, 0, 0, 0, 0, &res);
+ scmi_rx_callback(scmi_info->cinfo, shmem_read_header(scmi_info->shmem));
+
+ mutex_unlock(&scmi_info->shmem_lock);
+
+ /* Only SMCCC_RET_NOT_SUPPORTED is valid error code */
+ if (res.a0)
+ return -EOPNOTSUPP;
+ return 0;
+}
+
+static void smc_fetch_response(struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer)
+{
+ struct scmi_smc *scmi_info = cinfo->transport_info;
+
+ shmem_fetch_response(scmi_info->shmem, xfer);
+}
+
+static bool
+smc_poll_done(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer)
+{
+ struct scmi_smc *scmi_info = cinfo->transport_info;
+
+ return shmem_poll_done(scmi_info->shmem, xfer);
+}
+
+static struct scmi_transport_ops scmi_smc_ops = {
+ .chan_available = smc_chan_available,
+ .chan_setup = smc_chan_setup,
+ .chan_free = smc_chan_free,
+ .send_message = smc_send_message,
+ .fetch_response = smc_fetch_response,
+ .poll_done = smc_poll_done,
+};
+
+const struct scmi_desc scmi_smc_desc = {
+ .ops = &scmi_smc_ops,
+ .max_rx_timeout_ms = 30,
+ .max_msg = 1,
+ .max_msg_size = 128,
+};
diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c
index 334c8be0c11f..e7e36aab2386 100644
--- a/drivers/firmware/arm_sdei.c
+++ b/drivers/firmware/arm_sdei.c
@@ -429,7 +429,6 @@ int sdei_event_enable(u32 event_num)
return err;
}
-EXPORT_SYMBOL(sdei_event_enable);
static int sdei_api_event_disable(u32 event_num)
{
@@ -471,7 +470,6 @@ int sdei_event_disable(u32 event_num)
return err;
}
-EXPORT_SYMBOL(sdei_event_disable);
static int sdei_api_event_unregister(u32 event_num)
{
@@ -533,7 +531,6 @@ int sdei_event_unregister(u32 event_num)
return err;
}
-EXPORT_SYMBOL(sdei_event_unregister);
/*
* unregister events, but don't destroy them as they are re-registered by
@@ -643,7 +640,6 @@ int sdei_event_register(u32 event_num, sdei_event_callback *cb, void *arg)
return err;
}
-EXPORT_SYMBOL(sdei_event_register);
static int sdei_reregister_event_llocked(struct sdei_event *event)
{
@@ -1079,26 +1075,9 @@ static struct platform_driver sdei_driver = {
.probe = sdei_probe,
};
-static bool __init sdei_present_dt(void)
-{
- struct device_node *np, *fw_np;
-
- fw_np = of_find_node_by_name(NULL, "firmware");
- if (!fw_np)
- return false;
-
- np = of_find_matching_node(fw_np, sdei_of_match);
- if (!np)
- return false;
- of_node_put(np);
-
- return true;
-}
-
static bool __init sdei_present_acpi(void)
{
acpi_status status;
- struct platform_device *pdev;
struct acpi_table_header *sdei_table_header;
if (acpi_disabled)
@@ -1113,20 +1092,26 @@ static bool __init sdei_present_acpi(void)
if (ACPI_FAILURE(status))
return false;
- pdev = platform_device_register_simple(sdei_driver.driver.name, 0, NULL,
- 0);
- if (IS_ERR(pdev))
- return false;
+ acpi_put_table(sdei_table_header);
return true;
}
static int __init sdei_init(void)
{
- if (sdei_present_dt() || sdei_present_acpi())
- platform_driver_register(&sdei_driver);
+ int ret = platform_driver_register(&sdei_driver);
- return 0;
+ if (!ret && sdei_present_acpi()) {
+ struct platform_device *pdev;
+
+ pdev = platform_device_register_simple(sdei_driver.driver.name,
+ 0, NULL, 0);
+ if (IS_ERR(pdev))
+ pr_info("Failed to register ACPI:SDEI platform device %ld\n",
+ PTR_ERR(pdev));
+ }
+
+ return ret;
}
/*
@@ -1143,6 +1128,14 @@ int sdei_event_handler(struct pt_regs *regs,
mm_segment_t orig_addr_limit;
u32 event_num = arg->event_num;
+ /*
+ * Save restore 'fs'.
+ * The architecture's entry code save/restores 'fs' when taking an
+ * exception from the kernel. This ensures addr_limit isn't inherited
+ * if you interrupted something that allowed the uaccess routines to
+ * access kernel memory.
+ * Do the same here because this doesn't come via the same entry code.
+ */
orig_addr_limit = get_fs();
set_fs(USER_DS);
diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
index ff39f64f2aae..86d71b0212b1 100644
--- a/drivers/firmware/dmi-id.c
+++ b/drivers/firmware/dmi-id.c
@@ -42,6 +42,8 @@ DEFINE_DMI_ATTR_WITH_SHOW(bios_vendor, 0444, DMI_BIOS_VENDOR);
DEFINE_DMI_ATTR_WITH_SHOW(bios_version, 0444, DMI_BIOS_VERSION);
DEFINE_DMI_ATTR_WITH_SHOW(bios_date, 0444, DMI_BIOS_DATE);
DEFINE_DMI_ATTR_WITH_SHOW(sys_vendor, 0444, DMI_SYS_VENDOR);
+DEFINE_DMI_ATTR_WITH_SHOW(bios_release, 0444, DMI_BIOS_RELEASE);
+DEFINE_DMI_ATTR_WITH_SHOW(ec_firmware_release, 0444, DMI_EC_FIRMWARE_RELEASE);
DEFINE_DMI_ATTR_WITH_SHOW(product_name, 0444, DMI_PRODUCT_NAME);
DEFINE_DMI_ATTR_WITH_SHOW(product_version, 0444, DMI_PRODUCT_VERSION);
DEFINE_DMI_ATTR_WITH_SHOW(product_serial, 0400, DMI_PRODUCT_SERIAL);
@@ -78,6 +80,8 @@ static ssize_t get_modalias(char *buffer, size_t buffer_size)
{ "bvn", DMI_BIOS_VENDOR },
{ "bvr", DMI_BIOS_VERSION },
{ "bd", DMI_BIOS_DATE },
+ { "br", DMI_BIOS_RELEASE },
+ { "efr", DMI_EC_FIRMWARE_RELEASE },
{ "svn", DMI_SYS_VENDOR },
{ "pn", DMI_PRODUCT_NAME },
{ "pvr", DMI_PRODUCT_VERSION },
@@ -187,6 +191,8 @@ static void __init dmi_id_init_attr_table(void)
ADD_DMI_ATTR(bios_vendor, DMI_BIOS_VENDOR);
ADD_DMI_ATTR(bios_version, DMI_BIOS_VERSION);
ADD_DMI_ATTR(bios_date, DMI_BIOS_DATE);
+ ADD_DMI_ATTR(bios_release, DMI_BIOS_RELEASE);
+ ADD_DMI_ATTR(ec_firmware_release, DMI_EC_FIRMWARE_RELEASE);
ADD_DMI_ATTR(sys_vendor, DMI_SYS_VENDOR);
ADD_DMI_ATTR(product_name, DMI_PRODUCT_NAME);
ADD_DMI_ATTR(product_version, DMI_PRODUCT_VERSION);
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index f59163cb7cba..5066d1f1d687 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -186,6 +186,34 @@ static void __init dmi_save_ident(const struct dmi_header *dm, int slot,
dmi_ident[slot] = p;
}
+static void __init dmi_save_release(const struct dmi_header *dm, int slot,
+ int index)
+{
+ const u8 *minor, *major;
+ char *s;
+
+ /* If the table doesn't have the field, let's return */
+ if (dmi_ident[slot] || dm->length < index)
+ return;
+
+ minor = (u8 *) dm + index;
+ major = (u8 *) dm + index - 1;
+
+ /* As per the spec, if the system doesn't support this field,
+ * the value is FF
+ */
+ if (*major == 0xFF && *minor == 0xFF)
+ return;
+
+ s = dmi_alloc(8);
+ if (!s)
+ return;
+
+ sprintf(s, "%u.%u", *major, *minor);
+
+ dmi_ident[slot] = s;
+}
+
static void __init dmi_save_uuid(const struct dmi_header *dm, int slot,
int index)
{
@@ -444,6 +472,8 @@ static void __init dmi_decode(const struct dmi_header *dm, void *dummy)
dmi_save_ident(dm, DMI_BIOS_VENDOR, 4);
dmi_save_ident(dm, DMI_BIOS_VERSION, 5);
dmi_save_ident(dm, DMI_BIOS_DATE, 8);
+ dmi_save_release(dm, DMI_BIOS_RELEASE, 21);
+ dmi_save_release(dm, DMI_EC_FIRMWARE_RELEASE, 23);
break;
case 1: /* System Information */
dmi_save_ident(dm, DMI_SYS_VENDOR, 4);
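In dmi_save_release() above, dmi_alloc(8) is sufficient because each field is a u8, so the worst-case string is "255.255": seven characters plus the terminating NUL. A throwaway userspace check of that arithmetic:

#include <stdio.h>

int main(void)
{
	char buf[8];
	/* worst case for two u8 values: "255.255" -> 7 chars + NUL = 8 bytes */
	int n = snprintf(buf, sizeof(buf), "%u.%u", 255, 255);
	return n == 7 ? 0 : 1;
}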
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index 613828d3f106..6b38f9e5d203 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -106,12 +106,12 @@ config EFI_PARAMS_FROM_FDT
config EFI_RUNTIME_WRAPPERS
bool
-config EFI_ARMSTUB
+config EFI_GENERIC_STUB
bool
config EFI_ARMSTUB_DTB_LOADER
bool "Enable the DTB loader"
- depends on EFI_ARMSTUB
+ depends on EFI_GENERIC_STUB
default y
help
Select this config option to add support for the dtb= command
@@ -124,6 +124,17 @@ config EFI_ARMSTUB_DTB_LOADER
functionality for bootloaders that do not have such support
this option is necessary.
+config EFI_GENERIC_STUB_INITRD_CMDLINE_LOADER
+ bool "Enable the command line initrd loader" if !X86
+ depends on EFI_STUB && (EFI_GENERIC_STUB || X86)
+ default y
+ help
+ Select this config option to add support for the initrd= command
+ line parameter, allowing an initrd that resides on the same volume
+ as the kernel image to be loaded into memory.
+
+ This method is deprecated.
+
config EFI_BOOTLOADER_CONTROL
tristate "EFI Bootloader Control"
depends on EFI_VARS
diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c
index 9e5e62f5f94d..c697e70ca7e7 100644
--- a/drivers/firmware/efi/arm-init.c
+++ b/drivers/firmware/efi/arm-init.c
@@ -54,8 +54,8 @@ static phys_addr_t __init efi_to_phys(unsigned long addr)
static __initdata unsigned long screen_info_table = EFI_INVALID_TABLE_ADDR;
static const efi_config_table_type_t arch_tables[] __initconst = {
- {LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID, NULL, &screen_info_table},
- {NULL_GUID, NULL, NULL}
+ {LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID, &screen_info_table},
+ {}
};
static void __init init_screen_info(void)
diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c
index b876373f2297..3359ae2adf24 100644
--- a/drivers/firmware/efi/arm-runtime.c
+++ b/drivers/firmware/efi/arm-runtime.c
@@ -18,12 +18,12 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/efi.h>
#include <asm/mmu.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#if defined(CONFIG_PTDUMP_DEBUGFS) && defined(CONFIG_ARM64)
#include <asm/ptdump.h>
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 4e3055238f31..7f1657b6c30d 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -54,7 +54,7 @@ struct mm_struct efi_mm = {
.mm_rb = RB_ROOT,
.mm_users = ATOMIC_INIT(2),
.mm_count = ATOMIC_INIT(1),
- .mmap_sem = __RWSEM_INITIALIZER(efi_mm.mmap_sem),
+ MMAP_LOCK_INITIALIZER(efi_mm)
.page_table_lock = __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
.mmlist = LIST_HEAD_INIT(efi_mm.mmlist),
.cpu_bitmap = { [BITS_TO_LONGS(NR_CPUS)] = 0},
@@ -499,21 +499,21 @@ void __init efi_mem_reserve(phys_addr_t addr, u64 size)
}
static const efi_config_table_type_t common_tables[] __initconst = {
- {ACPI_20_TABLE_GUID, "ACPI 2.0", &efi.acpi20},
- {ACPI_TABLE_GUID, "ACPI", &efi.acpi},
- {SMBIOS_TABLE_GUID, "SMBIOS", &efi.smbios},
- {SMBIOS3_TABLE_GUID, "SMBIOS 3.0", &efi.smbios3},
- {EFI_SYSTEM_RESOURCE_TABLE_GUID, "ESRT", &efi.esrt},
- {EFI_MEMORY_ATTRIBUTES_TABLE_GUID, "MEMATTR", &efi_mem_attr_table},
- {LINUX_EFI_RANDOM_SEED_TABLE_GUID, "RNG", &efi_rng_seed},
- {LINUX_EFI_TPM_EVENT_LOG_GUID, "TPMEventLog", &efi.tpm_log},
- {LINUX_EFI_TPM_FINAL_LOG_GUID, "TPMFinalLog", &efi.tpm_final_log},
- {LINUX_EFI_MEMRESERVE_TABLE_GUID, "MEMRESERVE", &mem_reserve},
- {EFI_RT_PROPERTIES_TABLE_GUID, "RTPROP", &rt_prop},
+ {ACPI_20_TABLE_GUID, &efi.acpi20, "ACPI 2.0" },
+ {ACPI_TABLE_GUID, &efi.acpi, "ACPI" },
+ {SMBIOS_TABLE_GUID, &efi.smbios, "SMBIOS" },
+ {SMBIOS3_TABLE_GUID, &efi.smbios3, "SMBIOS 3.0" },
+ {EFI_SYSTEM_RESOURCE_TABLE_GUID, &efi.esrt, "ESRT" },
+ {EFI_MEMORY_ATTRIBUTES_TABLE_GUID, &efi_mem_attr_table, "MEMATTR" },
+ {LINUX_EFI_RANDOM_SEED_TABLE_GUID, &efi_rng_seed, "RNG" },
+ {LINUX_EFI_TPM_EVENT_LOG_GUID, &efi.tpm_log, "TPMEventLog" },
+ {LINUX_EFI_TPM_FINAL_LOG_GUID, &efi.tpm_final_log, "TPMFinalLog" },
+ {LINUX_EFI_MEMRESERVE_TABLE_GUID, &mem_reserve, "MEMRESERVE" },
+ {EFI_RT_PROPERTIES_TABLE_GUID, &rt_prop, "RTPROP" },
#ifdef CONFIG_EFI_RCI2_TABLE
- {DELLEMC_EFI_RCI2_TABLE_GUID, NULL, &rci2_table_phys},
+ {DELLEMC_EFI_RCI2_TABLE_GUID, &rci2_table_phys },
#endif
- {NULL_GUID, NULL, NULL},
+ {},
};
static __init int match_config_table(const efi_guid_t *guid,
@@ -522,15 +522,13 @@ static __init int match_config_table(const efi_guid_t *guid,
{
int i;
- if (table_types) {
- for (i = 0; efi_guidcmp(table_types[i].guid, NULL_GUID); i++) {
- if (!efi_guidcmp(*guid, table_types[i].guid)) {
- *(table_types[i].ptr) = table;
- if (table_types[i].name)
- pr_cont(" %s=0x%lx ",
- table_types[i].name, table);
- return 1;
- }
+ for (i = 0; efi_guidcmp(table_types[i].guid, NULL_GUID); i++) {
+ if (!efi_guidcmp(*guid, table_types[i].guid)) {
+ *(table_types[i].ptr) = table;
+ if (table_types[i].name[0])
+ pr_cont("%s=0x%lx ",
+ table_types[i].name, table);
+ return 1;
}
}
@@ -567,7 +565,7 @@ int __init efi_config_parse_tables(const efi_config_table_t *config_tables,
table = tbl32[i].table;
}
- if (!match_config_table(guid, table, common_tables))
+ if (!match_config_table(guid, table, common_tables) && arch_tables)
match_config_table(guid, table, arch_tables);
}
pr_cont("\n");
diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
index 78ad1ba8c987..26528a46d99e 100644
--- a/drivers/firmware/efi/efivars.c
+++ b/drivers/firmware/efi/efivars.c
@@ -522,8 +522,10 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var)
ret = kobject_init_and_add(&new_var->kobj, &efivar_ktype,
NULL, "%s", short_name);
kfree(short_name);
- if (ret)
+ if (ret) {
+ kobject_put(&new_var->kobj);
return ret;
+ }
kobject_uevent(&new_var->kobj, KOBJ_ADD);
if (efivar_entry_add(new_var, &efivar_sysfs_list)) {
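The efivars fix above follows the standard kobject rule: once kobject_init_and_add() has run, the kobject holds a reference even when the call fails, so the error path must drop it with kobject_put() (which frees through the ktype's release callback) rather than freeing the containing structure directly. In outline, with obj/obj_ktype as placeholders for the caller's object and its ktype:

	ret = kobject_init_and_add(&obj->kobj, &obj_ktype, NULL, "%s", name);
	if (ret) {
		kobject_put(&obj->kobj);	/* drops the ref; ->release() frees obj */
		return ret;
	}
	kobject_uevent(&obj->kobj, KOBJ_ADD);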
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index 094eabdecfe6..cce4a7436052 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -7,7 +7,7 @@
#
cflags-$(CONFIG_X86_32) := -march=i386
cflags-$(CONFIG_X86_64) := -mcmodel=small
-cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ -O2 \
+cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ \
-fPIC -fno-strict-aliasing -mno-red-zone \
-mno-mmx -mno-sse -fshort-wchar \
-Wno-pointer-sign \
@@ -23,15 +23,19 @@ cflags-$(CONFIG_ARM) := $(subst $(CC_FLAGS_FTRACE),,$(KBUILD_CFLAGS)) \
-fno-builtin -fpic \
$(call cc-option,-mno-single-pic-base)
-cflags-$(CONFIG_EFI_ARMSTUB) += -I$(srctree)/scripts/dtc/libfdt
+cflags-$(CONFIG_EFI_GENERIC_STUB) += -I$(srctree)/scripts/dtc/libfdt
-KBUILD_CFLAGS := $(cflags-y) -DDISABLE_BRANCH_PROFILING \
+KBUILD_CFLAGS := $(cflags-y) -Os -DDISABLE_BRANCH_PROFILING \
-include $(srctree)/drivers/firmware/efi/libstub/hidden.h \
-D__NO_FORTIFY \
$(call cc-option,-ffreestanding) \
$(call cc-option,-fno-stack-protector) \
+ $(call cc-option,-fno-addrsig) \
-D__DISABLE_EXPORTS
+# remove SCS flags from all objects in this directory
+KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_SCS), $(KBUILD_CFLAGS))
+
GCOV_PROFILE := n
KASAN_SANITIZE := n
UBSAN_SANITIZE := n
@@ -42,16 +46,17 @@ KCOV_INSTRUMENT := n
lib-y := efi-stub-helper.o gop.o secureboot.o tpm.o \
file.o mem.o random.o randomalloc.o pci.o \
- skip_spaces.o lib-cmdline.o lib-ctype.o
+ skip_spaces.o lib-cmdline.o lib-ctype.o \
+ alignedmem.o relocate.o vsprintf.o
# include the stub's generic dependencies from lib/ when building for ARM/arm64
-arm-deps-y := fdt_rw.c fdt_ro.c fdt_wip.c fdt.c fdt_empty_tree.c fdt_sw.c
+efi-deps-y := fdt_rw.c fdt_ro.c fdt_wip.c fdt.c fdt_empty_tree.c fdt_sw.c
$(obj)/lib-%.o: $(srctree)/lib/%.c FORCE
$(call if_changed_rule,cc_o_c)
-lib-$(CONFIG_EFI_ARMSTUB) += arm-stub.o fdt.o string.o \
- $(patsubst %.c,lib-%.o,$(arm-deps-y))
+lib-$(CONFIG_EFI_GENERIC_STUB) += efi-stub.o fdt.o string.o \
+ $(patsubst %.c,lib-%.o,$(efi-deps-y))
lib-$(CONFIG_ARM) += arm32-stub.o
lib-$(CONFIG_ARM64) += arm64-stub.o
@@ -60,6 +65,25 @@ CFLAGS_arm32-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
CFLAGS_arm64-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
#
+# For x86, bootloaders like systemd-boot or grub-efi do not zero-initialize the
+# .bss section, so the .bss section of the EFI stub needs to be included in the
+# .data section of the compressed kernel to ensure initialization. Rename the
+# .bss section here so it's easy to pick out in the linker script.
+#
+STUBCOPY_FLAGS-$(CONFIG_X86) += --rename-section .bss=.bss.efistub,load,alloc
+STUBCOPY_RELOC-$(CONFIG_X86_32) := R_386_32
+STUBCOPY_RELOC-$(CONFIG_X86_64) := R_X86_64_64
+
+#
+# ARM discards the .data section because it disallows r/w data in the
+# decompressor. So move our .data to .data.efistub and .bss to .bss.efistub,
+# which are preserved explicitly by the decompressor linker script.
+#
+STUBCOPY_FLAGS-$(CONFIG_ARM) += --rename-section .data=.data.efistub \
+ --rename-section .bss=.bss.efistub,load,alloc
+STUBCOPY_RELOC-$(CONFIG_ARM) := R_ARM_ABS
+
+#
# arm64 puts the stub in the kernel proper, which will unnecessarily retain all
# code indefinitely unless it is annotated as __init/__initdata/__initconst etc.
# So let's apply the __init annotations at the section level, by prefixing
@@ -73,8 +97,8 @@ CFLAGS_arm64-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
# a verification pass to see if any absolute relocations exist in any of the
# object files.
#
-extra-$(CONFIG_EFI_ARMSTUB) := $(lib-y)
-lib-$(CONFIG_EFI_ARMSTUB) := $(patsubst %.o,%.stub.o,$(lib-y))
+extra-y := $(lib-y)
+lib-y := $(patsubst %.o,%.stub.o,$(lib-y))
STUBCOPY_FLAGS-$(CONFIG_ARM64) += --prefix-alloc-sections=.init \
--prefix-symbols=__efistub_
@@ -97,11 +121,3 @@ quiet_cmd_stubcopy = STUBCPY $@
/bin/false; \
fi; \
$(OBJCOPY) $(STUBCOPY_FLAGS-y) $< $@
-
-#
-# ARM discards the .data section because it disallows r/w data in the
-# decompressor. So move our .data to .data.efistub, which is preserved
-# explicitly by the decompressor linker script.
-#
-STUBCOPY_FLAGS-$(CONFIG_ARM) += --rename-section .data=.data.efistub
-STUBCOPY_RELOC-$(CONFIG_ARM) := R_ARM_ABS
diff --git a/drivers/firmware/efi/libstub/alignedmem.c b/drivers/firmware/efi/libstub/alignedmem.c
new file mode 100644
index 000000000000..cc89c4d6196f
--- /dev/null
+++ b/drivers/firmware/efi/libstub/alignedmem.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/efi.h>
+#include <asm/efi.h>
+
+#include "efistub.h"
+
+/**
+ * efi_allocate_pages_aligned() - Allocate memory pages
+ * @size: minimum number of bytes to allocate
+ * @addr: On return the address of the first allocated page. The first
+ * allocated page has alignment EFI_ALLOC_ALIGN which is an
+ * architecture dependent multiple of the page size.
+ * @max: the address that the last allocated memory page shall not
+ * exceed
+ * @align: minimum alignment of the base of the allocation
+ *
+ * Allocate pages as EFI_LOADER_DATA. The allocated pages are aligned according
+ * to @align, which should be >= EFI_ALLOC_ALIGN. The last allocated page will
+ * not exceed the address given by @max.
+ *
+ * Return: status code
+ */
+efi_status_t efi_allocate_pages_aligned(unsigned long size, unsigned long *addr,
+ unsigned long max, unsigned long align)
+{
+ efi_physical_addr_t alloc_addr;
+ efi_status_t status;
+ int slack;
+
+ if (align < EFI_ALLOC_ALIGN)
+ align = EFI_ALLOC_ALIGN;
+
+ alloc_addr = ALIGN_DOWN(max + 1, align) - 1;
+ size = round_up(size, EFI_ALLOC_ALIGN);
+ slack = align / EFI_PAGE_SIZE - 1;
+
+ status = efi_bs_call(allocate_pages, EFI_ALLOCATE_MAX_ADDRESS,
+ EFI_LOADER_DATA, size / EFI_PAGE_SIZE + slack,
+ &alloc_addr);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ *addr = ALIGN((unsigned long)alloc_addr, align);
+
+ if (slack > 0) {
+ int l = (alloc_addr % align) / EFI_PAGE_SIZE;
+
+ if (l) {
+ efi_bs_call(free_pages, alloc_addr, slack - l + 1);
+ slack = l - 1;
+ }
+ if (slack)
+ efi_bs_call(free_pages, *addr + size, slack);
+ }
+ return EFI_SUCCESS;
+}
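The allocator above over-allocates by align/EFI_PAGE_SIZE - 1 pages so that an aligned block of the requested size is guaranteed to fit somewhere inside the allocation, then frees the unused pages before and after it. A standalone sketch of the same arithmetic, with a made-up firmware address and a 2 MiB alignment request, to check the head/tail page counts:

/* Standalone sketch of the slack arithmetic in efi_allocate_pages_aligned();
 * the firmware address and sizes below are made up for illustration. */
#include <stdio.h>

#define PAGE_SZ		0x1000UL		/* EFI_PAGE_SIZE */
#define ALLOC_ALIGN	0x10000UL		/* EFI_ALLOC_ALIGN on arm64 */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long align = 0x200000;			/* requested 2 MiB alignment */
	unsigned long size  = ALIGN_UP(96 * 1024UL, ALLOC_ALIGN);
	unsigned long slack = align / PAGE_SZ - 1;
	unsigned long alloc_addr = 0x40030000;		/* hypothetical firmware result */
	unsigned long addr = ALIGN_UP(alloc_addr, align);
	unsigned long l = (alloc_addr % align) / PAGE_SZ;

	printf("over-allocate %lu pages at %#lx\n", size / PAGE_SZ + slack, alloc_addr);
	if (l) {
		printf("free %lu head pages at %#lx\n", slack - l + 1, alloc_addr);
		slack = l - 1;
	}
	if (slack)
		printf("free %lu tail pages at %#lx\n", slack, addr + size);
	printf("caller gets %#lx, aligned to %#lx\n", addr, align);
	return 0;
}

With these numbers the sketch frees 464 pages below the aligned address and 47 above it, keeping only the 32 pages the caller asked for.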
diff --git a/drivers/firmware/efi/libstub/arm32-stub.c b/drivers/firmware/efi/libstub/arm32-stub.c
index 7826553af2ba..40243f524556 100644
--- a/drivers/firmware/efi/libstub/arm32-stub.c
+++ b/drivers/firmware/efi/libstub/arm32-stub.c
@@ -18,7 +18,7 @@ efi_status_t check_platform_features(void)
/* LPAE kernels need compatible hardware */
block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
if (block < 5) {
- pr_efi_err("This LPAE kernel is not supported by your CPU\n");
+ efi_err("This LPAE kernel is not supported by your CPU\n");
return EFI_UNSUPPORTED;
}
return EFI_SUCCESS;
@@ -120,7 +120,7 @@ static efi_status_t reserve_kernel_base(unsigned long dram_base,
*/
status = efi_get_memory_map(&map);
if (status != EFI_SUCCESS) {
- pr_efi_err("reserve_kernel_base(): Unable to retrieve memory map.\n");
+ efi_err("reserve_kernel_base(): Unable to retrieve memory map.\n");
return status;
}
@@ -162,7 +162,7 @@ static efi_status_t reserve_kernel_base(unsigned long dram_base,
(end - start) / EFI_PAGE_SIZE,
&start);
if (status != EFI_SUCCESS) {
- pr_efi_err("reserve_kernel_base(): alloc failed.\n");
+ efi_err("reserve_kernel_base(): alloc failed.\n");
goto out;
}
break;
@@ -199,14 +199,8 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
unsigned long kernel_base;
efi_status_t status;
- /*
- * Verify that the DRAM base address is compatible with the ARM
- * boot protocol, which determines the base of DRAM by masking
- * off the low 27 bits of the address at which the zImage is
- * loaded. These assumptions are made by the decompressor,
- * before any memory map is available.
- */
- kernel_base = round_up(dram_base, SZ_128M);
+ /* use a 16 MiB aligned base for the decompressed kernel */
+ kernel_base = round_up(dram_base, SZ_16M) + TEXT_OFFSET;
/*
* Note that some platforms (notably, the Raspberry Pi 2) put
@@ -215,41 +209,14 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
* base of the kernel image is only partially used at the moment.
* (Up to 5 pages are used for the swapper page tables)
*/
- kernel_base += TEXT_OFFSET - 5 * PAGE_SIZE;
-
- status = reserve_kernel_base(kernel_base, reserve_addr, reserve_size);
- if (status != EFI_SUCCESS) {
- pr_efi_err("Unable to allocate memory for uncompressed kernel.\n");
- return status;
- }
-
- /*
- * Relocate the zImage, so that it appears in the lowest 128 MB
- * memory window.
- */
- *image_addr = (unsigned long)image->image_base;
- *image_size = image->image_size;
- status = efi_relocate_kernel(image_addr, *image_size, *image_size,
- kernel_base + MAX_UNCOMP_KERNEL_SIZE, 0, 0);
+ status = reserve_kernel_base(kernel_base - 5 * PAGE_SIZE, reserve_addr,
+ reserve_size);
if (status != EFI_SUCCESS) {
- pr_efi_err("Failed to relocate kernel.\n");
- efi_free(*reserve_size, *reserve_addr);
- *reserve_size = 0;
+ efi_err("Unable to allocate memory for uncompressed kernel.\n");
return status;
}
- /*
- * Check to see if we were able to allocate memory low enough
- * in memory. The kernel determines the base of DRAM from the
- * address at which the zImage is loaded.
- */
- if (*image_addr + *image_size > dram_base + ZIMAGE_OFFSET_LIMIT) {
- pr_efi_err("Failed to relocate kernel, no low memory available.\n");
- efi_free(*reserve_size, *reserve_addr);
- *reserve_size = 0;
- efi_free(*image_size, *image_addr);
- *image_size = 0;
- return EFI_LOAD_ERROR;
- }
+ *image_addr = kernel_base;
+ *image_size = 0;
return EFI_SUCCESS;
}
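With this change the ARM stub no longer relocates the zImage into the low 128 MiB window; it only reserves a window for the decompressed kernel at a 16 MiB aligned base plus TEXT_OFFSET, starting five pages lower to leave room for the swapper page tables. A quick worked example of that placement, with made-up DRAM base and TEXT_OFFSET values (not from a real platform):

#include <stdio.h>

#define SZ_16M		0x1000000UL
#define PAGE_SIZE	0x1000UL
#define ROUND_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long dram_base   = 0x80000000;	/* assumed DRAM start */
	unsigned long text_offset = 0x8000;	/* assumed TEXT_OFFSET */

	unsigned long kernel_base  = ROUND_UP(dram_base, SZ_16M) + text_offset;
	/* the reservation starts 5 pages lower, covering the swapper page
	 * tables placed just below the kernel */
	unsigned long reserve_from = kernel_base - 5 * PAGE_SIZE;

	printf("kernel at %#lx, reserve from %#lx\n", kernel_base, reserve_from);
	return 0;
}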
diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c
index fc9f8ab533a7..7f6a57dec513 100644
--- a/drivers/firmware/efi/libstub/arm64-stub.c
+++ b/drivers/firmware/efi/libstub/arm64-stub.c
@@ -26,14 +26,23 @@ efi_status_t check_platform_features(void)
tg = (read_cpuid(ID_AA64MMFR0_EL1) >> ID_AA64MMFR0_TGRAN_SHIFT) & 0xf;
if (tg != ID_AA64MMFR0_TGRAN_SUPPORTED) {
if (IS_ENABLED(CONFIG_ARM64_64K_PAGES))
- pr_efi_err("This 64 KB granular kernel is not supported by your CPU\n");
+ efi_err("This 64 KB granular kernel is not supported by your CPU\n");
else
- pr_efi_err("This 16 KB granular kernel is not supported by your CPU\n");
+ efi_err("This 16 KB granular kernel is not supported by your CPU\n");
return EFI_UNSUPPORTED;
}
return EFI_SUCCESS;
}
+/*
+ * Relocatable kernels can fix up the misalignment with respect to
+ * MIN_KIMG_ALIGN, so they only require a minimum alignment of EFI_KIMG_ALIGN
+ * (which accounts for the alignment of statically allocated objects such as
+ * the swapper stack.)
+ */
+static const u64 min_kimg_align = IS_ENABLED(CONFIG_RELOCATABLE) ? EFI_KIMG_ALIGN
+ : MIN_KIMG_ALIGN;
+
efi_status_t handle_kernel_image(unsigned long *image_addr,
unsigned long *image_size,
unsigned long *reserve_addr,
@@ -43,106 +52,63 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
{
efi_status_t status;
unsigned long kernel_size, kernel_memsize = 0;
- unsigned long preferred_offset;
- u64 phys_seed = 0;
+ u32 phys_seed = 0;
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
- if (!nokaslr()) {
+ if (!efi_nokaslr) {
status = efi_get_random_bytes(sizeof(phys_seed),
(u8 *)&phys_seed);
if (status == EFI_NOT_FOUND) {
- pr_efi("EFI_RNG_PROTOCOL unavailable, no randomness supplied\n");
+ efi_info("EFI_RNG_PROTOCOL unavailable, no randomness supplied\n");
} else if (status != EFI_SUCCESS) {
- pr_efi_err("efi_get_random_bytes() failed\n");
+ efi_err("efi_get_random_bytes() failed\n");
return status;
}
} else {
- pr_efi("KASLR disabled on kernel command line\n");
+ efi_info("KASLR disabled on kernel command line\n");
}
}
- /*
- * The preferred offset of the kernel Image is TEXT_OFFSET bytes beyond
- * a 2 MB aligned base, which itself may be lower than dram_base, as
- * long as the resulting offset equals or exceeds it.
- */
- preferred_offset = round_down(dram_base, MIN_KIMG_ALIGN) + TEXT_OFFSET;
- if (preferred_offset < dram_base)
- preferred_offset += MIN_KIMG_ALIGN;
+ if (image->image_base != _text)
+ efi_err("FIRMWARE BUG: efi_loaded_image_t::image_base has bogus value\n");
kernel_size = _edata - _text;
kernel_memsize = kernel_size + (_end - _edata);
+ *reserve_size = kernel_memsize + TEXT_OFFSET % min_kimg_align;
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && phys_seed != 0) {
/*
- * Produce a displacement in the interval [0, MIN_KIMG_ALIGN)
- * that doesn't violate this kernel's de-facto alignment
- * constraints.
- */
- u32 mask = (MIN_KIMG_ALIGN - 1) & ~(EFI_KIMG_ALIGN - 1);
- u32 offset = (phys_seed >> 32) & mask;
-
- /*
- * With CONFIG_RANDOMIZE_TEXT_OFFSET=y, TEXT_OFFSET may not
- * be a multiple of EFI_KIMG_ALIGN, and we must ensure that
- * we preserve the misalignment of 'offset' relative to
- * EFI_KIMG_ALIGN so that statically allocated objects whose
- * alignment exceeds PAGE_SIZE appear correctly aligned in
- * memory.
- */
- offset |= TEXT_OFFSET % EFI_KIMG_ALIGN;
-
- /*
* If KASLR is enabled, and we have some randomness available,
* locate the kernel at a randomized offset in physical memory.
*/
- *reserve_size = kernel_memsize + offset;
- status = efi_random_alloc(*reserve_size,
- MIN_KIMG_ALIGN, reserve_addr,
- (u32)phys_seed);
-
- *image_addr = *reserve_addr + offset;
+ status = efi_random_alloc(*reserve_size, min_kimg_align,
+ reserve_addr, phys_seed);
} else {
- /*
- * Else, try a straight allocation at the preferred offset.
- * This will work around the issue where, if dram_base == 0x0,
- * efi_low_alloc() refuses to allocate at 0x0 (to prevent the
- * address of the allocation to be mistaken for a FAIL return
- * value or a NULL pointer). It will also ensure that, on
- * platforms where the [dram_base, dram_base + TEXT_OFFSET)
- * interval is partially occupied by the firmware (like on APM
- * Mustang), we can still place the kernel at the address
- * 'dram_base + TEXT_OFFSET'.
- */
- *image_addr = (unsigned long)_text;
- if (*image_addr == preferred_offset)
- return EFI_SUCCESS;
-
- *image_addr = *reserve_addr = preferred_offset;
- *reserve_size = round_up(kernel_memsize, EFI_ALLOC_ALIGN);
-
- status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS,
- EFI_LOADER_DATA,
- *reserve_size / EFI_PAGE_SIZE,
- (efi_physical_addr_t *)reserve_addr);
+ status = EFI_OUT_OF_RESOURCES;
}
if (status != EFI_SUCCESS) {
- *reserve_size = kernel_memsize + TEXT_OFFSET;
- status = efi_low_alloc(*reserve_size,
- MIN_KIMG_ALIGN, reserve_addr);
+ if (IS_ALIGNED((u64)_text - TEXT_OFFSET, min_kimg_align)) {
+ /*
+ * Just execute from wherever we were loaded by the
+ * UEFI PE/COFF loader if the alignment is suitable.
+ */
+ *image_addr = (u64)_text;
+ *reserve_size = 0;
+ return EFI_SUCCESS;
+ }
+
+ status = efi_allocate_pages_aligned(*reserve_size, reserve_addr,
+ ULONG_MAX, min_kimg_align);
if (status != EFI_SUCCESS) {
- pr_efi_err("Failed to relocate kernel\n");
+ efi_err("Failed to relocate kernel\n");
*reserve_size = 0;
return status;
}
- *image_addr = *reserve_addr + TEXT_OFFSET;
}
- if (image->image_base != _text)
- pr_efi_err("FIRMWARE BUG: efi_loaded_image_t::image_base has bogus value\n");
-
+ *image_addr = *reserve_addr + TEXT_OFFSET % min_kimg_align;
memcpy((void *)*image_addr, _text, kernel_size);
return EFI_SUCCESS;
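The arm64 path now expresses the boot-protocol constraint purely through min_kimg_align: the reservation is padded by TEXT_OFFSET modulo that alignment and the image is copied to the same offset inside it, so the address the kernel runs at, minus TEXT_OFFSET, stays suitably aligned. A small worked example with assumed values (TEXT_OFFSET, alignment and sizes are placeholders, not taken from a real build):

#include <stdio.h>

int main(void)
{
	unsigned long text_offset    = 0x80000;		/* assumed TEXT_OFFSET */
	unsigned long min_kimg_align = 0x200000;	/* assumed 2 MiB alignment */
	unsigned long kernel_memsize = 0x1a00000;	/* assumed _end - _text */
	unsigned long reserve_addr   = 0x48000000;	/* 2 MiB aligned allocation */

	unsigned long reserve_size = kernel_memsize + text_offset % min_kimg_align;
	unsigned long image_addr   = reserve_addr + text_offset % min_kimg_align;

	/* image_addr - TEXT_OFFSET is 2 MiB aligned, as the boot protocol expects */
	printf("reserve %#lx bytes, run kernel at %#lx (base %#lx)\n",
	       reserve_size, image_addr, image_addr - text_offset);
	return 0;
}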
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index 9f34c7242939..89f075275300 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -7,60 +7,151 @@
* Copyright 2011 Intel Corporation; author Matt Fleming
*/
+#include <stdarg.h>
+
+#include <linux/ctype.h>
#include <linux/efi.h>
+#include <linux/kernel.h>
+#include <linux/printk.h> /* For CONSOLE_LOGLEVEL_* */
#include <asm/efi.h>
+#include <asm/setup.h>
#include "efistub.h"
-static bool __efistub_global efi_nochunk;
-static bool __efistub_global efi_nokaslr;
-static bool __efistub_global efi_noinitrd;
-static bool __efistub_global efi_quiet;
-static bool __efistub_global efi_novamap;
-static bool __efistub_global efi_nosoftreserve;
-static bool __efistub_global efi_disable_pci_dma =
- IS_ENABLED(CONFIG_EFI_DISABLE_PCI_DMA);
+bool efi_nochunk;
+bool efi_nokaslr;
+bool efi_noinitrd;
+int efi_loglevel = CONSOLE_LOGLEVEL_DEFAULT;
+bool efi_novamap;
-bool __pure nochunk(void)
-{
- return efi_nochunk;
-}
-bool __pure nokaslr(void)
-{
- return efi_nokaslr;
-}
-bool __pure noinitrd(void)
+static bool efi_nosoftreserve;
+static bool efi_disable_pci_dma = IS_ENABLED(CONFIG_EFI_DISABLE_PCI_DMA);
+
+bool __pure __efi_soft_reserve_enabled(void)
{
- return efi_noinitrd;
+ return !efi_nosoftreserve;
}
-bool __pure is_quiet(void)
+
+void efi_char16_puts(efi_char16_t *str)
{
- return efi_quiet;
+ efi_call_proto(efi_table_attr(efi_system_table, con_out),
+ output_string, str);
}
-bool __pure novamap(void)
+
+static
+u32 utf8_to_utf32(const u8 **s8)
{
- return efi_novamap;
+ u32 c32;
+ u8 c0, cx;
+ size_t clen, i;
+
+ c0 = cx = *(*s8)++;
+ /*
+ * The position of the most-significant 0 bit gives us the length of
+ * a multi-octet encoding.
+ */
+ for (clen = 0; cx & 0x80; ++clen)
+ cx <<= 1;
+ /*
+ * If the 0 bit is in position 8, this is a valid single-octet
+ * encoding. If the 0 bit is in position 7 or positions 1-3, the
+ * encoding is invalid.
+ * In either case, we just return the first octet.
+ */
+ if (clen < 2 || clen > 4)
+ return c0;
+ /* Get the bits from the first octet. */
+ c32 = cx >> clen--;
+ for (i = 0; i < clen; ++i) {
+ /* Trailing octets must have 10 in most significant bits. */
+ cx = (*s8)[i] ^ 0x80;
+ if (cx & 0xc0)
+ return c0;
+ c32 = (c32 << 6) | cx;
+ }
+ /*
+ * Check for validity:
+ * - The character must be in the Unicode range.
+ * - It must not be a surrogate.
+ * - It must be encoded using the correct number of octets.
+ */
+ if (c32 > 0x10ffff ||
+ (c32 & 0xf800) == 0xd800 ||
+ clen != (c32 >= 0x80) + (c32 >= 0x800) + (c32 >= 0x10000))
+ return c0;
+ *s8 += clen;
+ return c32;
}
-bool __pure __efi_soft_reserve_enabled(void)
+
+void efi_puts(const char *str)
{
- return !efi_nosoftreserve;
+ efi_char16_t buf[128];
+ size_t pos = 0, lim = ARRAY_SIZE(buf);
+ const u8 *s8 = (const u8 *)str;
+ u32 c32;
+
+ while (*s8) {
+ if (*s8 == '\n')
+ buf[pos++] = L'\r';
+ c32 = utf8_to_utf32(&s8);
+ if (c32 < 0x10000) {
+ /* Characters in plane 0 use a single word. */
+ buf[pos++] = c32;
+ } else {
+ /*
+ * Characters in other planes encode into a surrogate
+ * pair.
+ */
+ buf[pos++] = (0xd800 - (0x10000 >> 10)) + (c32 >> 10);
+ buf[pos++] = 0xdc00 + (c32 & 0x3ff);
+ }
+ if (*s8 == '\0' || pos >= lim - 2) {
+ buf[pos] = L'\0';
+ efi_char16_puts(buf);
+ pos = 0;
+ }
+ }
}
-void efi_printk(char *str)
+int efi_printk(const char *fmt, ...)
{
- char *s8;
+ char printf_buf[256];
+ va_list args;
+ int printed;
+ int loglevel = printk_get_level(fmt);
+
+ switch (loglevel) {
+ case '0' ... '9':
+ loglevel -= '0';
+ break;
+ default:
+ /*
+ * Use loglevel -1 for cases where we just want to print to
+ * the screen.
+ */
+ loglevel = -1;
+ break;
+ }
- for (s8 = str; *s8; s8++) {
- efi_char16_t ch[2] = { 0 };
+ if (loglevel >= efi_loglevel)
+ return 0;
- ch[0] = *s8;
- if (*s8 == '\n') {
- efi_char16_t nl[2] = { '\r', 0 };
- efi_char16_printk(nl);
- }
+ if (loglevel >= 0)
+ efi_puts("EFI stub: ");
+
+ fmt = printk_skip_level(fmt);
+
+ va_start(args, fmt);
+ printed = vsnprintf(printf_buf, sizeof(printf_buf), fmt, args);
+ va_end(args);
- efi_char16_printk(ch);
+ efi_puts(printf_buf);
+ if (printed >= sizeof(printf_buf)) {
+ efi_puts("[Message truncated]\n");
+ return -1;
}
+
+ return printed;
}
/*
@@ -91,7 +182,7 @@ efi_status_t efi_parse_options(char const *cmdline)
if (!strcmp(param, "nokaslr")) {
efi_nokaslr = true;
} else if (!strcmp(param, "quiet")) {
- efi_quiet = true;
+ efi_loglevel = CONSOLE_LOGLEVEL_QUIET;
} else if (!strcmp(param, "noinitrd")) {
efi_noinitrd = true;
} else if (!strcmp(param, "efi") && val) {
@@ -105,6 +196,11 @@ efi_status_t efi_parse_options(char const *cmdline)
efi_disable_pci_dma = true;
if (parse_option_str(val, "no_disable_early_pci_dma"))
efi_disable_pci_dma = false;
+ if (parse_option_str(val, "debug"))
+ efi_loglevel = CONSOLE_LOGLEVEL_DEBUG;
+ } else if (!strcmp(param, "video") &&
+ val && strstarts(val, "efifb:")) {
+ efi_parse_option_graphics(val + strlen("efifb:"));
}
}
efi_bs_call(free_pool, buf);
@@ -112,97 +208,79 @@ efi_status_t efi_parse_options(char const *cmdline)
}
/*
- * Get the number of UTF-8 bytes corresponding to an UTF-16 character.
- * This overestimates for surrogates, but that is okay.
- */
-static int efi_utf8_bytes(u16 c)
-{
- return 1 + (c >= 0x80) + (c >= 0x800);
-}
-
-/*
- * Convert an UTF-16 string, not necessarily null terminated, to UTF-8.
- */
-static u8 *efi_utf16_to_utf8(u8 *dst, const u16 *src, int n)
-{
- unsigned int c;
-
- while (n--) {
- c = *src++;
- if (n && c >= 0xd800 && c <= 0xdbff &&
- *src >= 0xdc00 && *src <= 0xdfff) {
- c = 0x10000 + ((c & 0x3ff) << 10) + (*src & 0x3ff);
- src++;
- n--;
- }
- if (c >= 0xd800 && c <= 0xdfff)
- c = 0xfffd; /* Unmatched surrogate */
- if (c < 0x80) {
- *dst++ = c;
- continue;
- }
- if (c < 0x800) {
- *dst++ = 0xc0 + (c >> 6);
- goto t1;
- }
- if (c < 0x10000) {
- *dst++ = 0xe0 + (c >> 12);
- goto t2;
- }
- *dst++ = 0xf0 + (c >> 18);
- *dst++ = 0x80 + ((c >> 12) & 0x3f);
- t2:
- *dst++ = 0x80 + ((c >> 6) & 0x3f);
- t1:
- *dst++ = 0x80 + (c & 0x3f);
- }
-
- return dst;
-}
-
-/*
* Convert the unicode UEFI command line to ASCII to pass to kernel.
* Size of memory allocated return in *cmd_line_len.
* Returns NULL on error.
*/
-char *efi_convert_cmdline(efi_loaded_image_t *image,
- int *cmd_line_len, unsigned long max_addr)
+char *efi_convert_cmdline(efi_loaded_image_t *image, int *cmd_line_len)
{
const u16 *s2;
- u8 *s1 = NULL;
unsigned long cmdline_addr = 0;
- int load_options_chars = efi_table_attr(image, load_options_size) / 2;
+ int options_chars = efi_table_attr(image, load_options_size) / 2;
const u16 *options = efi_table_attr(image, load_options);
- int options_bytes = 0; /* UTF-8 bytes */
- int options_chars = 0; /* UTF-16 chars */
+ int options_bytes = 0, safe_options_bytes = 0; /* UTF-8 bytes */
+ bool in_quote = false;
efi_status_t status;
- u16 zero = 0;
if (options) {
s2 = options;
- while (*s2 && *s2 != '\n'
- && options_chars < load_options_chars) {
- options_bytes += efi_utf8_bytes(*s2++);
- options_chars++;
+ while (options_bytes < COMMAND_LINE_SIZE && options_chars--) {
+ u16 c = *s2++;
+
+ if (c < 0x80) {
+ if (c == L'\0' || c == L'\n')
+ break;
+ if (c == L'"')
+ in_quote = !in_quote;
+ else if (!in_quote && isspace((char)c))
+ safe_options_bytes = options_bytes;
+
+ options_bytes++;
+ continue;
+ }
+
+ /*
+ * Get the number of UTF-8 bytes corresponding to a
+ * UTF-16 character.
+ * The first part handles everything in the BMP.
+ */
+ options_bytes += 2 + (c >= 0x800);
+ /*
+ * Add one more byte for valid surrogate pairs. Invalid
+ * surrogates will be replaced with 0xfffd and take up
+ * only 3 bytes.
+ */
+ if ((c & 0xfc00) == 0xd800) {
+ /*
+ * If the very last word is a high surrogate,
+ * we must ignore it since we can't access the
+ * low surrogate.
+ */
+ if (!options_chars) {
+ options_bytes -= 3;
+ } else if ((*s2 & 0xfc00) == 0xdc00) {
+ options_bytes++;
+ options_chars--;
+ s2++;
+ }
+ }
+ }
+ if (options_bytes >= COMMAND_LINE_SIZE) {
+ options_bytes = safe_options_bytes;
+ efi_err("Command line is too long: truncated to %d bytes\n",
+ options_bytes);
}
- }
-
- if (!options_chars) {
- /* No command line options, so return empty string*/
- options = &zero;
}
options_bytes++; /* NUL termination */
- status = efi_allocate_pages(options_bytes, &cmdline_addr, max_addr);
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, options_bytes,
+ (void **)&cmdline_addr);
if (status != EFI_SUCCESS)
return NULL;
- s1 = (u8 *)cmdline_addr;
- s2 = (const u16 *)options;
-
- s1 = efi_utf16_to_utf8(s1, s2, options_chars);
- *s1 = '\0';
+ snprintf((char *)cmdline_addr, options_bytes, "%.*ls",
+ options_bytes - 1, options);
*cmd_line_len = options_bytes;
return (char *)cmdline_addr;
@@ -285,8 +363,8 @@ fail:
void *get_efi_config_table(efi_guid_t guid)
{
- unsigned long tables = efi_table_attr(efi_system_table(), tables);
- int nr_tables = efi_table_attr(efi_system_table(), nr_tables);
+ unsigned long tables = efi_table_attr(efi_system_table, tables);
+ int nr_tables = efi_table_attr(efi_system_table, nr_tables);
int i;
for (i = 0; i < nr_tables; i++) {
@@ -301,12 +379,6 @@ void *get_efi_config_table(efi_guid_t guid)
return NULL;
}
-void efi_char16_printk(efi_char16_t *str)
-{
- efi_call_proto(efi_table_attr(efi_system_table(), con_out),
- output_string, str);
-}
-
/*
* The LINUX_EFI_INITRD_MEDIA_GUID vendor media device path below provides a way
* for the firmware or bootloader to expose the initrd data directly to the stub
@@ -348,6 +420,7 @@ static const struct {
* %EFI_OUT_OF_RESOURCES if memory allocation failed
* %EFI_LOAD_ERROR in all other cases
*/
+static
efi_status_t efi_load_initrd_dev_path(unsigned long *load_addr,
unsigned long *load_size,
unsigned long max)
@@ -360,9 +433,6 @@ efi_status_t efi_load_initrd_dev_path(unsigned long *load_addr,
efi_handle_t handle;
efi_status_t status;
- if (!load_addr || !load_size)
- return EFI_INVALID_PARAMETER;
-
dp = (efi_device_path_protocol_t *)&initrd_dev_path;
status = efi_bs_call(locate_device_path, &lf2_proto_guid, &dp, &handle);
if (status != EFI_SUCCESS)
@@ -392,3 +462,80 @@ efi_status_t efi_load_initrd_dev_path(unsigned long *load_addr,
*load_size = initrd_size;
return EFI_SUCCESS;
}
+
+static
+efi_status_t efi_load_initrd_cmdline(efi_loaded_image_t *image,
+ unsigned long *load_addr,
+ unsigned long *load_size,
+ unsigned long soft_limit,
+ unsigned long hard_limit)
+{
+ if (!IS_ENABLED(CONFIG_EFI_GENERIC_STUB_INITRD_CMDLINE_LOADER) ||
+ (IS_ENABLED(CONFIG_X86) && (!efi_is_native() || image == NULL))) {
+ *load_addr = *load_size = 0;
+ return EFI_SUCCESS;
+ }
+
+ return handle_cmdline_files(image, L"initrd=", sizeof(L"initrd=") - 2,
+ soft_limit, hard_limit,
+ load_addr, load_size);
+}
+
+efi_status_t efi_load_initrd(efi_loaded_image_t *image,
+ unsigned long *load_addr,
+ unsigned long *load_size,
+ unsigned long soft_limit,
+ unsigned long hard_limit)
+{
+ efi_status_t status;
+
+ if (!load_addr || !load_size)
+ return EFI_INVALID_PARAMETER;
+
+ status = efi_load_initrd_dev_path(load_addr, load_size, hard_limit);
+ if (status == EFI_SUCCESS) {
+ efi_info("Loaded initrd from LINUX_EFI_INITRD_MEDIA_GUID device path\n");
+ } else if (status == EFI_NOT_FOUND) {
+ status = efi_load_initrd_cmdline(image, load_addr, load_size,
+ soft_limit, hard_limit);
+ if (status == EFI_SUCCESS && *load_size > 0)
+ efi_info("Loaded initrd from command line option\n");
+ }
+
+ return status;
+}
+
+efi_status_t efi_wait_for_key(unsigned long usec, efi_input_key_t *key)
+{
+ efi_event_t events[2], timer;
+ unsigned long index;
+ efi_simple_text_input_protocol_t *con_in;
+ efi_status_t status;
+
+ con_in = efi_table_attr(efi_system_table, con_in);
+ if (!con_in)
+ return EFI_UNSUPPORTED;
+ efi_set_event_at(events, 0, efi_table_attr(con_in, wait_for_key));
+
+ status = efi_bs_call(create_event, EFI_EVT_TIMER, 0, NULL, NULL, &timer);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ status = efi_bs_call(set_timer, timer, EfiTimerRelative,
+ EFI_100NSEC_PER_USEC * usec);
+ if (status != EFI_SUCCESS)
+ return status;
+ efi_set_event_at(events, 1, timer);
+
+ status = efi_bs_call(wait_for_event, 2, events, &index);
+ if (status == EFI_SUCCESS) {
+ if (index == 0)
+ status = efi_call_proto(con_in, read_keystroke, key);
+ else
+ status = EFI_TIMEOUT;
+ }
+
+ efi_bs_call(close_event, timer);
+
+ return status;
+}
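efi_puts() above encodes code points outside the Basic Multilingual Plane as UTF-16 surrogate pairs using the (0xd800 - (0x10000 >> 10)) constant, which folds the usual "subtract 0x10000 first" step into the high-surrogate base. A standalone check of that arithmetic for U+1F600:

#include <stdio.h>

int main(void)
{
	unsigned int c32 = 0x1F600;	/* any code point above the BMP */
	unsigned int hi  = (0xd800 - (0x10000 >> 10)) + (c32 >> 10);
	unsigned int lo  = 0xdc00 + (c32 & 0x3ff);

	printf("%04x %04x\n", hi, lo);
	return 0;
}

Running it prints "d83d de00", the expected surrogate pair for U+1F600.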
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/efi-stub.c
index 48161b1dd098..e97370bdfdb0 100644
--- a/drivers/firmware/efi/libstub/arm-stub.c
+++ b/drivers/firmware/efi/libstub/efi-stub.c
@@ -36,14 +36,9 @@
#endif
static u64 virtmap_base = EFI_RT_VIRTUAL_BASE;
-static bool __efistub_global flat_va_mapping;
+static bool flat_va_mapping;
-static efi_system_table_t *__efistub_global sys_table;
-
-__pure efi_system_table_t *efi_system_table(void)
-{
- return sys_table;
-}
+const efi_system_table_t *efi_system_table;
static struct screen_info *setup_graphics(void)
{
@@ -69,7 +64,7 @@ static struct screen_info *setup_graphics(void)
return si;
}
-void install_memreserve_table(void)
+static void install_memreserve_table(void)
{
struct linux_efi_memreserve *rsv;
efi_guid_t memreserve_table_guid = LINUX_EFI_MEMRESERVE_TABLE_GUID;
@@ -78,7 +73,7 @@ void install_memreserve_table(void)
status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, sizeof(*rsv),
(void **)&rsv);
if (status != EFI_SUCCESS) {
- pr_efi_err("Failed to allocate memreserve entry!\n");
+ efi_err("Failed to allocate memreserve entry!\n");
return;
}
@@ -89,7 +84,7 @@ void install_memreserve_table(void)
status = efi_bs_call(install_configuration_table,
&memreserve_table_guid, rsv);
if (status != EFI_SUCCESS)
- pr_efi_err("Failed to install memreserve config table!\n");
+ efi_err("Failed to install memreserve config table!\n");
}
static unsigned long get_dram_base(void)
@@ -149,7 +144,8 @@ asmlinkage void __noreturn efi_enter_kernel(unsigned long entrypoint,
* for both archictectures, with the arch-specific code provided in the
* handle_kernel_image() function.
*/
-efi_status_t efi_entry(efi_handle_t handle, efi_system_table_t *sys_table_arg)
+efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
+ efi_system_table_t *sys_table_arg)
{
efi_loaded_image_t *image;
efi_status_t status;
@@ -171,10 +167,10 @@ efi_status_t efi_entry(efi_handle_t handle, efi_system_table_t *sys_table_arg)
efi_properties_table_t *prop_tbl;
unsigned long max_addr;
- sys_table = sys_table_arg;
+ efi_system_table = sys_table_arg;
/* Check if we were booted by the EFI firmware */
- if (sys_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) {
+ if (efi_system_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) {
status = EFI_INVALID_PARAMETER;
goto fail;
}
@@ -188,16 +184,16 @@ efi_status_t efi_entry(efi_handle_t handle, efi_system_table_t *sys_table_arg)
* information about the running image, such as size and the command
* line.
*/
- status = sys_table->boottime->handle_protocol(handle,
+ status = efi_system_table->boottime->handle_protocol(handle,
&loaded_image_proto, (void *)&image);
if (status != EFI_SUCCESS) {
- pr_efi_err("Failed to get loaded image protocol\n");
+ efi_err("Failed to get loaded image protocol\n");
goto fail;
}
dram_base = get_dram_base();
if (dram_base == EFI_ERROR) {
- pr_efi_err("Failed to find DRAM base\n");
+ efi_err("Failed to find DRAM base\n");
status = EFI_LOAD_ERROR;
goto fail;
}
@@ -207,22 +203,32 @@ efi_status_t efi_entry(efi_handle_t handle, efi_system_table_t *sys_table_arg)
* protocol. We are going to copy the command line into the
* device tree, so this can be allocated anywhere.
*/
- cmdline_ptr = efi_convert_cmdline(image, &cmdline_size, ULONG_MAX);
+ cmdline_ptr = efi_convert_cmdline(image, &cmdline_size);
if (!cmdline_ptr) {
- pr_efi_err("getting command line via LOADED_IMAGE_PROTOCOL\n");
+ efi_err("getting command line via LOADED_IMAGE_PROTOCOL\n");
status = EFI_OUT_OF_RESOURCES;
goto fail;
}
if (IS_ENABLED(CONFIG_CMDLINE_EXTEND) ||
IS_ENABLED(CONFIG_CMDLINE_FORCE) ||
- cmdline_size == 0)
- efi_parse_options(CONFIG_CMDLINE);
+ cmdline_size == 0) {
+ status = efi_parse_options(CONFIG_CMDLINE);
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to parse options\n");
+ goto fail_free_cmdline;
+ }
+ }
- if (!IS_ENABLED(CONFIG_CMDLINE_FORCE) && cmdline_size > 0)
- efi_parse_options(cmdline_ptr);
+ if (!IS_ENABLED(CONFIG_CMDLINE_FORCE) && cmdline_size > 0) {
+ status = efi_parse_options(cmdline_ptr);
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to parse options\n");
+ goto fail_free_cmdline;
+ }
+ }
- pr_efi("Booting Linux Kernel...\n");
+ efi_info("Booting Linux Kernel...\n");
si = setup_graphics();
@@ -231,8 +237,8 @@ efi_status_t efi_entry(efi_handle_t handle, efi_system_table_t *sys_table_arg)
&reserve_size,
dram_base, image);
if (status != EFI_SUCCESS) {
- pr_efi_err("Failed to relocate kernel\n");
- goto fail_free_cmdline;
+ efi_err("Failed to relocate kernel\n");
+ goto fail_free_screeninfo;
}
efi_retrieve_tpm2_eventlog();
@@ -250,42 +256,34 @@ efi_status_t efi_entry(efi_handle_t handle, efi_system_table_t *sys_table_arg)
if (!IS_ENABLED(CONFIG_EFI_ARMSTUB_DTB_LOADER) ||
secure_boot != efi_secureboot_mode_disabled) {
if (strstr(cmdline_ptr, "dtb="))
- pr_efi("Ignoring DTB from command line.\n");
+ efi_err("Ignoring DTB from command line.\n");
} else {
status = efi_load_dtb(image, &fdt_addr, &fdt_size);
if (status != EFI_SUCCESS) {
- pr_efi_err("Failed to load device tree!\n");
+ efi_err("Failed to load device tree!\n");
goto fail_free_image;
}
}
if (fdt_addr) {
- pr_efi("Using DTB from command line\n");
+ efi_info("Using DTB from command line\n");
} else {
/* Look for a device tree configuration table entry. */
fdt_addr = (uintptr_t)get_fdt(&fdt_size);
if (fdt_addr)
- pr_efi("Using DTB from configuration table\n");
+ efi_info("Using DTB from configuration table\n");
}
if (!fdt_addr)
- pr_efi("Generating empty DTB\n");
+ efi_info("Generating empty DTB\n");
- if (!noinitrd()) {
+ if (!efi_noinitrd) {
max_addr = efi_get_max_initrd_addr(dram_base, image_addr);
- status = efi_load_initrd_dev_path(&initrd_addr, &initrd_size,
- max_addr);
- if (status == EFI_SUCCESS) {
- pr_efi("Loaded initrd from LINUX_EFI_INITRD_MEDIA_GUID device path\n");
- } else if (status == EFI_NOT_FOUND) {
- status = efi_load_initrd(image, &initrd_addr, &initrd_size,
- ULONG_MAX, max_addr);
- if (status == EFI_SUCCESS && initrd_size > 0)
- pr_efi("Loaded initrd from command line option\n");
- }
+ status = efi_load_initrd(image, &initrd_addr, &initrd_size,
+ ULONG_MAX, max_addr);
if (status != EFI_SUCCESS)
- pr_efi_err("Failed to load initrd!\n");
+ efi_err("Failed to load initrd!\n");
}
efi_random_get_seed();
@@ -303,7 +301,7 @@ efi_status_t efi_entry(efi_handle_t handle, efi_system_table_t *sys_table_arg)
EFI_PROPERTIES_RUNTIME_MEMORY_PROTECTION_NON_EXECUTABLE_PE_DATA);
/* hibernation expects the runtime regions to stay in the same place */
- if (!IS_ENABLED(CONFIG_HIBERNATION) && !nokaslr() && !flat_va_mapping) {
+ if (!IS_ENABLED(CONFIG_HIBERNATION) && !efi_nokaslr && !flat_va_mapping) {
/*
* Randomize the base of the UEFI runtime services region.
* Preserve the 2 MB alignment of the region by taking a
@@ -335,7 +333,7 @@ efi_status_t efi_entry(efi_handle_t handle, efi_system_table_t *sys_table_arg)
/* not reached */
fail_free_initrd:
- pr_efi_err("Failed to update FDT and exit boot services\n");
+ efi_err("Failed to update FDT and exit boot services\n");
efi_free(initrd_size, initrd_addr);
efi_free(fdt_size, fdt_addr);
@@ -343,9 +341,10 @@ fail_free_initrd:
fail_free_image:
efi_free(image_size, image_addr);
efi_free(reserve_size, reserve_addr);
-fail_free_cmdline:
+fail_free_screeninfo:
free_screen_info(si);
- efi_free(cmdline_size, (unsigned long)cmdline_ptr);
+fail_free_cmdline:
+ efi_bs_call(free_pool, cmdline_ptr);
fail:
return status;
}
@@ -376,7 +375,7 @@ void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size,
size = in->num_pages * EFI_PAGE_SIZE;
in->virt_addr = in->phys_addr;
- if (novamap()) {
+ if (efi_novamap) {
continue;
}
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index 62943992f02f..bcd8c0a785f0 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -3,6 +3,13 @@
#ifndef _DRIVERS_FIRMWARE_EFI_EFISTUB_H
#define _DRIVERS_FIRMWARE_EFI_EFISTUB_H
+#include <linux/compiler.h>
+#include <linux/efi.h>
+#include <linux/kernel.h>
+#include <linux/kern_levels.h>
+#include <linux/types.h>
+#include <asm/efi.h>
+
/* error code which can't be mistaken for valid address */
#define EFI_ERROR (~0UL)
@@ -25,25 +32,33 @@
#define EFI_ALLOC_ALIGN EFI_PAGE_SIZE
#endif
-#if defined(CONFIG_ARM) || defined(CONFIG_X86)
-#define __efistub_global __section(.data)
-#else
-#define __efistub_global
-#endif
+extern bool efi_nochunk;
+extern bool efi_nokaslr;
+extern bool efi_noinitrd;
+extern int efi_loglevel;
+extern bool efi_novamap;
-extern bool __pure nochunk(void);
-extern bool __pure nokaslr(void);
-extern bool __pure noinitrd(void);
-extern bool __pure is_quiet(void);
-extern bool __pure novamap(void);
+extern const efi_system_table_t *efi_system_table;
-extern __pure efi_system_table_t *efi_system_table(void);
+efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
+ efi_system_table_t *sys_table_arg);
-#define pr_efi(msg) do { \
- if (!is_quiet()) efi_printk("EFI stub: "msg); \
-} while (0)
+#ifndef ARCH_HAS_EFISTUB_WRAPPERS
-#define pr_efi_err(msg) efi_printk("EFI stub: ERROR: "msg)
+#define efi_is_native() (true)
+#define efi_bs_call(func, ...) efi_system_table->boottime->func(__VA_ARGS__)
+#define efi_rt_call(func, ...) efi_system_table->runtime->func(__VA_ARGS__)
+#define efi_table_attr(inst, attr) (inst->attr)
+#define efi_call_proto(inst, func, ...) inst->func(inst, ##__VA_ARGS__)
+
+#endif
+
+#define efi_info(fmt, ...) \
+ efi_printk(KERN_INFO fmt, ##__VA_ARGS__)
+#define efi_err(fmt, ...) \
+ efi_printk(KERN_ERR "ERROR: " fmt, ##__VA_ARGS__)
+#define efi_debug(fmt, ...) \
+ efi_printk(KERN_DEBUG "DEBUG: " fmt, ##__VA_ARGS__)
/* Helper macros for the usual case of using simple C variables: */
#ifndef fdt_setprop_inplace_var
@@ -77,6 +92,13 @@ extern __pure efi_system_table_t *efi_system_table(void);
((handle = efi_get_handle_at((array), i)) || true); \
i++)
+static inline
+void efi_set_u64_split(u64 data, u32 *lo, u32 *hi)
+{
+ *lo = lower_32_bits(data);
+ *hi = upper_32_bits(data);
+}
+
/*
* Allocation types for calls to boottime->allocate_pages.
*/
@@ -93,6 +115,16 @@ extern __pure efi_system_table_t *efi_system_table(void);
#define EFI_LOCATE_BY_PROTOCOL 2
/*
+ * boottime->stall takes the time period in microseconds
+ */
+#define EFI_USEC_PER_SEC 1000000
+
+/*
+ * boottime->set_timer takes the time in 100ns units
+ */
+#define EFI_100NSEC_PER_USEC ((u64)10)
+
+/*
* An efi_boot_memmap is used by efi_get_memory_map() to return the
* EFI memory map in a dynamically allocated buffer.
*
@@ -116,6 +148,39 @@ struct efi_boot_memmap {
typedef struct efi_generic_dev_path efi_device_path_protocol_t;
+typedef void *efi_event_t;
+/* Note that notifications won't work in mixed mode */
+typedef void (__efiapi *efi_event_notify_t)(efi_event_t, void *);
+
+#define EFI_EVT_TIMER 0x80000000U
+#define EFI_EVT_RUNTIME 0x40000000U
+#define EFI_EVT_NOTIFY_WAIT 0x00000100U
+#define EFI_EVT_NOTIFY_SIGNAL 0x00000200U
+
+/*
+ * boottime->wait_for_event takes an array of events as input.
+ * Provide a helper to set it up correctly for mixed mode.
+ */
+static inline
+void efi_set_event_at(efi_event_t *events, size_t idx, efi_event_t event)
+{
+ if (efi_is_native())
+ events[idx] = event;
+ else
+ ((u32 *)events)[idx] = (u32)(unsigned long)event;
+}
+
+#define EFI_TPL_APPLICATION 4
+#define EFI_TPL_CALLBACK 8
+#define EFI_TPL_NOTIFY 16
+#define EFI_TPL_HIGH_LEVEL 31
+
+typedef enum {
+ EfiTimerCancel,
+ EfiTimerPeriodic,
+ EfiTimerRelative
+} EFI_TIMER_DELAY;
+
/*
* EFI Boot Services table
*/
@@ -134,11 +199,16 @@ union efi_boot_services {
efi_status_t (__efiapi *allocate_pool)(int, unsigned long,
void **);
efi_status_t (__efiapi *free_pool)(void *);
- void *create_event;
- void *set_timer;
- void *wait_for_event;
+ efi_status_t (__efiapi *create_event)(u32, unsigned long,
+ efi_event_notify_t, void *,
+ efi_event_t *);
+ efi_status_t (__efiapi *set_timer)(efi_event_t,
+ EFI_TIMER_DELAY, u64);
+ efi_status_t (__efiapi *wait_for_event)(unsigned long,
+ efi_event_t *,
+ unsigned long *);
void *signal_event;
- void *close_event;
+ efi_status_t (__efiapi *close_event)(efi_event_t);
void *check_event;
void *install_protocol_interface;
void *reinstall_protocol_interface;
@@ -165,7 +235,7 @@ union efi_boot_services {
efi_status_t (__efiapi *exit_boot_services)(efi_handle_t,
unsigned long);
void *get_next_monotonic_count;
- void *stall;
+ efi_status_t (__efiapi *stall)(unsigned long);
void *set_watchdog_timer;
void *connect_controller;
efi_status_t (__efiapi *disconnect_controller)(efi_handle_t,
@@ -250,6 +320,27 @@ union efi_uga_draw_protocol {
} mixed_mode;
};
+typedef struct {
+ u16 scan_code;
+ efi_char16_t unicode_char;
+} efi_input_key_t;
+
+union efi_simple_text_input_protocol {
+ struct {
+ void *reset;
+ efi_status_t (__efiapi *read_keystroke)(efi_simple_text_input_protocol_t *,
+ efi_input_key_t *);
+ efi_event_t wait_for_key;
+ };
+ struct {
+ u32 reset;
+ u32 read_keystroke;
+ u32 wait_for_key;
+ } mixed_mode;
+};
+
+efi_status_t efi_wait_for_key(unsigned long usec, efi_input_key_t *key);
+
union efi_simple_text_output_protocol {
struct {
void *reset;
@@ -311,8 +402,10 @@ typedef union efi_graphics_output_protocol efi_graphics_output_protocol_t;
union efi_graphics_output_protocol {
struct {
- void *query_mode;
- void *set_mode;
+ efi_status_t (__efiapi *query_mode)(efi_graphics_output_protocol_t *,
+ u32, unsigned long *,
+ efi_graphics_output_mode_info_t **);
+ efi_status_t (__efiapi *set_mode) (efi_graphics_output_protocol_t *, u32);
void *blt;
efi_graphics_output_protocol_mode_t *mode;
};
@@ -600,8 +693,6 @@ efi_status_t efi_exit_boot_services(void *handle,
void *priv,
efi_exit_boot_map_processing priv_func);
-void efi_char16_printk(efi_char16_t *);
-
efi_status_t allocate_new_fdt_and_exit_boot(void *handle,
unsigned long *new_fdt_addr,
unsigned long max_addr,
@@ -625,33 +716,24 @@ efi_status_t check_platform_features(void);
void *get_efi_config_table(efi_guid_t guid);
-void efi_printk(char *str);
+/* NOTE: These functions do not print a trailing newline after the string */
+void efi_char16_puts(efi_char16_t *);
+void efi_puts(const char *str);
+
+__printf(1, 2) int efi_printk(char const *fmt, ...);
void efi_free(unsigned long size, unsigned long addr);
-char *efi_convert_cmdline(efi_loaded_image_t *image, int *cmd_line_len,
- unsigned long max_addr);
+char *efi_convert_cmdline(efi_loaded_image_t *image, int *cmd_line_len);
efi_status_t efi_get_memory_map(struct efi_boot_memmap *map);
-efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align,
- unsigned long *addr, unsigned long min);
-
-static inline
-efi_status_t efi_low_alloc(unsigned long size, unsigned long align,
- unsigned long *addr)
-{
- /*
- * Don't allocate at 0x0. It will confuse code that
- * checks pointers against NULL. Skip the first 8
- * bytes so we start at a nice even number.
- */
- return efi_low_alloc_above(size, align, addr, 0x8);
-}
-
efi_status_t efi_allocate_pages(unsigned long size, unsigned long *addr,
unsigned long max);
+efi_status_t efi_allocate_pages_aligned(unsigned long size, unsigned long *addr,
+ unsigned long max, unsigned long align);
+
efi_status_t efi_relocate_kernel(unsigned long *image_addr,
unsigned long image_size,
unsigned long alloc_size,
@@ -661,12 +743,27 @@ efi_status_t efi_relocate_kernel(unsigned long *image_addr,
efi_status_t efi_parse_options(char const *cmdline);
+void efi_parse_option_graphics(char *option);
+
efi_status_t efi_setup_gop(struct screen_info *si, efi_guid_t *proto,
unsigned long size);
-efi_status_t efi_load_dtb(efi_loaded_image_t *image,
- unsigned long *load_addr,
- unsigned long *load_size);
+efi_status_t handle_cmdline_files(efi_loaded_image_t *image,
+ const efi_char16_t *optstr,
+ int optstr_size,
+ unsigned long soft_limit,
+ unsigned long hard_limit,
+ unsigned long *load_addr,
+ unsigned long *load_size);
+
+
+static inline efi_status_t efi_load_dtb(efi_loaded_image_t *image,
+ unsigned long *load_addr,
+ unsigned long *load_size)
+{
+ return handle_cmdline_files(image, L"dtb=", sizeof(L"dtb=") - 2,
+ ULONG_MAX, ULONG_MAX, load_addr, load_size);
+}
efi_status_t efi_load_initrd(efi_loaded_image_t *image,
unsigned long *load_addr,
@@ -674,8 +771,4 @@ efi_status_t efi_load_initrd(efi_loaded_image_t *image,
unsigned long soft_limit,
unsigned long hard_limit);
-efi_status_t efi_load_initrd_dev_path(unsigned long *load_addr,
- unsigned long *load_size,
- unsigned long max);
-
#endif
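efi_set_u64_split(), declared above, is a small helper for filling structures that carry a 64-bit value as two 32-bit halves. A trivial userspace equivalent, only to show the split (lower_32_bits()/upper_32_bits() behave like the casts below):

#include <stdint.h>
#include <stdio.h>

static void set_u64_split(uint64_t data, uint32_t *lo, uint32_t *hi)
{
	*lo = (uint32_t)data;		/* lower_32_bits() */
	*hi = (uint32_t)(data >> 32);	/* upper_32_bits() */
}

int main(void)
{
	uint32_t lo, hi;

	set_u64_split(0x123456789abcdef0ULL, &lo, &hi);
	printf("hi=%08x lo=%08x\n", hi, lo);	/* hi=12345678 lo=9abcdef0 */
	return 0;
}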
diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
index 46cffac7a5f1..11ecf3c4640e 100644
--- a/drivers/firmware/efi/libstub/fdt.c
+++ b/drivers/firmware/efi/libstub/fdt.c
@@ -39,7 +39,7 @@ static efi_status_t update_fdt(void *orig_fdt, unsigned long orig_fdt_size,
/* Do some checks on provided FDT, if it exists: */
if (orig_fdt) {
if (fdt_check_header(orig_fdt)) {
- pr_efi_err("Device Tree header not valid!\n");
+ efi_err("Device Tree header not valid!\n");
return EFI_LOAD_ERROR;
}
/*
@@ -47,7 +47,7 @@ static efi_status_t update_fdt(void *orig_fdt, unsigned long orig_fdt_size,
* configuration table:
*/
if (orig_fdt_size && fdt_totalsize(orig_fdt) > orig_fdt_size) {
- pr_efi_err("Truncated device tree! foo!\n");
+ efi_err("Truncated device tree! foo!\n");
return EFI_LOAD_ERROR;
}
}
@@ -110,7 +110,7 @@ static efi_status_t update_fdt(void *orig_fdt, unsigned long orig_fdt_size,
/* Add FDT entries for EFI runtime services in chosen node. */
node = fdt_subnode_offset(fdt, 0, "chosen");
- fdt_val64 = cpu_to_fdt64((u64)(unsigned long)efi_system_table());
+ fdt_val64 = cpu_to_fdt64((u64)(unsigned long)efi_system_table);
status = fdt_setprop_var(fdt, node, "linux,uefi-system-table", fdt_val64);
if (status)
@@ -270,16 +270,16 @@ efi_status_t allocate_new_fdt_and_exit_boot(void *handle,
*/
status = efi_get_memory_map(&map);
if (status != EFI_SUCCESS) {
- pr_efi_err("Unable to retrieve UEFI memory map.\n");
+ efi_err("Unable to retrieve UEFI memory map.\n");
return status;
}
- pr_efi("Exiting boot services and installing virtual address map...\n");
+ efi_info("Exiting boot services and installing virtual address map...\n");
map.map = &memory_map;
status = efi_allocate_pages(MAX_FDT_SIZE, new_fdt_addr, max_addr);
if (status != EFI_SUCCESS) {
- pr_efi_err("Unable to allocate memory for new device tree.\n");
+ efi_err("Unable to allocate memory for new device tree.\n");
goto fail;
}
@@ -296,7 +296,7 @@ efi_status_t allocate_new_fdt_and_exit_boot(void *handle,
initrd_addr, initrd_size);
if (status != EFI_SUCCESS) {
- pr_efi_err("Unable to construct new device tree.\n");
+ efi_err("Unable to construct new device tree.\n");
goto fail_free_new_fdt;
}
@@ -310,11 +310,11 @@ efi_status_t allocate_new_fdt_and_exit_boot(void *handle,
if (status == EFI_SUCCESS) {
efi_set_virtual_address_map_t *svam;
- if (novamap())
+ if (efi_novamap)
return EFI_SUCCESS;
/* Install the new virtual address map */
- svam = efi_system_table()->runtime->set_virtual_address_map;
+ svam = efi_system_table->runtime->set_virtual_address_map;
status = svam(runtime_entry_count * desc_size, desc_size,
desc_ver, runtime_map);
@@ -342,13 +342,13 @@ efi_status_t allocate_new_fdt_and_exit_boot(void *handle,
return EFI_SUCCESS;
}
- pr_efi_err("Exit boot services failed.\n");
+ efi_err("Exit boot services failed.\n");
fail_free_new_fdt:
efi_free(MAX_FDT_SIZE, *new_fdt_addr);
fail:
- efi_system_table()->boottime->free_pool(runtime_map);
+ efi_system_table->boottime->free_pool(runtime_map);
return EFI_LOAD_ERROR;
}
@@ -363,7 +363,7 @@ void *get_fdt(unsigned long *fdt_size)
return NULL;
if (fdt_check_header(fdt) != 0) {
- pr_efi_err("Invalid header detected on UEFI supplied FDT, ignoring ...\n");
+ efi_err("Invalid header detected on UEFI supplied FDT, ignoring ...\n");
return NULL;
}
*fdt_size = fdt_totalsize(fdt);
diff --git a/drivers/firmware/efi/libstub/file.c b/drivers/firmware/efi/libstub/file.c
index ea66b1f16a79..2005e33b33d5 100644
--- a/drivers/firmware/efi/libstub/file.c
+++ b/drivers/firmware/efi/libstub/file.c
@@ -46,16 +46,14 @@ static efi_status_t efi_open_file(efi_file_protocol_t *volume,
status = volume->open(volume, &fh, fi->filename, EFI_FILE_MODE_READ, 0);
if (status != EFI_SUCCESS) {
- pr_efi_err("Failed to open file: ");
- efi_char16_printk(fi->filename);
- efi_printk("\n");
+ efi_err("Failed to open file: %ls\n", fi->filename);
return status;
}
info_sz = sizeof(struct finfo);
status = fh->get_info(fh, &info_guid, &info_sz, fi);
if (status != EFI_SUCCESS) {
- pr_efi_err("Failed to get file info\n");
+ efi_err("Failed to get file info\n");
fh->close(fh);
return status;
}
@@ -75,13 +73,13 @@ static efi_status_t efi_open_volume(efi_loaded_image_t *image,
status = efi_bs_call(handle_protocol, image->device_handle, &fs_proto,
(void **)&io);
if (status != EFI_SUCCESS) {
- pr_efi_err("Failed to handle fs_proto\n");
+ efi_err("Failed to handle fs_proto\n");
return status;
}
status = io->open_volume(io, fh);
if (status != EFI_SUCCESS)
- pr_efi_err("Failed to open volume\n");
+ efi_err("Failed to open volume\n");
return status;
}
@@ -121,13 +119,13 @@ static int find_file_option(const efi_char16_t *cmdline, int cmdline_len,
* We only support loading a file from the same filesystem as
* the kernel image.
*/
-static efi_status_t handle_cmdline_files(efi_loaded_image_t *image,
- const efi_char16_t *optstr,
- int optstr_size,
- unsigned long soft_limit,
- unsigned long hard_limit,
- unsigned long *load_addr,
- unsigned long *load_size)
+efi_status_t handle_cmdline_files(efi_loaded_image_t *image,
+ const efi_char16_t *optstr,
+ int optstr_size,
+ unsigned long soft_limit,
+ unsigned long hard_limit,
+ unsigned long *load_addr,
+ unsigned long *load_size)
{
const efi_char16_t *cmdline = image->load_options;
int cmdline_len = image->load_options_size / 2;
@@ -142,7 +140,7 @@ static efi_status_t handle_cmdline_files(efi_loaded_image_t *image,
if (!load_addr || !load_size)
return EFI_INVALID_PARAMETER;
- if (IS_ENABLED(CONFIG_X86) && !nochunk())
+ if (IS_ENABLED(CONFIG_X86) && !efi_nochunk)
efi_chunk_size = EFI_READ_CHUNK_SIZE;
alloc_addr = alloc_size = 0;
@@ -191,7 +189,7 @@ static efi_status_t handle_cmdline_files(efi_loaded_image_t *image,
&alloc_addr,
hard_limit);
if (status != EFI_SUCCESS) {
- pr_efi_err("Failed to allocate memory for files\n");
+ efi_err("Failed to allocate memory for files\n");
goto err_close_file;
}
@@ -215,7 +213,7 @@ static efi_status_t handle_cmdline_files(efi_loaded_image_t *image,
status = file->read(file, &chunksize, addr);
if (status != EFI_SUCCESS) {
- pr_efi_err("Failed to read file\n");
+ efi_err("Failed to read file\n");
goto err_close_file;
}
addr += chunksize;
@@ -239,21 +237,3 @@ err_close_volume:
efi_free(alloc_size, alloc_addr);
return status;
}
-
-efi_status_t efi_load_dtb(efi_loaded_image_t *image,
- unsigned long *load_addr,
- unsigned long *load_size)
-{
- return handle_cmdline_files(image, L"dtb=", sizeof(L"dtb=") - 2,
- ULONG_MAX, ULONG_MAX, load_addr, load_size);
-}
-
-efi_status_t efi_load_initrd(efi_loaded_image_t *image,
- unsigned long *load_addr,
- unsigned long *load_size,
- unsigned long soft_limit,
- unsigned long hard_limit)
-{
- return handle_cmdline_files(image, L"initrd=", sizeof(L"initrd=") - 2,
- soft_limit, hard_limit, load_addr, load_size);
-}
diff --git a/drivers/firmware/efi/libstub/gop.c b/drivers/firmware/efi/libstub/gop.c
index 55e6b3f286fe..ea5da307d542 100644
--- a/drivers/firmware/efi/libstub/gop.c
+++ b/drivers/firmware/efi/libstub/gop.c
@@ -5,169 +5,546 @@
*
* ----------------------------------------------------------------------- */
+#include <linux/bitops.h>
+#include <linux/ctype.h>
#include <linux/efi.h>
#include <linux/screen_info.h>
+#include <linux/string.h>
#include <asm/efi.h>
#include <asm/setup.h>
#include "efistub.h"
-static void find_bits(unsigned long mask, u8 *pos, u8 *size)
+enum efi_cmdline_option {
+ EFI_CMDLINE_NONE,
+ EFI_CMDLINE_MODE_NUM,
+ EFI_CMDLINE_RES,
+ EFI_CMDLINE_AUTO,
+ EFI_CMDLINE_LIST
+};
+
+static struct {
+ enum efi_cmdline_option option;
+ union {
+ u32 mode;
+ struct {
+ u32 width, height;
+ int format;
+ u8 depth;
+ } res;
+ };
+} cmdline = { .option = EFI_CMDLINE_NONE };
+
+static bool parse_modenum(char *option, char **next)
+{
+ u32 m;
+
+ if (!strstarts(option, "mode="))
+ return false;
+ option += strlen("mode=");
+ m = simple_strtoull(option, &option, 0);
+ if (*option && *option++ != ',')
+ return false;
+ cmdline.option = EFI_CMDLINE_MODE_NUM;
+ cmdline.mode = m;
+
+ *next = option;
+ return true;
+}
+
+static bool parse_res(char *option, char **next)
+{
+ u32 w, h, d = 0;
+ int pf = -1;
+
+ if (!isdigit(*option))
+ return false;
+ w = simple_strtoull(option, &option, 10);
+ if (*option++ != 'x' || !isdigit(*option))
+ return false;
+ h = simple_strtoull(option, &option, 10);
+ if (*option == '-') {
+ option++;
+ if (strstarts(option, "rgb")) {
+ option += strlen("rgb");
+ pf = PIXEL_RGB_RESERVED_8BIT_PER_COLOR;
+ } else if (strstarts(option, "bgr")) {
+ option += strlen("bgr");
+ pf = PIXEL_BGR_RESERVED_8BIT_PER_COLOR;
+ } else if (isdigit(*option))
+ d = simple_strtoull(option, &option, 10);
+ else
+ return false;
+ }
+ if (*option && *option++ != ',')
+ return false;
+ cmdline.option = EFI_CMDLINE_RES;
+ cmdline.res.width = w;
+ cmdline.res.height = h;
+ cmdline.res.format = pf;
+ cmdline.res.depth = d;
+
+ *next = option;
+ return true;
+}
+
+static bool parse_auto(char *option, char **next)
+{
+ if (!strstarts(option, "auto"))
+ return false;
+ option += strlen("auto");
+ if (*option && *option++ != ',')
+ return false;
+ cmdline.option = EFI_CMDLINE_AUTO;
+
+ *next = option;
+ return true;
+}
+
+static bool parse_list(char *option, char **next)
{
- u8 first, len;
+ if (!strstarts(option, "list"))
+ return false;
+ option += strlen("list");
+ if (*option && *option++ != ',')
+ return false;
+ cmdline.option = EFI_CMDLINE_LIST;
+
+ *next = option;
+ return true;
+}
+
+void efi_parse_option_graphics(char *option)
+{
+ while (*option) {
+ if (parse_modenum(option, &option))
+ continue;
+ if (parse_res(option, &option))
+ continue;
+ if (parse_auto(option, &option))
+ continue;
+ if (parse_list(option, &option))
+ continue;
+
+ while (*option && *option++ != ',')
+ ;
+ }
+}
+
+static u32 choose_mode_modenum(efi_graphics_output_protocol_t *gop)
+{
+ efi_status_t status;
+
+ efi_graphics_output_protocol_mode_t *mode;
+ efi_graphics_output_mode_info_t *info;
+ unsigned long info_size;
+
+ u32 max_mode, cur_mode;
+ int pf;
+
+ mode = efi_table_attr(gop, mode);
+
+ cur_mode = efi_table_attr(mode, mode);
+ if (cmdline.mode == cur_mode)
+ return cur_mode;
+
+ max_mode = efi_table_attr(mode, max_mode);
+ if (cmdline.mode >= max_mode) {
+ efi_err("Requested mode is invalid\n");
+ return cur_mode;
+ }
+
+ status = efi_call_proto(gop, query_mode, cmdline.mode,
+ &info_size, &info);
+ if (status != EFI_SUCCESS) {
+ efi_err("Couldn't get mode information\n");
+ return cur_mode;
+ }
+
+ pf = info->pixel_format;
+
+ efi_bs_call(free_pool, info);
+
+ if (pf == PIXEL_BLT_ONLY || pf >= PIXEL_FORMAT_MAX) {
+ efi_err("Invalid PixelFormat\n");
+ return cur_mode;
+ }
+
+ return cmdline.mode;
+}
+
+static u8 pixel_bpp(int pixel_format, efi_pixel_bitmask_t pixel_info)
+{
+ if (pixel_format == PIXEL_BIT_MASK) {
+ u32 mask = pixel_info.red_mask | pixel_info.green_mask |
+ pixel_info.blue_mask | pixel_info.reserved_mask;
+ if (!mask)
+ return 0;
+ return __fls(mask) - __ffs(mask) + 1;
+ } else
+ return 32;
+}
+
+static u32 choose_mode_res(efi_graphics_output_protocol_t *gop)
+{
+ efi_status_t status;
+
+ efi_graphics_output_protocol_mode_t *mode;
+ efi_graphics_output_mode_info_t *info;
+ unsigned long info_size;
+
+ u32 max_mode, cur_mode;
+ int pf;
+ efi_pixel_bitmask_t pi;
+ u32 m, w, h;
+
+ mode = efi_table_attr(gop, mode);
+
+ cur_mode = efi_table_attr(mode, mode);
+ info = efi_table_attr(mode, info);
+ pf = info->pixel_format;
+ pi = info->pixel_information;
+ w = info->horizontal_resolution;
+ h = info->vertical_resolution;
+
+ if (w == cmdline.res.width && h == cmdline.res.height &&
+ (cmdline.res.format < 0 || cmdline.res.format == pf) &&
+ (!cmdline.res.depth || cmdline.res.depth == pixel_bpp(pf, pi)))
+ return cur_mode;
+
+ max_mode = efi_table_attr(mode, max_mode);
+
+ for (m = 0; m < max_mode; m++) {
+ if (m == cur_mode)
+ continue;
+
+ status = efi_call_proto(gop, query_mode, m,
+ &info_size, &info);
+ if (status != EFI_SUCCESS)
+ continue;
+
+ pf = info->pixel_format;
+ pi = info->pixel_information;
+ w = info->horizontal_resolution;
+ h = info->vertical_resolution;
+
+ efi_bs_call(free_pool, info);
+
+ if (pf == PIXEL_BLT_ONLY || pf >= PIXEL_FORMAT_MAX)
+ continue;
+ if (w == cmdline.res.width && h == cmdline.res.height &&
+ (cmdline.res.format < 0 || cmdline.res.format == pf) &&
+ (!cmdline.res.depth || cmdline.res.depth == pixel_bpp(pf, pi)))
+ return m;
+ }
+
+ efi_err("Couldn't find requested mode\n");
+
+ return cur_mode;
+}
+
+static u32 choose_mode_auto(efi_graphics_output_protocol_t *gop)
+{
+ efi_status_t status;
+
+ efi_graphics_output_protocol_mode_t *mode;
+ efi_graphics_output_mode_info_t *info;
+ unsigned long info_size;
+
+ u32 max_mode, cur_mode, best_mode, area;
+ u8 depth;
+ int pf;
+ efi_pixel_bitmask_t pi;
+ u32 m, w, h, a;
+ u8 d;
+
+ mode = efi_table_attr(gop, mode);
+
+ cur_mode = efi_table_attr(mode, mode);
+ max_mode = efi_table_attr(mode, max_mode);
- first = 0;
- len = 0;
+ info = efi_table_attr(mode, info);
- if (mask) {
- while (!(mask & 0x1)) {
- mask = mask >> 1;
- first++;
+ pf = info->pixel_format;
+ pi = info->pixel_information;
+ w = info->horizontal_resolution;
+ h = info->vertical_resolution;
+
+ best_mode = cur_mode;
+ area = w * h;
+ depth = pixel_bpp(pf, pi);
+
+ for (m = 0; m < max_mode; m++) {
+ if (m == cur_mode)
+ continue;
+
+ status = efi_call_proto(gop, query_mode, m,
+ &info_size, &info);
+ if (status != EFI_SUCCESS)
+ continue;
+
+ pf = info->pixel_format;
+ pi = info->pixel_information;
+ w = info->horizontal_resolution;
+ h = info->vertical_resolution;
+
+ efi_bs_call(free_pool, info);
+
+ if (pf == PIXEL_BLT_ONLY || pf >= PIXEL_FORMAT_MAX)
+ continue;
+ a = w * h;
+ if (a < area)
+ continue;
+ d = pixel_bpp(pf, pi);
+ if (a > area || d > depth) {
+ best_mode = m;
+ area = a;
+ depth = d;
}
+ }
+
+ return best_mode;
+}
+
+static u32 choose_mode_list(efi_graphics_output_protocol_t *gop)
+{
+ efi_status_t status;
+
+ efi_graphics_output_protocol_mode_t *mode;
+ efi_graphics_output_mode_info_t *info;
+ unsigned long info_size;
+
+ u32 max_mode, cur_mode;
+ int pf;
+ efi_pixel_bitmask_t pi;
+ u32 m, w, h;
+ u8 d;
+ const char *dstr;
+ bool valid;
+ efi_input_key_t key;
- while (mask & 0x1) {
- mask = mask >> 1;
- len++;
+ mode = efi_table_attr(gop, mode);
+
+ cur_mode = efi_table_attr(mode, mode);
+ max_mode = efi_table_attr(mode, max_mode);
+
+ efi_printk("Available graphics modes are 0-%u\n", max_mode-1);
+ efi_puts(" * = current mode\n"
+ " - = unusable mode\n");
+ for (m = 0; m < max_mode; m++) {
+ status = efi_call_proto(gop, query_mode, m,
+ &info_size, &info);
+ if (status != EFI_SUCCESS)
+ continue;
+
+ pf = info->pixel_format;
+ pi = info->pixel_information;
+ w = info->horizontal_resolution;
+ h = info->vertical_resolution;
+
+ efi_bs_call(free_pool, info);
+
+ valid = !(pf == PIXEL_BLT_ONLY || pf >= PIXEL_FORMAT_MAX);
+ d = 0;
+ switch (pf) {
+ case PIXEL_RGB_RESERVED_8BIT_PER_COLOR:
+ dstr = "rgb";
+ break;
+ case PIXEL_BGR_RESERVED_8BIT_PER_COLOR:
+ dstr = "bgr";
+ break;
+ case PIXEL_BIT_MASK:
+ dstr = "";
+ d = pixel_bpp(pf, pi);
+ break;
+ case PIXEL_BLT_ONLY:
+ dstr = "blt";
+ break;
+ default:
+ dstr = "xxx";
+ break;
}
+
+ efi_printk("Mode %3u %c%c: Resolution %ux%u-%s%.0hhu\n",
+ m,
+ m == cur_mode ? '*' : ' ',
+ !valid ? '-' : ' ',
+ w, h, dstr, d);
+ }
+
+ efi_puts("\nPress any key to continue (or wait 10 seconds)\n");
+ status = efi_wait_for_key(10 * EFI_USEC_PER_SEC, &key);
+ if (status != EFI_SUCCESS && status != EFI_TIMEOUT) {
+ efi_err("Unable to read key, continuing in 10 seconds\n");
+ efi_bs_call(stall, 10 * EFI_USEC_PER_SEC);
+ }
+
+ return cur_mode;
+}
+
+static void set_mode(efi_graphics_output_protocol_t *gop)
+{
+ efi_graphics_output_protocol_mode_t *mode;
+ u32 cur_mode, new_mode;
+
+ switch (cmdline.option) {
+ case EFI_CMDLINE_MODE_NUM:
+ new_mode = choose_mode_modenum(gop);
+ break;
+ case EFI_CMDLINE_RES:
+ new_mode = choose_mode_res(gop);
+ break;
+ case EFI_CMDLINE_AUTO:
+ new_mode = choose_mode_auto(gop);
+ break;
+ case EFI_CMDLINE_LIST:
+ new_mode = choose_mode_list(gop);
+ break;
+ default:
+ return;
+ }
+
+ mode = efi_table_attr(gop, mode);
+ cur_mode = efi_table_attr(mode, mode);
+
+ if (new_mode == cur_mode)
+ return;
+
+ if (efi_call_proto(gop, set_mode, new_mode) != EFI_SUCCESS)
+ efi_err("Failed to set requested mode\n");
+}
+
+static void find_bits(u32 mask, u8 *pos, u8 *size)
+{
+ if (!mask) {
+ *pos = *size = 0;
+ return;
}
- *pos = first;
- *size = len;
+ /* UEFI spec guarantees that the set bits are contiguous */
+ *pos = __ffs(mask);
+ *size = __fls(mask) - *pos + 1;
}
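
Because the UEFI spec guarantees a contiguous mask, the same position/size pair can also be recovered with compiler builtins; a minimal sketch (illustrative only, assuming GCC/Clang builtins), where mask 0x00ff0000 yields pos = 16, size = 8:

	static void find_bits_sketch(unsigned int mask, unsigned char *pos,
				     unsigned char *size)
	{
		if (!mask) {
			*pos = *size = 0;
			return;
		}
		*pos  = __builtin_ctz(mask);		 /* lowest set bit       */
		*size = 32 - __builtin_clz(mask) - *pos; /* highest - lowest + 1 */
	}
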
static void
setup_pixel_info(struct screen_info *si, u32 pixels_per_scan_line,
efi_pixel_bitmask_t pixel_info, int pixel_format)
{
- if (pixel_format == PIXEL_RGB_RESERVED_8BIT_PER_COLOR) {
- si->lfb_depth = 32;
- si->lfb_linelength = pixels_per_scan_line * 4;
- si->red_size = 8;
- si->red_pos = 0;
- si->green_size = 8;
- si->green_pos = 8;
- si->blue_size = 8;
- si->blue_pos = 16;
- si->rsvd_size = 8;
- si->rsvd_pos = 24;
- } else if (pixel_format == PIXEL_BGR_RESERVED_8BIT_PER_COLOR) {
- si->lfb_depth = 32;
- si->lfb_linelength = pixels_per_scan_line * 4;
- si->red_size = 8;
- si->red_pos = 16;
- si->green_size = 8;
- si->green_pos = 8;
- si->blue_size = 8;
- si->blue_pos = 0;
- si->rsvd_size = 8;
- si->rsvd_pos = 24;
- } else if (pixel_format == PIXEL_BIT_MASK) {
- find_bits(pixel_info.red_mask, &si->red_pos, &si->red_size);
- find_bits(pixel_info.green_mask, &si->green_pos,
- &si->green_size);
- find_bits(pixel_info.blue_mask, &si->blue_pos, &si->blue_size);
- find_bits(pixel_info.reserved_mask, &si->rsvd_pos,
- &si->rsvd_size);
+ if (pixel_format == PIXEL_BIT_MASK) {
+ find_bits(pixel_info.red_mask,
+ &si->red_pos, &si->red_size);
+ find_bits(pixel_info.green_mask,
+ &si->green_pos, &si->green_size);
+ find_bits(pixel_info.blue_mask,
+ &si->blue_pos, &si->blue_size);
+ find_bits(pixel_info.reserved_mask,
+ &si->rsvd_pos, &si->rsvd_size);
si->lfb_depth = si->red_size + si->green_size +
si->blue_size + si->rsvd_size;
si->lfb_linelength = (pixels_per_scan_line * si->lfb_depth) / 8;
} else {
- si->lfb_depth = 4;
- si->lfb_linelength = si->lfb_width / 2;
- si->red_size = 0;
- si->red_pos = 0;
- si->green_size = 0;
- si->green_pos = 0;
- si->blue_size = 0;
- si->blue_pos = 0;
- si->rsvd_size = 0;
- si->rsvd_pos = 0;
+ if (pixel_format == PIXEL_RGB_RESERVED_8BIT_PER_COLOR) {
+ si->red_pos = 0;
+ si->blue_pos = 16;
+ } else /* PIXEL_BGR_RESERVED_8BIT_PER_COLOR */ {
+ si->blue_pos = 0;
+ si->red_pos = 16;
+ }
+
+ si->green_pos = 8;
+ si->rsvd_pos = 24;
+ si->red_size = si->green_size =
+ si->blue_size = si->rsvd_size = 8;
+
+ si->lfb_depth = 32;
+ si->lfb_linelength = pixels_per_scan_line * 4;
}
}
-static efi_status_t setup_gop(struct screen_info *si, efi_guid_t *proto,
- unsigned long size, void **handles)
+static efi_graphics_output_protocol_t *
+find_gop(efi_guid_t *proto, unsigned long size, void **handles)
{
- efi_graphics_output_protocol_t *gop, *first_gop;
- u16 width, height;
- u32 pixels_per_scan_line;
- u32 ext_lfb_base;
- efi_physical_addr_t fb_base;
- efi_pixel_bitmask_t pixel_info;
- int pixel_format;
- efi_status_t status;
+ efi_graphics_output_protocol_t *first_gop;
efi_handle_t h;
int i;
first_gop = NULL;
- gop = NULL;
for_each_efi_handle(h, handles, size, i) {
+ efi_status_t status;
+
+ efi_graphics_output_protocol_t *gop;
efi_graphics_output_protocol_mode_t *mode;
- efi_graphics_output_mode_info_t *info = NULL;
+ efi_graphics_output_mode_info_t *info;
+
efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID;
- bool conout_found = false;
void *dummy = NULL;
- efi_physical_addr_t current_fb_base;
status = efi_bs_call(handle_protocol, h, proto, (void **)&gop);
if (status != EFI_SUCCESS)
continue;
+ mode = efi_table_attr(gop, mode);
+ info = efi_table_attr(mode, info);
+ if (info->pixel_format == PIXEL_BLT_ONLY ||
+ info->pixel_format >= PIXEL_FORMAT_MAX)
+ continue;
+
+ /*
+ * Systems that use the UEFI Console Splitter may
+ * provide multiple GOP devices, not all of which are
+ * backed by real hardware. The workaround is to search
+ * for a GOP implementing the ConOut protocol, and if
+ * one isn't found, to just fall back to the first GOP.
+ *
+ * Once we've found a GOP supporting ConOut,
+ * don't bother looking any further.
+ */
status = efi_bs_call(handle_protocol, h, &conout_proto, &dummy);
if (status == EFI_SUCCESS)
- conout_found = true;
+ return gop;
- mode = efi_table_attr(gop, mode);
- info = efi_table_attr(mode, info);
- current_fb_base = efi_table_attr(mode, frame_buffer_base);
-
- if ((!first_gop || conout_found) &&
- info->pixel_format != PIXEL_BLT_ONLY) {
- /*
- * Systems that use the UEFI Console Splitter may
- * provide multiple GOP devices, not all of which are
- * backed by real hardware. The workaround is to search
- * for a GOP implementing the ConOut protocol, and if
- * one isn't found, to just fall back to the first GOP.
- */
- width = info->horizontal_resolution;
- height = info->vertical_resolution;
- pixel_format = info->pixel_format;
- pixel_info = info->pixel_information;
- pixels_per_scan_line = info->pixels_per_scan_line;
- fb_base = current_fb_base;
-
- /*
- * Once we've found a GOP supporting ConOut,
- * don't bother looking any further.
- */
+ if (!first_gop)
first_gop = gop;
- if (conout_found)
- break;
- }
}
+ return first_gop;
+}
+
+static efi_status_t setup_gop(struct screen_info *si, efi_guid_t *proto,
+ unsigned long size, void **handles)
+{
+ efi_graphics_output_protocol_t *gop;
+ efi_graphics_output_protocol_mode_t *mode;
+ efi_graphics_output_mode_info_t *info;
+
+ gop = find_gop(proto, size, handles);
+
/* Did we find any GOPs? */
- if (!first_gop)
+ if (!gop)
return EFI_NOT_FOUND;
+ /* Change mode if requested */
+ set_mode(gop);
+
/* EFI framebuffer */
+ mode = efi_table_attr(gop, mode);
+ info = efi_table_attr(mode, info);
+
si->orig_video_isVGA = VIDEO_TYPE_EFI;
- si->lfb_width = width;
- si->lfb_height = height;
- si->lfb_base = fb_base;
+ si->lfb_width = info->horizontal_resolution;
+ si->lfb_height = info->vertical_resolution;
- ext_lfb_base = (u64)(unsigned long)fb_base >> 32;
- if (ext_lfb_base) {
+ efi_set_u64_split(efi_table_attr(mode, frame_buffer_base),
+ &si->lfb_base, &si->ext_lfb_base);
+ if (si->ext_lfb_base)
si->capabilities |= VIDEO_CAPABILITY_64BIT_BASE;
- si->ext_lfb_base = ext_lfb_base;
- }
si->pages = 1;
- setup_pixel_info(si, pixels_per_scan_line, pixel_info, pixel_format);
+ setup_pixel_info(si, info->pixels_per_scan_line,
+ info->pixel_information, info->pixel_format);
si->lfb_size = si->lfb_linelength * si->lfb_height;
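
The rewritten code relies on efi_set_u64_split() to split 64-bit values such as the framebuffer base into the lfb_base/ext_lfb_base pair of struct screen_info. Assuming the helper behaves as sketched below (an assumption about efistub.h, not shown in this hunk):

	static inline void efi_set_u64_split_sketch(u64 data, u32 *lo, u32 *hi)
	{
		*lo = (u32)data;		/* e.g. si->lfb_base     */
		*hi = (u32)(data >> 32);	/* e.g. si->ext_lfb_base */
	}
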
diff --git a/drivers/firmware/efi/libstub/mem.c b/drivers/firmware/efi/libstub/mem.c
index 09f4fa01914e..feef8d4be113 100644
--- a/drivers/firmware/efi/libstub/mem.c
+++ b/drivers/firmware/efi/libstub/mem.c
@@ -91,120 +91,23 @@ fail:
efi_status_t efi_allocate_pages(unsigned long size, unsigned long *addr,
unsigned long max)
{
- efi_physical_addr_t alloc_addr = ALIGN_DOWN(max + 1, EFI_ALLOC_ALIGN) - 1;
- int slack = EFI_ALLOC_ALIGN / EFI_PAGE_SIZE - 1;
+ efi_physical_addr_t alloc_addr;
efi_status_t status;
- size = round_up(size, EFI_ALLOC_ALIGN);
+ if (EFI_ALLOC_ALIGN > EFI_PAGE_SIZE)
+ return efi_allocate_pages_aligned(size, addr, max,
+ EFI_ALLOC_ALIGN);
+
+ alloc_addr = ALIGN_DOWN(max + 1, EFI_ALLOC_ALIGN) - 1;
status = efi_bs_call(allocate_pages, EFI_ALLOCATE_MAX_ADDRESS,
- EFI_LOADER_DATA, size / EFI_PAGE_SIZE + slack,
+ EFI_LOADER_DATA, DIV_ROUND_UP(size, EFI_PAGE_SIZE),
&alloc_addr);
if (status != EFI_SUCCESS)
return status;
- *addr = ALIGN((unsigned long)alloc_addr, EFI_ALLOC_ALIGN);
-
- if (slack > 0) {
- int l = (alloc_addr % EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE;
-
- if (l) {
- efi_bs_call(free_pages, alloc_addr, slack - l + 1);
- slack = l - 1;
- }
- if (slack)
- efi_bs_call(free_pages, *addr + size, slack);
- }
+ *addr = alloc_addr;
return EFI_SUCCESS;
}
-/**
- * efi_low_alloc_above() - allocate pages at or above given address
- * @size: size of the memory area to allocate
- * @align: minimum alignment of the allocated memory area. It should
- * a power of two.
- * @addr: on exit the address of the allocated memory
- * @min: minimum address to used for the memory allocation
- *
- * Allocate at the lowest possible address that is not below @min as
- * EFI_LOADER_DATA. The allocated pages are aligned according to @align but at
- * least EFI_ALLOC_ALIGN. The first allocated page will not below the address
- * given by @min.
- *
- * Return: status code
- */
-efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align,
- unsigned long *addr, unsigned long min)
-{
- unsigned long map_size, desc_size, buff_size;
- efi_memory_desc_t *map;
- efi_status_t status;
- unsigned long nr_pages;
- int i;
- struct efi_boot_memmap boot_map;
-
- boot_map.map = &map;
- boot_map.map_size = &map_size;
- boot_map.desc_size = &desc_size;
- boot_map.desc_ver = NULL;
- boot_map.key_ptr = NULL;
- boot_map.buff_size = &buff_size;
-
- status = efi_get_memory_map(&boot_map);
- if (status != EFI_SUCCESS)
- goto fail;
-
- /*
- * Enforce minimum alignment that EFI or Linux requires when
- * requesting a specific address. We are doing page-based (or
- * larger) allocations, and both the address and size must meet
- * alignment constraints.
- */
- if (align < EFI_ALLOC_ALIGN)
- align = EFI_ALLOC_ALIGN;
-
- size = round_up(size, EFI_ALLOC_ALIGN);
- nr_pages = size / EFI_PAGE_SIZE;
- for (i = 0; i < map_size / desc_size; i++) {
- efi_memory_desc_t *desc;
- unsigned long m = (unsigned long)map;
- u64 start, end;
-
- desc = efi_early_memdesc_ptr(m, desc_size, i);
-
- if (desc->type != EFI_CONVENTIONAL_MEMORY)
- continue;
-
- if (efi_soft_reserve_enabled() &&
- (desc->attribute & EFI_MEMORY_SP))
- continue;
-
- if (desc->num_pages < nr_pages)
- continue;
-
- start = desc->phys_addr;
- end = start + desc->num_pages * EFI_PAGE_SIZE;
-
- if (start < min)
- start = min;
-
- start = round_up(start, align);
- if ((start + size) > end)
- continue;
-
- status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS,
- EFI_LOADER_DATA, nr_pages, &start);
- if (status == EFI_SUCCESS) {
- *addr = start;
- break;
- }
- }
-
- if (i == map_size / desc_size)
- status = EFI_NOT_FOUND;
-
- efi_bs_call(free_pool, map);
-fail:
- return status;
-}
/**
* efi_free() - free memory pages
@@ -227,81 +130,3 @@ void efi_free(unsigned long size, unsigned long addr)
nr_pages = round_up(size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE;
efi_bs_call(free_pages, addr, nr_pages);
}
-
-/**
- * efi_relocate_kernel() - copy memory area
- * @image_addr: pointer to address of memory area to copy
- * @image_size: size of memory area to copy
- * @alloc_size: minimum size of memory to allocate, must be greater or
- * equal to image_size
- * @preferred_addr: preferred target address
- * @alignment: minimum alignment of the allocated memory area. It
- * should be a power of two.
- * @min_addr: minimum target address
- *
- * Copy a memory area to a newly allocated memory area aligned according
- * to @alignment but at least EFI_ALLOC_ALIGN. If the preferred address
- * is not available, the allocated address will not be below @min_addr.
- * On exit, @image_addr is updated to the target copy address that was used.
- *
- * This function is used to copy the Linux kernel verbatim. It does not apply
- * any relocation changes.
- *
- * Return: status code
- */
-efi_status_t efi_relocate_kernel(unsigned long *image_addr,
- unsigned long image_size,
- unsigned long alloc_size,
- unsigned long preferred_addr,
- unsigned long alignment,
- unsigned long min_addr)
-{
- unsigned long cur_image_addr;
- unsigned long new_addr = 0;
- efi_status_t status;
- unsigned long nr_pages;
- efi_physical_addr_t efi_addr = preferred_addr;
-
- if (!image_addr || !image_size || !alloc_size)
- return EFI_INVALID_PARAMETER;
- if (alloc_size < image_size)
- return EFI_INVALID_PARAMETER;
-
- cur_image_addr = *image_addr;
-
- /*
- * The EFI firmware loader could have placed the kernel image
- * anywhere in memory, but the kernel has restrictions on the
- * max physical address it can run at. Some architectures
- * also have a prefered address, so first try to relocate
- * to the preferred address. If that fails, allocate as low
- * as possible while respecting the required alignment.
- */
- nr_pages = round_up(alloc_size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE;
- status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS,
- EFI_LOADER_DATA, nr_pages, &efi_addr);
- new_addr = efi_addr;
- /*
- * If preferred address allocation failed allocate as low as
- * possible.
- */
- if (status != EFI_SUCCESS) {
- status = efi_low_alloc_above(alloc_size, alignment, &new_addr,
- min_addr);
- }
- if (status != EFI_SUCCESS) {
- pr_efi_err("Failed to allocate usable memory for kernel.\n");
- return status;
- }
-
- /*
- * We know source/dest won't overlap since both memory ranges
- * have been allocated by UEFI, so we can safely use memcpy.
- */
- memcpy((void *)new_addr, (void *)cur_image_addr, image_size);
-
- /* Return the new address of the relocated image. */
- *image_addr = new_addr;
-
- return status;
-}
diff --git a/drivers/firmware/efi/libstub/pci.c b/drivers/firmware/efi/libstub/pci.c
index b025e59b94df..99fb25d2bcf5 100644
--- a/drivers/firmware/efi/libstub/pci.c
+++ b/drivers/firmware/efi/libstub/pci.c
@@ -28,21 +28,21 @@ void efi_pci_disable_bridge_busmaster(void)
if (status != EFI_BUFFER_TOO_SMALL) {
if (status != EFI_SUCCESS && status != EFI_NOT_FOUND)
- pr_efi_err("Failed to locate PCI I/O handles'\n");
+ efi_err("Failed to locate PCI I/O handles'\n");
return;
}
status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, pci_handle_size,
(void **)&pci_handle);
if (status != EFI_SUCCESS) {
- pr_efi_err("Failed to allocate memory for 'pci_handle'\n");
+ efi_err("Failed to allocate memory for 'pci_handle'\n");
return;
}
status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, &pci_proto,
NULL, &pci_handle_size, pci_handle);
if (status != EFI_SUCCESS) {
- pr_efi_err("Failed to locate PCI I/O handles'\n");
+ efi_err("Failed to locate PCI I/O handles'\n");
goto free_handle;
}
@@ -69,7 +69,7 @@ void efi_pci_disable_bridge_busmaster(void)
* access to the framebuffer. Drivers for true PCIe graphics
* controllers that are behind a PCIe root port do not use
* DMA to implement the GOP framebuffer anyway [although they
- * may use it in their implentation of Gop->Blt()], and so
+ * may use it in their implementation of Gop->Blt()], and so
* disabling DMA in the PCI bridge should not interfere with
* normal operation of the device.
*/
@@ -106,7 +106,7 @@ void efi_pci_disable_bridge_busmaster(void)
status = efi_call_proto(pci, pci.write, EfiPciIoWidthUint16,
PCI_COMMAND, 1, &command);
if (status != EFI_SUCCESS)
- pr_efi_err("Failed to disable PCI busmastering\n");
+ efi_err("Failed to disable PCI busmastering\n");
}
free_handle:
diff --git a/drivers/firmware/efi/libstub/randomalloc.c b/drivers/firmware/efi/libstub/randomalloc.c
index 4578f59e160c..a408df474d83 100644
--- a/drivers/firmware/efi/libstub/randomalloc.c
+++ b/drivers/firmware/efi/libstub/randomalloc.c
@@ -74,6 +74,8 @@ efi_status_t efi_random_alloc(unsigned long size,
if (align < EFI_ALLOC_ALIGN)
align = EFI_ALLOC_ALIGN;
+ size = round_up(size, EFI_ALLOC_ALIGN);
+
/* count the suitable slots in each memory map entry */
for (map_offset = 0; map_offset < map_size; map_offset += desc_size) {
efi_memory_desc_t *md = (void *)memory_map + map_offset;
@@ -85,7 +87,7 @@ efi_status_t efi_random_alloc(unsigned long size,
}
/* find a random number between 0 and total_slots */
- target_slot = (total_slots * (u16)random_seed) >> 16;
+ target_slot = (total_slots * (u64)(random_seed & U32_MAX)) >> 32;
/*
* target_slot is now a value in the range [0, total_slots), and so
@@ -109,7 +111,7 @@ efi_status_t efi_random_alloc(unsigned long size,
}
target = round_up(md->phys_addr, align) + target_slot * align;
- pages = round_up(size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE;
+ pages = size / EFI_PAGE_SIZE;
status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS,
EFI_LOADER_DATA, pages, &target);
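
The new slot computation treats the low 32 bits of the seed as a fixed-point fraction in [0, 1) and scales it by total_slots, so all 32 bits of entropy are used instead of 16. A worked example with assumed values:

	unsigned long total_slots = 1000;
	unsigned long random_seed = 0x80000000UL;	/* fraction 0.5 */
	unsigned long target_slot =
		(total_slots * (u64)(random_seed & 0xffffffffUL)) >> 32;
	/* target_slot == 500, and is always strictly less than total_slots */
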
diff --git a/drivers/firmware/efi/libstub/relocate.c b/drivers/firmware/efi/libstub/relocate.c
new file mode 100644
index 000000000000..9b1aaf8b123f
--- /dev/null
+++ b/drivers/firmware/efi/libstub/relocate.c
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/efi.h>
+#include <asm/efi.h>
+
+#include "efistub.h"
+
+/**
+ * efi_low_alloc_above() - allocate pages at or above given address
+ * @size: size of the memory area to allocate
+ * @align: minimum alignment of the allocated memory area. It should
+ * be a power of two.
+ * @addr: on exit the address of the allocated memory
+ * @min: minimum address to be used for the memory allocation
+ *
+ * Allocate at the lowest possible address that is not below @min as
+ * EFI_LOADER_DATA. The allocated pages are aligned according to @align but at
+ * least EFI_ALLOC_ALIGN. The first allocated page will not be below the address
+ * given by @min.
+ *
+ * Return: status code
+ */
+static efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align,
+ unsigned long *addr, unsigned long min)
+{
+ unsigned long map_size, desc_size, buff_size;
+ efi_memory_desc_t *map;
+ efi_status_t status;
+ unsigned long nr_pages;
+ int i;
+ struct efi_boot_memmap boot_map;
+
+ boot_map.map = &map;
+ boot_map.map_size = &map_size;
+ boot_map.desc_size = &desc_size;
+ boot_map.desc_ver = NULL;
+ boot_map.key_ptr = NULL;
+ boot_map.buff_size = &buff_size;
+
+ status = efi_get_memory_map(&boot_map);
+ if (status != EFI_SUCCESS)
+ goto fail;
+
+ /*
+ * Enforce minimum alignment that EFI or Linux requires when
+ * requesting a specific address. We are doing page-based (or
+ * larger) allocations, and both the address and size must meet
+ * alignment constraints.
+ */
+ if (align < EFI_ALLOC_ALIGN)
+ align = EFI_ALLOC_ALIGN;
+
+ size = round_up(size, EFI_ALLOC_ALIGN);
+ nr_pages = size / EFI_PAGE_SIZE;
+ for (i = 0; i < map_size / desc_size; i++) {
+ efi_memory_desc_t *desc;
+ unsigned long m = (unsigned long)map;
+ u64 start, end;
+
+ desc = efi_early_memdesc_ptr(m, desc_size, i);
+
+ if (desc->type != EFI_CONVENTIONAL_MEMORY)
+ continue;
+
+ if (efi_soft_reserve_enabled() &&
+ (desc->attribute & EFI_MEMORY_SP))
+ continue;
+
+ if (desc->num_pages < nr_pages)
+ continue;
+
+ start = desc->phys_addr;
+ end = start + desc->num_pages * EFI_PAGE_SIZE;
+
+ if (start < min)
+ start = min;
+
+ start = round_up(start, align);
+ if ((start + size) > end)
+ continue;
+
+ status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS,
+ EFI_LOADER_DATA, nr_pages, &start);
+ if (status == EFI_SUCCESS) {
+ *addr = start;
+ break;
+ }
+ }
+
+ if (i == map_size / desc_size)
+ status = EFI_NOT_FOUND;
+
+ efi_bs_call(free_pool, map);
+fail:
+ return status;
+}
+
+/**
+ * efi_relocate_kernel() - copy memory area
+ * @image_addr: pointer to address of memory area to copy
+ * @image_size: size of memory area to copy
+ * @alloc_size: minimum size of memory to allocate, must be greater than or
+ * equal to image_size
+ * @preferred_addr: preferred target address
+ * @alignment: minimum alignment of the allocated memory area. It
+ * should be a power of two.
+ * @min_addr: minimum target address
+ *
+ * Copy a memory area to a newly allocated memory area aligned according
+ * to @alignment but at least EFI_ALLOC_ALIGN. If the preferred address
+ * is not available, the allocated address will not be below @min_addr.
+ * On exit, @image_addr is updated to the target copy address that was used.
+ *
+ * This function is used to copy the Linux kernel verbatim. It does not apply
+ * any relocation changes.
+ *
+ * Return: status code
+ */
+efi_status_t efi_relocate_kernel(unsigned long *image_addr,
+ unsigned long image_size,
+ unsigned long alloc_size,
+ unsigned long preferred_addr,
+ unsigned long alignment,
+ unsigned long min_addr)
+{
+ unsigned long cur_image_addr;
+ unsigned long new_addr = 0;
+ efi_status_t status;
+ unsigned long nr_pages;
+ efi_physical_addr_t efi_addr = preferred_addr;
+
+ if (!image_addr || !image_size || !alloc_size)
+ return EFI_INVALID_PARAMETER;
+ if (alloc_size < image_size)
+ return EFI_INVALID_PARAMETER;
+
+ cur_image_addr = *image_addr;
+
+ /*
+ * The EFI firmware loader could have placed the kernel image
+ * anywhere in memory, but the kernel has restrictions on the
+ * max physical address it can run at. Some architectures
+ * also have a preferred address, so first try to relocate
+ * to the preferred address. If that fails, allocate as low
+ * as possible while respecting the required alignment.
+ */
+ nr_pages = round_up(alloc_size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE;
+ status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS,
+ EFI_LOADER_DATA, nr_pages, &efi_addr);
+ new_addr = efi_addr;
+ /*
+ * If preferred address allocation failed allocate as low as
+ * possible.
+ */
+ if (status != EFI_SUCCESS) {
+ status = efi_low_alloc_above(alloc_size, alignment, &new_addr,
+ min_addr);
+ }
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to allocate usable memory for kernel.\n");
+ return status;
+ }
+
+ /*
+ * We know source/dest won't overlap since both memory ranges
+ * have been allocated by UEFI, so we can safely use memcpy.
+ */
+ memcpy((void *)new_addr, (void *)cur_image_addr, image_size);
+
+ /* Return the new address of the relocated image. */
+ *image_addr = new_addr;
+
+ return status;
+}
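
A rough sketch of how an architecture stub might call the relocation helper; every identifier other than efi_relocate_kernel() is an illustrative placeholder, not taken from this patch:

	unsigned long image_addr = (unsigned long)image_base;	/* current copy */
	efi_status_t status;

	status = efi_relocate_kernel(&image_addr, kernel_size, kernel_memsize,
				     preferred_addr, min_kimg_align, 0);
	if (status != EFI_SUCCESS)
		return status;
	/* image_addr now refers to the new copy; the original is left in place */
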
diff --git a/drivers/firmware/efi/libstub/secureboot.c b/drivers/firmware/efi/libstub/secureboot.c
index a765378ad18c..5efc524b14be 100644
--- a/drivers/firmware/efi/libstub/secureboot.c
+++ b/drivers/firmware/efi/libstub/secureboot.c
@@ -67,10 +67,10 @@ enum efi_secureboot_mode efi_get_secureboot(void)
return efi_secureboot_mode_disabled;
secure_boot_enabled:
- pr_efi("UEFI Secure Boot is enabled.\n");
+ efi_info("UEFI Secure Boot is enabled.\n");
return efi_secureboot_mode_enabled;
out_efi_err:
- pr_efi_err("Could not determine UEFI Secure Boot status.\n");
+ efi_err("Could not determine UEFI Secure Boot status.\n");
return efi_secureboot_mode_unknown;
}
diff --git a/drivers/firmware/efi/libstub/tpm.c b/drivers/firmware/efi/libstub/tpm.c
index e9a684637b70..7acbac16eae0 100644
--- a/drivers/firmware/efi/libstub/tpm.c
+++ b/drivers/firmware/efi/libstub/tpm.c
@@ -119,7 +119,7 @@ void efi_retrieve_tpm2_eventlog(void)
sizeof(*log_tbl) + log_size, (void **)&log_tbl);
if (status != EFI_SUCCESS) {
- efi_printk("Unable to allocate memory for event log\n");
+ efi_err("Unable to allocate memory for event log\n");
return;
}
diff --git a/drivers/firmware/efi/libstub/vsprintf.c b/drivers/firmware/efi/libstub/vsprintf.c
new file mode 100644
index 000000000000..e65ef49a54cd
--- /dev/null
+++ b/drivers/firmware/efi/libstub/vsprintf.c
@@ -0,0 +1,564 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* -*- linux-c -*- ------------------------------------------------------- *
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * Copyright 2007 rPath, Inc. - All Rights Reserved
+ *
+ * ----------------------------------------------------------------------- */
+
+/*
+ * Oh, it's a waste of space, but oh-so-yummy for debugging.
+ */
+
+#include <stdarg.h>
+
+#include <linux/compiler.h>
+#include <linux/ctype.h>
+#include <linux/kernel.h>
+#include <linux/limits.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+static
+int skip_atoi(const char **s)
+{
+ int i = 0;
+
+ while (isdigit(**s))
+ i = i * 10 + *((*s)++) - '0';
+ return i;
+}
+
+/*
+ * put_dec_full4 handles numbers in the range 0 <= r < 10000.
+ * The multiplier 0xccd is round(2^15/10), and the approximation
+ * r/10 == (r * 0xccd) >> 15 is exact for all r < 16389.
+ */
+static
+void put_dec_full4(char *end, unsigned int r)
+{
+ int i;
+
+ for (i = 0; i < 3; i++) {
+ unsigned int q = (r * 0xccd) >> 15;
+ *--end = '0' + (r - q * 10);
+ r = q;
+ }
+ *--end = '0' + r;
+}
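
The exactness claim in the comment above can be verified exhaustively with a few lines of throwaway host code (a standalone check, not part of the stub):

	#include <assert.h>

	int main(void)
	{
		for (unsigned int r = 0; r < 16389; r++)
			assert(((r * 0xccd) >> 15) == r / 10);	/* 0xccd == 3277 */
		return 0;
	}
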
+
+/* put_dec is copied from lib/vsprintf.c with small modifications */
+
+/*
+ * Call put_dec_full4 on x % 10000, return x / 10000.
+ * The approximation x/10000 == (x * 0x346DC5D7) >> 43
+ * holds for all x < 1,128,869,999. The largest value this
+ * helper will ever be asked to convert is 1,125,520,955.
+ * (second call in the put_dec code, assuming n is all-ones).
+ */
+static
+unsigned int put_dec_helper4(char *end, unsigned int x)
+{
+ unsigned int q = (x * 0x346DC5D7ULL) >> 43;
+
+ put_dec_full4(end, x - q * 10000);
+ return q;
+}
+
+/* Based on code by Douglas W. Jones found at
+ * <http://www.cs.uiowa.edu/~jones/bcd/decimal.html#sixtyfour>
+ * (with permission from the author).
+ * Performs no 64-bit division and hence should be fast on 32-bit machines.
+ */
+static
+char *put_dec(char *end, unsigned long long n)
+{
+ unsigned int d3, d2, d1, q, h;
+ char *p = end;
+
+ d1 = ((unsigned int)n >> 16); /* implicit "& 0xffff" */
+ h = (n >> 32);
+ d2 = (h ) & 0xffff;
+ d3 = (h >> 16); /* implicit "& 0xffff" */
+
+ /* n = 2^48 d3 + 2^32 d2 + 2^16 d1 + d0
+ = 281_4749_7671_0656 d3 + 42_9496_7296 d2 + 6_5536 d1 + d0 */
+ q = 656 * d3 + 7296 * d2 + 5536 * d1 + ((unsigned int)n & 0xffff);
+ q = put_dec_helper4(p, q);
+ p -= 4;
+
+ q += 7671 * d3 + 9496 * d2 + 6 * d1;
+ q = put_dec_helper4(p, q);
+ p -= 4;
+
+ q += 4749 * d3 + 42 * d2;
+ q = put_dec_helper4(p, q);
+ p -= 4;
+
+ q += 281 * d3;
+ q = put_dec_helper4(p, q);
+ p -= 4;
+
+ put_dec_full4(p, q);
+ p -= 4;
+
+ /* strip off the extra 0's we printed */
+ while (p < end && *p == '0')
+ ++p;
+
+ return p;
+}
+
+static
+char *number(char *end, unsigned long long num, int base, char locase)
+{
+ /*
+ * locase = 0 or 0x20. ORing digits or letters with 'locase'
+ * produces same digits or (maybe lowercased) letters
+ */
+
+ /* we are called with base 8, 10 or 16, only, thus don't need "G..." */
+ static const char digits[16] = "0123456789ABCDEF"; /* "GHIJKLMNOPQRSTUVWXYZ"; */
+
+ switch (base) {
+ case 10:
+ if (num != 0)
+ end = put_dec(end, num);
+ break;
+ case 8:
+ for (; num != 0; num >>= 3)
+ *--end = '0' + (num & 07);
+ break;
+ case 16:
+ for (; num != 0; num >>= 4)
+ *--end = digits[num & 0xf] | locase;
+ break;
+ default:
+ unreachable();
+ };
+
+ return end;
+}
+
+#define ZEROPAD 1 /* pad with zero */
+#define SIGN 2 /* unsigned/signed long */
+#define PLUS 4 /* show plus */
+#define SPACE 8 /* space if plus */
+#define LEFT 16 /* left justified */
+#define SMALL 32 /* Must be 32 == 0x20 */
+#define SPECIAL 64 /* 0x */
+#define WIDE 128 /* UTF-16 string */
+
+static
+int get_flags(const char **fmt)
+{
+ int flags = 0;
+
+ do {
+ switch (**fmt) {
+ case '-':
+ flags |= LEFT;
+ break;
+ case '+':
+ flags |= PLUS;
+ break;
+ case ' ':
+ flags |= SPACE;
+ break;
+ case '#':
+ flags |= SPECIAL;
+ break;
+ case '0':
+ flags |= ZEROPAD;
+ break;
+ default:
+ return flags;
+ }
+ ++(*fmt);
+ } while (1);
+}
+
+static
+int get_int(const char **fmt, va_list *ap)
+{
+ if (isdigit(**fmt))
+ return skip_atoi(fmt);
+ if (**fmt == '*') {
+ ++(*fmt);
+ /* it's the next argument */
+ return va_arg(*ap, int);
+ }
+ return 0;
+}
+
+static
+unsigned long long get_number(int sign, int qualifier, va_list *ap)
+{
+ if (sign) {
+ switch (qualifier) {
+ case 'L':
+ return va_arg(*ap, long long);
+ case 'l':
+ return va_arg(*ap, long);
+ case 'h':
+ return (short)va_arg(*ap, int);
+ case 'H':
+ return (signed char)va_arg(*ap, int);
+ default:
+ return va_arg(*ap, int);
+ };
+ } else {
+ switch (qualifier) {
+ case 'L':
+ return va_arg(*ap, unsigned long long);
+ case 'l':
+ return va_arg(*ap, unsigned long);
+ case 'h':
+ return (unsigned short)va_arg(*ap, int);
+ case 'H':
+ return (unsigned char)va_arg(*ap, int);
+ default:
+ return va_arg(*ap, unsigned int);
+ }
+ }
+}
+
+static
+char get_sign(long long *num, int flags)
+{
+ if (!(flags & SIGN))
+ return 0;
+ if (*num < 0) {
+ *num = -(*num);
+ return '-';
+ }
+ if (flags & PLUS)
+ return '+';
+ if (flags & SPACE)
+ return ' ';
+ return 0;
+}
+
+static
+size_t utf16s_utf8nlen(const u16 *s16, size_t maxlen)
+{
+ size_t len, clen;
+
+ for (len = 0; len < maxlen && *s16; len += clen) {
+ u16 c0 = *s16++;
+
+ /* First, get the length for a BMP character */
+ clen = 1 + (c0 >= 0x80) + (c0 >= 0x800);
+ if (len + clen > maxlen)
+ break;
+ /*
+ * If this is a high surrogate, and we're already at maxlen, we
+ * can't include the character if it's a valid surrogate pair.
+ * Avoid accessing one extra word just to check if it's valid
+ * or not.
+ */
+ if ((c0 & 0xfc00) == 0xd800) {
+ if (len + clen == maxlen)
+ break;
+ if ((*s16 & 0xfc00) == 0xdc00) {
+ ++s16;
+ ++clen;
+ }
+ }
+ }
+
+ return len;
+}
+
+static
+u32 utf16_to_utf32(const u16 **s16)
+{
+ u16 c0, c1;
+
+ c0 = *(*s16)++;
+ /* not a surrogate */
+ if ((c0 & 0xf800) != 0xd800)
+ return c0;
+ /* invalid: low surrogate instead of high */
+ if (c0 & 0x0400)
+ return 0xfffd;
+ c1 = **s16;
+ /* invalid: missing low surrogate */
+ if ((c1 & 0xfc00) != 0xdc00)
+ return 0xfffd;
+ /* valid surrogate pair */
+ ++(*s16);
+ return (0x10000 - (0xd800 << 10) - 0xdc00) + (c0 << 10) + c1;
+}
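
A worked example of the surrogate arithmetic above: the pair 0xD83D 0xDE00 decodes to U+1F600, since (0x10000 - (0xd800 << 10) - 0xdc00) + (0xd83d << 10) + 0xde00 = 0x10000 + 0xf400 + 0x200 = 0x1f600. In code:

	const u16 pair[] = { 0xd83d, 0xde00 };
	const u16 *p = pair;
	u32 cp = utf16_to_utf32(&p);	/* cp == 0x1f600, p advanced past both units */
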
+
+#define PUTC(c) \
+do { \
+ if (pos < size) \
+ buf[pos] = (c); \
+ ++pos; \
+} while (0);
+
+int vsnprintf(char *buf, size_t size, const char *fmt, va_list ap)
+{
+ /* The maximum space required is to print a 64-bit number in octal */
+ char tmp[(sizeof(unsigned long long) * 8 + 2) / 3];
+ char *tmp_end = &tmp[ARRAY_SIZE(tmp)];
+ long long num;
+ int base;
+ const char *s;
+ size_t len, pos;
+ char sign;
+
+ int flags; /* flags to number() */
+
+ int field_width; /* width of output field */
+ int precision; /* min. # of digits for integers; max
+ number of chars from string */
+ int qualifier; /* 'h', 'hh', 'l' or 'll' for integer fields */
+
+ va_list args;
+
+ /*
+ * We want to pass our input va_list to helper functions by reference,
+ * but there's an annoying edge case. If va_list was originally passed
+ * to us by value, we could just pass &ap down to the helpers. This is
+ * the case on, for example, X86_32.
+ * However, on X86_64 (and possibly others), va_list is actually a
+ * size-1 array containing a structure. Our function parameter ap has
+ * decayed from T[1] to T*, and &ap has type T** rather than T(*)[1],
+ * which is what will be expected by a function taking a va_list *
+ * parameter.
+ * One standard way to solve this mess is by creating a copy in a local
+ * variable of type va_list and then passing a pointer to that local
+ * copy instead, which is what we do here.
+ */
+ va_copy(args, ap);
+
+ for (pos = 0; *fmt; ++fmt) {
+ if (*fmt != '%' || *++fmt == '%') {
+ PUTC(*fmt);
+ continue;
+ }
+
+ /* process flags */
+ flags = get_flags(&fmt);
+
+ /* get field width */
+ field_width = get_int(&fmt, &args);
+ if (field_width < 0) {
+ field_width = -field_width;
+ flags |= LEFT;
+ }
+
+ if (flags & LEFT)
+ flags &= ~ZEROPAD;
+
+ /* get the precision */
+ precision = -1;
+ if (*fmt == '.') {
+ ++fmt;
+ precision = get_int(&fmt, &args);
+ if (precision >= 0)
+ flags &= ~ZEROPAD;
+ }
+
+ /* get the conversion qualifier */
+ qualifier = -1;
+ if (*fmt == 'h' || *fmt == 'l') {
+ qualifier = *fmt;
+ ++fmt;
+ if (qualifier == *fmt) {
+ qualifier -= 'a'-'A';
+ ++fmt;
+ }
+ }
+
+ sign = 0;
+
+ switch (*fmt) {
+ case 'c':
+ flags &= LEFT;
+ s = tmp;
+ if (qualifier == 'l') {
+ ((u16 *)tmp)[0] = (u16)va_arg(args, unsigned int);
+ ((u16 *)tmp)[1] = L'\0';
+ precision = INT_MAX;
+ goto wstring;
+ } else {
+ tmp[0] = (unsigned char)va_arg(args, int);
+ precision = len = 1;
+ }
+ goto output;
+
+ case 's':
+ flags &= LEFT;
+ if (precision < 0)
+ precision = INT_MAX;
+ s = va_arg(args, void *);
+ if (!s)
+ s = precision < 6 ? "" : "(null)";
+ else if (qualifier == 'l') {
+ wstring:
+ flags |= WIDE;
+ precision = len = utf16s_utf8nlen((const u16 *)s, precision);
+ goto output;
+ }
+ precision = len = strnlen(s, precision);
+ goto output;
+
+ /* integer number formats - set up the flags and "break" */
+ case 'o':
+ base = 8;
+ break;
+
+ case 'p':
+ if (precision < 0)
+ precision = 2 * sizeof(void *);
+ fallthrough;
+ case 'x':
+ flags |= SMALL;
+ fallthrough;
+ case 'X':
+ base = 16;
+ break;
+
+ case 'd':
+ case 'i':
+ flags |= SIGN;
+ fallthrough;
+ case 'u':
+ flags &= ~SPECIAL;
+ base = 10;
+ break;
+
+ default:
+ /*
+ * Bail out if the conversion specifier is invalid.
+ * There's probably a typo in the format string and the
+ * remaining specifiers are unlikely to match up with
+ * the arguments.
+ */
+ goto fail;
+ }
+ if (*fmt == 'p') {
+ num = (unsigned long)va_arg(args, void *);
+ } else {
+ num = get_number(flags & SIGN, qualifier, &args);
+ }
+
+ sign = get_sign(&num, flags);
+ if (sign)
+ --field_width;
+
+ s = number(tmp_end, num, base, flags & SMALL);
+ len = tmp_end - s;
+ /* default precision is 1 */
+ if (precision < 0)
+ precision = 1;
+ /* precision is minimum number of digits to print */
+ if (precision < len)
+ precision = len;
+ if (flags & SPECIAL) {
+ /*
+ * For octal, a leading 0 is printed only if necessary,
+ * i.e. if it's not already there because of the
+ * precision.
+ */
+ if (base == 8 && precision == len)
+ ++precision;
+ /*
+ * For hexadecimal, the leading 0x is skipped if the
+ * output is empty, i.e. both the number and the
+ * precision are 0.
+ */
+ if (base == 16 && precision > 0)
+ field_width -= 2;
+ else
+ flags &= ~SPECIAL;
+ }
+ /*
+ * For zero padding, increase the precision to fill the field
+ * width.
+ */
+ if ((flags & ZEROPAD) && field_width > precision)
+ precision = field_width;
+
+output:
+ /* Calculate the padding necessary */
+ field_width -= precision;
+ /* Leading padding with ' ' */
+ if (!(flags & LEFT))
+ while (field_width-- > 0)
+ PUTC(' ');
+ /* sign */
+ if (sign)
+ PUTC(sign);
+ /* 0x/0X for hexadecimal */
+ if (flags & SPECIAL) {
+ PUTC('0');
+ PUTC( 'X' | (flags & SMALL));
+ }
+ /* Zero padding and excess precision */
+ while (precision-- > len)
+ PUTC('0');
+ /* Actual output */
+ if (flags & WIDE) {
+ const u16 *ws = (const u16 *)s;
+
+ while (len-- > 0) {
+ u32 c32 = utf16_to_utf32(&ws);
+ u8 *s8;
+ size_t clen;
+
+ if (c32 < 0x80) {
+ PUTC(c32);
+ continue;
+ }
+
+ /* Number of trailing octets */
+ clen = 1 + (c32 >= 0x800) + (c32 >= 0x10000);
+
+ len -= clen;
+ s8 = (u8 *)&buf[pos];
+
+ /* Avoid writing partial character */
+ PUTC('\0');
+ pos += clen;
+ if (pos >= size)
+ continue;
+
+ /* Set high bits of leading octet */
+ *s8 = (0xf00 >> 1) >> clen;
+ /* Write trailing octets in reverse order */
+ for (s8 += clen; clen; --clen, c32 >>= 6)
+ *s8-- = 0x80 | (c32 & 0x3f);
+ /* Set low bits of leading octet */
+ *s8 |= c32;
+ }
+ } else {
+ while (len-- > 0)
+ PUTC(*s++);
+ }
+ /* Trailing padding with ' ' */
+ while (field_width-- > 0)
+ PUTC(' ');
+ }
+fail:
+ va_end(args);
+
+ if (size)
+ buf[min(pos, size-1)] = '\0';
+
+ return pos;
+}
+
+int snprintf(char *buf, size_t size, const char *fmt, ...)
+{
+ va_list args;
+ int i;
+
+ va_start(args, fmt);
+ i = vsnprintf(buf, size, fmt, args);
+ va_end(args);
+ return i;
+}
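
A brief usage sketch of the stub's snprintf() based on the conversions implemented above; the expected output in the comments follows from the code, and the buffer/string names are illustrative:

	char buf[32];
	static const u16 wstr[] = { 'a', 'b', 'c', 0 };

	snprintf(buf, sizeof(buf), "%#06x", 0x2a);	/* "0x002a"              */
	snprintf(buf, sizeof(buf), "%-8d|", -42);	/* "-42     |"           */
	snprintf(buf, sizeof(buf), "%ls", wstr);	/* UTF-16 -> UTF-8 "abc" */
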
diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
index f0339b5d3658..5a48d996ed71 100644
--- a/drivers/firmware/efi/libstub/x86-stub.c
+++ b/drivers/firmware/efi/libstub/x86-stub.c
@@ -20,21 +20,9 @@
/* Maximum physical address for 64-bit kernel with 4-level paging */
#define MAXMEM_X86_64_4LEVEL (1ull << 46)
-static efi_system_table_t *sys_table __efistub_global;
-extern const bool efi_is64;
+const efi_system_table_t *efi_system_table;
extern u32 image_offset;
-
-__pure efi_system_table_t *efi_system_table(void)
-{
- return sys_table;
-}
-
-__attribute_const__ bool efi_is_64bit(void)
-{
- if (IS_ENABLED(CONFIG_EFI_MIXED))
- return efi_is64;
- return IS_ENABLED(CONFIG_X86_64);
-}
+static efi_loaded_image_t *image = NULL;
static efi_status_t
preserve_pci_rom_image(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom)
@@ -62,7 +50,7 @@ preserve_pci_rom_image(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom)
status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, size,
(void **)&rom);
if (status != EFI_SUCCESS) {
- efi_printk("Failed to allocate memory for 'rom'\n");
+ efi_err("Failed to allocate memory for 'rom'\n");
return status;
}
@@ -78,7 +66,7 @@ preserve_pci_rom_image(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom)
PCI_VENDOR_ID, 1, &rom->vendor);
if (status != EFI_SUCCESS) {
- efi_printk("Failed to read rom->vendor\n");
+ efi_err("Failed to read rom->vendor\n");
goto free_struct;
}
@@ -86,7 +74,7 @@ preserve_pci_rom_image(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom)
PCI_DEVICE_ID, 1, &rom->devid);
if (status != EFI_SUCCESS) {
- efi_printk("Failed to read rom->devid\n");
+ efi_err("Failed to read rom->devid\n");
goto free_struct;
}
@@ -131,7 +119,7 @@ static void setup_efi_pci(struct boot_params *params)
(void **)&pci_handle);
if (status != EFI_SUCCESS) {
- efi_printk("Failed to allocate memory for 'pci_handle'\n");
+ efi_err("Failed to allocate memory for 'pci_handle'\n");
return;
}
@@ -185,7 +173,7 @@ static void retrieve_apple_device_properties(struct boot_params *boot_params)
return;
if (efi_table_attr(p, version) != 0x10000) {
- efi_printk("Unsupported properties proto version\n");
+ efi_err("Unsupported properties proto version\n");
return;
}
@@ -198,7 +186,7 @@ static void retrieve_apple_device_properties(struct boot_params *boot_params)
size + sizeof(struct setup_data),
(void **)&new);
if (status != EFI_SUCCESS) {
- efi_printk("Failed to allocate memory for 'properties'\n");
+ efi_err("Failed to allocate memory for 'properties'\n");
return;
}
@@ -227,7 +215,7 @@ static const efi_char16_t apple[] = L"Apple";
static void setup_quirks(struct boot_params *boot_params)
{
efi_char16_t *fw_vendor = (efi_char16_t *)(unsigned long)
- efi_table_attr(efi_system_table(), fw_vendor);
+ efi_table_attr(efi_system_table, fw_vendor);
if (!memcmp(fw_vendor, apple, sizeof(apple))) {
if (IS_ENABLED(CONFIG_APPLE_PROPERTIES))
@@ -368,7 +356,6 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
{
struct boot_params *boot_params;
struct setup_header *hdr;
- efi_loaded_image_t *image;
void *image_base;
efi_guid_t proto = LOADED_IMAGE_PROTOCOL_GUID;
int options_size = 0;
@@ -377,28 +364,29 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
unsigned long ramdisk_addr;
unsigned long ramdisk_size;
- sys_table = sys_table_arg;
+ efi_system_table = sys_table_arg;
/* Check if we were booted by the EFI firmware */
- if (sys_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
+ if (efi_system_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
efi_exit(handle, EFI_INVALID_PARAMETER);
status = efi_bs_call(handle_protocol, handle, &proto, (void **)&image);
if (status != EFI_SUCCESS) {
- efi_printk("Failed to get handle for LOADED_IMAGE_PROTOCOL\n");
+ efi_err("Failed to get handle for LOADED_IMAGE_PROTOCOL\n");
efi_exit(handle, status);
}
image_base = efi_table_attr(image, image_base);
image_offset = (void *)startup_32 - image_base;
- status = efi_allocate_pages(0x4000, (unsigned long *)&boot_params, ULONG_MAX);
+ status = efi_allocate_pages(sizeof(struct boot_params),
+ (unsigned long *)&boot_params, ULONG_MAX);
if (status != EFI_SUCCESS) {
- efi_printk("Failed to allocate lowmem for boot params\n");
+ efi_err("Failed to allocate lowmem for boot params\n");
efi_exit(handle, status);
}
- memset(boot_params, 0x0, 0x4000);
+ memset(boot_params, 0x0, sizeof(struct boot_params));
hdr = &boot_params->hdr;
@@ -416,43 +404,21 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
hdr->type_of_loader = 0x21;
/* Convert unicode cmdline to ascii */
- cmdline_ptr = efi_convert_cmdline(image, &options_size, ULONG_MAX);
+ cmdline_ptr = efi_convert_cmdline(image, &options_size);
if (!cmdline_ptr)
goto fail;
- hdr->cmd_line_ptr = (unsigned long)cmdline_ptr;
- /* Fill in upper bits of command line address, NOP on 32 bit */
- boot_params->ext_cmd_line_ptr = (u64)(unsigned long)cmdline_ptr >> 32;
+ efi_set_u64_split((unsigned long)cmdline_ptr,
+ &hdr->cmd_line_ptr, &boot_params->ext_cmd_line_ptr);
hdr->ramdisk_image = 0;
hdr->ramdisk_size = 0;
- if (efi_is_native()) {
- status = efi_parse_options(cmdline_ptr);
- if (status != EFI_SUCCESS)
- goto fail2;
-
- if (!noinitrd()) {
- status = efi_load_initrd(image, &ramdisk_addr,
- &ramdisk_size,
- hdr->initrd_addr_max,
- ULONG_MAX);
- if (status != EFI_SUCCESS)
- goto fail2;
- hdr->ramdisk_image = ramdisk_addr & 0xffffffff;
- hdr->ramdisk_size = ramdisk_size & 0xffffffff;
- boot_params->ext_ramdisk_image = (u64)ramdisk_addr >> 32;
- boot_params->ext_ramdisk_size = (u64)ramdisk_size >> 32;
- }
- }
-
- efi_stub_entry(handle, sys_table, boot_params);
+ efi_stub_entry(handle, sys_table_arg, boot_params);
/* not reached */
-fail2:
- efi_free(options_size, (unsigned long)cmdline_ptr);
fail:
- efi_free(0x4000, (unsigned long)boot_params);
+ efi_free(sizeof(struct boot_params), (unsigned long)boot_params);
efi_exit(handle, status);
}
@@ -645,17 +611,14 @@ static efi_status_t exit_boot_func(struct efi_boot_memmap *map,
: EFI32_LOADER_SIGNATURE;
memcpy(&p->efi->efi_loader_signature, signature, sizeof(__u32));
- p->efi->efi_systab = (unsigned long)efi_system_table();
+ efi_set_u64_split((unsigned long)efi_system_table,
+ &p->efi->efi_systab, &p->efi->efi_systab_hi);
p->efi->efi_memdesc_size = *map->desc_size;
p->efi->efi_memdesc_version = *map->desc_ver;
- p->efi->efi_memmap = (unsigned long)*map->map;
+ efi_set_u64_split((unsigned long)*map->map,
+ &p->efi->efi_memmap, &p->efi->efi_memmap_hi);
p->efi->efi_memmap_size = *map->map_size;
-#ifdef CONFIG_X86_64
- p->efi->efi_systab_hi = (unsigned long)efi_system_table() >> 32;
- p->efi->efi_memmap_hi = (unsigned long)*map->map >> 32;
-#endif
-
return EFI_SUCCESS;
}
@@ -711,12 +674,11 @@ unsigned long efi_main(efi_handle_t handle,
unsigned long buffer_start, buffer_end;
struct setup_header *hdr = &boot_params->hdr;
efi_status_t status;
- unsigned long cmdline_paddr;
- sys_table = sys_table_arg;
+ efi_system_table = sys_table_arg;
/* Check if we were booted by the EFI firmware */
- if (sys_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
+ if (efi_system_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
efi_exit(handle, EFI_INVALID_PARAMETER);
/*
@@ -759,7 +721,7 @@ unsigned long efi_main(efi_handle_t handle,
hdr->kernel_alignment,
LOAD_PHYSICAL_ADDR);
if (status != EFI_SUCCESS) {
- efi_printk("efi_relocate_kernel() failed!\n");
+ efi_err("efi_relocate_kernel() failed!\n");
goto fail;
}
/*
@@ -770,35 +732,48 @@ unsigned long efi_main(efi_handle_t handle,
image_offset = 0;
}
- /*
- * efi_pe_entry() may have been called before efi_main(), in which
- * case this is the second time we parse the cmdline. This is ok,
- * parsing the cmdline multiple times does not have side-effects.
- */
- cmdline_paddr = ((u64)hdr->cmd_line_ptr |
- ((u64)boot_params->ext_cmd_line_ptr << 32));
- efi_parse_options((char *)cmdline_paddr);
+#ifdef CONFIG_CMDLINE_BOOL
+ status = efi_parse_options(CONFIG_CMDLINE);
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to parse options\n");
+ goto fail;
+ }
+#endif
+ if (!IS_ENABLED(CONFIG_CMDLINE_OVERRIDE)) {
+ unsigned long cmdline_paddr = ((u64)hdr->cmd_line_ptr |
+ ((u64)boot_params->ext_cmd_line_ptr << 32));
+ status = efi_parse_options((char *)cmdline_paddr);
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to parse options\n");
+ goto fail;
+ }
+ }
/*
- * At this point, an initrd may already have been loaded, either by
- * the bootloader and passed via bootparams, or loaded from a initrd=
- * command line option by efi_pe_entry() above. In either case, we
- * permit an initrd loaded from the LINUX_EFI_INITRD_MEDIA_GUID device
- * path to supersede it.
+ * At this point, an initrd may already have been loaded by the
+ * bootloader and passed via bootparams. We permit an initrd loaded
+ * from the LINUX_EFI_INITRD_MEDIA_GUID device path to supersede it.
+ *
+ * If the device path is not present, any command-line initrd=
+ * arguments will be processed only if image is not NULL, which will be
+ * the case only if we were loaded via the PE entry point.
*/
- if (!noinitrd()) {
+ if (!efi_noinitrd) {
unsigned long addr, size;
- status = efi_load_initrd_dev_path(&addr, &size, ULONG_MAX);
- if (status == EFI_SUCCESS) {
- hdr->ramdisk_image = (u32)addr;
- hdr->ramdisk_size = (u32)size;
- boot_params->ext_ramdisk_image = (u64)addr >> 32;
- boot_params->ext_ramdisk_size = (u64)size >> 32;
- } else if (status != EFI_NOT_FOUND) {
- efi_printk("efi_load_initrd_dev_path() failed!\n");
+ status = efi_load_initrd(image, &addr, &size,
+ hdr->initrd_addr_max, ULONG_MAX);
+
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to load initrd!\n");
goto fail;
}
+ if (size > 0) {
+ efi_set_u64_split(addr, &hdr->ramdisk_image,
+ &boot_params->ext_ramdisk_image);
+ efi_set_u64_split(size, &hdr->ramdisk_size,
+ &boot_params->ext_ramdisk_size);
+ }
}
/*
@@ -823,13 +798,13 @@ unsigned long efi_main(efi_handle_t handle,
status = exit_boot(boot_params, handle);
if (status != EFI_SUCCESS) {
- efi_printk("exit_boot() failed!\n");
+ efi_err("exit_boot() failed!\n");
goto fail;
}
return bzimage_addr;
fail:
- efi_printk("efi_main() failed!\n");
+ efi_err("efi_main() failed!\n");
efi_exit(handle, status);
}
diff --git a/drivers/firmware/efi/test/efi_test.c b/drivers/firmware/efi/test/efi_test.c
index 7baf48c01e72..ddf9eae396fe 100644
--- a/drivers/firmware/efi/test/efi_test.c
+++ b/drivers/firmware/efi/test/efi_test.c
@@ -70,9 +70,6 @@ copy_ucs2_from_user_len(efi_char16_t **dst, efi_char16_t __user *src,
return 0;
}
- if (!access_ok(src, 1))
- return -EFAULT;
-
buf = memdup_user(src, len);
if (IS_ERR(buf)) {
*dst = NULL;
@@ -91,9 +88,6 @@ copy_ucs2_from_user_len(efi_char16_t **dst, efi_char16_t __user *src,
static inline int
get_ucs2_strsize_from_user(efi_char16_t __user *src, size_t *len)
{
- if (!access_ok(src, 1))
- return -EFAULT;
-
*len = user_ucs2_strsize(src);
if (*len == 0)
return -EFAULT;
@@ -118,9 +112,6 @@ copy_ucs2_from_user(efi_char16_t **dst, efi_char16_t __user *src)
{
size_t len;
- if (!access_ok(src, 1))
- return -EFAULT;
-
len = user_ucs2_strsize(src);
if (len == 0)
return -EFAULT;
@@ -142,9 +133,6 @@ copy_ucs2_to_user_len(efi_char16_t __user *dst, efi_char16_t *src, size_t len)
if (!src)
return 0;
- if (!access_ok(dst, 1))
- return -EFAULT;
-
return copy_to_user(dst, src, len);
}
diff --git a/drivers/firmware/imx/imx-scu.c b/drivers/firmware/imx/imx-scu.c
index f71eaa5bf52d..2ab048222fe9 100644
--- a/drivers/firmware/imx/imx-scu.c
+++ b/drivers/firmware/imx/imx-scu.c
@@ -8,7 +8,6 @@
*/
#include <linux/err.h>
-#include <linux/firmware/imx/types.h>
#include <linux/firmware/imx/ipc.h>
#include <linux/firmware/imx/sci.h>
#include <linux/interrupt.h>
@@ -38,6 +37,7 @@ struct imx_sc_ipc {
struct device *dev;
struct mutex lock;
struct completion done;
+ bool fast_ipc;
/* temporarily store the SCU msg */
u32 *msg;
@@ -115,6 +115,7 @@ static void imx_scu_rx_callback(struct mbox_client *c, void *msg)
struct imx_sc_ipc *sc_ipc = sc_chan->sc_ipc;
struct imx_sc_rpc_msg *hdr;
u32 *data = msg;
+ int i;
if (!sc_ipc->msg) {
dev_warn(sc_ipc->dev, "unexpected rx idx %d 0x%08x, ignore!\n",
@@ -122,6 +123,19 @@ static void imx_scu_rx_callback(struct mbox_client *c, void *msg)
return;
}
+ if (sc_ipc->fast_ipc) {
+ hdr = msg;
+ sc_ipc->rx_size = hdr->size;
+ sc_ipc->msg[0] = *data++;
+
+ for (i = 1; i < sc_ipc->rx_size; i++)
+ sc_ipc->msg[i] = *data++;
+
+ complete(&sc_ipc->done);
+
+ return;
+ }
+
if (sc_chan->idx == 0) {
hdr = msg;
sc_ipc->rx_size = hdr->size;
@@ -143,20 +157,22 @@ static void imx_scu_rx_callback(struct mbox_client *c, void *msg)
static int imx_scu_ipc_write(struct imx_sc_ipc *sc_ipc, void *msg)
{
- struct imx_sc_rpc_msg *hdr = msg;
+ struct imx_sc_rpc_msg hdr = *(struct imx_sc_rpc_msg *)msg;
struct imx_sc_chan *sc_chan;
u32 *data = msg;
int ret;
+ int size;
int i;
/* Check size */
- if (hdr->size > IMX_SC_RPC_MAX_MSG)
+ if (hdr.size > IMX_SC_RPC_MAX_MSG)
return -EINVAL;
- dev_dbg(sc_ipc->dev, "RPC SVC %u FUNC %u SIZE %u\n", hdr->svc,
- hdr->func, hdr->size);
+ dev_dbg(sc_ipc->dev, "RPC SVC %u FUNC %u SIZE %u\n", hdr.svc,
+ hdr.func, hdr.size);
- for (i = 0; i < hdr->size; i++) {
+ size = sc_ipc->fast_ipc ? 1 : hdr.size;
+ for (i = 0; i < size; i++) {
sc_chan = &sc_ipc->chans[i % 4];
/*
@@ -168,8 +184,10 @@ static int imx_scu_ipc_write(struct imx_sc_ipc *sc_ipc, void *msg)
* Wait for tx_done before every send to ensure that no
* queueing happens at the mailbox channel level.
*/
- wait_for_completion(&sc_chan->tx_done);
- reinit_completion(&sc_chan->tx_done);
+ if (!sc_ipc->fast_ipc) {
+ wait_for_completion(&sc_chan->tx_done);
+ reinit_completion(&sc_chan->tx_done);
+ }
ret = mbox_send_message(sc_chan->ch, &data[i]);
if (ret < 0)
@@ -246,6 +264,8 @@ static int imx_scu_probe(struct platform_device *pdev)
struct imx_sc_chan *sc_chan;
struct mbox_client *cl;
char *chan_name;
+ struct of_phandle_args args;
+ int num_channel;
int ret;
int i;
@@ -253,11 +273,20 @@ static int imx_scu_probe(struct platform_device *pdev)
if (!sc_ipc)
return -ENOMEM;
- for (i = 0; i < SCU_MU_CHAN_NUM; i++) {
- if (i < 4)
+ ret = of_parse_phandle_with_args(pdev->dev.of_node, "mboxes",
+ "#mbox-cells", 0, &args);
+ if (ret)
+ return ret;
+
+ sc_ipc->fast_ipc = of_device_is_compatible(args.np, "fsl,imx8-mu-scu");
+
+ num_channel = sc_ipc->fast_ipc ? 2 : SCU_MU_CHAN_NUM;
+ for (i = 0; i < num_channel; i++) {
+ if (i < num_channel / 2)
chan_name = kasprintf(GFP_KERNEL, "tx%d", i);
else
- chan_name = kasprintf(GFP_KERNEL, "rx%d", i - 4);
+ chan_name = kasprintf(GFP_KERNEL, "rx%d",
+ i - num_channel / 2);
if (!chan_name)
return -ENOMEM;
@@ -269,19 +298,22 @@ static int imx_scu_probe(struct platform_device *pdev)
cl->knows_txdone = true;
cl->rx_callback = imx_scu_rx_callback;
- /* Initial tx_done completion as "done" */
- cl->tx_done = imx_scu_tx_done;
- init_completion(&sc_chan->tx_done);
- complete(&sc_chan->tx_done);
+ if (!sc_ipc->fast_ipc) {
+ /* Initial tx_done completion as "done" */
+ cl->tx_done = imx_scu_tx_done;
+ init_completion(&sc_chan->tx_done);
+ complete(&sc_chan->tx_done);
+ }
sc_chan->sc_ipc = sc_ipc;
- sc_chan->idx = i % 4;
+ sc_chan->idx = i % (num_channel / 2);
sc_chan->ch = mbox_request_channel_byname(cl, chan_name);
if (IS_ERR(sc_chan->ch)) {
ret = PTR_ERR(sc_chan->ch);
if (ret != -EPROBE_DEFER)
dev_err(dev, "Failed to request mbox chan %s ret %d\n",
chan_name, ret);
+ kfree(chan_name);
return ret;
}
diff --git a/drivers/firmware/psci/psci.c b/drivers/firmware/psci/psci.c
index 2937d44b5df4..92013ecc2d9e 100644
--- a/drivers/firmware/psci/psci.c
+++ b/drivers/firmware/psci/psci.c
@@ -46,25 +46,14 @@
* require cooperation with a Trusted OS driver.
*/
static int resident_cpu = -1;
+struct psci_operations psci_ops;
+static enum arm_smccc_conduit psci_conduit = SMCCC_CONDUIT_NONE;
bool psci_tos_resident_on(int cpu)
{
return cpu == resident_cpu;
}
-struct psci_operations psci_ops = {
- .conduit = SMCCC_CONDUIT_NONE,
- .smccc_version = SMCCC_VERSION_1_0,
-};
-
-enum arm_smccc_conduit arm_smccc_1_1_get_conduit(void)
-{
- if (psci_ops.smccc_version < SMCCC_VERSION_1_1)
- return SMCCC_CONDUIT_NONE;
-
- return psci_ops.conduit;
-}
-
typedef unsigned long (psci_fn)(unsigned long, unsigned long,
unsigned long, unsigned long);
static psci_fn *invoke_psci_fn;
@@ -242,7 +231,7 @@ static void set_conduit(enum arm_smccc_conduit conduit)
WARN(1, "Unexpected PSCI conduit %d\n", conduit);
}
- psci_ops.conduit = conduit;
+ psci_conduit = conduit;
}
static int get_set_conduit_method(struct device_node *np)
@@ -411,8 +400,8 @@ static void __init psci_init_smccc(void)
if (feature != PSCI_RET_NOT_SUPPORTED) {
u32 ret;
ret = invoke_psci_fn(ARM_SMCCC_VERSION_FUNC_ID, 0, 0, 0);
- if (ret == ARM_SMCCC_VERSION_1_1) {
- psci_ops.smccc_version = SMCCC_VERSION_1_1;
+ if (ret >= ARM_SMCCC_VERSION_1_1) {
+ arm_smccc_version_init(ret, psci_conduit);
ver = ret;
}
}
diff --git a/drivers/firmware/qcom_scm-legacy.c b/drivers/firmware/qcom_scm-legacy.c
index 8532e7c78ef7..eba6b60bfb61 100644
--- a/drivers/firmware/qcom_scm-legacy.c
+++ b/drivers/firmware/qcom_scm-legacy.c
@@ -56,7 +56,7 @@ struct scm_legacy_command {
__le32 buf_offset;
__le32 resp_hdr_offset;
__le32 id;
- __le32 buf[0];
+ __le32 buf[];
};
/**
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
index 059bb0fbae9e..0e7233a20f34 100644
--- a/drivers/firmware/qcom_scm.c
+++ b/drivers/firmware/qcom_scm.c
@@ -6,7 +6,6 @@
#include <linux/init.h>
#include <linux/cpumask.h>
#include <linux/export.h>
-#include <linux/dma-direct.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/types.h>
@@ -806,8 +805,7 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
struct qcom_scm_mem_map_info *mem_to_map;
phys_addr_t mem_to_map_phys;
phys_addr_t dest_phys;
- phys_addr_t ptr_phys;
- dma_addr_t ptr_dma;
+ dma_addr_t ptr_phys;
size_t mem_to_map_sz;
size_t dest_sz;
size_t src_sz;
@@ -824,10 +822,9 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
ALIGN(dest_sz, SZ_64);
- ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_dma, GFP_KERNEL);
+ ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL);
if (!ptr)
return -ENOMEM;
- ptr_phys = dma_to_phys(__scm->dev, ptr_dma);
/* Fill source vmid detail */
src = ptr;
@@ -855,7 +852,7 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
ptr_phys, src_sz, dest_phys, dest_sz);
- dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_dma);
+ dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_phys);
if (ret) {
dev_err(__scm->dev,
"Assign memory protection call failed %d\n", ret);
@@ -943,7 +940,7 @@ bool qcom_scm_hdcp_available(void)
qcom_scm_clk_disable();
- return ret > 0 ? true : false;
+ return ret > 0;
}
EXPORT_SYMBOL(qcom_scm_hdcp_available);
diff --git a/drivers/firmware/raspberrypi.c b/drivers/firmware/raspberrypi.c
index da26a584dca0..ef8098856a47 100644
--- a/drivers/firmware/raspberrypi.c
+++ b/drivers/firmware/raspberrypi.c
@@ -12,6 +12,8 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
#include <soc/bcm2835/raspberrypi-firmware.h>
#define MBOX_MSG(chan, data28) (((data28) & ~0xf) | ((chan) & 0xf))
@@ -19,6 +21,8 @@
#define MBOX_DATA28(msg) ((msg) & ~0xf)
#define MBOX_CHAN_PROPERTY 8
+#define VL805_PCI_CONFIG_VERSION_OFFSET 0x50
+
static struct platform_device *rpi_hwmon;
static struct platform_device *rpi_clk;
@@ -182,16 +186,10 @@ rpi_firmware_print_firmware_revision(struct rpi_firmware *fw)
RPI_FIRMWARE_GET_FIRMWARE_REVISION,
&packet, sizeof(packet));
- if (ret == 0) {
- struct tm tm;
-
- time64_to_tm(packet, 0, &tm);
+ if (ret)
+ return;
- dev_info(fw->cl.dev,
- "Attached to firmware from %04ld-%02d-%02d %02d:%02d\n",
- tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
- tm.tm_hour, tm.tm_min);
- }
+ dev_info(fw->cl.dev, "Attached to firmware from %ptT\n", &packet);
}
static void
@@ -286,6 +284,63 @@ struct rpi_firmware *rpi_firmware_get(struct device_node *firmware_node)
}
EXPORT_SYMBOL_GPL(rpi_firmware_get);
+/*
+ * The Raspberry Pi 4 gets its USB functionality from VL805, a PCIe chip that
+ * implements xHCI. After a PCI reset, VL805's firmware may either be loaded
+ * directly from an EEPROM or, if not present, by the SoC's co-processor,
+ * VideoCore. RPi4's VideoCore OS contains both the non-public firmware load
+ * logic and the VL805 firmware blob. This function triggers the aforementioned
+ * process.
+ */
+int rpi_firmware_init_vl805(struct pci_dev *pdev)
+{
+ struct device_node *fw_np;
+ struct rpi_firmware *fw;
+ u32 dev_addr, version;
+ int ret;
+
+ fw_np = of_find_compatible_node(NULL, NULL,
+ "raspberrypi,bcm2835-firmware");
+ if (!fw_np)
+ return 0;
+
+ fw = rpi_firmware_get(fw_np);
+ of_node_put(fw_np);
+ if (!fw)
+ return -ENODEV;
+
+ /*
+ * Make sure we don't trigger a firmware load unnecessarily.
+ *
+ * If something went wrong with PCI, this whole exercise would be
+ * futile, as VideoCore expects us to provide a configured PCI bus. Just take
+ * the faulty version (likely ~0) and let xHCI's registration fail
+ * further down the line.
+ */
+ pci_read_config_dword(pdev, VL805_PCI_CONFIG_VERSION_OFFSET, &version);
+ if (version)
+ goto exit;
+
+ dev_addr = pdev->bus->number << 20 | PCI_SLOT(pdev->devfn) << 15 |
+ PCI_FUNC(pdev->devfn) << 12;
+
+ ret = rpi_firmware_property(fw, RPI_FIRMWARE_NOTIFY_XHCI_RESET,
+ &dev_addr, sizeof(dev_addr));
+ if (ret)
+ return ret;
+
+ /* Wait for vl805 to startup */
+ usleep_range(200, 1000);
+
+ pci_read_config_dword(pdev, VL805_PCI_CONFIG_VERSION_OFFSET,
+ &version);
+exit:
+ pci_info(pdev, "VL805 firmware version %08x\n", version);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rpi_firmware_init_vl805);
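For context, a minimal sketch of how a host controller driver might trigger this VL805 firmware load before bringing up the xHCI controller. The caller name and probe placement below are illustrative assumptions, not part of this patch; only rpi_firmware_init_vl805() itself is added here, with its declaration assumed to live in the raspberrypi-firmware header.

#include <linux/pci.h>
#include <soc/bcm2835/raspberrypi-firmware.h>

/* Hypothetical caller: ask VideoCore to (re)load the VL805 firmware
 * before handing the controller to the USB stack. */
static int example_vl805_prepare(struct pci_dev *pdev)
{
	int ret;

	ret = rpi_firmware_init_vl805(pdev);
	if (ret)
		/* Firmware node was found but the mailbox call failed. */
		dev_err(&pdev->dev, "VL805 firmware load failed: %d\n", ret);

	return ret;
}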
+
static const struct of_device_id rpi_firmware_of_match[] = {
{ .compatible = "raspberrypi,bcm2835-firmware", },
{},
diff --git a/drivers/firmware/smccc/Kconfig b/drivers/firmware/smccc/Kconfig
new file mode 100644
index 000000000000..27b675d76235
--- /dev/null
+++ b/drivers/firmware/smccc/Kconfig
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config HAVE_ARM_SMCCC
+ bool
+ help
+ Include support for the Secure Monitor Call (SMC) and Hypervisor
+ Call (HVC) instructions on Armv7 and above architectures.
+
+config HAVE_ARM_SMCCC_DISCOVERY
+ bool
+ depends on ARM_PSCI_FW
+ default y
+ help
+ SMCCC v1.0 lacked discoverability and hence PSCI v1.0 was updated
+ to add an SMCCC discovery mechanism through the PSCI firmware
+ implementation of PSCI_FEATURES(SMCCC_VERSION), which returns
+ success on firmware compliant with SMCCC v1.1 and above.
diff --git a/drivers/firmware/smccc/Makefile b/drivers/firmware/smccc/Makefile
new file mode 100644
index 000000000000..6f369fe3f0b9
--- /dev/null
+++ b/drivers/firmware/smccc/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+obj-$(CONFIG_HAVE_ARM_SMCCC_DISCOVERY) += smccc.o
diff --git a/drivers/firmware/smccc/smccc.c b/drivers/firmware/smccc/smccc.c
new file mode 100644
index 000000000000..4e80921ee212
--- /dev/null
+++ b/drivers/firmware/smccc/smccc.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 Arm Limited
+ */
+
+#define pr_fmt(fmt) "smccc: " fmt
+
+#include <linux/init.h>
+#include <linux/arm-smccc.h>
+
+static u32 smccc_version = ARM_SMCCC_VERSION_1_0;
+static enum arm_smccc_conduit smccc_conduit = SMCCC_CONDUIT_NONE;
+
+void __init arm_smccc_version_init(u32 version, enum arm_smccc_conduit conduit)
+{
+ smccc_version = version;
+ smccc_conduit = conduit;
+}
+
+enum arm_smccc_conduit arm_smccc_1_1_get_conduit(void)
+{
+ if (smccc_version < ARM_SMCCC_VERSION_1_1)
+ return SMCCC_CONDUIT_NONE;
+
+ return smccc_conduit;
+}
+
+u32 arm_smccc_get_version(void)
+{
+ return smccc_version;
+}
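As a usage illustration (not part of this new file), a caller could pick the SMC or HVC conduit reported by arm_smccc_1_1_get_conduit() before issuing an SMCCC v1.1 fast call; the function ID and helper name below are made-up assumptions for the sketch.

#include <linux/arm-smccc.h>
#include <linux/errno.h>

/* Hypothetical example: issue a fast call over whichever conduit the
 * firmware advertised. 0x8600ff00 is a placeholder function ID. */
static int example_smccc_call(unsigned long *out)
{
	struct arm_smccc_res res;

	switch (arm_smccc_1_1_get_conduit()) {
	case SMCCC_CONDUIT_SMC:
		arm_smccc_1_1_smc(0x8600ff00, &res);
		break;
	case SMCCC_CONDUIT_HVC:
		arm_smccc_1_1_hvc(0x8600ff00, &res);
		break;
	default:
		return -EOPNOTSUPP;	/* SMCCC < v1.1, no usable conduit */
	}

	*out = res.a0;
	return 0;
}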
diff --git a/drivers/firmware/stratix10-rsu.c b/drivers/firmware/stratix10-rsu.c
index f8533338b018..4379475c99ed 100644
--- a/drivers/firmware/stratix10-rsu.c
+++ b/drivers/firmware/stratix10-rsu.c
@@ -72,7 +72,7 @@ static void rsu_status_callback(struct stratix10_svc_client *client,
struct stratix10_rsu_priv *priv = client->priv;
struct arm_smccc_res *res = (struct arm_smccc_res *)data->kaddr1;
- if (data->status == BIT(SVC_STATUS_RSU_OK)) {
+ if (data->status == BIT(SVC_STATUS_OK)) {
priv->status.version = FIELD_GET(RSU_VERSION_MASK,
res->a2);
priv->status.state = FIELD_GET(RSU_STATE_MASK, res->a2);
@@ -108,9 +108,9 @@ static void rsu_command_callback(struct stratix10_svc_client *client,
{
struct stratix10_rsu_priv *priv = client->priv;
- if (data->status == BIT(SVC_STATUS_RSU_NO_SUPPORT))
+ if (data->status == BIT(SVC_STATUS_NO_SUPPORT))
dev_warn(client->dev, "Secure FW doesn't support notify\n");
- else if (data->status == BIT(SVC_STATUS_RSU_ERROR))
+ else if (data->status == BIT(SVC_STATUS_ERROR))
dev_err(client->dev, "Failure, returned status is %lu\n",
BIT(data->status));
@@ -133,9 +133,9 @@ static void rsu_retry_callback(struct stratix10_svc_client *client,
struct stratix10_rsu_priv *priv = client->priv;
unsigned int *counter = (unsigned int *)data->kaddr1;
- if (data->status == BIT(SVC_STATUS_RSU_OK))
+ if (data->status == BIT(SVC_STATUS_OK))
priv->retry_counter = *counter;
- else if (data->status == BIT(SVC_STATUS_RSU_NO_SUPPORT))
+ else if (data->status == BIT(SVC_STATUS_NO_SUPPORT))
dev_warn(client->dev, "Secure FW doesn't support retry\n");
else
dev_err(client->dev, "Failed to get retry counter %lu\n",
diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c
index d5f0769f3761..e0db8dbfc9d1 100644
--- a/drivers/firmware/stratix10-svc.c
+++ b/drivers/firmware/stratix10-svc.c
@@ -214,7 +214,7 @@ static void svc_thread_cmd_data_claim(struct stratix10_svc_controller *ctrl,
complete(&ctrl->complete_status);
break;
}
- cb_data->status = BIT(SVC_STATUS_RECONFIG_BUFFER_DONE);
+ cb_data->status = BIT(SVC_STATUS_BUFFER_DONE);
cb_data->kaddr1 = svc_pa_to_va(res.a1);
cb_data->kaddr2 = (res.a2) ?
svc_pa_to_va(res.a2) : NULL;
@@ -227,7 +227,7 @@ static void svc_thread_cmd_data_claim(struct stratix10_svc_controller *ctrl,
__func__);
}
} while (res.a0 == INTEL_SIP_SMC_STATUS_OK ||
- res.a0 == INTEL_SIP_SMC_FPGA_CONFIG_STATUS_BUSY ||
+ res.a0 == INTEL_SIP_SMC_STATUS_BUSY ||
wait_for_completion_timeout(&ctrl->complete_status, timeout));
}
@@ -250,7 +250,7 @@ static void svc_thread_cmd_config_status(struct stratix10_svc_controller *ctrl,
cb_data->kaddr1 = NULL;
cb_data->kaddr2 = NULL;
cb_data->kaddr3 = NULL;
- cb_data->status = BIT(SVC_STATUS_RECONFIG_ERROR);
+ cb_data->status = BIT(SVC_STATUS_ERROR);
pr_debug("%s: polling config status\n", __func__);
@@ -259,7 +259,7 @@ static void svc_thread_cmd_config_status(struct stratix10_svc_controller *ctrl,
ctrl->invoke_fn(INTEL_SIP_SMC_FPGA_CONFIG_ISDONE,
0, 0, 0, 0, 0, 0, 0, &res);
if ((res.a0 == INTEL_SIP_SMC_STATUS_OK) ||
- (res.a0 == INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR))
+ (res.a0 == INTEL_SIP_SMC_STATUS_ERROR))
break;
/*
@@ -271,7 +271,7 @@ static void svc_thread_cmd_config_status(struct stratix10_svc_controller *ctrl,
}
if (res.a0 == INTEL_SIP_SMC_STATUS_OK && count_in_sec)
- cb_data->status = BIT(SVC_STATUS_RECONFIG_COMPLETED);
+ cb_data->status = BIT(SVC_STATUS_COMPLETED);
p_data->chan->scl->receive_cb(p_data->chan->scl, cb_data);
}
@@ -294,24 +294,18 @@ static void svc_thread_recv_status_ok(struct stratix10_svc_data *p_data,
switch (p_data->command) {
case COMMAND_RECONFIG:
- cb_data->status = BIT(SVC_STATUS_RECONFIG_REQUEST_OK);
+ case COMMAND_RSU_UPDATE:
+ case COMMAND_RSU_NOTIFY:
+ cb_data->status = BIT(SVC_STATUS_OK);
break;
case COMMAND_RECONFIG_DATA_SUBMIT:
- cb_data->status = BIT(SVC_STATUS_RECONFIG_BUFFER_SUBMITTED);
- break;
- case COMMAND_NOOP:
- cb_data->status = BIT(SVC_STATUS_RECONFIG_BUFFER_SUBMITTED);
- cb_data->kaddr1 = svc_pa_to_va(res.a1);
+ cb_data->status = BIT(SVC_STATUS_BUFFER_SUBMITTED);
break;
case COMMAND_RECONFIG_STATUS:
- cb_data->status = BIT(SVC_STATUS_RECONFIG_COMPLETED);
- break;
- case COMMAND_RSU_UPDATE:
- case COMMAND_RSU_NOTIFY:
- cb_data->status = BIT(SVC_STATUS_RSU_OK);
+ cb_data->status = BIT(SVC_STATUS_COMPLETED);
break;
case COMMAND_RSU_RETRY:
- cb_data->status = BIT(SVC_STATUS_RSU_OK);
+ cb_data->status = BIT(SVC_STATUS_OK);
cb_data->kaddr1 = &res.a1;
break;
default:
@@ -430,9 +424,9 @@ static int svc_normal_to_secure_thread(void *data)
if (pdata->command == COMMAND_RSU_STATUS) {
if (res.a0 == INTEL_SIP_SMC_RSU_ERROR)
- cbdata->status = BIT(SVC_STATUS_RSU_ERROR);
+ cbdata->status = BIT(SVC_STATUS_ERROR);
else
- cbdata->status = BIT(SVC_STATUS_RSU_OK);
+ cbdata->status = BIT(SVC_STATUS_OK);
cbdata->kaddr1 = &res;
cbdata->kaddr2 = NULL;
@@ -445,7 +439,7 @@ static int svc_normal_to_secure_thread(void *data)
case INTEL_SIP_SMC_STATUS_OK:
svc_thread_recv_status_ok(pdata, cbdata, res);
break;
- case INTEL_SIP_SMC_FPGA_CONFIG_STATUS_BUSY:
+ case INTEL_SIP_SMC_STATUS_BUSY:
switch (pdata->command) {
case COMMAND_RECONFIG_DATA_SUBMIT:
svc_thread_cmd_data_claim(ctrl,
@@ -460,33 +454,13 @@ static int svc_normal_to_secure_thread(void *data)
break;
}
break;
- case INTEL_SIP_SMC_FPGA_CONFIG_STATUS_REJECTED:
+ case INTEL_SIP_SMC_STATUS_REJECTED:
pr_debug("%s: STATUS_REJECTED\n", __func__);
break;
- case INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR:
+ case INTEL_SIP_SMC_STATUS_ERROR:
case INTEL_SIP_SMC_RSU_ERROR:
pr_err("%s: STATUS_ERROR\n", __func__);
- switch (pdata->command) {
- /* for FPGA mgr */
- case COMMAND_RECONFIG_DATA_CLAIM:
- case COMMAND_RECONFIG:
- case COMMAND_RECONFIG_DATA_SUBMIT:
- case COMMAND_RECONFIG_STATUS:
- cbdata->status =
- BIT(SVC_STATUS_RECONFIG_ERROR);
- break;
-
- /* for RSU */
- case COMMAND_RSU_STATUS:
- case COMMAND_RSU_UPDATE:
- case COMMAND_RSU_NOTIFY:
- case COMMAND_RSU_RETRY:
- cbdata->status =
- BIT(SVC_STATUS_RSU_ERROR);
- break;
- }
-
- cbdata->status = BIT(SVC_STATUS_RECONFIG_ERROR);
+ cbdata->status = BIT(SVC_STATUS_ERROR);
cbdata->kaddr1 = NULL;
cbdata->kaddr2 = NULL;
cbdata->kaddr3 = NULL;
@@ -502,7 +476,7 @@ static int svc_normal_to_secure_thread(void *data)
if ((pdata->command == COMMAND_RSU_RETRY) ||
(pdata->command == COMMAND_RSU_NOTIFY)) {
cbdata->status =
- BIT(SVC_STATUS_RSU_NO_SUPPORT);
+ BIT(SVC_STATUS_NO_SUPPORT);
cbdata->kaddr1 = NULL;
cbdata->kaddr2 = NULL;
cbdata->kaddr3 = NULL;
diff --git a/drivers/firmware/tegra/bpmp-tegra186.c b/drivers/firmware/tegra/bpmp-tegra186.c
index ea308751635f..63ab21d89c2c 100644
--- a/drivers/firmware/tegra/bpmp-tegra186.c
+++ b/drivers/firmware/tegra/bpmp-tegra186.c
@@ -176,7 +176,7 @@ static int tegra186_bpmp_init(struct tegra_bpmp *bpmp)
priv->tx.pool = of_gen_pool_get(bpmp->dev->of_node, "shmem", 0);
if (!priv->tx.pool) {
dev_err(bpmp->dev, "TX shmem pool not found\n");
- return -ENOMEM;
+ return -EPROBE_DEFER;
}
priv->tx.virt = gen_pool_dma_alloc(priv->tx.pool, 4096, &priv->tx.phys);
@@ -188,7 +188,7 @@ static int tegra186_bpmp_init(struct tegra_bpmp *bpmp)
priv->rx.pool = of_gen_pool_get(bpmp->dev->of_node, "shmem", 1);
if (!priv->rx.pool) {
dev_err(bpmp->dev, "RX shmem pool not found\n");
- err = -ENOMEM;
+ err = -EPROBE_DEFER;
goto free_tx;
}
diff --git a/drivers/firmware/tegra/bpmp.c b/drivers/firmware/tegra/bpmp.c
index 6741fcda0c37..fe6702df24bf 100644
--- a/drivers/firmware/tegra/bpmp.c
+++ b/drivers/firmware/tegra/bpmp.c
@@ -6,6 +6,7 @@
#include <linux/clk/tegra.h>
#include <linux/genalloc.h>
#include <linux/mailbox_client.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
@@ -869,12 +870,8 @@ static struct platform_driver tegra_bpmp_driver = {
.name = "tegra-bpmp",
.of_match_table = tegra_bpmp_match,
.pm = &tegra_bpmp_pm_ops,
+ .suppress_bind_attrs = true,
},
.probe = tegra_bpmp_probe,
};
-
-static int __init tegra_bpmp_init(void)
-{
- return platform_driver_register(&tegra_bpmp_driver);
-}
-core_initcall(tegra_bpmp_init);
+builtin_platform_driver(tegra_bpmp_driver);
diff --git a/drivers/firmware/trusted_foundations.c b/drivers/firmware/trusted_foundations.c
index fc544e19b0a1..1389fa9418a7 100644
--- a/drivers/firmware/trusted_foundations.c
+++ b/drivers/firmware/trusted_foundations.c
@@ -19,6 +19,7 @@
#define TF_CACHE_ENABLE 1
#define TF_CACHE_DISABLE 2
+#define TF_CACHE_REENABLE 4
#define TF_SET_CPU_BOOT_ADDR_SMC 0xfffff200
@@ -29,6 +30,7 @@
#define TF_CPU_PM_S1 0xffffffe4
#define TF_CPU_PM_S1_NOFLUSH_L2 0xffffffe7
+static unsigned long tf_idle_mode = TF_PM_MODE_NONE;
static unsigned long cpu_boot_addr;
static void tf_generic_smc(u32 type, u32 arg1, u32 arg2)
@@ -85,25 +87,40 @@ static int tf_prepare_idle(unsigned long mode)
cpu_boot_addr);
break;
+ case TF_PM_MODE_NONE:
+ break;
+
default:
return -EINVAL;
}
+ tf_idle_mode = mode;
+
return 0;
}
#ifdef CONFIG_CACHE_L2X0
static void tf_cache_write_sec(unsigned long val, unsigned int reg)
{
- u32 l2x0_way_mask = 0xff;
+ u32 enable_op, l2x0_way_mask = 0xff;
switch (reg) {
case L2X0_CTRL:
if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_ASSOCIATIVITY_16)
l2x0_way_mask = 0xffff;
+ switch (tf_idle_mode) {
+ case TF_PM_MODE_LP2:
+ enable_op = TF_CACHE_REENABLE;
+ break;
+
+ default:
+ enable_op = TF_CACHE_ENABLE;
+ break;
+ }
+
if (val == L2X0_CTRL_EN)
- tf_generic_smc(TF_CACHE_MAINT, TF_CACHE_ENABLE,
+ tf_generic_smc(TF_CACHE_MAINT, enable_op,
l2x0_saved_regs.aux_ctrl);
else
tf_generic_smc(TF_CACHE_MAINT, TF_CACHE_DISABLE,
diff --git a/drivers/firmware/xilinx/zynqmp-debug.c b/drivers/firmware/xilinx/zynqmp-debug.c
index 43bc6cfdab45..99606b34975e 100644
--- a/drivers/firmware/xilinx/zynqmp-debug.c
+++ b/drivers/firmware/xilinx/zynqmp-debug.c
@@ -85,14 +85,13 @@ static int get_pm_api_id(char *pm_api_req, u32 *pm_id)
static int process_api_request(u32 pm_id, u64 *pm_api_arg, u32 *pm_api_ret)
{
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
u32 pm_api_version;
int ret;
struct zynqmp_pm_query_data qdata = {0};
switch (pm_id) {
case PM_GET_API_VERSION:
- ret = eemi_ops->get_api_version(&pm_api_version);
+ ret = zynqmp_pm_get_api_version(&pm_api_version);
sprintf(debugfs_buf, "PM-API Version = %d.%d\n",
pm_api_version >> 16, pm_api_version & 0xffff);
break;
@@ -102,7 +101,7 @@ static int process_api_request(u32 pm_id, u64 *pm_api_arg, u32 *pm_api_ret)
qdata.arg2 = pm_api_arg[2];
qdata.arg3 = pm_api_arg[3];
- ret = eemi_ops->query_data(qdata, pm_api_ret);
+ ret = zynqmp_pm_query_data(qdata, pm_api_ret);
if (ret)
break;
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index 41b65164a367..8d1ff2454e2e 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -2,7 +2,7 @@
/*
* Xilinx Zynq MPSoC Firmware layer
*
- * Copyright (C) 2014-2018 Xilinx, Inc.
+ * Copyright (C) 2014-2020 Xilinx, Inc.
*
* Michal Simek <michal.simek@xilinx.com>
* Davorin Mista <davorin.mista@aggios.com>
@@ -24,8 +24,6 @@
#include <linux/firmware/xlnx-zynqmp.h>
#include "zynqmp-debug.h"
-static const struct zynqmp_eemi_ops *eemi_ops_tbl;
-
static bool feature_check_enabled;
static u32 zynqmp_pm_features[PM_API_MAX];
@@ -219,7 +217,7 @@ static u32 pm_tz_version;
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_get_api_version(u32 *version)
+int zynqmp_pm_get_api_version(u32 *version)
{
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
@@ -237,6 +235,7 @@ static int zynqmp_pm_get_api_version(u32 *version)
return ret;
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_get_api_version);
/**
* zynqmp_pm_get_chipid - Get silicon ID registers
@@ -246,7 +245,7 @@ static int zynqmp_pm_get_api_version(u32 *version)
* Return: Returns the status of the operation and the idcode and version
* registers in @idcode and @version.
*/
-static int zynqmp_pm_get_chipid(u32 *idcode, u32 *version)
+int zynqmp_pm_get_chipid(u32 *idcode, u32 *version)
{
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
@@ -260,6 +259,7 @@ static int zynqmp_pm_get_chipid(u32 *idcode, u32 *version)
return ret;
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_get_chipid);
/**
* zynqmp_pm_get_trustzone_version() - Get secure trustzone firmware version
@@ -324,7 +324,7 @@ static int get_set_conduit_method(struct device_node *np)
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_query_data(struct zynqmp_pm_query_data qdata, u32 *out)
+int zynqmp_pm_query_data(struct zynqmp_pm_query_data qdata, u32 *out)
{
int ret;
@@ -338,6 +338,7 @@ static int zynqmp_pm_query_data(struct zynqmp_pm_query_data qdata, u32 *out)
*/
return qdata.qid == PM_QID_CLOCK_GET_NAME ? 0 : ret;
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_query_data);
/**
* zynqmp_pm_clock_enable() - Enable the clock for given id
@@ -348,10 +349,11 @@ static int zynqmp_pm_query_data(struct zynqmp_pm_query_data qdata, u32 *out)
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_clock_enable(u32 clock_id)
+int zynqmp_pm_clock_enable(u32 clock_id)
{
return zynqmp_pm_invoke_fn(PM_CLOCK_ENABLE, clock_id, 0, 0, 0, NULL);
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_clock_enable);
/**
* zynqmp_pm_clock_disable() - Disable the clock for given id
@@ -362,10 +364,11 @@ static int zynqmp_pm_clock_enable(u32 clock_id)
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_clock_disable(u32 clock_id)
+int zynqmp_pm_clock_disable(u32 clock_id)
{
return zynqmp_pm_invoke_fn(PM_CLOCK_DISABLE, clock_id, 0, 0, 0, NULL);
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_clock_disable);
/**
* zynqmp_pm_clock_getstate() - Get the clock state for given id
@@ -377,7 +380,7 @@ static int zynqmp_pm_clock_disable(u32 clock_id)
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_clock_getstate(u32 clock_id, u32 *state)
+int zynqmp_pm_clock_getstate(u32 clock_id, u32 *state)
{
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
@@ -388,6 +391,7 @@ static int zynqmp_pm_clock_getstate(u32 clock_id, u32 *state)
return ret;
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_clock_getstate);
/**
* zynqmp_pm_clock_setdivider() - Set the clock divider for given id
@@ -399,11 +403,12 @@ static int zynqmp_pm_clock_getstate(u32 clock_id, u32 *state)
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_clock_setdivider(u32 clock_id, u32 divider)
+int zynqmp_pm_clock_setdivider(u32 clock_id, u32 divider)
{
return zynqmp_pm_invoke_fn(PM_CLOCK_SETDIVIDER, clock_id, divider,
0, 0, NULL);
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_clock_setdivider);
/**
* zynqmp_pm_clock_getdivider() - Get the clock divider for given id
@@ -415,7 +420,7 @@ static int zynqmp_pm_clock_setdivider(u32 clock_id, u32 divider)
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_clock_getdivider(u32 clock_id, u32 *divider)
+int zynqmp_pm_clock_getdivider(u32 clock_id, u32 *divider)
{
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
@@ -426,6 +431,7 @@ static int zynqmp_pm_clock_getdivider(u32 clock_id, u32 *divider)
return ret;
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_clock_getdivider);
/**
* zynqmp_pm_clock_setrate() - Set the clock rate for given id
@@ -436,13 +442,14 @@ static int zynqmp_pm_clock_getdivider(u32 clock_id, u32 *divider)
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_clock_setrate(u32 clock_id, u64 rate)
+int zynqmp_pm_clock_setrate(u32 clock_id, u64 rate)
{
return zynqmp_pm_invoke_fn(PM_CLOCK_SETRATE, clock_id,
lower_32_bits(rate),
upper_32_bits(rate),
0, NULL);
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_clock_setrate);
/**
* zynqmp_pm_clock_getrate() - Get the clock rate for given id
@@ -454,7 +461,7 @@ static int zynqmp_pm_clock_setrate(u32 clock_id, u64 rate)
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_clock_getrate(u32 clock_id, u64 *rate)
+int zynqmp_pm_clock_getrate(u32 clock_id, u64 *rate)
{
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
@@ -465,6 +472,7 @@ static int zynqmp_pm_clock_getrate(u32 clock_id, u64 *rate)
return ret;
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_clock_getrate);
/**
* zynqmp_pm_clock_setparent() - Set the clock parent for given id
@@ -475,11 +483,12 @@ static int zynqmp_pm_clock_getrate(u32 clock_id, u64 *rate)
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_clock_setparent(u32 clock_id, u32 parent_id)
+int zynqmp_pm_clock_setparent(u32 clock_id, u32 parent_id)
{
return zynqmp_pm_invoke_fn(PM_CLOCK_SETPARENT, clock_id,
parent_id, 0, 0, NULL);
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_clock_setparent);
/**
* zynqmp_pm_clock_getparent() - Get the clock parent for given id
@@ -491,7 +500,7 @@ static int zynqmp_pm_clock_setparent(u32 clock_id, u32 parent_id)
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_clock_getparent(u32 clock_id, u32 *parent_id)
+int zynqmp_pm_clock_getparent(u32 clock_id, u32 *parent_id)
{
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
@@ -502,48 +511,191 @@ static int zynqmp_pm_clock_getparent(u32 clock_id, u32 *parent_id)
return ret;
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_clock_getparent);
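Illustrative only: with these helpers exported, a consumer (for example a clock driver) can call the firmware interface directly instead of going through the former eemi_ops indirection being removed in this patch. The function name and clock ID in this sketch are assumptions.

#include <linux/firmware/xlnx-zynqmp.h>

/* Hypothetical consumer: enable a clock and read back its divider.
 * EXAMPLE_CLK_ID is a made-up clock identifier. */
#define EXAMPLE_CLK_ID	0

static int example_enable_and_query_clock(void)
{
	u32 divider;
	int ret;

	ret = zynqmp_pm_clock_enable(EXAMPLE_CLK_ID);	/* was eemi_ops->clock_enable() */
	if (ret)
		return ret;

	return zynqmp_pm_clock_getdivider(EXAMPLE_CLK_ID, &divider);
}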
/**
- * zynqmp_is_valid_ioctl() - Check whether IOCTL ID is valid or not
- * @ioctl_id: IOCTL ID
+ * zynqmp_pm_set_pll_frac_mode() - PM API for set PLL mode
+ *
+ * @clk_id: PLL clock ID
+ * @mode: PLL mode (PLL_MODE_FRAC/PLL_MODE_INT)
+ *
+ * This function sets the PLL mode.
*
- * Return: 1 if IOCTL is valid else 0
+ * Return: Returns status, either success or error+reason
*/
-static inline int zynqmp_is_valid_ioctl(u32 ioctl_id)
+int zynqmp_pm_set_pll_frac_mode(u32 clk_id, u32 mode)
{
- switch (ioctl_id) {
- case IOCTL_SD_DLL_RESET:
- case IOCTL_SET_SD_TAPDELAY:
- case IOCTL_SET_PLL_FRAC_MODE:
- case IOCTL_GET_PLL_FRAC_MODE:
- case IOCTL_SET_PLL_FRAC_DATA:
- case IOCTL_GET_PLL_FRAC_DATA:
- return 1;
- default:
- return 0;
- }
+ return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_SET_PLL_FRAC_MODE,
+ clk_id, mode, NULL);
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_set_pll_frac_mode);
/**
- * zynqmp_pm_ioctl() - PM IOCTL API for device control and configs
- * @node_id: Node ID of the device
- * @ioctl_id: ID of the requested IOCTL
- * @arg1: Argument 1 to requested IOCTL call
- * @arg2: Argument 2 to requested IOCTL call
- * @out: Returned output value
+ * zynqmp_pm_get_pll_frac_mode() - PM API for get PLL mode
+ *
+ * @clk_id: PLL clock ID
+ * @mode: PLL mode
*
- * This function calls IOCTL to firmware for device control and configuration.
+ * This function returns the current PLL mode.
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_ioctl(u32 node_id, u32 ioctl_id, u32 arg1, u32 arg2,
- u32 *out)
+int zynqmp_pm_get_pll_frac_mode(u32 clk_id, u32 *mode)
{
- if (!zynqmp_is_valid_ioctl(ioctl_id))
- return -EINVAL;
+ return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_GET_PLL_FRAC_MODE,
+ clk_id, 0, mode);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_get_pll_frac_mode);
+
+/**
+ * zynqmp_pm_set_pll_frac_data() - PM API for setting pll fraction data
+ *
+ * @clk_id: PLL clock ID
+ * @data: fraction data
+ *
+ * This function sets fraction data.
+ * It is valid for fraction mode only.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_set_pll_frac_data(u32 clk_id, u32 data)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_SET_PLL_FRAC_DATA,
+ clk_id, data, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_set_pll_frac_data);
+
+/**
+ * zynqmp_pm_get_pll_frac_data() - PM API for getting pll fraction data
+ *
+ * @clk_id: PLL clock ID
+ * @data: fraction data
+ *
+ * This function returns the fraction data value.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_get_pll_frac_data(u32 clk_id, u32 *data)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_GET_PLL_FRAC_DATA,
+ clk_id, 0, data);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_get_pll_frac_data);
+
+/**
+ * zynqmp_pm_set_sd_tapdelay() - Set tap delay for the SD device
+ *
+ * @node_id Node ID of the device
+ * @type Type of tap delay to set (input/output)
+ * @value Value to set for the tap delay
+ *
+ * This function sets input/output tap delay for the SD device.
+ *
+ * @return Returns status, either success or error+reason
+ */
+int zynqmp_pm_set_sd_tapdelay(u32 node_id, u32 type, u32 value)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, node_id, IOCTL_SET_SD_TAPDELAY,
+ type, value, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_set_sd_tapdelay);
+
+/**
+ * zynqmp_pm_sd_dll_reset() - Reset DLL logic
+ *
+ * @node_id Node ID of the device
+ * @type Reset type
+ *
+ * This function resets DLL logic for the SD device.
+ *
+ * @return Returns status, either success or error+reason
+ */
+int zynqmp_pm_sd_dll_reset(u32 node_id, u32 type)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, node_id, IOCTL_SD_DLL_RESET,
+ type, 0, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_sd_dll_reset);
+
+/**
+ * zynqmp_pm_write_ggs() - PM API for writing global general storage (ggs)
+ * @index GGS register index
+ * @value Register value to be written
+ *
+ * This function writes value to GGS register.
+ *
+ * @return Returns status, either success or error+reason
+ */
+int zynqmp_pm_write_ggs(u32 index, u32 value)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_WRITE_GGS,
+ index, value, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_write_ggs);
+
+/**
+ * zynqmp_pm_read_ggs() - PM API for reading global general storage (ggs)
+ * @index GGS register index
+ * @value Value read from the GGS register
+ *
+ * This function returns the GGS register value.
+ *
+ * @return Returns status, either success or error+reason
+ */
+int zynqmp_pm_read_ggs(u32 index, u32 *value)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_READ_GGS,
+ index, 0, value);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_read_ggs);
+
+/**
+ * zynqmp_pm_write_pggs() - PM API for writing persistent global general
+ * storage (pggs)
+ * @index PGGS register index
+ * @value Register value to be written
+ *
+ * This function writes value to PGGS register.
+ *
+ * @return Returns status, either success or error+reason
+ */
+int zynqmp_pm_write_pggs(u32 index, u32 value)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_WRITE_PGGS, index, value,
+ NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_write_pggs);
+
+/**
+ * zynqmp_pm_read_pggs() - PM API for reading persistent global general
+ * storage (pggs)
+ * @index PGGS register index
+ * @value Value read from the PGGS register
+ *
+ * This function returns the PGGS register value.
+ *
+ * @return Returns status, either success or error+reason
+ */
+int zynqmp_pm_read_pggs(u32 index, u32 *value)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_READ_PGGS, index, 0,
+ value);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_read_pggs);
- return zynqmp_pm_invoke_fn(PM_IOCTL, node_id, ioctl_id,
- arg1, arg2, out);
+/**
+ * zynqmp_pm_set_boot_health_status() - PM API for setting healthy boot status
+ * @value Status value to be written
+ *
+ * This function sets the healthy bit value to indicate the boot health
+ * status to firmware.
+ *
+ * @return Returns status, either success or error+reason
+ */
+int zynqmp_pm_set_boot_health_status(u32 value)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_SET_BOOT_HEALTH_STATUS,
+ value, 0, NULL);
}
/**
@@ -554,12 +706,13 @@ static int zynqmp_pm_ioctl(u32 node_id, u32 ioctl_id, u32 arg1, u32 arg2,
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_reset_assert(const enum zynqmp_pm_reset reset,
- const enum zynqmp_pm_reset_action assert_flag)
+int zynqmp_pm_reset_assert(const enum zynqmp_pm_reset reset,
+ const enum zynqmp_pm_reset_action assert_flag)
{
return zynqmp_pm_invoke_fn(PM_RESET_ASSERT, reset, assert_flag,
0, 0, NULL);
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_reset_assert);
/**
* zynqmp_pm_reset_get_status - Get status of the reset
@@ -568,8 +721,7 @@ static int zynqmp_pm_reset_assert(const enum zynqmp_pm_reset reset,
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset,
- u32 *status)
+int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset, u32 *status)
{
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
@@ -583,6 +735,7 @@ static int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset,
return ret;
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_reset_get_status);
/**
* zynqmp_pm_fpga_load - Perform the fpga load
@@ -597,12 +750,12 @@ static int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset,
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_fpga_load(const u64 address, const u32 size,
- const u32 flags)
+int zynqmp_pm_fpga_load(const u64 address, const u32 size, const u32 flags)
{
return zynqmp_pm_invoke_fn(PM_FPGA_LOAD, lower_32_bits(address),
upper_32_bits(address), size, flags, NULL);
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_fpga_load);
/**
* zynqmp_pm_fpga_get_status - Read value from PCAP status register
@@ -613,7 +766,7 @@ static int zynqmp_pm_fpga_load(const u64 address, const u32 size,
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_fpga_get_status(u32 *value)
+int zynqmp_pm_fpga_get_status(u32 *value)
{
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
@@ -626,6 +779,7 @@ static int zynqmp_pm_fpga_get_status(u32 *value)
return ret;
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_fpga_get_status);
/**
* zynqmp_pm_init_finalize() - PM call to inform firmware that the caller
@@ -636,10 +790,11 @@ static int zynqmp_pm_fpga_get_status(u32 *value)
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_init_finalize(void)
+int zynqmp_pm_init_finalize(void)
{
return zynqmp_pm_invoke_fn(PM_PM_INIT_FINALIZE, 0, 0, 0, 0, NULL);
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_init_finalize);
/**
* zynqmp_pm_set_suspend_mode() - Set system suspend mode
@@ -649,10 +804,11 @@ static int zynqmp_pm_init_finalize(void)
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_set_suspend_mode(u32 mode)
+int zynqmp_pm_set_suspend_mode(u32 mode)
{
return zynqmp_pm_invoke_fn(PM_SET_SUSPEND_MODE, mode, 0, 0, 0, NULL);
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_set_suspend_mode);
/**
* zynqmp_pm_request_node() - Request a node with specific capabilities
@@ -666,13 +822,13 @@ static int zynqmp_pm_set_suspend_mode(u32 mode)
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_request_node(const u32 node, const u32 capabilities,
- const u32 qos,
- const enum zynqmp_pm_request_ack ack)
+int zynqmp_pm_request_node(const u32 node, const u32 capabilities,
+ const u32 qos, const enum zynqmp_pm_request_ack ack)
{
return zynqmp_pm_invoke_fn(PM_REQUEST_NODE, node, capabilities,
qos, ack, NULL);
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_request_node);
/**
* zynqmp_pm_release_node() - Release a node
@@ -684,10 +840,11 @@ static int zynqmp_pm_request_node(const u32 node, const u32 capabilities,
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_release_node(const u32 node)
+int zynqmp_pm_release_node(const u32 node)
{
return zynqmp_pm_invoke_fn(PM_RELEASE_NODE, node, 0, 0, 0, NULL);
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_release_node);
/**
* zynqmp_pm_set_requirement() - PM call to set requirement for PM slaves
@@ -701,13 +858,14 @@ static int zynqmp_pm_release_node(const u32 node)
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_set_requirement(const u32 node, const u32 capabilities,
- const u32 qos,
- const enum zynqmp_pm_request_ack ack)
+int zynqmp_pm_set_requirement(const u32 node, const u32 capabilities,
+ const u32 qos,
+ const enum zynqmp_pm_request_ack ack)
{
return zynqmp_pm_invoke_fn(PM_SET_REQUIREMENT, node, capabilities,
qos, ack, NULL);
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_set_requirement);
/**
* zynqmp_pm_aes - Access AES hardware to encrypt/decrypt the data using
@@ -717,7 +875,7 @@ static int zynqmp_pm_set_requirement(const u32 node, const u32 capabilities,
*
* Return: Returns status, either success or error code.
*/
-static int zynqmp_pm_aes_engine(const u64 address, u32 *out)
+int zynqmp_pm_aes_engine(const u64 address, u32 *out)
{
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
@@ -732,47 +890,304 @@ static int zynqmp_pm_aes_engine(const u64 address, u32 *out)
return ret;
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_aes_engine);
-static const struct zynqmp_eemi_ops eemi_ops = {
- .get_api_version = zynqmp_pm_get_api_version,
- .get_chipid = zynqmp_pm_get_chipid,
- .query_data = zynqmp_pm_query_data,
- .clock_enable = zynqmp_pm_clock_enable,
- .clock_disable = zynqmp_pm_clock_disable,
- .clock_getstate = zynqmp_pm_clock_getstate,
- .clock_setdivider = zynqmp_pm_clock_setdivider,
- .clock_getdivider = zynqmp_pm_clock_getdivider,
- .clock_setrate = zynqmp_pm_clock_setrate,
- .clock_getrate = zynqmp_pm_clock_getrate,
- .clock_setparent = zynqmp_pm_clock_setparent,
- .clock_getparent = zynqmp_pm_clock_getparent,
- .ioctl = zynqmp_pm_ioctl,
- .reset_assert = zynqmp_pm_reset_assert,
- .reset_get_status = zynqmp_pm_reset_get_status,
- .init_finalize = zynqmp_pm_init_finalize,
- .set_suspend_mode = zynqmp_pm_set_suspend_mode,
- .request_node = zynqmp_pm_request_node,
- .release_node = zynqmp_pm_release_node,
- .set_requirement = zynqmp_pm_set_requirement,
- .fpga_load = zynqmp_pm_fpga_load,
- .fpga_get_status = zynqmp_pm_fpga_get_status,
- .aes = zynqmp_pm_aes_engine,
+/**
+ * zynqmp_pm_system_shutdown - PM call to request a system shutdown or restart
+ * @type: Shutdown or restart? 0 for shutdown, 1 for restart
+ * @subtype: Specifies which system should be restarted or shut down
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_system_shutdown(const u32 type, const u32 subtype)
+{
+ return zynqmp_pm_invoke_fn(PM_SYSTEM_SHUTDOWN, type, subtype,
+ 0, 0, NULL);
+}
+
+/**
+ * struct zynqmp_pm_shutdown_scope - Struct for shutdown scope
+ * @subtype: Shutdown subtype
+ * @name: Matching string for scope argument
+ *
+ * This struct encapsulates mapping between shutdown scope ID and string.
+ */
+struct zynqmp_pm_shutdown_scope {
+ const enum zynqmp_pm_shutdown_subtype subtype;
+ const char *name;
+};
+
+static struct zynqmp_pm_shutdown_scope shutdown_scopes[] = {
+ [ZYNQMP_PM_SHUTDOWN_SUBTYPE_SUBSYSTEM] = {
+ .subtype = ZYNQMP_PM_SHUTDOWN_SUBTYPE_SUBSYSTEM,
+ .name = "subsystem",
+ },
+ [ZYNQMP_PM_SHUTDOWN_SUBTYPE_PS_ONLY] = {
+ .subtype = ZYNQMP_PM_SHUTDOWN_SUBTYPE_PS_ONLY,
+ .name = "ps_only",
+ },
+ [ZYNQMP_PM_SHUTDOWN_SUBTYPE_SYSTEM] = {
+ .subtype = ZYNQMP_PM_SHUTDOWN_SUBTYPE_SYSTEM,
+ .name = "system",
+ },
};
+static struct zynqmp_pm_shutdown_scope *selected_scope =
+ &shutdown_scopes[ZYNQMP_PM_SHUTDOWN_SUBTYPE_SYSTEM];
+
/**
- * zynqmp_pm_get_eemi_ops - Get eemi ops functions
+ * zynqmp_pm_is_shutdown_scope_valid - Check if shutdown scope string is valid
+ * @scope_string: Shutdown scope string
*
- * Return: Pointer of eemi_ops structure
+ * Return: Return pointer to matching shutdown scope struct from
+ * array of available options in system if string is valid,
+ * otherwise returns NULL.
*/
-const struct zynqmp_eemi_ops *zynqmp_pm_get_eemi_ops(void)
+static struct zynqmp_pm_shutdown_scope*
+ zynqmp_pm_is_shutdown_scope_valid(const char *scope_string)
+{
+ int count;
+
+ for (count = 0; count < ARRAY_SIZE(shutdown_scopes); count++)
+ if (sysfs_streq(scope_string, shutdown_scopes[count].name))
+ return &shutdown_scopes[count];
+
+ return NULL;
+}
+
+static ssize_t shutdown_scope_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(shutdown_scopes); i++) {
+ if (&shutdown_scopes[i] == selected_scope) {
+ strcat(buf, "[");
+ strcat(buf, shutdown_scopes[i].name);
+ strcat(buf, "]");
+ } else {
+ strcat(buf, shutdown_scopes[i].name);
+ }
+ strcat(buf, " ");
+ }
+ strcat(buf, "\n");
+
+ return strlen(buf);
+}
+
+static ssize_t shutdown_scope_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ struct zynqmp_pm_shutdown_scope *scope;
+
+ scope = zynqmp_pm_is_shutdown_scope_valid(buf);
+ if (!scope)
+ return -EINVAL;
+
+ ret = zynqmp_pm_system_shutdown(ZYNQMP_PM_SHUTDOWN_TYPE_SETSCOPE_ONLY,
+ scope->subtype);
+ if (ret) {
+ pr_err("unable to set shutdown scope %s\n", buf);
+ return ret;
+ }
+
+ selected_scope = scope;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(shutdown_scope);
+
+static ssize_t health_status_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ unsigned int value;
+
+ ret = kstrtouint(buf, 10, &value);
+ if (ret)
+ return ret;
+
+ ret = zynqmp_pm_set_boot_health_status(value);
+ if (ret) {
+ dev_err(device, "unable to set healthy bit value to %u\n",
+ value);
+ return ret;
+ }
+
+ return count;
+}
+
+static DEVICE_ATTR_WO(health_status);
+
+static ssize_t ggs_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf,
+ u32 reg)
+{
+ int ret;
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+
+ ret = zynqmp_pm_read_ggs(reg, ret_payload);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "0x%x\n", ret_payload[1]);
+}
+
+static ssize_t ggs_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count,
+ u32 reg)
+{
+ long value;
+ int ret;
+
+ if (reg >= GSS_NUM_REGS)
+ return -EINVAL;
+
+ ret = kstrtol(buf, 16, &value);
+ if (ret) {
+ count = -EFAULT;
+ goto err;
+ }
+
+ ret = zynqmp_pm_write_ggs(reg, value);
+ if (ret)
+ count = -EFAULT;
+err:
+ return count;
+}
+
+/* GGS register show functions */
+#define GGS0_SHOW(N) \
+ ssize_t ggs##N##_show(struct device *device, \
+ struct device_attribute *attr, \
+ char *buf) \
+ { \
+ return ggs_show(device, attr, buf, N); \
+ }
+
+static GGS0_SHOW(0);
+static GGS0_SHOW(1);
+static GGS0_SHOW(2);
+static GGS0_SHOW(3);
+
+/* GGS register store function */
+#define GGS0_STORE(N) \
+ ssize_t ggs##N##_store(struct device *device, \
+ struct device_attribute *attr, \
+ const char *buf, \
+ size_t count) \
+ { \
+ return ggs_store(device, attr, buf, count, N); \
+ }
+
+static GGS0_STORE(0);
+static GGS0_STORE(1);
+static GGS0_STORE(2);
+static GGS0_STORE(3);
+
+static ssize_t pggs_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf,
+ u32 reg)
+{
+ int ret;
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+
+ ret = zynqmp_pm_read_pggs(reg, ret_payload);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "0x%x\n", ret_payload[1]);
+}
+
+static ssize_t pggs_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count,
+ u32 reg)
{
- if (eemi_ops_tbl)
- return eemi_ops_tbl;
- else
- return ERR_PTR(-EPROBE_DEFER);
+ long value;
+ int ret;
+
+ if (reg >= GSS_NUM_REGS)
+ return -EINVAL;
+ ret = kstrtol(buf, 16, &value);
+ if (ret) {
+ count = -EFAULT;
+ goto err;
+ }
+
+ ret = zynqmp_pm_write_pggs(reg, value);
+ if (ret)
+ count = -EFAULT;
+
+err:
+ return count;
}
-EXPORT_SYMBOL_GPL(zynqmp_pm_get_eemi_ops);
+
+#define PGGS0_SHOW(N) \
+ ssize_t pggs##N##_show(struct device *device, \
+ struct device_attribute *attr, \
+ char *buf) \
+ { \
+ return pggs_show(device, attr, buf, N); \
+ }
+
+#define PGGS0_STORE(N) \
+ ssize_t pggs##N##_store(struct device *device, \
+ struct device_attribute *attr, \
+ const char *buf, \
+ size_t count) \
+ { \
+ return pggs_store(device, attr, buf, count, N); \
+ }
+
+/* PGGS register show functions */
+static PGGS0_SHOW(0);
+static PGGS0_SHOW(1);
+static PGGS0_SHOW(2);
+static PGGS0_SHOW(3);
+
+/* PGGS register store functions */
+static PGGS0_STORE(0);
+static PGGS0_STORE(1);
+static PGGS0_STORE(2);
+static PGGS0_STORE(3);
+
+/* GGS register attributes */
+static DEVICE_ATTR_RW(ggs0);
+static DEVICE_ATTR_RW(ggs1);
+static DEVICE_ATTR_RW(ggs2);
+static DEVICE_ATTR_RW(ggs3);
+
+/* PGGS register attributes */
+static DEVICE_ATTR_RW(pggs0);
+static DEVICE_ATTR_RW(pggs1);
+static DEVICE_ATTR_RW(pggs2);
+static DEVICE_ATTR_RW(pggs3);
+
+static struct attribute *zynqmp_firmware_attrs[] = {
+ &dev_attr_ggs0.attr,
+ &dev_attr_ggs1.attr,
+ &dev_attr_ggs2.attr,
+ &dev_attr_ggs3.attr,
+ &dev_attr_pggs0.attr,
+ &dev_attr_pggs1.attr,
+ &dev_attr_pggs2.attr,
+ &dev_attr_pggs3.attr,
+ &dev_attr_shutdown_scope.attr,
+ &dev_attr_health_status.attr,
+ NULL,
+};
+
+ATTRIBUTE_GROUPS(zynqmp_firmware);
static int zynqmp_firmware_probe(struct platform_device *pdev)
{
@@ -820,11 +1235,6 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
pr_info("%s Trustzone version v%d.%d\n", __func__,
pm_tz_version >> 16, pm_tz_version & 0xFFFF);
- /* Assign eemi_ops_table */
- eemi_ops_tbl = &eemi_ops;
-
- zynqmp_pm_api_debugfs_init();
-
ret = mfd_add_devices(&pdev->dev, PLATFORM_DEVID_NONE, firmware_devs,
ARRAY_SIZE(firmware_devs), NULL, 0, NULL);
if (ret) {
@@ -832,6 +1242,8 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
return ret;
}
+ zynqmp_pm_api_debugfs_init();
+
return of_platform_populate(dev->of_node, NULL, NULL, dev);
}
@@ -854,6 +1266,7 @@ static struct platform_driver zynqmp_firmware_driver = {
.driver = {
.name = "zynqmp_firmware",
.of_match_table = zynqmp_firmware_of_match,
+ .dev_groups = zynqmp_firmware_groups,
},
.probe = zynqmp_firmware_probe,
.remove = zynqmp_firmware_remove,
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig
index 72380e1d31c7..b2408a710662 100644
--- a/drivers/fpga/Kconfig
+++ b/drivers/fpga/Kconfig
@@ -156,7 +156,7 @@ config FPGA_DFL
config FPGA_DFL_FME
tristate "FPGA DFL FME Driver"
- depends on FPGA_DFL && HWMON
+ depends on FPGA_DFL && HWMON && PERF_EVENTS
help
The FPGA Management Engine (FME) is a feature device implemented
under Device Feature List (DFL) framework. Select this option to
diff --git a/drivers/fpga/Makefile b/drivers/fpga/Makefile
index 4865b74b00a4..d8e21dfc6778 100644
--- a/drivers/fpga/Makefile
+++ b/drivers/fpga/Makefile
@@ -40,6 +40,7 @@ obj-$(CONFIG_FPGA_DFL_FME_REGION) += dfl-fme-region.o
obj-$(CONFIG_FPGA_DFL_AFU) += dfl-afu.o
dfl-fme-objs := dfl-fme-main.o dfl-fme-pr.o dfl-fme-error.o
+dfl-fme-objs += dfl-fme-perf.o
dfl-afu-objs := dfl-afu-main.o dfl-afu-region.o dfl-afu-dma-region.o
dfl-afu-objs += dfl-afu-error.o
diff --git a/drivers/fpga/dfl-afu-dma-region.c b/drivers/fpga/dfl-afu-dma-region.c
index 62f924489db5..02d8cbad1ae2 100644
--- a/drivers/fpga/dfl-afu-dma-region.c
+++ b/drivers/fpga/dfl-afu-dma-region.c
@@ -61,10 +61,10 @@ static int afu_dma_pin_pages(struct dfl_feature_platform_data *pdata,
region->pages);
if (pinned < 0) {
ret = pinned;
- goto put_pages;
+ goto free_pages;
} else if (pinned != npages) {
ret = -EFAULT;
- goto free_pages;
+ goto put_pages;
}
dev_dbg(dev, "%d pages pinned\n", pinned);
@@ -324,10 +324,6 @@ int afu_dma_map_region(struct dfl_feature_platform_data *pdata,
if (user_addr + length < user_addr)
return -EINVAL;
- if (!access_ok((void __user *)(unsigned long)user_addr,
- length))
- return -EINVAL;
-
region = kzalloc(sizeof(*region), GFP_KERNEL);
if (!region)
return -ENOMEM;
diff --git a/drivers/fpga/dfl-afu-main.c b/drivers/fpga/dfl-afu-main.c
index 65437b6a6842..b0c31789a909 100644
--- a/drivers/fpga/dfl-afu-main.c
+++ b/drivers/fpga/dfl-afu-main.c
@@ -561,14 +561,16 @@ static int afu_open(struct inode *inode, struct file *filp)
if (WARN_ON(!pdata))
return -ENODEV;
- ret = dfl_feature_dev_use_begin(pdata);
- if (ret)
- return ret;
-
- dev_dbg(&fdev->dev, "Device File Open\n");
- filp->private_data = fdev;
+ mutex_lock(&pdata->lock);
+ ret = dfl_feature_dev_use_begin(pdata, filp->f_flags & O_EXCL);
+ if (!ret) {
+ dev_dbg(&fdev->dev, "Device File Opened %d Times\n",
+ dfl_feature_dev_use_count(pdata));
+ filp->private_data = fdev;
+ }
+ mutex_unlock(&pdata->lock);
- return 0;
+ return ret;
}
static int afu_release(struct inode *inode, struct file *filp)
@@ -581,12 +583,14 @@ static int afu_release(struct inode *inode, struct file *filp)
pdata = dev_get_platdata(&pdev->dev);
mutex_lock(&pdata->lock);
- __port_reset(pdev);
- afu_dma_region_destroy(pdata);
- mutex_unlock(&pdata->lock);
-
dfl_feature_dev_use_end(pdata);
+ if (!dfl_feature_dev_use_count(pdata)) {
+ __port_reset(pdev);
+ afu_dma_region_destroy(pdata);
+ }
+ mutex_unlock(&pdata->lock);
+
return 0;
}
@@ -746,6 +750,12 @@ static long afu_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return -EINVAL;
}
+static const struct vm_operations_struct afu_vma_ops = {
+#ifdef CONFIG_HAVE_IOREMAP_PROT
+ .access = generic_access_phys,
+#endif
+};
+
static int afu_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct platform_device *pdev = filp->private_data;
@@ -775,6 +785,9 @@ static int afu_mmap(struct file *filp, struct vm_area_struct *vma)
!(region.flags & DFL_PORT_REGION_WRITE))
return -EPERM;
+ /* Support debug access to the mapping */
+ vma->vm_ops = &afu_vma_ops;
+
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
return remap_pfn_range(vma, vma->vm_start,
diff --git a/drivers/fpga/dfl-fme-main.c b/drivers/fpga/dfl-fme-main.c
index 1d4690c99268..fc210d4e1863 100644
--- a/drivers/fpga/dfl-fme-main.c
+++ b/drivers/fpga/dfl-fme-main.c
@@ -580,6 +580,10 @@ static struct dfl_feature_driver fme_feature_drvs[] = {
.ops = &fme_power_mgmt_ops,
},
{
+ .id_table = fme_perf_id_table,
+ .ops = &fme_perf_ops,
+ },
+ {
.ops = NULL,
},
};
@@ -600,14 +604,16 @@ static int fme_open(struct inode *inode, struct file *filp)
if (WARN_ON(!pdata))
return -ENODEV;
- ret = dfl_feature_dev_use_begin(pdata);
- if (ret)
- return ret;
-
- dev_dbg(&fdev->dev, "Device File Open\n");
- filp->private_data = pdata;
+ mutex_lock(&pdata->lock);
+ ret = dfl_feature_dev_use_begin(pdata, filp->f_flags & O_EXCL);
+ if (!ret) {
+ dev_dbg(&fdev->dev, "Device File Opened %d Times\n",
+ dfl_feature_dev_use_count(pdata));
+ filp->private_data = pdata;
+ }
+ mutex_unlock(&pdata->lock);
- return 0;
+ return ret;
}
static int fme_release(struct inode *inode, struct file *filp)
@@ -616,7 +622,10 @@ static int fme_release(struct inode *inode, struct file *filp)
struct platform_device *pdev = pdata->dev;
dev_dbg(&pdev->dev, "Device File Release\n");
+
+ mutex_lock(&pdata->lock);
dfl_feature_dev_use_end(pdata);
+ mutex_unlock(&pdata->lock);
return 0;
}
diff --git a/drivers/fpga/dfl-fme-perf.c b/drivers/fpga/dfl-fme-perf.c
new file mode 100644
index 000000000000..6ce1ed222ea4
--- /dev/null
+++ b/drivers/fpga/dfl-fme-perf.c
@@ -0,0 +1,1020 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for FPGA Management Engine (FME) Global Performance Reporting
+ *
+ * Copyright 2019 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Kang Luwei <luwei.kang@intel.com>
+ * Xiao Guangrong <guangrong.xiao@linux.intel.com>
+ * Wu Hao <hao.wu@intel.com>
+ * Xu Yilun <yilun.xu@intel.com>
+ * Joseph Grecco <joe.grecco@intel.com>
+ * Enno Luebbers <enno.luebbers@intel.com>
+ * Tim Whisonant <tim.whisonant@intel.com>
+ * Ananda Ravuri <ananda.ravuri@intel.com>
+ * Mitchel, Henry <henry.mitchel@intel.com>
+ */
+
+#include <linux/perf_event.h>
+#include "dfl.h"
+#include "dfl-fme.h"
+
+/*
+ * Performance Counter Registers for Cache.
+ *
+ * Cache Events are listed below as CACHE_EVNT_*.
+ */
+#define CACHE_CTRL 0x8
+#define CACHE_RESET_CNTR BIT_ULL(0)
+#define CACHE_FREEZE_CNTR BIT_ULL(8)
+#define CACHE_CTRL_EVNT GENMASK_ULL(19, 16)
+#define CACHE_EVNT_RD_HIT 0x0
+#define CACHE_EVNT_WR_HIT 0x1
+#define CACHE_EVNT_RD_MISS 0x2
+#define CACHE_EVNT_WR_MISS 0x3
+#define CACHE_EVNT_RSVD 0x4
+#define CACHE_EVNT_HOLD_REQ 0x5
+#define CACHE_EVNT_DATA_WR_PORT_CONTEN 0x6
+#define CACHE_EVNT_TAG_WR_PORT_CONTEN 0x7
+#define CACHE_EVNT_TX_REQ_STALL 0x8
+#define CACHE_EVNT_RX_REQ_STALL 0x9
+#define CACHE_EVNT_EVICTIONS 0xa
+#define CACHE_EVNT_MAX CACHE_EVNT_EVICTIONS
+#define CACHE_CHANNEL_SEL BIT_ULL(20)
+#define CACHE_CHANNEL_RD 0
+#define CACHE_CHANNEL_WR 1
+#define CACHE_CNTR0 0x10
+#define CACHE_CNTR1 0x18
+#define CACHE_CNTR_EVNT_CNTR GENMASK_ULL(47, 0)
+#define CACHE_CNTR_EVNT GENMASK_ULL(63, 60)
+
+/*
+ * Performance Counter Registers for Fabric.
+ *
+ * Fabric Events are listed below as FAB_EVNT_*
+ */
+#define FAB_CTRL 0x20
+#define FAB_RESET_CNTR BIT_ULL(0)
+#define FAB_FREEZE_CNTR BIT_ULL(8)
+#define FAB_CTRL_EVNT GENMASK_ULL(19, 16)
+#define FAB_EVNT_PCIE0_RD 0x0
+#define FAB_EVNT_PCIE0_WR 0x1
+#define FAB_EVNT_PCIE1_RD 0x2
+#define FAB_EVNT_PCIE1_WR 0x3
+#define FAB_EVNT_UPI_RD 0x4
+#define FAB_EVNT_UPI_WR 0x5
+#define FAB_EVNT_MMIO_RD 0x6
+#define FAB_EVNT_MMIO_WR 0x7
+#define FAB_EVNT_MAX FAB_EVNT_MMIO_WR
+#define FAB_PORT_ID GENMASK_ULL(21, 20)
+#define FAB_PORT_FILTER BIT_ULL(23)
+#define FAB_PORT_FILTER_DISABLE 0
+#define FAB_PORT_FILTER_ENABLE 1
+#define FAB_CNTR 0x28
+#define FAB_CNTR_EVNT_CNTR GENMASK_ULL(59, 0)
+#define FAB_CNTR_EVNT GENMASK_ULL(63, 60)
+
+/*
+ * Performance Counter Registers for Clock.
+ *
+ * Clock Counter can't be reset or frozen by SW.
+ */
+#define CLK_CNTR 0x30
+#define BASIC_EVNT_CLK 0x0
+#define BASIC_EVNT_MAX BASIC_EVNT_CLK
+
+/*
+ * Performance Counter Registers for IOMMU / VT-D.
+ *
+ * VT-D Events are listed below as VTD_EVNT_* and VTD_SIP_EVNT_*
+ */
+#define VTD_CTRL 0x38
+#define VTD_RESET_CNTR BIT_ULL(0)
+#define VTD_FREEZE_CNTR BIT_ULL(8)
+#define VTD_CTRL_EVNT GENMASK_ULL(19, 16)
+#define VTD_EVNT_AFU_MEM_RD_TRANS 0x0
+#define VTD_EVNT_AFU_MEM_WR_TRANS 0x1
+#define VTD_EVNT_AFU_DEVTLB_RD_HIT 0x2
+#define VTD_EVNT_AFU_DEVTLB_WR_HIT 0x3
+#define VTD_EVNT_DEVTLB_4K_FILL 0x4
+#define VTD_EVNT_DEVTLB_2M_FILL 0x5
+#define VTD_EVNT_DEVTLB_1G_FILL 0x6
+#define VTD_EVNT_MAX VTD_EVNT_DEVTLB_1G_FILL
+#define VTD_CNTR 0x40
+#define VTD_CNTR_EVNT_CNTR GENMASK_ULL(47, 0)
+#define VTD_CNTR_EVNT GENMASK_ULL(63, 60)
+
+#define VTD_SIP_CTRL 0x48
+#define VTD_SIP_RESET_CNTR BIT_ULL(0)
+#define VTD_SIP_FREEZE_CNTR BIT_ULL(8)
+#define VTD_SIP_CTRL_EVNT GENMASK_ULL(19, 16)
+#define VTD_SIP_EVNT_IOTLB_4K_HIT 0x0
+#define VTD_SIP_EVNT_IOTLB_2M_HIT 0x1
+#define VTD_SIP_EVNT_IOTLB_1G_HIT 0x2
+#define VTD_SIP_EVNT_SLPWC_L3_HIT 0x3
+#define VTD_SIP_EVNT_SLPWC_L4_HIT 0x4
+#define VTD_SIP_EVNT_RCC_HIT 0x5
+#define VTD_SIP_EVNT_IOTLB_4K_MISS 0x6
+#define VTD_SIP_EVNT_IOTLB_2M_MISS 0x7
+#define VTD_SIP_EVNT_IOTLB_1G_MISS 0x8
+#define VTD_SIP_EVNT_SLPWC_L3_MISS 0x9
+#define VTD_SIP_EVNT_SLPWC_L4_MISS 0xa
+#define VTD_SIP_EVNT_RCC_MISS 0xb
+#define VTD_SIP_EVNT_MAX VTD_SIP_EVNT_SLPWC_L4_MISS
+#define VTD_SIP_CNTR 0X50
+#define VTD_SIP_CNTR_EVNT_CNTR GENMASK_ULL(47, 0)
+#define VTD_SIP_CNTR_EVNT GENMASK_ULL(63, 60)
+
+#define PERF_TIMEOUT 30
+
+#define PERF_MAX_PORT_NUM 1U
+
+/**
+ * struct fme_perf_priv - priv data structure for fme perf driver
+ *
+ * @dev: parent device.
+ * @ioaddr: mapped base address of mmio region.
+ * @pmu: pmu data structure for fme perf counters.
+ * @id: id of this fme performance report private feature.
+ * @fab_users: current user number on fabric counters.
+ * @fab_port_id: used to indicate current working mode of fabric counters.
+ * @fab_lock: lock to protect fabric counters working mode.
+ * @cpu: active CPU to which the PMU is bound for accesses.
+ * @node: node for CPU hotplug notifier link.
+ * @cpuhp_state: state for CPU hotplug notification.
+ */
+struct fme_perf_priv {
+ struct device *dev;
+ void __iomem *ioaddr;
+ struct pmu pmu;
+ u64 id;
+
+ u32 fab_users;
+ u32 fab_port_id;
+ spinlock_t fab_lock;
+
+ unsigned int cpu;
+ struct hlist_node node;
+ enum cpuhp_state cpuhp_state;
+};
+
+/**
+ * struct fme_perf_event_ops - callbacks for fme perf events
+ *
+ * @event_init: callback invoked during event init.
+ * @event_destroy: callback invoked during event destroy.
+ * @read_counter: callback to read hardware counters.
+ */
+struct fme_perf_event_ops {
+ int (*event_init)(struct fme_perf_priv *priv, u32 event, u32 portid);
+ void (*event_destroy)(struct fme_perf_priv *priv, u32 event,
+ u32 portid);
+ u64 (*read_counter)(struct fme_perf_priv *priv, u32 event, u32 portid);
+};
+
+#define to_fme_perf_priv(_pmu) container_of(_pmu, struct fme_perf_priv, pmu)
+
+static ssize_t cpumask_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pmu *pmu = dev_get_drvdata(dev);
+ struct fme_perf_priv *priv;
+
+ priv = to_fme_perf_priv(pmu);
+
+ return cpumap_print_to_pagebuf(true, buf, cpumask_of(priv->cpu));
+}
+static DEVICE_ATTR_RO(cpumask);
+
+static struct attribute *fme_perf_cpumask_attrs[] = {
+ &dev_attr_cpumask.attr,
+ NULL,
+};
+
+static struct attribute_group fme_perf_cpumask_group = {
+ .attrs = fme_perf_cpumask_attrs,
+};
+
+#define FME_EVENT_MASK GENMASK_ULL(11, 0)
+#define FME_EVENT_SHIFT 0
+#define FME_EVTYPE_MASK GENMASK_ULL(15, 12)
+#define FME_EVTYPE_SHIFT 12
+#define FME_EVTYPE_BASIC 0
+#define FME_EVTYPE_CACHE 1
+#define FME_EVTYPE_FABRIC 2
+#define FME_EVTYPE_VTD 3
+#define FME_EVTYPE_VTD_SIP 4
+#define FME_EVTYPE_MAX FME_EVTYPE_VTD_SIP
+#define FME_PORTID_MASK GENMASK_ULL(23, 16)
+#define FME_PORTID_SHIFT 16
+#define FME_PORTID_ROOT (0xffU)
+
+#define get_event(_config) FIELD_GET(FME_EVENT_MASK, _config)
+#define get_evtype(_config) FIELD_GET(FME_EVTYPE_MASK, _config)
+#define get_portid(_config) FIELD_GET(FME_PORTID_MASK, _config)
+
+PMU_FORMAT_ATTR(event, "config:0-11");
+PMU_FORMAT_ATTR(evtype, "config:12-15");
+PMU_FORMAT_ATTR(portid, "config:16-23");
+
+static struct attribute *fme_perf_format_attrs[] = {
+ &format_attr_event.attr,
+ &format_attr_evtype.attr,
+ &format_attr_portid.attr,
+ NULL,
+};
+
+static struct attribute_group fme_perf_format_group = {
+ .name = "format",
+ .attrs = fme_perf_format_attrs,
+};
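As a quick illustration of the config layout declared above (event in bits 0-11, evtype in bits 12-15, portid in bits 16-23), an event config value could be assembled as follows; the helper name is a made-up sketch and is not part of the driver.

#include <linux/bitfield.h>

/* Illustrative helper (not in the driver): build a perf config value
 * for a fabric event monitored on the whole FPGA (portid 0xff = root). */
static u64 example_fme_perf_config(u32 event)
{
	return FIELD_PREP(FME_EVENT_MASK, event) |
	       FIELD_PREP(FME_EVTYPE_MASK, FME_EVTYPE_FABRIC) |
	       FIELD_PREP(FME_PORTID_MASK, FME_PORTID_ROOT);
}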
+
+/*
+ * There are no default events, but we need to create
+ * "events" group (with empty attrs) before updating
+ * it with detected events (using pmu->attr_update).
+ */
+static struct attribute *fme_perf_events_attrs_empty[] = {
+ NULL,
+};
+
+static struct attribute_group fme_perf_events_group = {
+ .name = "events",
+ .attrs = fme_perf_events_attrs_empty,
+};
+
+static const struct attribute_group *fme_perf_groups[] = {
+ &fme_perf_format_group,
+ &fme_perf_cpumask_group,
+ &fme_perf_events_group,
+ NULL,
+};
+
+static bool is_portid_root(u32 portid)
+{
+ return portid == FME_PORTID_ROOT;
+}
+
+static bool is_portid_port(u32 portid)
+{
+ return portid < PERF_MAX_PORT_NUM;
+}
+
+static bool is_portid_root_or_port(u32 portid)
+{
+ return is_portid_root(portid) || is_portid_port(portid);
+}
+
+static u64 fme_read_perf_cntr_reg(void __iomem *addr)
+{
+ u32 low;
+ u64 v;
+
+ /*
+ * For 64-bit counter registers, the counter may increase and carry
+ * out of bit [31] between the two 32-bit reads, so add an extra read
+ * to detect and retry when that happens. This only matters on platforms
+ * that don't support a 64-bit read, where readq is split into two readl calls.
+ */
+ do {
+ v = readq(addr);
+ low = readl(addr);
+ } while (((u32)v) > low);
+
+ return v;
+}
+
+static int basic_event_init(struct fme_perf_priv *priv, u32 event, u32 portid)
+{
+ if (event <= BASIC_EVNT_MAX && is_portid_root(portid))
+ return 0;
+
+ return -EINVAL;
+}
+
+static u64 basic_read_event_counter(struct fme_perf_priv *priv,
+ u32 event, u32 portid)
+{
+ void __iomem *base = priv->ioaddr;
+
+ return fme_read_perf_cntr_reg(base + CLK_CNTR);
+}
+
+static int cache_event_init(struct fme_perf_priv *priv, u32 event, u32 portid)
+{
+ if (priv->id == FME_FEATURE_ID_GLOBAL_IPERF &&
+ event <= CACHE_EVNT_MAX && is_portid_root(portid))
+ return 0;
+
+ return -EINVAL;
+}
+
+static u64 cache_read_event_counter(struct fme_perf_priv *priv,
+ u32 event, u32 portid)
+{
+ void __iomem *base = priv->ioaddr;
+ u64 v, count;
+ u8 channel;
+
+ if (event == CACHE_EVNT_WR_HIT || event == CACHE_EVNT_WR_MISS ||
+ event == CACHE_EVNT_DATA_WR_PORT_CONTEN ||
+ event == CACHE_EVNT_TAG_WR_PORT_CONTEN)
+ channel = CACHE_CHANNEL_WR;
+ else
+ channel = CACHE_CHANNEL_RD;
+
+ /* set channel access type and cache event code. */
+ v = readq(base + CACHE_CTRL);
+ v &= ~(CACHE_CHANNEL_SEL | CACHE_CTRL_EVNT);
+ v |= FIELD_PREP(CACHE_CHANNEL_SEL, channel);
+ v |= FIELD_PREP(CACHE_CTRL_EVNT, event);
+ writeq(v, base + CACHE_CTRL);
+
+ if (readq_poll_timeout_atomic(base + CACHE_CNTR0, v,
+ FIELD_GET(CACHE_CNTR_EVNT, v) == event,
+ 1, PERF_TIMEOUT)) {
+ dev_err(priv->dev, "timeout, unmatched cache event code in counter register.\n");
+ return 0;
+ }
+
+ v = fme_read_perf_cntr_reg(base + CACHE_CNTR0);
+ count = FIELD_GET(CACHE_CNTR_EVNT_CNTR, v);
+ v = fme_read_perf_cntr_reg(base + CACHE_CNTR1);
+ count += FIELD_GET(CACHE_CNTR_EVNT_CNTR, v);
+
+ return count;
+}
+
+static bool is_fabric_event_supported(struct fme_perf_priv *priv, u32 event,
+ u32 portid)
+{
+ if (event > FAB_EVNT_MAX || !is_portid_root_or_port(portid))
+ return false;
+
+ if (priv->id == FME_FEATURE_ID_GLOBAL_DPERF &&
+ (event == FAB_EVNT_PCIE1_RD || event == FAB_EVNT_UPI_RD ||
+ event == FAB_EVNT_PCIE1_WR || event == FAB_EVNT_UPI_WR))
+ return false;
+
+ return true;
+}
+
+static int fabric_event_init(struct fme_perf_priv *priv, u32 event, u32 portid)
+{
+ void __iomem *base = priv->ioaddr;
+ int ret = 0;
+ u64 v;
+
+ if (!is_fabric_event_supported(priv, event, portid))
+ return -EINVAL;
+
+ /*
+ * The fabric counter set can only be in either overall or port mode.
+ * In overall mode it counts overall data for the FPGA, and in port
+ * mode it is configured to monitor one individual port.
+ *
+ * So every time a new event is initialized, the driver checks the
+ * current working mode and whether someone is already using this counter set.
+ */
+ spin_lock(&priv->fab_lock);
+ if (priv->fab_users && priv->fab_port_id != portid) {
+ dev_dbg(priv->dev, "conflict fabric event monitoring mode.\n");
+ ret = -EOPNOTSUPP;
+ goto exit;
+ }
+
+ priv->fab_users++;
+
+ /*
+ * skip if current working mode matches, otherwise change the working
+ * mode per input port_id, to monitor overall data or another port.
+ */
+ if (priv->fab_port_id == portid)
+ goto exit;
+
+ priv->fab_port_id = portid;
+
+ v = readq(base + FAB_CTRL);
+ v &= ~(FAB_PORT_FILTER | FAB_PORT_ID);
+
+ if (is_portid_root(portid)) {
+ v |= FIELD_PREP(FAB_PORT_FILTER, FAB_PORT_FILTER_DISABLE);
+ } else {
+ v |= FIELD_PREP(FAB_PORT_FILTER, FAB_PORT_FILTER_ENABLE);
+ v |= FIELD_PREP(FAB_PORT_ID, portid);
+ }
+ writeq(v, base + FAB_CTRL);
+
+exit:
+ spin_unlock(&priv->fab_lock);
+ return ret;
+}
+
+static void fabric_event_destroy(struct fme_perf_priv *priv, u32 event,
+ u32 portid)
+{
+ spin_lock(&priv->fab_lock);
+ priv->fab_users--;
+ spin_unlock(&priv->fab_lock);
+}
+
+static u64 fabric_read_event_counter(struct fme_perf_priv *priv, u32 event,
+ u32 portid)
+{
+ void __iomem *base = priv->ioaddr;
+ u64 v;
+
+ v = readq(base + FAB_CTRL);
+ v &= ~FAB_CTRL_EVNT;
+ v |= FIELD_PREP(FAB_CTRL_EVNT, event);
+ writeq(v, base + FAB_CTRL);
+
+ if (readq_poll_timeout_atomic(base + FAB_CNTR, v,
+ FIELD_GET(FAB_CNTR_EVNT, v) == event,
+ 1, PERF_TIMEOUT)) {
+ dev_err(priv->dev, "timeout, unmatched fab event code in counter register.\n");
+ return 0;
+ }
+
+ v = fme_read_perf_cntr_reg(base + FAB_CNTR);
+ return FIELD_GET(FAB_CNTR_EVNT_CNTR, v);
+}
+
+static int vtd_event_init(struct fme_perf_priv *priv, u32 event, u32 portid)
+{
+ if (priv->id == FME_FEATURE_ID_GLOBAL_IPERF &&
+ event <= VTD_EVNT_MAX && is_portid_port(portid))
+ return 0;
+
+ return -EINVAL;
+}
+
+static u64 vtd_read_event_counter(struct fme_perf_priv *priv, u32 event,
+ u32 portid)
+{
+ void __iomem *base = priv->ioaddr;
+ u64 v;
+
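+ /* Per-port VTD events are selected by offsetting the event code by the port id. */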
+ event += (portid * (VTD_EVNT_MAX + 1));
+
+ v = readq(base + VTD_CTRL);
+ v &= ~VTD_CTRL_EVNT;
+ v |= FIELD_PREP(VTD_CTRL_EVNT, event);
+ writeq(v, base + VTD_CTRL);
+
+ if (readq_poll_timeout_atomic(base + VTD_CNTR, v,
+ FIELD_GET(VTD_CNTR_EVNT, v) == event,
+ 1, PERF_TIMEOUT)) {
+ dev_err(priv->dev, "timeout, unmatched vtd event code in counter register.\n");
+ return 0;
+ }
+
+ v = fme_read_perf_cntr_reg(base + VTD_CNTR);
+ return FIELD_GET(VTD_CNTR_EVNT_CNTR, v);
+}
+
+static int vtd_sip_event_init(struct fme_perf_priv *priv, u32 event, u32 portid)
+{
+ if (priv->id == FME_FEATURE_ID_GLOBAL_IPERF &&
+ event <= VTD_SIP_EVNT_MAX && is_portid_root(portid))
+ return 0;
+
+ return -EINVAL;
+}
+
+static u64 vtd_sip_read_event_counter(struct fme_perf_priv *priv, u32 event,
+ u32 portid)
+{
+ void __iomem *base = priv->ioaddr;
+ u64 v;
+
+ v = readq(base + VTD_SIP_CTRL);
+ v &= ~VTD_SIP_CTRL_EVNT;
+ v |= FIELD_PREP(VTD_SIP_CTRL_EVNT, event);
+ writeq(v, base + VTD_SIP_CTRL);
+
+ if (readq_poll_timeout_atomic(base + VTD_SIP_CNTR, v,
+ FIELD_GET(VTD_SIP_CNTR_EVNT, v) == event,
+ 1, PERF_TIMEOUT)) {
+ dev_err(priv->dev, "timeout, unmatched vtd sip event code in counter register.\n");
+ return 0;
+ }
+
+ v = fme_read_perf_cntr_reg(base + VTD_SIP_CNTR);
+ return FIELD_GET(VTD_SIP_CNTR_EVNT_CNTR, v);
+}
+
+static struct fme_perf_event_ops fme_perf_event_ops[] = {
+ [FME_EVTYPE_BASIC] = {.event_init = basic_event_init,
+ .read_counter = basic_read_event_counter,},
+ [FME_EVTYPE_CACHE] = {.event_init = cache_event_init,
+ .read_counter = cache_read_event_counter,},
+ [FME_EVTYPE_FABRIC] = {.event_init = fabric_event_init,
+ .event_destroy = fabric_event_destroy,
+ .read_counter = fabric_read_event_counter,},
+ [FME_EVTYPE_VTD] = {.event_init = vtd_event_init,
+ .read_counter = vtd_read_event_counter,},
+ [FME_EVTYPE_VTD_SIP] = {.event_init = vtd_sip_event_init,
+ .read_counter = vtd_sip_read_event_counter,},
+};
+
+static ssize_t fme_perf_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dev_ext_attribute *eattr;
+ unsigned long config;
+ char *ptr = buf;
+
+ eattr = container_of(attr, struct dev_ext_attribute, attr);
+ config = (unsigned long)eattr->var;
+
+ ptr += sprintf(ptr, "event=0x%02x", (unsigned int)get_event(config));
+ ptr += sprintf(ptr, ",evtype=0x%02x", (unsigned int)get_evtype(config));
+
+ if (is_portid_root(get_portid(config)))
+ ptr += sprintf(ptr, ",portid=0x%02x\n", FME_PORTID_ROOT);
+ else
+ ptr += sprintf(ptr, ",portid=?\n");
+
+ return (ssize_t)(ptr - buf);
+}
+
+#define FME_EVENT_ATTR(_name) \
+ __ATTR(_name, 0444, fme_perf_event_show, NULL)
+
+#define FME_PORT_EVENT_CONFIG(_event, _type) \
+ (void *)((((_event) << FME_EVENT_SHIFT) & FME_EVENT_MASK) | \
+ (((_type) << FME_EVTYPE_SHIFT) & FME_EVTYPE_MASK))
+
+#define FME_EVENT_CONFIG(_event, _type) \
+ (void *)((((_event) << FME_EVENT_SHIFT) & FME_EVENT_MASK) | \
+ (((_type) << FME_EVTYPE_SHIFT) & FME_EVTYPE_MASK) | \
+ (FME_PORTID_ROOT << FME_PORTID_SHIFT))
+
+/* FME Perf Basic Events */
+#define FME_EVENT_BASIC(_name, _event) \
+static struct dev_ext_attribute fme_perf_event_##_name = { \
+ .attr = FME_EVENT_ATTR(_name), \
+ .var = FME_EVENT_CONFIG(_event, FME_EVTYPE_BASIC), \
+}
+
+FME_EVENT_BASIC(clock, BASIC_EVNT_CLK);
+
+static struct attribute *fme_perf_basic_events_attrs[] = {
+ &fme_perf_event_clock.attr.attr,
+ NULL,
+};
+
+static const struct attribute_group fme_perf_basic_events_group = {
+ .name = "events",
+ .attrs = fme_perf_basic_events_attrs,
+};
+
+/* FME Perf Cache Events */
+#define FME_EVENT_CACHE(_name, _event) \
+static struct dev_ext_attribute fme_perf_event_cache_##_name = { \
+ .attr = FME_EVENT_ATTR(cache_##_name), \
+ .var = FME_EVENT_CONFIG(_event, FME_EVTYPE_CACHE), \
+}
+
+FME_EVENT_CACHE(read_hit, CACHE_EVNT_RD_HIT);
+FME_EVENT_CACHE(read_miss, CACHE_EVNT_RD_MISS);
+FME_EVENT_CACHE(write_hit, CACHE_EVNT_WR_HIT);
+FME_EVENT_CACHE(write_miss, CACHE_EVNT_WR_MISS);
+FME_EVENT_CACHE(hold_request, CACHE_EVNT_HOLD_REQ);
+FME_EVENT_CACHE(tx_req_stall, CACHE_EVNT_TX_REQ_STALL);
+FME_EVENT_CACHE(rx_req_stall, CACHE_EVNT_RX_REQ_STALL);
+FME_EVENT_CACHE(eviction, CACHE_EVNT_EVICTIONS);
+FME_EVENT_CACHE(data_write_port_contention, CACHE_EVNT_DATA_WR_PORT_CONTEN);
+FME_EVENT_CACHE(tag_write_port_contention, CACHE_EVNT_TAG_WR_PORT_CONTEN);
+
+static struct attribute *fme_perf_cache_events_attrs[] = {
+ &fme_perf_event_cache_read_hit.attr.attr,
+ &fme_perf_event_cache_read_miss.attr.attr,
+ &fme_perf_event_cache_write_hit.attr.attr,
+ &fme_perf_event_cache_write_miss.attr.attr,
+ &fme_perf_event_cache_hold_request.attr.attr,
+ &fme_perf_event_cache_tx_req_stall.attr.attr,
+ &fme_perf_event_cache_rx_req_stall.attr.attr,
+ &fme_perf_event_cache_eviction.attr.attr,
+ &fme_perf_event_cache_data_write_port_contention.attr.attr,
+ &fme_perf_event_cache_tag_write_port_contention.attr.attr,
+ NULL,
+};
+
+static umode_t fme_perf_events_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct pmu *pmu = dev_get_drvdata(kobj_to_dev(kobj));
+ struct fme_perf_priv *priv = to_fme_perf_priv(pmu);
+
+ return (priv->id == FME_FEATURE_ID_GLOBAL_IPERF) ? attr->mode : 0;
+}
+
+static const struct attribute_group fme_perf_cache_events_group = {
+ .name = "events",
+ .attrs = fme_perf_cache_events_attrs,
+ .is_visible = fme_perf_events_visible,
+};
+
+/* FME Perf Fabric Events */
+#define FME_EVENT_FABRIC(_name, _event) \
+static struct dev_ext_attribute fme_perf_event_fab_##_name = { \
+ .attr = FME_EVENT_ATTR(fab_##_name), \
+ .var = FME_EVENT_CONFIG(_event, FME_EVTYPE_FABRIC), \
+}
+
+#define FME_EVENT_FABRIC_PORT(_name, _event) \
+static struct dev_ext_attribute fme_perf_event_fab_port_##_name = { \
+ .attr = FME_EVENT_ATTR(fab_port_##_name), \
+ .var = FME_PORT_EVENT_CONFIG(_event, FME_EVTYPE_FABRIC), \
+}
+
+FME_EVENT_FABRIC(pcie0_read, FAB_EVNT_PCIE0_RD);
+FME_EVENT_FABRIC(pcie0_write, FAB_EVNT_PCIE0_WR);
+FME_EVENT_FABRIC(pcie1_read, FAB_EVNT_PCIE1_RD);
+FME_EVENT_FABRIC(pcie1_write, FAB_EVNT_PCIE1_WR);
+FME_EVENT_FABRIC(upi_read, FAB_EVNT_UPI_RD);
+FME_EVENT_FABRIC(upi_write, FAB_EVNT_UPI_WR);
+FME_EVENT_FABRIC(mmio_read, FAB_EVNT_MMIO_RD);
+FME_EVENT_FABRIC(mmio_write, FAB_EVNT_MMIO_WR);
+
+FME_EVENT_FABRIC_PORT(pcie0_read, FAB_EVNT_PCIE0_RD);
+FME_EVENT_FABRIC_PORT(pcie0_write, FAB_EVNT_PCIE0_WR);
+FME_EVENT_FABRIC_PORT(pcie1_read, FAB_EVNT_PCIE1_RD);
+FME_EVENT_FABRIC_PORT(pcie1_write, FAB_EVNT_PCIE1_WR);
+FME_EVENT_FABRIC_PORT(upi_read, FAB_EVNT_UPI_RD);
+FME_EVENT_FABRIC_PORT(upi_write, FAB_EVNT_UPI_WR);
+FME_EVENT_FABRIC_PORT(mmio_read, FAB_EVNT_MMIO_RD);
+FME_EVENT_FABRIC_PORT(mmio_write, FAB_EVNT_MMIO_WR);
+
+static struct attribute *fme_perf_fabric_events_attrs[] = {
+ &fme_perf_event_fab_pcie0_read.attr.attr,
+ &fme_perf_event_fab_pcie0_write.attr.attr,
+ &fme_perf_event_fab_pcie1_read.attr.attr,
+ &fme_perf_event_fab_pcie1_write.attr.attr,
+ &fme_perf_event_fab_upi_read.attr.attr,
+ &fme_perf_event_fab_upi_write.attr.attr,
+ &fme_perf_event_fab_mmio_read.attr.attr,
+ &fme_perf_event_fab_mmio_write.attr.attr,
+ &fme_perf_event_fab_port_pcie0_read.attr.attr,
+ &fme_perf_event_fab_port_pcie0_write.attr.attr,
+ &fme_perf_event_fab_port_pcie1_read.attr.attr,
+ &fme_perf_event_fab_port_pcie1_write.attr.attr,
+ &fme_perf_event_fab_port_upi_read.attr.attr,
+ &fme_perf_event_fab_port_upi_write.attr.attr,
+ &fme_perf_event_fab_port_mmio_read.attr.attr,
+ &fme_perf_event_fab_port_mmio_write.attr.attr,
+ NULL,
+};
+
+static umode_t fme_perf_fabric_events_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct pmu *pmu = dev_get_drvdata(kobj_to_dev(kobj));
+ struct fme_perf_priv *priv = to_fme_perf_priv(pmu);
+ struct dev_ext_attribute *eattr;
+ unsigned long var;
+
+ eattr = container_of(attr, struct dev_ext_attribute, attr.attr);
+ var = (unsigned long)eattr->var;
+
+ if (is_fabric_event_supported(priv, get_event(var), get_portid(var)))
+ return attr->mode;
+
+ return 0;
+}
+
+static const struct attribute_group fme_perf_fabric_events_group = {
+ .name = "events",
+ .attrs = fme_perf_fabric_events_attrs,
+ .is_visible = fme_perf_fabric_events_visible,
+};
+
+/* FME Perf VTD Events */
+#define FME_EVENT_VTD_PORT(_name, _event) \
+static struct dev_ext_attribute fme_perf_event_vtd_port_##_name = { \
+ .attr = FME_EVENT_ATTR(vtd_port_##_name), \
+ .var = FME_PORT_EVENT_CONFIG(_event, FME_EVTYPE_VTD), \
+}
+
+FME_EVENT_VTD_PORT(read_transaction, VTD_EVNT_AFU_MEM_RD_TRANS);
+FME_EVENT_VTD_PORT(write_transaction, VTD_EVNT_AFU_MEM_WR_TRANS);
+FME_EVENT_VTD_PORT(devtlb_read_hit, VTD_EVNT_AFU_DEVTLB_RD_HIT);
+FME_EVENT_VTD_PORT(devtlb_write_hit, VTD_EVNT_AFU_DEVTLB_WR_HIT);
+FME_EVENT_VTD_PORT(devtlb_4k_fill, VTD_EVNT_DEVTLB_4K_FILL);
+FME_EVENT_VTD_PORT(devtlb_2m_fill, VTD_EVNT_DEVTLB_2M_FILL);
+FME_EVENT_VTD_PORT(devtlb_1g_fill, VTD_EVNT_DEVTLB_1G_FILL);
+
+static struct attribute *fme_perf_vtd_events_attrs[] = {
+ &fme_perf_event_vtd_port_read_transaction.attr.attr,
+ &fme_perf_event_vtd_port_write_transaction.attr.attr,
+ &fme_perf_event_vtd_port_devtlb_read_hit.attr.attr,
+ &fme_perf_event_vtd_port_devtlb_write_hit.attr.attr,
+ &fme_perf_event_vtd_port_devtlb_4k_fill.attr.attr,
+ &fme_perf_event_vtd_port_devtlb_2m_fill.attr.attr,
+ &fme_perf_event_vtd_port_devtlb_1g_fill.attr.attr,
+ NULL,
+};
+
+static const struct attribute_group fme_perf_vtd_events_group = {
+ .name = "events",
+ .attrs = fme_perf_vtd_events_attrs,
+ .is_visible = fme_perf_events_visible,
+};
+
+/* FME Perf VTD SIP Events */
+#define FME_EVENT_VTD_SIP(_name, _event) \
+static struct dev_ext_attribute fme_perf_event_vtd_sip_##_name = { \
+ .attr = FME_EVENT_ATTR(vtd_sip_##_name), \
+ .var = FME_EVENT_CONFIG(_event, FME_EVTYPE_VTD_SIP), \
+}
+
+FME_EVENT_VTD_SIP(iotlb_4k_hit, VTD_SIP_EVNT_IOTLB_4K_HIT);
+FME_EVENT_VTD_SIP(iotlb_2m_hit, VTD_SIP_EVNT_IOTLB_2M_HIT);
+FME_EVENT_VTD_SIP(iotlb_1g_hit, VTD_SIP_EVNT_IOTLB_1G_HIT);
+FME_EVENT_VTD_SIP(slpwc_l3_hit, VTD_SIP_EVNT_SLPWC_L3_HIT);
+FME_EVENT_VTD_SIP(slpwc_l4_hit, VTD_SIP_EVNT_SLPWC_L4_HIT);
+FME_EVENT_VTD_SIP(rcc_hit, VTD_SIP_EVNT_RCC_HIT);
+FME_EVENT_VTD_SIP(iotlb_4k_miss, VTD_SIP_EVNT_IOTLB_4K_MISS);
+FME_EVENT_VTD_SIP(iotlb_2m_miss, VTD_SIP_EVNT_IOTLB_2M_MISS);
+FME_EVENT_VTD_SIP(iotlb_1g_miss, VTD_SIP_EVNT_IOTLB_1G_MISS);
+FME_EVENT_VTD_SIP(slpwc_l3_miss, VTD_SIP_EVNT_SLPWC_L3_MISS);
+FME_EVENT_VTD_SIP(slpwc_l4_miss, VTD_SIP_EVNT_SLPWC_L4_MISS);
+FME_EVENT_VTD_SIP(rcc_miss, VTD_SIP_EVNT_RCC_MISS);
+
+static struct attribute *fme_perf_vtd_sip_events_attrs[] = {
+ &fme_perf_event_vtd_sip_iotlb_4k_hit.attr.attr,
+ &fme_perf_event_vtd_sip_iotlb_2m_hit.attr.attr,
+ &fme_perf_event_vtd_sip_iotlb_1g_hit.attr.attr,
+ &fme_perf_event_vtd_sip_slpwc_l3_hit.attr.attr,
+ &fme_perf_event_vtd_sip_slpwc_l4_hit.attr.attr,
+ &fme_perf_event_vtd_sip_rcc_hit.attr.attr,
+ &fme_perf_event_vtd_sip_iotlb_4k_miss.attr.attr,
+ &fme_perf_event_vtd_sip_iotlb_2m_miss.attr.attr,
+ &fme_perf_event_vtd_sip_iotlb_1g_miss.attr.attr,
+ &fme_perf_event_vtd_sip_slpwc_l3_miss.attr.attr,
+ &fme_perf_event_vtd_sip_slpwc_l4_miss.attr.attr,
+ &fme_perf_event_vtd_sip_rcc_miss.attr.attr,
+ NULL,
+};
+
+static const struct attribute_group fme_perf_vtd_sip_events_group = {
+ .name = "events",
+ .attrs = fme_perf_vtd_sip_events_attrs,
+ .is_visible = fme_perf_events_visible,
+};
+
+static const struct attribute_group *fme_perf_events_groups[] = {
+ &fme_perf_basic_events_group,
+ &fme_perf_cache_events_group,
+ &fme_perf_fabric_events_group,
+ &fme_perf_vtd_events_group,
+ &fme_perf_vtd_sip_events_group,
+ NULL,
+};
+
+static struct fme_perf_event_ops *get_event_ops(u32 evtype)
+{
+ if (evtype > FME_EVTYPE_MAX)
+ return NULL;
+
+ return &fme_perf_event_ops[evtype];
+}
+
+static void fme_perf_event_destroy(struct perf_event *event)
+{
+ struct fme_perf_event_ops *ops = get_event_ops(event->hw.event_base);
+ struct fme_perf_priv *priv = to_fme_perf_priv(event->pmu);
+
+ if (ops->event_destroy)
+ ops->event_destroy(priv, event->hw.idx, event->hw.config_base);
+}
+
+static int fme_perf_event_init(struct perf_event *event)
+{
+ struct fme_perf_priv *priv = to_fme_perf_priv(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ struct fme_perf_event_ops *ops;
+ u32 eventid, evtype, portid;
+
+ /* Check the event attr type: reject events that are not for this PMU. */
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ /*
+ * FME counters are shared across all cores, so per-process mode is
+ * not supported. Event sampling mode is not supported either.
+ */
+ if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
+ return -EINVAL;
+
+ if (event->cpu < 0)
+ return -EINVAL;
+
+ if (event->cpu != priv->cpu)
+ return -EINVAL;
+
+ eventid = get_event(event->attr.config);
+ portid = get_portid(event->attr.config);
+ evtype = get_evtype(event->attr.config);
+ if (evtype > FME_EVTYPE_MAX)
+ return -EINVAL;
+
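+ /* Stash the decoded event type, event id and port id in the hw event fields. */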
+ hwc->event_base = evtype;
+ hwc->idx = (int)eventid;
+ hwc->config_base = portid;
+
+ event->destroy = fme_perf_event_destroy;
+
+ dev_dbg(priv->dev, "%s event=0x%x, evtype=0x%x, portid=0x%x,\n",
+ __func__, eventid, evtype, portid);
+
+ ops = get_event_ops(evtype);
+ if (ops->event_init)
+ return ops->event_init(priv, eventid, portid);
+
+ return 0;
+}
+
+static void fme_perf_event_update(struct perf_event *event)
+{
+ struct fme_perf_event_ops *ops = get_event_ops(event->hw.event_base);
+ struct fme_perf_priv *priv = to_fme_perf_priv(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ u64 now, prev, delta;
+
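+ /* Accumulate the delta since the last recorded counter value. */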
+ now = ops->read_counter(priv, (u32)hwc->idx, hwc->config_base);
+ prev = local64_read(&hwc->prev_count);
+ delta = now - prev;
+
+ local64_add(delta, &event->count);
+}
+
+static void fme_perf_event_start(struct perf_event *event, int flags)
+{
+ struct fme_perf_event_ops *ops = get_event_ops(event->hw.event_base);
+ struct fme_perf_priv *priv = to_fme_perf_priv(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ u64 count;
+
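+ /* Record the current counter value as the baseline for later delta updates. */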
+ count = ops->read_counter(priv, (u32)hwc->idx, hwc->config_base);
+ local64_set(&hwc->prev_count, count);
+}
+
+static void fme_perf_event_stop(struct perf_event *event, int flags)
+{
+ fme_perf_event_update(event);
+}
+
+static int fme_perf_event_add(struct perf_event *event, int flags)
+{
+ if (flags & PERF_EF_START)
+ fme_perf_event_start(event, flags);
+
+ return 0;
+}
+
+static void fme_perf_event_del(struct perf_event *event, int flags)
+{
+ fme_perf_event_stop(event, PERF_EF_UPDATE);
+}
+
+static void fme_perf_event_read(struct perf_event *event)
+{
+ fme_perf_event_update(event);
+}
+
+static void fme_perf_setup_hardware(struct fme_perf_priv *priv)
+{
+ void __iomem *base = priv->ioaddr;
+ u64 v;
+
+ /* read and save current working mode for fabric counters */
+ v = readq(base + FAB_CTRL);
+
+ if (FIELD_GET(FAB_PORT_FILTER, v) == FAB_PORT_FILTER_DISABLE)
+ priv->fab_port_id = FME_PORTID_ROOT;
+ else
+ priv->fab_port_id = FIELD_GET(FAB_PORT_ID, v);
+}
+
+static int fme_perf_pmu_register(struct platform_device *pdev,
+ struct fme_perf_priv *priv)
+{
+ struct pmu *pmu = &priv->pmu;
+ char *name;
+ int ret;
+
+ spin_lock_init(&priv->fab_lock);
+
+ fme_perf_setup_hardware(priv);
+
+ pmu->task_ctx_nr = perf_invalid_context;
+ pmu->attr_groups = fme_perf_groups;
+ pmu->attr_update = fme_perf_events_groups;
+ pmu->event_init = fme_perf_event_init;
+ pmu->add = fme_perf_event_add;
+ pmu->del = fme_perf_event_del;
+ pmu->start = fme_perf_event_start;
+ pmu->stop = fme_perf_event_stop;
+ pmu->read = fme_perf_event_read;
+ pmu->capabilities = PERF_PMU_CAP_NO_INTERRUPT |
+ PERF_PMU_CAP_NO_EXCLUDE;
+
+ name = devm_kasprintf(priv->dev, GFP_KERNEL, "dfl_fme%d", pdev->id);
+ if (!name)
+ return -ENOMEM;
+
+ ret = perf_pmu_register(pmu, name, -1);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void fme_perf_pmu_unregister(struct fme_perf_priv *priv)
+{
+ perf_pmu_unregister(&priv->pmu);
+}
+
+static int fme_perf_offline_cpu(unsigned int cpu, struct hlist_node *node)
+{
+ struct fme_perf_priv *priv;
+ int target;
+
+ priv = hlist_entry_safe(node, struct fme_perf_priv, node);
+
+ if (cpu != priv->cpu)
+ return 0;
+
+ target = cpumask_any_but(cpu_online_mask, cpu);
+ if (target >= nr_cpu_ids)
+ return 0;
+
+ priv->cpu = target;
+ return 0;
+}
+
+static int fme_perf_init(struct platform_device *pdev,
+ struct dfl_feature *feature)
+{
+ struct fme_perf_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = &pdev->dev;
+ priv->ioaddr = feature->ioaddr;
+ priv->id = feature->id;
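+ /* Bind to the probing CPU; fme_perf_offline_cpu() retargets it if needed. */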
+ priv->cpu = raw_smp_processor_id();
+
+ ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+ "perf/fpga/dfl_fme:online",
+ NULL, fme_perf_offline_cpu);
+ if (ret < 0)
+ return ret;
+
+ priv->cpuhp_state = ret;
+
+ /* Register the pmu instance for cpu hotplug */
+ ret = cpuhp_state_add_instance_nocalls(priv->cpuhp_state, &priv->node);
+ if (ret)
+ goto cpuhp_instance_err;
+
+ ret = fme_perf_pmu_register(pdev, priv);
+ if (ret)
+ goto pmu_register_err;
+
+ feature->priv = priv;
+ return 0;
+
+pmu_register_err:
+ cpuhp_state_remove_instance_nocalls(priv->cpuhp_state, &priv->node);
+cpuhp_instance_err:
+ cpuhp_remove_multi_state(priv->cpuhp_state);
+ return ret;
+}
+
+static void fme_perf_uinit(struct platform_device *pdev,
+ struct dfl_feature *feature)
+{
+ struct fme_perf_priv *priv = feature->priv;
+
+ fme_perf_pmu_unregister(priv);
+ cpuhp_state_remove_instance_nocalls(priv->cpuhp_state, &priv->node);
+ cpuhp_remove_multi_state(priv->cpuhp_state);
+}
+
+const struct dfl_feature_id fme_perf_id_table[] = {
+ {.id = FME_FEATURE_ID_GLOBAL_IPERF,},
+ {.id = FME_FEATURE_ID_GLOBAL_DPERF,},
+ {0,}
+};
+
+const struct dfl_feature_ops fme_perf_ops = {
+ .init = fme_perf_init,
+ .uinit = fme_perf_uinit,
+};
diff --git a/drivers/fpga/dfl-fme-pr.c b/drivers/fpga/dfl-fme-pr.c
index a233a53db708..1194c0e850e0 100644
--- a/drivers/fpga/dfl-fme-pr.c
+++ b/drivers/fpga/dfl-fme-pr.c
@@ -97,10 +97,6 @@ static int fme_pr(struct platform_device *pdev, unsigned long arg)
return -EINVAL;
}
- if (!access_ok((void __user *)(unsigned long)port_pr.buffer_address,
- port_pr.buffer_size))
- return -EFAULT;
-
/*
* align PR buffer per PR bandwidth, as HW ignores the extra padding
* data automatically.
diff --git a/drivers/fpga/dfl-fme.h b/drivers/fpga/dfl-fme.h
index 6685c8ef965b..4195dd68193e 100644
--- a/drivers/fpga/dfl-fme.h
+++ b/drivers/fpga/dfl-fme.h
@@ -38,5 +38,7 @@ extern const struct dfl_feature_id fme_pr_mgmt_id_table[];
extern const struct dfl_feature_ops fme_global_err_ops;
extern const struct dfl_feature_id fme_global_err_id_table[];
extern const struct attribute_group fme_global_err_group;
+extern const struct dfl_feature_ops fme_perf_ops;
+extern const struct dfl_feature_id fme_perf_id_table[];
#endif /* __DFL_FME_H */
diff --git a/drivers/fpga/dfl.c b/drivers/fpga/dfl.c
index 96a2b8274a33..990994874bf1 100644
--- a/drivers/fpga/dfl.c
+++ b/drivers/fpga/dfl.c
@@ -1079,6 +1079,7 @@ static int __init dfl_fpga_init(void)
*/
int dfl_fpga_cdev_release_port(struct dfl_fpga_cdev *cdev, int port_id)
{
+ struct dfl_feature_platform_data *pdata;
struct platform_device *port_pdev;
int ret = -ENODEV;
@@ -1093,7 +1094,11 @@ int dfl_fpga_cdev_release_port(struct dfl_fpga_cdev *cdev, int port_id)
goto put_dev_exit;
}
- ret = dfl_feature_dev_use_begin(dev_get_platdata(&port_pdev->dev));
+ pdata = dev_get_platdata(&port_pdev->dev);
+
+ mutex_lock(&pdata->lock);
+ ret = dfl_feature_dev_use_begin(pdata, true);
+ mutex_unlock(&pdata->lock);
if (ret)
goto put_dev_exit;
@@ -1120,6 +1125,7 @@ EXPORT_SYMBOL_GPL(dfl_fpga_cdev_release_port);
*/
int dfl_fpga_cdev_assign_port(struct dfl_fpga_cdev *cdev, int port_id)
{
+ struct dfl_feature_platform_data *pdata;
struct platform_device *port_pdev;
int ret = -ENODEV;
@@ -1138,7 +1144,12 @@ int dfl_fpga_cdev_assign_port(struct dfl_fpga_cdev *cdev, int port_id)
if (ret)
goto put_dev_exit;
- dfl_feature_dev_use_end(dev_get_platdata(&port_pdev->dev));
+ pdata = dev_get_platdata(&port_pdev->dev);
+
+ mutex_lock(&pdata->lock);
+ dfl_feature_dev_use_end(pdata);
+ mutex_unlock(&pdata->lock);
+
cdev->released_port_num--;
put_dev_exit:
put_device(&port_pdev->dev);
diff --git a/drivers/fpga/dfl.h b/drivers/fpga/dfl.h
index 9f0e656de720..2f5d3052e36e 100644
--- a/drivers/fpga/dfl.h
+++ b/drivers/fpga/dfl.h
@@ -197,16 +197,16 @@ struct dfl_feature_driver {
* feature dev (platform device)'s resources.
* @ioaddr: mapped mmio resource address.
* @ops: ops of this sub feature.
+ * @priv: priv data of this feature.
*/
struct dfl_feature {
u64 id;
int resource_index;
void __iomem *ioaddr;
const struct dfl_feature_ops *ops;
+ void *priv;
};
-#define DEV_STATUS_IN_USE 0
-
#define FEATURE_DEV_ID_UNUSED (-1)
/**
@@ -219,8 +219,9 @@ struct dfl_feature {
* @dfl_cdev: ptr to container device.
* @id: id used for this feature device.
* @disable_count: count for port disable.
+ * @excl_open: set on feature device exclusive open.
+ * @open_count: count for feature device open.
* @num: number for sub features.
- * @dev_status: dev status (e.g. DEV_STATUS_IN_USE).
* @private: ptr to feature dev private data.
* @features: sub features of this feature dev.
*/
@@ -232,26 +233,46 @@ struct dfl_feature_platform_data {
struct dfl_fpga_cdev *dfl_cdev;
int id;
unsigned int disable_count;
- unsigned long dev_status;
+ bool excl_open;
+ int open_count;
void *private;
int num;
- struct dfl_feature features[0];
+ struct dfl_feature features[];
};
static inline
-int dfl_feature_dev_use_begin(struct dfl_feature_platform_data *pdata)
+int dfl_feature_dev_use_begin(struct dfl_feature_platform_data *pdata,
+ bool excl)
{
- /* Test and set IN_USE flags to ensure file is exclusively used */
- if (test_and_set_bit_lock(DEV_STATUS_IN_USE, &pdata->dev_status))
+ if (pdata->excl_open)
return -EBUSY;
+ if (excl) {
+ if (pdata->open_count)
+ return -EBUSY;
+
+ pdata->excl_open = true;
+ }
+ pdata->open_count++;
+
return 0;
}
static inline
void dfl_feature_dev_use_end(struct dfl_feature_platform_data *pdata)
{
- clear_bit_unlock(DEV_STATUS_IN_USE, &pdata->dev_status);
+ pdata->excl_open = false;
+
+ if (WARN_ON(pdata->open_count <= 0))
+ return;
+
+ pdata->open_count--;
+}
+
+static inline
+int dfl_feature_dev_use_count(struct dfl_feature_platform_data *pdata)
+{
+ return pdata->open_count;
}
static inline
diff --git a/drivers/fpga/ice40-spi.c b/drivers/fpga/ice40-spi.c
index 56e112e14a10..8d689fea0dab 100644
--- a/drivers/fpga/ice40-spi.c
+++ b/drivers/fpga/ice40-spi.c
@@ -46,10 +46,16 @@ static int ice40_fpga_ops_write_init(struct fpga_manager *mgr,
struct spi_message message;
struct spi_transfer assert_cs_then_reset_delay = {
.cs_change = 1,
- .delay_usecs = ICE40_SPI_RESET_DELAY
+ .delay = {
+ .value = ICE40_SPI_RESET_DELAY,
+ .unit = SPI_DELAY_UNIT_USECS
+ }
};
struct spi_transfer housekeeping_delay_then_release_cs = {
- .delay_usecs = ICE40_SPI_HOUSEKEEPING_DELAY
+ .delay = {
+ .value = ICE40_SPI_HOUSEKEEPING_DELAY,
+ .unit = SPI_DELAY_UNIT_USECS
+ }
};
int ret;
diff --git a/drivers/fpga/machxo2-spi.c b/drivers/fpga/machxo2-spi.c
index 4d8a87641587..b316369156fe 100644
--- a/drivers/fpga/machxo2-spi.c
+++ b/drivers/fpga/machxo2-spi.c
@@ -157,7 +157,8 @@ static int machxo2_cleanup(struct fpga_manager *mgr)
spi_message_init(&msg);
tx[1].tx_buf = &refresh;
tx[1].len = sizeof(refresh);
- tx[1].delay_usecs = MACHXO2_REFRESH_USEC;
+ tx[1].delay.value = MACHXO2_REFRESH_USEC;
+ tx[1].delay.unit = SPI_DELAY_UNIT_USECS;
spi_message_add_tail(&tx[1], &msg);
ret = spi_sync(spi, &msg);
if (ret)
@@ -208,7 +209,8 @@ static int machxo2_write_init(struct fpga_manager *mgr,
spi_message_init(&msg);
tx[0].tx_buf = &enable;
tx[0].len = sizeof(enable);
- tx[0].delay_usecs = MACHXO2_LOW_DELAY_USEC;
+ tx[0].delay.value = MACHXO2_LOW_DELAY_USEC;
+ tx[0].delay.unit = SPI_DELAY_UNIT_USECS;
spi_message_add_tail(&tx[0], &msg);
tx[1].tx_buf = &erase;
@@ -269,7 +271,8 @@ static int machxo2_write(struct fpga_manager *mgr, const char *buf,
spi_message_init(&msg);
tx.tx_buf = payload;
tx.len = MACHXO2_BUF_SIZE;
- tx.delay_usecs = MACHXO2_HIGH_DELAY_USEC;
+ tx.delay.value = MACHXO2_HIGH_DELAY_USEC;
+ tx.delay.unit = SPI_DELAY_UNIT_USECS;
spi_message_add_tail(&tx, &msg);
ret = spi_sync(spi, &msg);
if (ret) {
@@ -317,7 +320,8 @@ static int machxo2_write_complete(struct fpga_manager *mgr,
spi_message_init(&msg);
tx[1].tx_buf = &refresh;
tx[1].len = sizeof(refresh);
- tx[1].delay_usecs = MACHXO2_REFRESH_USEC;
+ tx[1].delay.value = MACHXO2_REFRESH_USEC;
+ tx[1].delay.unit = SPI_DELAY_UNIT_USECS;
spi_message_add_tail(&tx[1], &msg);
ret = spi_sync(spi, &msg);
if (ret)
diff --git a/drivers/fpga/stratix10-soc.c b/drivers/fpga/stratix10-soc.c
index 215d33789c74..44b7c569d4dc 100644
--- a/drivers/fpga/stratix10-soc.c
+++ b/drivers/fpga/stratix10-soc.c
@@ -154,11 +154,11 @@ static void s10_receive_callback(struct stratix10_svc_client *client,
* Here we set status bits as we receive them. Elsewhere, we always use
* test_and_clear_bit() to check status in priv->status
*/
- for (i = 0; i <= SVC_STATUS_RECONFIG_ERROR; i++)
+ for (i = 0; i <= SVC_STATUS_ERROR; i++)
if (status & (1 << i))
set_bit(i, &priv->status);
- if (status & BIT(SVC_STATUS_RECONFIG_BUFFER_DONE)) {
+ if (status & BIT(SVC_STATUS_BUFFER_DONE)) {
s10_unlock_bufs(priv, data->kaddr1);
s10_unlock_bufs(priv, data->kaddr2);
s10_unlock_bufs(priv, data->kaddr3);
@@ -209,8 +209,7 @@ static int s10_ops_write_init(struct fpga_manager *mgr,
}
ret = 0;
- if (!test_and_clear_bit(SVC_STATUS_RECONFIG_REQUEST_OK,
- &priv->status)) {
+ if (!test_and_clear_bit(SVC_STATUS_OK, &priv->status)) {
ret = -ETIMEDOUT;
goto init_done;
}
@@ -323,17 +322,15 @@ static int s10_ops_write(struct fpga_manager *mgr, const char *buf,
&priv->status_return_completion,
S10_BUFFER_TIMEOUT);
- if (test_and_clear_bit(SVC_STATUS_RECONFIG_BUFFER_DONE,
- &priv->status) ||
- test_and_clear_bit(SVC_STATUS_RECONFIG_BUFFER_SUBMITTED,
+ if (test_and_clear_bit(SVC_STATUS_BUFFER_DONE, &priv->status) ||
+ test_and_clear_bit(SVC_STATUS_BUFFER_SUBMITTED,
&priv->status)) {
ret = 0;
continue;
}
- if (test_and_clear_bit(SVC_STATUS_RECONFIG_ERROR,
- &priv->status)) {
- dev_err(dev, "ERROR - giving up - SVC_STATUS_RECONFIG_ERROR\n");
+ if (test_and_clear_bit(SVC_STATUS_ERROR, &priv->status)) {
+ dev_err(dev, "ERROR - giving up - SVC_STATUS_ERROR\n");
ret = -EFAULT;
break;
}
@@ -393,13 +390,11 @@ static int s10_ops_write_complete(struct fpga_manager *mgr,
timeout = ret;
ret = 0;
- if (test_and_clear_bit(SVC_STATUS_RECONFIG_COMPLETED,
- &priv->status))
+ if (test_and_clear_bit(SVC_STATUS_COMPLETED, &priv->status))
break;
- if (test_and_clear_bit(SVC_STATUS_RECONFIG_ERROR,
- &priv->status)) {
- dev_err(dev, "ERROR - giving up - SVC_STATUS_RECONFIG_ERROR\n");
+ if (test_and_clear_bit(SVC_STATUS_ERROR, &priv->status)) {
+ dev_err(dev, "ERROR - giving up - SVC_STATUS_ERROR\n");
ret = -EFAULT;
break;
}
@@ -482,7 +477,8 @@ static int s10_remove(struct platform_device *pdev)
}
static const struct of_device_id s10_of_match[] = {
- { .compatible = "intel,stratix10-soc-fpga-mgr", },
+ {.compatible = "intel,stratix10-soc-fpga-mgr"},
+ {.compatible = "intel,agilex-soc-fpga-mgr"},
{},
};
diff --git a/drivers/fpga/zynqmp-fpga.c b/drivers/fpga/zynqmp-fpga.c
index b8a88d21d038..4a1139e05280 100644
--- a/drivers/fpga/zynqmp-fpga.c
+++ b/drivers/fpga/zynqmp-fpga.c
@@ -40,16 +40,12 @@ static int zynqmp_fpga_ops_write_init(struct fpga_manager *mgr,
static int zynqmp_fpga_ops_write(struct fpga_manager *mgr,
const char *buf, size_t size)
{
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
struct zynqmp_fpga_priv *priv;
dma_addr_t dma_addr;
u32 eemi_flags = 0;
char *kbuf;
int ret;
- if (IS_ERR_OR_NULL(eemi_ops) || !eemi_ops->fpga_load)
- return -ENXIO;
-
priv = mgr->priv;
kbuf = dma_alloc_coherent(priv->dev, size, &dma_addr, GFP_KERNEL);
@@ -63,7 +59,7 @@ static int zynqmp_fpga_ops_write(struct fpga_manager *mgr,
if (priv->flags & FPGA_MGR_PARTIAL_RECONFIG)
eemi_flags |= XILINX_ZYNQMP_PM_FPGA_PARTIAL;
- ret = eemi_ops->fpga_load(dma_addr, size, eemi_flags);
+ ret = zynqmp_pm_fpga_load(dma_addr, size, eemi_flags);
dma_free_coherent(priv->dev, size, kbuf, dma_addr);
@@ -78,13 +74,9 @@ static int zynqmp_fpga_ops_write_complete(struct fpga_manager *mgr,
static enum fpga_mgr_states zynqmp_fpga_ops_state(struct fpga_manager *mgr)
{
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
- u32 status;
-
- if (IS_ERR_OR_NULL(eemi_ops) || !eemi_ops->fpga_get_status)
- return FPGA_MGR_STATE_UNKNOWN;
+ u32 status = 0;
- eemi_ops->fpga_get_status(&status);
+ zynqmp_pm_fpga_get_status(&status);
if (status & IXR_FPGA_DONE_MASK)
return FPGA_MGR_STATE_OPERATING;
diff --git a/drivers/gnss/serial.h b/drivers/gnss/serial.h
index 980ffdc86c2a..621953f7821d 100644
--- a/drivers/gnss/serial.h
+++ b/drivers/gnss/serial.h
@@ -16,7 +16,7 @@ struct gnss_serial {
struct gnss_device *gdev;
speed_t speed;
const struct gnss_serial_ops *ops;
- unsigned long drvdata[0];
+ unsigned long drvdata[];
};
enum gnss_serial_pm_state {
diff --git a/drivers/gnss/sirf.c b/drivers/gnss/sirf.c
index effed3a8d398..2ecb1d3e8eeb 100644
--- a/drivers/gnss/sirf.c
+++ b/drivers/gnss/sirf.c
@@ -439,14 +439,18 @@ static int sirf_probe(struct serdev_device *serdev)
data->on_off = devm_gpiod_get_optional(dev, "sirf,onoff",
GPIOD_OUT_LOW);
- if (IS_ERR(data->on_off))
+ if (IS_ERR(data->on_off)) {
+ ret = PTR_ERR(data->on_off);
goto err_put_device;
+ }
if (data->on_off) {
data->wakeup = devm_gpiod_get_optional(dev, "sirf,wakeup",
GPIOD_IN);
- if (IS_ERR(data->wakeup))
+ if (IS_ERR(data->wakeup)) {
+ ret = PTR_ERR(data->wakeup);
goto err_put_device;
+ }
ret = regulator_enable(data->vcc);
if (ret)
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 1b96169d84f7..bcacd9c74aa8 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -73,6 +73,10 @@ config GPIO_GENERIC
depends on HAS_IOMEM # Only for IOMEM drivers
tristate
+config GPIO_REGMAP
+ depends on REGMAP
+ tristate
+
# put drivers in the right section, in alphabetical order
# This symbol is selected by both I2C and SPI expanders
@@ -422,7 +426,7 @@ config GPIO_OMAP
Say yes here to enable GPIO support for TI OMAP SoCs.
config GPIO_PL061
- bool "PrimeCell PL061 GPIO support"
+ tristate "PrimeCell PL061 GPIO support"
depends on ARM_AMBA
select IRQ_DOMAIN
select GPIOLIB_IRQCHIP
@@ -439,7 +443,7 @@ config GPIO_PMIC_EIC_SPRD
config GPIO_PXA
bool "PXA GPIO support"
- depends on ARCH_PXA || ARCH_MMP
+ depends on ARCH_PXA || ARCH_MMP || COMPILE_TEST
help
Say yes here to support the PXA GPIO device
@@ -638,7 +642,7 @@ config GPIO_XGENE
config GPIO_XGENE_SB
tristate "APM X-Gene GPIO standby controller support"
- depends on ARCH_XGENE && OF_GPIO
+ depends on (ARCH_XGENE || COMPILE_TEST)
select GPIO_GENERIC
select GPIOLIB_IRQCHIP
select IRQ_DOMAIN_HIERARCHY
@@ -952,7 +956,7 @@ config GPIO_PCA953X
config GPIO_PCA953X_IRQ
bool "Interrupt controller support for PCA953x"
- depends on GPIO_PCA953X=y
+ depends on GPIO_PCA953X
select GPIOLIB_IRQCHIP
help
Say yes here to enable the pca953x to be used as an interrupt
@@ -1541,6 +1545,18 @@ config GPIO_VIPERBOARD
endmenu
+config GPIO_AGGREGATOR
+ tristate "GPIO Aggregator"
+ help
+ Say yes here to enable the GPIO Aggregator, which provides a way to
+ aggregate existing GPIO lines into a new virtual GPIO chip.
+ This can serve the following purposes:
+ - Assign permissions for a collection of GPIO lines to a user,
+ - Export a collection of GPIO lines to a virtual machine,
+ - Provide a generic driver for a GPIO-operated device in an
+ industrial control context, to be operated from userspace using
+ the GPIO chardev interface.
+
config GPIO_MOCKUP
tristate "GPIO Testing Driver"
select IRQ_SIM
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index b2cfc21a97f3..1e4894e0bf0f 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_GPIO_SYSFS) += gpiolib-sysfs.o
obj-$(CONFIG_GPIO_ACPI) += gpiolib-acpi.o
# Device drivers. Generally keep list sorted alphabetically
+obj-$(CONFIG_GPIO_REGMAP) += gpio-regmap.o
obj-$(CONFIG_GPIO_GENERIC) += gpio-generic.o
# directly supported by gpio-generic
@@ -25,6 +26,7 @@ obj-$(CONFIG_GPIO_74XX_MMIO) += gpio-74xx-mmio.o
obj-$(CONFIG_GPIO_ADNP) += gpio-adnp.o
obj-$(CONFIG_GPIO_ADP5520) += gpio-adp5520.o
obj-$(CONFIG_GPIO_ADP5588) += gpio-adp5588.o
+obj-$(CONFIG_GPIO_AGGREGATOR) += gpio-aggregator.o
obj-$(CONFIG_GPIO_ALTERA_A10SR) += gpio-altera-a10sr.o
obj-$(CONFIG_GPIO_ALTERA) += gpio-altera.o
obj-$(CONFIG_GPIO_AMD8111) += gpio-amd8111.o
diff --git a/drivers/gpio/TODO b/drivers/gpio/TODO
index 3a44e6ae52bd..b989c9352da2 100644
--- a/drivers/gpio/TODO
+++ b/drivers/gpio/TODO
@@ -99,6 +99,10 @@ similar and probe a proper driver in the gpiolib subsystem.
In some cases it makes sense to create a GPIO chip from the local driver
for a few GPIOs. Those should stay where they are.
+At the same time it makes sense to get rid of code duplication in existing and
+newly added drivers. For example, gpio-ml-ioh should be incorporated into
+gpio-pch. In a similar way, gpio-intel-mid should be folded into gpio-pxa.
+
Generic MMIO GPIO
diff --git a/drivers/gpio/gpio-aggregator.c b/drivers/gpio/gpio-aggregator.c
new file mode 100644
index 000000000000..9b0adbdddbfc
--- /dev/null
+++ b/drivers/gpio/gpio-aggregator.c
@@ -0,0 +1,568 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// GPIO Aggregator
+//
+// Copyright (C) 2019-2020 Glider bv
+
+#define DRV_NAME "gpio-aggregator"
+#define pr_fmt(fmt) DRV_NAME ": " fmt
+
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/ctype.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio/machine.h>
+#include <linux/idr.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/overflow.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+
+
+/*
+ * GPIO Aggregator sysfs interface
+ */
+
+struct gpio_aggregator {
+ struct gpiod_lookup_table *lookups;
+ struct platform_device *pdev;
+ char args[];
+};
+
+static DEFINE_MUTEX(gpio_aggregator_lock); /* protects idr */
+static DEFINE_IDR(gpio_aggregator_idr);
+
+static char *get_arg(char **args)
+{
+ char *start = *args, *end;
+
+ start = skip_spaces(start);
+ if (!*start)
+ return NULL;
+
+ if (*start == '"') {
+ /* Quoted arg */
+ end = strchr(++start, '"');
+ if (!end)
+ return ERR_PTR(-EINVAL);
+ } else {
+ /* Unquoted arg */
+ for (end = start; *end && !isspace(*end); end++) ;
+ }
+
+ if (*end)
+ *end++ = '\0';
+
+ *args = end;
+ return start;
+}
+
+static bool isrange(const char *s)
+{
+ size_t n;
+
+ if (IS_ERR_OR_NULL(s))
+ return false;
+
+ while (1) {
+ n = strspn(s, "0123456789");
+ if (!n)
+ return false;
+
+ s += n;
+
+ switch (*s++) {
+ case '\0':
+ return true;
+
+ case '-':
+ case ',':
+ break;
+
+ default:
+ return false;
+ }
+ }
+}
+
+static int aggr_add_gpio(struct gpio_aggregator *aggr, const char *key,
+ int hwnum, unsigned int *n)
+{
+ struct gpiod_lookup_table *lookups;
+
+ lookups = krealloc(aggr->lookups, struct_size(lookups, table, *n + 2),
+ GFP_KERNEL);
+ if (!lookups)
+ return -ENOMEM;
+
+ lookups->table[*n] =
+ (struct gpiod_lookup)GPIO_LOOKUP_IDX(key, hwnum, NULL, *n, 0);
+
+ (*n)++;
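+ /* Keep the lookup table terminated by an all-zero sentinel entry. */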
+ memset(&lookups->table[*n], 0, sizeof(lookups->table[*n]));
+
+ aggr->lookups = lookups;
+ return 0;
+}
+
+static int aggr_parse(struct gpio_aggregator *aggr)
+{
+ unsigned int first_index, last_index, i, n = 0;
+ char *name, *offsets, *first, *last, *next;
+ char *args = aggr->args;
+ int error;
+
+ for (name = get_arg(&args), offsets = get_arg(&args); name;
+ offsets = get_arg(&args)) {
+ if (IS_ERR(name)) {
+ pr_err("Cannot get GPIO specifier: %pe\n", name);
+ return PTR_ERR(name);
+ }
+
+ if (!isrange(offsets)) {
+ /* Named GPIO line */
+ error = aggr_add_gpio(aggr, name, U16_MAX, &n);
+ if (error)
+ return error;
+
+ name = offsets;
+ continue;
+ }
+
+ /* GPIO chip + offset(s) */
+ for (first = offsets; *first; first = next) {
+ next = strchrnul(first, ',');
+ if (*next)
+ *next++ = '\0';
+
+ last = strchr(first, '-');
+ if (last)
+ *last++ = '\0';
+
+ if (kstrtouint(first, 10, &first_index)) {
+ pr_err("Cannot parse GPIO index %s\n", first);
+ return -EINVAL;
+ }
+
+ if (!last) {
+ last_index = first_index;
+ } else if (kstrtouint(last, 10, &last_index)) {
+ pr_err("Cannot parse GPIO index %s\n", last);
+ return -EINVAL;
+ }
+
+ for (i = first_index; i <= last_index; i++) {
+ error = aggr_add_gpio(aggr, name, i, &n);
+ if (error)
+ return error;
+ }
+ }
+
+ name = get_arg(&args);
+ }
+
+ if (!n) {
+ pr_err("No GPIOs specified\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static ssize_t new_device_store(struct device_driver *driver, const char *buf,
+ size_t count)
+{
+ struct gpio_aggregator *aggr;
+ struct platform_device *pdev;
+ int res, id;
+
+ /* kernfs guarantees string termination, so count + 1 is safe */
+ aggr = kzalloc(sizeof(*aggr) + count + 1, GFP_KERNEL);
+ if (!aggr)
+ return -ENOMEM;
+
+ memcpy(aggr->args, buf, count + 1);
+
+ aggr->lookups = kzalloc(struct_size(aggr->lookups, table, 1),
+ GFP_KERNEL);
+ if (!aggr->lookups) {
+ res = -ENOMEM;
+ goto free_ga;
+ }
+
+ mutex_lock(&gpio_aggregator_lock);
+ id = idr_alloc(&gpio_aggregator_idr, aggr, 0, 0, GFP_KERNEL);
+ mutex_unlock(&gpio_aggregator_lock);
+
+ if (id < 0) {
+ res = id;
+ goto free_table;
+ }
+
+ aggr->lookups->dev_id = kasprintf(GFP_KERNEL, "%s.%d", DRV_NAME, id);
+ if (!aggr->lookups->dev_id) {
+ res = -ENOMEM;
+ goto remove_idr;
+ }
+
+ res = aggr_parse(aggr);
+ if (res)
+ goto free_dev_id;
+
+ gpiod_add_lookup_table(aggr->lookups);
+
+ pdev = platform_device_register_simple(DRV_NAME, id, NULL, 0);
+ if (IS_ERR(pdev)) {
+ res = PTR_ERR(pdev);
+ goto remove_table;
+ }
+
+ aggr->pdev = pdev;
+ return count;
+
+remove_table:
+ gpiod_remove_lookup_table(aggr->lookups);
+free_dev_id:
+ kfree(aggr->lookups->dev_id);
+remove_idr:
+ mutex_lock(&gpio_aggregator_lock);
+ idr_remove(&gpio_aggregator_idr, id);
+ mutex_unlock(&gpio_aggregator_lock);
+free_table:
+ kfree(aggr->lookups);
+free_ga:
+ kfree(aggr);
+ return res;
+}
+
+static DRIVER_ATTR_WO(new_device);
+
+static void gpio_aggregator_free(struct gpio_aggregator *aggr)
+{
+ platform_device_unregister(aggr->pdev);
+ gpiod_remove_lookup_table(aggr->lookups);
+ kfree(aggr->lookups->dev_id);
+ kfree(aggr->lookups);
+ kfree(aggr);
+}
+
+static ssize_t delete_device_store(struct device_driver *driver,
+ const char *buf, size_t count)
+{
+ struct gpio_aggregator *aggr;
+ unsigned int id;
+ int error;
+
+ if (!str_has_prefix(buf, DRV_NAME "."))
+ return -EINVAL;
+
+ error = kstrtouint(buf + strlen(DRV_NAME "."), 10, &id);
+ if (error)
+ return error;
+
+ mutex_lock(&gpio_aggregator_lock);
+ aggr = idr_remove(&gpio_aggregator_idr, id);
+ mutex_unlock(&gpio_aggregator_lock);
+ if (!aggr)
+ return -ENOENT;
+
+ gpio_aggregator_free(aggr);
+ return count;
+}
+static DRIVER_ATTR_WO(delete_device);
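+
+/*
+ * Usage sketch (the chip label and offsets below are illustrative only):
+ *
+ *   echo 'e6052000.gpio 19,20-21' \
+ *           > /sys/bus/platform/drivers/gpio-aggregator/new_device
+ *   echo 'gpio-aggregator.0' \
+ *           > /sys/bus/platform/drivers/gpio-aggregator/delete_device
+ */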
+
+static struct attribute *gpio_aggregator_attrs[] = {
+ &driver_attr_new_device.attr,
+ &driver_attr_delete_device.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(gpio_aggregator);
+
+static int __exit gpio_aggregator_idr_remove(int id, void *p, void *data)
+{
+ gpio_aggregator_free(p);
+ return 0;
+}
+
+static void __exit gpio_aggregator_remove_all(void)
+{
+ mutex_lock(&gpio_aggregator_lock);
+ idr_for_each(&gpio_aggregator_idr, gpio_aggregator_idr_remove, NULL);
+ idr_destroy(&gpio_aggregator_idr);
+ mutex_unlock(&gpio_aggregator_lock);
+}
+
+
+/*
+ * GPIO Forwarder
+ */
+
+struct gpiochip_fwd {
+ struct gpio_chip chip;
+ struct gpio_desc **descs;
+ union {
+ struct mutex mlock; /* protects tmp[] if can_sleep */
+ spinlock_t slock; /* protects tmp[] if !can_sleep */
+ };
+ unsigned long tmp[]; /* values and descs for multiple ops */
+};
+
+static int gpio_fwd_get_direction(struct gpio_chip *chip, unsigned int offset)
+{
+ struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
+
+ return gpiod_get_direction(fwd->descs[offset]);
+}
+
+static int gpio_fwd_direction_input(struct gpio_chip *chip, unsigned int offset)
+{
+ struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
+
+ return gpiod_direction_input(fwd->descs[offset]);
+}
+
+static int gpio_fwd_direction_output(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
+
+ return gpiod_direction_output(fwd->descs[offset], value);
+}
+
+static int gpio_fwd_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
+
+ return gpiod_get_value(fwd->descs[offset]);
+}
+
+static int gpio_fwd_get_multiple(struct gpio_chip *chip, unsigned long *mask,
+ unsigned long *bits)
+{
+ struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
+ unsigned long *values, flags = 0;
+ struct gpio_desc **descs;
+ unsigned int i, j = 0;
+ int error;
+
+ if (chip->can_sleep)
+ mutex_lock(&fwd->mlock);
+ else
+ spin_lock_irqsave(&fwd->slock, flags);
+
+ /* Both values bitmap and desc pointers are stored in tmp[] */
+ values = &fwd->tmp[0];
+ descs = (void *)&fwd->tmp[BITS_TO_LONGS(fwd->chip.ngpio)];
+
+ bitmap_clear(values, 0, fwd->chip.ngpio);
+ for_each_set_bit(i, mask, fwd->chip.ngpio)
+ descs[j++] = fwd->descs[i];
+
+ error = gpiod_get_array_value(j, descs, NULL, values);
+ if (!error) {
+ j = 0;
+ for_each_set_bit(i, mask, fwd->chip.ngpio)
+ __assign_bit(i, bits, test_bit(j++, values));
+ }
+
+ if (chip->can_sleep)
+ mutex_unlock(&fwd->mlock);
+ else
+ spin_unlock_irqrestore(&fwd->slock, flags);
+
+ return error;
+}
+
+static void gpio_fwd_set(struct gpio_chip *chip, unsigned int offset, int value)
+{
+ struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
+
+ gpiod_set_value(fwd->descs[offset], value);
+}
+
+static void gpio_fwd_set_multiple(struct gpio_chip *chip, unsigned long *mask,
+ unsigned long *bits)
+{
+ struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
+ unsigned long *values, flags = 0;
+ struct gpio_desc **descs;
+ unsigned int i, j = 0;
+
+ if (chip->can_sleep)
+ mutex_lock(&fwd->mlock);
+ else
+ spin_lock_irqsave(&fwd->slock, flags);
+
+ /* Both values bitmap and desc pointers are stored in tmp[] */
+ values = &fwd->tmp[0];
+ descs = (void *)&fwd->tmp[BITS_TO_LONGS(fwd->chip.ngpio)];
+
+ for_each_set_bit(i, mask, fwd->chip.ngpio) {
+ __assign_bit(j, values, test_bit(i, bits));
+ descs[j++] = fwd->descs[i];
+ }
+
+ gpiod_set_array_value(j, descs, NULL, values);
+
+ if (chip->can_sleep)
+ mutex_unlock(&fwd->mlock);
+ else
+ spin_unlock_irqrestore(&fwd->slock, flags);
+}
+
+static int gpio_fwd_set_config(struct gpio_chip *chip, unsigned int offset,
+ unsigned long config)
+{
+ struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
+
+ return gpiod_set_config(fwd->descs[offset], config);
+}
+
+/**
+ * gpiochip_fwd_create() - Create a new GPIO forwarder
+ * @dev: Parent device pointer
+ * @ngpios: Number of GPIOs in the forwarder.
+ * @descs: Array containing the GPIO descriptors to forward to.
+ * This array must contain @ngpios entries, and must not be deallocated
+ * before the forwarder has been destroyed again.
+ *
+ * This function creates a new gpiochip, which forwards all GPIO operations to
+ * the passed GPIO descriptors.
+ *
+ * Return: An opaque object pointer, or an ERR_PTR()-encoded negative error
+ * code on failure.
+ */
+static struct gpiochip_fwd *gpiochip_fwd_create(struct device *dev,
+ unsigned int ngpios,
+ struct gpio_desc *descs[])
+{
+ const char *label = dev_name(dev);
+ struct gpiochip_fwd *fwd;
+ struct gpio_chip *chip;
+ unsigned int i;
+ int error;
+
+ fwd = devm_kzalloc(dev, struct_size(fwd, tmp,
+ BITS_TO_LONGS(ngpios) + ngpios), GFP_KERNEL);
+ if (!fwd)
+ return ERR_PTR(-ENOMEM);
+
+ chip = &fwd->chip;
+
+ /*
+ * If any of the GPIO lines are sleeping, then the entire forwarder
+ * will be sleeping.
+ * If any of the chips support .set_config(), then the forwarder will
+ * support setting configs.
+ */
+ for (i = 0; i < ngpios; i++) {
+ struct gpio_chip *parent = gpiod_to_chip(descs[i]);
+
+ dev_dbg(dev, "%u => gpio-%d\n", i, desc_to_gpio(descs[i]));
+
+ if (gpiod_cansleep(descs[i]))
+ chip->can_sleep = true;
+ if (parent && parent->set_config)
+ chip->set_config = gpio_fwd_set_config;
+ }
+
+ chip->label = label;
+ chip->parent = dev;
+ chip->owner = THIS_MODULE;
+ chip->get_direction = gpio_fwd_get_direction;
+ chip->direction_input = gpio_fwd_direction_input;
+ chip->direction_output = gpio_fwd_direction_output;
+ chip->get = gpio_fwd_get;
+ chip->get_multiple = gpio_fwd_get_multiple;
+ chip->set = gpio_fwd_set;
+ chip->set_multiple = gpio_fwd_set_multiple;
+ chip->base = -1;
+ chip->ngpio = ngpios;
+ fwd->descs = descs;
+
+ if (chip->can_sleep)
+ mutex_init(&fwd->mlock);
+ else
+ spin_lock_init(&fwd->slock);
+
+ error = devm_gpiochip_add_data(dev, chip, fwd);
+ if (error)
+ return ERR_PTR(error);
+
+ return fwd;
+}
+
+
+/*
+ * GPIO Aggregator platform device
+ */
+
+static int gpio_aggregator_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct gpio_desc **descs;
+ struct gpiochip_fwd *fwd;
+ int i, n;
+
+ n = gpiod_count(dev, NULL);
+ if (n < 0)
+ return n;
+
+ descs = devm_kmalloc_array(dev, n, sizeof(*descs), GFP_KERNEL);
+ if (!descs)
+ return -ENOMEM;
+
+ for (i = 0; i < n; i++) {
+ descs[i] = devm_gpiod_get_index(dev, NULL, i, GPIOD_ASIS);
+ if (IS_ERR(descs[i]))
+ return PTR_ERR(descs[i]);
+ }
+
+ fwd = gpiochip_fwd_create(dev, n, descs);
+ if (IS_ERR(fwd))
+ return PTR_ERR(fwd);
+
+ platform_set_drvdata(pdev, fwd);
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id gpio_aggregator_dt_ids[] = {
+ /*
+ * Add GPIO-operated devices controlled from userspace below,
+ * or use "driver_override" in sysfs
+ */
+ {},
+};
+MODULE_DEVICE_TABLE(of, gpio_aggregator_dt_ids);
+#endif
+
+static struct platform_driver gpio_aggregator_driver = {
+ .probe = gpio_aggregator_probe,
+ .driver = {
+ .name = DRV_NAME,
+ .groups = gpio_aggregator_groups,
+ .of_match_table = of_match_ptr(gpio_aggregator_dt_ids),
+ },
+};
+
+static int __init gpio_aggregator_init(void)
+{
+ return platform_driver_register(&gpio_aggregator_driver);
+}
+module_init(gpio_aggregator_init);
+
+static void __exit gpio_aggregator_exit(void)
+{
+ gpio_aggregator_remove_all();
+ platform_driver_unregister(&gpio_aggregator_driver);
+}
+module_exit(gpio_aggregator_exit);
+
+MODULE_AUTHOR("Geert Uytterhoeven <geert+renesas@glider.be>");
+MODULE_DESCRIPTION("GPIO Aggregator");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c
index 92e127e74813..1d8d55bd63aa 100644
--- a/drivers/gpio/gpio-dwapb.c
+++ b/drivers/gpio/gpio-dwapb.c
@@ -49,7 +49,9 @@
#define GPIO_EXT_PORTC 0x58
#define GPIO_EXT_PORTD 0x5c
+#define DWAPB_DRIVER_NAME "gpio-dwapb"
#define DWAPB_MAX_PORTS 4
+
#define GPIO_EXT_PORT_STRIDE 0x04 /* register stride 32 bits */
#define GPIO_SWPORT_DR_STRIDE 0x0c /* register stride 3*32 bits */
#define GPIO_SWPORT_DDR_STRIDE 0x0c /* register stride 3*32 bits */
@@ -62,6 +64,8 @@
#define GPIO_INTSTATUS_V2 0x3c
#define GPIO_PORTA_EOI_V2 0x40
+#define DWAPB_NR_CLOCKS 2
+
struct dwapb_gpio;
#ifdef CONFIG_PM_SLEEP
@@ -97,7 +101,7 @@ struct dwapb_gpio {
struct irq_domain *domain;
unsigned int flags;
struct reset_control *rst;
- struct clk *clk;
+ struct clk_bulk_data clks[DWAPB_NR_CLOCKS];
};
static inline u32 gpio_reg_v2_convert(unsigned int offset)
@@ -189,22 +193,21 @@ static void dwapb_toggle_trigger(struct dwapb_gpio *gpio, unsigned int offs)
static u32 dwapb_do_irq(struct dwapb_gpio *gpio)
{
- u32 irq_status = dwapb_read(gpio, GPIO_INTSTATUS);
- u32 ret = irq_status;
+ unsigned long irq_status;
+ irq_hw_number_t hwirq;
- while (irq_status) {
- int hwirq = fls(irq_status) - 1;
+ irq_status = dwapb_read(gpio, GPIO_INTSTATUS);
+ for_each_set_bit(hwirq, &irq_status, 32) {
int gpio_irq = irq_find_mapping(gpio->domain, hwirq);
+ u32 irq_type = irq_get_trigger_type(gpio_irq);
generic_handle_irq(gpio_irq);
- irq_status &= ~BIT(hwirq);
- if ((irq_get_trigger_type(gpio_irq) & IRQ_TYPE_SENSE_MASK)
- == IRQ_TYPE_EDGE_BOTH)
+ if ((irq_type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
dwapb_toggle_trigger(gpio, hwirq);
}
- return ret;
+ return irq_status;
}
static void dwapb_irq_handler(struct irq_desc *desc)
@@ -212,10 +215,9 @@ static void dwapb_irq_handler(struct irq_desc *desc)
struct dwapb_gpio *gpio = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
+ chained_irq_enter(chip, desc);
dwapb_do_irq(gpio);
-
- if (chip->irq_eoi)
- chip->irq_eoi(irq_desc_get_irq_data(desc));
+ chained_irq_exit(chip, desc);
}
static void dwapb_irq_enable(struct irq_data *d)
@@ -228,7 +230,7 @@ static void dwapb_irq_enable(struct irq_data *d)
spin_lock_irqsave(&gc->bgpio_lock, flags);
val = dwapb_read(gpio, GPIO_INTEN);
- val |= BIT(d->hwirq);
+ val |= BIT(irqd_to_hwirq(d));
dwapb_write(gpio, GPIO_INTEN, val);
spin_unlock_irqrestore(&gc->bgpio_lock, flags);
}
@@ -243,46 +245,20 @@ static void dwapb_irq_disable(struct irq_data *d)
spin_lock_irqsave(&gc->bgpio_lock, flags);
val = dwapb_read(gpio, GPIO_INTEN);
- val &= ~BIT(d->hwirq);
+ val &= ~BIT(irqd_to_hwirq(d));
dwapb_write(gpio, GPIO_INTEN, val);
spin_unlock_irqrestore(&gc->bgpio_lock, flags);
}
-static int dwapb_irq_reqres(struct irq_data *d)
-{
- struct irq_chip_generic *igc = irq_data_get_irq_chip_data(d);
- struct dwapb_gpio *gpio = igc->private;
- struct gpio_chip *gc = &gpio->ports[0].gc;
- int ret;
-
- ret = gpiochip_lock_as_irq(gc, irqd_to_hwirq(d));
- if (ret) {
- dev_err(gpio->dev, "unable to lock HW IRQ %lu for IRQ\n",
- irqd_to_hwirq(d));
- return ret;
- }
- return 0;
-}
-
-static void dwapb_irq_relres(struct irq_data *d)
-{
- struct irq_chip_generic *igc = irq_data_get_irq_chip_data(d);
- struct dwapb_gpio *gpio = igc->private;
- struct gpio_chip *gc = &gpio->ports[0].gc;
-
- gpiochip_unlock_as_irq(gc, irqd_to_hwirq(d));
-}
-
static int dwapb_irq_set_type(struct irq_data *d, u32 type)
{
struct irq_chip_generic *igc = irq_data_get_irq_chip_data(d);
struct dwapb_gpio *gpio = igc->private;
struct gpio_chip *gc = &gpio->ports[0].gc;
- int bit = d->hwirq;
+ irq_hw_number_t bit = irqd_to_hwirq(d);
unsigned long level, polarity, flags;
- if (type & ~(IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING |
- IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW))
+ if (type & ~IRQ_TYPE_SENSE_MASK)
return -EINVAL;
spin_lock_irqsave(&gc->bgpio_lock, flags);
@@ -328,11 +304,12 @@ static int dwapb_irq_set_wake(struct irq_data *d, unsigned int enable)
struct irq_chip_generic *igc = irq_data_get_irq_chip_data(d);
struct dwapb_gpio *gpio = igc->private;
struct dwapb_context *ctx = gpio->ports[0].ctx;
+ irq_hw_number_t bit = irqd_to_hwirq(d);
if (enable)
- ctx->wake_en |= BIT(d->hwirq);
+ ctx->wake_en |= BIT(bit);
else
- ctx->wake_en &= ~BIT(d->hwirq);
+ ctx->wake_en &= ~BIT(bit);
return 0;
}
@@ -350,9 +327,10 @@ static int dwapb_gpio_set_debounce(struct gpio_chip *gc,
val_deb = dwapb_read(gpio, GPIO_PORTA_DEBOUNCE);
if (debounce)
- dwapb_write(gpio, GPIO_PORTA_DEBOUNCE, val_deb | mask);
+ val_deb |= mask;
else
- dwapb_write(gpio, GPIO_PORTA_DEBOUNCE, val_deb & ~mask);
+ val_deb &= ~mask;
+ dwapb_write(gpio, GPIO_PORTA_DEBOUNCE, val_deb);
spin_unlock_irqrestore(&gc->bgpio_lock, flags);
@@ -373,12 +351,7 @@ static int dwapb_gpio_set_config(struct gpio_chip *gc, unsigned offset,
static irqreturn_t dwapb_irq_handler_mfd(int irq, void *dev_id)
{
- u32 worked;
- struct dwapb_gpio *gpio = dev_id;
-
- worked = dwapb_do_irq(gpio);
-
- return worked ? IRQ_HANDLED : IRQ_NONE;
+ return IRQ_RETVAL(dwapb_do_irq(dev_id));
}
static void dwapb_configure_irqs(struct dwapb_gpio *gpio,
@@ -388,17 +361,23 @@ static void dwapb_configure_irqs(struct dwapb_gpio *gpio,
struct gpio_chip *gc = &port->gc;
struct fwnode_handle *fwnode = pp->fwnode;
struct irq_chip_generic *irq_gc = NULL;
- unsigned int hwirq, ngpio = gc->ngpio;
+ unsigned int ngpio = gc->ngpio;
struct irq_chip_type *ct;
+ irq_hw_number_t hwirq;
int err, i;
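+ /* Bail out if no IRQ was specified for any line of this port. */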
+ if (memchr_inv(pp->irq, 0, sizeof(pp->irq)) == NULL) {
+ dev_warn(gpio->dev, "no IRQ for port%d\n", pp->idx);
+ return;
+ }
+
gpio->domain = irq_domain_create_linear(fwnode, ngpio,
&irq_generic_chip_ops, gpio);
if (!gpio->domain)
return;
err = irq_alloc_domain_generic_chips(gpio->domain, ngpio, 2,
- "gpio-dwapb", handle_level_irq,
+ DWAPB_DRIVER_NAME, handle_bad_irq,
IRQ_NOREQUEST, 0,
IRQ_GC_INIT_NESTED_LOCK);
if (err) {
@@ -426,8 +405,6 @@ static void dwapb_configure_irqs(struct dwapb_gpio *gpio,
ct->chip.irq_set_type = dwapb_irq_set_type;
ct->chip.irq_enable = dwapb_irq_enable;
ct->chip.irq_disable = dwapb_irq_disable;
- ct->chip.irq_request_resources = dwapb_irq_reqres;
- ct->chip.irq_release_resources = dwapb_irq_relres;
#ifdef CONFIG_PM_SLEEP
ct->chip.irq_set_wake = dwapb_irq_set_wake;
#endif
@@ -437,6 +414,7 @@ static void dwapb_configure_irqs(struct dwapb_gpio *gpio,
}
irq_gc->chip_types[0].type = IRQ_TYPE_LEVEL_MASK;
+ irq_gc->chip_types[0].handler = handle_level_irq;
irq_gc->chip_types[1].type = IRQ_TYPE_EDGE_BOTH;
irq_gc->chip_types[1].handler = handle_edge_irq;
@@ -444,7 +422,7 @@ static void dwapb_configure_irqs(struct dwapb_gpio *gpio,
int i;
for (i = 0; i < pp->ngpio; i++) {
- if (pp->irq[i] >= 0)
+ if (pp->irq[i])
irq_set_chained_handler_and_data(pp->irq[i],
dwapb_irq_handler, gpio);
}
@@ -455,7 +433,7 @@ static void dwapb_configure_irqs(struct dwapb_gpio *gpio,
*/
err = devm_request_irq(gpio->dev, pp->irq[0],
dwapb_irq_handler_mfd,
- IRQF_SHARED, "gpio-dwapb-mfd", gpio);
+ IRQF_SHARED, DWAPB_DRIVER_NAME, gpio);
if (err) {
dev_err(gpio->dev, "error requesting IRQ\n");
irq_domain_remove(gpio->domain);
@@ -464,7 +442,7 @@ static void dwapb_configure_irqs(struct dwapb_gpio *gpio,
}
}
- for (hwirq = 0 ; hwirq < ngpio ; hwirq++)
+ for (hwirq = 0; hwirq < ngpio; hwirq++)
irq_create_mapping(gpio->domain, hwirq);
port->gc.to_irq = dwapb_gpio_to_irq;
@@ -480,7 +458,7 @@ static void dwapb_irq_teardown(struct dwapb_gpio *gpio)
if (!gpio->domain)
return;
- for (hwirq = 0 ; hwirq < ngpio ; hwirq++)
+ for (hwirq = 0; hwirq < ngpio; hwirq++)
irq_dispose_mapping(irq_find_mapping(gpio->domain, hwirq));
irq_domain_remove(gpio->domain);
@@ -505,10 +483,9 @@ static int dwapb_gpio_add_port(struct dwapb_gpio *gpio,
return -ENOMEM;
#endif
- dat = gpio->regs + GPIO_EXT_PORTA + (pp->idx * GPIO_EXT_PORT_STRIDE);
- set = gpio->regs + GPIO_SWPORTA_DR + (pp->idx * GPIO_SWPORT_DR_STRIDE);
- dirout = gpio->regs + GPIO_SWPORTA_DDR +
- (pp->idx * GPIO_SWPORT_DDR_STRIDE);
+ dat = gpio->regs + GPIO_EXT_PORTA + pp->idx * GPIO_EXT_PORT_STRIDE;
+ set = gpio->regs + GPIO_SWPORTA_DR + pp->idx * GPIO_SWPORT_DR_STRIDE;
+ dirout = gpio->regs + GPIO_SWPORTA_DDR + pp->idx * GPIO_SWPORT_DDR_STRIDE;
/* This registers 32 GPIO lines per port */
err = bgpio_init(&port->gc, gpio->dev, 4, dat, set, NULL, dirout,
@@ -529,40 +506,66 @@ static int dwapb_gpio_add_port(struct dwapb_gpio *gpio,
if (pp->idx == 0)
port->gc.set_config = dwapb_gpio_set_config;
- if (pp->has_irq)
+ /* Only port A can provide interrupts in all configurations of the IP */
+ if (pp->idx == 0)
dwapb_configure_irqs(gpio, port, pp);
err = gpiochip_add_data(&port->gc, port);
- if (err)
+ if (err) {
dev_err(gpio->dev, "failed to register gpiochip for port%d\n",
port->idx);
- else
- port->is_registered = true;
+ return err;
+ }
/* Add GPIO-signaled ACPI event support */
- if (pp->has_irq)
- acpi_gpiochip_request_interrupts(&port->gc);
+ acpi_gpiochip_request_interrupts(&port->gc);
- return err;
+ port->is_registered = true;
+
+ return 0;
}
static void dwapb_gpio_unregister(struct dwapb_gpio *gpio)
{
unsigned int m;
- for (m = 0; m < gpio->nr_ports; ++m)
- if (gpio->ports[m].is_registered)
- gpiochip_remove(&gpio->ports[m].gc);
+ for (m = 0; m < gpio->nr_ports; ++m) {
+ struct dwapb_gpio_port *port = &gpio->ports[m];
+
+ if (!port->is_registered)
+ continue;
+
+ acpi_gpiochip_free_interrupts(&port->gc);
+ gpiochip_remove(&port->gc);
+ }
+}
+
+static void dwapb_get_irq(struct device *dev, struct fwnode_handle *fwnode,
+ struct dwapb_port_property *pp)
+{
+ struct device_node *np = NULL;
+ int irq = -ENXIO, j;
+
+ if (fwnode_property_read_bool(fwnode, "interrupt-controller"))
+ np = to_of_node(fwnode);
+
+ for (j = 0; j < pp->ngpio; j++) {
+ if (np)
+ irq = of_irq_get(np, j);
+ else if (has_acpi_companion(dev))
+ irq = platform_get_irq_optional(to_platform_device(dev), j);
+ if (irq > 0)
+ pp->irq[j] = irq;
+ }
}
-static struct dwapb_platform_data *
-dwapb_gpio_get_pdata(struct device *dev)
+static struct dwapb_platform_data *dwapb_gpio_get_pdata(struct device *dev)
{
struct fwnode_handle *fwnode;
struct dwapb_platform_data *pdata;
struct dwapb_port_property *pp;
int nports;
- int i, j;
+ int i;
nports = device_get_child_node_count(dev);
if (nports == 0)
@@ -580,8 +583,6 @@ dwapb_gpio_get_pdata(struct device *dev)
i = 0;
device_for_each_child_node(dev, fwnode) {
- struct device_node *np = NULL;
-
pp = &pdata->properties[i++];
pp->fwnode = fwnode;
@@ -593,8 +594,7 @@ dwapb_gpio_get_pdata(struct device *dev)
return ERR_PTR(-EINVAL);
}
- if (fwnode_property_read_u32(fwnode, "snps,nr-gpios",
- &pp->ngpio)) {
+ if (fwnode_property_read_u32(fwnode, "snps,nr-gpios", &pp->ngpio)) {
dev_info(dev,
"failed to get number of gpios for port%d\n",
i);
@@ -608,28 +608,8 @@ dwapb_gpio_get_pdata(struct device *dev)
* Only port A can provide interrupts in all configurations of
* the IP.
*/
- if (pp->idx != 0)
- continue;
-
- if (dev->of_node && fwnode_property_read_bool(fwnode,
- "interrupt-controller")) {
- np = to_of_node(fwnode);
- }
-
- for (j = 0; j < pp->ngpio; j++) {
- pp->irq[j] = -ENXIO;
-
- if (np)
- pp->irq[j] = of_irq_get(np, j);
- else if (has_acpi_companion(dev))
- pp->irq[j] = platform_get_irq(to_platform_device(dev), j);
-
- if (pp->irq[j] >= 0)
- pp->has_irq = true;
- }
-
- if (!pp->has_irq)
- dev_warn(dev, "no irq for port%d\n", pp->idx);
+ if (pp->idx == 0)
+ dwapb_get_irq(dev, fwnode, pp);
}
return pdata;
@@ -689,29 +669,24 @@ static int dwapb_gpio_probe(struct platform_device *pdev)
if (IS_ERR(gpio->regs))
return PTR_ERR(gpio->regs);
- /* Optional bus clock */
- gpio->clk = devm_clk_get(&pdev->dev, "bus");
- if (!IS_ERR(gpio->clk)) {
- err = clk_prepare_enable(gpio->clk);
- if (err) {
- dev_info(&pdev->dev, "Cannot enable clock\n");
- return err;
- }
+ /* Optional bus and debounce clocks */
+ gpio->clks[0].id = "bus";
+ gpio->clks[1].id = "db";
+ err = devm_clk_bulk_get_optional(&pdev->dev, DWAPB_NR_CLOCKS,
+ gpio->clks);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot get APB/Debounce clocks\n");
+ return err;
}
- gpio->flags = 0;
- if (dev->of_node) {
- gpio->flags = (uintptr_t)of_device_get_match_data(dev);
- } else if (has_acpi_companion(dev)) {
- const struct acpi_device_id *acpi_id;
-
- acpi_id = acpi_match_device(dwapb_acpi_match, dev);
- if (acpi_id) {
- if (acpi_id->driver_data)
- gpio->flags = acpi_id->driver_data;
- }
+ err = clk_bulk_prepare_enable(DWAPB_NR_CLOCKS, gpio->clks);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot enable APB/Debounce clocks\n");
+ return err;
}
+ gpio->flags = (uintptr_t)device_get_match_data(dev);
+
for (i = 0; i < gpio->nr_ports; i++) {
err = dwapb_gpio_add_port(gpio, &pdata->properties[i], i);
if (err)
@@ -724,7 +699,7 @@ static int dwapb_gpio_probe(struct platform_device *pdev)
out_unregister:
dwapb_gpio_unregister(gpio);
dwapb_irq_teardown(gpio);
- clk_disable_unprepare(gpio->clk);
+ clk_bulk_disable_unprepare(DWAPB_NR_CLOCKS, gpio->clks);
return err;
}
@@ -736,7 +711,7 @@ static int dwapb_gpio_remove(struct platform_device *pdev)
dwapb_gpio_unregister(gpio);
dwapb_irq_teardown(gpio);
reset_control_assert(gpio->rst);
- clk_disable_unprepare(gpio->clk);
+ clk_bulk_disable_unprepare(DWAPB_NR_CLOCKS, gpio->clks);
return 0;
}
@@ -755,8 +730,6 @@ static int dwapb_gpio_suspend(struct device *dev)
unsigned int idx = gpio->ports[i].idx;
struct dwapb_context *ctx = gpio->ports[i].ctx;
- BUG_ON(!ctx);
-
offset = GPIO_SWPORTA_DDR + idx * GPIO_SWPORT_DDR_STRIDE;
ctx->dir = dwapb_read(gpio, offset);
@@ -775,13 +748,12 @@ static int dwapb_gpio_suspend(struct device *dev)
ctx->int_deb = dwapb_read(gpio, GPIO_PORTA_DEBOUNCE);
/* Mask out interrupts */
- dwapb_write(gpio, GPIO_INTMASK,
- 0xffffffff & ~ctx->wake_en);
+ dwapb_write(gpio, GPIO_INTMASK, ~ctx->wake_en);
}
}
spin_unlock_irqrestore(&gc->bgpio_lock, flags);
- clk_disable_unprepare(gpio->clk);
+ clk_bulk_disable_unprepare(DWAPB_NR_CLOCKS, gpio->clks);
return 0;
}
@@ -791,10 +763,13 @@ static int dwapb_gpio_resume(struct device *dev)
struct dwapb_gpio *gpio = dev_get_drvdata(dev);
struct gpio_chip *gc = &gpio->ports[0].gc;
unsigned long flags;
- int i;
+ int i, err;
- if (!IS_ERR(gpio->clk))
- clk_prepare_enable(gpio->clk);
+ err = clk_bulk_prepare_enable(DWAPB_NR_CLOCKS, gpio->clks);
+ if (err) {
+ dev_err(gpio->dev, "Cannot reenable APB/Debounce clocks\n");
+ return err;
+ }
spin_lock_irqsave(&gc->bgpio_lock, flags);
for (i = 0; i < gpio->nr_ports; i++) {
@@ -802,8 +777,6 @@ static int dwapb_gpio_resume(struct device *dev)
unsigned int idx = gpio->ports[i].idx;
struct dwapb_context *ctx = gpio->ports[i].ctx;
- BUG_ON(!ctx);
-
offset = GPIO_SWPORTA_DR + idx * GPIO_SWPORT_DR_STRIDE;
dwapb_write(gpio, offset, ctx->data);
@@ -836,10 +809,10 @@ static SIMPLE_DEV_PM_OPS(dwapb_gpio_pm_ops, dwapb_gpio_suspend,
static struct platform_driver dwapb_gpio_driver = {
.driver = {
- .name = "gpio-dwapb",
+ .name = DWAPB_DRIVER_NAME,
.pm = &dwapb_gpio_pm_ops,
- .of_match_table = of_match_ptr(dwapb_of_match),
- .acpi_match_table = ACPI_PTR(dwapb_acpi_match),
+ .of_match_table = dwapb_of_match,
+ .acpi_match_table = dwapb_acpi_match,
},
.probe = dwapb_gpio_probe,
.remove = dwapb_gpio_remove,
@@ -850,3 +823,4 @@ module_platform_driver(dwapb_gpio_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jamie Iles");
MODULE_DESCRIPTION("Synopsys DesignWare APB GPIO driver");
+MODULE_ALIAS("platform:" DWAPB_DRIVER_NAME);
diff --git a/drivers/gpio/gpio-f7188x.c b/drivers/gpio/gpio-f7188x.c
index cadd02993539..18a3147f5a42 100644
--- a/drivers/gpio/gpio-f7188x.c
+++ b/drivers/gpio/gpio-f7188x.c
@@ -36,9 +36,19 @@
#define SIO_F71889A_ID 0x1005 /* F71889A chipset ID */
#define SIO_F81866_ID 0x1010 /* F81866 chipset ID */
#define SIO_F81804_ID 0x1502 /* F81804 chipset ID, same for f81966 */
-
-
-enum chips { f71869, f71869a, f71882fg, f71889a, f71889f, f81866, f81804 };
+#define SIO_F81865_ID 0x0704 /* F81865 chipset ID */
+
+
+enum chips {
+ f71869,
+ f71869a,
+ f71882fg,
+ f71889a,
+ f71889f,
+ f81866,
+ f81804,
+ f81865,
+};
static const char * const f7188x_names[] = {
"f71869",
@@ -48,6 +58,7 @@ static const char * const f7188x_names[] = {
"f71889f",
"f81866",
"f81804",
+ "f81865",
};
struct f7188x_sio {
@@ -233,6 +244,15 @@ static struct f7188x_gpio_bank f81804_gpio_bank[] = {
F7188X_GPIO_BANK(90, 8, 0x98),
};
+static struct f7188x_gpio_bank f81865_gpio_bank[] = {
+ F7188X_GPIO_BANK(0, 8, 0xF0),
+ F7188X_GPIO_BANK(10, 8, 0xE0),
+ F7188X_GPIO_BANK(20, 8, 0xD0),
+ F7188X_GPIO_BANK(30, 8, 0xC0),
+ F7188X_GPIO_BANK(40, 8, 0xB0),
+ F7188X_GPIO_BANK(50, 8, 0xA0),
+ F7188X_GPIO_BANK(60, 5, 0x90),
+};
static int f7188x_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
{
@@ -425,6 +445,10 @@ static int f7188x_gpio_probe(struct platform_device *pdev)
data->nr_bank = ARRAY_SIZE(f81804_gpio_bank);
data->bank = f81804_gpio_bank;
break;
+ case f81865:
+ data->nr_bank = ARRAY_SIZE(f81865_gpio_bank);
+ data->bank = f81865_gpio_bank;
+ break;
default:
return -ENODEV;
}
@@ -490,6 +514,9 @@ static int __init f7188x_find(int addr, struct f7188x_sio *sio)
case SIO_F81804_ID:
sio->type = f81804;
break;
+ case SIO_F81865_ID:
+ sio->type = f81865;
+ break;
default:
pr_info(DRVNAME ": Unsupported Fintek device 0x%04x\n", devid);
goto err;
diff --git a/drivers/gpio/gpio-ftgpio010.c b/drivers/gpio/gpio-ftgpio010.c
index fbddb1662428..4031164780f7 100644
--- a/drivers/gpio/gpio-ftgpio010.c
+++ b/drivers/gpio/gpio-ftgpio010.c
@@ -193,7 +193,7 @@ static int ftgpio_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
if (val == deb_div) {
/*
* The debounce timer happens to already be set to the
- * desireable value, what a coincidence! We can just enable
+ * desirable value, what a coincidence! We can just enable
* debounce on this GPIO line and return. This happens more
* often than you think, for example when all GPIO keys
* on a system are requesting the same debounce interval.
diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
index 2f086d0aa1f4..9960bb8b0f5b 100644
--- a/drivers/gpio/gpio-ich.c
+++ b/drivers/gpio/gpio-ich.c
@@ -89,7 +89,7 @@ static struct {
struct device *dev;
struct gpio_chip chip;
struct resource *gpio_base; /* GPIO IO base */
- struct resource *pm_base; /* Power Mangagment IO base */
+ struct resource *pm_base; /* Power Management IO base */
struct ichx_desc *desc; /* Pointer to chipset-specific description */
u32 orig_gpio_ctrl; /* Orig CTRL value, used to restore on exit */
u8 use_gpio; /* Which GPIO groups are usable */
diff --git a/drivers/gpio/gpio-max730x.c b/drivers/gpio/gpio-max730x.c
index 1e1935c51096..b8c1fe20f49a 100644
--- a/drivers/gpio/gpio-max730x.c
+++ b/drivers/gpio/gpio-max730x.c
@@ -47,7 +47,7 @@
static int max7301_direction_input(struct gpio_chip *chip, unsigned offset)
{
- struct max7301 *ts = gpiochip_get_data(chip);
+ struct max7301 *ts = container_of(chip, struct max7301, chip);
u8 *config;
u8 offset_bits, pin_config;
int ret;
@@ -89,7 +89,7 @@ static int __max7301_set(struct max7301 *ts, unsigned offset, int value)
static int max7301_direction_output(struct gpio_chip *chip, unsigned offset,
int value)
{
- struct max7301 *ts = gpiochip_get_data(chip);
+ struct max7301 *ts = container_of(chip, struct max7301, chip);
u8 *config;
u8 offset_bits;
int ret;
@@ -189,10 +189,6 @@ int __max730x_probe(struct max7301 *ts)
ts->chip.parent = dev;
ts->chip.owner = THIS_MODULE;
- ret = gpiochip_add_data(&ts->chip, ts);
- if (ret)
- goto exit_destroy;
-
/*
* initialize pullups according to platform data and cache the
* register values for later use.
@@ -214,7 +210,9 @@ int __max730x_probe(struct max7301 *ts)
}
}
- return ret;
+ ret = gpiochip_add_data(&ts->chip, ts);
+ if (!ret)
+ return ret;
exit_destroy:
mutex_destroy(&ts->lock);
diff --git a/drivers/gpio/gpio-mb86s7x.c b/drivers/gpio/gpio-mb86s7x.c
index 501e89548f53..37c5363e391e 100644
--- a/drivers/gpio/gpio-mb86s7x.c
+++ b/drivers/gpio/gpio-mb86s7x.c
@@ -145,7 +145,9 @@ static int mb86s70_gpio_to_irq(struct gpio_chip *gc, unsigned int offset)
for (index = 0;; index++) {
irq = platform_get_irq(to_platform_device(gc->parent), index);
- if (irq <= 0)
+ if (irq < 0)
+ return irq;
+ if (irq == 0)
break;
if (irq_get_irq_data(irq)->hwirq == offset)
return irq;
@@ -168,15 +170,13 @@ static int mb86s70_gpio_probe(struct platform_device *pdev)
if (IS_ERR(gchip->base))
return PTR_ERR(gchip->base);
- if (!has_acpi_companion(&pdev->dev)) {
- gchip->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(gchip->clk))
- return PTR_ERR(gchip->clk);
+ gchip->clk = devm_clk_get_optional(&pdev->dev, NULL);
+ if (IS_ERR(gchip->clk))
+ return PTR_ERR(gchip->clk);
- ret = clk_prepare_enable(gchip->clk);
- if (ret)
- return ret;
- }
+ ret = clk_prepare_enable(gchip->clk);
+ if (ret)
+ return ret;
spin_lock_init(&gchip->lock);
@@ -186,15 +186,13 @@ static int mb86s70_gpio_probe(struct platform_device *pdev)
gchip->gc.free = mb86s70_gpio_free;
gchip->gc.get = mb86s70_gpio_get;
gchip->gc.set = mb86s70_gpio_set;
+ gchip->gc.to_irq = mb86s70_gpio_to_irq;
gchip->gc.label = dev_name(&pdev->dev);
gchip->gc.ngpio = 32;
gchip->gc.owner = THIS_MODULE;
gchip->gc.parent = &pdev->dev;
gchip->gc.base = -1;
- if (has_acpi_companion(&pdev->dev))
- gchip->gc.to_irq = mb86s70_gpio_to_irq;
-
ret = gpiochip_add_data(&gchip->gc, gchip);
if (ret) {
dev_err(&pdev->dev, "couldn't register gpio driver\n");
@@ -202,8 +200,7 @@ static int mb86s70_gpio_probe(struct platform_device *pdev)
return ret;
}
- if (has_acpi_companion(&pdev->dev))
- acpi_gpiochip_request_interrupts(&gchip->gc);
+ acpi_gpiochip_request_interrupts(&gchip->gc);
return 0;
}
@@ -212,8 +209,7 @@ static int mb86s70_gpio_remove(struct platform_device *pdev)
{
struct mb86s70_gpio_chip *gchip = platform_get_drvdata(pdev);
- if (has_acpi_companion(&pdev->dev))
- acpi_gpiochip_free_interrupts(&gchip->gc);
+ acpi_gpiochip_free_interrupts(&gchip->gc);
gpiochip_remove(&gchip->gc);
clk_disable_unprepare(gchip->clk);
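The gpio-mb86s7x hunks above rely on devm_clk_get_optional(), which returns NULL rather than an error when no clock is described, and on clk_prepare_enable() accepting a NULL clock; that is what lets the ACPI/DT special-casing go away. A minimal sketch (example_get_clock is a made-up helper):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

/* Illustrative sketch (not part of the patch). */
static int example_get_clock(struct platform_device *pdev, struct clk **clk)
{
	*clk = devm_clk_get_optional(&pdev->dev, NULL);
	if (IS_ERR(*clk))
		return PTR_ERR(*clk);

	/* A NULL clock is a valid "no clock" handle, so this is then a no-op. */
	return clk_prepare_enable(*clk);
}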
diff --git a/drivers/gpio/gpio-merrifield.c b/drivers/gpio/gpio-merrifield.c
index 48918a016cd8..706687fab634 100644
--- a/drivers/gpio/gpio-merrifield.c
+++ b/drivers/gpio/gpio-merrifield.c
@@ -443,8 +443,8 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id
base = pcim_iomap_table(pdev)[1];
- irq_base = readl(base);
- gpio_base = readl(sizeof(u32) + base);
+ irq_base = readl(base + 0 * sizeof(u32));
+ gpio_base = readl(base + 1 * sizeof(u32));
/* Release the IO mapping, since we already get the info from BAR1 */
pcim_iounmap_regions(pdev, BIT(1));
@@ -473,6 +473,10 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id
raw_spin_lock_init(&priv->lock);
+ retval = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (retval < 0)
+ return retval;
+
girq = &priv->chip.irq;
girq->chip = &mrfld_irqchip;
girq->init_hw = mrfld_irq_init_hw;
@@ -482,7 +486,7 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id
sizeof(*girq->parents), GFP_KERNEL);
if (!girq->parents)
return -ENOMEM;
- girq->parents[0] = pdev->irq;
+ girq->parents[0] = pci_irq_vector(pdev, 0);
girq->first = irq_base;
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_bad_irq;
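The gpio-merrifield hunk above stops reading pdev->irq directly and instead allocates a single interrupt vector of whichever type the device offers. A rough sketch of that pattern (example_setup_irq is illustrative):

#include <linux/pci.h>

/* Illustrative sketch (not part of the patch). */
static int example_setup_irq(struct pci_dev *pdev, int *parent_irq)
{
	int ret;

	/* One vector: MSI-X, MSI or legacy INTx, whichever is available. */
	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
	if (ret < 0)
		return ret;

	*parent_irq = pci_irq_vector(pdev, 0);
	return 0;
}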
diff --git a/drivers/gpio/gpio-mlxbf2.c b/drivers/gpio/gpio-mlxbf2.c
index da570e63589d..94d5efce1721 100644
--- a/drivers/gpio/gpio-mlxbf2.c
+++ b/drivers/gpio/gpio-mlxbf2.c
@@ -14,7 +14,6 @@
#include <linux/resource.h>
#include <linux/spinlock.h>
#include <linux/types.h>
-#include <linux/version.h>
/*
* There are 3 YU GPIO blocks:
@@ -110,8 +109,8 @@ static int mlxbf2_gpio_get_lock_res(struct platform_device *pdev)
}
yu_arm_gpio_lock_param.io = devm_ioremap(dev, res->start, size);
- if (IS_ERR(yu_arm_gpio_lock_param.io))
- ret = PTR_ERR(yu_arm_gpio_lock_param.io);
+ if (!yu_arm_gpio_lock_param.io)
+ ret = -ENOMEM;
exit:
mutex_unlock(yu_arm_gpio_lock_param.lock);
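The gpio-mlxbf2 fix above hinges on the devm_ioremap() error convention: it returns NULL on failure, never an ERR_PTR value. A minimal sketch (example_map is illustrative):

#include <linux/device.h>
#include <linux/io.h>
#include <linux/ioport.h>

/* Illustrative sketch (not part of the patch). */
static int example_map(struct device *dev, struct resource *res,
		       void __iomem **io)
{
	*io = devm_ioremap(dev, res->start, resource_size(res));
	if (!*io)	/* NULL on failure, not an ERR_PTR value */
		return -ENOMEM;

	return 0;
}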
diff --git a/drivers/gpio/gpio-mm-lantiq.c b/drivers/gpio/gpio-mm-lantiq.c
index f460d71b0c92..538e31fe8903 100644
--- a/drivers/gpio/gpio-mm-lantiq.c
+++ b/drivers/gpio/gpio-mm-lantiq.c
@@ -36,7 +36,7 @@ struct ltq_mm {
* @chip: Pointer to our private data structure.
*
* Write the shadow value to the EBU to set the gpios. We need to set the
- * global EBU lock to make sure that PCI/MTD dont break.
+ * global EBU lock to make sure that PCI/MTD don't break.
*/
static void ltq_mm_apply(struct ltq_mm *chip)
{
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index 3eb94f3740d1..bc345185db26 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -14,6 +14,7 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irq_sim.h>
+#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/property.h>
@@ -48,7 +49,7 @@ struct gpio_mockup_line_status {
struct gpio_mockup_chip {
struct gpio_chip gc;
struct gpio_mockup_line_status *lines;
- struct irq_sim irqsim;
+ struct irq_domain *irq_sim_domain;
struct dentry *dbg_dir;
struct mutex lock;
};
@@ -144,14 +145,12 @@ static void gpio_mockup_set_multiple(struct gpio_chip *gc,
static int gpio_mockup_apply_pull(struct gpio_mockup_chip *chip,
unsigned int offset, int value)
{
+ int curr, irq, irq_type, ret = 0;
struct gpio_desc *desc;
struct gpio_chip *gc;
- struct irq_sim *sim;
- int curr, irq, irq_type;
gc = &chip->gc;
desc = &gc->gpiodev->descs[offset];
- sim = &chip->irqsim;
mutex_lock(&chip->lock);
@@ -161,14 +160,28 @@ static int gpio_mockup_apply_pull(struct gpio_mockup_chip *chip,
if (curr == value)
goto out;
- irq = irq_sim_irqnum(sim, offset);
+ irq = irq_find_mapping(chip->irq_sim_domain, offset);
+ if (!irq)
+ /*
+ * This is fine - it just means, nobody is listening
+ * for interrupts on this line, otherwise
+ * irq_create_mapping() would have been called from
+ * the to_irq() callback.
+ */
+ goto set_value;
+
irq_type = irq_get_trigger_type(irq);
if ((value == 1 && (irq_type & IRQ_TYPE_EDGE_RISING)) ||
- (value == 0 && (irq_type & IRQ_TYPE_EDGE_FALLING)))
- irq_sim_fire(sim, offset);
+ (value == 0 && (irq_type & IRQ_TYPE_EDGE_FALLING))) {
+ ret = irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING,
+ true);
+ if (ret)
+ goto out;
+ }
}
+set_value:
/* Change the value unless we're actively driving the line. */
if (!test_bit(FLAG_REQUESTED, &desc->flags) ||
!test_bit(FLAG_IS_OUT, &desc->flags))
@@ -177,7 +190,7 @@ static int gpio_mockup_apply_pull(struct gpio_mockup_chip *chip,
out:
chip->lines[offset].pull = value;
mutex_unlock(&chip->lock);
- return 0;
+ return ret;
}
static int gpio_mockup_set_config(struct gpio_chip *gc,
@@ -236,7 +249,7 @@ static int gpio_mockup_to_irq(struct gpio_chip *gc, unsigned int offset)
{
struct gpio_mockup_chip *chip = gpiochip_get_data(gc);
- return irq_sim_irqnum(&chip->irqsim, offset);
+ return irq_create_mapping(chip->irq_sim_domain, offset);
}
static void gpio_mockup_free(struct gpio_chip *gc, unsigned int offset)
@@ -389,6 +402,19 @@ static int gpio_mockup_name_lines(struct device *dev,
return 0;
}
+static void gpio_mockup_dispose_mappings(void *data)
+{
+ struct gpio_mockup_chip *chip = data;
+ struct gpio_chip *gc = &chip->gc;
+ int i, irq;
+
+ for (i = 0; i < gc->ngpio; i++) {
+ irq = irq_find_mapping(chip->irq_sim_domain, i);
+ if (irq)
+ irq_dispose_mapping(irq);
+ }
+}
+
static int gpio_mockup_probe(struct platform_device *pdev)
{
struct gpio_mockup_chip *chip;
@@ -456,8 +482,13 @@ static int gpio_mockup_probe(struct platform_device *pdev)
return rv;
}
- rv = devm_irq_sim_init(dev, &chip->irqsim, gc->ngpio);
- if (rv < 0)
+ chip->irq_sim_domain = devm_irq_domain_create_sim(dev, NULL,
+ gc->ngpio);
+ if (IS_ERR(chip->irq_sim_domain))
+ return PTR_ERR(chip->irq_sim_domain);
+
+ rv = devm_add_action_or_reset(dev, gpio_mockup_dispose_mappings, chip);
+ if (rv)
return rv;
rv = devm_gpiochip_add_data(dev, &chip->gc, chip);
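The gpio-mockup rework above replaces struct irq_sim with a plain IRQ domain created by devm_irq_domain_create_sim(); consumers map a hwirq on demand and "fire" it by marking the interrupt pending. A rough sketch of that usage (example_fire is not from the patch):

#include <linux/irq.h>
#include <linux/irqdomain.h>

/* Illustrative sketch (not part of the patch). */
static int example_fire(struct irq_domain *sim_domain, unsigned int hwirq)
{
	unsigned int virq;

	/* Returns an existing mapping or creates one; 0 means failure. */
	virq = irq_create_mapping(sim_domain, hwirq);
	if (!virq)
		return -EINVAL;

	/* Setting the pending state makes the simulator deliver the IRQ. */
	return irq_set_irqchip_state(virq, IRQCHIP_STATE_PENDING, true);
}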
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 4269ea9a817e..1fca8dd7824f 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -306,37 +306,39 @@ static const struct regmap_config pca953x_i2c_regmap = {
.writeable_reg = pca953x_writeable_register,
.volatile_reg = pca953x_volatile_register,
+ .disable_locking = true,
.cache_type = REGCACHE_RBTREE,
- /* REVISIT: should be 0x7f but some 24 bit chips use REG_ADDR_AI */
- .max_register = 0xff,
+ .max_register = 0x7f,
};
-static u8 pca953x_recalc_addr(struct pca953x_chip *chip, int reg, int off,
- bool write, bool addrinc)
+static const struct regmap_config pca953x_ai_i2c_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .read_flag_mask = REG_ADDR_AI,
+ .write_flag_mask = REG_ADDR_AI,
+
+ .readable_reg = pca953x_readable_register,
+ .writeable_reg = pca953x_writeable_register,
+ .volatile_reg = pca953x_volatile_register,
+
+ .cache_type = REGCACHE_RBTREE,
+ .max_register = 0x7f,
+};
+
+static u8 pca953x_recalc_addr(struct pca953x_chip *chip, int reg, int off)
{
int bank_shift = pca953x_bank_shift(chip);
int addr = (reg & PCAL_GPIO_MASK) << bank_shift;
int pinctrl = (reg & PCAL_PINCTRL_MASK) << 1;
u8 regaddr = pinctrl | addr | (off / BANK_SZ);
- /* Single byte read doesn't need AI bit set. */
- if (!addrinc)
- return regaddr;
-
- /* Chips with 24 and more GPIOs always support Auto Increment */
- if (write && NBANK(chip) > 2)
- regaddr |= REG_ADDR_AI;
-
- /* PCA9575 needs address-increment on multi-byte writes */
- if (PCA_CHIP_TYPE(chip->driver_data) == PCA957X_TYPE)
- regaddr |= REG_ADDR_AI;
-
return regaddr;
}
static int pca953x_write_regs(struct pca953x_chip *chip, int reg, unsigned long *val)
{
- u8 regaddr = pca953x_recalc_addr(chip, reg, 0, true, true);
+ u8 regaddr = pca953x_recalc_addr(chip, reg, 0);
u8 value[MAX_BANK];
int i, ret;
@@ -354,7 +356,7 @@ static int pca953x_write_regs(struct pca953x_chip *chip, int reg, unsigned long
static int pca953x_read_regs(struct pca953x_chip *chip, int reg, unsigned long *val)
{
- u8 regaddr = pca953x_recalc_addr(chip, reg, 0, false, true);
+ u8 regaddr = pca953x_recalc_addr(chip, reg, 0);
u8 value[MAX_BANK];
int i, ret;
@@ -373,8 +375,7 @@ static int pca953x_read_regs(struct pca953x_chip *chip, int reg, unsigned long *
static int pca953x_gpio_direction_input(struct gpio_chip *gc, unsigned off)
{
struct pca953x_chip *chip = gpiochip_get_data(gc);
- u8 dirreg = pca953x_recalc_addr(chip, chip->regs->direction, off,
- true, false);
+ u8 dirreg = pca953x_recalc_addr(chip, chip->regs->direction, off);
u8 bit = BIT(off % BANK_SZ);
int ret;
@@ -388,10 +389,8 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc,
unsigned off, int val)
{
struct pca953x_chip *chip = gpiochip_get_data(gc);
- u8 dirreg = pca953x_recalc_addr(chip, chip->regs->direction, off,
- true, false);
- u8 outreg = pca953x_recalc_addr(chip, chip->regs->output, off,
- true, false);
+ u8 dirreg = pca953x_recalc_addr(chip, chip->regs->direction, off);
+ u8 outreg = pca953x_recalc_addr(chip, chip->regs->output, off);
u8 bit = BIT(off % BANK_SZ);
int ret;
@@ -411,8 +410,7 @@ exit:
static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off)
{
struct pca953x_chip *chip = gpiochip_get_data(gc);
- u8 inreg = pca953x_recalc_addr(chip, chip->regs->input, off,
- true, false);
+ u8 inreg = pca953x_recalc_addr(chip, chip->regs->input, off);
u8 bit = BIT(off % BANK_SZ);
u32 reg_val;
int ret;
@@ -436,8 +434,7 @@ static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off)
static void pca953x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val)
{
struct pca953x_chip *chip = gpiochip_get_data(gc);
- u8 outreg = pca953x_recalc_addr(chip, chip->regs->output, off,
- true, false);
+ u8 outreg = pca953x_recalc_addr(chip, chip->regs->output, off);
u8 bit = BIT(off % BANK_SZ);
mutex_lock(&chip->i2c_lock);
@@ -448,8 +445,7 @@ static void pca953x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val)
static int pca953x_gpio_get_direction(struct gpio_chip *gc, unsigned off)
{
struct pca953x_chip *chip = gpiochip_get_data(gc);
- u8 dirreg = pca953x_recalc_addr(chip, chip->regs->direction, off,
- true, false);
+ u8 dirreg = pca953x_recalc_addr(chip, chip->regs->direction, off);
u8 bit = BIT(off % BANK_SZ);
u32 reg_val;
int ret;
@@ -466,6 +462,23 @@ static int pca953x_gpio_get_direction(struct gpio_chip *gc, unsigned off)
return GPIO_LINE_DIRECTION_OUT;
}
+static int pca953x_gpio_get_multiple(struct gpio_chip *gc,
+ unsigned long *mask, unsigned long *bits)
+{
+ struct pca953x_chip *chip = gpiochip_get_data(gc);
+ DECLARE_BITMAP(reg_val, MAX_LINE);
+ int ret;
+
+ mutex_lock(&chip->i2c_lock);
+ ret = pca953x_read_regs(chip, chip->regs->input, reg_val);
+ mutex_unlock(&chip->i2c_lock);
+ if (ret)
+ return ret;
+
+ bitmap_replace(bits, bits, reg_val, mask, gc->ngpio);
+ return 0;
+}
+
static void pca953x_gpio_set_multiple(struct gpio_chip *gc,
unsigned long *mask, unsigned long *bits)
{
@@ -489,10 +502,8 @@ static int pca953x_gpio_set_pull_up_down(struct pca953x_chip *chip,
unsigned int offset,
unsigned long config)
{
- u8 pull_en_reg = pca953x_recalc_addr(chip, PCAL953X_PULL_EN, offset,
- true, false);
- u8 pull_sel_reg = pca953x_recalc_addr(chip, PCAL953X_PULL_SEL, offset,
- true, false);
+ u8 pull_en_reg = pca953x_recalc_addr(chip, PCAL953X_PULL_EN, offset);
+ u8 pull_sel_reg = pca953x_recalc_addr(chip, PCAL953X_PULL_SEL, offset);
u8 bit = BIT(offset % BANK_SZ);
int ret;
@@ -551,6 +562,7 @@ static void pca953x_setup_gpio(struct pca953x_chip *chip, int gpios)
gc->get = pca953x_gpio_get_value;
gc->set = pca953x_gpio_set_value;
gc->get_direction = pca953x_gpio_get_direction;
+ gc->get_multiple = pca953x_gpio_get_multiple;
gc->set_multiple = pca953x_gpio_set_multiple;
gc->set_config = pca953x_gpio_set_config;
gc->can_sleep = true;
@@ -863,6 +875,7 @@ static int pca953x_probe(struct i2c_client *client,
int ret;
u32 invert = 0;
struct regulator *reg;
+ const struct regmap_config *regmap_config;
chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
if (chip == NULL)
@@ -925,7 +938,17 @@ static int pca953x_probe(struct i2c_client *client,
i2c_set_clientdata(client, chip);
- chip->regmap = devm_regmap_init_i2c(client, &pca953x_i2c_regmap);
+ pca953x_setup_gpio(chip, chip->driver_data & PCA_GPIO_MASK);
+
+ if (NBANK(chip) > 2 || PCA_CHIP_TYPE(chip->driver_data) == PCA957X_TYPE) {
+ dev_info(&client->dev, "using AI\n");
+ regmap_config = &pca953x_ai_i2c_regmap;
+ } else {
+ dev_info(&client->dev, "using no AI\n");
+ regmap_config = &pca953x_i2c_regmap;
+ }
+
+ chip->regmap = devm_regmap_init_i2c(client, regmap_config);
if (IS_ERR(chip->regmap)) {
ret = PTR_ERR(chip->regmap);
goto err_exit;
@@ -956,7 +979,6 @@ static int pca953x_probe(struct i2c_client *client,
/* initialize cached registers from their original values.
* we can't share this chip with another i2c master.
*/
- pca953x_setup_gpio(chip, chip->driver_data & PCA_GPIO_MASK);
if (PCA_CHIP_TYPE(chip->driver_data) == PCA953X_TYPE) {
chip->regs = &pca953x_regs;
@@ -1154,7 +1176,7 @@ static struct i2c_driver pca953x_driver = {
.name = "pca953x",
.pm = &pca953x_pm_ops,
.of_match_table = pca953x_dt_ids,
- .acpi_match_table = ACPI_PTR(pca953x_acpi_ids),
+ .acpi_match_table = pca953x_acpi_ids,
},
.probe = pca953x_probe,
.remove = pca953x_remove,
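The pca953x change above splits the regmap configuration in two and selects the auto-increment (AI) variant at probe time; the AI bit is applied through regmap's read_flag_mask/write_flag_mask instead of being OR'ed in by every accessor. A sketch of such a config (EXAMPLE_AUTO_INC stands in for the chip's AI address bit and is not from the patch):

#include <linux/regmap.h>

#define EXAMPLE_AUTO_INC	0x80	/* placeholder for the AI address bit */

/* Illustrative sketch (not part of the patch). */
static const struct regmap_config example_ai_regmap = {
	.reg_bits = 8,
	.val_bits = 8,
	/* OR'ed into every register address on reads and writes */
	.read_flag_mask = EXAMPLE_AUTO_INC,
	.write_flag_mask = EXAMPLE_AUTO_INC,
	.max_register = 0x7f,
};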
diff --git a/drivers/gpio/gpio-pch.c b/drivers/gpio/gpio-pch.c
index 3f3d9a94b709..e96d28bf43b4 100644
--- a/drivers/gpio/gpio-pch.c
+++ b/drivers/gpio/gpio-pch.c
@@ -2,6 +2,7 @@
/*
* Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
*/
+#include <linux/bits.h>
#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
@@ -11,11 +12,11 @@
#include <linux/slab.h>
#define PCH_EDGE_FALLING 0
-#define PCH_EDGE_RISING BIT(0)
-#define PCH_LEVEL_L BIT(1)
-#define PCH_LEVEL_H (BIT(0) | BIT(1))
-#define PCH_EDGE_BOTH BIT(2)
-#define PCH_IM_MASK (BIT(0) | BIT(1) | BIT(2))
+#define PCH_EDGE_RISING 1
+#define PCH_LEVEL_L 2
+#define PCH_LEVEL_H 3
+#define PCH_EDGE_BOTH 4
+#define PCH_IM_MASK GENMASK(2, 0)
#define PCH_IRQ_BASE 24
@@ -103,9 +104,9 @@ static void pch_gpio_set(struct gpio_chip *gpio, unsigned nr, int val)
spin_lock_irqsave(&chip->spinlock, flags);
reg_val = ioread32(&chip->reg->po);
if (val)
- reg_val |= (1 << nr);
+ reg_val |= BIT(nr);
else
- reg_val &= ~(1 << nr);
+ reg_val &= ~BIT(nr);
iowrite32(reg_val, &chip->reg->po);
spin_unlock_irqrestore(&chip->spinlock, flags);
@@ -115,7 +116,7 @@ static int pch_gpio_get(struct gpio_chip *gpio, unsigned nr)
{
struct pch_gpio *chip = gpiochip_get_data(gpio);
- return (ioread32(&chip->reg->pi) >> nr) & 1;
+ return !!(ioread32(&chip->reg->pi) & BIT(nr));
}
static int pch_gpio_direction_output(struct gpio_chip *gpio, unsigned nr,
@@ -130,13 +131,14 @@ static int pch_gpio_direction_output(struct gpio_chip *gpio, unsigned nr,
reg_val = ioread32(&chip->reg->po);
if (val)
- reg_val |= (1 << nr);
+ reg_val |= BIT(nr);
else
- reg_val &= ~(1 << nr);
+ reg_val &= ~BIT(nr);
iowrite32(reg_val, &chip->reg->po);
- pm = ioread32(&chip->reg->pm) & ((1 << gpio_pins[chip->ioh]) - 1);
- pm |= (1 << nr);
+ pm = ioread32(&chip->reg->pm);
+ pm &= BIT(gpio_pins[chip->ioh]) - 1;
+ pm |= BIT(nr);
iowrite32(pm, &chip->reg->pm);
spin_unlock_irqrestore(&chip->spinlock, flags);
@@ -151,8 +153,9 @@ static int pch_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
unsigned long flags;
spin_lock_irqsave(&chip->spinlock, flags);
- pm = ioread32(&chip->reg->pm) & ((1 << gpio_pins[chip->ioh]) - 1);
- pm &= ~(1 << nr);
+ pm = ioread32(&chip->reg->pm);
+ pm &= BIT(gpio_pins[chip->ioh]) - 1;
+ pm &= ~BIT(nr);
iowrite32(pm, &chip->reg->pm);
spin_unlock_irqrestore(&chip->spinlock, flags);
@@ -226,17 +229,15 @@ static int pch_irq_type(struct irq_data *d, unsigned int type)
int ch, irq = d->irq;
ch = irq - chip->irq_base;
- if (irq <= chip->irq_base + 7) {
+ if (irq < chip->irq_base + 8) {
im_reg = &chip->reg->im0;
- im_pos = ch;
+ im_pos = ch - 0;
} else {
im_reg = &chip->reg->im1;
im_pos = ch - 8;
}
dev_dbg(chip->dev, "irq=%d type=%d ch=%d pos=%d\n", irq, type, ch, im_pos);
- spin_lock_irqsave(&chip->spinlock, flags);
-
switch (type) {
case IRQ_TYPE_EDGE_RISING:
val = PCH_EDGE_RISING;
@@ -254,20 +255,21 @@ static int pch_irq_type(struct irq_data *d, unsigned int type)
val = PCH_LEVEL_L;
break;
default:
- goto unlock;
+ return 0;
}
+ spin_lock_irqsave(&chip->spinlock, flags);
+
/* Set interrupt mode */
im = ioread32(im_reg) & ~(PCH_IM_MASK << (im_pos * 4));
iowrite32(im | (val << (im_pos * 4)), im_reg);
/* And the handler */
- if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
+ if (type & IRQ_TYPE_LEVEL_MASK)
irq_set_handler_locked(d, handle_level_irq);
- else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
+ else if (type & IRQ_TYPE_EDGE_BOTH)
irq_set_handler_locked(d, handle_edge_irq);
-unlock:
spin_unlock_irqrestore(&chip->spinlock, flags);
return 0;
}
@@ -277,7 +279,7 @@ static void pch_irq_unmask(struct irq_data *d)
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct pch_gpio *chip = gc->private;
- iowrite32(1 << (d->irq - chip->irq_base), &chip->reg->imaskclr);
+ iowrite32(BIT(d->irq - chip->irq_base), &chip->reg->imaskclr);
}
static void pch_irq_mask(struct irq_data *d)
@@ -285,7 +287,7 @@ static void pch_irq_mask(struct irq_data *d)
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct pch_gpio *chip = gc->private;
- iowrite32(1 << (d->irq - chip->irq_base), &chip->reg->imask);
+ iowrite32(BIT(d->irq - chip->irq_base), &chip->reg->imask);
}
static void pch_irq_ack(struct irq_data *d)
@@ -293,21 +295,22 @@ static void pch_irq_ack(struct irq_data *d)
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct pch_gpio *chip = gc->private;
- iowrite32(1 << (d->irq - chip->irq_base), &chip->reg->iclr);
+ iowrite32(BIT(d->irq - chip->irq_base), &chip->reg->iclr);
}
static irqreturn_t pch_gpio_handler(int irq, void *dev_id)
{
struct pch_gpio *chip = dev_id;
unsigned long reg_val = ioread32(&chip->reg->istatus);
- int i, ret = IRQ_NONE;
+ int i;
+
+ dev_dbg(chip->dev, "irq=%d status=0x%lx\n", irq, reg_val);
- for_each_set_bit(i, &reg_val, gpio_pins[chip->ioh]) {
- dev_dbg(chip->dev, "[%d]:irq=%d status=0x%lx\n", i, irq, reg_val);
+ reg_val &= BIT(gpio_pins[chip->ioh]) - 1;
+ for_each_set_bit(i, &reg_val, gpio_pins[chip->ioh])
generic_handle_irq(chip->irq_base + i);
- ret = IRQ_HANDLED;
- }
- return ret;
+
+ return IRQ_RETVAL(reg_val);
}
static int pch_gpio_alloc_generic_chip(struct pch_gpio *chip,
@@ -344,7 +347,6 @@ static int pch_gpio_probe(struct pci_dev *pdev,
s32 ret;
struct pch_gpio *chip;
int irq_base;
- u32 msk;
chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
if (chip == NULL)
@@ -357,7 +359,7 @@ static int pch_gpio_probe(struct pci_dev *pdev,
return ret;
}
- ret = pcim_iomap_regions(pdev, 1 << 1, KBUILD_MODNAME);
+ ret = pcim_iomap_regions(pdev, BIT(1), KBUILD_MODNAME);
if (ret) {
dev_err(&pdev->dev, "pci_request_regions FAILED-%d", ret);
return ret;
@@ -393,9 +395,8 @@ static int pch_gpio_probe(struct pci_dev *pdev,
chip->irq_base = irq_base;
/* Mask all interrupts, but enable them */
- msk = (1 << gpio_pins[chip->ioh]) - 1;
- iowrite32(msk, &chip->reg->imask);
- iowrite32(msk, &chip->reg->ien);
+ iowrite32(BIT(gpio_pins[chip->ioh]) - 1, &chip->reg->imask);
+ iowrite32(BIT(gpio_pins[chip->ioh]) - 1, &chip->reg->ien);
ret = devm_request_irq(&pdev->dev, pdev->irq, pch_gpio_handler,
IRQF_SHARED, KBUILD_MODNAME, chip);
diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c
index e241fb884c12..f1b53dd1df1a 100644
--- a/drivers/gpio/gpio-pl061.c
+++ b/drivers/gpio/gpio-pl061.c
@@ -16,6 +16,7 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/gpio/driver.h>
#include <linux/device.h>
@@ -408,6 +409,7 @@ static const struct amba_id pl061_ids[] = {
},
{ 0, 0 },
};
+MODULE_DEVICE_TABLE(amba, pl061_ids);
static struct amba_driver pl061_gpio_driver = {
.drv = {
@@ -419,9 +421,6 @@ static struct amba_driver pl061_gpio_driver = {
.id_table = pl061_ids,
.probe = pl061_probe,
};
+module_amba_driver(pl061_gpio_driver);
-static int __init pl061_gpio_init(void)
-{
- return amba_driver_register(&pl061_gpio_driver);
-}
-device_initcall(pl061_gpio_init);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index 7284473c9fe3..eac1582c70da 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -250,8 +250,10 @@ static int gpio_rcar_request(struct gpio_chip *chip, unsigned offset)
int error;
error = pm_runtime_get_sync(p->dev);
- if (error < 0)
+ if (error < 0) {
+ pm_runtime_put(p->dev);
return error;
+ }
error = pinctrl_gpio_request(chip->base + offset);
if (error)
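The gpio-rcar fix above follows the usual pm_runtime_get_sync() rule: the call raises the usage counter even when it fails, so the error path has to drop that reference again. A minimal sketch (example_runtime_get is illustrative):

#include <linux/pm_runtime.h>

/* Illustrative sketch (not part of the patch). */
static int example_runtime_get(struct device *dev)
{
	int error;

	error = pm_runtime_get_sync(dev);
	if (error < 0) {
		/* The usage counter was bumped anyway; balance it before bailing out. */
		pm_runtime_put(dev);
		return error;
	}

	return 0;
}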
diff --git a/drivers/gpio/gpio-regmap.c b/drivers/gpio/gpio-regmap.c
new file mode 100644
index 000000000000..5412cb3b0b2a
--- /dev/null
+++ b/drivers/gpio/gpio-regmap.c
@@ -0,0 +1,349 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * regmap based generic GPIO driver
+ *
+ * Copyright 2020 Michael Walle <michael@walle.cc>
+ */
+
+#include <linux/gpio/driver.h>
+#include <linux/gpio/regmap.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+struct gpio_regmap {
+ struct device *parent;
+ struct regmap *regmap;
+ struct gpio_chip gpio_chip;
+
+ int reg_stride;
+ int ngpio_per_reg;
+ unsigned int reg_dat_base;
+ unsigned int reg_set_base;
+ unsigned int reg_clr_base;
+ unsigned int reg_dir_in_base;
+ unsigned int reg_dir_out_base;
+
+ int (*reg_mask_xlate)(struct gpio_regmap *gpio, unsigned int base,
+ unsigned int offset, unsigned int *reg,
+ unsigned int *mask);
+
+ void *driver_data;
+};
+
+static unsigned int gpio_regmap_addr(unsigned int addr)
+{
+ if (addr == GPIO_REGMAP_ADDR_ZERO)
+ return 0;
+
+ return addr;
+}
+
+static int gpio_regmap_simple_xlate(struct gpio_regmap *gpio,
+ unsigned int base, unsigned int offset,
+ unsigned int *reg, unsigned int *mask)
+{
+ unsigned int line = offset % gpio->ngpio_per_reg;
+ unsigned int stride = offset / gpio->ngpio_per_reg;
+
+ *reg = base + stride * gpio->reg_stride;
+ *mask = BIT(line);
+
+ return 0;
+}
+
+static int gpio_regmap_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct gpio_regmap *gpio = gpiochip_get_data(chip);
+ unsigned int base, val, reg, mask;
+ int ret;
+
+ /* we might not have an output register if we are input only */
+ if (gpio->reg_dat_base)
+ base = gpio_regmap_addr(gpio->reg_dat_base);
+ else
+ base = gpio_regmap_addr(gpio->reg_set_base);
+
+ ret = gpio->reg_mask_xlate(gpio, base, offset, &reg, &mask);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(gpio->regmap, reg, &val);
+ if (ret)
+ return ret;
+
+ return !!(val & mask);
+}
+
+static void gpio_regmap_set(struct gpio_chip *chip, unsigned int offset,
+ int val)
+{
+ struct gpio_regmap *gpio = gpiochip_get_data(chip);
+ unsigned int base = gpio_regmap_addr(gpio->reg_set_base);
+ unsigned int reg, mask;
+
+ gpio->reg_mask_xlate(gpio, base, offset, &reg, &mask);
+ if (val)
+ regmap_update_bits(gpio->regmap, reg, mask, mask);
+ else
+ regmap_update_bits(gpio->regmap, reg, mask, 0);
+}
+
+static void gpio_regmap_set_with_clear(struct gpio_chip *chip,
+ unsigned int offset, int val)
+{
+ struct gpio_regmap *gpio = gpiochip_get_data(chip);
+ unsigned int base, reg, mask;
+
+ if (val)
+ base = gpio_regmap_addr(gpio->reg_set_base);
+ else
+ base = gpio_regmap_addr(gpio->reg_clr_base);
+
+ gpio->reg_mask_xlate(gpio, base, offset, &reg, &mask);
+ regmap_write(gpio->regmap, reg, mask);
+}
+
+static int gpio_regmap_get_direction(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct gpio_regmap *gpio = gpiochip_get_data(chip);
+ unsigned int base, val, reg, mask;
+ int invert, ret;
+
+ if (gpio->reg_dir_out_base) {
+ base = gpio_regmap_addr(gpio->reg_dir_out_base);
+ invert = 0;
+ } else if (gpio->reg_dir_in_base) {
+ base = gpio_regmap_addr(gpio->reg_dir_in_base);
+ invert = 1;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ ret = gpio->reg_mask_xlate(gpio, base, offset, &reg, &mask);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(gpio->regmap, reg, &val);
+ if (ret)
+ return ret;
+
+ if (!!(val & mask) ^ invert)
+ return GPIO_LINE_DIRECTION_OUT;
+ else
+ return GPIO_LINE_DIRECTION_IN;
+}
+
+static int gpio_regmap_set_direction(struct gpio_chip *chip,
+ unsigned int offset, bool output)
+{
+ struct gpio_regmap *gpio = gpiochip_get_data(chip);
+ unsigned int base, val, reg, mask;
+ int invert, ret;
+
+ if (gpio->reg_dir_out_base) {
+ base = gpio_regmap_addr(gpio->reg_dir_out_base);
+ invert = 0;
+ } else if (gpio->reg_dir_in_base) {
+ base = gpio_regmap_addr(gpio->reg_dir_in_base);
+ invert = 1;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ ret = gpio->reg_mask_xlate(gpio, base, offset, &reg, &mask);
+ if (ret)
+ return ret;
+
+ if (invert)
+ val = output ? 0 : mask;
+ else
+ val = output ? mask : 0;
+
+ return regmap_update_bits(gpio->regmap, reg, mask, val);
+}
+
+static int gpio_regmap_direction_input(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ return gpio_regmap_set_direction(chip, offset, false);
+}
+
+static int gpio_regmap_direction_output(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ gpio_regmap_set(chip, offset, value);
+
+ return gpio_regmap_set_direction(chip, offset, true);
+}
+
+void gpio_regmap_set_drvdata(struct gpio_regmap *gpio, void *data)
+{
+ gpio->driver_data = data;
+}
+EXPORT_SYMBOL_GPL(gpio_regmap_set_drvdata);
+
+void *gpio_regmap_get_drvdata(struct gpio_regmap *gpio)
+{
+ return gpio->driver_data;
+}
+EXPORT_SYMBOL_GPL(gpio_regmap_get_drvdata);
+
+/**
+ * gpio_regmap_register() - Register a generic regmap GPIO controller
+ * @config: configuration for gpio_regmap
+ *
+ * Return: A pointer to the registered gpio_regmap or ERR_PTR error value.
+ */
+struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config)
+{
+ struct gpio_regmap *gpio;
+ struct gpio_chip *chip;
+ int ret;
+
+ if (!config->parent)
+ return ERR_PTR(-EINVAL);
+
+ if (!config->ngpio)
+ return ERR_PTR(-EINVAL);
+
+ /* we need at least one */
+ if (!config->reg_dat_base && !config->reg_set_base)
+ return ERR_PTR(-EINVAL);
+
+ /* if we have a direction register we need both input and output */
+ if ((config->reg_dir_out_base || config->reg_dir_in_base) &&
+ (!config->reg_dat_base || !config->reg_set_base))
+ return ERR_PTR(-EINVAL);
+
+ /* we don't support having both registers simultaneously for now */
+ if (config->reg_dir_out_base && config->reg_dir_in_base)
+ return ERR_PTR(-EINVAL);
+
+ gpio = kzalloc(sizeof(*gpio), GFP_KERNEL);
+ if (!gpio)
+ return ERR_PTR(-ENOMEM);
+
+ gpio->parent = config->parent;
+ gpio->regmap = config->regmap;
+ gpio->ngpio_per_reg = config->ngpio_per_reg;
+ gpio->reg_stride = config->reg_stride;
+ gpio->reg_mask_xlate = config->reg_mask_xlate;
+ gpio->reg_dat_base = config->reg_dat_base;
+ gpio->reg_set_base = config->reg_set_base;
+ gpio->reg_clr_base = config->reg_clr_base;
+ gpio->reg_dir_in_base = config->reg_dir_in_base;
+ gpio->reg_dir_out_base = config->reg_dir_out_base;
+
+ /* if not set, assume there is only one register */
+ if (!gpio->ngpio_per_reg)
+ gpio->ngpio_per_reg = config->ngpio;
+
+ /* if not set, assume they are consecutive */
+ if (!gpio->reg_stride)
+ gpio->reg_stride = 1;
+
+ if (!gpio->reg_mask_xlate)
+ gpio->reg_mask_xlate = gpio_regmap_simple_xlate;
+
+ chip = &gpio->gpio_chip;
+ chip->parent = config->parent;
+ chip->base = -1;
+ chip->ngpio = config->ngpio;
+ chip->names = config->names;
+ chip->label = config->label ?: dev_name(config->parent);
+
+ /*
+ * If our regmap is fast_io we should probably set can_sleep to false.
+ * Right now, the regmap doesn't save this property, nor is there any
+ * access function for it.
+ * The only regmap type which uses fast_io is regmap-mmio. For now,
+ * assume a safe default of true here.
+ */
+ chip->can_sleep = true;
+
+ chip->get = gpio_regmap_get;
+ if (gpio->reg_set_base && gpio->reg_clr_base)
+ chip->set = gpio_regmap_set_with_clear;
+ else if (gpio->reg_set_base)
+ chip->set = gpio_regmap_set;
+
+ if (gpio->reg_dir_in_base || gpio->reg_dir_out_base) {
+ chip->get_direction = gpio_regmap_get_direction;
+ chip->direction_input = gpio_regmap_direction_input;
+ chip->direction_output = gpio_regmap_direction_output;
+ }
+
+ ret = gpiochip_add_data(chip, gpio);
+ if (ret < 0)
+ goto err_free_gpio;
+
+ if (config->irq_domain) {
+ ret = gpiochip_irqchip_add_domain(chip, config->irq_domain);
+ if (ret)
+ goto err_remove_gpiochip;
+ }
+
+ return gpio;
+
+err_remove_gpiochip:
+ gpiochip_remove(chip);
+err_free_gpio:
+ kfree(gpio);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(gpio_regmap_register);
+
+/**
+ * gpio_regmap_unregister() - Unregister a generic regmap GPIO controller
+ * @gpio: gpio_regmap device to unregister
+ */
+void gpio_regmap_unregister(struct gpio_regmap *gpio)
+{
+ gpiochip_remove(&gpio->gpio_chip);
+ kfree(gpio);
+}
+EXPORT_SYMBOL_GPL(gpio_regmap_unregister);
+
+static void devm_gpio_regmap_unregister(struct device *dev, void *res)
+{
+ gpio_regmap_unregister(*(struct gpio_regmap **)res);
+}
+
+/**
+ * devm_gpio_regmap_register() - resource managed gpio_regmap_register()
+ * @dev: device that is registering this GPIO device
+ * @config: configuration for gpio_regmap
+ *
+ * Managed gpio_regmap_register(). For generic regmap GPIO device registered by
+ * this function, gpio_regmap_unregister() is automatically called on driver
+ * detach. See gpio_regmap_register() for more information.
+ *
+ * Return: A pointer to the registered gpio_regmap or ERR_PTR error value.
+ */
+struct gpio_regmap *devm_gpio_regmap_register(struct device *dev,
+ const struct gpio_regmap_config *config)
+{
+ struct gpio_regmap **ptr, *gpio;
+
+ ptr = devres_alloc(devm_gpio_regmap_unregister, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ gpio = gpio_regmap_register(config);
+ if (!IS_ERR(gpio)) {
+ *ptr = gpio;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return gpio;
+}
+EXPORT_SYMBOL_GPL(devm_gpio_regmap_register);
+
+MODULE_AUTHOR("Michael Walle <michael@walle.cc>");
+MODULE_DESCRIPTION("GPIO generic regmap driver core");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c
index 79b553dc39a3..178e9128ded0 100644
--- a/drivers/gpio/gpio-tegra186.c
+++ b/drivers/gpio/gpio-tegra186.c
@@ -894,6 +894,7 @@ static const struct of_device_id tegra186_gpio_of_match[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, tegra186_gpio_of_match);
static struct platform_driver tegra186_gpio_driver = {
.driver = {
diff --git a/drivers/gpio/gpio-xgene-sb.c b/drivers/gpio/gpio-xgene-sb.c
index 25d86441666e..a809609ee957 100644
--- a/drivers/gpio/gpio-xgene-sb.c
+++ b/drivers/gpio/gpio-xgene-sb.c
@@ -10,8 +10,8 @@
#include <linux/module.h>
#include <linux/io.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
-#include <linux/of_gpio.h>
#include <linux/gpio/driver.h>
#include <linux/acpi.h>
@@ -122,7 +122,7 @@ static int xgene_gpio_sb_to_irq(struct gpio_chip *gc, u32 gpio)
fwspec.fwnode = gc->parent->fwnode;
fwspec.param_count = 2;
fwspec.param[0] = GPIO_TO_HWIRQ(priv, gpio);
- fwspec.param[1] = IRQ_TYPE_NONE;
+ fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
return irq_create_fwspec_mapping(&fwspec);
}
@@ -290,10 +290,8 @@ static int xgene_gpio_sb_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "X-Gene GPIO Standby driver registered\n");
- if (priv->nirq > 0) {
- /* Register interrupt handlers for gpio signaled acpi events */
- acpi_gpiochip_request_interrupts(&priv->gc);
- }
+ /* Register interrupt handlers for GPIO signaled ACPI Events */
+ acpi_gpiochip_request_interrupts(&priv->gc);
return ret;
}
@@ -302,9 +300,7 @@ static int xgene_gpio_sb_remove(struct platform_device *pdev)
{
struct xgene_gpio_sb *priv = platform_get_drvdata(pdev);
- if (priv->nirq > 0) {
- acpi_gpiochip_free_interrupts(&priv->gc);
- }
+ acpi_gpiochip_free_interrupts(&priv->gc);
irq_domain_remove(priv->irq_domain);
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 0017367e94ee..9276051663da 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -1353,7 +1353,7 @@ int acpi_gpio_count(struct device *dev, const char *con_id)
}
/* Run deferred acpi_gpiochip_request_irqs() */
-static int acpi_gpio_handle_deferred_request_irqs(void)
+static int __init acpi_gpio_handle_deferred_request_irqs(void)
{
struct acpi_gpio_chip *acpi_gpio, *tmp;
@@ -1371,7 +1371,7 @@ static int acpi_gpio_handle_deferred_request_irqs(void)
/* We must use _sync so that this runs after the first deferred_probe run */
late_initcall_sync(acpi_gpio_handle_deferred_request_irqs);
-static const struct dmi_system_id gpiolib_acpi_quirks[] = {
+static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = {
{
/*
* The Minix Neo Z83-4 has a micro-USB-B id-pin handler for
@@ -1455,7 +1455,7 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] = {
{} /* Terminating entry */
};
-static int acpi_gpio_setup_params(void)
+static int __init acpi_gpio_setup_params(void)
{
const struct acpi_gpiolib_dmi_quirk *quirk = NULL;
const struct dmi_system_id *id;
diff --git a/drivers/gpio/gpiolib-devprop.c b/drivers/gpio/gpiolib-devprop.c
index 53781b253986..26741032fa9e 100644
--- a/drivers/gpio/gpiolib-devprop.c
+++ b/drivers/gpio/gpiolib-devprop.c
@@ -37,8 +37,11 @@ void devprop_gpiochip_set_names(struct gpio_chip *chip,
if (count < 0)
return;
- if (count > gdev->ngpio)
+ if (count > gdev->ngpio) {
+ dev_warn(&gdev->dev, "gpio-line-names is length %d but should be at most length %d",
+ count, gdev->ngpio);
count = gdev->ngpio;
+ }
names = kcalloc(count, sizeof(*names), GFP_KERNEL);
if (!names)
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index ccc449df3792..219eb0054233 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -344,6 +344,12 @@ struct gpio_desc *gpiod_get_from_of_node(struct device_node *node,
if (transitory)
lflags |= GPIO_TRANSITORY;
+ if (flags & OF_GPIO_PULL_UP)
+ lflags |= GPIO_PULL_UP;
+
+ if (flags & OF_GPIO_PULL_DOWN)
+ lflags |= GPIO_PULL_DOWN;
+
ret = gpiod_configure_flags(desc, propname, lflags, dflags);
if (ret < 0) {
gpiod_put(desc);
@@ -460,6 +466,24 @@ static struct gpio_desc *of_find_arizona_gpio(struct device *dev,
return of_get_named_gpiod_flags(dev->of_node, con_id, 0, of_flags);
}
+static struct gpio_desc *of_find_usb_gpio(struct device *dev,
+ const char *con_id,
+ enum of_gpio_flags *of_flags)
+{
+ /*
+ * Currently this USB quirk is only for the Fairchild FUSB302 host which is using
+ * an undocumented DT GPIO line named "fcs,int_n" without the compulsory "-gpios"
+ * suffix.
+ */
+ if (!IS_ENABLED(CONFIG_TYPEC_FUSB302))
+ return ERR_PTR(-ENOENT);
+
+ if (!con_id || strcmp(con_id, "fcs,int_n"))
+ return ERR_PTR(-ENOENT);
+
+ return of_get_named_gpiod_flags(dev->of_node, con_id, 0, of_flags);
+}
+
struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
unsigned int idx, unsigned long *flags)
{
@@ -504,6 +528,9 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
if (PTR_ERR(desc) == -ENOENT)
desc = of_find_arizona_gpio(dev, con_id, &of_flags);
+ if (PTR_ERR(desc) == -ENOENT)
+ desc = of_find_usb_gpio(dev, con_id, &of_flags);
+
if (IS_ERR(desc))
return desc;
@@ -585,6 +612,10 @@ static struct gpio_desc *of_parse_own_gpio(struct device_node *np,
*lflags |= GPIO_ACTIVE_LOW;
if (xlate_flags & OF_GPIO_TRANSITORY)
*lflags |= GPIO_TRANSITORY;
+ if (xlate_flags & OF_GPIO_PULL_UP)
+ *lflags |= GPIO_PULL_UP;
+ if (xlate_flags & OF_GPIO_PULL_DOWN)
+ *lflags |= GPIO_PULL_DOWN;
if (of_property_read_bool(np, "input"))
*dflags |= GPIOD_IN;
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index c14f0784274a..4fa075d49fbc 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -296,6 +296,9 @@ static int gpiodev_add_to_list(struct gpio_device *gdev)
/*
* Convert a GPIO name to its descriptor
+ * Note that there is no guarantee that GPIO names are globally unique!
+ * Hence this function will return, if it exists, a reference to the first GPIO
+ * line found that matches the given name.
*/
static struct gpio_desc *gpio_name_to_desc(const char * const name)
{
@@ -329,10 +332,12 @@ static struct gpio_desc *gpio_name_to_desc(const char * const name)
}
/*
- * Takes the names from gc->names and checks if they are all unique. If they
- * are, they are assigned to their gpio descriptors.
+ * Take the names from gc->names and assign them to their GPIO descriptors.
+ * Warn if a name is already used for a GPIO line on a different GPIO chip.
*
- * Warning if one of the names is already used for a different GPIO.
+ * Note that:
+ * 1. Non-unique names are still accepted,
+ * 2. Name collisions within the same GPIO chip are not reported.
*/
static int gpiochip_set_desc_names(struct gpio_chip *gc)
{
@@ -1267,8 +1272,7 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (copy_to_user(ip, &chipinfo, sizeof(chipinfo)))
return -EFAULT;
return 0;
- } else if (cmd == GPIO_GET_LINEINFO_IOCTL ||
- cmd == GPIO_GET_LINEINFO_WATCH_IOCTL) {
+ } else if (cmd == GPIO_GET_LINEINFO_IOCTL) {
struct gpioline_info lineinfo;
if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
@@ -1280,23 +1284,37 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
hwgpio = gpio_chip_hwgpio(desc);
- if (cmd == GPIO_GET_LINEINFO_WATCH_IOCTL &&
- test_bit(hwgpio, priv->watched_lines))
- return -EBUSY;
-
gpio_desc_to_lineinfo(desc, &lineinfo);
if (copy_to_user(ip, &lineinfo, sizeof(lineinfo)))
return -EFAULT;
-
- if (cmd == GPIO_GET_LINEINFO_WATCH_IOCTL)
- set_bit(hwgpio, priv->watched_lines);
-
return 0;
} else if (cmd == GPIO_GET_LINEHANDLE_IOCTL) {
return linehandle_create(gdev, ip);
} else if (cmd == GPIO_GET_LINEEVENT_IOCTL) {
return lineevent_create(gdev, ip);
+ } else if (cmd == GPIO_GET_LINEINFO_WATCH_IOCTL) {
+ struct gpioline_info lineinfo;
+
+ if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
+ return -EFAULT;
+
+ desc = gpiochip_get_desc(gc, lineinfo.line_offset);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+ hwgpio = gpio_chip_hwgpio(desc);
+
+ if (test_bit(hwgpio, priv->watched_lines))
+ return -EBUSY;
+
+ gpio_desc_to_lineinfo(desc, &lineinfo);
+
+ if (copy_to_user(ip, &lineinfo, sizeof(lineinfo)))
+ return -EFAULT;
+
+ set_bit(hwgpio, priv->watched_lines);
+ return 0;
} else if (cmd == GPIO_GET_LINEINFO_UNWATCH_IOCTL) {
if (copy_from_user(&offset, ip, sizeof(offset)))
return -EFAULT;
@@ -1538,9 +1556,8 @@ static int gpiochip_setup_dev(struct gpio_device *gdev)
/* From this point, the .release() function cleans up gpio_device */
gdev->dev.release = gpiodevice_release;
- pr_debug("%s: registered GPIOs %d to %d on device: %s (%s)\n",
- __func__, gdev->base, gdev->base + gdev->ngpio - 1,
- dev_name(&gdev->dev), gdev->chip->label ? : "generic");
+ dev_dbg(&gdev->dev, "registered GPIOs %d to %d on %s\n", gdev->base,
+ gdev->base + gdev->ngpio - 1, gdev->chip->label ? : "generic");
return 0;
@@ -1556,8 +1573,8 @@ static void gpiochip_machine_hog(struct gpio_chip *gc, struct gpiod_hog *hog)
desc = gpiochip_get_desc(gc, hog->chip_hwnum);
if (IS_ERR(desc)) {
- pr_err("%s: unable to get GPIO desc: %ld\n",
- __func__, PTR_ERR(desc));
+ chip_err(gc, "%s: unable to get GPIO desc: %ld\n", __func__,
+ PTR_ERR(desc));
return;
}
@@ -1566,8 +1583,8 @@ static void gpiochip_machine_hog(struct gpio_chip *gc, struct gpiod_hog *hog)
rv = gpiod_hog(desc, hog->line_name, hog->lflags, hog->dflags);
if (rv)
- pr_err("%s: unable to hog GPIO line (%s:%u): %d\n",
- __func__, gc->label, hog->chip_hwnum, rv);
+ gpiod_err(desc, "%s: unable to hog GPIO line (%s:%u): %d\n",
+ __func__, gc->label, hog->chip_hwnum, rv);
}
static void machine_gpiochip_add(struct gpio_chip *gc)
@@ -1592,8 +1609,8 @@ static void gpiochip_setup_devs(void)
list_for_each_entry(gdev, &gpio_devices, list) {
ret = gpiochip_setup_dev(gdev);
if (ret)
- pr_err("%s: Failed to initialize gpio device (%d)\n",
- dev_name(&gdev->dev), ret);
+ dev_err(&gdev->dev,
+ "Failed to initialize gpio device (%d)\n", ret);
}
}
@@ -2461,32 +2478,37 @@ static void gpiochip_irq_relres(struct irq_data *d)
gpiochip_relres_irq(gc, d->hwirq);
}
+static void gpiochip_irq_mask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+
+ if (gc->irq.irq_mask)
+ gc->irq.irq_mask(d);
+ gpiochip_disable_irq(gc, d->hwirq);
+}
+
+static void gpiochip_irq_unmask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+
+ gpiochip_enable_irq(gc, d->hwirq);
+ if (gc->irq.irq_unmask)
+ gc->irq.irq_unmask(d);
+}
+
static void gpiochip_irq_enable(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
gpiochip_enable_irq(gc, d->hwirq);
- if (gc->irq.irq_enable)
- gc->irq.irq_enable(d);
- else
- gc->irq.chip->irq_unmask(d);
+ gc->irq.irq_enable(d);
}
static void gpiochip_irq_disable(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- /*
- * Since we override .irq_disable() we need to mimic the
- * behaviour of __irq_disable() in irq/chip.c.
- * First call .irq_disable() if it exists, else mimic the
- * behaviour of mask_irq() which calls .irq_mask() if
- * it exists.
- */
- if (gc->irq.irq_disable)
- gc->irq.irq_disable(d);
- else if (gc->irq.chip->irq_mask)
- gc->irq.chip->irq_mask(d);
+ gc->irq.irq_disable(d);
gpiochip_disable_irq(gc, d->hwirq);
}
@@ -2511,10 +2533,22 @@ static void gpiochip_set_irq_hooks(struct gpio_chip *gc)
"detected irqchip that is shared with multiple gpiochips: please fix the driver.\n");
return;
}
- gc->irq.irq_enable = irqchip->irq_enable;
- gc->irq.irq_disable = irqchip->irq_disable;
- irqchip->irq_enable = gpiochip_irq_enable;
- irqchip->irq_disable = gpiochip_irq_disable;
+
+ if (irqchip->irq_disable) {
+ gc->irq.irq_disable = irqchip->irq_disable;
+ irqchip->irq_disable = gpiochip_irq_disable;
+ } else {
+ gc->irq.irq_mask = irqchip->irq_mask;
+ irqchip->irq_mask = gpiochip_irq_mask;
+ }
+
+ if (irqchip->irq_enable) {
+ gc->irq.irq_enable = irqchip->irq_enable;
+ irqchip->irq_enable = gpiochip_irq_enable;
+ } else {
+ gc->irq.irq_unmask = irqchip->irq_unmask;
+ irqchip->irq_unmask = gpiochip_irq_unmask;
+ }
}
/**
@@ -2702,7 +2736,7 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gc,
return -EINVAL;
if (!gc->parent) {
- pr_err("missing gpiochip .dev parent pointer\n");
+ chip_err(gc, "missing gpiochip .dev parent pointer\n");
return -EINVAL;
}
gc->irq.threaded = threaded;
@@ -2752,6 +2786,26 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gc,
}
EXPORT_SYMBOL_GPL(gpiochip_irqchip_add_key);
+/**
+ * gpiochip_irqchip_add_domain() - adds an irqdomain to a gpiochip
+ * @gc: the gpiochip to add the irqchip to
+ * @domain: the irqdomain to add to the gpiochip
+ *
+ * This function adds an IRQ domain to the gpiochip.
+ */
+int gpiochip_irqchip_add_domain(struct gpio_chip *gc,
+ struct irq_domain *domain)
+{
+ if (!domain)
+ return -EINVAL;
+
+ gc->to_irq = gpiochip_to_irq;
+ gc->irq.domain = domain;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gpiochip_irqchip_add_domain);
+
#else /* CONFIG_GPIOLIB_IRQCHIP */
static inline int gpiochip_add_irqchip(struct gpio_chip *gc,
@@ -4653,7 +4707,7 @@ static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id,
if (!table)
return desc;
- for (p = &table->table[0]; p->chip_label; p++) {
+ for (p = &table->table[0]; p->key; p++) {
struct gpio_chip *gc;
/* idx must always match exactly */
@@ -4664,18 +4718,30 @@ static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id,
if (p->con_id && (!con_id || strcmp(p->con_id, con_id)))
continue;
- gc = find_chip_by_name(p->chip_label);
+ if (p->chip_hwnum == U16_MAX) {
+ desc = gpio_name_to_desc(p->key);
+ if (desc) {
+ *flags = p->flags;
+ return desc;
+ }
+
+ dev_warn(dev, "cannot find GPIO line %s, deferring\n",
+ p->key);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ gc = find_chip_by_name(p->key);
if (!gc) {
/*
* As the lookup table indicates a chip with
- * p->chip_label should exist, assume it may
+ * p->key should exist, assume it may
* still appear later and let the interested
* consumer be probed again or let the Deferred
* Probe infrastructure handle the error.
*/
dev_warn(dev, "cannot find GPIO chip %s, deferring\n",
- p->chip_label);
+ p->key);
return ERR_PTR(-EPROBE_DEFER);
}
@@ -4706,7 +4772,7 @@ static int platform_gpio_count(struct device *dev, const char *con_id)
if (!table)
return -ENOENT;
- for (p = &table->table[0]; p->chip_label; p++) {
+ for (p = &table->table[0]; p->key; p++) {
if ((con_id && p->con_id && !strcmp(con_id, p->con_id)) ||
(!con_id && !p->con_id))
count++;
@@ -4877,7 +4943,7 @@ int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
/* No particular flag request, return here... */
if (!(dflags & GPIOD_FLAGS_BIT_DIR_SET)) {
- pr_debug("no flags found for %s\n", con_id);
+ gpiod_dbg(desc, "no flags found for %s\n", con_id);
return 0;
}
@@ -5108,8 +5174,7 @@ int gpiod_hog(struct gpio_desc *desc, const char *name,
/* Mark GPIO as hogged so it can be identified and removed later */
set_bit(FLAG_IS_HOGGED, &desc->flags);
- pr_info("GPIO line %d (%s) hogged as %s%s\n",
- desc_to_gpio(desc), name,
+ gpiod_info(desc, "hogged as %s%s\n",
(dflags & GPIOD_FLAGS_BIT_DIR_OUT) ? "output" : "input",
(dflags & GPIOD_FLAGS_BIT_DIR_OUT) ?
(dflags & GPIOD_FLAGS_BIT_DIR_VAL) ? "/high" : "/low" : "");
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index 853ce681b4a4..9ed242316414 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -81,8 +81,7 @@ struct gpio_array {
unsigned long invert_mask[];
};
-struct gpio_desc *gpiochip_get_desc(struct gpio_chip *chip,
- unsigned int hwnum);
+struct gpio_desc *gpiochip_get_desc(struct gpio_chip *gc, unsigned int hwnum);
int gpiod_get_array_value_complex(bool raw, bool can_sleep,
unsigned int array_size,
struct gpio_desc **desc_array,
@@ -163,18 +162,18 @@ static inline int gpio_chip_hwgpio(const struct gpio_desc *desc)
/* With chip prefix */
-#define chip_emerg(chip, fmt, ...) \
- dev_emerg(&chip->gpiodev->dev, "(%s): " fmt, chip->label, ##__VA_ARGS__)
-#define chip_crit(chip, fmt, ...) \
- dev_crit(&chip->gpiodev->dev, "(%s): " fmt, chip->label, ##__VA_ARGS__)
-#define chip_err(chip, fmt, ...) \
- dev_err(&chip->gpiodev->dev, "(%s): " fmt, chip->label, ##__VA_ARGS__)
-#define chip_warn(chip, fmt, ...) \
- dev_warn(&chip->gpiodev->dev, "(%s): " fmt, chip->label, ##__VA_ARGS__)
-#define chip_info(chip, fmt, ...) \
- dev_info(&chip->gpiodev->dev, "(%s): " fmt, chip->label, ##__VA_ARGS__)
-#define chip_dbg(chip, fmt, ...) \
- dev_dbg(&chip->gpiodev->dev, "(%s): " fmt, chip->label, ##__VA_ARGS__)
+#define chip_emerg(gc, fmt, ...) \
+ dev_emerg(&gc->gpiodev->dev, "(%s): " fmt, gc->label, ##__VA_ARGS__)
+#define chip_crit(gc, fmt, ...) \
+ dev_crit(&gc->gpiodev->dev, "(%s): " fmt, gc->label, ##__VA_ARGS__)
+#define chip_err(gc, fmt, ...) \
+ dev_err(&gc->gpiodev->dev, "(%s): " fmt, gc->label, ##__VA_ARGS__)
+#define chip_warn(gc, fmt, ...) \
+ dev_warn(&gc->gpiodev->dev, "(%s): " fmt, gc->label, ##__VA_ARGS__)
+#define chip_info(gc, fmt, ...) \
+ dev_info(&gc->gpiodev->dev, "(%s): " fmt, gc->label, ##__VA_ARGS__)
+#define chip_dbg(gc, fmt, ...) \
+ dev_dbg(&gc->gpiodev->dev, "(%s): " fmt, gc->label, ##__VA_ARGS__)
#ifdef CONFIG_GPIO_SYSFS
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 43594978958e..c4fd57d8b717 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -161,7 +161,7 @@ config DRM_LOAD_EDID_FIRMWARE
monitor are unable to provide appropriate EDID data. Since this
feature is provided as a workaround for broken hardware, the
default case is N. Details and instructions how to build your own
- EDID data are given in Documentation/driver-api/edid.rst.
+ EDID data are given in Documentation/admin-guide/edid.rst.
config DRM_DP_CEC
bool "Enable DisplayPort CEC-Tunneling-over-AUX HDMI support"
@@ -310,8 +310,6 @@ source "drivers/gpu/drm/ast/Kconfig"
source "drivers/gpu/drm/mgag200/Kconfig"
-source "drivers/gpu/drm/cirrus/Kconfig"
-
source "drivers/gpu/drm/armada/Kconfig"
source "drivers/gpu/drm/atmel-hlcdc/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 7f72ef5e7811..2c0e5a7e5953 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -17,7 +17,8 @@ drm-y := drm_auth.o drm_cache.o \
drm_plane.o drm_color_mgmt.o drm_print.o \
drm_dumb_buffers.o drm_mode_config.o drm_vblank.o \
drm_syncobj.o drm_lease.o drm_writeback.o drm_client.o \
- drm_client_modeset.o drm_atomic_uapi.o drm_hdcp.o
+ drm_client_modeset.o drm_atomic_uapi.o drm_hdcp.o \
+ drm_managed.o
drm-$(CONFIG_DRM_LEGACY) += drm_legacy_misc.o drm_bufs.o drm_context.o drm_dma.o drm_scatter.o drm_lock.o
drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o
@@ -32,8 +33,7 @@ drm-$(CONFIG_PCI) += drm_pci.o
drm-$(CONFIG_DEBUG_FS) += drm_debugfs.o drm_debugfs_crc.o
drm-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
-drm_vram_helper-y := drm_gem_vram_helper.o \
- drm_vram_helper_common.o
+drm_vram_helper-y := drm_gem_vram_helper.o
obj-$(CONFIG_DRM_VRAM_HELPER) += drm_vram_helper.o
drm_ttm_helper-y := drm_gem_ttm_helper.o
@@ -74,7 +74,6 @@ obj-$(CONFIG_DRM_I915) += i915/
obj-$(CONFIG_DRM_MGAG200) += mgag200/
obj-$(CONFIG_DRM_V3D) += v3d/
obj-$(CONFIG_DRM_VC4) += vc4/
-obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus/
obj-$(CONFIG_DRM_SIS) += sis/
obj-$(CONFIG_DRM_SAVAGE)+= savage/
obj-$(CONFIG_DRM_VMWGFX)+= vmwgfx/
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index c2bbcdd9c875..210d57a4afc8 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -55,7 +55,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o amdgpu_ids.o \
amdgpu_gmc.o amdgpu_mmhub.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \
amdgpu_vm_sdma.o amdgpu_discovery.o amdgpu_ras_eeprom.o amdgpu_nbio.o \
- amdgpu_umc.o smu_v11_0_i2c.o
+ amdgpu_umc.o smu_v11_0_i2c.o amdgpu_fru_eeprom.o
amdgpu-$(CONFIG_PERF_EVENTS) += amdgpu_pmu.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 8ac1581a6b53..cd913986863e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -28,6 +28,18 @@
#ifndef __AMDGPU_H__
#define __AMDGPU_H__
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+
+#define pr_fmt(fmt) "amdgpu: " fmt
+
+#ifdef dev_fmt
+#undef dev_fmt
+#endif
+
+#define dev_fmt(fmt) "amdgpu: " fmt
+
#include "amdgpu_ctx.h"
#include <linux/atomic.h>
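[Editor's note: illustration of the effect of the pr_fmt/dev_fmt definitions added above. Any pr_*()/dev_*() call made from code that includes amdgpu.h now gets an automatic "amdgpu: " prefix, which is why explicit prefixes are dropped from messages later in this series:]

/* with the definitions above in effect */
pr_err("failed to validate PT BOs\n");
/* logs: "amdgpu: failed to validate PT BOs" */

dev_info(adev->dev, "switched on\n");
/* logs against the PCI device, also prefixed with "amdgpu: " */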
@@ -161,6 +173,7 @@ extern int amdgpu_gpu_recovery;
extern int amdgpu_emu_mode;
extern uint amdgpu_smu_memory_pool_size;
extern uint amdgpu_dc_feature_mask;
+extern uint amdgpu_dc_debug_mask;
extern uint amdgpu_dm_abm_level;
extern struct amdgpu_mgpu_info mgpu_info;
extern int amdgpu_ras_enable;
@@ -177,6 +190,8 @@ extern int sched_policy;
static const int sched_policy = KFD_SCHED_POLICY_HWS;
#endif
+extern int amdgpu_tmz;
+
#ifdef CONFIG_DRM_AMDGPU_SI
extern int amdgpu_si_support;
#endif
@@ -190,8 +205,6 @@ extern int amdgpu_cik_support;
#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */
#define AMDGPU_FENCE_JIFFIES_TIMEOUT (HZ / 2)
-/* AMDGPU_IB_POOL_SIZE must be a power of 2 */
-#define AMDGPU_IB_POOL_SIZE 16
#define AMDGPU_DEBUGFS_MAX_COMPONENTS 32
#define AMDGPUFB_CONN_LIMIT 4
#define AMDGPU_BIOS_NUM_SCRATCH 16
@@ -439,7 +452,9 @@ struct amdgpu_fpriv {
int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv);
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- unsigned size, struct amdgpu_ib *ib);
+ unsigned size,
+ enum amdgpu_ib_pool_type pool,
+ struct amdgpu_ib *ib);
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
struct dma_fence *f);
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
@@ -512,7 +527,7 @@ static inline void amdgpu_set_ib_value(struct amdgpu_cs_parser *p,
/*
* Writeback
*/
-#define AMDGPU_MAX_WB 128 /* Reserve at most 128 WB slots for amdgpu-owned rings. */
+#define AMDGPU_MAX_WB 256 /* Reserve at most 256 WB slots for amdgpu-owned rings. */
struct amdgpu_wb {
struct amdgpu_bo *wb_obj;
@@ -724,6 +739,7 @@ struct amdgpu_device {
uint32_t rev_id;
uint32_t external_rev_id;
unsigned long flags;
+ unsigned long apu_flags;
int usec_timeout;
const struct amdgpu_asic_funcs *asic_funcs;
bool shutdown;
@@ -751,7 +767,6 @@ struct amdgpu_device {
uint8_t *bios;
uint32_t bios_size;
struct amdgpu_bo *stolen_vga_memory;
- struct amdgpu_bo *discovery_memory;
uint32_t bios_scratch_reg_offset;
uint32_t bios_scratch[AMDGPU_BIOS_NUM_SCRATCH];
@@ -843,7 +858,8 @@ struct amdgpu_device {
unsigned num_rings;
struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
bool ib_pool_ready;
- struct amdgpu_sa_manager ring_tmp_bo;
+ struct amdgpu_sa_manager ib_pools[AMDGPU_IB_POOL_MAX];
+ struct amdgpu_sched gpu_sched[AMDGPU_HW_IP_NUM][AMDGPU_RING_PRIO_MAX];
/* interrupts */
struct amdgpu_irq irq;
@@ -903,7 +919,9 @@ struct amdgpu_device {
struct amdgpu_display_manager dm;
/* discovery */
- uint8_t *discovery;
+ uint8_t *discovery_bin;
+ uint32_t discovery_tmr_size;
+ struct amdgpu_bo *discovery_memory;
/* mes */
bool enable_mes;
@@ -923,7 +941,7 @@ struct amdgpu_device {
atomic64_t gart_pin_size;
/* soc15 register offset based on ip, instance and segment */
- uint32_t *reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE];
+ uint32_t *reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE];
/* delayed work_func for deferring clockgating during resume */
struct delayed_work delayed_init_work;
@@ -935,9 +953,6 @@ struct amdgpu_device {
/* link all shadow bo */
struct list_head shadow_list;
struct mutex shadow_list_lock;
- /* keep an lru list of rings by HW IP */
- struct list_head ring_lru_list;
- spinlock_t ring_lru_list_lock;
/* record hw reset is performed */
bool has_hw_reset;
@@ -947,8 +962,6 @@ struct amdgpu_device {
bool in_suspend;
bool in_hibernate;
- /* record last mm index being written through WREG32*/
- unsigned long last_mm_index;
bool in_gpu_reset;
enum pp_mp1_state mp1_state;
struct mutex lock_reset;
@@ -967,14 +980,19 @@ struct amdgpu_device {
uint64_t unique_id;
uint64_t df_perfmon_config_assign_mask[AMDGPU_MAX_DF_PERFMONS];
- /* device pstate */
- int pstate;
/* enable runtime pm on the device */
bool runpm;
bool in_runpm;
bool pm_sysfs_en;
bool ucode_sysfs_en;
+
+ /* Chip product information */
+ char product_number[16];
+ char product_name[32];
+ char serial[16];
+
+ struct amdgpu_autodump autodump;
};
static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
@@ -991,10 +1009,10 @@ int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev);
void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
uint32_t *buf, size_t size, bool write);
-uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
+uint32_t amdgpu_device_rreg(struct amdgpu_device *adev, uint32_t reg,
+ uint32_t acc_flags);
+void amdgpu_device_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
uint32_t acc_flags);
-void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
- uint32_t acc_flags);
void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
uint32_t acc_flags);
void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value);
@@ -1011,25 +1029,20 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
/*
* Registers read & write functions.
*/
-
-#define AMDGPU_REGS_IDX (1<<0)
#define AMDGPU_REGS_NO_KIQ (1<<1)
-#define AMDGPU_REGS_KIQ (1<<2)
-#define RREG32_NO_KIQ(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ)
-#define WREG32_NO_KIQ(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ)
+#define RREG32_NO_KIQ(reg) amdgpu_device_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ)
+#define WREG32_NO_KIQ(reg, v) amdgpu_device_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ)
-#define RREG32_KIQ(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_KIQ)
-#define WREG32_KIQ(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_KIQ)
+#define RREG32_KIQ(reg) amdgpu_kiq_rreg(adev, (reg))
+#define WREG32_KIQ(reg, v) amdgpu_kiq_wreg(adev, (reg), (v))
#define RREG8(reg) amdgpu_mm_rreg8(adev, (reg))
#define WREG8(reg, v) amdgpu_mm_wreg8(adev, (reg), (v))
-#define RREG32(reg) amdgpu_mm_rreg(adev, (reg), 0)
-#define RREG32_IDX(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_IDX)
-#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_mm_rreg(adev, (reg), 0))
-#define WREG32(reg, v) amdgpu_mm_wreg(adev, (reg), (v), 0)
-#define WREG32_IDX(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_IDX)
+#define RREG32(reg) amdgpu_device_rreg(adev, (reg), 0)
+#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_device_rreg(adev, (reg), 0))
+#define WREG32(reg, v) amdgpu_device_wreg(adev, (reg), (v), 0)
#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
#define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
#define RREG32_PCIE(reg) adev->pcie_rreg(adev, (reg))
@@ -1066,7 +1079,7 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
tmp_ |= ((val) & ~(mask)); \
WREG32_PLL(reg, tmp_); \
} while (0)
-#define DREG32_SYS(sqf, adev, reg) seq_printf((sqf), #reg " : 0x%08X\n", amdgpu_mm_rreg((adev), (reg), false))
+#define DREG32_SYS(sqf, adev, reg) seq_printf((sqf), #reg " : 0x%08X\n", amdgpu_device_rreg((adev), (reg), false))
#define RREG32_IO(reg) amdgpu_io_rreg(adev, (reg))
#define WREG32_IO(reg, v) amdgpu_io_wreg(adev, (reg), (v))
@@ -1249,5 +1262,9 @@ _name##_show(struct device *dev, \
\
static struct device_attribute pmu_attr_##_name = __ATTR_RO(_name)
-#endif
+static inline bool amdgpu_is_tmz(struct amdgpu_device *adev)
+{
+ return adev->gmc.tmz_enabled;
+}
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 1e41367ef74e..956cbbda4793 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -444,7 +444,6 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
DRM_DEBUG_DRIVER("ATIF: %d pending SBIOS requests\n", count);
- /* todo: add DC handling */
if ((req.pending & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST) &&
!amdgpu_device_has_dc_support(adev)) {
struct amdgpu_encoder *enc = atif->encoder_for_bl;
@@ -463,6 +462,27 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
#endif
}
}
+#if defined(CONFIG_DRM_AMD_DC)
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+ if ((req.pending & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST) &&
+ amdgpu_device_has_dc_support(adev)) {
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct backlight_device *bd = dm->backlight_dev;
+
+ if (bd) {
+ DRM_DEBUG_DRIVER("Changing brightness to %d\n",
+ req.backlight_level);
+
+ /*
+ * XXX backlight_device_set_brightness() is
+ * hardwired to post BACKLIGHT_UPDATE_SYSFS.
+ * It probably should accept a 'reason' parameter.
+ */
+ backlight_device_set_brightness(bd, req.backlight_level);
+ }
+ }
+#endif
+#endif
if (req.pending & ATIF_DGPU_DISPLAY_EVENT) {
if (adev->flags & AMD_IS_PX) {
pm_runtime_get_sync(adev->ddev->dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index abfbe89e805e..ad59ac4423b8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -564,6 +564,13 @@ uint32_t amdgpu_amdkfd_get_num_gws(struct kgd_dev *kgd)
return adev->gds.gws_size;
}
+uint32_t amdgpu_amdkfd_get_asic_rev_id(struct kgd_dev *kgd)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+
+ return adev->rev_id;
+}
+
int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
uint32_t vmid, uint64_t gpu_addr,
uint32_t *ib_cmd, uint32_t ib_len)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 13feb313e9b3..142746836838 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -27,6 +27,7 @@
#include <linux/types.h>
#include <linux/mm.h>
+#include <linux/kthread.h>
#include <linux/workqueue.h>
#include <kgd_kfd_interface.h>
#include <drm/ttm/ttm_execbuf_util.h>
@@ -65,6 +66,7 @@ struct kgd_mem {
struct amdgpu_sync sync;
bool aql_queue;
+ bool is_imported;
};
/* KFD Memory Eviction */
@@ -148,6 +150,9 @@ int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev);
void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd);
+int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
+ int queue_bit);
+
/* Shared API */
int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
void **mem_obj, uint64_t *gpu_addr,
@@ -175,13 +180,14 @@ uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd);
uint64_t amdgpu_amdkfd_get_unique_id(struct kgd_dev *kgd);
uint64_t amdgpu_amdkfd_get_mmio_remap_phys_addr(struct kgd_dev *kgd);
uint32_t amdgpu_amdkfd_get_num_gws(struct kgd_dev *kgd);
+uint32_t amdgpu_amdkfd_get_asic_rev_id(struct kgd_dev *kgd);
uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src);
/* Read user wptr from a specified user address space with page fault
* disabled. The memory must be pinned and mapped to the hardware when
* this is called in hqd_load functions, so it should never fault in
* the first place. This resolves a circular lock dependency involving
- * four locks, including the DQM lock and mmap_sem.
+ * four locks, including the DQM lock and mmap_lock.
*/
#define read_user_wptr(mmptr, wptr, dst) \
({ \
@@ -191,9 +197,9 @@ uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *s
if ((mmptr) == current->mm) { \
valid = !get_user((dst), (wptr)); \
} else if (current->mm == NULL) { \
- use_mm(mmptr); \
+ kthread_use_mm(mmptr); \
valid = !get_user((dst), (wptr)); \
- unuse_mm(mmptr); \
+ kthread_unuse_mm(mmptr); \
} \
pagefault_enable(); \
} \
@@ -218,7 +224,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
void *vm, struct kgd_mem **mem,
uint64_t *offset, uint32_t flags);
int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
- struct kgd_dev *kgd, struct kgd_mem *mem);
+ struct kgd_dev *kgd, struct kgd_mem *mem, uint64_t *size);
int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
struct kgd_dev *kgd, struct kgd_mem *mem, void *vm);
int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
index 6529caca88fe..35d4a5ab0228 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
@@ -22,7 +22,6 @@
#include <linux/module.h>
#include <linux/fdtable.h>
#include <linux/uaccess.h>
-#include <linux/mmu_context.h>
#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
index 4ec6d0c03201..bf927f432506 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
@@ -19,7 +19,6 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <linux/mmu_context.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "gc/gc_10_1_0_offset.h"
@@ -543,6 +542,9 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
uint32_t temp;
struct v10_compute_mqd *m = get_mqd(mqd);
+ if (adev->in_gpu_reset)
+ return -EIO;
+
#if 0
unsigned long flags;
int retry;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index 0b7e78748540..744366c7ee85 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -20,8 +20,6 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <linux/mmu_context.h>
-
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "cikd.h"
@@ -237,7 +235,7 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data);
- /* read_user_ptr may take the mm->mmap_sem.
+ /* read_user_ptr may take the mm->mmap_lock.
* release srbm_mutex to avoid circular dependency between
* srbm_mutex->mm_sem->reservation_ww_class_mutex->srbm_mutex.
*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index ccd635b812b5..feab4cc6e836 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -20,8 +20,6 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <linux/mmu_context.h>
-
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "gfx_v8_0.h"
@@ -224,7 +222,7 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data);
- /* read_user_ptr may take the mm->mmap_sem.
+ /* read_user_ptr may take the mm->mmap_lock.
* release srbm_mutex to avoid circular dependency between
* srbm_mutex->mm_sem->reservation_ww_class_mutex->srbm_mutex.
*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index df841c2ac5e7..c7fd0c47b254 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -19,8 +19,6 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <linux/mmu_context.h>
-
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "gc/gc_9_0_offset.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 6a5b91d23fd9..b91b5171270f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -362,13 +362,13 @@ static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate,
&param);
if (ret) {
- pr_err("amdgpu: failed to validate PT BOs\n");
+ pr_err("failed to validate PT BOs\n");
return ret;
}
ret = amdgpu_amdkfd_validate(&param, pd);
if (ret) {
- pr_err("amdgpu: failed to validate PD\n");
+ pr_err("failed to validate PD\n");
return ret;
}
@@ -377,7 +377,7 @@ static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
if (vm->use_cpu_for_update) {
ret = amdgpu_bo_kmap(pd, NULL);
if (ret) {
- pr_err("amdgpu: failed to kmap PD, ret=%d\n", ret);
+ pr_err("failed to kmap PD, ret=%d\n", ret);
return ret;
}
}
@@ -660,15 +660,15 @@ static int reserve_bo_and_vm(struct kgd_mem *mem,
ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
false, &ctx->duplicates);
- if (!ret)
- ctx->reserved = true;
- else {
- pr_err("Failed to reserve buffers in ttm\n");
+ if (ret) {
+ pr_err("Failed to reserve buffers in ttm.\n");
kfree(ctx->vm_pd);
ctx->vm_pd = NULL;
+ return ret;
}
- return ret;
+ ctx->reserved = true;
+ return 0;
}
/**
@@ -733,17 +733,15 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
false, &ctx->duplicates);
- if (!ret)
- ctx->reserved = true;
- else
- pr_err("Failed to reserve buffers in ttm.\n");
-
if (ret) {
+ pr_err("Failed to reserve buffers in ttm.\n");
kfree(ctx->vm_pd);
ctx->vm_pd = NULL;
+ return ret;
}
- return ret;
+ ctx->reserved = true;
+ return 0;
}
/**
@@ -1279,31 +1277,30 @@ err:
}
int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
- struct kgd_dev *kgd, struct kgd_mem *mem)
+ struct kgd_dev *kgd, struct kgd_mem *mem, uint64_t *size)
{
struct amdkfd_process_info *process_info = mem->process_info;
unsigned long bo_size = mem->bo->tbo.mem.size;
struct kfd_bo_va_list *entry, *tmp;
struct bo_vm_reservation_context ctx;
struct ttm_validate_buffer *bo_list_entry;
+ unsigned int mapped_to_gpu_memory;
int ret;
+ bool is_imported = 0;
mutex_lock(&mem->lock);
-
- if (mem->mapped_to_gpu_memory > 0) {
- pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
- mem->va, bo_size);
- mutex_unlock(&mem->lock);
- return -EBUSY;
- }
-
+ mapped_to_gpu_memory = mem->mapped_to_gpu_memory;
+ is_imported = mem->is_imported;
mutex_unlock(&mem->lock);
/* lock is not needed after this, since mem is unused and will
* be freed anyway
*/
- /* No more MMU notifiers */
- amdgpu_mn_unregister(mem->bo);
+ if (mapped_to_gpu_memory > 0) {
+ pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
+ mem->va, bo_size);
+ return -EBUSY;
+ }
/* Make sure restore workers don't access the BO any more */
bo_list_entry = &mem->validate_list;
@@ -1311,6 +1308,9 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
list_del(&bo_list_entry->head);
mutex_unlock(&process_info->lock);
+ /* No more MMU notifiers */
+ amdgpu_mn_unregister(mem->bo);
+
ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
if (unlikely(ret))
return ret;
@@ -1342,6 +1342,17 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
kfree(mem->bo->tbo.sg);
}
+ /* Update the size of the BO being freed if it was allocated from
+ * VRAM and is not imported.
+ */
+ if (size) {
+ if ((mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM) &&
+ (!is_imported))
+ *size = bo_size;
+ else
+ *size = 0;
+ }
+
/* Free the BO*/
drm_gem_object_put_unlocked(&mem->bo->tbo.base);
mutex_destroy(&mem->lock);
@@ -1382,9 +1393,9 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
* concurrently and the queues are actually stopped
*/
if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
- down_write(&current->mm->mmap_sem);
+ mmap_write_lock(current->mm);
is_invalid_userptr = atomic_read(&mem->invalid);
- up_write(&current->mm->mmap_sem);
+ mmap_write_unlock(current->mm);
}
mutex_lock(&mem->lock);
@@ -1697,6 +1708,7 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
(*mem)->process_info = avm->process_info;
add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
amdgpu_sync_create(&(*mem)->sync);
+ (*mem)->is_imported = true;
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
index d1495e1c9289..d9b35df33806 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
@@ -40,7 +40,7 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
for (i = 0; i < n; i++) {
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence,
- false, false);
+ false, false, false);
if (r)
goto exit_do_move;
r = dma_fence_wait(fence, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 031b094607bd..78ac6dbe70d8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -60,8 +60,6 @@ static uint32_t amdgpu_cgs_read_ind_register(struct cgs_device *cgs_device,
{
CGS_FUNC_ADEV;
switch (space) {
- case CGS_IND_REG__MMIO:
- return RREG32_IDX(index);
case CGS_IND_REG__PCIE:
return RREG32_PCIE(index);
case CGS_IND_REG__SMC:
@@ -77,6 +75,8 @@ static uint32_t amdgpu_cgs_read_ind_register(struct cgs_device *cgs_device,
case CGS_IND_REG__AUDIO_ENDPT:
DRM_ERROR("audio endpt register access not implemented.\n");
return 0;
+ default:
+ BUG();
}
WARN(1, "Invalid indirect register space");
return 0;
@@ -88,8 +88,6 @@ static void amdgpu_cgs_write_ind_register(struct cgs_device *cgs_device,
{
CGS_FUNC_ADEV;
switch (space) {
- case CGS_IND_REG__MMIO:
- return WREG32_IDX(index, value);
case CGS_IND_REG__PCIE:
return WREG32_PCIE(index, value);
case CGS_IND_REG__SMC:
@@ -105,6 +103,8 @@ static void amdgpu_cgs_write_ind_register(struct cgs_device *cgs_device,
case CGS_IND_REG__AUDIO_ENDPT:
DRM_ERROR("audio endpt register access not implemented.\n");
return;
+ default:
+ BUG();
}
WARN(1, "Invalid indirect register space");
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index af91627b19b0..19070226a945 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -924,7 +924,8 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
ring = to_amdgpu_ring(entity->rq->sched);
r = amdgpu_ib_get(adev, vm, ring->funcs->parse_cs ?
- chunk_ib->ib_bytes : 0, ib);
+ chunk_ib->ib_bytes : 0,
+ AMDGPU_IB_POOL_DELAYED, ib);
if (r) {
DRM_ERROR("Failed to get ib !\n");
return r;
@@ -1207,7 +1208,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
{
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
struct drm_sched_entity *entity = p->entity;
- enum drm_sched_priority priority;
struct amdgpu_bo_list_entry *e;
struct amdgpu_job *job;
uint64_t seq;
@@ -1257,7 +1257,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
trace_amdgpu_cs_ioctl(job);
amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket);
- priority = job->base.s_priority;
drm_sched_entity_push_job(&job->base, entity);
amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 6ed36a2c5f73..8842c55d4490 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -26,6 +26,7 @@
#include "amdgpu.h"
#include "amdgpu_sched.h"
#include "amdgpu_ras.h"
+#include <linux/nospec.h>
#define to_amdgpu_ctx_entity(e) \
container_of((e), struct amdgpu_ctx_entity, entity)
@@ -72,13 +73,30 @@ static enum gfx_pipe_priority amdgpu_ctx_sched_prio_to_compute_prio(enum drm_sch
}
}
-static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, const u32 hw_ip, const u32 ring)
+static unsigned int amdgpu_ctx_prio_sched_to_hw(struct amdgpu_device *adev,
+ enum drm_sched_priority prio,
+ u32 hw_ip)
+{
+ unsigned int hw_prio;
+
+ hw_prio = (hw_ip == AMDGPU_HW_IP_COMPUTE) ?
+ amdgpu_ctx_sched_prio_to_compute_prio(prio) :
+ AMDGPU_RING_PRIO_DEFAULT;
+ hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM);
+ if (adev->gpu_sched[hw_ip][hw_prio].num_scheds == 0)
+ hw_prio = AMDGPU_RING_PRIO_DEFAULT;
+
+ return hw_prio;
+}
+
+static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
+ const u32 ring)
{
struct amdgpu_device *adev = ctx->adev;
struct amdgpu_ctx_entity *entity;
struct drm_gpu_scheduler **scheds = NULL, *sched = NULL;
unsigned num_scheds = 0;
- enum gfx_pipe_priority hw_prio;
+ unsigned int hw_prio;
enum drm_sched_priority priority;
int r;
@@ -90,52 +108,16 @@ static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, const u32 hw_ip, const
entity->sequence = 1;
priority = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
ctx->init_priority : ctx->override_priority;
- switch (hw_ip) {
- case AMDGPU_HW_IP_GFX:
- sched = &adev->gfx.gfx_ring[0].sched;
- scheds = &sched;
- num_scheds = 1;
- break;
- case AMDGPU_HW_IP_COMPUTE:
- hw_prio = amdgpu_ctx_sched_prio_to_compute_prio(priority);
- scheds = adev->gfx.compute_prio_sched[hw_prio];
- num_scheds = adev->gfx.num_compute_sched[hw_prio];
- break;
- case AMDGPU_HW_IP_DMA:
- scheds = adev->sdma.sdma_sched;
- num_scheds = adev->sdma.num_sdma_sched;
- break;
- case AMDGPU_HW_IP_UVD:
- sched = &adev->uvd.inst[0].ring.sched;
- scheds = &sched;
- num_scheds = 1;
- break;
- case AMDGPU_HW_IP_VCE:
- sched = &adev->vce.ring[0].sched;
- scheds = &sched;
- num_scheds = 1;
- break;
- case AMDGPU_HW_IP_UVD_ENC:
- sched = &adev->uvd.inst[0].ring_enc[0].sched;
- scheds = &sched;
- num_scheds = 1;
- break;
- case AMDGPU_HW_IP_VCN_DEC:
- sched = drm_sched_pick_best(adev->vcn.vcn_dec_sched,
- adev->vcn.num_vcn_dec_sched);
- scheds = &sched;
- num_scheds = 1;
- break;
- case AMDGPU_HW_IP_VCN_ENC:
- sched = drm_sched_pick_best(adev->vcn.vcn_enc_sched,
- adev->vcn.num_vcn_enc_sched);
+ hw_prio = amdgpu_ctx_prio_sched_to_hw(adev, priority, hw_ip);
+
+ hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM);
+ scheds = adev->gpu_sched[hw_ip][hw_prio].sched;
+ num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds;
+
+ if (hw_ip == AMDGPU_HW_IP_VCN_ENC || hw_ip == AMDGPU_HW_IP_VCN_DEC) {
+ sched = drm_sched_pick_best(scheds, num_scheds);
scheds = &sched;
num_scheds = 1;
- break;
- case AMDGPU_HW_IP_VCN_JPEG:
- scheds = adev->jpeg.jpeg_sched;
- num_scheds = adev->jpeg.num_jpeg_sched;
- break;
}
r = drm_sched_entity_init(&entity->entity, priority, scheds, num_scheds,
@@ -178,7 +160,6 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;
return 0;
-
}
static void amdgpu_ctx_fini_entity(struct amdgpu_ctx_entity *entity)
@@ -525,7 +506,7 @@ static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx,
enum drm_sched_priority priority)
{
struct amdgpu_device *adev = ctx->adev;
- enum gfx_pipe_priority hw_prio;
+ unsigned int hw_prio;
struct drm_gpu_scheduler **scheds = NULL;
unsigned num_scheds;
@@ -534,9 +515,11 @@ static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx,
/* set hw priority */
if (hw_ip == AMDGPU_HW_IP_COMPUTE) {
- hw_prio = amdgpu_ctx_sched_prio_to_compute_prio(priority);
- scheds = adev->gfx.compute_prio_sched[hw_prio];
- num_scheds = adev->gfx.num_compute_sched[hw_prio];
+ hw_prio = amdgpu_ctx_prio_sched_to_hw(adev, priority,
+ AMDGPU_HW_IP_COMPUTE);
+ hw_prio = array_index_nospec(hw_prio, AMDGPU_RING_PRIO_MAX);
+ scheds = adev->gpu_sched[hw_ip][hw_prio].sched;
+ num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds;
drm_sched_entity_modify_sched(&aentity->entity, scheds,
num_scheds);
}
@@ -665,78 +648,3 @@ void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
idr_destroy(&mgr->ctx_handles);
mutex_destroy(&mgr->lock);
}
-
-
-static void amdgpu_ctx_init_compute_sched(struct amdgpu_device *adev)
-{
- int num_compute_sched_normal = 0;
- int num_compute_sched_high = AMDGPU_MAX_COMPUTE_RINGS - 1;
- int i;
-
- /* use one drm sched array, gfx.compute_sched to store both high and
- * normal priority drm compute schedulers */
- for (i = 0; i < adev->gfx.num_compute_rings; i++) {
- if (!adev->gfx.compute_ring[i].has_high_prio)
- adev->gfx.compute_sched[num_compute_sched_normal++] =
- &adev->gfx.compute_ring[i].sched;
- else
- adev->gfx.compute_sched[num_compute_sched_high--] =
- &adev->gfx.compute_ring[i].sched;
- }
-
- /* compute ring only has two priority for now */
- i = AMDGPU_GFX_PIPE_PRIO_NORMAL;
- adev->gfx.compute_prio_sched[i] = &adev->gfx.compute_sched[0];
- adev->gfx.num_compute_sched[i] = num_compute_sched_normal;
-
- i = AMDGPU_GFX_PIPE_PRIO_HIGH;
- if (num_compute_sched_high == (AMDGPU_MAX_COMPUTE_RINGS - 1)) {
- /* When compute has no high priority rings then use */
- /* normal priority sched array */
- adev->gfx.compute_prio_sched[i] = &adev->gfx.compute_sched[0];
- adev->gfx.num_compute_sched[i] = num_compute_sched_normal;
- } else {
- adev->gfx.compute_prio_sched[i] =
- &adev->gfx.compute_sched[num_compute_sched_high - 1];
- adev->gfx.num_compute_sched[i] =
- adev->gfx.num_compute_rings - num_compute_sched_normal;
- }
-}
-
-void amdgpu_ctx_init_sched(struct amdgpu_device *adev)
-{
- int i, j;
-
- amdgpu_ctx_init_compute_sched(adev);
- for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
- adev->gfx.gfx_sched[i] = &adev->gfx.gfx_ring[i].sched;
- adev->gfx.num_gfx_sched++;
- }
-
- for (i = 0; i < adev->sdma.num_instances; i++) {
- adev->sdma.sdma_sched[i] = &adev->sdma.instance[i].ring.sched;
- adev->sdma.num_sdma_sched++;
- }
-
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
- adev->vcn.vcn_dec_sched[adev->vcn.num_vcn_dec_sched++] =
- &adev->vcn.inst[i].ring_dec.sched;
- }
-
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
- for (j = 0; j < adev->vcn.num_enc_rings; ++j)
- adev->vcn.vcn_enc_sched[adev->vcn.num_vcn_enc_sched++] =
- &adev->vcn.inst[i].ring_enc[j].sched;
- }
-
- for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
- if (adev->jpeg.harvest_config & (1 << i))
- continue;
- adev->jpeg.jpeg_sched[adev->jpeg.num_jpeg_sched++] =
- &adev->jpeg.inst[i].ring_dec.sched;
- }
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
index de490f183af2..f54e10314661 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
@@ -88,7 +88,4 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr);
long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout);
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
-void amdgpu_ctx_init_sched(struct amdgpu_device *adev);
-
-
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index c0f9a651dc06..d33cb344be69 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -27,7 +27,7 @@
#include <linux/pci.h>
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>
-
+#include <linux/poll.h>
#include <drm/drm_debugfs.h>
#include "amdgpu.h"
@@ -74,8 +74,82 @@ int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
return 0;
}
+int amdgpu_debugfs_wait_dump(struct amdgpu_device *adev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ unsigned long timeout = 600 * HZ;
+ int ret;
+
+ wake_up_interruptible(&adev->autodump.gpu_hang);
+
+ ret = wait_for_completion_interruptible_timeout(&adev->autodump.dumping, timeout);
+ if (ret == 0) {
+ pr_err("autodump: timeout, move on to gpu recovery\n");
+ return -ETIMEDOUT;
+ }
+#endif
+ return 0;
+}
+
#if defined(CONFIG_DEBUG_FS)
+static int amdgpu_debugfs_autodump_open(struct inode *inode, struct file *file)
+{
+ struct amdgpu_device *adev = inode->i_private;
+ int ret;
+
+ file->private_data = adev;
+
+ mutex_lock(&adev->lock_reset);
+ if (adev->autodump.dumping.done) {
+ reinit_completion(&adev->autodump.dumping);
+ ret = 0;
+ } else {
+ ret = -EBUSY;
+ }
+ mutex_unlock(&adev->lock_reset);
+
+ return ret;
+}
+
+static int amdgpu_debugfs_autodump_release(struct inode *inode, struct file *file)
+{
+ struct amdgpu_device *adev = file->private_data;
+
+ complete_all(&adev->autodump.dumping);
+ return 0;
+}
+
+static unsigned int amdgpu_debugfs_autodump_poll(struct file *file, struct poll_table_struct *poll_table)
+{
+ struct amdgpu_device *adev = file->private_data;
+
+ poll_wait(file, &adev->autodump.gpu_hang, poll_table);
+
+ if (adev->in_gpu_reset)
+ return POLLIN | POLLRDNORM | POLLWRNORM;
+
+ return 0;
+}
+
+static const struct file_operations autodump_debug_fops = {
+ .owner = THIS_MODULE,
+ .open = amdgpu_debugfs_autodump_open,
+ .poll = amdgpu_debugfs_autodump_poll,
+ .release = amdgpu_debugfs_autodump_release,
+};
+
+static void amdgpu_debugfs_autodump_init(struct amdgpu_device *adev)
+{
+ init_completion(&adev->autodump.dumping);
+ complete_all(&adev->autodump.dumping);
+ init_waitqueue_head(&adev->autodump.gpu_hang);
+
+ debugfs_create_file("amdgpu_autodump", 0600,
+ adev->ddev->primary->debugfs_root,
+ adev, &autodump_debug_fops);
+}
+
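[Editor's note: userspace side of the new autodump node, as a sketch. The debugfs path and the dump step are assumptions; the open/poll/close behaviour follows the fops above — poll() wakes on a GPU hang, and closing the file completes the dump so amdgpu_debugfs_wait_dump() lets recovery proceed:]

#include <fcntl.h>
#include <poll.h>
#include <unistd.h>

int wait_and_dump(void)
{
	struct pollfd pfd;
	int fd = open("/sys/kernel/debug/dri/0/amdgpu_autodump", O_RDONLY);

	if (fd < 0)
		return -1;

	pfd.fd = fd;
	pfd.events = POLLIN;
	poll(&pfd, 1, -1);	/* returns when the driver signals a GPU hang */

	/* ... capture registers/ring state through the other debugfs files ... */

	close(fd);		/* completes the dump; GPU reset continues */
	return 0;
}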
/**
* amdgpu_debugfs_process_reg_op - Handle MMIO register reads/writes
*
@@ -152,11 +226,16 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
if (r < 0)
return r;
+ r = amdgpu_virt_enable_access_debugfs(adev);
+ if (r < 0)
+ return r;
+
if (use_bank) {
if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
(se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines)) {
pm_runtime_mark_last_busy(adev->ddev->dev);
pm_runtime_put_autosuspend(adev->ddev->dev);
+ amdgpu_virt_disable_access_debugfs(adev);
return -EINVAL;
}
mutex_lock(&adev->grbm_idx_mutex);
@@ -207,6 +286,7 @@ end:
pm_runtime_mark_last_busy(adev->ddev->dev);
pm_runtime_put_autosuspend(adev->ddev->dev);
+ amdgpu_virt_disable_access_debugfs(adev);
return result;
}
@@ -255,6 +335,10 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
if (r < 0)
return r;
+ r = amdgpu_virt_enable_access_debugfs(adev);
+ if (r < 0)
+ return r;
+
while (size) {
uint32_t value;
@@ -263,6 +347,7 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
if (r) {
pm_runtime_mark_last_busy(adev->ddev->dev);
pm_runtime_put_autosuspend(adev->ddev->dev);
+ amdgpu_virt_disable_access_debugfs(adev);
return r;
}
@@ -275,6 +360,7 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
pm_runtime_mark_last_busy(adev->ddev->dev);
pm_runtime_put_autosuspend(adev->ddev->dev);
+ amdgpu_virt_disable_access_debugfs(adev);
return result;
}
@@ -304,6 +390,10 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user
if (r < 0)
return r;
+ r = amdgpu_virt_enable_access_debugfs(adev);
+ if (r < 0)
+ return r;
+
while (size) {
uint32_t value;
@@ -311,6 +401,7 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user
if (r) {
pm_runtime_mark_last_busy(adev->ddev->dev);
pm_runtime_put_autosuspend(adev->ddev->dev);
+ amdgpu_virt_disable_access_debugfs(adev);
return r;
}
@@ -325,6 +416,7 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user
pm_runtime_mark_last_busy(adev->ddev->dev);
pm_runtime_put_autosuspend(adev->ddev->dev);
+ amdgpu_virt_disable_access_debugfs(adev);
return result;
}
@@ -354,6 +446,10 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
if (r < 0)
return r;
+ r = amdgpu_virt_enable_access_debugfs(adev);
+ if (r < 0)
+ return r;
+
while (size) {
uint32_t value;
@@ -362,6 +458,7 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
if (r) {
pm_runtime_mark_last_busy(adev->ddev->dev);
pm_runtime_put_autosuspend(adev->ddev->dev);
+ amdgpu_virt_disable_access_debugfs(adev);
return r;
}
@@ -374,6 +471,7 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
pm_runtime_mark_last_busy(adev->ddev->dev);
pm_runtime_put_autosuspend(adev->ddev->dev);
+ amdgpu_virt_disable_access_debugfs(adev);
return result;
}
@@ -403,6 +501,10 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user
if (r < 0)
return r;
+ r = amdgpu_virt_enable_access_debugfs(adev);
+ if (r < 0)
+ return r;
+
while (size) {
uint32_t value;
@@ -410,6 +512,7 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user
if (r) {
pm_runtime_mark_last_busy(adev->ddev->dev);
pm_runtime_put_autosuspend(adev->ddev->dev);
+ amdgpu_virt_disable_access_debugfs(adev);
return r;
}
@@ -424,6 +527,7 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user
pm_runtime_mark_last_busy(adev->ddev->dev);
pm_runtime_put_autosuspend(adev->ddev->dev);
+ amdgpu_virt_disable_access_debugfs(adev);
return result;
}
@@ -453,6 +557,10 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
if (r < 0)
return r;
+ r = amdgpu_virt_enable_access_debugfs(adev);
+ if (r < 0)
+ return r;
+
while (size) {
uint32_t value;
@@ -461,6 +569,7 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
if (r) {
pm_runtime_mark_last_busy(adev->ddev->dev);
pm_runtime_put_autosuspend(adev->ddev->dev);
+ amdgpu_virt_disable_access_debugfs(adev);
return r;
}
@@ -473,6 +582,7 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
pm_runtime_mark_last_busy(adev->ddev->dev);
pm_runtime_put_autosuspend(adev->ddev->dev);
+ amdgpu_virt_disable_access_debugfs(adev);
return result;
}
@@ -502,6 +612,10 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
if (r < 0)
return r;
+ r = amdgpu_virt_enable_access_debugfs(adev);
+ if (r < 0)
+ return r;
+
while (size) {
uint32_t value;
@@ -509,6 +623,7 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
if (r) {
pm_runtime_mark_last_busy(adev->ddev->dev);
pm_runtime_put_autosuspend(adev->ddev->dev);
+ amdgpu_virt_disable_access_debugfs(adev);
return r;
}
@@ -523,6 +638,7 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
pm_runtime_mark_last_busy(adev->ddev->dev);
pm_runtime_put_autosuspend(adev->ddev->dev);
+ amdgpu_virt_disable_access_debugfs(adev);
return result;
}
@@ -651,16 +767,24 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
if (r < 0)
return r;
+ r = amdgpu_virt_enable_access_debugfs(adev);
+ if (r < 0)
+ return r;
+
r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);
pm_runtime_mark_last_busy(adev->ddev->dev);
pm_runtime_put_autosuspend(adev->ddev->dev);
- if (r)
+ if (r) {
+ amdgpu_virt_disable_access_debugfs(adev);
return r;
+ }
- if (size > valuesize)
+ if (size > valuesize) {
+ amdgpu_virt_disable_access_debugfs(adev);
return -EINVAL;
+ }
outsize = 0;
x = 0;
@@ -673,6 +797,7 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
}
}
+ amdgpu_virt_disable_access_debugfs(adev);
return !r ? outsize : r;
}
@@ -720,6 +845,10 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
if (r < 0)
return r;
+ r = amdgpu_virt_enable_access_debugfs(adev);
+ if (r < 0)
+ return r;
+
/* switch to the specific se/sh/cu */
mutex_lock(&adev->grbm_idx_mutex);
amdgpu_gfx_select_se_sh(adev, se, sh, cu);
@@ -734,16 +863,20 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
pm_runtime_mark_last_busy(adev->ddev->dev);
pm_runtime_put_autosuspend(adev->ddev->dev);
- if (!x)
+ if (!x) {
+ amdgpu_virt_disable_access_debugfs(adev);
return -EINVAL;
+ }
while (size && (offset < x * 4)) {
uint32_t value;
value = data[offset >> 2];
r = put_user(value, (uint32_t *)buf);
- if (r)
+ if (r) {
+ amdgpu_virt_disable_access_debugfs(adev);
return r;
+ }
result += 4;
buf += 4;
@@ -751,6 +884,7 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
size -= 4;
}
+ amdgpu_virt_disable_access_debugfs(adev);
return result;
}
@@ -805,6 +939,10 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
if (r < 0)
return r;
+ r = amdgpu_virt_enable_access_debugfs(adev);
+ if (r < 0)
+ return r;
+
/* switch to the specific se/sh/cu */
mutex_lock(&adev->grbm_idx_mutex);
amdgpu_gfx_select_se_sh(adev, se, sh, cu);
@@ -840,6 +978,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
err:
kfree(data);
+ amdgpu_virt_disable_access_debugfs(adev);
return result;
}
@@ -1369,6 +1508,8 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
amdgpu_ras_debugfs_create_all(adev);
+ amdgpu_debugfs_autodump_init(adev);
+
return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_list,
ARRAY_SIZE(amdgpu_debugfs_list));
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
index de12d1101526..2803884d338d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
@@ -31,6 +31,11 @@ struct amdgpu_debugfs {
unsigned num_files;
};
+struct amdgpu_autodump {
+ struct completion dumping;
+ struct wait_queue_head gpu_hang;
+};
+
int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
int amdgpu_debugfs_init(struct amdgpu_device *adev);
void amdgpu_debugfs_fini(struct amdgpu_device *adev);
@@ -40,3 +45,4 @@ int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
int amdgpu_debugfs_fence_init(struct amdgpu_device *adev);
int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev);
int amdgpu_debugfs_gem_init(struct amdgpu_device *adev);
+int amdgpu_debugfs_wait_dump(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index affde2de2a0d..a027a8f7b281 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -64,9 +64,11 @@
#include "amdgpu_xgmi.h"
#include "amdgpu_ras.h"
#include "amdgpu_pmu.h"
+#include "amdgpu_fru_eeprom.h"
#include <linux/suspend.h>
#include <drm/task_barrier.h>
+#include <linux/pm_runtime.h>
MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
@@ -138,6 +140,72 @@ static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
/**
+ * DOC: product_name
+ *
+ * The amdgpu driver provides a sysfs API for reporting the product name
+ * for the device.
+ * The file product_name is used for this and returns the product name
+ * as returned from the FRU.
+ * NOTE: This is only available for certain server cards
+ */
+
+static ssize_t amdgpu_device_get_product_name(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", adev->product_name);
+}
+
+static DEVICE_ATTR(product_name, S_IRUGO,
+ amdgpu_device_get_product_name, NULL);
+
+/**
+ * DOC: product_number
+ *
+ * The amdgpu driver provides a sysfs API for reporting the part number
+ * for the device.
+ * The file product_number is used for this and returns the part number
+ * as returned from the FRU.
+ * NOTE: This is only available for certain server cards
+ */
+
+static ssize_t amdgpu_device_get_product_number(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", adev->product_number);
+}
+
+static DEVICE_ATTR(product_number, S_IRUGO,
+ amdgpu_device_get_product_number, NULL);
+
+/**
+ * DOC: serial_number
+ *
+ * The amdgpu driver provides a sysfs API for reporting the serial number
+ * for the device.
+ * The file serial_number is used for this and returns the serial number
+ * as returned from the FRU.
+ * NOTE: This is only available for certain server cards
+ */
+
+static ssize_t amdgpu_device_get_serial_number(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", adev->serial);
+}
+
+static DEVICE_ATTR(serial_number, S_IRUGO,
+ amdgpu_device_get_serial_number, NULL);
+
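[Editor's note: sketch of consuming the new attributes from userspace. The sysfs path is an assumption (the exact location depends on the card's DRM minor / PCI address); the value read back is the FRU data described in the DOC sections above:]

#include <stdio.h>

static void print_fru_info(void)
{
	char buf[64];
	FILE *f = fopen("/sys/class/drm/card0/device/serial_number", "r");

	if (f && fgets(buf, sizeof(buf), f))
		printf("GPU serial: %s", buf);	/* serial number from the FRU EEPROM */
	if (f)
		fclose(f);
}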
+/**
* amdgpu_device_supports_boco - Is the device a dGPU with HG/PX power control
*
* @dev: drm_device pointer
@@ -231,10 +299,10 @@ void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
}
/*
- * MMIO register access helper functions.
+ * device register access helper functions.
*/
/**
- * amdgpu_mm_rreg - read a memory mapped IO register
+ * amdgpu_device_rreg - read a register
*
* @adev: amdgpu_device pointer
* @reg: dword aligned register offset
@@ -242,25 +310,19 @@ void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
*
* Returns the 32 bit value from the offset specified.
*/
-uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
- uint32_t acc_flags)
+uint32_t amdgpu_device_rreg(struct amdgpu_device *adev, uint32_t reg,
+ uint32_t acc_flags)
{
uint32_t ret;
- if ((acc_flags & AMDGPU_REGS_KIQ) || (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)))
+ if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
return amdgpu_kiq_rreg(adev, reg);
- if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
+ if ((reg * 4) < adev->rmmio_size)
ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
- else {
- unsigned long flags;
-
- spin_lock_irqsave(&adev->mmio_idx_lock, flags);
- writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
- ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
- spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
- }
- trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
+ else
+ ret = adev->pcie_rreg(adev, (reg * 4));
+ trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);
return ret;
}
@@ -306,28 +368,19 @@ void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
BUG();
}
-void static inline amdgpu_mm_wreg_mmio(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t acc_flags)
+void static inline amdgpu_device_wreg_no_kiq(struct amdgpu_device *adev, uint32_t reg,
+ uint32_t v, uint32_t acc_flags)
{
- trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
+ trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
- if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
+ if ((reg * 4) < adev->rmmio_size)
writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
- else {
- unsigned long flags;
-
- spin_lock_irqsave(&adev->mmio_idx_lock, flags);
- writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
- writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
- spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
- }
-
- if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
- udelay(500);
- }
+ else
+ adev->pcie_wreg(adev, (reg * 4), v);
}
/**
- * amdgpu_mm_wreg - write to a memory mapped IO register
+ * amdgpu_device_wreg - write to a register
*
* @adev: amdgpu_device pointer
* @reg: dword aligned register offset
@@ -336,17 +389,13 @@ void static inline amdgpu_mm_wreg_mmio(struct amdgpu_device *adev, uint32_t reg,
*
* Writes the value specified to the offset specified.
*/
-void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
- uint32_t acc_flags)
+void amdgpu_device_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
+ uint32_t acc_flags)
{
- if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
- adev->last_mm_index = v;
- }
-
- if ((acc_flags & AMDGPU_REGS_KIQ) || (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)))
+ if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
return amdgpu_kiq_wreg(adev, reg, v);
- amdgpu_mm_wreg_mmio(adev, reg, v, acc_flags);
+ amdgpu_device_wreg_no_kiq(adev, reg, v, acc_flags);
}
/*
@@ -365,7 +414,7 @@ void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, uint32_t reg, uint32_t
return adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, v);
}
- amdgpu_mm_wreg_mmio(adev, reg, v, acc_flags);
+ amdgpu_device_wreg_no_kiq(adev, reg, v, acc_flags);
}
/**
@@ -397,20 +446,12 @@ u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
*/
void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
- if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
- adev->last_mm_index = v;
- }
-
if ((reg * 4) < adev->rio_mem_size)
iowrite32(v, adev->rio_mem + (reg * 4));
else {
iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
}
-
- if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
- udelay(500);
- }
}
/**
@@ -1126,6 +1167,8 @@ static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
+ amdgpu_gmc_tmz_set(adev);
+
return 0;
}
@@ -1147,7 +1190,7 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
return;
if (state == VGA_SWITCHEROO_ON) {
- pr_info("amdgpu: switched on\n");
+ pr_info("switched on\n");
/* don't suspend or resume card normally */
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
@@ -1161,7 +1204,7 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
dev->switch_power_state = DRM_SWITCH_POWER_ON;
drm_kms_helper_poll_enable(dev);
} else {
- pr_info("amdgpu: switched off\n");
+ pr_info("switched off\n");
drm_kms_helper_poll_disable(dev);
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
amdgpu_device_suspend(dev, true);
@@ -1524,9 +1567,9 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
chip_name = "vega12";
break;
case CHIP_RAVEN:
- if (adev->rev_id >= 8)
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
chip_name = "raven2";
- else if (adev->pdev->device == 0x15d8)
+ else if (adev->apu_flags & AMD_APU_IS_PICASSO)
chip_name = "picasso";
else
chip_name = "raven";
@@ -1574,8 +1617,10 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes));
- if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10)
+ if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10) {
+ amdgpu_discovery_get_gfx_info(adev);
goto parse_soc_bounding_box;
+ }
adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
@@ -1721,19 +1766,31 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
return -EINVAL;
}
- r = amdgpu_device_parse_gpu_info_fw(adev);
- if (r)
- return r;
-
- if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10)
- amdgpu_discovery_get_gfx_info(adev);
-
amdgpu_amdkfd_device_probe(adev);
if (amdgpu_sriov_vf(adev)) {
+ /* handle vbios stuff prior to full access mode for the new handshake */
+ if (adev->virt.req_init_data_ver == 1) {
+ if (!amdgpu_get_bios(adev)) {
+ DRM_ERROR("failed to get vbios\n");
+ return -EINVAL;
+ }
+
+ r = amdgpu_atombios_init(adev);
+ if (r) {
+ dev_err(adev->dev, "amdgpu_atombios_init failed\n");
+ amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
+ return r;
+ }
+ }
+ }
+
+ /* we need to send REQ_GPU here for the legacy handshake, otherwise the vbios
+ * will not be prepared by the host for this VF */
+ if (amdgpu_sriov_vf(adev) && adev->virt.req_init_data_ver < 1) {
r = amdgpu_virt_request_full_gpu(adev, true);
if (r)
- return -EAGAIN;
+ return r;
}
adev->pm.pp_feature = amdgpu_pp_feature_mask;
@@ -1763,6 +1820,14 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
}
/* get the vbios after the asic_funcs are set up */
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
+ r = amdgpu_device_parse_gpu_info_fw(adev);
+ if (r)
+ return r;
+
+ /* skip vbios handling for new handshake */
+ if (amdgpu_sriov_vf(adev) && adev->virt.req_init_data_ver == 1)
+ continue;
+
/* Read BIOS */
if (!amdgpu_get_bios(adev))
return -EINVAL;
@@ -1889,6 +1954,12 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
if (r)
return r;
+ if (amdgpu_sriov_vf(adev) && adev->virt.req_init_data_ver > 0) {
+ r = amdgpu_virt_request_full_gpu(adev, true);
+ if (r)
+ return -EAGAIN;
+ }
+
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_blocks[i].status.valid)
continue;
@@ -1975,6 +2046,8 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
amdgpu_xgmi_add_device(adev);
amdgpu_amdkfd_device_init(adev);
+ amdgpu_fru_get_product_info(adev);
+
init_failed:
if (amdgpu_sriov_vf(adev))
amdgpu_virt_release_full_gpu(adev, true);
@@ -2171,6 +2244,8 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
adev->ip_blocks[i].status.late_initialized = true;
}
+ amdgpu_ras_set_error_query_ready(adev, true);
+
amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
@@ -2203,7 +2278,8 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
if (gpu_instance->adev->flags & AMD_IS_APU)
continue;
- r = amdgpu_xgmi_set_pstate(gpu_instance->adev, 0);
+ r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
+ AMDGPU_XGMI_PSTATE_MIN);
if (r) {
DRM_ERROR("pstate setting failed (%d).\n", r);
break;
@@ -2785,12 +2861,12 @@ static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
* By default timeout for non compute jobs is 10000.
* And there is no timeout enforced on compute jobs.
* In SR-IOV or passthrough mode, timeout for compute
- * jobs are 10000 by default.
+ * jobs are 60000 by default.
*/
adev->gfx_timeout = msecs_to_jiffies(10000);
adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
- adev->compute_timeout = adev->gfx_timeout;
+ adev->compute_timeout = msecs_to_jiffies(60000);
else
adev->compute_timeout = MAX_SCHEDULE_TIMEOUT;
@@ -2841,6 +2917,14 @@ static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
return ret;
}
+static const struct attribute *amdgpu_dev_attributes[] = {
+ &dev_attr_product_name.attr,
+ &dev_attr_product_number.attr,
+ &dev_attr_serial_number.attr,
+ &dev_attr_pcie_replay_count.attr,
+ NULL
+};
+
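These attributes replace the single pcie_replay_count file and are registered with sysfs_create_files() further down in amdgpu_device_init(). A hypothetical userspace sketch for reading them follows; the /sys/class/drm/card0/device path is an assumption about where the PCI device attributes appear, and the product/serial files only carry data on boards with a readable FRU EEPROM.

/*
 * Illustrative only: read the new amdgpu device attributes from sysfs.
 * Path and availability are assumptions, not guaranteed by this patch.
 */
#include <stdio.h>

int main(void)
{
	const char *attrs[] = { "product_name", "product_number",
				"serial_number", "pcie_replay_count" };
	char path[128], buf[128];
	unsigned int i;

	for (i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++) {
		FILE *f;

		snprintf(path, sizeof(path),
			 "/sys/class/drm/card0/device/%s", attrs[i]);
		f = fopen(path, "r");
		if (!f)
			continue;	/* attribute may be missing on older kernels */
		if (fgets(buf, sizeof(buf), f))
			printf("%s: %s", attrs[i], buf);
		fclose(f);
	}
	return 0;
}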
/**
* amdgpu_device_init - initialize the driver
*
@@ -2942,9 +3026,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
INIT_LIST_HEAD(&adev->shadow_list);
mutex_init(&adev->shadow_list_lock);
- INIT_LIST_HEAD(&adev->ring_lru_list);
- spin_lock_init(&adev->ring_lru_list_lock);
-
INIT_DELAYED_WORK(&adev->delayed_init_work,
amdgpu_device_delayed_init_work_handler);
INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
@@ -2953,7 +3034,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
adev->gfx.gfx_off_req_count = 1;
- adev->pm.ac_power = power_supply_is_system_supplied() > 0 ? true : false;
+ adev->pm.ac_power = power_supply_is_system_supplied() > 0;
/* Registers mapping */
/* TODO: block userspace mapping of io register */
@@ -3002,18 +3083,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (amdgpu_mes && adev->asic_type >= CHIP_NAVI10)
adev->enable_mes = true;
- if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10) {
- r = amdgpu_discovery_init(adev);
- if (r) {
- dev_err(adev->dev, "amdgpu_discovery_init failed\n");
- return r;
- }
- }
-
- /* early init functions */
- r = amdgpu_device_ip_early_init(adev);
- if (r)
- return r;
+ /* detect hw virtualization here */
+ amdgpu_detect_virtualization(adev);
r = amdgpu_device_get_job_timeout_settings(adev);
if (r) {
@@ -3021,6 +3092,11 @@ int amdgpu_device_init(struct amdgpu_device *adev,
return r;
}
+ /* early init functions */
+ r = amdgpu_device_ip_early_init(adev);
+ if (r)
+ return r;
+
/* doorbell bar mapping and doorbell index init*/
amdgpu_device_doorbell_init(adev);
@@ -3127,14 +3203,13 @@ fence_driver_init:
goto failed;
}
- DRM_DEBUG("SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
+ dev_info(adev->dev,
+ "SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
adev->gfx.config.max_shader_engines,
adev->gfx.config.max_sh_per_se,
adev->gfx.config.max_cu_per_sh,
adev->gfx.cu_info.number);
- amdgpu_ctx_init_sched(adev);
-
adev->accel_working = true;
amdgpu_vm_check_compute_bug(adev);
@@ -3199,9 +3274,9 @@ fence_driver_init:
queue_delayed_work(system_wq, &adev->delayed_init_work,
msecs_to_jiffies(AMDGPU_RESUME_MS));
- r = device_create_file(adev->dev, &dev_attr_pcie_replay_count);
+ r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
if (r) {
- dev_err(adev->dev, "Could not create pcie_replay_count");
+ dev_err(adev->dev, "Could not create amdgpu device attr\n");
return r;
}
@@ -3284,9 +3359,10 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
adev->rmmio = NULL;
amdgpu_device_doorbell_fini(adev);
- device_remove_file(adev->dev, &dev_attr_pcie_replay_count);
if (adev->ucode_sysfs_en)
amdgpu_ucode_sysfs_fini(adev);
+
+ sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
if (IS_ENABLED(CONFIG_PERF_EVENTS))
amdgpu_pmu_fini(adev);
if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10)
@@ -3754,6 +3830,8 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
if (r)
return r;
+ amdgpu_amdkfd_pre_reset(adev);
+
/* Resume IP prior to SMC */
r = amdgpu_device_ip_reinit_early_sriov(adev);
if (r)
@@ -3848,6 +3926,8 @@ static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
int i, r = 0;
bool need_full_reset = *need_full_reset_arg;
+ amdgpu_debugfs_wait_dump(adev);
+
/* block all schedulers and reset given job's ring */
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
@@ -4052,6 +4132,64 @@ static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
mutex_unlock(&adev->lock_reset);
}
+static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
+{
+ struct pci_dev *p = NULL;
+
+ p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
+ adev->pdev->bus->number, 1);
+ if (p) {
+ pm_runtime_enable(&(p->dev));
+ pm_runtime_resume(&(p->dev));
+ }
+}
+
+static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
+{
+ enum amd_reset_method reset_method;
+ struct pci_dev *p = NULL;
+ u64 expires;
+
+ /*
+ * For now, only BACO and mode1 reset are confirmed
+ * to suffer the audio issue when not properly suspended.
+ */
+ reset_method = amdgpu_asic_reset_method(adev);
+ if ((reset_method != AMD_RESET_METHOD_BACO) &&
+ (reset_method != AMD_RESET_METHOD_MODE1))
+ return -EINVAL;
+
+ p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
+ adev->pdev->bus->number, 1);
+ if (!p)
+ return -ENODEV;
+
+ expires = pm_runtime_autosuspend_expiration(&(p->dev));
+ if (!expires)
+ /*
+ * If we cannot get the audio device autosuspend delay,
+ * fall back to a fixed 4s interval. The audio controller's
+ * default autosuspend delay is 3s, so 4s is guaranteed to
+ * cover it.
+ */
+ expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
+
+ while (!pm_runtime_status_suspended(&(p->dev))) {
+ if (!pm_runtime_suspend(&(p->dev)))
+ break;
+
+ if (expires < ktime_get_mono_fast_ns()) {
+ dev_warn(adev->dev, "failed to suspend display audio\n");
+ /* TODO: abort the succeeding gpu reset? */
+ return -ETIMEDOUT;
+ }
+ }
+
+ pm_runtime_disable(&(p->dev));
+
+ return 0;
+}
+
/**
* amdgpu_device_gpu_recover - reset the asic and recover scheduler
*
@@ -4067,7 +4205,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
struct amdgpu_job *job)
{
struct list_head device_list, *device_list_handle = NULL;
- bool need_full_reset, job_signaled;
+ bool need_full_reset = false;
+ bool job_signaled = false;
struct amdgpu_hive_info *hive = NULL;
struct amdgpu_device *tmp_adev = NULL;
int i, r = 0;
@@ -4075,6 +4214,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
bool use_baco =
(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) ?
true : false;
+ bool audio_suspended = false;
/*
* Flush RAM to disk so that after reboot
@@ -4088,16 +4228,9 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
emergency_restart();
}
- need_full_reset = job_signaled = false;
- INIT_LIST_HEAD(&device_list);
-
dev_info(adev->dev, "GPU %s begin!\n",
(in_ras_intr && !use_baco) ? "jobs stop":"reset");
- cancel_delayed_work_sync(&adev->delayed_init_work);
-
- hive = amdgpu_get_xgmi_hive(adev, false);
-
/*
* Here we trylock to avoid chain of resets executing from
* either trigger by jobs on different adevs in XGMI hive or jobs on
@@ -4105,39 +4238,25 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
* We always reset all schedulers for device and all devices for XGMI
* hive so that should take care of them too.
*/
-
+ hive = amdgpu_get_xgmi_hive(adev, true);
if (hive && !mutex_trylock(&hive->reset_lock)) {
DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress",
job ? job->base.id : -1, hive->hive_id);
+ mutex_unlock(&hive->hive_lock);
return 0;
}
- /* Start with adev pre asic reset first for soft reset check.*/
- if (!amdgpu_device_lock_adev(adev, !hive)) {
- DRM_INFO("Bailing on TDR for s_job:%llx, as another already in progress",
- job ? job->base.id : -1);
- return 0;
- }
-
- /* Block kfd: SRIOV would do it separately */
- if (!amdgpu_sriov_vf(adev))
- amdgpu_amdkfd_pre_reset(adev);
-
- /* Build list of devices to reset */
- if (adev->gmc.xgmi.num_physical_nodes > 1) {
- if (!hive) {
- /*unlock kfd: SRIOV would do it separately */
- if (!amdgpu_sriov_vf(adev))
- amdgpu_amdkfd_post_reset(adev);
- amdgpu_device_unlock_adev(adev);
+ /*
+ * Build list of devices to reset.
+ * In case we are in XGMI hive mode, resort the device list
+ * to put adev in the 1st position.
+ */
+ INIT_LIST_HEAD(&device_list);
+ if (adev->gmc.xgmi.num_physical_nodes > 1) {
+ if (!hive)
return -ENODEV;
- }
-
- /*
- * In case we are in XGMI hive mode device reset is done for all the
- * nodes in the hive to retrain all XGMI links and hence the reset
- * sequence is executed in loop on all nodes.
- */
+ if (!list_is_first(&adev->gmc.xgmi.head, &hive->device_list))
+ list_rotate_to_front(&adev->gmc.xgmi.head, &hive->device_list);
device_list_handle = &hive->device_list;
} else {
list_add_tail(&adev->gmc.xgmi.head, &device_list);
@@ -4146,19 +4265,40 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
/* block all schedulers and reset given job's ring */
list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
- if (tmp_adev != adev) {
- amdgpu_device_lock_adev(tmp_adev, false);
- if (!amdgpu_sriov_vf(tmp_adev))
- amdgpu_amdkfd_pre_reset(tmp_adev);
+ if (!amdgpu_device_lock_adev(tmp_adev, !hive)) {
+ DRM_INFO("Bailing on TDR for s_job:%llx, as another already in progress",
+ job ? job->base.id : -1);
+ mutex_unlock(&hive->hive_lock);
+ return 0;
}
/*
+ * Try to put the audio codec into suspend state
+ * before the gpu reset starts.
+ *
+ * The power domain of the graphics device is shared
+ * with the AZ power domain, so without this we may
+ * change the audio hardware from behind the audio
+ * driver's back and trigger audio codec errors.
+ */
+ if (!amdgpu_device_suspend_display_audio(tmp_adev))
+ audio_suspended = true;
+
+ amdgpu_ras_set_error_query_ready(tmp_adev, false);
+
+ cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
+
+ if (!amdgpu_sriov_vf(tmp_adev))
+ amdgpu_amdkfd_pre_reset(tmp_adev);
+
+ /*
* Mark these ASICs to be reset as untracked first
* and add them back after the reset completes
*/
amdgpu_unregister_gpu_instance(tmp_adev);
- amdgpu_fbdev_set_suspend(adev, 1);
+ amdgpu_fbdev_set_suspend(tmp_adev, 1);
/* disable ras on ALL IPs */
if (!(in_ras_intr && !use_baco) &&
@@ -4178,7 +4318,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
}
}
-
if (in_ras_intr && !use_baco)
goto skip_sched_resume;
@@ -4189,30 +4328,14 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
* job->base holds a reference to parent fence
*/
if (job && job->base.s_fence->parent &&
- dma_fence_is_signaled(job->base.s_fence->parent))
+ dma_fence_is_signaled(job->base.s_fence->parent)) {
job_signaled = true;
-
- if (job_signaled) {
dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
goto skip_hw_reset;
}
-
- /* Guilty job will be freed after this*/
- r = amdgpu_device_pre_asic_reset(adev, job, &need_full_reset);
- if (r) {
- /*TODO Should we stop ?*/
- DRM_ERROR("GPU pre asic reset failed with err, %d for drm dev, %s ",
- r, adev->ddev->unique);
- adev->asic_reset_res = r;
- }
-
retry: /* Rest of adevs pre asic reset from XGMI hive. */
list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
-
- if (tmp_adev == adev)
- continue;
-
r = amdgpu_device_pre_asic_reset(tmp_adev,
NULL,
&need_full_reset);
@@ -4274,11 +4397,15 @@ skip_sched_resume:
/*unlock kfd: SRIOV would do it separately */
if (!(in_ras_intr && !use_baco) && !amdgpu_sriov_vf(tmp_adev))
amdgpu_amdkfd_post_reset(tmp_adev);
+ if (audio_suspended)
+ amdgpu_device_resume_display_audio(tmp_adev);
amdgpu_device_unlock_adev(tmp_adev);
}
- if (hive)
+ if (hive) {
mutex_unlock(&hive->reset_lock);
+ mutex_unlock(&hive->hive_lock);
+ }
if (r)
dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h
index 057f6ea645d7..61a26c15c8dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h
@@ -52,9 +52,6 @@ struct amdgpu_df_funcs {
uint64_t (*get_fica)(struct amdgpu_device *adev, uint32_t ficaa_val);
void (*set_fica)(struct amdgpu_device *adev, uint32_t ficaa_val,
uint32_t ficadl_val, uint32_t ficadh_val);
- uint64_t (*get_dram_base_addr)(struct amdgpu_device *adev,
- uint32_t df_inst);
- uint32_t (*get_df_inst_id)(struct amdgpu_device *adev);
};
struct amdgpu_df {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index 27d8ae19a7a4..b5d6274952a5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -23,9 +23,7 @@
#include "amdgpu.h"
#include "amdgpu_discovery.h"
-#include "soc15_common.h"
#include "soc15_hw_ip.h"
-#include "nbio/nbio_2_3_offset.h"
#include "discovery.h"
#define mmRCC_CONFIG_MEMSIZE 0xde3
@@ -135,9 +133,10 @@ static int hw_id_map[MAX_HWIP] = {
static int amdgpu_discovery_read_binary(struct amdgpu_device *adev, uint8_t *binary)
{
uint64_t vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
- uint64_t pos = vram_size - DISCOVERY_TMR_SIZE;
+ uint64_t pos = vram_size - adev->discovery_tmr_size;
- amdgpu_device_vram_access(adev, pos, (uint32_t *)binary, DISCOVERY_TMR_SIZE, false);
+ amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,
+ adev->discovery_tmr_size, false);
return 0;
}
@@ -158,7 +157,7 @@ static inline bool amdgpu_discovery_verify_checksum(uint8_t *data, uint32_t size
return !!(amdgpu_discovery_calculate_checksum(data, size) == expected);
}
-int amdgpu_discovery_init(struct amdgpu_device *adev)
+static int amdgpu_discovery_init(struct amdgpu_device *adev)
{
struct table_info *info;
struct binary_header *bhdr;
@@ -169,17 +168,18 @@ int amdgpu_discovery_init(struct amdgpu_device *adev)
uint16_t checksum;
int r;
- adev->discovery = kzalloc(DISCOVERY_TMR_SIZE, GFP_KERNEL);
- if (!adev->discovery)
+ adev->discovery_tmr_size = DISCOVERY_TMR_SIZE;
+ adev->discovery_bin = kzalloc(adev->discovery_tmr_size, GFP_KERNEL);
+ if (!adev->discovery_bin)
return -ENOMEM;
- r = amdgpu_discovery_read_binary(adev, adev->discovery);
+ r = amdgpu_discovery_read_binary(adev, adev->discovery_bin);
if (r) {
DRM_ERROR("failed to read ip discovery binary\n");
goto out;
}
- bhdr = (struct binary_header *)adev->discovery;
+ bhdr = (struct binary_header *)adev->discovery_bin;
if (le32_to_cpu(bhdr->binary_signature) != BINARY_SIGNATURE) {
DRM_ERROR("invalid ip discovery binary signature\n");
@@ -192,7 +192,7 @@ int amdgpu_discovery_init(struct amdgpu_device *adev)
size = bhdr->binary_size - offset;
checksum = bhdr->binary_checksum;
- if (!amdgpu_discovery_verify_checksum(adev->discovery + offset,
+ if (!amdgpu_discovery_verify_checksum(adev->discovery_bin + offset,
size, checksum)) {
DRM_ERROR("invalid ip discovery binary checksum\n");
r = -EINVAL;
@@ -202,7 +202,7 @@ int amdgpu_discovery_init(struct amdgpu_device *adev)
info = &bhdr->table_list[IP_DISCOVERY];
offset = le16_to_cpu(info->offset);
checksum = le16_to_cpu(info->checksum);
- ihdr = (struct ip_discovery_header *)(adev->discovery + offset);
+ ihdr = (struct ip_discovery_header *)(adev->discovery_bin + offset);
if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) {
DRM_ERROR("invalid ip discovery data table signature\n");
@@ -210,7 +210,7 @@ int amdgpu_discovery_init(struct amdgpu_device *adev)
goto out;
}
- if (!amdgpu_discovery_verify_checksum(adev->discovery + offset,
+ if (!amdgpu_discovery_verify_checksum(adev->discovery_bin + offset,
ihdr->size, checksum)) {
DRM_ERROR("invalid ip discovery data table checksum\n");
r = -EINVAL;
@@ -220,9 +220,9 @@ int amdgpu_discovery_init(struct amdgpu_device *adev)
info = &bhdr->table_list[GC];
offset = le16_to_cpu(info->offset);
checksum = le16_to_cpu(info->checksum);
- ghdr = (struct gpu_info_header *)(adev->discovery + offset);
+ ghdr = (struct gpu_info_header *)(adev->discovery_bin + offset);
- if (!amdgpu_discovery_verify_checksum(adev->discovery + offset,
+ if (!amdgpu_discovery_verify_checksum(adev->discovery_bin + offset,
ghdr->size, checksum)) {
DRM_ERROR("invalid gc data table checksum\n");
r = -EINVAL;
@@ -232,16 +232,16 @@ int amdgpu_discovery_init(struct amdgpu_device *adev)
return 0;
out:
- kfree(adev->discovery);
- adev->discovery = NULL;
+ kfree(adev->discovery_bin);
+ adev->discovery_bin = NULL;
return r;
}
void amdgpu_discovery_fini(struct amdgpu_device *adev)
{
- kfree(adev->discovery);
- adev->discovery = NULL;
+ kfree(adev->discovery_bin);
+ adev->discovery_bin = NULL;
}
int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
@@ -257,14 +257,16 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
uint8_t num_base_address;
int hw_ip;
int i, j, k;
+ int r;
- if (!adev->discovery) {
- DRM_ERROR("ip discovery uninitialized\n");
- return -EINVAL;
+ r = amdgpu_discovery_init(adev);
+ if (r) {
+ DRM_ERROR("amdgpu_discovery_init failed\n");
+ return r;
}
- bhdr = (struct binary_header *)adev->discovery;
- ihdr = (struct ip_discovery_header *)(adev->discovery +
+ bhdr = (struct binary_header *)adev->discovery_bin;
+ ihdr = (struct ip_discovery_header *)(adev->discovery_bin +
le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
num_dies = le16_to_cpu(ihdr->num_dies);
@@ -272,7 +274,7 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
for (i = 0; i < num_dies; i++) {
die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
- dhdr = (struct die_header *)(adev->discovery + die_offset);
+ dhdr = (struct die_header *)(adev->discovery_bin + die_offset);
num_ips = le16_to_cpu(dhdr->num_ips);
ip_offset = die_offset + sizeof(*dhdr);
@@ -286,7 +288,7 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
le16_to_cpu(dhdr->die_id), num_ips);
for (j = 0; j < num_ips; j++) {
- ip = (struct ip *)(adev->discovery + ip_offset);
+ ip = (struct ip *)(adev->discovery_bin + ip_offset);
num_base_address = ip->num_base_address;
DRM_DEBUG("%s(%d) #%d v%d.%d.%d:\n",
@@ -335,24 +337,24 @@ int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id,
uint16_t num_ips;
int i, j;
- if (!adev->discovery) {
+ if (!adev->discovery_bin) {
DRM_ERROR("ip discovery uninitialized\n");
return -EINVAL;
}
- bhdr = (struct binary_header *)adev->discovery;
- ihdr = (struct ip_discovery_header *)(adev->discovery +
+ bhdr = (struct binary_header *)adev->discovery_bin;
+ ihdr = (struct ip_discovery_header *)(adev->discovery_bin +
le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
num_dies = le16_to_cpu(ihdr->num_dies);
for (i = 0; i < num_dies; i++) {
die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
- dhdr = (struct die_header *)(adev->discovery + die_offset);
+ dhdr = (struct die_header *)(adev->discovery_bin + die_offset);
num_ips = le16_to_cpu(dhdr->num_ips);
ip_offset = die_offset + sizeof(*dhdr);
for (j = 0; j < num_ips; j++) {
- ip = (struct ip *)(adev->discovery + ip_offset);
+ ip = (struct ip *)(adev->discovery_bin + ip_offset);
if (le16_to_cpu(ip->hw_id) == hw_id) {
if (major)
@@ -375,13 +377,13 @@ int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
struct binary_header *bhdr;
struct gc_info_v1_0 *gc_info;
- if (!adev->discovery) {
+ if (!adev->discovery_bin) {
DRM_ERROR("ip discovery uninitialized\n");
return -EINVAL;
}
- bhdr = (struct binary_header *)adev->discovery;
- gc_info = (struct gc_info_v1_0 *)(adev->discovery +
+ bhdr = (struct binary_header *)adev->discovery_bin;
+ gc_info = (struct gc_info_v1_0 *)(adev->discovery_bin +
le16_to_cpu(bhdr->table_list[GC].offset));
adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->gc_num_se);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
index ba78e15d9b05..d50d597c45ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
@@ -26,7 +26,6 @@
#define DISCOVERY_TMR_SIZE (64 << 10)
-int amdgpu_discovery_init(struct amdgpu_device *adev);
void amdgpu_discovery_fini(struct amdgpu_device *adev);
int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev);
int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 84cee27cd7ef..f7143d927b6d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -523,7 +523,8 @@ uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
break;
case CHIP_RAVEN:
/* enable S/G on PCO and RV2 */
- if (adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)
+ if ((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
+ (adev->apu_flags & AMD_APU_IS_PICASSO))
domain |= AMDGPU_GEM_DOMAIN_GTT;
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index ffeb20f11c07..43d8ed7dbd00 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -38,6 +38,7 @@
#include <drm/amdgpu_drm.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence-array.h>
+#include <linux/pci-p2pdma.h>
/**
* amdgpu_gem_prime_vmap - &dma_buf_ops.vmap implementation
@@ -179,6 +180,9 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
int r;
+ if (pci_p2pdma_distance_many(adev->pdev, &attach->dev, 1, true) < 0)
+ attach->peer2peer = false;
+
if (attach->dev->driver == adev->dev->driver)
return 0;
@@ -272,14 +276,21 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
struct dma_buf *dma_buf = attach->dmabuf;
struct drm_gem_object *obj = dma_buf->priv;
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct sg_table *sgt;
long r;
if (!bo->pin_count) {
- /* move buffer into GTT */
+ /* move buffer into GTT or VRAM */
struct ttm_operation_ctx ctx = { false, false };
+ unsigned domains = AMDGPU_GEM_DOMAIN_GTT;
- amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
+ if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM &&
+ attach->peer2peer) {
+ bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+ domains |= AMDGPU_GEM_DOMAIN_VRAM;
+ }
+ amdgpu_bo_placement_from_domain(bo, domains);
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (r)
return ERR_PTR(r);
@@ -289,20 +300,34 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
return ERR_PTR(-EBUSY);
}
- sgt = drm_prime_pages_to_sg(bo->tbo.ttm->pages, bo->tbo.num_pages);
- if (IS_ERR(sgt))
- return sgt;
-
- if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
- DMA_ATTR_SKIP_CPU_SYNC))
- goto error_free;
+ switch (bo->tbo.mem.mem_type) {
+ case TTM_PL_TT:
+ sgt = drm_prime_pages_to_sg(bo->tbo.ttm->pages,
+ bo->tbo.num_pages);
+ if (IS_ERR(sgt))
+ return sgt;
+
+ if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
+ DMA_ATTR_SKIP_CPU_SYNC))
+ goto error_free;
+ break;
+
+ case TTM_PL_VRAM:
+ r = amdgpu_vram_mgr_alloc_sgt(adev, &bo->tbo.mem, attach->dev,
+ dir, &sgt);
+ if (r)
+ return ERR_PTR(r);
+ break;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
return sgt;
error_free:
sg_free_table(sgt);
kfree(sgt);
- return ERR_PTR(-ENOMEM);
+ return ERR_PTR(-EBUSY);
}
/**
@@ -318,9 +343,18 @@ static void amdgpu_dma_buf_unmap(struct dma_buf_attachment *attach,
struct sg_table *sgt,
enum dma_data_direction dir)
{
- dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
- sg_free_table(sgt);
- kfree(sgt);
+ struct dma_buf *dma_buf = attach->dmabuf;
+ struct drm_gem_object *obj = dma_buf->priv;
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+
+ if (sgt->sgl->page_link) {
+ dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
+ sg_free_table(sgt);
+ kfree(sgt);
+ } else {
+ amdgpu_vram_mgr_free_sgt(adev, attach->dev, dir, sgt);
+ }
}
/**
@@ -514,6 +548,7 @@ amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
}
static const struct dma_buf_attach_ops amdgpu_dma_buf_attach_ops = {
+ .allow_peer2peer = true,
.move_notify = amdgpu_dma_buf_move_notify
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
index ba1bb95a3cf9..d2a105e3bf7c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
@@ -856,7 +856,7 @@ void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
const char *name = pp_lib_thermal_controller_names[controller->ucType];
info.addr = controller->ucI2cAddress >> 1;
strlcpy(info.type, name, sizeof(info.type));
- i2c_new_device(&adev->pm.i2c_bus->adapter, &info);
+ i2c_new_client_device(&adev->pm.i2c_bus->adapter, &info);
}
} else {
DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
@@ -1188,3 +1188,13 @@ int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
return ret;
}
+
+int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en)
+{
+ struct smu_context *smu = &adev->smu;
+
+ if (is_support_sw_smu(adev))
+ return smu_allow_xgmi_power_down(smu, en);
+
+ return 0;
+}
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
index 936d85aa0fbc..6a8aae70a0e6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
@@ -450,6 +450,7 @@ struct amdgpu_pm {
/* Used for I2C access to various EEPROMs on relevant ASICs */
struct i2c_adapter smu_i2c;
+ struct list_head pm_attr_list;
};
#define R600_SSTU_DFLT 0
@@ -538,4 +539,6 @@ int amdgpu_dpm_baco_enter(struct amdgpu_device *adev);
int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
uint32_t cstate);
+int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index a735d79a717b..126e74758a34 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -86,9 +86,10 @@
* - 3.35.0 - Add drm_amdgpu_info_device::tcc_disabled_mask
* - 3.36.0 - Allow reading more status registers on si/cik
* - 3.37.0 - L2 is invalidated before SDMA IBs, needed for correctness
+ * - 3.38.0 - Add AMDGPU_IB_FLAG_EMIT_MEM_SYNC
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 37
+#define KMS_DRIVER_MINOR 38
#define KMS_DRIVER_PATCHLEVEL 0
int amdgpu_vram_limit = 0;
@@ -139,12 +140,14 @@ int amdgpu_emu_mode = 0;
uint amdgpu_smu_memory_pool_size = 0;
/* FBC (bit 0) disabled by default*/
uint amdgpu_dc_feature_mask = 0;
+uint amdgpu_dc_debug_mask = 0;
int amdgpu_async_gfx_ring = 1;
int amdgpu_mcbp = 0;
int amdgpu_discovery = -1;
int amdgpu_mes = 0;
int amdgpu_noretry;
int amdgpu_force_asic_type = -1;
+int amdgpu_tmz = 0;
struct amdgpu_mgpu_info mgpu_info = {
.mutex = __MUTEX_INITIALIZER(mgpu_info.mutex),
@@ -688,13 +691,12 @@ MODULE_PARM_DESC(halt_if_hws_hang, "Halt if HWS hang is detected (0 = off (defau
/**
* DOC: hws_gws_support(bool)
- * Whether HWS support gws barriers. Default value: false (not supported)
- * This will be replaced with a MEC firmware version check once firmware
- * is ready
+ * Assume that HWS supports GWS barriers regardless of what the firmware version
+ * check says. Default value: false (rely on the MEC2 firmware version check).
*/
bool hws_gws_support;
module_param(hws_gws_support, bool, 0444);
-MODULE_PARM_DESC(hws_gws_support, "MEC FW support gws barriers (false = not supported (Default), true = supported)");
+MODULE_PARM_DESC(hws_gws_support, "Assume MEC2 FW supports GWS barriers (false = rely on FW version check (Default), true = force supported)");
/**
* DOC: queue_preemption_timeout_ms (int)
@@ -714,6 +716,13 @@ MODULE_PARM_DESC(dcfeaturemask, "all stable DC features enabled (default))");
module_param_named(dcfeaturemask, amdgpu_dc_feature_mask, uint, 0444);
/**
+ * DOC: dcdebugmask (uint)
+ * Override enabled display debug options. See enum DC_DEBUG_MASK in drivers/gpu/drm/amd/include/amd_shared.h.
+ */
+MODULE_PARM_DESC(dcdebugmask, "all debug options disabled (default))");
+module_param_named(dcdebugmask, amdgpu_dc_debug_mask, uint, 0444);
+
+/**
* DOC: abmlevel (uint)
* Override the default ABM (Adaptive Backlight Management) level used for DC
* enabled hardware. Requires DMCU to be supported and loaded.
@@ -729,6 +738,16 @@ uint amdgpu_dm_abm_level = 0;
MODULE_PARM_DESC(abmlevel, "ABM level (0 = off (default), 1-4 = backlight reduction level) ");
module_param_named(abmlevel, amdgpu_dm_abm_level, uint, 0444);
+/**
+ * DOC: tmz (int)
+ * Trusted Memory Zone (TMZ) is a method to protect data being written
+ * to or read from memory.
+ *
+ * The default value: 0 (off). TODO: change the default to auto once the feature is complete.
+ */
+MODULE_PARM_DESC(tmz, "Enable TMZ feature (-1 = auto, 0 = off (default), 1 = on)");
+module_param_named(tmz, amdgpu_tmz, int, 0444);
+
static const struct pci_device_id pciidlist[] = {
#ifdef CONFIG_DRM_AMDGPU_SI
{0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
@@ -1164,14 +1183,6 @@ static int amdgpu_pmops_resume(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
- /* GPU comes up enabled by the bios on resume */
- if (amdgpu_device_supports_boco(drm_dev) ||
- amdgpu_device_supports_baco(drm_dev)) {
- pm_runtime_disable(dev);
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
- }
-
return amdgpu_device_resume(drm_dev, true);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 7531527067df..d878fe7fee51 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -192,14 +192,22 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
* Used For polling fence.
* Returns 0 on success, -ENOMEM on failure.
*/
-int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s)
+int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
+ uint32_t timeout)
{
uint32_t seq;
+ signed long r;
if (!s)
return -EINVAL;
seq = ++ring->fence_drv.sync_seq;
+ r = amdgpu_fence_wait_polling(ring,
+ seq - ring->fence_drv.num_fences_mask,
+ timeout);
+ if (r < 1)
+ return -ETIMEDOUT;
+
amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
seq, 0);
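With the extra timeout argument, callers are expected to bound the wait on older fences and unwind the ring if the budget is exceeded; the KIQ register read/write helpers changed later in this diff follow exactly this shape. A reduced, illustrative call-site sketch (not a complete function):

	/* sketch: emit a polled fence and back the ring off on timeout */
	uint32_t seq;
	int r;

	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_wreg(ring, reg, val);
	r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
	if (r) {
		/* drop the partially built submission instead of committing it */
		amdgpu_ring_undo(ring);
		return r;
	}
	amdgpu_ring_commit(ring);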
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
new file mode 100644
index 000000000000..815c072ac4da
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
@@ -0,0 +1,185 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/pci.h>
+
+#include "amdgpu.h"
+#include "amdgpu_i2c.h"
+#include "smu_v11_0_i2c.h"
+#include "atom.h"
+
+#define I2C_PRODUCT_INFO_ADDR 0xAC
+#define I2C_PRODUCT_INFO_ADDR_SIZE 0x2
+#define I2C_PRODUCT_INFO_OFFSET 0xC0
+
+bool is_fru_eeprom_supported(struct amdgpu_device *adev)
+{
+ /* TODO: Gaming SKUs don't have the FRU EEPROM. As a hack to avoid
+ * hangs on modprobe on those SKUs, only the explicit chip IDs of
+ * VG20 server cards are supported until a proper solution is
+ * implemented.
+ *
+ * TODO: Add list of supported Arcturus DIDs once confirmed
+ */
+ if ((adev->asic_type == CHIP_VEGA20 && adev->pdev->device == 0x66a0) ||
+ (adev->asic_type == CHIP_VEGA20 && adev->pdev->device == 0x66a1) ||
+ (adev->asic_type == CHIP_VEGA20 && adev->pdev->device == 0x66a4))
+ return true;
+ return false;
+}
+
+int amdgpu_fru_read_eeprom(struct amdgpu_device *adev, uint32_t addrptr,
+ unsigned char *buff)
+{
+ int ret, size;
+ struct i2c_msg msg = {
+ .addr = I2C_PRODUCT_INFO_ADDR,
+ .flags = I2C_M_RD,
+ .buf = buff,
+ };
+ buff[0] = 0;
+ buff[1] = addrptr;
+ msg.len = I2C_PRODUCT_INFO_ADDR_SIZE + 1;
+ ret = i2c_transfer(&adev->pm.smu_i2c, &msg, 1);
+
+ if (ret < 1) {
+ DRM_WARN("FRU: Failed to get size field");
+ return ret;
+ }
+
+ /* The size byte returned over i2c always reports as 0xC0 + the actual
+ * field size, so subtract 0xC0 to get the real length (e.g. a raw value
+ * of 0xC8 means an 8-byte field).
+ */
+ size = buff[2] - I2C_PRODUCT_INFO_OFFSET;
+ /* Add 1 since address field was 1 byte */
+ buff[1] = addrptr + 1;
+
+ msg.len = I2C_PRODUCT_INFO_ADDR_SIZE + size;
+ ret = i2c_transfer(&adev->pm.smu_i2c, &msg, 1);
+
+ if (ret < 1) {
+ DRM_WARN("FRU: Failed to get data field");
+ return ret;
+ }
+
+ return size;
+}
+
+int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
+{
+ unsigned char buff[34];
+ int addrptr = 0, size = 0;
+
+ if (!is_fru_eeprom_supported(adev))
+ return 0;
+
+ /* If algo exists, it means the i2c_adapter is initialized */
+ if (!adev->pm.smu_i2c.algo) {
+ DRM_WARN("Cannot access FRU, EEPROM accessor not initialized");
+ return 0;
+ }
+
+ /* There's a lot of repetition here. This is because the FRU has
+ * variable-length fields: to get the information we have to find the
+ * size of each field and then keep reading field by field until we
+ * have all of the data we want. We use addrptr to track the address
+ * as we go.
+ */
+
+ /* The first fields are all 1 byte in size. Bytes 0-7 are offsets that
+ * contain information that isn't useful to us. Bytes 8-0xa cover the
+ * size of the entire struct and the language field, so just start from
+ * 0xb, the manufacturer size field.
+ */
+ addrptr = 0xb;
+ size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
+ if (size < 1) {
+ DRM_ERROR("Failed to read FRU Manufacturer, ret:%d", size);
+ return size;
+ }
+
+ /* Increment the addrptr by the size of the field, and 1 due to the
+ * size field being 1 byte. This pattern continues below.
+ */
+ addrptr += size + 1;
+ size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
+ if (size < 1) {
+ DRM_ERROR("Failed to read FRU product name, ret:%d", size);
+ return size;
+ }
+
+ /* Product name should only be 32 characters. Any more,
+ * and something could be wrong. Cap it at 32 to be safe
+ */
+ if (size > 32) {
+ DRM_WARN("FRU Product Number is larger than 32 characters. This is likely a mistake");
+ size = 32;
+ }
+ /* Start at 2 due to buff using fields 0 and 1 for the address */
+ memcpy(adev->product_name, &buff[2], size);
+ adev->product_name[size] = '\0';
+
+ addrptr += size + 1;
+ size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
+ if (size < 1) {
+ DRM_ERROR("Failed to read FRU product number, ret:%d", size);
+ return size;
+ }
+
+ /* Product number should only be 16 characters. Any more,
+ * and something could be wrong. Cap it at 16 to be safe
+ */
+ if (size > 16) {
+ DRM_WARN("FRU Product Number is larger than 16 characters. This is likely a mistake");
+ size = 16;
+ }
+ memcpy(adev->product_number, &buff[2], size);
+ adev->product_number[size] = '\0';
+
+ addrptr += size + 1;
+ size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
+
+ if (size < 1) {
+ DRM_ERROR("Failed to read FRU product version, ret:%d", size);
+ return size;
+ }
+
+ addrptr += size + 1;
+ size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
+
+ if (size < 1) {
+ DRM_ERROR("Failed to read FRU serial number, ret:%d", size);
+ return size;
+ }
+
+ /* Serial number should only be 16 characters. Any more,
+ * and something could be wrong. Cap it at 16 to be safe
+ */
+ if (size > 16) {
+ DRM_WARN("FRU Serial Number is larger than 16 characters. This is likely a mistake");
+ size = 16;
+ }
+ memcpy(adev->serial, &buff[2], size);
+ adev->serial[size] = '\0';
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h
new file mode 100644
index 000000000000..968115c97e33
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_PRODINFO_H__
+#define __AMDGPU_PRODINFO_H__
+
+int amdgpu_fru_get_product_info(struct amdgpu_device *adev);
+
+#endif // __AMDGPU_PRODINFO_H__
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 4277125a79ee..4ed9958af94e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -29,6 +29,7 @@
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
+#include <linux/dma-buf.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_debugfs.h>
@@ -161,16 +162,17 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
struct amdgpu_bo_list_entry vm_pd;
struct list_head list, duplicates;
+ struct dma_fence *fence = NULL;
struct ttm_validate_buffer tv;
struct ww_acquire_ctx ticket;
struct amdgpu_bo_va *bo_va;
- int r;
+ long r;
INIT_LIST_HEAD(&list);
INIT_LIST_HEAD(&duplicates);
tv.bo = &bo->tbo;
- tv.num_shared = 1;
+ tv.num_shared = 2;
list_add(&tv.head, &list);
amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
@@ -178,28 +180,34 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
if (r) {
dev_err(adev->dev, "leaking bo va because "
- "we fail to reserve bo (%d)\n", r);
+ "we fail to reserve bo (%ld)\n", r);
return;
}
bo_va = amdgpu_vm_bo_find(vm, bo);
- if (bo_va && --bo_va->ref_count == 0) {
- amdgpu_vm_bo_rmv(adev, bo_va);
-
- if (amdgpu_vm_ready(vm)) {
- struct dma_fence *fence = NULL;
+ if (!bo_va || --bo_va->ref_count)
+ goto out_unlock;
- r = amdgpu_vm_clear_freed(adev, vm, &fence);
- if (unlikely(r)) {
- dev_err(adev->dev, "failed to clear page "
- "tables on GEM object close (%d)\n", r);
- }
+ amdgpu_vm_bo_rmv(adev, bo_va);
+ if (!amdgpu_vm_ready(vm))
+ goto out_unlock;
- if (fence) {
- amdgpu_bo_fence(bo, fence, true);
- dma_fence_put(fence);
- }
- }
+ fence = dma_resv_get_excl(bo->tbo.base.resv);
+ if (fence) {
+ amdgpu_bo_fence(bo, fence, true);
+ fence = NULL;
}
+
+ r = amdgpu_vm_clear_freed(adev, vm, &fence);
+ if (r || !fence)
+ goto out_unlock;
+
+ amdgpu_bo_fence(bo, fence, true);
+ dma_fence_put(fence);
+
+out_unlock:
+ if (unlikely(r < 0))
+ dev_err(adev->dev, "failed to clear page "
+ "tables on GEM object close (%ld)\n", r);
ttm_eu_backoff_reservation(&ticket, &list);
}
@@ -226,7 +234,8 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
AMDGPU_GEM_CREATE_CPU_GTT_USWC |
AMDGPU_GEM_CREATE_VRAM_CLEARED |
AMDGPU_GEM_CREATE_VM_ALWAYS_VALID |
- AMDGPU_GEM_CREATE_EXPLICIT_SYNC))
+ AMDGPU_GEM_CREATE_EXPLICIT_SYNC |
+ AMDGPU_GEM_CREATE_ENCRYPTED))
return -EINVAL;
@@ -234,6 +243,11 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
if (args->in.domains & ~AMDGPU_GEM_DOMAIN_MASK)
return -EINVAL;
+ if (!amdgpu_is_tmz(adev) && (flags & AMDGPU_GEM_CREATE_ENCRYPTED)) {
+ DRM_NOTE_ONCE("Cannot allocate secure buffer since TMZ is disabled\n");
+ return -EINVAL;
+ }
+
/* create a gem object to contain this object in */
if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
@@ -854,7 +868,8 @@ static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
attachment = READ_ONCE(bo->tbo.base.import_attach);
if (attachment)
- seq_printf(m, " imported from %p", dma_buf);
+ seq_printf(m, " imported from %p%s", dma_buf,
+ attachment->peer2peer ? " P2P" : "");
else if (dma_buf)
seq_printf(m, " exported as %p", dma_buf);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 6b9c9193cdfa..d612033a23ac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -48,7 +48,7 @@ int amdgpu_gfx_mec_queue_to_bit(struct amdgpu_device *adev, int mec,
return bit;
}
-void amdgpu_gfx_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
+void amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
int *mec, int *pipe, int *queue)
{
*queue = bit % adev->gfx.mec.num_queue_per_pipe;
@@ -274,7 +274,7 @@ static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
if (test_bit(queue_bit, adev->gfx.mec.queue_bitmap))
continue;
- amdgpu_gfx_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);
+ amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);
/*
* 1. Using pipes 2/3 from MEC 2 seems cause problems.
@@ -304,10 +304,6 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
spin_lock_init(&kiq->ring_lock);
- r = amdgpu_device_wb_get(adev, &kiq->reg_val_offs);
- if (r)
- return r;
-
ring->adev = NULL;
ring->ring_obj = NULL;
ring->use_doorbell = true;
@@ -318,9 +314,11 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
return r;
ring->eop_gpu_addr = kiq->eop_gpu_addr;
+ ring->no_scheduler = true;
sprintf(ring->name, "kiq_%d.%d.%d", ring->me, ring->pipe, ring->queue);
r = amdgpu_ring_init(adev, ring, 1024,
- irq, AMDGPU_CP_KIQ_IRQ_DRIVER0);
+ irq, AMDGPU_CP_KIQ_IRQ_DRIVER0,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
dev_warn(adev->dev, "(%d) failed to init kiq ring\n", r);
@@ -329,7 +327,6 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring)
{
- amdgpu_device_wb_free(ring->adev, ring->adev->gfx.kiq.reg_val_offs);
amdgpu_ring_fini(ring);
}
@@ -488,6 +485,19 @@ int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev)
return amdgpu_ring_test_helper(kiq_ring);
}
+int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
+ int queue_bit)
+{
+ int mec, pipe, queue;
+ int set_resource_bit = 0;
+
+ amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);
+
+ set_resource_bit = mec * 4 * 8 + pipe * 8 + queue;
+
+ return set_resource_bit;
+}
+
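amdgpu_queue_mask_bit_to_set_resource_bit() hard-codes 4 pipes per MEC and 8 queues per pipe when remapping into the SET_RESOURCES queue mask. A small self-contained check of that mapping, illustrative only:

#include <assert.h>

/* mirrors the mec * 4 * 8 + pipe * 8 + queue formula above */
static int set_resource_bit(int mec, int pipe, int queue)
{
	return mec * 4 * 8 + pipe * 8 + queue;
}

int main(void)
{
	assert(set_resource_bit(0, 0, 0) == 0);
	assert(set_resource_bit(0, 1, 0) == 8);		/* next pipe starts 8 bits later */
	assert(set_resource_bit(1, 2, 3) == 51);	/* MEC1/pipe2/queue3 -> bit 51 */
	return 0;
}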
int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev)
{
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
@@ -510,7 +520,7 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev)
break;
}
- queue_mask |= (1ull << i);
+ queue_mask |= (1ull << amdgpu_queue_mask_bit_to_set_resource_bit(adev, i));
}
DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe,
@@ -670,16 +680,23 @@ uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
{
signed long r, cnt = 0;
unsigned long flags;
- uint32_t seq;
+ uint32_t seq, reg_val_offs = 0, value = 0;
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
struct amdgpu_ring *ring = &kiq->ring;
BUG_ON(!ring->funcs->emit_rreg);
spin_lock_irqsave(&kiq->ring_lock, flags);
+ if (amdgpu_device_wb_get(adev, &reg_val_offs)) {
+ pr_err("critical bug! too many kiq readers\n");
+ goto failed_unlock;
+ }
amdgpu_ring_alloc(ring, 32);
- amdgpu_ring_emit_rreg(ring, reg);
- amdgpu_fence_emit_polling(ring, &seq);
+ amdgpu_ring_emit_rreg(ring, reg, reg_val_offs);
+ r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
+ if (r)
+ goto failed_undo;
+
amdgpu_ring_commit(ring);
spin_unlock_irqrestore(&kiq->ring_lock, flags);
@@ -705,9 +722,18 @@ uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
if (cnt > MAX_KIQ_REG_TRY)
goto failed_kiq_read;
- return adev->wb.wb[kiq->reg_val_offs];
+ mb();
+ value = adev->wb.wb[reg_val_offs];
+ amdgpu_device_wb_free(adev, reg_val_offs);
+ return value;
+failed_undo:
+ amdgpu_ring_undo(ring);
+failed_unlock:
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
failed_kiq_read:
+ if (reg_val_offs)
+ amdgpu_device_wb_free(adev, reg_val_offs);
pr_err("failed to read reg:%x\n", reg);
return ~0;
}
@@ -725,7 +751,10 @@ void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
spin_lock_irqsave(&kiq->ring_lock, flags);
amdgpu_ring_alloc(ring, 32);
amdgpu_ring_emit_wreg(ring, reg, v);
- amdgpu_fence_emit_polling(ring, &seq);
+ r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
+ if (r)
+ goto failed_undo;
+
amdgpu_ring_commit(ring);
spin_unlock_irqrestore(&kiq->ring_lock, flags);
@@ -754,6 +783,9 @@ void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
return;
+failed_undo:
+ amdgpu_ring_undo(ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
failed_kiq_write:
pr_err("failed to write reg:%x\n", reg);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 5825692d07e4..d43c11671a38 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -103,7 +103,6 @@ struct amdgpu_kiq {
struct amdgpu_ring ring;
struct amdgpu_irq_src irq;
const struct kiq_pm4_funcs *pmf;
- uint32_t reg_val_offs;
};
/*
@@ -286,13 +285,8 @@ struct amdgpu_gfx {
bool me_fw_write_wait;
bool cp_fw_write_wait;
struct amdgpu_ring gfx_ring[AMDGPU_MAX_GFX_RINGS];
- struct drm_gpu_scheduler *gfx_sched[AMDGPU_MAX_GFX_RINGS];
- uint32_t num_gfx_sched;
unsigned num_gfx_rings;
struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS];
- struct drm_gpu_scheduler **compute_prio_sched[AMDGPU_GFX_PIPE_PRIO_MAX];
- struct drm_gpu_scheduler *compute_sched[AMDGPU_MAX_COMPUTE_RINGS];
- uint32_t num_compute_sched[AMDGPU_GFX_PIPE_PRIO_MAX];
unsigned num_compute_rings;
struct amdgpu_irq_src eop_irq;
struct amdgpu_irq_src priv_reg_irq;
@@ -370,7 +364,7 @@ void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev);
int amdgpu_gfx_mec_queue_to_bit(struct amdgpu_device *adev, int mec,
int pipe, int queue);
-void amdgpu_gfx_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
+void amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
int *mec, int *pipe, int *queue);
bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev, int mec,
int pipe, int queue);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 5884ab590486..acabb57aa8af 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -136,8 +136,8 @@ uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo)
/**
* amdgpu_gmc_vram_location - try to find VRAM location
*
- * @adev: amdgpu device structure holding all necessary informations
- * @mc: memory controller structure holding memory informations
+ * @adev: amdgpu device structure holding all necessary information
+ * @mc: memory controller structure holding memory information
* @base: base address at which to put VRAM
*
* Function will try to place VRAM at base address provided
@@ -165,8 +165,8 @@ void amdgpu_gmc_vram_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc,
/**
* amdgpu_gmc_gart_location - try to find GART location
*
- * @adev: amdgpu device structure holding all necessary informations
- * @mc: memory controller structure holding memory informations
+ * @adev: amdgpu device structure holding all necessary information
+ * @mc: memory controller structure holding memory information
*
* Function will try to place GART before or after VRAM.
*
@@ -207,8 +207,8 @@ void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
/**
* amdgpu_gmc_agp_location - try to find AGP location
- * @adev: amdgpu device structure holding all necessary informations
- * @mc: memory controller structure holding memory informations
+ * @adev: amdgpu device structure holding all necessary information
+ * @mc: memory controller structure holding memory information
*
* Function will try to find a place for the AGP BAR in the MC address
* space.
@@ -373,3 +373,38 @@ int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev)
return 0;
}
+
+/**
+ * amdgpu_tmz_set -- check and set if a device supports TMZ
+ * @adev: amdgpu_device pointer
+ *
+ * Check and set if an the device @adev supports Trusted Memory
+ * Zones (TMZ).
+ */
+void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
+{
+ switch (adev->asic_type) {
+ case CHIP_RAVEN:
+ case CHIP_RENOIR:
+ case CHIP_NAVI10:
+ case CHIP_NAVI14:
+ case CHIP_NAVI12:
+ /* Don't enable it by default yet.
+ */
+ if (amdgpu_tmz < 1) {
+ adev->gmc.tmz_enabled = false;
+ dev_info(adev->dev,
+ "Trusted Memory Zone (TMZ) feature disabled as experimental (default)\n");
+ } else {
+ adev->gmc.tmz_enabled = true;
+ dev_info(adev->dev,
+ "Trusted Memory Zone (TMZ) feature enabled as experimental (cmd line)\n");
+ }
+ break;
+ default:
+ adev->gmc.tmz_enabled = false;
+ dev_warn(adev->dev,
+ "Trusted Memory Zone (TMZ) feature not supported\n");
+ break;
+ }
+}
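The GEM create path earlier in this series rejects AMDGPU_GEM_CREATE_ENCRYPTED when amdgpu_is_tmz() is false. That helper is not visible in this hunk; presumably it is just a thin accessor over the flag set here. A minimal sketch, assuming it lives next to struct amdgpu_device in amdgpu.h:

/* sketch only: accessor assumed, not shown in this diff */
static inline bool amdgpu_is_tmz(struct amdgpu_device *adev)
{
	return adev->gmc.tmz_enabled;
}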
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index 7546da0cc70c..2bd9423c1dab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -213,6 +213,8 @@ struct amdgpu_gmc {
} fault_hash[AMDGPU_GMC_FAULT_HASH_SIZE];
uint64_t last_fault:AMDGPU_GMC_FAULT_RING_ORDER;
+ bool tmz_enabled;
+
const struct amdgpu_gmc_funcs *gmc_funcs;
struct amdgpu_xgmi xgmi;
@@ -276,4 +278,6 @@ int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev);
void amdgpu_gmc_ras_fini(struct amdgpu_device *adev);
int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev);
+extern void amdgpu_gmc_tmz_set(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index ccbd7acfc4cb..b91853fd66d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -61,12 +61,13 @@
* Returns 0 on success, error on failure.
*/
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- unsigned size, struct amdgpu_ib *ib)
+ unsigned size, enum amdgpu_ib_pool_type pool_type,
+ struct amdgpu_ib *ib)
{
int r;
if (size) {
- r = amdgpu_sa_bo_new(&adev->ring_tmp_bo,
+ r = amdgpu_sa_bo_new(&adev->ib_pools[pool_type],
&ib->sa_bo, size, 256);
if (r) {
dev_err(adev->dev, "failed to get a new IB (%d)\n", r);
@@ -131,6 +132,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
uint64_t fence_ctx;
uint32_t status = 0, alloc_size;
unsigned fence_flags = 0;
+ bool secure;
unsigned i;
int r = 0;
@@ -159,6 +161,12 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
return -EINVAL;
}
+ if ((ib->flags & AMDGPU_IB_FLAGS_SECURE) &&
+ (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)) {
+ dev_err(adev->dev, "secure submissions not supported on compute rings\n");
+ return -EINVAL;
+ }
+
alloc_size = ring->funcs->emit_frame_size + num_ibs *
ring->funcs->emit_ib_size;
@@ -181,6 +189,9 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
dma_fence_put(tmp);
}
+ if ((ib->flags & AMDGPU_IB_FLAG_EMIT_MEM_SYNC) && ring->funcs->emit_mem_sync)
+ ring->funcs->emit_mem_sync(ring);
+
if (ring->funcs->insert_start)
ring->funcs->insert_start(ring);
@@ -215,6 +226,14 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
amdgpu_ring_emit_cntxcntl(ring, status);
}
+ /* Set up the initial TMZ state and send it off.
+ */
+ secure = false;
+ if (job && ring->funcs->emit_frame_cntl) {
+ secure = ib->flags & AMDGPU_IB_FLAGS_SECURE;
+ amdgpu_ring_emit_frame_cntl(ring, true, secure);
+ }
+
for (i = 0; i < num_ibs; ++i) {
ib = &ibs[i];
@@ -226,12 +245,20 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
!amdgpu_sriov_vf(adev)) /* for SRIOV preemption, Preamble CE ib must be inserted anyway */
continue;
+ if (job && ring->funcs->emit_frame_cntl) {
+ if (secure != !!(ib->flags & AMDGPU_IB_FLAGS_SECURE)) {
+ amdgpu_ring_emit_frame_cntl(ring, false, secure);
+ secure = !secure;
+ amdgpu_ring_emit_frame_cntl(ring, true, secure);
+ }
+ }
+
amdgpu_ring_emit_ib(ring, job, ib, status);
status &= ~AMDGPU_HAVE_CTX_SWITCH;
}
- if (ring->funcs->emit_tmz)
- amdgpu_ring_emit_tmz(ring, false);
+ if (job && ring->funcs->emit_frame_cntl)
+ amdgpu_ring_emit_frame_cntl(ring, false, secure);
#ifdef CONFIG_X86_64
if (!(adev->flags & AMD_IS_APU))
@@ -280,22 +307,32 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
*/
int amdgpu_ib_pool_init(struct amdgpu_device *adev)
{
- int r;
+ unsigned size;
+ int r, i;
- if (adev->ib_pool_ready) {
+ if (adev->ib_pool_ready)
return 0;
- }
- r = amdgpu_sa_bo_manager_init(adev, &adev->ring_tmp_bo,
- AMDGPU_IB_POOL_SIZE*64*1024,
- AMDGPU_GPU_PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_GTT);
- if (r) {
- return r;
- }
+ for (i = 0; i < AMDGPU_IB_POOL_MAX; i++) {
+ if (i == AMDGPU_IB_POOL_DIRECT)
+ size = PAGE_SIZE * 2;
+ else
+ size = AMDGPU_IB_POOL_SIZE;
+
+ r = amdgpu_sa_bo_manager_init(adev, &adev->ib_pools[i],
+ size, AMDGPU_GPU_PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_GTT);
+ if (r)
+ goto error;
+ }
adev->ib_pool_ready = true;
return 0;
+
+error:
+ while (i--)
+ amdgpu_sa_bo_manager_fini(adev, &adev->ib_pools[i]);
+ return r;
}
/**
@@ -308,10 +345,14 @@ int amdgpu_ib_pool_init(struct amdgpu_device *adev)
*/
void amdgpu_ib_pool_fini(struct amdgpu_device *adev)
{
- if (adev->ib_pool_ready) {
- amdgpu_sa_bo_manager_fini(adev, &adev->ring_tmp_bo);
- adev->ib_pool_ready = false;
- }
+ int i;
+
+ if (!adev->ib_pool_ready)
+ return;
+
+ for (i = 0; i < AMDGPU_IB_POOL_MAX; i++)
+ amdgpu_sa_bo_manager_fini(adev, &adev->ib_pools[i]);
+ adev->ib_pool_ready = false;
}
/**
@@ -326,9 +367,9 @@ void amdgpu_ib_pool_fini(struct amdgpu_device *adev)
*/
int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
{
- unsigned i;
- int r, ret = 0;
long tmo_gfx, tmo_mm;
+ int r, ret = 0;
+ unsigned i;
tmo_mm = tmo_gfx = AMDGPU_IB_TEST_TIMEOUT;
if (amdgpu_sriov_vf(adev)) {
@@ -406,10 +447,16 @@ static int amdgpu_debugfs_sa_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
struct amdgpu_device *adev = dev->dev_private;
- amdgpu_sa_bo_dump_debug_info(&adev->ring_tmp_bo, m);
+ seq_printf(m, "--------------------- DELAYED --------------------- \n");
+ amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_DELAYED],
+ m);
+ seq_printf(m, "-------------------- IMMEDIATE -------------------- \n");
+ amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_IMMEDIATE],
+ m);
+ seq_printf(m, "--------------------- DIRECT ---------------------- \n");
+ amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT], m);
return 0;
-
}
static const struct drm_info_list amdgpu_debugfs_sa_list[] = {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index 3a67f6c046d4..fe92dcd94d4a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -282,7 +282,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
!dma_fence_is_later(updates, (*id)->flushed_updates))
updates = NULL;
- if ((*id)->owner != vm->direct.fence_context ||
+ if ((*id)->owner != vm->immediate.fence_context ||
job->vm_pd_addr != (*id)->pd_gpu_addr ||
updates || !(*id)->last_flush ||
((*id)->last_flush->context != fence_context &&
@@ -349,7 +349,7 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
struct dma_fence *flushed;
/* Check all the prerequisites to using this VMID */
- if ((*id)->owner != vm->direct.fence_context)
+ if ((*id)->owner != vm->immediate.fence_context)
continue;
if ((*id)->pd_gpu_addr != job->vm_pd_addr)
@@ -448,7 +448,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
}
id->pd_gpu_addr = job->vm_pd_addr;
- id->owner = vm->direct.fence_context;
+ id->owner = vm->immediate.fence_context;
if (job->vm_needs_flush) {
dma_fence_put(id->last_flush);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 5ed4227f304b..0cc4c67f95f7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -260,7 +260,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
nvec = pci_alloc_irq_vectors(adev->pdev, 1, 1, flags);
if (nvec > 0) {
adev->irq.msi_enabled = true;
- dev_dbg(adev->dev, "amdgpu: using MSI/MSI-X.\n");
+ dev_dbg(adev->dev, "using MSI/MSI-X.\n");
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 4981e443a884..47207188c569 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -33,6 +33,7 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job)
struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
struct amdgpu_job *job = to_amdgpu_job(s_job);
struct amdgpu_task_info ti;
+ struct amdgpu_device *adev = ring->adev;
memset(&ti, 0, sizeof(struct amdgpu_task_info));
@@ -49,10 +50,13 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job)
DRM_ERROR("Process information: process %s pid %d thread %s pid %d\n",
ti.process_name, ti.tgid, ti.task_name, ti.pid);
- if (amdgpu_device_should_recover_gpu(ring->adev))
+ if (amdgpu_device_should_recover_gpu(ring->adev)) {
amdgpu_device_gpu_recover(ring->adev, job);
- else
+ } else {
drm_sched_suspend_timeout(&ring->sched);
+ if (amdgpu_sriov_vf(adev))
+ adev->virt.tdr_debug = true;
+ }
}
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
@@ -87,7 +91,8 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
}
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
- struct amdgpu_job **job)
+ enum amdgpu_ib_pool_type pool_type,
+ struct amdgpu_job **job)
{
int r;
@@ -95,7 +100,7 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
if (r)
return r;
- r = amdgpu_ib_get(adev, NULL, size, &(*job)->ibs[0]);
+ r = amdgpu_ib_get(adev, NULL, size, pool_type, &(*job)->ibs[0]);
if (r)
kfree(*job);
@@ -140,7 +145,6 @@ void amdgpu_job_free(struct amdgpu_job *job)
int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
void *owner, struct dma_fence **f)
{
- enum drm_sched_priority priority;
int r;
if (!f)
@@ -152,7 +156,6 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
*f = dma_fence_get(&job->base.s_fence->finished);
amdgpu_job_free_resources(job);
- priority = job->base.s_priority;
drm_sched_entity_push_job(&job->base, entity);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
index 3f7b8433d179..81caac9b958a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
@@ -38,6 +38,7 @@
#define AMDGPU_JOB_GET_VMID(job) ((job) ? (job)->vmid : 0)
struct amdgpu_fence;
+enum amdgpu_ib_pool_type;
struct amdgpu_job {
struct drm_sched_job base;
@@ -61,14 +62,12 @@ struct amdgpu_job {
/* user fence handling */
uint64_t uf_addr;
uint64_t uf_sequence;
-
};
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
struct amdgpu_job **job, struct amdgpu_vm *vm);
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
- struct amdgpu_job **job);
-
+ enum amdgpu_ib_pool_type pool, struct amdgpu_job **job);
void amdgpu_job_free_resources(struct amdgpu_job *job);
void amdgpu_job_free(struct amdgpu_job *job);
int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
index 5727f00afc8e..d31d65e6b039 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
@@ -144,7 +144,8 @@ static int amdgpu_jpeg_dec_set_reg(struct amdgpu_ring *ring, uint32_t handle,
const unsigned ib_size_dw = 16;
int i, r;
- r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+ r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+ AMDGPU_IB_POOL_DIRECT, &job);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
index bd9ef9cc86de..5131a0a1bc8a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
@@ -43,8 +43,6 @@ struct amdgpu_jpeg {
uint8_t num_jpeg_inst;
struct amdgpu_jpeg_inst inst[AMDGPU_MAX_JPEG_INSTANCES];
struct amdgpu_jpeg_reg internal;
- struct drm_gpu_scheduler *jpeg_sched[AMDGPU_MAX_JPEG_INSTANCES];
- uint32_t num_jpeg_sched;
unsigned harvest_config;
struct delayed_work idle_work;
enum amd_powergating_state cur_state;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index fd1dc3236eca..d7e17e34fee1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -183,18 +183,18 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
/* Call ACPI methods: require modeset init
* but failure is not fatal
*/
- if (!r) {
- acpi_status = amdgpu_acpi_init(adev);
- if (acpi_status)
- dev_dbg(&dev->pdev->dev,
- "Error during ACPI methods call\n");
- }
+
+ acpi_status = amdgpu_acpi_init(adev);
+ if (acpi_status)
+ dev_dbg(&dev->pdev->dev, "Error during ACPI methods call\n");
if (adev->runpm) {
- dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
+ /* only need to skip on ATPX */
+ if (amdgpu_device_supports_boco(dev) &&
+ !amdgpu_is_atpx_hybrid())
+ dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
pm_runtime_use_autosuspend(dev->dev);
pm_runtime_set_autosuspend_delay(dev->dev, 5000);
- pm_runtime_set_active(dev->dev);
pm_runtime_allow(dev->dev);
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
index 919bd566ba3c..edaac242ff85 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
@@ -77,7 +77,6 @@ struct amdgpu_nbio_funcs {
u32 *flags);
void (*ih_control)(struct amdgpu_device *adev);
void (*init_registers)(struct amdgpu_device *adev);
- void (*detect_hw_virt)(struct amdgpu_device *adev);
void (*remap_hdp_registers)(struct amdgpu_device *adev);
void (*handle_ras_controller_intr_no_bifring)(struct amdgpu_device *adev);
void (*handle_ras_err_event_athub_intr_no_bifring)(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index c687f5415b3f..3d822eba9a5d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -753,7 +753,7 @@ int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow, struct dma_fence **fence)
return amdgpu_copy_buffer(ring, shadow_addr, parent_addr,
amdgpu_bo_size(shadow), NULL, fence,
- true, false);
+ true, false, false);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 5e39ecd8cc28..7d41f7b9a340 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -229,6 +229,17 @@ static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
}
+/**
+ * amdgpu_bo_encrypted - test if the BO is encrypted
+ * @bo: pointer to a buffer object
+ *
+ * Return true if the buffer object is encrypted, false otherwise.
+ */
+static inline bool amdgpu_bo_encrypted(struct amdgpu_bo *bo)
+{
+ return bo->flags & AMDGPU_GEM_CREATE_ENCRYPTED;
+}
+
bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index abe94a55ecad..775e389c9a13 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -154,17 +154,17 @@ int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors senso
*
*/
-static ssize_t amdgpu_get_dpm_state(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
enum amd_pm_state_type pm;
int ret;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
@@ -189,18 +189,18 @@ static ssize_t amdgpu_get_dpm_state(struct device *dev,
(pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
}
-static ssize_t amdgpu_set_dpm_state(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
+static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
enum amd_pm_state_type state;
int ret;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return -EINVAL;
+ if (adev->in_gpu_reset)
+ return -EPERM;
if (strncmp("battery", buf, strlen("battery")) == 0)
state = POWER_STATE_TYPE_BATTERY;
@@ -294,17 +294,17 @@ static ssize_t amdgpu_set_dpm_state(struct device *dev,
*
*/
-static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
enum amd_dpm_forced_level level = 0xff;
int ret;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
@@ -332,10 +332,10 @@ static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
"unknown");
}
-static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
+static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
@@ -343,8 +343,8 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
enum amd_dpm_forced_level current_level = 0xff;
int ret = 0;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return -EINVAL;
+ if (adev->in_gpu_reset)
+ return -EPERM;
if (strncmp("low", buf, strlen("low")) == 0) {
level = AMD_DPM_FORCED_LEVEL_LOW;
@@ -383,6 +383,15 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
return count;
}
+ if (adev->asic_type == CHIP_RAVEN) {
+ if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) {
+ if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL && level == AMD_DPM_FORCED_LEVEL_MANUAL)
+ amdgpu_gfx_off_ctrl(adev, false);
+ else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL && level != AMD_DPM_FORCED_LEVEL_MANUAL)
+ amdgpu_gfx_off_ctrl(adev, true);
+ }
+ }
+
/* profile_exit setting is valid only when current mode is in profile mode */
if (!(current_level & (AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
@@ -436,6 +445,9 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
struct pp_states_info data;
int i, buf_len, ret;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
return ret;
@@ -444,8 +456,11 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
ret = smu_get_power_num_states(&adev->smu, &data);
if (ret)
return ret;
- } else if (adev->powerplay.pp_funcs->get_pp_num_states)
+ } else if (adev->powerplay.pp_funcs->get_pp_num_states) {
amdgpu_dpm_get_pp_num_states(adev, &data);
+ } else {
+ memset(&data, 0, sizeof(data));
+ }
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
@@ -472,8 +487,8 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
enum amd_pm_state_type pm = 0;
int i = 0, ret = 0;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
@@ -511,8 +526,8 @@ static ssize_t amdgpu_get_pp_force_state(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
if (adev->pp_force_state_enabled)
return amdgpu_get_pp_cur_state(dev, attr, buf);
@@ -531,8 +546,8 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
unsigned long idx;
int ret;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return -EINVAL;
+ if (adev->in_gpu_reset)
+ return -EPERM;
if (strlen(buf) == 1)
adev->pp_force_state_enabled = false;
@@ -589,8 +604,8 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
char *table = NULL;
int size, ret;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
@@ -631,8 +646,8 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
struct amdgpu_device *adev = ddev->dev_private;
int ret = 0;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return -EINVAL;
+ if (adev->in_gpu_reset)
+ return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
@@ -736,8 +751,8 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
const char delimiter[3] = {' ', '\n', '\0'};
uint32_t type;
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
+ if (adev->in_gpu_reset)
+ return -EPERM;
if (count > 127)
return -EINVAL;
@@ -828,8 +843,8 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
ssize_t size;
int ret;
- if (amdgpu_sriov_vf(adev))
- return 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
@@ -870,18 +885,18 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
* the corresponding bit from original ppfeature masks and input the
* new ppfeature masks.
*/
-static ssize_t amdgpu_set_pp_feature_status(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
+static ssize_t amdgpu_set_pp_features(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
uint64_t featuremask;
int ret;
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
+ if (adev->in_gpu_reset)
+ return -EPERM;
ret = kstrtou64(buf, 0, &featuremask);
if (ret)
@@ -914,17 +929,17 @@ static ssize_t amdgpu_set_pp_feature_status(struct device *dev,
return count;
}
-static ssize_t amdgpu_get_pp_feature_status(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t amdgpu_get_pp_features(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
ssize_t size;
int ret;
- if (amdgpu_sriov_vf(adev))
- return 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
@@ -982,8 +997,8 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
ssize_t size;
int ret;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
@@ -1048,8 +1063,8 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
int ret;
uint32_t mask = 0;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return -EINVAL;
+ if (adev->in_gpu_reset)
+ return -EPERM;
ret = amdgpu_read_mask(buf, count, &mask);
if (ret)
@@ -1082,8 +1097,8 @@ static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
ssize_t size;
int ret;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
@@ -1112,8 +1127,8 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
uint32_t mask = 0;
int ret;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return -EINVAL;
+ if (adev->in_gpu_reset)
+ return -EPERM;
ret = amdgpu_read_mask(buf, count, &mask);
if (ret)
@@ -1146,8 +1161,8 @@ static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
ssize_t size;
int ret;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
@@ -1176,8 +1191,8 @@ static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
int ret;
uint32_t mask = 0;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return -EINVAL;
+ if (adev->in_gpu_reset)
+ return -EPERM;
ret = amdgpu_read_mask(buf, count, &mask);
if (ret)
@@ -1212,8 +1227,8 @@ static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
ssize_t size;
int ret;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
@@ -1242,8 +1257,8 @@ static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
int ret;
uint32_t mask = 0;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return -EINVAL;
+ if (adev->in_gpu_reset)
+ return -EPERM;
ret = amdgpu_read_mask(buf, count, &mask);
if (ret)
@@ -1278,8 +1293,8 @@ static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
ssize_t size;
int ret;
- if (amdgpu_sriov_vf(adev))
- return 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
@@ -1308,8 +1323,8 @@ static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
int ret;
uint32_t mask = 0;
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
+ if (adev->in_gpu_reset)
+ return -EPERM;
ret = amdgpu_read_mask(buf, count, &mask);
if (ret)
@@ -1344,8 +1359,8 @@ static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
ssize_t size;
int ret;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
@@ -1374,8 +1389,8 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
int ret;
uint32_t mask = 0;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return -EINVAL;
+ if (adev->in_gpu_reset)
+ return -EPERM;
ret = amdgpu_read_mask(buf, count, &mask);
if (ret)
@@ -1410,8 +1425,8 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
uint32_t value = 0;
int ret;
- if (amdgpu_sriov_vf(adev))
- return 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
@@ -1438,8 +1453,8 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
int ret;
long int value;
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
+ if (adev->in_gpu_reset)
+ return -EPERM;
ret = kstrtol(buf, 0, &value);
@@ -1479,8 +1494,8 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
uint32_t value = 0;
int ret;
- if (amdgpu_sriov_vf(adev))
- return 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
@@ -1507,8 +1522,8 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
int ret;
long int value;
- if (amdgpu_sriov_vf(adev))
- return 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
ret = kstrtol(buf, 0, &value);
@@ -1568,8 +1583,8 @@ static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
ssize_t size;
int ret;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
@@ -1606,15 +1621,15 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
long int profile_mode = 0;
const char delimiter[3] = {' ', '\n', '\0'};
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
tmp[0] = *(buf);
tmp[1] = '\0';
ret = kstrtol(tmp, 0, &profile_mode);
if (ret)
return -EINVAL;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return -EINVAL;
-
if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
if (count < 2 || count > 127)
return -EINVAL;
@@ -1660,16 +1675,16 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
* The SMU firmware computes a percentage of load based on the
* aggregate activity level in the IP cores.
*/
-static ssize_t amdgpu_get_busy_percent(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
int r, value, size = sizeof(value);
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
r = pm_runtime_get_sync(ddev->dev);
if (r < 0)
@@ -1696,16 +1711,16 @@ static ssize_t amdgpu_get_busy_percent(struct device *dev,
* The SMU firmware computes a percentage of load based on the
* aggregate activity level in the IP cores.
*/
-static ssize_t amdgpu_get_memory_busy_percent(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
int r, value, size = sizeof(value);
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
r = pm_runtime_get_sync(ddev->dev);
if (r < 0)
@@ -1742,11 +1757,17 @@ static ssize_t amdgpu_get_pcie_bw(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
- uint64_t count0, count1;
+ uint64_t count0 = 0, count1 = 0;
int ret;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
+ if (adev->flags & AMD_IS_APU)
+ return -ENODATA;
+
+ if (!adev->asic_funcs->get_pcie_usage)
+ return -ENODATA;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
@@ -1778,8 +1799,8 @@ static ssize_t amdgpu_get_unique_id(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
if (adev->unique_id)
return snprintf(buf, PAGE_SIZE, "%016llx\n", adev->unique_id);
@@ -1787,57 +1808,185 @@ static ssize_t amdgpu_get_unique_id(struct device *dev,
return 0;
}
-static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state);
-static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
- amdgpu_get_dpm_forced_performance_level,
- amdgpu_set_dpm_forced_performance_level);
-static DEVICE_ATTR(pp_num_states, S_IRUGO, amdgpu_get_pp_num_states, NULL);
-static DEVICE_ATTR(pp_cur_state, S_IRUGO, amdgpu_get_pp_cur_state, NULL);
-static DEVICE_ATTR(pp_force_state, S_IRUGO | S_IWUSR,
- amdgpu_get_pp_force_state,
- amdgpu_set_pp_force_state);
-static DEVICE_ATTR(pp_table, S_IRUGO | S_IWUSR,
- amdgpu_get_pp_table,
- amdgpu_set_pp_table);
-static DEVICE_ATTR(pp_dpm_sclk, S_IRUGO | S_IWUSR,
- amdgpu_get_pp_dpm_sclk,
- amdgpu_set_pp_dpm_sclk);
-static DEVICE_ATTR(pp_dpm_mclk, S_IRUGO | S_IWUSR,
- amdgpu_get_pp_dpm_mclk,
- amdgpu_set_pp_dpm_mclk);
-static DEVICE_ATTR(pp_dpm_socclk, S_IRUGO | S_IWUSR,
- amdgpu_get_pp_dpm_socclk,
- amdgpu_set_pp_dpm_socclk);
-static DEVICE_ATTR(pp_dpm_fclk, S_IRUGO | S_IWUSR,
- amdgpu_get_pp_dpm_fclk,
- amdgpu_set_pp_dpm_fclk);
-static DEVICE_ATTR(pp_dpm_dcefclk, S_IRUGO | S_IWUSR,
- amdgpu_get_pp_dpm_dcefclk,
- amdgpu_set_pp_dpm_dcefclk);
-static DEVICE_ATTR(pp_dpm_pcie, S_IRUGO | S_IWUSR,
- amdgpu_get_pp_dpm_pcie,
- amdgpu_set_pp_dpm_pcie);
-static DEVICE_ATTR(pp_sclk_od, S_IRUGO | S_IWUSR,
- amdgpu_get_pp_sclk_od,
- amdgpu_set_pp_sclk_od);
-static DEVICE_ATTR(pp_mclk_od, S_IRUGO | S_IWUSR,
- amdgpu_get_pp_mclk_od,
- amdgpu_set_pp_mclk_od);
-static DEVICE_ATTR(pp_power_profile_mode, S_IRUGO | S_IWUSR,
- amdgpu_get_pp_power_profile_mode,
- amdgpu_set_pp_power_profile_mode);
-static DEVICE_ATTR(pp_od_clk_voltage, S_IRUGO | S_IWUSR,
- amdgpu_get_pp_od_clk_voltage,
- amdgpu_set_pp_od_clk_voltage);
-static DEVICE_ATTR(gpu_busy_percent, S_IRUGO,
- amdgpu_get_busy_percent, NULL);
-static DEVICE_ATTR(mem_busy_percent, S_IRUGO,
- amdgpu_get_memory_busy_percent, NULL);
-static DEVICE_ATTR(pcie_bw, S_IRUGO, amdgpu_get_pcie_bw, NULL);
-static DEVICE_ATTR(pp_features, S_IRUGO | S_IWUSR,
- amdgpu_get_pp_feature_status,
- amdgpu_set_pp_feature_status);
-static DEVICE_ATTR(unique_id, S_IRUGO, amdgpu_get_unique_id, NULL);
+static struct amdgpu_device_attr amdgpu_device_attrs[] = {
+ AMDGPU_DEVICE_ATTR_RW(power_dpm_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RO(pp_num_states, ATTR_FLAG_BASIC),
+ AMDGPU_DEVICE_ATTR_RO(pp_cur_state, ATTR_FLAG_BASIC),
+ AMDGPU_DEVICE_ATTR_RW(pp_force_state, ATTR_FLAG_BASIC),
+ AMDGPU_DEVICE_ATTR_RW(pp_table, ATTR_FLAG_BASIC),
+ AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk, ATTR_FLAG_BASIC),
+ AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie, ATTR_FLAG_BASIC),
+ AMDGPU_DEVICE_ATTR_RW(pp_sclk_od, ATTR_FLAG_BASIC),
+ AMDGPU_DEVICE_ATTR_RW(pp_mclk_od, ATTR_FLAG_BASIC),
+ AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode, ATTR_FLAG_BASIC),
+ AMDGPU_DEVICE_ATTR_RW(pp_od_clk_voltage, ATTR_FLAG_BASIC),
+ AMDGPU_DEVICE_ATTR_RO(gpu_busy_percent, ATTR_FLAG_BASIC),
+ AMDGPU_DEVICE_ATTR_RO(mem_busy_percent, ATTR_FLAG_BASIC),
+ AMDGPU_DEVICE_ATTR_RO(pcie_bw, ATTR_FLAG_BASIC),
+ AMDGPU_DEVICE_ATTR_RW(pp_features, ATTR_FLAG_BASIC),
+ AMDGPU_DEVICE_ATTR_RO(unique_id, ATTR_FLAG_BASIC),
+};
+
+static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
+ uint32_t mask, enum amdgpu_device_attr_states *states)
+{
+ struct device_attribute *dev_attr = &attr->dev_attr;
+ const char *attr_name = dev_attr->attr.name;
+ struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+ enum amd_asic_type asic_type = adev->asic_type;
+
+ if (!(attr->flags & mask)) {
+ *states = ATTR_STATE_UNSUPPORTED;
+ return 0;
+ }
+
+#define DEVICE_ATTR_IS(_name) (!strcmp(attr_name, #_name))
+
+ if (DEVICE_ATTR_IS(pp_dpm_socclk)) {
+ if (asic_type < CHIP_VEGA10)
+ *states = ATTR_STATE_UNSUPPORTED;
+ } else if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
+ if (asic_type < CHIP_VEGA10 || asic_type == CHIP_ARCTURUS)
+ *states = ATTR_STATE_UNSUPPORTED;
+ } else if (DEVICE_ATTR_IS(pp_dpm_fclk)) {
+ if (asic_type < CHIP_VEGA20)
+ *states = ATTR_STATE_UNSUPPORTED;
+ } else if (DEVICE_ATTR_IS(pp_dpm_pcie)) {
+ if (asic_type == CHIP_ARCTURUS)
+ *states = ATTR_STATE_UNSUPPORTED;
+ } else if (DEVICE_ATTR_IS(pp_od_clk_voltage)) {
+ *states = ATTR_STATE_UNSUPPORTED;
+ if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
+ (!is_support_sw_smu(adev) && hwmgr->od_enabled))
+ *states = ATTR_STATE_SUPPORTED;
+ } else if (DEVICE_ATTR_IS(mem_busy_percent)) {
+ if (adev->flags & AMD_IS_APU || asic_type == CHIP_VEGA10)
+ *states = ATTR_STATE_UNSUPPORTED;
+ } else if (DEVICE_ATTR_IS(pcie_bw)) {
+ /* PCIe Perf counters won't work on APU nodes */
+ if (adev->flags & AMD_IS_APU)
+ *states = ATTR_STATE_UNSUPPORTED;
+ } else if (DEVICE_ATTR_IS(unique_id)) {
+ if (!adev->unique_id)
+ *states = ATTR_STATE_UNSUPPORTED;
+ } else if (DEVICE_ATTR_IS(pp_features)) {
+ if (adev->flags & AMD_IS_APU || asic_type < CHIP_VEGA10)
+ *states = ATTR_STATE_UNSUPPORTED;
+ }
+
+ if (asic_type == CHIP_ARCTURUS) {
+ /* Arcturus does not support standalone mclk/socclk/fclk level setting */
+ if (DEVICE_ATTR_IS(pp_dpm_mclk) ||
+ DEVICE_ATTR_IS(pp_dpm_socclk) ||
+ DEVICE_ATTR_IS(pp_dpm_fclk)) {
+ dev_attr->attr.mode &= ~S_IWUGO;
+ dev_attr->store = NULL;
+ }
+ }
+
+#undef DEVICE_ATTR_IS
+
+ return 0;
+}
+
+
+static int amdgpu_device_attr_create(struct amdgpu_device *adev,
+ struct amdgpu_device_attr *attr,
+ uint32_t mask, struct list_head *attr_list)
+{
+ int ret = 0;
+ struct device_attribute *dev_attr = &attr->dev_attr;
+ const char *name = dev_attr->attr.name;
+ enum amdgpu_device_attr_states attr_states = ATTR_STATE_SUPPORTED;
+ struct amdgpu_device_attr_entry *attr_entry;
+
+ int (*attr_update)(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
+ uint32_t mask, enum amdgpu_device_attr_states *states) = default_attr_update;
+
+ BUG_ON(!attr);
+
+ attr_update = attr->attr_update ? attr->attr_update : default_attr_update;
+
+ ret = attr_update(adev, attr, mask, &attr_states);
+ if (ret) {
+ dev_err(adev->dev, "failed to update device file %s, ret = %d\n",
+ name, ret);
+ return ret;
+ }
+
+ if (attr_states == ATTR_STATE_UNSUPPORTED)
+ return 0;
+
+ ret = device_create_file(adev->dev, dev_attr);
+ if (ret) {
+ dev_err(adev->dev, "failed to create device file %s, ret = %d\n",
+ name, ret);
+ }
+
+ attr_entry = kmalloc(sizeof(*attr_entry), GFP_KERNEL);
+ if (!attr_entry)
+ return -ENOMEM;
+
+ attr_entry->attr = attr;
+ INIT_LIST_HEAD(&attr_entry->entry);
+
+ list_add_tail(&attr_entry->entry, attr_list);
+
+ return ret;
+}
+
+static void amdgpu_device_attr_remove(struct amdgpu_device *adev, struct amdgpu_device_attr *attr)
+{
+ struct device_attribute *dev_attr = &attr->dev_attr;
+
+ device_remove_file(adev->dev, dev_attr);
+}
+
+static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
+ struct list_head *attr_list);
+
+static int amdgpu_device_attr_create_groups(struct amdgpu_device *adev,
+ struct amdgpu_device_attr *attrs,
+ uint32_t counts,
+ uint32_t mask,
+ struct list_head *attr_list)
+{
+ int ret = 0;
+ uint32_t i = 0;
+
+ for (i = 0; i < counts; i++) {
+ ret = amdgpu_device_attr_create(adev, &attrs[i], mask, attr_list);
+ if (ret)
+ goto failed;
+ }
+
+ return 0;
+
+failed:
+ amdgpu_device_attr_remove_groups(adev, attr_list);
+
+ return ret;
+}
+
+static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
+ struct list_head *attr_list)
+{
+ struct amdgpu_device_attr_entry *entry, *entry_tmp;
+
+ if (list_empty(attr_list))
+ return;
+
+ list_for_each_entry_safe(entry, entry_tmp, attr_list, entry) {
+ amdgpu_device_attr_remove(adev, entry->attr);
+ list_del(&entry->entry);
+ kfree(entry);
+ }
+}
static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
struct device_attribute *attr,
@@ -1847,6 +1996,9 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
int channel = to_sensor_dev_attr(attr)->index;
int r, temp = 0, size = sizeof(temp);
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
if (channel >= PP_TEMP_MAX)
return -EINVAL;
@@ -1978,6 +2130,9 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
u32 pwm_mode = 0;
int ret;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
ret = pm_runtime_get_sync(adev->ddev->dev);
if (ret < 0)
return ret;
@@ -2009,6 +2164,9 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
int err, ret;
int value;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
err = kstrtoint(buf, 10, &value);
if (err)
return err;
@@ -2058,6 +2216,9 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
u32 value;
u32 pwm_mode;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
err = pm_runtime_get_sync(adev->ddev->dev);
if (err < 0)
return err;
@@ -2107,6 +2268,9 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
int err;
u32 speed = 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
err = pm_runtime_get_sync(adev->ddev->dev);
if (err < 0)
return err;
@@ -2137,6 +2301,9 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
int err;
u32 speed = 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
err = pm_runtime_get_sync(adev->ddev->dev);
if (err < 0)
return err;
@@ -2166,6 +2333,9 @@ static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
u32 size = sizeof(min_rpm);
int r;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
r = pm_runtime_get_sync(adev->ddev->dev);
if (r < 0)
return r;
@@ -2191,6 +2361,9 @@ static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
u32 size = sizeof(max_rpm);
int r;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
r = pm_runtime_get_sync(adev->ddev->dev);
if (r < 0)
return r;
@@ -2215,6 +2388,9 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
int err;
u32 rpm = 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
err = pm_runtime_get_sync(adev->ddev->dev);
if (err < 0)
return err;
@@ -2244,6 +2420,9 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
u32 value;
u32 pwm_mode;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
err = pm_runtime_get_sync(adev->ddev->dev);
if (err < 0)
return err;
@@ -2290,6 +2469,9 @@ static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
u32 pwm_mode = 0;
int ret;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
ret = pm_runtime_get_sync(adev->ddev->dev);
if (ret < 0)
return ret;
@@ -2322,6 +2504,9 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
int value;
u32 pwm_mode;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
err = kstrtoint(buf, 10, &value);
if (err)
return err;
@@ -2362,6 +2547,9 @@ static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
u32 vddgfx;
int r, size = sizeof(vddgfx);
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
r = pm_runtime_get_sync(adev->ddev->dev);
if (r < 0)
return r;
@@ -2394,6 +2582,9 @@ static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
u32 vddnb;
int r, size = sizeof(vddnb);
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
/* only APUs have vddnb */
if (!(adev->flags & AMD_IS_APU))
return -EINVAL;
@@ -2431,6 +2622,9 @@ static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
int r, size = sizeof(u32);
unsigned uw;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
r = pm_runtime_get_sync(adev->ddev->dev);
if (r < 0)
return r;
@@ -2467,6 +2661,9 @@ static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
ssize_t size;
int r;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
r = pm_runtime_get_sync(adev->ddev->dev);
if (r < 0)
return r;
@@ -2496,6 +2693,9 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
ssize_t size;
int r;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
r = pm_runtime_get_sync(adev->ddev->dev);
if (r < 0)
return r;
@@ -2526,6 +2726,9 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
int err;
u32 value;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
if (amdgpu_sriov_vf(adev))
return -EINVAL;
@@ -2564,6 +2767,9 @@ static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
uint32_t sclk;
int r, size = sizeof(sclk);
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
r = pm_runtime_get_sync(adev->ddev->dev);
if (r < 0)
return r;
@@ -2596,6 +2802,9 @@ static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
uint32_t mclk;
int r, size = sizeof(mclk);
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
r = pm_runtime_get_sync(adev->ddev->dev);
if (r < 0)
return r;
@@ -3238,8 +3447,8 @@ int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_versio
int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
{
- struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
int ret;
+ uint32_t mask = 0;
if (adev->pm.sysfs_initialized)
return 0;
@@ -3247,6 +3456,8 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
if (adev->pm.dpm_enabled == 0)
return 0;
+ INIT_LIST_HEAD(&adev->pm.pm_attr_list);
+
adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
DRIVER_NAME, adev,
hwmon_groups);
@@ -3257,160 +3468,26 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
return ret;
}
- ret = device_create_file(adev->dev, &dev_attr_power_dpm_state);
- if (ret) {
- DRM_ERROR("failed to create device file for dpm state\n");
- return ret;
- }
- ret = device_create_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
- if (ret) {
- DRM_ERROR("failed to create device file for dpm state\n");
- return ret;
- }
-
-
- ret = device_create_file(adev->dev, &dev_attr_pp_num_states);
- if (ret) {
- DRM_ERROR("failed to create device file pp_num_states\n");
- return ret;
- }
- ret = device_create_file(adev->dev, &dev_attr_pp_cur_state);
- if (ret) {
- DRM_ERROR("failed to create device file pp_cur_state\n");
- return ret;
- }
- ret = device_create_file(adev->dev, &dev_attr_pp_force_state);
- if (ret) {
- DRM_ERROR("failed to create device file pp_force_state\n");
- return ret;
- }
- ret = device_create_file(adev->dev, &dev_attr_pp_table);
- if (ret) {
- DRM_ERROR("failed to create device file pp_table\n");
- return ret;
- }
-
- ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk);
- if (ret) {
- DRM_ERROR("failed to create device file pp_dpm_sclk\n");
- return ret;
- }
-
- /* Arcturus does not support standalone mclk/socclk/fclk level setting */
- if (adev->asic_type == CHIP_ARCTURUS) {
- dev_attr_pp_dpm_mclk.attr.mode &= ~S_IWUGO;
- dev_attr_pp_dpm_mclk.store = NULL;
-
- dev_attr_pp_dpm_socclk.attr.mode &= ~S_IWUGO;
- dev_attr_pp_dpm_socclk.store = NULL;
-
- dev_attr_pp_dpm_fclk.attr.mode &= ~S_IWUGO;
- dev_attr_pp_dpm_fclk.store = NULL;
+ switch (amdgpu_virt_get_sriov_vf_mode(adev)) {
+ case SRIOV_VF_MODE_ONE_VF:
+ mask = ATTR_FLAG_ONEVF;
+ break;
+ case SRIOV_VF_MODE_MULTI_VF:
+ mask = 0;
+ break;
+ case SRIOV_VF_MODE_BARE_METAL:
+ default:
+ mask = ATTR_FLAG_MASK_ALL;
+ break;
}
- ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk);
- if (ret) {
- DRM_ERROR("failed to create device file pp_dpm_mclk\n");
- return ret;
- }
- if (adev->asic_type >= CHIP_VEGA10) {
- ret = device_create_file(adev->dev, &dev_attr_pp_dpm_socclk);
- if (ret) {
- DRM_ERROR("failed to create device file pp_dpm_socclk\n");
- return ret;
- }
- if (adev->asic_type != CHIP_ARCTURUS) {
- ret = device_create_file(adev->dev, &dev_attr_pp_dpm_dcefclk);
- if (ret) {
- DRM_ERROR("failed to create device file pp_dpm_dcefclk\n");
- return ret;
- }
- }
- }
- if (adev->asic_type >= CHIP_VEGA20) {
- ret = device_create_file(adev->dev, &dev_attr_pp_dpm_fclk);
- if (ret) {
- DRM_ERROR("failed to create device file pp_dpm_fclk\n");
- return ret;
- }
- }
- if (adev->asic_type != CHIP_ARCTURUS) {
- ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie);
- if (ret) {
- DRM_ERROR("failed to create device file pp_dpm_pcie\n");
- return ret;
- }
- }
- ret = device_create_file(adev->dev, &dev_attr_pp_sclk_od);
- if (ret) {
- DRM_ERROR("failed to create device file pp_sclk_od\n");
- return ret;
- }
- ret = device_create_file(adev->dev, &dev_attr_pp_mclk_od);
- if (ret) {
- DRM_ERROR("failed to create device file pp_mclk_od\n");
- return ret;
- }
- ret = device_create_file(adev->dev,
- &dev_attr_pp_power_profile_mode);
- if (ret) {
- DRM_ERROR("failed to create device file "
- "pp_power_profile_mode\n");
- return ret;
- }
- if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
- (!is_support_sw_smu(adev) && hwmgr->od_enabled)) {
- ret = device_create_file(adev->dev,
- &dev_attr_pp_od_clk_voltage);
- if (ret) {
- DRM_ERROR("failed to create device file "
- "pp_od_clk_voltage\n");
- return ret;
- }
- }
- ret = device_create_file(adev->dev,
- &dev_attr_gpu_busy_percent);
- if (ret) {
- DRM_ERROR("failed to create device file "
- "gpu_busy_level\n");
- return ret;
- }
- /* APU does not have its own dedicated memory */
- if (!(adev->flags & AMD_IS_APU) &&
- (adev->asic_type != CHIP_VEGA10)) {
- ret = device_create_file(adev->dev,
- &dev_attr_mem_busy_percent);
- if (ret) {
- DRM_ERROR("failed to create device file "
- "mem_busy_percent\n");
- return ret;
- }
- }
- /* PCIe Perf counters won't work on APU nodes */
- if (!(adev->flags & AMD_IS_APU)) {
- ret = device_create_file(adev->dev, &dev_attr_pcie_bw);
- if (ret) {
- DRM_ERROR("failed to create device file pcie_bw\n");
- return ret;
- }
- }
- if (adev->unique_id)
- ret = device_create_file(adev->dev, &dev_attr_unique_id);
- if (ret) {
- DRM_ERROR("failed to create device file unique_id\n");
+ ret = amdgpu_device_attr_create_groups(adev,
+ amdgpu_device_attrs,
+ ARRAY_SIZE(amdgpu_device_attrs),
+ mask,
+ &adev->pm.pm_attr_list);
+ if (ret)
return ret;
- }
-
- if ((adev->asic_type >= CHIP_VEGA10) &&
- !(adev->flags & AMD_IS_APU)) {
- ret = device_create_file(adev->dev,
- &dev_attr_pp_features);
- if (ret) {
- DRM_ERROR("failed to create device file "
- "pp_features\n");
- return ret;
- }
- }
adev->pm.sysfs_initialized = true;
@@ -3419,51 +3496,13 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
{
- struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
-
if (adev->pm.dpm_enabled == 0)
return;
if (adev->pm.int_hwmon_dev)
hwmon_device_unregister(adev->pm.int_hwmon_dev);
- device_remove_file(adev->dev, &dev_attr_power_dpm_state);
- device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
-
- device_remove_file(adev->dev, &dev_attr_pp_num_states);
- device_remove_file(adev->dev, &dev_attr_pp_cur_state);
- device_remove_file(adev->dev, &dev_attr_pp_force_state);
- device_remove_file(adev->dev, &dev_attr_pp_table);
-
- device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk);
- device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk);
- if (adev->asic_type >= CHIP_VEGA10) {
- device_remove_file(adev->dev, &dev_attr_pp_dpm_socclk);
- if (adev->asic_type != CHIP_ARCTURUS)
- device_remove_file(adev->dev, &dev_attr_pp_dpm_dcefclk);
- }
- if (adev->asic_type != CHIP_ARCTURUS)
- device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie);
- if (adev->asic_type >= CHIP_VEGA20)
- device_remove_file(adev->dev, &dev_attr_pp_dpm_fclk);
- device_remove_file(adev->dev, &dev_attr_pp_sclk_od);
- device_remove_file(adev->dev, &dev_attr_pp_mclk_od);
- device_remove_file(adev->dev,
- &dev_attr_pp_power_profile_mode);
- if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
- (!is_support_sw_smu(adev) && hwmgr->od_enabled))
- device_remove_file(adev->dev,
- &dev_attr_pp_od_clk_voltage);
- device_remove_file(adev->dev, &dev_attr_gpu_busy_percent);
- if (!(adev->flags & AMD_IS_APU) &&
- (adev->asic_type != CHIP_VEGA10))
- device_remove_file(adev->dev, &dev_attr_mem_busy_percent);
- if (!(adev->flags & AMD_IS_APU))
- device_remove_file(adev->dev, &dev_attr_pcie_bw);
- if (adev->unique_id)
- device_remove_file(adev->dev, &dev_attr_unique_id);
- if ((adev->asic_type >= CHIP_VEGA10) &&
- !(adev->flags & AMD_IS_APU))
- device_remove_file(adev->dev, &dev_attr_pp_features);
+
+ amdgpu_device_attr_remove_groups(adev, &adev->pm.pm_attr_list);
}
void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
@@ -3626,6 +3665,9 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
u32 flags = 0;
int r;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
r = pm_runtime_get_sync(dev->dev);
if (r < 0)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
index 5db0ef86e84c..d9ae2b49a402 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
@@ -30,6 +30,55 @@ struct cg_flag_name
const char *name;
};
+enum amdgpu_device_attr_flags {
+ ATTR_FLAG_BASIC = (1 << 0),
+ ATTR_FLAG_ONEVF = (1 << 16),
+};
+
+#define ATTR_FLAG_TYPE_MASK (0x0000ffff)
+#define ATTR_FLAG_MODE_MASK (0xffff0000)
+#define ATTR_FLAG_MASK_ALL (0xffffffff)
+
+enum amdgpu_device_attr_states {
+ ATTR_STATE_UNSUPPORTED = 0,
+ ATTR_STATE_SUPPORTED,
+};
+
+struct amdgpu_device_attr {
+ struct device_attribute dev_attr;
+ enum amdgpu_device_attr_flags flags;
+ int (*attr_update)(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
+ uint32_t mask, enum amdgpu_device_attr_states *states);
+
+};
+
+struct amdgpu_device_attr_entry {
+ struct list_head entry;
+ struct amdgpu_device_attr *attr;
+};
+
+#define to_amdgpu_device_attr(_dev_attr) \
+ container_of(_dev_attr, struct amdgpu_device_attr, dev_attr)
+
+#define __AMDGPU_DEVICE_ATTR(_name, _mode, _show, _store, _flags, ...) \
+ { .dev_attr = __ATTR(_name, _mode, _show, _store), \
+ .flags = _flags, \
+ ##__VA_ARGS__, }
+
+#define AMDGPU_DEVICE_ATTR(_name, _mode, _flags, ...) \
+ __AMDGPU_DEVICE_ATTR(_name, _mode, \
+ amdgpu_get_##_name, amdgpu_set_##_name, \
+ _flags, ##__VA_ARGS__)
+
+#define AMDGPU_DEVICE_ATTR_RW(_name, _flags, ...) \
+ AMDGPU_DEVICE_ATTR(_name, S_IRUGO | S_IWUSR, \
+ _flags, ##__VA_ARGS__)
+
+#define AMDGPU_DEVICE_ATTR_RO(_name, _flags, ...) \
+ __AMDGPU_DEVICE_ATTR(_name, S_IRUGO, \
+ amdgpu_get_##_name, NULL, \
+ _flags, ##__VA_ARGS__)
+
void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev);
int amdgpu_pm_sysfs_init(struct amdgpu_device *adev);
int amdgpu_pm_virt_sysfs_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index deaa26808841..7301fdcfb8bc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -37,11 +37,11 @@
#include "amdgpu_ras.h"
-static void psp_set_funcs(struct amdgpu_device *adev);
-
static int psp_sysfs_init(struct amdgpu_device *adev);
static void psp_sysfs_fini(struct amdgpu_device *adev);
+static int psp_load_smu_fw(struct psp_context *psp);
+
/*
* Due to DF Cstate management centralized to PMFW, the firmware
* loading sequence will be updated as below:
@@ -80,8 +80,6 @@ static int psp_early_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct psp_context *psp = &adev->psp;
- psp_set_funcs(adev);
-
switch (adev->asic_type) {
case CHIP_VEGA10:
case CHIP_VEGA12:
@@ -201,6 +199,7 @@ psp_cmd_submit_buf(struct psp_context *psp,
int index;
int timeout = 2000;
bool ras_intr = false;
+ bool skip_unsupport = false;
mutex_lock(&psp->mutex);
@@ -232,6 +231,9 @@ psp_cmd_submit_buf(struct psp_context *psp,
amdgpu_asic_invalidate_hdp(psp->adev, NULL);
}
+ /* We allow TEE_ERROR_NOT_SUPPORTED for VMR command in SRIOV */
+ skip_unsupport = (psp->cmd_buf_mem->resp.status == 0xffff000a) && amdgpu_sriov_vf(psp->adev);
+
/* In some cases, psp response status is not 0 even when there is no
* problem while the command is submitted. Some versions of PSP FW
* doesn't write 0 to that field.
@@ -239,7 +241,7 @@ psp_cmd_submit_buf(struct psp_context *psp,
* during psp initialization to avoid breaking hw_init and it doesn't
* return -EINVAL.
*/
- if ((psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
+ if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
if (ucode)
DRM_WARN("failed to load ucode id (%d) ",
ucode->ucode_id);
@@ -268,7 +270,7 @@ static void psp_prep_tmr_cmd_buf(struct psp_context *psp,
struct psp_gfx_cmd_resp *cmd,
uint64_t tmr_mc, uint32_t size)
{
- if (psp_support_vmr_ring(psp))
+ if (amdgpu_sriov_vf(psp->adev))
cmd->cmd_id = GFX_CMD_ID_SETUP_VMR;
else
cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
@@ -662,6 +664,121 @@ int psp_xgmi_initialize(struct psp_context *psp)
return ret;
}
+int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id)
+{
+ struct ta_xgmi_shared_memory *xgmi_cmd;
+ int ret;
+
+ xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
+ memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
+
+ xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;
+
+ /* Invoke xgmi ta to get hive id */
+ ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
+ if (ret)
+ return ret;
+
+ *hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;
+
+ return 0;
+}
+
+int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
+{
+ struct ta_xgmi_shared_memory *xgmi_cmd;
+ int ret;
+
+ xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
+ memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
+
+ xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID;
+
+ /* Invoke xgmi ta to get the node id */
+ ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
+ if (ret)
+ return ret;
+
+ *node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id;
+
+ return 0;
+}
+
+int psp_xgmi_get_topology_info(struct psp_context *psp,
+ int number_devices,
+ struct psp_xgmi_topology_info *topology)
+{
+ struct ta_xgmi_shared_memory *xgmi_cmd;
+ struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
+ struct ta_xgmi_cmd_get_topology_info_output *topology_info_output;
+ int i;
+ int ret;
+
+ if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
+ return -EINVAL;
+
+ xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
+ memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
+
+ /* Fill in the shared memory with topology information as input */
+ topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
+ xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO;
+ topology_info_input->num_nodes = number_devices;
+
+ for (i = 0; i < topology_info_input->num_nodes; i++) {
+ topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
+ topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
+ topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
+ topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
+ }
+
+ /* Invoke xgmi ta to get the topology information */
+ ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO);
+ if (ret)
+ return ret;
+
+ /* Read the output topology information from the shared memory */
+ topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info;
+ topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes;
+ for (i = 0; i < topology->num_nodes; i++) {
+ topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
+ topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;
+ topology->nodes[i].is_sharing_enabled = topology_info_output->nodes[i].is_sharing_enabled;
+ topology->nodes[i].sdma_engine = topology_info_output->nodes[i].sdma_engine;
+ }
+
+ return 0;
+}
+
+int psp_xgmi_set_topology_info(struct psp_context *psp,
+ int number_devices,
+ struct psp_xgmi_topology_info *topology)
+{
+ struct ta_xgmi_shared_memory *xgmi_cmd;
+ struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
+ int i;
+
+ if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
+ return -EINVAL;
+
+ xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
+ memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
+
+ topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
+ xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO;
+ topology_info_input->num_nodes = number_devices;
+
+ for (i = 0; i < topology_info_input->num_nodes; i++) {
+ topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
+ topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
+ topology_info_input->nodes[i].is_sharing_enabled = 1;
+ topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
+ }
+
+ /* Invoke xgmi ta to set topology information */
+ return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO);
+}
+
// ras begin
static int psp_ras_init_shared_buf(struct psp_context *psp)
{
@@ -744,13 +861,40 @@ static int psp_ras_unload(struct psp_context *psp)
int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
+ struct ta_ras_shared_memory *ras_cmd;
+ int ret;
+
+ ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
+
/*
* TODO: bypass the loading in sriov for now
*/
if (amdgpu_sriov_vf(psp->adev))
return 0;
- return psp_ta_invoke(psp, ta_cmd_id, psp->ras.session_id);
+ ret = psp_ta_invoke(psp, ta_cmd_id, psp->ras.session_id);
+
+ if (amdgpu_ras_intr_triggered())
+ return ret;
+
+ if (ras_cmd->if_version > RAS_TA_HOST_IF_VER) {
+ DRM_WARN("RAS: Unsupported Interface");
+ return -EINVAL;
+ }
+
+ if (!ret) {
+ if (ras_cmd->ras_out_message.flags.err_inject_switch_disable_flag) {
+ dev_warn(psp->adev->dev, "ECC switch disabled\n");
+
+ ras_cmd->ras_status = TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE;
+ }
+ else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag)
+ dev_warn(psp->adev->dev,
+ "RAS internal register access blocked\n");
+ }
+
+ return ret;
}
int psp_ras_enable_features(struct psp_context *psp,
@@ -834,6 +978,33 @@ static int psp_ras_initialize(struct psp_context *psp)
return 0;
}
+
+int psp_ras_trigger_error(struct psp_context *psp,
+ struct ta_ras_trigger_error_input *info)
+{
+ struct ta_ras_shared_memory *ras_cmd;
+ int ret;
+
+ if (!psp->ras.ras_initialized)
+ return -EINVAL;
+
+ ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
+ memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
+
+ ras_cmd->cmd_id = TA_RAS_COMMAND__TRIGGER_ERROR;
+ ras_cmd->ras_in_message.trigger_error = *info;
+
+ ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
+ if (ret)
+ return -EINVAL;
+
+ /* If err_event_athub occurs, error injection was successful; however,
+ the return status from the TA is no longer reliable */
+ if (amdgpu_ras_intr_triggered())
+ return 0;
+
+ return ras_cmd->ras_status;
+}
// ras end
// HDCP start
@@ -884,6 +1055,7 @@ static int psp_hdcp_load(struct psp_context *psp)
if (!ret) {
psp->hdcp_context.hdcp_initialized = true;
psp->hdcp_context.session_id = cmd->resp.session_id;
+ mutex_init(&psp->hdcp_context.mutex);
}
kfree(cmd);
@@ -1029,6 +1201,7 @@ static int psp_dtm_load(struct psp_context *psp)
if (!ret) {
psp->dtm_context.dtm_initialized = true;
psp->dtm_context.session_id = cmd->resp.session_id;
+ mutex_init(&psp->dtm_context.mutex);
}
kfree(cmd);
@@ -1169,16 +1342,20 @@ static int psp_hw_start(struct psp_context *psp)
}
/*
- * For those ASICs with DF Cstate management centralized
+ * For ASICs with DF Cstate management centralized
* to PMFW, TMR setup should be performed after PMFW
* loaded and before other non-psp firmware loaded.
*/
- if (!psp->pmfw_centralized_cstate_management) {
- ret = psp_tmr_load(psp);
- if (ret) {
- DRM_ERROR("PSP load tmr failed!\n");
+ if (psp->pmfw_centralized_cstate_management) {
+ ret = psp_load_smu_fw(psp);
+ if (ret)
return ret;
- }
+ }
+
+ ret = psp_tmr_load(psp);
+ if (ret) {
+ DRM_ERROR("PSP load tmr failed!\n");
+ return ret;
}
return 0;
@@ -1355,7 +1532,7 @@ static int psp_prep_load_ip_fw_cmd_buf(struct amdgpu_firmware_info *ucode,
}
static int psp_execute_np_fw_load(struct psp_context *psp,
- struct amdgpu_firmware_info *ucode)
+ struct amdgpu_firmware_info *ucode)
{
int ret = 0;
@@ -1369,64 +1546,96 @@ static int psp_execute_np_fw_load(struct psp_context *psp,
return ret;
}
+static int psp_load_smu_fw(struct psp_context *psp)
+{
+ int ret;
+ struct amdgpu_device* adev = psp->adev;
+ struct amdgpu_firmware_info *ucode =
+ &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
+ struct amdgpu_ras *ras = psp->ras.ras;
+
+ if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+
+ if (adev->in_gpu_reset && ras && ras->supported) {
+ ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD);
+ if (ret) {
+ DRM_WARN("Failed to set MP1 state prepare for reload\n");
+ }
+ }
+
+ ret = psp_execute_np_fw_load(psp, ucode);
+
+ if (ret)
+ DRM_ERROR("PSP load smu failed!\n");
+
+ return ret;
+}
+
+static bool fw_load_skip_check(struct psp_context *psp,
+ struct amdgpu_firmware_info *ucode)
+{
+ if (!ucode->fw)
+ return true;
+
+ if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
+ (psp_smu_reload_quirk(psp) ||
+ psp->autoload_supported ||
+ psp->pmfw_centralized_cstate_management))
+ return true;
+
+ if (amdgpu_sriov_vf(psp->adev) &&
+ (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA0
+ || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1
+ || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2
+ || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3
+ || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA4
+ || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA5
+ || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA6
+ || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA7
+ || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G
+ || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL
+ || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM
+ || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM
+ || ucode->ucode_id == AMDGPU_UCODE_ID_SMC))
+ /* skip ucode loading in SRIOV VF */
+ return true;
+
+ if (psp->autoload_supported &&
+ (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT ||
+ ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT))
+ /* skip mec JT when autoload is enabled */
+ return true;
+
+ return false;
+}
+
static int psp_np_fw_load(struct psp_context *psp)
{
int i, ret;
struct amdgpu_firmware_info *ucode;
struct amdgpu_device* adev = psp->adev;
- if (psp->autoload_supported ||
- psp->pmfw_centralized_cstate_management) {
- ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
- if (!ucode->fw || amdgpu_sriov_vf(adev))
- goto out;
-
- ret = psp_execute_np_fw_load(psp, ucode);
+ if (psp->autoload_supported &&
+ !psp->pmfw_centralized_cstate_management) {
+ ret = psp_load_smu_fw(psp);
if (ret)
return ret;
}
- if (psp->pmfw_centralized_cstate_management) {
- ret = psp_tmr_load(psp);
- if (ret) {
- DRM_ERROR("PSP load tmr failed!\n");
- return ret;
- }
- }
-
-out:
for (i = 0; i < adev->firmware.max_ucodes; i++) {
ucode = &adev->firmware.ucode[i];
- if (!ucode->fw)
- continue;
if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
- (psp_smu_reload_quirk(psp) ||
- psp->autoload_supported ||
- psp->pmfw_centralized_cstate_management))
- continue;
-
- if (amdgpu_sriov_vf(adev) &&
- (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA0
- || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1
- || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2
- || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3
- || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA4
- || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA5
- || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA6
- || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA7
- || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G
- || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL
- || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM
- || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM
- || ucode->ucode_id == AMDGPU_UCODE_ID_SMC))
- /*skip ucode loading in SRIOV VF */
+ !fw_load_skip_check(psp, ucode)) {
+ ret = psp_load_smu_fw(psp);
+ if (ret)
+ return ret;
continue;
+ }
- if (psp->autoload_supported &&
- (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT ||
- ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT))
- /* skip mec JT when autoload is enabled */
+ if (fw_load_skip_check(psp, ucode))
continue;
psp_print_fw_hdr(psp, ucode);
@@ -1438,17 +1647,12 @@ out:
/* Start rlc autoload after psp received all the gfx firmware */
if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ?
AMDGPU_UCODE_ID_CP_MEC2 : AMDGPU_UCODE_ID_RLC_G)) {
- ret = psp_rlc_autoload(psp);
+ ret = psp_rlc_autoload_start(psp);
if (ret) {
DRM_ERROR("Failed to start rlc autoload\n");
return ret;
}
}
-#if 0
- /* check if firmware loaded sucessfully */
- if (!amdgpu_psp_check_fw_loading_status(adev, i))
- return -EINVAL;
-#endif
}
return 0;
@@ -1806,19 +2010,110 @@ int psp_ring_cmd_submit(struct psp_context *psp,
return 0;
}
-static bool psp_check_fw_loading_status(struct amdgpu_device *adev,
- enum AMDGPU_UCODE_ID ucode_type)
+int psp_init_asd_microcode(struct psp_context *psp,
+ const char *chip_name)
{
- struct amdgpu_firmware_info *ucode = NULL;
+ struct amdgpu_device *adev = psp->adev;
+ char fw_name[30];
+ const struct psp_firmware_header_v1_0 *asd_hdr;
+ int err = 0;
- if (!adev->firmware.fw_size)
- return false;
+ if (!chip_name) {
+ dev_err(adev->dev, "invalid chip name for asd microcode\n");
+ return -EINVAL;
+ }
- ucode = &adev->firmware.ucode[ucode_type];
- if (!ucode->fw || !ucode->ucode_size)
- return false;
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name);
+ err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+
+ err = amdgpu_ucode_validate(adev->psp.asd_fw);
+ if (err)
+ goto out;
+
+ asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
+ adev->psp.asd_fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
+ adev->psp.asd_feature_version = le32_to_cpu(asd_hdr->ucode_feature_version);
+ adev->psp.asd_ucode_size = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
+ adev->psp.asd_start_addr = (uint8_t *)asd_hdr +
+ le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
+ return 0;
+out:
+ dev_err(adev->dev, "fail to initialize asd microcode\n");
+ release_firmware(adev->psp.asd_fw);
+ adev->psp.asd_fw = NULL;
+ return err;
+}
+
+int psp_init_sos_microcode(struct psp_context *psp,
+ const char *chip_name)
+{
+ struct amdgpu_device *adev = psp->adev;
+ char fw_name[30];
+ const struct psp_firmware_header_v1_0 *sos_hdr;
+ const struct psp_firmware_header_v1_1 *sos_hdr_v1_1;
+ const struct psp_firmware_header_v1_2 *sos_hdr_v1_2;
+ int err = 0;
+
+ if (!chip_name) {
+ dev_err(adev->dev, "invalid chip name for sos microcode\n");
+ return -EINVAL;
+ }
+
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sos.bin", chip_name);
+ err = request_firmware(&adev->psp.sos_fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+
+ err = amdgpu_ucode_validate(adev->psp.sos_fw);
+ if (err)
+ goto out;
+
+ sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
+ amdgpu_ucode_print_psp_hdr(&sos_hdr->header);
+
+ switch (sos_hdr->header.header_version_major) {
+ case 1:
+ adev->psp.sos_fw_version = le32_to_cpu(sos_hdr->header.ucode_version);
+ adev->psp.sos_feature_version = le32_to_cpu(sos_hdr->ucode_feature_version);
+ adev->psp.sos_bin_size = le32_to_cpu(sos_hdr->sos_size_bytes);
+ adev->psp.sys_bin_size = le32_to_cpu(sos_hdr->sos_offset_bytes);
+ adev->psp.sys_start_addr = (uint8_t *)sos_hdr +
+ le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
+ adev->psp.sos_start_addr = (uint8_t *)adev->psp.sys_start_addr +
+ le32_to_cpu(sos_hdr->sos_offset_bytes);
+ if (sos_hdr->header.header_version_minor == 1) {
+ sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data;
+ adev->psp.toc_bin_size = le32_to_cpu(sos_hdr_v1_1->toc_size_bytes);
+ adev->psp.toc_start_addr = (uint8_t *)adev->psp.sys_start_addr +
+ le32_to_cpu(sos_hdr_v1_1->toc_offset_bytes);
+ adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_1->kdb_size_bytes);
+ adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr +
+ le32_to_cpu(sos_hdr_v1_1->kdb_offset_bytes);
+ }
+ if (sos_hdr->header.header_version_minor == 2) {
+ sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data;
+ adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_2->kdb_size_bytes);
+ adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr +
+ le32_to_cpu(sos_hdr_v1_2->kdb_offset_bytes);
+ }
+ break;
+ default:
+ dev_err(adev->dev,
+ "unsupported psp sos firmware\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ return 0;
+out:
+ dev_err(adev->dev,
+ "failed to init sos firmware\n");
+ release_firmware(adev->psp.sos_fw);
+ adev->psp.sos_fw = NULL;
- return psp_compare_sram_data(&adev->psp, ucode, ucode_type);
+ return err;
}
static int psp_set_clockgating_state(void *handle,
@@ -1957,16 +2252,6 @@ static void psp_sysfs_fini(struct amdgpu_device *adev)
device_remove_file(adev->dev, &dev_attr_usbc_pd_fw);
}
-static const struct amdgpu_psp_funcs psp_funcs = {
- .check_fw_loading_status = psp_check_fw_loading_status,
-};
-
-static void psp_set_funcs(struct amdgpu_device *adev)
-{
- if (NULL == adev->firmware.funcs)
- adev->firmware.funcs = &psp_funcs;
-}
-
const struct amdgpu_ip_block_version psp_v3_1_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_PSP,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index 297435c0c7c1..2a56ad996d83 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -93,22 +93,8 @@ struct psp_funcs
enum psp_ring_type ring_type);
int (*ring_destroy)(struct psp_context *psp,
enum psp_ring_type ring_type);
- bool (*compare_sram_data)(struct psp_context *psp,
- struct amdgpu_firmware_info *ucode,
- enum AMDGPU_UCODE_ID ucode_type);
bool (*smu_reload_quirk)(struct psp_context *psp);
int (*mode1_reset)(struct psp_context *psp);
- int (*xgmi_get_node_id)(struct psp_context *psp, uint64_t *node_id);
- int (*xgmi_get_hive_id)(struct psp_context *psp, uint64_t *hive_id);
- int (*xgmi_get_topology_info)(struct psp_context *psp, int number_devices,
- struct psp_xgmi_topology_info *topology);
- int (*xgmi_set_topology_info)(struct psp_context *psp, int number_devices,
- struct psp_xgmi_topology_info *topology);
- bool (*support_vmr_ring)(struct psp_context *psp);
- int (*ras_trigger_error)(struct psp_context *psp,
- struct ta_ras_trigger_error_input *info);
- int (*ras_cure_posion)(struct psp_context *psp, uint64_t *mode_ptr);
- int (*rlc_autoload_start)(struct psp_context *psp);
int (*mem_training_init)(struct psp_context *psp);
void (*mem_training_fini)(struct psp_context *psp);
int (*mem_training)(struct psp_context *psp, uint32_t ops);
@@ -161,6 +147,7 @@ struct psp_hdcp_context {
struct amdgpu_bo *hdcp_shared_bo;
uint64_t hdcp_shared_mc_addr;
void *hdcp_shared_buf;
+ struct mutex mutex;
};
struct psp_dtm_context {
@@ -169,6 +156,7 @@ struct psp_dtm_context {
struct amdgpu_bo *dtm_shared_bo;
uint64_t dtm_shared_mc_addr;
void *dtm_shared_buf;
+ struct mutex mutex;
};
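How these new per-context mutexes are taken is not part of this hunk; presumably the call sites serialize TA invocations with them. A minimal assumed pattern (the command id is a placeholder, not taken from this diff):

	/* Assumed usage, not shown in this diff: serialize DTM TA submissions. */
	mutex_lock(&psp->dtm_context.mutex);
	ret = psp_dtm_invoke(psp, ta_cmd_id);	/* ta_cmd_id is a placeholder */
	mutex_unlock(&psp->dtm_context.mutex);

The same pattern would apply to psp->hdcp_context.mutex around psp_hdcp_invoke().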
#define MEM_TRAIN_SYSTEM_SIGNATURE 0x54534942
@@ -306,8 +294,6 @@ struct amdgpu_psp_funcs {
#define psp_ring_create(psp, type) (psp)->funcs->ring_create((psp), (type))
#define psp_ring_stop(psp, type) (psp)->funcs->ring_stop((psp), (type))
#define psp_ring_destroy(psp, type) ((psp)->funcs->ring_destroy((psp), (type)))
-#define psp_compare_sram_data(psp, ucode, type) \
- (psp)->funcs->compare_sram_data((psp), (ucode), (type))
#define psp_init_microcode(psp) \
((psp)->funcs->init_microcode ? (psp)->funcs->init_microcode((psp)) : 0)
#define psp_bootloader_load_kdb(psp) \
@@ -318,22 +304,8 @@ struct amdgpu_psp_funcs {
((psp)->funcs->bootloader_load_sos ? (psp)->funcs->bootloader_load_sos((psp)) : 0)
#define psp_smu_reload_quirk(psp) \
((psp)->funcs->smu_reload_quirk ? (psp)->funcs->smu_reload_quirk((psp)) : false)
-#define psp_support_vmr_ring(psp) \
- ((psp)->funcs->support_vmr_ring ? (psp)->funcs->support_vmr_ring((psp)) : false)
#define psp_mode1_reset(psp) \
((psp)->funcs->mode1_reset ? (psp)->funcs->mode1_reset((psp)) : false)
-#define psp_xgmi_get_node_id(psp, node_id) \
- ((psp)->funcs->xgmi_get_node_id ? (psp)->funcs->xgmi_get_node_id((psp), (node_id)) : -EINVAL)
-#define psp_xgmi_get_hive_id(psp, hive_id) \
- ((psp)->funcs->xgmi_get_hive_id ? (psp)->funcs->xgmi_get_hive_id((psp), (hive_id)) : -EINVAL)
-#define psp_xgmi_get_topology_info(psp, num_device, topology) \
- ((psp)->funcs->xgmi_get_topology_info ? \
- (psp)->funcs->xgmi_get_topology_info((psp), (num_device), (topology)) : -EINVAL)
-#define psp_xgmi_set_topology_info(psp, num_device, topology) \
- ((psp)->funcs->xgmi_set_topology_info ? \
- (psp)->funcs->xgmi_set_topology_info((psp), (num_device), (topology)) : -EINVAL)
-#define psp_rlc_autoload(psp) \
- ((psp)->funcs->rlc_autoload_start ? (psp)->funcs->rlc_autoload_start((psp)) : 0)
#define psp_mem_training_init(psp) \
((psp)->funcs->mem_training_init ? (psp)->funcs->mem_training_init((psp)) : 0)
#define psp_mem_training_fini(psp) \
@@ -341,15 +313,6 @@ struct amdgpu_psp_funcs {
#define psp_mem_training(psp, ops) \
((psp)->funcs->mem_training ? (psp)->funcs->mem_training((psp), (ops)) : 0)
-#define amdgpu_psp_check_fw_loading_status(adev, i) (adev)->firmware.funcs->check_fw_loading_status((adev), (i))
-
-#define psp_ras_trigger_error(psp, info) \
- ((psp)->funcs->ras_trigger_error ? \
- (psp)->funcs->ras_trigger_error((psp), (info)) : -EINVAL)
-#define psp_ras_cure_posion(psp, addr) \
- ((psp)->funcs->ras_cure_posion ? \
- (psp)->funcs->ras_cure_posion(psp, (addr)) : -EINVAL)
-
#define psp_ring_get_wptr(psp) (psp)->funcs->ring_get_wptr((psp))
#define psp_ring_set_wptr(psp, value) (psp)->funcs->ring_set_wptr((psp), (value))
@@ -377,10 +340,21 @@ int psp_update_vcn_sram(struct amdgpu_device *adev, int inst_idx,
int psp_xgmi_initialize(struct psp_context *psp);
int psp_xgmi_terminate(struct psp_context *psp);
int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
+int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id);
+int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id);
+int psp_xgmi_get_topology_info(struct psp_context *psp,
+ int number_devices,
+ struct psp_xgmi_topology_info *topology);
+int psp_xgmi_set_topology_info(struct psp_context *psp,
+ int number_devices,
+ struct psp_xgmi_topology_info *topology);
int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
int psp_ras_enable_features(struct psp_context *psp,
union ta_ras_cmd_input *info, bool enable);
+int psp_ras_trigger_error(struct psp_context *psp,
+ struct ta_ras_trigger_error_input *info);
+
int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
@@ -393,4 +367,8 @@ int psp_ring_cmd_submit(struct psp_context *psp,
uint64_t cmd_buf_mc_addr,
uint64_t fence_mc_addr,
int index);
+int psp_init_asd_microcode(struct psp_context *psp,
+ const char *chip_name);
+int psp_init_sos_microcode(struct psp_context *psp,
+ const char *chip_name);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index ab379b44679c..50fe08bf2f72 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -80,6 +80,20 @@ atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0);
static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
uint64_t addr);
+void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready)
+{
+ if (adev && amdgpu_ras_get_context(adev))
+ amdgpu_ras_get_context(adev)->error_query_ready = ready;
+}
+
+bool amdgpu_ras_get_error_query_ready(struct amdgpu_device *adev)
+{
+ if (adev && amdgpu_ras_get_context(adev))
+ return amdgpu_ras_get_context(adev)->error_query_ready;
+
+ return false;
+}
+
static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
@@ -281,8 +295,9 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *
struct ras_debug_if data;
int ret = 0;
- if (amdgpu_ras_intr_triggered()) {
- DRM_WARN("RAS WARN: error injection currently inaccessible\n");
+ if (!amdgpu_ras_get_error_query_ready(adev)) {
+ dev_warn(adev->dev, "RAS WARN: error injection "
+ "currently inaccessible\n");
return size;
}
@@ -310,7 +325,8 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *
/* umc ce/ue error injection for a bad page is not allowed */
if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) &&
amdgpu_ras_check_bad_page(adev, data.inject.address)) {
- DRM_WARN("RAS WARN: 0x%llx has been marked as bad before error injection!\n",
+ dev_warn(adev->dev, "RAS WARN: 0x%llx has been marked "
+ "as bad before error injection!\n",
data.inject.address);
break;
}
@@ -399,7 +415,7 @@ static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
.head = obj->head,
};
- if (amdgpu_ras_intr_triggered())
+ if (!amdgpu_ras_get_error_query_ready(obj->adev))
return snprintf(buf, PAGE_SIZE,
"Query currently inaccessible\n");
@@ -486,6 +502,29 @@ struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
}
/* obj end */
+void amdgpu_ras_parse_status_code(struct amdgpu_device* adev,
+ const char* invoke_type,
+ const char* block_name,
+ enum ta_ras_status ret)
+{
+ switch (ret) {
+ case TA_RAS_STATUS__SUCCESS:
+ return;
+ case TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE:
+ dev_warn(adev->dev,
+ "RAS WARN: %s %s currently unavailable\n",
+ invoke_type,
+ block_name);
+ break;
+ default:
+ dev_err(adev->dev,
+ "RAS ERROR: %s %s error failed ret 0x%X\n",
+ invoke_type,
+ block_name,
+ ret);
+ }
+}
+
/* feature ctl begin */
static int amdgpu_ras_is_feature_allowed(struct amdgpu_device *adev,
struct ras_common_if *head)
@@ -549,19 +588,23 @@ int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
struct ras_common_if *head, bool enable)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
- union ta_ras_cmd_input info;
+ union ta_ras_cmd_input *info;
int ret;
if (!con)
return -EINVAL;
+ info = kzalloc(sizeof(union ta_ras_cmd_input), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
if (!enable) {
- info.disable_features = (struct ta_ras_disable_features_input) {
+ info->disable_features = (struct ta_ras_disable_features_input) {
.block_id = amdgpu_ras_block_to_ta(head->block),
.error_type = amdgpu_ras_error_to_ta(head->type),
};
} else {
- info.enable_features = (struct ta_ras_enable_features_input) {
+ info->enable_features = (struct ta_ras_enable_features_input) {
.block_id = amdgpu_ras_block_to_ta(head->block),
.error_type = amdgpu_ras_error_to_ta(head->type),
};
@@ -570,26 +613,33 @@ int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
/* Do not enable if it is not allowed. */
WARN_ON(enable && !amdgpu_ras_is_feature_allowed(adev, head));
/* Are we already in the state we are going to set? */
- if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head)))
- return 0;
+ if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head))) {
+ ret = 0;
+ goto out;
+ }
if (!amdgpu_ras_intr_triggered()) {
- ret = psp_ras_enable_features(&adev->psp, &info, enable);
+ ret = psp_ras_enable_features(&adev->psp, info, enable);
if (ret) {
- DRM_ERROR("RAS ERROR: %s %s feature failed ret %d\n",
- enable ? "enable":"disable",
- ras_block_str(head->block),
- ret);
+ amdgpu_ras_parse_status_code(adev,
+ enable ? "enable":"disable",
+ ras_block_str(head->block),
+ (enum ta_ras_status)ret);
if (ret == TA_RAS_STATUS__RESET_NEEDED)
- return -EAGAIN;
- return -EINVAL;
+ ret = -EAGAIN;
+ else
+ ret = -EINVAL;
+
+ goto out;
}
}
/* setup the obj */
__amdgpu_ras_feature_enable(adev, head, enable);
-
- return 0;
+ ret = 0;
+out:
+ kfree(info);
+ return ret;
}
/* Only used in device probe stage and called only once. */
@@ -618,7 +668,8 @@ int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
if (ret == -EINVAL) {
ret = __amdgpu_ras_feature_enable(adev, head, 1);
if (!ret)
- DRM_INFO("RAS INFO: %s setup object\n",
+ dev_info(adev->dev,
+ "RAS INFO: %s setup object\n",
ras_block_str(head->block));
}
} else {
@@ -744,17 +795,48 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev,
info->ce_count = obj->err_data.ce_count;
if (err_data.ce_count) {
- dev_info(adev->dev, "%ld correctable errors detected in %s block\n",
- obj->err_data.ce_count, ras_block_str(info->head.block));
+ dev_info(adev->dev, "%ld correctable hardware errors "
+ "detected in %s block, no user "
+ "action is needed.\n",
+ obj->err_data.ce_count,
+ ras_block_str(info->head.block));
}
if (err_data.ue_count) {
- dev_info(adev->dev, "%ld uncorrectable errors detected in %s block\n",
- obj->err_data.ue_count, ras_block_str(info->head.block));
+ dev_info(adev->dev, "%ld uncorrectable hardware errors "
+ "detected in %s block\n",
+ obj->err_data.ue_count,
+ ras_block_str(info->head.block));
}
return 0;
}
+/* Trigger XGMI/WAFL error */
+int amdgpu_ras_error_inject_xgmi(struct amdgpu_device *adev,
+ struct ta_ras_trigger_error_input *block_info)
+{
+ int ret;
+
+ if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
+ dev_warn(adev->dev, "Failed to disallow df cstate");
+
+ if (amdgpu_dpm_allow_xgmi_power_down(adev, false))
+ dev_warn(adev->dev, "Failed to disallow XGMI power down");
+
+ ret = psp_ras_trigger_error(&adev->psp, block_info);
+
+ if (amdgpu_ras_intr_triggered())
+ return ret;
+
+ if (amdgpu_dpm_allow_xgmi_power_down(adev, true))
+ dev_warn(adev->dev, "Failed to allow XGMI power down");
+
+ if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
+ dev_warn(adev->dev, "Failed to allow df cstate");
+
+ return ret;
+}
+
/* wrapper of psp_ras_trigger_error */
int amdgpu_ras_error_inject(struct amdgpu_device *adev,
struct ras_inject_if *info)
@@ -788,20 +870,22 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
break;
case AMDGPU_RAS_BLOCK__UMC:
case AMDGPU_RAS_BLOCK__MMHUB:
- case AMDGPU_RAS_BLOCK__XGMI_WAFL:
case AMDGPU_RAS_BLOCK__PCIE_BIF:
ret = psp_ras_trigger_error(&adev->psp, &block_info);
break;
+ case AMDGPU_RAS_BLOCK__XGMI_WAFL:
+ ret = amdgpu_ras_error_inject_xgmi(adev, &block_info);
+ break;
default:
- DRM_INFO("%s error injection is not supported yet\n",
+ dev_info(adev->dev, "%s error injection is not supported yet\n",
ras_block_str(info->head.block));
ret = -EINVAL;
}
- if (ret)
- DRM_ERROR("RAS ERROR: inject %s error failed ret %d\n",
- ras_block_str(info->head.block),
- ret);
+ amdgpu_ras_parse_status_code(adev,
+ "inject",
+ ras_block_str(info->head.block),
+ (enum ta_ras_status)ret);
return ret;
}
@@ -1430,9 +1514,10 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, false);
/* Build list of devices to query RAS related errors */
- if (hive && adev->gmc.xgmi.num_physical_nodes > 1) {
+ if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
device_list_handle = &hive->device_list;
- } else {
+ else {
+ INIT_LIST_HEAD(&device_list);
list_add_tail(&adev->gmc.xgmi.head, &device_list);
device_list_handle = &device_list;
}
@@ -1535,7 +1620,7 @@ static int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev)
&data->bps[control->num_recs],
true,
save_count)) {
- DRM_ERROR("Failed to save EEPROM table data!");
+ dev_err(adev->dev, "Failed to save EEPROM table data!");
return -EIO;
}
@@ -1563,7 +1648,7 @@ static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
if (amdgpu_ras_eeprom_process_recods(control, bps, false,
control->num_recs)) {
- DRM_ERROR("Failed to load EEPROM table records!");
+ dev_err(adev->dev, "Failed to load EEPROM table records!");
ret = -EIO;
goto out;
}
@@ -1637,7 +1722,8 @@ int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev)
AMDGPU_GPU_PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM,
&bo, NULL))
- DRM_WARN("RAS WARN: reserve vram for retired page %llx fail\n", bp);
+ dev_warn(adev->dev, "RAS WARN: reserve vram for "
+ "retired page %llx fail\n", bp);
data->bps_bo[i] = bo;
data->last_reserved = i + 1;
@@ -1725,7 +1811,7 @@ free:
kfree(*data);
con->eh_data = NULL;
out:
- DRM_WARN("Failed to initialize ras recovery!\n");
+ dev_warn(adev->dev, "Failed to initialize ras recovery!\n");
return ret;
}
@@ -1787,18 +1873,18 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev,
return;
if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
- DRM_INFO("HBM ECC is active.\n");
+ dev_info(adev->dev, "HBM ECC is active.\n");
*hw_supported |= (1 << AMDGPU_RAS_BLOCK__UMC |
1 << AMDGPU_RAS_BLOCK__DF);
} else
- DRM_INFO("HBM ECC is not presented.\n");
+ dev_info(adev->dev, "HBM ECC is not presented.\n");
if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
- DRM_INFO("SRAM ECC is active.\n");
+ dev_info(adev->dev, "SRAM ECC is active.\n");
*hw_supported |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
1 << AMDGPU_RAS_BLOCK__DF);
} else
- DRM_INFO("SRAM ECC is not presented.\n");
+ dev_info(adev->dev, "SRAM ECC is not presented.\n");
/* hw_supported needs to be aligned with RAS block mask. */
*hw_supported &= AMDGPU_RAS_BLOCK_MASK;
@@ -1855,7 +1941,7 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
if (amdgpu_ras_fs_init(adev))
goto fs_out;
- DRM_INFO("RAS INFO: ras initialized successfully, "
+ dev_info(adev->dev, "RAS INFO: ras initialized successfully, "
"hardware ability[%x] ras_mask[%x]\n",
con->hw_supported, con->supported);
return 0;
@@ -2037,7 +2123,8 @@ void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
return;
if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
- DRM_WARN("RAS event of type ERREVENT_ATHUB_INTERRUPT detected!\n");
+ dev_info(adev->dev, "uncorrectable hardware error"
+ "(ERREVENT_ATHUB_INTERRUPT) detected!\n");
amdgpu_ras_reset_gpu(adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index 55c3eceb390d..e7df5d8429f8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -334,6 +334,8 @@ struct amdgpu_ras {
uint32_t flags;
bool reboot;
struct amdgpu_ras_eeprom_control eeprom_control;
+
+ bool error_query_ready;
};
struct ras_fs_data {
@@ -629,4 +631,6 @@ static inline void amdgpu_ras_intr_cleared(void)
void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev);
+void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index a7e1d0425ed0..13ea8ebc421c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -162,11 +162,13 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring)
* Returns 0 on success, error on failure.
*/
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
- unsigned max_dw, struct amdgpu_irq_src *irq_src,
- unsigned irq_type)
+ unsigned int max_dw, struct amdgpu_irq_src *irq_src,
+ unsigned int irq_type, unsigned int hw_prio)
{
int r, i;
int sched_hw_submission = amdgpu_sched_hw_submission;
+ u32 *num_sched;
+ u32 hw_ip;
/* Set the hw submission limit higher for KIQ because
* it's used for a number of gfx/compute tasks by both
@@ -258,6 +260,13 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
ring->priority = DRM_SCHED_PRIORITY_NORMAL;
mutex_init(&ring->priority_mutex);
+ if (!ring->no_scheduler) {
+ hw_ip = ring->funcs->type;
+ num_sched = &adev->gpu_sched[hw_ip][hw_prio].num_scheds;
+ adev->gpu_sched[hw_ip][hw_prio].sched[(*num_sched)++] =
+ &ring->sched;
+ }
+
for (i = 0; i < DRM_SCHED_PRIORITY_MAX; ++i)
atomic_set(&ring->num_jobs[i], 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 9a443013d70d..be218754629a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -30,11 +30,15 @@
/* max number of rings */
#define AMDGPU_MAX_RINGS 28
+#define AMDGPU_MAX_HWIP_RINGS 8
#define AMDGPU_MAX_GFX_RINGS 2
#define AMDGPU_MAX_COMPUTE_RINGS 8
#define AMDGPU_MAX_VCE_RINGS 3
#define AMDGPU_MAX_UVD_ENC_RINGS 2
+#define AMDGPU_RING_PRIO_DEFAULT 1
+#define AMDGPU_RING_PRIO_MAX AMDGPU_GFX_PIPE_PRIO_MAX
+
/* some special values for the owner field */
#define AMDGPU_FENCE_OWNER_UNDEFINED ((void *)0ul)
#define AMDGPU_FENCE_OWNER_VM ((void *)1ul)
@@ -46,17 +50,30 @@
#define to_amdgpu_ring(s) container_of((s), struct amdgpu_ring, sched)
+#define AMDGPU_IB_POOL_SIZE (1024 * 1024)
+
enum amdgpu_ring_type {
- AMDGPU_RING_TYPE_GFX,
- AMDGPU_RING_TYPE_COMPUTE,
- AMDGPU_RING_TYPE_SDMA,
- AMDGPU_RING_TYPE_UVD,
- AMDGPU_RING_TYPE_VCE,
- AMDGPU_RING_TYPE_KIQ,
- AMDGPU_RING_TYPE_UVD_ENC,
- AMDGPU_RING_TYPE_VCN_DEC,
- AMDGPU_RING_TYPE_VCN_ENC,
- AMDGPU_RING_TYPE_VCN_JPEG
+ AMDGPU_RING_TYPE_GFX = AMDGPU_HW_IP_GFX,
+ AMDGPU_RING_TYPE_COMPUTE = AMDGPU_HW_IP_COMPUTE,
+ AMDGPU_RING_TYPE_SDMA = AMDGPU_HW_IP_DMA,
+ AMDGPU_RING_TYPE_UVD = AMDGPU_HW_IP_UVD,
+ AMDGPU_RING_TYPE_VCE = AMDGPU_HW_IP_VCE,
+ AMDGPU_RING_TYPE_UVD_ENC = AMDGPU_HW_IP_UVD_ENC,
+ AMDGPU_RING_TYPE_VCN_DEC = AMDGPU_HW_IP_VCN_DEC,
+ AMDGPU_RING_TYPE_VCN_ENC = AMDGPU_HW_IP_VCN_ENC,
+ AMDGPU_RING_TYPE_VCN_JPEG = AMDGPU_HW_IP_VCN_JPEG,
+ AMDGPU_RING_TYPE_KIQ
+};
+
+enum amdgpu_ib_pool_type {
+ /* Normal submissions to the top of the pipeline. */
+ AMDGPU_IB_POOL_DELAYED,
+ /* Immediate submissions to the bottom of the pipeline. */
+ AMDGPU_IB_POOL_IMMEDIATE,
+ /* Direct submission to the ring buffer during init and reset. */
+ AMDGPU_IB_POOL_DIRECT,
+
+ AMDGPU_IB_POOL_MAX
};
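The pool type chosen here is what the later hunks pass to amdgpu_job_alloc_with_ib(); the sketch below simply restates the selection made in the amdgpu_copy_buffer() change further down and introduces nothing new:

	/* Sketch (mirrors amdgpu_copy_buffer below): direct ring submissions used
	 * during init/reset take the DIRECT pool, scheduled work takes DELAYED. */
	enum amdgpu_ib_pool_type pool = direct_submit ? AMDGPU_IB_POOL_DIRECT :
							AMDGPU_IB_POOL_DELAYED;

	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, pool, &job);
	if (r)
		return r;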
struct amdgpu_device;
@@ -65,6 +82,11 @@ struct amdgpu_ib;
struct amdgpu_cs_parser;
struct amdgpu_job;
+struct amdgpu_sched {
+ u32 num_scheds;
+ struct drm_gpu_scheduler *sched[AMDGPU_MAX_HWIP_RINGS];
+};
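Each ring registers its drm_gpu_scheduler into this per-IP, per-priority table when it is initialized; the following just condenses the amdgpu_ring_init() hunk shown earlier in this patch:

	/* Condensed from the amdgpu_ring_init() change above. */
	if (!ring->no_scheduler) {
		u32 hw_ip = ring->funcs->type;
		u32 *num_sched = &adev->gpu_sched[hw_ip][hw_prio].num_scheds;

		adev->gpu_sched[hw_ip][hw_prio].sched[(*num_sched)++] = &ring->sched;
	}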
+
/*
* Fences.
*/
@@ -96,7 +118,8 @@ void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence,
unsigned flags);
-int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s);
+int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
+ uint32_t timeout);
bool amdgpu_fence_process(struct amdgpu_ring *ring);
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
@@ -159,17 +182,20 @@ struct amdgpu_ring_funcs {
void (*end_use)(struct amdgpu_ring *ring);
void (*emit_switch_buffer) (struct amdgpu_ring *ring);
void (*emit_cntxcntl) (struct amdgpu_ring *ring, uint32_t flags);
- void (*emit_rreg)(struct amdgpu_ring *ring, uint32_t reg);
+ void (*emit_rreg)(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t reg_val_offs);
void (*emit_wreg)(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
void (*emit_reg_wait)(struct amdgpu_ring *ring, uint32_t reg,
uint32_t val, uint32_t mask);
void (*emit_reg_write_reg_wait)(struct amdgpu_ring *ring,
uint32_t reg0, uint32_t reg1,
uint32_t ref, uint32_t mask);
- void (*emit_tmz)(struct amdgpu_ring *ring, bool start);
+ void (*emit_frame_cntl)(struct amdgpu_ring *ring, bool start,
+ bool secure);
/* Try to soft recover the ring to make the fence signal */
void (*soft_recovery)(struct amdgpu_ring *ring, unsigned vmid);
int (*preempt_ib)(struct amdgpu_ring *ring);
+ void (*emit_mem_sync)(struct amdgpu_ring *ring);
};
struct amdgpu_ring {
@@ -214,12 +240,12 @@ struct amdgpu_ring {
unsigned vm_inv_eng;
struct dma_fence *vmid_wait;
bool has_compute_vm_bug;
+ bool no_scheduler;
atomic_t num_jobs[DRM_SCHED_PRIORITY_MAX];
struct mutex priority_mutex;
/* protected by priority_mutex */
int priority;
- bool has_high_prio;
#if defined(CONFIG_DEBUG_FS)
struct dentry *ent;
@@ -241,11 +267,11 @@ struct amdgpu_ring {
#define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
#define amdgpu_ring_emit_switch_buffer(r) (r)->funcs->emit_switch_buffer((r))
#define amdgpu_ring_emit_cntxcntl(r, d) (r)->funcs->emit_cntxcntl((r), (d))
-#define amdgpu_ring_emit_rreg(r, d) (r)->funcs->emit_rreg((r), (d))
+#define amdgpu_ring_emit_rreg(r, d, o) (r)->funcs->emit_rreg((r), (d), (o))
#define amdgpu_ring_emit_wreg(r, d, v) (r)->funcs->emit_wreg((r), (d), (v))
#define amdgpu_ring_emit_reg_wait(r, d, v, m) (r)->funcs->emit_reg_wait((r), (d), (v), (m))
#define amdgpu_ring_emit_reg_write_reg_wait(r, d0, d1, v, m) (r)->funcs->emit_reg_write_reg_wait((r), (d0), (d1), (v), (m))
-#define amdgpu_ring_emit_tmz(r, b) (r)->funcs->emit_tmz((r), (b))
+#define amdgpu_ring_emit_frame_cntl(r, b, s) (r)->funcs->emit_frame_cntl((r), (b), (s))
#define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
#define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
#define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o))
@@ -257,8 +283,8 @@ void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
void amdgpu_ring_commit(struct amdgpu_ring *ring);
void amdgpu_ring_undo(struct amdgpu_ring *ring);
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
- unsigned ring_size, struct amdgpu_irq_src *irq_src,
- unsigned irq_type);
+ unsigned int ring_size, struct amdgpu_irq_src *irq_src,
+ unsigned int irq_type, unsigned int prio);
void amdgpu_ring_fini(struct amdgpu_ring *ring);
void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,
uint32_t reg0, uint32_t val0,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
index 4b352206354b..e5b8fb8e75c5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
@@ -61,8 +61,6 @@ struct amdgpu_sdma_ras_funcs {
struct amdgpu_sdma {
struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
- struct drm_gpu_scheduler *sdma_sched[AMDGPU_MAX_SDMA_INSTANCES];
- uint32_t num_sdma_sched;
struct amdgpu_irq_src trap_irq;
struct amdgpu_irq_src illegal_inst_irq;
struct amdgpu_irq_src ecc_irq;
@@ -91,7 +89,8 @@ struct amdgpu_buffer_funcs {
/* dst addr in bytes */
uint64_t dst_offset,
/* number of byte to transfer */
- uint32_t byte_count);
+ uint32_t byte_count,
+ bool tmz);
/* maximum bytes in a single operation */
uint32_t fill_max_bytes;
@@ -109,7 +108,7 @@ struct amdgpu_buffer_funcs {
uint32_t byte_count);
};
-#define amdgpu_emit_copy_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b))
+#define amdgpu_emit_copy_buffer(adev, ib, s, d, b, t) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b), (t))
#define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b))
struct amdgpu_sdma_instance *
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index b86392253696..b87ca171986a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -249,6 +249,11 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
owner != AMDGPU_FENCE_OWNER_UNDEFINED)
continue;
+ /* Never sync to VM updates either. */
+ if (fence_owner == AMDGPU_FENCE_OWNER_VM &&
+ owner != AMDGPU_FENCE_OWNER_UNDEFINED)
+ continue;
+
/* Ignore fences depending on the sync mode */
switch (mode) {
case AMDGPU_SYNC_ALWAYS:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
index b158230af8db..2f4d5ca9894f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
@@ -44,7 +44,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
/* Number of tests =
* (Total GTT - IB pool - writeback page - ring buffers) / test size
*/
- n = adev->gmc.gart_size - AMDGPU_IB_POOL_SIZE*64*1024;
+ n = adev->gmc.gart_size - AMDGPU_IB_POOL_SIZE;
for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
if (adev->rings[i])
n -= adev->rings[i]->ring_size;
@@ -124,7 +124,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
amdgpu_bo_kunmap(gtt_obj[i]);
r = amdgpu_copy_buffer(ring, gart_addr, vram_addr,
- size, NULL, &fence, false, false);
+ size, NULL, &fence, false, false, false);
if (r) {
DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
@@ -170,7 +170,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
amdgpu_bo_kunmap(vram_obj);
r = amdgpu_copy_buffer(ring, vram_addr, gart_addr,
- size, NULL, &fence, false, false);
+ size, NULL, &fence, false, false, false);
if (r) {
DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 63e734a125fb..5da20fc166d9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -35,7 +35,7 @@
#define AMDGPU_JOB_GET_TIMELINE_NAME(job) \
job->base.s_fence->finished.ops->get_timeline_name(&job->base.s_fence->finished)
-TRACE_EVENT(amdgpu_mm_rreg,
+TRACE_EVENT(amdgpu_device_rreg,
TP_PROTO(unsigned did, uint32_t reg, uint32_t value),
TP_ARGS(did, reg, value),
TP_STRUCT__entry(
@@ -54,7 +54,7 @@ TRACE_EVENT(amdgpu_mm_rreg,
(unsigned long)__entry->value)
);
-TRACE_EVENT(amdgpu_mm_wreg,
+TRACE_EVENT(amdgpu_device_wreg,
TP_PROTO(unsigned did, uint32_t reg, uint32_t value),
TP_ARGS(did, reg, value),
TP_STRUCT__entry(
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 6309ff72bd78..e59c01a83dac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -62,11 +62,6 @@
#define AMDGPU_TTM_VRAM_MAX_DW_READ (size_t)128
-static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
- struct ttm_mem_reg *mem, unsigned num_pages,
- uint64_t offset, unsigned window,
- struct amdgpu_ring *ring,
- uint64_t *addr);
/**
* amdgpu_init_mem_type - Initialize a memory manager for a specific type of
@@ -277,7 +272,7 @@ static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
*
*/
static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem,
- unsigned long *offset)
+ uint64_t *offset)
{
struct drm_mm_node *mm_node = mem->mm_node;
@@ -289,91 +284,191 @@ static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem,
}
/**
+ * amdgpu_ttm_map_buffer - Map memory into the GART windows
+ * @bo: buffer object to map
+ * @mem: memory object to map
+ * @mm_node: drm_mm node object to map
+ * @num_pages: number of pages to map
+ * @offset: offset into @mm_node where to start
+ * @window: which GART window to use
+ * @ring: DMA ring to use for the copy
+ * @tmz: if we should setup a TMZ enabled mapping
+ * @addr: resulting address inside the MC address space
+ *
+ * Setup one of the GART windows to access a specific piece of memory or return
+ * the physical address for local memory.
+ */
+static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *mem,
+ struct drm_mm_node *mm_node,
+ unsigned num_pages, uint64_t offset,
+ unsigned window, struct amdgpu_ring *ring,
+ bool tmz, uint64_t *addr)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_job *job;
+ unsigned num_dw, num_bytes;
+ struct dma_fence *fence;
+ uint64_t src_addr, dst_addr;
+ void *cpu_addr;
+ uint64_t flags;
+ unsigned int i;
+ int r;
+
+ BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
+ AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
+
+ /* Map only what can't be accessed directly */
+ if (!tmz && mem->start != AMDGPU_BO_INVALID_OFFSET) {
+ *addr = amdgpu_mm_node_addr(bo, mm_node, mem) + offset;
+ return 0;
+ }
+
+ *addr = adev->gmc.gart_start;
+ *addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
+ AMDGPU_GPU_PAGE_SIZE;
+ *addr += offset & ~PAGE_MASK;
+
+ num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
+ num_bytes = num_pages * 8;
+
+ r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
+ AMDGPU_IB_POOL_DELAYED, &job);
+ if (r)
+ return r;
+
+ src_addr = num_dw * 4;
+ src_addr += job->ibs[0].gpu_addr;
+
+ dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
+ dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
+ amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
+ dst_addr, num_bytes, false);
+
+ amdgpu_ring_pad_ib(ring, &job->ibs[0]);
+ WARN_ON(job->ibs[0].length_dw > num_dw);
+
+ flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, mem);
+ if (tmz)
+ flags |= AMDGPU_PTE_TMZ;
+
+ cpu_addr = &job->ibs[0].ptr[num_dw];
+
+ if (mem->mem_type == TTM_PL_TT) {
+ struct ttm_dma_tt *dma;
+ dma_addr_t *dma_address;
+
+ dma = container_of(bo->ttm, struct ttm_dma_tt, ttm);
+ dma_address = &dma->dma_address[offset >> PAGE_SHIFT];
+ r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
+ cpu_addr);
+ if (r)
+ goto error_free;
+ } else {
+ dma_addr_t dma_address;
+
+ dma_address = (mm_node->start << PAGE_SHIFT) + offset;
+ dma_address += adev->vm_manager.vram_base_offset;
+
+ for (i = 0; i < num_pages; ++i) {
+ r = amdgpu_gart_map(adev, i << PAGE_SHIFT, 1,
+ &dma_address, flags, cpu_addr);
+ if (r)
+ goto error_free;
+
+ dma_address += PAGE_SIZE;
+ }
+ }
+
+ r = amdgpu_job_submit(job, &adev->mman.entity,
+ AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
+ if (r)
+ goto error_free;
+
+ dma_fence_put(fence);
+
+ return r;
+
+error_free:
+ amdgpu_job_free(job);
+ return r;
+}
+
+/**
* amdgpu_copy_ttm_mem_to_mem - Helper function for copy
+ * @adev: amdgpu device
+ * @src: buffer/address where to read from
+ * @dst: buffer/address where to write to
+ * @size: number of bytes to copy
+ * @tmz: if a secure copy should be used
+ * @resv: resv object to sync to
+ * @f: Returns the last fence if multiple jobs are submitted.
*
* The function copies @size bytes from {src->mem + src->offset} to
* {dst->mem + dst->offset}. src->bo and dst->bo could be the same BO for a
* move and different for a BO to BO copy.
*
- * @f: Returns the last fence if multiple jobs are submitted.
*/
int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
- struct amdgpu_copy_mem *src,
- struct amdgpu_copy_mem *dst,
- uint64_t size,
+ const struct amdgpu_copy_mem *src,
+ const struct amdgpu_copy_mem *dst,
+ uint64_t size, bool tmz,
struct dma_resv *resv,
struct dma_fence **f)
{
+ const uint32_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
+ AMDGPU_GPU_PAGE_SIZE);
+
+ uint64_t src_node_size, dst_node_size, src_offset, dst_offset;
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
struct drm_mm_node *src_mm, *dst_mm;
- uint64_t src_node_start, dst_node_start, src_node_size,
- dst_node_size, src_page_offset, dst_page_offset;
struct dma_fence *fence = NULL;
int r = 0;
- const uint64_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
- AMDGPU_GPU_PAGE_SIZE);
if (!adev->mman.buffer_funcs_enabled) {
DRM_ERROR("Trying to move memory with ring turned off.\n");
return -EINVAL;
}
- src_mm = amdgpu_find_mm_node(src->mem, &src->offset);
- src_node_start = amdgpu_mm_node_addr(src->bo, src_mm, src->mem) +
- src->offset;
- src_node_size = (src_mm->size << PAGE_SHIFT) - src->offset;
- src_page_offset = src_node_start & (PAGE_SIZE - 1);
+ src_offset = src->offset;
+ src_mm = amdgpu_find_mm_node(src->mem, &src_offset);
+ src_node_size = (src_mm->size << PAGE_SHIFT) - src_offset;
- dst_mm = amdgpu_find_mm_node(dst->mem, &dst->offset);
- dst_node_start = amdgpu_mm_node_addr(dst->bo, dst_mm, dst->mem) +
- dst->offset;
- dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst->offset;
- dst_page_offset = dst_node_start & (PAGE_SIZE - 1);
+ dst_offset = dst->offset;
+ dst_mm = amdgpu_find_mm_node(dst->mem, &dst_offset);
+ dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst_offset;
mutex_lock(&adev->mman.gtt_window_lock);
while (size) {
- unsigned long cur_size;
- uint64_t from = src_node_start, to = dst_node_start;
+ uint32_t src_page_offset = src_offset & ~PAGE_MASK;
+ uint32_t dst_page_offset = dst_offset & ~PAGE_MASK;
struct dma_fence *next;
+ uint32_t cur_size;
+ uint64_t from, to;
/* Copy size cannot exceed GTT_MAX_BYTES. So if src or dst
* begins at an offset, then adjust the size accordingly
*/
- cur_size = min3(min(src_node_size, dst_node_size), size,
- GTT_MAX_BYTES);
- if (cur_size + src_page_offset > GTT_MAX_BYTES ||
- cur_size + dst_page_offset > GTT_MAX_BYTES)
- cur_size -= max(src_page_offset, dst_page_offset);
-
- /* Map only what needs to be accessed. Map src to window 0 and
- * dst to window 1
- */
- if (src->mem->start == AMDGPU_BO_INVALID_OFFSET) {
- r = amdgpu_map_buffer(src->bo, src->mem,
- PFN_UP(cur_size + src_page_offset),
- src_node_start, 0, ring,
- &from);
- if (r)
- goto error;
- /* Adjust the offset because amdgpu_map_buffer returns
- * start of mapped page
- */
- from += src_page_offset;
- }
+ cur_size = max(src_page_offset, dst_page_offset);
+ cur_size = min(min3(src_node_size, dst_node_size, size),
+ (uint64_t)(GTT_MAX_BYTES - cur_size));
+
+ /* Map src to window 0 and dst to window 1. */
+ r = amdgpu_ttm_map_buffer(src->bo, src->mem, src_mm,
+ PFN_UP(cur_size + src_page_offset),
+ src_offset, 0, ring, tmz, &from);
+ if (r)
+ goto error;
- if (dst->mem->start == AMDGPU_BO_INVALID_OFFSET) {
- r = amdgpu_map_buffer(dst->bo, dst->mem,
- PFN_UP(cur_size + dst_page_offset),
- dst_node_start, 1, ring,
- &to);
- if (r)
- goto error;
- to += dst_page_offset;
- }
+ r = amdgpu_ttm_map_buffer(dst->bo, dst->mem, dst_mm,
+ PFN_UP(cur_size + dst_page_offset),
+ dst_offset, 1, ring, tmz, &to);
+ if (r)
+ goto error;
r = amdgpu_copy_buffer(ring, from, to, cur_size,
- resv, &next, false, true);
+ resv, &next, false, true, tmz);
if (r)
goto error;
@@ -386,23 +481,20 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
src_node_size -= cur_size;
if (!src_node_size) {
- src_node_start = amdgpu_mm_node_addr(src->bo, ++src_mm,
- src->mem);
- src_node_size = (src_mm->size << PAGE_SHIFT);
- src_page_offset = 0;
+ ++src_mm;
+ src_node_size = src_mm->size << PAGE_SHIFT;
+ src_offset = 0;
} else {
- src_node_start += cur_size;
- src_page_offset = src_node_start & (PAGE_SIZE - 1);
+ src_offset += cur_size;
}
+
dst_node_size -= cur_size;
if (!dst_node_size) {
- dst_node_start = amdgpu_mm_node_addr(dst->bo, ++dst_mm,
- dst->mem);
- dst_node_size = (dst_mm->size << PAGE_SHIFT);
- dst_page_offset = 0;
+ ++dst_mm;
+ dst_node_size = dst_mm->size << PAGE_SHIFT;
+ dst_offset = 0;
} else {
- dst_node_start += cur_size;
- dst_page_offset = dst_node_start & (PAGE_SIZE - 1);
+ dst_offset += cur_size;
}
}
error:
@@ -425,6 +517,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_mem)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
+ struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
struct amdgpu_copy_mem src, dst;
struct dma_fence *fence = NULL;
int r;
@@ -438,14 +531,14 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
new_mem->num_pages << PAGE_SHIFT,
+ amdgpu_bo_encrypted(abo),
bo->base.resv, &fence);
if (r)
goto error;
/* clear the space being freed */
if (old_mem->mem_type == TTM_PL_VRAM &&
- (ttm_to_amdgpu_bo(bo)->flags &
- AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
+ (abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
struct dma_fence *wipe_fence = NULL;
r = amdgpu_fill_buffer(ttm_to_amdgpu_bo(bo), AMDGPU_POISON,
@@ -742,8 +835,8 @@ static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_re
static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
unsigned long page_offset)
{
+ uint64_t offset = (page_offset << PAGE_SHIFT);
struct drm_mm_node *mm;
- unsigned long offset = (page_offset << PAGE_SHIFT);
mm = amdgpu_find_mm_node(&bo->mem, &offset);
return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start +
@@ -766,18 +859,6 @@ struct amdgpu_ttm_tt {
};
#ifdef CONFIG_DRM_AMDGPU_USERPTR
-/* flags used by HMM internal, not related to CPU/GPU PTE flags */
-static const uint64_t hmm_range_flags[HMM_PFN_FLAG_MAX] = {
- (1 << 0), /* HMM_PFN_VALID */
- (1 << 1), /* HMM_PFN_WRITE */
-};
-
-static const uint64_t hmm_range_values[HMM_PFN_VALUE_MAX] = {
- 0xfffffffffffffffeUL, /* HMM_PFN_ERROR */
- 0, /* HMM_PFN_NONE */
- 0xfffffffffffffffcUL /* HMM_PFN_SPECIAL */
-};
-
/**
* amdgpu_ttm_tt_get_user_pages - get device accessible pages that back user
* memory and start HMM tracking CPU page table update
@@ -816,23 +897,20 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
goto out;
}
range->notifier = &bo->notifier;
- range->flags = hmm_range_flags;
- range->values = hmm_range_values;
- range->pfn_shift = PAGE_SHIFT;
range->start = bo->notifier.interval_tree.start;
range->end = bo->notifier.interval_tree.last + 1;
- range->default_flags = hmm_range_flags[HMM_PFN_VALID];
+ range->default_flags = HMM_PFN_REQ_FAULT;
if (!amdgpu_ttm_tt_is_readonly(ttm))
- range->default_flags |= range->flags[HMM_PFN_WRITE];
+ range->default_flags |= HMM_PFN_REQ_WRITE;
- range->pfns = kvmalloc_array(ttm->num_pages, sizeof(*range->pfns),
- GFP_KERNEL);
- if (unlikely(!range->pfns)) {
+ range->hmm_pfns = kvmalloc_array(ttm->num_pages,
+ sizeof(*range->hmm_pfns), GFP_KERNEL);
+ if (unlikely(!range->hmm_pfns)) {
r = -ENOMEM;
goto out_free_ranges;
}
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = find_vma(mm, start);
if (unlikely(!vma || start < vma->vm_start)) {
r = -EFAULT;
@@ -843,36 +921,32 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
r = -EPERM;
goto out_unlock;
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
retry:
range->notifier_seq = mmu_interval_read_begin(&bo->notifier);
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
r = hmm_range_fault(range);
- up_read(&mm->mmap_sem);
- if (unlikely(r <= 0)) {
+ mmap_read_unlock(mm);
+ if (unlikely(r)) {
/*
* FIXME: This timeout should encompass the retry from
* mmu_interval_read_retry() as well.
*/
- if ((r == 0 || r == -EBUSY) && !time_after(jiffies, timeout))
+ if (r == -EBUSY && !time_after(jiffies, timeout))
goto retry;
goto out_free_pfns;
}
- for (i = 0; i < ttm->num_pages; i++) {
- /* FIXME: The pages cannot be touched outside the notifier_lock */
- pages[i] = hmm_device_entry_to_page(range, range->pfns[i]);
- if (unlikely(!pages[i])) {
- pr_err("Page fault failed for pfn[%lu] = 0x%llx\n",
- i, range->pfns[i]);
- r = -ENOMEM;
-
- goto out_free_pfns;
- }
- }
+ /*
+ * Due to default_flags, all pages are HMM_PFN_VALID or
+ * hmm_range_fault() fails. FIXME: The pages cannot be touched outside
+ * the notifier_lock, and mmu_interval_read_retry() must be done first.
+ */
+ for (i = 0; i < ttm->num_pages; i++)
+ pages[i] = hmm_pfn_to_page(range->hmm_pfns[i]);
gtt->range = range;
mmput(mm);
@@ -880,9 +954,9 @@ retry:
return 0;
out_unlock:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
out_free_pfns:
- kvfree(range->pfns);
+ kvfree(range->hmm_pfns);
out_free_ranges:
kfree(range);
out:
@@ -907,7 +981,7 @@ bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%lx\n",
gtt->userptr, ttm->num_pages);
- WARN_ONCE(!gtt->range || !gtt->range->pfns,
+ WARN_ONCE(!gtt->range || !gtt->range->hmm_pfns,
"No user pages to check\n");
if (gtt->range) {
@@ -917,7 +991,7 @@ bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
*/
r = mmu_interval_read_retry(gtt->range->notifier,
gtt->range->notifier_seq);
- kvfree(gtt->range->pfns);
+ kvfree(gtt->range->hmm_pfns);
kfree(gtt->range);
gtt->range = NULL;
}
@@ -1008,8 +1082,7 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
for (i = 0; i < ttm->num_pages; i++) {
if (ttm->pages[i] !=
- hmm_device_entry_to_page(gtt->range,
- gtt->range->pfns[i]))
+ hmm_pfn_to_page(gtt->range->hmm_pfns[i]))
break;
}
@@ -1027,6 +1100,9 @@ int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
struct amdgpu_ttm_tt *gtt = (void *)ttm;
int r;
+ if (amdgpu_bo_encrypted(abo))
+ flags |= AMDGPU_PTE_TMZ;
+
if (abo->flags & AMDGPU_GEM_CREATE_CP_MQD_GFX9) {
uint64_t page_idx = 1;
@@ -1539,6 +1615,9 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
switch (bo->mem.mem_type) {
case TTM_PL_TT:
+ if (amdgpu_bo_is_amdgpu_bo(bo) &&
+ amdgpu_bo_encrypted(ttm_to_amdgpu_bo(bo)))
+ return false;
return true;
case TTM_PL_VRAM:
@@ -1587,8 +1666,9 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
if (bo->mem.mem_type != TTM_PL_VRAM)
return -EIO;
- nodes = amdgpu_find_mm_node(&abo->tbo.mem, &offset);
- pos = (nodes->start << PAGE_SHIFT) + offset;
+ pos = offset;
+ nodes = amdgpu_find_mm_node(&abo->tbo.mem, &pos);
+ pos += (nodes->start << PAGE_SHIFT);
while (len && pos < adev->gmc.mc_vram_size) {
uint64_t aligned_pos = pos & ~(uint64_t)3;
@@ -1857,17 +1937,19 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
return r;
/*
- * reserve one TMR (64K) memory at the top of VRAM which holds
+ * reserve TMR memory at the top of VRAM which holds
* IP Discovery data and is protected by PSP.
*/
- r = amdgpu_bo_create_kernel_at(adev,
- adev->gmc.real_vram_size - DISCOVERY_TMR_SIZE,
- DISCOVERY_TMR_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &adev->discovery_memory,
- NULL);
- if (r)
- return r;
+ if (adev->discovery_tmr_size > 0) {
+ r = amdgpu_bo_create_kernel_at(adev,
+ adev->gmc.real_vram_size - adev->discovery_tmr_size,
+ adev->discovery_tmr_size,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->discovery_memory,
+ NULL);
+ if (r)
+ return r;
+ }
DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
(unsigned) (adev->gmc.real_vram_size / (1024 * 1024)));
@@ -2015,75 +2097,14 @@ int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
return ttm_bo_mmap(filp, vma, &adev->mman.bdev);
}
-static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
- struct ttm_mem_reg *mem, unsigned num_pages,
- uint64_t offset, unsigned window,
- struct amdgpu_ring *ring,
- uint64_t *addr)
-{
- struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
- struct amdgpu_device *adev = ring->adev;
- struct ttm_tt *ttm = bo->ttm;
- struct amdgpu_job *job;
- unsigned num_dw, num_bytes;
- dma_addr_t *dma_address;
- struct dma_fence *fence;
- uint64_t src_addr, dst_addr;
- uint64_t flags;
- int r;
-
- BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
- AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
-
- *addr = adev->gmc.gart_start;
- *addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
- AMDGPU_GPU_PAGE_SIZE;
-
- num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
- num_bytes = num_pages * 8;
-
- r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes, &job);
- if (r)
- return r;
-
- src_addr = num_dw * 4;
- src_addr += job->ibs[0].gpu_addr;
-
- dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
- dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
- amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
- dst_addr, num_bytes);
-
- amdgpu_ring_pad_ib(ring, &job->ibs[0]);
- WARN_ON(job->ibs[0].length_dw > num_dw);
-
- dma_address = &gtt->ttm.dma_address[offset >> PAGE_SHIFT];
- flags = amdgpu_ttm_tt_pte_flags(adev, ttm, mem);
- r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
- &job->ibs[0].ptr[num_dw]);
- if (r)
- goto error_free;
-
- r = amdgpu_job_submit(job, &adev->mman.entity,
- AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
- if (r)
- goto error_free;
-
- dma_fence_put(fence);
-
- return r;
-
-error_free:
- amdgpu_job_free(job);
- return r;
-}
-
int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
uint64_t dst_offset, uint32_t byte_count,
struct dma_resv *resv,
struct dma_fence **fence, bool direct_submit,
- bool vm_needs_flush)
+ bool vm_needs_flush, bool tmz)
{
+ enum amdgpu_ib_pool_type pool = direct_submit ? AMDGPU_IB_POOL_DIRECT :
+ AMDGPU_IB_POOL_DELAYED;
struct amdgpu_device *adev = ring->adev;
struct amdgpu_job *job;
@@ -2101,7 +2122,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
num_loops = DIV_ROUND_UP(byte_count, max_bytes);
num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
- r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
+ r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, pool, &job);
if (r)
return r;
@@ -2123,7 +2144,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset,
- dst_offset, cur_size_in_bytes);
+ dst_offset, cur_size_in_bytes, tmz);
src_offset += cur_size_in_bytes;
dst_offset += cur_size_in_bytes;
@@ -2190,7 +2211,8 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
/* for IB padding */
num_dw += 64;
- r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
+ r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, AMDGPU_IB_POOL_DELAYED,
+ &job);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index bd05bbb4878d..4351d02644a7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -24,8 +24,9 @@
#ifndef __AMDGPU_TTM_H__
#define __AMDGPU_TTM_H__
-#include "amdgpu.h"
+#include <linux/dma-direction.h>
#include <drm/gpu_scheduler.h>
+#include "amdgpu.h"
#define AMDGPU_PL_GDS (TTM_PL_PRIV + 0)
#define AMDGPU_PL_GWS (TTM_PL_PRIV + 1)
@@ -74,6 +75,15 @@ uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man);
int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man);
u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo);
+int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
+ struct ttm_mem_reg *mem,
+ struct device *dev,
+ enum dma_data_direction dir,
+ struct sg_table **sgt);
+void amdgpu_vram_mgr_free_sgt(struct amdgpu_device *adev,
+ struct device *dev,
+ enum dma_data_direction dir,
+ struct sg_table *sgt);
uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man);
uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man);
@@ -87,11 +97,11 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
uint64_t dst_offset, uint32_t byte_count,
struct dma_resv *resv,
struct dma_fence **fence, bool direct_submit,
- bool vm_needs_flush);
+ bool vm_needs_flush, bool tmz);
int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
- struct amdgpu_copy_mem *src,
- struct amdgpu_copy_mem *dst,
- uint64_t size,
+ const struct amdgpu_copy_mem *src,
+ const struct amdgpu_copy_mem *dst,
+ uint64_t size, bool tmz,
struct dma_resv *resv,
struct dma_fence **f);
int amdgpu_fill_buffer(struct amdgpu_bo *bo,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 9ef312428231..65bb25e31d45 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -403,8 +403,8 @@ FW_VERSION_ATTR(mec_fw_version, 0444, gfx.mec_fw_version);
FW_VERSION_ATTR(mec2_fw_version, 0444, gfx.mec2_fw_version);
FW_VERSION_ATTR(sos_fw_version, 0444, psp.sos_fw_version);
FW_VERSION_ATTR(asd_fw_version, 0444, psp.asd_fw_version);
-FW_VERSION_ATTR(ta_ras_fw_version, 0444, psp.ta_fw_version);
-FW_VERSION_ATTR(ta_xgmi_fw_version, 0444, psp.ta_fw_version);
+FW_VERSION_ATTR(ta_ras_fw_version, 0444, psp.ta_ras_ucode_version);
+FW_VERSION_ATTR(ta_xgmi_fw_version, 0444, psp.ta_xgmi_ucode_version);
FW_VERSION_ATTR(smc_fw_version, 0444, pm.fw_version);
FW_VERSION_ATTR(sdma_fw_version, 0444, sdma.instance[0].fw_version);
FW_VERSION_ATTR(sdma2_fw_version, 0444, sdma.instance[1].fw_version);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
index 9dd51f0d2c11..af1b1ccf613c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
@@ -110,7 +110,8 @@ int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
* even NOMEM error is encountered
*/
if(!err_data->err_addr)
- DRM_WARN("Failed to alloc memory for umc error address record!\n");
+ dev_warn(adev->dev, "Failed to alloc memory for "
+ "umc error address record!\n");
/* umc query_ras_error_address is also responsible for clearing
* error status
@@ -120,10 +121,14 @@ int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
/* only uncorrectable error needs gpu reset */
if (err_data->ue_count) {
+ dev_info(adev->dev, "%ld uncorrectable hardware errors "
+ "detected in UMC block\n",
+ err_data->ue_count);
+
if (err_data->err_addr_cnt &&
amdgpu_ras_add_bad_pages(adev, err_data->err_addr,
err_data->err_addr_cnt))
- DRM_WARN("Failed to add ras bad page!\n");
+ dev_warn(adev->dev, "Failed to add ras bad page!\n");
amdgpu_ras_reset_gpu(adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 5fd32ad1c575..5100ebe8858d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -1056,7 +1056,8 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
goto err;
}
- r = amdgpu_job_alloc_with_ib(adev, 64, &job);
+ r = amdgpu_job_alloc_with_ib(adev, 64, direct ? AMDGPU_IB_POOL_DIRECT :
+ AMDGPU_IB_POOL_DELAYED, &job);
if (r)
goto err;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 59ddba137946..ecaa2d7483b2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -446,7 +446,8 @@ static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
uint64_t addr;
int i, r;
- r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+ r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+ AMDGPU_IB_POOL_DIRECT, &job);
if (r)
return r;
@@ -524,7 +525,9 @@ static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
struct dma_fence *f = NULL;
int i, r;
- r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+ r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+ direct ? AMDGPU_IB_POOL_DIRECT :
+ AMDGPU_IB_POOL_DELAYED, &job);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index a41272fbcba2..2badbc0355f2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -56,19 +56,23 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
{
- unsigned long bo_size;
+ unsigned long bo_size, fw_shared_bo_size;
const char *fw_name;
const struct common_firmware_header *hdr;
unsigned char fw_check;
int i, r;
INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);
+ mutex_init(&adev->vcn.vcn_pg_lock);
+ atomic_set(&adev->vcn.total_submission_cnt, 0);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++)
+ atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);
switch (adev->asic_type) {
case CHIP_RAVEN:
- if (adev->rev_id >= 8)
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
fw_name = FIRMWARE_RAVEN2;
- else if (adev->pdev->device == 0x15d8)
+ else if (adev->apu_flags & AMD_APU_IS_PICASSO)
fw_name = FIRMWARE_PICASSO;
else
fw_name = FIRMWARE_RAVEN;
@@ -178,6 +182,17 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
return r;
}
}
+
+ r = amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].fw_shared_bo,
+ &adev->vcn.inst[i].fw_shared_gpu_addr, &adev->vcn.inst[i].fw_shared_cpu_addr);
+ if (r) {
+ dev_err(adev->dev, "VCN %d (%d) failed to allocate firmware shared bo\n", i, r);
+ return r;
+ }
+
+ fw_shared_bo_size = amdgpu_bo_size(adev->vcn.inst[i].fw_shared_bo);
+ adev->vcn.inst[i].saved_shm_bo = kvmalloc(fw_shared_bo_size, GFP_KERNEL);
}
return 0;
@@ -192,6 +207,12 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
if (adev->vcn.harvest_config & (1 << j))
continue;
+
+ kvfree(adev->vcn.inst[j].saved_shm_bo);
+ amdgpu_bo_free_kernel(&adev->vcn.inst[j].fw_shared_bo,
+ &adev->vcn.inst[j].fw_shared_gpu_addr,
+ (void **)&adev->vcn.inst[j].fw_shared_cpu_addr);
+
if (adev->vcn.indirect_sram) {
amdgpu_bo_free_kernel(&adev->vcn.inst[j].dpg_sram_bo,
&adev->vcn.inst[j].dpg_sram_gpu_addr,
@@ -210,6 +231,7 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
}
release_firmware(adev->vcn.fw);
+ mutex_destroy(&adev->vcn.vcn_pg_lock);
return 0;
}
@@ -236,6 +258,17 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev)
return -ENOMEM;
memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size);
+
+ if (adev->vcn.inst[i].fw_shared_bo == NULL)
+ return 0;
+
+ if (!adev->vcn.inst[i].saved_shm_bo)
+ return -ENOMEM;
+
+ size = amdgpu_bo_size(adev->vcn.inst[i].fw_shared_bo);
+ ptr = adev->vcn.inst[i].fw_shared_cpu_addr;
+
+ memcpy_fromio(adev->vcn.inst[i].saved_shm_bo, ptr, size);
}
return 0;
}
@@ -273,6 +306,17 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev)
}
memset_io(ptr, 0, size);
}
+
+ if (adev->vcn.inst[i].fw_shared_bo == NULL)
+ return -EINVAL;
+
+ size = amdgpu_bo_size(adev->vcn.inst[i].fw_shared_bo);
+ ptr = adev->vcn.inst[i].fw_shared_cpu_addr;
+
+ if (adev->vcn.inst[i].saved_shm_bo != NULL)
+ memcpy_toio(ptr, adev->vcn.inst[i].saved_shm_bo, size);
+ else
+ memset_io(ptr, 0, size);
}
return 0;
}
@@ -295,7 +339,8 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
struct dpg_pause_state new_state;
- if (fence[j])
+ if (fence[j] ||
+ unlikely(atomic_read(&adev->vcn.inst[j].dpg_enc_submission_cnt)))
new_state.fw_based = VCN_DPG_STATE__PAUSE;
else
new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
@@ -307,8 +352,7 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
fences += fence[j];
}
- if (fences == 0) {
- amdgpu_gfx_off_ctrl(adev, true);
+ if (!fences && !atomic_read(&adev->vcn.total_submission_cnt)) {
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
AMD_PG_STATE_GATE);
} else {
@@ -319,36 +363,46 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);
- if (set_clocks) {
- amdgpu_gfx_off_ctrl(adev, false);
- amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
- AMD_PG_STATE_UNGATE);
- }
+ atomic_inc(&adev->vcn.total_submission_cnt);
+ cancel_delayed_work_sync(&adev->vcn.idle_work);
+
+ mutex_lock(&adev->vcn.vcn_pg_lock);
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
+ AMD_PG_STATE_UNGATE);
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
struct dpg_pause_state new_state;
- unsigned int fences = 0;
- unsigned int i;
- for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
- fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]);
- }
- if (fences)
+ if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) {
+ atomic_inc(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
new_state.fw_based = VCN_DPG_STATE__PAUSE;
- else
- new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
+ } else {
+ unsigned int fences = 0;
+ unsigned int i;
- if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
- new_state.fw_based = VCN_DPG_STATE__PAUSE;
+ for (i = 0; i < adev->vcn.num_enc_rings; ++i)
+ fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]);
+
+ if (fences || atomic_read(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt))
+ new_state.fw_based = VCN_DPG_STATE__PAUSE;
+ else
+ new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
+ }
adev->vcn.pause_dpg_mode(adev, ring->me, &new_state);
}
+ mutex_unlock(&adev->vcn.vcn_pg_lock);
}
void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
{
+ if (ring->adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
+ ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
+ atomic_dec(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
+
+ atomic_dec(&ring->adev->vcn.total_submission_cnt);
+
schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
}
@@ -390,7 +444,8 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
uint64_t addr;
int i, r;
- r = amdgpu_job_alloc_with_ib(adev, 64, &job);
+ r = amdgpu_job_alloc_with_ib(adev, 64,
+ AMDGPU_IB_POOL_DIRECT, &job);
if (r)
goto err;
@@ -557,7 +612,8 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
uint64_t addr;
int i, r;
- r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+ r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+ AMDGPU_IB_POOL_DIRECT, &job);
if (r)
return r;
@@ -610,7 +666,8 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
uint64_t addr;
int i, r;
- r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+ r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+ AMDGPU_IB_POOL_DIRECT, &job);
if (r)
return r;
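
Note (illustration, not part of the diff): the begin_use/end_use rework above drops the fence-counting heuristic in favour of two atomic counters — a block-wide total_submission_cnt and a per-instance dpg_enc_submission_cnt for encode rings — and the delayed idle worker only power-gates once both reach zero. A minimal userspace sketch of that bookkeeping, with invented names:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy model of the VCN idle accounting; names are illustrative only. */
static atomic_int total_submissions;
static atomic_int enc_submissions;   /* per-instance in the real driver */

static void ring_begin_use(bool is_enc)
{
	atomic_fetch_add(&total_submissions, 1);
	if (is_enc)
		atomic_fetch_add(&enc_submissions, 1); /* forces DPG pause */
}

static void ring_end_use(bool is_enc)
{
	if (is_enc)
		atomic_fetch_sub(&enc_submissions, 1);
	atomic_fetch_sub(&total_submissions, 1);
}

/* What the delayed idle worker checks before power gating. */
static bool can_power_gate(int fences_emitted)
{
	return fences_emitted == 0 &&
	       atomic_load(&total_submissions) == 0;
}

int main(void)
{
	ring_begin_use(true);
	printf("gate while busy: %d\n", can_power_gate(0));  /* prints 0 */
	ring_end_use(true);
	printf("gate when idle:  %d\n", can_power_gate(0));  /* prints 1 */
	return 0;
}
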
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 6fe057329de2..90aa12b22725 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -132,6 +132,13 @@
} \
} while (0)
+#define AMDGPU_VCN_MULTI_QUEUE_FLAG (1 << 8)
+
+enum fw_queue_mode {
+ FW_QUEUE_RING_RESET = 1,
+ FW_QUEUE_DPG_HOLD_OFF = 2,
+};
+
enum engine_status_constants {
UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON = 0x2AAAA0,
UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON_2_0 = 0xAAAA0,
@@ -179,10 +186,15 @@ struct amdgpu_vcn_inst {
struct amdgpu_irq_src irq;
struct amdgpu_vcn_reg external;
struct amdgpu_bo *dpg_sram_bo;
+ struct amdgpu_bo *fw_shared_bo;
struct dpg_pause_state pause_state;
void *dpg_sram_cpu_addr;
uint64_t dpg_sram_gpu_addr;
uint32_t *dpg_sram_curr_addr;
+ atomic_t dpg_enc_submission_cnt;
+ void *fw_shared_cpu_addr;
+ uint64_t fw_shared_gpu_addr;
+ void *saved_shm_bo;
};
struct amdgpu_vcn {
@@ -196,16 +208,28 @@ struct amdgpu_vcn {
uint8_t num_vcn_inst;
struct amdgpu_vcn_inst inst[AMDGPU_MAX_VCN_INSTANCES];
struct amdgpu_vcn_reg internal;
- struct drm_gpu_scheduler *vcn_enc_sched[AMDGPU_MAX_VCN_ENC_RINGS];
- struct drm_gpu_scheduler *vcn_dec_sched[AMDGPU_MAX_VCN_INSTANCES];
- uint32_t num_vcn_enc_sched;
- uint32_t num_vcn_dec_sched;
+ struct mutex vcn_pg_lock;
+ atomic_t total_submission_cnt;
unsigned harvest_config;
int (*pause_dpg_mode)(struct amdgpu_device *adev,
int inst_idx, struct dpg_pause_state *new_state);
};
+struct amdgpu_fw_shared_multi_queue {
+ uint8_t decode_queue_mode;
+ uint8_t encode_generalpurpose_queue_mode;
+ uint8_t encode_lowlatency_queue_mode;
+ uint8_t encode_realtime_queue_mode;
+ uint8_t padding[4];
+};
+
+struct amdgpu_fw_shared {
+ uint32_t present_flag_0;
+ uint8_t pad[53];
+ struct amdgpu_fw_shared_multi_queue multi_queue;
+} __attribute__((__packed__));
+
int amdgpu_vcn_sw_init(struct amdgpu_device *adev);
int amdgpu_vcn_sw_fini(struct amdgpu_device *adev);
int amdgpu_vcn_suspend(struct amdgpu_device *adev);
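
Note (illustration, not part of the diff): the new amdgpu_fw_shared layout is shared with firmware, so the 53-byte pad and the packed attribute pin multi_queue to a fixed byte offset, and present_flag_0 is presumably where AMDGPU_VCN_MULTI_QUEUE_FLAG gets advertised to the firmware. A standalone sketch that reproduces the layout and prints the resulting offsets:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Userspace copy of the shared-memory layout added above, to show why the
 * pad and __packed matter: multi_queue must land at a fixed byte offset. */
#define VCN_MULTI_QUEUE_FLAG (1 << 8)

struct fw_shared_multi_queue {
	uint8_t decode_queue_mode;
	uint8_t encode_generalpurpose_queue_mode;
	uint8_t encode_lowlatency_queue_mode;
	uint8_t encode_realtime_queue_mode;
	uint8_t padding[4];
};

struct fw_shared {
	uint32_t present_flag_0;
	uint8_t pad[53];
	struct fw_shared_multi_queue multi_queue;
} __attribute__((__packed__));

int main(void)
{
	struct fw_shared shared = { .present_flag_0 = VCN_MULTI_QUEUE_FLAG };

	printf("multi_queue offset: %zu\n", offsetof(struct fw_shared, multi_queue));
	printf("total size:         %zu\n", sizeof(shared));
	return 0;
}
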
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index adc813cde8e2..f3b38c9e04ca 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -38,7 +38,8 @@ bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
void amdgpu_virt_init_setting(struct amdgpu_device *adev)
{
/* enable virtual display */
- adev->mode_info.num_crtc = 1;
+ if (adev->mode_info.num_crtc == 0)
+ adev->mode_info.num_crtc = 1;
adev->enable_virtual_display = true;
adev->ddev->driver->driver_features &= ~DRIVER_ATOMIC;
adev->cg_flags = 0;
@@ -59,7 +60,10 @@ void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
amdgpu_ring_alloc(ring, 32);
amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1,
ref, mask);
- amdgpu_fence_emit_polling(ring, &seq);
+ r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
+ if (r)
+ goto failed_undo;
+
amdgpu_ring_commit(ring);
spin_unlock_irqrestore(&kiq->ring_lock, flags);
@@ -81,6 +85,9 @@ void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
return;
+failed_undo:
+ amdgpu_ring_undo(ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
failed_kiq:
pr_err("failed to write reg %x wait reg %x\n", reg0, reg1);
}
@@ -152,6 +159,19 @@ int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
return 0;
}
+void amdgpu_virt_request_init_data(struct amdgpu_device *adev)
+{
+ struct amdgpu_virt *virt = &adev->virt;
+
+ if (virt->ops && virt->ops->req_init_data)
+ virt->ops->req_init_data(adev);
+
+ if (adev->virt.req_init_data_ver > 0)
+ DRM_INFO("host supports REQ_INIT_DATA handshake\n");
+ else
+ DRM_WARN("host doesn't support REQ_INIT_DATA handshake\n");
+}
+
/**
* amdgpu_virt_wait_reset() - wait for reset gpu completed
* @amdgpu: amdgpu device.
@@ -287,3 +307,82 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
}
}
}
+
+void amdgpu_detect_virtualization(struct amdgpu_device *adev)
+{
+ uint32_t reg;
+
+ switch (adev->asic_type) {
+ case CHIP_TONGA:
+ case CHIP_FIJI:
+ reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
+ break;
+ case CHIP_VEGA10:
+ case CHIP_VEGA20:
+ case CHIP_NAVI10:
+ case CHIP_NAVI12:
+ case CHIP_ARCTURUS:
+ reg = RREG32(mmRCC_IOV_FUNC_IDENTIFIER);
+ break;
+ default: /* other chips don't support SRIOV */
+ reg = 0;
+ break;
+ }
+
+ if (reg & 1)
+ adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
+
+ if (reg & 0x80000000)
+ adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
+
+ if (!reg) {
+ if (is_virtual_machine()) /* passthrough mode excludes sriov mode */
+ adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
+ }
+}
+
+bool amdgpu_virt_access_debugfs_is_mmio(struct amdgpu_device *adev)
+{
+ return amdgpu_sriov_is_debug(adev) ? true : false;
+}
+
+bool amdgpu_virt_access_debugfs_is_kiq(struct amdgpu_device *adev)
+{
+ return amdgpu_sriov_is_normal(adev) ? true : false;
+}
+
+int amdgpu_virt_enable_access_debugfs(struct amdgpu_device *adev)
+{
+ if (!amdgpu_sriov_vf(adev) ||
+ amdgpu_virt_access_debugfs_is_kiq(adev))
+ return 0;
+
+ if (amdgpu_virt_access_debugfs_is_mmio(adev))
+ adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
+ else
+ return -EPERM;
+
+ return 0;
+}
+
+void amdgpu_virt_disable_access_debugfs(struct amdgpu_device *adev)
+{
+ if (amdgpu_sriov_vf(adev))
+ adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
+}
+
+enum amdgpu_sriov_vf_mode amdgpu_virt_get_sriov_vf_mode(struct amdgpu_device *adev)
+{
+ enum amdgpu_sriov_vf_mode mode;
+
+ if (amdgpu_sriov_vf(adev)) {
+ if (amdgpu_sriov_is_pp_one_vf(adev))
+ mode = SRIOV_VF_MODE_ONE_VF;
+ else
+ mode = SRIOV_VF_MODE_MULTI_VF;
+ } else {
+ mode = SRIOV_VF_MODE_BARE_METAL;
+ }
+
+ return mode;
+}
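
Note (illustration, not part of the diff): amdgpu_detect_virtualization() above reads an IOV_FUNC_IDENTIFIER register and decodes two bits — bit 0 marks the device as a virtual function, bit 31 marks SR-IOV as enabled — and falls back to passthrough detection when the register reads zero. A small standalone sketch of that decoding, with made-up register values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Standalone illustration of the IOV_FUNC_IDENTIFIER decoding; the register
 * values fed in below are invented for the example. */
#define IS_VF_BIT       0x1u
#define ENABLE_IOV_BIT  0x80000000u

static void decode_iov_identifier(uint32_t reg)
{
	bool is_vf  = reg & IS_VF_BIT;       /* running as a virtual function */
	bool iov_on = reg & ENABLE_IOV_BIT;  /* SR-IOV enabled on the device  */

	if (!reg) {
		printf("no SR-IOV: bare metal or full passthrough\n");
		return;
	}
	printf("is_vf=%d sriov_enabled=%d\n", is_vf, iov_on);
}

int main(void)
{
	decode_iov_identifier(0);            /* bare metal / passthrough */
	decode_iov_identifier(0x80000001u);  /* VF with SR-IOV enabled   */
	return 0;
}
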
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index f0128f745bd2..b90e822cebd7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -30,6 +30,17 @@
#define AMDGPU_PASSTHROUGH_MODE (1 << 3) /* the whole GPU is passed through to the VM */
#define AMDGPU_SRIOV_CAPS_RUNTIME (1 << 4) /* is out of full access mode */
+/* all ASICs after AI use this offset */
+#define mmRCC_IOV_FUNC_IDENTIFIER 0xDE5
+/* tonga/fiji use this offset */
+#define mmBIF_IOV_FUNC_IDENTIFIER 0x1503
+
+enum amdgpu_sriov_vf_mode {
+ SRIOV_VF_MODE_BARE_METAL = 0,
+ SRIOV_VF_MODE_ONE_VF,
+ SRIOV_VF_MODE_MULTI_VF,
+};
+
struct amdgpu_mm_table {
struct amdgpu_bo *bo;
uint32_t *cpu_addr;
@@ -54,6 +65,7 @@ struct amdgpu_vf_error_buffer {
struct amdgpu_virt_ops {
int (*req_full_gpu)(struct amdgpu_device *adev, bool init);
int (*rel_full_gpu)(struct amdgpu_device *adev, bool init);
+ int (*req_init_data)(struct amdgpu_device *adev);
int (*reset_gpu)(struct amdgpu_device *adev);
int (*wait_reset)(struct amdgpu_device *adev);
void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3);
@@ -83,6 +95,8 @@ enum AMDGIM_FEATURE_FLAG {
AMDGIM_FEATURE_GIM_LOAD_UCODES = 0x2,
/* VRAM LOST by GIM */
AMDGIM_FEATURE_GIM_FLR_VRAMLOST = 0x4,
+ /* MM bandwidth */
+ AMDGIM_FEATURE_GIM_MM_BW_MGR = 0x8,
/* PP ONE VF MODE in GIM */
AMDGIM_FEATURE_PP_ONE_VF = (1 << 4),
};
@@ -256,6 +270,8 @@ struct amdgpu_virt {
struct amdgpu_virt_fw_reserve fw_reserve;
uint32_t gim_feature;
uint32_t reg_access_mode;
+ int req_init_data_ver;
+ bool tdr_debug;
};
#define amdgpu_sriov_enabled(adev) \
@@ -287,6 +303,10 @@ static inline bool is_virtual_machine(void)
#define amdgpu_sriov_is_pp_one_vf(adev) \
((adev)->virt.gim_feature & AMDGIM_FEATURE_PP_ONE_VF)
+#define amdgpu_sriov_is_debug(adev) \
+ ((!adev->in_gpu_reset) && adev->virt.tdr_debug)
+#define amdgpu_sriov_is_normal(adev) \
+ ((!adev->in_gpu_reset) && (!adev->virt.tdr_debug))
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
void amdgpu_virt_init_setting(struct amdgpu_device *adev);
@@ -296,6 +316,7 @@ void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init);
int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init);
int amdgpu_virt_reset_gpu(struct amdgpu_device *adev);
+void amdgpu_virt_request_init_data(struct amdgpu_device *adev);
int amdgpu_virt_wait_reset(struct amdgpu_device *adev);
int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev);
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev);
@@ -303,4 +324,11 @@ int amdgpu_virt_fw_reserve_get_checksum(void *obj, unsigned long obj_size,
unsigned int key,
unsigned int chksum);
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
+void amdgpu_detect_virtualization(struct amdgpu_device *adev);
+
+bool amdgpu_virt_can_access_debugfs(struct amdgpu_device *adev);
+int amdgpu_virt_enable_access_debugfs(struct amdgpu_device *adev);
+void amdgpu_virt_disable_access_debugfs(struct amdgpu_device *adev);
+
+enum amdgpu_sriov_vf_mode amdgpu_virt_get_sriov_vf_mode(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 6d9252a27916..7417754e9141 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -82,7 +82,7 @@ struct amdgpu_prt_cb {
struct dma_fence_cb cb;
};
-/**
+/*
* vm eviction_lock can be taken in MMU notifiers. Make sure no reclaim-FS
* happens while holding this lock anywhere to prevent deadlocks when
* an MMU notifier runs in reclaim-FS context.
@@ -726,7 +726,7 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
* @adev: amdgpu_device pointer
* @vm: VM to clear BO from
* @bo: BO to clear
- * @direct: use a direct update
+ * @immediate: use an immediate update
*
* Root PD needs to be reserved when calling this.
*
@@ -736,7 +736,7 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct amdgpu_bo *bo,
- bool direct)
+ bool immediate)
{
struct ttm_operation_ctx ctx = { true, false };
unsigned level = adev->vm_manager.root_level;
@@ -795,7 +795,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
memset(&params, 0, sizeof(params));
params.adev = adev;
params.vm = vm;
- params.direct = direct;
+ params.immediate = immediate;
r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
if (r)
@@ -850,11 +850,11 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
* @adev: amdgpu_device pointer
* @vm: requesting vm
* @level: the page table level
- * @direct: use a direct update
+ * @immediate: use an immediate update
* @bp: resulting BO allocation parameters
*/
static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- int level, bool direct,
+ int level, bool immediate,
struct amdgpu_bo_param *bp)
{
memset(bp, 0, sizeof(*bp));
@@ -870,7 +870,7 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
else if (!vm->root.base.bo || vm->root.base.bo->shadow)
bp->flags |= AMDGPU_GEM_CREATE_SHADOW;
bp->type = ttm_bo_type_kernel;
- bp->no_wait_gpu = direct;
+ bp->no_wait_gpu = immediate;
if (vm->root.base.bo)
bp->resv = vm->root.base.bo->tbo.base.resv;
}
@@ -881,7 +881,7 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
* @adev: amdgpu_device pointer
* @vm: VM to allocate page tables for
* @cursor: Which page table to allocate
- * @direct: use a direct update
+ * @immediate: use an immediate update
*
* Make sure a specific page table or directory is allocated.
*
@@ -892,7 +892,7 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct amdgpu_vm_pt_cursor *cursor,
- bool direct)
+ bool immediate)
{
struct amdgpu_vm_pt *entry = cursor->entry;
struct amdgpu_bo_param bp;
@@ -913,7 +913,7 @@ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
if (entry->base.bo)
return 0;
- amdgpu_vm_bo_param(adev, vm, cursor->level, direct, &bp);
+ amdgpu_vm_bo_param(adev, vm, cursor->level, immediate, &bp);
r = amdgpu_bo_create(adev, &bp, &pt);
if (r)
@@ -925,7 +925,7 @@ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
pt->parent = amdgpu_bo_ref(cursor->parent->base.bo);
amdgpu_vm_bo_base_init(&entry->base, vm, pt);
- r = amdgpu_vm_clear_bo(adev, vm, pt, direct);
+ r = amdgpu_vm_clear_bo(adev, vm, pt, immediate);
if (r)
goto error_free_pt;
@@ -1276,7 +1276,7 @@ static void amdgpu_vm_invalidate_pds(struct amdgpu_device *adev,
*
* @adev: amdgpu_device pointer
* @vm: requested vm
- * @direct: submit directly to the paging queue
+ * @immediate: submit immediately to the paging queue
*
* Makes sure all directories are up to date.
*
@@ -1284,7 +1284,7 @@ static void amdgpu_vm_invalidate_pds(struct amdgpu_device *adev,
* 0 for success, error for failure.
*/
int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
- struct amdgpu_vm *vm, bool direct)
+ struct amdgpu_vm *vm, bool immediate)
{
struct amdgpu_vm_update_params params;
int r;
@@ -1295,7 +1295,7 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
memset(&params, 0, sizeof(params));
params.adev = adev;
params.vm = vm;
- params.direct = direct;
+ params.immediate = immediate;
r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
if (r)
@@ -1446,20 +1446,24 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
uint64_t incr, entry_end, pe_start;
struct amdgpu_bo *pt;
- if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT)) {
+ if (!params->unlocked) {
/* make sure that the page tables covering the
* address range are actually allocated
*/
r = amdgpu_vm_alloc_pts(params->adev, params->vm,
- &cursor, params->direct);
+ &cursor, params->immediate);
if (r)
return r;
}
shift = amdgpu_vm_level_shift(adev, cursor.level);
parent_shift = amdgpu_vm_level_shift(adev, cursor.level - 1);
- if (adev->asic_type < CHIP_VEGA10 &&
- (flags & AMDGPU_PTE_VALID)) {
+ if (params->unlocked) {
+ /* Unlocked updates are only allowed on the leaves */
+ if (amdgpu_vm_pt_descendant(adev, &cursor))
+ continue;
+ } else if (adev->asic_type < CHIP_VEGA10 &&
+ (flags & AMDGPU_PTE_VALID)) {
/* No huge page support before GMC v9 */
if (cursor.level != AMDGPU_VM_PTB) {
if (!amdgpu_vm_pt_descendant(adev, &cursor))
@@ -1557,7 +1561,8 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
*
* @adev: amdgpu_device pointer
* @vm: requested vm
- * @direct: direct submission in a page fault
+ * @immediate: immediate submission in a page fault
+ * @unlocked: unlocked invalidation during MM callback
* @resv: fences we need to sync to
* @start: start of mapped range
* @last: last mapped entry
@@ -1572,8 +1577,8 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
* 0 for success, -EINVAL for failure.
*/
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
- struct amdgpu_vm *vm, bool direct,
- struct dma_resv *resv,
+ struct amdgpu_vm *vm, bool immediate,
+ bool unlocked, struct dma_resv *resv,
uint64_t start, uint64_t last,
uint64_t flags, uint64_t addr,
dma_addr_t *pages_addr,
@@ -1586,8 +1591,9 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
memset(&params, 0, sizeof(params));
params.adev = adev;
params.vm = vm;
- params.direct = direct;
+ params.immediate = immediate;
params.pages_addr = pages_addr;
+ params.unlocked = unlocked;
/* Implicitly sync to command submissions in the same VM before
* unmapping. Sync to moving fences before mapping.
@@ -1603,11 +1609,12 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
goto error_unlock;
}
- if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT)) {
- struct amdgpu_bo *root = vm->root.base.bo;
+ if (!unlocked && !dma_fence_is_signaled(vm->last_unlocked)) {
+ struct dma_fence *tmp = dma_fence_get_stub();
- if (!dma_fence_is_signaled(vm->last_direct))
- amdgpu_bo_fence(root, vm->last_direct, true);
+ amdgpu_bo_fence(vm->root.base.bo, vm->last_unlocked, true);
+ swap(vm->last_unlocked, tmp);
+ dma_fence_put(tmp);
}
r = vm->update_funcs->prepare(&params, resv, sync_mode);
@@ -1721,7 +1728,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
}
last = min((uint64_t)mapping->last, start + max_entries - 1);
- r = amdgpu_vm_bo_update_mapping(adev, vm, false, resv,
+ r = amdgpu_vm_bo_update_mapping(adev, vm, false, false, resv,
start, last, flags, addr,
dma_addr, fence);
if (r)
@@ -1784,6 +1791,10 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
if (bo) {
flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
+
+ if (amdgpu_bo_encrypted(bo))
+ flags |= AMDGPU_PTE_TMZ;
+
bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
} else {
flags = 0x0;
@@ -2014,7 +2025,7 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
mapping->start < AMDGPU_GMC_HOLE_START)
init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
- r = amdgpu_vm_bo_update_mapping(adev, vm, false, resv,
+ r = amdgpu_vm_bo_update_mapping(adev, vm, false, false, resv,
mapping->start, mapping->last,
init_pte_value, 0, NULL, &f);
amdgpu_vm_free_mapping(adev, vm, mapping, f);
@@ -2124,11 +2135,8 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
if (bo && amdgpu_xgmi_same_hive(adev, amdgpu_ttm_adev(bo->tbo.bdev)) &&
(bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM)) {
bo_va->is_xgmi = true;
- mutex_lock(&adev->vm_manager.lock_pstate);
/* Power up XGMI if it can be potentially used */
- if (++adev->vm_manager.xgmi_map_counter == 1)
- amdgpu_xgmi_set_pstate(adev, 1);
- mutex_unlock(&adev->vm_manager.lock_pstate);
+ amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MAX_VEGA20);
}
return bo_va;
@@ -2551,12 +2559,8 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
dma_fence_put(bo_va->last_pt_update);
- if (bo && bo_va->is_xgmi) {
- mutex_lock(&adev->vm_manager.lock_pstate);
- if (--adev->vm_manager.xgmi_map_counter == 0)
- amdgpu_xgmi_set_pstate(adev, 0);
- mutex_unlock(&adev->vm_manager.lock_pstate);
- }
+ if (bo && bo_va->is_xgmi)
+ amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MIN);
kfree(bo_va);
}
@@ -2585,7 +2589,7 @@ bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
return false;
/* Don't evict VM page tables while they are updated */
- if (!dma_fence_is_signaled(bo_base->vm->last_direct)) {
+ if (!dma_fence_is_signaled(bo_base->vm->last_unlocked)) {
amdgpu_vm_eviction_unlock(bo_base->vm);
return false;
}
@@ -2762,7 +2766,7 @@ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
if (timeout <= 0)
return timeout;
- return dma_fence_wait_timeout(vm->last_direct, true, timeout);
+ return dma_fence_wait_timeout(vm->last_unlocked, true, timeout);
}
/**
@@ -2798,7 +2802,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
/* create scheduler entities for page table updates */
- r = drm_sched_entity_init(&vm->direct, DRM_SCHED_PRIORITY_NORMAL,
+ r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL,
adev->vm_manager.vm_pte_scheds,
adev->vm_manager.vm_pte_num_scheds, NULL);
if (r)
@@ -2808,7 +2812,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
adev->vm_manager.vm_pte_scheds,
adev->vm_manager.vm_pte_num_scheds, NULL);
if (r)
- goto error_free_direct;
+ goto error_free_immediate;
vm->pte_support_ats = false;
vm->is_compute_context = false;
@@ -2834,7 +2838,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
else
vm->update_funcs = &amdgpu_vm_sdma_funcs;
vm->last_update = NULL;
- vm->last_direct = dma_fence_get_stub();
+ vm->last_unlocked = dma_fence_get_stub();
mutex_init(&vm->eviction_lock);
vm->evicting = false;
@@ -2888,11 +2892,11 @@ error_free_root:
vm->root.base.bo = NULL;
error_free_delayed:
- dma_fence_put(vm->last_direct);
+ dma_fence_put(vm->last_unlocked);
drm_sched_entity_destroy(&vm->delayed);
-error_free_direct:
- drm_sched_entity_destroy(&vm->direct);
+error_free_immediate:
+ drm_sched_entity_destroy(&vm->immediate);
return r;
}
@@ -2996,10 +3000,17 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
!amdgpu_gmc_vram_full_visible(&adev->gmc)),
"CPU update of VM recommended only for large BAR system\n");
- if (vm->use_cpu_for_update)
+ if (vm->use_cpu_for_update) {
+ /* Sync with last SDMA update/clear before switching to CPU */
+ r = amdgpu_bo_sync_wait(vm->root.base.bo,
+ AMDGPU_FENCE_OWNER_UNDEFINED, true);
+ if (r)
+ goto free_idr;
+
vm->update_funcs = &amdgpu_vm_cpu_funcs;
- else
+ } else {
vm->update_funcs = &amdgpu_vm_sdma_funcs;
+ }
dma_fence_put(vm->last_update);
vm->last_update = NULL;
vm->is_compute_context = true;
@@ -3089,8 +3100,8 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
vm->pasid = 0;
}
- dma_fence_wait(vm->last_direct, false);
- dma_fence_put(vm->last_direct);
+ dma_fence_wait(vm->last_unlocked, false);
+ dma_fence_put(vm->last_unlocked);
list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
@@ -3107,7 +3118,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
amdgpu_bo_unref(&root);
WARN_ON(vm->root.base.bo);
- drm_sched_entity_destroy(&vm->direct);
+ drm_sched_entity_destroy(&vm->immediate);
drm_sched_entity_destroy(&vm->delayed);
if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
@@ -3166,9 +3177,6 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
idr_init(&adev->vm_manager.pasid_idr);
spin_lock_init(&adev->vm_manager.pasid_lock);
-
- adev->vm_manager.xgmi_map_counter = 0;
- mutex_init(&adev->vm_manager.lock_pstate);
}
/**
@@ -3343,8 +3351,8 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, unsigned int pasid,
value = 0;
}
- r = amdgpu_vm_bo_update_mapping(adev, vm, true, NULL, addr, addr + 1,
- flags, value, NULL, NULL);
+ r = amdgpu_vm_bo_update_mapping(adev, vm, true, false, NULL, addr,
+ addr + 1, flags, value, NULL, NULL);
if (r)
goto error_unlock;
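
Note (illustration, not part of the diff): the rename from "direct" to "immediate" plus the new "unlocked" flag splits VM updates into three situations: normal delayed updates, immediate updates from the page-fault path, and unlocked updates from MMU notifiers, which may only touch existing leaf PTEs because the root BO is not reserved. A toy decision table, with illustrative names only:

#include <stdbool.h>
#include <stdio.h>

/* Toy model of how the reworked update paths pick an IB pool and whether
 * page-table allocation is allowed; not driver code. */
enum ib_pool { POOL_DELAYED, POOL_IMMEDIATE };

struct vm_update { bool immediate; bool unlocked; };

static enum ib_pool pick_pool(const struct vm_update *p)
{
	return p->immediate ? POOL_IMMEDIATE : POOL_DELAYED;
}

static bool may_alloc_page_tables(const struct vm_update *p)
{
	return !p->unlocked;   /* unlocked updates only touch existing leaves */
}

int main(void)
{
	struct vm_update fault = { .immediate = true,  .unlocked = false };
	struct vm_update mmu   = { .immediate = false, .unlocked = true  };

	printf("fault: pool=%d alloc_pt=%d\n", pick_pool(&fault), may_alloc_page_tables(&fault));
	printf("mmu:   pool=%d alloc_pt=%d\n", pick_pool(&mmu),   may_alloc_page_tables(&mmu));
	return 0;
}
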
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 06fe30e1492d..c8e68d7890bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -54,6 +54,9 @@ struct amdgpu_bo_list_entry;
#define AMDGPU_PTE_SYSTEM (1ULL << 1)
#define AMDGPU_PTE_SNOOPED (1ULL << 2)
+/* RV+ */
+#define AMDGPU_PTE_TMZ (1ULL << 3)
+
/* VI only */
#define AMDGPU_PTE_EXECUTABLE (1ULL << 4)
@@ -203,9 +206,14 @@ struct amdgpu_vm_update_params {
struct amdgpu_vm *vm;
/**
- * @direct: if changes should be made directly
+ * @immediate: if changes should be made immediately
*/
- bool direct;
+ bool immediate;
+
+ /**
+ * @unlocked: true if the root BO is not locked
+ */
+ bool unlocked;
/**
* @pages_addr:
@@ -271,11 +279,11 @@ struct amdgpu_vm {
struct dma_fence *last_update;
/* Scheduler entities for page table updates */
- struct drm_sched_entity direct;
+ struct drm_sched_entity immediate;
struct drm_sched_entity delayed;
- /* Last submission to the scheduler entities */
- struct dma_fence *last_direct;
+ /* Last unlocked submission to the scheduler entities */
+ struct dma_fence *last_unlocked;
unsigned int pasid;
/* dedicated to vm */
@@ -349,10 +357,6 @@ struct amdgpu_vm_manager {
*/
struct idr pasid_idr;
spinlock_t pasid_lock;
-
- /* counter of mapped memory through xgmi */
- uint32_t xgmi_map_counter;
- struct mutex lock_pstate;
};
#define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
@@ -380,7 +384,7 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
void *param);
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync);
int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
- struct amdgpu_vm *vm, bool direct);
+ struct amdgpu_vm *vm, bool immediate);
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct dma_fence **fence);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
index e38516304070..39c704a1fb0e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
@@ -84,7 +84,7 @@ static int amdgpu_vm_cpu_update(struct amdgpu_vm_update_params *p,
pe += (unsigned long)amdgpu_bo_kptr(bo);
- trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->direct);
+ trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->immediate);
for (i = 0; i < count; i++) {
value = p->pages_addr ?
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
index cf96c335b258..8d9c6feba660 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -61,10 +61,12 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
struct dma_resv *resv,
enum amdgpu_sync_mode sync_mode)
{
+ enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE
+ : AMDGPU_IB_POOL_DELAYED;
unsigned int ndw = AMDGPU_VM_SDMA_MIN_NUM_DW;
int r;
- r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, &p->job);
+ r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, pool, &p->job);
if (r)
return r;
@@ -90,11 +92,11 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
{
struct amdgpu_ib *ib = p->job->ibs;
struct drm_sched_entity *entity;
- struct dma_fence *f, *tmp;
struct amdgpu_ring *ring;
+ struct dma_fence *f;
int r;
- entity = p->direct ? &p->vm->direct : &p->vm->delayed;
+ entity = p->immediate ? &p->vm->immediate : &p->vm->delayed;
ring = container_of(entity->rq->sched, struct amdgpu_ring, sched);
WARN_ON(ib->length_dw == 0);
@@ -104,15 +106,16 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
if (r)
goto error;
- if (p->direct) {
- tmp = dma_fence_get(f);
- swap(p->vm->last_direct, tmp);
+ if (p->unlocked) {
+ struct dma_fence *tmp = dma_fence_get(f);
+
+ swap(p->vm->last_unlocked, f);
dma_fence_put(tmp);
} else {
- dma_resv_add_shared_fence(p->vm->root.base.bo->tbo.base.resv, f);
+ amdgpu_bo_fence(p->vm->root.base.bo, f, true);
}
- if (fence && !p->direct)
+ if (fence && !p->immediate)
swap(*fence, f);
dma_fence_put(f);
return 0;
@@ -142,7 +145,7 @@ static void amdgpu_vm_sdma_copy_ptes(struct amdgpu_vm_update_params *p,
src += p->num_dw_left * 4;
pe += amdgpu_gmc_sign_extend(bo->tbo.offset);
- trace_amdgpu_vm_copy_ptes(pe, src, count, p->direct);
+ trace_amdgpu_vm_copy_ptes(pe, src, count, p->immediate);
amdgpu_vm_copy_pte(p->adev, ib, pe, src, count);
}
@@ -169,7 +172,7 @@ static void amdgpu_vm_sdma_set_ptes(struct amdgpu_vm_update_params *p,
struct amdgpu_ib *ib = p->job->ibs;
pe += amdgpu_gmc_sign_extend(bo->tbo.offset);
- trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->direct);
+ trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->immediate);
if (count < 3) {
amdgpu_vm_write_pte(p->adev, ib, pe, addr | flags,
count, incr);
@@ -198,6 +201,8 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
uint64_t addr, unsigned count, uint32_t incr,
uint64_t flags)
{
+ enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE
+ : AMDGPU_IB_POOL_DELAYED;
unsigned int i, ndw, nptes;
uint64_t *pte;
int r;
@@ -223,7 +228,8 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
ndw = max(ndw, AMDGPU_VM_SDMA_MIN_NUM_DW);
ndw = min(ndw, AMDGPU_VM_SDMA_MAX_NUM_DW);
- r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, &p->job);
+ r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, pool,
+ &p->job);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 82a3299e53c0..d399e5893170 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -22,6 +22,7 @@
* Authors: Christian König
*/
+#include <linux/dma-mapping.h>
#include "amdgpu.h"
#include "amdgpu_vm.h"
#include "amdgpu_atomfirmware.h"
@@ -148,6 +149,15 @@ static DEVICE_ATTR(mem_info_vis_vram_used, S_IRUGO,
static DEVICE_ATTR(mem_info_vram_vendor, S_IRUGO,
amdgpu_mem_info_vram_vendor, NULL);
+static const struct attribute *amdgpu_vram_mgr_attributes[] = {
+ &dev_attr_mem_info_vram_total.attr,
+ &dev_attr_mem_info_vis_vram_total.attr,
+ &dev_attr_mem_info_vram_used.attr,
+ &dev_attr_mem_info_vis_vram_used.attr,
+ &dev_attr_mem_info_vram_vendor.attr,
+ NULL
+};
+
/**
* amdgpu_vram_mgr_init - init VRAM manager and DRM MM
*
@@ -172,31 +182,9 @@ static int amdgpu_vram_mgr_init(struct ttm_mem_type_manager *man,
man->priv = mgr;
/* Add the two VRAM-related sysfs files */
- ret = device_create_file(adev->dev, &dev_attr_mem_info_vram_total);
- if (ret) {
- DRM_ERROR("Failed to create device file mem_info_vram_total\n");
- return ret;
- }
- ret = device_create_file(adev->dev, &dev_attr_mem_info_vis_vram_total);
- if (ret) {
- DRM_ERROR("Failed to create device file mem_info_vis_vram_total\n");
- return ret;
- }
- ret = device_create_file(adev->dev, &dev_attr_mem_info_vram_used);
- if (ret) {
- DRM_ERROR("Failed to create device file mem_info_vram_used\n");
- return ret;
- }
- ret = device_create_file(adev->dev, &dev_attr_mem_info_vis_vram_used);
- if (ret) {
- DRM_ERROR("Failed to create device file mem_info_vis_vram_used\n");
- return ret;
- }
- ret = device_create_file(adev->dev, &dev_attr_mem_info_vram_vendor);
- if (ret) {
- DRM_ERROR("Failed to create device file mem_info_vram_vendor\n");
- return ret;
- }
+ ret = sysfs_create_files(&adev->dev->kobj, amdgpu_vram_mgr_attributes);
+ if (ret)
+ DRM_ERROR("Failed to register sysfs\n");
return 0;
}
@@ -219,11 +207,7 @@ static int amdgpu_vram_mgr_fini(struct ttm_mem_type_manager *man)
spin_unlock(&mgr->lock);
kfree(mgr);
man->priv = NULL;
- device_remove_file(adev->dev, &dev_attr_mem_info_vram_total);
- device_remove_file(adev->dev, &dev_attr_mem_info_vis_vram_total);
- device_remove_file(adev->dev, &dev_attr_mem_info_vram_used);
- device_remove_file(adev->dev, &dev_attr_mem_info_vis_vram_used);
- device_remove_file(adev->dev, &dev_attr_mem_info_vram_vendor);
+ sysfs_remove_files(&adev->dev->kobj, amdgpu_vram_mgr_attributes);
return 0;
}
@@ -459,6 +443,104 @@ static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man,
}
/**
+ * amdgpu_vram_mgr_alloc_sgt - allocate and fill a sg table
+ *
+ * @adev: amdgpu device pointer
+ * @mem: TTM memory object
+ * @dev: the other device
+ * @dir: dma direction
+ * @sgt: resulting sg table
+ *
+ * Allocate and fill a sg table from a VRAM allocation.
+ */
+int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
+ struct ttm_mem_reg *mem,
+ struct device *dev,
+ enum dma_data_direction dir,
+ struct sg_table **sgt)
+{
+ struct drm_mm_node *node;
+ struct scatterlist *sg;
+ int num_entries = 0;
+ unsigned int pages;
+ int i, r;
+
+ *sgt = kmalloc(sizeof(**sgt), GFP_KERNEL);
+ if (!*sgt)
+ return -ENOMEM;
+
+ for (pages = mem->num_pages, node = mem->mm_node;
+ pages; pages -= node->size, ++node)
+ ++num_entries;
+
+ r = sg_alloc_table(*sgt, num_entries, GFP_KERNEL);
+ if (r)
+ goto error_free;
+
+ for_each_sg((*sgt)->sgl, sg, num_entries, i)
+ sg->length = 0;
+
+ node = mem->mm_node;
+ for_each_sg((*sgt)->sgl, sg, num_entries, i) {
+ phys_addr_t phys = (node->start << PAGE_SHIFT) +
+ adev->gmc.aper_base;
+ size_t size = node->size << PAGE_SHIFT;
+ dma_addr_t addr;
+
+ ++node;
+ addr = dma_map_resource(dev, phys, size, dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ r = dma_mapping_error(dev, addr);
+ if (r)
+ goto error_unmap;
+
+ sg_set_page(sg, NULL, size, 0);
+ sg_dma_address(sg) = addr;
+ sg_dma_len(sg) = size;
+ }
+ return 0;
+
+error_unmap:
+ for_each_sg((*sgt)->sgl, sg, num_entries, i) {
+ if (!sg->length)
+ continue;
+
+ dma_unmap_resource(dev, sg->dma_address,
+ sg->length, dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ }
+ sg_free_table(*sgt);
+
+error_free:
+ kfree(*sgt);
+ return r;
+}
+
+/**
+ * amdgpu_vram_mgr_free_sgt - free and unmap a sg table
+ *
+ * @adev: amdgpu device pointer
+ * @dev: device the sg table was mapped for
+ * @dir: dma direction the mapping was created with
+ * @sgt: sg table to free
+ *
+ * Unmap and free a previously allocated sg table.
+ */
+void amdgpu_vram_mgr_free_sgt(struct amdgpu_device *adev,
+ struct device *dev,
+ enum dma_data_direction dir,
+ struct sg_table *sgt)
+{
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(sgt->sgl, sg, sgt->nents, i)
+ dma_unmap_resource(dev, sg->dma_address,
+ sg->length, dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ sg_free_table(sgt);
+ kfree(sgt);
+}
+
+/**
* amdgpu_vram_mgr_usage - how many bytes are used in this domain
*
* @man: TTM memory type manager
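
Note (illustration, not part of the diff): amdgpu_vram_mgr_alloc_sgt() above first zeroes every scatterlist length and only sets it after dma_map_resource() succeeds, so the error path can tell mapped entries from untouched ones. A simplified userspace model of that unwind pattern:

#include <stddef.h>
#include <stdio.h>

/* Every entry stays zero-length until it is successfully mapped, so the
 * unwind loop can skip entries that were never touched.  The failure of
 * the third mapping is simulated. */
struct entry { size_t length; };

static int map_one(struct entry *e, size_t size, int should_fail)
{
	if (should_fail)
		return -1;
	e->length = size;   /* only set once the mapping succeeded */
	return 0;
}

static void unmap_all(struct entry *entries, int n)
{
	for (int i = 0; i < n; i++)
		if (entries[i].length)          /* skip never-mapped entries */
			printf("unmapping entry %d (%zu bytes)\n", i, entries[i].length);
}

int main(void)
{
	struct entry entries[3] = { 0 };

	for (int i = 0; i < 3; i++) {
		if (map_one(&entries[i], 4096, i == 2)) {  /* third mapping fails */
			unmap_all(entries, 3);
			return 1;
		}
	}
	return 0;
}
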
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 95b3327168ac..91837a991319 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -325,9 +325,18 @@ success:
static void amdgpu_xgmi_sysfs_rem_dev_info(struct amdgpu_device *adev,
struct amdgpu_hive_info *hive)
{
+ char node[10];
+ memset(node, 0, sizeof(node));
+
device_remove_file(adev->dev, &dev_attr_xgmi_device_id);
- sysfs_remove_link(&adev->dev->kobj, adev->ddev->unique);
- sysfs_remove_link(hive->kobj, adev->ddev->unique);
+ device_remove_file(adev->dev, &dev_attr_xgmi_error);
+
+ if (adev != hive->adev)
+ sysfs_remove_link(&adev->dev->kobj, "xgmi_hive_info");
+
+ sprintf(node, "node%d", hive->number_devices);
+ sysfs_remove_link(hive->kobj, node);
+
}
@@ -373,7 +382,13 @@ struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lo
if (lock)
mutex_lock(&tmp->hive_lock);
- tmp->pstate = -1;
+ tmp->pstate = AMDGPU_XGMI_PSTATE_UNKNOWN;
+ tmp->hi_req_gpu = NULL;
+ /*
+ * the hive pstate is high on boot for vega20, so we have to go to the
+ * low pstate after boot.
+ */
+ tmp->hi_req_count = AMDGPU_MAX_XGMI_DEVICE_PER_HIVE;
mutex_unlock(&xgmi_mutex);
return tmp;
@@ -383,56 +398,59 @@ int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate)
{
int ret = 0;
struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0);
- struct amdgpu_device *tmp_adev;
- bool update_hive_pstate = true;
- bool is_high_pstate = pstate && adev->asic_type == CHIP_VEGA20;
+ struct amdgpu_device *request_adev = hive->hi_req_gpu ?
+ hive->hi_req_gpu : adev;
+ bool is_hi_req = pstate == AMDGPU_XGMI_PSTATE_MAX_VEGA20;
+ bool init_low = hive->pstate == AMDGPU_XGMI_PSTATE_UNKNOWN;
- if (!hive)
+ /* fw bug so temporarily disable pstate switching */
+ return 0;
+
+ if (!hive || adev->asic_type != CHIP_VEGA20)
return 0;
mutex_lock(&hive->hive_lock);
- if (hive->pstate == pstate) {
- adev->pstate = is_high_pstate ? pstate : adev->pstate;
+ if (is_hi_req)
+ hive->hi_req_count++;
+ else
+ hive->hi_req_count--;
+
+ /*
+ * Vega20 only needs a single peer to request the high pstate for the hive
+ * to go high, but all peers must request the low pstate for the hive to go low
+ */
+ if (hive->pstate == pstate ||
+ (!is_hi_req && hive->hi_req_count && !init_low))
goto out;
- }
- dev_dbg(adev->dev, "Set xgmi pstate %d.\n", pstate);
+ dev_dbg(request_adev->dev, "Set xgmi pstate %d.\n", pstate);
- ret = amdgpu_dpm_set_xgmi_pstate(adev, pstate);
+ ret = amdgpu_dpm_set_xgmi_pstate(request_adev, pstate);
if (ret) {
- dev_err(adev->dev,
+ dev_err(request_adev->dev,
"XGMI: Set pstate failure on device %llx, hive %llx, ret %d",
- adev->gmc.xgmi.node_id,
- adev->gmc.xgmi.hive_id, ret);
+ request_adev->gmc.xgmi.node_id,
+ request_adev->gmc.xgmi.hive_id, ret);
goto out;
}
- /* Update device pstate */
- adev->pstate = pstate;
-
- /*
- * Update the hive pstate only all devices of the hive
- * are in the same pstate
- */
- list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
- if (tmp_adev->pstate != adev->pstate) {
- update_hive_pstate = false;
- break;
- }
- }
- if (update_hive_pstate || is_high_pstate)
+ if (init_low)
+ hive->pstate = hive->hi_req_count ?
+ hive->pstate : AMDGPU_XGMI_PSTATE_MIN;
+ else {
hive->pstate = pstate;
-
+ hive->hi_req_gpu = pstate != AMDGPU_XGMI_PSTATE_MIN ?
+ adev : NULL;
+ }
out:
mutex_unlock(&hive->hive_lock);
-
return ret;
}
int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev)
{
- int ret = -EINVAL;
+ int ret;
/* Each psp needs to set the latest topology */
ret = psp_xgmi_set_topology_info(&adev->psp,
@@ -507,9 +525,6 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
goto exit;
}
- /* Set default device pstate */
- adev->pstate = -1;
-
top_info = &adev->psp.xgmi_context.top_info;
list_add_tail(&adev->gmc.xgmi.head, &hive->device_list);
@@ -577,14 +592,14 @@ int amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
if (!hive)
return -EINVAL;
- if (!(hive->number_devices--)) {
+ task_barrier_rem_task(&hive->tb);
+ amdgpu_xgmi_sysfs_rem_dev_info(adev, hive);
+ mutex_unlock(&hive->hive_lock);
+
+ if (!(--hive->number_devices)) {
amdgpu_xgmi_sysfs_destroy(adev, hive);
mutex_destroy(&hive->hive_lock);
mutex_destroy(&hive->reset_lock);
- } else {
- task_barrier_rem_task(&hive->tb);
- amdgpu_xgmi_sysfs_rem_dev_info(adev, hive);
- mutex_unlock(&hive->hive_lock);
}
return psp_xgmi_terminate(&adev->psp);
@@ -604,6 +619,8 @@ int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev)
adev->gmc.xgmi.num_physical_nodes == 0)
return 0;
+ amdgpu_xgmi_reset_ras_error_count(adev);
+
if (!adev->gmc.xgmi.ras_if) {
adev->gmc.xgmi.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
if (!adev->gmc.xgmi.ras_if)
@@ -641,31 +658,34 @@ void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev)
uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev,
uint64_t addr)
{
- uint32_t df_inst_id;
- uint64_t dram_base_addr = 0;
- const struct amdgpu_df_funcs *df_funcs = adev->df.funcs;
-
- if ((!df_funcs) ||
- (!df_funcs->get_df_inst_id) ||
- (!df_funcs->get_dram_base_addr)) {
- dev_warn(adev->dev,
- "XGMI: relative phy_addr algorithm is not supported\n");
- return addr;
- }
-
- if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW)) {
- dev_warn(adev->dev,
- "failed to disable DF-Cstate, DF register may not be accessible\n");
- return addr;
- }
+ struct amdgpu_xgmi *xgmi = &adev->gmc.xgmi;
+ return (addr + xgmi->physical_node_id * xgmi->node_segment_size);
+}
- df_inst_id = df_funcs->get_df_inst_id(adev);
- dram_base_addr = df_funcs->get_dram_base_addr(adev, df_inst_id);
+static void pcs_clear_status(struct amdgpu_device *adev, uint32_t pcs_status_reg)
+{
+ WREG32_PCIE(pcs_status_reg, 0xFFFFFFFF);
+ WREG32_PCIE(pcs_status_reg, 0);
+}
- if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
- dev_warn(adev->dev, "failed to enable DF-Cstate\n");
+void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev)
+{
+ uint32_t i;
- return addr + dram_base_addr;
+ switch (adev->asic_type) {
+ case CHIP_ARCTURUS:
+ for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_arct); i++)
+ pcs_clear_status(adev,
+ xgmi_pcs_err_status_reg_arct[i]);
+ break;
+ case CHIP_VEGA20:
+ for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_vg20); i++)
+ pcs_clear_status(adev,
+ xgmi_pcs_err_status_reg_vg20[i]);
+ break;
+ default:
+ break;
+ }
}
static int amdgpu_xgmi_query_pcs_error_status(struct amdgpu_device *adev,
@@ -758,6 +778,8 @@ int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
break;
}
+ amdgpu_xgmi_reset_ras_error_count(adev);
+
err_data->ue_count += ue_cnt;
err_data->ce_count += ce_cnt;
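
Note (illustration, not part of the diff): the pstate rework above replaces the per-device pstate with a hive-wide vote: hi_req_count goes up on every high request and down on every low request, so a single high requester keeps the hive high and it can only drop once all requesters have voted low again. A toy model of that counting:

#include <stdbool.h>
#include <stdio.h>

/* hi_req_count mirrors the field added to amdgpu_hive_info; everything
 * else here is invented for the example. */
static int hi_req_count;

static const char *vote(bool want_high)
{
	if (want_high)
		hi_req_count++;
	else
		hi_req_count--;

	return hi_req_count ? "hive stays high" : "hive may go low";
}

int main(void)
{
	printf("gpu0 high: %s\n", vote(true));   /* hive stays high */
	printf("gpu1 high: %s\n", vote(true));   /* hive stays high */
	printf("gpu0 low:  %s\n", vote(false));  /* hive stays high */
	printf("gpu1 low:  %s\n", vote(false));  /* hive may go low */
	return 0;
}
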
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
index 4a92067fe595..6999eab16a72 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
@@ -25,6 +25,7 @@
#include <drm/task_barrier.h>
#include "amdgpu_psp.h"
+
struct amdgpu_hive_info {
uint64_t hive_id;
struct list_head device_list;
@@ -33,8 +34,14 @@ struct amdgpu_hive_info {
struct kobject *kobj;
struct device_attribute dev_attr;
struct amdgpu_device *adev;
- int pstate; /*0 -- low , 1 -- high , -1 unknown*/
+ int hi_req_count;
+ struct amdgpu_device *hi_req_gpu;
struct task_barrier tb;
+ enum {
+ AMDGPU_XGMI_PSTATE_MIN,
+ AMDGPU_XGMI_PSTATE_MAX_VEGA20,
+ AMDGPU_XGMI_PSTATE_UNKNOWN
+ } pstate;
};
struct amdgpu_pcs_ras_field {
@@ -56,6 +63,7 @@ uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev,
uint64_t addr);
int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
void *ras_error_status);
+void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev);
static inline bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev,
struct amdgpu_device *bo_adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c
index cae426c7c086..4cfc786699c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom.c
+++ b/drivers/gpu/drm/amd/amdgpu/atom.c
@@ -54,6 +54,8 @@
#define PLL_INDEX 2
#define PLL_DATA 3
+#define ATOM_CMD_TIMEOUT_SEC 20
+
typedef struct {
struct atom_context *ctx;
uint32_t *ps, *ws;
@@ -744,8 +746,9 @@ static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
cjiffies = jiffies;
if (time_after(cjiffies, ctx->last_jump_jiffies)) {
cjiffies -= ctx->last_jump_jiffies;
- if ((jiffies_to_msecs(cjiffies) > 10000)) {
- DRM_ERROR("atombios stuck in loop for more than 10secs aborting\n");
+ if (jiffies_to_msecs(cjiffies) > ATOM_CMD_TIMEOUT_SEC * 1000) {
+ DRM_ERROR("atombios stuck in loop for more than %d secs, aborting\n",
+ ATOM_CMD_TIMEOUT_SEC);
ctx->abort = true;
}
} else {
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 62635e58e45e..fe306d0f73f7 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1809,12 +1809,6 @@ static uint32_t cik_get_rev_id(struct amdgpu_device *adev)
>> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT;
}
-static void cik_detect_hw_virtualization(struct amdgpu_device *adev)
-{
- if (is_virtual_machine()) /* passthrough mode */
- adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
-}
-
static void cik_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
if (!ring || !ring->funcs->emit_wreg) {
@@ -2177,8 +2171,6 @@ static const struct amdgpu_ip_block_version cik_common_ip_block =
int cik_set_ip_blocks(struct amdgpu_device *adev)
{
- cik_detect_hw_virtualization(adev);
-
switch (adev->asic_type) {
case CHIP_BONAIRE:
amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 580d3f93d670..20f108818b2b 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -320,8 +320,6 @@ static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], 0);
}
- sdma0->sched.ready = false;
- sdma1->sched.ready = false;
}
/**
@@ -679,7 +677,8 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
memset(&ib, 0, sizeof(ib));
- r = amdgpu_ib_get(adev, NULL, 256, &ib);
+ r = amdgpu_ib_get(adev, NULL, 256,
+ AMDGPU_IB_POOL_DIRECT, &ib);
if (r)
goto err0;
@@ -980,7 +979,8 @@ static int cik_sdma_sw_init(void *handle)
&adev->sdma.trap_irq,
(i == 0) ?
AMDGPU_SDMA_IRQ_INSTANCE0 :
- AMDGPU_SDMA_IRQ_INSTANCE1);
+ AMDGPU_SDMA_IRQ_INSTANCE1,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
}
@@ -1313,7 +1313,8 @@ static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev)
static void cik_sdma_emit_copy_buffer(struct amdgpu_ib *ib,
uint64_t src_offset,
uint64_t dst_offset,
- uint32_t byte_count)
+ uint32_t byte_count,
+ bool tmz)
{
ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0);
ib->ptr[ib->length_dw++] = byte_count;
diff --git a/drivers/gpu/drm/amd/amdgpu/cikd.h b/drivers/gpu/drm/amd/amdgpu/cikd.h
index cee6e8a3ad9c..5f3f6ebfb387 100644
--- a/drivers/gpu/drm/amd/amdgpu/cikd.h
+++ b/drivers/gpu/drm/amd/amdgpu/cikd.h
@@ -450,7 +450,7 @@
# define PACKET3_DMA_DATA_CMD_SAIC (1 << 28)
# define PACKET3_DMA_DATA_CMD_DAIC (1 << 29)
# define PACKET3_DMA_DATA_CMD_RAW_WAIT (1 << 30)
-#define PACKET3_AQUIRE_MEM 0x58
+#define PACKET3_ACQUIRE_MEM 0x58
#define PACKET3_REWIND 0x59
#define PACKET3_LOAD_UCONFIG_REG 0x5E
#define PACKET3_LOAD_SH_REG 0x5F
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 2512e7ebfedf..e38744d06f4e 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -2303,9 +2303,9 @@ static void dce_v10_0_hide_cursor(struct drm_crtc *crtc)
struct amdgpu_device *adev = crtc->dev->dev_private;
u32 tmp;
- tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
+ tmp = RREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 0);
- WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+ WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
}
static void dce_v10_0_show_cursor(struct drm_crtc *crtc)
@@ -2319,10 +2319,10 @@ static void dce_v10_0_show_cursor(struct drm_crtc *crtc)
WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
lower_32_bits(amdgpu_crtc->cursor_addr));
- tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
+ tmp = RREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 1);
tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_MODE, 2);
- WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+ WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
}
static int dce_v10_0_cursor_move_locked(struct drm_crtc *crtc,
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 0dde22db9848..2584ff74423b 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -2382,9 +2382,9 @@ static void dce_v11_0_hide_cursor(struct drm_crtc *crtc)
struct amdgpu_device *adev = crtc->dev->dev_private;
u32 tmp;
- tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
+ tmp = RREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 0);
- WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+ WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
}
static void dce_v11_0_show_cursor(struct drm_crtc *crtc)
@@ -2398,10 +2398,10 @@ static void dce_v11_0_show_cursor(struct drm_crtc *crtc)
WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
lower_32_bits(amdgpu_crtc->cursor_addr));
- tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
+ tmp = RREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 1);
tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_MODE, 2);
- WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+ WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
}
static int dce_v11_0_cursor_move_locked(struct drm_crtc *crtc,
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 84219534bd38..d05c39f9ae40 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -2194,9 +2194,9 @@ static void dce_v6_0_hide_cursor(struct drm_crtc *crtc)
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct amdgpu_device *adev = crtc->dev->dev_private;
- WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
- (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
- (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
+ WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
+ (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
+ (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
}
@@ -2211,10 +2211,10 @@ static void dce_v6_0_show_cursor(struct drm_crtc *crtc)
WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
lower_32_bits(amdgpu_crtc->cursor_addr));
- WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
- CUR_CONTROL__CURSOR_EN_MASK |
- (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
- (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
+ WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
+ CUR_CONTROL__CURSOR_EN_MASK |
+ (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
+ (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 3a640702d7d1..ad0f8adb6a2b 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -2205,9 +2205,9 @@ static void dce_v8_0_hide_cursor(struct drm_crtc *crtc)
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct amdgpu_device *adev = crtc->dev->dev_private;
- WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
- (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
- (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
+ WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
+ (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
+ (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
}
static void dce_v8_0_show_cursor(struct drm_crtc *crtc)
@@ -2220,10 +2220,10 @@ static void dce_v8_0_show_cursor(struct drm_crtc *crtc)
WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
lower_32_bits(amdgpu_crtc->cursor_addr));
- WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
- CUR_CONTROL__CURSOR_EN_MASK |
- (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
- (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
+ WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
+ CUR_CONTROL__CURSOR_EN_MASK |
+ (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
+ (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
}
static int dce_v8_0_cursor_move_locked(struct drm_crtc *crtc,
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index 13e12be667fc..d5ff7b6331ff 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -172,8 +172,9 @@ static void dce_virtual_crtc_disable(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- dce_virtual_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+ drm_crtc_vblank_off(crtc);
+ amdgpu_crtc->enabled = false;
amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
amdgpu_crtc->encoder = NULL;
amdgpu_crtc->connector = NULL;
@@ -286,7 +287,7 @@ static int dce_virtual_get_modes(struct drm_connector *connector)
static const struct mode_size {
int w;
int h;
- } common_modes[17] = {
+ } common_modes[21] = {
{ 640, 480},
{ 720, 480},
{ 800, 600},
@@ -303,10 +304,14 @@ static int dce_virtual_get_modes(struct drm_connector *connector)
{1680, 1050},
{1600, 1200},
{1920, 1080},
- {1920, 1200}
+ {1920, 1200},
+ {4096, 3112},
+ {3656, 2664},
+ {3840, 2160},
+ {4096, 2160},
};
- for (i = 0; i < 17; i++) {
+ for (i = 0; i < 21; i++) {
mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false);
drm_mode_probed_add(connector, mode);
}
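The virtual-display hunk above grows the probed mode table from 17 to 21 entries to cover 4K-class sizes and bumps the loop bound to match. A minimal user-space style sketch of the same enumeration pattern follows, with an abridged table and a hypothetical make_cvt_mode() standing in for drm_cvt_mode(dev, w, h, 60, false, false, false); only the table/loop structure is taken from the patch.

#include <stdio.h>
#include <stddef.h>

struct mode_size { int w; int h; };

/* Abridged version of the patched table; deriving the count with sizeof
 * keeps the loop bound from drifting out of sync with the initializer. */
static const struct mode_size common_modes[] = {
	{ 640,  480}, { 800,  600}, {1280, 1024}, {1920, 1080},
	{1920, 1200}, {4096, 3112}, {3656, 2664}, {3840, 2160},
	{4096, 2160},
};

/* Hypothetical stand-in for drm_cvt_mode() + drm_mode_probed_add(). */
static void make_cvt_mode(int w, int h, int vrefresh)
{
	printf("mode %dx%d@%d\n", w, h, vrefresh);
}

int main(void)
{
	for (size_t i = 0; i < sizeof(common_modes) / sizeof(common_modes[0]); i++)
		make_cvt_mode(common_modes[i].w, common_modes[i].h, 60);
	return 0;
}

The patch itself keeps the hard-coded count (21) in both the array declaration and the loop, matching the existing style of the file; an ARRAY_SIZE-based bound, as sketched here, is one way to avoid updating two places on the next extension.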
diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
index 5a1bd8ed1a6c..a7b8292cefee 100644
--- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
+++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
@@ -686,58 +686,6 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device *adev,
}
}
-static uint64_t df_v3_6_get_dram_base_addr(struct amdgpu_device *adev,
- uint32_t df_inst)
-{
- uint32_t base_addr_reg_val = 0;
- uint64_t base_addr = 0;
-
- base_addr_reg_val = RREG32_PCIE(smnDF_CS_UMC_AON0_DramBaseAddress0 +
- df_inst * DF_3_6_SMN_REG_INST_DIST);
-
- if (REG_GET_FIELD(base_addr_reg_val,
- DF_CS_UMC_AON0_DramBaseAddress0,
- AddrRngVal) == 0) {
- DRM_WARN("address range not valid");
- return 0;
- }
-
- base_addr = REG_GET_FIELD(base_addr_reg_val,
- DF_CS_UMC_AON0_DramBaseAddress0,
- DramBaseAddr);
-
- return base_addr << 28;
-}
-
-static uint32_t df_v3_6_get_df_inst_id(struct amdgpu_device *adev)
-{
- uint32_t xgmi_node_id = 0;
- uint32_t df_inst_id = 0;
-
- /* Walk through DF dst nodes to find current XGMI node */
- for (df_inst_id = 0; df_inst_id < DF_3_6_INST_CNT; df_inst_id++) {
-
- xgmi_node_id = RREG32_PCIE(smnDF_CS_UMC_AON0_DramLimitAddress0 +
- df_inst_id * DF_3_6_SMN_REG_INST_DIST);
- xgmi_node_id = REG_GET_FIELD(xgmi_node_id,
- DF_CS_UMC_AON0_DramLimitAddress0,
- DstFabricID);
-
- /* TODO: establish reason dest fabric id is offset by 7 */
- xgmi_node_id = xgmi_node_id >> 7;
-
- if (adev->gmc.xgmi.physical_node_id == xgmi_node_id)
- break;
- }
-
- if (df_inst_id == DF_3_6_INST_CNT) {
- DRM_WARN("cant match df dst id with gpu node");
- return 0;
- }
-
- return df_inst_id;
-}
-
const struct amdgpu_df_funcs df_v3_6_funcs = {
.sw_init = df_v3_6_sw_init,
.sw_fini = df_v3_6_sw_fini,
@@ -752,6 +700,4 @@ const struct amdgpu_df_funcs df_v3_6_funcs = {
.pmc_get_count = df_v3_6_pmc_get_count,
.get_fica = df_v3_6_get_fica,
.set_fica = df_v3_6_set_fica,
- .get_dram_base_addr = df_v3_6_get_dram_base_addr,
- .get_df_inst_id = df_v3_6_get_df_inst_id
};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 0e0daf0021b6..bd5dd4f64311 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -138,6 +138,1062 @@ static const struct soc15_reg_golden golden_settings_gc_10_0_nv10[] =
/* Pending on emulation bring up */
};
+static const struct soc15_reg_golden golden_settings_gc_rlc_spm_10_0_nv10[] =
+{
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x28),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x20),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xcc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x88),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xbc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x90),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x94),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x98),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x9c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xac),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x38),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x3c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x40),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x44),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x48),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x70),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x74),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x78),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x7c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x80),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x84),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x50),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x54),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x58),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x5c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x60),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x64),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x68),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x6c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x20),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x20),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x28),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x28),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x2c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x2c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x30),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x30),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x34),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x34),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x38),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x38),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x3c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x3c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x50),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x50),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x54),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x54),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x58),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x58),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x5c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x5c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1d),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1d),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x48),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x48),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x40),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x40),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x44),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x44),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x17),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x17),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x60),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x60),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x64),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x64),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x70),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x70),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x74),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x74),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x68),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x68),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x6c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x6c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x78),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x78),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x7c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x7c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x88),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x88),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x80),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x80),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x84),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x84),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x90),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x90),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x94),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x94),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x98),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x98),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x9c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x9c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xac),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xac),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xbc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xbc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xcc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xcc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xec),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xec),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xfc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xfc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x100),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x100),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x104),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x104),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x118),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x118),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x11c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x11c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x120),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x120),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x124),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x124),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xdc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xdc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x110),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x110),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x114),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x114),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x108),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x108),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x17),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x17),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x128),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x128),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x12c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x12c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x138),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x138),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x13c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x13c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x130),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x130),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x134),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x134),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x140),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x140),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x144),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x144),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x150),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x150),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x154),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x154),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x148),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x148),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x158),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x158),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x15c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x15c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x168),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x168),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x16c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x16c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x160),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x160),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x164),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x164),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x170),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x170),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x174),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x174),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x180),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x180),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x184),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x184),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x178),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x178),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x17c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x17c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x188),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x188),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x198),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x198),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x19c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x19c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x190),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x190),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x194),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x194),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x30),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x34),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1d),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1d),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1f),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1f),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x2c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLOBALS_SAMPLE_SKEW, 0x000000FF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLOBALS_MUXSEL_SKEW, 0x000000FF, 0x20),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLE_SKEW, 0x000000FF, 0x5),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLE_SKEW, 0x000000FF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_MUXSEL_SKEW, 0x000000FF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_MUXSEL_SKEW, 0x000000FF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_DESER_START_SKEW, 0x000000FF, 0x33),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xFFFFFFFF, 0xe0000000)
+};
+
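For readers unfamiliar with these tables: each SOC15_REG_GOLDEN_VALUE(ip, inst, reg, and_mask, or_val) entry describes one register update, and the driver walks the array performing a masked read-modify-write per entry; the interleaved mmGRBM_GFX_INDEX writes steer the following RLC_SPM_* accesses to a particular shader engine before the final broadcast value restores it. The fragment below is only an illustrative sketch of that pattern, not the amdgpu driver's actual helper: struct reg_golden, reg_read() and reg_write() are hypothetical stand-ins, and the exact mask semantics of the real sequence-programming code may differ.

/*
 * Illustrative sketch only (assumed helpers, not driver code): apply a
 * golden-settings table as a masked read-modify-write per entry.
 */
#include <stdint.h>
#include <stddef.h>

struct reg_golden {
	uint32_t offset;   /* register offset, e.g. mmGRBM_GFX_INDEX (assumed layout) */
	uint32_t and_mask; /* bits affected by this entry */
	uint32_t or_val;   /* value programmed into the masked bits */
};

extern uint32_t reg_read(uint32_t offset);            /* assumed MMIO read  */
extern void reg_write(uint32_t offset, uint32_t val); /* assumed MMIO write */

static void apply_golden_settings(const struct reg_golden *tbl, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++) {
		uint32_t v = reg_read(tbl[i].offset);

		v &= ~tbl[i].and_mask;                 /* clear the selected field   */
		v |= tbl[i].or_val & tbl[i].and_mask;  /* program the new field bits */
		reg_write(tbl[i].offset, v);
	}
}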
static const struct soc15_reg_golden golden_settings_gc_10_1_1[] =
{
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x003c0014),
@@ -272,11 +1328,1691 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_nv14[] =
/* Pending on emulation bring up */
};
+static const struct soc15_reg_golden golden_settings_gc_rlc_spm_10_1_nv14[] =
+{
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xE0000000L, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x28),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x20),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x80),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x84),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x88),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x20),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x60),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x64),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x68),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x6c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x70),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x74),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x78),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x7c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x38),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x3c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x40),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x44),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x48),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x50),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x54),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x58),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x5c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x20),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x28),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x2c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x30),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x34),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x38),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x3c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x50),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1e),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x54),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1e),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x58),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1e),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x5c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1e),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x20),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x48),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x40),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1e),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x44),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1e),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x60),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x64),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x70),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x74),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x68),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x6c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x78),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x7c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x88),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x80),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x84),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x90),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x94),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x98),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x9c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xac),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xbc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xcc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xdc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xec),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x100),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x104),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x108),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x110),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x114),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x118),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x11c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x17),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x130),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x134),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x138),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x13c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x20),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x128),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x12c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x120),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x124),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x140),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x144),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x150),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x154),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x148),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x158),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x15c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x168),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x16c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x160),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x164),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x170),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x174),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x180),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x184),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x178),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x17c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x188),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x198),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x19c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x190),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x194),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1a0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1a4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1b0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1b4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1a8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1ac),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1b8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1bc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1c8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1cc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1c0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1c4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x30),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x34),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x20),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x20),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x2c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLOBALS_SAMPLE_SKEW, 0x000000FF, 0x26),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLOBALS_MUXSEL_SKEW, 0x000000FF, 0x28),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLE_SKEW, 0x000000FF, 0xf),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLE_SKEW, 0x000000FF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_MUXSEL_SKEW, 0x000000FF, 0x1f),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_MUXSEL_SKEW, 0x000000FF, 0x25),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_DESER_START_SKEW, 0x000000FF, 0x3b),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xFFFFFFFF, 0xe0000000)
+};
+
static const struct soc15_reg_golden golden_settings_gc_10_1_2_nv12[] =
{
/* Pending on emulation bring up */
};
+static const struct soc15_reg_golden golden_settings_gc_rlc_spm_10_1_2_nv12[] =
+{
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000L, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x28),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x20),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xcc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x88),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xbc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x90),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x94),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x98),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x9c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xac),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x38),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x3c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x40),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x44),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x48),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x70),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x74),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x78),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x7c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x80),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x84),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x50),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x54),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x58),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x5c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x60),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x64),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x68),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x6c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x20),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x20),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x28),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x28),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x2c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x2c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x30),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1d),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x30),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1d),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x34),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x34),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x38),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x38),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x3c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x3c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x50),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x50),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x54),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x54),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x58),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x58),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x5c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x5c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x48),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x48),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x40),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x40),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x44),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x44),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x60),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x60),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x64),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x64),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x70),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x70),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x74),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x74),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x68),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x68),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x6c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x6c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x78),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x78),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x7c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x7c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x88),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x88),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x80),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x80),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x84),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x84),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x90),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x90),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x94),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x94),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x98),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x98),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x9c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x9c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xac),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xac),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xbc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xbc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xcc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xcc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xec),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xec),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xfc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x17),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xfc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x17),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x100),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x100),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x104),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x104),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x118),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x118),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x11c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x11c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x120),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x120),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x124),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x124),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xdc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xdc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x110),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x110),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x114),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x114),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x108),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x108),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x128),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x128),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x12c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x12c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x138),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x138),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x13c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x13c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x130),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x130),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x134),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x134),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x140),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x140),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x144),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x144),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x150),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x150),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x154),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x154),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x148),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x148),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x158),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x158),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x15c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x15c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x168),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x168),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x16c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x16c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x160),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x160),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x164),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x164),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x170),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x170),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x174),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x174),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x180),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x180),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x184),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x184),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x178),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x178),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x17c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x17c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x188),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x188),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x198),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x198),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x19c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x19c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x190),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x190),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x194),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x194),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x30),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x34),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1d),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1d),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1f),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1f),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x2c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLOBALS_SAMPLE_SKEW, 0x000000FF, 0x1f),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLOBALS_MUXSEL_SKEW, 0x000000FF, 0x22),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLE_SKEW, 0x000000FF, 0x1),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLE_SKEW, 0x000000FF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_MUXSEL_SKEW, 0x000000FF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_MUXSEL_SKEW, 0x000000FF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_DESER_START_SKEW, 0x000000FF, 0x35),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xFFFFFFFF, 0xe0000000)
+};
+
#define DEFAULT_SH_MEM_CONFIG \
((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \
(SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
@@ -301,7 +3037,7 @@ static int gfx_v10_0_rlc_backdoor_autoload_enable(struct amdgpu_device *adev);
static int gfx_v10_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev);
static void gfx_v10_0_ring_emit_ce_meta(struct amdgpu_ring *ring, bool resume);
static void gfx_v10_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume);
-static void gfx_v10_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start);
+static void gfx_v10_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start, bool secure);
static void gfx10_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)
{
@@ -431,6 +3167,9 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
soc15_program_register_sequence(adev,
golden_settings_gc_10_0_nv10,
(const u32)ARRAY_SIZE(golden_settings_gc_10_0_nv10));
+ soc15_program_register_sequence(adev,
+ golden_settings_gc_rlc_spm_10_0_nv10,
+ (const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_10_0_nv10));
break;
case CHIP_NAVI14:
soc15_program_register_sequence(adev,
@@ -439,6 +3178,9 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
soc15_program_register_sequence(adev,
golden_settings_gc_10_1_nv14,
(const u32)ARRAY_SIZE(golden_settings_gc_10_1_nv14));
+ soc15_program_register_sequence(adev,
+ golden_settings_gc_rlc_spm_10_1_nv14,
+ (const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_10_1_nv14));
break;
case CHIP_NAVI12:
soc15_program_register_sequence(adev,
@@ -447,6 +3189,9 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
soc15_program_register_sequence(adev,
golden_settings_gc_10_1_2_nv12,
(const u32)ARRAY_SIZE(golden_settings_gc_10_1_2_nv12));
+ soc15_program_register_sequence(adev,
+ golden_settings_gc_rlc_spm_10_1_2_nv12,
+ (const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_10_1_2_nv12));
break;
default:
break;
@@ -557,7 +3302,8 @@ static int gfx_v10_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
gpu_addr = adev->wb.gpu_addr + (index * 4);
adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
memset(&ib, 0, sizeof(ib));
- r = amdgpu_ib_get(adev, NULL, 16, &ib);
+ r = amdgpu_ib_get(adev, NULL, 16,
+ AMDGPU_IB_POOL_DIRECT, &ib);
if (r)
goto err1;
@@ -1298,7 +4044,8 @@ static int gfx_v10_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id,
irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + ring->pipe;
r = amdgpu_ring_init(adev, ring, 1024,
- &adev->gfx.eop_irq, irq_type);
+ &adev->gfx.eop_irq, irq_type,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
return 0;
@@ -1309,7 +4056,8 @@ static int gfx_v10_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
{
int r;
unsigned irq_type;
- struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
+ struct amdgpu_ring *ring;
+ unsigned int hw_prio;
ring = &adev->gfx.compute_ring[ring_id];
@@ -1328,10 +4076,11 @@ static int gfx_v10_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
+ ring->pipe;
-
+ hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue) ?
+ AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
/* type-2 packets are deprecated on MEC, use type-3 instead */
r = amdgpu_ring_init(adev, ring, 1024,
- &adev->gfx.eop_irq, irq_type);
+ &adev->gfx.eop_irq, irq_type, hw_prio);
if (r)
return r;
@@ -1829,9 +4578,9 @@ static int gfx_v10_0_init_csb(struct amdgpu_device *adev)
/* csib */
WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_ADDR_HI,
- adev->gfx.rlc.clear_state_gpu_addr >> 32);
+ adev->gfx.rlc.clear_state_gpu_addr >> 32);
WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_ADDR_LO,
- adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
+ adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size);
return 0;
@@ -2441,10 +5190,6 @@ static int gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
- if (!enable) {
- for (i = 0; i < adev->gfx.num_gfx_rings; i++)
- adev->gfx.gfx_ring[i].sched.ready = false;
- }
WREG32_SOC15_RLC(GC, 0, mmCP_ME_CNTL, tmp);
for (i = 0; i < adev->usec_timeout; i++) {
@@ -2923,16 +5668,12 @@ static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev)
static void gfx_v10_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
{
- int i;
-
if (enable) {
WREG32_SOC15(GC, 0, mmCP_MEC_CNTL, 0);
} else {
WREG32_SOC15(GC, 0, mmCP_MEC_CNTL,
(CP_MEC_CNTL__MEC_ME1_HALT_MASK |
CP_MEC_CNTL__MEC_ME2_HALT_MASK));
- for (i = 0; i < adev->gfx.num_compute_rings; i++)
- adev->gfx.compute_ring[i].sched.ready = false;
adev->gfx.kiq.ring.sched.ready = false;
}
udelay(50);
@@ -3268,11 +6009,8 @@ static void gfx_v10_0_compute_mqd_set_priority(struct amdgpu_ring *ring, struct
if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue)) {
mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
- ring->has_high_prio = true;
mqd->cp_hqd_queue_priority =
AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
- } else {
- ring->has_high_prio = false;
}
}
}
@@ -3802,14 +6540,16 @@ static int gfx_v10_0_hw_init(void *handle)
* loaded firstly, so in direct type, it has to load smc ucode
* here before rlc.
*/
- r = smu_load_microcode(&adev->smu);
- if (r)
- return r;
+ if (adev->smu.ppt_funcs != NULL) {
+ r = smu_load_microcode(&adev->smu);
+ if (r)
+ return r;
- r = smu_check_fw_status(&adev->smu);
- if (r) {
- pr_err("SMC firmware status is not correct\n");
- return r;
+ r = smu_check_fw_status(&adev->smu);
+ if (r) {
+ pr_err("SMC firmware status is not correct\n");
+ return r;
+ }
}
}
@@ -4292,14 +7032,21 @@ static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev,
static void gfx_v10_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
{
- u32 data;
+ u32 reg, data;
- data = RREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL);
+ reg = SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_MC_CNTL);
+ if (amdgpu_sriov_is_pp_one_vf(adev))
+ data = RREG32_NO_KIQ(reg);
+ else
+ data = RREG32(reg);
data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
- WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
+ if (amdgpu_sriov_is_pp_one_vf(adev))
+ WREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL, data);
+ else
+ WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
}
static bool gfx_v10_0_check_rlcg_range(struct amdgpu_device *adev,
@@ -4341,6 +7088,20 @@ static const struct amdgpu_rlc_funcs gfx_v10_0_rlc_funcs = {
.reset = gfx_v10_0_rlc_reset,
.start = gfx_v10_0_rlc_start,
.update_spm_vmid = gfx_v10_0_update_spm_vmid,
+};
+
+static const struct amdgpu_rlc_funcs gfx_v10_0_rlc_funcs_sriov = {
+ .is_rlc_enabled = gfx_v10_0_is_rlc_enabled,
+ .set_safe_mode = gfx_v10_0_set_safe_mode,
+ .unset_safe_mode = gfx_v10_0_unset_safe_mode,
+ .init = gfx_v10_0_rlc_init,
+ .get_csb_size = gfx_v10_0_get_csb_size,
+ .get_csb_buffer = gfx_v10_0_get_csb_buffer,
+ .resume = gfx_v10_0_rlc_resume,
+ .stop = gfx_v10_0_rlc_stop,
+ .reset = gfx_v10_0_rlc_reset,
+ .start = gfx_v10_0_rlc_start,
+ .update_spm_vmid = gfx_v10_0_update_spm_vmid,
.rlcg_wreg = gfx_v10_rlcg_wreg,
.is_rlcg_access_range = gfx_v10_0_is_rlcg_access_range,
};
@@ -4350,6 +7111,10 @@ static int gfx_v10_0_set_powergating_state(void *handle,
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool enable = (state == AMD_PG_STATE_GATE);
+
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
switch (adev->asic_type) {
case CHIP_NAVI10:
case CHIP_NAVI14:
@@ -4366,6 +7131,9 @@ static int gfx_v10_0_set_clockgating_state(void *handle,
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
switch (adev->asic_type) {
case CHIP_NAVI10:
case CHIP_NAVI14:
@@ -4678,7 +7446,8 @@ static void gfx_v10_0_ring_emit_sb(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, 0);
}
-static void gfx_v10_0_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
+static void gfx_v10_0_ring_emit_cntxcntl(struct amdgpu_ring *ring,
+ uint32_t flags)
{
uint32_t dw2 = 0;
@@ -4686,8 +7455,6 @@ static void gfx_v10_0_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flag
gfx_v10_0_ring_emit_ce_meta(ring,
(!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? true : false);
- gfx_v10_0_ring_emit_tmz(ring, true);
-
dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
if (flags & AMDGPU_HAVE_CTX_SWITCH) {
/* set load_global_config & load_global_uconfig */
@@ -4844,16 +7611,19 @@ static void gfx_v10_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume)
sizeof(de_payload) >> 2);
}
-static void gfx_v10_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start)
+static void gfx_v10_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start,
+ bool secure)
{
+ uint32_t v = secure ? FRAME_TMZ : 0;
+
amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
- amdgpu_ring_write(ring, FRAME_CMD(start ? 0 : 1)); /* frame_end */
+ amdgpu_ring_write(ring, v | FRAME_CMD(start ? 0 : 1));
}
-static void gfx_v10_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
+static void gfx_v10_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t reg_val_offs)
{
struct amdgpu_device *adev = ring->adev;
- struct amdgpu_kiq *kiq = &adev->gfx.kiq;
amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
amdgpu_ring_write(ring, 0 | /* src: register*/
@@ -4862,9 +7632,9 @@ static void gfx_v10_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
amdgpu_ring_write(ring, reg);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
- kiq->reg_val_offs * 4));
+ reg_val_offs * 4));
amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
- kiq->reg_val_offs * 4));
+ reg_val_offs * 4));
}
static void gfx_v10_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
@@ -5250,6 +8020,29 @@ static int gfx_v10_0_kiq_irq(struct amdgpu_device *adev,
return 0;
}
+static void gfx_v10_0_emit_mem_sync(struct amdgpu_ring *ring)
+{
+ const unsigned int gcr_cntl =
+ PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_INV(1) |
+ PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_WB(1) |
+ PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_INV(1) |
+ PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_WB(1) |
+ PACKET3_ACQUIRE_MEM_GCR_CNTL_GL1_INV(1) |
+ PACKET3_ACQUIRE_MEM_GCR_CNTL_GLV_INV(1) |
+ PACKET3_ACQUIRE_MEM_GCR_CNTL_GLK_INV(1) |
+ PACKET3_ACQUIRE_MEM_GCR_CNTL_GLI_INV(1);
+
+ /* ACQUIRE_MEM - make one or more surfaces valid for use by the subsequent operations */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 6));
+ amdgpu_ring_write(ring, 0); /* CP_COHER_CNTL */
+ amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */
+ amdgpu_ring_write(ring, 0xffffff); /* CP_COHER_SIZE_HI */
+ amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
+ amdgpu_ring_write(ring, 0); /* CP_COHER_BASE_HI */
+ amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
+ amdgpu_ring_write(ring, gcr_cntl); /* GCR_CNTL */
+}
+
static const struct amd_ip_funcs gfx_v10_0_ip_funcs = {
.name = "gfx_v10_0",
.early_init = gfx_v10_0_early_init,
@@ -5297,7 +8090,8 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
3 + /* CNTX_CTRL */
5 + /* HDP_INVL */
8 + 8 + /* FENCE x2 */
- 2, /* SWITCH_BUFFER */
+ 2 + /* SWITCH_BUFFER */
+ 8, /* gfx_v10_0_emit_mem_sync */
.emit_ib_size = 4, /* gfx_v10_0_ring_emit_ib_gfx */
.emit_ib = gfx_v10_0_ring_emit_ib_gfx,
.emit_fence = gfx_v10_0_ring_emit_fence,
@@ -5314,11 +8108,12 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
.init_cond_exec = gfx_v10_0_ring_emit_init_cond_exec,
.patch_cond_exec = gfx_v10_0_ring_emit_patch_cond_exec,
.preempt_ib = gfx_v10_0_ring_preempt_ib,
- .emit_tmz = gfx_v10_0_ring_emit_tmz,
+ .emit_frame_cntl = gfx_v10_0_ring_emit_frame_cntl,
.emit_wreg = gfx_v10_0_ring_emit_wreg,
.emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
.soft_recovery = gfx_v10_0_ring_soft_recovery,
+ .emit_mem_sync = gfx_v10_0_emit_mem_sync,
};
static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
@@ -5338,7 +8133,8 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
2 + /* gfx_v10_0_ring_emit_vm_flush */
- 8 + 8 + 8, /* gfx_v10_0_ring_emit_fence x3 for user fence, vm fence */
+ 8 + 8 + 8 + /* gfx_v10_0_ring_emit_fence x3 for user fence, vm fence */
+ 8, /* gfx_v10_0_emit_mem_sync */
.emit_ib_size = 7, /* gfx_v10_0_ring_emit_ib_compute */
.emit_ib = gfx_v10_0_ring_emit_ib_compute,
.emit_fence = gfx_v10_0_ring_emit_fence,
@@ -5353,6 +8149,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
.emit_wreg = gfx_v10_0_ring_emit_wreg,
.emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
+ .emit_mem_sync = gfx_v10_0_emit_mem_sync,
};
static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = {
@@ -5439,9 +8236,11 @@ static void gfx_v10_0_set_rlc_funcs(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_NAVI10:
case CHIP_NAVI14:
- case CHIP_NAVI12:
adev->gfx.rlc.funcs = &gfx_v10_0_rlc_funcs;
break;
+ case CHIP_NAVI12:
+ adev->gfx.rlc.funcs = &gfx_v10_0_rlc_funcs_sriov;
+ break;
default:
break;
}
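[Editor's note, not part of the commit] The gfx_v10_0 hunks above add an emit_mem_sync callback and grow emit_frame_size from "2," to "2 + 8," so the new packet is budgeted for. The standalone C sketch below models how those eight dwords are laid out; the PACKET3 macro and the 0x58 opcode are simplified stand-ins for the kernel's nvd.h definitions, not values confirmed by this diff.

/* Models the 8-dword ACQUIRE_MEM packet built by gfx_v10_0_emit_mem_sync.
 * Standalone approximation for illustration only. */
#include <stdint.h>
#include <stdio.h>

#define PACKET3(op, n)      ((3u << 30) | (((n) & 0x3fff) << 16) | (((op) & 0xff) << 8))
#define PACKET3_ACQUIRE_MEM 0x58   /* assumed opcode value */

int main(void)
{
	uint32_t pkt[8];
	unsigned int i = 0;

	pkt[i++] = PACKET3(PACKET3_ACQUIRE_MEM, 6); /* header: 7 payload dwords follow */
	pkt[i++] = 0;           /* CP_COHER_CNTL (gfx10 uses GCR_CNTL instead) */
	pkt[i++] = 0xffffffff;  /* CP_COHER_SIZE */
	pkt[i++] = 0xffffff;    /* CP_COHER_SIZE_HI */
	pkt[i++] = 0;           /* CP_COHER_BASE */
	pkt[i++] = 0;           /* CP_COHER_BASE_HI */
	pkt[i++] = 0x0000000A;  /* POLL_INTERVAL */
	pkt[i++] = 0;           /* GCR_CNTL: the GL2/GL1/GLV/GLK/GLI invalidate bits go here */

	for (i = 0; i < 8; i++)
		printf("dw%u = 0x%08x\n", i, pkt[i]);
	return 0;
}

The eight entries are exactly why both the gfx and compute emit_frame_size tables gain "+ 8" in this diff.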
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index 31f44d05e606..79c52c7a02e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -1914,7 +1914,8 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
WREG32(scratch, 0xCAFEDEAD);
memset(&ib, 0, sizeof(ib));
- r = amdgpu_ib_get(adev, NULL, 256, &ib);
+ r = amdgpu_ib_get(adev, NULL, 256,
+ AMDGPU_IB_POOL_DIRECT, &ib);
if (r)
goto err1;
@@ -1950,7 +1951,6 @@ err1:
static void gfx_v6_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
{
- int i;
if (enable) {
WREG32(mmCP_ME_CNTL, 0);
} else {
@@ -1958,10 +1958,6 @@ static void gfx_v6_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
CP_ME_CNTL__PFP_HALT_MASK |
CP_ME_CNTL__CE_HALT_MASK));
WREG32(mmSCRATCH_UMSK, 0);
- for (i = 0; i < adev->gfx.num_gfx_rings; i++)
- adev->gfx.gfx_ring[i].sched.ready = false;
- for (i = 0; i < adev->gfx.num_compute_rings; i++)
- adev->gfx.compute_ring[i].sched.ready = false;
}
udelay(50);
}
@@ -3114,7 +3110,9 @@ static int gfx_v6_0_sw_init(void *handle)
ring->ring_obj = NULL;
sprintf(ring->name, "gfx");
r = amdgpu_ring_init(adev, ring, 1024,
- &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP);
+ &adev->gfx.eop_irq,
+ AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
}
@@ -3136,7 +3134,8 @@ static int gfx_v6_0_sw_init(void *handle)
sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
r = amdgpu_ring_init(adev, ring, 1024,
- &adev->gfx.eop_irq, irq_type);
+ &adev->gfx.eop_irq, irq_type,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
}
@@ -3466,6 +3465,18 @@ static int gfx_v6_0_set_powergating_state(void *handle,
return 0;
}
+static void gfx_v6_0_emit_mem_sync(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+ amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
+ PACKET3_TC_ACTION_ENA |
+ PACKET3_SH_KCACHE_ACTION_ENA |
+ PACKET3_SH_ICACHE_ACTION_ENA); /* CP_COHER_CNTL */
+ amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */
+ amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
+ amdgpu_ring_write(ring, 0x0000000A); /* poll interval */
+}
+
static const struct amd_ip_funcs gfx_v6_0_ip_funcs = {
.name = "gfx_v6_0",
.early_init = gfx_v6_0_early_init,
@@ -3496,7 +3507,8 @@ static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_gfx = {
14 + 14 + 14 + /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
7 + 4 + /* gfx_v6_0_ring_emit_pipeline_sync */
SI_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + 6 + /* gfx_v6_0_ring_emit_vm_flush */
- 3 + 2, /* gfx_v6_ring_emit_cntxcntl including vgt flush */
+ 3 + 2 + /* gfx_v6_ring_emit_cntxcntl including vgt flush */
+ 5, /* SURFACE_SYNC */
.emit_ib_size = 6, /* gfx_v6_0_ring_emit_ib */
.emit_ib = gfx_v6_0_ring_emit_ib,
.emit_fence = gfx_v6_0_ring_emit_fence,
@@ -3507,6 +3519,7 @@ static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_gfx = {
.insert_nop = amdgpu_ring_insert_nop,
.emit_cntxcntl = gfx_v6_ring_emit_cntxcntl,
.emit_wreg = gfx_v6_0_ring_emit_wreg,
+ .emit_mem_sync = gfx_v6_0_emit_mem_sync,
};
static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_compute = {
@@ -3520,7 +3533,8 @@ static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_compute = {
5 + 5 + /* hdp flush / invalidate */
7 + /* gfx_v6_0_ring_emit_pipeline_sync */
SI_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v6_0_ring_emit_vm_flush */
- 14 + 14 + 14, /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
+ 14 + 14 + 14 + /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
+ 5, /* SURFACE_SYNC */
.emit_ib_size = 6, /* gfx_v6_0_ring_emit_ib */
.emit_ib = gfx_v6_0_ring_emit_ib,
.emit_fence = gfx_v6_0_ring_emit_fence,
@@ -3530,6 +3544,7 @@ static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_compute = {
.test_ib = gfx_v6_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.emit_wreg = gfx_v6_0_ring_emit_wreg,
+ .emit_mem_sync = gfx_v6_0_emit_mem_sync,
};
static void gfx_v6_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 733d398c61cc..0cc011f9190d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -2364,7 +2364,8 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
WREG32(scratch, 0xCAFEDEAD);
memset(&ib, 0, sizeof(ib));
- r = amdgpu_ib_get(adev, NULL, 256, &ib);
+ r = amdgpu_ib_get(adev, NULL, 256,
+ AMDGPU_IB_POOL_DIRECT, &ib);
if (r)
goto err1;
@@ -2431,15 +2432,12 @@ err1:
*/
static void gfx_v7_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
{
- int i;
-
- if (enable) {
+ if (enable)
WREG32(mmCP_ME_CNTL, 0);
- } else {
- WREG32(mmCP_ME_CNTL, (CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK));
- for (i = 0; i < adev->gfx.num_gfx_rings; i++)
- adev->gfx.gfx_ring[i].sched.ready = false;
- }
+ else
+ WREG32(mmCP_ME_CNTL, (CP_ME_CNTL__ME_HALT_MASK |
+ CP_ME_CNTL__PFP_HALT_MASK |
+ CP_ME_CNTL__CE_HALT_MASK));
udelay(50);
}
@@ -2700,15 +2698,11 @@ static void gfx_v7_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
*/
static void gfx_v7_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
{
- int i;
-
- if (enable) {
+ if (enable)
WREG32(mmCP_MEC_CNTL, 0);
- } else {
- WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
- for (i = 0; i < adev->gfx.num_compute_rings; i++)
- adev->gfx.compute_ring[i].sched.ready = false;
- }
+ else
+ WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK |
+ CP_MEC_CNTL__MEC_ME2_HALT_MASK));
udelay(50);
}
@@ -4439,7 +4433,8 @@ static int gfx_v7_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
/* type-2 packets are deprecated on MEC, use type-3 instead */
r = amdgpu_ring_init(adev, ring, 1024,
- &adev->gfx.eop_irq, irq_type);
+ &adev->gfx.eop_irq, irq_type,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
@@ -4511,7 +4506,9 @@ static int gfx_v7_0_sw_init(void *handle)
ring->ring_obj = NULL;
sprintf(ring->name, "gfx");
r = amdgpu_ring_init(adev, ring, 1024,
- &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP);
+ &adev->gfx.eop_irq,
+ AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
}
@@ -5001,6 +4998,32 @@ static int gfx_v7_0_set_powergating_state(void *handle,
return 0;
}
+static void gfx_v7_0_emit_mem_sync(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+ amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
+ PACKET3_TC_ACTION_ENA |
+ PACKET3_SH_KCACHE_ACTION_ENA |
+ PACKET3_SH_ICACHE_ACTION_ENA); /* CP_COHER_CNTL */
+ amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */
+ amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
+ amdgpu_ring_write(ring, 0x0000000A); /* poll interval */
+}
+
+static void gfx_v7_0_emit_mem_sync_compute(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5));
+ amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
+ PACKET3_TC_ACTION_ENA |
+ PACKET3_SH_KCACHE_ACTION_ENA |
+ PACKET3_SH_ICACHE_ACTION_ENA); /* CP_COHER_CNTL */
+ amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */
+ amdgpu_ring_write(ring, 0xff); /* CP_COHER_SIZE_HI */
+ amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
+ amdgpu_ring_write(ring, 0); /* CP_COHER_BASE_HI */
+ amdgpu_ring_write(ring, 0x0000000A); /* poll interval */
+}
+
static const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
.name = "gfx_v7_0",
.early_init = gfx_v7_0_early_init,
@@ -5033,7 +5056,8 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
12 + 12 + 12 + /* gfx_v7_0_ring_emit_fence_gfx x3 for user fence, vm fence */
7 + 4 + /* gfx_v7_0_ring_emit_pipeline_sync */
CIK_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + 6 + /* gfx_v7_0_ring_emit_vm_flush */
- 3 + 4, /* gfx_v7_ring_emit_cntxcntl including vgt flush*/
+ 3 + 4 + /* gfx_v7_ring_emit_cntxcntl including vgt flush*/
+ 5, /* SURFACE_SYNC */
.emit_ib_size = 4, /* gfx_v7_0_ring_emit_ib_gfx */
.emit_ib = gfx_v7_0_ring_emit_ib_gfx,
.emit_fence = gfx_v7_0_ring_emit_fence_gfx,
@@ -5048,6 +5072,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
.emit_cntxcntl = gfx_v7_ring_emit_cntxcntl,
.emit_wreg = gfx_v7_0_ring_emit_wreg,
.soft_recovery = gfx_v7_0_ring_soft_recovery,
+ .emit_mem_sync = gfx_v7_0_emit_mem_sync,
};
static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
@@ -5064,7 +5089,8 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
5 + /* hdp invalidate */
7 + /* gfx_v7_0_ring_emit_pipeline_sync */
CIK_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v7_0_ring_emit_vm_flush */
- 7 + 7 + 7, /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */
+ 7 + 7 + 7 + /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */
+ 7, /* gfx_v7_0_emit_mem_sync_compute */
.emit_ib_size = 7, /* gfx_v7_0_ring_emit_ib_compute */
.emit_ib = gfx_v7_0_ring_emit_ib_compute,
.emit_fence = gfx_v7_0_ring_emit_fence_compute,
@@ -5077,6 +5103,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_wreg = gfx_v7_0_ring_emit_wreg,
+ .emit_mem_sync = gfx_v7_0_emit_mem_sync_compute,
};
static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index fc32586ef80b..1d4128227ffd 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -888,7 +888,8 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
gpu_addr = adev->wb.gpu_addr + (index * 4);
adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
memset(&ib, 0, sizeof(ib));
- r = amdgpu_ib_get(adev, NULL, 16, &ib);
+ r = amdgpu_ib_get(adev, NULL, 16,
+ AMDGPU_IB_POOL_DIRECT, &ib);
if (r)
goto err1;
@@ -1550,7 +1551,8 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
/* allocate an indirect buffer to put the commands in */
memset(&ib, 0, sizeof(ib));
- r = amdgpu_ib_get(adev, NULL, total_size, &ib);
+ r = amdgpu_ib_get(adev, NULL, total_size,
+ AMDGPU_IB_POOL_DIRECT, &ib);
if (r) {
DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
return r;
@@ -1892,6 +1894,7 @@ static int gfx_v8_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
int r;
unsigned irq_type;
struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
+ unsigned int hw_prio;
ring = &adev->gfx.compute_ring[ring_id];
@@ -1911,9 +1914,11 @@ static int gfx_v8_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
+ ring->pipe;
+ hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue) ?
+ AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_RING_PRIO_DEFAULT;
/* type-2 packets are deprecated on MEC, use type-3 instead */
r = amdgpu_ring_init(adev, ring, 1024,
- &adev->gfx.eop_irq, irq_type);
+ &adev->gfx.eop_irq, irq_type, hw_prio);
if (r)
return r;
@@ -2017,7 +2022,8 @@ static int gfx_v8_0_sw_init(void *handle)
}
r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
- AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP);
+ AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
}
@@ -4120,7 +4126,6 @@ static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
static void gfx_v8_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
{
- int i;
u32 tmp = RREG32(mmCP_ME_CNTL);
if (enable) {
@@ -4131,8 +4136,6 @@ static void gfx_v8_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 1);
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1);
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1);
- for (i = 0; i < adev->gfx.num_gfx_rings; i++)
- adev->gfx.gfx_ring[i].sched.ready = false;
}
WREG32(mmCP_ME_CNTL, tmp);
udelay(50);
@@ -4320,14 +4323,10 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
{
- int i;
-
if (enable) {
WREG32(mmCP_MEC_CNTL, 0);
} else {
WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
- for (i = 0; i < adev->gfx.num_compute_rings; i++)
- adev->gfx.compute_ring[i].sched.ready = false;
adev->gfx.kiq.ring.sched.ready = false;
}
udelay(50);
@@ -4437,11 +4436,8 @@ static void gfx_v8_0_mqd_set_priority(struct amdgpu_ring *ring, struct vi_mqd *m
if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue)) {
mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
- ring->has_high_prio = true;
mqd->cp_hqd_queue_priority =
AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
- } else {
- ring->has_high_prio = false;
}
}
}
@@ -5619,12 +5615,18 @@ static void gfx_v8_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
{
u32 data;
- data = RREG32(mmRLC_SPM_VMID);
+ if (amdgpu_sriov_is_pp_one_vf(adev))
+ data = RREG32_NO_KIQ(mmRLC_SPM_VMID);
+ else
+ data = RREG32(mmRLC_SPM_VMID);
data &= ~RLC_SPM_VMID__RLC_SPM_VMID_MASK;
data |= (vmid & RLC_SPM_VMID__RLC_SPM_VMID_MASK) << RLC_SPM_VMID__RLC_SPM_VMID__SHIFT;
- WREG32(mmRLC_SPM_VMID, data);
+ if (amdgpu_sriov_is_pp_one_vf(adev))
+ WREG32_NO_KIQ(mmRLC_SPM_VMID, data);
+ else
+ WREG32(mmRLC_SPM_VMID, data);
}
static const struct amdgpu_rlc_funcs iceland_rlc_funcs = {
@@ -6387,10 +6389,10 @@ static void gfx_v8_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigne
ring->ring[offset] = (ring->ring_size >> 2) - offset + cur;
}
-static void gfx_v8_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
+static void gfx_v8_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t reg_val_offs)
{
struct amdgpu_device *adev = ring->adev;
- struct amdgpu_kiq *kiq = &adev->gfx.kiq;
amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
amdgpu_ring_write(ring, 0 | /* src: register*/
@@ -6399,9 +6401,9 @@ static void gfx_v8_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
amdgpu_ring_write(ring, reg);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
- kiq->reg_val_offs * 4));
+ reg_val_offs * 4));
amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
- kiq->reg_val_offs * 4));
+ reg_val_offs * 4));
}
static void gfx_v8_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
@@ -6815,6 +6817,34 @@ static int gfx_v8_0_sq_irq(struct amdgpu_device *adev,
return 0;
}
+static void gfx_v8_0_emit_mem_sync(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+ amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
+ PACKET3_TC_ACTION_ENA |
+ PACKET3_SH_KCACHE_ACTION_ENA |
+ PACKET3_SH_ICACHE_ACTION_ENA |
+ PACKET3_TC_WB_ACTION_ENA); /* CP_COHER_CNTL */
+ amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */
+ amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
+ amdgpu_ring_write(ring, 0x0000000A); /* poll interval */
+}
+
+static void gfx_v8_0_emit_mem_sync_compute(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5));
+ amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
+ PACKET3_TC_ACTION_ENA |
+ PACKET3_SH_KCACHE_ACTION_ENA |
+ PACKET3_SH_ICACHE_ACTION_ENA |
+ PACKET3_TC_WB_ACTION_ENA); /* CP_COHER_CNTL */
+ amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */
+ amdgpu_ring_write(ring, 0xff); /* CP_COHER_SIZE_HI */
+ amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
+ amdgpu_ring_write(ring, 0); /* CP_COHER_BASE_HI */
+ amdgpu_ring_write(ring, 0x0000000A); /* poll interval */
+}
+
static const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
.name = "gfx_v8_0",
.early_init = gfx_v8_0_early_init,
@@ -6861,7 +6891,8 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
3 + /* CNTX_CTRL */
5 + /* HDP_INVL */
12 + 12 + /* FENCE x2 */
- 2, /* SWITCH_BUFFER */
+ 2 + /* SWITCH_BUFFER */
+ 5, /* SURFACE_SYNC */
.emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_gfx */
.emit_ib = gfx_v8_0_ring_emit_ib_gfx,
.emit_fence = gfx_v8_0_ring_emit_fence_gfx,
@@ -6879,6 +6910,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
.patch_cond_exec = gfx_v8_0_ring_emit_patch_cond_exec,
.emit_wreg = gfx_v8_0_ring_emit_wreg,
.soft_recovery = gfx_v8_0_ring_soft_recovery,
+ .emit_mem_sync = gfx_v8_0_emit_mem_sync,
};
static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
@@ -6895,7 +6927,8 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
5 + /* hdp_invalidate */
7 + /* gfx_v8_0_ring_emit_pipeline_sync */
VI_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v8_0_ring_emit_vm_flush */
- 7 + 7 + 7, /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */
+ 7 + 7 + 7 + /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */
+ 7, /* gfx_v8_0_emit_mem_sync_compute */
.emit_ib_size = 7, /* gfx_v8_0_ring_emit_ib_compute */
.emit_ib = gfx_v8_0_ring_emit_ib_compute,
.emit_fence = gfx_v8_0_ring_emit_fence_compute,
@@ -6908,6 +6941,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_wreg = gfx_v8_0_ring_emit_wreg,
+ .emit_mem_sync = gfx_v8_0_emit_mem_sync_compute,
};
static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_kiq = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index d2d9dce68c2f..711e9dd19705 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -50,18 +50,14 @@
#include "gfx_v9_4.h"
+#include "asic_reg/pwr/pwr_10_0_offset.h"
+#include "asic_reg/pwr/pwr_10_0_sh_mask.h"
+
#define GFX9_NUM_GFX_RINGS 1
#define GFX9_MEC_HPD_SIZE 4096
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
#define RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET 0x00000000L
-#define mmPWR_MISC_CNTL_STATUS 0x0183
-#define mmPWR_MISC_CNTL_STATUS_BASE_IDX 0
-#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN__SHIFT 0x0
-#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT 0x1
-#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK 0x00000001L
-#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK 0x00000006L
-
#define mmGCEA_PROBE_MAP 0x070c
#define mmGCEA_PROBE_MAP_BASE_IDX 0
@@ -511,8 +507,8 @@ static const struct soc15_reg_golden golden_settings_gc_9_0[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x0000ff87),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x0000ff8f),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x00ffff87),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x00ffff8f),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
@@ -963,7 +959,7 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
case CHIP_RAVEN:
soc15_program_register_sequence(adev, golden_settings_gc_9_1,
ARRAY_SIZE(golden_settings_gc_9_1));
- if (adev->rev_id >= 8)
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
soc15_program_register_sequence(adev,
golden_settings_gc_9_1_rv2,
ARRAY_SIZE(golden_settings_gc_9_1_rv2));
@@ -1082,7 +1078,8 @@ static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
gpu_addr = adev->wb.gpu_addr + (index * 4);
adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
memset(&ib, 0, sizeof(ib));
- r = amdgpu_ib_get(adev, NULL, 16, &ib);
+ r = amdgpu_ib_get(adev, NULL, 16,
+ AMDGPU_IB_POOL_DIRECT, &ib);
if (r)
goto err1;
@@ -1277,7 +1274,8 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
case CHIP_VEGA20:
break;
case CHIP_RAVEN:
- if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8) &&
+ if (!((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
+ (adev->apu_flags & AMD_APU_IS_PICASSO)) &&
((!is_raven_kicker(adev) &&
adev->gfx.rlc_fw_version < 531) ||
(adev->gfx.rlc_feature_version < 1) ||
@@ -1620,9 +1618,9 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
chip_name = "vega20";
break;
case CHIP_RAVEN:
- if (adev->rev_id >= 8)
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
chip_name = "raven2";
- else if (adev->pdev->device == 0x15d8)
+ else if (adev->apu_flags & AMD_APU_IS_PICASSO)
chip_name = "picasso";
else
chip_name = "raven";
@@ -2122,7 +2120,7 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
- if (adev->rev_id >= 8)
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
gb_addr_config = RAVEN2_GB_ADDR_CONFIG_GOLDEN;
else
gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN;
@@ -2199,6 +2197,7 @@ static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
int r;
unsigned irq_type;
struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
+ unsigned int hw_prio;
ring = &adev->gfx.compute_ring[ring_id];
@@ -2217,10 +2216,11 @@ static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
+ ring->pipe;
-
+ hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue) ?
+ AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
/* type-2 packets are deprecated on MEC, use type-3 instead */
r = amdgpu_ring_init(adev, ring, 1024,
- &adev->gfx.eop_irq, irq_type);
+ &adev->gfx.eop_irq, irq_type, hw_prio);
if (r)
return r;
@@ -2314,7 +2314,9 @@ static int gfx_v9_0_sw_init(void *handle)
ring->use_doorbell = true;
ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
r = amdgpu_ring_init(adev, ring, 1024,
- &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP);
+ &adev->gfx.eop_irq,
+ AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
}
@@ -2532,7 +2534,7 @@ static void gfx_v9_0_init_sq_config(struct amdgpu_device *adev)
break;
default:
break;
- };
+ }
}
static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
@@ -2967,8 +2969,7 @@ static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
*/
if (adev->gfx.rlc.is_rlc_v2_1) {
if (adev->asic_type == CHIP_VEGA12 ||
- (adev->asic_type == CHIP_RAVEN &&
- adev->rev_id >= 8))
+ (adev->apu_flags & AMD_APU_IS_RAVEN2))
gfx_v9_1_init_rlc_save_restore_list(adev);
gfx_v9_0_enable_save_restore_machine(adev);
}
@@ -3104,16 +3105,11 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
{
- int i;
u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL);
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
- if (!enable) {
- for (i = 0; i < adev->gfx.num_gfx_rings; i++)
- adev->gfx.gfx_ring[i].sched.ready = false;
- }
WREG32_SOC15_RLC(GC, 0, mmCP_ME_CNTL, tmp);
udelay(50);
}
@@ -3309,15 +3305,11 @@ static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev)
static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
{
- int i;
-
if (enable) {
WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL, 0);
} else {
WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL,
(CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
- for (i = 0; i < adev->gfx.num_compute_rings; i++)
- adev->gfx.compute_ring[i].sched.ready = false;
adev->gfx.kiq.ring.sched.ready = false;
}
udelay(50);
@@ -3387,11 +3379,8 @@ static void gfx_v9_0_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *m
if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue)) {
mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
- ring->has_high_prio = true;
mqd->cp_hqd_queue_priority =
AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
- } else {
- ring->has_high_prio = false;
}
}
}
@@ -4058,13 +4047,18 @@ static uint64_t gfx_v9_0_kiq_read_clock(struct amdgpu_device *adev)
{
signed long r, cnt = 0;
unsigned long flags;
- uint32_t seq;
+ uint32_t seq, reg_val_offs = 0;
+ uint64_t value = 0;
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
struct amdgpu_ring *ring = &kiq->ring;
BUG_ON(!ring->funcs->emit_rreg);
spin_lock_irqsave(&kiq->ring_lock, flags);
+ if (amdgpu_device_wb_get(adev, &reg_val_offs)) {
+ pr_err("critical bug! too many kiq readers\n");
+ goto failed_unlock;
+ }
amdgpu_ring_alloc(ring, 32);
amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
amdgpu_ring_write(ring, 9 | /* src: register*/
@@ -4074,10 +4068,13 @@ static uint64_t gfx_v9_0_kiq_read_clock(struct amdgpu_device *adev)
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
- kiq->reg_val_offs * 4));
+ reg_val_offs * 4));
amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
- kiq->reg_val_offs * 4));
- amdgpu_fence_emit_polling(ring, &seq);
+ reg_val_offs * 4));
+ r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
+ if (r)
+ goto failed_undo;
+
amdgpu_ring_commit(ring);
spin_unlock_irqrestore(&kiq->ring_lock, flags);
@@ -4103,10 +4100,19 @@ static uint64_t gfx_v9_0_kiq_read_clock(struct amdgpu_device *adev)
if (cnt > MAX_KIQ_REG_TRY)
goto failed_kiq_read;
- return (uint64_t)adev->wb.wb[kiq->reg_val_offs] |
- (uint64_t)adev->wb.wb[kiq->reg_val_offs + 1 ] << 32ULL;
+ mb();
+ value = (uint64_t)adev->wb.wb[reg_val_offs] |
+ (uint64_t)adev->wb.wb[reg_val_offs + 1 ] << 32ULL;
+ amdgpu_device_wb_free(adev, reg_val_offs);
+ return value;
+failed_undo:
+ amdgpu_ring_undo(ring);
+failed_unlock:
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
failed_kiq_read:
+ if (reg_val_offs)
+ amdgpu_device_wb_free(adev, reg_val_offs);
pr_err("failed to read gpu clock\n");
return ~0;
}
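[Editor's note, not part of the commit] The gfx_v9_0_kiq_read_clock changes above drop the shared kiq->reg_val_offs in favour of a per-call writeback slot that is allocated before the packet is built and released on every exit path (failed_undo, failed_unlock, failed_kiq_read). The standalone sketch below illustrates that allocate/read/free pattern; the slot pool is a simplified stand-in for amdgpu_device_wb_get()/amdgpu_device_wb_free() and is not taken from this commit.

#include <stdint.h>
#include <stdio.h>

/* Toy writeback pool: each caller gets its own two-dword slot, so
 * concurrent readers no longer race on a single shared offset. */
static uint32_t wb[64];
static uint32_t wb_used;

static int wb_get(uint32_t *offs)
{
	for (uint32_t i = 0; i < 32; i++) {
		if (!(wb_used & (1u << i))) {
			wb_used |= 1u << i;
			*offs = i * 2;          /* slot = two consecutive dwords */
			return 0;
		}
	}
	return -1;                              /* the "too many kiq readers" path */
}

static void wb_free(uint32_t offs)
{
	wb_used &= ~(1u << (offs / 2));
}

int main(void)
{
	uint32_t offs;
	uint64_t value;

	if (wb_get(&offs))
		return 1;

	/* The CP would copy the 64-bit clock into the two dwords; fake it
	 * here, then combine low/high exactly as the new kernel code does. */
	wb[offs]     = 0x89abcdef;
	wb[offs + 1] = 0x01234567;
	value = (uint64_t)wb[offs] | ((uint64_t)wb[offs + 1] << 32);

	printf("clock = 0x%016llx\n", (unsigned long long)value);
	wb_free(offs);
	return 0;
}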
@@ -4491,7 +4497,8 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
/* allocate an indirect buffer to put the commands in */
memset(&ib, 0, sizeof(ib));
- r = amdgpu_ib_get(adev, NULL, total_size, &ib);
+ r = amdgpu_ib_get(adev, NULL, total_size,
+ AMDGPU_IB_POOL_DIRECT, &ib);
if (r) {
DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
return r;
@@ -4962,14 +4969,21 @@ static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
{
- u32 data;
+ u32 reg, data;
- data = RREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL);
+ reg = SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_MC_CNTL);
+ if (amdgpu_sriov_is_pp_one_vf(adev))
+ data = RREG32_NO_KIQ(reg);
+ else
+ data = RREG32(reg);
data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
- WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
+ if (amdgpu_sriov_is_pp_one_vf(adev))
+ WREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL, data);
+ else
+ WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
}
static bool gfx_v9_0_check_rlcg_range(struct amdgpu_device *adev,
@@ -5424,10 +5438,13 @@ static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring)
amdgpu_ring_write_multiple(ring, (void *)&de_payload, sizeof(de_payload) >> 2);
}
-static void gfx_v9_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start)
+static void gfx_v9_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start,
+ bool secure)
{
+ uint32_t v = secure ? FRAME_TMZ : 0;
+
amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
- amdgpu_ring_write(ring, FRAME_CMD(start ? 0 : 1)); /* frame_end */
+ amdgpu_ring_write(ring, v | FRAME_CMD(start ? 0 : 1));
}
static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
@@ -5437,8 +5454,6 @@ static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
if (amdgpu_sriov_vf(ring->adev))
gfx_v9_0_ring_emit_ce_meta(ring);
- gfx_v9_0_ring_emit_tmz(ring, true);
-
dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
if (flags & AMDGPU_HAVE_CTX_SWITCH) {
/* set load_global_config & load_global_uconfig */
@@ -5489,10 +5504,10 @@ static void gfx_v9_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigne
ring->ring[offset] = (ring->ring_size>>2) - offset + cur;
}
-static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
+static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t reg_val_offs)
{
struct amdgpu_device *adev = ring->adev;
- struct amdgpu_kiq *kiq = &adev->gfx.kiq;
amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
amdgpu_ring_write(ring, 0 | /* src: register*/
@@ -5501,9 +5516,9 @@ static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
amdgpu_ring_write(ring, reg);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
- kiq->reg_val_offs * 4));
+ reg_val_offs * 4));
amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
- kiq->reg_val_offs * 4));
+ reg_val_offs * 4));
}
static void gfx_v9_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
@@ -6404,15 +6419,15 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
sec_count = REG_GET_FIELD(data, VM_L2_MEM_ECC_CNT, SEC_COUNT);
if (sec_count) {
- DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
- vml2_mems[i], sec_count);
+ dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
+ "SEC %d\n", i, vml2_mems[i], sec_count);
err_data->ce_count += sec_count;
}
ded_count = REG_GET_FIELD(data, VM_L2_MEM_ECC_CNT, DED_COUNT);
if (ded_count) {
- DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
- vml2_mems[i], ded_count);
+ dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
+ "DED %d\n", i, vml2_mems[i], ded_count);
err_data->ue_count += ded_count;
}
}
@@ -6424,16 +6439,16 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
sec_count = REG_GET_FIELD(data, VM_L2_WALKER_MEM_ECC_CNT,
SEC_COUNT);
if (sec_count) {
- DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
- vml2_walker_mems[i], sec_count);
+ dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
+ "SEC %d\n", i, vml2_walker_mems[i], sec_count);
err_data->ce_count += sec_count;
}
ded_count = REG_GET_FIELD(data, VM_L2_WALKER_MEM_ECC_CNT,
DED_COUNT);
if (ded_count) {
- DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
- vml2_walker_mems[i], ded_count);
+ dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
+ "DED %d\n", i, vml2_walker_mems[i], ded_count);
err_data->ue_count += ded_count;
}
}
@@ -6444,8 +6459,9 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
sec_count = (data & 0x00006000L) >> 0xd;
if (sec_count) {
- DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
- atc_l2_cache_2m_mems[i], sec_count);
+ dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
+ "SEC %d\n", i, atc_l2_cache_2m_mems[i],
+ sec_count);
err_data->ce_count += sec_count;
}
}
@@ -6456,15 +6472,17 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
sec_count = (data & 0x00006000L) >> 0xd;
if (sec_count) {
- DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
- atc_l2_cache_4k_mems[i], sec_count);
+ dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
+ "SEC %d\n", i, atc_l2_cache_4k_mems[i],
+ sec_count);
err_data->ce_count += sec_count;
}
ded_count = (data & 0x00018000L) >> 0xf;
if (ded_count) {
- DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
- atc_l2_cache_4k_mems[i], ded_count);
+ dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
+ "DED %d\n", i, atc_l2_cache_4k_mems[i],
+ ded_count);
err_data->ue_count += ded_count;
}
}
@@ -6477,7 +6495,8 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
return 0;
}
-static int gfx_v9_0_ras_error_count(const struct soc15_reg_entry *reg,
+static int gfx_v9_0_ras_error_count(struct amdgpu_device *adev,
+ const struct soc15_reg_entry *reg,
uint32_t se_id, uint32_t inst_id, uint32_t value,
uint32_t *sec_count, uint32_t *ded_count)
{
@@ -6494,7 +6513,8 @@ static int gfx_v9_0_ras_error_count(const struct soc15_reg_entry *reg,
gfx_v9_0_ras_fields[i].sec_count_mask) >>
gfx_v9_0_ras_fields[i].sec_count_shift;
if (sec_cnt) {
- DRM_INFO("GFX SubBlock %s, Instance[%d][%d], SEC %d\n",
+ dev_info(adev->dev, "GFX SubBlock %s, "
+ "Instance[%d][%d], SEC %d\n",
gfx_v9_0_ras_fields[i].name,
se_id, inst_id,
sec_cnt);
@@ -6505,7 +6525,8 @@ static int gfx_v9_0_ras_error_count(const struct soc15_reg_entry *reg,
gfx_v9_0_ras_fields[i].ded_count_mask) >>
gfx_v9_0_ras_fields[i].ded_count_shift;
if (ded_cnt) {
- DRM_INFO("GFX SubBlock %s, Instance[%d][%d], DED %d\n",
+ dev_info(adev->dev, "GFX SubBlock %s, "
+ "Instance[%d][%d], DED %d\n",
gfx_v9_0_ras_fields[i].name,
se_id, inst_id,
ded_cnt);
@@ -6594,9 +6615,10 @@ static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
reg_value =
RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i]));
if (reg_value)
- gfx_v9_0_ras_error_count(&gfx_v9_0_edc_counter_regs[i],
- j, k, reg_value,
- &sec_count, &ded_count);
+ gfx_v9_0_ras_error_count(adev,
+ &gfx_v9_0_edc_counter_regs[i],
+ j, k, reg_value,
+ &sec_count, &ded_count);
}
}
}
@@ -6612,6 +6634,25 @@ static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
return 0;
}
+static void gfx_v9_0_emit_mem_sync(struct amdgpu_ring *ring)
+{
+ const unsigned int cp_coher_cntl =
+ PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_ICACHE_ACTION_ENA(1) |
+ PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_KCACHE_ACTION_ENA(1) |
+ PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_ACTION_ENA(1) |
+ PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TCL1_ACTION_ENA(1) |
+ PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_WB_ACTION_ENA(1);
+
+ /* ACQUIRE_MEM - make one or more surfaces valid for use by the subsequent operations */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5));
+ amdgpu_ring_write(ring, cp_coher_cntl); /* CP_COHER_CNTL */
+ amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */
+ amdgpu_ring_write(ring, 0xffffff); /* CP_COHER_SIZE_HI */
+ amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
+ amdgpu_ring_write(ring, 0); /* CP_COHER_BASE_HI */
+ amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
+}
+
static const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
.name = "gfx_v9_0",
.early_init = gfx_v9_0_early_init,
@@ -6658,7 +6699,8 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
3 + /* CNTX_CTRL */
5 + /* HDP_INVL */
8 + 8 + /* FENCE x2 */
- 2, /* SWITCH_BUFFER */
+ 2 + /* SWITCH_BUFFER */
+ 7, /* gfx_v9_0_emit_mem_sync */
.emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_gfx */
.emit_ib = gfx_v9_0_ring_emit_ib_gfx,
.emit_fence = gfx_v9_0_ring_emit_fence,
@@ -6674,11 +6716,12 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
.emit_cntxcntl = gfx_v9_ring_emit_cntxcntl,
.init_cond_exec = gfx_v9_0_ring_emit_init_cond_exec,
.patch_cond_exec = gfx_v9_0_ring_emit_patch_cond_exec,
- .emit_tmz = gfx_v9_0_ring_emit_tmz,
+ .emit_frame_cntl = gfx_v9_0_ring_emit_frame_cntl,
.emit_wreg = gfx_v9_0_ring_emit_wreg,
.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
.soft_recovery = gfx_v9_0_ring_soft_recovery,
+ .emit_mem_sync = gfx_v9_0_emit_mem_sync,
};
static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
@@ -6698,7 +6741,8 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
2 + /* gfx_v9_0_ring_emit_vm_flush */
- 8 + 8 + 8, /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
+ 8 + 8 + 8 + /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
+ 7, /* gfx_v9_0_emit_mem_sync */
.emit_ib_size = 7, /* gfx_v9_0_ring_emit_ib_compute */
.emit_ib = gfx_v9_0_ring_emit_ib_compute,
.emit_fence = gfx_v9_0_ring_emit_fence,
@@ -6713,6 +6757,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
.emit_wreg = gfx_v9_0_ring_emit_wreg,
.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
+ .emit_mem_sync = gfx_v9_0_emit_mem_sync,
};
static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
@@ -6836,7 +6881,7 @@ static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev)
adev->gds.gds_compute_max_wave_id = 0x27f;
break;
case CHIP_RAVEN:
- if (adev->rev_id >= 0x8)
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
adev->gds.gds_compute_max_wave_id = 0x77; /* raven2 */
else
adev->gds.gds_compute_max_wave_id = 0x15f; /* raven1 */
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
index dce945ef21a5..46351db36922 100644..100755
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
@@ -732,7 +732,8 @@ static int gfx_v9_4_query_utc_edc_status(struct amdgpu_device *adev,
sec_count = REG_GET_FIELD(data, VML2_WALKER_MEM_ECC_CNTL,
SEC_COUNT);
if (sec_count) {
- DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+ dev_info(adev->dev,
+ "Instance[%d]: SubBlock %s, SEC %d\n", i,
vml2_walker_mems[i], sec_count);
err_data->ce_count += sec_count;
}
@@ -740,7 +741,8 @@ static int gfx_v9_4_query_utc_edc_status(struct amdgpu_device *adev,
ded_count = REG_GET_FIELD(data, VML2_WALKER_MEM_ECC_CNTL,
DED_COUNT);
if (ded_count) {
- DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
+ dev_info(adev->dev,
+ "Instance[%d]: SubBlock %s, DED %d\n", i,
vml2_walker_mems[i], ded_count);
err_data->ue_count += ded_count;
}
@@ -752,14 +754,16 @@ static int gfx_v9_4_query_utc_edc_status(struct amdgpu_device *adev,
sec_count = REG_GET_FIELD(data, UTCL2_MEM_ECC_CNTL, SEC_COUNT);
if (sec_count) {
- DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+ dev_info(adev->dev,
+ "Instance[%d]: SubBlock %s, SEC %d\n", i,
utcl2_router_mems[i], sec_count);
err_data->ce_count += sec_count;
}
ded_count = REG_GET_FIELD(data, UTCL2_MEM_ECC_CNTL, DED_COUNT);
if (ded_count) {
- DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
+ dev_info(adev->dev,
+ "Instance[%d]: SubBlock %s, DED %d\n", i,
utcl2_router_mems[i], ded_count);
err_data->ue_count += ded_count;
}
@@ -772,7 +776,8 @@ static int gfx_v9_4_query_utc_edc_status(struct amdgpu_device *adev,
sec_count = REG_GET_FIELD(data, ATC_L2_CACHE_2M_DSM_CNTL,
SEC_COUNT);
if (sec_count) {
- DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+ dev_info(adev->dev,
+ "Instance[%d]: SubBlock %s, SEC %d\n", i,
atc_l2_cache_2m_mems[i], sec_count);
err_data->ce_count += sec_count;
}
@@ -780,7 +785,8 @@ static int gfx_v9_4_query_utc_edc_status(struct amdgpu_device *adev,
ded_count = REG_GET_FIELD(data, ATC_L2_CACHE_2M_DSM_CNTL,
DED_COUNT);
if (ded_count) {
- DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
+ dev_info(adev->dev,
+ "Instance[%d]: SubBlock %s, DED %d\n", i,
atc_l2_cache_2m_mems[i], ded_count);
err_data->ue_count += ded_count;
}
@@ -793,7 +799,8 @@ static int gfx_v9_4_query_utc_edc_status(struct amdgpu_device *adev,
sec_count = REG_GET_FIELD(data, ATC_L2_CACHE_4K_DSM_CNTL,
SEC_COUNT);
if (sec_count) {
- DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+ dev_info(adev->dev,
+ "Instance[%d]: SubBlock %s, SEC %d\n", i,
atc_l2_cache_4k_mems[i], sec_count);
err_data->ce_count += sec_count;
}
@@ -801,7 +808,8 @@ static int gfx_v9_4_query_utc_edc_status(struct amdgpu_device *adev,
ded_count = REG_GET_FIELD(data, ATC_L2_CACHE_4K_DSM_CNTL,
DED_COUNT);
if (ded_count) {
- DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
+ dev_info(adev->dev,
+ "Instance[%d]: SubBlock %s, DED %d\n", i,
atc_l2_cache_4k_mems[i], ded_count);
err_data->ue_count += ded_count;
}
@@ -816,7 +824,8 @@ static int gfx_v9_4_query_utc_edc_status(struct amdgpu_device *adev,
return 0;
}
-static int gfx_v9_4_ras_error_count(const struct soc15_reg_entry *reg,
+static int gfx_v9_4_ras_error_count(struct amdgpu_device *adev,
+ const struct soc15_reg_entry *reg,
uint32_t se_id, uint32_t inst_id,
uint32_t value, uint32_t *sec_count,
uint32_t *ded_count)
@@ -833,7 +842,8 @@ static int gfx_v9_4_ras_error_count(const struct soc15_reg_entry *reg,
sec_cnt = (value & gfx_v9_4_ras_fields[i].sec_count_mask) >>
gfx_v9_4_ras_fields[i].sec_count_shift;
if (sec_cnt) {
- DRM_INFO("GFX SubBlock %s, Instance[%d][%d], SEC %d\n",
+ dev_info(adev->dev,
+ "GFX SubBlock %s, Instance[%d][%d], SEC %d\n",
gfx_v9_4_ras_fields[i].name, se_id, inst_id,
sec_cnt);
*sec_count += sec_cnt;
@@ -842,7 +852,8 @@ static int gfx_v9_4_ras_error_count(const struct soc15_reg_entry *reg,
ded_cnt = (value & gfx_v9_4_ras_fields[i].ded_count_mask) >>
gfx_v9_4_ras_fields[i].ded_count_shift;
if (ded_cnt) {
- DRM_INFO("GFX SubBlock %s, Instance[%d][%d], DED %d\n",
+ dev_info(adev->dev,
+ "GFX SubBlock %s, Instance[%d][%d], DED %d\n",
gfx_v9_4_ras_fields[i].name, se_id, inst_id,
ded_cnt);
*ded_count += ded_cnt;
@@ -876,7 +887,7 @@ int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev,
reg_value = RREG32(SOC15_REG_ENTRY_OFFSET(
gfx_v9_4_edc_counter_regs[i]));
if (reg_value)
- gfx_v9_4_ras_error_count(
+ gfx_v9_4_ras_error_count(adev,
&gfx_v9_4_edc_counter_regs[i],
j, k, reg_value, &sec_count,
&ded_count);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index 1a2f18b908fe..6682b843bafe 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -80,7 +80,7 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
WREG32_SOC15_RLC(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
- if (adev->asic_type == CHIP_RAVEN && adev->rev_id >= 0x8)
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
/*
* Raven2 has a HW issue that it is unable to use the
* vram which is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR.
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 9775eca6fe43..ba2b7ac0c02d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -170,6 +170,9 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
dev_err(adev->dev,
"GCVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
status);
+ dev_err(adev->dev, "\t Faulty UTCL2 client ID: 0x%lx\n",
+ REG_GET_FIELD(status,
+ GCVM_L2_PROTECTION_FAULT_STATUS, CID));
dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
REG_GET_FIELD(status,
GCVM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
@@ -369,7 +372,8 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
* translation. Avoid this by doing the invalidation from the SDMA
* itself.
*/
- r = amdgpu_job_alloc_with_ib(adev, 16 * 4, &job);
+ r = amdgpu_job_alloc_with_ib(adev, 16 * 4, AMDGPU_IB_POOL_IMMEDIATE,
+ &job);
if (r)
goto error_alloc;
@@ -423,7 +427,13 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8);
kiq->pmf->kiq_invalidate_tlbs(ring,
pasid, flush_type, all_hub);
- amdgpu_fence_emit_polling(ring, &seq);
+ r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
+ if (r) {
+ amdgpu_ring_undo(ring);
+ spin_unlock(&adev->gfx.kiq.ring_lock);
+ return -ETIME;
+ }
+
amdgpu_ring_commit(ring);
spin_unlock(&adev->gfx.kiq.ring_lock);
r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
@@ -676,17 +686,23 @@ static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev,
*/
static int gmc_v10_0_mc_init(struct amdgpu_device *adev)
{
- /* Could aper size report 0 ? */
- adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
- adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
+ int r;
/* size in MB on si */
adev->gmc.mc_vram_size =
adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
- adev->gmc.visible_vram_size = adev->gmc.aper_size;
+
+ if (!(adev->flags & AMD_IS_APU)) {
+ r = amdgpu_device_resize_fb_bar(adev);
+ if (r)
+ return r;
+ }
+ adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
+ adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
/* In case the PCI BAR is larger than the actual amount of vram */
+ adev->gmc.visible_vram_size = adev->gmc.aper_size;
if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
adev->gmc.visible_vram_size = adev->gmc.real_vram_size;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index b205039350b6..a75e472b4a81 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -61,17 +61,6 @@ MODULE_FIRMWARE("amdgpu/si58_mc.bin");
#define MC_SEQ_MISC0__MT__HBM 0x60000000
#define MC_SEQ_MISC0__MT__DDR3 0xB0000000
-
-static const u32 crtc_offsets[6] =
-{
- SI_CRTC0_REGISTER_OFFSET,
- SI_CRTC1_REGISTER_OFFSET,
- SI_CRTC2_REGISTER_OFFSET,
- SI_CRTC3_REGISTER_OFFSET,
- SI_CRTC4_REGISTER_OFFSET,
- SI_CRTC5_REGISTER_OFFSET
-};
-
static void gmc_v6_0_mc_stop(struct amdgpu_device *adev)
{
u32 blackout;
@@ -858,7 +847,7 @@ static int gmc_v6_0_sw_init(void *handle)
r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
if (r) {
- dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n");
+ dev_warn(adev->dev, "No suitable DMA available.\n");
return r;
}
adev->need_swiotlb = drm_need_swiotlb(44);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 9da9596a3638..bcd4baecfe11 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -762,6 +762,7 @@ static void gmc_v7_0_gart_disable(struct amdgpu_device *adev)
* @adev: amdgpu_device pointer
* @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
* @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
+ * @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value
*
* Print human readable fault information (CIK).
*/
@@ -1019,7 +1020,7 @@ static int gmc_v7_0_sw_init(void *handle)
r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(40));
if (r) {
- pr_warn("amdgpu: No suitable DMA available\n");
+ pr_warn("No suitable DMA available\n");
return r;
}
adev->need_swiotlb = drm_need_swiotlb(40);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 27d83204fa2b..26976e50e2a2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -1005,6 +1005,7 @@ static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
* @adev: amdgpu_device pointer
* @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
* @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
+ * @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value
*
* Print human readable fault information (VI).
*/
@@ -1144,7 +1145,7 @@ static int gmc_v8_0_sw_init(void *handle)
r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(40));
if (r) {
- pr_warn("amdgpu: No suitable DMA available\n");
+ pr_warn("No suitable DMA available\n");
return r;
}
adev->need_swiotlb = drm_need_swiotlb(40);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 8606f877478f..11e93a82131d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -362,6 +362,9 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
dev_err(adev->dev,
"VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
status);
+ dev_err(adev->dev, "\t Faulty UTCL2 client ID: 0x%lx\n",
+ REG_GET_FIELD(status,
+ VM_L2_PROTECTION_FAULT_STATUS, CID));
dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
REG_GET_FIELD(status,
VM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
@@ -438,9 +441,8 @@ static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev,
return ((vmhub == AMDGPU_MMHUB_0 ||
vmhub == AMDGPU_MMHUB_1) &&
(!amdgpu_sriov_vf(adev)) &&
- (!(adev->asic_type == CHIP_RAVEN &&
- adev->rev_id < 0x8 &&
- adev->pdev->device == 0x15d8)));
+ (!(!(adev->apu_flags & AMD_APU_IS_RAVEN2) &&
+ (adev->apu_flags & AMD_APU_IS_PICASSO))));
}
static bool gmc_v9_0_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
@@ -618,7 +620,13 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
pasid, 2, all_hub);
kiq->pmf->kiq_invalidate_tlbs(ring,
pasid, flush_type, all_hub);
- amdgpu_fence_emit_polling(ring, &seq);
+ r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
+ if (r) {
+ amdgpu_ring_undo(ring);
+ spin_unlock(&adev->gfx.kiq.ring_lock);
+ return -ETIME;
+ }
+
amdgpu_ring_commit(ring);
spin_unlock(&adev->gfx.kiq.ring_lock);
r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
index 0debfd9f428c..b10c95cad9a2 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
@@ -480,7 +480,8 @@ int jpeg_v1_0_sw_init(void *handle)
ring = &adev->jpeg.inst->ring_dec;
sprintf(ring->name, "jpeg_dec");
- r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq,
+ 0, AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
index 6173951db7b4..e67d09cb1b03 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
@@ -106,7 +106,8 @@ static int jpeg_v2_0_sw_init(void *handle)
ring->use_doorbell = true;
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1;
sprintf(ring->name, "jpeg_dec");
- r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq,
+ 0, AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
@@ -169,14 +170,11 @@ static int jpeg_v2_0_hw_init(void *handle)
static int jpeg_v2_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct amdgpu_ring *ring = &adev->jpeg.inst->ring_dec;
if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS))
jpeg_v2_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
- ring->sched.ready = false;
-
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
index c04c2078a7c1..713c32560445 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
@@ -118,7 +118,8 @@ static int jpeg_v2_5_sw_init(void *handle)
ring->use_doorbell = true;
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + 8 * i;
sprintf(ring->name, "jpeg_dec_%d", i);
- r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst[i].irq, 0);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst[i].irq,
+ 0, AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
@@ -267,7 +268,6 @@ static void jpeg_v2_5_disable_clock_gating(struct amdgpu_device* adev, int inst)
data = RREG32_SOC15(JPEG, inst, mmJPEG_CGC_GATE);
data &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK
| JPEG_CGC_GATE__JPEG2_DEC_MASK
- | JPEG_CGC_GATE__JPEG_ENC_MASK
| JPEG_CGC_GATE__JMCIF_MASK
| JPEG_CGC_GATE__JRBBM_MASK);
WREG32_SOC15(JPEG, inst, mmJPEG_CGC_GATE, data);
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index 396c2a624de0..405767208a4d 100644..100755
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -96,7 +96,7 @@ static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
- if (adev->asic_type == CHIP_RAVEN && adev->rev_id >= 0x8)
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
/*
* Raven2 has a HW issue that it is unable to use the vram which
* is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
@@ -690,7 +690,8 @@ static const struct soc15_reg_entry mmhub_v1_0_edc_cnt_regs[] = {
{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20), 0, 0, 0},
};
-static int mmhub_v1_0_get_ras_error_count(const struct soc15_reg_entry *reg,
+static int mmhub_v1_0_get_ras_error_count(struct amdgpu_device *adev,
+ const struct soc15_reg_entry *reg,
uint32_t value, uint32_t *sec_count, uint32_t *ded_count)
{
uint32_t i;
@@ -704,7 +705,8 @@ static int mmhub_v1_0_get_ras_error_count(const struct soc15_reg_entry *reg,
mmhub_v1_0_ras_fields[i].sec_count_mask) >>
mmhub_v1_0_ras_fields[i].sec_count_shift;
if (sec_cnt) {
- DRM_INFO("MMHUB SubBlock %s, SEC %d\n",
+ dev_info(adev->dev,
+ "MMHUB SubBlock %s, SEC %d\n",
mmhub_v1_0_ras_fields[i].name,
sec_cnt);
*sec_count += sec_cnt;
@@ -714,7 +716,8 @@ static int mmhub_v1_0_get_ras_error_count(const struct soc15_reg_entry *reg,
mmhub_v1_0_ras_fields[i].ded_count_mask) >>
mmhub_v1_0_ras_fields[i].ded_count_shift;
if (ded_cnt) {
- DRM_INFO("MMHUB SubBlock %s, DED %d\n",
+ dev_info(adev->dev,
+ "MMHUB SubBlock %s, DED %d\n",
mmhub_v1_0_ras_fields[i].name,
ded_cnt);
*ded_count += ded_cnt;
@@ -739,7 +742,8 @@ static void mmhub_v1_0_query_ras_error_count(struct amdgpu_device *adev,
reg_value =
RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v1_0_edc_cnt_regs[i]));
if (reg_value)
- mmhub_v1_0_get_ras_error_count(&mmhub_v1_0_edc_cnt_regs[i],
+ mmhub_v1_0_get_ras_error_count(adev,
+ &mmhub_v1_0_edc_cnt_regs[i],
reg_value, &sec_count, &ded_count);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
index 37dbe0f2142f..83b453f5d717 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
@@ -26,7 +26,7 @@
#define AI_MAILBOX_POLL_ACK_TIMEDOUT 500
#define AI_MAILBOX_POLL_MSG_TIMEDOUT 12000
-#define AI_MAILBOX_POLL_FLR_TIMEDOUT 500
+#define AI_MAILBOX_POLL_FLR_TIMEDOUT 5000
enum idh_request {
IDH_REQ_GPU_INIT_ACCESS = 1,
@@ -46,7 +46,8 @@ enum idh_event {
IDH_SUCCESS,
IDH_FAIL,
IDH_QUERY_ALIVE,
- IDH_EVENT_MAX
+
+ IDH_TEXT_MESSAGE = 255,
};
extern const struct amdgpu_virt_ops xgpu_ai_virt_ops;
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
index 237fa5e16b7c..ce2bf1fb79ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
@@ -30,7 +30,6 @@
#include "navi10_ih.h"
#include "soc15_common.h"
#include "mxgpu_nv.h"
-#include "mxgpu_ai.h"
static void xgpu_nv_mailbox_send_ack(struct amdgpu_device *adev)
{
@@ -53,8 +52,7 @@ static void xgpu_nv_mailbox_set_valid(struct amdgpu_device *adev, bool val)
*/
static enum idh_event xgpu_nv_mailbox_peek_msg(struct amdgpu_device *adev)
{
- return RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
- mmBIF_BX_PF_MAILBOX_MSGBUF_RCV_DW0));
+ return RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
}
@@ -63,8 +61,7 @@ static int xgpu_nv_mailbox_rcv_msg(struct amdgpu_device *adev,
{
u32 reg;
- reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
- mmBIF_BX_PF_MAILBOX_MSGBUF_RCV_DW0));
+ reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
if (reg != event)
return -ENOENT;
@@ -110,7 +107,6 @@ static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
timeout -= 10;
} while (timeout > 1);
- pr_err("Doesn't get msg:%d from pf, error=%d\n", event, r);
return -ETIME;
}
@@ -118,7 +114,6 @@ static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
static void xgpu_nv_mailbox_trans_msg (struct amdgpu_device *adev,
enum idh_request req, u32 data1, u32 data2, u32 data3)
{
- u32 reg;
int r;
uint8_t trn;
@@ -137,19 +132,10 @@ static void xgpu_nv_mailbox_trans_msg (struct amdgpu_device *adev,
}
} while (trn);
- reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
- mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW0));
- reg = REG_SET_FIELD(reg, BIF_BX_PF_MAILBOX_MSGBUF_TRN_DW0,
- MSGBUF_DATA, req);
- WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW0),
- reg);
- WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW1),
- data1);
- WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW2),
- data2);
- WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW3),
- data3);
-
+ WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW0, req);
+ WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW1, data1);
+ WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW2, data2);
+ WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW3, data3);
xgpu_nv_mailbox_set_valid(adev, true);
/* start to poll ack */
@@ -164,23 +150,48 @@ static int xgpu_nv_send_access_requests(struct amdgpu_device *adev,
enum idh_request req)
{
int r;
+ enum idh_event event = -1;
xgpu_nv_mailbox_trans_msg(adev, req, 0, 0, 0);
- /* start to check msg if request is idh_req_gpu_init_access */
- if (req == IDH_REQ_GPU_INIT_ACCESS ||
- req == IDH_REQ_GPU_FINI_ACCESS ||
- req == IDH_REQ_GPU_RESET_ACCESS) {
- r = xgpu_nv_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
+ switch (req) {
+ case IDH_REQ_GPU_INIT_ACCESS:
+ case IDH_REQ_GPU_FINI_ACCESS:
+ case IDH_REQ_GPU_RESET_ACCESS:
+ event = IDH_READY_TO_ACCESS_GPU;
+ break;
+ case IDH_REQ_GPU_INIT_DATA:
+ event = IDH_REQ_GPU_INIT_DATA_READY;
+ break;
+ default:
+ break;
+ }
+
+ if (event != -1) {
+ r = xgpu_nv_poll_msg(adev, event);
if (r) {
- pr_err("Doesn't get READY_TO_ACCESS_GPU from pf, give up\n");
- return r;
+ if (req != IDH_REQ_GPU_INIT_DATA) {
+ pr_err("Doesn't get msg:%d from pf, error=%d\n", event, r);
+ return r;
+ }
+ else /* host doesn't support REQ_GPU_INIT_DATA handshake */
+ adev->virt.req_init_data_ver = 0;
+ } else {
+ if (req == IDH_REQ_GPU_INIT_DATA)
+ {
+ adev->virt.req_init_data_ver =
+ RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW1);
+
+ /* assume V1 in case host doesn't set version number */
+ if (adev->virt.req_init_data_ver < 1)
+ adev->virt.req_init_data_ver = 1;
+ }
}
+
/* Retrieve checksum from mailbox2 */
if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
adev->virt.fw_reserve.checksum_key =
- RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
- mmBIF_BX_PF_MAILBOX_MSGBUF_RCV_DW2));
+ RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW2);
}
}
@@ -213,6 +224,11 @@ static int xgpu_nv_release_full_gpu_access(struct amdgpu_device *adev,
return r;
}
+static int xgpu_nv_request_init_data(struct amdgpu_device *adev)
+{
+ return xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_INIT_DATA);
+}
+
static int xgpu_nv_mailbox_ack_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@@ -226,11 +242,14 @@ static int xgpu_nv_set_mailbox_ack_irq(struct amdgpu_device *adev,
unsigned type,
enum amdgpu_interrupt_state state)
{
- u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL));
+ u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);
+
+ if (state == AMDGPU_IRQ_STATE_ENABLE)
+ tmp |= 2;
+ else
+ tmp &= ~2;
- tmp = REG_SET_FIELD(tmp, BIF_BX_PF_MAILBOX_INT_CNTL, ACK_INT_EN,
- (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
- WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL), tmp);
+ WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);
return 0;
}
@@ -282,11 +301,14 @@ static int xgpu_nv_set_mailbox_rcv_irq(struct amdgpu_device *adev,
unsigned type,
enum amdgpu_interrupt_state state)
{
- u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL));
+ u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);
+
+ if (state == AMDGPU_IRQ_STATE_ENABLE)
+ tmp |= 1;
+ else
+ tmp &= ~1;
- tmp = REG_SET_FIELD(tmp, BIF_BX_PF_MAILBOX_INT_CNTL, VALID_INT_EN,
- (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
- WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL), tmp);
+ WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);
return 0;
}
@@ -378,6 +400,7 @@ void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev)
const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
.req_full_gpu = xgpu_nv_request_full_gpu_access,
.rel_full_gpu = xgpu_nv_release_full_gpu_access,
+ .req_init_data = xgpu_nv_request_init_data,
.reset_gpu = xgpu_nv_request_reset,
.wait_reset = NULL,
.trans_msg = xgpu_nv_mailbox_trans_msg,
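
xgpu_nv_send_access_requests() above now maps each request onto the acknowledgement event it must poll for, and for IDH_REQ_GPU_INIT_DATA reads the negotiated version back from the second receive DWORD, falling back to version 0 when the host never answers. A hedged standalone sketch of that flow follows; stub_trans_msg()/stub_poll_msg()/stub_read_rcv_dw1() stand in for the driver's mailbox helpers and their return values are invented for illustration.

#include <stdint.h>

enum idh_request {
	IDH_REQ_GPU_INIT_ACCESS = 1,
	IDH_REL_GPU_INIT_ACCESS,
	IDH_REQ_GPU_FINI_ACCESS,
	IDH_REL_GPU_FINI_ACCESS,
	IDH_REQ_GPU_RESET_ACCESS,
	IDH_REQ_GPU_INIT_DATA,
};

enum idh_event {
	IDH_READY_TO_ACCESS_GPU = 1,
	IDH_REQ_GPU_INIT_DATA_READY = 7,
};

/* trivial stand-ins for trans_msg/poll_msg/RREG32_NO_KIQ */
static void stub_trans_msg(enum idh_request req) { (void)req; }
static int stub_poll_msg(enum idh_event event) { (void)event; return 0; }
static uint32_t stub_read_rcv_dw1(void) { return 2; }

int send_access_request(enum idh_request req, uint32_t *init_data_ver)
{
	int expect_event = -1;
	int r;

	stub_trans_msg(req);

	switch (req) {
	case IDH_REQ_GPU_INIT_ACCESS:
	case IDH_REQ_GPU_FINI_ACCESS:
	case IDH_REQ_GPU_RESET_ACCESS:
		expect_event = IDH_READY_TO_ACCESS_GPU;
		break;
	case IDH_REQ_GPU_INIT_DATA:
		expect_event = IDH_REQ_GPU_INIT_DATA_READY;
		break;
	default:
		return 0;		/* nothing to wait for */
	}

	r = stub_poll_msg(expect_event);
	if (r) {
		/* older hosts may not implement the INIT_DATA handshake */
		if (req == IDH_REQ_GPU_INIT_DATA) {
			*init_data_ver = 0;
			return 0;
		}
		return r;
	}

	if (req == IDH_REQ_GPU_INIT_DATA) {
		*init_data_ver = stub_read_rcv_dw1();
		if (*init_data_ver < 1)
			*init_data_ver = 1;	/* host left the version unset */
	}
	return 0;
}
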
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
index 99b15f6865cb..52605e14a1a5 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
@@ -25,8 +25,32 @@
#define __MXGPU_NV_H__
#define NV_MAILBOX_POLL_ACK_TIMEDOUT 500
-#define NV_MAILBOX_POLL_MSG_TIMEDOUT 12000
-#define NV_MAILBOX_POLL_FLR_TIMEDOUT 500
+#define NV_MAILBOX_POLL_MSG_TIMEDOUT 6000
+#define NV_MAILBOX_POLL_FLR_TIMEDOUT 5000
+
+enum idh_request {
+ IDH_REQ_GPU_INIT_ACCESS = 1,
+ IDH_REL_GPU_INIT_ACCESS,
+ IDH_REQ_GPU_FINI_ACCESS,
+ IDH_REL_GPU_FINI_ACCESS,
+ IDH_REQ_GPU_RESET_ACCESS,
+ IDH_REQ_GPU_INIT_DATA,
+
+ IDH_LOG_VF_ERROR = 200,
+};
+
+enum idh_event {
+ IDH_CLR_MSG_BUF = 0,
+ IDH_READY_TO_ACCESS_GPU,
+ IDH_FLR_NOTIFICATION,
+ IDH_FLR_NOTIFICATION_CMPL,
+ IDH_SUCCESS,
+ IDH_FAIL,
+ IDH_QUERY_ALIVE,
+ IDH_REQ_GPU_INIT_DATA_READY,
+
+ IDH_TEXT_MESSAGE = 255,
+};
extern const struct amdgpu_virt_ops xgpu_nv_virt_ops;
@@ -35,7 +59,21 @@ int xgpu_nv_mailbox_add_irq_id(struct amdgpu_device *adev);
int xgpu_nv_mailbox_get_irq(struct amdgpu_device *adev);
void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev);
-#define NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE (SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_CONTROL) * 4)
-#define NV_MAIBOX_CONTROL_RCV_OFFSET_BYTE (SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_CONTROL) * 4 + 1)
+#define mmMAILBOX_CONTROL 0xE5E
+
+#define NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE (mmMAILBOX_CONTROL * 4)
+#define NV_MAIBOX_CONTROL_RCV_OFFSET_BYTE (NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE + 1)
+
+#define mmMAILBOX_MSGBUF_TRN_DW0 0xE56
+#define mmMAILBOX_MSGBUF_TRN_DW1 0xE57
+#define mmMAILBOX_MSGBUF_TRN_DW2 0xE58
+#define mmMAILBOX_MSGBUF_TRN_DW3 0xE59
+
+#define mmMAILBOX_MSGBUF_RCV_DW0 0xE5A
+#define mmMAILBOX_MSGBUF_RCV_DW1 0xE5B
+#define mmMAILBOX_MSGBUF_RCV_DW2 0xE5C
+#define mmMAILBOX_MSGBUF_RCV_DW3 0xE5D
+
+#define mmMAILBOX_INT_CNTL 0xE5F
#endif
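
The new defines hard-code the mailbox DWORD register indices and derive the byte offsets of the control register from them: the transmit byte offset is the DWORD index times four, and the receive half sits one byte above it. A small self-checking sketch of that arithmetic, using only the values from the header above:

#include <stdint.h>
#include <assert.h>

#define mmMAILBOX_CONTROL			0xE5E
#define NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE	(mmMAILBOX_CONTROL * 4)
#define NV_MAIBOX_CONTROL_RCV_OFFSET_BYTE	(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE + 1)

int main(void)
{
	assert(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE == 0x3978);
	assert(NV_MAIBOX_CONTROL_RCV_OFFSET_BYTE == 0x3979);
	return 0;
}
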
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h
index f13dc6cc158f..713ee66a4d3e 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h
@@ -43,7 +43,8 @@ enum idh_event {
IDH_READY_TO_ACCESS_GPU,
IDH_FLR_NOTIFICATION,
IDH_FLR_NOTIFICATION_CMPL,
- IDH_EVENT_MAX
+
+ IDH_TEXT_MESSAGE = 255
};
extern const struct amdgpu_virt_ops xgpu_vi_virt_ops;
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
index e08245a446fc..f97857ed3c7e 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
@@ -49,8 +49,48 @@ static void navi10_ih_enable_interrupts(struct amdgpu_device *adev)
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 1);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 1);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
+ if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
+ if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
+ DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
+ return;
+ }
+ } else {
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
+ }
+
adev->irq.ih.enabled = true;
+
+ if (adev->irq.ih1.ring_size) {
+ ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
+ RB_ENABLE, 1);
+ if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
+ if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
+ ih_rb_cntl)) {
+ DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
+ return;
+ }
+ } else {
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
+ }
+ adev->irq.ih1.enabled = true;
+ }
+
+ if (adev->irq.ih2.ring_size) {
+ ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
+ RB_ENABLE, 1);
+ if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
+ if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
+ ih_rb_cntl)) {
+ DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
+ return;
+ }
+ } else {
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
+ }
+ adev->irq.ih2.enabled = true;
+ }
}
/**
@@ -66,12 +106,61 @@ static void navi10_ih_disable_interrupts(struct amdgpu_device *adev)
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 0);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 0);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
+ if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
+ if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
+ DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
+ return;
+ }
+ } else {
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
+ }
+
/* set rptr, wptr to 0 */
WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0);
adev->irq.ih.enabled = false;
adev->irq.ih.rptr = 0;
+
+ if (adev->irq.ih1.ring_size) {
+ ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
+ RB_ENABLE, 0);
+ if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
+ if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
+ ih_rb_cntl)) {
+ DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
+ return;
+ }
+ } else {
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
+ }
+ /* set rptr, wptr to 0 */
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, 0);
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING1, 0);
+ adev->irq.ih1.enabled = false;
+ adev->irq.ih1.rptr = 0;
+ }
+
+ if (adev->irq.ih2.ring_size) {
+ ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
+ RB_ENABLE, 0);
+ if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
+ if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
+ ih_rb_cntl)) {
+ DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
+ return;
+ }
+ } else {
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
+ }
+ /* set rptr, wptr to 0 */
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, 0);
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING2, 0);
+ adev->irq.ih2.enabled = false;
+ adev->irq.ih2.rptr = 0;
+ }
+
}
static uint32_t navi10_ih_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl)
@@ -97,6 +186,43 @@ static uint32_t navi10_ih_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl
return ih_rb_cntl;
}
+static uint32_t navi10_ih_doorbell_rptr(struct amdgpu_ih_ring *ih)
+{
+ u32 ih_doorbell_rtpr = 0;
+
+ if (ih->use_doorbell) {
+ ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
+ IH_DOORBELL_RPTR, OFFSET,
+ ih->doorbell_index);
+ ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
+ IH_DOORBELL_RPTR,
+ ENABLE, 1);
+ } else {
+ ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
+ IH_DOORBELL_RPTR,
+ ENABLE, 0);
+ }
+ return ih_doorbell_rtpr;
+}
+
+static void navi10_ih_reroute_ih(struct amdgpu_device *adev)
+{
+ uint32_t tmp;
+
+ /* Reroute to IH ring 1 for VMC */
+ WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_INDEX, 0x12);
+ tmp = RREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA);
+ tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, CLIENT_TYPE, 1);
+ tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
+ WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA, tmp);
+
+ /* Reroute IH ring 1 for UMC */
+ WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_INDEX, 0x1B);
+ tmp = RREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA);
+ tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
+ WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA, tmp);
+}
+
/**
* navi10_ih_irq_init - init and enable the interrupt ring
*
@@ -111,7 +237,7 @@ static uint32_t navi10_ih_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl
static int navi10_ih_irq_init(struct amdgpu_device *adev)
{
struct amdgpu_ih_ring *ih = &adev->irq.ih;
- u32 ih_rb_cntl, ih_doorbell_rtpr, ih_chicken;
+ u32 ih_rb_cntl, ih_chicken;
u32 tmp;
/* disable irqs */
@@ -127,6 +253,15 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
ih_rb_cntl = navi10_ih_rb_cntl(ih, ih_rb_cntl);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RPTR_REARM,
!!adev->irq.msi_enabled);
+ if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
+ if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
+ DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
+ return -ETIMEDOUT;
+ }
+ } else {
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
+ }
+ navi10_ih_reroute_ih(adev);
if (unlikely(adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT)) {
if (ih->use_bus_addr) {
@@ -137,8 +272,6 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
}
}
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
-
/* set the writeback address whether it's enabled or not */
WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO,
lower_32_bits(ih->wptr_addr));
@@ -149,22 +282,68 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0);
- ih_doorbell_rtpr = RREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR);
- if (ih->use_doorbell) {
- ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
- IH_DOORBELL_RPTR, OFFSET,
- ih->doorbell_index);
- ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
- IH_DOORBELL_RPTR, ENABLE, 1);
- } else {
- ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
- IH_DOORBELL_RPTR, ENABLE, 0);
- }
- WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR, ih_doorbell_rtpr);
+ WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR,
+ navi10_ih_doorbell_rptr(ih));
adev->nbio.funcs->ih_doorbell_range(adev, ih->use_doorbell,
ih->doorbell_index);
+ ih = &adev->irq.ih1;
+ if (ih->ring_size) {
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_RING1, ih->gpu_addr >> 8);
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI_RING1,
+ (ih->gpu_addr >> 40) & 0xff);
+
+ ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
+ ih_rb_cntl = navi10_ih_rb_cntl(ih, ih_rb_cntl);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
+ WPTR_OVERFLOW_ENABLE, 0);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
+ RB_FULL_DRAIN_ENABLE, 1);
+ if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
+ if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
+ ih_rb_cntl)) {
+ DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
+ return -ETIMEDOUT;
+ }
+ } else {
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
+ }
+ /* set rptr, wptr to 0 */
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING1, 0);
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, 0);
+
+ WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING1,
+ navi10_ih_doorbell_rptr(ih));
+ }
+
+ ih = &adev->irq.ih2;
+ if (ih->ring_size) {
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_RING2, ih->gpu_addr >> 8);
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI_RING2,
+ (ih->gpu_addr >> 40) & 0xff);
+
+ ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
+ ih_rb_cntl = navi10_ih_rb_cntl(ih, ih_rb_cntl);
+
+ if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
+ if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
+ ih_rb_cntl)) {
+ DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
+ return -ETIMEDOUT;
+ }
+ } else {
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
+ }
+ /* set rptr, wptr to 0 */
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING2, 0);
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, 0);
+
+ WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING2,
+ navi10_ih_doorbell_rptr(ih));
+ }
+
+
tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,
CLIENT18_IS_STORM_CLIENT, 1);
@@ -217,7 +396,15 @@ static u32 navi10_ih_get_wptr(struct amdgpu_device *adev,
if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
goto out;
- reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR);
+ if (ih == &adev->irq.ih)
+ reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR);
+ else if (ih == &adev->irq.ih1)
+ reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING1);
+ else if (ih == &adev->irq.ih2)
+ reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING2);
+ else
+ BUG();
+
wptr = RREG32_NO_KIQ(reg);
if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
goto out;
@@ -233,7 +420,15 @@ static u32 navi10_ih_get_wptr(struct amdgpu_device *adev,
wptr, ih->rptr, tmp);
ih->rptr = tmp;
- reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL);
+ if (ih == &adev->irq.ih)
+ reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL);
+ else if (ih == &adev->irq.ih1)
+ reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING1);
+ else if (ih == &adev->irq.ih2)
+ reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING2);
+ else
+ BUG();
+
tmp = RREG32_NO_KIQ(reg);
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
WREG32_NO_KIQ(reg, tmp);
@@ -333,8 +528,52 @@ static void navi10_ih_set_rptr(struct amdgpu_device *adev,
if (amdgpu_sriov_vf(adev))
navi10_ih_irq_rearm(adev, ih);
- } else
+ } else if (ih == &adev->irq.ih) {
WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, ih->rptr);
+ } else if (ih == &adev->irq.ih1) {
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, ih->rptr);
+ } else if (ih == &adev->irq.ih2) {
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, ih->rptr);
+ }
+}
+
+/**
+ * navi10_ih_self_irq - dispatch work for ring 1 and 2
+ *
+ * @adev: amdgpu_device pointer
+ * @source: irq source
+ * @entry: IV with WPTR update
+ *
+ * Update the WPTR from the IV and schedule work to handle the entries.
+ */
+static int navi10_ih_self_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ uint32_t wptr = cpu_to_le32(entry->src_data[0]);
+
+ switch (entry->ring_id) {
+ case 1:
+ *adev->irq.ih1.wptr_cpu = wptr;
+ schedule_work(&adev->irq.ih1_work);
+ break;
+ case 2:
+ *adev->irq.ih2.wptr_cpu = wptr;
+ schedule_work(&adev->irq.ih2_work);
+ break;
+ default: break;
+ }
+ return 0;
+}
+
+static const struct amdgpu_irq_src_funcs navi10_ih_self_irq_funcs = {
+ .process = navi10_ih_self_irq,
+};
+
+static void navi10_ih_set_self_irq_funcs(struct amdgpu_device *adev)
+{
+ adev->irq.self_irq.num_types = 0;
+ adev->irq.self_irq.funcs = &navi10_ih_self_irq_funcs;
}
static int navi10_ih_early_init(void *handle)
@@ -342,6 +581,7 @@ static int navi10_ih_early_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
navi10_ih_set_interrupt_funcs(adev);
+ navi10_ih_set_self_irq_funcs(adev);
return 0;
}
@@ -351,6 +591,12 @@ static int navi10_ih_sw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool use_bus_addr;
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_IH, 0,
+ &adev->irq.self_irq);
+
+ if (r)
+ return r;
+
/* use gpu virtual address for ih ring
* until ih_checken is programmed to allow
* use bus address for ih ring by psp bl */
@@ -363,6 +609,20 @@ static int navi10_ih_sw_init(void *handle)
adev->irq.ih.use_doorbell = true;
adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1;
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, PAGE_SIZE, true);
+ if (r)
+ return r;
+
+ adev->irq.ih1.use_doorbell = true;
+ adev->irq.ih1.doorbell_index = (adev->doorbell_index.ih + 1) << 1;
+
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true);
+ if (r)
+ return r;
+
+ adev->irq.ih2.use_doorbell = true;
+ adev->irq.ih2.doorbell_index = (adev->doorbell_index.ih + 2) << 1;
+
r = amdgpu_irq_init(adev);
return r;
@@ -373,6 +633,8 @@ static int navi10_ih_sw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
amdgpu_irq_fini(adev);
+ amdgpu_ih_ring_fini(adev, &adev->irq.ih2);
+ amdgpu_ih_ring_fini(adev, &adev->irq.ih1);
amdgpu_ih_ring_fini(adev, &adev->irq.ih);
return 0;
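
The navi10_ih changes above repeat one decision for every IH_RB_CNTL* write: when running as an SR-IOV VF on the affected ASICs the value must be programmed through the PSP with psp_reg_program(), and a failure aborts the sequence; otherwise the register is written directly. A hedged sketch of that decision factored into a single helper follows; the struct, the psp_program/direct_write callbacks and the helper name are editorial inventions, and the patch itself open-codes the check per register.

#include <stdbool.h>
#include <stdint.h>

struct ih_reg_ops {
	bool use_psp;					/* SR-IOV VF on affected parts */
	int  (*psp_program)(uint32_t psp_reg_id, uint32_t val);
	void (*direct_write)(uint32_t reg_offset, uint32_t val);
};

/* Returns 0 on success, or the PSP error so the caller can abort,
 * mirroring the "return on psp_reg_program() failure" pattern above.
 */
int ih_program_rb_cntl(const struct ih_reg_ops *ops,
		       uint32_t psp_reg_id, uint32_t reg_offset, uint32_t val)
{
	if (ops->use_psp)
		return ops->psp_program(psp_reg_id, val);

	ops->direct_write(reg_offset, val);
	return 0;
}
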
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
index f3a3fe746222..cbcf04578b99 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
@@ -290,23 +290,6 @@ const struct nbio_hdp_flush_reg nbio_v2_3_hdp_flush_reg = {
.ref_and_mask_sdma1 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__SDMA1_MASK,
};
-static void nbio_v2_3_detect_hw_virt(struct amdgpu_device *adev)
-{
- uint32_t reg;
-
- reg = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_RCC_IOV_FUNC_IDENTIFIER);
- if (reg & 1)
- adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
-
- if (reg & 0x80000000)
- adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
-
- if (!reg) {
- if (is_virtual_machine()) /* passthrough mode exclus sriov mod */
- adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
- }
-}
-
static void nbio_v2_3_init_registers(struct amdgpu_device *adev)
{
uint32_t def, data;
@@ -338,6 +321,5 @@ const struct amdgpu_nbio_funcs nbio_v2_3_funcs = {
.get_clockgating_state = nbio_v2_3_get_clockgating_state,
.ih_control = nbio_v2_3_ih_control,
.init_registers = nbio_v2_3_init_registers,
- .detect_hw_virt = nbio_v2_3_detect_hw_virt,
.remap_hdp_registers = nbio_v2_3_remap_hdp_registers,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
index 635d9e1fc0a3..7b2fb050407d 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
@@ -241,23 +241,6 @@ const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg = {
.ref_and_mask_sdma1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA1_MASK
};
-static void nbio_v6_1_detect_hw_virt(struct amdgpu_device *adev)
-{
- uint32_t reg;
-
- reg = RREG32_SOC15(NBIO, 0, mmRCC_PF_0_0_RCC_IOV_FUNC_IDENTIFIER);
- if (reg & 1)
- adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
-
- if (reg & 0x80000000)
- adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
-
- if (!reg) {
- if (is_virtual_machine()) /* passthrough mode exclus sriov mod */
- adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
- }
-}
-
static void nbio_v6_1_init_registers(struct amdgpu_device *adev)
{
uint32_t def, data;
@@ -294,5 +277,4 @@ const struct amdgpu_nbio_funcs nbio_v6_1_funcs = {
.get_clockgating_state = nbio_v6_1_get_clockgating_state,
.ih_control = nbio_v6_1_ih_control,
.init_registers = nbio_v6_1_init_registers,
- .detect_hw_virt = nbio_v6_1_detect_hw_virt,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
index d6cbf26074bc..d34628e113fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
@@ -280,12 +280,6 @@ const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg = {
.ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__SDMA1_MASK,
};
-static void nbio_v7_0_detect_hw_virt(struct amdgpu_device *adev)
-{
- if (is_virtual_machine()) /* passthrough mode exclus sriov mod */
- adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
-}
-
static void nbio_v7_0_init_registers(struct amdgpu_device *adev)
{
@@ -310,6 +304,5 @@ const struct amdgpu_nbio_funcs nbio_v7_0_funcs = {
.get_clockgating_state = nbio_v7_0_get_clockgating_state,
.ih_control = nbio_v7_0_ih_control,
.init_registers = nbio_v7_0_init_registers,
- .detect_hw_virt = nbio_v7_0_detect_hw_virt,
.remap_hdp_registers = nbio_v7_0_remap_hdp_registers,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index 149d386590df..e629156173d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -185,7 +185,7 @@ static void nbio_v7_4_ih_doorbell_range(struct amdgpu_device *adev,
if (use_doorbell) {
ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, OFFSET, doorbell_index);
- ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 2);
+ ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 4);
} else
ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 0);
@@ -292,23 +292,6 @@ const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg = {
.ref_and_mask_sdma7 = GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK,
};
-static void nbio_v7_4_detect_hw_virt(struct amdgpu_device *adev)
-{
- uint32_t reg;
-
- reg = RREG32_SOC15(NBIO, 0, mmRCC_IOV_FUNC_IDENTIFIER);
- if (reg & 1)
- adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
-
- if (reg & 0x80000000)
- adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
-
- if (!reg) {
- if (is_virtual_machine()) /* passthrough mode exclus sriov mod */
- adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
- }
-}
-
static void nbio_v7_4_init_registers(struct amdgpu_device *adev)
{
@@ -340,14 +323,20 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device
obj->err_data.ce_count += err_data.ce_count;
if (err_data.ce_count)
- DRM_INFO("%ld correctable errors detected in %s block\n",
- obj->err_data.ce_count, adev->nbio.ras_if->name);
+ dev_info(adev->dev, "%ld correctable hardware "
+ "errors detected in %s block, "
+ "no user action is needed.\n",
+ obj->err_data.ce_count,
+ adev->nbio.ras_if->name);
if (err_data.ue_count)
- DRM_INFO("%ld uncorrectable errors detected in %s block\n",
- obj->err_data.ue_count, adev->nbio.ras_if->name);
+ dev_info(adev->dev, "%ld uncorrectable hardware "
+ "errors detected in %s block\n",
+ obj->err_data.ue_count,
+ adev->nbio.ras_if->name);
- DRM_WARN("RAS controller interrupt triggered by NBIF error\n");
+ dev_info(adev->dev, "RAS controller interrupt triggered "
+ "by NBIF error\n");
/* ras_controller_int is dedicated for nbif ras error,
* not the global interrupt for sync flood
@@ -561,7 +550,6 @@ const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
.get_clockgating_state = nbio_v7_4_get_clockgating_state,
.ih_control = nbio_v7_4_ih_control,
.init_registers = nbio_v7_4_init_registers,
- .detect_hw_virt = nbio_v7_4_detect_hw_virt,
.remap_hdp_registers = nbio_v7_4_remap_hdp_registers,
.handle_ras_controller_intr_no_bifring = nbio_v7_4_handle_ras_controller_intr_no_bifring,
.handle_ras_err_event_athub_intr_no_bifring = nbio_v7_4_handle_ras_err_event_athub_intr_no_bifring,
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index 52318b03c424..6655dd2009b6 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -453,18 +453,19 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
{
int r;
- /* Set IP register base before any HW register access */
- r = nv_reg_base_init(adev);
- if (r)
- return r;
-
adev->nbio.funcs = &nbio_v2_3_funcs;
adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
- adev->nbio.funcs->detect_hw_virt(adev);
-
- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev)) {
adev->virt.ops = &xgpu_nv_virt_ops;
+ /* try to send GPU_INIT_DATA request to host */
+ amdgpu_virt_request_init_data(adev);
+ }
+
+ /* Set IP register base before any HW register access */
+ r = nv_reg_base_init(adev);
+ if (r)
+ return r;
switch (adev->asic_type) {
case CHIP_NAVI10:
@@ -497,8 +498,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
- !amdgpu_sriov_vf(adev))
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
@@ -548,13 +548,6 @@ static bool nv_need_full_reset(struct amdgpu_device *adev)
return true;
}
-static void nv_get_pcie_usage(struct amdgpu_device *adev,
- uint64_t *count0,
- uint64_t *count1)
-{
- /*TODO*/
-}
-
static bool nv_need_reset_on_init(struct amdgpu_device *adev)
{
#if 0
@@ -629,7 +622,6 @@ static const struct amdgpu_asic_funcs nv_asic_funcs =
.invalidate_hdp = &nv_invalidate_hdp,
.init_doorbell_index = &nv_init_doorbell_index,
.need_full_reset = &nv_need_full_reset,
- .get_pcie_usage = &nv_get_pcie_usage,
.need_reset_on_init = &nv_need_reset_on_init,
.get_pcie_replay_count = &nv_get_pcie_replay_count,
.supports_baco = &nv_asic_supports_baco,
diff --git a/drivers/gpu/drm/amd/amdgpu/nvd.h b/drivers/gpu/drm/amd/amdgpu/nvd.h
index 1de984647dbb..fd6b58243b03 100644
--- a/drivers/gpu/drm/amd/amdgpu/nvd.h
+++ b/drivers/gpu/drm/amd/amdgpu/nvd.h
@@ -256,6 +256,54 @@
#define PACKET3_BLK_CNTX_UPDATE 0x53
#define PACKET3_INCR_UPDT_STATE 0x55
#define PACKET3_ACQUIRE_MEM 0x58
+/* 1. HEADER
+ * 2. COHER_CNTL [30:0]
+ * 2.1 ENGINE_SEL [31:31]
+ * 2. COHER_SIZE [31:0]
+ * 3. COHER_SIZE_HI [7:0]
+ * 4. COHER_BASE_LO [31:0]
+ * 5. COHER_BASE_HI [23:0]
+ * 7. POLL_INTERVAL [15:0]
+ * 8. GCR_CNTL [18:0]
+ */
+#define PACKET3_ACQUIRE_MEM_GCR_CNTL_GLI_INV(x) ((x) << 0)
+ /*
+ * 0:NOP
+ * 1:ALL
+ * 2:RANGE
+ * 3:FIRST_LAST
+ */
+#define PACKET3_ACQUIRE_MEM_GCR_CNTL_GL1_RANGE(x) ((x) << 2)
+ /*
+ * 0:ALL
+ * 1:reserved
+ * 2:RANGE
+ * 3:FIRST_LAST
+ */
+#define PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_WB(x) ((x) << 4)
+#define PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_INV(x) ((x) << 5)
+#define PACKET3_ACQUIRE_MEM_GCR_CNTL_GLK_WB(x) ((x) << 6)
+#define PACKET3_ACQUIRE_MEM_GCR_CNTL_GLK_INV(x) ((x) << 7)
+#define PACKET3_ACQUIRE_MEM_GCR_CNTL_GLV_INV(x) ((x) << 8)
+#define PACKET3_ACQUIRE_MEM_GCR_CNTL_GL1_INV(x) ((x) << 9)
+#define PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_US(x) ((x) << 10)
+#define PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_RANGE(x) ((x) << 11)
+ /*
+ * 0:ALL
+ * 1:VOL
+ * 2:RANGE
+ * 3:FIRST_LAST
+ */
+#define PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_DISCARD(x) ((x) << 13)
+#define PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_INV(x) ((x) << 14)
+#define PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_WB(x) ((x) << 15)
+#define PACKET3_ACQUIRE_MEM_GCR_CNTL_SEQ(x) ((x) << 16)
+ /*
+ * 0: PARALLEL
+ * 1: FORWARD
+ * 2: REVERSE
+ */
+#define PACKET3_ACQUIRE_MEM_GCR_RANGE_IS_PA (1 << 18)
#define PACKET3_REWIND 0x59
#define PACKET3_INTERRUPT 0x5A
#define PACKET3_GEN_PDEPTE 0x5B
@@ -306,6 +354,7 @@
#define PACKET3_GET_LOD_STATS 0x8E
#define PACKET3_DRAW_MULTI_PREAMBLE 0x8F
#define PACKET3_FRAME_CONTROL 0x90
+# define FRAME_TMZ (1 << 0)
# define FRAME_CMD(x) ((x) << 28)
/*
* x=0: tmz_begin
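
The PACKET3_ACQUIRE_MEM GCR_CNTL macros added above are plain shifts into a 19-bit control word, so a full cache operation is built by OR-ing the individual fields. A small sketch composing one such word; the chosen combination (GL2 writeback plus GL2/GL1/GLV invalidate) is only an example value, not one taken from the driver.

#include <stdint.h>
#include <stdio.h>

#define PACKET3_ACQUIRE_MEM_GCR_CNTL_GLV_INV(x)	((x) << 8)
#define PACKET3_ACQUIRE_MEM_GCR_CNTL_GL1_INV(x)	((x) << 9)
#define PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_INV(x)	((x) << 14)
#define PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_WB(x)	((x) << 15)

int main(void)
{
	uint32_t gcr_cntl = PACKET3_ACQUIRE_MEM_GCR_CNTL_GLV_INV(1) |
			    PACKET3_ACQUIRE_MEM_GCR_CNTL_GL1_INV(1) |
			    PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_INV(1) |
			    PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_WB(1);

	printf("GCR_CNTL = 0x%05x\n", (unsigned)gcr_cntl);	/* 0x0c300 */
	return 0;
}
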
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
index 7539104175e8..d7f92634eba2 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
@@ -50,15 +50,14 @@ static int psp_v10_0_init_microcode(struct psp_context *psp)
const char *chip_name;
char fw_name[30];
int err = 0;
- const struct psp_firmware_header_v1_0 *hdr;
const struct ta_firmware_header_v1_0 *ta_hdr;
DRM_DEBUG("\n");
switch (adev->asic_type) {
case CHIP_RAVEN:
- if (adev->rev_id >= 0x8)
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
chip_name = "raven2";
- else if (adev->pdev->device == 0x15d8)
+ else if (adev->apu_flags & AMD_APU_IS_PICASSO)
chip_name = "picasso";
else
chip_name = "raven";
@@ -66,22 +65,10 @@ static int psp_v10_0_init_microcode(struct psp_context *psp)
default: BUG();
}
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name);
- err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev);
+ err = psp_init_asd_microcode(psp, chip_name);
if (err)
goto out;
- err = amdgpu_ucode_validate(adev->psp.asd_fw);
- if (err)
- goto out;
-
- hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
- adev->psp.asd_fw_version = le32_to_cpu(hdr->header.ucode_version);
- adev->psp.asd_feature_version = le32_to_cpu(hdr->ucode_feature_version);
- adev->psp.asd_ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
- adev->psp.asd_start_addr = (uint8_t *)hdr +
- le32_to_cpu(hdr->header.ucode_array_offset_bytes);
-
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
if (err) {
@@ -126,8 +113,6 @@ out:
dev_err(adev->dev,
"psp v10.0: Failed to load firmware \"%s\"\n",
fw_name);
- release_firmware(adev->psp.asd_fw);
- adev->psp.asd_fw = NULL;
}
return err;
@@ -230,129 +215,6 @@ static int psp_v10_0_ring_destroy(struct psp_context *psp,
return ret;
}
-static int
-psp_v10_0_sram_map(struct amdgpu_device *adev,
- unsigned int *sram_offset, unsigned int *sram_addr_reg_offset,
- unsigned int *sram_data_reg_offset,
- enum AMDGPU_UCODE_ID ucode_id)
-{
- int ret = 0;
-
- switch(ucode_id) {
-/* TODO: needs to confirm */
-#if 0
- case AMDGPU_UCODE_ID_SMC:
- *sram_offset = 0;
- *sram_addr_reg_offset = 0;
- *sram_data_reg_offset = 0;
- break;
-#endif
-
- case AMDGPU_UCODE_ID_CP_CE:
- *sram_offset = 0x0;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_CP_PFP:
- *sram_offset = 0x0;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_CP_ME:
- *sram_offset = 0x0;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_CP_MEC1:
- *sram_offset = 0x10000;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_CP_MEC2:
- *sram_offset = 0x10000;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_RLC_G:
- *sram_offset = 0x2000;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_SDMA0:
- *sram_offset = 0x0;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_DATA);
- break;
-
-/* TODO: needs to confirm */
-#if 0
- case AMDGPU_UCODE_ID_SDMA1:
- *sram_offset = ;
- *sram_addr_reg_offset = ;
- break;
-
- case AMDGPU_UCODE_ID_UVD:
- *sram_offset = ;
- *sram_addr_reg_offset = ;
- break;
-
- case AMDGPU_UCODE_ID_VCE:
- *sram_offset = ;
- *sram_addr_reg_offset = ;
- break;
-#endif
-
- case AMDGPU_UCODE_ID_MAXIMUM:
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
-static bool psp_v10_0_compare_sram_data(struct psp_context *psp,
- struct amdgpu_firmware_info *ucode,
- enum AMDGPU_UCODE_ID ucode_type)
-{
- int err = 0;
- unsigned int fw_sram_reg_val = 0;
- unsigned int fw_sram_addr_reg_offset = 0;
- unsigned int fw_sram_data_reg_offset = 0;
- unsigned int ucode_size;
- uint32_t *ucode_mem = NULL;
- struct amdgpu_device *adev = psp->adev;
-
- err = psp_v10_0_sram_map(adev, &fw_sram_reg_val, &fw_sram_addr_reg_offset,
- &fw_sram_data_reg_offset, ucode_type);
- if (err)
- return false;
-
- WREG32(fw_sram_addr_reg_offset, fw_sram_reg_val);
-
- ucode_size = ucode->ucode_size;
- ucode_mem = (uint32_t *)ucode->kaddr;
- while (!ucode_size) {
- fw_sram_reg_val = RREG32(fw_sram_data_reg_offset);
-
- if (*ucode_mem != fw_sram_reg_val)
- return false;
-
- ucode_mem++;
- /* 4 bytes */
- ucode_size -= 4;
- }
-
- return true;
-}
-
-
static int psp_v10_0_mode1_reset(struct psp_context *psp)
{
DRM_INFO("psp mode 1 reset not supported now! \n");
@@ -379,7 +241,6 @@ static const struct psp_funcs psp_v10_0_funcs = {
.ring_create = psp_v10_0_ring_create,
.ring_stop = psp_v10_0_ring_stop,
.ring_destroy = psp_v10_0_ring_destroy,
- .compare_sram_data = psp_v10_0_compare_sram_data,
.mode1_reset = psp_v10_0_mode1_reset,
.ring_get_wptr = psp_v10_0_ring_get_wptr,
.ring_set_wptr = psp_v10_0_ring_set_wptr,
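
The psp_v10_0 hunk above drops the open-coded ASD firmware parsing in favour of psp_init_asd_microcode(); the removed lines show the essential steps, which amount to reading little-endian version/size/offset fields out of the blob header and pointing the ucode start past it. A simplified, runnable sketch of that header walk follows; the four-field layout and the sample bytes are stand-ins, not the real psp_firmware_header_v1_0.

#include <stdint.h>
#include <stdio.h>

/* read a little-endian 32-bit field from the firmware blob */
static uint32_t rd32(const uint8_t *p)
{
	return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
	       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

int main(void)
{
	/* fake header: version, feature version, size, array offset */
	uint8_t fw[32] = {
		0x21, 0x00, 0x00, 0x00,	/* ucode_version           */
		0x01, 0x00, 0x00, 0x00,	/* ucode_feature_version   */
		0x10, 0x00, 0x00, 0x00,	/* ucode_size_bytes = 16   */
		0x10, 0x00, 0x00, 0x00,	/* ucode_array_offset = 16 */
	};
	const uint8_t *ucode_start = fw + rd32(fw + 12);

	printf("version 0x%x, feature %u, size %u, payload at +%td\n",
	       (unsigned)rd32(fw), (unsigned)rd32(fw + 4),
	       (unsigned)rd32(fw + 8), ucode_start - fw);
	return 0;
}
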
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index 0afd610a1263..1de89cc3c355 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -75,10 +75,6 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
const char *chip_name;
char fw_name[30];
int err = 0;
- const struct psp_firmware_header_v1_0 *sos_hdr;
- const struct psp_firmware_header_v1_1 *sos_hdr_v1_1;
- const struct psp_firmware_header_v1_2 *sos_hdr_v1_2;
- const struct psp_firmware_header_v1_0 *asd_hdr;
const struct ta_firmware_header_v1_0 *ta_hdr;
DRM_DEBUG("\n");
@@ -103,66 +99,13 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
BUG();
}
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sos.bin", chip_name);
- err = request_firmware(&adev->psp.sos_fw, fw_name, adev->dev);
+ err = psp_init_sos_microcode(psp, chip_name);
if (err)
- goto out;
+ return err;
- err = amdgpu_ucode_validate(adev->psp.sos_fw);
+ err = psp_init_asd_microcode(psp, chip_name);
if (err)
- goto out;
-
- sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
- amdgpu_ucode_print_psp_hdr(&sos_hdr->header);
-
- switch (sos_hdr->header.header_version_major) {
- case 1:
- adev->psp.sos_fw_version = le32_to_cpu(sos_hdr->header.ucode_version);
- adev->psp.sos_feature_version = le32_to_cpu(sos_hdr->ucode_feature_version);
- adev->psp.sos_bin_size = le32_to_cpu(sos_hdr->sos_size_bytes);
- adev->psp.sys_bin_size = le32_to_cpu(sos_hdr->sos_offset_bytes);
- adev->psp.sys_start_addr = (uint8_t *)sos_hdr +
- le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
- adev->psp.sos_start_addr = (uint8_t *)adev->psp.sys_start_addr +
- le32_to_cpu(sos_hdr->sos_offset_bytes);
- if (sos_hdr->header.header_version_minor == 1) {
- sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data;
- adev->psp.toc_bin_size = le32_to_cpu(sos_hdr_v1_1->toc_size_bytes);
- adev->psp.toc_start_addr = (uint8_t *)adev->psp.sys_start_addr +
- le32_to_cpu(sos_hdr_v1_1->toc_offset_bytes);
- adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_1->kdb_size_bytes);
- adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr +
- le32_to_cpu(sos_hdr_v1_1->kdb_offset_bytes);
- }
- if (sos_hdr->header.header_version_minor == 2) {
- sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data;
- adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_2->kdb_size_bytes);
- adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr +
- le32_to_cpu(sos_hdr_v1_2->kdb_offset_bytes);
- }
- break;
- default:
- dev_err(adev->dev,
- "Unsupported psp sos firmware\n");
- err = -EINVAL;
- goto out;
- }
-
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name);
- err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev);
- if (err)
- goto out1;
-
- err = amdgpu_ucode_validate(adev->psp.asd_fw);
- if (err)
- goto out1;
-
- asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
- adev->psp.asd_fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
- adev->psp.asd_feature_version = le32_to_cpu(asd_hdr->ucode_feature_version);
- adev->psp.asd_ucode_size = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
- adev->psp.asd_start_addr = (uint8_t *)asd_hdr +
- le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
+ return err;
switch (adev->asic_type) {
case CHIP_VEGA20:
@@ -194,6 +137,8 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
case CHIP_NAVI10:
case CHIP_NAVI14:
case CHIP_NAVI12:
+ if (amdgpu_sriov_vf(adev))
+ break;
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
if (err) {
@@ -229,15 +174,6 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
out2:
release_firmware(adev->psp.ta_fw);
adev->psp.ta_fw = NULL;
-out1:
- release_firmware(adev->psp.asd_fw);
- adev->psp.asd_fw = NULL;
-out:
- dev_err(adev->dev,
- "psp v11.0: Failed to load firmware \"%s\"\n", fw_name);
- release_firmware(adev->psp.sos_fw);
- adev->psp.sos_fw = NULL;
-
return err;
}
@@ -283,11 +219,8 @@ static int psp_v11_0_bootloader_load_kdb(struct psp_context *psp)
/* Check tOS sign of life register to confirm sys driver and sOS
* are already been loaded.
*/
- if (psp_v11_0_is_sos_alive(psp)) {
- psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58);
- dev_info(adev->dev, "sos fw version = 0x%x.\n", psp->sos_fw_version);
+ if (psp_v11_0_is_sos_alive(psp))
return 0;
- }
ret = psp_v11_0_wait_for_bootloader(psp);
if (ret)
@@ -319,11 +252,8 @@ static int psp_v11_0_bootloader_load_sysdrv(struct psp_context *psp)
/* Check sOS sign of life register to confirm sys driver and sOS
* are already been loaded.
*/
- if (psp_v11_0_is_sos_alive(psp)) {
- psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58);
- dev_info(adev->dev, "sos fw version = 0x%x.\n", psp->sos_fw_version);
+ if (psp_v11_0_is_sos_alive(psp))
return 0;
- }
ret = psp_v11_0_wait_for_bootloader(psp);
if (ret)
@@ -446,13 +376,6 @@ static int psp_v11_0_ring_init(struct psp_context *psp,
return 0;
}
-static bool psp_v11_0_support_vmr_ring(struct psp_context *psp)
-{
- if (amdgpu_sriov_vf(psp->adev) && psp->sos_fw_version > 0x80045)
- return true;
- return false;
-}
-
static int psp_v11_0_ring_stop(struct psp_context *psp,
enum psp_ring_type ring_type)
{
@@ -460,7 +383,7 @@ static int psp_v11_0_ring_stop(struct psp_context *psp,
struct amdgpu_device *adev = psp->adev;
/* Write the ring destroy command*/
- if (psp_v11_0_support_vmr_ring(psp))
+ if (amdgpu_sriov_vf(adev))
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING);
else
@@ -471,7 +394,7 @@ static int psp_v11_0_ring_stop(struct psp_context *psp,
mdelay(20);
/* Wait for response flag (bit 31) */
- if (psp_v11_0_support_vmr_ring(psp))
+ if (amdgpu_sriov_vf(adev))
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
0x80000000, 0x80000000, false);
else
@@ -489,7 +412,7 @@ static int psp_v11_0_ring_create(struct psp_context *psp,
struct psp_ring *ring = &psp->km_ring;
struct amdgpu_device *adev = psp->adev;
- if (psp_v11_0_support_vmr_ring(psp)) {
+ if (amdgpu_sriov_vf(adev)) {
ret = psp_v11_0_ring_stop(psp, ring_type);
if (ret) {
DRM_ERROR("psp_v11_0_ring_stop_sriov failed!\n");
@@ -567,138 +490,6 @@ static int psp_v11_0_ring_destroy(struct psp_context *psp,
return ret;
}
-static int
-psp_v11_0_sram_map(struct amdgpu_device *adev,
- unsigned int *sram_offset, unsigned int *sram_addr_reg_offset,
- unsigned int *sram_data_reg_offset,
- enum AMDGPU_UCODE_ID ucode_id)
-{
- int ret = 0;
-
- switch (ucode_id) {
-/* TODO: needs to confirm */
-#if 0
- case AMDGPU_UCODE_ID_SMC:
- *sram_offset = 0;
- *sram_addr_reg_offset = 0;
- *sram_data_reg_offset = 0;
- break;
-#endif
-
- case AMDGPU_UCODE_ID_CP_CE:
- *sram_offset = 0x0;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_CP_PFP:
- *sram_offset = 0x0;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_CP_ME:
- *sram_offset = 0x0;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_CP_MEC1:
- *sram_offset = 0x10000;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_CP_MEC2:
- *sram_offset = 0x10000;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_RLC_G:
- *sram_offset = 0x2000;
- if (adev->asic_type < CHIP_NAVI10) {
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_DATA);
- } else {
- *sram_addr_reg_offset = adev->reg_offset[GC_HWIP][0][1] + mmRLC_GPM_UCODE_ADDR_NV10;
- *sram_data_reg_offset = adev->reg_offset[GC_HWIP][0][1] + mmRLC_GPM_UCODE_DATA_NV10;
- }
- break;
-
- case AMDGPU_UCODE_ID_SDMA0:
- *sram_offset = 0x0;
- if (adev->asic_type < CHIP_NAVI10) {
- *sram_addr_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_DATA);
- } else {
- *sram_addr_reg_offset = adev->reg_offset[GC_HWIP][0][1] + mmSDMA0_UCODE_ADDR_NV10;
- *sram_data_reg_offset = adev->reg_offset[GC_HWIP][0][1] + mmSDMA0_UCODE_DATA_NV10;
- }
- break;
-
-/* TODO: needs to confirm */
-#if 0
- case AMDGPU_UCODE_ID_SDMA1:
- *sram_offset = ;
- *sram_addr_reg_offset = ;
- break;
-
- case AMDGPU_UCODE_ID_UVD:
- *sram_offset = ;
- *sram_addr_reg_offset = ;
- break;
-
- case AMDGPU_UCODE_ID_VCE:
- *sram_offset = ;
- *sram_addr_reg_offset = ;
- break;
-#endif
-
- case AMDGPU_UCODE_ID_MAXIMUM:
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
-static bool psp_v11_0_compare_sram_data(struct psp_context *psp,
- struct amdgpu_firmware_info *ucode,
- enum AMDGPU_UCODE_ID ucode_type)
-{
- int err = 0;
- unsigned int fw_sram_reg_val = 0;
- unsigned int fw_sram_addr_reg_offset = 0;
- unsigned int fw_sram_data_reg_offset = 0;
- unsigned int ucode_size;
- uint32_t *ucode_mem = NULL;
- struct amdgpu_device *adev = psp->adev;
-
- err = psp_v11_0_sram_map(adev, &fw_sram_reg_val, &fw_sram_addr_reg_offset,
- &fw_sram_data_reg_offset, ucode_type);
- if (err)
- return false;
-
- WREG32(fw_sram_addr_reg_offset, fw_sram_reg_val);
-
- ucode_size = ucode->ucode_size;
- ucode_mem = (uint32_t *)ucode->kaddr;
- while (ucode_size) {
- fw_sram_reg_val = RREG32(fw_sram_data_reg_offset);
-
- if (*ucode_mem != fw_sram_reg_val)
- return false;
-
- ucode_mem++;
- /* 4 bytes */
- ucode_size -= 4;
- }
-
- return true;
-}
-
static int psp_v11_0_mode1_reset(struct psp_context *psp)
{
int ret;
@@ -733,181 +524,6 @@ static int psp_v11_0_mode1_reset(struct psp_context *psp)
return 0;
}
-/* TODO: Fill in follow functions once PSP firmware interface for XGMI is ready.
- * For now, return success and hack the hive_id so high level code can
- * start testing
- */
-static int psp_v11_0_xgmi_get_topology_info(struct psp_context *psp,
- int number_devices, struct psp_xgmi_topology_info *topology)
-{
- struct ta_xgmi_shared_memory *xgmi_cmd;
- struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
- struct ta_xgmi_cmd_get_topology_info_output *topology_info_output;
- int i;
- int ret;
-
- if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
- return -EINVAL;
-
- xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
- memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
-
- /* Fill in the shared memory with topology information as input */
- topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
- xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO;
- topology_info_input->num_nodes = number_devices;
-
- for (i = 0; i < topology_info_input->num_nodes; i++) {
- topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
- topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
- topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
- topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
- }
-
- /* Invoke xgmi ta to get the topology information */
- ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO);
- if (ret)
- return ret;
-
- /* Read the output topology information from the shared memory */
- topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info;
- topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes;
- for (i = 0; i < topology->num_nodes; i++) {
- topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
- topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;
- topology->nodes[i].is_sharing_enabled = topology_info_output->nodes[i].is_sharing_enabled;
- topology->nodes[i].sdma_engine = topology_info_output->nodes[i].sdma_engine;
- }
-
- return 0;
-}
-
-static int psp_v11_0_xgmi_set_topology_info(struct psp_context *psp,
- int number_devices, struct psp_xgmi_topology_info *topology)
-{
- struct ta_xgmi_shared_memory *xgmi_cmd;
- struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
- int i;
-
- if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
- return -EINVAL;
-
- xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
- memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
-
- topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
- xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO;
- topology_info_input->num_nodes = number_devices;
-
- for (i = 0; i < topology_info_input->num_nodes; i++) {
- topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
- topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
- topology_info_input->nodes[i].is_sharing_enabled = 1;
- topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
- }
-
- /* Invoke xgmi ta to set topology information */
- return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO);
-}
-
-static int psp_v11_0_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id)
-{
- struct ta_xgmi_shared_memory *xgmi_cmd;
- int ret;
-
- xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
- memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
-
- xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;
-
- /* Invoke xgmi ta to get hive id */
- ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
- if (ret)
- return ret;
-
- *hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;
-
- return 0;
-}
-
-static int psp_v11_0_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
-{
- struct ta_xgmi_shared_memory *xgmi_cmd;
- int ret;
-
- xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
- memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
-
- xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID;
-
- /* Invoke xgmi ta to get the node id */
- ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
- if (ret)
- return ret;
-
- *node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id;
-
- return 0;
-}
-
-static int psp_v11_0_ras_trigger_error(struct psp_context *psp,
- struct ta_ras_trigger_error_input *info)
-{
- struct ta_ras_shared_memory *ras_cmd;
- int ret;
-
- if (!psp->ras.ras_initialized)
- return -EINVAL;
-
- ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
- memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
-
- ras_cmd->cmd_id = TA_RAS_COMMAND__TRIGGER_ERROR;
- ras_cmd->ras_in_message.trigger_error = *info;
-
- ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
- if (ret)
- return -EINVAL;
-
- /* If err_event_athub occurs error inject was successful, however
- return status from TA is no long reliable */
- if (amdgpu_ras_intr_triggered())
- return 0;
-
- return ras_cmd->ras_status;
-}
-
-static int psp_v11_0_ras_cure_posion(struct psp_context *psp, uint64_t *mode_ptr)
-{
-#if 0
- // not support yet.
- struct ta_ras_shared_memory *ras_cmd;
- int ret;
-
- if (!psp->ras.ras_initialized)
- return -EINVAL;
-
- ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
- memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
-
- ras_cmd->cmd_id = TA_RAS_COMMAND__CURE_POISON;
- ras_cmd->ras_in_message.cure_poison.mode_ptr = mode_ptr;
-
- ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
- if (ret)
- return -EINVAL;
-
- return ras_cmd->ras_status;
-#else
- return -EINVAL;
-#endif
-}
-
-static int psp_v11_0_rlc_autoload_start(struct psp_context *psp)
-{
- return psp_rlc_autoload_start(psp);
-}
-
static int psp_v11_0_memory_training_send_msg(struct psp_context *psp, int msg)
{
int ret;
@@ -1099,7 +715,7 @@ static uint32_t psp_v11_0_ring_get_wptr(struct psp_context *psp)
uint32_t data;
struct amdgpu_device *adev = psp->adev;
- if (psp_v11_0_support_vmr_ring(psp))
+ if (amdgpu_sriov_vf(adev))
data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
else
data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
@@ -1111,7 +727,7 @@ static void psp_v11_0_ring_set_wptr(struct psp_context *psp, uint32_t value)
{
struct amdgpu_device *adev = psp->adev;
- if (psp_v11_0_support_vmr_ring(psp)) {
+ if (amdgpu_sriov_vf(adev)) {
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, value);
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_CONSUME_CMD);
} else
@@ -1203,16 +819,7 @@ static const struct psp_funcs psp_v11_0_funcs = {
.ring_create = psp_v11_0_ring_create,
.ring_stop = psp_v11_0_ring_stop,
.ring_destroy = psp_v11_0_ring_destroy,
- .compare_sram_data = psp_v11_0_compare_sram_data,
.mode1_reset = psp_v11_0_mode1_reset,
- .xgmi_get_topology_info = psp_v11_0_xgmi_get_topology_info,
- .xgmi_set_topology_info = psp_v11_0_xgmi_set_topology_info,
- .xgmi_get_hive_id = psp_v11_0_xgmi_get_hive_id,
- .xgmi_get_node_id = psp_v11_0_xgmi_get_node_id,
- .support_vmr_ring = psp_v11_0_support_vmr_ring,
- .ras_trigger_error = psp_v11_0_ras_trigger_error,
- .ras_cure_posion = psp_v11_0_ras_cure_posion,
- .rlc_autoload_start = psp_v11_0_rlc_autoload_start,
.mem_training_init = psp_v11_0_memory_training_init,
.mem_training_fini = psp_v11_0_memory_training_fini,
.mem_training = psp_v11_0_memory_training,
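
Throughout psp_v11_0 above, the psp_v11_0_support_vmr_ring() test (SR-IOV plus sOS version above 0x80045) is replaced by a plain amdgpu_sriov_vf() check when deciding whether the ring wptr lives in C2PMSG_102 or C2PMSG_67. A hedged sketch of that selection as a tiny helper; the numeric register IDs are symbolic placeholders and only the branch logic is illustrative.

#include <stdbool.h>
#include <stdint.h>

enum { C2PMSG_67 = 67, C2PMSG_102 = 102 };	/* symbolic stand-ins */

static inline uint32_t psp_ring_wptr_reg(bool is_sriov_vf)
{
	/* formerly also gated on sos_fw_version > 0x80045 */
	return is_sriov_vf ? C2PMSG_102 : C2PMSG_67;
}
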
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
index 58d8b6d732e8..6c9614f77d33 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
@@ -45,11 +45,7 @@ static int psp_v12_0_init_microcode(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
const char *chip_name;
- char fw_name[30];
int err = 0;
- const struct psp_firmware_header_v1_0 *asd_hdr;
-
- DRM_DEBUG("\n");
switch (adev->asic_type) {
case CHIP_RENOIR:
@@ -59,28 +55,7 @@ static int psp_v12_0_init_microcode(struct psp_context *psp)
BUG();
}
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name);
- err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev);
- if (err)
- goto out1;
-
- err = amdgpu_ucode_validate(adev->psp.asd_fw);
- if (err)
- goto out1;
-
- asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
- adev->psp.asd_fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
- adev->psp.asd_feature_version = le32_to_cpu(asd_hdr->ucode_feature_version);
- adev->psp.asd_ucode_size = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
- adev->psp.asd_start_addr = (uint8_t *)asd_hdr +
- le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
-
- return 0;
-
-out1:
- release_firmware(adev->psp.asd_fw);
- adev->psp.asd_fw = NULL;
-
+ err = psp_init_asd_microcode(psp, chip_name);
return err;
}
@@ -95,11 +70,8 @@ static int psp_v12_0_bootloader_load_sysdrv(struct psp_context *psp)
* are already been loaded.
*/
sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
- if (sol_reg) {
- psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58);
- printk("sos fw version = 0x%x.\n", psp->sos_fw_version);
+ if (sol_reg)
return 0;
- }
/* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
@@ -228,13 +200,6 @@ static int psp_v12_0_ring_init(struct psp_context *psp,
return 0;
}
-static bool psp_v12_0_support_vmr_ring(struct psp_context *psp)
-{
- if (amdgpu_sriov_vf(psp->adev) && psp->sos_fw_version > 0x80045)
- return true;
- return false;
-}
-
static int psp_v12_0_ring_create(struct psp_context *psp,
enum psp_ring_type ring_type)
{
@@ -243,7 +208,7 @@ static int psp_v12_0_ring_create(struct psp_context *psp,
struct psp_ring *ring = &psp->km_ring;
struct amdgpu_device *adev = psp->adev;
- if (psp_v12_0_support_vmr_ring(psp)) {
+ if (amdgpu_sriov_vf(psp->adev)) {
/* Write low address of the ring to C2PMSG_102 */
psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_ring_reg);
@@ -295,7 +260,7 @@ static int psp_v12_0_ring_stop(struct psp_context *psp,
struct amdgpu_device *adev = psp->adev;
/* Write the ring destroy command*/
- if (psp_v12_0_support_vmr_ring(psp))
+ if (amdgpu_sriov_vf(adev))
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING);
else
@@ -306,7 +271,7 @@ static int psp_v12_0_ring_stop(struct psp_context *psp,
mdelay(20);
/* Wait for response flag (bit 31) */
- if (psp_v12_0_support_vmr_ring(psp))
+ if (amdgpu_sriov_vf(adev))
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
0x80000000, 0x80000000, false);
else
@@ -334,128 +299,6 @@ static int psp_v12_0_ring_destroy(struct psp_context *psp,
return ret;
}
-static int
-psp_v12_0_sram_map(struct amdgpu_device *adev,
- unsigned int *sram_offset, unsigned int *sram_addr_reg_offset,
- unsigned int *sram_data_reg_offset,
- enum AMDGPU_UCODE_ID ucode_id)
-{
- int ret = 0;
-
- switch (ucode_id) {
-/* TODO: needs to confirm */
-#if 0
- case AMDGPU_UCODE_ID_SMC:
- *sram_offset = 0;
- *sram_addr_reg_offset = 0;
- *sram_data_reg_offset = 0;
- break;
-#endif
-
- case AMDGPU_UCODE_ID_CP_CE:
- *sram_offset = 0x0;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_CP_PFP:
- *sram_offset = 0x0;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_CP_ME:
- *sram_offset = 0x0;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_CP_MEC1:
- *sram_offset = 0x10000;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_CP_MEC2:
- *sram_offset = 0x10000;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_RLC_G:
- *sram_offset = 0x2000;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_SDMA0:
- *sram_offset = 0x0;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_DATA);
- break;
-
-/* TODO: needs to confirm */
-#if 0
- case AMDGPU_UCODE_ID_SDMA1:
- *sram_offset = ;
- *sram_addr_reg_offset = ;
- break;
-
- case AMDGPU_UCODE_ID_UVD:
- *sram_offset = ;
- *sram_addr_reg_offset = ;
- break;
-
- case AMDGPU_UCODE_ID_VCE:
- *sram_offset = ;
- *sram_addr_reg_offset = ;
- break;
-#endif
-
- case AMDGPU_UCODE_ID_MAXIMUM:
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
-static bool psp_v12_0_compare_sram_data(struct psp_context *psp,
- struct amdgpu_firmware_info *ucode,
- enum AMDGPU_UCODE_ID ucode_type)
-{
- int err = 0;
- unsigned int fw_sram_reg_val = 0;
- unsigned int fw_sram_addr_reg_offset = 0;
- unsigned int fw_sram_data_reg_offset = 0;
- unsigned int ucode_size;
- uint32_t *ucode_mem = NULL;
- struct amdgpu_device *adev = psp->adev;
-
- err = psp_v12_0_sram_map(adev, &fw_sram_reg_val, &fw_sram_addr_reg_offset,
- &fw_sram_data_reg_offset, ucode_type);
- if (err)
- return false;
-
- WREG32(fw_sram_addr_reg_offset, fw_sram_reg_val);
-
- ucode_size = ucode->ucode_size;
- ucode_mem = (uint32_t *)ucode->kaddr;
- while (ucode_size) {
- fw_sram_reg_val = RREG32(fw_sram_data_reg_offset);
-
- if (*ucode_mem != fw_sram_reg_val)
- return false;
-
- ucode_mem++;
- /* 4 bytes */
- ucode_size -= 4;
- }
-
- return true;
-}
-
static int psp_v12_0_mode1_reset(struct psp_context *psp)
{
int ret;
@@ -495,7 +338,7 @@ static uint32_t psp_v12_0_ring_get_wptr(struct psp_context *psp)
uint32_t data;
struct amdgpu_device *adev = psp->adev;
- if (psp_v12_0_support_vmr_ring(psp))
+ if (amdgpu_sriov_vf(adev))
data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
else
data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
@@ -507,7 +350,7 @@ static void psp_v12_0_ring_set_wptr(struct psp_context *psp, uint32_t value)
{
struct amdgpu_device *adev = psp->adev;
- if (psp_v12_0_support_vmr_ring(psp)) {
+ if (amdgpu_sriov_vf(adev)) {
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, value);
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_CONSUME_CMD);
} else
@@ -522,7 +365,6 @@ static const struct psp_funcs psp_v12_0_funcs = {
.ring_create = psp_v12_0_ring_create,
.ring_stop = psp_v12_0_ring_stop,
.ring_destroy = psp_v12_0_ring_destroy,
- .compare_sram_data = psp_v12_0_compare_sram_data,
.mode1_reset = psp_v12_0_mode1_reset,
.ring_get_wptr = psp_v12_0_ring_get_wptr,
.ring_set_wptr = psp_v12_0_ring_set_wptr,
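Both PSP v12.0 above and PSP v3.1 below drop their open-coded firmware loaders in favour of the shared psp_init_asd_microcode()/psp_init_sos_microcode() helpers. As a minimal sketch of what the consolidated ASD path amounts to, reconstructed purely from the duplicated code removed in these hunks (the function name suffix and the error path are illustrative, not the verbatim helper in amdgpu_psp.c):

/* Sketch only: reconstructed from the per-ASIC loaders removed above. */
static int psp_init_asd_microcode_sketch(struct psp_context *psp,
					 const char *chip_name)
{
	struct amdgpu_device *adev = psp->adev;
	const struct psp_firmware_header_v1_0 *asd_hdr;
	char fw_name[30];
	int err;

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name);
	err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev);
	if (err)
		goto out;

	err = amdgpu_ucode_validate(adev->psp.asd_fw);
	if (err)
		goto out;

	asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
	adev->psp.asd_fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
	adev->psp.asd_feature_version = le32_to_cpu(asd_hdr->ucode_feature_version);
	adev->psp.asd_ucode_size = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
	adev->psp.asd_start_addr = (uint8_t *)asd_hdr +
		le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
	return 0;

out:
	release_firmware(adev->psp.asd_fw);
	adev->psp.asd_fw = NULL;
	return err;
}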
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
index 735c43c7daab..f2e725f72d2f 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
@@ -50,9 +50,6 @@ MODULE_FIRMWARE("amdgpu/vega12_asd.bin");
#define smnMP1_FIRMWARE_FLAGS 0x3010028
-static uint32_t sos_old_versions[] = {1517616, 1510592, 1448594, 1446554};
-
-static bool psp_v3_1_support_vmr_ring(struct psp_context *psp);
static int psp_v3_1_ring_stop(struct psp_context *psp,
enum psp_ring_type ring_type);
@@ -60,9 +57,7 @@ static int psp_v3_1_init_microcode(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
const char *chip_name;
- char fw_name[30];
int err = 0;
- const struct psp_firmware_header_v1_0 *hdr;
DRM_DEBUG("\n");
@@ -76,55 +71,15 @@ static int psp_v3_1_init_microcode(struct psp_context *psp)
default: BUG();
}
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sos.bin", chip_name);
- err = request_firmware(&adev->psp.sos_fw, fw_name, adev->dev);
- if (err)
- goto out;
-
- err = amdgpu_ucode_validate(adev->psp.sos_fw);
+ err = psp_init_sos_microcode(psp, chip_name);
if (err)
- goto out;
-
- hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
- adev->psp.sos_fw_version = le32_to_cpu(hdr->header.ucode_version);
- adev->psp.sos_feature_version = le32_to_cpu(hdr->ucode_feature_version);
- adev->psp.sos_bin_size = le32_to_cpu(hdr->sos_size_bytes);
- adev->psp.sys_bin_size = le32_to_cpu(hdr->header.ucode_size_bytes) -
- le32_to_cpu(hdr->sos_size_bytes);
- adev->psp.sys_start_addr = (uint8_t *)hdr +
- le32_to_cpu(hdr->header.ucode_array_offset_bytes);
- adev->psp.sos_start_addr = (uint8_t *)adev->psp.sys_start_addr +
- le32_to_cpu(hdr->sos_offset_bytes);
-
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name);
- err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev);
- if (err)
- goto out;
+ return err;
- err = amdgpu_ucode_validate(adev->psp.asd_fw);
+ err = psp_init_asd_microcode(psp, chip_name);
if (err)
- goto out;
-
- hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
- adev->psp.asd_fw_version = le32_to_cpu(hdr->header.ucode_version);
- adev->psp.asd_feature_version = le32_to_cpu(hdr->ucode_feature_version);
- adev->psp.asd_ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
- adev->psp.asd_start_addr = (uint8_t *)hdr +
- le32_to_cpu(hdr->header.ucode_array_offset_bytes);
+ return err;
return 0;
-out:
- if (err) {
- dev_err(adev->dev,
- "psp v3.1: Failed to load firmware \"%s\"\n",
- fw_name);
- release_firmware(adev->psp.sos_fw);
- adev->psp.sos_fw = NULL;
- release_firmware(adev->psp.asd_fw);
- adev->psp.asd_fw = NULL;
- }
-
- return err;
}
static int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp)
@@ -168,41 +123,19 @@ static int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp)
return ret;
}
-static bool psp_v3_1_match_version(struct amdgpu_device *adev, uint32_t ver)
-{
- int i;
-
- if (ver == adev->psp.sos_fw_version)
- return true;
-
- /*
- * Double check if the latest four legacy versions.
- * If yes, it is still the right version.
- */
- for (i = 0; i < ARRAY_SIZE(sos_old_versions); i++) {
- if (sos_old_versions[i] == adev->psp.sos_fw_version)
- return true;
- }
-
- return false;
-}
-
static int psp_v3_1_bootloader_load_sos(struct psp_context *psp)
{
int ret;
unsigned int psp_gfxdrv_command_reg = 0;
struct amdgpu_device *adev = psp->adev;
- uint32_t sol_reg, ver;
+ uint32_t sol_reg;
/* Check sOS sign of life register to confirm sys driver and sOS
 * have already been loaded.
*/
sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
- if (sol_reg) {
- psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58);
- printk("sos fw version = 0x%x.\n", psp->sos_fw_version);
+ if (sol_reg)
return 0;
- }
 /* Wait for bootloader to signify that it is ready by setting bit 31 of C2PMSG_35 to 1 */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
@@ -227,11 +160,6 @@ static int psp_v3_1_bootloader_load_sos(struct psp_context *psp)
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_81),
RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81),
0, true);
-
- ver = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58);
- if (!psp_v3_1_match_version(adev, ver))
- DRM_WARN("SOS version doesn't match\n");
-
return ret;
}
@@ -302,7 +230,7 @@ static int psp_v3_1_ring_create(struct psp_context *psp,
psp_v3_1_reroute_ih(psp);
- if (psp_v3_1_support_vmr_ring(psp)) {
+ if (amdgpu_sriov_vf(adev)) {
ret = psp_v3_1_ring_stop(psp, ring_type);
if (ret) {
DRM_ERROR("psp_v3_1_ring_stop_sriov failed!\n");
@@ -360,34 +288,26 @@ static int psp_v3_1_ring_stop(struct psp_context *psp,
enum psp_ring_type ring_type)
{
int ret = 0;
- unsigned int psp_ring_reg = 0;
struct amdgpu_device *adev = psp->adev;
- if (psp_v3_1_support_vmr_ring(psp)) {
- /* Write the Destroy GPCOM ring command to C2PMSG_101 */
- psp_ring_reg = GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING;
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, psp_ring_reg);
-
- /* there might be handshake issue which needs delay */
- mdelay(20);
-
- /* Wait for response flag (bit 31) in C2PMSG_101 */
- ret = psp_wait_for(psp,
- SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
- 0x80000000, 0x80000000, false);
- } else {
- /* Write the ring destroy command to C2PMSG_64 */
- psp_ring_reg = 3 << 16;
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);
+ /* Write the ring destroy command */
+ if (amdgpu_sriov_vf(adev))
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
+ GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING);
+ else
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64,
+ GFX_CTRL_CMD_ID_DESTROY_RINGS);
- /* there might be handshake issue which needs delay */
- mdelay(20);
+ /* there might be a handshake issue with hardware which needs a delay */
+ mdelay(20);
- /* Wait for response flag (bit 31) in C2PMSG_64 */
- ret = psp_wait_for(psp,
- SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
- }
+ /* Wait for response flag (bit 31) */
+ if (amdgpu_sriov_vf(adev))
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
+ 0x80000000, 0x80000000, false);
+ else
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x80000000, false);
return ret;
}
@@ -410,128 +330,6 @@ static int psp_v3_1_ring_destroy(struct psp_context *psp,
return ret;
}
-static int
-psp_v3_1_sram_map(struct amdgpu_device *adev,
- unsigned int *sram_offset, unsigned int *sram_addr_reg_offset,
- unsigned int *sram_data_reg_offset,
- enum AMDGPU_UCODE_ID ucode_id)
-{
- int ret = 0;
-
- switch(ucode_id) {
-/* TODO: needs to confirm */
-#if 0
- case AMDGPU_UCODE_ID_SMC:
- *sram_offset = 0;
- *sram_addr_reg_offset = 0;
- *sram_data_reg_offset = 0;
- break;
-#endif
-
- case AMDGPU_UCODE_ID_CP_CE:
- *sram_offset = 0x0;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_CP_PFP:
- *sram_offset = 0x0;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_CP_ME:
- *sram_offset = 0x0;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_CP_MEC1:
- *sram_offset = 0x10000;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_CP_MEC2:
- *sram_offset = 0x10000;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_RLC_G:
- *sram_offset = 0x2000;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_SDMA0:
- *sram_offset = 0x0;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_DATA);
- break;
-
-/* TODO: needs to confirm */
-#if 0
- case AMDGPU_UCODE_ID_SDMA1:
- *sram_offset = ;
- *sram_addr_reg_offset = ;
- break;
-
- case AMDGPU_UCODE_ID_UVD:
- *sram_offset = ;
- *sram_addr_reg_offset = ;
- break;
-
- case AMDGPU_UCODE_ID_VCE:
- *sram_offset = ;
- *sram_addr_reg_offset = ;
- break;
-#endif
-
- case AMDGPU_UCODE_ID_MAXIMUM:
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
-static bool psp_v3_1_compare_sram_data(struct psp_context *psp,
- struct amdgpu_firmware_info *ucode,
- enum AMDGPU_UCODE_ID ucode_type)
-{
- int err = 0;
- unsigned int fw_sram_reg_val = 0;
- unsigned int fw_sram_addr_reg_offset = 0;
- unsigned int fw_sram_data_reg_offset = 0;
- unsigned int ucode_size;
- uint32_t *ucode_mem = NULL;
- struct amdgpu_device *adev = psp->adev;
-
- err = psp_v3_1_sram_map(adev, &fw_sram_reg_val, &fw_sram_addr_reg_offset,
- &fw_sram_data_reg_offset, ucode_type);
- if (err)
- return false;
-
- WREG32(fw_sram_addr_reg_offset, fw_sram_reg_val);
-
- ucode_size = ucode->ucode_size;
- ucode_mem = (uint32_t *)ucode->kaddr;
- while (ucode_size) {
- fw_sram_reg_val = RREG32(fw_sram_data_reg_offset);
-
- if (*ucode_mem != fw_sram_reg_val)
- return false;
-
- ucode_mem++;
- /* 4 bytes */
- ucode_size -= 4;
- }
-
- return true;
-}
-
static bool psp_v3_1_smu_reload_quirk(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
@@ -575,20 +373,12 @@ static int psp_v3_1_mode1_reset(struct psp_context *psp)
return 0;
}
-static bool psp_v3_1_support_vmr_ring(struct psp_context *psp)
-{
- if (amdgpu_sriov_vf(psp->adev))
- return true;
-
- return false;
-}
-
static uint32_t psp_v3_1_ring_get_wptr(struct psp_context *psp)
{
uint32_t data;
struct amdgpu_device *adev = psp->adev;
- if (psp_v3_1_support_vmr_ring(psp))
+ if (amdgpu_sriov_vf(adev))
data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
else
data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
@@ -599,7 +389,7 @@ static void psp_v3_1_ring_set_wptr(struct psp_context *psp, uint32_t value)
{
struct amdgpu_device *adev = psp->adev;
- if (psp_v3_1_support_vmr_ring(psp)) {
+ if (amdgpu_sriov_vf(adev)) {
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, value);
/* send interrupt to PSP for SRIOV ring write pointer update */
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
@@ -616,10 +406,8 @@ static const struct psp_funcs psp_v3_1_funcs = {
.ring_create = psp_v3_1_ring_create,
.ring_stop = psp_v3_1_ring_stop,
.ring_destroy = psp_v3_1_ring_destroy,
- .compare_sram_data = psp_v3_1_compare_sram_data,
.smu_reload_quirk = psp_v3_1_smu_reload_quirk,
.mode1_reset = psp_v3_1_mode1_reset,
- .support_vmr_ring = psp_v3_1_support_vmr_ring,
.ring_get_wptr = psp_v3_1_ring_get_wptr,
.ring_set_wptr = psp_v3_1_ring_set_wptr,
};
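With compare_sram_data and support_vmr_ring removed from the psp_funcs tables, the remaining hooks are still reached through psp->funcs. A hedged sketch of that dispatch pattern, assuming only the funcs pointer on psp_context (the wrapper name is illustrative; the real core uses accessor macros in amdgpu_psp.h):

/* Sketch only: invoking a per-ASIC hook through the psp_funcs table. */
static int psp_ring_create_example(struct psp_context *psp,
				   enum psp_ring_type ring_type)
{
	if (!psp->funcs || !psp->funcs->ring_create)
		return -EINVAL;

	return psp->funcs->ring_create(psp, ring_type);
}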
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 7d509a40076f..5f304d61999e 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -355,8 +355,6 @@ static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev)
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
}
- sdma0->sched.ready = false;
- sdma1->sched.ready = false;
}
/**
@@ -614,7 +612,8 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
memset(&ib, 0, sizeof(ib));
- r = amdgpu_ib_get(adev, NULL, 256, &ib);
+ r = amdgpu_ib_get(adev, NULL, 256,
+ AMDGPU_IB_POOL_DIRECT, &ib);
if (r)
goto err0;
@@ -874,7 +873,8 @@ static int sdma_v2_4_sw_init(void *handle)
&adev->sdma.trap_irq,
(i == 0) ?
AMDGPU_SDMA_IRQ_INSTANCE0 :
- AMDGPU_SDMA_IRQ_INSTANCE1);
+ AMDGPU_SDMA_IRQ_INSTANCE1,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
}
@@ -1200,7 +1200,8 @@ static void sdma_v2_4_set_irq_funcs(struct amdgpu_device *adev)
static void sdma_v2_4_emit_copy_buffer(struct amdgpu_ib *ib,
uint64_t src_offset,
uint64_t dst_offset,
- uint32_t byte_count)
+ uint32_t byte_count,
+ bool tmz)
{
ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index b6109a99fc43..c59f6f6f4c09 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -529,8 +529,6 @@ static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
}
- sdma0->sched.ready = false;
- sdma1->sched.ready = false;
}
/**
@@ -886,7 +884,8 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
memset(&ib, 0, sizeof(ib));
- r = amdgpu_ib_get(adev, NULL, 256, &ib);
+ r = amdgpu_ib_get(adev, NULL, 256,
+ AMDGPU_IB_POOL_DIRECT, &ib);
if (r)
goto err0;
@@ -1158,7 +1157,8 @@ static int sdma_v3_0_sw_init(void *handle)
&adev->sdma.trap_irq,
(i == 0) ?
AMDGPU_SDMA_IRQ_INSTANCE0 :
- AMDGPU_SDMA_IRQ_INSTANCE1);
+ AMDGPU_SDMA_IRQ_INSTANCE1,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
}
@@ -1638,7 +1638,8 @@ static void sdma_v3_0_set_irq_funcs(struct amdgpu_device *adev)
static void sdma_v3_0_emit_copy_buffer(struct amdgpu_ib *ib,
uint64_t src_offset,
uint64_t dst_offset,
- uint32_t byte_count)
+ uint32_t byte_count,
+ bool tmz)
{
ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 5f3a5ee2a3f4..33501c6c7189 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -115,17 +115,21 @@ static const struct soc15_reg_golden golden_settings_sdma_4[] = {
static const struct soc15_reg_golden golden_settings_sdma_vg10[] = {
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
- SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002)
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
};
static const struct soc15_reg_golden golden_settings_sdma_vg12[] = {
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104001),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104001),
- SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001)
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
};
static const struct soc15_reg_golden golden_settings_sdma_4_1[] = {
@@ -174,6 +178,7 @@ static const struct soc15_reg_golden golden_settings_sdma0_4_2[] =
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC7_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
};
static const struct soc15_reg_golden golden_settings_sdma1_4_2[] = {
@@ -203,6 +208,7 @@ static const struct soc15_reg_golden golden_settings_sdma1_4_2[] = {
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC7_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
};
static const struct soc15_reg_golden golden_settings_sdma_rv1[] =
@@ -222,27 +228,35 @@ static const struct soc15_reg_golden golden_settings_sdma_arct[] =
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
+ SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
+ SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
+ SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
SOC15_REG_GOLDEN_VALUE(SDMA5, 0, mmSDMA5_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
SOC15_REG_GOLDEN_VALUE(SDMA5, 0, mmSDMA5_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
SOC15_REG_GOLDEN_VALUE(SDMA5, 0, mmSDMA5_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
+ SOC15_REG_GOLDEN_VALUE(SDMA5, 0, mmSDMA5_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
SOC15_REG_GOLDEN_VALUE(SDMA6, 0, mmSDMA6_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
SOC15_REG_GOLDEN_VALUE(SDMA6, 0, mmSDMA6_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
SOC15_REG_GOLDEN_VALUE(SDMA6, 0, mmSDMA6_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
+ SOC15_REG_GOLDEN_VALUE(SDMA6, 0, mmSDMA6_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
- SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002)
+ SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
+ SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_UTCL1_TIMEOUT, 0xffffffff, 0x00010001)
};
static const struct soc15_reg_golden golden_settings_sdma_4_3[] = {
@@ -472,7 +486,7 @@ static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev)
soc15_program_register_sequence(adev,
golden_settings_sdma_4_1,
ARRAY_SIZE(golden_settings_sdma_4_1));
- if (adev->rev_id >= 8)
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
soc15_program_register_sequence(adev,
golden_settings_sdma_rv2,
ARRAY_SIZE(golden_settings_sdma_rv2));
@@ -561,9 +575,9 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
chip_name = "vega20";
break;
case CHIP_RAVEN:
- if (adev->rev_id >= 8)
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
chip_name = "raven2";
- else if (adev->pdev->device == 0x15d8)
+ else if (adev->apu_flags & AMD_APU_IS_PICASSO)
chip_name = "picasso";
else
chip_name = "raven";
@@ -923,8 +937,6 @@ static void sdma_v4_0_gfx_stop(struct amdgpu_device *adev)
ib_cntl = RREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL);
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
WREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL, ib_cntl);
-
- sdma[i]->sched.ready = false;
}
}
@@ -971,8 +983,6 @@ static void sdma_v4_0_page_stop(struct amdgpu_device *adev)
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_PAGE_IB_CNTL,
IB_ENABLE, 0);
WREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL, ib_cntl);
-
- sdma[i]->sched.ready = false;
}
}
@@ -1539,7 +1549,8 @@ static int sdma_v4_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
memset(&ib, 0, sizeof(ib));
- r = amdgpu_ib_get(adev, NULL, 256, &ib);
+ r = amdgpu_ib_get(adev, NULL, 256,
+ AMDGPU_IB_POOL_DIRECT, &ib);
if (r)
goto err0;
@@ -1840,7 +1851,7 @@ static int sdma_v4_0_sw_init(void *handle)
ring->ring_obj = NULL;
ring->use_doorbell = true;
- DRM_INFO("use_doorbell being set to: [%s]\n",
+ DRM_DEBUG("SDMA %d use_doorbell being set to: [%s]\n", i,
ring->use_doorbell?"true":"false");
/* doorbell size is 2 dwords, get DWORD offset */
@@ -1848,7 +1859,8 @@ static int sdma_v4_0_sw_init(void *handle)
sprintf(ring->name, "sdma%d", i);
r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
- AMDGPU_SDMA_IRQ_INSTANCE0 + i);
+ AMDGPU_SDMA_IRQ_INSTANCE0 + i,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
@@ -1866,7 +1878,8 @@ static int sdma_v4_0_sw_init(void *handle)
sprintf(ring->name, "page%d", i);
r = amdgpu_ring_init(adev, ring, 1024,
&adev->sdma.trap_irq,
- AMDGPU_SDMA_IRQ_INSTANCE0 + i);
+ AMDGPU_SDMA_IRQ_INSTANCE0 + i,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
}
@@ -2445,10 +2458,12 @@ static void sdma_v4_0_set_irq_funcs(struct amdgpu_device *adev)
static void sdma_v4_0_emit_copy_buffer(struct amdgpu_ib *ib,
uint64_t src_offset,
uint64_t dst_offset,
- uint32_t byte_count)
+ uint32_t byte_count,
+ bool tmz)
{
ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
- SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
+ SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) |
+ SDMA_PKT_COPY_LINEAR_HEADER_TMZ(tmz ? 1 : 0);
ib->ptr[ib->length_dw++] = byte_count - 1;
ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index d2840c2f6286..b544baf306f2 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -88,6 +88,29 @@ static const struct soc15_reg_golden golden_settings_sdma_5[] = {
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_UTCL1_PAGE, 0x00ffffff, 0x000c5c00)
};
+static const struct soc15_reg_golden golden_settings_sdma_5_sriov[] = {
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+};
+
static const struct soc15_reg_golden golden_settings_sdma_nv10[] = {
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
@@ -141,9 +164,14 @@ static void sdma_v5_0_init_golden_registers(struct amdgpu_device *adev)
(const u32)ARRAY_SIZE(golden_settings_sdma_nv14));
break;
case CHIP_NAVI12:
- soc15_program_register_sequence(adev,
- golden_settings_sdma_5,
- (const u32)ARRAY_SIZE(golden_settings_sdma_5));
+ if (amdgpu_sriov_vf(adev))
+ soc15_program_register_sequence(adev,
+ golden_settings_sdma_5_sriov,
+ (const u32)ARRAY_SIZE(golden_settings_sdma_5_sriov));
+ else
+ soc15_program_register_sequence(adev,
+ golden_settings_sdma_5,
+ (const u32)ARRAY_SIZE(golden_settings_sdma_5));
soc15_program_register_sequence(adev,
golden_settings_sdma_nv12,
(const u32)ARRAY_SIZE(golden_settings_sdma_nv12));
@@ -514,9 +542,6 @@ static void sdma_v5_0_gfx_stop(struct amdgpu_device *adev)
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
}
-
- sdma0->sched.ready = false;
- sdma1->sched.ready = false;
}
/**
@@ -541,7 +566,7 @@ static void sdma_v5_0_rlc_stop(struct amdgpu_device *adev)
*/
static void sdma_v5_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
{
- u32 f32_cntl, phase_quantum = 0;
+ u32 f32_cntl = 0, phase_quantum = 0;
int i;
if (amdgpu_sdma_phase_quantum) {
@@ -569,9 +594,12 @@ static void sdma_v5_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
}
for (i = 0; i < adev->sdma.num_instances; i++) {
- f32_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
- f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
- AUTO_CTXSW_ENABLE, enable ? 1 : 0);
+ if (!amdgpu_sriov_vf(adev)) {
+ f32_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
+ f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
+ AUTO_CTXSW_ENABLE, enable ? 1 : 0);
+ }
+
if (enable && amdgpu_sdma_phase_quantum) {
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE0_QUANTUM),
phase_quantum);
@@ -580,7 +608,8 @@ static void sdma_v5_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE2_QUANTUM),
phase_quantum);
}
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl);
+ if (!amdgpu_sriov_vf(adev))
+ WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl);
}
}
@@ -603,6 +632,9 @@ static void sdma_v5_0_enable(struct amdgpu_device *adev, bool enable)
sdma_v5_0_rlc_stop(adev);
}
+ if (amdgpu_sriov_vf(adev))
+ return;
+
for (i = 0; i < adev->sdma.num_instances; i++) {
f32_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1);
@@ -635,7 +667,8 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
ring = &adev->sdma.instance[i].ring;
wb_offset = (ring->rptr_offs * 4);
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
+ if (!amdgpu_sriov_vf(adev))
+ WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
/* Set ring buffer size in dwords */
rb_bufsz = order_base_2(ring->ring_size / 4);
@@ -711,26 +744,28 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
 /* set minor_ptr_update to 0 after wptr programmed */
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0);
- /* set utc l1 enable flag always to 1 */
- temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
- temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
-
- /* enable MCBP */
- temp = REG_SET_FIELD(temp, SDMA0_CNTL, MIDCMD_PREEMPT_ENABLE, 1);
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), temp);
-
- /* Set up RESP_MODE to non-copy addresses */
- temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL));
- temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3);
- temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9);
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL), temp);
-
- /* program default cache read and write policy */
- temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE));
- /* clean read policy and write policy bits */
- temp &= 0xFF0FFF;
- temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) | (CACHE_WRITE_POLICY_L2__DEFAULT << 14));
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE), temp);
+ if (!amdgpu_sriov_vf(adev)) {
+ /* set utc l1 enable flag always to 1 */
+ temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
+ temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
+
+ /* enable MCBP */
+ temp = REG_SET_FIELD(temp, SDMA0_CNTL, MIDCMD_PREEMPT_ENABLE, 1);
+ WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), temp);
+
+ /* Set up RESP_MODE to non-copy addresses */
+ temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL));
+ temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3);
+ temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9);
+ WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL), temp);
+
+ /* program default cache read and write policy */
+ temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE));
+ /* clean read policy and write policy bits */
+ temp &= 0xFF0FFF;
+ temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) | (CACHE_WRITE_POLICY_L2__DEFAULT << 14));
+ WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE), temp);
+ }
if (!amdgpu_sriov_vf(adev)) {
/* unhalt engine */
@@ -960,7 +995,8 @@ static int sdma_v5_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
memset(&ib, 0, sizeof(ib));
- r = amdgpu_ib_get(adev, NULL, 256, &ib);
+ r = amdgpu_ib_get(adev, NULL, 256,
+ AMDGPU_IB_POOL_DIRECT, &ib);
if (r) {
DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
goto err0;
@@ -1236,7 +1272,7 @@ static int sdma_v5_0_sw_init(void *handle)
ring->ring_obj = NULL;
ring->use_doorbell = true;
- DRM_INFO("use_doorbell being set to: [%s]\n",
+ DRM_DEBUG("SDMA %d use_doorbell being set to: [%s]\n", i,
ring->use_doorbell?"true":"false");
ring->doorbell_index = (i == 0) ?
@@ -1248,7 +1284,8 @@ static int sdma_v5_0_sw_init(void *handle)
&adev->sdma.trap_irq,
(i == 0) ?
AMDGPU_SDMA_IRQ_INSTANCE0 :
- AMDGPU_SDMA_IRQ_INSTANCE1);
+ AMDGPU_SDMA_IRQ_INSTANCE1,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
}
@@ -1399,14 +1436,16 @@ static int sdma_v5_0_set_trap_irq_state(struct amdgpu_device *adev,
{
u32 sdma_cntl;
- u32 reg_offset = (type == AMDGPU_SDMA_IRQ_INSTANCE0) ?
- sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_CNTL) :
- sdma_v5_0_get_reg_offset(adev, 1, mmSDMA0_CNTL);
+ if (!amdgpu_sriov_vf(adev)) {
+ u32 reg_offset = (type == AMDGPU_SDMA_IRQ_INSTANCE0) ?
+ sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_CNTL) :
+ sdma_v5_0_get_reg_offset(adev, 1, mmSDMA0_CNTL);
- sdma_cntl = RREG32(reg_offset);
- sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
- state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
- WREG32(reg_offset, sdma_cntl);
+ sdma_cntl = RREG32(reg_offset);
+ sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32(reg_offset, sdma_cntl);
+ }
return 0;
}
@@ -1667,10 +1706,12 @@ static void sdma_v5_0_set_irq_funcs(struct amdgpu_device *adev)
static void sdma_v5_0_emit_copy_buffer(struct amdgpu_ib *ib,
uint64_t src_offset,
uint64_t dst_offset,
- uint32_t byte_count)
+ uint32_t byte_count,
+ bool tmz)
{
ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
- SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
+ SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) |
+ SDMA_PKT_COPY_LINEAR_HEADER_TMZ(tmz ? 1 : 0);
ib->ptr[ib->length_dw++] = byte_count - 1;
ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
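Every SDMA generation's emit_copy_buffer() callback now takes a bool tmz argument, and the v4/v5 encoders fold it into the COPY_LINEAR packet header via SDMA_PKT_COPY_LINEAR_HEADER_TMZ(). A hedged sketch of how a caller could thread the flag through the buffer-funcs table (the wrapper below and its variable names are assumptions for illustration):

/* Illustration only: forwarding a TMZ (protected memory) request into the
 * per-ASIC SDMA copy encoder, e.g. sdma_v5_0_emit_copy_buffer() on Navi. */
static void example_emit_tmz_copy(struct amdgpu_device *adev,
				  struct amdgpu_ib *ib,
				  uint64_t src, uint64_t dst,
				  uint32_t bytes, bool tmz)
{
	adev->mman.buffer_funcs->emit_copy_buffer(ib, src, dst, bytes, tmz);
}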
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index 4d415bfdb42f..153db3f763bc 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -1249,12 +1249,6 @@ static int si_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
return 0;
}
-static void si_detect_hw_virtualization(struct amdgpu_device *adev)
-{
- if (is_virtual_machine()) /* passthrough mode */
- adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
-}
-
static void si_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
if (!ring || !ring->funcs->emit_wreg) {
@@ -2165,8 +2159,6 @@ static const struct amdgpu_ip_block_version si_common_ip_block =
int si_set_ip_blocks(struct amdgpu_device *adev)
{
- si_detect_hw_virtualization(adev);
-
switch (adev->asic_type) {
case CHIP_VERDE:
case CHIP_TAHITI:
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index 42d5601b6bf3..7d2bbcbe547b 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -124,7 +124,6 @@ static void si_dma_stop(struct amdgpu_device *adev)
if (adev->mman.buffer_funcs_ring == ring)
amdgpu_ttm_set_buffer_funcs_status(adev, false);
- ring->sched.ready = false;
}
}
@@ -267,7 +266,8 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
memset(&ib, 0, sizeof(ib));
- r = amdgpu_ib_get(adev, NULL, 256, &ib);
+ r = amdgpu_ib_get(adev, NULL, 256,
+ AMDGPU_IB_POOL_DIRECT, &ib);
if (r)
goto err0;
@@ -504,7 +504,8 @@ static int si_dma_sw_init(void *handle)
&adev->sdma.trap_irq,
(i == 0) ?
AMDGPU_SDMA_IRQ_INSTANCE0 :
- AMDGPU_SDMA_IRQ_INSTANCE1);
+ AMDGPU_SDMA_IRQ_INSTANCE1,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
}
@@ -775,7 +776,8 @@ static void si_dma_set_irq_funcs(struct amdgpu_device *adev)
static void si_dma_emit_copy_buffer(struct amdgpu_ib *ib,
uint64_t src_offset,
uint64_t dst_offset,
- uint32_t byte_count)
+ uint32_t byte_count,
+ bool tmz)
{
ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
1, 0, 0, byte_count);
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index 0860e85a2d35..c00ba4b23c9a 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -345,26 +345,6 @@ static const struct si_dte_data dte_data_tahiti =
false
};
-#if 0
-static const struct si_dte_data dte_data_tahiti_le =
-{
- { 0x1E8480, 0x7A1200, 0x2160EC0, 0x3938700, 0 },
- { 0x7D, 0x7D, 0x4E4, 0xB00, 0 },
- 0x5,
- 0xAFC8,
- 0x64,
- 0x32,
- 1,
- 0,
- 0x10,
- { 0x78, 0x7C, 0x82, 0x88, 0x8E, 0x94, 0x9A, 0xA0, 0xA6, 0xAC, 0xB0, 0xB4, 0xB8, 0xBC, 0xC0, 0xC4 },
- { 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700 },
- { 0x2AF8, 0x2AF8, 0x29BB, 0x27F9, 0x2637, 0x2475, 0x22B3, 0x20F1, 0x1F2F, 0x1D6D, 0x1734, 0x1414, 0x10F4, 0xDD4, 0xAB4, 0x794 },
- 85,
- true
-};
-#endif
-
static const struct si_dte_data dte_data_tahiti_pro =
{
{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index d42a8d8a0dea..c7c9e07962b9 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -564,7 +564,8 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
static int soc15_asic_reset(struct amdgpu_device *adev)
{
/* original raven doesn't have full asic reset */
- if (adev->pdev->device == 0x15dd && adev->rev_id < 0x8)
+ if ((adev->apu_flags & AMD_APU_IS_RAVEN) &&
+ !(adev->apu_flags & AMD_APU_IS_RAVEN2))
return 0;
switch (soc15_asic_reset_method(adev)) {
@@ -708,7 +709,6 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
adev->df.funcs = &df_v1_7_funcs;
adev->rev_id = soc15_get_rev_id(adev);
- adev->nbio.funcs->detect_hw_virt(adev);
if (amdgpu_sriov_vf(adev))
adev->virt.ops = &xgpu_ai_virt_ops;
@@ -1130,16 +1130,23 @@ static int soc15_common_early_init(void *handle)
break;
case CHIP_RAVEN:
adev->asic_funcs = &soc15_asic_funcs;
+ if (adev->pdev->device == 0x15dd)
+ adev->apu_flags |= AMD_APU_IS_RAVEN;
+ if (adev->pdev->device == 0x15d8)
+ adev->apu_flags |= AMD_APU_IS_PICASSO;
if (adev->rev_id >= 0x8)
+ adev->apu_flags |= AMD_APU_IS_RAVEN2;
+
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
adev->external_rev_id = adev->rev_id + 0x79;
- else if (adev->pdev->device == 0x15d8)
+ else if (adev->apu_flags & AMD_APU_IS_PICASSO)
adev->external_rev_id = adev->rev_id + 0x41;
else if (adev->rev_id == 1)
adev->external_rev_id = adev->rev_id + 0x20;
else
adev->external_rev_id = adev->rev_id + 0x01;
- if (adev->rev_id >= 0x8) {
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
AMD_CG_SUPPORT_GFX_CP_LS |
@@ -1157,7 +1164,7 @@ static int soc15_common_early_init(void *handle)
AMD_CG_SUPPORT_VCN_MGCG;
adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
- } else if (adev->pdev->device == 0x15d8) {
+ } else if (adev->apu_flags & AMD_APU_IS_PICASSO) {
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
AMD_CG_SUPPORT_GFX_CP_LS |
@@ -1218,11 +1225,12 @@ static int soc15_common_early_init(void *handle)
AMD_CG_SUPPORT_IH_CG |
AMD_CG_SUPPORT_VCN_MGCG |
AMD_CG_SUPPORT_JPEG_MGCG;
- adev->pg_flags = 0;
+ adev->pg_flags = AMD_PG_SUPPORT_VCN | AMD_PG_SUPPORT_VCN_DPG;
adev->external_rev_id = adev->rev_id + 0x32;
break;
case CHIP_RENOIR:
adev->asic_funcs = &soc15_asic_funcs;
+ adev->apu_flags |= AMD_APU_IS_RENOIR;
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
AMD_CG_SUPPORT_GFX_3D_CGCG |
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
index c893c645a4b2..56d02aa690a7 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
@@ -35,6 +35,9 @@
#define RREG32_SOC15(ip, inst, reg) \
RREG32(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg)
+#define RREG32_SOC15_NO_KIQ(ip, inst, reg) \
+ RREG32_NO_KIQ(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg)
+
#define RREG32_SOC15_OFFSET(ip, inst, reg, offset) \
RREG32((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset)
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15d.h b/drivers/gpu/drm/amd/amdgpu/soc15d.h
index edfe50821cd9..799925d22fc8 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15d.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15d.h
@@ -253,7 +253,30 @@
# define PACKET3_DMA_DATA_CMD_SAIC (1 << 28)
# define PACKET3_DMA_DATA_CMD_DAIC (1 << 29)
# define PACKET3_DMA_DATA_CMD_RAW_WAIT (1 << 30)
-#define PACKET3_AQUIRE_MEM 0x58
+#define PACKET3_ACQUIRE_MEM 0x58
+/* 1. HEADER
+ * 2. COHER_CNTL [30:0]
+ * 2.1 ENGINE_SEL [31:31]
+ * 3. COHER_SIZE [31:0]
+ * 4. COHER_SIZE_HI [7:0]
+ * 5. COHER_BASE_LO [31:0]
+ * 6. COHER_BASE_HI [23:0]
+ * 7. POLL_INTERVAL [15:0]
+ */
+/* COHER_CNTL fields for CP_COHER_CNTL */
+#define PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_NC_ACTION_ENA(x) ((x) << 3)
+#define PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_WC_ACTION_ENA(x) ((x) << 4)
+#define PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_INV_METADATA_ACTION_ENA(x) ((x) << 5)
+#define PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TCL1_VOL_ACTION_ENA(x) ((x) << 15)
+#define PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_WB_ACTION_ENA(x) ((x) << 18)
+#define PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TCL1_ACTION_ENA(x) ((x) << 22)
+#define PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_ACTION_ENA(x) ((x) << 23)
+#define PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_CB_ACTION_ENA(x) ((x) << 25)
+#define PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_DB_ACTION_ENA(x) ((x) << 26)
+#define PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_KCACHE_ACTION_ENA(x) ((x) << 27)
+#define PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_KCACHE_VOL_ACTION_ENA(x) ((x) << 28)
+#define PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_ICACHE_ACTION_ENA(x) ((x) << 29)
+#define PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_KCACHE_WB_ACTION_ENA(x) ((x) << 30)
#define PACKET3_REWIND 0x59
#define PACKET3_LOAD_UCONFIG_REG 0x5E
#define PACKET3_LOAD_SH_REG 0x5F
@@ -286,6 +309,7 @@
#define PACKET3_WAIT_ON_DE_COUNTER_DIFF 0x88
#define PACKET3_SWITCH_BUFFER 0x8B
#define PACKET3_FRAME_CONTROL 0x90
+# define FRAME_TMZ (1 << 0)
# define FRAME_CMD(x) ((x) << 28)
/*
* x=0: tmz_begin
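The new FRAME_TMZ bit lets a FRAME_CONTROL packet mark the boundary of a protected (TMZ) frame. A hedged sketch of the emission pattern, assuming an amdgpu_ring pointer named ring and the PACKET3() helper from this header:

/* Sketch: begin a TMZ frame (FRAME_CMD(0) is tmz_begin per the comment above). */
amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
amdgpu_ring_write(ring, FRAME_TMZ | FRAME_CMD(0));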
diff --git a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
index ca7d05993ca2..745ed0fba1ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
@@ -24,6 +24,8 @@
#ifndef _TA_RAS_IF_H
#define _TA_RAS_IF_H
+#define RAS_TA_HOST_IF_VER 0
+
/* Responses have bit 31 set */
#define RSP_ID_MASK (1U << 31)
#define RSP_ID(cmdId) (((uint32_t)(cmdId)) | RSP_ID_MASK)
@@ -36,18 +38,24 @@ enum ras_command {
TA_RAS_COMMAND__TRIGGER_ERROR,
};
-enum ta_ras_status {
- TA_RAS_STATUS__SUCCESS = 0x00,
- TA_RAS_STATUS__RESET_NEEDED = 0x01,
- TA_RAS_STATUS__ERROR_INVALID_PARAMETER = 0x02,
- TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE = 0x03,
- TA_RAS_STATUS__ERROR_RAS_DUPLICATE_CMD = 0x04,
- TA_RAS_STATUS__ERROR_INJECTION_FAILED = 0x05,
- TA_RAS_STATUS__ERROR_ASD_READ_WRITE = 0x06,
- TA_RAS_STATUS__ERROR_TOGGLE_DF_CSTATE = 0x07,
- TA_RAS_STATUS__ERROR_TIMEOUT = 0x08,
- TA_RAS_STATUS__ERROR_BLOCK_DISABLED = 0x09,
- TA_RAS_STATUS__ERROR_GENERIC = 0x10,
+enum ta_ras_status
+{
+ TA_RAS_STATUS__SUCCESS = 0x00,
+ TA_RAS_STATUS__RESET_NEEDED = 0xA001,
+ TA_RAS_STATUS__ERROR_INVALID_PARAMETER = 0xA002,
+ TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE = 0xA003,
+ TA_RAS_STATUS__ERROR_RAS_DUPLICATE_CMD = 0xA004,
+ TA_RAS_STATUS__ERROR_INJECTION_FAILED = 0xA005,
+ TA_RAS_STATUS__ERROR_ASD_READ_WRITE = 0xA006,
+ TA_RAS_STATUS__ERROR_TOGGLE_DF_CSTATE = 0xA007,
+ TA_RAS_STATUS__ERROR_TIMEOUT = 0xA008,
+ TA_RAS_STATUS__ERROR_BLOCK_DISABLED = 0xA009,
+ TA_RAS_STATUS__ERROR_GENERIC = 0xA00A,
+ TA_RAS_STATUS__ERROR_RAS_MMHUB_INIT = 0xA00B,
+ TA_RAS_STATUS__ERROR_GET_DEV_INFO = 0xA00C,
+ TA_RAS_STATUS__ERROR_UNSUPPORTED_DEV = 0xA00D,
+ TA_RAS_STATUS__ERROR_NOT_INITIALIZED = 0xA00E,
+ TA_RAS_STATUS__ERROR_TEE_INTERNAL = 0xA00F
};
enum ta_ras_block {
@@ -97,22 +105,39 @@ struct ta_ras_trigger_error_input {
 uint64_t value; // method of error injection, i.e. persistent, coherent, etc.
};
+struct ta_ras_output_flags
+{
+ uint8_t ras_init_success_flag;
+ uint8_t err_inject_switch_disable_flag;
+ uint8_t reg_access_failure_flag;
+};
+
/* Common input structure for RAS callbacks */
/**********************************************************/
union ta_ras_cmd_input {
struct ta_ras_enable_features_input enable_features;
struct ta_ras_disable_features_input disable_features;
struct ta_ras_trigger_error_input trigger_error;
+
+ uint32_t reserve_pad[256];
+};
+
+union ta_ras_cmd_output
+{
+ struct ta_ras_output_flags flags;
+
+ uint32_t reserve_pad[256];
};
/* Shared Memory structures */
/**********************************************************/
struct ta_ras_shared_memory {
- uint32_t cmd_id;
- uint32_t resp_id;
- enum ta_ras_status ras_status;
- uint32_t reserved;
- union ta_ras_cmd_input ras_in_message;
+ uint32_t cmd_id;
+ uint32_t resp_id;
+ uint32_t ras_status;
+ uint32_t if_version;
+ union ta_ras_cmd_input ras_in_message;
+ union ta_ras_cmd_output ras_out_message;
};
 #endif // _TA_RAS_IF_H
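The reworked ta_ras_shared_memory adds an if_version field and a ras_out_message union for the TA's output flags. A hedged sketch of how the host side might fill the buffer before submitting a command (the shared_buf mapping and the block_id field inside enable_features are assumptions for illustration; only the top-level members come from the structure above):

/* Sketch only: populate the RAS TA command buffer with the new fields. */
struct ta_ras_shared_memory *ras_cmd =
	(struct ta_ras_shared_memory *)shared_buf;

memset(ras_cmd, 0, sizeof(*ras_cmd));
ras_cmd->cmd_id = TA_RAS_COMMAND__ENABLE_FEATURES;
ras_cmd->if_version = RAS_TA_HOST_IF_VER;
ras_cmd->ras_in_message.enable_features.block_id = TA_RAS_BLOCK__UMC;
/* On completion, ras_cmd->ras_status and ras_cmd->ras_out_message.flags
 * carry the TA status code and the new output flags. */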
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
index 14d346321a5f..418cf097c918 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
@@ -56,24 +56,43 @@ const uint32_t
static void umc_v6_1_enable_umc_index_mode(struct amdgpu_device *adev)
{
- WREG32_FIELD15(RSMU, 0, RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
+ uint32_t rsmu_umc_addr, rsmu_umc_val;
+
+ rsmu_umc_addr = SOC15_REG_OFFSET(RSMU, 0,
+ mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU);
+ rsmu_umc_val = RREG32_PCIE(rsmu_umc_addr * 4);
+
+ rsmu_umc_val = REG_SET_FIELD(rsmu_umc_val,
+ RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
RSMU_UMC_INDEX_MODE_EN, 1);
+
+ WREG32_PCIE(rsmu_umc_addr * 4, rsmu_umc_val);
}
static void umc_v6_1_disable_umc_index_mode(struct amdgpu_device *adev)
{
- WREG32_FIELD15(RSMU, 0, RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
+ uint32_t rsmu_umc_addr, rsmu_umc_val;
+
+ rsmu_umc_addr = SOC15_REG_OFFSET(RSMU, 0,
+ mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU);
+ rsmu_umc_val = RREG32_PCIE(rsmu_umc_addr * 4);
+
+ rsmu_umc_val = REG_SET_FIELD(rsmu_umc_val,
+ RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
RSMU_UMC_INDEX_MODE_EN, 0);
+
+ WREG32_PCIE(rsmu_umc_addr * 4, rsmu_umc_val);
}
static uint32_t umc_v6_1_get_umc_index_mode_state(struct amdgpu_device *adev)
{
- uint32_t rsmu_umc_index;
+ uint32_t rsmu_umc_addr, rsmu_umc_val;
- rsmu_umc_index = RREG32_SOC15(RSMU, 0,
+ rsmu_umc_addr = SOC15_REG_OFFSET(RSMU, 0,
mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU);
+ rsmu_umc_val = RREG32_PCIE(rsmu_umc_addr * 4);
- return REG_GET_FIELD(rsmu_umc_index,
+ return REG_GET_FIELD(rsmu_umc_val,
RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
RSMU_UMC_INDEX_MODE_EN);
}
@@ -85,6 +104,81 @@ static inline uint32_t get_umc_6_reg_offset(struct amdgpu_device *adev,
return adev->umc.channel_offs*ch_inst + UMC_6_INST_DIST*umc_inst;
}
+static void umc_v6_1_clear_error_count_per_channel(struct amdgpu_device *adev,
+ uint32_t umc_reg_offset)
+{
+ uint32_t ecc_err_cnt_addr;
+ uint32_t ecc_err_cnt_sel, ecc_err_cnt_sel_addr;
+
+ if (adev->asic_type == CHIP_ARCTURUS) {
+ /* UMC 6_1_2 registers */
+ ecc_err_cnt_sel_addr =
+ SOC15_REG_OFFSET(UMC, 0,
+ mmUMCCH0_0_EccErrCntSel_ARCT);
+ ecc_err_cnt_addr =
+ SOC15_REG_OFFSET(UMC, 0,
+ mmUMCCH0_0_EccErrCnt_ARCT);
+ } else {
+ /* UMC 6_1_1 registers */
+ ecc_err_cnt_sel_addr =
+ SOC15_REG_OFFSET(UMC, 0,
+ mmUMCCH0_0_EccErrCntSel);
+ ecc_err_cnt_addr =
+ SOC15_REG_OFFSET(UMC, 0,
+ mmUMCCH0_0_EccErrCnt);
+ }
+
+ /* select the lower chip */
+ ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr +
+ umc_reg_offset) * 4);
+ ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel,
+ UMCCH0_0_EccErrCntSel,
+ EccErrCntCsSel, 0);
+ WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4,
+ ecc_err_cnt_sel);
+
+ /* clear lower chip error count */
+ WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4,
+ UMC_V6_1_CE_CNT_INIT);
+
+ /* select the higher chip */
+ ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr +
+ umc_reg_offset) * 4);
+ ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel,
+ UMCCH0_0_EccErrCntSel,
+ EccErrCntCsSel, 1);
+ WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4,
+ ecc_err_cnt_sel);
+
+ /* clear higher chip error count */
+ WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4,
+ UMC_V6_1_CE_CNT_INIT);
+}
+
+static void umc_v6_1_clear_error_count(struct amdgpu_device *adev)
+{
+ uint32_t umc_inst = 0;
+ uint32_t ch_inst = 0;
+ uint32_t umc_reg_offset = 0;
+ uint32_t rsmu_umc_index_state =
+ umc_v6_1_get_umc_index_mode_state(adev);
+
+ if (rsmu_umc_index_state)
+ umc_v6_1_disable_umc_index_mode(adev);
+
+ LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
+ umc_reg_offset = get_umc_6_reg_offset(adev,
+ umc_inst,
+ ch_inst);
+
+ umc_v6_1_clear_error_count_per_channel(adev,
+ umc_reg_offset);
+ }
+
+ if (rsmu_umc_index_state)
+ umc_v6_1_enable_umc_index_mode(adev);
+}
+
static void umc_v6_1_query_correctable_error_count(struct amdgpu_device *adev,
uint32_t umc_reg_offset,
unsigned long *error_count)
@@ -117,23 +211,21 @@ static void umc_v6_1_query_correctable_error_count(struct amdgpu_device *adev,
ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
EccErrCntCsSel, 0);
WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);
+
ecc_err_cnt = RREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4);
*error_count +=
(REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_EccErrCnt, EccErrCnt) -
UMC_V6_1_CE_CNT_INIT);
- /* clear the lower chip err count */
- WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V6_1_CE_CNT_INIT);
/* select the higher chip and check the err counter */
ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
EccErrCntCsSel, 1);
WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);
+
ecc_err_cnt = RREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4);
*error_count +=
(REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_EccErrCnt, EccErrCnt) -
UMC_V6_1_CE_CNT_INIT);
- /* clear the higher chip err count */
- WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V6_1_CE_CNT_INIT);
/* check for SRAM correctable error
MCUMC_STATUS is a 64 bit register */
@@ -209,6 +301,8 @@ static void umc_v6_1_query_ras_error_count(struct amdgpu_device *adev,
if (rsmu_umc_index_state)
umc_v6_1_enable_umc_index_mode(adev);
+
+ umc_v6_1_clear_error_count(adev);
}
static void umc_v6_1_query_error_address(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 82abd8e728ab..3cafba726587 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -118,7 +118,8 @@ static int uvd_v4_2_sw_init(void *handle)
ring = &adev->uvd.inst->ring;
sprintf(ring->name, "uvd");
- r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
@@ -210,13 +211,10 @@ done:
static int uvd_v4_2_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct amdgpu_ring *ring = &adev->uvd.inst->ring;
if (RREG32(mmUVD_STATUS) != 0)
uvd_v4_2_stop(adev);
- ring->sched.ready = false;
-
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index 0fa8aae2d78e..a566ff926e90 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -116,7 +116,8 @@ static int uvd_v5_0_sw_init(void *handle)
ring = &adev->uvd.inst->ring;
sprintf(ring->name, "uvd");
- r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
@@ -208,13 +209,10 @@ done:
static int uvd_v5_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct amdgpu_ring *ring = &adev->uvd.inst->ring;
if (RREG32(mmUVD_STATUS) != 0)
uvd_v5_0_stop(adev);
- ring->sched.ready = false;
-
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index e0aadcaf6c8b..0a880bc101b8 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -216,7 +216,8 @@ static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
uint64_t addr;
int i, r;
- r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+ r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+ AMDGPU_IB_POOL_DIRECT, &job);
if (r)
return r;
@@ -279,7 +280,8 @@ static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
uint64_t addr;
int i, r;
- r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+ r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+ AMDGPU_IB_POOL_DIRECT, &job);
if (r)
return r;
@@ -416,7 +418,8 @@ static int uvd_v6_0_sw_init(void *handle)
ring = &adev->uvd.inst->ring;
sprintf(ring->name, "uvd");
- r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
@@ -428,7 +431,9 @@ static int uvd_v6_0_sw_init(void *handle)
for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
ring = &adev->uvd.inst->ring_enc[i];
sprintf(ring->name, "uvd_enc%d", i);
- r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
+ r = amdgpu_ring_init(adev, ring, 512,
+ &adev->uvd.inst->irq, 0,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
}
@@ -535,13 +540,10 @@ done:
static int uvd_v6_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct amdgpu_ring *ring = &adev->uvd.inst->ring;
if (RREG32(mmUVD_STATUS) != 0)
uvd_v6_0_stop(adev);
- ring->sched.ready = false;
-
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 0995378d8263..7a55457e6f9e 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -224,7 +224,8 @@ static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
uint64_t addr;
int i, r;
- r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+ r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+ AMDGPU_IB_POOL_DIRECT, &job);
if (r)
return r;
@@ -286,7 +287,8 @@ static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handl
uint64_t addr;
int i, r;
- r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+ r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+ AMDGPU_IB_POOL_DIRECT, &job);
if (r)
return r;
@@ -450,7 +452,9 @@ static int uvd_v7_0_sw_init(void *handle)
if (!amdgpu_sriov_vf(adev)) {
ring = &adev->uvd.inst[j].ring;
sprintf(ring->name, "uvd_%d", ring->me);
- r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0);
+ r = amdgpu_ring_init(adev, ring, 512,
+ &adev->uvd.inst[j].irq, 0,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
}
@@ -469,7 +473,9 @@ static int uvd_v7_0_sw_init(void *handle)
else
ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring2_3 * 2 + 1;
}
- r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0);
+ r = amdgpu_ring_init(adev, ring, 512,
+ &adev->uvd.inst[j].irq, 0,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
}
@@ -598,7 +604,6 @@ done:
static int uvd_v7_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- int i;
if (!amdgpu_sriov_vf(adev))
uvd_v7_0_stop(adev);
@@ -607,12 +612,6 @@ static int uvd_v7_0_hw_fini(void *handle)
DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
}
- for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
- if (adev->uvd.harvest_config & (1 << i))
- continue;
- adev->uvd.inst[i].ring.sched.ready = false;
- }
-
return 0;
}
@@ -1694,7 +1693,7 @@ static int uvd_v7_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+ bool enable = (state == AMD_CG_STATE_GATE);
uvd_v7_0_set_bypass_mode(adev, enable);
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index b6837fcfdba7..0e2945baf0f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -434,7 +434,8 @@ static int vce_v2_0_sw_init(void *handle)
ring = &adev->vce.ring[i];
sprintf(ring->name, "vce%d", i);
r = amdgpu_ring_init(adev, ring, 512,
- &adev->vce.irq, 0);
+ &adev->vce.irq, 0,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 217db187207c..6d9108fa22e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -442,7 +442,8 @@ static int vce_v3_0_sw_init(void *handle)
for (i = 0; i < adev->vce.num_rings; i++) {
ring = &adev->vce.ring[i];
sprintf(ring->name, "vce%d", i);
- r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index 3fd102efb7af..a0fb119240f4 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -476,7 +476,8 @@ static int vce_v4_0_sw_init(void *handle)
else
ring->doorbell_index = adev->doorbell_index.uvd_vce.vce_ring2_3 * 2 + 1;
}
- r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
}
@@ -539,7 +540,6 @@ static int vce_v4_0_hw_init(void *handle)
static int vce_v4_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- int i;
if (!amdgpu_sriov_vf(adev)) {
/* vce_v4_0_wait_for_idle(handle); */
@@ -549,9 +549,6 @@ static int vce_v4_0_hw_fini(void *handle)
DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
}
- for (i = 0; i < adev->vce.num_rings; i++)
- adev->vce.ring[i].sched.ready = false;
-
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 09b0572b838d..1ad79155ed00 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -127,7 +127,8 @@ static int vcn_v1_0_sw_init(void *handle)
ring = &adev->vcn.inst->ring_dec;
sprintf(ring->name, "vcn_dec");
- r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
@@ -145,7 +146,8 @@ static int vcn_v1_0_sw_init(void *handle)
for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
ring = &adev->vcn.inst->ring_enc[i];
sprintf(ring->name, "vcn_enc%d", i);
- r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
}
@@ -227,14 +229,11 @@ done:
static int vcn_v1_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
RREG32_SOC15(VCN, 0, mmUVD_STATUS))
vcn_v1_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
- ring->sched.ready = false;
-
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index ec8091a661df..90ed773695ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -92,6 +92,7 @@ static int vcn_v2_0_sw_init(void *handle)
struct amdgpu_ring *ring;
int i, r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ volatile struct amdgpu_fw_shared *fw_shared;
/* VCN DEC TRAP */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
@@ -133,7 +134,8 @@ static int vcn_v2_0_sw_init(void *handle)
ring->doorbell_index = adev->doorbell_index.vcn.vcn_ring0_1 << 1;
sprintf(ring->name, "vcn_dec");
- r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
@@ -163,7 +165,8 @@ static int vcn_v2_0_sw_init(void *handle)
else
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + i;
sprintf(ring->name, "vcn_enc%d", i);
- r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
}
@@ -174,6 +177,8 @@ static int vcn_v2_0_sw_init(void *handle)
if (r)
return r;
+ fw_shared = adev->vcn.inst->fw_shared_cpu_addr;
+ fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG);
return 0;
}
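vcn_v2_0_sw_init() now advertises multi-queue support to the VCN firmware through the shared CPU/GPU structure. A short sketch of the handshake as it reads after this hunk, assuming fw_shared_cpu_addr is mapped by the common VCN setup code as the hunk implies:

volatile struct amdgpu_fw_shared *fw_shared;

/* tell the firmware that the multi_queue bookkeeping in the shared
 * structure is valid for this instance */
fw_shared = adev->vcn.inst->fw_shared_cpu_addr;
fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG);

The matching sw_fini hunk below clears present_flag_0 again before the shared buffer goes away.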
@@ -188,6 +193,9 @@ static int vcn_v2_0_sw_fini(void *handle)
{
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared_cpu_addr;
+
+ fw_shared->present_flag_0 = 0;
amdgpu_virt_free_mm_table(adev);
@@ -223,6 +231,10 @@ static int vcn_v2_0_hw_init(void *handle)
if (r)
goto done;
+ //Disable vcn decode for sriov
+ if (amdgpu_sriov_vf(adev))
+ ring->sched.ready = false;
+
for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
ring = &adev->vcn.inst->ring_enc[i];
r = amdgpu_ring_test_helper(ring);
@@ -248,21 +260,12 @@ done:
static int vcn_v2_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
- int i;
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
(adev->vcn.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(VCN, 0, mmUVD_STATUS)))
vcn_v2_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
- ring->sched.ready = false;
-
- for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
- ring = &adev->vcn.inst->ring_enc[i];
- ring->sched.ready = false;
- }
-
return 0;
}
@@ -359,6 +362,15 @@ static void vcn_v2_0_mc_resume(struct amdgpu_device *adev)
WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0);
WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
+ /* non-cache window */
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
+ lower_32_bits(adev->vcn.inst->fw_shared_gpu_addr));
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
+ upper_32_bits(adev->vcn.inst->fw_shared_gpu_addr));
+ WREG32_SOC15(UVD, 0, mmUVD_VCPU_NONCACHE_OFFSET0, 0);
+ WREG32_SOC15(UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0,
+ AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)));
+
WREG32_SOC15(UVD, 0, mmUVD_GFX10_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
}
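Both the plain mc_resume path above and the DPG variant in the next hunk now point VCPU non-cache window 0 at the fw_shared buffer instead of programming it to zero. A condensed sketch of the direct-register form, using only names that appear in the hunks:

/* expose the shared firmware structure through non-cache window 0 */
WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
	     lower_32_bits(adev->vcn.inst->fw_shared_gpu_addr));
WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
	     upper_32_bits(adev->vcn.inst->fw_shared_gpu_addr));
WREG32_SOC15(UVD, 0, mmUVD_VCPU_NONCACHE_OFFSET0, 0);
WREG32_SOC15(UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0,
	     AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)));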
@@ -442,13 +454,16 @@ static void vcn_v2_0_mc_resume_dpg_mode(struct amdgpu_device *adev, bool indirec
/* non-cache window */
WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW), 0, 0, indirect);
+ UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst->fw_shared_gpu_addr), 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH), 0, 0, indirect);
+ UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst->fw_shared_gpu_addr), 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0), 0, 0, indirect);
+ UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0),
+ AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect);
/* VCN global tiling registers */
WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
@@ -773,6 +788,7 @@ static void vcn_v2_0_enable_static_power_gating(struct amdgpu_device *adev)
static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
{
+ volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared_cpu_addr;
struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
uint32_t rb_bufsz, tmp;
@@ -872,6 +888,12 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);
+ /* Stall DPG before WPTR/RPTR reset */
+ WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
+ UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
+ ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
+ fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
+
/* set the write pointer delay */
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0);
@@ -894,11 +916,16 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
lower_32_bits(ring->wptr));
+ fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
+ /* Unstall DPG */
+ WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
+ 0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
return 0;
}
static int vcn_v2_0_start(struct amdgpu_device *adev)
{
+ volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared_cpu_addr;
struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
uint32_t rb_bufsz, tmp;
uint32_t lmi_swap_cntl;
@@ -1033,6 +1060,7 @@ static int vcn_v2_0_start(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);
+ fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
/* programm the RB_BASE for ring buffer */
WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
lower_32_bits(ring->gpu_addr));
@@ -1045,20 +1073,25 @@ static int vcn_v2_0_start(struct amdgpu_device *adev)
ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
lower_32_bits(ring->wptr));
+ fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
+ fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
ring = &adev->vcn.inst->ring_enc[0];
WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
+ fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;
+ fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
ring = &adev->vcn.inst->ring_enc[1];
WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
+ fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;
return 0;
}
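The same bracketing pattern recurs throughout the VCN 2.0/2.5 hunks: stall DPG power-up, raise the queue's FW_QUEUE_RING_RESET bit in the shared structure, reprogram that ring's base and pointers, clear the bit, then unstall. A condensed sketch for the decode queue, assuming fw_shared points at inst->fw_shared_cpu_addr as in the hunks above:

/* hold off DPG power-up while the ring pointers are being reset */
WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
	 UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
	 ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;

/* ... reprogram RB base / RPTR / WPTR for the decode ring here ... */

fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
/* let DPG power-up proceed again */
WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
	 0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);

In the non-DPG start path the encode queues use the same bit-set / program / bit-clear sequence with their own encode_generalpurpose and encode_lowlatency mode fields.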
@@ -1180,6 +1213,7 @@ static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
if (!ret_code) {
+ volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared_cpu_addr;
/* pause DPG */
reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
@@ -1189,23 +1223,38 @@ static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code);
+ /* Stall DPG before WPTR/RPTR reset */
+ WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
+ UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
+ ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
/* Restore */
+ fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
ring = &adev->vcn.inst->ring_enc[0];
+ ring->wptr = 0;
WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
+ fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;
+ fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
ring = &adev->vcn.inst->ring_enc[1];
+ ring->wptr = 0;
WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
+ fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;
+ fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
+ fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
+ /* Unstall DPG */
+ WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
+ 0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
@@ -1796,7 +1845,6 @@ static int vcn_v2_0_start_sriov(struct amdgpu_device *adev)
uint32_t table_size = 0;
struct mmsch_v2_0_cmd_direct_write direct_wt = { {0} };
struct mmsch_v2_0_cmd_direct_read_modify_write direct_rd_mod_wt = { {0} };
- struct mmsch_v2_0_cmd_direct_polling direct_poll = { {0} };
struct mmsch_v2_0_cmd_end end = { {0} };
struct mmsch_v2_0_init_header *header;
uint32_t *init_table = adev->virt.mm_table.cpu_addr;
@@ -1806,8 +1854,6 @@ static int vcn_v2_0_start_sriov(struct amdgpu_device *adev)
direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
direct_rd_mod_wt.cmd_header.command_type =
MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
- direct_poll.cmd_header.command_type =
- MMSCH_COMMAND__DIRECT_REG_POLLING;
end.cmd_header.command_type = MMSCH_COMMAND__END;
if (header->vcn_table_offset == 0 && header->vcn_table_size == 0) {
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index c6363f5ad564..3c6eafb62ee6 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -86,7 +86,7 @@ static int vcn_v2_5_early_init(void *handle)
adev->vcn.num_vcn_inst = VCN25_MAX_HW_INSTANCES_ARCTURUS;
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- harvest = RREG32_SOC15(UVD, i, mmCC_UVD_HARVESTING);
+ harvest = RREG32_SOC15(VCN, i, mmCC_UVD_HARVESTING);
if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
adev->vcn.harvest_config |= 1 << i;
}
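From here on the vcn_v2_5 hunks change the first argument of the SOC15 register accessors from UVD to VCN. The register names themselves are unchanged; only the IP block used to look up the per-instance offset table differs, and depending on the kernel version VCN_HWIP may simply alias UVD_HWIP, so this reads largely as a naming cleanup toward the VCN block. A minimal before/after sketch of the accessor usage, using only names from the hunk above:

/* before: offset resolved via the UVD hardware-IP table */
harvest = RREG32_SOC15(UVD, i, mmCC_UVD_HARVESTING);

/* after: same register, resolved via the VCN hardware-IP table */
harvest = RREG32_SOC15(VCN, i, mmCC_UVD_HARVESTING);
if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
	adev->vcn.harvest_config |= 1 << i;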
@@ -165,6 +165,8 @@ static int vcn_v2_5_sw_init(void *handle)
return r;
for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
+ volatile struct amdgpu_fw_shared *fw_shared;
+
if (adev->vcn.harvest_config & (1 << j))
continue;
adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
@@ -175,15 +177,15 @@ static int vcn_v2_5_sw_init(void *handle)
adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;
adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
- adev->vcn.inst[j].external.scratch9 = SOC15_REG_OFFSET(UVD, j, mmUVD_SCRATCH9);
+ adev->vcn.inst[j].external.scratch9 = SOC15_REG_OFFSET(VCN, j, mmUVD_SCRATCH9);
adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
- adev->vcn.inst[j].external.data0 = SOC15_REG_OFFSET(UVD, j, mmUVD_GPCOM_VCPU_DATA0);
+ adev->vcn.inst[j].external.data0 = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_DATA0);
adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
- adev->vcn.inst[j].external.data1 = SOC15_REG_OFFSET(UVD, j, mmUVD_GPCOM_VCPU_DATA1);
+ adev->vcn.inst[j].external.data1 = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_DATA1);
adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
- adev->vcn.inst[j].external.cmd = SOC15_REG_OFFSET(UVD, j, mmUVD_GPCOM_VCPU_CMD);
+ adev->vcn.inst[j].external.cmd = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_CMD);
adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
- adev->vcn.inst[j].external.nop = SOC15_REG_OFFSET(UVD, j, mmUVD_NO_OP);
+ adev->vcn.inst[j].external.nop = SOC15_REG_OFFSET(VCN, j, mmUVD_NO_OP);
ring = &adev->vcn.inst[j].ring_dec;
ring->use_doorbell = true;
@@ -191,7 +193,8 @@ static int vcn_v2_5_sw_init(void *handle)
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
(amdgpu_sriov_vf(adev) ? 2*j : 8*j);
sprintf(ring->name, "vcn_dec_%d", j);
- r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq,
+ 0, AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
@@ -203,10 +206,15 @@ static int vcn_v2_5_sw_init(void *handle)
(amdgpu_sriov_vf(adev) ? (1 + i + 2*j) : (2 + i + 8*j));
sprintf(ring->name, "vcn_enc_%d.%d", j, i);
- r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0);
+ r = amdgpu_ring_init(adev, ring, 512,
+ &adev->vcn.inst[j].irq, 0,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
}
+
+ fw_shared = adev->vcn.inst[j].fw_shared_cpu_addr;
+ fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG);
}
if (amdgpu_sriov_vf(adev)) {
@@ -230,8 +238,16 @@ static int vcn_v2_5_sw_init(void *handle)
*/
static int vcn_v2_5_sw_fini(void *handle)
{
- int r;
+ int i, r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ volatile struct amdgpu_fw_shared *fw_shared;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+ fw_shared = adev->vcn.inst[i].fw_shared_cpu_addr;
+ fw_shared->present_flag_0 = 0;
+ }
if (amdgpu_sriov_vf(adev))
amdgpu_virt_free_mm_table(adev);
@@ -308,25 +324,16 @@ done:
static int vcn_v2_5_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct amdgpu_ring *ring;
- int i, j;
+ int i;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
continue;
- ring = &adev->vcn.inst[i].ring_dec;
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
(adev->vcn.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(VCN, i, mmUVD_STATUS)))
vcn_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
-
- ring->sched.ready = false;
-
- for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
- ring = &adev->vcn.inst[i].ring_enc[j];
- ring->sched.ready = false;
- }
}
return 0;
@@ -392,38 +399,47 @@ static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
continue;
/* cache window 0: fw */
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
+ WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo));
- WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
+ WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi));
- WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
+ WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
offset = 0;
} else {
- WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
+ WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
lower_32_bits(adev->vcn.inst[i].gpu_addr));
- WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
+ WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
upper_32_bits(adev->vcn.inst[i].gpu_addr));
offset = size;
- WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0,
+ WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET0,
AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
}
- WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE0, size);
+ WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE0, size);
/* cache window 1: stack */
- WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
+ WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
- WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
+ WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
- WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET1, 0);
- WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);
+ WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET1, 0);
+ WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);
/* cache window 2: context */
- WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
+ WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
lower_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
- WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
+ WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
upper_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
- WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET2, 0);
- WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
+ WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET2, 0);
+ WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
+
+ /* non-cache window */
+ WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
+ lower_32_bits(adev->vcn.inst[i].fw_shared_gpu_addr));
+ WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
+ upper_32_bits(adev->vcn.inst[i].fw_shared_gpu_addr));
+ WREG32_SOC15(VCN, i, mmUVD_VCPU_NONCACHE_OFFSET0, 0);
+ WREG32_SOC15(VCN, i, mmUVD_VCPU_NONCACHE_SIZE0,
+ AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)));
}
}
@@ -436,88 +452,91 @@ static void vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
if (!indirect) {
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+ VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo), 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+ VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi), 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
+ VCN, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
} else {
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
+ VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
+ VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
+ VCN, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
}
offset = 0;
} else {
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+ VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+ VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
offset = size;
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
+ VCN, 0, mmUVD_VCPU_CACHE_OFFSET0),
AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
}
if (!indirect)
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
+ VCN, 0, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
else
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);
+ VCN, 0, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);
/* cache window 1: stack */
if (!indirect) {
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
+ VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
+ VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
+ VCN, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
} else {
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
+ VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
+ VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
+ VCN, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
}
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);
+ VCN, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);
/* cache window 2: context */
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
+ VCN, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
+ VCN, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
+ VCN, 0, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);
+ VCN, 0, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);
/* non-cache window */
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW), 0, 0, indirect);
+ VCN, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst[inst_idx].fw_shared_gpu_addr), 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH), 0, 0, indirect);
+ VCN, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst[inst_idx].fw_shared_gpu_addr), 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
+ VCN, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0), 0, 0, indirect);
+ VCN, 0, mmUVD_VCPU_NONCACHE_SIZE0),
+ AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect);
/* VCN global tiling registers */
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_GFX8_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
+ VCN, 0, mmUVD_GFX8_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
}
/**
@@ -671,19 +690,19 @@ static void vcn_v2_5_clock_gating_dpg_mode(struct amdgpu_device *adev,
UVD_CGC_CTRL__VCPU_MODE_MASK |
UVD_CGC_CTRL__MMSCH_MODE_MASK);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect);
+ VCN, 0, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect);
/* turn off clock gating */
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_CGC_GATE), 0, sram_sel, indirect);
+ VCN, 0, mmUVD_CGC_GATE), 0, sram_sel, indirect);
/* turn on SUVD clock gating */
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);
+ VCN, 0, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);
/* turn on sw mode in UVD_SUVD_CGC_CTRL */
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
+ VCN, 0, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
}
/**
@@ -750,17 +769,18 @@ static void vcn_v2_5_enable_clock_gating(struct amdgpu_device *adev)
static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
+ volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared_cpu_addr;
struct amdgpu_ring *ring;
uint32_t rb_bufsz, tmp;
/* disable register anti-hang mechanism */
- WREG32_P(SOC15_REG_OFFSET(UVD, inst_idx, mmUVD_POWER_STATUS), 1,
+ WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 1,
~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
/* enable dynamic power gating mode */
- tmp = RREG32_SOC15(UVD, inst_idx, mmUVD_POWER_STATUS);
+ tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS);
tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
- WREG32_SOC15(UVD, inst_idx, mmUVD_POWER_STATUS, tmp);
+ WREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS, tmp);
if (indirect)
adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t*)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;
@@ -773,11 +793,11 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
tmp |= UVD_VCPU_CNTL__BLK_RST_MASK;
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);
+ VCN, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);
/* disable master interupt */
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_MASTINT_EN), 0, 0, indirect);
+ VCN, 0, mmUVD_MASTINT_EN), 0, 0, indirect);
/* setup mmUVD_LMI_CTRL */
tmp = (0x8 | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
@@ -789,28 +809,28 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
0x00100000L);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_LMI_CTRL), tmp, 0, indirect);
+ VCN, 0, mmUVD_LMI_CTRL), tmp, 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_MPC_CNTL),
+ VCN, 0, mmUVD_MPC_CNTL),
0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_MPC_SET_MUXA0),
+ VCN, 0, mmUVD_MPC_SET_MUXA0),
((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
(0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
(0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
(0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_MPC_SET_MUXB0),
+ VCN, 0, mmUVD_MPC_SET_MUXB0),
((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
(0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
(0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
(0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_MPC_SET_MUX),
+ VCN, 0, mmUVD_MPC_SET_MUX),
((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
(0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
(0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);
@@ -818,26 +838,26 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
vcn_v2_5_mc_resume_dpg_mode(adev, inst_idx, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
+ VCN, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect);
+ VCN, 0, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect);
/* enable LMI MC and UMC channels */
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_LMI_CTRL2), 0, 0, indirect);
+ VCN, 0, mmUVD_LMI_CTRL2), 0, 0, indirect);
/* unblock VCPU register access */
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_RB_ARB_CTRL), 0, 0, indirect);
+ VCN, 0, mmUVD_RB_ARB_CTRL), 0, 0, indirect);
tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);
+ VCN, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);
/* enable master interrupt */
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_MASTINT_EN),
+ VCN, 0, mmUVD_MASTINT_EN),
UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
if (indirect)
@@ -853,30 +873,41 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
- WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_CNTL, tmp);
+ WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_CNTL, tmp);
+
+ /* Stall DPG before WPTR/RPTR reset */
+ WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
+ UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
+ ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
+ fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
/* set the write pointer delay */
- WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR_CNTL, 0);
+ WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR_CNTL, 0);
/* set the wb address */
- WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_RPTR_ADDR,
+ WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR_ADDR,
(upper_32_bits(ring->gpu_addr) >> 2));
/* programm the RB_BASE for ring buffer */
- WREG32_SOC15(UVD, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
+ WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
lower_32_bits(ring->gpu_addr));
- WREG32_SOC15(UVD, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
+ WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
upper_32_bits(ring->gpu_addr));
/* Initialize the ring buffer's read and write pointers */
- WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_RPTR, 0);
+ WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR, 0);
- WREG32_SOC15(UVD, inst_idx, mmUVD_SCRATCH2, 0);
+ WREG32_SOC15(VCN, inst_idx, mmUVD_SCRATCH2, 0);
- ring->wptr = RREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_RPTR);
- WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR,
+ ring->wptr = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR);
+ WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR,
lower_32_bits(ring->wptr));
+ fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
+ /* Unstall DPG */
+ WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
+ 0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
+
return 0;
}
@@ -898,12 +929,12 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
}
/* disable register anti-hang mechanism */
- WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_POWER_STATUS), 0,
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_POWER_STATUS), 0,
~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
/* set uvd status busy */
- tmp = RREG32_SOC15(UVD, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
- WREG32_SOC15(UVD, i, mmUVD_STATUS, tmp);
+ tmp = RREG32_SOC15(VCN, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
+ WREG32_SOC15(VCN, i, mmUVD_STATUS, tmp);
}
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
@@ -916,44 +947,44 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
if (adev->vcn.harvest_config & (1 << i))
continue;
/* enable VCPU clock */
- WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);
/* disable master interrupt */
- WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN), 0,
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN), 0,
~UVD_MASTINT_EN__VCPU_EN_MASK);
/* setup mmUVD_LMI_CTRL */
- tmp = RREG32_SOC15(UVD, i, mmUVD_LMI_CTRL);
+ tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL);
tmp &= ~0xff;
- WREG32_SOC15(UVD, i, mmUVD_LMI_CTRL, tmp | 0x8|
+ WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL, tmp | 0x8|
UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
/* setup mmUVD_MPC_CNTL */
- tmp = RREG32_SOC15(UVD, i, mmUVD_MPC_CNTL);
+ tmp = RREG32_SOC15(VCN, i, mmUVD_MPC_CNTL);
tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
WREG32_SOC15(VCN, i, mmUVD_MPC_CNTL, tmp);
/* setup UVD_MPC_SET_MUXA0 */
- WREG32_SOC15(UVD, i, mmUVD_MPC_SET_MUXA0,
+ WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXA0,
((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
(0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
(0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
(0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));
/* setup UVD_MPC_SET_MUXB0 */
- WREG32_SOC15(UVD, i, mmUVD_MPC_SET_MUXB0,
+ WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXB0,
((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
(0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
(0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
(0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));
/* setup mmUVD_MPC_SET_MUX */
- WREG32_SOC15(UVD, i, mmUVD_MPC_SET_MUX,
+ WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUX,
((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
(0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
(0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
@@ -962,30 +993,31 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
vcn_v2_5_mc_resume(adev);
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[i].fw_shared_cpu_addr;
if (adev->vcn.harvest_config & (1 << i))
continue;
/* VCN global tiling registers */
- WREG32_SOC15(UVD, i, mmUVD_GFX8_ADDR_CONFIG,
+ WREG32_SOC15(VCN, i, mmUVD_GFX8_ADDR_CONFIG,
adev->gfx.config.gb_addr_config);
- WREG32_SOC15(UVD, i, mmUVD_GFX8_ADDR_CONFIG,
+ WREG32_SOC15(VCN, i, mmUVD_GFX8_ADDR_CONFIG,
adev->gfx.config.gb_addr_config);
/* enable LMI MC and UMC channels */
- WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2), 0,
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_LMI_CTRL2), 0,
~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
/* unblock VCPU register access */
- WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_ARB_CTRL), 0,
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL), 0,
~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
- WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), 0,
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
~UVD_VCPU_CNTL__BLK_RST_MASK);
for (k = 0; k < 10; ++k) {
uint32_t status;
for (j = 0; j < 100; ++j) {
- status = RREG32_SOC15(UVD, i, mmUVD_STATUS);
+ status = RREG32_SOC15(VCN, i, mmUVD_STATUS);
if (status & 2)
break;
if (amdgpu_emu_mode == 1)
@@ -998,11 +1030,11 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
break;
DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
- WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
UVD_VCPU_CNTL__BLK_RST_MASK,
~UVD_VCPU_CNTL__BLK_RST_MASK);
mdelay(10);
- WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), 0,
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
~UVD_VCPU_CNTL__BLK_RST_MASK);
mdelay(10);
@@ -1015,15 +1047,15 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
}
/* enable master interrupt */
- WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN),
UVD_MASTINT_EN__VCPU_EN_MASK,
~UVD_MASTINT_EN__VCPU_EN_MASK);
/* clear the busy bit of VCN_STATUS */
- WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS), 0,
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS), 0,
~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
- WREG32_SOC15(UVD, i, mmUVD_LMI_RBC_RB_VMID, 0);
+ WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_VMID, 0);
ring = &adev->vcn.inst[i].ring_dec;
/* force RBC into idle state */
@@ -1033,33 +1065,40 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
- WREG32_SOC15(UVD, i, mmUVD_RBC_RB_CNTL, tmp);
+ WREG32_SOC15(VCN, i, mmUVD_RBC_RB_CNTL, tmp);
+ fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
/* programm the RB_BASE for ring buffer */
- WREG32_SOC15(UVD, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
+ WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
lower_32_bits(ring->gpu_addr));
- WREG32_SOC15(UVD, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
+ WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
upper_32_bits(ring->gpu_addr));
/* Initialize the ring buffer's read and write pointers */
- WREG32_SOC15(UVD, i, mmUVD_RBC_RB_RPTR, 0);
+ WREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR, 0);
- ring->wptr = RREG32_SOC15(UVD, i, mmUVD_RBC_RB_RPTR);
- WREG32_SOC15(UVD, i, mmUVD_RBC_RB_WPTR,
+ ring->wptr = RREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR);
+ WREG32_SOC15(VCN, i, mmUVD_RBC_RB_WPTR,
lower_32_bits(ring->wptr));
- ring = &adev->vcn.inst[i].ring_enc[0];
- WREG32_SOC15(UVD, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
- WREG32_SOC15(UVD, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
- WREG32_SOC15(UVD, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
- WREG32_SOC15(UVD, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
- WREG32_SOC15(UVD, i, mmUVD_RB_SIZE, ring->ring_size / 4);
+ fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
+ fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
+ ring = &adev->vcn.inst[i].ring_enc[0];
+ WREG32_SOC15(VCN, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
+ WREG32_SOC15(VCN, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
+ WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
+ WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(VCN, i, mmUVD_RB_SIZE, ring->ring_size / 4);
+ fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;
+
+ fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
ring = &adev->vcn.inst[i].ring_enc[1];
- WREG32_SOC15(UVD, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
- WREG32_SOC15(UVD, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
- WREG32_SOC15(UVD, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
- WREG32_SOC15(UVD, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
- WREG32_SOC15(UVD, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
+ WREG32_SOC15(VCN, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
+ WREG32_SOC15(VCN, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
+ WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
+ WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(VCN, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
+ fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;
}
return 0;
@@ -1079,33 +1118,33 @@ static int vcn_v2_5_mmsch_start(struct amdgpu_device *adev,
* 1, write to vce_mmsch_vf_ctx_addr_lo/hi register with GPU mc addr of
* memory descriptor location
*/
- WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
- WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));
+ WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
+ WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));
/* 2, update vmid of descriptor */
- data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID);
+ data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID);
data &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
/* use domain0 for MM scheduler */
data |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
- WREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID, data);
+ WREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID, data);
/* 3, notify mmsch about the size of this descriptor */
- WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_SIZE, size);
+ WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_SIZE, size);
/* 4, set resp to zero */
- WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP, 0);
+ WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP, 0);
/*
* 5, kick off the initialization and wait until
* VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero
*/
- WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_HOST, 0x10000001);
+ WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_HOST, 0x10000001);
- data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP);
+ data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP);
loop = 10;
while ((data & 0x10000002) != 0x10000002) {
udelay(100);
- data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP);
+ data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP);
loop--;
if (!loop)
break;
@@ -1128,14 +1167,12 @@ static int vcn_v2_5_sriov_start(struct amdgpu_device *adev)
uint32_t table_size = 0;
struct mmsch_v1_0_cmd_direct_write direct_wt = { { 0 } };
struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { { 0 } };
- struct mmsch_v1_0_cmd_direct_polling direct_poll = { { 0 } };
struct mmsch_v1_0_cmd_end end = { { 0 } };
uint32_t *init_table = adev->virt.mm_table.cpu_addr;
struct mmsch_v1_1_init_header *header = (struct mmsch_v1_1_init_header *)init_table;
direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
- direct_poll.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_POLLING;
end.cmd_header.command_type = MMSCH_COMMAND__END;
header->version = MMSCH_VERSION;
@@ -1150,93 +1187,93 @@ static int vcn_v2_5_sriov_start(struct amdgpu_device *adev)
table_size = 0;
MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(
- SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
+ SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS),
~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
/* mc resume*/
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
MMSCH_V1_0_INSERT_DIRECT_WT(
- SOC15_REG_OFFSET(UVD, i,
+ SOC15_REG_OFFSET(VCN, i,
mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo);
MMSCH_V1_0_INSERT_DIRECT_WT(
- SOC15_REG_OFFSET(UVD, i,
+ SOC15_REG_OFFSET(VCN, i,
mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi);
offset = 0;
MMSCH_V1_0_INSERT_DIRECT_WT(
- SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0), 0);
+ SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET0), 0);
} else {
MMSCH_V1_0_INSERT_DIRECT_WT(
- SOC15_REG_OFFSET(UVD, i,
+ SOC15_REG_OFFSET(VCN, i,
mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
lower_32_bits(adev->vcn.inst[i].gpu_addr));
MMSCH_V1_0_INSERT_DIRECT_WT(
- SOC15_REG_OFFSET(UVD, i,
+ SOC15_REG_OFFSET(VCN, i,
mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
upper_32_bits(adev->vcn.inst[i].gpu_addr));
offset = size;
MMSCH_V1_0_INSERT_DIRECT_WT(
- SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0),
+ SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET0),
AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
}
MMSCH_V1_0_INSERT_DIRECT_WT(
- SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0),
+ SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE0),
size);
MMSCH_V1_0_INSERT_DIRECT_WT(
- SOC15_REG_OFFSET(UVD, i,
+ SOC15_REG_OFFSET(VCN, i,
mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
MMSCH_V1_0_INSERT_DIRECT_WT(
- SOC15_REG_OFFSET(UVD, i,
+ SOC15_REG_OFFSET(VCN, i,
mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
MMSCH_V1_0_INSERT_DIRECT_WT(
- SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET1),
+ SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET1),
0);
MMSCH_V1_0_INSERT_DIRECT_WT(
- SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE1),
+ SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE1),
AMDGPU_VCN_STACK_SIZE);
MMSCH_V1_0_INSERT_DIRECT_WT(
- SOC15_REG_OFFSET(UVD, i,
+ SOC15_REG_OFFSET(VCN, i,
mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
lower_32_bits(adev->vcn.inst[i].gpu_addr + offset +
AMDGPU_VCN_STACK_SIZE));
MMSCH_V1_0_INSERT_DIRECT_WT(
- SOC15_REG_OFFSET(UVD, i,
+ SOC15_REG_OFFSET(VCN, i,
mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
upper_32_bits(adev->vcn.inst[i].gpu_addr + offset +
AMDGPU_VCN_STACK_SIZE));
MMSCH_V1_0_INSERT_DIRECT_WT(
- SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET2),
+ SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET2),
0);
MMSCH_V1_0_INSERT_DIRECT_WT(
- SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2),
+ SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE2),
AMDGPU_VCN_CONTEXT_SIZE);
ring = &adev->vcn.inst[i].ring_enc[0];
ring->wptr = 0;
MMSCH_V1_0_INSERT_DIRECT_WT(
- SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_LO),
+ SOC15_REG_OFFSET(VCN, i, mmUVD_RB_BASE_LO),
lower_32_bits(ring->gpu_addr));
MMSCH_V1_0_INSERT_DIRECT_WT(
- SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_HI),
+ SOC15_REG_OFFSET(VCN, i, mmUVD_RB_BASE_HI),
upper_32_bits(ring->gpu_addr));
MMSCH_V1_0_INSERT_DIRECT_WT(
- SOC15_REG_OFFSET(UVD, i, mmUVD_RB_SIZE),
+ SOC15_REG_OFFSET(VCN, i, mmUVD_RB_SIZE),
ring->ring_size / 4);
ring = &adev->vcn.inst[i].ring_dec;
ring->wptr = 0;
MMSCH_V1_0_INSERT_DIRECT_WT(
- SOC15_REG_OFFSET(UVD, i,
+ SOC15_REG_OFFSET(VCN, i,
mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
lower_32_bits(ring->gpu_addr));
MMSCH_V1_0_INSERT_DIRECT_WT(
- SOC15_REG_OFFSET(UVD, i,
+ SOC15_REG_OFFSET(VCN, i,
mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
upper_32_bits(ring->gpu_addr));
@@ -1248,7 +1285,7 @@ static int vcn_v2_5_sriov_start(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
MMSCH_V1_0_INSERT_DIRECT_WT(
- SOC15_REG_OFFSET(UVD, i, mmUVD_RBC_RB_CNTL), tmp);
+ SOC15_REG_OFFSET(VCN, i, mmUVD_RBC_RB_CNTL), tmp);
/* add end packet */
memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
@@ -1269,24 +1306,24 @@ static int vcn_v2_5_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
uint32_t tmp;
/* Wait for power status to be 1 */
- SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS, 1,
+ SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
/* wait for read ptr to be equal to write ptr */
- tmp = RREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR);
- SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
+ tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR);
+ SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
- tmp = RREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR2);
- SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF, ret_code);
+ tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2);
+ SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF, ret_code);
- tmp = RREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
- SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
+ tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
+ SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
- SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS, 1,
+ SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
/* disable dynamic power gating mode */
- WREG32_P(SOC15_REG_OFFSET(UVD, inst_idx, mmUVD_POWER_STATUS), 0,
+ WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 0,
~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
return 0;
@@ -1330,17 +1367,17 @@ static int vcn_v2_5_stop(struct amdgpu_device *adev)
return r;
/* block VCPU register access */
- WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_ARB_CTRL),
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL),
UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
/* reset VCPU */
- WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
UVD_VCPU_CNTL__BLK_RST_MASK,
~UVD_VCPU_CNTL__BLK_RST_MASK);
/* disable VCPU clock */
- WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), 0,
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
~(UVD_VCPU_CNTL__CLK_EN_MASK));
/* clear status */
@@ -1349,7 +1386,7 @@ static int vcn_v2_5_stop(struct amdgpu_device *adev)
vcn_v2_5_enable_clock_gating(adev);
/* enable register anti-hang mechanism */
- WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_POWER_STATUS),
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_POWER_STATUS),
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK,
~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
}
@@ -1365,55 +1402,69 @@ static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
{
struct amdgpu_ring *ring;
uint32_t reg_data = 0;
- int ret_code;
+ int ret_code = 0;
/* pause/unpause if state is changed */
if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
DRM_DEBUG("dpg pause state changed %d -> %d",
adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based);
- reg_data = RREG32_SOC15(UVD, inst_idx, mmUVD_DPG_PAUSE) &
+ reg_data = RREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE) &
(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
- ret_code = 0;
- SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS, 0x1,
+ SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 0x1,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
if (!ret_code) {
+ volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared_cpu_addr;
+
/* pause DPG */
reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
- WREG32_SOC15(UVD, inst_idx, mmUVD_DPG_PAUSE, reg_data);
+ WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data);
/* wait for ACK */
- SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_DPG_PAUSE,
+ SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_DPG_PAUSE,
UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code);
+ /* Stall DPG before WPTR/RPTR reset */
+ WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
+ UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
+ ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
+
/* Restore */
+ fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
ring = &adev->vcn.inst[inst_idx].ring_enc[0];
- WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_LO, ring->gpu_addr);
- WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
- WREG32_SOC15(UVD, inst_idx, mmUVD_RB_SIZE, ring->ring_size / 4);
- WREG32_SOC15(UVD, inst_idx, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
- WREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
-
+ ring->wptr = 0;
+ WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO, ring->gpu_addr);
+ WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE, ring->ring_size / 4);
+ WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
+ WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
+ fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;
+
+ fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
ring = &adev->vcn.inst[inst_idx].ring_enc[1];
- WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_LO2, ring->gpu_addr);
- WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
- WREG32_SOC15(UVD, inst_idx, mmUVD_RB_SIZE2, ring->ring_size / 4);
- WREG32_SOC15(UVD, inst_idx, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
- WREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
-
- WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR,
- RREG32_SOC15(UVD, inst_idx, mmUVD_SCRATCH2) & 0x7FFFFFFF);
-
- SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS,
+ ring->wptr = 0;
+ WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO2, ring->gpu_addr);
+ WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE2, ring->ring_size / 4);
+ WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
+ WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
+ fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;
+
+ /* Unstall DPG */
+ WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
+ 0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
+
+ SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS,
UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
}
} else {
- /* unpause dpg, no need to wait */
reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
- WREG32_SOC15(UVD, inst_idx, mmUVD_DPG_PAUSE, reg_data);
+ WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data);
+ SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 0x1,
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
}
adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
}
@@ -1432,7 +1483,7 @@ static uint64_t vcn_v2_5_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_RPTR);
+ return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_RPTR);
}
/**
@@ -1449,7 +1500,7 @@ static uint64_t vcn_v2_5_dec_ring_get_wptr(struct amdgpu_ring *ring)
if (ring->use_doorbell)
return adev->wb.wb[ring->wptr_offs];
else
- return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR);
+ return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR);
}
/**
@@ -1463,15 +1514,11 @@ static void vcn_v2_5_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
- WREG32_SOC15(UVD, ring->me, mmUVD_SCRATCH2,
- lower_32_bits(ring->wptr) | 0x80000000);
-
if (ring->use_doorbell) {
adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
} else {
- WREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
+ WREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}
}
@@ -1517,9 +1564,9 @@ static uint64_t vcn_v2_5_enc_ring_get_rptr(struct amdgpu_ring *ring)
struct amdgpu_device *adev = ring->adev;
if (ring == &adev->vcn.inst[ring->me].ring_enc[0])
- return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR);
+ return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR);
else
- return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR2);
+ return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR2);
}
/**
@@ -1537,12 +1584,12 @@ static uint64_t vcn_v2_5_enc_ring_get_wptr(struct amdgpu_ring *ring)
if (ring->use_doorbell)
return adev->wb.wb[ring->wptr_offs];
else
- return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR);
+ return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR);
} else {
if (ring->use_doorbell)
return adev->wb.wb[ring->wptr_offs];
else
- return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2);
+ return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2);
}
}
@@ -1562,14 +1609,14 @@ static void vcn_v2_5_enc_ring_set_wptr(struct amdgpu_ring *ring)
adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
} else {
- WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
+ WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
}
} else {
if (ring->use_doorbell) {
adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
} else {
- WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
+ WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
}
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 3ce10e05d0d6..af8986a55354 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -448,27 +448,6 @@ static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
return true;
}
-static void vi_detect_hw_virtualization(struct amdgpu_device *adev)
-{
- uint32_t reg = 0;
-
- if (adev->asic_type == CHIP_TONGA ||
- adev->asic_type == CHIP_FIJI) {
- reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
- /* bit0: 0 means pf and 1 means vf */
- if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, FUNC_IDENTIFIER))
- adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
- /* bit31: 0 means disable IOV and 1 means enable */
- if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, IOV_ENABLE))
- adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
- }
-
- if (reg == 0) {
- if (is_virtual_machine()) /* passthrough mode exclus sr-iov mode */
- adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
- }
-}
-
static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
{mmGRBM_STATUS},
{mmGRBM_STATUS2},
@@ -1728,9 +1707,6 @@ static const struct amdgpu_ip_block_version vi_common_ip_block =
int vi_set_ip_blocks(struct amdgpu_device *adev)
{
- /* in early init stage, vbios code won't work */
- vi_detect_hw_virtualization(adev);
-
if (amdgpu_sriov_vf(adev))
adev->virt.ops = &xgpu_vi_virt_ops;
diff --git a/drivers/gpu/drm/amd/amdgpu/vid.h b/drivers/gpu/drm/amd/amdgpu/vid.h
index 19ddd2312e00..7a01e6133798 100644
--- a/drivers/gpu/drm/amd/amdgpu/vid.h
+++ b/drivers/gpu/drm/amd/amdgpu/vid.h
@@ -332,7 +332,7 @@
# define PACKET3_DMA_DATA_CMD_SAIC (1 << 28)
# define PACKET3_DMA_DATA_CMD_DAIC (1 << 29)
# define PACKET3_DMA_DATA_CMD_RAW_WAIT (1 << 30)
-#define PACKET3_AQUIRE_MEM 0x58
+#define PACKET3_ACQUIRE_MEM 0x58
#define PACKET3_REWIND 0x59
#define PACKET3_LOAD_UCONFIG_REG 0x5E
#define PACKET3_LOAD_SH_REG 0x5F
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 0ec5f25adf56..cf0017f4d9d5 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -215,6 +215,7 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
}
q_properties->is_interop = false;
+ q_properties->is_gws = false;
q_properties->queue_percent = args->queue_percentage;
q_properties->priority = args->queue_priority;
q_properties->queue_address = args->ring_base_address;
@@ -1322,6 +1323,10 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
goto err_free;
}
+ /* Update the VRAM usage count */
+ if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM)
+ WRITE_ONCE(pdd->vram_usage, pdd->vram_usage + args->size);
+
mutex_unlock(&p->mutex);
args->handle = MAKE_HANDLE(args->gpu_id, idr_handle);
@@ -1337,7 +1342,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
return 0;
err_free:
- amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem);
+ amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem, NULL);
err_unlock:
mutex_unlock(&p->mutex);
return err;
@@ -1351,6 +1356,7 @@ static int kfd_ioctl_free_memory_of_gpu(struct file *filep,
void *mem;
struct kfd_dev *dev;
int ret;
+ uint64_t size = 0;
dev = kfd_device_by_id(GET_GPU_ID(args->handle));
if (!dev)
@@ -1373,7 +1379,7 @@ static int kfd_ioctl_free_memory_of_gpu(struct file *filep,
}
ret = amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd,
- (struct kgd_mem *)mem);
+ (struct kgd_mem *)mem, &size);
/* If freeing the buffer failed, leave the handle in place for
* clean-up during process tear-down.
@@ -1382,6 +1388,8 @@ static int kfd_ioctl_free_memory_of_gpu(struct file *filep,
kfd_process_device_remove_obj_handle(
pdd, GET_IDR_HANDLE(args->handle));
+ WRITE_ONCE(pdd->vram_usage, pdd->vram_usage - size);
+
err_unlock:
mutex_unlock(&p->mutex);
return ret;
@@ -1584,6 +1592,45 @@ copy_from_user_failed:
return err;
}
+static int kfd_ioctl_alloc_queue_gws(struct file *filep,
+ struct kfd_process *p, void *data)
+{
+ int retval;
+ struct kfd_ioctl_alloc_queue_gws_args *args = data;
+ struct queue *q;
+ struct kfd_dev *dev;
+
+ mutex_lock(&p->mutex);
+ q = pqm_get_user_queue(&p->pqm, args->queue_id);
+
+ if (q) {
+ dev = q->device;
+ } else {
+ retval = -EINVAL;
+ goto out_unlock;
+ }
+
+ if (!dev->gws) {
+ retval = -ENODEV;
+ goto out_unlock;
+ }
+
+ if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
+ retval = -ENODEV;
+ goto out_unlock;
+ }
+
+ retval = pqm_set_gws(&p->pqm, args->queue_id, args->num_gws ? dev->gws : NULL);
+ mutex_unlock(&p->mutex);
+
+ args->first_gws = 0;
+ return retval;
+
+out_unlock:
+ mutex_unlock(&p->mutex);
+ return retval;
+}
+
static int kfd_ioctl_get_dmabuf_info(struct file *filep,
struct kfd_process *p, void *data)
{
@@ -1687,7 +1734,7 @@ static int kfd_ioctl_import_dmabuf(struct file *filep,
return 0;
err_free:
- amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem);
+ amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem, NULL);
err_unlock:
mutex_unlock(&p->mutex);
return r;
@@ -1786,6 +1833,8 @@ static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
AMDKFD_IOCTL_DEF(AMDKFD_IOC_IMPORT_DMABUF,
kfd_ioctl_import_dmabuf, 0),
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_ALLOC_QUEUE_GWS,
+ kfd_ioctl_alloc_queue_gws, 0),
};
#define AMDKFD_CORE_IOCTL_COUNT ARRAY_SIZE(amdkfd_ioctls)
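
The kfd_chardev.c hunks above add the AMDKFD_IOC_ALLOC_QUEUE_GWS ioctl, which attaches (or, with num_gws == 0, detaches) the device's global wave sync resource to an existing user queue. A minimal userspace sketch of how a runtime might drive it; the field names (queue_id, num_gws, first_gws) come from the handler above, while the uapi header path and the file-descriptor handling are assumptions of this example, not part of the patch.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

/* Request GWS for an already-created KFD queue; returns first_gws or -1. */
static int alloc_gws_for_queue(int kfd_fd, unsigned int queue_id,
			       unsigned int num_gws)
{
	struct kfd_ioctl_alloc_queue_gws_args args = {
		.queue_id = queue_id,
		.num_gws  = num_gws,	/* 0 releases the GWS mapping */
	};

	if (ioctl(kfd_fd, AMDKFD_IOC_ALLOC_QUEUE_GWS, &args) < 0) {
		perror("AMDKFD_IOC_ALLOC_QUEUE_GWS");
		return -1;
	}
	/* The handler above currently always reports first_gws as 0. */
	return (int)args.first_gws;
}
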
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index de9f68d5c312..1009a3b8dcc2 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -502,7 +502,7 @@ int kfd_parse_crat_table(void *crat_image, struct list_head *device_list,
num_nodes = crat_table->num_domains;
image_len = crat_table->length;
- pr_info("Parsing CRAT table with %d nodes\n", num_nodes);
+ pr_debug("Parsing CRAT table with %d nodes\n", num_nodes);
for (node_id = 0; node_id < num_nodes; node_id++) {
top_dev = kfd_create_topology_device(device_list);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 05bc6d96ec52..0491ab2b4a9b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -569,6 +569,23 @@ static void kfd_cwsr_init(struct kfd_dev *kfd)
}
}
+static int kfd_gws_init(struct kfd_dev *kfd)
+{
+ int ret = 0;
+
+ if (kfd->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS)
+ return 0;
+
+ if (hws_gws_support
+ || (kfd->device_info->asic_family >= CHIP_VEGA10
+ && kfd->device_info->asic_family <= CHIP_RAVEN
+ && kfd->mec2_fw_version >= 0x1b3))
+ ret = amdgpu_amdkfd_alloc_gws(kfd->kgd,
+ amdgpu_amdkfd_get_num_gws(kfd->kgd), &kfd->gws);
+
+ return ret;
+}
+
bool kgd2kfd_device_init(struct kfd_dev *kfd,
struct drm_device *ddev,
const struct kgd2kfd_shared_resources *gpu_resources)
@@ -578,6 +595,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
kfd->ddev = ddev;
kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd,
KGD_ENGINE_MEC1);
+ kfd->mec2_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd,
+ KGD_ENGINE_MEC2);
kfd->sdma_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd,
KGD_ENGINE_SDMA1);
kfd->shared_resources = *gpu_resources;
@@ -598,13 +617,6 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
} else
kfd->max_proc_per_quantum = hws_max_conc_proc;
- /* Allocate global GWS that is shared by all KFD processes */
- if (hws_gws_support && amdgpu_amdkfd_alloc_gws(kfd->kgd,
- amdgpu_amdkfd_get_num_gws(kfd->kgd), &kfd->gws)) {
- dev_err(kfd_device, "Could not allocate %d gws\n",
- amdgpu_amdkfd_get_num_gws(kfd->kgd));
- goto out;
- }
/* calculate max size of mqds needed for queues */
size = max_num_of_queues_per_device *
kfd->device_info->mqd_size_aligned;
@@ -662,6 +674,15 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
goto device_queue_manager_error;
}
+ /* If supported on this device, allocate global GWS that is shared
+ * by all KFD processes
+ */
+ if (kfd_gws_init(kfd)) {
+ dev_err(kfd_device, "Could not allocate %d gws\n",
+ amdgpu_amdkfd_get_num_gws(kfd->kgd));
+ goto gws_error;
+ }
+
if (kfd_iommu_device_init(kfd)) {
dev_err(kfd_device, "Error initializing iommuv2\n");
goto device_iommu_error;
@@ -691,6 +712,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
kfd_topology_add_device_error:
kfd_resume_error:
device_iommu_error:
+gws_error:
device_queue_manager_uninit(kfd->dqm);
device_queue_manager_error:
kfd_interrupt_exit(kfd);
@@ -701,7 +723,7 @@ kfd_doorbell_error:
kfd_gtt_sa_init_error:
amdgpu_amdkfd_free_gtt_mem(kfd->kgd, kfd->gtt_mem);
alloc_gtt_mem_failure:
- if (hws_gws_support)
+ if (kfd->gws)
amdgpu_amdkfd_free_gws(kfd->kgd, kfd->gws);
dev_err(kfd_device,
"device %x:%x NOT added due to errors\n",
@@ -720,7 +742,7 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd)
kfd_doorbell_fini(kfd);
kfd_gtt_sa_fini(kfd);
amdgpu_amdkfd_free_gtt_mem(kfd->kgd, kfd->gtt_mem);
- if (hws_gws_support)
+ if (kfd->gws)
amdgpu_amdkfd_free_gws(kfd->kgd, kfd->gws);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 77ea0f0cb163..e9c4867abeff 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -505,8 +505,13 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
deallocate_vmid(dqm, qpd, q);
}
qpd->queue_count--;
- if (q->properties.is_active)
+ if (q->properties.is_active) {
decrement_queue_count(dqm, q->properties.type);
+ if (q->properties.is_gws) {
+ dqm->gws_queue_count--;
+ qpd->mapped_gws_queue = false;
+ }
+ }
return retval;
}
@@ -583,6 +588,20 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
else if (!q->properties.is_active && prev_active)
decrement_queue_count(dqm, q->properties.type);
+ if (q->gws && !q->properties.is_gws) {
+ if (q->properties.is_active) {
+ dqm->gws_queue_count++;
+ pdd->qpd.mapped_gws_queue = true;
+ }
+ q->properties.is_gws = true;
+ } else if (!q->gws && q->properties.is_gws) {
+ if (q->properties.is_active) {
+ dqm->gws_queue_count--;
+ pdd->qpd.mapped_gws_queue = false;
+ }
+ q->properties.is_gws = false;
+ }
+
if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS)
retval = map_queues_cpsch(dqm);
else if (q->properties.is_active &&
@@ -631,6 +650,10 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
q->properties.type)];
q->properties.is_active = false;
decrement_queue_count(dqm, q->properties.type);
+ if (q->properties.is_gws) {
+ dqm->gws_queue_count--;
+ qpd->mapped_gws_queue = false;
+ }
if (WARN_ONCE(!dqm->sched_running, "Evict when stopped\n"))
continue;
@@ -744,6 +767,10 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
q->properties.type)];
q->properties.is_active = true;
increment_queue_count(dqm, q->properties.type);
+ if (q->properties.is_gws) {
+ dqm->gws_queue_count++;
+ qpd->mapped_gws_queue = true;
+ }
if (WARN_ONCE(!dqm->sched_running, "Restore when stopped\n"))
continue;
@@ -913,6 +940,7 @@ static int initialize_nocpsch(struct device_queue_manager *dqm)
INIT_LIST_HEAD(&dqm->queues);
dqm->active_queue_count = dqm->next_pipe_to_allocate = 0;
dqm->active_cp_queue_count = 0;
+ dqm->gws_queue_count = 0;
for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
int pipe_offset = pipe * get_queues_per_pipe(dqm);
@@ -1061,7 +1089,9 @@ static int set_sched_resources(struct device_queue_manager *dqm)
break;
}
- res.queue_mask |= (1ull << i);
+ res.queue_mask |= 1ull
+ << amdgpu_queue_mask_bit_to_set_resource_bit(
+ (struct amdgpu_device *)dqm->dev->kgd, i);
}
res.gws_mask = ~0ull;
res.oac_mask = res.gds_heap_base = res.gds_heap_size = 0;
@@ -1082,7 +1112,7 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
INIT_LIST_HEAD(&dqm->queues);
dqm->active_queue_count = dqm->processes_count = 0;
dqm->active_cp_queue_count = 0;
-
+ dqm->gws_queue_count = 0;
dqm->active_runlist = false;
dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm));
dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm));
@@ -1432,6 +1462,10 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
if (retval == -ETIME)
qpd->reset_wavefronts = true;
+ if (q->properties.is_gws) {
+ dqm->gws_queue_count--;
+ qpd->mapped_gws_queue = false;
+ }
}
/*
@@ -1650,8 +1684,13 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
deallocate_sdma_queue(dqm, q);
- if (q->properties.is_active)
+ if (q->properties.is_active) {
decrement_queue_count(dqm, q->properties.type);
+ if (q->properties.is_gws) {
+ dqm->gws_queue_count--;
+ qpd->mapped_gws_queue = false;
+ }
+ }
dqm->total_queue_count--;
}
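
The device_queue_manager.c hunks repeat one bookkeeping rule at every activate, deactivate, evict, restore and destroy site: a GWS-capable queue counts toward dqm->gws_queue_count only while it is active, and qpd->mapped_gws_queue mirrors whether such a queue is currently mapped. A standalone sketch of that invariant; the struct definitions are trimmed stand-ins for illustration, not the driver's real types.

#include <stdbool.h>

struct qpd   { bool mapped_gws_queue; };
struct dqm   { unsigned int gws_queue_count; };
struct queue { bool is_active; bool is_gws; };

/* Call whenever a GWS-capable queue becomes active (on = true) or inactive. */
static void account_gws_queue(struct dqm *dqm, struct qpd *qpd,
			      const struct queue *q, bool on)
{
	if (!q->is_gws)
		return;
	if (on) {
		dqm->gws_queue_count++;
		qpd->mapped_gws_queue = true;
	} else {
		dqm->gws_queue_count--;
		qpd->mapped_gws_queue = false;
	}
}
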
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index 50d919f814e9..4afa015c69b1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -182,6 +182,7 @@ struct device_queue_manager {
unsigned int processes_count;
unsigned int active_queue_count;
unsigned int active_cp_queue_count;
+ unsigned int gws_queue_count;
unsigned int total_queue_count;
unsigned int next_pipe_to_allocate;
unsigned int *allocated_queues;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index 15476fca8fa6..a9583b95fcc1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -901,7 +901,7 @@ void kfd_signal_iommu_event(struct kfd_dev *dev, unsigned int pasid,
memset(&memory_exception_data, 0, sizeof(memory_exception_data));
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = find_vma(mm, address);
memory_exception_data.gpu_id = dev->id;
@@ -924,7 +924,7 @@ void kfd_signal_iommu_event(struct kfd_dev *dev, unsigned int pasid,
memory_exception_data.failure.NoExecute = 0;
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
mmput(mm);
pr_debug("notpresent %d, noexecute %d, readonly %d\n",
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
index e05d75ecda21..fce6ccabe38b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
@@ -37,7 +37,7 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev,
vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry);
if (vmid < dev->vm_info.first_vmid_kfd ||
vmid > dev->vm_info.last_vmid_kfd)
- return 0;
+ return false;
source_id = SOC15_SOURCE_ID_FROM_IH_ENTRY(ih_ring_entry);
client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry);
@@ -69,7 +69,7 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev,
/* If there is no valid PASID, it's likely a bug */
if (WARN_ONCE(pasid == 0, "Bug: No PASID in KFD interrupt"))
- return 0;
+ return false;
/* Interrupt types we care about: various signals and faults.
* They will be forwarded to a work queue (see below).
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
index 8d871514671e..7c8786b9eb0a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
@@ -192,7 +192,7 @@ static int iommu_invalid_ppr_cb(struct pci_dev *pdev, int pasid,
dev_warn_ratelimited(kfd_device,
"Invalid PPR device %x:%x.%x pasid 0x%x address 0x%lX flags 0x%X",
- PCI_BUS_NUM(pdev->devfn),
+ pdev->bus->number,
PCI_SLOT(pdev->devfn),
PCI_FUNC(pdev->devfn),
pasid,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index bae706462f96..a2b77d1df854 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -126,6 +126,7 @@ static bool kq_initialize(struct kernel_queue *kq, struct kfd_dev *dev,
prop.queue_size = queue_size;
prop.is_interop = false;
+ prop.is_gws = false;
prop.priority = 1;
prop.queue_percent = 100;
prop.type = type;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index efdb75e7677b..685ca82d42fe 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -41,7 +41,7 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
unsigned int *rlib_size,
bool *over_subscription)
{
- unsigned int process_count, queue_count, compute_queue_count;
+ unsigned int process_count, queue_count, compute_queue_count, gws_queue_count;
unsigned int map_queue_size;
unsigned int max_proc_per_quantum = 1;
struct kfd_dev *dev = pm->dqm->dev;
@@ -49,6 +49,7 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
process_count = pm->dqm->processes_count;
queue_count = pm->dqm->active_queue_count;
compute_queue_count = pm->dqm->active_cp_queue_count;
+ gws_queue_count = pm->dqm->gws_queue_count;
/* check if there is over subscription
* Note: the arbitration between the number of VMIDs and
@@ -61,7 +62,8 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
max_proc_per_quantum = dev->max_proc_per_quantum;
if ((process_count > max_proc_per_quantum) ||
- compute_queue_count > get_cp_queues_num(pm->dqm)) {
+ compute_queue_count > get_cp_queues_num(pm->dqm) ||
+ gws_queue_count > 1) {
*over_subscription = true;
pr_debug("Over subscribed runlist\n");
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
index 2de01009f1b6..bdca9dc5f118 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
@@ -43,7 +43,7 @@ static int pm_map_process_v9(struct packet_manager *pm,
packet->bitfields2.pasid = qpd->pqm->process->pasid;
packet->bitfields14.gds_size = qpd->gds_size & 0x3F;
packet->bitfields14.gds_size_hi = (qpd->gds_size >> 6) & 0xF;
- packet->bitfields14.num_gws = qpd->num_gws;
+ packet->bitfields14.num_gws = (qpd->mapped_gws_queue) ? qpd->num_gws : 0;
packet->bitfields14.num_oac = qpd->num_oac;
packet->bitfields14.sdma_enable = 1;
packet->bitfields14.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index c24cad3c64ed..f0587d94294d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -282,6 +282,7 @@ struct kfd_dev {
/* Firmware versions */
uint16_t mec_fw_version;
+ uint16_t mec2_fw_version;
uint16_t sdma_fw_version;
/* Maximum process number mapped to HW scheduler */
@@ -410,6 +411,10 @@ enum KFD_QUEUE_PRIORITY {
* @is_active: Defines if the queue is active or not. @is_active and
* @is_evicted are protected by the DQM lock.
*
+ * @is_gws: Defines if the queue has been updated to be GWS-capable or not.
+ * @is_gws should be protected by the DQM lock, since changing it may also
+ * require updating the DQM's count of GWS queues.
+ *
* @vmid: If the scheduling mode is no cp scheduling the field defines the vmid
* of the queue.
*
@@ -432,6 +437,7 @@ struct queue_properties {
bool is_interop;
bool is_evicted;
bool is_active;
+ bool is_gws;
/* Not relevant for user mode queues in cp scheduling */
unsigned int vmid;
/* Relevant only for sdma queues*/
@@ -563,6 +569,14 @@ struct qcm_process_device {
*/
bool reset_wavefronts;
+ /* This flag tells us if this process has a GWS-capable
+ * queue that will be mapped into the runlist. It's
+ * possible to request a GWS BO, but not have the queue
+ * currently mapped, and this changes how the MAP_PROCESS
+ * PM4 packet is configured.
+ */
+ bool mapped_gws_queue;
+
/*
* All the memory management data should be here too
*/
@@ -615,6 +629,8 @@ enum kfd_pdd_bound {
PDD_BOUND_SUSPENDED,
};
+#define MAX_VRAM_FILENAME_LEN 11
+
/* Data that is per-process-per device. */
struct kfd_process_device {
/*
@@ -657,6 +673,11 @@ struct kfd_process_device {
/* Is this process/pasid bound to this device? (amd_iommu_bind_pasid) */
enum kfd_pdd_bound bound;
+
+ /* VRAM usage */
+ uint64_t vram_usage;
+ struct attribute attr_vram;
+ char vram_filename[MAX_VRAM_FILENAME_LEN];
};
#define qpd_to_pdd(x) container_of(x, struct kfd_process_device, qpd)
@@ -923,6 +944,8 @@ int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
void *gws);
struct kernel_queue *pqm_get_kernel_queue(struct process_queue_manager *pqm,
unsigned int qid);
+struct queue *pqm_get_user_queue(struct process_queue_manager *pqm,
+ unsigned int qid);
int pqm_get_wave_state(struct process_queue_manager *pqm,
unsigned int qid,
void __user *ctl_stack,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index fe0cd49d4ea7..d27221ddcdeb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -79,18 +79,22 @@ static struct kfd_procfs_tree procfs;
static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr,
char *buffer)
{
- int val = 0;
-
if (strcmp(attr->name, "pasid") == 0) {
struct kfd_process *p = container_of(attr, struct kfd_process,
attr_pasid);
- val = p->pasid;
+
+ return snprintf(buffer, PAGE_SIZE, "%d\n", p->pasid);
+ } else if (strncmp(attr->name, "vram_", 5) == 0) {
+ struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
+ attr_vram);
+ if (pdd)
+ return snprintf(buffer, PAGE_SIZE, "%llu\n", READ_ONCE(pdd->vram_usage));
} else {
pr_err("Invalid attribute");
return -EINVAL;
}
- return snprintf(buffer, PAGE_SIZE, "%d\n", val);
+ return 0;
}
static void kfd_procfs_kobj_release(struct kobject *kobj)
@@ -206,6 +210,34 @@ int kfd_procfs_add_queue(struct queue *q)
return 0;
}
+int kfd_procfs_add_vram_usage(struct kfd_process *p)
+{
+ int ret = 0;
+ struct kfd_process_device *pdd;
+
+ if (!p)
+ return -EINVAL;
+
+ if (!p->kobj)
+ return -EFAULT;
+
+ /* Create proc/<pid>/vram_<gpuid> file for each GPU */
+ list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
+ snprintf(pdd->vram_filename, MAX_VRAM_FILENAME_LEN, "vram_%u",
+ pdd->dev->id);
+ pdd->attr_vram.name = pdd->vram_filename;
+ pdd->attr_vram.mode = KFD_SYSFS_FILE_MODE;
+ sysfs_attr_init(&pdd->attr_vram);
+ ret = sysfs_create_file(p->kobj, &pdd->attr_vram);
+ if (ret)
+ pr_warn("Creating vram usage for gpu id %d failed",
+ (int)pdd->dev->id);
+ }
+
+ return ret;
+}
+
+
void kfd_procfs_del_queue(struct queue *q)
{
if (!q)
@@ -248,7 +280,7 @@ static void kfd_process_free_gpuvm(struct kgd_mem *mem,
struct kfd_dev *dev = pdd->dev;
amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->kgd, mem, pdd->vm);
- amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, mem);
+ amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, mem, NULL);
}
/* kfd_process_alloc_gpuvm - Allocate GPU VM for the KFD process
@@ -312,7 +344,7 @@ sync_memory_failed:
return err;
err_map_mem:
- amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->kgd, mem);
+ amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->kgd, mem, NULL);
err_alloc_mem:
*kptr = NULL;
return err;
@@ -411,6 +443,11 @@ struct kfd_process *kfd_create_process(struct file *filep)
process->kobj);
if (!process->kobj_queues)
pr_warn("Creating KFD proc/queues folder failed");
+
+ ret = kfd_procfs_add_vram_usage(process);
+ if (ret)
+ pr_warn("Creating vram usage file for pid %d failed",
+ (int)process->lead_thread->pid);
}
out:
if (!IS_ERR(process))
@@ -488,7 +525,7 @@ static void kfd_process_device_free_bos(struct kfd_process_device *pdd)
peer_pdd->dev->kgd, mem, peer_pdd->vm);
}
- amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->kgd, mem);
+ amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->kgd, mem, NULL);
kfd_process_device_remove_obj_handle(pdd, id);
}
}
@@ -551,6 +588,7 @@ static void kfd_process_wq_release(struct work_struct *work)
{
struct kfd_process *p = container_of(work, struct kfd_process,
release_work);
+ struct kfd_process_device *pdd;
/* Remove the procfs files */
if (p->kobj) {
@@ -558,6 +596,10 @@ static void kfd_process_wq_release(struct work_struct *work)
kobject_del(p->kobj_queues);
kobject_put(p->kobj_queues);
p->kobj_queues = NULL;
+
+ list_for_each_entry(pdd, &p->per_device_data, per_device_list)
+ sysfs_remove_file(p->kobj, &pdd->attr_vram);
+
kobject_del(p->kobj);
kobject_put(p->kobj);
p->kobj = NULL;
@@ -858,10 +900,12 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
pdd->qpd.dqm = dev->dqm;
pdd->qpd.pqm = &p->pqm;
pdd->qpd.evicted = 0;
+ pdd->qpd.mapped_gws_queue = false;
pdd->process = p;
pdd->bound = PDD_UNBOUND;
pdd->already_dequeued = false;
pdd->runtime_inuse = false;
+ pdd->vram_usage = 0;
list_add(&pdd->per_device_list, &p->per_device_data);
/* Init idr used for memory handle translation */
@@ -1078,7 +1122,7 @@ struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm)
return p;
}
-/* process_evict_queues - Evict all user queues of a process
+/* kfd_process_evict_queues - Evict all user queues of a process
*
* Eviction is reference-counted per process-device. This means multiple
* evictions from different sources can be nested safely.
@@ -1118,7 +1162,7 @@ fail:
return r;
}
-/* process_restore_queues - Restore all user queues of a process */
+/* kfd_process_restore_queues - Restore all user queues of a process */
int kfd_process_restore_queues(struct kfd_process *p)
{
struct kfd_process_device *pdd;
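
With the kfd_process.c changes above, each process exposes a per-GPU VRAM usage counter next to its existing "pasid" entry, using the vram_%u filename built in kfd_procfs_add_vram_usage(). A hedged userspace sketch for reading it; the /sys/class/kfd/kfd/proc/<pid>/ location is assumed from the usual KFD proc layout and is not spelled out in this patch.

#include <stdio.h>

/* Returns bytes of VRAM charged to pid on gpu_id, or 0 on error. */
static unsigned long long read_kfd_vram_usage(int pid, unsigned int gpu_id)
{
	char path[128];
	unsigned long long bytes = 0;
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/class/kfd/kfd/proc/%d/vram_%u", pid, gpu_id);
	f = fopen(path, "r");
	if (!f)
		return 0;
	if (fscanf(f, "%llu", &bytes) != 1)
		bytes = 0;
	fclose(f);
	return bytes;
}
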
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 084c35f55d59..eb1635ac8988 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -476,6 +476,15 @@ struct kernel_queue *pqm_get_kernel_queue(
return NULL;
}
+struct queue *pqm_get_user_queue(struct process_queue_manager *pqm,
+ unsigned int qid)
+{
+ struct process_queue_node *pqn;
+
+ pqn = get_queue_by_qid(pqm, qid);
+ return pqn ? pqn->q : NULL;
+}
+
int pqm_get_wave_state(struct process_queue_manager *pqm,
unsigned int qid,
void __user *ctl_stack,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index aa0bfa78a667..bb77f7af2b6d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -478,6 +478,8 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
dev->node_props.device_id);
sysfs_show_32bit_prop(buffer, "location_id",
dev->node_props.location_id);
+ sysfs_show_32bit_prop(buffer, "domain",
+ dev->node_props.domain);
sysfs_show_32bit_prop(buffer, "drm_render_minor",
dev->node_props.drm_render_minor);
sysfs_show_64bit_prop(buffer, "hive_id",
@@ -787,7 +789,6 @@ static int kfd_topology_update_sysfs(void)
{
int ret;
- pr_info("Creating topology SYSFS entries\n");
if (!sys_props.kobj_topology) {
sys_props.kobj_topology =
kfd_alloc_struct(sys_props.kobj_topology);
@@ -1048,7 +1049,6 @@ int kfd_topology_init(void)
sys_props.generation_count++;
kfd_update_system_properties();
kfd_debug_print_topology();
- pr_info("Finished initializing topology\n");
} else
pr_err("Failed to update topology in sysfs ret=%d\n", ret);
@@ -1303,7 +1303,12 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
dev->node_props.vendor_id = gpu->pdev->vendor;
dev->node_props.device_id = gpu->pdev->device;
+ dev->node_props.capability |=
+ ((amdgpu_amdkfd_get_asic_rev_id(dev->gpu->kgd) <<
+ HSA_CAP_ASIC_REVISION_SHIFT) &
+ HSA_CAP_ASIC_REVISION_MASK);
dev->node_props.location_id = pci_dev_id(gpu->pdev);
+ dev->node_props.domain = pci_domain_nr(gpu->pdev->bus);
dev->node_props.max_engine_clk_fcompute =
amdgpu_amdkfd_get_max_engine_clock_in_mhz(dev->gpu->kgd);
dev->node_props.max_engine_clk_ccompute =
@@ -1317,7 +1322,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
gpu->device_info->num_xgmi_sdma_engines;
dev->node_props.num_sdma_queues_per_engine =
gpu->device_info->num_sdma_queues_per_engine;
- dev->node_props.num_gws = (hws_gws_support &&
+ dev->node_props.num_gws = (dev->gpu->gws &&
dev->gpu->dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) ?
amdgpu_amdkfd_get_num_gws(dev->gpu->kgd) : 0;
dev->node_props.num_cp_queues = get_cp_queues_num(dev->gpu->dqm);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
index 46eeecaf1b68..326d9b26b7aa 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
@@ -41,7 +41,6 @@
#define HSA_CAP_WATCH_POINTS_TOTALBITS_SHIFT 8
#define HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK 0x00003000
#define HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT 12
-#define HSA_CAP_RESERVED 0xffffc000
#define HSA_CAP_DOORBELL_TYPE_PRE_1_0 0x0
#define HSA_CAP_DOORBELL_TYPE_1_0 0x1
@@ -51,6 +50,10 @@
#define HSA_CAP_SRAM_EDCSUPPORTED 0x00080000
#define HSA_CAP_MEM_EDCSUPPORTED 0x00100000
#define HSA_CAP_RASEVENTNOTIFY 0x00200000
+#define HSA_CAP_ASIC_REVISION_MASK 0x03c00000
+#define HSA_CAP_ASIC_REVISION_SHIFT 22
+
+#define HSA_CAP_RESERVED 0xfc078000
struct kfd_node_properties {
uint64_t hive_id;
@@ -77,6 +80,7 @@ struct kfd_node_properties {
uint32_t vendor_id;
uint32_t device_id;
uint32_t location_id;
+ uint32_t domain;
uint32_t max_engine_clk_fcompute;
uint32_t max_engine_clk_ccompute;
int32_t drm_render_minor;
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index 87858bc57e64..1911a34cc060 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -21,16 +21,12 @@ config DRM_AMD_DC_HDCP
bool "Enable HDCP support in DC"
depends on DRM_AMD_DC
help
- Choose this option
- if you want to support
- HDCP authentication
+ Choose this option if you want to support HDCP authentication.
config DEBUG_KERNEL_DC
bool "Enable kgdb break in DC"
depends on DRM_AMD_DC
help
- Choose this option
- if you want to hit
- kdgb_break in assert.
+	  Choose this option if you want to hit kgdb_break in assert.
endmenu
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 7fc15b82fe48..7ced9f87be97 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -30,7 +30,7 @@
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
-#include "dmub/inc/dmub_srv.h"
+#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
@@ -774,8 +774,9 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
fw_inst_const_size);
}
- memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr, fw_bss_data,
- fw_bss_data_size);
+ if (fw_bss_data_size)
+ memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
+ fw_bss_data, fw_bss_data_size);
/* Copy firmware bios info into FB memory. */
memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
@@ -917,6 +918,23 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
goto error;
}
+ if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
+ adev->dm.dc->debug.force_single_disp_pipe_split = false;
+ adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
+ }
+
+ if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
+ adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
+
+ if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
+ adev->dm.dc->debug.disable_stutter = true;
+
+ if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
+ adev->dm.dc->debug.disable_dsc = true;
+
+ if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
+ adev->dm.dc->debug.disable_clock_gate = true;
+
r = dm_dmub_hw_init(adev);
if (r) {
DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
@@ -1214,6 +1232,10 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
adev->dm.dmub_fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
le32_to_cpu(hdr->inst_const_bytes);
+ region_params.fw_inst_const =
+ adev->dm.dmub_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
+ PSP_HEADER_BYTES;
status = dmub_srv_calc_region_info(dmub_srv, &region_params,
&region_info);
@@ -1333,8 +1355,13 @@ static int dm_late_init(void *handle)
struct dmcu_iram_parameters params;
unsigned int linear_lut[16];
int i;
- struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
- bool ret = false;
+ struct dmcu *dmcu = NULL;
+ bool ret;
+
+ if (!adev->dm.fw_dmcu)
+ return detect_mst_link_for_all_connectors(adev->ddev);
+
+ dmcu = adev->dm.dc->res_pool->dmcu;
for (i = 0; i < 16; i++)
linear_lut[i] = 0xFFFF * i / 15;
@@ -1350,13 +1377,10 @@ static int dm_late_init(void *handle)
*/
params.min_abm_backlight = 0x28F;
- /* todo will enable for navi10 */
- if (adev->asic_type <= CHIP_RAVEN) {
- ret = dmcu_load_iram(dmcu, params);
+ ret = dmcu_load_iram(dmcu, params);
- if (!ret)
- return -EINVAL;
- }
+ if (!ret)
+ return -EINVAL;
return detect_mst_link_for_all_connectors(adev->ddev);
}
@@ -1511,12 +1535,115 @@ static int dm_hw_fini(void *handle)
return 0;
}
+
+static int dm_enable_vblank(struct drm_crtc *crtc);
+static void dm_disable_vblank(struct drm_crtc *crtc);
+
+static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
+ struct dc_state *state, bool enable)
+{
+ enum dc_irq_source irq_source;
+ struct amdgpu_crtc *acrtc;
+ int rc = -EBUSY;
+ int i = 0;
+
+ for (i = 0; i < state->stream_count; i++) {
+ acrtc = get_crtc_by_otg_inst(
+ adev, state->stream_status[i].primary_otg_inst);
+
+ if (acrtc && state->stream_status[i].plane_count != 0) {
+ irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
+ rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
+			DRM_DEBUG("crtc %d - pflip irq %sabling: r=%d\n",
+ acrtc->crtc_id, enable ? "en" : "dis", rc);
+ if (rc)
+ DRM_WARN("Failed to %s pflip interrupts\n",
+ enable ? "enable" : "disable");
+
+ if (enable) {
+ rc = dm_enable_vblank(&acrtc->base);
+ if (rc)
+ DRM_WARN("Failed to enable vblank interrupts\n");
+ } else {
+ dm_disable_vblank(&acrtc->base);
+ }
+
+ }
+ }
+
+}
+
+enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
+{
+ struct dc_state *context = NULL;
+ enum dc_status res = DC_ERROR_UNEXPECTED;
+ int i;
+ struct dc_stream_state *del_streams[MAX_PIPES];
+ int del_streams_count = 0;
+
+ memset(del_streams, 0, sizeof(del_streams));
+
+ context = dc_create_state(dc);
+ if (context == NULL)
+ goto context_alloc_fail;
+
+ dc_resource_state_copy_construct_current(dc, context);
+
+ /* First remove from context all streams */
+ for (i = 0; i < context->stream_count; i++) {
+ struct dc_stream_state *stream = context->streams[i];
+
+ del_streams[del_streams_count++] = stream;
+ }
+
+ /* Remove all planes for removed streams and then remove the streams */
+ for (i = 0; i < del_streams_count; i++) {
+ if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
+ res = DC_FAIL_DETACH_SURFACES;
+ goto fail;
+ }
+
+ res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
+ if (res != DC_OK)
+ goto fail;
+ }
+
+
+ res = dc_validate_global_state(dc, context, false);
+
+ if (res != DC_OK) {
+ DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
+ goto fail;
+ }
+
+ res = dc_commit_state(dc, context);
+
+fail:
+ dc_release_state(context);
+
+context_alloc_fail:
+ return res;
+}
+
static int dm_suspend(void *handle)
{
struct amdgpu_device *adev = handle;
struct amdgpu_display_manager *dm = &adev->dm;
int ret = 0;
+ if (adev->in_gpu_reset) {
+ mutex_lock(&dm->dc_lock);
+ dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
+
+ dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
+
+ amdgpu_dm_commit_zero_streams(dm->dc);
+
+ amdgpu_dm_irq_suspend(adev);
+
+ return ret;
+ }
+
WARN_ON(adev->dm.cached_state);
adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
@@ -1527,7 +1654,7 @@ static int dm_suspend(void *handle)
dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
- return ret;
+ return 0;
}
static struct amdgpu_dm_connector *
@@ -1631,6 +1758,46 @@ static void emulated_link_detect(struct dc_link *link)
}
+static void dm_gpureset_commit_state(struct dc_state *dc_state,
+ struct amdgpu_display_manager *dm)
+{
+ struct {
+ struct dc_surface_update surface_updates[MAX_SURFACES];
+ struct dc_plane_info plane_infos[MAX_SURFACES];
+ struct dc_scaling_info scaling_infos[MAX_SURFACES];
+ struct dc_flip_addrs flip_addrs[MAX_SURFACES];
+ struct dc_stream_update stream_update;
+ } * bundle;
+ int k, m;
+
+ bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
+
+ if (!bundle) {
+ dm_error("Failed to allocate update bundle\n");
+ goto cleanup;
+ }
+
+ for (k = 0; k < dc_state->stream_count; k++) {
+ bundle->stream_update.stream = dc_state->streams[k];
+
+ for (m = 0; m < dc_state->stream_status->plane_count; m++) {
+ bundle->surface_updates[m].surface =
+ dc_state->stream_status->plane_states[m];
+ bundle->surface_updates[m].surface->force_full_update =
+ true;
+ }
+ dc_commit_updates_for_stream(
+ dm->dc, bundle->surface_updates,
+ dc_state->stream_status->plane_count,
+ dc_state->streams[k], &bundle->stream_update, dc_state);
+ }
+
+cleanup:
+ kfree(bundle);
+
+ return;
+}
+
static int dm_resume(void *handle)
{
struct amdgpu_device *adev = handle;
@@ -1647,8 +1814,44 @@ static int dm_resume(void *handle)
struct dm_plane_state *dm_new_plane_state;
struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
enum dc_connection_type new_connection_type = dc_connection_none;
- int i, r;
+ struct dc_state *dc_state;
+ int i, r, j;
+
+ if (adev->in_gpu_reset) {
+ dc_state = dm->cached_dc_state;
+
+ r = dm_dmub_hw_init(adev);
+ if (r)
+ DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
+
+ dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
+ dc_resume(dm->dc);
+
+ amdgpu_dm_irq_resume_early(adev);
+
+ for (i = 0; i < dc_state->stream_count; i++) {
+ dc_state->streams[i]->mode_changed = true;
+ for (j = 0; j < dc_state->stream_status->plane_count; j++) {
+ dc_state->stream_status->plane_states[j]->update_flags.raw
+ = 0xffffffff;
+ }
+ }
+
+ WARN_ON(!dc_commit_state(dm->dc, dc_state));
+
+ dm_gpureset_commit_state(dm->cached_dc_state, dm);
+
+ dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
+ dc_release_state(dm->cached_dc_state);
+ dm->cached_dc_state = NULL;
+
+ amdgpu_dm_irq_resume_late(adev);
+
+ mutex_unlock(&dm->dc_lock);
+
+ return 0;
+ }
/* Recreate dc_state - DC invalidates it when setting power state to S3. */
dc_release_state(dm_state->context);
dm_state->context = dc_create_state(dm->dc);
@@ -3013,9 +3216,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
goto fail;
}
- if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
- dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
-
/* No userspace support. */
dm->dc->debug.disable_tri_buf = true;
@@ -3286,7 +3486,7 @@ static int fill_dc_scaling_info(const struct drm_plane_state *state,
}
static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
- uint64_t *tiling_flags)
+ uint64_t *tiling_flags, bool *tmz_surface)
{
struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
int r = amdgpu_bo_reserve(rbo, false);
@@ -3301,6 +3501,9 @@ static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
if (tiling_flags)
amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
+ if (tmz_surface)
+ *tmz_surface = amdgpu_bo_encrypted(rbo);
+
amdgpu_bo_unreserve(rbo);
return r;
@@ -3388,6 +3591,7 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev,
struct plane_size *plane_size,
struct dc_plane_dcc_param *dcc,
struct dc_plane_address *address,
+ bool tmz_surface,
bool force_disable_dcc)
{
const struct drm_framebuffer *fb = &afb->base;
@@ -3398,6 +3602,8 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev,
memset(dcc, 0, sizeof(*dcc));
memset(address, 0, sizeof(*address));
+ address->tmz_surface = tmz_surface;
+
if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
plane_size->surface_size.x = 0;
plane_size->surface_size.y = 0;
@@ -3588,6 +3794,7 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
const uint64_t tiling_flags,
struct dc_plane_info *plane_info,
struct dc_plane_address *address,
+ bool tmz_surface,
bool force_disable_dcc)
{
const struct drm_framebuffer *fb = plane_state->fb;
@@ -3631,6 +3838,14 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
case DRM_FORMAT_P010:
plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
break;
+ case DRM_FORMAT_XRGB16161616F:
+ case DRM_FORMAT_ARGB16161616F:
+ plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
+ break;
+ case DRM_FORMAT_XBGR16161616F:
+ case DRM_FORMAT_ABGR16161616F:
+ plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
+ break;
default:
DRM_ERROR(
"Unsupported screen format %s\n",
@@ -3670,7 +3885,7 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
plane_info->rotation, tiling_flags,
&plane_info->tiling_info,
&plane_info->plane_size,
- &plane_info->dcc, address,
+ &plane_info->dcc, address, tmz_surface,
force_disable_dcc);
if (ret)
return ret;
@@ -3694,6 +3909,7 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
struct dc_plane_info plane_info;
uint64_t tiling_flags;
int ret;
+ bool tmz_surface = false;
bool force_disable_dcc = false;
ret = fill_dc_scaling_info(plane_state, &scaling_info);
@@ -3705,7 +3921,7 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
dc_plane_state->clip_rect = scaling_info.clip_rect;
dc_plane_state->scaling_quality = scaling_info.scaling_quality;
- ret = get_fb_info(amdgpu_fb, &tiling_flags);
+ ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
if (ret)
return ret;
@@ -3713,6 +3929,7 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
&plane_info,
&dc_plane_state->address,
+ tmz_surface,
force_disable_dcc);
if (ret)
return ret;
@@ -3800,8 +4017,7 @@ static void update_stream_scaling_settings(const struct drm_display_mode *mode,
static enum dc_color_depth
convert_color_depth_from_display_info(const struct drm_connector *connector,
- const struct drm_connector_state *state,
- bool is_y420)
+ bool is_y420, int requested_bpc)
{
uint8_t bpc;
@@ -3821,10 +4037,7 @@ convert_color_depth_from_display_info(const struct drm_connector *connector,
bpc = bpc ? bpc : 8;
}
- if (!state)
- state = connector->state;
-
- if (state) {
+ if (requested_bpc > 0) {
/*
* Cap display bpc based on the user requested value.
*
@@ -3833,7 +4046,7 @@ convert_color_depth_from_display_info(const struct drm_connector *connector,
* or if this was called outside of atomic check, so it
* can't be used directly.
*/
- bpc = min(bpc, state->max_requested_bpc);
+ bpc = min_t(u8, bpc, requested_bpc);
/* Round down to the nearest even number. */
bpc = bpc - (bpc & 1);
@@ -3955,7 +4168,8 @@ static void fill_stream_properties_from_drm_display_mode(
const struct drm_display_mode *mode_in,
const struct drm_connector *connector,
const struct drm_connector_state *connector_state,
- const struct dc_stream_state *old_stream)
+ const struct dc_stream_state *old_stream,
+ int requested_bpc)
{
struct dc_crtc_timing *timing_out = &stream->timing;
const struct drm_display_info *info = &connector->display_info;
@@ -3985,8 +4199,9 @@ static void fill_stream_properties_from_drm_display_mode(
timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
timing_out->display_color_depth = convert_color_depth_from_display_info(
- connector, connector_state,
- (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420));
+ connector,
+ (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
+ requested_bpc);
timing_out->scan_type = SCANNING_TYPE_NODATA;
timing_out->hdmi_vic = 0;
@@ -4192,7 +4407,8 @@ static struct dc_stream_state *
create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
const struct drm_display_mode *drm_mode,
const struct dm_connector_state *dm_state,
- const struct dc_stream_state *old_stream)
+ const struct dc_stream_state *old_stream,
+ int requested_bpc)
{
struct drm_display_mode *preferred_mode = NULL;
struct drm_connector *drm_connector;
@@ -4277,10 +4493,10 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
*/
if (!scale || mode_refresh != preferred_refresh)
fill_stream_properties_from_drm_display_mode(stream,
- &mode, &aconnector->base, con_state, NULL);
+ &mode, &aconnector->base, con_state, NULL, requested_bpc);
else
fill_stream_properties_from_drm_display_mode(stream,
- &mode, &aconnector->base, con_state, old_stream);
+ &mode, &aconnector->base, con_state, old_stream, requested_bpc);
stream->timing.flags.DSC = 0;
@@ -4317,14 +4533,10 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
- if (stream->link->psr_feature_enabled) {
+ if (stream->link->psr_settings.psr_feature_enabled) {
struct dc *core_dc = stream->link->ctx->dc;
if (dc_is_dmcu_initialized(core_dc)) {
- struct dmcu *dmcu = core_dc->res_pool->dmcu;
-
- stream->psr_version = dmcu->dmcu_version.psr_version;
-
//
// should decide stream support vsc sdp colorimetry capability
// before building vsc info packet
@@ -4803,16 +5015,54 @@ static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
create_eml_sink(aconnector);
}
+static struct dc_stream_state *
+create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
+ const struct drm_display_mode *drm_mode,
+ const struct dm_connector_state *dm_state,
+ const struct dc_stream_state *old_stream)
+{
+ struct drm_connector *connector = &aconnector->base;
+ struct amdgpu_device *adev = connector->dev->dev_private;
+ struct dc_stream_state *stream;
+ int requested_bpc = connector->state ? connector->state->max_requested_bpc : 8;
+ enum dc_status dc_result = DC_OK;
+
+ do {
+ stream = create_stream_for_sink(aconnector, drm_mode,
+ dm_state, old_stream,
+ requested_bpc);
+ if (stream == NULL) {
+ DRM_ERROR("Failed to create stream for sink!\n");
+ break;
+ }
+
+ dc_result = dc_validate_stream(adev->dm.dc, stream);
+
+ if (dc_result != DC_OK) {
+ DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
+ drm_mode->hdisplay,
+ drm_mode->vdisplay,
+ drm_mode->clock,
+ dc_result);
+
+ dc_stream_release(stream);
+ stream = NULL;
+ requested_bpc -= 2; /* lower bpc to retry validation */
+ }
+
+ } while (stream == NULL && requested_bpc >= 6);
+
+ return stream;
+}
+
enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
int result = MODE_ERROR;
struct dc_sink *dc_sink;
- struct amdgpu_device *adev = connector->dev->dev_private;
/* TODO: Unhardcode stream count */
struct dc_stream_state *stream;
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
- enum dc_status dc_result = DC_OK;
if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
(mode->flags & DRM_MODE_FLAG_DBLSCAN))
@@ -4833,24 +5083,11 @@ enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connec
goto fail;
}
- stream = create_stream_for_sink(aconnector, mode, NULL, NULL);
- if (stream == NULL) {
- DRM_ERROR("Failed to create stream for sink!\n");
- goto fail;
- }
-
- dc_result = dc_validate_stream(adev->dm.dc, stream);
-
- if (dc_result == DC_OK)
+ stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
+ if (stream) {
+ dc_stream_release(stream);
result = MODE_OK;
- else
- DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
- mode->hdisplay,
- mode->vdisplay,
- mode->clock,
- dc_result);
-
- dc_stream_release(stream);
+ }
fail:
/* TODO: error handling*/
@@ -5173,10 +5410,12 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
return 0;
if (!state->duplicated) {
+ int max_bpc = conn_state->max_requested_bpc;
is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
aconnector->force_yuv420_output;
- color_depth = convert_color_depth_from_display_info(connector, conn_state,
- is_y420);
+ color_depth = convert_color_depth_from_display_info(connector,
+ is_y420,
+ max_bpc);
bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
clock = adjusted_mode->clock;
dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
@@ -5331,6 +5570,7 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
uint64_t tiling_flags;
uint32_t domain;
int r;
+ bool tmz_surface = false;
bool force_disable_dcc = false;
dm_plane_state_old = to_dm_plane_state(plane->state);
@@ -5380,6 +5620,8 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
+ tmz_surface = amdgpu_bo_encrypted(rbo);
+
ttm_eu_backoff_reservation(&ticket, &list);
afb->address = amdgpu_bo_gpu_offset(rbo);
@@ -5395,7 +5637,7 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
adev, afb, plane_state->format, plane_state->rotation,
tiling_flags, &plane_state->tiling_info,
&plane_state->plane_size, &plane_state->dcc,
- &plane_state->address,
+ &plane_state->address, tmz_surface,
force_disable_dcc);
}
@@ -5542,6 +5784,12 @@ static int get_plane_formats(const struct drm_plane *plane,
formats[num_formats++] = DRM_FORMAT_NV12;
if (plane_cap && plane_cap->pixel_format_support.p010)
formats[num_formats++] = DRM_FORMAT_P010;
+ if (plane_cap && plane_cap->pixel_format_support.fp16) {
+ formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
+ formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
+ formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
+ formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
+ }
break;
case DRM_PLANE_TYPE_OVERLAY:
@@ -6569,6 +6817,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
unsigned long flags;
struct amdgpu_bo *abo;
uint64_t tiling_flags;
+ bool tmz_surface = false;
uint32_t target_vblank, last_flip_vblank;
bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
bool pflip_present = false;
@@ -6621,6 +6870,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
if (new_pcrtc_state->color_mgmt_changed) {
bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
+ bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
}
fill_dc_scaling_info(new_plane_state,
@@ -6663,12 +6913,15 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
+ tmz_surface = amdgpu_bo_encrypted(abo);
+
amdgpu_bo_unreserve(abo);
fill_dc_plane_info_and_addr(
dm->adev, new_plane_state, tiling_flags,
&bundle->plane_infos[planes_count],
&bundle->flip_addrs[planes_count].address,
+ tmz_surface,
false);
DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
@@ -6814,7 +7067,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
}
mutex_lock(&dm->dc_lock);
if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
- acrtc_state->stream->link->psr_allow_active)
+ acrtc_state->stream->link->psr_settings.psr_allow_active)
amdgpu_dm_psr_disable(acrtc_state->stream);
dc_commit_updates_for_stream(dm->dc,
@@ -6825,12 +7078,12 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
dc_state);
if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
- acrtc_state->stream->psr_version &&
- !acrtc_state->stream->link->psr_feature_enabled)
+ acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
+ !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
amdgpu_dm_link_setup_psr(acrtc_state->stream);
else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
- acrtc_state->stream->link->psr_feature_enabled &&
- !acrtc_state->stream->link->psr_allow_active) {
+ acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
+ !acrtc_state->stream->link->psr_settings.psr_allow_active) {
amdgpu_dm_psr_enable(acrtc_state->stream);
}
@@ -7144,7 +7397,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
/* i.e. reset mode */
if (dm_old_crtc_state->stream) {
- if (dm_old_crtc_state->stream->link->psr_allow_active)
+ if (dm_old_crtc_state->stream->link->psr_settings.psr_allow_active)
amdgpu_dm_psr_disable(dm_old_crtc_state->stream);
remove_stream(adev, acrtc, dm_old_crtc_state->stream);
@@ -7592,10 +7845,10 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
goto skip_modeset;
- new_stream = create_stream_for_sink(aconnector,
- &new_crtc_state->mode,
- dm_new_conn_state,
- dm_old_crtc_state->stream);
+ new_stream = create_validate_stream_for_sink(aconnector,
+ &new_crtc_state->mode,
+ dm_new_conn_state,
+ dm_old_crtc_state->stream);
/*
* we can have no stream on ACTION_SET if a display
@@ -8056,6 +8309,7 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
struct dc_flip_addrs *flip_addr = &bundle->flip_addrs[num_plane];
struct dc_scaling_info *scaling_info = &bundle->scaling_infos[num_plane];
uint64_t tiling_flags;
+ bool tmz_surface = false;
new_plane_crtc = new_plane_state->crtc;
new_dm_plane_state = to_dm_plane_state(new_plane_state);
@@ -8085,6 +8339,8 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
new_dm_plane_state->dc_state->gamma_correction;
bundle->surface_updates[num_plane].in_transfer_func =
new_dm_plane_state->dc_state->in_transfer_func;
+ bundle->surface_updates[num_plane].gamut_remap_matrix =
+ &new_dm_plane_state->dc_state->gamut_remap_matrix;
bundle->stream_update.gamut_remap =
&new_dm_crtc_state->stream->gamut_remap_matrix;
bundle->stream_update.output_csc_transform =
@@ -8101,14 +8357,14 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
bundle->surface_updates[num_plane].scaling_info = scaling_info;
if (amdgpu_fb) {
- ret = get_fb_info(amdgpu_fb, &tiling_flags);
+ ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
if (ret)
goto cleanup;
ret = fill_dc_plane_info_and_addr(
dm->adev, new_plane_state, tiling_flags,
plane_info,
- &flip_addr->address,
+ &flip_addr->address, tmz_surface,
false);
if (ret)
goto cleanup;
@@ -8609,8 +8865,17 @@ static void amdgpu_dm_set_psr_caps(struct dc_link *link)
return;
if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
dpcd_data, sizeof(dpcd_data))) {
- link->psr_feature_enabled = dpcd_data[0] ? true:false;
- DRM_INFO("PSR support:%d\n", link->psr_feature_enabled);
+ link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
+
+ if (dpcd_data[0] == 0) {
+ link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
+ link->psr_settings.psr_feature_enabled = false;
+ } else {
+ link->psr_settings.psr_version = DC_PSR_VERSION_1;
+ link->psr_settings.psr_feature_enabled = true;
+ }
+
+ DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
}
}
@@ -8625,16 +8890,14 @@ static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
struct dc_link *link = NULL;
struct psr_config psr_config = {0};
struct psr_context psr_context = {0};
- struct dc *dc = NULL;
bool ret = false;
if (stream == NULL)
return false;
link = stream->link;
- dc = link->ctx->dc;
- psr_config.psr_version = dc->res_pool->dmcu->dmcu_version.psr_version;
+ psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
if (psr_config.psr_version > 0) {
psr_config.psr_exit_link_training_required = 0x1;
@@ -8646,7 +8909,7 @@ static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
}
- DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_feature_enabled);
+ DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
return ret;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 5cab3e65d992..d61186ff411d 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -315,6 +315,7 @@ struct amdgpu_display_manager {
#endif
struct drm_atomic_state *cached_state;
+ struct dc_state *cached_dc_state;
struct dm_comressor_info compressor;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
index 2233d293a707..4dfb6b55bb2e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
@@ -239,7 +239,8 @@ static int __set_output_tf(struct dc_transfer_func *func,
* instead to simulate this.
*/
gamma->type = GAMMA_CUSTOM;
- res = mod_color_calculate_degamma_params(func, gamma, true);
+ res = mod_color_calculate_degamma_params(NULL, func,
+ gamma, true);
} else {
/*
* Assume sRGB. The actual mapping will depend on whether the
@@ -271,7 +272,7 @@ static int __set_input_tf(struct dc_transfer_func *func,
__drm_lut_to_dc_gamma(lut, gamma, false);
- res = mod_color_calculate_degamma_params(func, gamma, true);
+ res = mod_color_calculate_degamma_params(NULL, func, gamma, true);
dc_gamma_release(&gamma);
return res ? 0 : -ENOMEM;
@@ -419,9 +420,21 @@ int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
struct dc_plane_state *dc_plane_state)
{
const struct drm_color_lut *degamma_lut;
+ enum dc_transfer_func_predefined tf = TRANSFER_FUNCTION_SRGB;
uint32_t degamma_size;
int r;
+ /* Get the correct base transfer function for implicit degamma. */
+ switch (dc_plane_state->format) {
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
+ /* DC doesn't have a transfer function for BT601 specifically. */
+ tf = TRANSFER_FUNCTION_BT709;
+ break;
+ default:
+ break;
+ }
+
if (crtc->cm_has_degamma) {
degamma_lut = __extract_blob_lut(crtc->base.degamma_lut,
&degamma_size);
@@ -455,8 +468,7 @@ int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
* map these to the atomic one instead.
*/
if (crtc->cm_is_degamma_srgb)
- dc_plane_state->in_transfer_func->tf =
- TRANSFER_FUNCTION_SRGB;
+ dc_plane_state->in_transfer_func->tf = tf;
else
dc_plane_state->in_transfer_func->tf =
TRANSFER_FUNCTION_LINEAR;
@@ -471,7 +483,12 @@ int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
* in linear space. Assume that the input is sRGB.
*/
dc_plane_state->in_transfer_func->type = TF_TYPE_PREDEFINED;
- dc_plane_state->in_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
+ dc_plane_state->in_transfer_func->tf = tf;
+
+ if (tf != TRANSFER_FUNCTION_SRGB &&
+ !mod_color_calculate_degamma_params(NULL,
+ dc_plane_state->in_transfer_func, NULL, false))
+ return -ENOMEM;
} else {
/* ...Otherwise we can just bypass the DGM block. */
dc_plane_state->in_transfer_func->type = TF_TYPE_BYPASS;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 0461fecd68db..076af267b488 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -32,7 +32,7 @@
#include "amdgpu_dm.h"
#include "amdgpu_dm_debugfs.h"
#include "dm_helpers.h"
-#include "dmub/inc/dmub_srv.h"
+#include "dmub/dmub_srv.h"
struct dmub_debugfs_trace_header {
uint32_t entry_count;
@@ -838,6 +838,44 @@ static int vrr_range_show(struct seq_file *m, void *data)
return 0;
}
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+/*
+ * Returns the HDCP capability of the Display (1.4 for now).
+ *
+ * NOTE: Not all HDMI displays report their HDCP caps even when they are capable.
+ * Since it's rare for a display to not be HDCP 1.4 capable, we treat HDMI as always capable.
+ *
+ * Example usage: cat /sys/kernel/debug/dri/0/DP-1/hdcp_sink_capability
+ * or cat /sys/kernel/debug/dri/0/HDMI-A-1/hdcp_sink_capability
+ */
+static int hdcp_sink_capability_show(struct seq_file *m, void *data)
+{
+ struct drm_connector *connector = m->private;
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ bool hdcp_cap, hdcp2_cap;
+
+ if (connector->status != connector_status_connected)
+ return -ENODEV;
+
+ seq_printf(m, "%s:%d HDCP version: ", connector->name, connector->base.id);
+
+ hdcp_cap = dc_link_is_hdcp14(aconnector->dc_link);
+ hdcp2_cap = dc_link_is_hdcp22(aconnector->dc_link);
+
+
+ if (hdcp_cap)
+ seq_printf(m, "%s ", "HDCP1.4");
+ if (hdcp2_cap)
+ seq_printf(m, "%s ", "HDCP2.2");
+
+ if (!hdcp_cap && !hdcp2_cap)
+ seq_printf(m, "%s ", "None");
+
+ seq_puts(m, "\n");
+
+ return 0;
+}
+#endif
/* function description
*
* generic SDP message access for testing
@@ -964,6 +1002,9 @@ DEFINE_SHOW_ATTRIBUTE(dmub_fw_state);
DEFINE_SHOW_ATTRIBUTE(dmub_tracebuffer);
DEFINE_SHOW_ATTRIBUTE(output_bpc);
DEFINE_SHOW_ATTRIBUTE(vrr_range);
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+DEFINE_SHOW_ATTRIBUTE(hdcp_sink_capability);
+#endif
static const struct file_operations dp_link_settings_debugfs_fops = {
.owner = THIS_MODULE,
@@ -1019,12 +1060,23 @@ static const struct {
{"test_pattern", &dp_phy_test_pattern_fops},
{"output_bpc", &output_bpc_fops},
{"vrr_range", &vrr_range_fops},
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ {"hdcp_sink_capability", &hdcp_sink_capability_fops},
+#endif
{"sdp_message", &sdp_message_fops},
{"aux_dpcd_address", &dp_dpcd_address_debugfs_fops},
{"aux_dpcd_size", &dp_dpcd_size_debugfs_fops},
{"aux_dpcd_data", &dp_dpcd_data_debugfs_fops}
};
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+static const struct {
+ char *name;
+ const struct file_operations *fops;
+} hdmi_debugfs_entries[] = {
+ {"hdcp_sink_capability", &hdcp_sink_capability_fops}
+};
+#endif
/*
* Force YUV420 output if available from the given mode
*/
@@ -1093,6 +1145,15 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector)
connector->debugfs_dpcd_address = 0;
connector->debugfs_dpcd_size = 0;
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ if (connector->base.connector_type == DRM_MODE_CONNECTOR_HDMIA) {
+ for (i = 0; i < ARRAY_SIZE(hdmi_debugfs_entries); i++) {
+ debugfs_create_file(hdmi_debugfs_entries[i].name,
+ 0644, dir, connector,
+ hdmi_debugfs_entries[i].fops);
+ }
+ }
+#endif
}
/*
@@ -1167,8 +1228,9 @@ static int current_backlight_read(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct drm_device *dev = node->minor->dev;
struct amdgpu_device *adev = dev->dev_private;
- struct dc *dc = adev->dm.dc;
- unsigned int backlight = dc_get_current_backlight_pwm(dc);
+ struct amdgpu_display_manager *dm = &adev->dm;
+
+ unsigned int backlight = dc_link_get_backlight_level(dm->backlight_link);
seq_printf(m, "0x%x\n", backlight);
return 0;
@@ -1184,8 +1246,9 @@ static int target_backlight_read(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct drm_device *dev = node->minor->dev;
struct amdgpu_device *adev = dev->dev_private;
- struct dc *dc = adev->dm.dc;
- unsigned int backlight = dc_get_target_backlight_pwm(dc);
+ struct amdgpu_display_manager *dm = &adev->dm;
+
+ unsigned int backlight = dc_link_get_target_backlight_pwm(dm->backlight_link);
seq_printf(m, "0x%x\n", backlight);
return 0;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index c20fb08c450b..b086d5c906e0 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -445,7 +445,7 @@ bool dm_helpers_dp_read_dpcd(
struct amdgpu_dm_connector *aconnector = link->priv;
if (!aconnector) {
- DRM_ERROR("Failed to find connector for link!");
+ DC_LOG_DC("Failed to find connector for link!\n");
return false;
}
@@ -554,6 +554,7 @@ enum dc_edid_status dm_helpers_read_local_edid(
struct dc_sink *sink)
{
struct amdgpu_dm_connector *aconnector = link->priv;
+ struct drm_connector *connector = &aconnector->base;
struct i2c_adapter *ddc;
int retry = 3;
enum dc_edid_status edid_status;
@@ -571,6 +572,15 @@ enum dc_edid_status dm_helpers_read_local_edid(
edid = drm_get_edid(&aconnector->base, ddc);
+ /* DP Compliance Test 4.2.2.6 */
+ if (link->aux_mode && connector->edid_corrupt)
+ drm_dp_send_real_edid_checksum(&aconnector->dm_dp_aux.aux, connector->real_edid_checksum);
+
+ if (!edid && connector->edid_corrupt) {
+ connector->edid_corrupt = false;
+ return EDID_BAD_CHECKSUM;
+ }
+
if (!edid)
return EDID_NO_RESPONSE;
@@ -605,34 +615,10 @@ enum dc_edid_status dm_helpers_read_local_edid(
DRM_ERROR("EDID err: %d, on connector: %s",
edid_status,
aconnector->base.name);
- if (link->aux_mode) {
- union test_request test_request = { {0} };
- union test_response test_response = { {0} };
-
- dm_helpers_dp_read_dpcd(ctx,
- link,
- DP_TEST_REQUEST,
- &test_request.raw,
- sizeof(union test_request));
-
- if (!test_request.bits.EDID_READ)
- return edid_status;
- test_response.bits.EDID_CHECKSUM_WRITE = 1;
-
- dm_helpers_dp_write_dpcd(ctx,
- link,
- DP_TEST_EDID_CHECKSUM,
- &sink->dc_edid.raw_edid[sink->dc_edid.length-1],
- 1);
-
- dm_helpers_dp_write_dpcd(ctx,
- link,
- DP_TEST_RESPONSE,
- &test_response.raw,
- sizeof(test_response));
-
- }
+ /* DP Compliance Test 4.2.2.3 */
+ if (link->aux_mode)
+ drm_dp_send_real_edid_checksum(&aconnector->dm_dp_aux.aux, sink->dc_edid.raw_edid[sink->dc_edid.length-1]);
return edid_status;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index d2917759b7ab..ae0a7ef1d595 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -41,53 +41,10 @@
#include "amdgpu_dm_debugfs.h"
#endif
-
#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "dc/dcn20/dcn20_resource.h"
#endif
-/* #define TRACE_DPCD */
-
-#ifdef TRACE_DPCD
-#define SIDE_BAND_MSG(address) (address >= DP_SIDEBAND_MSG_DOWN_REQ_BASE && address < DP_SINK_COUNT_ESI)
-
-static inline char *side_band_msg_type_to_str(uint32_t address)
-{
- static char str[10] = {0};
-
- if (address < DP_SIDEBAND_MSG_UP_REP_BASE)
- strcpy(str, "DOWN_REQ");
- else if (address < DP_SIDEBAND_MSG_DOWN_REP_BASE)
- strcpy(str, "UP_REP");
- else if (address < DP_SIDEBAND_MSG_UP_REQ_BASE)
- strcpy(str, "DOWN_REP");
- else
- strcpy(str, "UP_REQ");
-
- return str;
-}
-
-static void log_dpcd(uint8_t type,
- uint32_t address,
- uint8_t *data,
- uint32_t size,
- bool res)
-{
- DRM_DEBUG_KMS("Op: %s, addr: %04x, SideBand Msg: %s, Op res: %s\n",
- (type == DP_AUX_NATIVE_READ) ||
- (type == DP_AUX_I2C_READ) ?
- "Read" : "Write",
- address,
- SIDE_BAND_MSG(address) ?
- side_band_msg_type_to_str(address) : "Nop",
- res ? "OK" : "Fail");
-
- if (res) {
- print_hex_dump(KERN_INFO, "Body: ", DUMP_PREFIX_NONE, 16, 1, data, size, false);
- }
-}
-#endif
-
static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
struct drm_dp_aux_msg *msg)
{
@@ -136,17 +93,23 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
static void
dm_dp_mst_connector_destroy(struct drm_connector *connector)
{
- struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
- struct amdgpu_encoder *amdgpu_encoder = amdgpu_dm_connector->mst_encoder;
+ struct amdgpu_dm_connector *aconnector =
+ to_amdgpu_dm_connector(connector);
+ struct amdgpu_encoder *amdgpu_encoder = aconnector->mst_encoder;
- kfree(amdgpu_dm_connector->edid);
- amdgpu_dm_connector->edid = NULL;
+ if (aconnector->dc_sink) {
+ dc_link_remove_remote_sink(aconnector->dc_link,
+ aconnector->dc_sink);
+ dc_sink_release(aconnector->dc_sink);
+ }
+
+ kfree(aconnector->edid);
drm_encoder_cleanup(&amdgpu_encoder->base);
kfree(amdgpu_encoder);
drm_connector_cleanup(connector);
- drm_dp_mst_put_port_malloc(amdgpu_dm_connector->port);
- kfree(amdgpu_dm_connector);
+ drm_dp_mst_put_port_malloc(aconnector->port);
+ kfree(aconnector);
}
static int
@@ -435,40 +398,13 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
*/
amdgpu_dm_connector_funcs_reset(connector);
- DRM_INFO("DM_MST: added connector: %p [id: %d] [master: %p]\n",
- aconnector, connector->base.id, aconnector->mst_port);
-
drm_dp_mst_get_port_malloc(port);
- DRM_DEBUG_KMS(":%d\n", connector->base.id);
-
return connector;
}
-static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_connector *connector)
-{
- struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
-
- DRM_INFO("DM_MST: Disabling connector: %p [id: %d] [master: %p]\n",
- aconnector, connector->base.id, aconnector->mst_port);
-
- if (aconnector->dc_sink) {
- amdgpu_dm_update_freesync_caps(connector, NULL);
- dc_link_remove_remote_sink(aconnector->dc_link,
- aconnector->dc_sink);
- dc_sink_release(aconnector->dc_sink);
- aconnector->dc_sink = NULL;
- aconnector->dc_link->cur_link_settings.lane_count = 0;
- }
-
- drm_connector_unregister(connector);
- drm_connector_put(connector);
-}
-
static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
.add_connector = dm_dp_add_mst_connector,
- .destroy_connector = dm_dp_destroy_mst_connector,
};
void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
diff --git a/drivers/gpu/drm/amd/display/dc/basics/Makefile b/drivers/gpu/drm/amd/display/dc/basics/Makefile
index 7ad0cad0f4ef..01b99e0d788e 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/basics/Makefile
@@ -24,8 +24,7 @@
# It provides the general basic services required by other DAL
# subcomponents.
-BASICS = conversion.o fixpt31_32.o \
- log_helpers.o vector.o dc_common.o
+BASICS = conversion.o fixpt31_32.o vector.o dc_common.o
AMD_DAL_BASICS = $(addprefix $(AMDDALPATH)/dc/basics/,$(BASICS))
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
index 8edc2506d49e..bed91572f82a 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
@@ -113,13 +113,19 @@ static void encoder_control_dmcub(
struct dc_dmub_srv *dmcub,
struct dig_encoder_stream_setup_parameters_v1_5 *dig)
{
- struct dmub_rb_cmd_digx_encoder_control encoder_control = { 0 };
+ union dmub_rb_cmd cmd;
- encoder_control.header.type = DMUB_CMD__VBIOS;
- encoder_control.header.sub_type = DMUB_CMD__VBIOS_DIGX_ENCODER_CONTROL;
- encoder_control.encoder_control.dig.stream_param = *dig;
+ memset(&cmd, 0, sizeof(cmd));
- dc_dmub_srv_cmd_queue(dmcub, &encoder_control.header);
+ cmd.digx_encoder_control.header.type = DMUB_CMD__VBIOS;
+ cmd.digx_encoder_control.header.sub_type =
+ DMUB_CMD__VBIOS_DIGX_ENCODER_CONTROL;
+ cmd.digx_encoder_control.header.payload_bytes =
+ sizeof(cmd.digx_encoder_control) -
+ sizeof(cmd.digx_encoder_control.header);
+ cmd.digx_encoder_control.encoder_control.dig.stream_param = *dig;
+
+ dc_dmub_srv_cmd_queue(dmcub, &cmd);
dc_dmub_srv_cmd_execute(dmcub);
dc_dmub_srv_wait_idle(dmcub);
}
@@ -238,14 +244,19 @@ static void transmitter_control_dmcub(
struct dc_dmub_srv *dmcub,
struct dig_transmitter_control_parameters_v1_6 *dig)
{
- struct dmub_rb_cmd_dig1_transmitter_control transmitter_control;
+ union dmub_rb_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
- transmitter_control.header.type = DMUB_CMD__VBIOS;
- transmitter_control.header.sub_type =
+ cmd.dig1_transmitter_control.header.type = DMUB_CMD__VBIOS;
+ cmd.dig1_transmitter_control.header.sub_type =
DMUB_CMD__VBIOS_DIG1_TRANSMITTER_CONTROL;
- transmitter_control.transmitter_control.dig = *dig;
+ cmd.dig1_transmitter_control.header.payload_bytes =
+ sizeof(cmd.dig1_transmitter_control) -
+ sizeof(cmd.dig1_transmitter_control.header);
+ cmd.dig1_transmitter_control.transmitter_control.dig = *dig;
- dc_dmub_srv_cmd_queue(dmcub, &transmitter_control.header);
+ dc_dmub_srv_cmd_queue(dmcub, &cmd);
dc_dmub_srv_cmd_execute(dmcub);
dc_dmub_srv_wait_idle(dmcub);
}
@@ -339,13 +350,18 @@ static void set_pixel_clock_dmcub(
struct dc_dmub_srv *dmcub,
struct set_pixel_clock_parameter_v1_7 *clk)
{
- struct dmub_rb_cmd_set_pixel_clock pixel_clock = { 0 };
+ union dmub_rb_cmd cmd;
- pixel_clock.header.type = DMUB_CMD__VBIOS;
- pixel_clock.header.sub_type = DMUB_CMD__VBIOS_SET_PIXEL_CLOCK;
- pixel_clock.pixel_clock.clk = *clk;
+ memset(&cmd, 0, sizeof(cmd));
- dc_dmub_srv_cmd_queue(dmcub, &pixel_clock.header);
+ cmd.set_pixel_clock.header.type = DMUB_CMD__VBIOS;
+ cmd.set_pixel_clock.header.sub_type = DMUB_CMD__VBIOS_SET_PIXEL_CLOCK;
+ cmd.set_pixel_clock.header.payload_bytes =
+ sizeof(cmd.set_pixel_clock) -
+ sizeof(cmd.set_pixel_clock.header);
+ cmd.set_pixel_clock.pixel_clock.clk = *clk;
+
+ dc_dmub_srv_cmd_queue(dmcub, &cmd);
dc_dmub_srv_cmd_execute(dmcub);
dc_dmub_srv_wait_idle(dmcub);
}
@@ -705,13 +721,19 @@ static void enable_disp_power_gating_dmcub(
struct dc_dmub_srv *dmcub,
struct enable_disp_power_gating_parameters_v2_1 *pwr)
{
- struct dmub_rb_cmd_enable_disp_power_gating power_gating;
+ union dmub_rb_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
- power_gating.header.type = DMUB_CMD__VBIOS;
- power_gating.header.sub_type = DMUB_CMD__VBIOS_ENABLE_DISP_POWER_GATING;
- power_gating.power_gating.pwr = *pwr;
+ cmd.enable_disp_power_gating.header.type = DMUB_CMD__VBIOS;
+ cmd.enable_disp_power_gating.header.sub_type =
+ DMUB_CMD__VBIOS_ENABLE_DISP_POWER_GATING;
+ cmd.enable_disp_power_gating.header.payload_bytes =
+ sizeof(cmd.enable_disp_power_gating) -
+ sizeof(cmd.enable_disp_power_gating.header);
+ cmd.enable_disp_power_gating.power_gating.pwr = *pwr;
- dc_dmub_srv_cmd_queue(dmcub, &power_gating.header);
+ dc_dmub_srv_cmd_queue(dmcub, &cmd);
dc_dmub_srv_cmd_execute(dmcub);
dc_dmub_srv_wait_idle(dmcub);
}
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index 3960a8db94cb..1e5a92b192a1 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -690,6 +690,26 @@ static void hack_bounding_box(struct dcn_bw_internal_vars *v,
struct dc_debug_options *dbg,
struct dc_state *context)
{
+ int i;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ /**
+ * Workaround for avoiding pipe-split in cases where we'd split
+ * planes that are too small, resulting in splits that aren't
+ * valid for the scaler.
+ */
+ if (pipe->plane_state &&
+ (pipe->plane_state->dst_rect.width <= 16 ||
+ pipe->plane_state->dst_rect.height <= 16 ||
+ pipe->plane_state->src_rect.width <= 16 ||
+ pipe->plane_state->src_rect.height <= 16)) {
+ hack_disable_optional_pipe_split(v);
+ return;
+ }
+ }
+
if (dbg->pipe_split_policy == MPC_SPLIT_AVOID)
hack_disable_optional_pipe_split(v);
@@ -702,7 +722,6 @@ static void hack_bounding_box(struct dcn_bw_internal_vars *v,
hack_force_pipe_split(v, context->streams[0]->timing.pix_clk_100hz);
}
-
unsigned int get_highest_allowed_voltage_level(uint32_t hw_internal_rev, uint32_t pci_revision_id)
{
/* for low power RV2 variants, the highest voltage level we want is 0 */
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index 8ec2dfe45d40..a5c2114e4292 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -90,7 +90,7 @@ void clk_mgr_exit_optimized_pwr_state(const struct dc *dc, struct clk_mgr *clk_m
dc->hwss.exit_optimized_pwr_state(dc, dc->current_state);
if (edp_link) {
- clk_mgr->psr_allow_active_cache = edp_link->psr_allow_active;
+ clk_mgr->psr_allow_active_cache = edp_link->psr_settings.psr_allow_active;
dc_link_set_psr_allow_active(edp_link, false, false);
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
index 26db1c5d4e4d..b210f8e9d592 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
@@ -131,7 +131,7 @@ int dce_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr_base)
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
int dprefclk_wdivider;
int dprefclk_src_sel;
- int dp_ref_clk_khz = 600000;
+ int dp_ref_clk_khz;
int target_div;
/* ASSERT DP Reference Clock source is from DFS*/
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c
index 97b7f32294fd..c320b7af7d34 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c
@@ -97,9 +97,6 @@ int rv1_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_di
VBIOSSMC_MSG_SetDispclkFreq,
requested_dispclk_khz / 1000);
- /* Actual dispclk set is returned in the parameter register */
- actual_dispclk_set_mhz = REG_READ(MP1_SMN_C2PMSG_83) * 1000;
-
if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {
if (clk_mgr->dfs_bypass_disp_clk != actual_dispclk_set_mhz)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 47431ca6986d..6f93a6ca4cf0 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -66,6 +66,8 @@
#include "dce/dce_i2c.h"
+#include "dmub/dmub_srv.h"
+
#define CTX \
dc->ctx
@@ -348,7 +350,7 @@ bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
for (i = 0; i < MAX_PIPES; i++) {
pipe = &dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe->stream == stream)
+ if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
break;
}
/* Stream not found */
@@ -365,6 +367,9 @@ bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
param.windowb_x_end = pipe->stream->timing.h_addressable;
param.windowb_y_end = pipe->stream->timing.v_addressable;
+ param.dsc_mode = pipe->stream->timing.flags.DSC ? 1:0;
+ param.odm_mode = pipe->next_odm_pipe ? 1:0;
+
/* Default to the union of both windows */
param.selection = UNION_WINDOW_A_B;
param.continuous_mode = continuous;
@@ -1011,9 +1016,17 @@ static void program_timing_sync(
}
}
- /* set first pipe with plane as master */
+ /* set first unblanked pipe as master */
for (j = 0; j < group_size; j++) {
- if (pipe_set[j]->plane_state) {
+ bool is_blanked;
+
+ if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
+ is_blanked =
+ pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
+ else
+ is_blanked =
+ pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
+ if (!is_blanked) {
if (j == 0)
break;
@@ -1034,9 +1047,17 @@ static void program_timing_sync(
status->timing_sync_info.master = false;
}
- /* remove any other pipes with plane as they have already been synced */
+ /* remove any other unblanked pipes as they have already been synced */
for (j = j + 1; j < group_size; j++) {
- if (pipe_set[j]->plane_state) {
+ bool is_blanked;
+
+ if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
+ is_blanked =
+ pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
+ else
+ is_blanked =
+ pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
+ if (!is_blanked) {
group_size--;
pipe_set[j] = pipe_set[group_size];
j--;
@@ -2204,7 +2225,7 @@ static void commit_planes_do_stream_update(struct dc *dc,
if (should_program_abm) {
if (*stream_update->abm_level == ABM_LEVEL_IMMEDIATE_DISABLE) {
- pipe_ctx->stream_res.abm->funcs->set_abm_immediate_disable(pipe_ctx->stream_res.abm);
+ dc->hwss.set_abm_immediate_disable(pipe_ctx);
} else {
pipe_ctx->stream_res.abm->funcs->set_abm_level(
pipe_ctx->stream_res.abm, stream->abm_level);
@@ -2517,6 +2538,12 @@ void dc_commit_updates_for_stream(struct dc *dc,
copy_stream_update_to_stream(dc, context, stream, stream_update);
+ if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
+ DC_ERROR("Mode validation failed for stream update!\n");
+ dc_release_state(context);
+ return;
+ }
+
commit_planes_for_stream(
dc,
srf_updates,
@@ -2640,33 +2667,12 @@ void dc_set_power_state(
void dc_resume(struct dc *dc)
{
-
uint32_t i;
for (i = 0; i < dc->link_count; i++)
core_link_resume(dc->links[i]);
}
-unsigned int dc_get_current_backlight_pwm(struct dc *dc)
-{
- struct abm *abm = dc->res_pool->abm;
-
- if (abm)
- return abm->funcs->get_current_backlight(abm);
-
- return 0;
-}
-
-unsigned int dc_get_target_backlight_pwm(struct dc *dc)
-{
- struct abm *abm = dc->res_pool->abm;
-
- if (abm)
- return abm->funcs->get_target_backlight(abm);
-
- return 0;
-}
-
bool dc_is_dmcu_initialized(struct dc *dc)
{
struct dmcu *dmcu = dc->res_pool->dmcu;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 67cfff1586e9..48ab51533d5d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -26,7 +26,7 @@
#include <linux/slab.h>
#include "dm_services.h"
-#include "atom.h"
+#include "atomfirmware.h"
#include "dm_helpers.h"
#include "dc.h"
#include "grph_object_id.h"
@@ -46,10 +46,11 @@
#include "dmcu.h"
#include "hw/clk_mgr.h"
#include "dce/dmub_psr.h"
+#include "dmub/dmub_srv.h"
+#include "inc/hw/panel_cntl.h"
#define DC_LOGGER_INIT(logger)
-
#define LINK_INFO(...) \
DC_LOG_HW_HOTPLUG( \
__VA_ARGS__)
@@ -64,11 +65,11 @@
enum {
PEAK_FACTOR_X1000 = 1006,
/*
- * Some receivers fail to train on first try and are good
- * on subsequent tries. 2 retries should be plenty. If we
- * don't have a successful training then we don't expect to
- * ever get one.
- */
+ * Some receivers fail to train on first try and are good
+ * on subsequent tries. 2 retries should be plenty. If we
+ * don't have a successful training then we don't expect to
+ * ever get one.
+ */
LINK_TRAINING_MAX_VERIFY_RETRY = 2
};
@@ -79,7 +80,7 @@ static void dc_link_destruct(struct dc_link *link)
{
int i;
- if (link->hpd_gpio != NULL) {
+ if (link->hpd_gpio) {
dal_gpio_destroy_irq(&link->hpd_gpio);
link->hpd_gpio = NULL;
}
@@ -87,7 +88,10 @@ static void dc_link_destruct(struct dc_link *link)
if (link->ddc)
dal_ddc_service_destroy(&link->ddc);
- if(link->link_enc)
+ if (link->panel_cntl)
+ link->panel_cntl->funcs->destroy(&link->panel_cntl);
+
+ if (link->link_enc)
link->link_enc->funcs->destroy(&link->link_enc);
if (link->local_sink)
@@ -98,8 +102,8 @@ static void dc_link_destruct(struct dc_link *link)
}
struct gpio *get_hpd_gpio(struct dc_bios *dcb,
- struct graphics_object_id link_id,
- struct gpio_service *gpio_service)
+ struct graphics_object_id link_id,
+ struct gpio_service *gpio_service)
{
enum bp_result bp_result;
struct graphics_object_hpd_info hpd_info;
@@ -116,10 +120,9 @@ struct gpio *get_hpd_gpio(struct dc_bios *dcb,
return NULL;
}
- return dal_gpio_service_create_irq(
- gpio_service,
- pin_info.offset,
- pin_info.mask);
+ return dal_gpio_service_create_irq(gpio_service,
+ pin_info.offset,
+ pin_info.mask);
}
/*
@@ -134,13 +137,10 @@ struct gpio *get_hpd_gpio(struct dc_bios *dcb,
* @return
* true on success, false otherwise
*/
-static bool program_hpd_filter(
- const struct dc_link *link)
+static bool program_hpd_filter(const struct dc_link *link)
{
bool result = false;
-
struct gpio *hpd;
-
int delay_on_connect_in_ms = 0;
int delay_on_disconnect_in_ms = 0;
@@ -159,10 +159,10 @@ static bool program_hpd_filter(
case SIGNAL_TYPE_DISPLAY_PORT_MST:
/* Program hpd filter to allow DP signal to settle */
/* 500: not able to detect MST <-> SST switch as HPD is low for
- * only 100ms on DELL U2413
- * 0: some passive dongle still show aux mode instead of i2c
- * 20-50:not enough to hide bouncing HPD with passive dongle.
- * also see intermittent i2c read issues.
+ * only 100ms on DELL U2413
+ * 0: some passive dongle still show aux mode instead of i2c
+ * 20-50: not enough to hide bouncing HPD with passive dongle.
+ * also see intermittent i2c read issues.
*/
delay_on_connect_in_ms = 80;
delay_on_disconnect_in_ms = 0;
@@ -175,7 +175,8 @@ static bool program_hpd_filter(
}
/* Obtain HPD handle */
- hpd = get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service);
+ hpd = get_hpd_gpio(link->ctx->dc_bios, link->link_id,
+ link->ctx->gpio_service);
if (!hpd)
return result;
@@ -226,8 +227,9 @@ bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type)
}
/* todo: may need to lock gpio access */
- hpd_pin = get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service);
- if (hpd_pin == NULL)
+ hpd_pin = get_hpd_gpio(link->ctx->dc_bios, link->link_id,
+ link->ctx->gpio_service);
+ if (!hpd_pin)
goto hpd_gpio_failure;
dal_gpio_open(hpd_pin, GPIO_MODE_INTERRUPT);
@@ -248,8 +250,7 @@ hpd_gpio_failure:
return false;
}
-static enum ddc_transaction_type get_ddc_transaction_type(
- enum signal_type sink_signal)
+static enum ddc_transaction_type get_ddc_transaction_type(enum signal_type sink_signal)
{
enum ddc_transaction_type transaction_type = DDC_TRANSACTION_TYPE_NONE;
@@ -270,7 +271,8 @@ static enum ddc_transaction_type get_ddc_transaction_type(
case SIGNAL_TYPE_DISPLAY_PORT_MST:
/* MST does not use I2COverAux, but there is the
* SPECIAL use case for "immediate dwnstrm device
- * access" (EPR#370830). */
+ * access" (EPR#370830).
+ */
transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
break;
@@ -281,9 +283,8 @@ static enum ddc_transaction_type get_ddc_transaction_type(
return transaction_type;
}
-static enum signal_type get_basic_signal_type(
- struct graphics_object_id encoder,
- struct graphics_object_id downstream)
+static enum signal_type get_basic_signal_type(struct graphics_object_id encoder,
+ struct graphics_object_id downstream)
{
if (downstream.type == OBJECT_TYPE_CONNECTOR) {
switch (downstream.id) {
@@ -369,10 +370,11 @@ bool dc_link_is_dp_sink_present(struct dc_link *link)
/* Open GPIO and set it to I2C mode */
/* Note: this GpioMode_Input will be converted
* to GpioConfigType_I2cAuxDualMode in GPIO component,
- * which indicates we need additional delay */
+ * which indicates we need additional delay
+ */
- if (GPIO_RESULT_OK != dal_ddc_open(
- ddc, GPIO_MODE_INPUT, GPIO_DDC_CONFIG_TYPE_MODE_I2C)) {
+ if (dal_ddc_open(ddc, GPIO_MODE_INPUT,
+ GPIO_DDC_CONFIG_TYPE_MODE_I2C) != GPIO_RESULT_OK) {
dal_ddc_close(ddc);
return present;
@@ -406,25 +408,25 @@ bool dc_link_is_dp_sink_present(struct dc_link *link)
* @brief
* Detect output sink type
*/
-static enum signal_type link_detect_sink(
- struct dc_link *link,
- enum dc_detect_reason reason)
+static enum signal_type link_detect_sink(struct dc_link *link,
+ enum dc_detect_reason reason)
{
- enum signal_type result = get_basic_signal_type(
- link->link_enc->id, link->link_id);
+ enum signal_type result = get_basic_signal_type(link->link_enc->id,
+ link->link_id);
/* Internal digital encoder will detect only dongles
- * that require digital signal */
+ * that require digital signal
+ */
/* Detection mechanism is different
* for different native connectors.
* LVDS connector supports only LVDS signal;
* PCIE is a bus slot, the actual connector needs to be detected first;
* eDP connector supports only eDP signal;
- * HDMI should check straps for audio */
+ * HDMI should check straps for audio
+ */
/* PCIE detects the actual connector on add-on board */
-
if (link->link_id.id == CONNECTOR_ID_PCIE) {
/* ZAZTODO implement PCIE add-on card detection */
}
@@ -432,8 +434,10 @@ static enum signal_type link_detect_sink(
switch (link->link_id.id) {
case CONNECTOR_ID_HDMI_TYPE_A: {
/* check audio support:
- * if native HDMI is not supported, switch to DVI */
- struct audio_support *aud_support = &link->dc->res_pool->audio_support;
+ * if native HDMI is not supported, switch to DVI
+ */
+ struct audio_support *aud_support =
+ &link->dc->res_pool->audio_support;
if (!aud_support->hdmi_audio_native)
if (link->link_id.id == CONNECTOR_ID_HDMI_TYPE_A)
@@ -461,16 +465,15 @@ static enum signal_type link_detect_sink(
return result;
}
-static enum signal_type decide_signal_from_strap_and_dongle_type(
- enum display_dongle_type dongle_type,
- struct audio_support *audio_support)
+static enum signal_type decide_signal_from_strap_and_dongle_type(enum display_dongle_type dongle_type,
+ struct audio_support *audio_support)
{
enum signal_type signal = SIGNAL_TYPE_NONE;
switch (dongle_type) {
case DISPLAY_DONGLE_DP_HDMI_DONGLE:
if (audio_support->hdmi_audio_on_dongle)
- signal = SIGNAL_TYPE_HDMI_TYPE_A;
+ signal = SIGNAL_TYPE_HDMI_TYPE_A;
else
signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
break;
@@ -491,16 +494,14 @@ static enum signal_type decide_signal_from_strap_and_dongle_type(
return signal;
}
-static enum signal_type dp_passive_dongle_detection(
- struct ddc_service *ddc,
- struct display_sink_capability *sink_cap,
- struct audio_support *audio_support)
+static enum signal_type dp_passive_dongle_detection(struct ddc_service *ddc,
+ struct display_sink_capability *sink_cap,
+ struct audio_support *audio_support)
{
- dal_ddc_service_i2c_query_dp_dual_mode_adaptor(
- ddc, sink_cap);
- return decide_signal_from_strap_and_dongle_type(
- sink_cap->dongle_type,
- audio_support);
+ dal_ddc_service_i2c_query_dp_dual_mode_adaptor(ddc, sink_cap);
+
+ return decide_signal_from_strap_and_dongle_type(sink_cap->dongle_type,
+ audio_support);
}
static void link_disconnect_sink(struct dc_link *link)
@@ -519,6 +520,96 @@ static void link_disconnect_remap(struct dc_sink *prev_sink, struct dc_link *lin
link->local_sink = prev_sink;
}
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+bool dc_link_is_hdcp14(struct dc_link *link)
+{
+ bool ret = false;
+
+ switch (link->connector_signal) {
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ ret = link->hdcp_caps.bcaps.bits.HDCP_CAPABLE;
+ break;
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ * HDMI doesn't tell us its HDCP (1.4) capability, so assume it is always capable.
+ * We can poll for BKSV, but some displays have an issue with this. Since it's so rare
+ * for a display to not be 1.4 capable, this assumption is OK.
+ */
+ ret = true;
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+bool dc_link_is_hdcp22(struct dc_link *link)
+{
+ bool ret = false;
+
+ switch (link->connector_signal) {
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ ret = (link->hdcp_caps.bcaps.bits.HDCP_CAPABLE &&
+ link->hdcp_caps.rx_caps.fields.byte0.hdcp_capable &&
+ (link->hdcp_caps.rx_caps.fields.version == 0x2)) ? 1 : 0;
+ break;
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ ret = (link->hdcp_caps.rx_caps.fields.version == 0x4) ? 1:0;
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static void query_hdcp_capability(enum signal_type signal, struct dc_link *link)
+{
+ struct hdcp_protection_message msg22;
+ struct hdcp_protection_message msg14;
+
+ memset(&msg22, 0, sizeof(struct hdcp_protection_message));
+ memset(&msg14, 0, sizeof(struct hdcp_protection_message));
+ memset(link->hdcp_caps.rx_caps.raw, 0,
+ sizeof(link->hdcp_caps.rx_caps.raw));
+
+ if ((link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
+ link->ddc->transaction_type ==
+ DDC_TRANSACTION_TYPE_I2C_OVER_AUX) ||
+ link->connector_signal == SIGNAL_TYPE_EDP) {
+ msg22.data = link->hdcp_caps.rx_caps.raw;
+ msg22.length = sizeof(link->hdcp_caps.rx_caps.raw);
+ msg22.msg_id = HDCP_MESSAGE_ID_RX_CAPS;
+ } else {
+ msg22.data = &link->hdcp_caps.rx_caps.fields.version;
+ msg22.length = sizeof(link->hdcp_caps.rx_caps.fields.version);
+ msg22.msg_id = HDCP_MESSAGE_ID_HDCP2VERSION;
+ }
+ msg22.version = HDCP_VERSION_22;
+ msg22.link = HDCP_LINK_PRIMARY;
+ msg22.max_retries = 5;
+ dc_process_hdcp_msg(signal, link, &msg22);
+
+ if (signal == SIGNAL_TYPE_DISPLAY_PORT || signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ enum hdcp_message_status status = HDCP_MESSAGE_UNSUPPORTED;
+
+ msg14.data = &link->hdcp_caps.bcaps.raw;
+ msg14.length = sizeof(link->hdcp_caps.bcaps.raw);
+ msg14.msg_id = HDCP_MESSAGE_ID_READ_BCAPS;
+ msg14.version = HDCP_VERSION_14;
+ msg14.link = HDCP_LINK_PRIMARY;
+ msg14.max_retries = 5;
+
+ status = dc_process_hdcp_msg(signal, link, &msg14);
+ }
+
+}
+#endif
static void read_current_link_settings_on_detect(struct dc_link *link)
{
@@ -532,18 +623,18 @@ static void read_current_link_settings_on_detect(struct dc_link *link)
// Read DPCD 00101h to find out the number of lanes currently set
for (i = 0; i < read_dpcd_retry_cnt; i++) {
- status = core_link_read_dpcd(
- link,
- DP_LANE_COUNT_SET,
- &lane_count_set.raw,
- sizeof(lane_count_set));
+ status = core_link_read_dpcd(link,
+ DP_LANE_COUNT_SET,
+ &lane_count_set.raw,
+ sizeof(lane_count_set));
/* First DPCD read after VDD ON can fail if the particular board
* does not have HPD pin wired correctly. So if DPCD read fails,
* which should never happen, retry a few times. Target worst
* case scenario of 80 ms.
*/
if (status == DC_OK) {
- link->cur_link_settings.lane_count = lane_count_set.bits.LANE_COUNT_SET;
+ link->cur_link_settings.lane_count =
+ lane_count_set.bits.LANE_COUNT_SET;
break;
}
@@ -552,7 +643,7 @@ static void read_current_link_settings_on_detect(struct dc_link *link)
// Read DPCD 00100h to find if standard link rates are set
core_link_read_dpcd(link, DP_LINK_BW_SET,
- &link_bw_set, sizeof(link_bw_set));
+ &link_bw_set, sizeof(link_bw_set));
if (link_bw_set == 0) {
if (link->connector_signal == SIGNAL_TYPE_EDP) {
@@ -560,12 +651,12 @@ static void read_current_link_settings_on_detect(struct dc_link *link)
* Read DPCD 00115h to find the edp link rate set used
*/
core_link_read_dpcd(link, DP_LINK_RATE_SET,
- &link_rate_set, sizeof(link_rate_set));
+ &link_rate_set, sizeof(link_rate_set));
// edp_supported_link_rates_count = 0 for DP
if (link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) {
link->cur_link_settings.link_rate =
- link->dpcd_caps.edp_supported_link_rates[link_rate_set];
+ link->dpcd_caps.edp_supported_link_rates[link_rate_set];
link->cur_link_settings.link_rate_set = link_rate_set;
link->cur_link_settings.use_link_rate_set = true;
}
@@ -579,7 +670,7 @@ static void read_current_link_settings_on_detect(struct dc_link *link)
}
// Read DPCD 00003h to find the max down spread.
core_link_read_dpcd(link, DP_MAX_DOWNSPREAD,
- &max_down_spread.raw, sizeof(max_down_spread));
+ &max_down_spread.raw, sizeof(max_down_spread));
link->cur_link_settings.link_spread =
max_down_spread.bits.MAX_DOWN_SPREAD ?
LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED;
@@ -612,6 +703,12 @@ static bool detect_dp(struct dc_link *link,
dal_ddc_service_set_transaction_type(link->ddc,
sink_caps->transaction_type);
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+ /* In case of fallback to SST when topology discovery below fails
+ * HDCP caps will be queried again later by the upper layer (caller
+ * of this function). */
+ query_hdcp_capability(SIGNAL_TYPE_DISPLAY_PORT_MST, link);
+#endif
/*
* This call will initiate MST topology discovery. Which
* will detect MST ports and add new DRM connector DRM
@@ -683,12 +780,12 @@ static bool is_same_edid(struct dc_edid *old_edid, struct dc_edid *new_edid)
if (new_edid->length == 0)
return false;
- return (memcmp(old_edid->raw_edid, new_edid->raw_edid, new_edid->length) == 0);
+ return (memcmp(old_edid->raw_edid,
+ new_edid->raw_edid, new_edid->length) == 0);
}
-static bool wait_for_alt_mode(struct dc_link *link)
+static bool wait_for_entering_dp_alt_mode(struct dc_link *link)
{
-
/**
* something is terribly wrong if the timeout is > 200ms (5Hz).
* 500 microseconds * 400 tries gives us 200 ms
@@ -703,7 +800,7 @@ static bool wait_for_alt_mode(struct dc_link *link)
DC_LOGGER_INIT(link->ctx->logger);
- if (link->link_enc->funcs->is_in_alt_mode == NULL)
+ if (!link->link_enc->funcs->is_in_alt_mode)
return true;
is_in_alt_mode = link->link_enc->funcs->is_in_alt_mode(link->link_enc);
@@ -718,21 +815,21 @@ static bool wait_for_alt_mode(struct dc_link *link)
udelay(sleep_time_in_microseconds);
/* ask the link if alt mode is enabled, if so return ok */
if (link->link_enc->funcs->is_in_alt_mode(link->link_enc)) {
-
finish_timestamp = dm_get_timestamp(link->ctx);
- time_taken_in_ns = dm_get_elapse_time_in_ns(
- link->ctx, finish_timestamp, enter_timestamp);
+ time_taken_in_ns =
+ dm_get_elapse_time_in_ns(link->ctx,
+ finish_timestamp,
+ enter_timestamp);
DC_LOG_WARNING("Alt mode entered finished after %llu ms\n",
div_u64(time_taken_in_ns, 1000000));
return true;
}
-
}
finish_timestamp = dm_get_timestamp(link->ctx);
time_taken_in_ns = dm_get_elapse_time_in_ns(link->ctx, finish_timestamp,
enter_timestamp);
DC_LOG_WARNING("Alt mode has timed out after %llu ms\n",
- div_u64(time_taken_in_ns, 1000000));
+ div_u64(time_taken_in_ns, 1000000));
return false;
}
@@ -768,30 +865,30 @@ static bool dc_link_detect_helper(struct dc_link *link,
return false;
if ((link->connector_signal == SIGNAL_TYPE_LVDS ||
- link->connector_signal == SIGNAL_TYPE_EDP) &&
- link->local_sink) {
-
+ link->connector_signal == SIGNAL_TYPE_EDP) &&
+ link->local_sink) {
// need to re-write OUI and brightness in resume case
if (link->connector_signal == SIGNAL_TYPE_EDP) {
dpcd_set_source_specific_data(link);
- dc_link_set_default_brightness_aux(link); //TODO: use cached
+ dc_link_set_default_brightness_aux(link);
+ //TODO: use cached
}
return true;
}
- if (false == dc_link_detect_sink(link, &new_connection_type)) {
+ if (!dc_link_detect_sink(link, &new_connection_type)) {
BREAK_TO_DEBUGGER();
return false;
}
prev_sink = link->local_sink;
- if (prev_sink != NULL) {
+ if (prev_sink) {
dc_sink_retain(prev_sink);
memcpy(&prev_dpcd_caps, &link->dpcd_caps, sizeof(struct dpcd_caps));
}
- link_disconnect_sink(link);
+ link_disconnect_sink(link);
if (new_connection_type != dc_connection_none) {
link->type = new_connection_type;
link->link_state_valid = false;
@@ -838,35 +935,31 @@ static bool dc_link_detect_helper(struct dc_link *link,
}
case SIGNAL_TYPE_DISPLAY_PORT: {
-
/* Workaround: HPD high coming too early */
if (link->link_enc->features.flags.bits.DP_IS_USB_C == 1) {
-
/* if alt mode times out, return false */
- if (wait_for_alt_mode(link) == false) {
+ if (!wait_for_entering_dp_alt_mode(link))
return false;
- }
}
- if (!detect_dp(
- link,
- &sink_caps,
- &converter_disable_audio,
- aud_support, reason)) {
- if (prev_sink != NULL)
+ if (!detect_dp(link, &sink_caps,
+ &converter_disable_audio,
+ aud_support, reason)) {
+ if (prev_sink)
dc_sink_release(prev_sink);
return false;
}
// Check if dpcp block is the same
- if (prev_sink != NULL) {
- if (memcmp(&link->dpcd_caps, &prev_dpcd_caps, sizeof(struct dpcd_caps)))
+ if (prev_sink) {
+ if (memcmp(&link->dpcd_caps, &prev_dpcd_caps,
+ sizeof(struct dpcd_caps)))
same_dpcd = false;
}
/* Active dongle downstream unplug*/
if (link->type == dc_connection_active_dongle &&
- link->dpcd_caps.sink_count.bits.SINK_COUNT == 0) {
- if (prev_sink != NULL)
+ link->dpcd_caps.sink_count.bits.SINK_COUNT == 0) {
+ if (prev_sink)
/* Downstream unplug */
dc_sink_release(prev_sink);
return true;
@@ -874,7 +967,7 @@ static bool dc_link_detect_helper(struct dc_link *link,
if (link->type == dc_connection_mst_branch) {
LINK_INFO("link=%d, mst branch is now Connected\n",
- link->link_index);
+ link->link_index);
/* Need to setup mst link_cap struct here
* otherwise dc_link_detect() will leave mst link_cap
* empty which leads to allocate_mst_payload() has "0"
@@ -882,15 +975,15 @@ static bool dc_link_detect_helper(struct dc_link *link,
*/
dp_verify_mst_link_cap(link);
- if (prev_sink != NULL)
+ if (prev_sink)
dc_sink_release(prev_sink);
return false;
}
// For seamless boot, to skip verify link cap, we read UEFI settings and set them as verified.
if (reason == DETECT_REASON_BOOT &&
- dc_ctx->dc->config.power_down_display_on_boot == false &&
- link->link_status.link_active == true)
+ !dc_ctx->dc->config.power_down_display_on_boot &&
+ link->link_status.link_active)
perform_dp_seamless_boot = true;
if (perform_dp_seamless_boot) {
@@ -903,24 +996,23 @@ static bool dc_link_detect_helper(struct dc_link *link,
default:
DC_ERROR("Invalid connector type! signal:%d\n",
- link->connector_signal);
- if (prev_sink != NULL)
+ link->connector_signal);
+ if (prev_sink)
dc_sink_release(prev_sink);
return false;
} /* switch() */
if (link->dpcd_caps.sink_count.bits.SINK_COUNT)
- link->dpcd_sink_count = link->dpcd_caps.sink_count.
- bits.SINK_COUNT;
+ link->dpcd_sink_count =
+ link->dpcd_caps.sink_count.bits.SINK_COUNT;
else
link->dpcd_sink_count = 1;
- dal_ddc_service_set_transaction_type(
- link->ddc,
- sink_caps.transaction_type);
+ dal_ddc_service_set_transaction_type(link->ddc,
+ sink_caps.transaction_type);
- link->aux_mode = dal_ddc_service_is_in_aux_transaction_mode(
- link->ddc);
+ link->aux_mode =
+ dal_ddc_service_is_in_aux_transaction_mode(link->ddc);
sink_init_data.link = link;
sink_init_data.sink_signal = sink_caps.signal;
@@ -928,7 +1020,7 @@ static bool dc_link_detect_helper(struct dc_link *link,
sink = dc_sink_create(&sink_init_data);
if (!sink) {
DC_ERROR("Failed to create sink!\n");
- if (prev_sink != NULL)
+ if (prev_sink)
dc_sink_release(prev_sink);
return false;
}
@@ -939,10 +1031,8 @@ static bool dc_link_detect_helper(struct dc_link *link,
/* dc_sink_create returns a new reference */
link->local_sink = sink;
- edid_status = dm_helpers_read_local_edid(
- link->ctx,
- link,
- sink);
+ edid_status = dm_helpers_read_local_edid(link->ctx,
+ link, sink);
switch (edid_status) {
case EDID_BAD_CHECKSUM:
@@ -950,7 +1040,6 @@ static bool dc_link_detect_helper(struct dc_link *link,
break;
case EDID_NO_RESPONSE:
DC_LOG_ERROR("No EDID read.\n");
-
/*
* Abort detection for non-DP connectors if we have
* no EDID
@@ -961,7 +1050,7 @@ static bool dc_link_detect_helper(struct dc_link *link,
*/
if (dc_is_hdmi_signal(link->connector_signal) ||
dc_is_dvi_signal(link->connector_signal)) {
- if (prev_sink != NULL)
+ if (prev_sink)
dc_sink_release(prev_sink);
return false;
@@ -974,45 +1063,53 @@ static bool dc_link_detect_helper(struct dc_link *link,
link->ctx->dc->debug.disable_fec = true;
// Check if edid is the same
- if ((prev_sink != NULL) && ((edid_status == EDID_THE_SAME) || (edid_status == EDID_OK)))
- same_edid = is_same_edid(&prev_sink->dc_edid, &sink->dc_edid);
+ if ((prev_sink) &&
+ (edid_status == EDID_THE_SAME || edid_status == EDID_OK))
+ same_edid = is_same_edid(&prev_sink->dc_edid,
+ &sink->dc_edid);
if (sink->edid_caps.panel_patch.skip_scdc_overwrite)
link->ctx->dc->debug.hdmi20_disable = true;
if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
- sink_caps.transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
+ sink_caps.transaction_type ==
+ DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
/*
* TODO debug why Dell 2413 doesn't like
* two link trainings
*/
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+ query_hdcp_capability(sink->sink_signal, link);
+#endif
// verify link cap for SST non-seamless boot
if (!perform_dp_seamless_boot)
dp_verify_link_cap_with_retries(link,
- &link->reported_link_cap,
- LINK_TRAINING_MAX_VERIFY_RETRY);
+ &link->reported_link_cap,
+ LINK_TRAINING_MAX_VERIFY_RETRY);
} else {
// If edid is the same, then discard new sink and revert back to original sink
if (same_edid) {
link_disconnect_remap(prev_sink, link);
sink = prev_sink;
prev_sink = NULL;
-
}
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+ query_hdcp_capability(sink->sink_signal, link);
+#endif
}
/* HDMI-DVI Dongle */
if (sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A &&
- !sink->edid_caps.edid_hdmi)
+ !sink->edid_caps.edid_hdmi)
sink->sink_signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
/* Connectivity log: detection */
for (i = 0; i < sink->dc_edid.length / DC_EDID_BLOCK_SIZE; i++) {
CONN_DATA_DETECT(link,
- &sink->dc_edid.raw_edid[i * DC_EDID_BLOCK_SIZE],
- DC_EDID_BLOCK_SIZE,
- "%s: [Block %d] ", sink->edid_caps.display_name, i);
+ &sink->dc_edid.raw_edid[i * DC_EDID_BLOCK_SIZE],
+ DC_EDID_BLOCK_SIZE,
+ "%s: [Block %d] ", sink->edid_caps.display_name, i);
}
DC_LOG_DETECTION_EDID_PARSER("%s: "
@@ -1047,17 +1144,18 @@ static bool dc_link_detect_helper(struct dc_link *link,
sink->edid_caps.audio_modes[i].sample_rate,
sink->edid_caps.audio_modes[i].sample_size);
}
-
} else {
/* From Connected-to-Disconnected. */
if (link->type == dc_connection_mst_branch) {
LINK_INFO("link=%d, mst branch is now Disconnected\n",
- link->link_index);
+ link->link_index);
dm_helpers_dp_mst_stop_top_mgr(link->ctx, link);
link->mst_stream_alloc_table.stream_count = 0;
- memset(link->mst_stream_alloc_table.stream_allocations, 0, sizeof(link->mst_stream_alloc_table.stream_allocations));
+ memset(link->mst_stream_alloc_table.stream_allocations,
+ 0,
+ sizeof(link->mst_stream_alloc_table.stream_allocations));
}
link->type = dc_connection_none;
@@ -1071,16 +1169,15 @@ static bool dc_link_detect_helper(struct dc_link *link,
}
LINK_INFO("link=%d, dc_sink_in=%p is now %s prev_sink=%p dpcd same=%d edid same=%d\n",
- link->link_index, sink,
- (sink_caps.signal == SIGNAL_TYPE_NONE ?
- "Disconnected":"Connected"), prev_sink,
- same_dpcd, same_edid);
+ link->link_index, sink,
+ (sink_caps.signal ==
+ SIGNAL_TYPE_NONE ? "Disconnected" : "Connected"),
+ prev_sink, same_dpcd, same_edid);
- if (prev_sink != NULL)
+ if (prev_sink)
dc_sink_release(prev_sink);
return true;
-
}
bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
@@ -1110,13 +1207,13 @@ bool dc_link_get_hpd_state(struct dc_link *dc_link)
return state;
}
-static enum hpd_source_id get_hpd_line(
- struct dc_link *link)
+static enum hpd_source_id get_hpd_line(struct dc_link *link)
{
struct gpio *hpd;
enum hpd_source_id hpd_id = HPD_SOURCEID_UNKNOWN;
- hpd = get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service);
+ hpd = get_hpd_gpio(link->ctx->dc_bios, link->link_id,
+ link->ctx->gpio_service);
if (hpd) {
switch (dal_irq_get_source(hpd)) {
@@ -1191,8 +1288,7 @@ static enum channel_id get_ddc_line(struct dc_link *link)
return channel;
}
-static enum transmitter translate_encoder_to_transmitter(
- struct graphics_object_id encoder)
+static enum transmitter translate_encoder_to_transmitter(struct graphics_object_id encoder)
{
switch (encoder.id) {
case ENCODER_ID_INTERNAL_UNIPHY:
@@ -1256,17 +1352,18 @@ static enum transmitter translate_encoder_to_transmitter(
}
}
-static bool dc_link_construct(
- struct dc_link *link,
- const struct link_init_data *init_params)
+static bool dc_link_construct(struct dc_link *link,
+ const struct link_init_data *init_params)
{
uint8_t i;
struct ddc_service_init_data ddc_service_init_data = { { 0 } };
struct dc_context *dc_ctx = init_params->ctx;
struct encoder_init_data enc_init_data = { 0 };
+ struct panel_cntl_init_data panel_cntl_init_data = { 0 };
struct integrated_info info = {{{ 0 }}};
struct dc_bios *bios = init_params->dc->ctx->dc_bios;
const struct dc_vbios_funcs *bp_funcs = bios->funcs;
+
DC_LOGGER_INIT(dc_ctx->logger);
link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
@@ -1278,23 +1375,27 @@ static bool dc_link_construct(
link->ctx = dc_ctx;
link->link_index = init_params->link_index;
- memset(&link->preferred_training_settings, 0, sizeof(struct dc_link_training_overrides));
- memset(&link->preferred_link_setting, 0, sizeof(struct dc_link_settings));
+ memset(&link->preferred_training_settings, 0,
+ sizeof(struct dc_link_training_overrides));
+ memset(&link->preferred_link_setting, 0,
+ sizeof(struct dc_link_settings));
- link->link_id = bios->funcs->get_connector_id(bios, init_params->connector_index);
+ link->link_id =
+ bios->funcs->get_connector_id(bios, init_params->connector_index);
if (link->link_id.type != OBJECT_TYPE_CONNECTOR) {
dm_output_to_console("%s: Invalid Connector ObjectID from Adapter Service for connector index:%d! type %d expected %d\n",
- __func__, init_params->connector_index,
- link->link_id.type, OBJECT_TYPE_CONNECTOR);
+ __func__, init_params->connector_index,
+ link->link_id.type, OBJECT_TYPE_CONNECTOR);
goto create_fail;
}
if (link->dc->res_pool->funcs->link_init)
link->dc->res_pool->funcs->link_init(link);
- link->hpd_gpio = get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service);
- if (link->hpd_gpio != NULL) {
+ link->hpd_gpio = get_hpd_gpio(link->ctx->dc_bios, link->link_id,
+ link->ctx->gpio_service);
+ if (link->hpd_gpio) {
dal_gpio_open(link->hpd_gpio, GPIO_MODE_INTERRUPT);
dal_gpio_unlock_pin(link->hpd_gpio);
link->irq_source_hpd = dal_irq_get_source(link->hpd_gpio);
@@ -1314,9 +1415,9 @@ static bool dc_link_construct(
link->connector_signal = SIGNAL_TYPE_DVI_DUAL_LINK;
break;
case CONNECTOR_ID_DISPLAY_PORT:
- link->connector_signal = SIGNAL_TYPE_DISPLAY_PORT;
+ link->connector_signal = SIGNAL_TYPE_DISPLAY_PORT;
- if (link->hpd_gpio != NULL)
+ if (link->hpd_gpio)
link->irq_source_hpd_rx =
dal_irq_get_rx_source(link->hpd_gpio);
@@ -1324,42 +1425,60 @@ static bool dc_link_construct(
case CONNECTOR_ID_EDP:
link->connector_signal = SIGNAL_TYPE_EDP;
- if (link->hpd_gpio != NULL) {
+ if (link->hpd_gpio) {
link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
link->irq_source_hpd_rx =
dal_irq_get_rx_source(link->hpd_gpio);
}
+
break;
case CONNECTOR_ID_LVDS:
link->connector_signal = SIGNAL_TYPE_LVDS;
break;
default:
- DC_LOG_WARNING("Unsupported Connector type:%d!\n", link->link_id.id);
+ DC_LOG_WARNING("Unsupported Connector type:%d!\n",
+ link->link_id.id);
goto create_fail;
}
/* TODO: #DAL3 Implement id to str function.*/
LINK_INFO("Connector[%d] description:"
- "signal %d\n",
- init_params->connector_index,
- link->connector_signal);
+ "signal %d\n",
+ init_params->connector_index,
+ link->connector_signal);
ddc_service_init_data.ctx = link->ctx;
ddc_service_init_data.id = link->link_id;
ddc_service_init_data.link = link;
link->ddc = dal_ddc_service_create(&ddc_service_init_data);
- if (link->ddc == NULL) {
+ if (!link->ddc) {
DC_ERROR("Failed to create ddc_service!\n");
goto ddc_create_fail;
}
link->ddc_hw_inst =
- dal_ddc_get_line(
- dal_ddc_service_get_ddc_pin(link->ddc));
+ dal_ddc_get_line(dal_ddc_service_get_ddc_pin(link->ddc));
+
+
+ if (link->dc->res_pool->funcs->panel_cntl_create &&
+ (link->link_id.id == CONNECTOR_ID_EDP ||
+ link->link_id.id == CONNECTOR_ID_LVDS)) {
+ panel_cntl_init_data.ctx = dc_ctx;
+ panel_cntl_init_data.inst = 0;
+ link->panel_cntl =
+ link->dc->res_pool->funcs->panel_cntl_create(
+ &panel_cntl_init_data);
+
+ if (link->panel_cntl == NULL) {
+ DC_ERROR("Failed to create link panel_cntl!\n");
+ goto panel_cntl_create_fail;
+ }
+ }
enc_init_data.ctx = dc_ctx;
- bp_funcs->get_src_obj(dc_ctx->dc_bios, link->link_id, 0, &enc_init_data.encoder);
+ bp_funcs->get_src_obj(dc_ctx->dc_bios, link->link_id, 0,
+ &enc_init_data.encoder);
enc_init_data.connector = link->link_id;
enc_init_data.channel = get_ddc_line(link);
enc_init_data.hpd_source = get_hpd_line(link);
@@ -1367,11 +1486,11 @@ static bool dc_link_construct(
link->hpd_src = enc_init_data.hpd_source;
enc_init_data.transmitter =
- translate_encoder_to_transmitter(enc_init_data.encoder);
- link->link_enc = link->dc->res_pool->funcs->link_enc_create(
- &enc_init_data);
+ translate_encoder_to_transmitter(enc_init_data.encoder);
+ link->link_enc =
+ link->dc->res_pool->funcs->link_enc_create(&enc_init_data);
- if (link->link_enc == NULL) {
+ if (!link->link_enc) {
DC_ERROR("Failed to create link encoder!\n");
goto link_enc_create_fail;
}
@@ -1379,8 +1498,9 @@ static bool dc_link_construct(
link->link_enc_hw_inst = link->link_enc->transmitter;
for (i = 0; i < 4; i++) {
- if (BP_RESULT_OK !=
- bp_funcs->get_device_tag(dc_ctx->dc_bios, link->link_id, i, &link->device_tag)) {
+ if (bp_funcs->get_device_tag(dc_ctx->dc_bios,
+ link->link_id, i,
+ &link->device_tag) != BP_RESULT_OK) {
DC_ERROR("Failed to find device tag!\n");
goto device_tag_fail;
}
@@ -1388,13 +1508,14 @@ static bool dc_link_construct(
/* Look for device tag that matches connector signal,
	 * CRT for rgb, LCD for other supported signal types
*/
- if (!bp_funcs->is_device_id_supported(dc_ctx->dc_bios, link->device_tag.dev_id))
+ if (!bp_funcs->is_device_id_supported(dc_ctx->dc_bios,
+ link->device_tag.dev_id))
continue;
- if (link->device_tag.dev_id.device_type == DEVICE_TYPE_CRT
- && link->connector_signal != SIGNAL_TYPE_RGB)
+ if (link->device_tag.dev_id.device_type == DEVICE_TYPE_CRT &&
+ link->connector_signal != SIGNAL_TYPE_RGB)
continue;
- if (link->device_tag.dev_id.device_type == DEVICE_TYPE_LCD
- && link->connector_signal == SIGNAL_TYPE_RGB)
+ if (link->device_tag.dev_id.device_type == DEVICE_TYPE_LCD &&
+ link->connector_signal == SIGNAL_TYPE_RGB)
continue;
break;
}
@@ -1406,16 +1527,16 @@ static bool dc_link_construct(
for (i = 0; i < MAX_NUMBER_OF_EXT_DISPLAY_PATH; i++) {
struct external_display_path *path =
&info.ext_disp_conn_info.path[i];
- if (path->device_connector_id.enum_id == link->link_id.enum_id
- && path->device_connector_id.id == link->link_id.id
- && path->device_connector_id.type == link->link_id.type) {
- if (link->device_tag.acpi_device != 0
- && path->device_acpi_enum == link->device_tag.acpi_device) {
+ if (path->device_connector_id.enum_id == link->link_id.enum_id &&
+ path->device_connector_id.id == link->link_id.id &&
+ path->device_connector_id.type == link->link_id.type) {
+ if (link->device_tag.acpi_device != 0 &&
+ path->device_acpi_enum == link->device_tag.acpi_device) {
link->ddi_channel_mapping = path->channel_mapping;
link->chip_caps = path->caps;
} else if (path->device_tag ==
- link->device_tag.dev_id.raw_device_tag) {
+ link->device_tag.dev_id.raw_device_tag) {
link->ddi_channel_mapping = path->channel_mapping;
link->chip_caps = path->caps;
}
@@ -1431,15 +1552,20 @@ static bool dc_link_construct(
*/
program_hpd_filter(link);
+ link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
+
return true;
device_tag_fail:
link->link_enc->funcs->destroy(&link->link_enc);
link_enc_create_fail:
+ if (link->panel_cntl != NULL)
+ link->panel_cntl->funcs->destroy(&link->panel_cntl);
+panel_cntl_create_fail:
dal_ddc_service_destroy(&link->ddc);
ddc_create_fail:
create_fail:
- if (link->hpd_gpio != NULL) {
+ if (link->hpd_gpio) {
dal_gpio_destroy_irq(&link->hpd_gpio);
link->hpd_gpio = NULL;
}
@@ -2339,9 +2465,28 @@ enum dc_status dc_link_validate_mode_timing(
return DC_OK;
}
+static struct abm *get_abm_from_stream_res(const struct dc_link *link)
+{
+ int i;
+ struct dc *dc = link->ctx->dc;
+ struct abm *abm = NULL;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx pipe_ctx = dc->current_state->res_ctx.pipe_ctx[i];
+ struct dc_stream_state *stream = pipe_ctx.stream;
+
+ if (stream && stream->link == link) {
+ abm = pipe_ctx.stream_res.abm;
+ break;
+ }
+ }
+ return abm;
+}
+
int dc_link_get_backlight_level(const struct dc_link *link)
{
- struct abm *abm = link->ctx->dc->res_pool->abm;
+
+ struct abm *abm = get_abm_from_stream_res(link);
if (abm == NULL || abm->funcs->get_current_backlight == NULL)
return DC_ERROR_UNEXPECTED;
@@ -2349,71 +2494,63 @@ int dc_link_get_backlight_level(const struct dc_link *link)
return (int) abm->funcs->get_current_backlight(abm);
}
-bool dc_link_set_backlight_level(const struct dc_link *link,
- uint32_t backlight_pwm_u16_16,
- uint32_t frame_ramp)
+int dc_link_get_target_backlight_pwm(const struct dc_link *link)
{
- struct dc *dc = link->ctx->dc;
- struct abm *abm = dc->res_pool->abm;
- struct dmcu *dmcu = dc->res_pool->dmcu;
- unsigned int controller_id = 0;
- bool use_smooth_brightness = true;
- int i;
- DC_LOGGER_INIT(link->ctx->logger);
+ struct abm *abm = get_abm_from_stream_res(link);
- if ((dmcu == NULL) ||
- (abm == NULL) ||
- (abm->funcs->set_backlight_level_pwm == NULL))
- return false;
+ if (abm == NULL || abm->funcs->get_target_backlight == NULL)
+ return DC_ERROR_UNEXPECTED;
- use_smooth_brightness = dmcu->funcs->is_dmcu_initialized(dmcu);
+ return (int) abm->funcs->get_target_backlight(abm);
+}
- DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n",
- backlight_pwm_u16_16, backlight_pwm_u16_16);
+static struct pipe_ctx *get_pipe_from_link(const struct dc_link *link)
+{
+ int i;
+ struct dc *dc = link->ctx->dc;
+ struct pipe_ctx *pipe_ctx = NULL;
- if (dc_is_embedded_signal(link->connector_signal)) {
- for (i = 0; i < MAX_PIPES; i++) {
- if (dc->current_state->res_ctx.pipe_ctx[i].stream) {
- if (dc->current_state->res_ctx.
- pipe_ctx[i].stream->link
- == link) {
- /* DMCU -1 for all controller id values,
- * therefore +1 here
- */
- controller_id =
- dc->current_state->
- res_ctx.pipe_ctx[i].stream_res.tg->inst +
- 1;
-
- /* Disable brightness ramping when the display is blanked
- * as it can hang the DMCU
- */
- if (dc->current_state->res_ctx.pipe_ctx[i].plane_state == NULL)
- frame_ramp = 0;
- }
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (dc->current_state->res_ctx.pipe_ctx[i].stream) {
+ if (dc->current_state->res_ctx.pipe_ctx[i].stream->link == link) {
+ pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+ break;
}
}
- abm->funcs->set_backlight_level_pwm(
- abm,
- backlight_pwm_u16_16,
- frame_ramp,
- controller_id,
- use_smooth_brightness);
}
- return true;
+ return pipe_ctx;
}
-bool dc_link_set_abm_disable(const struct dc_link *link)
+bool dc_link_set_backlight_level(const struct dc_link *link,
+ uint32_t backlight_pwm_u16_16,
+ uint32_t frame_ramp)
{
struct dc *dc = link->ctx->dc;
- struct abm *abm = dc->res_pool->abm;
- if ((abm == NULL) || (abm->funcs->set_backlight_level_pwm == NULL))
- return false;
+ DC_LOGGER_INIT(link->ctx->logger);
+ DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n",
+ backlight_pwm_u16_16, backlight_pwm_u16_16);
+
+ if (dc_is_embedded_signal(link->connector_signal)) {
+ struct pipe_ctx *pipe_ctx = get_pipe_from_link(link);
- abm->funcs->set_abm_immediate_disable(abm);
+ if (pipe_ctx) {
+ /* Disable brightness ramping when the display is blanked
+ * as it can hang the DMCU
+ */
+ if (pipe_ctx->plane_state == NULL)
+ frame_ramp = 0;
+ } else {
+ ASSERT(false);
+ return false;
+ }
+ dc->hwss.set_backlight_level(
+ pipe_ctx,
+ backlight_pwm_u16_16,
+ frame_ramp);
+ }
return true;
}
@@ -2423,12 +2560,12 @@ bool dc_link_set_psr_allow_active(struct dc_link *link, bool allow_active, bool
struct dmcu *dmcu = dc->res_pool->dmcu;
struct dmub_psr *psr = dc->res_pool->psr;
- if (psr != NULL && link->psr_feature_enabled)
+ if (psr != NULL && link->psr_settings.psr_feature_enabled)
psr->funcs->psr_enable(psr, allow_active);
- else if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_feature_enabled)
+ else if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_settings.psr_feature_enabled)
dmcu->funcs->set_psr_enable(dmcu, allow_active, wait);
- link->psr_allow_active = allow_active;
+ link->psr_settings.psr_allow_active = allow_active;
return true;
}
@@ -2439,9 +2576,9 @@ bool dc_link_get_psr_state(const struct dc_link *link, uint32_t *psr_state)
struct dmcu *dmcu = dc->res_pool->dmcu;
struct dmub_psr *psr = dc->res_pool->psr;
- if (psr != NULL && link->psr_feature_enabled)
+ if (psr != NULL && link->psr_settings.psr_feature_enabled)
psr->funcs->psr_get_state(psr, psr_state);
- else if (dmcu != NULL && link->psr_feature_enabled)
+ else if (dmcu != NULL && link->psr_settings.psr_feature_enabled)
dmcu->funcs->get_psr_state(dmcu, psr_state);
return true;
@@ -2612,14 +2749,14 @@ bool dc_link_setup_psr(struct dc_link *link,
psr_context->frame_delay = 0;
if (psr)
- link->psr_feature_enabled = psr->funcs->psr_copy_settings(psr, link, psr_context);
+ link->psr_settings.psr_feature_enabled = psr->funcs->psr_copy_settings(psr, link, psr_context);
else
- link->psr_feature_enabled = dmcu->funcs->setup_psr(dmcu, link, psr_context);
+ link->psr_settings.psr_feature_enabled = dmcu->funcs->setup_psr(dmcu, link, psr_context);
	/* psr_feature_enabled == 0 indicates setup_psr did not succeed, but this
* should not happen since firmware should be running at this point
*/
- if (link->psr_feature_enabled == 0)
+ if (link->psr_settings.psr_feature_enabled == 0)
ASSERT(0);
return true;
@@ -2966,7 +3103,7 @@ void core_link_enable_stream(
enum dc_status status;
DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
- if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) &&
+ if (!IS_DIAG_DC(dc->ctx->dce_environment) &&
dc_is_virtual_signal(pipe_ctx->stream->signal))
return;
@@ -3040,6 +3177,18 @@ void core_link_enable_stream(
if (pipe_ctx->stream->dpms_off)
return;
+ /* Have to setup DSC before DIG FE and BE are connected (which happens before the
+ * link training). This is to make sure the bandwidth sent to DIG BE won't be
+ * bigger than what the link and/or DIG BE can handle. VBID[6]/CompressedStream_flag
+ * will be automatically set at a later time when the video is enabled
+ * (DP_VID_STREAM_EN = 1).
+ */
+ if (pipe_ctx->stream->timing.flags.DSC) {
+ if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
+ dc_is_virtual_signal(pipe_ctx->stream->signal))
+ dp_set_dsc_enable(pipe_ctx, true);
+ }
+
status = enable_link(state, pipe_ctx);
if (status != DC_OK) {
@@ -3067,11 +3216,6 @@ void core_link_enable_stream(
CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
COLOR_DEPTH_UNDEFINED);
- if (pipe_ctx->stream->timing.flags.DSC) {
- if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
- dc_is_virtual_signal(pipe_ctx->stream->signal))
- dp_set_dsc_enable(pipe_ctx, true);
- }
dc->hwss.enable_stream(pipe_ctx);
/* Set DPS PPS SDP (AKA "info frames") */
@@ -3101,6 +3245,10 @@ void core_link_enable_stream(
dp_set_dsc_enable(pipe_ctx, true);
}
+
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
+ core_link_set_avmute(pipe_ctx, false);
+ }
}
void core_link_disable_stream(struct pipe_ctx *pipe_ctx)
@@ -3109,10 +3257,14 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx)
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->sink->link;
- if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) &&
+ if (!IS_DIAG_DC(dc->ctx->dce_environment) &&
dc_is_virtual_signal(pipe_ctx->stream->signal))
return;
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
+ core_link_set_avmute(pipe_ctx, true);
+ }
+
#if defined(CONFIG_DRM_AMD_DC_HDCP)
update_psp_stream_config(pipe_ctx, true);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
index 256889eed93e..aefd29a440b5 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -599,7 +599,7 @@ bool dal_ddc_submit_aux_command(struct ddc_service *ddc,
do {
struct aux_payload current_payload;
bool is_end_of_payload = (retrieved + DEFAULT_AUX_MAX_DATA_SIZE) >
- payload->length ? true : false;
+ payload->length;
current_payload.address = payload->address;
current_payload.data = &payload->data[retrieved];
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index caa090d0b6ac..91cd884d6f25 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -13,7 +13,6 @@
#include "core_status.h"
#include "dpcd_defs.h"
-#include "resource.h"
#define DC_LOGGER \
link->ctx->logger
@@ -1737,19 +1736,10 @@ bool dc_link_dp_sync_lt_end(struct dc_link *link, bool link_down)
static struct dc_link_settings get_max_link_cap(struct dc_link *link)
{
- /* Set Default link settings */
- struct dc_link_settings max_link_cap = {LANE_COUNT_FOUR, LINK_RATE_HIGH,
- LINK_SPREAD_05_DOWNSPREAD_30KHZ, false, 0};
-
- /* Higher link settings based on feature supported */
- if (link->link_enc->features.flags.bits.IS_HBR2_CAPABLE)
- max_link_cap.link_rate = LINK_RATE_HIGH2;
-
- if (link->link_enc->features.flags.bits.IS_HBR3_CAPABLE)
- max_link_cap.link_rate = LINK_RATE_HIGH3;
+ struct dc_link_settings max_link_cap = {0};
- if (link->link_enc->funcs->get_max_link_cap)
- link->link_enc->funcs->get_max_link_cap(link->link_enc, &max_link_cap);
+ /* get max link encoder capability */
+ link->link_enc->funcs->get_max_link_cap(link->link_enc, &max_link_cap);
/* Lower link settings based on sink's link cap */
if (link->reported_link_cap.lane_count < max_link_cap.lane_count)
@@ -2453,7 +2443,7 @@ static bool handle_hpd_irq_psr_sink(struct dc_link *link)
{
union dpcd_psr_configuration psr_configuration;
- if (!link->psr_feature_enabled)
+ if (!link->psr_settings.psr_feature_enabled)
return false;
dm_helpers_dp_read_dpcd(
@@ -2557,7 +2547,7 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
/* get phy test pattern and pattern parameters from DP receiver */
core_link_read_dpcd(
link,
- DP_TEST_PHY_PATTERN,
+ DP_PHY_TEST_PATTERN,
&dpcd_test_pattern.raw,
sizeof(dpcd_test_pattern));
core_link_read_dpcd(
@@ -4267,7 +4257,7 @@ void dpcd_set_source_specific_data(struct dc_link *link)
{
const uint32_t post_oui_delay = 30; // 30ms
uint8_t dspc = 0;
- enum dc_status ret = DC_ERROR_UNEXPECTED;
+ enum dc_status ret;
ret = core_link_read_dpcd(link, DP_DOWN_STREAM_PORT_COUNT, &dspc,
sizeof(dspc));
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
index 51e0ee6e7695..6590f51caefa 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
@@ -400,7 +400,7 @@ static bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable)
struct dc_stream_state *stream = pipe_ctx->stream;
bool result = false;
- if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
+ if (dc_is_virtual_signal(stream->signal) || IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
result = true;
else
result = dm_helpers_dp_write_dsc_enable(dc->ctx, stream, enable);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index f4bcc71b2920..0c5619364e7d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -532,6 +532,24 @@ static inline void get_vp_scan_direction(
*flip_horz_scan_dir = !*flip_horz_scan_dir;
}
+int get_num_mpc_splits(struct pipe_ctx *pipe)
+{
+ int mpc_split_count = 0;
+ struct pipe_ctx *other_pipe = pipe->bottom_pipe;
+
+ while (other_pipe && other_pipe->plane_state == pipe->plane_state) {
+ mpc_split_count++;
+ other_pipe = other_pipe->bottom_pipe;
+ }
+ other_pipe = pipe->top_pipe;
+ while (other_pipe && other_pipe->plane_state == pipe->plane_state) {
+ mpc_split_count++;
+ other_pipe = other_pipe->top_pipe;
+ }
+
+ return mpc_split_count;
+}
+
int get_num_odm_splits(struct pipe_ctx *pipe)
{
int odm_split_count = 0;
@@ -556,16 +574,11 @@ static void calculate_split_count_and_index(struct pipe_ctx *pipe_ctx, int *spli
/*Check for mpc split*/
struct pipe_ctx *split_pipe = pipe_ctx->top_pipe;
+ *split_count = get_num_mpc_splits(pipe_ctx);
while (split_pipe && split_pipe->plane_state == pipe_ctx->plane_state) {
(*split_idx)++;
- (*split_count)++;
split_pipe = split_pipe->top_pipe;
}
- split_pipe = pipe_ctx->bottom_pipe;
- while (split_pipe && split_pipe->plane_state == pipe_ctx->plane_state) {
- (*split_count)++;
- split_pipe = split_pipe->bottom_pipe;
- }
} else {
/*Get odm split index*/
struct pipe_ctx *split_pipe = pipe_ctx->prev_odm_pipe;
@@ -692,6 +705,9 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx)
/* Round up, assume original video size always even dimensions */
data->viewport_c.width = (data->viewport.width + vpc_div - 1) / vpc_div;
data->viewport_c.height = (data->viewport.height + vpc_div - 1) / vpc_div;
+
+ data->viewport_unadjusted = data->viewport;
+ data->viewport_c_unadjusted = data->viewport_c;
}
static void calculate_recout(struct pipe_ctx *pipe_ctx)
@@ -1061,8 +1077,8 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
calculate_viewport(pipe_ctx);
- if (pipe_ctx->plane_res.scl_data.viewport.height < 16 ||
- pipe_ctx->plane_res.scl_data.viewport.width < 16) {
+ if (pipe_ctx->plane_res.scl_data.viewport.height < 12 ||
+ pipe_ctx->plane_res.scl_data.viewport.width < 12) {
if (store_h_border_left) {
restore_border_left_from_dst(pipe_ctx,
store_h_border_left);
@@ -1358,9 +1374,6 @@ bool dc_add_plane_to_context(
dc_plane_state_retain(plane_state);
while (head_pipe) {
- tail_pipe = resource_get_tail_pipe(&context->res_ctx, head_pipe);
- ASSERT(tail_pipe);
-
free_pipe = acquire_free_pipe_for_head(context, pool, head_pipe);
#if defined(CONFIG_DRM_AMD_DC_DCN)
@@ -1378,6 +1391,8 @@ bool dc_add_plane_to_context(
free_pipe->plane_state = plane_state;
if (head_pipe != free_pipe) {
+ tail_pipe = resource_get_tail_pipe(&context->res_ctx, head_pipe);
+ ASSERT(tail_pipe);
free_pipe->stream_res.tg = tail_pipe->stream_res.tg;
free_pipe->stream_res.abm = tail_pipe->stream_res.abm;
free_pipe->stream_res.opp = tail_pipe->stream_res.opp;
@@ -1545,35 +1560,6 @@ bool dc_add_all_planes_for_stream(
return add_all_planes_for_stream(dc, stream, &set, 1, context);
}
-
-static bool is_hdr_static_meta_changed(struct dc_stream_state *cur_stream,
- struct dc_stream_state *new_stream)
-{
- if (cur_stream == NULL)
- return true;
-
- if (memcmp(&cur_stream->hdr_static_metadata,
- &new_stream->hdr_static_metadata,
- sizeof(struct dc_info_packet)) != 0)
- return true;
-
- return false;
-}
-
-static bool is_vsc_info_packet_changed(struct dc_stream_state *cur_stream,
- struct dc_stream_state *new_stream)
-{
- if (cur_stream == NULL)
- return true;
-
- if (memcmp(&cur_stream->vsc_infopacket,
- &new_stream->vsc_infopacket,
- sizeof(struct dc_info_packet)) != 0)
- return true;
-
- return false;
-}
-
static bool is_timing_changed(struct dc_stream_state *cur_stream,
struct dc_stream_state *new_stream)
{
@@ -1608,15 +1594,9 @@ static bool are_stream_backends_same(
if (is_timing_changed(stream_a, stream_b))
return false;
- if (is_hdr_static_meta_changed(stream_a, stream_b))
- return false;
-
if (stream_a->dpms_off != stream_b->dpms_off)
return false;
- if (is_vsc_info_packet_changed(stream_a, stream_b))
- return false;
-
return true;
}
@@ -1756,21 +1736,6 @@ static struct audio *find_first_free_audio(
return 0;
}
-bool resource_is_stream_unchanged(
- struct dc_state *old_context, struct dc_stream_state *stream)
-{
- int i;
-
- for (i = 0; i < old_context->stream_count; i++) {
- struct dc_stream_state *old_stream = old_context->streams[i];
-
- if (are_stream_backends_same(old_stream, stream))
- return true;
- }
-
- return false;
-}
-
/**
* dc_add_stream_to_ctx() - Add a new dc_stream_state to a dc_state.
*/
@@ -2025,17 +1990,6 @@ enum dc_status resource_map_pool_resources(
int pipe_idx = -1;
struct dc_bios *dcb = dc->ctx->dc_bios;
- /* TODO Check if this is needed */
- /*if (!resource_is_stream_unchanged(old_context, stream)) {
- if (stream != NULL && old_context->streams[i] != NULL) {
- stream->bit_depth_params =
- old_context->streams[i]->bit_depth_params;
- stream->clamping = old_context->streams[i]->clamping;
- continue;
- }
- }
- */
-
calculate_phy_pix_clks(stream);
/* TODO: Check Linux */
@@ -2718,19 +2672,16 @@ bool pipe_need_reprogram(
if (is_timing_changed(pipe_ctx_old->stream, pipe_ctx->stream))
return true;
- if (is_hdr_static_meta_changed(pipe_ctx_old->stream, pipe_ctx->stream))
- return true;
-
if (pipe_ctx_old->stream->dpms_off != pipe_ctx->stream->dpms_off)
return true;
- if (is_vsc_info_packet_changed(pipe_ctx_old->stream, pipe_ctx->stream))
- return true;
-
if (false == pipe_ctx_old->stream->link->link_state_valid &&
false == pipe_ctx_old->stream->dpms_off)
return true;
+ if (pipe_ctx_old->stream_res.dsc != pipe_ctx->stream_res.dsc)
+ return true;
+
return false;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_sink.c b/drivers/gpu/drm/amd/display/dc/core/dc_sink.c
index a249a0e5edd0..9e16af22e4aa 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_sink.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_sink.c
@@ -54,6 +54,7 @@ static bool dc_sink_construct(struct dc_sink *sink, const struct dc_sink_init_da
sink->ctx = link->ctx;
sink->dongle_max_pix_clk = init_params->dongle_max_pix_clk;
sink->converter_disable_audio = init_params->converter_disable_audio;
+ sink->is_mst_legacy = init_params->sink_is_legacy;
sink->dc_container_id = NULL;
sink->sink_id = init_params->link->ctx->dc_sink_id_count;
// increment dc_sink_id_count because we don't want two sinks with same ID
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 1935cf6601eb..85908561c741 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -29,6 +29,9 @@
#include "dc_types.h"
#include "grph_object_defs.h"
#include "logger_types.h"
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+#include "hdcp_types.h"
+#endif
#include "gpio_types.h"
#include "link_service_types.h"
#include "grph_object_ctrl_defs.h"
@@ -39,7 +42,7 @@
#include "inc/hw/dmcu.h"
#include "dml/display_mode_lib.h"
-#define DC_VER "3.2.76"
+#define DC_VER "3.2.84"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -95,6 +98,49 @@ struct dc_plane_cap {
} max_downscale_factor;
};
+// Color management caps (DPP and MPC)
+struct rom_curve_caps {
+ uint16_t srgb : 1;
+ uint16_t bt2020 : 1;
+ uint16_t gamma2_2 : 1;
+ uint16_t pq : 1;
+ uint16_t hlg : 1;
+};
+
+struct dpp_color_caps {
+ uint16_t dcn_arch : 1; // all DCE generations treated the same
+	// input lut is different from most LUTs, just a plain 256-entry lookup
+ uint16_t input_lut_shared : 1; // shared with DGAM
+ uint16_t icsc : 1;
+ uint16_t dgam_ram : 1;
+ uint16_t post_csc : 1; // before gamut remap
+ uint16_t gamma_corr : 1;
+
+ // hdr_mult and gamut remap always available in DPP (in that order)
+ // 3d lut implies shaper LUT,
+ // it may be shared with MPC - check MPC:shared_3d_lut flag
+ uint16_t hw_3d_lut : 1;
+ uint16_t ogam_ram : 1; // blnd gam
+ uint16_t ocsc : 1;
+ struct rom_curve_caps dgam_rom_caps;
+ struct rom_curve_caps ogam_rom_caps;
+};
+
+struct mpc_color_caps {
+ uint16_t gamut_remap : 1;
+ uint16_t ogam_ram : 1;
+ uint16_t ocsc : 1;
+	uint16_t num_3dluts : 3; // 3D LUT always assumes a preceding shaper LUT
+	uint16_t shared_3d_lut : 1; // can be in either DPP or MPC, but single instance
+
+ struct rom_curve_caps ogam_rom_caps;
+};
+
+struct dc_color_caps {
+ struct dpp_color_caps dpp;
+ struct mpc_color_caps mpc;
+};
+
struct dc_caps {
uint32_t max_streams;
uint32_t max_links;
@@ -117,9 +163,9 @@ struct dc_caps {
bool psp_setup_panel_mode;
bool extended_aux_timeout_support;
bool dmcub_support;
- bool hw_3d_lut;
enum dp_protocol_version max_dp_protocol_version;
struct dc_plane_cap planes[MAX_PLANES];
+ struct dc_color_caps color;
};
struct dc_bug_wa {
@@ -230,7 +276,8 @@ struct dc_config {
bool forced_clocks;
bool disable_extended_timeout_support; // Used to disable extended timeout and lttpr feature as well
bool multi_mon_pp_mclk_switch;
- bool psr_on_dmub;
+ bool disable_dmcu;
+ bool enable_4to1MPC;
};
enum visual_confirm {
@@ -238,6 +285,7 @@ enum visual_confirm {
VISUAL_CONFIRM_SURFACE = 1,
VISUAL_CONFIRM_HDR = 2,
VISUAL_CONFIRM_MPCTREE = 4,
+ VISUAL_CONFIRM_PSR = 5,
};
enum dcc_option {
@@ -429,6 +477,7 @@ struct dc_debug_options {
bool enable_dmcub_surface_flip;
bool usbc_combo_phy_reset_wa;
bool disable_dsc;
+ bool enable_dram_clock_change_one_display_vactive;
};
struct dc_debug_data {
@@ -474,6 +523,7 @@ struct dc_bounding_box_overrides {
int urgent_latency_ns;
int percent_of_ideal_drambw;
int dram_clock_change_latency_ns;
+ int dummy_clock_change_latency_ns;
/* This forces a hard min on the DCFCLK we use
* for DML. Unlike the debug option for forcing
* DCFCLK, this override affects watermark calculations
@@ -987,6 +1037,7 @@ struct dpcd_caps {
union dpcd_fec_capability fec_cap;
struct dpcd_dsc_capabilities dsc_caps;
struct dc_lttpr_caps lttpr_caps;
+ struct psr_caps psr_caps;
};
@@ -1004,6 +1055,35 @@ union dpcd_sink_ext_caps {
uint8_t raw;
};
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+union hdcp_rx_caps {
+ struct {
+ uint8_t version;
+ uint8_t reserved;
+ struct {
+ uint8_t repeater : 1;
+ uint8_t hdcp_capable : 1;
+ uint8_t reserved : 6;
+ } byte0;
+ } fields;
+ uint8_t raw[3];
+};
+
+union hdcp_bcaps {
+ struct {
+ uint8_t HDCP_CAPABLE:1;
+ uint8_t REPEATER:1;
+ uint8_t RESERVED:6;
+ } bits;
+ uint8_t raw;
+};
+
+struct hdcp_caps {
+ union hdcp_rx_caps rx_caps;
+ union hdcp_bcaps bcaps;
+};
+#endif
+
#include "dc_link.h"
/*******************************************************************************
@@ -1046,7 +1126,7 @@ struct dc_sink {
void *priv;
struct stereo_3d_features features_3d[TIMING_3D_FORMAT_MAX];
bool converter_disable_audio;
-
+ bool is_mst_legacy;
struct dc_sink_dsc_caps dsc_caps;
struct dc_sink_fec_caps fec_caps;
@@ -1073,6 +1153,7 @@ struct dc_sink_init_data {
struct dc_link *link;
uint32_t dongle_max_pix_clk;
bool converter_disable_audio;
+ bool sink_is_legacy;
};
struct dc_sink *dc_sink_create(const struct dc_sink_init_data *init_params);
@@ -1104,9 +1185,16 @@ void dc_set_power_state(
struct dc *dc,
enum dc_acpi_cm_power_state power_state);
void dc_resume(struct dc *dc);
-unsigned int dc_get_current_backlight_pwm(struct dc *dc);
-unsigned int dc_get_target_backlight_pwm(struct dc *dc);
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+/*
+ * HDCP Interfaces
+ */
+enum hdcp_message_status dc_process_hdcp_msg(
+ enum signal_type signal,
+ struct dc_link *link,
+ struct hdcp_protection_message *message_info);
+#endif
bool dc_is_dmcu_initialized(struct dc *dc);
enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index 59c298a6484f..eea2429ac67d 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -25,7 +25,7 @@
#include "dc.h"
#include "dc_dmub_srv.h"
-#include "../dmub/inc/dmub_srv.h"
+#include "../dmub/dmub_srv.h"
static void dc_dmub_srv_construct(struct dc_dmub_srv *dc_srv, struct dc *dc,
struct dmub_srv *dmub)
@@ -58,7 +58,7 @@ void dc_dmub_srv_destroy(struct dc_dmub_srv **dmub_srv)
}
void dc_dmub_srv_cmd_queue(struct dc_dmub_srv *dc_dmub_srv,
- struct dmub_cmd_header *cmd)
+ union dmub_rb_cmd *cmd)
{
struct dmub_srv *dmub = dc_dmub_srv->dmub;
struct dc_context *dc_ctx = dc_dmub_srv->ctx;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
index 754b6077539c..a3a09ccb6d26 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
@@ -27,10 +27,9 @@
#define _DMUB_DC_SRV_H_
#include "os_types.h"
-#include "../dmub/inc/dmub_cmd.h"
+#include "dmub/dmub_srv.h"
struct dmub_srv;
-struct dmub_cmd_header;
struct dc_reg_helper_state {
bool gather_in_progress;
@@ -49,7 +48,7 @@ struct dc_dmub_srv {
};
void dc_dmub_srv_cmd_queue(struct dc_dmub_srv *dc_dmub_srv,
- struct dmub_cmd_header *cmd);
+ union dmub_rb_cmd *cmd);
void dc_dmub_srv_cmd_execute(struct dc_dmub_srv *dc_dmub_srv);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index bb2730e9521e..af177c087d3b 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -740,5 +740,11 @@ struct dpcd_dsc_capabilities {
union dpcd_dsc_ext_capabilities dsc_ext_caps;
};
+/* These parameters are from PSR capabilities reported by Sink DPCD */
+struct psr_caps {
+ unsigned char psr_version;
+ unsigned int psr_rfb_setup_time;
+ bool psr_exit_link_training_required;
+};
#endif /* DC_DP_TYPES_H */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c
index 737048d8a96c..85a0170be544 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c
@@ -50,7 +50,7 @@ static inline void submit_dmub_read_modify_write(
gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress;
ctx->dmub_srv->reg_helper_offload.gather_in_progress = false;
- dc_dmub_srv_cmd_queue(ctx->dmub_srv, &cmd_buf->header);
+ dc_dmub_srv_cmd_queue(ctx->dmub_srv, &offload->cmd_data);
ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather;
@@ -73,7 +73,7 @@ static inline void submit_dmub_burst_write(
gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress;
ctx->dmub_srv->reg_helper_offload.gather_in_progress = false;
- dc_dmub_srv_cmd_queue(ctx->dmub_srv, &cmd_buf->header);
+ dc_dmub_srv_cmd_queue(ctx->dmub_srv, &offload->cmd_data);
ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather;
@@ -92,7 +92,7 @@ static inline void submit_dmub_reg_wait(
gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress;
ctx->dmub_srv->reg_helper_offload.gather_in_progress = false;
- dc_dmub_srv_cmd_queue(ctx->dmub_srv, &cmd_buf->header);
+ dc_dmub_srv_cmd_queue(ctx->dmub_srv, &offload->cmd_data);
memset(cmd_buf, 0, sizeof(*cmd_buf));
offload->reg_seq_count = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index 00ff5e98278c..f63fc25aa6c5 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -66,6 +66,22 @@ struct time_stamp {
struct link_trace {
struct time_stamp time_stamp;
};
+
+/* PSR feature flags */
+struct psr_settings {
+ bool psr_feature_enabled; // PSR is supported by sink
+ bool psr_allow_active; // PSR is currently active
+ enum dc_psr_version psr_version; // Internal PSR version, determined based on DPCD
+
+	/* These parameters are calculated by the driver,
+	 * based on display timing and sink capabilities.
+	 * If the VBLANK region is too small and the sink takes a long time
+	 * to set up RFB, it may take an extra frame to enter the PSR state.
+ */
+ bool psr_frame_capture_indication_req;
+ unsigned int psr_sdp_transmit_line_num_deadline;
+};
+
/*
* A link contains one or more sinks and their connected status.
* The currently active signal type (HDMI, DP-SST, DP-MST) is also reported.
@@ -118,6 +134,7 @@ struct dc_link {
struct dc_context *ctx;
+ struct panel_cntl *panel_cntl;
struct link_encoder *link_enc;
struct graphics_object_id link_id;
union ddi_channel_mapping ddi_channel_mapping;
@@ -126,11 +143,14 @@ struct dc_link {
uint32_t dongle_max_pix_clk;
unsigned short chip_caps;
unsigned int dpcd_sink_count;
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+ struct hdcp_caps hdcp_caps;
+#endif
enum edp_revision edp_revision;
- bool psr_feature_enabled;
- bool psr_allow_active;
union dpcd_sink_ext_caps dpcd_sink_ext_caps;
+ struct psr_settings psr_settings;
+
/* MST record stream using this link */
struct link_flags {
bool dp_keep_receiver_powered;
@@ -197,7 +217,7 @@ bool dc_link_set_default_brightness_aux(struct dc_link *link);
int dc_link_get_backlight_level(const struct dc_link *dc_link);
-bool dc_link_set_abm_disable(const struct dc_link *dc_link);
+int dc_link_get_target_backlight_pwm(const struct dc_link *link);
bool dc_link_set_psr_allow_active(struct dc_link *dc_link, bool enable, bool wait);
@@ -290,6 +310,10 @@ bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type);
* DPCD access interfaces
*/
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+bool dc_link_is_hdcp14(struct dc_link *link);
+bool dc_link_is_hdcp22(struct dc_link *link);
+#endif
void dc_link_set_drive_settings(struct dc *dc,
struct link_training_settings *lt_settings,
const struct dc_link *link);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index a5c7ef47b8d3..49aad691e687 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -167,8 +167,6 @@ struct dc_stream_state {
/* TODO: custom INFO packets */
/* TODO: ABM info (DMCU) */
- /* PSR info */
- unsigned char psr_version;
/* TODO: CEA VIC */
/* DMCU info */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index 0d210104ba0a..f236da1c1859 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -862,4 +862,9 @@ struct dsc_dec_dpcd_caps {
uint32_t branch_max_line_width;
};
+enum dc_psr_version {
+ DC_PSR_VERSION_1 = 0,
+ DC_PSR_VERSION_UNSUPPORTED = 0xFFFFFFFF,
+};
+
#endif /* DC_TYPES_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/Makefile b/drivers/gpu/drm/amd/display/dc/dce/Makefile
index fbfcff700971..f704a8fd52e8 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce/Makefile
@@ -29,7 +29,7 @@
DCE = dce_audio.o dce_stream_encoder.o dce_link_encoder.o dce_hwseq.o \
dce_mem_input.o dce_clock_source.o dce_scl_filters.o dce_transform.o \
dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o dce_aux.o \
-dce_i2c.o dce_i2c_hw.o dce_i2c_sw.o dmub_psr.o
+dce_i2c.o dce_i2c_hw.o dce_i2c_sw.o dmub_psr.o dmub_abm.o dce_panel_cntl.o
AMD_DAL_DCE = $(addprefix $(AMDDALPATH)/dc/dce/,$(DCE))
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
index b8a3fc505c9b..4e87e70237e3 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
@@ -55,7 +55,7 @@
#define MCP_DISABLE_ABM_IMMEDIATELY 255
-static bool dce_abm_set_pipe(struct abm *abm, uint32_t controller_id)
+static bool dce_abm_set_pipe(struct abm *abm, uint32_t controller_id, uint32_t panel_inst)
{
struct dce_abm *abm_dce = TO_DCE_ABM(abm);
uint32_t rampingBoundary = 0xFFFF;
@@ -83,125 +83,12 @@ static bool dce_abm_set_pipe(struct abm *abm, uint32_t controller_id)
return true;
}
-static unsigned int calculate_16_bit_backlight_from_pwm(struct dce_abm *abm_dce)
-{
- uint64_t current_backlight;
- uint32_t round_result;
- uint32_t pwm_period_cntl, bl_period, bl_int_count;
- uint32_t bl_pwm_cntl, bl_pwm, fractional_duty_cycle_en;
- uint32_t bl_period_mask, bl_pwm_mask;
-
- pwm_period_cntl = REG_READ(BL_PWM_PERIOD_CNTL);
- REG_GET(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD, &bl_period);
- REG_GET(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD_BITCNT, &bl_int_count);
-
- bl_pwm_cntl = REG_READ(BL_PWM_CNTL);
- REG_GET(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, (uint32_t *)(&bl_pwm));
- REG_GET(BL_PWM_CNTL, BL_PWM_FRACTIONAL_EN, &fractional_duty_cycle_en);
-
- if (bl_int_count == 0)
- bl_int_count = 16;
-
- bl_period_mask = (1 << bl_int_count) - 1;
- bl_period &= bl_period_mask;
-
- bl_pwm_mask = bl_period_mask << (16 - bl_int_count);
-
- if (fractional_duty_cycle_en == 0)
- bl_pwm &= bl_pwm_mask;
- else
- bl_pwm &= 0xFFFF;
-
- current_backlight = bl_pwm << (1 + bl_int_count);
-
- if (bl_period == 0)
- bl_period = 0xFFFF;
-
- current_backlight = div_u64(current_backlight, bl_period);
- current_backlight = (current_backlight + 1) >> 1;
-
- current_backlight = (uint64_t)(current_backlight) * bl_period;
-
- round_result = (uint32_t)(current_backlight & 0xFFFFFFFF);
-
- round_result = (round_result >> (bl_int_count-1)) & 1;
-
- current_backlight >>= bl_int_count;
- current_backlight += round_result;
-
- return (uint32_t)(current_backlight);
-}
-
-static void driver_set_backlight_level(struct dce_abm *abm_dce,
- uint32_t backlight_pwm_u16_16)
-{
- uint32_t backlight_16bit;
- uint32_t masked_pwm_period;
- uint8_t bit_count;
- uint64_t active_duty_cycle;
- uint32_t pwm_period_bitcnt;
-
- /*
- * 1. Find 16 bit backlight active duty cycle, where 0 <= backlight
- * active duty cycle <= backlight period
- */
-
- /* 1.1 Apply bitmask for backlight period value based on value of BITCNT
- */
- REG_GET_2(BL_PWM_PERIOD_CNTL,
- BL_PWM_PERIOD_BITCNT, &pwm_period_bitcnt,
- BL_PWM_PERIOD, &masked_pwm_period);
-
- if (pwm_period_bitcnt == 0)
- bit_count = 16;
- else
- bit_count = pwm_period_bitcnt;
-
- /* e.g. maskedPwmPeriod = 0x24 when bitCount is 6 */
- masked_pwm_period = masked_pwm_period & ((1 << bit_count) - 1);
-
- /* 1.2 Calculate integer active duty cycle required upper 16 bits
- * contain integer component, lower 16 bits contain fractional component
- * of active duty cycle e.g. 0x21BDC0 = 0xEFF0 * 0x24
- */
- active_duty_cycle = backlight_pwm_u16_16 * masked_pwm_period;
-
- /* 1.3 Calculate 16 bit active duty cycle from integer and fractional
- * components shift by bitCount then mask 16 bits and add rounding bit
- * from MSB of fraction e.g. 0x86F7 = ((0x21BDC0 >> 6) & 0xFFF) + 0
- */
- backlight_16bit = active_duty_cycle >> bit_count;
- backlight_16bit &= 0xFFFF;
- backlight_16bit += (active_duty_cycle >> (bit_count - 1)) & 0x1;
-
- /*
- * 2. Program register with updated value
- */
-
- /* 2.1 Lock group 2 backlight registers */
-
- REG_UPDATE_2(BL_PWM_GRP1_REG_LOCK,
- BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN, 1,
- BL_PWM_GRP1_REG_LOCK, 1);
-
- // 2.2 Write new active duty cycle
- REG_UPDATE(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, backlight_16bit);
-
- /* 2.3 Unlock group 2 backlight registers */
- REG_UPDATE(BL_PWM_GRP1_REG_LOCK,
- BL_PWM_GRP1_REG_LOCK, 0);
-
- /* 3 Wait for pending bit to be cleared */
- REG_WAIT(BL_PWM_GRP1_REG_LOCK,
- BL_PWM_GRP1_REG_UPDATE_PENDING, 0,
- 1, 10000);
-}
-
static void dmcu_set_backlight_level(
struct dce_abm *abm_dce,
uint32_t backlight_pwm_u16_16,
uint32_t frame_ramp,
- uint32_t controller_id)
+ uint32_t controller_id,
+ uint32_t panel_id)
{
unsigned int backlight_8_bit = 0;
uint32_t s2;
@@ -213,7 +100,7 @@ static void dmcu_set_backlight_level(
// Take MSB of fractional part since backlight is not max
backlight_8_bit = (backlight_pwm_u16_16 >> 8) & 0xFF;
- dce_abm_set_pipe(&abm_dce->base, controller_id);
+ dce_abm_set_pipe(&abm_dce->base, controller_id, panel_id);
/* waitDMCUReadyForCmd */
REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT,
@@ -248,10 +135,9 @@ static void dmcu_set_backlight_level(
0, 1, 80000);
}
-static void dce_abm_init(struct abm *abm)
+static void dce_abm_init(struct abm *abm, uint32_t backlight)
{
struct dce_abm *abm_dce = TO_DCE_ABM(abm);
- unsigned int backlight = calculate_16_bit_backlight_from_pwm(abm_dce);
REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x103);
REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x101);
@@ -331,86 +217,12 @@ static bool dce_abm_set_level(struct abm *abm, uint32_t level)
return true;
}
-static bool dce_abm_immediate_disable(struct abm *abm)
+static bool dce_abm_immediate_disable(struct abm *abm, uint32_t panel_inst)
{
- struct dce_abm *abm_dce = TO_DCE_ABM(abm);
-
if (abm->dmcu_is_running == false)
return true;
- dce_abm_set_pipe(abm, MCP_DISABLE_ABM_IMMEDIATELY);
-
- abm->stored_backlight_registers.BL_PWM_CNTL =
- REG_READ(BL_PWM_CNTL);
- abm->stored_backlight_registers.BL_PWM_CNTL2 =
- REG_READ(BL_PWM_CNTL2);
- abm->stored_backlight_registers.BL_PWM_PERIOD_CNTL =
- REG_READ(BL_PWM_PERIOD_CNTL);
-
- REG_GET(LVTMA_PWRSEQ_REF_DIV, BL_PWM_REF_DIV,
- &abm->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV);
- return true;
-}
-
-static bool dce_abm_init_backlight(struct abm *abm)
-{
- struct dce_abm *abm_dce = TO_DCE_ABM(abm);
- uint32_t value;
-
- /* It must not be 0, so we have to restore them
- * Bios bug w/a - period resets to zero,
- * restoring to cache values which is always correct
- */
- REG_GET(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, &value);
- if (value == 0 || value == 1) {
- if (abm->stored_backlight_registers.BL_PWM_CNTL != 0) {
- REG_WRITE(BL_PWM_CNTL,
- abm->stored_backlight_registers.BL_PWM_CNTL);
- REG_WRITE(BL_PWM_CNTL2,
- abm->stored_backlight_registers.BL_PWM_CNTL2);
- REG_WRITE(BL_PWM_PERIOD_CNTL,
- abm->stored_backlight_registers.BL_PWM_PERIOD_CNTL);
- REG_UPDATE(LVTMA_PWRSEQ_REF_DIV,
- BL_PWM_REF_DIV,
- abm->stored_backlight_registers.
- LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV);
- } else {
- /* TODO: Note: This should not really happen since VBIOS
- * should have initialized PWM registers on boot.
- */
- REG_WRITE(BL_PWM_CNTL, 0xC000FA00);
- REG_WRITE(BL_PWM_PERIOD_CNTL, 0x000C0FA0);
- }
- } else {
- abm->stored_backlight_registers.BL_PWM_CNTL =
- REG_READ(BL_PWM_CNTL);
- abm->stored_backlight_registers.BL_PWM_CNTL2 =
- REG_READ(BL_PWM_CNTL2);
- abm->stored_backlight_registers.BL_PWM_PERIOD_CNTL =
- REG_READ(BL_PWM_PERIOD_CNTL);
-
- REG_GET(LVTMA_PWRSEQ_REF_DIV, BL_PWM_REF_DIV,
- &abm->stored_backlight_registers.
- LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV);
- }
-
- /* Have driver take backlight control
- * TakeBacklightControl(true)
- */
- value = REG_READ(BIOS_SCRATCH_2);
- value |= ATOM_S2_VRI_BRIGHT_ENABLE;
- REG_WRITE(BIOS_SCRATCH_2, value);
-
- /* Enable the backlight output */
- REG_UPDATE(BL_PWM_CNTL, BL_PWM_EN, 1);
-
- /* Disable fractional pwm if configured */
- REG_UPDATE(BL_PWM_CNTL, BL_PWM_FRACTIONAL_EN,
- abm->ctx->dc->config.disable_fractional_pwm ? 0 : 1);
-
- /* Unlock group 2 backlight registers */
- REG_UPDATE(BL_PWM_GRP1_REG_LOCK,
- BL_PWM_GRP1_REG_LOCK, 0);
+ dce_abm_set_pipe(abm, MCP_DISABLE_ABM_IMMEDIATELY, panel_inst);
return true;
}
@@ -420,21 +232,18 @@ static bool dce_abm_set_backlight_level_pwm(
unsigned int backlight_pwm_u16_16,
unsigned int frame_ramp,
unsigned int controller_id,
- bool use_smooth_brightness)
+ unsigned int panel_inst)
{
struct dce_abm *abm_dce = TO_DCE_ABM(abm);
DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n",
backlight_pwm_u16_16, backlight_pwm_u16_16);
- /* If DMCU is in reset state, DMCU is uninitialized */
- if (use_smooth_brightness)
- dmcu_set_backlight_level(abm_dce,
- backlight_pwm_u16_16,
- frame_ramp,
- controller_id);
- else
- driver_set_backlight_level(abm_dce, backlight_pwm_u16_16);
+ dmcu_set_backlight_level(abm_dce,
+ backlight_pwm_u16_16,
+ frame_ramp,
+ controller_id,
+ panel_inst);
return true;
}
@@ -442,12 +251,12 @@ static bool dce_abm_set_backlight_level_pwm(
static const struct abm_funcs dce_funcs = {
.abm_init = dce_abm_init,
.set_abm_level = dce_abm_set_level,
- .init_backlight = dce_abm_init_backlight,
.set_pipe = dce_abm_set_pipe,
.set_backlight_level_pwm = dce_abm_set_backlight_level_pwm,
.get_current_backlight = dce_abm_get_current_backlight,
.get_target_backlight = dce_abm_get_target_backlight,
- .set_abm_immediate_disable = dce_abm_immediate_disable
+ .init_abm_config = NULL,
+ .set_abm_immediate_disable = dce_abm_immediate_disable,
};
static void dce_abm_construct(
@@ -461,10 +270,6 @@ static void dce_abm_construct(
base->ctx = ctx;
base->funcs = &dce_funcs;
- base->stored_backlight_registers.BL_PWM_CNTL = 0;
- base->stored_backlight_registers.BL_PWM_CNTL2 = 0;
- base->stored_backlight_registers.BL_PWM_PERIOD_CNTL = 0;
- base->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV = 0;
base->dmcu_is_running = false;
abm_dce->regs = regs;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h
index ba0caaffa24b..9718a4823372 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h
@@ -30,11 +30,6 @@
#include "abm.h"
#define ABM_COMMON_REG_LIST_DCE_BASE() \
- SR(BL_PWM_PERIOD_CNTL), \
- SR(BL_PWM_CNTL), \
- SR(BL_PWM_CNTL2), \
- SR(BL_PWM_GRP1_REG_LOCK), \
- SR(LVTMA_PWRSEQ_REF_DIV), \
SR(MASTER_COMM_CNTL_REG), \
SR(MASTER_COMM_CMD_REG), \
SR(MASTER_COMM_DATA_REG1)
@@ -85,15 +80,6 @@
.field_name = reg_name ## __ ## field_name ## post_fix
#define ABM_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh) \
- ABM_SF(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD, mask_sh), \
- ABM_SF(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD_BITCNT, mask_sh), \
- ABM_SF(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, mask_sh), \
- ABM_SF(BL_PWM_CNTL, BL_PWM_FRACTIONAL_EN, mask_sh), \
- ABM_SF(BL_PWM_CNTL, BL_PWM_EN, mask_sh), \
- ABM_SF(BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN, mask_sh), \
- ABM_SF(BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_REG_LOCK, mask_sh), \
- ABM_SF(BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_REG_UPDATE_PENDING, mask_sh), \
- ABM_SF(LVTMA_PWRSEQ_REF_DIV, BL_PWM_REF_DIV, mask_sh), \
ABM_SF(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, mask_sh), \
ABM_SF(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, mask_sh), \
ABM_SF(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE1, mask_sh), \
@@ -178,19 +164,10 @@
type ABM1_HG_REG_READ_MISSED_FRAME_CLEAR; \
type ABM1_LS_REG_READ_MISSED_FRAME_CLEAR; \
type ABM1_BL_REG_READ_MISSED_FRAME_CLEAR; \
- type BL_PWM_PERIOD; \
- type BL_PWM_PERIOD_BITCNT; \
- type BL_ACTIVE_INT_FRAC_CNT; \
- type BL_PWM_FRACTIONAL_EN; \
type MASTER_COMM_INTERRUPT; \
type MASTER_COMM_CMD_REG_BYTE0; \
type MASTER_COMM_CMD_REG_BYTE1; \
- type MASTER_COMM_CMD_REG_BYTE2; \
- type BL_PWM_REF_DIV; \
- type BL_PWM_EN; \
- type BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN; \
- type BL_PWM_GRP1_REG_LOCK; \
- type BL_PWM_GRP1_REG_UPDATE_PENDING
+ type MASTER_COMM_CMD_REG_BYTE2
struct dce_abm_shift {
ABM_REG_FIELD_LIST(uint8_t);
@@ -201,10 +178,6 @@ struct dce_abm_mask {
};
struct dce_abm_registers {
- uint32_t BL_PWM_PERIOD_CNTL;
- uint32_t BL_PWM_CNTL;
- uint32_t BL_PWM_CNTL2;
- uint32_t LVTMA_PWRSEQ_REF_DIV;
uint32_t DC_ABM1_HG_SAMPLE_RATE;
uint32_t DC_ABM1_LS_SAMPLE_RATE;
uint32_t BL1_PWM_BL_UPDATE_SAMPLE_RATE;
@@ -219,7 +192,6 @@ struct dce_abm_registers {
uint32_t MASTER_COMM_CMD_REG;
uint32_t MASTER_COMM_DATA_REG1;
uint32_t BIOS_SCRATCH_2;
- uint32_t BL_PWM_GRP1_REG_LOCK;
};
struct dce_abm {
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index 2e992fbc0d71..d2ad0504b0de 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -1014,39 +1014,6 @@ struct pixel_rate_range_table_entry {
unsigned short div_factor;
};
-static const struct pixel_rate_range_table_entry video_optimized_pixel_rates[] = {
- // /1.001 rates
- {25170, 25180, 25200, 1000, 1001}, //25.2MHz -> 25.17
- {59340, 59350, 59400, 1000, 1001}, //59.4Mhz -> 59.340
- {74170, 74180, 74250, 1000, 1001}, //74.25Mhz -> 74.1758
- {125870, 125880, 126000, 1000, 1001}, //126Mhz -> 125.87
- {148350, 148360, 148500, 1000, 1001}, //148.5Mhz -> 148.3516
- {167830, 167840, 168000, 1000, 1001}, //168Mhz -> 167.83
- {222520, 222530, 222750, 1000, 1001}, //222.75Mhz -> 222.527
- {257140, 257150, 257400, 1000, 1001}, //257.4Mhz -> 257.1429
- {296700, 296710, 297000, 1000, 1001}, //297Mhz -> 296.7033
- {342850, 342860, 343200, 1000, 1001}, //343.2Mhz -> 342.857
- {395600, 395610, 396000, 1000, 1001}, //396Mhz -> 395.6
- {409090, 409100, 409500, 1000, 1001}, //409.5Mhz -> 409.091
- {445050, 445060, 445500, 1000, 1001}, //445.5Mhz -> 445.055
- {467530, 467540, 468000, 1000, 1001}, //468Mhz -> 467.5325
- {519230, 519240, 519750, 1000, 1001}, //519.75Mhz -> 519.231
- {525970, 525980, 526500, 1000, 1001}, //526.5Mhz -> 525.974
- {545450, 545460, 546000, 1000, 1001}, //546Mhz -> 545.455
- {593400, 593410, 594000, 1000, 1001}, //594Mhz -> 593.4066
- {623370, 623380, 624000, 1000, 1001}, //624Mhz -> 623.377
- {692300, 692310, 693000, 1000, 1001}, //693Mhz -> 692.308
- {701290, 701300, 702000, 1000, 1001}, //702Mhz -> 701.2987
- {791200, 791210, 792000, 1000, 1001}, //792Mhz -> 791.209
- {890100, 890110, 891000, 1000, 1001}, //891Mhz -> 890.1099
- {1186810, 1186820, 1188000, 1000, 1001},//1188Mhz -> 1186.8131
-
- // *1.001 rates
- {27020, 27030, 27000, 1001, 1000}, //27Mhz
- {54050, 54060, 54000, 1001, 1000}, //54Mhz
- {108100, 108110, 108000, 1001, 1000},//108Mhz
-};
-
static bool dcn20_program_pix_clk(
struct clock_source *clock_source,
struct pixel_clk_params *pix_clk_params,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
index c5aa1f48593a..5479d959ec62 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
@@ -27,10 +27,6 @@
#include "dc_types.h"
-#define BL_REG_LIST()\
- SR(LVTMA_PWRSEQ_CNTL), \
- SR(LVTMA_PWRSEQ_STATE)
-
#define HWSEQ_DCEF_REG_LIST_DCE8() \
.DCFE_CLOCK_CONTROL[0] = mmCRTC0_CRTC_DCFE_CLOCK_CONTROL, \
.DCFE_CLOCK_CONTROL[1] = mmCRTC1_CRTC_DCFE_CLOCK_CONTROL, \
@@ -94,20 +90,17 @@
SRII(BLND_CONTROL, BLND, 0),\
SRII(BLND_CONTROL, BLND, 1),\
SR(BLNDV_CONTROL),\
- HWSEQ_PIXEL_RATE_REG_LIST(CRTC),\
- BL_REG_LIST()
+ HWSEQ_PIXEL_RATE_REG_LIST(CRTC)
#define HWSEQ_DCE8_REG_LIST() \
HWSEQ_DCEF_REG_LIST_DCE8(), \
HWSEQ_BLND_REG_LIST(), \
- HWSEQ_PIXEL_RATE_REG_LIST(CRTC),\
- BL_REG_LIST()
+ HWSEQ_PIXEL_RATE_REG_LIST(CRTC)
#define HWSEQ_DCE10_REG_LIST() \
HWSEQ_DCEF_REG_LIST(), \
HWSEQ_BLND_REG_LIST(), \
- HWSEQ_PIXEL_RATE_REG_LIST(CRTC), \
- BL_REG_LIST()
+ HWSEQ_PIXEL_RATE_REG_LIST(CRTC)
#define HWSEQ_ST_REG_LIST() \
HWSEQ_DCE11_REG_LIST_BASE(), \
@@ -134,8 +127,7 @@
SR(DCHUB_FB_LOCATION),\
SR(DCHUB_AGP_BASE),\
SR(DCHUB_AGP_BOT),\
- SR(DCHUB_AGP_TOP), \
- BL_REG_LIST()
+ SR(DCHUB_AGP_TOP)
#define HWSEQ_VG20_REG_LIST() \
HWSEQ_DCE120_REG_LIST(),\
@@ -144,8 +136,7 @@
#define HWSEQ_DCE112_REG_LIST() \
HWSEQ_DCE10_REG_LIST(), \
HWSEQ_PIXEL_RATE_REG_LIST(CRTC), \
- HWSEQ_PHYPLL_REG_LIST(CRTC), \
- BL_REG_LIST()
+ HWSEQ_PHYPLL_REG_LIST(CRTC)
#define HWSEQ_DCN_REG_LIST()\
SR(REFCLK_CNTL), \
@@ -207,8 +198,7 @@
SR(D3VGA_CONTROL), \
SR(D4VGA_CONTROL), \
SR(VGA_TEST_CONTROL), \
- SR(DC_IP_REQUEST_CNTL), \
- BL_REG_LIST()
+ SR(DC_IP_REQUEST_CNTL)
#define HWSEQ_DCN2_REG_LIST()\
HWSEQ_DCN_REG_LIST(), \
@@ -273,8 +263,7 @@
SR(D4VGA_CONTROL), \
SR(D5VGA_CONTROL), \
SR(D6VGA_CONTROL), \
- SR(DC_IP_REQUEST_CNTL), \
- BL_REG_LIST()
+ SR(DC_IP_REQUEST_CNTL)
#define HWSEQ_DCN21_REG_LIST()\
HWSEQ_DCN_REG_LIST(), \
@@ -324,15 +313,9 @@
SR(D4VGA_CONTROL), \
SR(D5VGA_CONTROL), \
SR(D6VGA_CONTROL), \
- SR(DC_IP_REQUEST_CNTL), \
- BL_REG_LIST()
+ SR(DC_IP_REQUEST_CNTL)
struct dce_hwseq_registers {
-
- /* Backlight registers */
- uint32_t LVTMA_PWRSEQ_CNTL;
- uint32_t LVTMA_PWRSEQ_STATE;
-
uint32_t DCFE_CLOCK_CONTROL[6];
uint32_t DCFEV_CLOCK_CONTROL;
uint32_t DC_MEM_GLOBAL_PWR_REQ_CNTL;
@@ -465,26 +448,18 @@ struct dce_hwseq_registers {
HWS_SF1(blk, PHYPLL_PIXEL_RATE_CNTL, PHYPLL_PIXEL_RATE_SOURCE, mask_sh),\
HWS_SF1(blk, PHYPLL_PIXEL_RATE_CNTL, PIXEL_RATE_PLL_SOURCE, mask_sh)
-#define HWSEQ_LVTMA_MASK_SH_LIST(mask_sh)\
- HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh),\
- HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_DIGON, mask_sh),\
- HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_DIGON_OVRD, mask_sh),\
- HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh)
-
#define HWSEQ_DCE8_MASK_SH_LIST(mask_sh)\
.DCFE_CLOCK_ENABLE = CRTC_DCFE_CLOCK_CONTROL__CRTC_DCFE_CLOCK_ENABLE ## mask_sh, \
HWS_SF(BLND_, V_UPDATE_LOCK, BLND_DCP_GRPH_V_UPDATE_LOCK, mask_sh),\
HWS_SF(BLND_, V_UPDATE_LOCK, BLND_SCL_V_UPDATE_LOCK, mask_sh),\
HWS_SF(BLND_, V_UPDATE_LOCK, BLND_DCP_GRPH_SURF_V_UPDATE_LOCK, mask_sh),\
HWS_SF(BLND_, CONTROL, BLND_MODE, mask_sh),\
- HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_),\
- HWSEQ_LVTMA_MASK_SH_LIST(mask_sh)
+ HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_)
#define HWSEQ_DCE10_MASK_SH_LIST(mask_sh)\
HWSEQ_DCEF_MASK_SH_LIST(mask_sh, DCFE_),\
HWSEQ_BLND_MASK_SH_LIST(mask_sh, BLND_),\
- HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_),\
- HWSEQ_LVTMA_MASK_SH_LIST(mask_sh)
+ HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_)
#define HWSEQ_DCE11_MASK_SH_LIST(mask_sh)\
HWSEQ_DCE10_MASK_SH_LIST(mask_sh),\
@@ -507,8 +482,7 @@ struct dce_hwseq_registers {
HWSEQ_BLND_MASK_SH_LIST(mask_sh, BLND0_BLND_),\
HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_),\
HWSEQ_PHYPLL_MASK_SH_LIST(mask_sh, CRTC0_),\
- HWSEQ_GFX9_DCHUB_MASK_SH_LIST(mask_sh),\
- HWSEQ_LVTMA_MASK_SH_LIST(mask_sh)
+ HWSEQ_GFX9_DCHUB_MASK_SH_LIST(mask_sh)
#define HWSEQ_VG20_MASK_SH_LIST(mask_sh)\
HWSEQ_DCE12_MASK_SH_LIST(mask_sh),\
@@ -570,8 +544,7 @@ struct dce_hwseq_registers {
HWS_SF(, D3VGA_CONTROL, D3VGA_MODE_ENABLE, mask_sh),\
HWS_SF(, D4VGA_CONTROL, D4VGA_MODE_ENABLE, mask_sh),\
HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_ENABLE, mask_sh),\
- HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_RENDER_START, mask_sh),\
- HWSEQ_LVTMA_MASK_SH_LIST(mask_sh)
+ HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_RENDER_START, mask_sh)
#define HWSEQ_DCN2_MASK_SH_LIST(mask_sh)\
HWSEQ_DCN_MASK_SH_LIST(mask_sh), \
@@ -630,8 +603,7 @@ struct dce_hwseq_registers {
HWS_SF(, DOMAIN19_PG_STATUS, DOMAIN19_PGFSM_PWR_STATUS, mask_sh), \
HWS_SF(, DOMAIN20_PG_STATUS, DOMAIN20_PGFSM_PWR_STATUS, mask_sh), \
HWS_SF(, DOMAIN21_PG_STATUS, DOMAIN21_PGFSM_PWR_STATUS, mask_sh), \
- HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \
- HWSEQ_LVTMA_MASK_SH_LIST(mask_sh)
+ HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh)
#define HWSEQ_DCN21_MASK_SH_LIST(mask_sh)\
HWSEQ_DCN_MASK_SH_LIST(mask_sh), \
@@ -671,10 +643,7 @@ struct dce_hwseq_registers {
HWS_SF(, DOMAIN16_PG_STATUS, DOMAIN16_PGFSM_PWR_STATUS, mask_sh), \
HWS_SF(, DOMAIN17_PG_STATUS, DOMAIN17_PGFSM_PWR_STATUS, mask_sh), \
HWS_SF(, DOMAIN18_PG_STATUS, DOMAIN18_PGFSM_PWR_STATUS, mask_sh), \
- HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \
- HWSEQ_LVTMA_MASK_SH_LIST(mask_sh), \
- HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \
- HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh)
+ HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh)
#define HWSEQ_REG_FIELD_LIST(type) \
type DCFE_CLOCK_ENABLE; \
@@ -706,11 +675,7 @@ struct dce_hwseq_registers {
type PF_LFB_REGION;\
type PF_MAX_REGION;\
type ENABLE_L1_TLB;\
- type SYSTEM_ACCESS_MODE;\
- type LVTMA_BLON;\
- type LVTMA_DIGON;\
- type LVTMA_DIGON_OVRD;\
- type LVTMA_PWRSEQ_TARGET_STATE_R;
+ type SYSTEM_ACCESS_MODE;
#define HWSEQ_DCN_REG_FIELD_LIST(type) \
type HUBP_VTG_SEL; \
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
index 8527cce81c6f..8d8c84c81b34 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
@@ -118,7 +118,8 @@ static const struct link_encoder_funcs dce110_lnk_enc_funcs = {
.enable_hpd = dce110_link_encoder_enable_hpd,
.disable_hpd = dce110_link_encoder_disable_hpd,
.is_dig_enabled = dce110_is_dig_enabled,
- .destroy = dce110_link_encoder_destroy
+ .destroy = dce110_link_encoder_destroy,
+ .get_max_link_cap = dce110_link_encoder_get_max_link_cap
};
static enum bp_result link_transmitter_control(
@@ -1389,3 +1390,20 @@ void dce110_link_encoder_disable_hpd(struct link_encoder *enc)
set_reg_field_value(value, 0, DC_HPD_CONTROL, DC_HPD_EN);
}
+
+void dce110_link_encoder_get_max_link_cap(struct link_encoder *enc,
+ struct dc_link_settings *link_settings)
+{
+ /* Set Default link settings */
+ struct dc_link_settings max_link_cap = {LANE_COUNT_FOUR, LINK_RATE_HIGH,
+ LINK_SPREAD_05_DOWNSPREAD_30KHZ, false, 0};
+
+ /* Higher link settings based on feature supported */
+ if (enc->features.flags.bits.IS_HBR2_CAPABLE)
+ max_link_cap.link_rate = LINK_RATE_HIGH2;
+
+ if (enc->features.flags.bits.IS_HBR3_CAPABLE)
+ max_link_cap.link_rate = LINK_RATE_HIGH3;
+
+ *link_settings = max_link_cap;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
index 3c9368df4093..384389f0e2c3 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
@@ -271,4 +271,7 @@ void dce110_psr_program_secondary_packet(struct link_encoder *enc,
bool dce110_is_dig_enabled(struct link_encoder *enc);
+void dce110_link_encoder_get_max_link_cap(struct link_encoder *enc,
+ struct dc_link_settings *link_settings);
+
#endif /* __DC_LINK_ENCODER__DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
new file mode 100644
index 000000000000..ebff9b1e312e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
@@ -0,0 +1,299 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "reg_helper.h"
+#include "core_types.h"
+#include "dc_dmub_srv.h"
+#include "panel_cntl.h"
+#include "dce_panel_cntl.h"
+#include "atom.h"
+
+#define TO_DCE_PANEL_CNTL(panel_cntl)\
+ container_of(panel_cntl, struct dce_panel_cntl, base)
+
+#define CTX \
+ dce_panel_cntl->base.ctx
+
+#define DC_LOGGER \
+ dce_panel_cntl->base.ctx->logger
+
+#define REG(reg)\
+ dce_panel_cntl->regs->reg
+
+#undef FN
+#define FN(reg_name, field_name) \
+ dce_panel_cntl->shift->field_name, dce_panel_cntl->mask->field_name
+
+static unsigned int calculate_16_bit_backlight_from_pwm(struct dce_panel_cntl *dce_panel_cntl)
+{
+ uint64_t current_backlight;
+ uint32_t round_result;
+ uint32_t pwm_period_cntl, bl_period, bl_int_count;
+ uint32_t bl_pwm_cntl, bl_pwm, fractional_duty_cycle_en;
+ uint32_t bl_period_mask, bl_pwm_mask;
+
+ pwm_period_cntl = REG_READ(BL_PWM_PERIOD_CNTL);
+ REG_GET(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD, &bl_period);
+ REG_GET(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD_BITCNT, &bl_int_count);
+
+ bl_pwm_cntl = REG_READ(BL_PWM_CNTL);
+ REG_GET(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, (uint32_t *)(&bl_pwm));
+ REG_GET(BL_PWM_CNTL, BL_PWM_FRACTIONAL_EN, &fractional_duty_cycle_en);
+
+ if (bl_int_count == 0)
+ bl_int_count = 16;
+
+ bl_period_mask = (1 << bl_int_count) - 1;
+ bl_period &= bl_period_mask;
+
+ bl_pwm_mask = bl_period_mask << (16 - bl_int_count);
+
+ if (fractional_duty_cycle_en == 0)
+ bl_pwm &= bl_pwm_mask;
+ else
+ bl_pwm &= 0xFFFF;
+
+ current_backlight = bl_pwm << (1 + bl_int_count);
+
+ if (bl_period == 0)
+ bl_period = 0xFFFF;
+
+ current_backlight = div_u64(current_backlight, bl_period);
+ current_backlight = (current_backlight + 1) >> 1;
+
+ current_backlight = (uint64_t)(current_backlight) * bl_period;
+
+ round_result = (uint32_t)(current_backlight & 0xFFFFFFFF);
+
+ round_result = (round_result >> (bl_int_count-1)) & 1;
+
+ current_backlight >>= bl_int_count;
+ current_backlight += round_result;
+
+ return (uint32_t)(current_backlight);
+}
+
+uint32_t dce_panel_cntl_hw_init(struct panel_cntl *panel_cntl)
+{
+ struct dce_panel_cntl *dce_panel_cntl = TO_DCE_PANEL_CNTL(panel_cntl);
+ uint32_t value;
+ uint32_t current_backlight;
+
+ /* BL_ACTIVE_INT_FRAC_CNT must not be 0, so restore it.
+ * BIOS bug workaround: the period resets to zero, so restore the
+ * cached values, which are always correct.
+ */
+ REG_GET(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, &value);
+
+ if (value == 0 || value == 1) {
+ if (panel_cntl->stored_backlight_registers.BL_PWM_CNTL != 0) {
+ REG_WRITE(BL_PWM_CNTL,
+ panel_cntl->stored_backlight_registers.BL_PWM_CNTL);
+ REG_WRITE(BL_PWM_CNTL2,
+ panel_cntl->stored_backlight_registers.BL_PWM_CNTL2);
+ REG_WRITE(BL_PWM_PERIOD_CNTL,
+ panel_cntl->stored_backlight_registers.BL_PWM_PERIOD_CNTL);
+ REG_UPDATE(PWRSEQ_REF_DIV,
+ BL_PWM_REF_DIV,
+ panel_cntl->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV);
+ } else {
+ /* TODO: Note: This should not really happen since VBIOS
+ * should have initialized PWM registers on boot.
+ */
+ REG_WRITE(BL_PWM_CNTL, 0xC000FA00);
+ REG_WRITE(BL_PWM_PERIOD_CNTL, 0x000C0FA0);
+ }
+ } else {
+ panel_cntl->stored_backlight_registers.BL_PWM_CNTL =
+ REG_READ(BL_PWM_CNTL);
+ panel_cntl->stored_backlight_registers.BL_PWM_CNTL2 =
+ REG_READ(BL_PWM_CNTL2);
+ panel_cntl->stored_backlight_registers.BL_PWM_PERIOD_CNTL =
+ REG_READ(BL_PWM_PERIOD_CNTL);
+
+ REG_GET(PWRSEQ_REF_DIV, BL_PWM_REF_DIV,
+ &panel_cntl->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV);
+ }
+
+ // Have driver take backlight control
+ // TakeBacklightControl(true)
+ value = REG_READ(BIOS_SCRATCH_2);
+ value |= ATOM_S2_VRI_BRIGHT_ENABLE;
+ REG_WRITE(BIOS_SCRATCH_2, value);
+
+ // Enable the backlight output
+ REG_UPDATE(BL_PWM_CNTL, BL_PWM_EN, 1);
+
+ // Unlock group 2 backlight registers
+ REG_UPDATE(BL_PWM_GRP1_REG_LOCK,
+ BL_PWM_GRP1_REG_LOCK, 0);
+
+ current_backlight = calculate_16_bit_backlight_from_pwm(dce_panel_cntl);
+
+ return current_backlight;
+}
+
+bool dce_is_panel_backlight_on(struct panel_cntl *panel_cntl)
+{
+ struct dce_panel_cntl *dce_panel_cntl = TO_DCE_PANEL_CNTL(panel_cntl);
+ uint32_t value;
+
+ REG_GET(PWRSEQ_CNTL, LVTMA_BLON, &value);
+
+ return value;
+}
+
+bool dce_is_panel_powered_on(struct panel_cntl *panel_cntl)
+{
+ struct dce_panel_cntl *dce_panel_cntl = TO_DCE_PANEL_CNTL(panel_cntl);
+ uint32_t pwr_seq_state, dig_on, dig_on_ovrd;
+
+ REG_GET(PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, &pwr_seq_state);
+
+ REG_GET_2(PWRSEQ_CNTL, LVTMA_DIGON, &dig_on, LVTMA_DIGON_OVRD, &dig_on_ovrd);
+
+ return (pwr_seq_state == 1) || (dig_on == 1 && dig_on_ovrd == 1);
+}
+
+void dce_store_backlight_level(struct panel_cntl *panel_cntl)
+{
+ struct dce_panel_cntl *dce_panel_cntl = TO_DCE_PANEL_CNTL(panel_cntl);
+
+ panel_cntl->stored_backlight_registers.BL_PWM_CNTL =
+ REG_READ(BL_PWM_CNTL);
+ panel_cntl->stored_backlight_registers.BL_PWM_CNTL2 =
+ REG_READ(BL_PWM_CNTL2);
+ panel_cntl->stored_backlight_registers.BL_PWM_PERIOD_CNTL =
+ REG_READ(BL_PWM_PERIOD_CNTL);
+
+ REG_GET(PWRSEQ_REF_DIV, BL_PWM_REF_DIV,
+ &panel_cntl->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV);
+}
+
+void dce_driver_set_backlight(struct panel_cntl *panel_cntl,
+ uint32_t backlight_pwm_u16_16)
+{
+ uint32_t backlight_16bit;
+ uint32_t masked_pwm_period;
+ uint8_t bit_count;
+ uint64_t active_duty_cycle;
+ uint32_t pwm_period_bitcnt;
+ struct dce_panel_cntl *dce_panel_cntl = TO_DCE_PANEL_CNTL(panel_cntl);
+
+ /*
+ * 1. Find 16 bit backlight active duty cycle, where 0 <= backlight
+ * active duty cycle <= backlight period
+ */
+
+ /* 1.1 Apply bitmask for backlight period value based on value of BITCNT
+ */
+ REG_GET_2(BL_PWM_PERIOD_CNTL,
+ BL_PWM_PERIOD_BITCNT, &pwm_period_bitcnt,
+ BL_PWM_PERIOD, &masked_pwm_period);
+
+ if (pwm_period_bitcnt == 0)
+ bit_count = 16;
+ else
+ bit_count = pwm_period_bitcnt;
+
+ /* e.g. maskedPwmPeriod = 0x24 when bitCount is 6 */
+ masked_pwm_period = masked_pwm_period & ((1 << bit_count) - 1);
+
+ /* 1.2 Calculate integer active duty cycle required upper 16 bits
+ * contain integer component, lower 16 bits contain fractional component
+ * of active duty cycle e.g. 0x21BDC0 = 0xEFF0 * 0x24
+ */
+ active_duty_cycle = backlight_pwm_u16_16 * masked_pwm_period;
+
+ /* 1.3 Calculate 16 bit active duty cycle from integer and fractional
+ * components shift by bitCount then mask 16 bits and add rounding bit
+ * from MSB of fraction e.g. 0x86F7 = ((0x21BDC0 >> 6) & 0xFFF) + 0
+ */
+ backlight_16bit = active_duty_cycle >> bit_count;
+ backlight_16bit &= 0xFFFF;
+ backlight_16bit += (active_duty_cycle >> (bit_count - 1)) & 0x1;
+
+ /*
+ * 2. Program register with updated value
+ */
+
+ /* 2.1 Lock group 2 backlight registers */
+
+ REG_UPDATE_2(BL_PWM_GRP1_REG_LOCK,
+ BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN, 1,
+ BL_PWM_GRP1_REG_LOCK, 1);
+
+ // 2.2 Write new active duty cycle
+ REG_UPDATE(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, backlight_16bit);
+
+ /* 2.3 Unlock group 2 backlight registers */
+ REG_UPDATE(BL_PWM_GRP1_REG_LOCK,
+ BL_PWM_GRP1_REG_LOCK, 0);
+
+ /* 3 Wait for pending bit to be cleared */
+ REG_WAIT(BL_PWM_GRP1_REG_LOCK,
+ BL_PWM_GRP1_REG_UPDATE_PENDING, 0,
+ 1, 10000);
+}
+
+static void dce_panel_cntl_destroy(struct panel_cntl **panel_cntl)
+{
+ struct dce_panel_cntl *dce_panel_cntl = TO_DCE_PANEL_CNTL(*panel_cntl);
+
+ kfree(dce_panel_cntl);
+ *panel_cntl = NULL;
+}
+
+static const struct panel_cntl_funcs dce_link_panel_cntl_funcs = {
+ .destroy = dce_panel_cntl_destroy,
+ .hw_init = dce_panel_cntl_hw_init,
+ .is_panel_backlight_on = dce_is_panel_backlight_on,
+ .is_panel_powered_on = dce_is_panel_powered_on,
+ .store_backlight_level = dce_store_backlight_level,
+ .driver_set_backlight = dce_driver_set_backlight,
+};
+
+void dce_panel_cntl_construct(
+ struct dce_panel_cntl *dce_panel_cntl,
+ const struct panel_cntl_init_data *init_data,
+ const struct dce_panel_cntl_registers *regs,
+ const struct dce_panel_cntl_shift *shift,
+ const struct dce_panel_cntl_mask *mask)
+{
+ struct panel_cntl *base = &dce_panel_cntl->base;
+
+ base->stored_backlight_registers.BL_PWM_CNTL = 0;
+ base->stored_backlight_registers.BL_PWM_CNTL2 = 0;
+ base->stored_backlight_registers.BL_PWM_PERIOD_CNTL = 0;
+ base->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV = 0;
+
+ dce_panel_cntl->regs = regs;
+ dce_panel_cntl->shift = shift;
+ dce_panel_cntl->mask = mask;
+
+ dce_panel_cntl->base.funcs = &dce_link_panel_cntl_funcs;
+ dce_panel_cntl->base.ctx = init_data->ctx;
+ dce_panel_cntl->base.inst = init_data->inst;
+}
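
For reference, the fixed-point conversion performed in dce_driver_set_backlight() above can be checked in isolation. The following is a minimal standalone sketch (plain userspace C, with the register reads replaced by parameters; the helper name is hypothetical) that reproduces steps 1.1-1.3, including the worked example from the comments (0xEFF0 * 0x24 = 0x21BDC0 -> 0x86F7):

#include <stdint.h>
#include <stdio.h>

static uint32_t backlight_u16_16_to_duty_cycle(uint32_t backlight_pwm_u16_16,
					       uint32_t pwm_period,
					       uint32_t pwm_period_bitcnt)
{
	/* BITCNT of 0 means a full 16-bit period */
	uint32_t bit_count = pwm_period_bitcnt ? pwm_period_bitcnt : 16;
	uint32_t masked_period = pwm_period & ((1u << bit_count) - 1);
	/* upper 16 bits integer, lower 16 bits fraction */
	uint64_t active = (uint64_t)backlight_pwm_u16_16 * masked_period;
	uint32_t duty = (uint32_t)(active >> bit_count) & 0xFFFF;

	/* round using the MSB of the discarded fraction */
	duty += (uint32_t)(active >> (bit_count - 1)) & 0x1;
	return duty;
}

int main(void)
{
	/* values from the comments above: expect 0x86F7 */
	printf("0x%X\n", backlight_u16_16_to_duty_cycle(0xEFF0, 0x24, 6));
	return 0;
}
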
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.h b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.h
new file mode 100644
index 000000000000..70ec691e14d2
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.h
@@ -0,0 +1,125 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_PANEL_CNTL__DCE_H__
+#define __DC_PANEL_CNTL__DCE_H__
+
+#include "panel_cntl.h"
+
+/* set register offset with instance */
+#define DCE_PANEL_CNTL_SR(reg_name, block)\
+ .reg_name = mm ## block ## _ ## reg_name
+
+#define DCE_PANEL_CNTL_REG_LIST()\
+ DCE_PANEL_CNTL_SR(PWRSEQ_CNTL, LVTMA), \
+ DCE_PANEL_CNTL_SR(PWRSEQ_STATE, LVTMA), \
+ DCE_PANEL_CNTL_SR(PWRSEQ_REF_DIV, LVTMA), \
+ SR(BL_PWM_CNTL), \
+ SR(BL_PWM_CNTL2), \
+ SR(BL_PWM_PERIOD_CNTL), \
+ SR(BL_PWM_GRP1_REG_LOCK), \
+ SR(BIOS_SCRATCH_2)
+
+#define DCN_PANEL_CNTL_SR(reg_name, block)\
+ .reg_name = BASE(mm ## block ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## _ ## reg_name
+
+#define DCN_PANEL_CNTL_REG_LIST()\
+ DCN_PANEL_CNTL_SR(PWRSEQ_CNTL, LVTMA), \
+ DCN_PANEL_CNTL_SR(PWRSEQ_STATE, LVTMA), \
+ DCE_PANEL_CNTL_SR(PWRSEQ_REF_DIV, LVTMA), \
+ SR(BL_PWM_CNTL), \
+ SR(BL_PWM_CNTL2), \
+ SR(BL_PWM_PERIOD_CNTL), \
+ SR(BL_PWM_GRP1_REG_LOCK), \
+ SR(BIOS_SCRATCH_2)
+
+#define DCE_PANEL_CNTL_SF(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+#define DCE_PANEL_CNTL_MASK_SH_LIST(mask_sh) \
+ DCE_PANEL_CNTL_SF(LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh),\
+ DCE_PANEL_CNTL_SF(LVTMA_PWRSEQ_CNTL, LVTMA_DIGON, mask_sh),\
+ DCE_PANEL_CNTL_SF(LVTMA_PWRSEQ_CNTL, LVTMA_DIGON_OVRD, mask_sh),\
+ DCE_PANEL_CNTL_SF(LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh), \
+ DCE_PANEL_CNTL_SF(LVTMA_PWRSEQ_REF_DIV, BL_PWM_REF_DIV, mask_sh), \
+ DCE_PANEL_CNTL_SF(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD, mask_sh), \
+ DCE_PANEL_CNTL_SF(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD_BITCNT, mask_sh), \
+ DCE_PANEL_CNTL_SF(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, mask_sh), \
+ DCE_PANEL_CNTL_SF(BL_PWM_CNTL, BL_PWM_FRACTIONAL_EN, mask_sh), \
+ DCE_PANEL_CNTL_SF(BL_PWM_CNTL, BL_PWM_EN, mask_sh), \
+ DCE_PANEL_CNTL_SF(BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN, mask_sh), \
+ DCE_PANEL_CNTL_SF(BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_REG_LOCK, mask_sh), \
+ DCE_PANEL_CNTL_SF(BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_REG_UPDATE_PENDING, mask_sh)
+
+#define DCE_PANEL_CNTL_REG_FIELD_LIST(type) \
+ type LVTMA_BLON;\
+ type LVTMA_DIGON;\
+ type LVTMA_DIGON_OVRD;\
+ type LVTMA_PWRSEQ_TARGET_STATE_R; \
+ type BL_PWM_REF_DIV; \
+ type BL_PWM_EN; \
+ type BL_ACTIVE_INT_FRAC_CNT; \
+ type BL_PWM_FRACTIONAL_EN; \
+ type BL_PWM_PERIOD; \
+ type BL_PWM_PERIOD_BITCNT; \
+ type BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN; \
+ type BL_PWM_GRP1_REG_LOCK; \
+ type BL_PWM_GRP1_REG_UPDATE_PENDING
+
+struct dce_panel_cntl_shift {
+ DCE_PANEL_CNTL_REG_FIELD_LIST(uint8_t);
+};
+
+struct dce_panel_cntl_mask {
+ DCE_PANEL_CNTL_REG_FIELD_LIST(uint32_t);
+};
+
+struct dce_panel_cntl_registers {
+ uint32_t PWRSEQ_CNTL;
+ uint32_t PWRSEQ_STATE;
+ uint32_t BL_PWM_CNTL;
+ uint32_t BL_PWM_CNTL2;
+ uint32_t BL_PWM_PERIOD_CNTL;
+ uint32_t BL_PWM_GRP1_REG_LOCK;
+ uint32_t PWRSEQ_REF_DIV;
+ uint32_t BIOS_SCRATCH_2;
+};
+
+struct dce_panel_cntl {
+ struct panel_cntl base;
+ const struct dce_panel_cntl_registers *regs;
+ const struct dce_panel_cntl_shift *shift;
+ const struct dce_panel_cntl_mask *mask;
+};
+
+void dce_panel_cntl_construct(
+ struct dce_panel_cntl *panel_cntl,
+ const struct panel_cntl_init_data *init_data,
+ const struct dce_panel_cntl_registers *regs,
+ const struct dce_panel_cntl_shift *shift,
+ const struct dce_panel_cntl_mask *mask);
+
+#endif /* __DC_PANEL_CNTL__DCE_H__ */
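
For reference, a minimal sketch of what the mask_sh list above expands to for a single field; the LVTMA_PWRSEQ_CNTL__LVTMA_BLON__SHIFT/_MASK defines are assumed to come from the DCE register sh_mask headers:

/* DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT) begins with
 * DCE_PANEL_CNTL_SF(LVTMA_PWRSEQ_CNTL, LVTMA_BLON, __SHIFT), which the
 * SF macro pastes into a designated initializer:
 */
static const struct dce_panel_cntl_shift panel_cntl_shift_sketch = {
	.LVTMA_BLON = LVTMA_PWRSEQ_CNTL__LVTMA_BLON__SHIFT,
	/* ...one entry per field in DCE_PANEL_CNTL_REG_FIELD_LIST() */
};

static const struct dce_panel_cntl_mask panel_cntl_mask_sketch = {
	.LVTMA_BLON = LVTMA_PWRSEQ_CNTL__LVTMA_BLON_MASK,
	/* ... */
};
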
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
index 451574971b96..4cdaaf4d881c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
@@ -1336,7 +1336,6 @@ static void dce110_se_audio_setup(
{
struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
- uint32_t speakers = 0;
uint32_t channels = 0;
ASSERT(audio_info);
@@ -1344,7 +1343,6 @@ static void dce110_se_audio_setup(
/* This should not happen.it does so we don't get BSOD*/
return;
- speakers = audio_info->flags.info.ALLSPEAKERS;
channels = speakers_to_channels(audio_info->flags.speaker_flags).all;
/* setup the audio stream source select (audio -> dig mapping) */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
new file mode 100644
index 000000000000..da0b29abfbda
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
@@ -0,0 +1,319 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dmub_abm.h"
+#include "dce_abm.h"
+#include "dc.h"
+#include "dc_dmub_srv.h"
+#include "dmub/dmub_srv.h"
+#include "core_types.h"
+#include "dm_services.h"
+#include "reg_helper.h"
+#include "fixed31_32.h"
+
+#include "atom.h"
+
+#define TO_DMUB_ABM(abm)\
+ container_of(abm, struct dce_abm, base)
+
+#define REG(reg) \
+ (dce_abm->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ dce_abm->abm_shift->field_name, dce_abm->abm_mask->field_name
+
+#define CTX \
+ dce_abm->base.ctx
+
+#define DISABLE_ABM_IMMEDIATELY 255
+
+static bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t panel_inst)
+{
+ union dmub_rb_cmd cmd;
+ struct dc_context *dc = abm->ctx;
+ uint32_t ramping_boundary = 0xFFFF;
+
+ cmd.abm_set_pipe.header.type = DMUB_CMD__ABM;
+ cmd.abm_set_pipe.header.sub_type = DMUB_CMD__ABM_SET_PIPE;
+ cmd.abm_set_pipe.abm_set_pipe_data.otg_inst = otg_inst;
+ cmd.abm_set_pipe.abm_set_pipe_data.panel_inst = panel_inst;
+ cmd.abm_set_pipe.abm_set_pipe_data.ramping_boundary = ramping_boundary;
+ cmd.abm_set_pipe.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_pipe_data);
+
+ dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
+ dc_dmub_srv_cmd_execute(dc->dmub_srv);
+ dc_dmub_srv_wait_idle(dc->dmub_srv);
+
+ return true;
+}
+
+static void dmcub_set_backlight_level(
+ struct dce_abm *dce_abm,
+ uint32_t backlight_pwm_u16_16,
+ uint32_t frame_ramp,
+ uint32_t otg_inst,
+ uint32_t panel_inst)
+{
+ union dmub_rb_cmd cmd;
+ struct dc_context *dc = dce_abm->base.ctx;
+ unsigned int backlight_8_bit = 0;
+ uint32_t s2;
+
+ if (backlight_pwm_u16_16 & 0x10000)
+ // Check for max backlight condition
+ backlight_8_bit = 0xFF;
+ else
+ // Take MSB of fractional part since backlight is not max
+ backlight_8_bit = (backlight_pwm_u16_16 >> 8) & 0xFF;
+
+ dmub_abm_set_pipe(&dce_abm->base, otg_inst, panel_inst);
+
+ REG_UPDATE(BL1_PWM_USER_LEVEL, BL1_PWM_USER_LEVEL, backlight_pwm_u16_16);
+
+ if (otg_inst == 0)
+ frame_ramp = 0;
+
+ cmd.abm_set_backlight.header.type = DMUB_CMD__ABM;
+ cmd.abm_set_backlight.header.sub_type = DMUB_CMD__ABM_SET_BACKLIGHT;
+ cmd.abm_set_backlight.abm_set_backlight_data.frame_ramp = frame_ramp;
+ cmd.abm_set_backlight.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_backlight_data);
+
+ dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
+ dc_dmub_srv_cmd_execute(dc->dmub_srv);
+ dc_dmub_srv_wait_idle(dc->dmub_srv);
+
+ // Update requested backlight level
+ s2 = REG_READ(BIOS_SCRATCH_2);
+
+ s2 &= ~ATOM_S2_CURRENT_BL_LEVEL_MASK;
+ backlight_8_bit &= (ATOM_S2_CURRENT_BL_LEVEL_MASK >>
+ ATOM_S2_CURRENT_BL_LEVEL_SHIFT);
+ s2 |= (backlight_8_bit << ATOM_S2_CURRENT_BL_LEVEL_SHIFT);
+
+ REG_WRITE(BIOS_SCRATCH_2, s2);
+}
+
+static void dmub_abm_enable_fractional_pwm(struct dc_context *dc)
+{
+ union dmub_rb_cmd cmd;
+ uint32_t fractional_pwm = (dc->dc->config.disable_fractional_pwm == false) ? 1 : 0;
+
+ cmd.abm_set_pwm_frac.header.type = DMUB_CMD__ABM;
+ cmd.abm_set_pwm_frac.header.sub_type = DMUB_CMD__ABM_SET_PWM_FRAC;
+ cmd.abm_set_pwm_frac.abm_set_pwm_frac_data.fractional_pwm = fractional_pwm;
+ cmd.abm_set_pwm_frac.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_pwm_frac_data);
+
+ dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
+ dc_dmub_srv_cmd_execute(dc->dmub_srv);
+ dc_dmub_srv_wait_idle(dc->dmub_srv);
+}
+
+static void dmub_abm_init(struct abm *abm, uint32_t backlight)
+{
+ struct dce_abm *dce_abm = TO_DMUB_ABM(abm);
+
+ REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x103);
+ REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x101);
+ REG_WRITE(DC_ABM1_LS_SAMPLE_RATE, 0x103);
+ REG_WRITE(DC_ABM1_LS_SAMPLE_RATE, 0x101);
+ REG_WRITE(BL1_PWM_BL_UPDATE_SAMPLE_RATE, 0x101);
+
+ REG_SET_3(DC_ABM1_HG_MISC_CTRL, 0,
+ ABM1_HG_NUM_OF_BINS_SEL, 0,
+ ABM1_HG_VMAX_SEL, 1,
+ ABM1_HG_BIN_BITWIDTH_SIZE_SEL, 0);
+
+ REG_SET_3(DC_ABM1_IPCSC_COEFF_SEL, 0,
+ ABM1_IPCSC_COEFF_SEL_R, 2,
+ ABM1_IPCSC_COEFF_SEL_G, 4,
+ ABM1_IPCSC_COEFF_SEL_B, 2);
+
+ REG_UPDATE(BL1_PWM_CURRENT_ABM_LEVEL,
+ BL1_PWM_CURRENT_ABM_LEVEL, backlight);
+
+ REG_UPDATE(BL1_PWM_TARGET_ABM_LEVEL,
+ BL1_PWM_TARGET_ABM_LEVEL, backlight);
+
+ REG_UPDATE(BL1_PWM_USER_LEVEL,
+ BL1_PWM_USER_LEVEL, backlight);
+
+ REG_UPDATE_2(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES,
+ ABM1_LS_MIN_PIXEL_VALUE_THRES, 0,
+ ABM1_LS_MAX_PIXEL_VALUE_THRES, 1000);
+
+ REG_SET_3(DC_ABM1_HGLS_REG_READ_PROGRESS, 0,
+ ABM1_HG_REG_READ_MISSED_FRAME_CLEAR, 1,
+ ABM1_LS_REG_READ_MISSED_FRAME_CLEAR, 1,
+ ABM1_BL_REG_READ_MISSED_FRAME_CLEAR, 1);
+
+ dmub_abm_enable_fractional_pwm(abm->ctx);
+}
+
+static unsigned int dmub_abm_get_current_backlight(struct abm *abm)
+{
+ struct dce_abm *dce_abm = TO_DMUB_ABM(abm);
+ unsigned int backlight = REG_READ(BL1_PWM_CURRENT_ABM_LEVEL);
+
+ /* return backlight in hardware format which is unsigned 17 bits, with
+ * 1 bit integer and 16 bit fractional
+ */
+ return backlight;
+}
+
+static unsigned int dmub_abm_get_target_backlight(struct abm *abm)
+{
+ struct dce_abm *dce_abm = TO_DMUB_ABM(abm);
+ unsigned int backlight = REG_READ(BL1_PWM_TARGET_ABM_LEVEL);
+
+ /* return backlight in hardware format which is unsigned 17 bits, with
+ * 1 bit integer and 16 bit fractional
+ */
+ return backlight;
+}
+
+static bool dmub_abm_set_level(struct abm *abm, uint32_t level)
+{
+ union dmub_rb_cmd cmd;
+ struct dc_context *dc = abm->ctx;
+
+ cmd.abm_set_level.header.type = DMUB_CMD__ABM;
+ cmd.abm_set_level.header.sub_type = DMUB_CMD__ABM_SET_LEVEL;
+ cmd.abm_set_level.abm_set_level_data.level = level;
+ cmd.abm_set_level.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_level_data);
+
+ dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
+ dc_dmub_srv_cmd_execute(dc->dmub_srv);
+ dc_dmub_srv_wait_idle(dc->dmub_srv);
+
+ return true;
+}
+
+static bool dmub_abm_immediate_disable(struct abm *abm, uint32_t panel_inst)
+{
+ dmub_abm_set_pipe(abm, DISABLE_ABM_IMMEDIATELY, panel_inst);
+
+ return true;
+}
+
+static bool dmub_abm_set_backlight_level_pwm(
+ struct abm *abm,
+ unsigned int backlight_pwm_u16_16,
+ unsigned int frame_ramp,
+ unsigned int otg_inst,
+ uint32_t panel_inst)
+{
+ struct dce_abm *dce_abm = TO_DMUB_ABM(abm);
+
+ dmcub_set_backlight_level(dce_abm,
+ backlight_pwm_u16_16,
+ frame_ramp,
+ otg_inst,
+ panel_inst);
+
+ return true;
+}
+
+static bool dmub_abm_init_config(struct abm *abm,
+ const char *src,
+ unsigned int bytes)
+{
+ union dmub_rb_cmd cmd;
+ struct dc_context *dc = abm->ctx;
+
+ // TODO: Optimize by only reading back final 4 bytes
+ dmub_flush_buffer_mem(&dc->dmub_srv->dmub->scratch_mem_fb);
+
+ // Copy iramtable into cw7
+ memcpy(dc->dmub_srv->dmub->scratch_mem_fb.cpu_addr, (void *)src, bytes);
+
+ // Fw will copy from cw7 to fw_state
+ cmd.abm_init_config.header.type = DMUB_CMD__ABM;
+ cmd.abm_init_config.header.sub_type = DMUB_CMD__ABM_INIT_CONFIG;
+ cmd.abm_init_config.abm_init_config_data.src.quad_part = dc->dmub_srv->dmub->scratch_mem_fb.gpu_addr;
+ cmd.abm_init_config.abm_init_config_data.bytes = bytes;
+ cmd.abm_init_config.header.payload_bytes = sizeof(struct dmub_cmd_abm_init_config_data);
+
+ dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
+ dc_dmub_srv_cmd_execute(dc->dmub_srv);
+ dc_dmub_srv_wait_idle(dc->dmub_srv);
+
+ return true;
+}
+
+static const struct abm_funcs abm_funcs = {
+ .abm_init = dmub_abm_init,
+ .set_abm_level = dmub_abm_set_level,
+ .set_pipe = dmub_abm_set_pipe,
+ .set_backlight_level_pwm = dmub_abm_set_backlight_level_pwm,
+ .get_current_backlight = dmub_abm_get_current_backlight,
+ .get_target_backlight = dmub_abm_get_target_backlight,
+ .set_abm_immediate_disable = dmub_abm_immediate_disable,
+ .init_abm_config = dmub_abm_init_config,
+};
+
+static void dmub_abm_construct(
+ struct dce_abm *abm_dce,
+ struct dc_context *ctx,
+ const struct dce_abm_registers *regs,
+ const struct dce_abm_shift *abm_shift,
+ const struct dce_abm_mask *abm_mask)
+{
+ struct abm *base = &abm_dce->base;
+
+ base->ctx = ctx;
+ base->funcs = &abm_funcs;
+ base->dmcu_is_running = false;
+
+ abm_dce->regs = regs;
+ abm_dce->abm_shift = abm_shift;
+ abm_dce->abm_mask = abm_mask;
+}
+
+struct abm *dmub_abm_create(
+ struct dc_context *ctx,
+ const struct dce_abm_registers *regs,
+ const struct dce_abm_shift *abm_shift,
+ const struct dce_abm_mask *abm_mask)
+{
+ struct dce_abm *abm_dce = kzalloc(sizeof(*abm_dce), GFP_KERNEL);
+
+ if (abm_dce == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dmub_abm_construct(abm_dce, ctx, regs, abm_shift, abm_mask);
+
+ return &abm_dce->base;
+}
+
+void dmub_abm_destroy(struct abm **abm)
+{
+ struct dce_abm *abm_dce = TO_DMUB_ABM(*abm);
+
+ kfree(abm_dce);
+ *abm = NULL;
+}
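
Every helper in dmub_abm.c above issues its command with the same three-step ring-buffer sequence. A hypothetical wrapper (not added by the patch) makes that shared shape explicit; dc_dmub_srv_cmd_queue/execute/wait_idle and union dmub_rb_cmd are the same interfaces used above:

/* Sketch only: the common DMUB submission pattern used by the ABM helpers */
static void dmub_abm_send_cmd(struct dc_context *dc, union dmub_rb_cmd *cmd)
{
	dc_dmub_srv_cmd_queue(dc->dmub_srv, cmd);	/* copy into ring buffer */
	dc_dmub_srv_cmd_execute(dc->dmub_srv);		/* kick the firmware */
	dc_dmub_srv_wait_idle(dc->dmub_srv);		/* wait until it drains */
}
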
diff --git a/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.h
index 26583f346c39..3a5d5ac7a86e 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2012-16 Advanced Micro Devices, Inc.
+ * Copyright 2019 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -23,17 +23,18 @@
*
*/
-#include "core_types.h"
-#include "logger.h"
-#include "include/logger_interface.h"
-#include "dm_helpers.h"
+#ifndef __DMUB_ABM_H__
+#define __DMUB_ABM_H__
-void dc_conn_log_hex_linux(const uint8_t *hex_data, int hex_data_count)
-{
- int i;
+#include "abm.h"
+#include "dce_abm.h"
- if (hex_data)
- for (i = 0; i < hex_data_count; i++)
- DC_LOG_DEBUG("%2.2X ", hex_data[i]);
-}
+struct abm *dmub_abm_create(
+ struct dc_context *ctx,
+ const struct dce_abm_registers *regs,
+ const struct dce_abm_shift *abm_shift,
+ const struct dce_abm_mask *abm_mask);
+void dmub_abm_destroy(struct abm **abm);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
index bc109d4fc6e6..044a0133ebb1 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
@@ -26,13 +26,51 @@
#include "dmub_psr.h"
#include "dc.h"
#include "dc_dmub_srv.h"
-#include "../../dmub/inc/dmub_srv.h"
-#include "../../dmub/inc/dmub_gpint_cmd.h"
+#include "dmub/dmub_srv.h"
#include "core_types.h"
#define MAX_PIPES 6
/**
+ * Convert dmcub psr state to dmcu psr state.
+ */
+static void convert_psr_state(uint32_t *psr_state)
+{
+ if (*psr_state == 0)
+ *psr_state = 0;
+ else if (*psr_state == 0x10)
+ *psr_state = 1;
+ else if (*psr_state == 0x11)
+ *psr_state = 2;
+ else if (*psr_state == 0x20)
+ *psr_state = 3;
+ else if (*psr_state == 0x21)
+ *psr_state = 4;
+ else if (*psr_state == 0x30)
+ *psr_state = 5;
+ else if (*psr_state == 0x31)
+ *psr_state = 6;
+ else if (*psr_state == 0x40)
+ *psr_state = 7;
+ else if (*psr_state == 0x41)
+ *psr_state = 8;
+ else if (*psr_state == 0x42)
+ *psr_state = 9;
+ else if (*psr_state == 0x43)
+ *psr_state = 10;
+ else if (*psr_state == 0x44)
+ *psr_state = 11;
+ else if (*psr_state == 0x50)
+ *psr_state = 12;
+ else if (*psr_state == 0x51)
+ *psr_state = 13;
+ else if (*psr_state == 0x52)
+ *psr_state = 14;
+ else if (*psr_state == 0x53)
+ *psr_state = 15;
+}
+
+/**
* Get PSR state from firmware.
*/
static void dmub_psr_get_state(struct dmub_psr *dmub, uint32_t *psr_state)
@@ -43,6 +81,8 @@ static void dmub_psr_get_state(struct dmub_psr *dmub, uint32_t *psr_state)
dmub_srv_send_gpint_command(srv, DMUB_GPINT__GET_PSR_STATE, 0, 30);
dmub_srv_get_gpint_response(srv, psr_state);
+
+ convert_psr_state(psr_state);
}
/**
@@ -53,19 +93,23 @@ static bool dmub_psr_set_version(struct dmub_psr *dmub, struct dc_stream_state *
union dmub_rb_cmd cmd;
struct dc_context *dc = dmub->ctx;
+ if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED)
+ return false;
+
cmd.psr_set_version.header.type = DMUB_CMD__PSR;
cmd.psr_set_version.header.sub_type = DMUB_CMD__PSR_SET_VERSION;
-
- if (stream->psr_version == 0x0) // Unsupported
- return false;
- else if (stream->psr_version == 0x1)
+ switch (stream->link->psr_settings.psr_version) {
+ case DC_PSR_VERSION_1:
cmd.psr_set_version.psr_set_version_data.version = PSR_VERSION_1;
- else if (stream->psr_version == 0x2)
- cmd.psr_set_version.psr_set_version_data.version = PSR_VERSION_2;
-
- cmd.psr_enable.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_version_data);
+ break;
+ case DC_PSR_VERSION_UNSUPPORTED:
+ default:
+ cmd.psr_set_version.psr_set_version_data.version = PSR_VERSION_UNSUPPORTED;
+ break;
+ }
+ cmd.psr_set_version.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_version_data);
- dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd.psr_enable.header);
+ dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
dc_dmub_srv_cmd_execute(dc->dmub_srv);
dc_dmub_srv_wait_idle(dc->dmub_srv);
@@ -89,7 +133,7 @@ static void dmub_psr_enable(struct dmub_psr *dmub, bool enable)
cmd.psr_enable.header.payload_bytes = 0; // Send header only
- dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd.psr_enable.header);
+ dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
dc_dmub_srv_cmd_execute(dc->dmub_srv);
dc_dmub_srv_wait_idle(dc->dmub_srv);
}
@@ -113,7 +157,7 @@ static void dmub_psr_set_level(struct dmub_psr *dmub, uint16_t psr_level)
cmd.psr_set_level.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_level_data);
cmd.psr_set_level.psr_set_level_data.psr_level = psr_level;
- dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd.psr_set_level.header);
+ dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
dc_dmub_srv_cmd_execute(dc->dmub_srv);
dc_dmub_srv_wait_idle(dc->dmub_srv);
}
@@ -162,7 +206,7 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
cmd.psr_copy_settings.header.payload_bytes = sizeof(struct dmub_cmd_psr_copy_settings_data);
// Hw insts
- copy_settings_data->dpphy_inst = psr_context->phyType;
+ copy_settings_data->dpphy_inst = psr_context->transmitterId;
copy_settings_data->aux_inst = psr_context->channel;
copy_settings_data->digfe_inst = psr_context->engineId;
copy_settings_data->digbe_inst = psr_context->transmitterId;
@@ -187,8 +231,10 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
copy_settings_data->smu_optimizations_en = psr_context->allow_smu_optimizations;
copy_settings_data->frame_delay = psr_context->frame_delay;
copy_settings_data->frame_cap_ind = psr_context->psrFrameCaptureIndicationReq;
+ copy_settings_data->debug.visual_confirm = dc->dc->debug.visual_confirm == VISUAL_CONFIRM_PSR ?
+ true : false;
- dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd.psr_copy_settings.header);
+ dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
dc_dmub_srv_cmd_execute(dc->dmub_srv);
dc_dmub_srv_wait_idle(dc->dmub_srv);
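
The convert_psr_state() helper added above can equivalently be read as a lookup table. A table-driven restatement of the same DMCUB-to-DMCU state mapping (a sketch, not the patch's implementation):

static const struct { uint32_t dmcub; uint32_t dmcu; } psr_state_map[] = {
	{ 0x00, 0 },  { 0x10, 1 },  { 0x11, 2 },  { 0x20, 3 },
	{ 0x21, 4 },  { 0x30, 5 },  { 0x31, 6 },  { 0x40, 7 },
	{ 0x41, 8 },  { 0x42, 9 },  { 0x43, 10 }, { 0x44, 11 },
	{ 0x50, 12 }, { 0x51, 13 }, { 0x52, 14 }, { 0x53, 15 },
};

static void convert_psr_state_table(uint32_t *psr_state)
{
	unsigned int i;

	for (i = 0; i < sizeof(psr_state_map) / sizeof(psr_state_map[0]); i++) {
		if (psr_state_map[i].dmcub == *psr_state) {
			*psr_state = psr_state_map[i].dmcu;
			return;
		}
	}
	/* unknown codes are left untouched, matching the if/else chain */
}
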
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
index 8f78bf9abbca..a28c4ae0f259 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
@@ -46,6 +46,7 @@
#include "dce/dce_audio.h"
#include "dce/dce_hwseq.h"
#include "dce100/dce100_hw_sequencer.h"
+#include "dce/dce_panel_cntl.h"
#include "reg_helper.h"
@@ -249,6 +250,18 @@ static const struct dce_stream_encoder_mask se_mask = {
SE_COMMON_MASK_SH_LIST_DCE80_100(_MASK)
};
+static const struct dce_panel_cntl_registers panel_cntl_regs[] = {
+ { DCE_PANEL_CNTL_REG_LIST() }
+};
+
+static const struct dce_panel_cntl_shift panel_cntl_shift = {
+ DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_panel_cntl_mask panel_cntl_mask = {
+ DCE_PANEL_CNTL_MASK_SH_LIST(_MASK)
+};
+
#define opp_regs(id)\
[id] = {\
OPP_DCE_100_REG_LIST(id),\
@@ -627,6 +640,23 @@ struct link_encoder *dce100_link_encoder_create(
return &enc110->base;
}
+static struct panel_cntl *dce100_panel_cntl_create(const struct panel_cntl_init_data *init_data)
+{
+ struct dce_panel_cntl *panel_cntl =
+ kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL);
+
+ if (!panel_cntl)
+ return NULL;
+
+ dce_panel_cntl_construct(panel_cntl,
+ init_data,
+ &panel_cntl_regs[init_data->inst],
+ &panel_cntl_shift,
+ &panel_cntl_mask);
+
+ return &panel_cntl->base;
+}
+
struct output_pixel_processor *dce100_opp_create(
struct dc_context *ctx,
uint32_t inst)
@@ -943,6 +973,7 @@ struct stream_encoder *dce100_find_first_free_match_stream_enc_for_link(
static const struct resource_funcs dce100_res_pool_funcs = {
.destroy = dce100_destroy_resource_pool,
.link_enc_create = dce100_link_encoder_create,
+ .panel_cntl_create = dce100_panel_cntl_create,
.validate_bandwidth = dce100_validate_bandwidth,
.validate_plane = dce100_validate_plane,
.add_stream_to_ctx = dce100_add_stream_to_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 10527593868c..b77e9dc16086 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -53,6 +53,7 @@
#include "abm.h"
#include "audio.h"
#include "reg_helper.h"
+#include "panel_cntl.h"
/* include DCE11 register header files */
#include "dce/dce_11_0_d.h"
@@ -697,31 +698,6 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx)
}
-/*todo: cloned in stream enc, fix*/
-bool dce110_is_panel_backlight_on(struct dc_link *link)
-{
- struct dc_context *ctx = link->ctx;
- struct dce_hwseq *hws = ctx->dc->hwseq;
- uint32_t value;
-
- REG_GET(LVTMA_PWRSEQ_CNTL, LVTMA_BLON, &value);
-
- return value;
-}
-
-bool dce110_is_panel_powered_on(struct dc_link *link)
-{
- struct dc_context *ctx = link->ctx;
- struct dce_hwseq *hws = ctx->dc->hwseq;
- uint32_t pwr_seq_state, dig_on, dig_on_ovrd;
-
- REG_GET(LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, &pwr_seq_state);
-
- REG_GET_2(LVTMA_PWRSEQ_CNTL, LVTMA_DIGON, &dig_on, LVTMA_DIGON_OVRD, &dig_on_ovrd);
-
- return (pwr_seq_state == 1) || (dig_on == 1 && dig_on_ovrd == 1);
-}
-
static enum bp_result link_transmitter_control(
struct dc_bios *bios,
struct bp_transmitter_control *cntl)
@@ -810,7 +786,6 @@ void dce110_edp_power_control(
bool power_up)
{
struct dc_context *ctx = link->ctx;
- struct dce_hwseq *hwseq = ctx->dc->hwseq;
struct bp_transmitter_control cntl = { 0 };
enum bp_result bp_result;
@@ -821,7 +796,11 @@ void dce110_edp_power_control(
return;
}
- if (power_up != hwseq->funcs.is_panel_powered_on(link)) {
+ if (!link->panel_cntl)
+ return;
+
+ if (power_up !=
+ link->panel_cntl->funcs->is_panel_powered_on(link->panel_cntl)) {
/* Send VBIOS command to prompt eDP panel power */
if (power_up) {
unsigned long long current_ts = dm_get_timestamp(ctx);
@@ -892,7 +871,6 @@ void dce110_edp_backlight_control(
bool enable)
{
struct dc_context *ctx = link->ctx;
- struct dce_hwseq *hws = ctx->dc->hwseq;
struct bp_transmitter_control cntl = { 0 };
if (dal_graphics_object_id_get_connector_id(link->link_enc->connector)
@@ -901,7 +879,8 @@ void dce110_edp_backlight_control(
return;
}
- if (enable && hws->funcs.is_panel_backlight_on(link)) {
+ if (enable && link->panel_cntl &&
+ link->panel_cntl->funcs->is_panel_backlight_on(link->panel_cntl)) {
DC_LOG_HW_RESUME_S3(
"%s: panel already powered up. Do nothing.\n",
__func__);
@@ -1087,7 +1066,7 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx)
if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
hws->funcs.edp_backlight_control(link, false);
- dc_link_set_abm_disable(link);
+ link->dc->hwss.set_abm_immediate_disable(pipe_ctx);
}
if (dc_is_dp_signal(pipe_ctx->stream->signal))
@@ -1432,7 +1411,7 @@ static enum dc_status apply_single_controller_ctx_to_hw(
pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->bottom_pipe != 0;
- pipe_ctx->stream->link->psr_feature_enabled = false;
+ pipe_ctx->stream->link->psr_settings.psr_feature_enabled = false;
return DC_OK;
}
@@ -1838,7 +1817,7 @@ static bool should_enable_fbc(struct dc *dc,
return false;
/* PSR should not be enabled */
- if (pipe_ctx->stream->link->psr_feature_enabled)
+ if (pipe_ctx->stream->link->psr_settings.psr_feature_enabled)
return false;
/* Nothing to compress */
@@ -2376,6 +2355,7 @@ static void init_hw(struct dc *dc)
struct abm *abm;
struct dmcu *dmcu;
struct dce_hwseq *hws = dc->hwseq;
+ uint32_t backlight = MAX_BACKLIGHT_LEVEL;
bp = dc->ctx->dc_bios;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -2422,12 +2402,17 @@ static void init_hw(struct dc *dc)
audio->funcs->hw_init(audio);
}
- abm = dc->res_pool->abm;
- if (abm != NULL) {
- abm->funcs->init_backlight(abm);
- abm->funcs->abm_init(abm);
+ for (i = 0; i < dc->link_count; i++) {
+ struct dc_link *link = dc->links[i];
+
+ if (link->panel_cntl)
+ backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
}
+ abm = dc->res_pool->abm;
+ if (abm != NULL)
+ abm->funcs->abm_init(abm, backlight);
+
dmcu = dc->res_pool->dmcu;
if (dmcu != NULL && abm != NULL)
abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
@@ -2735,6 +2720,53 @@ void dce110_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_res.xfm, attributes);
}
+bool dce110_set_backlight_level(struct pipe_ctx *pipe_ctx,
+ uint32_t backlight_pwm_u16_16,
+ uint32_t frame_ramp)
+{
+ struct dc_link *link = pipe_ctx->stream->link;
+ struct dc *dc = link->ctx->dc;
+ struct abm *abm = pipe_ctx->stream_res.abm;
+ struct panel_cntl *panel_cntl = link->panel_cntl;
+ struct dmcu *dmcu = dc->res_pool->dmcu;
+ bool fw_set_brightness = true;
+ /* DMCU -1 for all controller id values,
+ * therefore +1 here
+ */
+ uint32_t controller_id = pipe_ctx->stream_res.tg->inst + 1;
+
+ if (abm == NULL || panel_cntl == NULL || (abm->funcs->set_backlight_level_pwm == NULL))
+ return false;
+
+ if (dmcu)
+ fw_set_brightness = dmcu->funcs->is_dmcu_initialized(dmcu);
+
+ if (!fw_set_brightness && panel_cntl->funcs->driver_set_backlight)
+ panel_cntl->funcs->driver_set_backlight(panel_cntl, backlight_pwm_u16_16);
+ else
+ abm->funcs->set_backlight_level_pwm(
+ abm,
+ backlight_pwm_u16_16,
+ frame_ramp,
+ controller_id,
+ link->panel_cntl->inst);
+
+ return true;
+}
+
+void dce110_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx)
+{
+ struct abm *abm = pipe_ctx->stream_res.abm;
+ struct panel_cntl *panel_cntl = pipe_ctx->stream->link->panel_cntl;
+
+ if (abm)
+ abm->funcs->set_abm_immediate_disable(abm,
+ pipe_ctx->stream->link->panel_cntl->inst);
+
+ if (panel_cntl)
+ panel_cntl->funcs->store_backlight_level(panel_cntl);
+}
+
static const struct hw_sequencer_funcs dce110_funcs = {
.program_gamut_remap = program_gamut_remap,
.program_output_csc = program_output_csc,
@@ -2769,7 +2801,9 @@ static const struct hw_sequencer_funcs dce110_funcs = {
.edp_power_control = dce110_edp_power_control,
.edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
.set_cursor_position = dce110_set_cursor_position,
- .set_cursor_attribute = dce110_set_cursor_attribute
+ .set_cursor_attribute = dce110_set_cursor_attribute,
+ .set_backlight_level = dce110_set_backlight_level,
+ .set_abm_immediate_disable = dce110_set_abm_immediate_disable,
};
static const struct hwseq_private_funcs dce110_private_funcs = {
@@ -2785,8 +2819,6 @@ static const struct hwseq_private_funcs dce110_private_funcs = {
.disable_stream_gating = NULL,
.enable_stream_gating = NULL,
.edp_backlight_control = dce110_edp_backlight_control,
- .is_panel_backlight_on = dce110_is_panel_backlight_on,
- .is_panel_powered_on = dce110_is_panel_powered_on,
};
void dce110_hw_sequencer_construct(struct dc *dc)
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
index 34be166e8ff0..fe5326df00f7 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
@@ -85,9 +85,10 @@ void dce110_edp_wait_for_hpd_ready(
struct dc_link *link,
bool power_up);
-bool dce110_is_panel_backlight_on(struct dc_link *link);
-
-bool dce110_is_panel_powered_on(struct dc_link *link);
+bool dce110_set_backlight_level(struct pipe_ctx *pipe_ctx,
+ uint32_t backlight_pwm_u16_16,
+ uint32_t frame_ramp);
+void dce110_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx);
#endif /* __DC_HWSS_DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_csc_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_csc_v.c
index 4245e1f818a3..e096d2b95ef9 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_csc_v.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_csc_v.c
@@ -679,8 +679,7 @@ void dce110_opp_v_set_csc_default(
if (default_adjust->force_hw_default == false) {
const struct out_csc_color_matrix *elm;
/* currently parameter not in use */
- enum grph_color_adjust_option option =
- GRPH_COLOR_MATRIX_HW_DEFAULT;
+ enum grph_color_adjust_option option;
uint32_t i;
/*
* HW default false we program locally defined matrix
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
index bf14e9ab040c..9597fc79d7fa 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -53,6 +53,7 @@
#include "dce/dce_abm.h"
#include "dce/dce_dmcu.h"
#include "dce/dce_i2c.h"
+#include "dce/dce_panel_cntl.h"
#define DC_LOGGER \
dc->ctx->logger
@@ -275,6 +276,18 @@ static const struct dce_stream_encoder_mask se_mask = {
SE_COMMON_MASK_SH_LIST_DCE110(_MASK)
};
+static const struct dce_panel_cntl_registers panel_cntl_regs[] = {
+ { DCE_PANEL_CNTL_REG_LIST() }
+};
+
+static const struct dce_panel_cntl_shift panel_cntl_shift = {
+ DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_panel_cntl_mask panel_cntl_mask = {
+ DCE_PANEL_CNTL_MASK_SH_LIST(_MASK)
+};
+
static const struct dce110_aux_registers_shift aux_shift = {
DCE_AUX_MASK_SH_LIST(__SHIFT)
};
@@ -673,6 +686,23 @@ static struct link_encoder *dce110_link_encoder_create(
return &enc110->base;
}
+static struct panel_cntl *dce110_panel_cntl_create(const struct panel_cntl_init_data *init_data)
+{
+ struct dce_panel_cntl *panel_cntl =
+ kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL);
+
+ if (!panel_cntl)
+ return NULL;
+
+ dce_panel_cntl_construct(panel_cntl,
+ init_data,
+ &panel_cntl_regs[init_data->inst],
+ &panel_cntl_shift,
+ &panel_cntl_mask);
+
+ return &panel_cntl->base;
+}
+
static struct output_pixel_processor *dce110_opp_create(
struct dc_context *ctx,
uint32_t inst)
@@ -1203,6 +1233,7 @@ struct stream_encoder *dce110_find_first_free_match_stream_enc_for_link(
static const struct resource_funcs dce110_res_pool_funcs = {
.destroy = dce110_destroy_resource_pool,
.link_enc_create = dce110_link_encoder_create,
+ .panel_cntl_create = dce110_panel_cntl_create,
.validate_bandwidth = dce110_validate_bandwidth,
.validate_plane = dce110_validate_plane,
.acquire_idle_pipe_for_layer = dce110_acquire_underlay,
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
index 700ad8b3e54b..51b3fe502670 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
@@ -51,6 +51,7 @@
#include "dce/dce_dmcu.h"
#include "dce/dce_aux.h"
#include "dce/dce_i2c.h"
+#include "dce/dce_panel_cntl.h"
#include "reg_helper.h"
@@ -238,6 +239,18 @@ static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = {
aux_regs(5)
};
+static const struct dce_panel_cntl_registers panel_cntl_regs[] = {
+ { DCE_PANEL_CNTL_REG_LIST() }
+};
+
+static const struct dce_panel_cntl_shift panel_cntl_shift = {
+ DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_panel_cntl_mask panel_cntl_mask = {
+ DCE_PANEL_CNTL_MASK_SH_LIST(_MASK)
+};
+
#define hpd_regs(id)\
[id] = {\
HPD_REG_LIST(id)\
@@ -398,7 +411,7 @@ static const struct dc_plane_cap plane_cap = {
.pixel_format_support = {
.argb8888 = true,
.nv12 = false,
- .fp16 = false
+ .fp16 = true
},
.max_upscale_factor = {
@@ -631,6 +644,23 @@ struct link_encoder *dce112_link_encoder_create(
return &enc110->base;
}
+static struct panel_cntl *dce112_panel_cntl_create(const struct panel_cntl_init_data *init_data)
+{
+ struct dce_panel_cntl *panel_cntl =
+ kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL);
+
+ if (!panel_cntl)
+ return NULL;
+
+ dce_panel_cntl_construct(panel_cntl,
+ init_data,
+ &panel_cntl_regs[init_data->inst],
+ &panel_cntl_shift,
+ &panel_cntl_mask);
+
+ return &panel_cntl->base;
+}
+
static struct input_pixel_processor *dce112_ipp_create(
struct dc_context *ctx, uint32_t inst)
{
@@ -1021,6 +1051,7 @@ static void dce112_destroy_resource_pool(struct resource_pool **pool)
static const struct resource_funcs dce112_res_pool_funcs = {
.destroy = dce112_destroy_resource_pool,
.link_enc_create = dce112_link_encoder_create,
+ .panel_cntl_create = dce112_panel_cntl_create,
.validate_bandwidth = dce112_validate_bandwidth,
.validate_plane = dce100_validate_plane,
.add_stream_to_ctx = dce112_add_stream_to_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
index 53ab88ef71f5..8f362e8c1787 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
@@ -44,6 +44,7 @@
#include "dce/dce_clock_source.h"
#include "dce/dce_ipp.h"
#include "dce/dce_mem_input.h"
+#include "dce/dce_panel_cntl.h"
#include "dce110/dce110_hw_sequencer.h"
#include "dce120/dce120_hw_sequencer.h"
@@ -293,6 +294,18 @@ static const struct dce_stream_encoder_mask se_mask = {
SE_COMMON_MASK_SH_LIST_DCE120(_MASK)
};
+static const struct dce_panel_cntl_registers panel_cntl_regs[] = {
+ { DCE_PANEL_CNTL_REG_LIST() }
+};
+
+static const struct dce_panel_cntl_shift panel_cntl_shift = {
+ DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_panel_cntl_mask panel_cntl_mask = {
+ DCE_PANEL_CNTL_MASK_SH_LIST(_MASK)
+};
+
static const struct dce110_aux_registers_shift aux_shift = {
DCE12_AUX_MASK_SH_LIST(__SHIFT)
};
@@ -503,7 +516,7 @@ static const struct dc_plane_cap plane_cap = {
.pixel_format_support = {
.argb8888 = true,
.nv12 = false,
- .fp16 = false
+ .fp16 = true
},
.max_upscale_factor = {
@@ -715,6 +728,23 @@ static struct link_encoder *dce120_link_encoder_create(
return &enc110->base;
}
+static struct panel_cntl *dce120_panel_cntl_create(const struct panel_cntl_init_data *init_data)
+{
+ struct dce_panel_cntl *panel_cntl =
+ kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL);
+
+ if (!panel_cntl)
+ return NULL;
+
+ dce_panel_cntl_construct(panel_cntl,
+ init_data,
+ &panel_cntl_regs[init_data->inst],
+ &panel_cntl_shift,
+ &panel_cntl_mask);
+
+ return &panel_cntl->base;
+}
+
static struct input_pixel_processor *dce120_ipp_create(
struct dc_context *ctx, uint32_t inst)
{
@@ -880,6 +910,7 @@ static void dce120_destroy_resource_pool(struct resource_pool **pool)
static const struct resource_funcs dce120_res_pool_funcs = {
.destroy = dce120_destroy_resource_pool,
.link_enc_create = dce120_link_encoder_create,
+ .panel_cntl_create = dce120_panel_cntl_create,
.validate_bandwidth = dce112_validate_bandwidth,
.validate_plane = dce100_validate_plane,
.add_stream_to_ctx = dce112_add_stream_to_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
index 893261c81854..d2ceebdbdf51 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
@@ -36,34 +36,6 @@
#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"
-struct dce80_hw_seq_reg_offsets {
- uint32_t crtc;
-};
-
-static const struct dce80_hw_seq_reg_offsets reg_offsets[] = {
-{
- .crtc = (mmCRTC0_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
-},
-{
- .crtc = (mmCRTC1_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
-},
-{
- .crtc = (mmCRTC2_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
-},
-{
- .crtc = (mmCRTC3_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
-},
-{
- .crtc = (mmCRTC4_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
-},
-{
- .crtc = (mmCRTC5_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
-}
-};
-
-#define HW_REG_CRTC(reg, id)\
- (reg + reg_offsets[id].crtc)
-
/*******************************************************************************
* Private definitions
******************************************************************************/
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
index 2ad5c28c6e66..a19be9de2df7 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
@@ -50,6 +50,7 @@
#include "dce/dce_hwseq.h"
#include "dce80/dce80_hw_sequencer.h"
#include "dce100/dce100_resource.h"
+#include "dce/dce_panel_cntl.h"
#include "reg_helper.h"
@@ -266,6 +267,18 @@ static const struct dce_stream_encoder_mask se_mask = {
SE_COMMON_MASK_SH_LIST_DCE80_100(_MASK)
};
+static const struct dce_panel_cntl_registers panel_cntl_regs[] = {
+ { DCE_PANEL_CNTL_REG_LIST() }
+};
+
+static const struct dce_panel_cntl_shift panel_cntl_shift = {
+ DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_panel_cntl_mask panel_cntl_mask = {
+ DCE_PANEL_CNTL_MASK_SH_LIST(_MASK)
+};
+
#define opp_regs(id)\
[id] = {\
OPP_DCE_80_REG_LIST(id),\
@@ -728,6 +741,23 @@ struct link_encoder *dce80_link_encoder_create(
return &enc110->base;
}
+static struct panel_cntl *dce80_panel_cntl_create(const struct panel_cntl_init_data *init_data)
+{
+ struct dce_panel_cntl *panel_cntl =
+ kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL);
+
+ if (!panel_cntl)
+ return NULL;
+
+ dce_panel_cntl_construct(panel_cntl,
+ init_data,
+ &panel_cntl_regs[init_data->inst],
+ &panel_cntl_shift,
+ &panel_cntl_mask);
+
+ return &panel_cntl->base;
+}
+
struct clock_source *dce80_clock_source_create(
struct dc_context *ctx,
struct dc_bios *bios,
@@ -909,6 +939,7 @@ static void dce80_destroy_resource_pool(struct resource_pool **pool)
static const struct resource_funcs dce80_res_pool_funcs = {
.destroy = dce80_destroy_resource_pool,
.link_enc_create = dce80_link_encoder_create,
+ .panel_cntl_create = dce80_panel_cntl_create,
.validate_bandwidth = dce80_validate_bandwidth,
.validate_plane = dce100_validate_plane,
.add_stream_to_ctx = dce100_add_stream_to_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
index 0e682b5aa3eb..7f8456b9988b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -134,13 +134,6 @@ bool dpp1_get_optimal_number_of_taps(
struct scaler_data *scl_data,
const struct scaling_taps *in_taps)
{
- uint32_t pixel_width;
-
- if (scl_data->viewport.width > scl_data->recout.width)
- pixel_width = scl_data->recout.width;
- else
- pixel_width = scl_data->viewport.width;
-
/* Some ASICs does not support FP16 scaling, so we reject modes require this*/
if (scl_data->format == PIXEL_FORMAT_FP16 &&
dpp->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT &&
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
index deccab0228d2..75637c291e75 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
@@ -93,7 +93,6 @@ void hubbub1_wm_read_state(struct hubbub *hubbub,
void hubbub1_allow_self_refresh_control(struct hubbub *hubbub, bool allow)
{
struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
-
/*
* DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 1 means do not allow stutter
* DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 0 means allow stutter
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index 31b64733d693..319366ebb44f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -1139,6 +1139,8 @@ void hubp1_cursor_set_position(
int src_y_offset = pos->y - pos->y_hotspot - param->viewport.y;
int x_hotspot = pos->x_hotspot;
int y_hotspot = pos->y_hotspot;
+ int cursor_height = (int)hubp->curs_attr.height;
+ int cursor_width = (int)hubp->curs_attr.width;
uint32_t dst_x_offset;
uint32_t cur_en = pos->enable ? 1 : 0;
@@ -1152,10 +1154,16 @@ void hubp1_cursor_set_position(
if (hubp->curs_attr.address.quad_part == 0)
return;
+ // Rotated cursor width/height and hotspots tweaks for offset calculation
if (param->rotation == ROTATION_ANGLE_90 || param->rotation == ROTATION_ANGLE_270) {
- src_x_offset = pos->y - pos->y_hotspot - param->viewport.x;
- y_hotspot = pos->x_hotspot;
- x_hotspot = pos->y_hotspot;
+ swap(cursor_height, cursor_width);
+ if (param->rotation == ROTATION_ANGLE_90) {
+ src_x_offset = pos->x - pos->y_hotspot - param->viewport.x;
+ src_y_offset = pos->y - pos->x_hotspot - param->viewport.y;
+ }
+ } else if (param->rotation == ROTATION_ANGLE_180) {
+ src_x_offset = pos->x - param->viewport.x;
+ src_y_offset = pos->y - param->viewport.y;
}
if (param->mirror) {
@@ -1177,13 +1185,13 @@ void hubp1_cursor_set_position(
if (src_x_offset >= (int)param->viewport.width)
cur_en = 0; /* not visible beyond right edge*/
- if (src_x_offset + (int)hubp->curs_attr.width <= 0)
+ if (src_x_offset + cursor_width <= 0)
cur_en = 0; /* not visible beyond left edge*/
if (src_y_offset >= (int)param->viewport.height)
cur_en = 0; /* not visible beyond bottom edge*/
- if (src_y_offset + (int)hubp->curs_attr.height <= 0)
+ if (src_y_offset + cursor_height <= 0)
cur_en = 0; /* not visible beyond top edge*/
if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
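
The visibility checks updated above reduce to a bounds test against the viewport, using width/height swapped for 90/270-degree rotation. A standalone sketch (hypothetical helper, plain C) of that test:

#include <stdbool.h>

static bool cursor_visible(int src_x, int src_y, int cursor_width,
			   int cursor_height, int vp_width, int vp_height,
			   bool rotated_90_or_270)
{
	if (rotated_90_or_270) {
		int tmp = cursor_width;

		cursor_width = cursor_height;
		cursor_height = tmp;
	}

	if (src_x >= vp_width || src_x + cursor_width <= 0)
		return false;	/* beyond right/left edge */
	if (src_y >= vp_height || src_y + cursor_height <= 0)
		return false;	/* beyond bottom/top edge */
	return true;
}
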
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 416afb99529d..77f16921e7f0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -737,7 +737,8 @@ void dcn10_bios_golden_init(struct dc *dc)
if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
if (allow_self_fresh_force_enable == false &&
dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub))
- dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub, true);
+ dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
+ !dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
}
@@ -826,6 +827,14 @@ enum dc_status dcn10_enable_stream_timing(
color_space = stream->output_color_space;
color_space_to_black_color(dc, color_space, &black_color);
+ /*
+ * The way 420 is packed, 2 channels carry Y component, 1 channel
+ * alternate between Cb and Cr, so both channels need the pixel
+ * value for Y
+ */
+ if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ black_color.color_r_cr = black_color.color_g_y;
+
if (pipe_ctx->stream_res.tg->funcs->set_blank_color)
pipe_ctx->stream_res.tg->funcs->set_blank_color(
pipe_ctx->stream_res.tg,
@@ -903,7 +912,7 @@ static void dcn10_reset_back_end_for_pipe(
if (pipe_ctx->top_pipe == NULL) {
if (pipe_ctx->stream_res.abm)
- pipe_ctx->stream_res.abm->funcs->set_abm_immediate_disable(pipe_ctx->stream_res.abm);
+ dc->hwss.set_abm_immediate_disable(pipe_ctx);
pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
@@ -1238,12 +1247,13 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
void dcn10_init_hw(struct dc *dc)
{
- int i;
+ int i, j;
struct abm *abm = dc->res_pool->abm;
struct dmcu *dmcu = dc->res_pool->dmcu;
struct dce_hwseq *hws = dc->hwseq;
struct dc_bios *dcb = dc->ctx->dc_bios;
struct resource_pool *res_pool = dc->res_pool;
+ uint32_t backlight = MAX_BACKLIGHT_LEVEL;
if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
@@ -1333,17 +1343,28 @@ void dcn10_init_hw(struct dc *dc)
continue;
/*
- * core_link_read_dpcd() will invoke dm_helpers_dp_read_dpcd(),
- * which needs to read dpcd info with the help of aconnector.
- * If aconnector (dc->links[i]->prev) is NULL, then dpcd status
- * cannot be read.
+ * If any of the displays are lit up turn them off.
+ * The reason is that some MST hubs cannot be turned off
+ * completely until we tell them to do so.
+ * If not turned off, then displays connected to MST hub
+ * won't light up.
*/
- if (dc->links[i]->priv) {
- /* if any of the displays are lit up turn them off */
- status = core_link_read_dpcd(dc->links[i], DP_SET_POWER,
- &dpcd_power_state, sizeof(dpcd_power_state));
- if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0)
- dp_receiver_power_ctrl(dc->links[i], false);
+ status = core_link_read_dpcd(dc->links[i], DP_SET_POWER,
+ &dpcd_power_state, sizeof(dpcd_power_state));
+ if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0) {
+ /* blank dp stream before power off receiver*/
+ if (dc->links[i]->link_enc->funcs->get_dig_frontend) {
+ unsigned int fe = dc->links[i]->link_enc->funcs->get_dig_frontend(dc->links[i]->link_enc);
+
+ for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
+ if (fe == dc->res_pool->stream_enc[j]->id) {
+ dc->res_pool->stream_enc[j]->funcs->dp_blank(
+ dc->res_pool->stream_enc[j]);
+ break;
+ }
+ }
+ }
+ dp_receiver_power_ctrl(dc->links[i], false);
}
}
}
@@ -1361,17 +1382,54 @@ void dcn10_init_hw(struct dc *dc)
!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
}
+ /* In headless boot cases, DIG may be turned
+ * on, which causes HW/SW discrepancies.
+ * To avoid this, power down the hardware on boot
+ * if DIG is turned on and seamless boot is not enabled.
+ */
+ if (dc->config.power_down_display_on_boot) {
+ struct dc_link *edp_link = get_edp_link(dc);
+
+ if (edp_link &&
+ edp_link->link_enc->funcs->is_dig_enabled &&
+ edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
+ dc->hwss.edp_backlight_control &&
+ dc->hwss.power_down &&
+ dc->hwss.edp_power_control) {
+ dc->hwss.edp_backlight_control(edp_link, false);
+ dc->hwss.power_down(dc);
+ dc->hwss.edp_power_control(edp_link, false);
+ } else {
+ for (i = 0; i < dc->link_count; i++) {
+ struct dc_link *link = dc->links[i];
+
+ if (link->link_enc->funcs->is_dig_enabled &&
+ link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
+ dc->hwss.power_down) {
+ dc->hwss.power_down(dc);
+ break;
+ }
+
+ }
+ }
+ }
+
for (i = 0; i < res_pool->audio_count; i++) {
struct audio *audio = res_pool->audios[i];
audio->funcs->hw_init(audio);
}
- if (abm != NULL) {
- abm->funcs->init_backlight(abm);
- abm->funcs->abm_init(abm);
+ for (i = 0; i < dc->link_count; i++) {
+ struct dc_link *link = dc->links[i];
+
+ if (link->panel_cntl)
+ backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
}
+ if (abm != NULL)
+ abm->funcs->abm_init(abm, backlight);
+
if (dmcu != NULL && !dmcu->auto_load_dmcu)
dmcu->funcs->dmcu_init(dmcu);
@@ -2164,25 +2222,25 @@ void dcn10_get_surface_visual_confirm_color(
switch (pipe_ctx->plane_res.scl_data.format) {
case PIXEL_FORMAT_ARGB8888:
- /* set boarder color to red */
+ /* set border color to red */
color->color_r_cr = color_value;
break;
case PIXEL_FORMAT_ARGB2101010:
- /* set boarder color to blue */
+ /* set border color to blue */
color->color_b_cb = color_value;
break;
case PIXEL_FORMAT_420BPP8:
- /* set boarder color to green */
+ /* set border color to green */
color->color_g_y = color_value;
break;
case PIXEL_FORMAT_420BPP10:
- /* set boarder color to yellow */
+ /* set border color to yellow */
color->color_g_y = color_value;
color->color_r_cr = color_value;
break;
case PIXEL_FORMAT_FP16:
- /* set boarder color to white */
+ /* set border color to white */
color->color_r_cr = color_value;
color->color_b_cb = color_value;
color->color_g_y = color_value;
@@ -2207,25 +2265,25 @@ void dcn10_get_hdr_visual_confirm_color(
switch (top_pipe_ctx->plane_res.scl_data.format) {
case PIXEL_FORMAT_ARGB2101010:
if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) {
- /* HDR10, ARGB2101010 - set boarder color to red */
+ /* HDR10, ARGB2101010 - set border color to red */
color->color_r_cr = color_value;
} else if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) {
- /* FreeSync 2 ARGB2101010 - set boarder color to pink */
+ /* FreeSync 2 ARGB2101010 - set border color to pink */
color->color_r_cr = color_value;
color->color_b_cb = color_value;
}
break;
case PIXEL_FORMAT_FP16:
if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) {
- /* HDR10, FP16 - set boarder color to blue */
+ /* HDR10, FP16 - set border color to blue */
color->color_b_cb = color_value;
} else if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) {
- /* FreeSync 2 HDR - set boarder color to green */
+ /* FreeSync 2 HDR - set border color to green */
color->color_g_y = color_value;
}
break;
default:
- /* SDR - set boarder color to Gray */
+ /* SDR - set border color to Gray */
color->color_r_cr = color_value/2;
color->color_b_cb = color_value/2;
color->color_g_y = color_value/2;
@@ -2274,6 +2332,14 @@ void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
&blnd_cfg.black_color);
}
+ /*
+ * The way 420 is packed, 2 channels carry the Y component and 1 channel
+ * alternates between Cb and Cr, so both channels need the pixel
+ * value for Y
+ */
+ if (pipe_ctx->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ blnd_cfg.black_color.color_r_cr = blnd_cfg.black_color.color_g_y;
+
if (per_pixel_alpha)
blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
else
@@ -2510,12 +2576,12 @@ void dcn10_blank_pixel_data(
if (stream_res->tg->funcs->set_blank)
stream_res->tg->funcs->set_blank(stream_res->tg, blank);
if (stream_res->abm) {
- stream_res->abm->funcs->set_pipe(stream_res->abm, stream_res->tg->inst + 1);
+ stream_res->abm->funcs->set_pipe(stream_res->abm, stream_res->tg->inst + 1,
+ stream->link->panel_cntl->inst);
stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
}
} else if (blank) {
- if (stream_res->abm)
- stream_res->abm->funcs->set_abm_immediate_disable(stream_res->abm);
+ dc->hwss.set_abm_immediate_disable(pipe_ctx);
if (stream_res->tg->funcs->set_blank)
stream_res->tg->funcs->set_blank(stream_res->tg, blank);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
index 9e8e32629e47..7cb8c3fb2665 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
@@ -73,6 +73,8 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
.get_clock = dcn10_get_clock,
.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
.calc_vupdate_position = dcn10_calc_vupdate_position,
+ .set_backlight_level = dce110_set_backlight_level,
+ .set_abm_immediate_disable = dce110_set_abm_immediate_disable,
};
static const struct hwseq_private_funcs dcn10_private_funcs = {
@@ -89,8 +91,6 @@ static const struct hwseq_private_funcs dcn10_private_funcs = {
.reset_hw_ctx_wrap = dcn10_reset_hw_ctx_wrap,
.enable_stream_timing = dcn10_enable_stream_timing,
.edp_backlight_control = dce110_edp_backlight_control,
- .is_panel_backlight_on = dce110_is_panel_backlight_on,
- .is_panel_powered_on = dce110_is_panel_powered_on,
.disable_stream_gating = NULL,
.enable_stream_gating = NULL,
.setup_vupdate_interrupt = dcn10_setup_vupdate_interrupt,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
index d3617d6785a7..7fd385be3f3d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
@@ -90,7 +90,8 @@ static const struct link_encoder_funcs dcn10_lnk_enc_funcs = {
.is_dig_enabled = dcn10_is_dig_enabled,
.get_dig_frontend = dcn10_get_dig_frontend,
.get_dig_mode = dcn10_get_dig_mode,
- .destroy = dcn10_link_encoder_destroy
+ .destroy = dcn10_link_encoder_destroy,
+ .get_max_link_cap = dcn10_link_encoder_get_max_link_cap,
};
static enum bp_result link_transmitter_control(
@@ -1370,7 +1371,6 @@ void dcn10_link_encoder_disable_hpd(struct link_encoder *enc)
DC_HPD_EN, 0);
}
-
#define AUX_REG(reg)\
(enc10->aux_regs->reg)
@@ -1425,3 +1425,19 @@ enum signal_type dcn10_get_dig_mode(
return SIGNAL_TYPE_NONE;
}
+void dcn10_link_encoder_get_max_link_cap(struct link_encoder *enc,
+ struct dc_link_settings *link_settings)
+{
+ /* Set Default link settings */
+ struct dc_link_settings max_link_cap = {LANE_COUNT_FOUR, LINK_RATE_HIGH,
+ LINK_SPREAD_05_DOWNSPREAD_30KHZ, false, 0};
+
+ /* Raise the link rate based on supported features */
+ if (enc->features.flags.bits.IS_HBR2_CAPABLE)
+ max_link_cap.link_rate = LINK_RATE_HIGH2;
+
+ if (enc->features.flags.bits.IS_HBR3_CAPABLE)
+ max_link_cap.link_rate = LINK_RATE_HIGH3;
+
+ *link_settings = max_link_cap;
+}
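A rough sketch of the rate selection performed by the new helper above, with simplified stand-in types (LINK_RATE_HIGH/HIGH2/HIGH3 correspond to HBR/HBR2/HBR3):

/* Sketch only: default to HBR, then raise the cap when the encoder
 * advertises HBR2/HBR3 support, mirroring the selection logic above.
 */
enum sketch_link_rate { SKETCH_RATE_HBR, SKETCH_RATE_HBR2, SKETCH_RATE_HBR3 };

static enum sketch_link_rate sketch_max_rate(int hbr2_capable, int hbr3_capable)
{
	enum sketch_link_rate rate = SKETCH_RATE_HBR;

	if (hbr2_capable)
		rate = SKETCH_RATE_HBR2;
	if (hbr3_capable)
		rate = SKETCH_RATE_HBR3;

	return rate;
}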
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
index 762109174fb8..68395bcc24fd 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
@@ -575,4 +575,7 @@ void dcn10_aux_initialize(struct dcn10_link_encoder *enc10);
enum signal_type dcn10_get_dig_mode(
struct link_encoder *enc);
+
+void dcn10_link_encoder_get_max_link_cap(struct link_encoder *enc,
+ struct dc_link_settings *link_settings);
#endif /* __DC_LINK_ENCODER__DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index 17d96ec6acd8..ec0ab42becba 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -299,6 +299,7 @@ void optc1_set_vtg_params(struct timing_generator *optc,
uint32_t asic_blank_end;
uint32_t v_init;
uint32_t v_fp2 = 0;
+ int32_t vertical_line_start;
struct optc *optc1 = DCN10TG_FROM_TG(optc);
@@ -315,8 +316,9 @@ void optc1_set_vtg_params(struct timing_generator *optc,
patched_crtc_timing.v_border_top;
/* if VSTARTUP is before VSYNC, FP2 is the offset, otherwise 0 */
- if (optc1->vstartup_start > asic_blank_end)
- v_fp2 = optc1->vstartup_start - asic_blank_end;
+ vertical_line_start = asic_blank_end - optc1->vstartup_start + 1;
+ if (vertical_line_start < 0)
+ v_fp2 = -vertical_line_start;
/* Interlace */
if (REG(OTG_INTERLACE_CONTROL)) {
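A small sketch of the new FP2 arithmetic above, using made-up values rather than real timings:

/* Sketch only: with vstartup_start = 100 and asic_blank_end = 90,
 * vertical_line_start = 90 - 100 + 1 = -9, so v_fp2 = 9 (the replaced
 * code would have computed 100 - 90 = 10).
 */
static int sketch_compute_v_fp2(int vstartup_start, int asic_blank_end)
{
	int vertical_line_start = asic_blank_end - vstartup_start + 1;

	return vertical_line_start < 0 ? -vertical_line_start : 0;
}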
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
index 9a459a8fe8a0..8d1e52fb0393 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
@@ -158,6 +158,7 @@ struct dcn_optc_registers {
uint32_t OTG_GSL_WINDOW_Y;
uint32_t OTG_VUPDATE_KEEPOUT;
uint32_t OTG_CRC_CNTL;
+ uint32_t OTG_CRC_CNTL2;
uint32_t OTG_CRC0_DATA_RG;
uint32_t OTG_CRC0_DATA_B;
uint32_t OTG_CRC0_WINDOWA_X_CONTROL;
@@ -475,7 +476,11 @@ struct dcn_optc_registers {
type OPTC_DSC_SLICE_WIDTH;\
type OPTC_SEGMENT_WIDTH;\
type OPTC_DWB0_SOURCE_SELECT;\
- type OPTC_DWB1_SOURCE_SELECT;
+ type OPTC_DWB1_SOURCE_SELECT;\
+ type OTG_CRC_DSC_MODE;\
+ type OTG_CRC_DATA_STREAM_COMBINE_MODE;\
+ type OTG_CRC_DATA_STREAM_SPLIT_MODE;\
+ type OTG_CRC_DATA_FORMAT;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index ba849aa31e6e..17d5cb422025 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -51,6 +51,7 @@
#include "dce112/dce112_resource.h"
#include "dcn10_hubp.h"
#include "dcn10_hubbub.h"
+#include "dce/dce_panel_cntl.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"
@@ -329,6 +330,18 @@ static const struct dcn10_link_enc_mask le_mask = {
LINK_ENCODER_MASK_SH_LIST_DCN10(_MASK)
};
+static const struct dce_panel_cntl_registers panel_cntl_regs[] = {
+ { DCN_PANEL_CNTL_REG_LIST() }
+};
+
+static const struct dce_panel_cntl_shift panel_cntl_shift = {
+ DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_panel_cntl_mask panel_cntl_mask = {
+ DCE_PANEL_CNTL_MASK_SH_LIST(_MASK)
+};
+
static const struct dce110_aux_registers_shift aux_shift = {
DCN10_AUX_MASK_SH_LIST(__SHIFT)
};
@@ -817,6 +830,23 @@ struct link_encoder *dcn10_link_encoder_create(
return &enc10->base;
}
+static struct panel_cntl *dcn10_panel_cntl_create(const struct panel_cntl_init_data *init_data)
+{
+ struct dce_panel_cntl *panel_cntl =
+ kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL);
+
+ if (!panel_cntl)
+ return NULL;
+
+ dce_panel_cntl_construct(panel_cntl,
+ init_data,
+ &panel_cntl_regs[init_data->inst],
+ &panel_cntl_shift,
+ &panel_cntl_mask);
+
+ return &panel_cntl->base;
+}
+
struct clock_source *dcn10_clock_source_create(
struct dc_context *ctx,
struct dc_bios *bios,
@@ -1091,24 +1121,6 @@ static enum dc_status build_mapped_resource(
{
struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream);
- /*TODO Seems unneeded anymore */
- /* if (old_context && resource_is_stream_unchanged(old_context, stream)) {
- if (stream != NULL && old_context->streams[i] != NULL) {
- todo: shouldn't have to copy missing parameter here
- resource_build_bit_depth_reduction_params(stream,
- &stream->bit_depth_params);
- stream->clamping.pixel_encoding =
- stream->timing.pixel_encoding;
-
- resource_build_bit_depth_reduction_params(stream,
- &stream->bit_depth_params);
- build_clamping_params(stream);
-
- continue;
- }
- }
- */
-
if (!pipe_ctx)
return DC_ERROR_UNEXPECTED;
@@ -1301,6 +1313,7 @@ static const struct dc_cap_funcs cap_funcs = {
static const struct resource_funcs dcn10_res_pool_funcs = {
.destroy = dcn10_destroy_resource_pool,
.link_enc_create = dcn10_link_encoder_create,
+ .panel_cntl_create = dcn10_panel_cntl_create,
.validate_bandwidth = dcn_validate_bandwidth,
.acquire_idle_pipe_for_layer = dcn10_acquire_idle_pipe_for_layer,
.validate_plane = dcn10_validate_plane,
@@ -1363,6 +1376,40 @@ static bool dcn10_resource_construct(
/* Raven DP PHY HBR2 eye diagram pattern is not stable. Use TP4 */
dc->caps.force_dp_tps4_for_cp2520 = true;
+ /* Color pipeline capabilities */
+ dc->caps.color.dpp.dcn_arch = 1;
+ dc->caps.color.dpp.input_lut_shared = 1;
+ dc->caps.color.dpp.icsc = 1;
+ dc->caps.color.dpp.dgam_ram = 1;
+ dc->caps.color.dpp.dgam_rom_caps.srgb = 1;
+ dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1;
+ dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 0;
+ dc->caps.color.dpp.dgam_rom_caps.pq = 0;
+ dc->caps.color.dpp.dgam_rom_caps.hlg = 0;
+ dc->caps.color.dpp.post_csc = 0;
+ dc->caps.color.dpp.gamma_corr = 0;
+
+ dc->caps.color.dpp.hw_3d_lut = 0;
+ dc->caps.color.dpp.ogam_ram = 1; // RGAM on DCN1
+ dc->caps.color.dpp.ogam_rom_caps.srgb = 1;
+ dc->caps.color.dpp.ogam_rom_caps.bt2020 = 1;
+ dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0;
+ dc->caps.color.dpp.ogam_rom_caps.pq = 0;
+ dc->caps.color.dpp.ogam_rom_caps.hlg = 0;
+ dc->caps.color.dpp.ocsc = 1;
+
+ /* no post-blend color operations */
+ dc->caps.color.mpc.gamut_remap = 0;
+ dc->caps.color.mpc.num_3dluts = 0;
+ dc->caps.color.mpc.shared_3d_lut = 0;
+ dc->caps.color.mpc.ogam_ram = 0;
+ dc->caps.color.mpc.ogam_rom_caps.srgb = 0;
+ dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0;
+ dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0;
+ dc->caps.color.mpc.ogam_rom_caps.pq = 0;
+ dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
+ dc->caps.color.mpc.ocsc = 0;
+
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
else
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
index 7eba9333c328..07b2f9399671 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
@@ -1274,7 +1274,6 @@ static void enc1_se_audio_setup(
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
- uint32_t speakers = 0;
uint32_t channels = 0;
ASSERT(audio_info);
@@ -1282,7 +1281,6 @@ static void enc1_se_audio_setup(
/* This should not happen, but it does, so return here to avoid a BSOD */
return;
- speakers = audio_info->flags.info.ALLSPEAKERS;
channels = speakers_to_channels(audio_info->flags.speaker_flags).all;
/* setup the audio stream source select (audio -> dig mapping) */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h
index 501532dd523a..c478213ba7ad 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h
@@ -80,6 +80,7 @@ struct dcn20_hubbub {
const struct dcn_hubbub_mask *masks;
unsigned int debug_test_index_pstate;
struct dcn_watermark_set watermarks;
+ int num_vmid;
struct dcn20_vmid vmid[16];
unsigned int detile_buf_size;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index a023a4d59f41..da5333d165ac 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -961,8 +961,7 @@ void dcn20_blank_pixel_data(
width = width / odm_cnt;
if (blank) {
- if (stream_res->abm)
- stream_res->abm->funcs->set_abm_immediate_disable(stream_res->abm);
+ dc->hwss.set_abm_immediate_disable(pipe_ctx);
if (dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE) {
test_pattern = CONTROLLER_DP_TEST_PATTERN_COLORSQUARES;
@@ -997,7 +996,8 @@ void dcn20_blank_pixel_data(
if (!blank)
if (stream_res->abm) {
- stream_res->abm->funcs->set_pipe(stream_res->abm, stream_res->tg->inst + 1);
+ stream_res->abm->funcs->set_pipe(stream_res->abm, stream_res->tg->inst + 1,
+ stream->link->panel_cntl->inst);
stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
}
}
@@ -1478,8 +1478,11 @@ static void dcn20_program_pipe(
if (pipe_ctx->update_flags.bits.odm)
hws->funcs.update_odm(dc, context, pipe_ctx);
- if (pipe_ctx->update_flags.bits.enable)
+ if (pipe_ctx->update_flags.bits.enable) {
dcn20_enable_plane(dc, pipe_ctx, context);
+ if (dc->res_pool->hubbub->funcs->force_wm_propagate_to_pipes)
+ dc->res_pool->hubbub->funcs->force_wm_propagate_to_pipes(dc->res_pool->hubbub);
+ }
if (pipe_ctx->update_flags.raw || pipe_ctx->plane_state->update_flags.raw || pipe_ctx->stream->update_flags.raw)
dcn20_update_dchubp_dpp(dc, pipe_ctx, context);
@@ -2037,8 +2040,7 @@ static void dcn20_reset_back_end_for_pipe(
*/
if (pipe_ctx->top_pipe == NULL) {
- if (pipe_ctx->stream_res.abm)
- pipe_ctx->stream_res.abm->funcs->set_abm_immediate_disable(pipe_ctx->stream_res.abm);
+ dc->hwss.set_abm_immediate_disable(pipe_ctx);
pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
@@ -2171,6 +2173,13 @@ void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
*/
mpcc_id = hubp->inst;
+ /* If there is no full update, don't need to touch MPC tree*/
+ if (!pipe_ctx->plane_state->update_flags.bits.full_update &&
+ !pipe_ctx->update_flags.bits.mpcc) {
+ mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
+ return;
+ }
+
/* check if this MPCC is already being used */
new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
/* remove MPCC if being used */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
index 8334bbd6eabb..2fbde4241559 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
@@ -84,6 +84,8 @@ static const struct hw_sequencer_funcs dcn20_funcs = {
.set_flip_control_gsl = dcn20_set_flip_control_gsl,
.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
.calc_vupdate_position = dcn10_calc_vupdate_position,
+ .set_backlight_level = dce110_set_backlight_level,
+ .set_abm_immediate_disable = dce110_set_abm_immediate_disable,
};
static const struct hwseq_private_funcs dcn20_private_funcs = {
@@ -99,8 +101,6 @@ static const struct hwseq_private_funcs dcn20_private_funcs = {
.reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap,
.enable_stream_timing = dcn20_enable_stream_timing,
.edp_backlight_control = dce110_edp_backlight_control,
- .is_panel_backlight_on = dce110_is_panel_backlight_on,
- .is_panel_powered_on = dce110_is_panel_powered_on,
.disable_stream_gating = dcn20_disable_stream_gating,
.enable_stream_gating = dcn20_enable_stream_gating,
.setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c
index e4ac73035c84..8d209dae66e6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c
@@ -49,6 +49,12 @@
#define IND_REG(index) \
(enc10->link_regs->index)
+#ifndef MAX
+#define MAX(X, Y) ((X) > (Y) ? (X) : (Y))
+#endif
+#ifndef MIN
+#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
+#endif
static struct mpll_cfg dcn2_mpll_cfg[] = {
// RBR
@@ -260,6 +266,38 @@ void dcn20_link_encoder_enable_dp_output(
}
+void dcn20_link_encoder_get_max_link_cap(struct link_encoder *enc,
+ struct dc_link_settings *link_settings)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+ uint32_t is_in_usb_c_dp4_mode = 0;
+
+ dcn10_link_encoder_get_max_link_cap(enc, link_settings);
+
+ /* in USB-C DP2 (two-lane alt) mode, the max lane count is 2 */
+ if (enc->funcs->is_in_alt_mode && enc->funcs->is_in_alt_mode(enc)) {
+ REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &is_in_usb_c_dp4_mode);
+ if (!is_in_usb_c_dp4_mode)
+ link_settings->lane_count = MIN(LANE_COUNT_TWO, link_settings->lane_count);
+ }
+
+}
+
+bool dcn20_link_encoder_is_in_alt_mode(struct link_encoder *enc)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+ uint32_t dp_alt_mode_disable = 0;
+ bool is_usb_c_alt_mode = false;
+
+ if (enc->features.flags.bits.DP_IS_USB_C) {
+ /* if value == 1 alt mode is disabled, otherwise it is enabled */
+ REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable);
+ is_usb_c_alt_mode = (dp_alt_mode_disable == 0);
+ }
+
+ return is_usb_c_alt_mode;
+}
+
#define AUX_REG(reg)\
(enc10->aux_regs->reg)
@@ -338,6 +376,8 @@ static const struct link_encoder_funcs dcn20_link_enc_funcs = {
.fec_is_active = enc2_fec_is_active,
.get_dig_mode = dcn10_get_dig_mode,
.get_dig_frontend = dcn10_get_dig_frontend,
+ .is_in_alt_mode = dcn20_link_encoder_is_in_alt_mode,
+ .get_max_link_cap = dcn20_link_encoder_get_max_link_cap,
};
void dcn20_link_encoder_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
index 8cab8107fd94..284a1ee4d249 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
@@ -343,6 +343,10 @@ void dcn20_link_encoder_enable_dp_output(
const struct dc_link_settings *link_settings,
enum clock_source_id clock_source);
+bool dcn20_link_encoder_is_in_alt_mode(struct link_encoder *enc);
+void dcn20_link_encoder_get_max_link_cap(struct link_encoder *enc,
+ struct dc_link_settings *link_settings);
+
void dcn20_link_encoder_construct(
struct dcn20_link_encoder *enc20,
const struct encoder_init_data *init_data,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
index 570dfd9a243f..99cc095dc33c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
@@ -452,7 +452,7 @@ void mpc2_set_output_gamma(
next_mode = LUT_RAM_A;
mpc20_power_on_ogam_lut(mpc, mpcc_id, true);
- mpc20_configure_ogam_lut(mpc, mpcc_id, next_mode == LUT_RAM_A ? true:false);
+ mpc20_configure_ogam_lut(mpc, mpcc_id, next_mode == LUT_RAM_A);
if (next_mode == LUT_RAM_A)
mpc2_program_luta(mpc, mpcc_id, params);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
index d875b0c38fde..8c16967fe018 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
@@ -409,6 +409,18 @@ void optc2_program_manual_trigger(struct timing_generator *optc)
OTG_TRIGA_MANUAL_TRIG, 1);
}
+bool optc2_configure_crc(struct timing_generator *optc,
+ const struct crc_params *params)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ REG_SET_2(OTG_CRC_CNTL2, 0,
+ OTG_CRC_DSC_MODE, params->dsc_mode,
+ OTG_CRC_DATA_STREAM_COMBINE_MODE, params->odm_mode);
+
+ return optc1_configure_crc(optc, params);
+}
+
static struct timing_generator_funcs dcn20_tg_funcs = {
.validate_timing = optc1_validate_timing,
.program_timing = optc1_program_timing,
@@ -452,7 +464,7 @@ static struct timing_generator_funcs dcn20_tg_funcs = {
.clear_optc_underflow = optc1_clear_optc_underflow,
.setup_global_swap_lock = NULL,
.get_crc = optc1_get_crc,
- .configure_crc = optc1_configure_crc,
+ .configure_crc = optc2_configure_crc,
.set_dsc_config = optc2_set_dsc_config,
.set_dwb_source = optc2_set_dwb_source,
.set_odm_bypass = optc2_set_odm_bypass,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h
index 239cc40ae474..e0a0a8a8e2c6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h
@@ -36,6 +36,7 @@
SRI(OTG_GSL_WINDOW_Y, OTG, inst),\
SRI(OTG_VUPDATE_KEEPOUT, OTG, inst),\
SRI(OTG_DSC_START_POSITION, OTG, inst),\
+ SRI(OTG_CRC_CNTL2, OTG, inst),\
SRI(OPTC_DATA_FORMAT_CONTROL, ODM, inst),\
SRI(OPTC_BYTES_PER_PIXEL, ODM, inst),\
SRI(OPTC_WIDTH_CONTROL, ODM, inst),\
@@ -62,6 +63,10 @@
SF(OTG0_OTG_GSL_CONTROL, OTG_MASTER_UPDATE_LOCK_GSL_EN, mask_sh), \
SF(OTG0_OTG_DSC_START_POSITION, OTG_DSC_START_POSITION_X, mask_sh), \
SF(OTG0_OTG_DSC_START_POSITION, OTG_DSC_START_POSITION_LINE_NUM, mask_sh),\
+ SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DSC_MODE, mask_sh),\
+ SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_STREAM_COMBINE_MODE, mask_sh),\
+ SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_STREAM_SPLIT_MODE, mask_sh),\
+ SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_FORMAT, mask_sh),\
SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG0_SRC_SEL, mask_sh),\
SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG1_SRC_SEL, mask_sh),\
SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_NUM_OF_INPUT_SEGMENT, mask_sh),\
@@ -109,4 +114,6 @@ void optc2_lock_doublebuffer_enable(struct timing_generator *optc);
void optc2_setup_manual_trigger(struct timing_generator *optc);
void optc2_program_manual_trigger(struct timing_generator *optc);
bool optc2_is_two_pixels_per_containter(const struct dc_crtc_timing *timing);
+bool optc2_configure_crc(struct timing_generator *optc,
+ const struct crc_params *params);
#endif /* __DC_OPTC_DCN20_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index e4348e3b6389..cef1aa938ab5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -61,6 +61,7 @@
#include "dcn20_dccg.h"
#include "dcn20_vmid.h"
#include "dc_link_ddc.h"
+#include "dce/dce_panel_cntl.h"
#include "navi10_ip_offset.h"
@@ -691,6 +692,18 @@ static const struct dcn10_link_enc_mask le_mask = {
DPCS_DCN2_MASK_SH_LIST(_MASK)
};
+static const struct dce_panel_cntl_registers panel_cntl_regs[] = {
+ { DCN_PANEL_CNTL_REG_LIST() }
+};
+
+static const struct dce_panel_cntl_shift panel_cntl_shift = {
+ DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_panel_cntl_mask panel_cntl_mask = {
+ DCE_PANEL_CNTL_MASK_SH_LIST(_MASK)
+};
+
#define ipp_regs(id)\
[id] = {\
IPP_REG_LIST_DCN20(id),\
@@ -1293,6 +1306,23 @@ struct link_encoder *dcn20_link_encoder_create(
return &enc20->enc10.base;
}
+static struct panel_cntl *dcn20_panel_cntl_create(const struct panel_cntl_init_data *init_data)
+{
+ struct dce_panel_cntl *panel_cntl =
+ kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL);
+
+ if (!panel_cntl)
+ return NULL;
+
+ dce_panel_cntl_construct(panel_cntl,
+ init_data,
+ &panel_cntl_regs[init_data->inst],
+ &panel_cntl_shift,
+ &panel_cntl_mask);
+
+ return &panel_cntl->base;
+}
+
struct clock_source *dcn20_clock_source_create(
struct dc_context *ctx,
struct dc_bios *bios,
@@ -1623,24 +1653,6 @@ enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state
enum dc_status status = DC_OK;
struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream);
- /*TODO Seems unneeded anymore */
- /* if (old_context && resource_is_stream_unchanged(old_context, stream)) {
- if (stream != NULL && old_context->streams[i] != NULL) {
- todo: shouldn't have to copy missing parameter here
- resource_build_bit_depth_reduction_params(stream,
- &stream->bit_depth_params);
- stream->clamping.pixel_encoding =
- stream->timing.pixel_encoding;
-
- resource_build_bit_depth_reduction_params(stream,
- &stream->bit_depth_params);
- build_clamping_params(stream);
-
- continue;
- }
- }
- */
-
if (!pipe_ctx)
return DC_ERROR_UNEXPECTED;
@@ -1651,22 +1663,32 @@ enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state
}
-static void acquire_dsc(struct resource_context *res_ctx,
- const struct resource_pool *pool,
+void dcn20_acquire_dsc(const struct dc *dc,
+ struct resource_context *res_ctx,
struct display_stream_compressor **dsc,
int pipe_idx)
{
int i;
+ const struct resource_pool *pool = dc->res_pool;
+ struct display_stream_compressor *dsc_old = dc->current_state->res_ctx.pipe_ctx[pipe_idx].stream_res.dsc;
- ASSERT(*dsc == NULL);
+ ASSERT(*dsc == NULL); /* If this ASSERT fails, dsc was not released properly */
*dsc = NULL;
+ /* Always do 1-to-1 mapping when number of DSCs is same as number of pipes */
if (pool->res_cap->num_dsc == pool->res_cap->num_opp) {
*dsc = pool->dscs[pipe_idx];
res_ctx->is_dsc_acquired[pipe_idx] = true;
return;
}
+ /* Return old DSC to avoid the need for re-programming */
+ if (dsc_old && !res_ctx->is_dsc_acquired[dsc_old->inst]) {
+ *dsc = dsc_old;
+ res_ctx->is_dsc_acquired[dsc_old->inst] = true;
+ return;
+ }
+
/* Find first free DSC */
for (i = 0; i < pool->res_cap->num_dsc; i++)
if (!res_ctx->is_dsc_acquired[i]) {
@@ -1698,7 +1720,6 @@ enum dc_status dcn20_add_dsc_to_stream_resource(struct dc *dc,
{
enum dc_status result = DC_OK;
int i;
- const struct resource_pool *pool = dc->res_pool;
/* Get a DSC if required and available */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -1710,7 +1731,7 @@ enum dc_status dcn20_add_dsc_to_stream_resource(struct dc *dc,
if (pipe_ctx->stream_res.dsc)
continue;
- acquire_dsc(&dc_ctx->res_ctx, pool, &pipe_ctx->stream_res.dsc, i);
+ dcn20_acquire_dsc(dc, &dc_ctx->res_ctx, &pipe_ctx->stream_res.dsc, i);
/* The number of DSCs can be less than the number of pipes */
if (!pipe_ctx->stream_res.dsc) {
@@ -1838,12 +1859,13 @@ static void swizzle_to_dml_params(
}
bool dcn20_split_stream_for_odm(
+ const struct dc *dc,
struct resource_context *res_ctx,
- const struct resource_pool *pool,
struct pipe_ctx *prev_odm_pipe,
struct pipe_ctx *next_odm_pipe)
{
int pipe_idx = next_odm_pipe->pipe_idx;
+ const struct resource_pool *pool = dc->res_pool;
*next_odm_pipe = *prev_odm_pipe;
@@ -1901,7 +1923,7 @@ bool dcn20_split_stream_for_odm(
}
next_odm_pipe->stream_res.opp = pool->opps[next_odm_pipe->pipe_idx];
if (next_odm_pipe->stream->timing.flags.DSC == 1) {
- acquire_dsc(res_ctx, pool, &next_odm_pipe->stream_res.dsc, next_odm_pipe->pipe_idx);
+ dcn20_acquire_dsc(dc, res_ctx, &next_odm_pipe->stream_res.dsc, next_odm_pipe->pipe_idx);
ASSERT(next_odm_pipe->stream_res.dsc);
if (next_odm_pipe->stream_res.dsc == NULL)
return false;
@@ -1939,8 +1961,6 @@ void dcn20_split_stream_for_mpc(
secondary_pipe->top_pipe = primary_pipe;
ASSERT(primary_pipe->plane_state);
- resource_build_scaling_params(primary_pipe);
- resource_build_scaling_params(secondary_pipe);
}
void dcn20_populate_dml_writeback_from_context(
@@ -2216,12 +2236,12 @@ int dcn20_populate_dml_pipes_from_context(
|| pipes[pipe_cnt].pipe.dest.odm_combine != dm_odm_combine_mode_disabled;
pipes[pipe_cnt].pipe.src.source_scan = pln->rotation == ROTATION_ANGLE_90
|| pln->rotation == ROTATION_ANGLE_270 ? dm_vert : dm_horz;
- pipes[pipe_cnt].pipe.src.viewport_y_y = scl->viewport.y;
- pipes[pipe_cnt].pipe.src.viewport_y_c = scl->viewport_c.y;
- pipes[pipe_cnt].pipe.src.viewport_width = scl->viewport.width;
- pipes[pipe_cnt].pipe.src.viewport_width_c = scl->viewport_c.width;
- pipes[pipe_cnt].pipe.src.viewport_height = scl->viewport.height;
- pipes[pipe_cnt].pipe.src.viewport_height_c = scl->viewport_c.height;
+ pipes[pipe_cnt].pipe.src.viewport_y_y = scl->viewport_unadjusted.y;
+ pipes[pipe_cnt].pipe.src.viewport_y_c = scl->viewport_c_unadjusted.y;
+ pipes[pipe_cnt].pipe.src.viewport_width = scl->viewport_unadjusted.width;
+ pipes[pipe_cnt].pipe.src.viewport_width_c = scl->viewport_c_unadjusted.width;
+ pipes[pipe_cnt].pipe.src.viewport_height = scl->viewport_unadjusted.height;
+ pipes[pipe_cnt].pipe.src.viewport_height_c = scl->viewport_c_unadjusted.height;
pipes[pipe_cnt].pipe.src.surface_width_y = pln->plane_size.surface_size.width;
pipes[pipe_cnt].pipe.src.surface_height_y = pln->plane_size.surface_size.height;
pipes[pipe_cnt].pipe.src.surface_width_c = pln->plane_size.chroma_size.width;
@@ -2570,13 +2590,15 @@ int dcn20_validate_apply_pipe_split_flags(
struct dc *dc,
struct dc_state *context,
int vlevel,
- bool *split,
+ int *split,
bool *merge)
{
int i, pipe_idx, vlevel_split;
int plane_count = 0;
bool force_split = false;
bool avoid_split = dc->debug.pipe_split_policy == MPC_SPLIT_AVOID;
+ struct vba_vars_st *v = &context->bw_ctx.dml.vba;
+ int max_mpc_comb = v->maxMpcComb;
if (context->stream_count > 1) {
if (dc->debug.pipe_split_policy == MPC_SPLIT_AVOID_MULT_DISP)
@@ -2584,10 +2606,22 @@ int dcn20_validate_apply_pipe_split_flags(
} else if (dc->debug.force_single_disp_pipe_split)
force_split = true;
- /* TODO: fix dc bugs and remove this split threshold thing */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ /**
+ * Workaround for avoiding pipe-split in cases where we'd split
+ * planes that are too small, resulting in splits that aren't
+ * valid for the scaler.
+ */
+ if (pipe->plane_state &&
+ (pipe->plane_state->dst_rect.width <= 16 ||
+ pipe->plane_state->dst_rect.height <= 16 ||
+ pipe->plane_state->src_rect.width <= 16 ||
+ pipe->plane_state->src_rect.height <= 16))
+ avoid_split = true;
+
+ /* TODO: fix dc bugs and remove this split threshold thing */
if (pipe->stream && !pipe->prev_odm_pipe &&
(!pipe->top_pipe || pipe->top_pipe->plane_state != pipe->plane_state))
++plane_count;
@@ -2602,26 +2636,35 @@ int dcn20_validate_apply_pipe_split_flags(
continue;
for (vlevel_split = vlevel; vlevel <= context->bw_ctx.dml.soc.num_states; vlevel++)
- if (context->bw_ctx.dml.vba.NoOfDPP[vlevel][0][pipe_idx] == 1)
+ if (v->NoOfDPP[vlevel][0][pipe_idx] == 1 &&
+ v->ModeSupport[vlevel][0])
break;
/* Impossible to not split this pipe */
if (vlevel > context->bw_ctx.dml.soc.num_states)
vlevel = vlevel_split;
+ else
+ max_mpc_comb = 0;
pipe_idx++;
}
- context->bw_ctx.dml.vba.maxMpcComb = 0;
+ v->maxMpcComb = max_mpc_comb;
}
/* Split loop sets which pipe should be split based on dml outputs and dc flags */
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- int pipe_plane = context->bw_ctx.dml.vba.pipe_plane[pipe_idx];
+ int pipe_plane = v->pipe_plane[pipe_idx];
+ bool split4mpc = context->stream_count == 1 && plane_count == 1
+ && dc->config.enable_4to1MPC && dc->res_pool->pipe_count >= 4;
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
- if (force_split || context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_plane] > 1)
- split[i] = true;
+ if (force_split || v->NoOfDPP[vlevel][max_mpc_comb][pipe_plane] > 1) {
+ if (split4mpc)
+ split[i] = 4;
+ else
+ split[i] = 2;
+ }
if ((pipe->stream->view_format ==
VIEW_3D_FORMAT_SIDE_BY_SIDE ||
pipe->stream->view_format ==
@@ -2630,50 +2673,75 @@ int dcn20_validate_apply_pipe_split_flags(
TIMING_3D_FORMAT_TOP_AND_BOTTOM ||
pipe->stream->timing.timing_3d_format ==
TIMING_3D_FORMAT_SIDE_BY_SIDE))
- split[i] = true;
+ split[i] = 2;
if (dc->debug.force_odm_combine & (1 << pipe->stream_res.tg->inst)) {
- split[i] = true;
- context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_plane] = dm_odm_combine_mode_2to1;
+ split[i] = 2;
+ v->ODMCombineEnablePerState[vlevel][pipe_plane] = dm_odm_combine_mode_2to1;
}
- context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_plane] =
- context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_plane];
-
- if (pipe->prev_odm_pipe && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_plane] != dm_odm_combine_mode_disabled) {
- /*Already split odm pipe tree, don't try to split again*/
- split[i] = false;
- split[pipe->prev_odm_pipe->pipe_idx] = false;
- } else if (pipe->top_pipe && pipe->plane_state == pipe->top_pipe->plane_state
- && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_plane] == dm_odm_combine_mode_disabled) {
- /*Already split mpc tree, don't try to split again, assumes only 2x mpc combine*/
- split[i] = false;
- split[pipe->top_pipe->pipe_idx] = false;
- } else if (pipe->prev_odm_pipe || (pipe->top_pipe && pipe->plane_state == pipe->top_pipe->plane_state)) {
- if (split[i] == false) {
- /*Exiting mpc/odm combine*/
- merge[i] = true;
+ v->ODMCombineEnabled[pipe_plane] =
+ v->ODMCombineEnablePerState[vlevel][pipe_plane];
+
+ if (v->ODMCombineEnabled[pipe_plane] == dm_odm_combine_mode_disabled) {
+ if (get_num_mpc_splits(pipe) == 1) {
+ /*If need split for mpc but 2 way split already*/
+ if (split[i] == 4)
+ split[i] = 2; /* 2 -> 4 MPC */
+ else if (split[i] == 2)
+ split[i] = 0; /* 2 -> 2 MPC */
+ else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state)
+ merge[i] = true; /* 2 -> 1 MPC */
+ } else if (get_num_mpc_splits(pipe) == 3) {
+ /*If need split for mpc but 4 way split already*/
+ if (split[i] == 2 && ((pipe->top_pipe && !pipe->top_pipe->top_pipe)
+ || !pipe->bottom_pipe)) {
+ merge[i] = true; /* 4 -> 2 MPC */
+ } else if (split[i] == 0 && pipe->top_pipe &&
+ pipe->top_pipe->plane_state == pipe->plane_state)
+ merge[i] = true; /* 4 -> 1 MPC */
+ split[i] = 0;
+ } else if (get_num_odm_splits(pipe)) {
+ /* ODM -> MPC transition */
+ ASSERT(0); /* NOT expected yet */
if (pipe->prev_odm_pipe) {
- ASSERT(0); /*should not actually happen yet*/
- merge[pipe->prev_odm_pipe->pipe_idx] = true;
- } else
- merge[pipe->top_pipe->pipe_idx] = true;
- } else {
- /*Transition from mpc combine to odm combine or vice versa*/
- ASSERT(0); /*should not actually happen yet*/
- split[i] = true;
- merge[i] = true;
- if (pipe->prev_odm_pipe) {
- split[pipe->prev_odm_pipe->pipe_idx] = true;
- merge[pipe->prev_odm_pipe->pipe_idx] = true;
- } else {
- split[pipe->top_pipe->pipe_idx] = true;
- merge[pipe->top_pipe->pipe_idx] = true;
+ split[i] = 0;
+ merge[i] = true;
+ }
+ }
+ } else {
+ if (get_num_odm_splits(pipe) == 1) {
+ /*If need split for odm but 2 way split already*/
+ if (split[i] == 4)
+ split[i] = 2; /* 2 -> 4 ODM */
+ else if (split[i] == 2)
+ split[i] = 0; /* 2 -> 2 ODM */
+ else if (pipe->prev_odm_pipe) {
+ ASSERT(0); /* NOT expected yet */
+ merge[i] = true; /* exit ODM */
+ }
+ } else if (get_num_odm_splits(pipe) == 3) {
+ /*If need split for odm but 4 way split already*/
+ if (split[i] == 2 && ((pipe->prev_odm_pipe && !pipe->prev_odm_pipe->prev_odm_pipe)
+ || !pipe->next_odm_pipe)) {
+ ASSERT(0); /* NOT expected yet */
+ merge[i] = true; /* 4 -> 2 ODM */
+ } else if (split[i] == 0 && pipe->prev_odm_pipe) {
+ ASSERT(0); /* NOT expected yet */
+ merge[i] = true; /* exit ODM */
+ }
+ split[i] = 0;
+ } else if (get_num_mpc_splits(pipe)) {
+ /* MPC -> ODM transition */
+ ASSERT(0); /* NOT expected yet */
+ if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) {
+ split[i] = 0;
+ merge[i] = true;
}
}
}
/* Adjust dppclk when split is forced, do not bother with dispclk */
- if (split[i] && context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] == 1)
- context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] /= 2;
+ if (split[i] != 0 && v->NoOfDPP[vlevel][max_mpc_comb][pipe_idx] == 1)
+ v->RequiredDPPCLK[vlevel][max_mpc_comb][pipe_idx] /= 2;
pipe_idx++;
}
@@ -2689,7 +2757,7 @@ bool dcn20_fast_validate_bw(
int *vlevel_out)
{
bool out = false;
- bool split[MAX_PIPES] = { false };
+ int split[MAX_PIPES] = { 0 };
int pipe_cnt, i, pipe_idx, vlevel;
ASSERT(pipes);
@@ -2731,7 +2799,7 @@ bool dcn20_fast_validate_bw(
hsplit_pipe = dcn20_find_secondary_pipe(dc, &context->res_ctx, dc->res_pool, pipe);
ASSERT(hsplit_pipe);
if (!dcn20_split_stream_for_odm(
- &context->res_ctx, dc->res_pool,
+ dc, &context->res_ctx,
pipe, hsplit_pipe))
goto validate_fail;
pipe_split_from[hsplit_pipe->pipe_idx] = pipe_idx;
@@ -2749,7 +2817,7 @@ bool dcn20_fast_validate_bw(
&& context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx])
goto validate_fail;
- if (split[i]) {
+ if (split[i] == 2) {
if (!hsplit_pipe || hsplit_pipe->plane_state != pipe->plane_state) {
/* pipe not split previously needs split */
hsplit_pipe = dcn20_find_secondary_pipe(dc, &context->res_ctx, dc->res_pool, pipe);
@@ -2760,14 +2828,17 @@ bool dcn20_fast_validate_bw(
}
if (context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) {
if (!dcn20_split_stream_for_odm(
- &context->res_ctx, dc->res_pool,
+ dc, &context->res_ctx,
pipe, hsplit_pipe))
goto validate_fail;
dcn20_build_mapped_resource(dc, context, pipe->stream);
- } else
+ } else {
dcn20_split_stream_for_mpc(
- &context->res_ctx, dc->res_pool,
- pipe, hsplit_pipe);
+ &context->res_ctx, dc->res_pool,
+ pipe, hsplit_pipe);
+ if (!resource_build_scaling_params(pipe) || !resource_build_scaling_params(hsplit_pipe))
+ goto validate_fail;
+ }
pipe_split_from[hsplit_pipe->pipe_idx] = pipe_idx;
}
} else if (hsplit_pipe && hsplit_pipe->plane_state == pipe->plane_state) {
@@ -3007,7 +3078,7 @@ void dcn20_calculate_dlg_params(
pipe_idx,
cstate_en,
context->bw_ctx.bw.dcn.clk.p_state_change_support,
- false, false, false);
+ false, false, true);
context->bw_ctx.dml.funcs.rq_dlg_get_rq_reg(&context->bw_ctx.dml,
&context->res_ctx.pipe_ctx[i].rq_regs,
@@ -3091,6 +3162,8 @@ static noinline bool dcn20_validate_bandwidth_fp(struct dc *dc,
p_state_latency_us = context->bw_ctx.dml.soc.dram_clock_change_latency_us;
context->bw_ctx.dml.soc.disable_dram_clock_change_vactive_support =
dc->debug.disable_dram_clock_change_vactive_support;
+ context->bw_ctx.dml.soc.allow_dram_clock_one_display_vactive =
+ dc->debug.enable_dram_clock_change_one_display_vactive;
if (fast_validate) {
return dcn20_validate_bandwidth_internal(dc, context, true);
@@ -3189,8 +3262,6 @@ static struct dc_cap_funcs cap_funcs = {
enum dc_status dcn20_patch_unknown_plane_state(struct dc_plane_state *plane_state)
{
- enum dc_status result = DC_OK;
-
enum surface_pixel_format surf_pix_format = plane_state->format;
unsigned int bpp = resource_pixel_format_to_bpp(surf_pix_format);
@@ -3202,12 +3273,13 @@ enum dc_status dcn20_patch_unknown_plane_state(struct dc_plane_state *plane_stat
swizzle = DC_SW_64KB_S;
plane_state->tiling_info.gfx9.swizzle = swizzle;
- return result;
+ return DC_OK;
}
static struct resource_funcs dcn20_res_pool_funcs = {
.destroy = dcn20_destroy_resource_pool,
.link_enc_create = dcn20_link_encoder_create,
+ .panel_cntl_create = dcn20_panel_cntl_create,
.validate_bandwidth = dcn20_validate_bandwidth,
.acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
.add_stream_to_ctx = dcn20_add_stream_to_ctx,
@@ -3446,6 +3518,13 @@ void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st
bb->dram_clock_change_latency_us =
dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
}
+
+ if ((int)(bb->dummy_pstate_latency_us * 1000)
+ != dc->bb_overrides.dummy_clock_change_latency_ns
+ && dc->bb_overrides.dummy_clock_change_latency_ns) {
+ bb->dummy_pstate_latency_us =
+ dc->bb_overrides.dummy_clock_change_latency_ns / 1000.0;
+ }
}
static struct _vcs_dpi_soc_bounding_box_st *get_asic_rev_soc_bb(
@@ -3681,9 +3760,42 @@ static bool dcn20_resource_construct(
dc->caps.max_slave_planes = 1;
dc->caps.post_blend_color_processing = true;
dc->caps.force_dp_tps4_for_cp2520 = true;
- dc->caps.hw_3d_lut = true;
dc->caps.extended_aux_timeout_support = true;
+ /* Color pipeline capabilities */
+ dc->caps.color.dpp.dcn_arch = 1;
+ dc->caps.color.dpp.input_lut_shared = 0;
+ dc->caps.color.dpp.icsc = 1;
+ dc->caps.color.dpp.dgam_ram = 1;
+ dc->caps.color.dpp.dgam_rom_caps.srgb = 1;
+ dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1;
+ dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 0;
+ dc->caps.color.dpp.dgam_rom_caps.pq = 0;
+ dc->caps.color.dpp.dgam_rom_caps.hlg = 0;
+ dc->caps.color.dpp.post_csc = 0;
+ dc->caps.color.dpp.gamma_corr = 0;
+
+ dc->caps.color.dpp.hw_3d_lut = 1;
+ dc->caps.color.dpp.ogam_ram = 1;
+ // no OGAM ROM on DCN2, only MPC ROM
+ dc->caps.color.dpp.ogam_rom_caps.srgb = 0;
+ dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0;
+ dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0;
+ dc->caps.color.dpp.ogam_rom_caps.pq = 0;
+ dc->caps.color.dpp.ogam_rom_caps.hlg = 0;
+ dc->caps.color.dpp.ocsc = 0;
+
+ dc->caps.color.mpc.gamut_remap = 0;
+ dc->caps.color.mpc.num_3dluts = 0;
+ dc->caps.color.mpc.shared_3d_lut = 0;
+ dc->caps.color.mpc.ogam_ram = 1;
+ dc->caps.color.mpc.ogam_rom_caps.srgb = 0;
+ dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0;
+ dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0;
+ dc->caps.color.mpc.ogam_rom_caps.pq = 0;
+ dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
+ dc->caps.color.mpc.ocsc = 1;
+
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) {
dc->debug = debug_defaults_drv;
} else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
index 9d5bff9455fd..2c1959845c29 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
@@ -123,7 +123,7 @@ int dcn20_validate_apply_pipe_split_flags(
struct dc *dc,
struct dc_state *context,
int vlevel,
- bool *split,
+ int *split,
bool *merge);
void dcn20_release_dsc(struct resource_context *res_ctx,
const struct resource_pool *pool,
@@ -135,10 +135,14 @@ void dcn20_split_stream_for_mpc(
struct pipe_ctx *primary_pipe,
struct pipe_ctx *secondary_pipe);
bool dcn20_split_stream_for_odm(
+ const struct dc *dc,
struct resource_context *res_ctx,
- const struct resource_pool *pool,
struct pipe_ctx *prev_odm_pipe,
struct pipe_ctx *next_odm_pipe);
+void dcn20_acquire_dsc(const struct dc *dc,
+ struct resource_context *res_ctx,
+ struct display_stream_compressor **dsc,
+ int pipe_idx);
struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc,
struct resource_context *res_ctx,
const struct resource_pool *pool,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
index 5e2d14b897af..129f0b62f751 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
@@ -49,11 +49,6 @@
#define FN(reg_name, field_name) \
hubbub1->shifts->field_name, hubbub1->masks->field_name
-#ifdef NUM_VMID
-#undef NUM_VMID
-#endif
-#define NUM_VMID 16
-
static uint32_t convert_and_clamp(
uint32_t wm_ns,
uint32_t refclk_mhz,
@@ -138,7 +133,7 @@ int hubbub21_init_dchub(struct hubbub *hubbub,
dcn21_dchvm_init(hubbub);
- return NUM_VMID;
+ return hubbub1->num_vmid;
}
bool hubbub21_program_urgent_watermarks(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
index d285ba622d61..960a0716dde5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
@@ -778,21 +778,28 @@ void dmcub_PLAT_54186_wa(struct hubp *hubp, struct surface_flip_registers *flip_
{
struct dc_dmub_srv *dmcub = hubp->ctx->dmub_srv;
struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
- struct dmub_rb_cmd_PLAT_54186_wa PLAT_54186_wa = { 0 };
-
- PLAT_54186_wa.header.type = DMUB_CMD__PLAT_54186_WA;
- PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS = flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS;
- PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS_C = flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_C;
- PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH = flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH;
- PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C = flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C;
- PLAT_54186_wa.flip.flip_params.grph_stereo = flip_regs->grph_stereo;
- PLAT_54186_wa.flip.flip_params.hubp_inst = hubp->inst;
- PLAT_54186_wa.flip.flip_params.immediate = flip_regs->immediate;
- PLAT_54186_wa.flip.flip_params.tmz_surface = flip_regs->tmz_surface;
- PLAT_54186_wa.flip.flip_params.vmid = flip_regs->vmid;
+ union dmub_rb_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.PLAT_54186_wa.header.type = DMUB_CMD__PLAT_54186_WA;
+ cmd.PLAT_54186_wa.header.payload_bytes = sizeof(cmd.PLAT_54186_wa.flip);
+ cmd.PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS =
+ flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS;
+ cmd.PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS_C =
+ flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_C;
+ cmd.PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH =
+ flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH;
+ cmd.PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C =
+ flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C;
+ cmd.PLAT_54186_wa.flip.flip_params.grph_stereo = flip_regs->grph_stereo;
+ cmd.PLAT_54186_wa.flip.flip_params.hubp_inst = hubp->inst;
+ cmd.PLAT_54186_wa.flip.flip_params.immediate = flip_regs->immediate;
+ cmd.PLAT_54186_wa.flip.flip_params.tmz_surface = flip_regs->tmz_surface;
+ cmd.PLAT_54186_wa.flip.flip_params.vmid = flip_regs->vmid;
PERF_TRACE(); // TODO: remove after performance is stable.
- dc_dmub_srv_cmd_queue(dmcub, &PLAT_54186_wa.header);
+ dc_dmub_srv_cmd_queue(dmcub, &cmd);
PERF_TRACE(); // TODO: remove after performance is stable.
dc_dmub_srv_cmd_execute(dmcub);
PERF_TRACE(); // TODO: remove after performance is stable.
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
index 4dd634118df2..a5baef7e7a7d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
@@ -87,11 +87,9 @@ static const struct hw_sequencer_funcs dcn21_funcs = {
.exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
.calc_vupdate_position = dcn10_calc_vupdate_position,
- .set_cursor_position = dcn10_set_cursor_position,
- .set_cursor_attribute = dcn10_set_cursor_attribute,
- .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level,
- .optimize_pwr_state = dcn21_optimize_pwr_state,
- .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
+ .power_down = dce110_power_down,
+ .set_backlight_level = dce110_set_backlight_level,
+ .set_abm_immediate_disable = dce110_set_abm_immediate_disable,
};
static const struct hwseq_private_funcs dcn21_private_funcs = {
@@ -107,8 +105,6 @@ static const struct hwseq_private_funcs dcn21_private_funcs = {
.reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap,
.enable_stream_timing = dcn20_enable_stream_timing,
.edp_backlight_control = dce110_edp_backlight_control,
- .is_panel_backlight_on = dce110_is_panel_backlight_on,
- .is_panel_powered_on = dce110_is_panel_powered_on,
.disable_stream_gating = dcn20_disable_stream_gating,
.enable_stream_gating = dcn20_enable_stream_gating,
.setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c
index e45683ac871a..aa46c35b05a2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c
@@ -203,29 +203,6 @@ static bool update_cfg_data(
return true;
}
-void dcn21_link_encoder_get_max_link_cap(struct link_encoder *enc,
- struct dc_link_settings *link_settings)
-{
- struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
- uint32_t value;
-
- REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &value);
-
- if (!value && link_settings->lane_count > LANE_COUNT_TWO)
- link_settings->lane_count = LANE_COUNT_TWO;
-}
-
-bool dcn21_link_encoder_is_in_alt_mode(struct link_encoder *enc)
-{
- struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
- uint32_t value;
-
- REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &value);
-
- // if value == 1 alt mode is disabled, otherwise it is enabled
- return !value;
-}
-
bool dcn21_link_encoder_acquire_phy(struct link_encoder *enc)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
@@ -348,8 +325,8 @@ static const struct link_encoder_funcs dcn21_link_enc_funcs = {
.fec_set_ready = enc2_fec_set_ready,
.fec_is_active = enc2_fec_is_active,
.get_dig_frontend = dcn10_get_dig_frontend,
- .is_in_alt_mode = dcn21_link_encoder_is_in_alt_mode,
- .get_max_link_cap = dcn21_link_encoder_get_max_link_cap,
+ .is_in_alt_mode = dcn20_link_encoder_is_in_alt_mode,
+ .get_max_link_cap = dcn20_link_encoder_get_max_link_cap,
};
void dcn21_link_encoder_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index a721bb401ef0..f00a56835084 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -61,6 +61,7 @@
#include "dcn21_hubbub.h"
#include "dcn10/dcn10_resource.h"
#include "dce110/dce110_resource.h"
+#include "dce/dce_panel_cntl.h"
#include "dcn20/dcn20_dwb.h"
#include "dcn20/dcn20_mmhubbub.h"
@@ -85,6 +86,7 @@
#include "vm_helper.h"
#include "dcn20/dcn20_vmid.h"
#include "dce/dmub_psr.h"
+#include "dce/dmub_abm.h"
#define SOC_BOUNDING_BOX_VALID false
#define DC_LOGGER_INIT(logger)
@@ -803,7 +805,7 @@ static const struct resource_caps res_cap_rn = {
.num_pll = 5, // maybe 3 because the last two used for USB-c
.num_dwb = 1,
.num_ddc = 5,
- .num_vmid = 1,
+ .num_vmid = 16,
.num_dsc = 3,
};
@@ -995,9 +997,12 @@ static void dcn21_resource_destruct(struct dcn21_resource_pool *pool)
pool->base.dp_clock_source = NULL;
}
-
- if (pool->base.abm != NULL)
- dce_abm_destroy(&pool->base.abm);
+ if (pool->base.abm != NULL) {
+ if (pool->base.abm->ctx->dc->config.disable_dmcu)
+ dmub_abm_destroy(&pool->base.abm);
+ else
+ dce_abm_destroy(&pool->base.abm);
+ }
if (pool->base.dmcu != NULL)
dce_dmcu_destroy(&pool->base.dmcu);
@@ -1290,6 +1295,7 @@ static struct hubbub *dcn21_hubbub_create(struct dc_context *ctx)
vmid->shifts = &vmid_shifts;
vmid->masks = &vmid_masks;
}
+ hubbub->num_vmid = res_cap_rn.num_vmid;
return &hubbub->base;
}
@@ -1379,7 +1385,8 @@ static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
struct dcn21_resource_pool *pool = TO_DCN21_RES_POOL(dc->res_pool);
struct clk_limit_table *clk_table = &bw_params->clk_table;
struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES];
- unsigned int i, j, closest_clk_lvl;
+ unsigned int i, closest_clk_lvl;
+ int j;
// Default clock levels are used for diags, which may lead to overclocking.
if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
@@ -1591,6 +1598,18 @@ static const struct dcn10_link_enc_registers link_enc_regs[] = {
link_regs(4, E),
};
+static const struct dce_panel_cntl_registers panel_cntl_regs[] = {
+ { DCN_PANEL_CNTL_REG_LIST() }
+};
+
+static const struct dce_panel_cntl_shift panel_cntl_shift = {
+ DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_panel_cntl_mask panel_cntl_mask = {
+ DCE_PANEL_CNTL_MASK_SH_LIST(_MASK)
+};
+
#define aux_regs(id)\
[id] = {\
DCN2_AUX_REG_LIST(id)\
@@ -1676,6 +1695,24 @@ static struct link_encoder *dcn21_link_encoder_create(
return &enc21->enc10.base;
}
+
+static struct panel_cntl *dcn21_panel_cntl_create(const struct panel_cntl_init_data *init_data)
+{
+ struct dce_panel_cntl *panel_cntl =
+ kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL);
+
+ if (!panel_cntl)
+ return NULL;
+
+ dce_panel_cntl_construct(panel_cntl,
+ init_data,
+ &panel_cntl_regs[init_data->inst],
+ &panel_cntl_shift,
+ &panel_cntl_mask);
+
+ return &panel_cntl->base;
+}
+
#define CTX ctx
#define REG(reg_name) \
@@ -1694,12 +1731,8 @@ static int dcn21_populate_dml_pipes_from_context(
{
uint32_t pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, context, pipes);
int i;
- struct resource_context *res_ctx = &context->res_ctx;
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- if (!res_ctx->pipe_ctx[i].stream)
- continue;
+ for (i = 0; i < pipe_cnt; i++) {
pipes[i].pipe.src.hostvm = 1;
pipes[i].pipe.src.gpuvm = 1;
@@ -1724,6 +1757,7 @@ enum dc_status dcn21_patch_unknown_plane_state(struct dc_plane_state *plane_stat
static struct resource_funcs dcn21_res_pool_funcs = {
.destroy = dcn21_destroy_resource_pool,
.link_enc_create = dcn21_link_encoder_create,
+ .panel_cntl_create = dcn21_panel_cntl_create,
.validate_bandwidth = dcn21_validate_bandwidth,
.populate_dml_pipes = dcn21_populate_dml_pipes_from_context,
.add_stream_to_ctx = dcn20_add_stream_to_ctx,
@@ -1770,7 +1804,6 @@ static bool dcn21_resource_construct(
dc->caps.i2c_speed_in_khz = 100;
dc->caps.max_cursor_size = 256;
dc->caps.dmdata_alloc_size = 2048;
- dc->caps.hw_3d_lut = true;
dc->caps.max_slave_planes = 1;
dc->caps.post_blend_color_processing = true;
@@ -1779,6 +1812,40 @@ static bool dcn21_resource_construct(
dc->caps.dmcub_support = true;
dc->caps.is_apu = true;
+ /* Color pipeline capabilities */
+ dc->caps.color.dpp.dcn_arch = 1;
+ dc->caps.color.dpp.input_lut_shared = 0;
+ dc->caps.color.dpp.icsc = 1;
+ dc->caps.color.dpp.dgam_ram = 1;
+ dc->caps.color.dpp.dgam_rom_caps.srgb = 1;
+ dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1;
+ dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 0;
+ dc->caps.color.dpp.dgam_rom_caps.pq = 0;
+ dc->caps.color.dpp.dgam_rom_caps.hlg = 0;
+ dc->caps.color.dpp.post_csc = 0;
+ dc->caps.color.dpp.gamma_corr = 0;
+
+ dc->caps.color.dpp.hw_3d_lut = 1;
+ dc->caps.color.dpp.ogam_ram = 1;
+ // no OGAM ROM on DCN2
+ dc->caps.color.dpp.ogam_rom_caps.srgb = 0;
+ dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0;
+ dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0;
+ dc->caps.color.dpp.ogam_rom_caps.pq = 0;
+ dc->caps.color.dpp.ogam_rom_caps.hlg = 0;
+ dc->caps.color.dpp.ocsc = 0;
+
+ dc->caps.color.mpc.gamut_remap = 0;
+ dc->caps.color.mpc.num_3dluts = 0;
+ dc->caps.color.mpc.shared_3d_lut = 0;
+ dc->caps.color.mpc.ogam_ram = 1;
+ dc->caps.color.mpc.ogam_rom_caps.srgb = 0;
+ dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0;
+ dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0;
+ dc->caps.color.mpc.ogam_rom_caps.pq = 0;
+ dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
+ dc->caps.color.mpc.ocsc = 1;
+
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) {
@@ -1831,17 +1898,19 @@ static bool dcn21_resource_construct(
goto create_fail;
}
- pool->base.dmcu = dcn21_dmcu_create(ctx,
- &dmcu_regs,
- &dmcu_shift,
- &dmcu_mask);
- if (pool->base.dmcu == NULL) {
- dm_error("DC: failed to create dmcu!\n");
- BREAK_TO_DEBUGGER();
- goto create_fail;
+ if (!dc->config.disable_dmcu) {
+ pool->base.dmcu = dcn21_dmcu_create(ctx,
+ &dmcu_regs,
+ &dmcu_shift,
+ &dmcu_mask);
+ if (pool->base.dmcu == NULL) {
+ dm_error("DC: failed to create dmcu!\n");
+ BREAK_TO_DEBUGGER();
+ goto create_fail;
+ }
}
- if (dc->debug.disable_dmcu) {
+ if (dc->config.disable_dmcu) {
pool->base.psr = dmub_psr_create(ctx);
if (pool->base.psr == NULL) {
@@ -1851,15 +1920,16 @@ static bool dcn21_resource_construct(
}
}
- pool->base.abm = dce_abm_create(ctx,
+ if (dc->config.disable_dmcu)
+ pool->base.abm = dmub_abm_create(ctx,
+ &abm_regs,
+ &abm_shift,
+ &abm_mask);
+ else
+ pool->base.abm = dce_abm_create(ctx,
&abm_regs,
&abm_shift,
&abm_mask);
- if (pool->base.abm == NULL) {
- dm_error("DC: failed to create abm!\n");
- BREAK_TO_DEBUGGER();
- goto create_fail;
- }
pool->base.pp_smu = dcn21_pp_smu_create(ctx);
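
The panel_cntl_create hook wired in above follows the usual DC create pattern: allocate the derived object, run a construct helper that binds the per-instance registers/shift/mask tables, and hand back a pointer to the embedded base. The following stand-alone sketch shows only that pattern; every type and function name here is illustrative (calloc stands in for kzalloc), it is not the driver code itself.

#include <stdlib.h>

struct panel_cntl { const void *funcs; int inst; };          /* base object */
struct dce_panel_cntl { struct panel_cntl base; int regs; }; /* derived object */

static void dce_panel_cntl_construct(struct dce_panel_cntl *p, int inst)
{
	p->base.inst = inst;
	p->regs = inst;          /* stand-in for the per-instance register block */
}

static struct panel_cntl *panel_cntl_create(int inst)
{
	struct dce_panel_cntl *p = calloc(1, sizeof(*p));

	if (!p)
		return NULL;     /* caller treats NULL as a create failure */
	dce_panel_cntl_construct(p, inst);
	return &p->base;         /* a container_of()-style cast recovers the derived type */
}

int main(void)
{
	struct panel_cntl *pc = panel_cntl_create(0);

	return pc ? 0 : 1;
}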
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
index 5bbbafacc720..80170f9721ce 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
@@ -2599,21 +2599,44 @@ static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP
}
}
+ {
+ float SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = 999999;
+ int PlaneWithMinActiveDRAMClockChangeMargin = -1;
+
mode_lib->vba.MinActiveDRAMClockChangeMargin = 999999;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k]
< mode_lib->vba.MinActiveDRAMClockChangeMargin) {
mode_lib->vba.MinActiveDRAMClockChangeMargin =
mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k];
+ if (mode_lib->vba.BlendingAndTiming[k] == k) {
+ PlaneWithMinActiveDRAMClockChangeMargin = k;
+ } else {
+ for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) {
+ if (mode_lib->vba.BlendingAndTiming[k] == j) {
+ PlaneWithMinActiveDRAMClockChangeMargin = j;
+ }
+ }
+ }
}
}
mode_lib->vba.MinActiveDRAMClockChangeLatencySupported =
mode_lib->vba.MinActiveDRAMClockChangeMargin
+ mode_lib->vba.DRAMClockChangeLatency;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && (mode_lib->vba.BlendingAndTiming[k] == k))
+ && !(mode_lib->vba.BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin)
+ && mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k]
+ < SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank) {
+ SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank =
+ mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k];
+ }
+ }
if (mode_lib->vba.DRAMClockChangeSupportsVActive &&
mode_lib->vba.MinActiveDRAMClockChangeMargin > 60) {
+ mode_lib->vba.DRAMClockChangeWatermark += 25;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 0) {
@@ -2622,13 +2645,17 @@ static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP
mode_lib->vba.MinTTUVBlank[k] += 25;
}
}
- mode_lib->vba.DRAMClockChangeWatermark += 25;
+
mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive;
} else if (mode_lib->vba.DummyPStateCheck &&
mode_lib->vba.MinActiveDRAMClockChangeMargin > 0) {
mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive;
} else {
- if (mode_lib->vba.SynchronizedVBlank || mode_lib->vba.NumberOfActivePlanes == 1) {
+ if ((mode_lib->vba.SynchronizedVBlank
+ || mode_lib->vba.NumberOfActivePlanes == 1
+ || (SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank > 0 &&
+ mode_lib->vba.AllowDramClockChangeOneDisplayVactive))
+ && mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 0) {
mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vblank;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (!mode_lib->vba.AllowDRAMClockChangeDuringVBlank[k]) {
@@ -2640,6 +2667,7 @@ static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP
mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_unsupported;
}
}
+ }
for (k = 0; k <= mode_lib->vba.soc.num_states; k++)
for (j = 0; j < 2; j++)
mode_lib->vba.DRAMClockChangeSupport[k][j] = mode_lib->vba.DRAMClockChangeSupport[0][0];
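
The hunk above tracks both the smallest active-DRAM-clock-change margin and the smallest margin among the remaining planes, so vblank p-state support can be granted when only the worst plane is the limiter. A self-contained sketch of that min / second-min computation, with illustrative data and none of the DML state:

#include <stdio.h>

int main(void)
{
	double margin[] = { 42.0, 7.5, 19.0, 88.0 };
	int n = 4, k, min_idx = -1;
	double min_m = 999999, second_min = 999999;

	for (k = 0; k < n; ++k)
		if (margin[k] < min_m) {
			min_m = margin[k];
			min_idx = k;
		}

	/* second smallest: ignore the plane that owns the minimum */
	for (k = 0; k < n; ++k)
		if (k != min_idx && margin[k] < second_min)
			second_min = margin[k];

	printf("min=%.1f (plane %d), second min=%.1f\n", min_m, min_idx, second_min);
	return 0;
}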
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
index e6617c958bb8..a576eed94d9b 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
@@ -3190,6 +3190,7 @@ static void CalculateFlipSchedule(
double TimeForFetchingRowInVBlankImmediateFlip;
double ImmediateFlipBW;
double HostVMInefficiencyFactor;
+ double VRatioClamped;
if (GPUVMEnable == true && HostVMEnable == true) {
HostVMInefficiencyFactor =
@@ -3222,31 +3223,32 @@ static void CalculateFlipSchedule(
*DestinationLinesToRequestRowInImmediateFlip = dml_ceil(4.0 * (TimeForFetchingRowInVBlankImmediateFlip / LineTime), 1) / 4.0;
*final_flip_bw = dml_max(PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / (*DestinationLinesToRequestVMInImmediateFlip * LineTime), (MetaRowBytes + DPTEBytesPerRow) * HostVMInefficiencyFactor / (*DestinationLinesToRequestRowInImmediateFlip * LineTime));
+ VRatioClamped = (VRatio < 1.0) ? 1.0 : VRatio;
if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10) {
if (GPUVMEnable == true && DCCEnable != true) {
min_row_time = dml_min(
- dpte_row_height * LineTime / VRatio,
- dpte_row_height_chroma * LineTime / (VRatio / 2));
+ dpte_row_height * LineTime / VRatioClamped,
+ dpte_row_height_chroma * LineTime / (VRatioClamped / 2));
} else if (GPUVMEnable != true && DCCEnable == true) {
min_row_time = dml_min(
- meta_row_height * LineTime / VRatio,
- meta_row_height_chroma * LineTime / (VRatio / 2));
+ meta_row_height * LineTime / VRatioClamped,
+ meta_row_height_chroma * LineTime / (VRatioClamped / 2));
} else {
min_row_time = dml_min4(
- dpte_row_height * LineTime / VRatio,
- meta_row_height * LineTime / VRatio,
- dpte_row_height_chroma * LineTime / (VRatio / 2),
- meta_row_height_chroma * LineTime / (VRatio / 2));
+ dpte_row_height * LineTime / VRatioClamped,
+ meta_row_height * LineTime / VRatioClamped,
+ dpte_row_height_chroma * LineTime / (VRatioClamped / 2),
+ meta_row_height_chroma * LineTime / (VRatioClamped / 2));
}
} else {
if (GPUVMEnable == true && DCCEnable != true) {
- min_row_time = dpte_row_height * LineTime / VRatio;
+ min_row_time = dpte_row_height * LineTime / VRatioClamped;
} else if (GPUVMEnable != true && DCCEnable == true) {
- min_row_time = meta_row_height * LineTime / VRatio;
+ min_row_time = meta_row_height * LineTime / VRatioClamped;
} else {
min_row_time = dml_min(
- dpte_row_height * LineTime / VRatio,
- meta_row_height * LineTime / VRatio);
+ dpte_row_height * LineTime / VRatioClamped,
+ meta_row_height * LineTime / VRatioClamped);
}
}
@@ -5944,7 +5946,7 @@ static void CalculateMetaAndPTETimes(
* PixelPTEReqHeightY[k];
}
dpte_groups_per_row_luma_ub = dml_ceil(
- dpte_row_width_luma_ub[k] / dpte_group_width_luma,
+ (float) dpte_row_width_luma_ub[k] / dpte_group_width_luma,
1);
time_per_pte_group_nom_luma[k] = DST_Y_PER_PTE_ROW_NOM_L[k] * HTotal[k]
/ PixelClock[k] / dpte_groups_per_row_luma_ub;
@@ -5968,7 +5970,7 @@ static void CalculateMetaAndPTETimes(
* PixelPTEReqHeightC[k];
}
dpte_groups_per_row_chroma_ub = dml_ceil(
- dpte_row_width_chroma_ub[k]
+ (float) dpte_row_width_chroma_ub[k]
/ dpte_group_width_chroma,
1);
time_per_pte_group_nom_chroma[k] = DST_Y_PER_PTE_ROW_NOM_C[k]
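
VRatioClamped above simply floors the vertical scaling ratio at 1.0 so that down-scaled planes (VRatio < 1) do not inflate min_row_time during flip scheduling. A tiny stand-alone sketch of the clamp and its effect, with made-up numbers:

#include <stdio.h>

static double clamp_vratio(double vratio)
{
	return (vratio < 1.0) ? 1.0 : vratio;
}

int main(void)
{
	double line_time_us = 7.4, dpte_row_height = 8.0;

	/* without the clamp a 0.5 ratio would double the computed row time */
	printf("unclamped: %f us\n", dpte_row_height * line_time_us / 0.5);
	printf("clamped:   %f us\n", dpte_row_height * line_time_us / clamp_vratio(0.5));
	return 0;
}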
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
index b8ec08e3b7a3..90a5fefef05b 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
@@ -1490,19 +1490,30 @@ static void dml_rq_dlg_get_dlg_params(
disp_dlg_regs->refcyc_per_pte_group_vblank_l =
(unsigned int) (dst_y_per_row_vblank * (double) htotal
* ref_freq_to_pix_freq / (double) dpte_groups_per_row_ub_l);
- ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_l < (unsigned int)dml_pow(2, 13));
+ if ((refclk_freq_in_mhz / ref_freq_to_pix_freq < 28) &&
+ disp_dlg_regs->refcyc_per_pte_group_vblank_l >= (unsigned int)dml_pow(2, 13))
+ disp_dlg_regs->refcyc_per_pte_group_vblank_l = (1 << 13) - 1;
+ else
+ ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_l < (unsigned int)dml_pow(2, 13));
if (dual_plane) {
disp_dlg_regs->refcyc_per_pte_group_vblank_c = (unsigned int) (dst_y_per_row_vblank
* (double) htotal * ref_freq_to_pix_freq
/ (double) dpte_groups_per_row_ub_c);
- ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_c
+ if ((refclk_freq_in_mhz / ref_freq_to_pix_freq < 28) &&
+ disp_dlg_regs->refcyc_per_pte_group_vblank_c >= (unsigned int)dml_pow(2, 13))
+ disp_dlg_regs->refcyc_per_pte_group_vblank_c = (1 << 13) - 1;
+ else
+ ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_c
< (unsigned int)dml_pow(2, 13));
}
- disp_dlg_regs->refcyc_per_meta_chunk_vblank_l =
+ if (src->dcc)
+ disp_dlg_regs->refcyc_per_meta_chunk_vblank_l =
(unsigned int) (dst_y_per_row_vblank * (double) htotal
* ref_freq_to_pix_freq / (double) meta_chunks_per_row_ub_l);
+ else
+ disp_dlg_regs->refcyc_per_meta_chunk_vblank_l = 0;
ASSERT(disp_dlg_regs->refcyc_per_meta_chunk_vblank_l < (unsigned int)dml_pow(2, 13));
disp_dlg_regs->refcyc_per_meta_chunk_vblank_c =
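
The refcyc_per_pte_group_vblank fields are 13 bits wide; for low refclk-to-pixclk ratios the hunk saturates the programmed value to 2^13 - 1 instead of tripping the assert. The same saturation, stand-alone (the field width is passed in as an illustrative parameter):

#include <stdio.h>

static unsigned int saturate_to_bits(unsigned int val, unsigned int bits)
{
	unsigned int max = (1u << bits) - 1;

	return (val > max) ? max : val;
}

int main(void)
{
	printf("%u\n", saturate_to_bits(9000, 13)); /* -> 8191 */
	printf("%u\n", saturate_to_bits(1234, 13)); /* -> 1234 */
	return 0;
}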
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index 687010c17324..439ffd04be34 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -118,9 +118,11 @@ struct _vcs_dpi_soc_bounding_box_st {
double urgent_latency_adjustment_fabric_clock_component_us;
double urgent_latency_adjustment_fabric_clock_reference_mhz;
bool disable_dram_clock_change_vactive_support;
+ bool allow_dram_clock_one_display_vactive;
};
struct _vcs_dpi_ip_params_st {
+ bool use_min_dcfclk;
bool gpuvm_enable;
bool hostvm_enable;
unsigned int gpuvm_max_page_table_levels;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
index 6b525c52124c..b19988f54721 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
@@ -224,6 +224,7 @@ static void fetch_socbb_params(struct display_mode_lib *mode_lib)
mode_lib->vba.DummyPStateCheck = soc->dram_clock_change_latency_us == soc->dummy_pstate_latency_us;
mode_lib->vba.DRAMClockChangeSupportsVActive = !soc->disable_dram_clock_change_vactive_support ||
mode_lib->vba.DummyPStateCheck;
+ mode_lib->vba.AllowDramClockChangeOneDisplayVactive = soc->allow_dram_clock_one_display_vactive;
mode_lib->vba.Downspreading = soc->downspread_percent;
mode_lib->vba.DRAMChannelWidth = soc->dram_channel_width_bytes; // new!
@@ -280,6 +281,7 @@ static void fetch_ip_params(struct display_mode_lib *mode_lib)
ip_params_st *ip = &mode_lib->vba.ip;
// IP Parameters
+ mode_lib->vba.UseMinimumRequiredDCFCLK = ip->use_min_dcfclk;
mode_lib->vba.MaxNumDPP = ip->max_num_dpp;
mode_lib->vba.MaxNumOTG = ip->max_num_otg;
mode_lib->vba.MaxNumHDMIFRLOutputs = ip->max_num_hdmi_frl_outputs;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
index 3a734171f083..3f559e725ab1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
@@ -896,6 +896,8 @@ struct vba_vars_st {
bool dummystring[DC__NUM_DPP__MAX];
double BPP;
enum odm_combine_policy ODMCombinePolicy;
+ bool UseMinimumRequiredDCFCLK;
+ bool AllowDramClockChangeOneDisplayVactive;
};
bool CalculateMinAndMaxPrefetchMode(
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
index 87d682d25278..0ea6662a1563 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
@@ -129,7 +129,7 @@ static bool dsc_line_buff_depth_from_dpcd(int dpcd_line_buff_bit_depth, int *lin
static bool dsc_throughput_from_dpcd(int dpcd_throughput, int *throughput)
{
switch (dpcd_throughput) {
- case DP_DSC_THROUGHPUT_MODE_0_UPSUPPORTED:
+ case DP_DSC_THROUGHPUT_MODE_0_UNSUPPORTED:
*throughput = 0;
break;
case DP_DSC_THROUGHPUT_MODE_0_170:
diff --git a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
index 6f730b5bfe42..5e384a8a83dc 100644
--- a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
+++ b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
@@ -322,3 +322,92 @@ static const struct protection_properties dp_11_protection = {
.process_transaction = dp_11_process_transaction
};
+static const struct protection_properties *get_protection_properties_by_signal(
+ struct dc_link *link,
+ enum signal_type st,
+ enum hdcp_version version)
+{
+ switch (version) {
+ case HDCP_VERSION_14:
+ switch (st) {
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ return &hdmi_14_protection;
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ if (link &&
+ (link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_VGA_CONVERTER ||
+ link->dpcd_caps.dongle_caps.dongle_type == DISPLAY_DONGLE_DP_VGA_CONVERTER)) {
+ return &non_supported_protection;
+ }
+ return &dp_11_protection;
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ case SIGNAL_TYPE_EDP:
+ return &dp_11_protection;
+ default:
+ return &non_supported_protection;
+ }
+ break;
+ case HDCP_VERSION_22:
+ switch (st) {
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ return &hdmi_14_protection; //todo version2.2
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ case SIGNAL_TYPE_EDP:
+ return &dp_11_protection; //todo version2.2
+ default:
+ return &non_supported_protection;
+ }
+ break;
+ default:
+ return &non_supported_protection;
+ }
+}
+
+enum hdcp_message_status dc_process_hdcp_msg(
+ enum signal_type signal,
+ struct dc_link *link,
+ struct hdcp_protection_message *message_info)
+{
+ enum hdcp_message_status status = HDCP_MESSAGE_FAILURE;
+ uint32_t i = 0;
+
+ const struct protection_properties *protection_props;
+
+ if (!message_info)
+ return HDCP_MESSAGE_UNSUPPORTED;
+
+ if (message_info->msg_id < HDCP_MESSAGE_ID_READ_BKSV ||
+ message_info->msg_id >= HDCP_MESSAGE_ID_MAX)
+ return HDCP_MESSAGE_UNSUPPORTED;
+
+ protection_props =
+ get_protection_properties_by_signal(
+ link,
+ signal,
+ message_info->version);
+
+ if (!protection_props->supported)
+ return HDCP_MESSAGE_UNSUPPORTED;
+
+ if (protection_props->process_transaction(
+ link,
+ message_info)) {
+ status = HDCP_MESSAGE_SUCCESS;
+ } else {
+ for (i = 0; i < message_info->max_retries; i++) {
+ if (protection_props->process_transaction(
+ link,
+ message_info)) {
+ status = HDCP_MESSAGE_SUCCESS;
+ break;
+ }
+ }
+ }
+
+ return status;
+}
+
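
dc_process_hdcp_msg above attempts the DDC/AUX transaction once and then retries up to max_retries more times before reporting failure. The control flow reduced to a stand-alone sketch; the transaction stub is purely illustrative:

#include <stdbool.h>
#include <stdio.h>

static bool do_transaction(void)
{
	static int calls;

	return ++calls >= 3;     /* stub: succeed on the 3rd attempt */
}

static bool process_with_retries(unsigned int max_retries)
{
	unsigned int i;

	if (do_transaction())
		return true;
	for (i = 0; i < max_retries; i++)
		if (do_transaction())
			return true;
	return false;
}

int main(void)
{
	printf("%s\n", process_with_retries(5) ? "success" : "failure");
	return 0;
}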
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index d523fc9547e7..c7fd702a4a87 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -38,6 +38,7 @@
#endif
#include "dwb.h"
#include "mcif_wb.h"
+#include "panel_cntl.h"
#define MAX_CLOCK_SOURCES 7
@@ -92,6 +93,8 @@ struct clk_bw_params;
struct resource_funcs {
void (*destroy)(struct resource_pool **pool);
void (*link_init)(struct dc_link *link);
+ struct panel_cntl*(*panel_cntl_create)(
+ const struct panel_cntl_init_data *panel_cntl_init_data);
struct link_encoder *(*link_enc_create)(
const struct encoder_init_data *init);
bool (*validate_bandwidth)(
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
index d607b3191954..e8ce8c85adf1 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
@@ -27,27 +27,17 @@
#include "dm_services_types.h"
-struct abm_backlight_registers {
- unsigned int BL_PWM_CNTL;
- unsigned int BL_PWM_CNTL2;
- unsigned int BL_PWM_PERIOD_CNTL;
- unsigned int LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV;
-};
-
struct abm {
struct dc_context *ctx;
const struct abm_funcs *funcs;
bool dmcu_is_running;
- /* registers setting needs to be saved and restored at InitBacklight */
- struct abm_backlight_registers stored_backlight_registers;
};
struct abm_funcs {
- void (*abm_init)(struct abm *abm);
+ void (*abm_init)(struct abm *abm, uint32_t back_light);
bool (*set_abm_level)(struct abm *abm, unsigned int abm_level);
- bool (*set_abm_immediate_disable)(struct abm *abm);
- bool (*set_pipe)(struct abm *abm, unsigned int controller_id);
- bool (*init_backlight)(struct abm *abm);
+ bool (*set_abm_immediate_disable)(struct abm *abm, unsigned int panel_inst);
+ bool (*set_pipe)(struct abm *abm, unsigned int controller_id, unsigned int panel_inst);
/* backlight_pwm_u16_16 is unsigned 32 bit,
* 16 bit integer + 16 fractional, where 1.0 is max backlight value.
@@ -56,10 +46,13 @@ struct abm_funcs {
unsigned int backlight_pwm_u16_16,
unsigned int frame_ramp,
unsigned int controller_id,
- bool use_smooth_brightness);
+ unsigned int panel_inst);
unsigned int (*get_current_backlight)(struct abm *abm);
unsigned int (*get_target_backlight)(struct abm *abm);
+ bool (*init_abm_config)(struct abm *abm,
+ const char *src,
+ unsigned int bytes);
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
index f5dd0cc73c63..47a566d82d6e 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
@@ -144,6 +144,8 @@ struct hubbub_funcs {
void (*allow_self_refresh_control)(struct hubbub *hubbub, bool allow);
void (*apply_DEDCN21_147_wa)(struct hubbub *hubbub);
+
+ void (*force_wm_propagate_to_pipes)(struct hubbub *hubbub);
};
struct hubbub {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h b/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h
new file mode 100644
index 000000000000..f9ab5abb6462
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+/*
+ * panel_cntl.h
+ *
+ * Created on: Oct 6, 2015
+ * Author: yonsun
+ */
+
+#ifndef DC_PANEL_CNTL_H_
+#define DC_PANEL_CNTL_H_
+
+#include "dc_types.h"
+
+#define MAX_BACKLIGHT_LEVEL 0xFFFF
+
+struct panel_cntl_backlight_registers {
+ unsigned int BL_PWM_CNTL;
+ unsigned int BL_PWM_CNTL2;
+ unsigned int BL_PWM_PERIOD_CNTL;
+ unsigned int LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV;
+};
+
+struct panel_cntl_funcs {
+ void (*destroy)(struct panel_cntl **panel_cntl);
+ uint32_t (*hw_init)(struct panel_cntl *panel_cntl);
+ bool (*is_panel_backlight_on)(struct panel_cntl *panel_cntl);
+ bool (*is_panel_powered_on)(struct panel_cntl *panel_cntl);
+ void (*store_backlight_level)(struct panel_cntl *panel_cntl);
+ void (*driver_set_backlight)(struct panel_cntl *panel_cntl,
+ uint32_t backlight_pwm_u16_16);
+};
+
+struct panel_cntl_init_data {
+ struct dc_context *ctx;
+ uint32_t inst;
+};
+
+struct panel_cntl {
+ const struct panel_cntl_funcs *funcs;
+ struct dc_context *ctx;
+ uint32_t inst;
+ /* registers setting needs to be saved and restored at InitBacklight */
+ struct panel_cntl_backlight_registers stored_backlight_registers;
+};
+
+#endif /* DC_PANEL_CNTL_H_ */
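
Callers reach the panel hardware only through the panel_cntl_funcs table, the same function-pointer indirection used elsewhere in DC. A minimal sketch of that indirection with a stub backend; all names and the 50% backlight value are illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct panel;

struct panel_funcs {
	bool (*is_backlight_on)(struct panel *p);
	void (*set_backlight)(struct panel *p, uint32_t level_u16_16);
};

struct panel {
	const struct panel_funcs *funcs;
	uint32_t level;
};

static bool stub_is_on(struct panel *p)                   { return p->level != 0; }
static void stub_set(struct panel *p, uint32_t level)     { p->level = level; }

static const struct panel_funcs stub_funcs = { stub_is_on, stub_set };

int main(void)
{
	struct panel p = { &stub_funcs, 0 };

	p.funcs->set_backlight(&p, 0x8000);       /* 0.5 in u16.16 (1.0 == 0x10000) */
	printf("on=%d level=0x%x\n", p.funcs->is_backlight_on(&p), (unsigned)p.level);
	return 0;
}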
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index e5e7d94026fc..f803191e3134 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -117,6 +117,9 @@ struct crc_params {
enum crc_selection selection;
+ uint8_t dsc_mode;
+ uint8_t odm_mode;
+
bool continuous_mode;
bool enable;
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h b/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
index fecc80c47c26..2947d1b15512 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
@@ -173,6 +173,8 @@ struct scaler_data {
struct scaling_taps taps;
struct rect viewport;
struct rect viewport_c;
+ struct rect viewport_unadjusted;
+ struct rect viewport_c_unadjusted;
struct rect recout;
struct scaling_ratios ratios;
struct scl_inits inits;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index 08307f3796e3..8e72f077e552 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -75,9 +75,13 @@ struct hw_sequencer_funcs {
void (*wait_for_mpcc_disconnect)(struct dc *dc,
struct resource_pool *res_pool,
struct pipe_ctx *pipe_ctx);
+ void (*edp_backlight_control)(
+ struct dc_link *link,
+ bool enable);
void (*program_triplebuffer)(const struct dc *dc,
struct pipe_ctx *pipe_ctx, bool enableTripleBuffer);
void (*update_pending_status)(struct pipe_ctx *pipe_ctx);
+ void (*power_down)(struct dc *dc);
/* Pipe Lock Related */
void (*pipe_control_lock)(struct dc *dc,
@@ -193,6 +197,12 @@ struct hw_sequencer_funcs {
unsigned int bufSize, unsigned int mask);
void (*clear_status_bits)(struct dc *dc, unsigned int mask);
+ bool (*set_backlight_level)(struct pipe_ctx *pipe_ctx,
+ uint32_t backlight_pwm_u16_16,
+ uint32_t frame_ramp);
+
+ void (*set_abm_immediate_disable)(struct pipe_ctx *pipe_ctx);
+
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
index 52a26e6be066..36e906bb6bfc 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
@@ -100,8 +100,6 @@ struct hwseq_private_funcs {
struct dc *dc);
void (*edp_backlight_control)(struct dc_link *link,
bool enable);
- bool (*is_panel_backlight_on)(struct dc_link *link);
- bool (*is_panel_powered_on)(struct dc_link *link);
void (*setup_vupdate_interrupt)(struct dc *dc,
struct pipe_ctx *pipe_ctx);
bool (*did_underflow_occur)(struct dc *dc, struct pipe_ctx *pipe_ctx);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index ca4c36c0c9bc..a9be495af922 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -138,9 +138,6 @@ struct pipe_ctx *find_idle_secondary_pipe(
const struct resource_pool *pool,
const struct pipe_ctx *primary_pipe);
-bool resource_is_stream_unchanged(
- struct dc_state *old_context, struct dc_stream_state *stream);
-
bool resource_validate_attach_surfaces(
const struct dc_validation_set set[],
int set_count,
@@ -180,6 +177,8 @@ unsigned int resource_pixel_format_to_bpp(enum surface_pixel_format format);
void get_audio_check(struct audio_info *aud_modes,
struct audio_check *aud_chk);
+int get_num_mpc_splits(struct pipe_ctx *pipe);
+
int get_num_odm_splits(struct pipe_ctx *pipe);
#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c
index 3464b2d5b89a..348e9a600a72 100644
--- a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c
@@ -84,6 +84,14 @@ static void virtual_link_encoder_destroy(struct link_encoder **enc)
*enc = NULL;
}
+static void virtual_link_encoder_get_max_link_cap(struct link_encoder *enc,
+ struct dc_link_settings *link_settings)
+{
+ /* Set Default link settings */
+ struct dc_link_settings max_link_cap = {LANE_COUNT_FOUR, LINK_RATE_HIGH,
+ LINK_SPREAD_05_DOWNSPREAD_30KHZ, false, 0};
+ *link_settings = max_link_cap;
+}
static const struct link_encoder_funcs virtual_lnk_enc_funcs = {
.validate_output_with_stream =
@@ -94,6 +102,7 @@ static const struct link_encoder_funcs virtual_lnk_enc_funcs = {
.enable_dp_output = virtual_link_encoder_enable_dp_output,
.enable_dp_mst_output = virtual_link_encoder_enable_dp_mst_output,
.disable_output = virtual_link_encoder_disable_output,
+ .get_max_link_cap = virtual_link_encoder_get_max_link_cap,
.dp_set_lane_settings = virtual_link_encoder_dp_set_lane_settings,
.dp_set_phy_pattern = virtual_link_encoder_dp_set_phy_pattern,
.update_mst_stream_allocation_table =
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
index c2671f2616c8..26d94eb5ab58 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
@@ -64,10 +64,11 @@
* other component within DAL.
*/
-#include "dmub_types.h"
-#include "dmub_cmd.h"
-#include "dmub_gpint_cmd.h"
-#include "dmub_rb.h"
+#include "inc/dmub_types.h"
+#include "inc/dmub_cmd.h"
+#include "inc/dmub_gpint_cmd.h"
+#include "inc/dmub_cmd_dal.h"
+#include "inc/dmub_rb.h"
#if defined(__cplusplus)
extern "C" {
@@ -75,7 +76,6 @@ extern "C" {
/* Forward declarations */
struct dmub_srv;
-struct dmub_cmd_header;
struct dmub_srv_common_regs;
/* enum dmub_status - return code for dmcub functions */
@@ -151,6 +151,7 @@ struct dmub_srv_region_params {
uint32_t inst_const_size;
uint32_t bss_data_size;
uint32_t vbios_size;
+ const uint8_t *fw_inst_const;
const uint8_t *fw_bss_data;
};
@@ -457,7 +458,7 @@ enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub);
* DMUB_STATUS_INVALID - unspecified error
*/
enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
- const struct dmub_cmd_header *cmd);
+ const union dmub_rb_cmd *cmd);
/**
* dmub_srv_cmd_execute() - Executes a queued sequence to the dmub
@@ -565,6 +566,16 @@ dmub_srv_send_gpint_command(struct dmub_srv *dmub,
enum dmub_status dmub_srv_get_gpint_response(struct dmub_srv *dmub,
uint32_t *response);
+/**
+ * dmub_flush_buffer_mem() - Read back entire frame buffer region.
+ * This ensures that the write from x86 has been flushed and will not
+ * hang the DMCUB.
+ * @fb: frame buffer to flush
+ *
+ * Can be called after software initialization.
+ */
+void dmub_flush_buffer_mem(const struct dmub_fb *fb);
+
#if defined(__cplusplus)
}
#endif
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index 10b5fa9d2588..599bf2055bcb 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -228,6 +228,7 @@ struct dmub_cmd_psr_copy_settings_data {
uint8_t smu_optimizations_en;
uint8_t frame_delay;
uint8_t frame_cap_ind;
+ struct dmub_psr_debug_flags debug;
};
struct dmub_rb_cmd_psr_copy_settings {
@@ -260,6 +261,8 @@ struct dmub_rb_cmd_psr_set_version {
struct dmub_cmd_abm_set_pipe_data {
uint32_t ramping_boundary;
uint32_t otg_inst;
+ uint32_t panel_inst;
+ uint32_t set_pipe_option;
};
struct dmub_rb_cmd_abm_set_pipe {
@@ -303,6 +306,16 @@ struct dmub_rb_cmd_abm_set_pwm_frac {
struct dmub_cmd_abm_set_pwm_frac_data abm_set_pwm_frac_data;
};
+struct dmub_cmd_abm_init_config_data {
+ union dmub_addr src;
+ uint16_t bytes;
+};
+
+struct dmub_rb_cmd_abm_init_config {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_abm_init_config_data abm_init_config_data;
+};
+
union dmub_rb_cmd {
struct dmub_rb_cmd_read_modify_write read_modify_write;
struct dmub_rb_cmd_reg_field_update_sequence reg_field_update_seq;
@@ -324,6 +337,7 @@ union dmub_rb_cmd {
struct dmub_rb_cmd_abm_set_level abm_set_level;
struct dmub_rb_cmd_abm_set_ambient_level abm_set_ambient_level;
struct dmub_rb_cmd_abm_set_pwm_frac abm_set_pwm_frac;
+ struct dmub_rb_cmd_abm_init_config abm_init_config;
};
#pragma pack(pop)
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h
index d37535d21928..e42de9ded275 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h
@@ -32,17 +32,16 @@
*/
enum dmub_cmd_psr_type {
- DMUB_CMD__PSR_SET_VERSION = 0,
- DMUB_CMD__PSR_COPY_SETTINGS = 1,
- DMUB_CMD__PSR_ENABLE = 2,
- DMUB_CMD__PSR_DISABLE = 3,
- DMUB_CMD__PSR_SET_LEVEL = 4,
+ DMUB_CMD__PSR_SET_VERSION = 0,
+ DMUB_CMD__PSR_COPY_SETTINGS = 1,
+ DMUB_CMD__PSR_ENABLE = 2,
+ DMUB_CMD__PSR_DISABLE = 3,
+ DMUB_CMD__PSR_SET_LEVEL = 4,
};
enum psr_version {
- PSR_VERSION_1 = 0x10, // PSR Version 1
- PSR_VERSION_2 = 0x20, // PSR Version 2, includes selective update
- PSR_VERSION_2_1 = 0x21, // PSR Version 2, includes Y-coordinate support for SU
+ PSR_VERSION_1 = 0,
+ PSR_VERSION_UNSUPPORTED = 0xFFFFFFFF,
};
enum dmub_cmd_abm_type {
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_rb.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_rb.h
index df875fdd2ab0..2ae48c18bb5b 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_rb.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_rb.h
@@ -33,8 +33,6 @@
extern "C" {
#endif
-struct dmub_cmd_header;
-
struct dmub_rb_init_params {
void *ctx;
void *base_address;
@@ -71,7 +69,7 @@ static inline bool dmub_rb_full(struct dmub_rb *rb)
}
static inline bool dmub_rb_push_front(struct dmub_rb *rb,
- const struct dmub_cmd_header *cmd)
+ const union dmub_rb_cmd *cmd)
{
uint64_t volatile *dst = (uint64_t volatile *)(rb->base_address) + rb->wrpt / sizeof(uint64_t);
const uint64_t *src = (const uint64_t *)cmd;
@@ -93,7 +91,7 @@ static inline bool dmub_rb_push_front(struct dmub_rb *rb,
}
static inline bool dmub_rb_front(struct dmub_rb *rb,
- struct dmub_cmd_header *cmd)
+ union dmub_rb_cmd *cmd)
{
uint8_t *rd_ptr = (uint8_t *)rb->base_address + rb->rptr;
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_types.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_types.h
index 41d524b0db2f..bed5b023a396 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_types.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_types.h
@@ -49,6 +49,12 @@ extern "C" {
#define dmub_udelay(microseconds) udelay(microseconds)
#endif
+/* Maximum number of streams on any ASIC. */
+#define DMUB_MAX_STREAMS 6
+
+/* Maximum number of planes on any ASIC. */
+#define DMUB_MAX_PLANES 6
+
union dmub_addr {
struct {
uint32_t low_part;
@@ -57,6 +63,11 @@ union dmub_addr {
uint64_t quad_part;
};
+struct dmub_psr_debug_flags {
+ uint8_t visual_confirm : 1;
+ uint8_t reserved : 7;
+};
+
#if defined(__cplusplus)
}
#endif
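
union dmub_addr overlays a 64-bit quad_part with 32-bit low/high views so each half can be written to a register pair directly. A quick stand-alone check of that layout (assumes a little-endian host, as on x86):

#include <stdint.h>
#include <stdio.h>

union addr {
	struct {
		uint32_t low_part;
		uint32_t high_part;
	} u;
	uint64_t quad_part;
};

int main(void)
{
	union addr a;

	a.quad_part = 0x0000000123456789ULL;
	/* little-endian: low = 0x23456789, high = 0x00000001 */
	printf("low=0x%08x high=0x%08x\n", a.u.low_part, a.u.high_part);
	return 0;
}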
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
index 63bb9e2c81de..edc73d6d7ba2 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
@@ -23,7 +23,7 @@
*
*/
-#include "../inc/dmub_srv.h"
+#include "../dmub_srv.h"
#include "dmub_reg.h"
#include "dmub_dcn20.h"
@@ -186,14 +186,22 @@ void dmub_dcn20_setup_windows(struct dmub_srv *dmub,
dmub_dcn20_get_fb_base_offset(dmub, &fb_base, &fb_offset);
- dmub_dcn20_translate_addr(&cw2->offset, fb_base, fb_offset, &offset);
-
- REG_WRITE(DMCUB_REGION3_CW2_OFFSET, offset.u.low_part);
- REG_WRITE(DMCUB_REGION3_CW2_OFFSET_HIGH, offset.u.high_part);
- REG_WRITE(DMCUB_REGION3_CW2_BASE_ADDRESS, cw2->region.base);
- REG_SET_2(DMCUB_REGION3_CW2_TOP_ADDRESS, 0,
- DMCUB_REGION3_CW2_TOP_ADDRESS, cw2->region.top,
- DMCUB_REGION3_CW2_ENABLE, 1);
+ if (cw2->region.base != cw2->region.top) {
+ dmub_dcn20_translate_addr(&cw2->offset, fb_base, fb_offset,
+ &offset);
+
+ REG_WRITE(DMCUB_REGION3_CW2_OFFSET, offset.u.low_part);
+ REG_WRITE(DMCUB_REGION3_CW2_OFFSET_HIGH, offset.u.high_part);
+ REG_WRITE(DMCUB_REGION3_CW2_BASE_ADDRESS, cw2->region.base);
+ REG_SET_2(DMCUB_REGION3_CW2_TOP_ADDRESS, 0,
+ DMCUB_REGION3_CW2_TOP_ADDRESS, cw2->region.top,
+ DMCUB_REGION3_CW2_ENABLE, 1);
+ } else {
+ REG_WRITE(DMCUB_REGION3_CW2_OFFSET, 0);
+ REG_WRITE(DMCUB_REGION3_CW2_OFFSET_HIGH, 0);
+ REG_WRITE(DMCUB_REGION3_CW2_BASE_ADDRESS, 0);
+ REG_WRITE(DMCUB_REGION3_CW2_TOP_ADDRESS, 0);
+ }
dmub_dcn20_translate_addr(&cw3->offset, fb_base, fb_offset, &offset);
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c
index 5bed9fcd6b5c..e8f488232e34 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c
@@ -23,7 +23,7 @@
*
*/
-#include "../inc/dmub_srv.h"
+#include "../dmub_srv.h"
#include "dmub_reg.h"
#include "dmub_dcn21.h"
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_reg.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_reg.c
index 4094eca212f0..ca0c8a54b635 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_reg.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_reg.c
@@ -24,7 +24,7 @@
*/
#include "dmub_reg.h"
-#include "../inc/dmub_srv.h"
+#include "../dmub_srv.h"
struct dmub_reg_value_masks {
uint32_t value;
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index ce32cc7933c4..0e3751d94cb0 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -23,7 +23,7 @@
*
*/
-#include "../inc/dmub_srv.h"
+#include "../dmub_srv.h"
#include "dmub_dcn20.h"
#include "dmub_dcn21.h"
#include "dmub_fw_meta.h"
@@ -70,7 +70,7 @@ static inline uint32_t dmub_align(uint32_t val, uint32_t factor)
return (val + factor - 1) / factor * factor;
}
-static void dmub_flush_buffer_mem(const struct dmub_fb *fb)
+void dmub_flush_buffer_mem(const struct dmub_fb *fb)
{
const uint8_t *base = (const uint8_t *)fb->cpu_addr;
uint8_t buf[64];
@@ -91,18 +91,32 @@ static void dmub_flush_buffer_mem(const struct dmub_fb *fb)
}
static const struct dmub_fw_meta_info *
-dmub_get_fw_meta_info(const uint8_t *fw_bss_data, uint32_t fw_bss_data_size)
+dmub_get_fw_meta_info(const struct dmub_srv_region_params *params)
{
const union dmub_fw_meta *meta;
+ const uint8_t *blob = NULL;
+ uint32_t blob_size = 0;
+ uint32_t meta_offset = 0;
+
+ if (params->fw_bss_data) {
+ /* Legacy metadata region. */
+ blob = params->fw_bss_data;
+ blob_size = params->bss_data_size;
+ meta_offset = DMUB_FW_META_OFFSET;
+ } else if (params->fw_inst_const) {
+ /* Combined metadata region. */
+ blob = params->fw_inst_const;
+ blob_size = params->inst_const_size;
+ meta_offset = 0;
+ }
- if (fw_bss_data == NULL)
+ if (!blob || !blob_size)
return NULL;
- if (fw_bss_data_size < sizeof(union dmub_fw_meta) + DMUB_FW_META_OFFSET)
+ if (blob_size < sizeof(union dmub_fw_meta) + meta_offset)
return NULL;
- meta = (const union dmub_fw_meta *)(fw_bss_data + fw_bss_data_size -
- DMUB_FW_META_OFFSET -
+ meta = (const union dmub_fw_meta *)(blob + blob_size - meta_offset -
sizeof(union dmub_fw_meta));
if (meta->info.magic_value != DMUB_FW_META_MAGIC)
@@ -247,8 +261,7 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
mail->base = dmub_align(bios->top, 256);
mail->top = mail->base + DMUB_MAILBOX_SIZE;
- fw_info = dmub_get_fw_meta_info(params->fw_bss_data,
- params->bss_data_size);
+ fw_info = dmub_get_fw_meta_info(params);
if (fw_info) {
fw_state_size = fw_info->fw_region_size;
@@ -449,7 +462,7 @@ enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub)
}
enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
- const struct dmub_cmd_header *cmd)
+ const union dmub_rb_cmd *cmd)
{
if (!dmub->hw_init)
return DMUB_STATUS_INVALID;
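
dmub_get_fw_meta_info now accepts either the legacy BSS/data blob or the combined inst_const blob; in both cases the metadata record sits at blob + size - offset - sizeof(meta) and is validated by a magic value. A compact sketch of that trailer lookup; the magic number and struct layout are illustrative, not the firmware's:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define META_MAGIC 0x444D5542u          /* illustrative magic value */

struct fw_meta { uint32_t magic; uint32_t fw_region_size; };

static const struct fw_meta *find_meta(const uint8_t *blob, uint32_t size,
				       uint32_t tail_offset)
{
	const struct fw_meta *meta;

	if (!blob || size < sizeof(*meta) + tail_offset)
		return NULL;
	meta = (const struct fw_meta *)(blob + size - tail_offset - sizeof(*meta));
	return meta->magic == META_MAGIC ? meta : NULL;
}

int main(void)
{
	uint8_t blob[64] = { 0 };
	struct fw_meta m = { META_MAGIC, 4096 };

	memcpy(blob + sizeof(blob) - sizeof(m), &m, sizeof(m)); /* trailer at the end */
	printf("found=%d\n", find_meta(blob, sizeof(blob), 0) != NULL);
	return 0;
}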
diff --git a/drivers/gpu/drm/amd/display/include/hdcp_types.h b/drivers/gpu/drm/amd/display/include/hdcp_types.h
index f31e6befc8d6..42229b4effdc 100644
--- a/drivers/gpu/drm/amd/display/include/hdcp_types.h
+++ b/drivers/gpu/drm/amd/display/include/hdcp_types.h
@@ -83,6 +83,12 @@ enum hdcp_link {
HDCP_LINK_SECONDARY
};
+enum hdcp_message_status {
+ HDCP_MESSAGE_SUCCESS,
+ HDCP_MESSAGE_FAILURE,
+ HDCP_MESSAGE_UNSUPPORTED
+};
+
struct hdcp_protection_message {
enum hdcp_version version;
/* relevant only for DVI */
@@ -91,6 +97,7 @@ struct hdcp_protection_message {
uint32_t length;
uint8_t max_retries;
uint8_t *data;
+ enum hdcp_message_status status;
};
#endif
diff --git a/drivers/gpu/drm/amd/display/include/logger_interface.h b/drivers/gpu/drm/amd/display/include/logger_interface.h
index 6e008de25629..02c23b04d34b 100644
--- a/drivers/gpu/drm/amd/display/include/logger_interface.h
+++ b/drivers/gpu/drm/amd/display/include/logger_interface.h
@@ -40,8 +40,6 @@ struct dc_state;
*
*/
-void dc_conn_log_hex_linux(const uint8_t *hex_data, int hex_data_count);
-
void pre_surface_trace(
struct dc *dc,
const struct dc_plane_state *const *plane_states,
@@ -102,14 +100,12 @@ void context_clock_trace(
#define CONN_DATA_DETECT(link, hex_data, hex_len, ...) \
do { \
(void)(link); \
- dc_conn_log_hex_linux(hex_data, hex_len); \
DC_LOG_EVENT_DETECTION(__VA_ARGS__); \
} while (0)
#define CONN_DATA_LINK_LOSS(link, hex_data, hex_len, ...) \
do { \
(void)(link); \
- dc_conn_log_hex_linux(hex_data, hex_len); \
DC_LOG_EVENT_LINK_LOSS(__VA_ARGS__); \
} while (0)
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index cac09d500fda..9431b48aecb4 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -1782,7 +1782,8 @@ rgb_user_alloc_fail:
return ret;
}
-bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
+bool mod_color_calculate_degamma_params(struct dc_color_caps *dc_caps,
+ struct dc_transfer_func *input_tf,
const struct dc_gamma *ramp, bool mapUserRamp)
{
struct dc_transfer_func_distributed_points *tf_pts = &input_tf->tf_pts;
@@ -1801,11 +1802,29 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
/* we can use hardcoded curve for plain SRGB TF
* If linear, it's bypass if on user ramp
*/
- if (input_tf->type == TF_TYPE_PREDEFINED &&
- (input_tf->tf == TRANSFER_FUNCTION_SRGB ||
- input_tf->tf == TRANSFER_FUNCTION_LINEAR) &&
- !mapUserRamp)
- return true;
+ if (input_tf->type == TF_TYPE_PREDEFINED) {
+ if ((input_tf->tf == TRANSFER_FUNCTION_SRGB ||
+ input_tf->tf == TRANSFER_FUNCTION_LINEAR) &&
+ !mapUserRamp)
+ return true;
+
+ if (dc_caps != NULL &&
+ dc_caps->dpp.dcn_arch == 1) {
+
+ if (input_tf->tf == TRANSFER_FUNCTION_PQ &&
+ dc_caps->dpp.dgam_rom_caps.pq == 1)
+ return true;
+
+ if (input_tf->tf == TRANSFER_FUNCTION_GAMMA22 &&
+ dc_caps->dpp.dgam_rom_caps.gamma2_2 == 1)
+ return true;
+
+ // HLG OOTF not accounted for
+ if (input_tf->tf == TRANSFER_FUNCTION_HLG &&
+ dc_caps->dpp.dgam_rom_caps.hlg == 1)
+ return true;
+ }
+ }
input_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
@@ -1902,7 +1921,7 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
- if (ramp->type == GAMMA_CUSTOM)
+ if (ramp && ramp->type == GAMMA_CUSTOM)
apply_lut_1d(ramp, MAX_HW_POINTS, tf_pts);
ret = true;
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
index 9994817a9a03..7f56226ba77a 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
@@ -30,6 +30,7 @@ struct dc_transfer_func;
struct dc_gamma;
struct dc_transfer_func_distributed_points;
struct dc_rgb_fixed;
+struct dc_color_caps;
enum dc_transfer_func_predefined;
/* For SetRegamma ADL interface support
@@ -100,7 +101,8 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed,
const struct freesync_hdr_tf_params *fs_params);
-bool mod_color_calculate_degamma_params(struct dc_transfer_func *output_tf,
+bool mod_color_calculate_degamma_params(struct dc_color_caps *dc_caps,
+ struct dc_transfer_func *output_tf,
const struct dc_gamma *ramp, bool mapUserRamp);
bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index c33454a9e0b4..eb7421e83b86 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -443,7 +443,7 @@ static bool vrr_settings_require_update(struct core_freesync *core_freesync,
return true;
} else if (in_vrr->state == VRR_STATE_ACTIVE_FIXED &&
in_vrr->fixed.target_refresh_in_uhz !=
- in_config->min_refresh_in_uhz) {
+ in_config->fixed_refresh_in_uhz) {
return true;
} else if (in_vrr->min_refresh_in_uhz != min_refresh_in_uhz) {
return true;
@@ -491,7 +491,7 @@ bool mod_freesync_get_v_position(struct mod_freesync *mod_freesync,
return false;
}
-static void build_vrr_infopacket_data(const struct mod_vrr_params *vrr,
+static void build_vrr_infopacket_data_v1(const struct mod_vrr_params *vrr,
struct dc_info_packet *infopacket)
{
/* PB1 = 0x1A (24bit AMD IEEE OUI (0x00001A) - Byte 0) */
@@ -523,14 +523,74 @@ static void build_vrr_infopacket_data(const struct mod_vrr_params *vrr,
vrr->state == VRR_STATE_ACTIVE_FIXED)
infopacket->sb[6] |= 0x04;
+ // For v1 & 2 infoframes program nominal if non-fs mode, otherwise full range
/* PB7 = FreeSync Minimum refresh rate (Hz) */
- infopacket->sb[7] = (unsigned char)((vrr->min_refresh_in_uhz + 500000) / 1000000);
+ if (vrr->state == VRR_STATE_ACTIVE_VARIABLE ||
+ vrr->state == VRR_STATE_ACTIVE_FIXED) {
+ infopacket->sb[7] = (unsigned char)((vrr->min_refresh_in_uhz + 500000) / 1000000);
+ } else {
+ infopacket->sb[7] = (unsigned char)((vrr->max_refresh_in_uhz + 500000) / 1000000);
+ }
/* PB8 = FreeSync Maximum refresh rate (Hz)
* Note: We should never go above the field rate of the mode timing set.
*/
infopacket->sb[8] = (unsigned char)((vrr->max_refresh_in_uhz + 500000) / 1000000);
+ //FreeSync HDR
+ infopacket->sb[9] = 0;
+ infopacket->sb[10] = 0;
+}
+
+static void build_vrr_infopacket_data_v3(const struct mod_vrr_params *vrr,
+ struct dc_info_packet *infopacket)
+{
+ /* PB1 = 0x1A (24bit AMD IEEE OUI (0x00001A) - Byte 0) */
+ infopacket->sb[1] = 0x1A;
+
+ /* PB2 = 0x00 (24bit AMD IEEE OUI (0x00001A) - Byte 1) */
+ infopacket->sb[2] = 0x00;
+
+ /* PB3 = 0x00 (24bit AMD IEEE OUI (0x00001A) - Byte 2) */
+ infopacket->sb[3] = 0x00;
+
+ /* PB4 = Reserved */
+
+ /* PB5 = Reserved */
+
+ /* PB6 = [Bits 7:3 = Reserved] */
+
+ /* PB6 = [Bit 0 = FreeSync Supported] */
+ if (vrr->state != VRR_STATE_UNSUPPORTED)
+ infopacket->sb[6] |= 0x01;
+
+ /* PB6 = [Bit 1 = FreeSync Enabled] */
+ if (vrr->state != VRR_STATE_DISABLED &&
+ vrr->state != VRR_STATE_UNSUPPORTED)
+ infopacket->sb[6] |= 0x02;
+
+ /* PB6 = [Bit 2 = FreeSync Active] */
+ if (vrr->state == VRR_STATE_ACTIVE_VARIABLE ||
+ vrr->state == VRR_STATE_ACTIVE_FIXED)
+ infopacket->sb[6] |= 0x04;
+
+ if (vrr->state == VRR_STATE_ACTIVE_FIXED) {
+ /* PB7 = FreeSync Minimum refresh rate (Hz) */
+ infopacket->sb[7] = (unsigned char)((vrr->fixed_refresh_in_uhz + 500000) / 1000000);
+ /* PB8 = FreeSync Maximum refresh rate (Hz) */
+ infopacket->sb[8] = (unsigned char)((vrr->fixed_refresh_in_uhz + 500000) / 1000000);
+ } else if (vrr->state == VRR_STATE_ACTIVE_VARIABLE) {
+ /* PB7 = FreeSync Minimum refresh rate (Hz) */
+ infopacket->sb[7] = (unsigned char)((vrr->min_refresh_in_uhz + 500000) / 1000000);
+ /* PB8 = FreeSync Maximum refresh rate (Hz) */
+ infopacket->sb[8] = (unsigned char)((vrr->max_refresh_in_uhz + 500000) / 1000000);
+ } else {
+ // Non-fs case, program nominal range
+ /* PB7 = FreeSync Minimum refresh rate (Hz) */
+ infopacket->sb[7] = (unsigned char)((vrr->max_refresh_in_uhz + 500000) / 1000000);
+ /* PB8 = FreeSync Maximum refresh rate (Hz) */
+ infopacket->sb[8] = (unsigned char)((vrr->max_refresh_in_uhz + 500000) / 1000000);
+ }
//FreeSync HDR
infopacket->sb[9] = 0;
@@ -678,7 +738,7 @@ static void build_vrr_infopacket_v1(enum signal_type signal,
unsigned int payload_size = 0;
build_vrr_infopacket_header_v1(signal, infopacket, &payload_size);
- build_vrr_infopacket_data(vrr, infopacket);
+ build_vrr_infopacket_data_v1(vrr, infopacket);
build_vrr_infopacket_checksum(&payload_size, infopacket);
infopacket->valid = true;
@@ -692,7 +752,24 @@ static void build_vrr_infopacket_v2(enum signal_type signal,
unsigned int payload_size = 0;
build_vrr_infopacket_header_v2(signal, infopacket, &payload_size);
- build_vrr_infopacket_data(vrr, infopacket);
+ build_vrr_infopacket_data_v1(vrr, infopacket);
+
+ build_vrr_infopacket_fs2_data(app_tf, infopacket);
+
+ build_vrr_infopacket_checksum(&payload_size, infopacket);
+
+ infopacket->valid = true;
+}
+
+static void build_vrr_infopacket_v3(enum signal_type signal,
+ const struct mod_vrr_params *vrr,
+ enum color_transfer_func app_tf,
+ struct dc_info_packet *infopacket)
+{
+ unsigned int payload_size = 0;
+
+ build_vrr_infopacket_header_v2(signal, infopacket, &payload_size);
+ build_vrr_infopacket_data_v3(vrr, infopacket);
build_vrr_infopacket_fs2_data(app_tf, infopacket);
@@ -717,11 +794,14 @@ void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync,
return;
switch (packet_type) {
- case PACKET_TYPE_FS2:
+ case PACKET_TYPE_FS_V3:
+ build_vrr_infopacket_v3(stream->signal, vrr, app_tf, infopacket);
+ break;
+ case PACKET_TYPE_FS_V2:
build_vrr_infopacket_v2(stream->signal, vrr, app_tf, infopacket);
break;
case PACKET_TYPE_VRR:
- case PACKET_TYPE_FS1:
+ case PACKET_TYPE_FS_V1:
default:
build_vrr_infopacket_v1(stream->signal, vrr, infopacket);
}
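
The PB7/PB8 infopacket bytes above round a refresh rate stored in micro-Hz to whole Hz with (uhz + 500000) / 1000000. Stand-alone, with example rates:

#include <stdio.h>

static unsigned char refresh_uhz_to_hz(unsigned int uhz)
{
	return (unsigned char)((uhz + 500000) / 1000000);
}

int main(void)
{
	printf("%u\n", refresh_uhz_to_hz(59940000));  /* 59.94 Hz -> 60 */
	printf("%u\n", refresh_uhz_to_hz(48000000));  /* 48 Hz    -> 48 */
	return 0;
}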
@@ -793,6 +873,11 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
calc_duration_in_us_from_refresh_in_uhz(
(unsigned int)max_refresh_in_uhz);
+ if (in_config->state == VRR_STATE_ACTIVE_FIXED)
+ in_out_vrr->fixed_refresh_in_uhz = in_config->fixed_refresh_in_uhz;
+ else
+ in_out_vrr->fixed_refresh_in_uhz = 0;
+
refresh_range = in_out_vrr->max_refresh_in_uhz -
in_out_vrr->min_refresh_in_uhz;
@@ -843,7 +928,7 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
in_out_vrr->min_refresh_in_uhz);
} else if (in_out_vrr->state == VRR_STATE_ACTIVE_FIXED) {
in_out_vrr->fixed.target_refresh_in_uhz =
- in_out_vrr->min_refresh_in_uhz;
+ in_out_vrr->fixed_refresh_in_uhz;
if (in_out_vrr->fixed.ramping_active &&
in_out_vrr->fixed.fixed_active) {
/* Do not update vtotals if ramping is already active
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
index cc1d3f470b99..e9fbd94f8635 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
@@ -328,7 +328,8 @@ enum mod_hdcp_status mod_hdcp_add_display(struct mod_hdcp *hdcp,
/* add display to connection */
hdcp->connection.link = *link;
*display_container = *display;
- status = mod_hdcp_add_display_to_topology(hdcp, display->index);
+ status = mod_hdcp_add_display_to_topology(hdcp, display_container);
+
if (status != MOD_HDCP_STATUS_SUCCESS)
goto out;
@@ -374,7 +375,7 @@ enum mod_hdcp_status mod_hdcp_remove_display(struct mod_hdcp *hdcp,
status = mod_hdcp_remove_display_from_topology(hdcp, index);
if (status != MOD_HDCP_STATUS_SUCCESS)
goto out;
- display->state = MOD_HDCP_DISPLAY_INACTIVE;
+ memset(display, 0, sizeof(struct mod_hdcp_display));
/* request authentication when connection is not reset */
if (current_state(hdcp) != HDCP_UNINITIALIZED)
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
index 5cb4546be0ef..b0cefed2eb02 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
@@ -328,7 +328,7 @@ void mod_hdcp_dump_binary_message(uint8_t *msg, uint32_t msg_size,
/* psp functions */
enum mod_hdcp_status mod_hdcp_add_display_to_topology(
- struct mod_hdcp *hdcp, uint8_t index);
+ struct mod_hdcp *hdcp, struct mod_hdcp_display *display);
enum mod_hdcp_status mod_hdcp_remove_display_from_topology(
struct mod_hdcp *hdcp, uint8_t index);
enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp);
@@ -357,8 +357,6 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_stream_management(
struct mod_hdcp *hdcp);
enum mod_hdcp_status mod_hdcp_hdcp2_validate_stream_ready(
struct mod_hdcp *hdcp);
-enum mod_hdcp_status mod_hdcp_hdcp2_get_link_encryption_status(struct mod_hdcp *hdcp,
- enum mod_hdcp_encryption_status *encryption_status);
/* ddc functions */
enum mod_hdcp_status mod_hdcp_read_bksv(struct mod_hdcp *hdcp);
@@ -503,11 +501,6 @@ static inline uint8_t is_display_active(struct mod_hdcp_display *display)
return display->state >= MOD_HDCP_DISPLAY_ACTIVE;
}
-static inline uint8_t is_display_added(struct mod_hdcp_display *display)
-{
- return display->state >= MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED;
-}
-
static inline uint8_t is_display_encryption_enabled(struct mod_hdcp_display *display)
{
return display->state >= MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
@@ -515,34 +508,23 @@ static inline uint8_t is_display_encryption_enabled(struct mod_hdcp_display *dis
static inline uint8_t get_active_display_count(struct mod_hdcp *hdcp)
{
- uint8_t added_count = 0;
+ uint8_t active_count = 0;
uint8_t i;
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
if (is_display_active(&hdcp->displays[i]))
- added_count++;
- return added_count;
-}
-
-static inline uint8_t get_added_display_count(struct mod_hdcp *hdcp)
-{
- uint8_t added_count = 0;
- uint8_t i;
-
- for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
- if (is_display_added(&hdcp->displays[i]))
- added_count++;
- return added_count;
+ active_count++;
+ return active_count;
}
-static inline struct mod_hdcp_display *get_first_added_display(
+static inline struct mod_hdcp_display *get_first_active_display(
struct mod_hdcp *hdcp)
{
uint8_t i;
struct mod_hdcp_display *display = NULL;
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
- if (is_display_added(&hdcp->displays[i])) {
+ if (is_display_active(&hdcp->displays[i])) {
display = &hdcp->displays[i];
break;
}
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
index 37c8c05497d6..f244b72e74e0 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
@@ -129,7 +129,7 @@ static inline uint8_t get_device_count(struct mod_hdcp *hdcp)
static inline enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp)
{
/* device count must be greater than or equal to tracked hdcp displays */
- return (get_device_count(hdcp) < get_added_display_count(hdcp)) ?
+ return (get_device_count(hdcp) < get_active_display_count(hdcp)) ?
MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE :
MOD_HDCP_STATUS_SUCCESS;
}
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
index 491c00f48026..549c113abcf7 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
@@ -208,7 +208,7 @@ static inline uint8_t get_device_count(struct mod_hdcp *hdcp)
static enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp)
{
/* device count must be greater than or equal to tracked hdcp displays */
- return (get_device_count(hdcp) < get_added_display_count(hdcp)) ?
+ return (get_device_count(hdcp) < get_active_display_count(hdcp)) ?
MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE :
MOD_HDCP_STATUS_SUCCESS;
}
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c
index 44956f9ba178..fb6a19d020f9 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c
@@ -98,8 +98,8 @@ char *mod_hdcp_status_to_str(int32_t status)
return "MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE";
case MOD_HDCP_STATUS_HDCP1_KSV_LIST_REVOKED:
return "MOD_HDCP_STATUS_HDCP1_KSV_LIST_REVOKED";
- case MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION:
- return "MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION";
+ case MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION_FAILURE";
case MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE:
return "MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE";
case MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE:
@@ -158,8 +158,8 @@ char *mod_hdcp_status_to_str(int32_t status)
return "MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED";
case MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY:
return "MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY";
- case MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION:
- return "MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION";
+ case MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION_FAILURE";
case MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING:
return "MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING";
case MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE:
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
index c2929815c3ee..fb1161dd7ea8 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
@@ -51,12 +51,15 @@ enum mod_hdcp_status mod_hdcp_remove_display_from_topology(
struct ta_dtm_shared_memory *dtm_cmd;
struct mod_hdcp_display *display =
get_active_display_at_index(hdcp, index);
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf;
- if (!display || !is_display_added(display))
+ if (!display || !is_display_active(display))
return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
+ mutex_lock(&psp->dtm_context.mutex);
+
memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2;
@@ -66,34 +69,33 @@ enum mod_hdcp_status mod_hdcp_remove_display_from_topology(
psp_dtm_invoke(psp, dtm_cmd->cmd_id);
- if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE;
+ if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) {
+ status = MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE;
+ } else {
+ display->state = MOD_HDCP_DISPLAY_ACTIVE;
+ HDCP_TOP_REMOVE_DISPLAY_TRACE(hdcp, display->index);
+ }
- display->state = MOD_HDCP_DISPLAY_ACTIVE;
- HDCP_TOP_REMOVE_DISPLAY_TRACE(hdcp, display->index);
-
- return MOD_HDCP_STATUS_SUCCESS;
-
+ mutex_unlock(&psp->dtm_context.mutex);
+ return status;
}
enum mod_hdcp_status mod_hdcp_add_display_to_topology(struct mod_hdcp *hdcp,
- uint8_t index)
+ struct mod_hdcp_display *display)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_dtm_shared_memory *dtm_cmd;
- struct mod_hdcp_display *display =
- get_active_display_at_index(hdcp, index);
struct mod_hdcp_link *link = &hdcp->connection.link;
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
if (!psp->dtm_context.dtm_initialized) {
DRM_ERROR("Failed to add display topology, DTM TA is not initialized.");
+ display->state = MOD_HDCP_DISPLAY_INACTIVE;
return MOD_HDCP_STATUS_FAILURE;
}
- if (!display || is_display_added(display))
- return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE;
-
dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf;
+ mutex_lock(&psp->dtm_context.mutex);
memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2;
@@ -113,21 +115,24 @@ enum mod_hdcp_status mod_hdcp_add_display_to_topology(struct mod_hdcp *hdcp,
psp_dtm_invoke(psp, dtm_cmd->cmd_id);
- if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE;
-
- display->state = MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED;
- HDCP_TOP_ADD_DISPLAY_TRACE(hdcp, display->index);
+ if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) {
+ display->state = MOD_HDCP_DISPLAY_INACTIVE;
+ status = MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE;
+ } else {
+ HDCP_TOP_ADD_DISPLAY_TRACE(hdcp, display->index);
+ }
- return MOD_HDCP_STATUS_SUCCESS;
+ mutex_unlock(&psp->dtm_context.mutex);
+ return status;
}
enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
- struct mod_hdcp_display *display = get_first_added_display(hdcp);
+ struct mod_hdcp_display *display = get_first_active_display(hdcp);
struct ta_hdcp_shared_memory *hdcp_cmd;
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
if (!psp->hdcp_context.hdcp_initialized) {
DRM_ERROR("Failed to create hdcp session. HDCP TA is not initialized.");
@@ -135,6 +140,8 @@ enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp)
}
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+
+ mutex_lock(&psp->hdcp_context.mutex);
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
hdcp_cmd->in_msg.hdcp1_create_session.display_handle = display->index;
@@ -144,16 +151,18 @@ enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp)
hdcp->auth.id = hdcp_cmd->out_msg.hdcp1_create_session.session_handle;
- if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE;
-
- hdcp->auth.msg.hdcp1.ainfo = hdcp_cmd->out_msg.hdcp1_create_session.ainfo_primary;
- memcpy(hdcp->auth.msg.hdcp1.aksv, hdcp_cmd->out_msg.hdcp1_create_session.aksv_primary,
- sizeof(hdcp->auth.msg.hdcp1.aksv));
- memcpy(hdcp->auth.msg.hdcp1.an, hdcp_cmd->out_msg.hdcp1_create_session.an_primary,
- sizeof(hdcp->auth.msg.hdcp1.an));
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) {
+ status = MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE;
+ } else {
+ hdcp->auth.msg.hdcp1.ainfo = hdcp_cmd->out_msg.hdcp1_create_session.ainfo_primary;
+ memcpy(hdcp->auth.msg.hdcp1.aksv, hdcp_cmd->out_msg.hdcp1_create_session.aksv_primary,
+ sizeof(hdcp->auth.msg.hdcp1.aksv));
+ memcpy(hdcp->auth.msg.hdcp1.an, hdcp_cmd->out_msg.hdcp1_create_session.an_primary,
+ sizeof(hdcp->auth.msg.hdcp1.an));
+ }
- return MOD_HDCP_STATUS_SUCCESS;
+ mutex_unlock(&psp->hdcp_context.mutex);
+ return status;
}
enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp)
@@ -162,7 +171,9 @@ enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp)
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
uint8_t i = 0;
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -171,27 +182,30 @@ enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp)
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
- if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE;
-
- HDCP_TOP_HDCP1_DESTROY_SESSION_TRACE(hdcp);
- for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
- if (is_display_encryption_enabled(
- &hdcp->displays[i])) {
- hdcp->displays[i].state =
- MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED;
- HDCP_HDCP1_DISABLED_TRACE(hdcp,
- hdcp->displays[i].index);
- }
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) {
+ status = MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE;
+ } else {
+ HDCP_TOP_HDCP1_DESTROY_SESSION_TRACE(hdcp);
+ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
+ if (is_display_encryption_enabled(&hdcp->displays[i])) {
+ hdcp->displays[i].state =
+ MOD_HDCP_DISPLAY_ACTIVE;
+ HDCP_HDCP1_DISABLED_TRACE(
+ hdcp, hdcp->displays[i].index);
+ }
+ }
- return MOD_HDCP_STATUS_SUCCESS;
+ mutex_unlock(&psp->hdcp_context.mutex);
+ return status;
}
enum mod_hdcp_status mod_hdcp_hdcp1_validate_rx(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -206,10 +220,9 @@ enum mod_hdcp_status mod_hdcp_hdcp1_validate_rx(struct mod_hdcp *hdcp)
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
- if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE;
-
- if (hdcp_cmd->out_msg.hdcp1_first_part_authentication.authentication_status ==
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) {
+ status = MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE;
+ } else if (hdcp_cmd->out_msg.hdcp1_first_part_authentication.authentication_status ==
TA_HDCP_AUTHENTICATION_STATUS__HDCP1_FIRST_PART_COMPLETE) {
/* needs second part of authentication */
hdcp->connection.is_repeater = 1;
@@ -219,20 +232,22 @@ enum mod_hdcp_status mod_hdcp_hdcp1_validate_rx(struct mod_hdcp *hdcp)
} else if (hdcp_cmd->out_msg.hdcp1_first_part_authentication.authentication_status ==
TA_HDCP_AUTHENTICATION_STATUS__HDCP1_KSV_REVOKED) {
hdcp->connection.is_hdcp1_revoked = 1;
- return MOD_HDCP_STATUS_HDCP1_BKSV_REVOKED;
+ status = MOD_HDCP_STATUS_HDCP1_BKSV_REVOKED;
} else
- return MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE;
-
+ status = MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE;
- return MOD_HDCP_STATUS_SUCCESS;
+ mutex_unlock(&psp->hdcp_context.mutex);
+ return status;
}
enum mod_hdcp_status mod_hdcp_hdcp1_enable_encryption(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
- struct mod_hdcp_display *display = get_first_added_display(hdcp);
+ struct mod_hdcp_display *display = get_first_active_display(hdcp);
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -241,14 +256,15 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_encryption(struct mod_hdcp *hdcp)
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
- if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION;
-
- if (!is_dp_mst_hdcp(hdcp)) {
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) {
+ status = MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION_FAILURE;
+ } else if (!is_dp_mst_hdcp(hdcp)) {
display->state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
HDCP_HDCP1_ENABLED_TRACE(hdcp, display->index);
}
- return MOD_HDCP_STATUS_SUCCESS;
+
+ mutex_unlock(&psp->hdcp_context.mutex);
+ return status;
}
enum mod_hdcp_status mod_hdcp_hdcp1_validate_ksvlist_vp(struct mod_hdcp *hdcp)
@@ -257,6 +273,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_validate_ksvlist_vp(struct mod_hdcp *hdcp)
struct ta_hdcp_shared_memory *hdcp_cmd;
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -287,6 +304,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_validate_ksvlist_vp(struct mod_hdcp *hdcp)
status = MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE;
}
+ mutex_unlock(&psp->hdcp_context.mutex);
return status;
}
@@ -296,14 +314,15 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(struct mod_hdcp
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
int i = 0;
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
- if (hdcp->displays[i].state != MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED ||
- hdcp->displays[i].adjust.disable)
- continue;
+ if (hdcp->displays[i].adjust.disable || hdcp->displays[i].state != MOD_HDCP_DISPLAY_ACTIVE)
+ continue;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -313,21 +332,26 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(struct mod_hdcp
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
- if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE;
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) {
+ status = MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE;
+ break;
+ }
hdcp->displays[i].state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
HDCP_HDCP1_ENABLED_TRACE(hdcp, hdcp->displays[i].index);
}
- return MOD_HDCP_STATUS_SUCCESS;
+ mutex_unlock(&psp->hdcp_context.mutex);
+ return status;
}
enum mod_hdcp_status mod_hdcp_hdcp1_link_maintenance(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -339,12 +363,12 @@ enum mod_hdcp_status mod_hdcp_hdcp1_link_maintenance(struct mod_hdcp *hdcp)
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
- if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE;
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS ||
+ hdcp_cmd->out_msg.hdcp1_get_encryption_status.protection_level != 1)
+ status = MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE;
- return (hdcp_cmd->out_msg.hdcp1_get_encryption_status.protection_level == 1)
- ? MOD_HDCP_STATUS_SUCCESS
- : MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE;
+ mutex_unlock(&psp->hdcp_context.mutex);
+ return status;
}
enum mod_hdcp_status mod_hdcp_hdcp1_get_link_encryption_status(struct mod_hdcp *hdcp,
@@ -364,19 +388,23 @@ enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
- struct mod_hdcp_display *display = get_first_added_display(hdcp);
+ struct mod_hdcp_display *display = get_first_active_display(hdcp);
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
if (!psp->hdcp_context.hdcp_initialized) {
DRM_ERROR("Failed to create hdcp session, HDCP TA is not initialized");
return MOD_HDCP_STATUS_FAILURE;
}
- hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
- memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
-
if (!display)
return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
+ mutex_lock(&psp->hdcp_context.mutex);
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
hdcp_cmd->in_msg.hdcp2_create_session_v2.display_handle = display->index;
if (hdcp->connection.link.adjust.hdcp2.force_type == MOD_HDCP_FORCE_TYPE_0)
@@ -393,12 +421,14 @@ enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp)
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
- if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_HDCP2_CREATE_SESSION_FAILURE;
- hdcp->auth.id = hdcp_cmd->out_msg.hdcp2_create_session_v2.session_handle;
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ status = MOD_HDCP_STATUS_HDCP2_CREATE_SESSION_FAILURE;
+ else
+ hdcp->auth.id = hdcp_cmd->out_msg.hdcp2_create_session_v2.session_handle;
- return MOD_HDCP_STATUS_SUCCESS;
+ mutex_unlock(&psp->hdcp_context.mutex);
+ return status;
}
enum mod_hdcp_status mod_hdcp_hdcp2_destroy_session(struct mod_hdcp *hdcp)
@@ -406,7 +436,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_destroy_session(struct mod_hdcp *hdcp)
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
uint8_t i = 0;
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -415,20 +447,21 @@ enum mod_hdcp_status mod_hdcp_hdcp2_destroy_session(struct mod_hdcp *hdcp)
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
- if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE;
-
- HDCP_TOP_HDCP2_DESTROY_SESSION_TRACE(hdcp);
- for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
- if (is_display_encryption_enabled(
- &hdcp->displays[i])) {
- hdcp->displays[i].state =
- MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED;
- HDCP_HDCP2_DISABLED_TRACE(hdcp,
- hdcp->displays[i].index);
- }
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) {
+ status = MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE;
+ } else {
+ HDCP_TOP_HDCP2_DESTROY_SESSION_TRACE(hdcp);
+ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
+ if (is_display_encryption_enabled(&hdcp->displays[i])) {
+ hdcp->displays[i].state =
+ MOD_HDCP_DISPLAY_ACTIVE;
+ HDCP_HDCP2_DISABLED_TRACE(
+ hdcp, hdcp->displays[i].index);
+ }
+ }
- return MOD_HDCP_STATUS_SUCCESS;
+ mutex_unlock(&psp->hdcp_context.mutex);
+ return status;
}
enum mod_hdcp_status mod_hdcp_hdcp2_prepare_ake_init(struct mod_hdcp *hdcp)
@@ -437,7 +470,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_ake_init(struct mod_hdcp *hdcp)
struct ta_hdcp_shared_memory *hdcp_cmd;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -452,12 +487,13 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_ake_init(struct mod_hdcp *hdcp)
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_HDCP2_PREP_AKE_INIT_FAILURE;
-
- memcpy(&hdcp->auth.msg.hdcp2.ake_init[0], &msg_out->prepare.transmitter_message[0],
- sizeof(hdcp->auth.msg.hdcp2.ake_init));
+ status = MOD_HDCP_STATUS_HDCP2_PREP_AKE_INIT_FAILURE;
+ else
+ memcpy(&hdcp->auth.msg.hdcp2.ake_init[0], &msg_out->prepare.transmitter_message[0],
+ sizeof(hdcp->auth.msg.hdcp2.ake_init));
- return MOD_HDCP_STATUS_SUCCESS;
+ mutex_unlock(&psp->hdcp_context.mutex);
+ return status;
}
enum mod_hdcp_status mod_hdcp_hdcp2_validate_ake_cert(struct mod_hdcp *hdcp)
@@ -466,7 +502,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_ake_cert(struct mod_hdcp *hdcp)
struct ta_hdcp_shared_memory *hdcp_cmd;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -488,26 +526,32 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_ake_cert(struct mod_hdcp *hdcp)
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
- if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE;
-
- memcpy(hdcp->auth.msg.hdcp2.ake_no_stored_km, &msg_out->prepare.transmitter_message[0],
- sizeof(hdcp->auth.msg.hdcp2.ake_no_stored_km));
-
- memcpy(hdcp->auth.msg.hdcp2.ake_stored_km,
- &msg_out->prepare.transmitter_message[sizeof(hdcp->auth.msg.hdcp2.ake_no_stored_km)],
- sizeof(hdcp->auth.msg.hdcp2.ake_stored_km));
-
- if (msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS) {
- hdcp->connection.is_km_stored = msg_out->process.is_km_stored ? 1 : 0;
- hdcp->connection.is_repeater = msg_out->process.is_repeater ? 1 : 0;
- return MOD_HDCP_STATUS_SUCCESS;
- } else if (msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__RECEIVERID_REVOKED) {
- hdcp->connection.is_hdcp2_revoked = 1;
- return MOD_HDCP_STATUS_HDCP2_AKE_CERT_REVOKED;
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) {
+ status = MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE;
+ } else {
+ memcpy(hdcp->auth.msg.hdcp2.ake_no_stored_km,
+ &msg_out->prepare.transmitter_message[0],
+ sizeof(hdcp->auth.msg.hdcp2.ake_no_stored_km));
+
+ memcpy(hdcp->auth.msg.hdcp2.ake_stored_km,
+ &msg_out->prepare.transmitter_message[sizeof(hdcp->auth.msg.hdcp2.ake_no_stored_km)],
+ sizeof(hdcp->auth.msg.hdcp2.ake_stored_km));
+
+ if (msg_out->process.msg1_status ==
+ TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS) {
+ hdcp->connection.is_km_stored =
+ msg_out->process.is_km_stored ? 1 : 0;
+ hdcp->connection.is_repeater =
+ msg_out->process.is_repeater ? 1 : 0;
+ status = MOD_HDCP_STATUS_SUCCESS;
+ } else if (msg_out->process.msg1_status ==
+ TA_HDCP2_MSG_AUTHENTICATION_STATUS__RECEIVERID_REVOKED) {
+ hdcp->connection.is_hdcp2_revoked = 1;
+ status = MOD_HDCP_STATUS_HDCP2_AKE_CERT_REVOKED;
+ }
}
-
- return MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE;
+ mutex_unlock(&psp->hdcp_context.mutex);
+ return status;
}
enum mod_hdcp_status mod_hdcp_hdcp2_validate_h_prime(struct mod_hdcp *hdcp)
@@ -516,7 +560,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_h_prime(struct mod_hdcp *hdcp)
struct ta_hdcp_shared_memory *hdcp_cmd;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -543,16 +589,15 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_h_prime(struct mod_hdcp *hdcp)
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE;
-
- if (msg_out->process.msg1_status != TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE;
+ status = MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE;
+ else if (msg_out->process.msg1_status != TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS)
+ status = MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE;
else if (!hdcp->connection.is_km_stored &&
- msg_out->process.msg2_status != TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_HDCP2_VALIDATE_PAIRING_INFO_FAILURE;
-
+ msg_out->process.msg2_status != TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS)
+ status = MOD_HDCP_STATUS_HDCP2_VALIDATE_PAIRING_INFO_FAILURE;
- return MOD_HDCP_STATUS_SUCCESS;
+ mutex_unlock(&psp->hdcp_context.mutex);
+ return status;
}
enum mod_hdcp_status mod_hdcp_hdcp2_prepare_lc_init(struct mod_hdcp *hdcp)
@@ -561,7 +606,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_lc_init(struct mod_hdcp *hdcp)
struct ta_hdcp_shared_memory *hdcp_cmd;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -577,12 +624,13 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_lc_init(struct mod_hdcp *hdcp)
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_HDCP2_PREP_LC_INIT_FAILURE;
-
- memcpy(hdcp->auth.msg.hdcp2.lc_init, &msg_out->prepare.transmitter_message[0],
- sizeof(hdcp->auth.msg.hdcp2.lc_init));
+ status = MOD_HDCP_STATUS_HDCP2_PREP_LC_INIT_FAILURE;
+ else
+ memcpy(hdcp->auth.msg.hdcp2.lc_init, &msg_out->prepare.transmitter_message[0],
+ sizeof(hdcp->auth.msg.hdcp2.lc_init));
- return MOD_HDCP_STATUS_SUCCESS;
+ mutex_unlock(&psp->hdcp_context.mutex);
+ return status;
}
enum mod_hdcp_status mod_hdcp_hdcp2_validate_l_prime(struct mod_hdcp *hdcp)
@@ -591,7 +639,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_l_prime(struct mod_hdcp *hdcp)
struct ta_hdcp_shared_memory *hdcp_cmd;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -610,13 +660,12 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_l_prime(struct mod_hdcp *hdcp)
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
- if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE;
-
- if (msg_out->process.msg1_status != TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE;
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS ||
+ msg_out->process.msg1_status != TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS)
+ status = MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE;
- return MOD_HDCP_STATUS_SUCCESS;
+ mutex_unlock(&psp->hdcp_context.mutex);
+ return status;
}
enum mod_hdcp_status mod_hdcp_hdcp2_prepare_eks(struct mod_hdcp *hdcp)
@@ -625,7 +674,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_eks(struct mod_hdcp *hdcp)
struct ta_hdcp_shared_memory *hdcp_cmd;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -642,48 +693,55 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_eks(struct mod_hdcp *hdcp)
hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2;
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
- if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_HDCP2_PREP_EKS_FAILURE;
-
- memcpy(hdcp->auth.msg.hdcp2.ske_eks, &msg_out->prepare.transmitter_message[0],
- sizeof(hdcp->auth.msg.hdcp2.ske_eks));
- msg_out->prepare.msg1_desc.msg_size = sizeof(hdcp->auth.msg.hdcp2.ske_eks);
-
- if (is_dp_hdcp(hdcp)) {
- memcpy(hdcp->auth.msg.hdcp2.content_stream_type_dp,
- &msg_out->prepare.transmitter_message[sizeof(hdcp->auth.msg.hdcp2.ske_eks)],
- sizeof(hdcp->auth.msg.hdcp2.content_stream_type_dp));
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) {
+ status = MOD_HDCP_STATUS_HDCP2_PREP_EKS_FAILURE;
+ } else {
+ memcpy(hdcp->auth.msg.hdcp2.ske_eks,
+ &msg_out->prepare.transmitter_message[0],
+ sizeof(hdcp->auth.msg.hdcp2.ske_eks));
+ msg_out->prepare.msg1_desc.msg_size =
+ sizeof(hdcp->auth.msg.hdcp2.ske_eks);
+
+ if (is_dp_hdcp(hdcp)) {
+ memcpy(hdcp->auth.msg.hdcp2.content_stream_type_dp,
+ &msg_out->prepare.transmitter_message[sizeof(hdcp->auth.msg.hdcp2.ske_eks)],
+ sizeof(hdcp->auth.msg.hdcp2.content_stream_type_dp));
+ }
}
+ mutex_unlock(&psp->hdcp_context.mutex);
- return MOD_HDCP_STATUS_SUCCESS;
+ return status;
}
enum mod_hdcp_status mod_hdcp_hdcp2_enable_encryption(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
- struct mod_hdcp_display *display = get_first_added_display(hdcp);
-
- hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
- memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+ struct mod_hdcp_display *display = get_first_active_display(hdcp);
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
if (!display)
return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
+ mutex_lock(&psp->hdcp_context.mutex);
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
hdcp_cmd->in_msg.hdcp2_set_encryption.session_handle = hdcp->auth.id;
hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_SET_ENCRYPTION;
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
- if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_HDCP2_ENABLE_ENCRYPTION_FAILURE;
-
- if (!is_dp_mst_hdcp(hdcp)) {
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) {
+ status = MOD_HDCP_STATUS_HDCP2_ENABLE_ENCRYPTION_FAILURE;
+ } else if (!is_dp_mst_hdcp(hdcp)) {
display->state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
HDCP_HDCP2_ENABLED_TRACE(hdcp, display->index);
}
- return MOD_HDCP_STATUS_SUCCESS;
+ mutex_unlock(&psp->hdcp_context.mutex);
+ return status;
}
enum mod_hdcp_status mod_hdcp_hdcp2_validate_rx_id_list(struct mod_hdcp *hdcp)
@@ -692,6 +750,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_rx_id_list(struct mod_hdcp *hdcp)
struct ta_hdcp_shared_memory *hdcp_cmd;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -712,23 +773,26 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_rx_id_list(struct mod_hdcp *hdcp)
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
- if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE;
-
- memcpy(hdcp->auth.msg.hdcp2.repeater_auth_ack, &msg_out->prepare.transmitter_message[0],
- sizeof(hdcp->auth.msg.hdcp2.repeater_auth_ack));
-
- if (msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS) {
- hdcp->connection.is_km_stored = msg_out->process.is_km_stored ? 1 : 0;
- hdcp->connection.is_repeater = msg_out->process.is_repeater ? 1 : 0;
- return MOD_HDCP_STATUS_SUCCESS;
- } else if (msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__RECEIVERID_REVOKED) {
- hdcp->connection.is_hdcp2_revoked = 1;
- return MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED;
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) {
+ status = MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE;
+ } else {
+ memcpy(hdcp->auth.msg.hdcp2.repeater_auth_ack,
+ &msg_out->prepare.transmitter_message[0],
+ sizeof(hdcp->auth.msg.hdcp2.repeater_auth_ack));
+
+ if (msg_out->process.msg1_status ==
+ TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS) {
+ hdcp->connection.is_km_stored = msg_out->process.is_km_stored ? 1 : 0;
+ hdcp->connection.is_repeater = msg_out->process.is_repeater ? 1 : 0;
+ status = MOD_HDCP_STATUS_SUCCESS;
+ } else if (msg_out->process.msg1_status ==
+ TA_HDCP2_MSG_AUTHENTICATION_STATUS__RECEIVERID_REVOKED) {
+ hdcp->connection.is_hdcp2_revoked = 1;
+ status = MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED;
+ }
}
-
-
- return MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE;
+ mutex_unlock(&psp->hdcp_context.mutex);
+ return status;
}
enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption(struct mod_hdcp *hdcp)
@@ -737,7 +801,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption(struct mod_hdcp
struct ta_hdcp_shared_memory *hdcp_cmd;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
uint8_t i;
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -747,9 +813,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption(struct mod_hdcp
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
- if (hdcp->displays[i].state != MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED ||
- hdcp->displays[i].adjust.disable)
- continue;
+ if (hdcp->displays[i].adjust.disable || hdcp->displays[i].state != MOD_HDCP_DISPLAY_ACTIVE)
+ continue;
+
hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.display_handle = hdcp->displays[i].index;
hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.session_handle = hdcp->auth.id;
@@ -763,8 +829,13 @@ enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption(struct mod_hdcp
HDCP_HDCP2_ENABLED_TRACE(hdcp, hdcp->displays[i].index);
}
- return (hdcp_cmd->hdcp_status == TA_HDCP_STATUS__SUCCESS) ? MOD_HDCP_STATUS_SUCCESS
- : MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION;
+ if (hdcp_cmd->hdcp_status == TA_HDCP_STATUS__SUCCESS)
+ status = MOD_HDCP_STATUS_SUCCESS;
+ else
+ status = MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION_FAILURE;
+
+ mutex_unlock(&psp->hdcp_context.mutex);
+ return status;
}
enum mod_hdcp_status mod_hdcp_hdcp2_prepare_stream_management(struct mod_hdcp *hdcp)
@@ -774,7 +845,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_stream_management(struct mod_hdcp *h
struct ta_hdcp_shared_memory *hdcp_cmd;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -789,15 +862,17 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_stream_management(struct mod_hdcp *h
hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2;
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
- if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE;
-
- hdcp->auth.msg.hdcp2.stream_manage_size = msg_out->prepare.msg1_desc.msg_size;
-
- memcpy(hdcp->auth.msg.hdcp2.repeater_auth_stream_manage, &msg_out->prepare.transmitter_message[0],
- sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_manage));
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) {
+ status = MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE;
+ } else {
+ hdcp->auth.msg.hdcp2.stream_manage_size = msg_out->prepare.msg1_desc.msg_size;
- return MOD_HDCP_STATUS_SUCCESS;
+ memcpy(hdcp->auth.msg.hdcp2.repeater_auth_stream_manage,
+ &msg_out->prepare.transmitter_message[0],
+ sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_manage));
+ }
+ mutex_unlock(&psp->hdcp_context.mutex);
+ return status;
}
enum mod_hdcp_status mod_hdcp_hdcp2_validate_stream_ready(struct mod_hdcp *hdcp)
@@ -806,7 +881,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_stream_ready(struct mod_hdcp *hdcp)
struct ta_hdcp_shared_memory *hdcp_cmd;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -825,38 +902,13 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_stream_ready(struct mod_hdcp *hdcp)
hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2;
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
- return (hdcp_cmd->hdcp_status == TA_HDCP_STATUS__SUCCESS) &&
- (msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS)
- ? MOD_HDCP_STATUS_SUCCESS
- : MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE;
-}
-
-enum mod_hdcp_status mod_hdcp_hdcp2_get_link_encryption_status(struct mod_hdcp *hdcp,
- enum mod_hdcp_encryption_status *encryption_status)
-{
- struct psp_context *psp = hdcp->config.psp.handle;
- struct ta_hdcp_shared_memory *hdcp_cmd;
-
- hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
-
- memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
-
- hdcp_cmd->in_msg.hdcp2_get_encryption_status.session_handle = hdcp->auth.id;
- hdcp_cmd->out_msg.hdcp2_get_encryption_status.protection_level = 0;
- hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_GET_ENCRYPTION_STATUS;
- *encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
-
- psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
-
- if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_FAILURE;
-
- if (hdcp_cmd->out_msg.hdcp2_get_encryption_status.protection_level == 1) {
- if (hdcp_cmd->out_msg.hdcp2_get_encryption_status.hdcp2_type == TA_HDCP2_CONTENT_TYPE__TYPE1)
- *encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON;
- else
- *encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON;
- }
+ if (hdcp_cmd->hdcp_status == TA_HDCP_STATUS__SUCCESS &&
+ msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS)
+ status = MOD_HDCP_STATUS_SUCCESS;
+ else
+ status = MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE;
- return MOD_HDCP_STATUS_SUCCESS;
+ mutex_unlock(&psp->hdcp_context.mutex);
+ return status;
}
+
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
index dbe7835aabcf..0ba3cf7f336a 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
@@ -83,6 +83,8 @@ struct mod_freesync_config {
bool btr;
unsigned int min_refresh_in_uhz;
unsigned int max_refresh_in_uhz;
+ unsigned int fixed_refresh_in_uhz;
+
};
struct mod_vrr_params_btr {
@@ -112,6 +114,7 @@ struct mod_vrr_params {
uint32_t max_duration_in_us;
uint32_t max_refresh_in_uhz;
uint32_t min_duration_in_us;
+ uint32_t fixed_refresh_in_uhz;
struct dc_crtc_timing_adjust adjust;
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
index c088602bc1a0..eed560eecbab 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
@@ -60,7 +60,7 @@ enum mod_hdcp_status {
MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY,
MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE,
MOD_HDCP_STATUS_HDCP1_KSV_LIST_REVOKED,
- MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION,
+ MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION_FAILURE,
MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE,
MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE,
MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE,
@@ -90,7 +90,7 @@ enum mod_hdcp_status {
MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY,
MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE,
MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED,
- MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION,
+ MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION_FAILURE,
MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING,
MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE,
MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE,
@@ -117,7 +117,6 @@ enum mod_hdcp_operation_mode {
enum mod_hdcp_display_state {
MOD_HDCP_DISPLAY_INACTIVE = 0,
MOD_HDCP_DISPLAY_ACTIVE,
- MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED,
MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED
};
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h b/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h
index fe2117904329..198c0e64d13a 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h
@@ -40,8 +40,9 @@ enum color_transfer_func {
enum vrr_packet_type {
PACKET_TYPE_VRR,
- PACKET_TYPE_FS1,
- PACKET_TYPE_FS2,
+ PACKET_TYPE_FS_V1,
+ PACKET_TYPE_FS_V2,
+ PACKET_TYPE_FS_V3,
PACKET_TYPE_VTEM
};
diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
index cff3ab15fc0c..7cd8a43d1889 100644
--- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
+++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
@@ -144,7 +144,7 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
}
/*VSC packet set to 2 when DP revision >= 1.2*/
- if (stream->psr_version != 0)
+ if (stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED)
vsc_packet_revision = vsc_packet_rev2;
/* Update to revision 5 for extended colorimetry support */
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
index e75a4bb94488..8c37bcc27132 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -24,6 +24,9 @@
#include "power_helpers.h"
#include "dc/inc/hw/dmcu.h"
+#include "dc/inc/hw/abm.h"
+#include "dc.h"
+#include "core_types.h"
#define DIV_ROUNDUP(a, b) (((a)+((b)/2))/(b))
@@ -237,7 +240,7 @@ static void fill_backlight_transform_table(struct dmcu_iram_parameters params,
}
static void fill_backlight_transform_table_v_2_2(struct dmcu_iram_parameters params,
- struct iram_table_v_2_2 *table)
+ struct iram_table_v_2_2 *table, bool big_endian)
{
unsigned int i;
unsigned int num_entries = NUM_BL_CURVE_SEGS;
@@ -261,10 +264,12 @@ static void fill_backlight_transform_table_v_2_2(struct dmcu_iram_parameters par
lut_index = (params.backlight_lut_array_size - 1) * i / (num_entries - 1);
ASSERT(lut_index < params.backlight_lut_array_size);
- table->backlight_thresholds[i] =
- cpu_to_be16(DIV_ROUNDUP((i * 65536), num_entries));
- table->backlight_offsets[i] =
- cpu_to_be16(params.backlight_lut_array[lut_index]);
+ table->backlight_thresholds[i] = (big_endian) ?
+ cpu_to_be16(DIV_ROUNDUP((i * 65536), num_entries)) :
+ cpu_to_le16(DIV_ROUNDUP((i * 65536), num_entries));
+ table->backlight_offsets[i] = (big_endian) ?
+ cpu_to_be16(params.backlight_lut_array[lut_index]) :
+ cpu_to_le16(params.backlight_lut_array[lut_index]);
}
}
@@ -584,18 +589,18 @@ void fill_iram_v_2_2(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parame
ram_table->crgb_slope[7] = cpu_to_be16(0x1910);
fill_backlight_transform_table_v_2_2(
- params, ram_table);
+ params, ram_table, true);
}
-void fill_iram_v_2_3(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parameters params)
+void fill_iram_v_2_3(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parameters params, bool big_endian)
{
unsigned int i, j;
unsigned int set = params.set;
ram_table->flags = 0x0;
-
- ram_table->min_abm_backlight =
- cpu_to_be16(params.min_abm_backlight);
+ ram_table->min_abm_backlight = (big_endian) ?
+ cpu_to_be16(params.min_abm_backlight) :
+ cpu_to_le16(params.min_abm_backlight);
for (i = 0; i < NUM_AGGR_LEVEL; i++) {
ram_table->hybrid_factor[i] = abm_settings[set][i].brightness_gain;
@@ -619,33 +624,51 @@ void fill_iram_v_2_3(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parame
ram_table->iir_curve[4] = 0x65;
//Gamma 2.2
- ram_table->crgb_thresh[0] = cpu_to_be16(0x127c);
- ram_table->crgb_thresh[1] = cpu_to_be16(0x151b);
- ram_table->crgb_thresh[2] = cpu_to_be16(0x17d5);
- ram_table->crgb_thresh[3] = cpu_to_be16(0x1a56);
- ram_table->crgb_thresh[4] = cpu_to_be16(0x1c83);
- ram_table->crgb_thresh[5] = cpu_to_be16(0x1e72);
- ram_table->crgb_thresh[6] = cpu_to_be16(0x20f0);
- ram_table->crgb_thresh[7] = cpu_to_be16(0x232b);
- ram_table->crgb_offset[0] = cpu_to_be16(0x2999);
- ram_table->crgb_offset[1] = cpu_to_be16(0x3999);
- ram_table->crgb_offset[2] = cpu_to_be16(0x4666);
- ram_table->crgb_offset[3] = cpu_to_be16(0x5999);
- ram_table->crgb_offset[4] = cpu_to_be16(0x6333);
- ram_table->crgb_offset[5] = cpu_to_be16(0x7800);
- ram_table->crgb_offset[6] = cpu_to_be16(0x8c00);
- ram_table->crgb_offset[7] = cpu_to_be16(0xa000);
- ram_table->crgb_slope[0] = cpu_to_be16(0x3609);
- ram_table->crgb_slope[1] = cpu_to_be16(0x2dfa);
- ram_table->crgb_slope[2] = cpu_to_be16(0x27ea);
- ram_table->crgb_slope[3] = cpu_to_be16(0x235d);
- ram_table->crgb_slope[4] = cpu_to_be16(0x2042);
- ram_table->crgb_slope[5] = cpu_to_be16(0x1dc3);
- ram_table->crgb_slope[6] = cpu_to_be16(0x1b1a);
- ram_table->crgb_slope[7] = cpu_to_be16(0x1910);
+ ram_table->crgb_thresh[0] = (big_endian) ? cpu_to_be16(0x127c) : cpu_to_le16(0x127c);
+ ram_table->crgb_thresh[1] = (big_endian) ? cpu_to_be16(0x151b) : cpu_to_le16(0x151b);
+ ram_table->crgb_thresh[2] = (big_endian) ? cpu_to_be16(0x17d5) : cpu_to_le16(0x17d5);
+ ram_table->crgb_thresh[3] = (big_endian) ? cpu_to_be16(0x1a56) : cpu_to_le16(0x1a56);
+ ram_table->crgb_thresh[4] = (big_endian) ? cpu_to_be16(0x1c83) : cpu_to_le16(0x1c83);
+ ram_table->crgb_thresh[5] = (big_endian) ? cpu_to_be16(0x1e72) : cpu_to_le16(0x1e72);
+ ram_table->crgb_thresh[6] = (big_endian) ? cpu_to_be16(0x20f0) : cpu_to_le16(0x20f0);
+ ram_table->crgb_thresh[7] = (big_endian) ? cpu_to_be16(0x232b) : cpu_to_le16(0x232b);
+ ram_table->crgb_offset[0] = (big_endian) ? cpu_to_be16(0x2999) : cpu_to_le16(0x2999);
+ ram_table->crgb_offset[1] = (big_endian) ? cpu_to_be16(0x3999) : cpu_to_le16(0x3999);
+ ram_table->crgb_offset[2] = (big_endian) ? cpu_to_be16(0x4666) : cpu_to_le16(0x4666);
+ ram_table->crgb_offset[3] = (big_endian) ? cpu_to_be16(0x5999) : cpu_to_le16(0x5999);
+ ram_table->crgb_offset[4] = (big_endian) ? cpu_to_be16(0x6333) : cpu_to_le16(0x6333);
+ ram_table->crgb_offset[5] = (big_endian) ? cpu_to_be16(0x7800) : cpu_to_le16(0x7800);
+ ram_table->crgb_offset[6] = (big_endian) ? cpu_to_be16(0x8c00) : cpu_to_le16(0x8c00);
+ ram_table->crgb_offset[7] = (big_endian) ? cpu_to_be16(0xa000) : cpu_to_le16(0xa000);
+ ram_table->crgb_slope[0] = (big_endian) ? cpu_to_be16(0x3609) : cpu_to_le16(0x3609);
+ ram_table->crgb_slope[1] = (big_endian) ? cpu_to_be16(0x2dfa) : cpu_to_le16(0x2dfa);
+ ram_table->crgb_slope[2] = (big_endian) ? cpu_to_be16(0x27ea) : cpu_to_le16(0x27ea);
+ ram_table->crgb_slope[3] = (big_endian) ? cpu_to_be16(0x235d) : cpu_to_le16(0x235d);
+ ram_table->crgb_slope[4] = (big_endian) ? cpu_to_be16(0x2042) : cpu_to_le16(0x2042);
+ ram_table->crgb_slope[5] = (big_endian) ? cpu_to_be16(0x1dc3) : cpu_to_le16(0x1dc3);
+ ram_table->crgb_slope[6] = (big_endian) ? cpu_to_be16(0x1b1a) : cpu_to_le16(0x1b1a);
+ ram_table->crgb_slope[7] = (big_endian) ? cpu_to_be16(0x1910) : cpu_to_le16(0x1910);
fill_backlight_transform_table_v_2_2(
- params, ram_table);
+ params, ram_table, big_endian);
+}
+
+bool dmub_init_abm_config(struct abm *abm,
+ struct dmcu_iram_parameters params)
+{
+ unsigned char ram_table[IRAM_SIZE];
+ bool result = false;
+
+ if (abm == NULL)
+ return false;
+
+ memset(&ram_table, 0, sizeof(ram_table));
+
+ fill_iram_v_2_3((struct iram_table_v_2_2 *)ram_table, params, false);
+ result = abm->funcs->init_abm_config(
+ abm, (char *)(&ram_table), IRAM_RESERVE_AREA_START_V2_2);
+
+ return result;
}
bool dmcu_load_iram(struct dmcu *dmcu,
@@ -657,17 +680,17 @@ bool dmcu_load_iram(struct dmcu *dmcu,
if (dmcu == NULL)
return false;
- if (!dmcu->funcs->is_dmcu_initialized(dmcu))
+ if (dmcu && !dmcu->funcs->is_dmcu_initialized(dmcu))
return true;
memset(&ram_table, 0, sizeof(ram_table));
if (dmcu->dmcu_version.abm_version == 0x24) {
- fill_iram_v_2_3((struct iram_table_v_2_2 *)ram_table, params);
- result = dmcu->funcs->load_iram(
- dmcu, 0, (char *)(&ram_table), IRAM_RESERVE_AREA_START_V2_2);
+ fill_iram_v_2_3((struct iram_table_v_2_2 *)ram_table, params, true);
+ result = dmcu->funcs->load_iram(
+ dmcu, 0, (char *)(&ram_table), IRAM_RESERVE_AREA_START_V2_2);
} else if (dmcu->dmcu_version.abm_version == 0x23) {
- fill_iram_v_2_3((struct iram_table_v_2_2 *)ram_table, params);
+ fill_iram_v_2_3((struct iram_table_v_2_2 *)ram_table, params, true);
result = dmcu->funcs->load_iram(
dmcu, 0, (char *)(&ram_table), IRAM_RESERVE_AREA_START_V2_2);
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
index e54157026330..46fbca2e2cd1 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
@@ -26,6 +26,7 @@
#define MODULES_POWER_POWER_HELPERS_H_
#include "dc/inc/hw/dmcu.h"
+#include "dc/inc/hw/abm.h"
enum abm_defines {
@@ -44,5 +45,7 @@ struct dmcu_iram_parameters {
bool dmcu_load_iram(struct dmcu *dmcu,
struct dmcu_iram_parameters params);
+bool dmub_init_abm_config(struct abm *abm,
+ struct dmcu_iram_parameters params);
#endif /* MODULES_POWER_POWER_HELPERS_H_ */
diff --git a/drivers/gpu/drm/amd/display/modules/stats/stats.c b/drivers/gpu/drm/amd/display/modules/stats/stats.c
deleted file mode 100644
index 03121ca64fe4..000000000000
--- a/drivers/gpu/drm/amd/display/modules/stats/stats.c
+++ /dev/null
@@ -1,448 +0,0 @@
-/*
- * Copyright 2016 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-
-#include "mod_stats.h"
-#include "dm_services.h"
-#include "dc.h"
-#include "core_types.h"
-
-#define DAL_STATS_ENABLE_REGKEY "DalStatsEnable"
-#define DAL_STATS_ENABLE_REGKEY_DEFAULT 0x00000000
-#define DAL_STATS_ENABLE_REGKEY_ENABLED 0x00000001
-
-#define DAL_STATS_ENTRIES_REGKEY "DalStatsEntries"
-#define DAL_STATS_ENTRIES_REGKEY_DEFAULT 0x00350000
-#define DAL_STATS_ENTRIES_REGKEY_MAX 0x01000000
-
-#define DAL_STATS_EVENT_ENTRIES_DEFAULT 0x00000100
-
-#define MOD_STATS_NUM_VSYNCS 5
-#define MOD_STATS_EVENT_STRING_MAX 512
-
-struct stats_time_cache {
- unsigned int entry_id;
-
- unsigned long flip_timestamp_in_ns;
- unsigned long vupdate_timestamp_in_ns;
-
- unsigned int render_time_in_us;
- unsigned int avg_render_time_in_us_last_ten;
- unsigned int v_sync_time_in_us[MOD_STATS_NUM_VSYNCS];
- unsigned int num_vsync_between_flips;
-
- unsigned int flip_to_vsync_time_in_us;
- unsigned int vsync_to_flip_time_in_us;
-
- unsigned int min_window;
- unsigned int max_window;
- unsigned int v_total_min;
- unsigned int v_total_max;
- unsigned int event_triggers;
-
- unsigned int lfc_mid_point_in_us;
- unsigned int num_frames_inserted;
- unsigned int inserted_duration_in_us;
-
- unsigned int flags;
-};
-
-struct stats_event_cache {
- unsigned int entry_id;
- char event_string[MOD_STATS_EVENT_STRING_MAX];
-};
-
-struct core_stats {
- struct mod_stats public;
- struct dc *dc;
-
- bool enabled;
- unsigned int entries;
- unsigned int event_entries;
- unsigned int entry_id;
-
- struct stats_time_cache *time;
- unsigned int index;
-
- struct stats_event_cache *events;
- unsigned int event_index;
-
-};
-
-#define MOD_STATS_TO_CORE(mod_stats)\
- container_of(mod_stats, struct core_stats, public)
-
-bool mod_stats_init(struct mod_stats *mod_stats)
-{
- bool result = false;
- struct core_stats *core_stats = NULL;
- struct dc *dc = NULL;
-
- if (mod_stats == NULL)
- return false;
-
- core_stats = MOD_STATS_TO_CORE(mod_stats);
- dc = core_stats->dc;
-
- return result;
-}
-
-struct mod_stats *mod_stats_create(struct dc *dc)
-{
- struct core_stats *core_stats = NULL;
- struct persistent_data_flag flag;
- unsigned int reg_data;
- int i = 0;
-
- if (dc == NULL)
- goto fail_construct;
-
- core_stats = kzalloc(sizeof(struct core_stats), GFP_KERNEL);
-
- if (core_stats == NULL)
- goto fail_construct;
-
- core_stats->dc = dc;
-
- core_stats->enabled = DAL_STATS_ENABLE_REGKEY_DEFAULT;
- if (dm_read_persistent_data(dc->ctx, NULL, NULL,
- DAL_STATS_ENABLE_REGKEY,
- &reg_data, sizeof(unsigned int), &flag))
- core_stats->enabled = reg_data;
-
- if (core_stats->enabled) {
- core_stats->entries = DAL_STATS_ENTRIES_REGKEY_DEFAULT;
- if (dm_read_persistent_data(dc->ctx, NULL, NULL,
- DAL_STATS_ENTRIES_REGKEY,
- &reg_data, sizeof(unsigned int), &flag)) {
- if (reg_data > DAL_STATS_ENTRIES_REGKEY_MAX)
- core_stats->entries = DAL_STATS_ENTRIES_REGKEY_MAX;
- else
- core_stats->entries = reg_data;
- }
- core_stats->time = kcalloc(core_stats->entries,
- sizeof(struct stats_time_cache),
- GFP_KERNEL);
-
- if (core_stats->time == NULL)
- goto fail_construct_time;
-
- core_stats->event_entries = DAL_STATS_EVENT_ENTRIES_DEFAULT;
- core_stats->events = kcalloc(core_stats->event_entries,
- sizeof(struct stats_event_cache),
- GFP_KERNEL);
-
- if (core_stats->events == NULL)
- goto fail_construct_events;
-
- } else {
- core_stats->entries = 0;
- }
-
- /* Purposely leave index 0 unused so we don't need special logic to
- * handle calculation cases that depend on previous flip data.
- */
- core_stats->index = 1;
- core_stats->event_index = 0;
-
- // Keeps track of ordering within the different stats structures
- core_stats->entry_id = 0;
-
- return &core_stats->public;
-
-fail_construct_events:
- kfree(core_stats->time);
-
-fail_construct_time:
- kfree(core_stats);
-
-fail_construct:
- return NULL;
-}
-
-void mod_stats_destroy(struct mod_stats *mod_stats)
-{
- if (mod_stats != NULL) {
- struct core_stats *core_stats = MOD_STATS_TO_CORE(mod_stats);
-
- kfree(core_stats->time);
- kfree(core_stats->events);
- kfree(core_stats);
- }
-}
-
-void mod_stats_dump(struct mod_stats *mod_stats)
-{
- struct dc *dc = NULL;
- struct dal_logger *logger = NULL;
- struct core_stats *core_stats = NULL;
- struct stats_time_cache *time = NULL;
- struct stats_event_cache *events = NULL;
- unsigned int time_index = 1;
- unsigned int event_index = 0;
- unsigned int index = 0;
- struct log_entry log_entry;
-
- if (mod_stats == NULL)
- return;
-
- core_stats = MOD_STATS_TO_CORE(mod_stats);
- dc = core_stats->dc;
- logger = dc->ctx->logger;
- time = core_stats->time;
- events = core_stats->events;
-
- DISPLAY_STATS_BEGIN(log_entry);
-
- DISPLAY_STATS("==Display Caps==\n");
-
- DISPLAY_STATS("==Display Stats==\n");
-
- DISPLAY_STATS("%10s %10s %10s %10s %10s"
- " %11s %11s %17s %10s %14s"
- " %10s %10s %10s %10s %10s"
- " %10s %10s %10s %10s\n",
- "render", "avgRender",
- "minWindow", "midPoint", "maxWindow",
- "vsyncToFlip", "flipToVsync", "vsyncsBetweenFlip",
- "numFrame", "insertDuration",
- "vTotalMin", "vTotalMax", "eventTrigs",
- "vSyncTime1", "vSyncTime2", "vSyncTime3",
- "vSyncTime4", "vSyncTime5", "flags");
-
- for (int i = 0; i < core_stats->entry_id; i++) {
- if (event_index < core_stats->event_index &&
- i == events[event_index].entry_id) {
- DISPLAY_STATS("==Event==%s\n", events[event_index].event_string);
- event_index++;
- } else if (time_index < core_stats->index &&
- i == time[time_index].entry_id) {
- DISPLAY_STATS("%10u %10u %10u %10u %10u"
- " %11u %11u %17u %10u %14u"
- " %10u %10u %10u %10u %10u"
- " %10u %10u %10u %10u\n",
- time[time_index].render_time_in_us,
- time[time_index].avg_render_time_in_us_last_ten,
- time[time_index].min_window,
- time[time_index].lfc_mid_point_in_us,
- time[time_index].max_window,
- time[time_index].vsync_to_flip_time_in_us,
- time[time_index].flip_to_vsync_time_in_us,
- time[time_index].num_vsync_between_flips,
- time[time_index].num_frames_inserted,
- time[time_index].inserted_duration_in_us,
- time[time_index].v_total_min,
- time[time_index].v_total_max,
- time[time_index].event_triggers,
- time[time_index].v_sync_time_in_us[0],
- time[time_index].v_sync_time_in_us[1],
- time[time_index].v_sync_time_in_us[2],
- time[time_index].v_sync_time_in_us[3],
- time[time_index].v_sync_time_in_us[4],
- time[time_index].flags);
-
- time_index++;
- }
- }
-
- DISPLAY_STATS_END(log_entry);
-}
-
-void mod_stats_reset_data(struct mod_stats *mod_stats)
-{
- struct core_stats *core_stats = NULL;
- struct stats_time_cache *time = NULL;
- unsigned int index = 0;
-
- if (mod_stats == NULL)
- return;
-
- core_stats = MOD_STATS_TO_CORE(mod_stats);
-
- memset(core_stats->time, 0,
- sizeof(struct stats_time_cache) * core_stats->entries);
-
- memset(core_stats->events, 0,
- sizeof(struct stats_event_cache) * core_stats->event_entries);
-
- core_stats->index = 1;
- core_stats->event_index = 0;
-
- // Keeps track of ordering within the different stats structures
- core_stats->entry_id = 0;
-}
-
-void mod_stats_update_event(struct mod_stats *mod_stats,
- char *event_string,
- unsigned int length)
-{
- struct core_stats *core_stats = NULL;
- struct stats_event_cache *events = NULL;
- unsigned int index = 0;
- unsigned int copy_length = 0;
-
- if (mod_stats == NULL)
- return;
-
- core_stats = MOD_STATS_TO_CORE(mod_stats);
-
- if (core_stats->event_index >= core_stats->event_entries)
- return;
-
- events = core_stats->events;
- index = core_stats->event_index;
-
- copy_length = length;
- if (length > MOD_STATS_EVENT_STRING_MAX)
- copy_length = MOD_STATS_EVENT_STRING_MAX;
-
- memcpy(&events[index].event_string, event_string, copy_length);
- events[index].event_string[copy_length - 1] = '\0';
-
- events[index].entry_id = core_stats->entry_id;
- core_stats->event_index++;
- core_stats->entry_id++;
-}
-
-void mod_stats_update_flip(struct mod_stats *mod_stats,
- unsigned long timestamp_in_ns)
-{
- struct core_stats *core_stats = NULL;
- struct stats_time_cache *time = NULL;
- unsigned int index = 0;
-
- if (mod_stats == NULL)
- return;
-
- core_stats = MOD_STATS_TO_CORE(mod_stats);
-
- if (core_stats->index >= core_stats->entries)
- return;
-
- time = core_stats->time;
- index = core_stats->index;
-
- time[index].flip_timestamp_in_ns = timestamp_in_ns;
- time[index].render_time_in_us =
- (timestamp_in_ns - time[index - 1].flip_timestamp_in_ns) / 1000;
-
- if (index >= 10) {
- for (unsigned int i = 0; i < 10; i++)
- time[index].avg_render_time_in_us_last_ten +=
- time[index - i].render_time_in_us;
- time[index].avg_render_time_in_us_last_ten /= 10;
- }
-
- if (time[index].num_vsync_between_flips > 0)
- time[index].vsync_to_flip_time_in_us =
- (timestamp_in_ns -
- time[index].vupdate_timestamp_in_ns) / 1000;
- else
- time[index].vsync_to_flip_time_in_us =
- (timestamp_in_ns -
- time[index - 1].vupdate_timestamp_in_ns) / 1000;
-
- time[index].entry_id = core_stats->entry_id;
- core_stats->index++;
- core_stats->entry_id++;
-}
-
-void mod_stats_update_vupdate(struct mod_stats *mod_stats,
- unsigned long timestamp_in_ns)
-{
- struct core_stats *core_stats = NULL;
- struct stats_time_cache *time = NULL;
- unsigned int index = 0;
- unsigned int num_vsyncs = 0;
- unsigned int prev_vsync_in_ns = 0;
-
- if (mod_stats == NULL)
- return;
-
- core_stats = MOD_STATS_TO_CORE(mod_stats);
-
- if (core_stats->index >= core_stats->entries)
- return;
-
- time = core_stats->time;
- index = core_stats->index;
- num_vsyncs = time[index].num_vsync_between_flips;
-
- if (num_vsyncs < MOD_STATS_NUM_VSYNCS) {
- if (num_vsyncs == 0) {
- prev_vsync_in_ns =
- time[index - 1].vupdate_timestamp_in_ns;
-
- time[index].flip_to_vsync_time_in_us =
- (timestamp_in_ns -
- time[index - 1].flip_timestamp_in_ns) /
- 1000;
- } else {
- prev_vsync_in_ns =
- time[index].vupdate_timestamp_in_ns;
- }
-
- time[index].v_sync_time_in_us[num_vsyncs] =
- (timestamp_in_ns - prev_vsync_in_ns) / 1000;
- }
-
- time[index].vupdate_timestamp_in_ns = timestamp_in_ns;
- time[index].num_vsync_between_flips++;
-}
-
-void mod_stats_update_freesync(struct mod_stats *mod_stats,
- unsigned int v_total_min,
- unsigned int v_total_max,
- unsigned int event_triggers,
- unsigned int window_min,
- unsigned int window_max,
- unsigned int lfc_mid_point_in_us,
- unsigned int inserted_frames,
- unsigned int inserted_duration_in_us)
-{
- struct core_stats *core_stats = NULL;
- struct stats_time_cache *time = NULL;
- unsigned int index = 0;
-
- if (mod_stats == NULL)
- return;
-
- core_stats = MOD_STATS_TO_CORE(mod_stats);
-
- if (core_stats->index >= core_stats->entries)
- return;
-
- time = core_stats->time;
- index = core_stats->index;
-
- time[index].v_total_min = v_total_min;
- time[index].v_total_max = v_total_max;
- time[index].event_triggers = event_triggers;
- time[index].min_window = window_min;
- time[index].max_window = window_max;
- time[index].lfc_mid_point_in_us = lfc_mid_point_in_us;
- time[index].num_frames_inserted = inserted_frames;
- time[index].inserted_duration_in_us = inserted_duration_in_us;
-}
-
diff --git a/drivers/gpu/drm/amd/display/modules/vmid/vmid.c b/drivers/gpu/drm/amd/display/modules/vmid/vmid.c
index 00f132f8ad55..61ee4be35d27 100644
--- a/drivers/gpu/drm/amd/display/modules/vmid/vmid.c
+++ b/drivers/gpu/drm/amd/display/modules/vmid/vmid.c
@@ -112,9 +112,12 @@ uint8_t mod_vmid_get_for_ptb(struct mod_vmid *mod_vmid, uint64_t ptb)
evict_vmids(core_vmid);
vmid = get_next_available_vmid(core_vmid);
- add_ptb_to_table(core_vmid, vmid, ptb);
+ if (vmid != -1) {
+ add_ptb_to_table(core_vmid, vmid, ptb);
- dc_setup_vm_context(core_vmid->dc, &va_config, vmid);
+ dc_setup_vm_context(core_vmid->dc, &va_config, vmid);
+ } else
+ ASSERT(0);
}
return vmid;
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index d655a76bedc6..e98c84ef206f 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -40,6 +40,13 @@ enum amd_chip_flags {
AMD_EXP_HW_SUPPORT = 0x00080000UL,
};
+enum amd_apu_flags {
+ AMD_APU_IS_RAVEN = 0x00000001UL,
+ AMD_APU_IS_RAVEN2 = 0x00000002UL,
+ AMD_APU_IS_PICASSO = 0x00000004UL,
+ AMD_APU_IS_RENOIR = 0x00000008UL,
+};
+
enum amd_ip_block_type {
AMD_IP_BLOCK_TYPE_COMMON,
AMD_IP_BLOCK_TYPE_GMC,
@@ -150,6 +157,13 @@ enum DC_FEATURE_MASK {
DC_PSR_MASK = 0x8,
};
+enum DC_DEBUG_MASK {
+ DC_DISABLE_PIPE_SPLIT = 0x1,
+ DC_DISABLE_STUTTER = 0x2,
+ DC_DISABLE_DSC = 0x4,
+ DC_DISABLE_CLOCK_GATING = 0x8
+};
+
enum amd_dpm_forced_level;
/**
* struct amd_ip_funcs - general hooks for managing amdgpu IP Blocks
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_sh_mask.h
index e7db6f9f9c86..8b0b9a2a8fed 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_sh_mask.h
@@ -5599,6 +5599,7 @@
#define GRBM_PWR_CNTL__ALL_REQ_EN_MASK 0x00008000L
//GRBM_STATUS
#define GRBM_STATUS__ME0PIPE0_CMDFIFO_AVAIL__SHIFT 0x0
+#define GRBM_STATUS__RSMU_RQ_PENDING__SHIFT 0x5
#define GRBM_STATUS__ME0PIPE0_CF_RQ_PENDING__SHIFT 0x7
#define GRBM_STATUS__ME0PIPE0_PF_RQ_PENDING__SHIFT 0x8
#define GRBM_STATUS__GDS_DMA_RQ_PENDING__SHIFT 0x9
@@ -5619,6 +5620,7 @@
#define GRBM_STATUS__CB_BUSY__SHIFT 0x1e
#define GRBM_STATUS__GUI_ACTIVE__SHIFT 0x1f
#define GRBM_STATUS__ME0PIPE0_CMDFIFO_AVAIL_MASK 0x0000000FL
+#define GRBM_STATUS__RSMU_RQ_PENDING_MASK 0x00000020L
#define GRBM_STATUS__ME0PIPE0_CF_RQ_PENDING_MASK 0x00000080L
#define GRBM_STATUS__ME0PIPE0_PF_RQ_PENDING_MASK 0x00000100L
#define GRBM_STATUS__GDS_DMA_RQ_PENDING_MASK 0x00000200L
@@ -5832,6 +5834,7 @@
#define GRBM_READ_ERROR__READ_ERROR_MASK 0x80000000L
//GRBM_READ_ERROR2
#define GRBM_READ_ERROR2__READ_REQUESTER_CPF__SHIFT 0x10
+#define GRBM_READ_ERROR2__READ_REQUESTER_RSMU__SHIFT 0x11
#define GRBM_READ_ERROR2__READ_REQUESTER_RLC__SHIFT 0x12
#define GRBM_READ_ERROR2__READ_REQUESTER_GDS_DMA__SHIFT 0x13
#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE0_CF__SHIFT 0x14
@@ -5847,6 +5850,7 @@
#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE2__SHIFT 0x1e
#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE3__SHIFT 0x1f
#define GRBM_READ_ERROR2__READ_REQUESTER_CPF_MASK 0x00010000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_RSMU_MASK 0x00020000L
#define GRBM_READ_ERROR2__READ_REQUESTER_RLC_MASK 0x00040000L
#define GRBM_READ_ERROR2__READ_REQUESTER_GDS_DMA_MASK 0x00080000L
#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE0_CF_MASK 0x00100000L
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbif/nbif_6_1_offset.h b/drivers/gpu/drm/amd/include/asic_reg/nbif/nbif_6_1_offset.h
index 68d0ffad28c7..92fd27c26a77 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/nbif/nbif_6_1_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbif/nbif_6_1_offset.h
@@ -1162,8 +1162,10 @@
#define mmRCC_CONFIG_MEMSIZE_BASE_IDX 0
#define mmRCC_CONFIG_RESERVED 0x0de4 // duplicate
#define mmRCC_CONFIG_RESERVED_BASE_IDX 0
+#ifndef mmRCC_IOV_FUNC_IDENTIFIER
#define mmRCC_IOV_FUNC_IDENTIFIER 0x0de5 // duplicate
#define mmRCC_IOV_FUNC_IDENTIFIER_BASE_IDX 0
+#endif
// addressBlock: syshub_mmreg_ind_syshubdec
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_offset.h
index 435462294fbc..a7cd760ebf8f 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_offset.h
@@ -4251,8 +4251,10 @@
#define mmRCC_CONFIG_MEMSIZE_BASE_IDX 2
#define mmRCC_CONFIG_RESERVED 0x00c4
#define mmRCC_CONFIG_RESERVED_BASE_IDX 2
+#ifndef mmRCC_IOV_FUNC_IDENTIFIER
#define mmRCC_IOV_FUNC_IDENTIFIER 0x00c5
#define mmRCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2
+#endif
// addressBlock: nbio_nbif0_rcc_dev0_BIFDEC1
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h
index ce5830ebe095..0c5a08bc034a 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h
@@ -2687,8 +2687,10 @@
#define mmRCC_CONFIG_MEMSIZE_BASE_IDX 2
#define mmRCC_CONFIG_RESERVED 0x00c4
#define mmRCC_CONFIG_RESERVED_BASE_IDX 2
+#ifndef mmRCC_IOV_FUNC_IDENTIFIER
#define mmRCC_IOV_FUNC_IDENTIFIER 0x00c5
#define mmRCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2
+#endif
// addressBlock: nbio_nbif0_rcc_dev0_BIFDEC1
diff --git a/drivers/gpu/drm/amd/include/asic_reg/pwr/pwr_10_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/pwr/pwr_10_0_offset.h
new file mode 100644
index 000000000000..e87c359ea1fe
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/pwr/pwr_10_0_offset.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _pwr_10_0_OFFSET_HEADER
+#define _pwr_10_0_OFFSET_HEADER
+
+#define mmPWR_MISC_CNTL_STATUS 0x0183
+#define mmPWR_MISC_CNTL_STATUS_BASE_IDX 0
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/pwr/pwr_10_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/pwr/pwr_10_0_sh_mask.h
new file mode 100644
index 000000000000..8a000c21651c
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/pwr/pwr_10_0_sh_mask.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _pwr_10_0_SH_MASK_HEADER
+#define _pwr_10_0_SH_MASK_HEADER
+
+//PWR_MISC_CNTL_STATUS
+#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN__SHIFT 0x0
+#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT 0x1
+#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK 0x00000001L
+#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK 0x00000006L
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_12_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_12_0_0_offset.h
new file mode 100644
index 000000000000..9bf73284ad73
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_12_0_0_offset.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _smuio_12_0_0_OFFSET_HEADER
+#define _smuio_12_0_0_OFFSET_HEADER
+
+#define mmSMUIO_GFX_MISC_CNTL 0x00c8
+#define mmSMUIO_GFX_MISC_CNTL_BASE_IDX 0
+
+#define mmPWR_MISC_CNTL_STATUS 0x0183
+#define mmPWR_MISC_CNTL_STATUS_BASE_IDX 1
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_12_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_12_0_0_sh_mask.h
new file mode 100644
index 000000000000..26556fa3d054
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_12_0_0_sh_mask.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _smuio_12_0_0_SH_MASK_HEADER
+#define _smuio_12_0_0_SH_MASK_HEADER
+
+//SMUIO_GFX_MISC_CNTL
+#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK 0x00000006L
+#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT 0x1
+//PWR_MISC_CNTL_STATUS
+#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN__SHIFT 0x0
+#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT 0x1
+#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK 0x00000001L
+#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK 0x00000006L
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index 70146518174c..b36ea8340afa 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -972,11 +972,13 @@ struct atom_ext_display_path
};
//usCaps
-enum ext_display_path_cap_def
-{
- EXT_DISPLAY_PATH_CAPS__HBR2_DISABLE =0x0001,
- EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN =0x0002,
- EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK =0x007C,
+enum ext_display_path_cap_def {
+ EXT_DISPLAY_PATH_CAPS__HBR2_DISABLE = 0x0001,
+ EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN = 0x0002,
+ EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK = 0x007C,
+ EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204 = (0x01 << 2), //PI redriver chip
+ EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT = (0x02 << 2), //TI retimer chip
+ EXT_DISPLAY_PATH_CAPS__HDMI20_PARADE_PS175 = (0x03 << 2) //Parade DP->HDMI converter chip
};
struct atom_external_display_connection_info
@@ -1876,6 +1878,108 @@ struct atom_smc_dpm_info_v4_6
uint32_t boardreserved[10];
};
+struct atom_smc_dpm_info_v4_7
+{
+ struct atom_common_table_header table_header;
+ // SECTION: BOARD PARAMETERS
+ // I2C Control
+ struct smudpm_i2c_controller_config_v2 I2cControllers[8];
+
+ // SVI2 Board Parameters
+ uint16_t MaxVoltageStepGfx; // In mV(Q2) Max voltage step that SMU will request. Multiple steps are taken if voltage change exceeds this value.
+ uint16_t MaxVoltageStepSoc; // In mV(Q2) Max voltage step that SMU will request. Multiple steps are taken if voltage change exceeds this value.
+
+ uint8_t VddGfxVrMapping; // Use VR_MAPPING* bitfields
+ uint8_t VddSocVrMapping; // Use VR_MAPPING* bitfields
+ uint8_t VddMem0VrMapping; // Use VR_MAPPING* bitfields
+ uint8_t VddMem1VrMapping; // Use VR_MAPPING* bitfields
+
+ uint8_t GfxUlvPhaseSheddingMask; // set this to 1 to set PSI0/1 to 1 in ULV mode
+ uint8_t SocUlvPhaseSheddingMask; // set this to 1 to set PSI0/1 to 1 in ULV mode
+ uint8_t ExternalSensorPresent; // External RDI connected to TMON (aka TEMP IN)
+ uint8_t Padding8_V;
+
+ // Telemetry Settings
+ uint16_t GfxMaxCurrent; // in Amps
+ uint8_t GfxOffset; // in Amps
+ uint8_t Padding_TelemetryGfx;
+ uint16_t SocMaxCurrent; // in Amps
+ uint8_t SocOffset; // in Amps
+ uint8_t Padding_TelemetrySoc;
+
+ uint16_t Mem0MaxCurrent; // in Amps
+ uint8_t Mem0Offset; // in Amps
+ uint8_t Padding_TelemetryMem0;
+
+ uint16_t Mem1MaxCurrent; // in Amps
+ uint8_t Mem1Offset; // in Amps
+ uint8_t Padding_TelemetryMem1;
+
+ // GPIO Settings
+ uint8_t AcDcGpio; // GPIO pin configured for AC/DC switching
+ uint8_t AcDcPolarity; // GPIO polarity for AC/DC switching
+ uint8_t VR0HotGpio; // GPIO pin configured for VR0 HOT event
+ uint8_t VR0HotPolarity; // GPIO polarity for VR0 HOT event
+
+ uint8_t VR1HotGpio; // GPIO pin configured for VR1 HOT event
+ uint8_t VR1HotPolarity; // GPIO polarity for VR1 HOT event
+ uint8_t GthrGpio; // GPIO pin configured for GTHR Event
+ uint8_t GthrPolarity; // replace GPIO polarity for GTHR
+
+ // LED Display Settings
+ uint8_t LedPin0; // GPIO number for LedPin[0]
+ uint8_t LedPin1; // GPIO number for LedPin[1]
+ uint8_t LedPin2; // GPIO number for LedPin[2]
+ uint8_t padding8_4;
+
+ // GFXCLK PLL Spread Spectrum
+ uint8_t PllGfxclkSpreadEnabled; // on or off
+ uint8_t PllGfxclkSpreadPercent; // Q4.4
+ uint16_t PllGfxclkSpreadFreq; // kHz
+
+ // GFXCLK DFLL Spread Spectrum
+ uint8_t DfllGfxclkSpreadEnabled; // on or off
+ uint8_t DfllGfxclkSpreadPercent; // Q4.4
+ uint16_t DfllGfxclkSpreadFreq; // kHz
+
+ // UCLK Spread Spectrum
+ uint8_t UclkSpreadEnabled; // on or off
+ uint8_t UclkSpreadPercent; // Q4.4
+ uint16_t UclkSpreadFreq; // kHz
+
+ // SOCCLK Spread Spectrum
+ uint8_t SoclkSpreadEnabled; // on or off
+ uint8_t SocclkSpreadPercent; // Q4.4
+ uint16_t SocclkSpreadFreq; // kHz
+
+ // Total board power
+ uint16_t TotalBoardPower; //Only needed for TCP Estimated case, where TCP = TGP+Total Board Power
+ uint16_t BoardPadding;
+
+ // Mvdd Svi2 Div Ratio Setting
+ uint32_t MvddRatio; // This is used for MVDD Vid workaround. It has 16 fractional bits (Q16.16)
+
+ // GPIO pins for I2C communications with 2nd controller for Input Telemetry Sequence
+ uint8_t GpioI2cScl; // Serial Clock
+ uint8_t GpioI2cSda; // Serial Data
+ uint16_t GpioPadding;
+
+ // Additional LED Display Settings
+ uint8_t LedPin3; // GPIO number for LedPin[3] - PCIE GEN Speed
+ uint8_t LedPin4; // GPIO number for LedPin[4] - PMFW Error Status
+ uint16_t LedEnableMask;
+
+ // Power Limit Scalars
+ uint8_t PowerLimitScalar[4]; //[PPT_THROTTLER_COUNT]
+
+ uint8_t MvddUlvPhaseSheddingMask;
+ uint8_t VddciUlvPhaseSheddingMask;
+ uint8_t Padding8_Psi1;
+ uint8_t Padding8_Psi2;
+
+ uint32_t BoardReserved[5];
+};
+
/*
***************************************************************************
Data Table asic_profiling_info structure
diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h
index a69deb3a2ac0..60a6536ff656 100644
--- a/drivers/gpu/drm/amd/include/cgs_common.h
+++ b/drivers/gpu/drm/amd/include/cgs_common.h
@@ -32,7 +32,6 @@ struct cgs_device;
* enum cgs_ind_reg - Indirect register spaces
*/
enum cgs_ind_reg {
- CGS_IND_REG__MMIO,
CGS_IND_REG__PCIE,
CGS_IND_REG__SMC,
CGS_IND_REG__UVD_CTX,
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index 8e2acb4df860..7e6dcdf7df73 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -50,6 +50,7 @@ static int amd_powerplay_create(struct amdgpu_device *adev)
hwmgr->not_vf = !amdgpu_sriov_vf(adev);
hwmgr->device = amdgpu_cgs_create_device(adev);
mutex_init(&hwmgr->smu_lock);
+ mutex_init(&hwmgr->msg_lock);
hwmgr->chip_family = adev->family;
hwmgr->chip_id = adev->asic_type;
hwmgr->feature_mask = adev->pm.pp_feature;
@@ -64,6 +65,8 @@ static void amd_powerplay_destroy(struct amdgpu_device *adev)
{
struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+ mutex_destroy(&hwmgr->msg_lock);
+
kfree(hwmgr->hardcode_pp_table);
hwmgr->hardcode_pp_table = NULL;
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index e77046931e4c..8c684a6e0156 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -62,6 +62,7 @@ const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask
size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
{
+ struct amdgpu_device *adev = smu->adev;
size_t size = 0;
int ret = 0, i = 0;
uint32_t feature_mask[2] = { 0 };
@@ -70,6 +71,9 @@ size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
uint32_t sort_feature[SMU_FEATURE_COUNT];
uint64_t hw_feature_count = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
@@ -110,9 +114,6 @@ static int smu_feature_update_enable_state(struct smu_context *smu,
uint32_t feature_low = 0, feature_high = 0;
int ret = 0;
- if (!smu->pm_enabled)
- return ret;
-
feature_low = (feature_mask >> 0 ) & 0xffffffff;
feature_high = (feature_mask >> 32) & 0xffffffff;
@@ -155,6 +156,10 @@ int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
uint64_t feature_2_enabled = 0;
uint64_t feature_2_disabled = 0;
uint64_t feature_enables = 0;
+ struct amdgpu_device *adev = smu->adev;
+
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
mutex_lock(&smu->mutex);
@@ -191,16 +196,31 @@ int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t
if (!if_version && !smu_version)
return -EINVAL;
+ if (smu->smc_fw_if_version && smu->smc_fw_version)
+ {
+ if (if_version)
+ *if_version = smu->smc_fw_if_version;
+
+ if (smu_version)
+ *smu_version = smu->smc_fw_version;
+
+ return 0;
+ }
+
if (if_version) {
ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion, if_version);
if (ret)
return ret;
+
+ smu->smc_fw_if_version = *if_version;
}
if (smu_version) {
ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion, smu_version);
if (ret)
return ret;
+
+ smu->smc_fw_version = *smu_version;
}
return ret;
@@ -327,13 +347,13 @@ int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_typ
param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex,
- param, &param);
+ param, value);
if (ret)
return ret;
/* BIT31: 0 - Fine grained DPM, 1 - Dicrete DPM
* now, we un-support it */
- *value = param & 0x7fffffff;
+ *value = *value & 0x7fffffff;
return ret;
}
@@ -417,8 +437,12 @@ bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type)
int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
bool gate)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
switch (block_type) {
case AMD_IP_BLOCK_TYPE_UVD:
ret = smu_dpm_set_uvd_enable(smu, !gate);
@@ -511,7 +535,6 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int
int table_id = smu_table_get_index(smu, table_index);
uint32_t table_size;
int ret = 0;
-
if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
return -EINVAL;
@@ -547,12 +570,10 @@ bool is_support_sw_smu(struct amdgpu_device *adev)
if (adev->asic_type == CHIP_VEGA20)
return (amdgpu_dpm == 2) ? true : false;
else if (adev->asic_type >= CHIP_ARCTURUS) {
- if (amdgpu_sriov_vf(adev)&& !amdgpu_sriov_is_pp_one_vf(adev))
- return false;
- else
+ if (amdgpu_sriov_is_pp_one_vf(adev) || !amdgpu_sriov_vf(adev))
return true;
- } else
- return false;
+ }
+ return false;
}
bool is_support_sw_smu_xgmi(struct amdgpu_device *adev)
@@ -569,8 +590,12 @@ bool is_support_sw_smu_xgmi(struct amdgpu_device *adev)
int smu_sys_get_pp_table(struct smu_context *smu, void **table)
{
struct smu_table_context *smu_table = &smu->smu_table;
+ struct amdgpu_device *adev = smu->adev;
uint32_t powerplay_table_size;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
return -EINVAL;
@@ -591,11 +616,13 @@ int smu_sys_get_pp_table(struct smu_context *smu, void **table)
int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size)
{
struct smu_table_context *smu_table = &smu->smu_table;
+ struct amdgpu_device *adev = smu->adev;
ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
int ret = 0;
- if (!smu->pm_enabled)
+ if (!adev->pm.dpm_enabled)
return -EINVAL;
+
if (header->usStructureSize != size) {
pr_err("pp table size not matched !\n");
return -EIO;
@@ -636,8 +663,6 @@ int smu_feature_init_dpm(struct smu_context *smu)
int ret = 0;
uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];
- if (!smu->pm_enabled)
- return ret;
mutex_lock(&feature->mutex);
bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
mutex_unlock(&feature->mutex);
@@ -665,7 +690,6 @@ int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask)
if (smu->is_apu)
return 1;
-
feature_id = smu_feature_get_index(smu, mask);
if (feature_id < 0)
return 0;
@@ -932,13 +956,6 @@ static int smu_sw_init(void *handle)
return ret;
}
- if (adev->smu.ppt_funcs->i2c_eeprom_init) {
- ret = smu_i2c_eeprom_init(smu, &adev->pm.smu_i2c);
-
- if (ret)
- return ret;
- }
-
return 0;
}
@@ -948,9 +965,6 @@ static int smu_sw_fini(void *handle)
struct smu_context *smu = &adev->smu;
int ret;
- if (adev->smu.ppt_funcs->i2c_eeprom_fini)
- smu_i2c_eeprom_fini(smu, &adev->pm.smu_i2c);
-
kfree(smu->irq_source);
smu->irq_source = NULL;
@@ -1323,6 +1337,9 @@ static int smu_hw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct smu_context *smu = &adev->smu;
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ return 0;
+
ret = smu_start_smc_engine(smu);
if (ret) {
pr_err("SMU is not ready yet!\n");
@@ -1336,9 +1353,6 @@ static int smu_hw_init(void *handle)
smu_set_gfx_cgpg(&adev->smu, true);
}
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return 0;
-
if (!smu->pm_enabled)
return 0;
@@ -1366,10 +1380,11 @@ static int smu_hw_init(void *handle)
if (ret)
goto failed;
- if (!smu->pm_enabled)
- adev->pm.dpm_enabled = false;
- else
- adev->pm.dpm_enabled = true; /* TODO: will set dpm_enabled flag while VCN and DAL DPM is workable */
+ ret = smu_i2c_eeprom_init(smu, &adev->pm.smu_i2c);
+ if (ret)
+ goto failed;
+
+ adev->pm.dpm_enabled = true;
pr_info("SMU is initialized successfully!\n");
@@ -1381,6 +1396,9 @@ failed:
static int smu_stop_dpms(struct smu_context *smu)
{
+ if (amdgpu_sriov_vf(smu->adev))
+ return 0;
+
return smu_system_features_control(smu, false);
}
@@ -1403,6 +1421,10 @@ static int smu_hw_fini(void *handle)
if (!smu->pm_enabled)
return 0;
+ adev->pm.dpm_enabled = false;
+
+ smu_i2c_eeprom_fini(smu, &adev->pm.smu_i2c);
+
if (!amdgpu_sriov_vf(adev)){
ret = smu_stop_thermal_control(smu);
if (ret) {
@@ -1542,6 +1564,10 @@ static int smu_suspend(void *handle)
if (!smu->pm_enabled)
return 0;
+ adev->pm.dpm_enabled = false;
+
+ smu_i2c_eeprom_fini(smu, &adev->pm.smu_i2c);
+
if(!amdgpu_sriov_vf(adev)) {
ret = smu_disable_dpm(smu);
if (ret)
@@ -1587,11 +1613,17 @@ static int smu_resume(void *handle)
if (ret)
goto failed;
+ ret = smu_i2c_eeprom_init(smu, &adev->pm.smu_i2c);
+ if (ret)
+ goto failed;
+
if (smu->is_apu)
smu_set_gfx_cgpg(&adev->smu, true);
smu->disable_uclk_switch = 0;
+ adev->pm.dpm_enabled = true;
+
pr_info("SMU is resumed successfully!\n");
return 0;
@@ -1603,10 +1635,14 @@ failed:
int smu_display_configuration_change(struct smu_context *smu,
const struct amd_pp_display_configuration *display_config)
{
+ struct amdgpu_device *adev = smu->adev;
int index = 0;
int num_of_active_display = 0;
- if (!smu->pm_enabled || !is_support_sw_smu(smu->adev))
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
+ if (!is_support_sw_smu(smu->adev))
return -EINVAL;
if (!display_config)
@@ -1668,12 +1704,16 @@ int smu_get_current_clocks(struct smu_context *smu,
struct amd_pp_clock_info *clocks)
{
struct amd_pp_simple_clock_info simple_clocks = {0};
+ struct amdgpu_device *adev = smu->adev;
struct smu_clock_info hw_clocks;
int ret = 0;
if (!is_support_sw_smu(smu->adev))
return -EINVAL;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
smu_get_dal_power_level(smu, &simple_clocks);
@@ -1736,7 +1776,7 @@ static int smu_enable_umd_pstate(void *handle,
struct smu_context *smu = (struct smu_context*)(handle);
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
- if (!smu->is_apu && (!smu->pm_enabled || !smu_dpm_ctx->dpm_context))
+ if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
return -EINVAL;
if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
@@ -1778,9 +1818,6 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu,
long workload;
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
- if (!smu->pm_enabled)
- return -EINVAL;
-
if (!skip_display_settings) {
ret = smu_display_config_changed(smu);
if (ret) {
@@ -1831,8 +1868,12 @@ int smu_handle_task(struct smu_context *smu,
enum amd_pp_task task_id,
bool lock_needed)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
if (lock_needed)
mutex_lock(&smu->mutex);
@@ -1866,10 +1907,11 @@ int smu_switch_power_profile(struct smu_context *smu,
bool en)
{
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+ struct amdgpu_device *adev = smu->adev;
long workload;
uint32_t index;
- if (!smu->pm_enabled)
+ if (!adev->pm.dpm_enabled)
return -EINVAL;
if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
@@ -1900,8 +1942,12 @@ int smu_switch_power_profile(struct smu_context *smu,
enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
{
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+ struct amdgpu_device *adev = smu->adev;
enum amd_dpm_forced_level level;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
return -EINVAL;
@@ -1915,8 +1961,12 @@ enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
{
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
return -EINVAL;
@@ -1939,8 +1989,12 @@ int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_lev
int smu_set_display_count(struct smu_context *smu, uint32_t count)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
ret = smu_init_display_count(smu, count);
mutex_unlock(&smu->mutex);
@@ -1954,8 +2008,12 @@ int smu_force_clk_levels(struct smu_context *smu,
bool lock_needed)
{
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
pr_debug("force clock level is for dpm manual mode only.\n");
return -EINVAL;
@@ -1973,20 +2031,19 @@ int smu_force_clk_levels(struct smu_context *smu,
return ret;
}
+/*
+ * On system suspend or reset, the dpm_enabled
+ * flag will be cleared, so that those SMU services
+ * which are not supported will be gated.
+ * However, the mp1 state setting should still be granted
+ * even if dpm_enabled is cleared.
+ */
int smu_set_mp1_state(struct smu_context *smu,
enum pp_mp1_state mp1_state)
{
uint16_t msg;
int ret;
- /*
- * The SMC is not fully ready. That may be
- * expected as the IP may be masked.
- * So, just return without error.
- */
- if (!smu->pm_enabled)
- return 0;
-
mutex_lock(&smu->mutex);
switch (mp1_state) {
@@ -2023,15 +2080,11 @@ int smu_set_mp1_state(struct smu_context *smu,
int smu_set_df_cstate(struct smu_context *smu,
enum pp_df_cstate state)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
- /*
- * The SMC is not fully ready. That may be
- * expected as the IP may be masked.
- * So, just return without error.
- */
- if (!smu->pm_enabled)
- return 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
return 0;
@@ -2047,6 +2100,28 @@ int smu_set_df_cstate(struct smu_context *smu,
return ret;
}
+int smu_allow_xgmi_power_down(struct smu_context *smu, bool en)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0;
+
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
+ if (!smu->ppt_funcs || !smu->ppt_funcs->allow_xgmi_power_down)
+ return 0;
+
+ mutex_lock(&smu->mutex);
+
+ ret = smu->ppt_funcs->allow_xgmi_power_down(smu, en);
+ if (ret)
+ pr_err("[AllowXgmiPowerDown] failed!\n");
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
int smu_write_watermarks_table(struct smu_context *smu)
{
void *watermarks_table = smu->smu_table.watermarks_table;
@@ -2065,6 +2140,10 @@ int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
{
void *table = smu->smu_table.watermarks_table;
+ struct amdgpu_device *adev = smu->adev;
+
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
if (!table)
return -EINVAL;
@@ -2089,8 +2168,12 @@ int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
int smu_set_ac_dc(struct smu_context *smu)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
/* controlled by firmware */
if (smu->dc_controlled_by_gpio)
return 0;
@@ -2149,8 +2232,12 @@ const struct amdgpu_ip_block_version smu_v12_0_ip_block =
int smu_load_microcode(struct smu_context *smu)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->load_microcode)
@@ -2163,8 +2250,12 @@ int smu_load_microcode(struct smu_context *smu)
int smu_check_fw_status(struct smu_context *smu)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->check_fw_status)
@@ -2191,8 +2282,12 @@ int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->set_fan_speed_rpm)
@@ -2208,10 +2303,15 @@ int smu_get_power_limit(struct smu_context *smu,
bool def,
bool lock_needed)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
- if (lock_needed)
+ if (lock_needed) {
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
+ }
if (smu->ppt_funcs->get_power_limit)
ret = smu->ppt_funcs->get_power_limit(smu, limit, def);
@@ -2224,8 +2324,12 @@ int smu_get_power_limit(struct smu_context *smu,
int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->set_power_limit)
@@ -2238,8 +2342,12 @@ int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->print_clk_levels)
@@ -2252,8 +2360,12 @@ int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, ch
int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->get_od_percentage)
@@ -2266,8 +2378,12 @@ int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type)
int smu_set_od_percentage(struct smu_context *smu, enum smu_clk_type type, uint32_t value)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->set_od_percentage)
@@ -2282,8 +2398,12 @@ int smu_od_edit_dpm_table(struct smu_context *smu,
enum PP_OD_DPM_TABLE_COMMAND type,
long *input, uint32_t size)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->od_edit_dpm_table)
@@ -2298,8 +2418,12 @@ int smu_read_sensor(struct smu_context *smu,
enum amd_pp_sensors sensor,
void *data, uint32_t *size)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->read_sensor)
@@ -2312,8 +2436,12 @@ int smu_read_sensor(struct smu_context *smu,
int smu_get_power_profile_mode(struct smu_context *smu, char *buf)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->get_power_profile_mode)
@@ -2329,8 +2457,12 @@ int smu_set_power_profile_mode(struct smu_context *smu,
uint32_t param_size,
bool lock_needed)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
if (lock_needed)
mutex_lock(&smu->mutex);
@@ -2346,8 +2478,12 @@ int smu_set_power_profile_mode(struct smu_context *smu,
int smu_get_fan_control_mode(struct smu_context *smu)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->get_fan_control_mode)
@@ -2360,8 +2496,12 @@ int smu_get_fan_control_mode(struct smu_context *smu)
int smu_set_fan_control_mode(struct smu_context *smu, int value)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->set_fan_control_mode)
@@ -2374,8 +2514,12 @@ int smu_set_fan_control_mode(struct smu_context *smu, int value)
int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->get_fan_speed_percent)
@@ -2388,8 +2532,12 @@ int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)
int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->set_fan_speed_percent)
@@ -2402,8 +2550,12 @@ int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->get_fan_speed_rpm)
@@ -2416,8 +2568,12 @@ int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed)
int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->set_deep_sleep_dcefclk)
@@ -2430,8 +2586,12 @@ int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
int smu_set_active_display_count(struct smu_context *smu, uint32_t count)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
if (smu->ppt_funcs->set_active_display_count)
ret = smu->ppt_funcs->set_active_display_count(smu, count);
@@ -2442,8 +2602,12 @@ int smu_get_clock_by_type(struct smu_context *smu,
enum amd_pp_clock_type type,
struct amd_pp_clocks *clocks)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->get_clock_by_type)
@@ -2457,8 +2621,12 @@ int smu_get_clock_by_type(struct smu_context *smu,
int smu_get_max_high_clocks(struct smu_context *smu,
struct amd_pp_simple_clock_info *clocks)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->get_max_high_clocks)
@@ -2473,8 +2641,12 @@ int smu_get_clock_by_type_with_latency(struct smu_context *smu,
enum smu_clk_type clk_type,
struct pp_clock_levels_with_latency *clocks)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->get_clock_by_type_with_latency)
@@ -2489,8 +2661,12 @@ int smu_get_clock_by_type_with_voltage(struct smu_context *smu,
enum amd_pp_clock_type type,
struct pp_clock_levels_with_voltage *clocks)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->get_clock_by_type_with_voltage)
@@ -2505,8 +2681,12 @@ int smu_get_clock_by_type_with_voltage(struct smu_context *smu,
int smu_display_clock_voltage_request(struct smu_context *smu,
struct pp_display_clock_request *clock_req)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->display_clock_voltage_request)
@@ -2520,8 +2700,12 @@ int smu_display_clock_voltage_request(struct smu_context *smu,
int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = -EINVAL;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->display_disable_memory_clock_switch)
@@ -2534,8 +2718,12 @@ int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disabl
int smu_notify_smu_enable_pwe(struct smu_context *smu)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->notify_smu_enable_pwe)
@@ -2549,8 +2737,12 @@ int smu_notify_smu_enable_pwe(struct smu_context *smu)
int smu_set_xgmi_pstate(struct smu_context *smu,
uint32_t pstate)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->set_xgmi_pstate)
@@ -2563,8 +2755,12 @@ int smu_set_xgmi_pstate(struct smu_context *smu,
int smu_set_azalia_d3_pme(struct smu_context *smu)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->set_azalia_d3_pme)
@@ -2575,6 +2771,14 @@ int smu_set_azalia_d3_pme(struct smu_context *smu)
return ret;
}
+/*
+ * On system suspend or reset, the dpm_enabled
+ * flag will be cleared, so that those SMU services
+ * which are not supported will be gated.
+ *
+ * However, BACO and mode1 reset should still be granted
+ * as they are still supported and necessary.
+ */
bool smu_baco_is_support(struct smu_context *smu)
{
bool ret = false;
@@ -2646,8 +2850,12 @@ int smu_mode2_reset(struct smu_context *smu)
int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
struct pp_smu_nv_clock_table *max_clocks)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
@@ -2662,8 +2870,12 @@ int smu_get_uclk_dpm_states(struct smu_context *smu,
unsigned int *clock_values_in_khz,
unsigned int *num_states)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->get_uclk_dpm_states)
@@ -2677,6 +2889,10 @@ int smu_get_uclk_dpm_states(struct smu_context *smu,
enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
{
enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;
+ struct amdgpu_device *adev = smu->adev;
+
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
mutex_lock(&smu->mutex);
@@ -2691,8 +2907,12 @@ enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
int smu_get_dpm_clock_table(struct smu_context *smu,
struct dpm_clocks *clock_table)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->get_dpm_clock_table)
diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
index 1ef0923f7190..27c5fc9572b2 100644
--- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
@@ -128,6 +128,7 @@ static struct smu_11_0_cmn2aisc_mapping arcturus_message_map[SMU_MSG_MAX_COUNT]
MSG_MAP(SetXgmiMode, PPSMC_MSG_SetXgmiMode),
MSG_MAP(SetMemoryChannelEnable, PPSMC_MSG_SetMemoryChannelEnable),
MSG_MAP(DFCstateControl, PPSMC_MSG_DFCstateControl),
+ MSG_MAP(GmiPwrDnControl, PPSMC_MSG_GmiPwrDnControl),
};
static struct smu_11_0_cmn2aisc_mapping arcturus_clk_map[SMU_CLK_COUNT] = {
@@ -622,6 +623,9 @@ static int arcturus_print_clk_levels(struct smu_context *smu,
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
struct arcturus_dpm_table *dpm_table = NULL;
+ if (amdgpu_ras_intr_triggered())
+ return snprintf(buf, PAGE_SIZE, "unavailable\n");
+
dpm_table = smu_dpm->dpm_context;
switch (type) {
@@ -997,6 +1001,9 @@ static int arcturus_read_sensor(struct smu_context *smu,
PPTable_t *pptable = table_context->driver_pptable;
int ret = 0;
+ if (amdgpu_ras_intr_triggered())
+ return 0;
+
if (!data || !size)
return -EINVAL;
@@ -2226,12 +2233,8 @@ static const struct i2c_algorithm arcturus_i2c_eeprom_i2c_algo = {
static int arcturus_i2c_eeprom_control_init(struct i2c_adapter *control)
{
struct amdgpu_device *adev = to_amdgpu_device(control);
- struct smu_context *smu = &adev->smu;
int res;
- if (!smu->pm_enabled)
- return -EOPNOTSUPP;
-
control->owner = THIS_MODULE;
control->class = I2C_CLASS_SPD;
control->dev.parent = &adev->pdev->dev;
@@ -2247,12 +2250,6 @@ static int arcturus_i2c_eeprom_control_init(struct i2c_adapter *control)
static void arcturus_i2c_eeprom_control_fini(struct i2c_adapter *control)
{
- struct amdgpu_device *adev = to_amdgpu_device(control);
- struct smu_context *smu = &adev->smu;
-
- if (!smu->pm_enabled)
- return;
-
i2c_del_adapter(control);
}
@@ -2261,7 +2258,7 @@ static bool arcturus_is_baco_supported(struct smu_context *smu)
struct amdgpu_device *adev = smu->adev;
uint32_t val;
- if (!smu_v11_0_baco_is_support(smu))
+ if (!smu_v11_0_baco_is_support(smu) || amdgpu_sriov_vf(adev))
return false;
val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0);
@@ -2296,6 +2293,35 @@ static int arcturus_set_df_cstate(struct smu_context *smu,
return smu_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state, NULL);
}
+static int arcturus_allow_xgmi_power_down(struct smu_context *smu, bool en)
+{
+ uint32_t smu_version;
+ int ret;
+
+ ret = smu_get_smc_version(smu, NULL, &smu_version);
+ if (ret) {
+ pr_err("Failed to get smu version!\n");
+ return ret;
+ }
+
+ /* PPSMC_MSG_GmiPwrDnControl is supported by 54.23.0 and onwards */
+ if (smu_version < 0x00361700) {
+ pr_err("XGMI power down control is only supported by PMFW 54.23.0 and onwards\n");
+ return -EINVAL;
+ }
+
+ if (en)
+ return smu_send_smc_msg_with_param(smu,
+ SMU_MSG_GmiPwrDnControl,
+ 1,
+ NULL);
+
+ return smu_send_smc_msg_with_param(smu,
+ SMU_MSG_GmiPwrDnControl,
+ 0,
+ NULL);
+}
+
static const struct pptable_funcs arcturus_ppt_funcs = {
/* translate smu index into arcturus specific index */
.get_smu_msg_index = arcturus_get_smu_msg_index,
@@ -2389,6 +2415,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.override_pcie_parameters = smu_v11_0_override_pcie_parameters,
.get_pptable_power_limit = arcturus_get_pptable_power_limit,
.set_df_cstate = arcturus_set_df_cstate,
+ .allow_xgmi_power_down = arcturus_allow_xgmi_power_down,
};
void arcturus_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
index 689072a312a7..c9cfe90a2947 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -36,6 +36,8 @@
#include "power_state.h"
#include "soc15_common.h"
#include "smu10.h"
+#include "asic_reg/pwr/pwr_10_0_offset.h"
+#include "asic_reg/pwr/pwr_10_0_sh_mask.h"
#define SMU10_MAX_DEEPSLEEP_DIVIDER_ID 5
#define SMU10_MINIMUM_ENGINE_CLOCK 800 /* 8Mhz, the low boundary of engine clock allowed on this chip */
@@ -43,13 +45,6 @@
#define SMU10_DISPCLK_BYPASS_THRESHOLD 10000 /* 100Mhz */
#define SMC_RAM_END 0x40000
-#define mmPWR_MISC_CNTL_STATUS 0x0183
-#define mmPWR_MISC_CNTL_STATUS_BASE_IDX 0
-#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN__SHIFT 0x0
-#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT 0x1
-#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK 0x00000001L
-#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK 0x00000006L
-
static const unsigned long SMU10_Magic = (unsigned long) PHM_Rv_Magic;
@@ -81,7 +76,7 @@ static int smu10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
pr_info("[DisplayClockVoltageRequest]Invalid Clock Type!");
return -EINVAL;
}
- smum_send_msg_to_smc_with_parameter(hwmgr, msg, clk_freq);
+ smum_send_msg_to_smc_with_parameter(hwmgr, msg, clk_freq, NULL);
return 0;
}
@@ -214,7 +209,8 @@ static int smu10_set_min_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clo
smu10_data->deep_sleep_dcefclk = clock;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetMinDeepSleepDcefclk,
- smu10_data->deep_sleep_dcefclk);
+ smu10_data->deep_sleep_dcefclk,
+ NULL);
}
return 0;
}
@@ -228,7 +224,8 @@ static int smu10_set_hard_min_dcefclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t c
smu10_data->dcf_actual_hard_min_freq = clock;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinDcefclkByFreq,
- smu10_data->dcf_actual_hard_min_freq);
+ smu10_data->dcf_actual_hard_min_freq,
+ NULL);
}
return 0;
}
@@ -242,7 +239,8 @@ static int smu10_set_hard_min_fclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t cloc
smu10_data->f_actual_hard_min_freq = clock;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinFclkByFreq,
- smu10_data->f_actual_hard_min_freq);
+ smu10_data->f_actual_hard_min_freq,
+ NULL);
}
return 0;
}
@@ -255,7 +253,8 @@ static int smu10_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count
smu10_data->num_active_display = count;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDisplayCount,
- smu10_data->num_active_display);
+ smu10_data->num_active_display,
+ NULL);
}
return 0;
@@ -278,7 +277,8 @@ static int smu10_init_power_gate_state(struct pp_hwmgr *hwmgr)
if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)
return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetGfxCGPG,
- true);
+ true,
+ NULL);
else
return 0;
}
@@ -324,7 +324,7 @@ static int smu10_disable_gfx_off(struct pp_hwmgr *hwmgr)
struct amdgpu_device *adev = hwmgr->adev;
if (adev->pm.pp_feature & PP_GFXOFF_MASK) {
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableGfxOff);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableGfxOff, NULL);
/* confirm gfx is back to "on" state */
while (!smu10_is_gfx_on(hwmgr))
@@ -344,7 +344,7 @@ static int smu10_enable_gfx_off(struct pp_hwmgr *hwmgr)
struct amdgpu_device *adev = hwmgr->adev;
if (adev->pm.pp_feature & PP_GFXOFF_MASK)
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableGfxOff);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableGfxOff, NULL);
return 0;
}
@@ -410,12 +410,10 @@ static int smu10_get_clock_voltage_dependency_table(struct pp_hwmgr *hwmgr,
struct smu10_voltage_dependency_table **pptable,
uint32_t num_entry, const DpmClock_t *pclk_dependency_table)
{
- uint32_t table_size, i;
+ uint32_t i;
struct smu10_voltage_dependency_table *ptable;
- table_size = sizeof(uint32_t) + sizeof(struct smu10_voltage_dependency_table) * num_entry;
- ptable = kzalloc(table_size, GFP_KERNEL);
-
+ ptable = kzalloc(struct_size(ptable, entries, num_entry), GFP_KERNEL);
if (NULL == ptable)
return -ENOMEM;
@@ -479,12 +477,10 @@ static int smu10_populate_clock_table(struct pp_hwmgr *hwmgr)
smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_phyclk,
ARRAY_SIZE(VddPhyClk), &VddPhyClk[0]);
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency);
- result = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &result);
smu10_data->gfx_min_freq_limit = result / 10 * 1000;
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency);
- result = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &result);
smu10_data->gfx_max_freq_limit = result / 10 * 1000;
return 0;
@@ -588,116 +584,148 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
- data->gfx_max_freq_limit/100);
+ data->gfx_max_freq_limit/100,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinFclkByFreq,
- SMU10_UMD_PSTATE_PEAK_FCLK);
+ SMU10_UMD_PSTATE_PEAK_FCLK,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinSocclkByFreq,
- SMU10_UMD_PSTATE_PEAK_SOCCLK);
+ SMU10_UMD_PSTATE_PEAK_SOCCLK,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinVcn,
- SMU10_UMD_PSTATE_VCE);
+ SMU10_UMD_PSTATE_VCE,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxGfxClk,
- data->gfx_max_freq_limit/100);
+ data->gfx_max_freq_limit/100,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxFclkByFreq,
- SMU10_UMD_PSTATE_PEAK_FCLK);
+ SMU10_UMD_PSTATE_PEAK_FCLK,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxSocclkByFreq,
- SMU10_UMD_PSTATE_PEAK_SOCCLK);
+ SMU10_UMD_PSTATE_PEAK_SOCCLK,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxVcn,
- SMU10_UMD_PSTATE_VCE);
+ SMU10_UMD_PSTATE_VCE,
+ NULL);
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
- min_sclk);
+ min_sclk,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxGfxClk,
- min_sclk);
+ min_sclk,
+ NULL);
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinFclkByFreq,
- min_mclk);
+ min_mclk,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxFclkByFreq,
- min_mclk);
+ min_mclk,
+ NULL);
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
- SMU10_UMD_PSTATE_GFXCLK);
+ SMU10_UMD_PSTATE_GFXCLK,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinFclkByFreq,
- SMU10_UMD_PSTATE_FCLK);
+ SMU10_UMD_PSTATE_FCLK,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinSocclkByFreq,
- SMU10_UMD_PSTATE_SOCCLK);
+ SMU10_UMD_PSTATE_SOCCLK,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinVcn,
- SMU10_UMD_PSTATE_VCE);
+ SMU10_UMD_PSTATE_VCE,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxGfxClk,
- SMU10_UMD_PSTATE_GFXCLK);
+ SMU10_UMD_PSTATE_GFXCLK,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxFclkByFreq,
- SMU10_UMD_PSTATE_FCLK);
+ SMU10_UMD_PSTATE_FCLK,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxSocclkByFreq,
- SMU10_UMD_PSTATE_SOCCLK);
+ SMU10_UMD_PSTATE_SOCCLK,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxVcn,
- SMU10_UMD_PSTATE_VCE);
+ SMU10_UMD_PSTATE_VCE,
+ NULL);
break;
case AMD_DPM_FORCED_LEVEL_AUTO:
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
- min_sclk);
+ min_sclk,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinFclkByFreq,
hwmgr->display_config->num_display > 3 ?
SMU10_UMD_PSTATE_PEAK_FCLK :
- min_mclk);
+ min_mclk,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinSocclkByFreq,
- SMU10_UMD_PSTATE_MIN_SOCCLK);
+ SMU10_UMD_PSTATE_MIN_SOCCLK,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinVcn,
- SMU10_UMD_PSTATE_MIN_VCE);
+ SMU10_UMD_PSTATE_MIN_VCE,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxGfxClk,
- data->gfx_max_freq_limit/100);
+ data->gfx_max_freq_limit/100,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxFclkByFreq,
- SMU10_UMD_PSTATE_PEAK_FCLK);
+ SMU10_UMD_PSTATE_PEAK_FCLK,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxSocclkByFreq,
- SMU10_UMD_PSTATE_PEAK_SOCCLK);
+ SMU10_UMD_PSTATE_PEAK_SOCCLK,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxVcn,
- SMU10_UMD_PSTATE_VCE);
+ SMU10_UMD_PSTATE_VCE,
+ NULL);
break;
case AMD_DPM_FORCED_LEVEL_LOW:
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
- data->gfx_min_freq_limit/100);
+ data->gfx_min_freq_limit/100,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxGfxClk,
- data->gfx_min_freq_limit/100);
+ data->gfx_min_freq_limit/100,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinFclkByFreq,
- min_mclk);
+ min_mclk,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxFclkByFreq,
- min_mclk);
+ min_mclk,
+ NULL);
break;
case AMD_DPM_FORCED_LEVEL_MANUAL:
case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
@@ -849,13 +877,15 @@ static int smu10_force_clock_level(struct pp_hwmgr *hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
low == 2 ? data->gfx_max_freq_limit/100 :
low == 1 ? SMU10_UMD_PSTATE_GFXCLK :
- data->gfx_min_freq_limit/100);
+ data->gfx_min_freq_limit/100,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxGfxClk,
high == 0 ? data->gfx_min_freq_limit/100 :
high == 1 ? SMU10_UMD_PSTATE_GFXCLK :
- data->gfx_max_freq_limit/100);
+ data->gfx_max_freq_limit/100,
+ NULL);
break;
case PP_MCLK:
@@ -864,11 +894,13 @@ static int smu10_force_clock_level(struct pp_hwmgr *hwmgr,
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinFclkByFreq,
- mclk_table->entries[low].clk/100);
+ mclk_table->entries[low].clk/100,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxFclkByFreq,
- mclk_table->entries[high].clk/100);
+ mclk_table->entries[high].clk/100,
+ NULL);
break;
case PP_PCIE:
@@ -888,8 +920,7 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
switch (type) {
case PP_SCLK:
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency);
- now = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency, &now);
/* driver only know min/max gfx_clk, Add level 1 for all other gfx clks */
if (now == data->gfx_max_freq_limit/100)
@@ -910,8 +941,7 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
i == 2 ? "*" : "");
break;
case PP_MCLK:
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency);
- now = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency, &now);
for (i = 0; i < mclk_table->count; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
@@ -1122,15 +1152,13 @@ static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
switch (idx) {
case AMDGPU_PP_SENSOR_GFX_SCLK:
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency);
- sclk = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency, &sclk);
/* in units of 10KHZ */
*((uint32_t *)value) = sclk * 100;
*size = 4;
break;
case AMDGPU_PP_SENSOR_GFX_MCLK:
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency);
- mclk = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency, &mclk);
/* in units of 10KHZ */
*((uint32_t *)value) = mclk * 100;
*size = 4;
@@ -1166,20 +1194,20 @@ static int smu10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
static int smu10_smus_notify_pwe(struct pp_hwmgr *hwmgr)
{
- return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SetRccPfcPmeRestoreRegister);
+ return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SetRccPfcPmeRestoreRegister, NULL);
}
static int smu10_powergate_mmhub(struct pp_hwmgr *hwmgr)
{
- return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerGateMmHub);
+ return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerGateMmHub, NULL);
}
static int smu10_powergate_sdma(struct pp_hwmgr *hwmgr, bool gate)
{
if (gate)
- return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerDownSdma);
+ return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerDownSdma, NULL);
else
- return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerUpSdma);
+ return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerUpSdma, NULL);
}
static void smu10_powergate_vcn(struct pp_hwmgr *hwmgr, bool bgate)
@@ -1191,11 +1219,11 @@ static void smu10_powergate_vcn(struct pp_hwmgr *hwmgr, bool bgate)
AMD_IP_BLOCK_TYPE_VCN,
AMD_PG_STATE_GATE);
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_PowerDownVcn, 0);
+ PPSMC_MSG_PowerDownVcn, 0, NULL);
smu10_data->vcn_power_gated = true;
} else {
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_PowerUpVcn, 0);
+ PPSMC_MSG_PowerUpVcn, 0, NULL);
amdgpu_device_ip_set_powergating_state(hwmgr->adev,
AMD_IP_BLOCK_TYPE_VCN,
AMD_PG_STATE_UNGATE);
@@ -1274,8 +1302,7 @@ static int smu10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
static bool smu10_is_raven1_refresh(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = hwmgr->adev;
- if ((adev->asic_type == CHIP_RAVEN) &&
- (adev->rev_id != 0x15d8) &&
+ if ((adev->apu_flags & AMD_APU_IS_RAVEN) &&
(hwmgr->smu_version >= 0x41e2b))
return true;
else
@@ -1304,7 +1331,8 @@ static int smu10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uin
hwmgr->gfxoff_state_changed_by_workload = true;
}
result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ActiveProcessNotify,
- 1 << workload_type);
+ 1 << workload_type,
+ NULL);
if (!result)
hwmgr->power_profile_mode = input[size];
if (workload_type && hwmgr->gfxoff_state_changed_by_workload) {
@@ -1319,13 +1347,13 @@ static int smu10_asic_reset(struct pp_hwmgr *hwmgr, enum SMU_ASIC_RESET_MODE mod
{
return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DeviceDriverReset,
- mode);
+ mode,
+ NULL);
}
static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
.backend_init = smu10_hwmgr_backend_init,
.backend_fini = smu10_hwmgr_backend_fini,
- .asic_setup = NULL,
.apply_state_adjust_rules = smu10_apply_state_adjust_rules,
.force_dpm_level = smu10_dpm_force_dpm_level,
.get_power_state_size = smu10_get_power_state_size,
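
The hunks above all apply the same mechanical change: the SMU messaging wrappers now take a trailing pointer for the firmware's response, so the separate smum_get_argument() read-back disappears and callers that ignore the response simply pass NULL. A minimal sketch of the two call shapes (names taken from the hunks; the local "now" is illustrative only):

	/* Old shape: send the message, then fetch the response in a second call. */
	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency);
	now = smum_get_argument(hwmgr);

	/* New shape: the response lands in *now; fire-and-forget callers pass NULL. */
	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency, &now);
	smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetHardMinGfxClk,
			min_sclk,
			NULL);
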
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h
index 1fb296a996f3..0f969de10fab 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h
@@ -192,7 +192,7 @@ struct smu10_clock_voltage_dependency_record {
struct smu10_voltage_dependency_table {
uint32_t count;
- struct smu10_clock_voltage_dependency_record entries[1];
+ struct smu10_clock_voltage_dependency_record entries[];
};
struct smu10_clock_voltage_information {
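
Alongside the messaging change, smu10_voltage_dependency_table converts its one-element array into a C99 flexible array member, so the table's size has to be derived from the entry count at allocation time. A sketch of the usual kernel idiom for such a table; whether this particular commit switches its allocator to struct_size() is not shown in these hunks:

	struct smu10_voltage_dependency_table *table;

	/* struct_size() sizes the header plus num_entries trailing records. */
	table = kzalloc(struct_size(table, entries, num_entries), GFP_KERNEL);
	if (!table)
		return -ENOMEM;
	table->count = num_entries;
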
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
index 683b29a99366..f2bda3bcbbde 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
@@ -29,14 +29,16 @@ static int smu7_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
{
return smum_send_msg_to_smc(hwmgr, enable ?
PPSMC_MSG_UVDDPM_Enable :
- PPSMC_MSG_UVDDPM_Disable);
+ PPSMC_MSG_UVDDPM_Disable,
+ NULL);
}
static int smu7_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
{
return smum_send_msg_to_smc(hwmgr, enable ?
PPSMC_MSG_VCEDPM_Enable :
- PPSMC_MSG_VCEDPM_Disable);
+ PPSMC_MSG_VCEDPM_Disable,
+ NULL);
}
static int smu7_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
@@ -57,7 +59,8 @@ int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr)
{
if (phm_cf_want_uvd_power_gating(hwmgr))
return smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_UVDPowerOFF);
+ PPSMC_MSG_UVDPowerOFF,
+ NULL);
return 0;
}
@@ -67,10 +70,10 @@ static int smu7_powerup_uvd(struct pp_hwmgr *hwmgr)
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_UVDDynamicPowerGating)) {
return smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_UVDPowerON, 1);
+ PPSMC_MSG_UVDPowerON, 1, NULL);
} else {
return smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_UVDPowerON, 0);
+ PPSMC_MSG_UVDPowerON, 0, NULL);
}
}
@@ -81,7 +84,8 @@ static int smu7_powerdown_vce(struct pp_hwmgr *hwmgr)
{
if (phm_cf_want_vce_power_gating(hwmgr))
return smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_VCEPowerOFF);
+ PPSMC_MSG_VCEPowerOFF,
+ NULL);
return 0;
}
@@ -89,7 +93,8 @@ static int smu7_powerup_vce(struct pp_hwmgr *hwmgr)
{
if (phm_cf_want_vce_power_gating(hwmgr))
return smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_VCEPowerON);
+ PPSMC_MSG_VCEPowerON,
+ NULL);
return 0;
}
@@ -181,7 +186,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_GFX_CGCG_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr, msg, value))
+ hwmgr, msg, value, NULL))
return -EINVAL;
}
if (PP_STATE_SUPPORT_LS & *msg_id) {
@@ -191,7 +196,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_GFX_CGLS_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr, msg, value))
+ hwmgr, msg, value, NULL))
return -EINVAL;
}
break;
@@ -204,7 +209,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_GFX_3DCG_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr, msg, value))
+ hwmgr, msg, value, NULL))
return -EINVAL;
}
@@ -215,7 +220,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_GFX_3DLS_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr, msg, value))
+ hwmgr, msg, value, NULL))
return -EINVAL;
}
break;
@@ -228,7 +233,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_GFX_RLC_LS_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr, msg, value))
+ hwmgr, msg, value, NULL))
return -EINVAL;
}
break;
@@ -241,7 +246,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_GFX_CP_LS_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr, msg, value))
+ hwmgr, msg, value, NULL))
return -EINVAL;
}
break;
@@ -255,7 +260,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
CG_GFX_OTHERS_MGCG_MASK);
if (smum_send_msg_to_smc_with_parameter(
- hwmgr, msg, value))
+ hwmgr, msg, value, NULL))
return -EINVAL;
}
break;
@@ -275,7 +280,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_SYS_BIF_MGCG_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr, msg, value))
+ hwmgr, msg, value, NULL))
return -EINVAL;
}
if (PP_STATE_SUPPORT_LS & *msg_id) {
@@ -285,7 +290,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_SYS_BIF_MGLS_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr, msg, value))
+ hwmgr, msg, value, NULL))
return -EINVAL;
}
break;
@@ -298,7 +303,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_SYS_MC_MGCG_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr, msg, value))
+ hwmgr, msg, value, NULL))
return -EINVAL;
}
@@ -309,7 +314,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_SYS_MC_MGLS_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr, msg, value))
+ hwmgr, msg, value, NULL))
return -EINVAL;
}
break;
@@ -322,7 +327,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_SYS_DRM_MGCG_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr, msg, value))
+ hwmgr, msg, value, NULL))
return -EINVAL;
}
if (PP_STATE_SUPPORT_LS & *msg_id) {
@@ -332,7 +337,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_SYS_DRM_MGLS_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr, msg, value))
+ hwmgr, msg, value, NULL))
return -EINVAL;
}
break;
@@ -345,7 +350,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_SYS_HDP_MGCG_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr, msg, value))
+ hwmgr, msg, value, NULL))
return -EINVAL;
}
@@ -356,7 +361,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_SYS_HDP_MGLS_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr, msg, value))
+ hwmgr, msg, value, NULL))
return -EINVAL;
}
break;
@@ -369,7 +374,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_SYS_SDMA_MGCG_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr, msg, value))
+ hwmgr, msg, value, NULL))
return -EINVAL;
}
@@ -380,7 +385,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_SYS_SDMA_MGLS_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr, msg, value))
+ hwmgr, msg, value, NULL))
return -EINVAL;
}
break;
@@ -393,7 +398,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_SYS_ROM_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr, msg, value))
+ hwmgr, msg, value, NULL))
return -EINVAL;
}
break;
@@ -423,8 +428,10 @@ int smu7_powergate_gfx(struct pp_hwmgr *hwmgr, bool enable)
if (enable)
return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_GFX_CU_PG_ENABLE,
- adev->gfx.cu_info.number);
+ adev->gfx.cu_info.number,
+ NULL);
else
return smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_GFX_CU_PG_DISABLE);
+ PPSMC_MSG_GFX_CU_PG_DISABLE,
+ NULL);
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 4795eb66b2b2..753cb2cf6b77 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -186,7 +186,7 @@ static int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
}
if (hwmgr->feature_mask & PP_SMC_VOLTAGE_CONTROL_MASK)
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Enable);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Enable, NULL);
return 0;
}
@@ -493,7 +493,7 @@ static int smu7_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
static int smu7_reset_to_default(struct pp_hwmgr *hwmgr)
{
- return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ResetToDefaults);
+ return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ResetToDefaults, NULL);
}
/**
@@ -979,7 +979,8 @@ static int smu7_enable_vrhot_gpio_interrupt(struct pp_hwmgr *hwmgr)
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_RegulatorHot))
return smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_EnableVRHotGPIOInterrupt);
+ PPSMC_MSG_EnableVRHotGPIOInterrupt,
+ NULL);
return 0;
}
@@ -996,7 +997,7 @@ static int smu7_enable_ulv(struct pp_hwmgr *hwmgr)
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
if (data->ulv_supported)
- return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableULV);
+ return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableULV, NULL);
return 0;
}
@@ -1006,7 +1007,7 @@ static int smu7_disable_ulv(struct pp_hwmgr *hwmgr)
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
if (data->ulv_supported)
- return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableULV);
+ return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableULV, NULL);
return 0;
}
@@ -1015,13 +1016,14 @@ static int smu7_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
{
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SclkDeepSleep)) {
- if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MASTER_DeepSleep_ON))
+ if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MASTER_DeepSleep_ON, NULL))
PP_ASSERT_WITH_CODE(false,
"Attempt to enable Master Deep Sleep switch failed!",
return -EINVAL);
} else {
if (smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_MASTER_DeepSleep_OFF)) {
+ PPSMC_MSG_MASTER_DeepSleep_OFF,
+ NULL)) {
PP_ASSERT_WITH_CODE(false,
"Attempt to disable Master Deep Sleep switch failed!",
return -EINVAL);
@@ -1036,7 +1038,8 @@ static int smu7_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SclkDeepSleep)) {
if (smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_MASTER_DeepSleep_OFF)) {
+ PPSMC_MSG_MASTER_DeepSleep_OFF,
+ NULL)) {
PP_ASSERT_WITH_CODE(false,
"Attempt to disable Master Deep Sleep switch failed!",
return -EINVAL);
@@ -1089,7 +1092,7 @@ static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
smu7_disable_sclk_vce_handshake(hwmgr);
PP_ASSERT_WITH_CODE(
- (0 == smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Enable)),
+ (0 == smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Enable, NULL)),
"Failed to enable SCLK DPM during DPM Start Function!",
return -EINVAL);
}
@@ -1101,7 +1104,8 @@ static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(
(0 == smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_MCLKDPM_Enable)),
+ PPSMC_MSG_MCLKDPM_Enable,
+ NULL)),
"Failed to enable MCLK DPM during DPM Start Function!",
return -EINVAL);
@@ -1172,7 +1176,8 @@ static int smu7_start_dpm(struct pp_hwmgr *hwmgr)
if (0 == data->pcie_dpm_key_disabled) {
PP_ASSERT_WITH_CODE(
(0 == smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_PCIeDPM_Enable)),
+ PPSMC_MSG_PCIeDPM_Enable,
+ NULL)),
"Failed to enable pcie DPM during DPM Start Function!",
return -EINVAL);
}
@@ -1180,7 +1185,8 @@ static int smu7_start_dpm(struct pp_hwmgr *hwmgr)
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_Falcon_QuickTransition)) {
PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_EnableACDCGPIOInterrupt)),
+ PPSMC_MSG_EnableACDCGPIOInterrupt,
+ NULL)),
"Failed to enable AC DC GPIO Interrupt!",
);
}
@@ -1197,7 +1203,7 @@ static int smu7_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
"Trying to disable SCLK DPM when DPM is disabled",
return 0);
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Disable);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Disable, NULL);
}
/* disable MCLK dpm */
@@ -1205,7 +1211,7 @@ static int smu7_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
"Trying to disable MCLK DPM when DPM is disabled",
return 0);
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_Disable);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_Disable, NULL);
}
return 0;
@@ -1226,7 +1232,8 @@ static int smu7_stop_dpm(struct pp_hwmgr *hwmgr)
if (!data->pcie_dpm_key_disabled) {
PP_ASSERT_WITH_CODE(
(smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_PCIeDPM_Disable) == 0),
+ PPSMC_MSG_PCIeDPM_Disable,
+ NULL) == 0),
"Failed to disable pcie DPM during DPM Stop Function!",
return -EINVAL);
}
@@ -1237,7 +1244,7 @@ static int smu7_stop_dpm(struct pp_hwmgr *hwmgr)
"Trying to disable voltage DPM when DPM is disabled",
return 0);
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Disable);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Disable, NULL);
return 0;
}
@@ -1388,7 +1395,7 @@ static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE((0 == tmp_result),
"Failed to enable VR hot GPIO interrupt!", result = tmp_result);
- smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_NoDisplay);
+ smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_NoDisplay, NULL);
tmp_result = smu7_enable_sclk_control(hwmgr);
PP_ASSERT_WITH_CODE((0 == tmp_result),
@@ -1446,14 +1453,14 @@ static int smu7_avfs_control(struct pp_hwmgr *hwmgr, bool enable)
if (!PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) {
PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(
- hwmgr, PPSMC_MSG_EnableAvfs),
+ hwmgr, PPSMC_MSG_EnableAvfs, NULL),
"Failed to enable AVFS!",
return -EINVAL);
}
} else if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) {
PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(
- hwmgr, PPSMC_MSG_DisableAvfs),
+ hwmgr, PPSMC_MSG_DisableAvfs, NULL),
"Failed to disable AVFS!",
return -EINVAL);
}
@@ -2609,7 +2616,8 @@ static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr)
if (level)
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_PCIeDPM_ForceLevel, level);
+ PPSMC_MSG_PCIeDPM_ForceLevel, level,
+ NULL);
}
}
@@ -2623,7 +2631,8 @@ static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr)
if (level)
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SCLKDPM_SetEnabledMask,
- (1 << level));
+ (1 << level),
+ NULL);
}
}
@@ -2637,7 +2646,8 @@ static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr)
if (level)
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_MCLKDPM_SetEnabledMask,
- (1 << level));
+ (1 << level),
+ NULL);
}
}
@@ -2656,14 +2666,16 @@ static int smu7_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
if (data->dpm_level_enable_mask.sclk_dpm_enable_mask)
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SCLKDPM_SetEnabledMask,
- data->dpm_level_enable_mask.sclk_dpm_enable_mask);
+ data->dpm_level_enable_mask.sclk_dpm_enable_mask,
+ NULL);
}
if (!data->mclk_dpm_key_disabled) {
if (data->dpm_level_enable_mask.mclk_dpm_enable_mask)
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_MCLKDPM_SetEnabledMask,
- data->dpm_level_enable_mask.mclk_dpm_enable_mask);
+ data->dpm_level_enable_mask.mclk_dpm_enable_mask,
+ NULL);
}
return 0;
@@ -2678,7 +2690,8 @@ static int smu7_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
if (!data->pcie_dpm_key_disabled) {
smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_PCIeDPM_UnForceLevel);
+ PPSMC_MSG_PCIeDPM_UnForceLevel,
+ NULL);
}
return smu7_upload_dpm_level_enable_mask(hwmgr);
@@ -2696,7 +2709,8 @@ static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr)
data->dpm_level_enable_mask.sclk_dpm_enable_mask);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SCLKDPM_SetEnabledMask,
- (1 << level));
+ (1 << level),
+ NULL);
}
@@ -2706,7 +2720,8 @@ static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr)
data->dpm_level_enable_mask.mclk_dpm_enable_mask);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_MCLKDPM_SetEnabledMask,
- (1 << level));
+ (1 << level),
+ NULL);
}
}
@@ -2716,7 +2731,8 @@ static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr)
data->dpm_level_enable_mask.pcie_dpm_enable_mask);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_PCIeDPM_ForceLevel,
- (level));
+ (level),
+ NULL);
}
}
@@ -3495,21 +3511,20 @@ static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, u32 *query)
(adev->asic_type != CHIP_BONAIRE) &&
(adev->asic_type != CHIP_FIJI) &&
(adev->asic_type != CHIP_TONGA)) {
- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0);
- tmp = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0, &tmp);
*query = tmp;
if (tmp != 0)
return 0;
}
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart, NULL);
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
ixSMU_PM_STATUS_95, 0);
for (i = 0; i < 10; i++) {
msleep(500);
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogSample);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogSample, NULL);
tmp = cgs_read_ind_register(hwmgr->device,
CGS_IND_REG__SMC,
ixSMU_PM_STATUS_95);
@@ -3534,14 +3549,12 @@ static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx,
switch (idx) {
case AMDGPU_PP_SENSOR_GFX_SCLK:
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency);
- sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency, &sclk);
*((uint32_t *)value) = sclk;
*size = 4;
return 0;
case AMDGPU_PP_SENSOR_GFX_MCLK:
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency);
- mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency, &mclk);
*((uint32_t *)value) = mclk;
*size = 4;
return 0;
@@ -3730,7 +3743,8 @@ static int smu7_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
"Trying to freeze SCLK DPM when DPM is disabled",
);
PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_SCLKDPM_FreezeLevel),
+ PPSMC_MSG_SCLKDPM_FreezeLevel,
+ NULL),
"Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!",
return -EINVAL);
}
@@ -3742,7 +3756,8 @@ static int smu7_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
"Trying to freeze MCLK DPM when DPM is disabled",
);
PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_MCLKDPM_FreezeLevel),
+ PPSMC_MSG_MCLKDPM_FreezeLevel,
+ NULL),
"Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!",
return -EINVAL);
}
@@ -3884,7 +3899,8 @@ static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
"Trying to Unfreeze SCLK DPM when DPM is disabled",
);
PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_SCLKDPM_UnfreezeLevel),
+ PPSMC_MSG_SCLKDPM_UnfreezeLevel,
+ NULL),
"Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!",
return -EINVAL);
}
@@ -3896,7 +3912,8 @@ static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
"Trying to Unfreeze MCLK DPM when DPM is disabled",
);
PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_MCLKDPM_UnfreezeLevel),
+ PPSMC_MSG_MCLKDPM_UnfreezeLevel,
+ NULL),
"Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
return -EINVAL);
}
@@ -3949,12 +3966,14 @@ static int smu7_notify_smc_display(struct pp_hwmgr *hwmgr)
if (hwmgr->feature_mask & PP_VBI_TIME_SUPPORT_MASK) {
if (hwmgr->chip_id == CHIP_VEGAM)
smum_send_msg_to_smc_with_parameter(hwmgr,
- (PPSMC_Msg)PPSMC_MSG_SetVBITimeout_VEGAM, data->frame_time_x2);
+ (PPSMC_Msg)PPSMC_MSG_SetVBITimeout_VEGAM, data->frame_time_x2,
+ NULL);
else
smum_send_msg_to_smc_with_parameter(hwmgr,
- (PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2);
+ (PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2,
+ NULL);
}
- return (smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ? 0 : -EINVAL;
+ return (smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_HasDisplay, NULL) == 0) ? 0 : -EINVAL;
}
static int smu7_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
@@ -4040,7 +4059,8 @@ static int smu7_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_f
advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm;
return smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm);
+ PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm,
+ NULL);
}
static int
@@ -4048,7 +4068,7 @@ smu7_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
{
PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay;
- return (smum_send_msg_to_smc(hwmgr, msg) == 0) ? 0 : -1;
+ return (smum_send_msg_to_smc(hwmgr, msg, NULL) == 0) ? 0 : -1;
}
static int
@@ -4132,7 +4152,8 @@ static int smu7_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_f
advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm;
return smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm);
+ PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm,
+ NULL);
}
static const struct amdgpu_irq_src_funcs smu7_irq_funcs = {
@@ -4262,14 +4283,14 @@ static int smu7_check_mc_firmware(struct pp_hwmgr *hwmgr)
if ((hwmgr->chip_id == CHIP_POLARIS10) ||
(hwmgr->chip_id == CHIP_POLARIS11) ||
(hwmgr->chip_id == CHIP_POLARIS12))
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableFFC);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableFFC, NULL);
} else {
data->mem_latency_high = 330;
data->mem_latency_low = 330;
if ((hwmgr->chip_id == CHIP_POLARIS10) ||
(hwmgr->chip_id == CHIP_POLARIS11) ||
(hwmgr->chip_id == CHIP_POLARIS12))
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableFFC);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableFFC, NULL);
}
return 0;
@@ -4413,13 +4434,15 @@ static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
if (!data->sclk_dpm_key_disabled)
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SCLKDPM_SetEnabledMask,
- data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
+ data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask,
+ NULL);
break;
case PP_MCLK:
if (!data->mclk_dpm_key_disabled)
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_MCLKDPM_SetEnabledMask,
- data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
+ data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask,
+ NULL);
break;
case PP_PCIE:
{
@@ -4427,11 +4450,13 @@ static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
if (!data->pcie_dpm_key_disabled) {
if (fls(tmp) != ffs(tmp))
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PCIeDPM_UnForceLevel);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PCIeDPM_UnForceLevel,
+ NULL);
else
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_PCIeDPM_ForceLevel,
- fls(tmp) - 1);
+ fls(tmp) - 1,
+ NULL);
}
break;
}
@@ -4457,8 +4482,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
switch (type) {
case PP_SCLK:
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency);
- clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency, &clock);
for (i = 0; i < sclk_table->count; i++) {
if (clock > sclk_table->dpm_levels[i].value)
@@ -4473,8 +4497,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
(i == now) ? "*" : "");
break;
case PP_MCLK:
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency);
- clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency, &clock);
for (i = 0; i < mclk_table->count; i++) {
if (clock > mclk_table->dpm_levels[i].value)
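
The smu7 hunks above also drop the direct reads of the message-argument register: instead of issuing a message and then peeking mmSMC_MSG_ARG_0 through cgs_read_register(), callers now receive the value through the new output pointer. Illustrative before/after, with names taken from the hunks:

	/* Before: read the SMC's reply straight out of the argument register. */
	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency);
	sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);

	/* After: the wrapper hands the reply back itself. */
	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency, &sclk);
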
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
index 58f5589aaf12..5d4971576111 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
@@ -887,7 +887,10 @@ static int smu7_enable_didt(struct pp_hwmgr *hwmgr, const bool enable)
didt_block |= block_en << TCP_Enable_SHIFT;
if (enable)
- result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_Didt_Block_Function, didt_block);
+ result = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_Didt_Block_Function,
+ didt_block,
+ NULL);
return result;
}
@@ -1009,7 +1012,8 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
if (hwmgr->chip_id == CHIP_POLARIS11) {
result = smum_send_msg_to_smc(hwmgr,
- (uint16_t)(PPSMC_MSG_EnableDpmDidt));
+ (uint16_t)(PPSMC_MSG_EnableDpmDidt),
+ NULL);
PP_ASSERT_WITH_CODE((0 == result),
"Failed to enable DPM DIDT.", goto error);
}
@@ -1042,7 +1046,8 @@ int smu7_disable_didt_config(struct pp_hwmgr *hwmgr)
goto error);
if (hwmgr->chip_id == CHIP_POLARIS11) {
result = smum_send_msg_to_smc(hwmgr,
- (uint16_t)(PPSMC_MSG_DisableDpmDidt));
+ (uint16_t)(PPSMC_MSG_DisableDpmDidt),
+ NULL);
PP_ASSERT_WITH_CODE((0 == result),
"Failed to disable DPM DIDT.", goto error);
}
@@ -1063,7 +1068,8 @@ int smu7_enable_smc_cac(struct pp_hwmgr *hwmgr)
if (PP_CAP(PHM_PlatformCaps_CAC)) {
int smc_result;
smc_result = smum_send_msg_to_smc(hwmgr,
- (uint16_t)(PPSMC_MSG_EnableCac));
+ (uint16_t)(PPSMC_MSG_EnableCac),
+ NULL);
PP_ASSERT_WITH_CODE((0 == smc_result),
"Failed to enable CAC in SMC.", result = -1);
@@ -1079,7 +1085,8 @@ int smu7_disable_smc_cac(struct pp_hwmgr *hwmgr)
if (PP_CAP(PHM_PlatformCaps_CAC) && data->cac_enabled) {
int smc_result = smum_send_msg_to_smc(hwmgr,
- (uint16_t)(PPSMC_MSG_DisableCac));
+ (uint16_t)(PPSMC_MSG_DisableCac),
+ NULL);
PP_ASSERT_WITH_CODE((smc_result == 0),
"Failed to disable CAC in SMC.", result = -1);
@@ -1095,7 +1102,9 @@ int smu7_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
if (data->power_containment_features &
POWERCONTAINMENT_FEATURE_PkgPwrLimit)
return smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_PkgPwrSetLimit, n<<8);
+ PPSMC_MSG_PkgPwrSetLimit,
+ n<<8,
+ NULL);
return 0;
}
@@ -1103,7 +1112,9 @@ static int smu7_set_overdriver_target_tdp(struct pp_hwmgr *hwmgr,
uint32_t target_tdp)
{
return smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
+ PPSMC_MSG_OverDriveSetTargetTdp,
+ target_tdp,
+ NULL);
}
int smu7_enable_power_containment(struct pp_hwmgr *hwmgr)
@@ -1124,7 +1135,8 @@ int smu7_enable_power_containment(struct pp_hwmgr *hwmgr)
if (PP_CAP(PHM_PlatformCaps_PowerContainment)) {
if (data->enable_tdc_limit_feature) {
smc_result = smum_send_msg_to_smc(hwmgr,
- (uint16_t)(PPSMC_MSG_TDCLimitEnable));
+ (uint16_t)(PPSMC_MSG_TDCLimitEnable),
+ NULL);
PP_ASSERT_WITH_CODE((0 == smc_result),
"Failed to enable TDCLimit in SMC.", result = -1;);
if (0 == smc_result)
@@ -1134,7 +1146,8 @@ int smu7_enable_power_containment(struct pp_hwmgr *hwmgr)
if (data->enable_pkg_pwr_tracking_feature) {
smc_result = smum_send_msg_to_smc(hwmgr,
- (uint16_t)(PPSMC_MSG_PkgPwrLimitEnable));
+ (uint16_t)(PPSMC_MSG_PkgPwrLimitEnable),
+ NULL);
PP_ASSERT_WITH_CODE((0 == smc_result),
"Failed to enable PkgPwrTracking in SMC.", result = -1;);
if (0 == smc_result) {
@@ -1163,7 +1176,8 @@ int smu7_disable_power_containment(struct pp_hwmgr *hwmgr)
if (data->power_containment_features &
POWERCONTAINMENT_FEATURE_TDCLimit) {
smc_result = smum_send_msg_to_smc(hwmgr,
- (uint16_t)(PPSMC_MSG_TDCLimitDisable));
+ (uint16_t)(PPSMC_MSG_TDCLimitDisable),
+ NULL);
PP_ASSERT_WITH_CODE((smc_result == 0),
"Failed to disable TDCLimit in SMC.",
result = smc_result);
@@ -1172,7 +1186,8 @@ int smu7_disable_power_containment(struct pp_hwmgr *hwmgr)
if (data->power_containment_features &
POWERCONTAINMENT_FEATURE_DTE) {
smc_result = smum_send_msg_to_smc(hwmgr,
- (uint16_t)(PPSMC_MSG_DisableDTE));
+ (uint16_t)(PPSMC_MSG_DisableDTE),
+ NULL);
PP_ASSERT_WITH_CODE((smc_result == 0),
"Failed to disable DTE in SMC.",
result = smc_result);
@@ -1181,7 +1196,8 @@ int smu7_disable_power_containment(struct pp_hwmgr *hwmgr)
if (data->power_containment_features &
POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
smc_result = smum_send_msg_to_smc(hwmgr,
- (uint16_t)(PPSMC_MSG_PkgPwrLimitDisable));
+ (uint16_t)(PPSMC_MSG_PkgPwrLimitDisable),
+ NULL);
PP_ASSERT_WITH_CODE((smc_result == 0),
"Failed to disable PkgPwrTracking in SMC.",
result = smc_result);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
index 5bdc0df5a9f4..0b30f73649a8 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
@@ -151,8 +151,8 @@ int smu7_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
int result;
if (PP_CAP(PHM_PlatformCaps_ODFuzzyFanControlSupport)) {
- cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_FUZZY);
- result = smum_send_msg_to_smc(hwmgr, PPSMC_StartFanControl);
+ result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_StartFanControl,
+ FAN_CONTROL_FUZZY, NULL);
if (PP_CAP(PHM_PlatformCaps_FanSpeedInTableIsRPM))
hwmgr->hwmgr_func->set_max_fan_rpm_output(hwmgr,
@@ -164,8 +164,8 @@ int smu7_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
advanceFanControlParameters.usMaxFanPWM);
} else {
- cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_TABLE);
- result = smum_send_msg_to_smc(hwmgr, PPSMC_StartFanControl);
+ result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_StartFanControl,
+ FAN_CONTROL_TABLE, NULL);
}
if (!result && hwmgr->thermal_controller.
@@ -173,7 +173,8 @@ int smu7_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
result = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetFanTemperatureTarget,
hwmgr->thermal_controller.
- advanceFanControlParameters.ucTargetTemperature);
+ advanceFanControlParameters.ucTargetTemperature,
+ NULL);
hwmgr->fan_ctrl_enabled = true;
return result;
@@ -183,7 +184,7 @@ int smu7_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
int smu7_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr)
{
hwmgr->fan_ctrl_enabled = false;
- return smum_send_msg_to_smc(hwmgr, PPSMC_StopFanControl);
+ return smum_send_msg_to_smc(hwmgr, PPSMC_StopFanControl, NULL);
}
/**
@@ -372,7 +373,7 @@ static void smu7_thermal_enable_alert(struct pp_hwmgr *hwmgr)
CG_THERMAL_INT, THERM_INT_MASK, alert);
/* send message to SMU to enable internal thermal interrupts */
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Thermal_Cntl_Enable);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Thermal_Cntl_Enable, NULL);
}
/**
@@ -390,7 +391,7 @@ int smu7_thermal_disable_alert(struct pp_hwmgr *hwmgr)
CG_THERMAL_INT, THERM_INT_MASK, alert);
/* send message to SMU to disable internal thermal interrupts */
- return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Thermal_Cntl_Disable);
+ return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Thermal_Cntl_Disable, NULL);
}
/**
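
The same register was used in the other direction as well: smu7_fan_ctrl_start_smc_fan_control() used to stage the fan-control mode in mmSMC_MSG_ARG_0 with cgs_write_register() before sending PPSMC_StartFanControl. With the new API the mode travels as the message parameter, as the thermal hunks above show:

	/* Before: stage the argument by hand, then send a parameterless message. */
	cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_FUZZY);
	result = smum_send_msg_to_smc(hwmgr, PPSMC_StartFanControl);

	/* After: the mode is the message parameter; no response is needed. */
	result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_StartFanControl,
						     FAN_CONTROL_FUZZY, NULL);
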
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
index 019d6a206492..a6c6a793e98e 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
@@ -162,8 +162,10 @@ static uint32_t smu8_get_max_sclk_level(struct pp_hwmgr *hwmgr)
struct smu8_hwmgr *data = hwmgr->backend;
if (data->max_sclk_level == 0) {
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxSclkLevel);
- data->max_sclk_level = smum_get_argument(hwmgr) + 1;
+ smum_send_msg_to_smc(hwmgr,
+ PPSMC_MSG_GetMaxSclkLevel,
+ &data->max_sclk_level);
+ data->max_sclk_level += 1;
}
return data->max_sclk_level;
@@ -580,7 +582,8 @@ static int smu8_init_uvd_limit(struct pp_hwmgr *hwmgr)
struct smu8_hwmgr *data = hwmgr->backend;
struct phm_uvd_clock_voltage_dependency_table *table =
hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
- unsigned long clock = 0, level;
+ unsigned long clock = 0;
+ uint32_t level;
if (NULL == table || table->count <= 0)
return -EINVAL;
@@ -588,8 +591,7 @@ static int smu8_init_uvd_limit(struct pp_hwmgr *hwmgr)
data->uvd_dpm.soft_min_clk = 0;
data->uvd_dpm.hard_min_clk = 0;
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxUvdLevel);
- level = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxUvdLevel, &level);
if (level < table->count)
clock = table->entries[level].vclk;
@@ -607,7 +609,8 @@ static int smu8_init_vce_limit(struct pp_hwmgr *hwmgr)
struct smu8_hwmgr *data = hwmgr->backend;
struct phm_vce_clock_voltage_dependency_table *table =
hwmgr->dyn_state.vce_clock_voltage_dependency_table;
- unsigned long clock = 0, level;
+ unsigned long clock = 0;
+ uint32_t level;
if (NULL == table || table->count <= 0)
return -EINVAL;
@@ -615,8 +618,7 @@ static int smu8_init_vce_limit(struct pp_hwmgr *hwmgr)
data->vce_dpm.soft_min_clk = 0;
data->vce_dpm.hard_min_clk = 0;
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxEclkLevel);
- level = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxEclkLevel, &level);
if (level < table->count)
clock = table->entries[level].ecclk;
@@ -634,7 +636,8 @@ static int smu8_init_acp_limit(struct pp_hwmgr *hwmgr)
struct smu8_hwmgr *data = hwmgr->backend;
struct phm_acp_clock_voltage_dependency_table *table =
hwmgr->dyn_state.acp_clock_voltage_dependency_table;
- unsigned long clock = 0, level;
+ unsigned long clock = 0;
+ uint32_t level;
if (NULL == table || table->count <= 0)
return -EINVAL;
@@ -642,8 +645,7 @@ static int smu8_init_acp_limit(struct pp_hwmgr *hwmgr)
data->acp_dpm.soft_min_clk = 0;
data->acp_dpm.hard_min_clk = 0;
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxAclkLevel);
- level = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxAclkLevel, &level);
if (level < table->count)
clock = table->entries[level].acpclk;
@@ -665,7 +667,7 @@ static void smu8_init_power_gate_state(struct pp_hwmgr *hwmgr)
#ifdef CONFIG_DRM_AMD_ACP
data->acp_power_gated = false;
#else
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerOFF);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerOFF, NULL);
data->acp_power_gated = true;
#endif
@@ -708,7 +710,8 @@ static int smu8_update_sclk_limit(struct pp_hwmgr *hwmgr)
PPSMC_MSG_SetSclkHardMin,
smu8_get_sclk_level(hwmgr,
data->sclk_dpm.hard_min_clk,
- PPSMC_MSG_SetSclkHardMin));
+ PPSMC_MSG_SetSclkHardMin),
+ NULL);
}
clock = data->sclk_dpm.soft_min_clk;
@@ -731,7 +734,8 @@ static int smu8_update_sclk_limit(struct pp_hwmgr *hwmgr)
PPSMC_MSG_SetSclkSoftMin,
smu8_get_sclk_level(hwmgr,
data->sclk_dpm.soft_min_clk,
- PPSMC_MSG_SetSclkSoftMin));
+ PPSMC_MSG_SetSclkSoftMin),
+ NULL);
}
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
@@ -742,7 +746,8 @@ static int smu8_update_sclk_limit(struct pp_hwmgr *hwmgr)
PPSMC_MSG_SetSclkSoftMax,
smu8_get_sclk_level(hwmgr,
data->sclk_dpm.soft_max_clk,
- PPSMC_MSG_SetSclkSoftMax));
+ PPSMC_MSG_SetSclkSoftMax),
+ NULL);
}
return 0;
@@ -760,7 +765,8 @@ static int smu8_set_deep_sleep_sclk_threshold(struct pp_hwmgr *hwmgr)
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetMinDeepSleepSclk,
- clks);
+ clks,
+ NULL);
}
return 0;
@@ -773,7 +779,8 @@ static int smu8_set_watermark_threshold(struct pp_hwmgr *hwmgr)
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetWatermarkFrequency,
- data->sclk_dpm.soft_max_clk);
+ data->sclk_dpm.soft_max_clk,
+ NULL);
return 0;
}
@@ -788,13 +795,15 @@ static int smu8_nbdpm_pstate_enable_disable(struct pp_hwmgr *hwmgr, bool enable,
return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_EnableLowMemoryPstate,
- (lock ? 1 : 0));
+ (lock ? 1 : 0),
+ NULL);
} else {
PP_DBG_LOG("disable Low Memory PState.\n");
return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DisableLowMemoryPstate,
- (lock ? 1 : 0));
+ (lock ? 1 : 0),
+ NULL);
}
}
@@ -814,7 +823,8 @@ static int smu8_disable_nb_dpm(struct pp_hwmgr *hwmgr)
ret = smum_send_msg_to_smc_with_parameter(
hwmgr,
PPSMC_MSG_DisableAllSmuFeatures,
- dpm_features);
+ dpm_features,
+ NULL);
if (ret == 0)
data->is_nb_dpm_enabled = false;
}
@@ -835,7 +845,8 @@ static int smu8_enable_nb_dpm(struct pp_hwmgr *hwmgr)
ret = smum_send_msg_to_smc_with_parameter(
hwmgr,
PPSMC_MSG_EnableAllSmuFeatures,
- dpm_features);
+ dpm_features,
+ NULL);
if (ret == 0)
data->is_nb_dpm_enabled = true;
}
@@ -953,7 +964,8 @@ static int smu8_start_dpm(struct pp_hwmgr *hwmgr)
return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_EnableAllSmuFeatures,
- SCLK_DPM_MASK);
+ SCLK_DPM_MASK,
+ NULL);
}
static int smu8_stop_dpm(struct pp_hwmgr *hwmgr)
@@ -967,7 +979,8 @@ static int smu8_stop_dpm(struct pp_hwmgr *hwmgr)
data->dpm_flags &= ~DPMFlags_SCLK_Enabled;
ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DisableAllSmuFeatures,
- dpm_features);
+ dpm_features,
+ NULL);
}
return ret;
}
@@ -983,13 +996,15 @@ static int smu8_program_bootup_state(struct pp_hwmgr *hwmgr)
PPSMC_MSG_SetSclkSoftMin,
smu8_get_sclk_level(hwmgr,
data->sclk_dpm.soft_min_clk,
- PPSMC_MSG_SetSclkSoftMin));
+ PPSMC_MSG_SetSclkSoftMin),
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMax,
smu8_get_sclk_level(hwmgr,
data->sclk_dpm.soft_max_clk,
- PPSMC_MSG_SetSclkSoftMax));
+ PPSMC_MSG_SetSclkSoftMax),
+ NULL);
return 0;
}
@@ -1127,13 +1142,15 @@ static int smu8_phm_force_dpm_highest(struct pp_hwmgr *hwmgr)
PPSMC_MSG_SetSclkSoftMin,
smu8_get_sclk_level(hwmgr,
data->sclk_dpm.soft_max_clk,
- PPSMC_MSG_SetSclkSoftMin));
+ PPSMC_MSG_SetSclkSoftMin),
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMax,
smu8_get_sclk_level(hwmgr,
data->sclk_dpm.soft_max_clk,
- PPSMC_MSG_SetSclkSoftMax));
+ PPSMC_MSG_SetSclkSoftMax),
+ NULL);
return 0;
}
@@ -1167,13 +1184,15 @@ static int smu8_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
PPSMC_MSG_SetSclkSoftMin,
smu8_get_sclk_level(hwmgr,
data->sclk_dpm.soft_min_clk,
- PPSMC_MSG_SetSclkSoftMin));
+ PPSMC_MSG_SetSclkSoftMin),
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMax,
smu8_get_sclk_level(hwmgr,
data->sclk_dpm.soft_max_clk,
- PPSMC_MSG_SetSclkSoftMax));
+ PPSMC_MSG_SetSclkSoftMax),
+ NULL);
return 0;
}
@@ -1186,13 +1205,15 @@ static int smu8_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr)
PPSMC_MSG_SetSclkSoftMax,
smu8_get_sclk_level(hwmgr,
data->sclk_dpm.soft_min_clk,
- PPSMC_MSG_SetSclkSoftMax));
+ PPSMC_MSG_SetSclkSoftMax),
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMin,
smu8_get_sclk_level(hwmgr,
data->sclk_dpm.soft_min_clk,
- PPSMC_MSG_SetSclkSoftMin));
+ PPSMC_MSG_SetSclkSoftMin),
+ NULL);
return 0;
}
@@ -1227,7 +1248,7 @@ static int smu8_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
static int smu8_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr)
{
if (PP_CAP(PHM_PlatformCaps_UVDPowerGating))
- return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UVDPowerOFF);
+ return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UVDPowerOFF, NULL);
return 0;
}
@@ -1237,7 +1258,8 @@ static int smu8_dpm_powerup_uvd(struct pp_hwmgr *hwmgr)
return smum_send_msg_to_smc_with_parameter(
hwmgr,
PPSMC_MSG_UVDPowerON,
- PP_CAP(PHM_PlatformCaps_UVDDynamicPowerGating) ? 1 : 0);
+ PP_CAP(PHM_PlatformCaps_UVDDynamicPowerGating) ? 1 : 0,
+ NULL);
}
return 0;
@@ -1259,15 +1281,20 @@ static int smu8_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr)
PPSMC_MSG_SetEclkHardMin,
smu8_get_eclk_level(hwmgr,
data->vce_dpm.hard_min_clk,
- PPSMC_MSG_SetEclkHardMin));
+ PPSMC_MSG_SetEclkHardMin),
+ NULL);
} else {
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetEclkHardMin, 0);
+ PPSMC_MSG_SetEclkHardMin,
+ 0,
+ NULL);
/* disable ECLK DPM 0. Otherwise VCE could hang if
* switching SCLK from DPM 0 to 6/7 */
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetEclkSoftMin, 1);
+ PPSMC_MSG_SetEclkSoftMin,
+ 1,
+ NULL);
}
return 0;
}
@@ -1276,7 +1303,8 @@ static int smu8_dpm_powerdown_vce(struct pp_hwmgr *hwmgr)
{
if (PP_CAP(PHM_PlatformCaps_VCEPowerGating))
return smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_VCEPowerOFF);
+ PPSMC_MSG_VCEPowerOFF,
+ NULL);
return 0;
}
@@ -1284,7 +1312,8 @@ static int smu8_dpm_powerup_vce(struct pp_hwmgr *hwmgr)
{
if (PP_CAP(PHM_PlatformCaps_VCEPowerGating))
return smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_VCEPowerON);
+ PPSMC_MSG_VCEPowerON,
+ NULL);
return 0;
}
@@ -1435,7 +1464,8 @@ static void smu8_hw_print_display_cfg(
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDisplaySizePowerParams,
- data);
+ data,
+ NULL);
}
return 0;
@@ -1497,10 +1527,12 @@ static int smu8_force_clock_level(struct pp_hwmgr *hwmgr,
case PP_SCLK:
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMin,
- mask);
+ mask,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMax,
- mask);
+ mask,
+ NULL);
break;
default:
break;
@@ -1753,9 +1785,10 @@ static int smu8_read_sensor(struct pp_hwmgr *hwmgr, int idx,
*((uint32_t *)value) = 0;
return 0;
case AMDGPU_PP_SENSOR_GPU_LOAD:
- result = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetAverageGraphicsActivity);
+ result = smum_send_msg_to_smc(hwmgr,
+ PPSMC_MSG_GetAverageGraphicsActivity,
+ &activity_percent);
if (0 == result) {
- activity_percent = cgs_read_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0);
activity_percent = activity_percent > 100 ? 100 : activity_percent;
} else {
activity_percent = 50;
@@ -1785,20 +1818,25 @@ static int smu8_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
{
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DramAddrHiVirtual,
- mc_addr_hi);
+ mc_addr_hi,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DramAddrLoVirtual,
- mc_addr_low);
+ mc_addr_low,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DramAddrHiPhysical,
- virtual_addr_hi);
+ virtual_addr_hi,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DramAddrLoPhysical,
- virtual_addr_low);
+ virtual_addr_low,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DramBufferSize,
- size);
+ size,
+ NULL);
return 0;
}
@@ -1827,12 +1865,16 @@ static int smu8_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
data->dpm_flags |= DPMFlags_UVD_Enabled;
dpm_features |= UVD_DPM_MASK;
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_EnableAllSmuFeatures, dpm_features);
+ PPSMC_MSG_EnableAllSmuFeatures,
+ dpm_features,
+ NULL);
} else {
dpm_features |= UVD_DPM_MASK;
data->dpm_flags &= ~DPMFlags_UVD_Enabled;
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_DisableAllSmuFeatures, dpm_features);
+ PPSMC_MSG_DisableAllSmuFeatures,
+ dpm_features,
+ NULL);
}
return 0;
}
@@ -1854,7 +1896,8 @@ int smu8_dpm_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
PPSMC_MSG_SetUvdHardMin,
smu8_get_uvd_level(hwmgr,
data->uvd_dpm.hard_min_clk,
- PPSMC_MSG_SetUvdHardMin));
+ PPSMC_MSG_SetUvdHardMin),
+ NULL);
smu8_enable_disable_uvd_dpm(hwmgr, true);
} else {
@@ -1878,12 +1921,16 @@ static int smu8_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
data->dpm_flags |= DPMFlags_VCE_Enabled;
dpm_features |= VCE_DPM_MASK;
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_EnableAllSmuFeatures, dpm_features);
+ PPSMC_MSG_EnableAllSmuFeatures,
+ dpm_features,
+ NULL);
} else {
dpm_features |= VCE_DPM_MASK;
data->dpm_flags &= ~DPMFlags_VCE_Enabled;
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_DisableAllSmuFeatures, dpm_features);
+ PPSMC_MSG_DisableAllSmuFeatures,
+ dpm_features,
+ NULL);
}
return 0;
@@ -1898,9 +1945,9 @@ static void smu8_dpm_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate)
return;
if (bgate)
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerOFF);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerOFF, NULL);
else
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerON);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerON, NULL);
}
static void smu8_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
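
One knock-on effect visible in the smu8 hunks: locals that used to be filled via smum_get_argument() are retyped from unsigned long to uint32_t, because the new wrappers write through a uint32_t pointer and a 64-bit unsigned long cannot be passed by address. Condensed from the UVD-limit hunk:

	uint32_t level;	/* was: unsigned long level; */

	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxUvdLevel, &level);
	if (level < table->count)
		clock = table->entries[level].vclk;
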
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
index d09690fca452..60b5ca974356 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
@@ -22,6 +22,7 @@
*/
#include <linux/pci.h>
+#include <linux/reboot.h>
#include "hwmgr.h"
#include "pp_debug.h"
@@ -557,7 +558,9 @@ void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr)
if (req_vddc <= vddc_table->entries[i].vddc) {
req_volt = (((uint32_t)vddc_table->entries[i].vddc) * VOLTAGE_SCALE);
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_VddC_Request, req_volt);
+ PPSMC_MSG_VddC_Request,
+ req_volt,
+ NULL);
return;
}
}
@@ -593,37 +596,43 @@ int phm_irq_process(struct amdgpu_device *adev,
uint32_t src_id = entry->src_id;
if (client_id == AMDGPU_IRQ_CLIENTID_LEGACY) {
- if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH)
- pr_warn("GPU over temperature range detected on PCIe %d:%d.%d!\n",
- PCI_BUS_NUM(adev->pdev->devfn),
- PCI_SLOT(adev->pdev->devfn),
- PCI_FUNC(adev->pdev->devfn));
- else if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW)
- pr_warn("GPU under temperature range detected on PCIe %d:%d.%d!\n",
- PCI_BUS_NUM(adev->pdev->devfn),
- PCI_SLOT(adev->pdev->devfn),
- PCI_FUNC(adev->pdev->devfn));
- else if (src_id == VISLANDS30_IV_SRCID_GPIO_19)
- pr_warn("GPU Critical Temperature Fault detected on PCIe %d:%d.%d!\n",
- PCI_BUS_NUM(adev->pdev->devfn),
- PCI_SLOT(adev->pdev->devfn),
- PCI_FUNC(adev->pdev->devfn));
+ if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH) {
+ dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
+ /*
+ * SW CTF just occurred.
+ * Try to do a graceful shutdown to prevent further damage.
+ */
+ dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
+ orderly_poweroff(true);
+ } else if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW)
+ dev_emerg(adev->dev, "ERROR: GPU under temperature range detected!\n");
+ else if (src_id == VISLANDS30_IV_SRCID_GPIO_19) {
+ dev_emerg(adev->dev, "ERROR: GPU HW Critical Temperature Fault(aka CTF) detected!\n");
+ /*
+ * HW CTF just occurred. Shutdown to prevent further damage.
+ */
+ dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU HW CTF!\n");
+ orderly_poweroff(true);
+ }
} else if (client_id == SOC15_IH_CLIENTID_THM) {
- if (src_id == 0)
- pr_warn("GPU over temperature range detected on PCIe %d:%d.%d!\n",
- PCI_BUS_NUM(adev->pdev->devfn),
- PCI_SLOT(adev->pdev->devfn),
- PCI_FUNC(adev->pdev->devfn));
- else
- pr_warn("GPU under temperature range detected on PCIe %d:%d.%d!\n",
- PCI_BUS_NUM(adev->pdev->devfn),
- PCI_SLOT(adev->pdev->devfn),
- PCI_FUNC(adev->pdev->devfn));
- } else if (client_id == SOC15_IH_CLIENTID_ROM_SMUIO)
- pr_warn("GPU Critical Temperature Fault detected on PCIe %d:%d.%d!\n",
- PCI_BUS_NUM(adev->pdev->devfn),
- PCI_SLOT(adev->pdev->devfn),
- PCI_FUNC(adev->pdev->devfn));
+ if (src_id == 0) {
+ dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
+ /*
+ * SW CTF just occurred.
+ * Try to do a graceful shutdown to prevent further damage.
+ */
+ dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
+ orderly_poweroff(true);
+ } else
+ dev_emerg(adev->dev, "ERROR: GPU under temperature range detected!\n");
+ } else if (client_id == SOC15_IH_CLIENTID_ROM_SMUIO) {
+ dev_emerg(adev->dev, "ERROR: GPU HW Critical Temperature Fault(aka CTF) detected!\n");
+ /*
+ * HW CTF just occurred. Shutdown to prevent further damage.
+ */
+ dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU HW CTF!\n");
+ orderly_poweroff(true);
+ }
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.c
index d168af4a4d78..46bb16c29cf6 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.c
@@ -98,7 +98,7 @@ int vega10_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
if (state == BACO_STATE_IN) {
if (soc15_baco_program_registers(hwmgr, pre_baco_tbl,
ARRAY_SIZE(pre_baco_tbl))) {
- if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnterBaco))
+ if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnterBaco, NULL))
return -EINVAL;
if (soc15_baco_program_registers(hwmgr, enter_baco_tbl,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index f29f95be1e56..675c7cab7cfc 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -484,8 +484,9 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
if (data->registry_data.vr0hot_enabled)
data->smu_features[GNLD_VR0HOT].supported = true;
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion);
- hwmgr->smu_version = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr,
+ PPSMC_MSG_GetSmuVersion,
+ &hwmgr->smu_version);
/* ACG firmware has major version 5 */
if ((hwmgr->smu_version & 0xff000000) == 0x5000000)
data->smu_features[GNLD_ACG].supported = true;
@@ -503,10 +504,8 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
data->smu_features[GNLD_PCC_LIMIT].supported = true;
/* Get the SN to turn into a Unique ID */
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32);
- top32 = smum_get_argument(hwmgr);
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32);
- bottom32 = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32);
adev->unique_id = ((uint64_t)bottom32 << 32) | top32;
}
@@ -993,7 +992,10 @@ static int vega10_setup_asic_task(struct pp_hwmgr *hwmgr)
"Failed to set up led dpm config!",
return -EINVAL);
- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_NumOfDisplays, 0);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_NumOfDisplays,
+ 0,
+ NULL);
return 0;
}
@@ -2303,16 +2305,15 @@ static int vega10_acg_enable(struct pp_hwmgr *hwmgr)
data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_bitmap))
data->smu_features[GNLD_DPM_PREFETCHER].enabled = true;
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_InitializeAcg);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_InitializeAcg, NULL);
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc);
- agc_btc_response = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc, &agc_btc_response);
if (1 == agc_btc_response) {
if (1 == data->acg_loop_state)
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgInClosedLoop);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgInClosedLoop, NULL);
else if (2 == data->acg_loop_state)
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgInOpenLoop);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgInOpenLoop, NULL);
if (0 == vega10_enable_smc_features(hwmgr, true,
data->smu_features[GNLD_ACG].smu_feature_bitmap))
data->smu_features[GNLD_ACG].enabled = true;
@@ -2429,11 +2430,9 @@ static int vega10_populate_and_upload_avfs_fuse_override(struct pp_hwmgr *hwmgr)
struct vega10_hwmgr *data = hwmgr->backend;
AvfsFuseOverride_t *avfs_fuse_table = &(data->smc_state_table.avfs_fuse_override_table);
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32);
- top32 = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32);
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32);
- bottom32 = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32);
serial_number = ((uint64_t)bottom32 << 32) | top32;
@@ -2610,14 +2609,16 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
if (0 != boot_up_values.usVddc) {
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetFloorSocVoltage,
- (boot_up_values.usVddc * 4));
+ (boot_up_values.usVddc * 4),
+ NULL);
data->vbios_boot_state.bsoc_vddc_lock = true;
} else {
data->vbios_boot_state.bsoc_vddc_lock = false;
}
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetMinDeepSleepDcefclk,
- (uint32_t)(data->vbios_boot_state.dcef_clock / 100));
+ (uint32_t)(data->vbios_boot_state.dcef_clock / 100),
+ NULL);
}
result = vega10_populate_avfs_parameters(hwmgr);
@@ -2904,7 +2905,8 @@ static int vega10_start_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
if (data->vbios_boot_state.bsoc_vddc_lock) {
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetFloorSocVoltage, 0);
+ PPSMC_MSG_SetFloorSocVoltage, 0,
+ NULL);
data->vbios_boot_state.bsoc_vddc_lock = false;
}
@@ -2947,7 +2949,8 @@ static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
vega10_enable_disable_PCC_limit_feature(hwmgr, true);
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_ConfigureTelemetry, data->config_telemetry);
+ PPSMC_MSG_ConfigureTelemetry, data->config_telemetry,
+ NULL);
tmp_result = vega10_construct_voltage_tables(hwmgr);
PP_ASSERT_WITH_CODE(!tmp_result,
@@ -3528,7 +3531,8 @@ static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr)
data->dpm_table.gfx_table.dpm_state.soft_min_level) {
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMinGfxclkByIndex,
- data->smc_state_table.gfx_boot_level);
+ data->smc_state_table.gfx_boot_level,
+ NULL);
data->dpm_table.gfx_table.dpm_state.soft_min_level =
data->smc_state_table.gfx_boot_level;
@@ -3543,11 +3547,13 @@ static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr)
socclk_idx = vega10_get_soc_index_for_max_uclk(hwmgr);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMinSocclkByIndex,
- socclk_idx);
+ socclk_idx,
+ NULL);
} else {
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMinUclkByIndex,
- data->smc_state_table.mem_boot_level);
+ data->smc_state_table.mem_boot_level,
+ NULL);
}
data->dpm_table.mem_table.dpm_state.soft_min_level =
data->smc_state_table.mem_boot_level;
@@ -3562,7 +3568,8 @@ static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr)
data->dpm_table.soc_table.dpm_state.soft_min_level) {
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMinSocclkByIndex,
- data->smc_state_table.soc_boot_level);
+ data->smc_state_table.soc_boot_level,
+ NULL);
data->dpm_table.soc_table.dpm_state.soft_min_level =
data->smc_state_table.soc_boot_level;
}
@@ -3582,7 +3589,8 @@ static int vega10_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
data->dpm_table.gfx_table.dpm_state.soft_max_level) {
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxGfxclkByIndex,
- data->smc_state_table.gfx_max_level);
+ data->smc_state_table.gfx_max_level,
+ NULL);
data->dpm_table.gfx_table.dpm_state.soft_max_level =
data->smc_state_table.gfx_max_level;
}
@@ -3593,7 +3601,8 @@ static int vega10_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
data->dpm_table.mem_table.dpm_state.soft_max_level) {
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxUclkByIndex,
- data->smc_state_table.mem_max_level);
+ data->smc_state_table.mem_max_level,
+ NULL);
data->dpm_table.mem_table.dpm_state.soft_max_level =
data->smc_state_table.mem_max_level;
}
@@ -3607,7 +3616,8 @@ static int vega10_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
data->dpm_table.soc_table.dpm_state.soft_max_level) {
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxSocclkByIndex,
- data->smc_state_table.soc_max_level);
+ data->smc_state_table.soc_max_level,
+ NULL);
data->dpm_table.soc_table.dpm_state.soft_max_level =
data->smc_state_table.soc_max_level;
}
@@ -3694,7 +3704,8 @@ static int vega10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
/* This message will also enable SmcToHost Interrupt */
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetLowGfxclkInterruptThreshold,
- (uint32_t)low_sclk_interrupt_threshold);
+ (uint32_t)low_sclk_interrupt_threshold,
+ NULL);
}
return 0;
@@ -3801,8 +3812,7 @@ static int vega10_get_gpu_power(struct pp_hwmgr *hwmgr,
if (!query)
return -EINVAL;
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrPkgPwr);
- value = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrPkgPwr, &value);
/* SMC returning actual watts, keep consistent with legacy asics, low 8 bit as 8 fractional bits */
*query = value << 8;
@@ -3822,13 +3832,11 @@ static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
switch (idx) {
case AMDGPU_PP_SENSOR_GFX_SCLK:
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetAverageGfxclkActualFrequency);
- sclk_mhz = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetAverageGfxclkActualFrequency, &sclk_mhz);
*((uint32_t *)value) = sclk_mhz * 100;
break;
case AMDGPU_PP_SENSOR_GFX_MCLK:
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex);
- mclk_idx = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex, &mclk_idx);
if (mclk_idx < dpm_table->mem_table.count) {
*((uint32_t *)value) = dpm_table->mem_table.dpm_levels[mclk_idx].value;
*size = 4;
@@ -3837,8 +3845,8 @@ static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
}
break;
case AMDGPU_PP_SENSOR_GPU_LOAD:
- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetAverageGfxActivity, 0);
- activity_percent = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetAverageGfxActivity, 0,
+ &activity_percent);
*((uint32_t *)value) = activity_percent > 100 ? 100 : activity_percent;
*size = 4;
break;
@@ -3847,14 +3855,14 @@ static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
*size = 4;
break;
case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetTemperatureHotspot);
- *((uint32_t *)value) = smum_get_argument(hwmgr) *
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetTemperatureHotspot, (uint32_t *)value);
+ *((uint32_t *)value) = *((uint32_t *)value) *
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
*size = 4;
break;
case AMDGPU_PP_SENSOR_MEM_TEMP:
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetTemperatureHBM);
- *((uint32_t *)value) = smum_get_argument(hwmgr) *
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetTemperatureHBM, (uint32_t *)value);
+ *((uint32_t *)value) = *((uint32_t *)value) *
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
*size = 4;
break;
@@ -3893,7 +3901,8 @@ static void vega10_notify_smc_display_change(struct pp_hwmgr *hwmgr,
{
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetUclkFastSwitch,
- has_disp ? 1 : 0);
+ has_disp ? 1 : 0,
+ NULL);
}
int vega10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
@@ -3928,7 +3937,8 @@ int vega10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
clk_request = (clk_freq << 16) | clk_select;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_RequestDisplayClockByFreq,
- clk_request);
+ clk_request,
+ NULL);
}
return result;
@@ -3990,7 +4000,8 @@ static int vega10_notify_smc_display_config_after_ps_adjustment(
if (!vega10_display_clock_voltage_request(hwmgr, &clock_req)) {
smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk,
- min_clocks.dcefClockInSR / 100);
+ min_clocks.dcefClockInSR / 100,
+ NULL);
} else {
pr_info("Attempt to set Hard Min for DCEFCLK Failed!");
}
@@ -4000,7 +4011,8 @@ static int vega10_notify_smc_display_config_after_ps_adjustment(
if (min_clocks.memoryClock != 0) {
idx = vega10_get_uclk_index(hwmgr, mclk_table, min_clocks.memoryClock);
- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMinUclkByIndex, idx);
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMinUclkByIndex, idx,
+ NULL);
data->dpm_table.mem_table.dpm_state.soft_min_level= idx;
}
@@ -4541,8 +4553,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
if (data->registry_data.sclk_dpm_key_disabled)
break;
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex);
- now = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex, &now);
if (hwmgr->pp_one_vf &&
(hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK))
@@ -4558,8 +4569,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
if (data->registry_data.mclk_dpm_key_disabled)
break;
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex);
- now = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex, &now);
for (i = 0; i < mclk_table->count; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
@@ -4570,8 +4580,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
if (data->registry_data.socclk_dpm_key_disabled)
break;
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentSocclkIndex);
- now = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentSocclkIndex, &now);
for (i = 0; i < soc_table->count; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
@@ -4583,8 +4592,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
break;
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_GetClockFreqMHz, CLK_DCEFCLK);
- now = smum_get_argument(hwmgr);
+ PPSMC_MSG_GetClockFreqMHz, CLK_DCEFCLK, &now);
for (i = 0; i < dcef_table->count; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
@@ -4593,8 +4601,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
"*" : "");
break;
case PP_PCIE:
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentLinkIndex);
- now = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentLinkIndex, &now);
for (i = 0; i < pcie_table->count; i++)
size += sprintf(buf + size, "%d: %s %s\n", i,
@@ -4658,7 +4665,8 @@ static int vega10_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
if (data->water_marks_bitmap & WaterMarksLoaded) {
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_NumOfDisplays, hwmgr->display_config->num_display);
+ PPSMC_MSG_NumOfDisplays, hwmgr->display_config->num_display,
+ NULL);
}
return result;
@@ -4924,21 +4932,26 @@ static int vega10_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
{
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSystemVirtualDramAddrHigh,
- virtual_addr_hi);
+ virtual_addr_hi,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSystemVirtualDramAddrLow,
- virtual_addr_low);
+ virtual_addr_low,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DramLogSetDramAddrHigh,
- mc_addr_hi);
+ mc_addr_hi,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DramLogSetDramAddrLow,
- mc_addr_low);
+ mc_addr_low,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DramLogSetDramSize,
- size);
+ size,
+ NULL);
return 0;
}
@@ -5040,12 +5053,14 @@ static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetCustomGfxDpmParameters,
busy_set_point | FPS<<8 |
- use_rlc_busy << 16 | min_active_level<<24);
+ use_rlc_busy << 16 | min_active_level<<24,
+ NULL);
}
out:
smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
- 1 << power_profile_mode);
+ 1 << power_profile_mode,
+ NULL);
hwmgr->power_profile_mode = power_profile_mode;
return 0;
@@ -5302,7 +5317,7 @@ static int vega10_set_mp1_state(struct pp_hwmgr *hwmgr,
return 0;
}
- PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg)) == 0,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg, NULL)) == 0,
"[PrepareMp1] Failed!",
return ret);
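
The vega10_hwmgr.c hunks above all follow the same mechanical conversion: the old two-step pattern (send the message, then fetch the SMU's reply with a separate smum_get_argument() call) becomes a single call that takes an output pointer, with NULL passed when the caller has no use for the reply. A minimal sketch of the before/after shapes, written as a hypothetical helper rather than code taken from the patch:

    /* Hypothetical helper contrasting the two calling styles. */
    static int example_read_fan_rpm(struct pp_hwmgr *hwmgr, uint32_t *rpm)
    {
    	/*
    	 * Old style (removed by this series): the reply was read back
    	 * in a second call, leaving a window for another message:
    	 *
    	 *	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentRpm);
    	 *	*rpm = smum_get_argument(hwmgr);
    	 */

    	/* New style: the reply comes back through the out pointer;
    	 * callers that ignore it simply pass NULL. */
    	return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentRpm, rpm);
    }
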
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
index 0a677d4bc87b..9757d47dd6b8 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
@@ -651,18 +651,6 @@ static const struct vega10_didt_config_reg PSMSEEDCStallDelayConfig_Vega10[] =
{ 0xFFFFFFFF } /* End of list */
};
-static const struct vega10_didt_config_reg PSMSEEDCThresholdConfig_Vega10[] =
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- * Offset Mask Shift Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
- /* SQ EDC THRESHOLD */
- { ixDIDT_SQ_EDC_THRESHOLD, DIDT_SQ_EDC_THRESHOLD__EDC_THRESHOLD_MASK, DIDT_SQ_EDC_THRESHOLD__EDC_THRESHOLD__SHIFT, 0x0000 },
-
- { 0xFFFFFFFF } /* End of list */
-};
-
static const struct vega10_didt_config_reg PSMSEEDCCtrlResetConfig_Vega10[] =
{
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
@@ -707,17 +695,6 @@ static const struct vega10_didt_config_reg PSMSEEDCCtrlConfig_Vega10[] =
{ 0xFFFFFFFF } /* End of list */
};
-static const struct vega10_didt_config_reg PSMGCEDCThresholdConfig_vega10[] =
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- * Offset Mask Shift Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
- { mmGC_EDC_THRESHOLD, GC_EDC_THRESHOLD__EDC_THRESHOLD_MASK, GC_EDC_THRESHOLD__EDC_THRESHOLD__SHIFT, 0x0000000 },
-
- { 0xFFFFFFFF } /* End of list */
-};
-
static const struct vega10_didt_config_reg PSMGCEDCDroopCtrlConfig_vega10[] =
{
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
@@ -925,7 +902,8 @@ static void vega10_didt_set_mask(struct pp_hwmgr *hwmgr, const bool enable)
/* For Vega10, SMC does not support any mask yet. */
if (enable)
- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ConfigureGfxDidt, didt_block_info);
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ConfigureGfxDidt, didt_block_info,
+ NULL);
}
@@ -1327,7 +1305,8 @@ int vega10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
if (data->registry_data.enable_pkg_pwr_tracking_feature)
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetPptLimit, n);
+ PPSMC_MSG_SetPptLimit, n,
+ NULL);
return 0;
}
@@ -1393,7 +1372,8 @@ static void vega10_set_overdrive_target_percentage(struct pp_hwmgr *hwmgr,
uint32_t adjust_percent)
{
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_OverDriveSetPercentage, adjust_percent);
+ PPSMC_MSG_OverDriveSetPercentage, adjust_percent,
+ NULL);
}
int vega10_power_control_set_level(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
index ba8763daa380..7783c7fd7ccb 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
@@ -31,8 +31,7 @@
static int vega10_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm)
{
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentRpm);
- *current_rpm = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentRpm, current_rpm);
return 0;
}
@@ -520,7 +519,8 @@ int vega10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetFanTemperatureTarget,
- (uint32_t)table->FanTargetTemperature);
+ (uint32_t)table->FanTargetTemperature,
+ NULL);
table->FanPwmMin = hwmgr->thermal_controller.
advanceFanControlParameters.usPWMMin * 255 / 100;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_baco.c
index 9d8ca94a8f0c..bc53cce4f32d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_baco.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_baco.c
@@ -96,7 +96,7 @@ int vega12_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
if (state == BACO_STATE_IN) {
if (soc15_baco_program_registers(hwmgr, pre_baco_tbl,
ARRAY_SIZE(pre_baco_tbl))) {
- if (smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_EnterBaco, 0))
+ if (smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_EnterBaco, 0, NULL))
return -EINVAL;
if (soc15_baco_program_registers(hwmgr, enter_baco_tbl,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
index aca61d1ff3c2..f4d1692cccf3 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
@@ -357,10 +357,8 @@ static void vega12_init_dpm_defaults(struct pp_hwmgr *hwmgr)
}
/* Get the SN to turn into a Unique ID */
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32);
- top32 = smum_get_argument(hwmgr);
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32);
- bottom32 = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32);
adev->unique_id = ((uint64_t)bottom32 << 32) | top32;
}
@@ -483,16 +481,12 @@ static int vega12_get_number_of_dpm_level(struct pp_hwmgr *hwmgr,
ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_GetDpmFreqByIndex,
- (clk_id << 16 | 0xFF));
+ (clk_id << 16 | 0xFF),
+ num_of_levels);
PP_ASSERT_WITH_CODE(!ret,
"[GetNumOfDpmLevel] failed to get dpm levels!",
return ret);
- *num_of_levels = smum_get_argument(hwmgr);
- PP_ASSERT_WITH_CODE(*num_of_levels > 0,
- "[GetNumOfDpmLevel] number of clk levels is invalid!",
- return -EINVAL);
-
return ret;
}
@@ -504,12 +498,11 @@ static int vega12_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
*Lower 16 bits specify the level
*/
PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_GetDpmFreqByIndex, (clkID << 16 | index)) == 0,
+ PPSMC_MSG_GetDpmFreqByIndex, (clkID << 16 | index),
+ clock) == 0,
"[GetDpmFrequencyByIndex] Failed to get dpm frequency from SMU!",
return -EINVAL);
- *clock = smum_get_argument(hwmgr);
-
return 0;
}
@@ -749,7 +742,8 @@ static int vega12_init_smc_table(struct pp_hwmgr *hwmgr)
data->vbios_boot_state.vclock = boot_up_values.ulVClk;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetMinDeepSleepDcefclk,
- (uint32_t)(data->vbios_boot_state.dcef_clock / 100));
+ (uint32_t)(data->vbios_boot_state.dcef_clock / 100),
+ NULL);
}
memcpy(pp_table, pptable_information->smc_pptable, sizeof(PPTable_t));
@@ -767,11 +761,10 @@ static int vega12_run_acg_btc(struct pp_hwmgr *hwmgr)
uint32_t result;
PP_ASSERT_WITH_CODE(
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc) == 0,
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc, &result) == 0,
"[Run_ACG_BTC] Attempt to run ACG BTC failed!",
return -EINVAL);
- result = smum_get_argument(hwmgr);
PP_ASSERT_WITH_CODE(result == 1,
"Failed to run ACG BTC!", return -EINVAL);
@@ -792,12 +785,14 @@ static int vega12_set_allowed_featuresmask(struct pp_hwmgr *hwmgr)
(allowed_features_low |= ((data->smu_features[i].smu_feature_bitmap >> SMU_FEATURES_LOW_SHIFT) & 0xFFFFFFFF));
PP_ASSERT_WITH_CODE(
- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetAllowedFeaturesMaskHigh, allowed_features_high) == 0,
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetAllowedFeaturesMaskHigh, allowed_features_high,
+ NULL) == 0,
"[SetAllowedFeaturesMask] Attempt to set allowed features mask (high) failed!",
return -1);
PP_ASSERT_WITH_CODE(
- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetAllowedFeaturesMaskLow, allowed_features_low) == 0,
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetAllowedFeaturesMaskLow, allowed_features_low,
+ NULL) == 0,
"[SetAllowedFeaturesMask] Attempt to set allowed features mask (low) failed!",
return -1);
@@ -828,7 +823,7 @@ static int vega12_enable_all_smu_features(struct pp_hwmgr *hwmgr)
bool enabled;
PP_ASSERT_WITH_CODE(
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAllSmuFeatures) == 0,
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAllSmuFeatures, NULL) == 0,
"[EnableAllSMUFeatures] Failed to enable all smu features!",
return -1);
@@ -854,7 +849,7 @@ static int vega12_disable_all_smu_features(struct pp_hwmgr *hwmgr)
bool enabled;
PP_ASSERT_WITH_CODE(
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableAllSmuFeatures) == 0,
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableAllSmuFeatures, NULL) == 0,
"[DisableAllSMUFeatures] Failed to disable all smu features!",
return -1);
@@ -879,7 +874,8 @@ static int vega12_set_overdrive_target_percentage(struct pp_hwmgr *hwmgr,
uint32_t adjust_percent)
{
return smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_OverDriveSetPercentage, adjust_percent);
+ PPSMC_MSG_OverDriveSetPercentage, adjust_percent,
+ NULL);
}
static int vega12_power_control_set_level(struct pp_hwmgr *hwmgr)
@@ -902,24 +898,24 @@ static int vega12_get_all_clock_ranges_helper(struct pp_hwmgr *hwmgr,
{
/* AC Max */
PP_ASSERT_WITH_CODE(
- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMaxDpmFreq, (clkid << 16)) == 0,
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMaxDpmFreq, (clkid << 16),
+ &(clock->ACMax)) == 0,
"[GetClockRanges] Failed to get max ac clock from SMC!",
return -EINVAL);
- clock->ACMax = smum_get_argument(hwmgr);
/* AC Min */
PP_ASSERT_WITH_CODE(
- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMinDpmFreq, (clkid << 16)) == 0,
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMinDpmFreq, (clkid << 16),
+ &(clock->ACMin)) == 0,
"[GetClockRanges] Failed to get min ac clock from SMC!",
return -EINVAL);
- clock->ACMin = smum_get_argument(hwmgr);
/* DC Max */
PP_ASSERT_WITH_CODE(
- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDcModeMaxDpmFreq, (clkid << 16)) == 0,
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDcModeMaxDpmFreq, (clkid << 16),
+ &(clock->DCMax)) == 0,
"[GetClockRanges] Failed to get max dc clock from SMC!",
return -EINVAL);
- clock->DCMax = smum_get_argument(hwmgr);
return 0;
}
@@ -944,7 +940,7 @@ static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
int tmp_result, result = 0;
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_NumOfDisplays, 0);
+ PPSMC_MSG_NumOfDisplays, 0, NULL);
result = vega12_set_allowed_featuresmask(hwmgr);
PP_ASSERT_WITH_CODE(result == 0,
@@ -1043,7 +1039,8 @@ static int vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
min_freq = data->dpm_table.gfx_table.dpm_state.soft_min_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMinByFreq,
- (PPCLK_GFXCLK << 16) | (min_freq & 0xffff))),
+ (PPCLK_GFXCLK << 16) | (min_freq & 0xffff),
+ NULL)),
"Failed to set soft min gfxclk !",
return ret);
}
@@ -1052,14 +1049,16 @@ static int vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
min_freq = data->dpm_table.mem_table.dpm_state.soft_min_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMinByFreq,
- (PPCLK_UCLK << 16) | (min_freq & 0xffff))),
+ (PPCLK_UCLK << 16) | (min_freq & 0xffff),
+ NULL)),
"Failed to set soft min memclk !",
return ret);
min_freq = data->dpm_table.mem_table.dpm_state.hard_min_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetHardMinByFreq,
- (PPCLK_UCLK << 16) | (min_freq & 0xffff))),
+ (PPCLK_UCLK << 16) | (min_freq & 0xffff),
+ NULL)),
"Failed to set hard min memclk !",
return ret);
}
@@ -1069,7 +1068,8 @@ static int vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMinByFreq,
- (PPCLK_VCLK << 16) | (min_freq & 0xffff))),
+ (PPCLK_VCLK << 16) | (min_freq & 0xffff),
+ NULL)),
"Failed to set soft min vclk!",
return ret);
@@ -1077,7 +1077,8 @@ static int vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMinByFreq,
- (PPCLK_DCLK << 16) | (min_freq & 0xffff))),
+ (PPCLK_DCLK << 16) | (min_freq & 0xffff),
+ NULL)),
"Failed to set soft min dclk!",
return ret);
}
@@ -1087,7 +1088,8 @@ static int vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMinByFreq,
- (PPCLK_ECLK << 16) | (min_freq & 0xffff))),
+ (PPCLK_ECLK << 16) | (min_freq & 0xffff),
+ NULL)),
"Failed to set soft min eclk!",
return ret);
}
@@ -1097,7 +1099,8 @@ static int vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMinByFreq,
- (PPCLK_SOCCLK << 16) | (min_freq & 0xffff))),
+ (PPCLK_SOCCLK << 16) | (min_freq & 0xffff),
+ NULL)),
"Failed to set soft min socclk!",
return ret);
}
@@ -1107,7 +1110,8 @@ static int vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetHardMinByFreq,
- (PPCLK_DCEFCLK << 16) | (min_freq & 0xffff))),
+ (PPCLK_DCEFCLK << 16) | (min_freq & 0xffff),
+ NULL)),
"Failed to set hard min dcefclk!",
return ret);
}
@@ -1127,7 +1131,8 @@ static int vega12_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
- (PPCLK_GFXCLK << 16) | (max_freq & 0xffff))),
+ (PPCLK_GFXCLK << 16) | (max_freq & 0xffff),
+ NULL)),
"Failed to set soft max gfxclk!",
return ret);
}
@@ -1137,7 +1142,8 @@ static int vega12_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
- (PPCLK_UCLK << 16) | (max_freq & 0xffff))),
+ (PPCLK_UCLK << 16) | (max_freq & 0xffff),
+ NULL)),
"Failed to set soft max memclk!",
return ret);
}
@@ -1147,14 +1153,16 @@ static int vega12_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
- (PPCLK_VCLK << 16) | (max_freq & 0xffff))),
+ (PPCLK_VCLK << 16) | (max_freq & 0xffff),
+ NULL)),
"Failed to set soft max vclk!",
return ret);
max_freq = data->dpm_table.dclk_table.dpm_state.soft_max_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
- (PPCLK_DCLK << 16) | (max_freq & 0xffff))),
+ (PPCLK_DCLK << 16) | (max_freq & 0xffff),
+ NULL)),
"Failed to set soft max dclk!",
return ret);
}
@@ -1164,7 +1172,8 @@ static int vega12_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
- (PPCLK_ECLK << 16) | (max_freq & 0xffff))),
+ (PPCLK_ECLK << 16) | (max_freq & 0xffff),
+ NULL)),
"Failed to set soft max eclk!",
return ret);
}
@@ -1174,7 +1183,8 @@ static int vega12_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
- (PPCLK_SOCCLK << 16) | (max_freq & 0xffff))),
+ (PPCLK_SOCCLK << 16) | (max_freq & 0xffff),
+ NULL)),
"Failed to set soft max socclk!",
return ret);
}
@@ -1287,10 +1297,10 @@ static int vega12_get_current_gfx_clk_freq(struct pp_hwmgr *hwmgr, uint32_t *gfx
*gfx_freq = 0;
PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_GetDpmClockFreq, (PPCLK_GFXCLK << 16)) == 0,
+ PPSMC_MSG_GetDpmClockFreq, (PPCLK_GFXCLK << 16),
+ &gfx_clk) == 0,
"[GetCurrentGfxClkFreq] Attempt to get Current GFXCLK Frequency Failed!",
return -EINVAL);
- gfx_clk = smum_get_argument(hwmgr);
*gfx_freq = gfx_clk * 100;
@@ -1304,10 +1314,10 @@ static int vega12_get_current_mclk_freq(struct pp_hwmgr *hwmgr, uint32_t *mclk_f
*mclk_freq = 0;
PP_ASSERT_WITH_CODE(
- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDpmClockFreq, (PPCLK_UCLK << 16)) == 0,
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDpmClockFreq, (PPCLK_UCLK << 16),
+ &mem_clk) == 0,
"[GetCurrentMClkFreq] Attempt to get Current MCLK Frequency Failed!",
return -EINVAL);
- mem_clk = smum_get_argument(hwmgr);
*mclk_freq = mem_clk * 100;
@@ -1420,7 +1430,8 @@ static int vega12_notify_smc_display_change(struct pp_hwmgr *hwmgr,
if (data->smu_features[GNLD_DPM_UCLK].enabled)
return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetUclkFastSwitch,
- has_disp ? 1 : 0);
+ has_disp ? 1 : 0,
+ NULL);
return 0;
}
@@ -1459,7 +1470,8 @@ int vega12_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
clk_request = (clk_select << 16) | clk_freq;
result = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinByFreq,
- clk_request);
+ clk_request,
+ NULL);
}
}
@@ -1493,7 +1505,8 @@ static int vega12_notify_smc_display_config_after_ps_adjustment(
PP_ASSERT_WITH_CODE(
!smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk,
- min_clocks.dcefClockInSR /100),
+ min_clocks.dcefClockInSR /100,
+ NULL),
"Attempt to set divider for DCEFCLK Failed!",
return -1);
} else {
@@ -2124,10 +2137,10 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
case PP_SOCCLK:
PP_ASSERT_WITH_CODE(
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_GetDpmClockFreq, (PPCLK_SOCCLK << 16)) == 0,
+ PPSMC_MSG_GetDpmClockFreq, (PPCLK_SOCCLK << 16),
+ &now) == 0,
"Attempt to get Current SOCCLK Frequency Failed!",
return -EINVAL);
- now = smum_get_argument(hwmgr);
PP_ASSERT_WITH_CODE(
vega12_get_socclocks(hwmgr, &clocks) == 0,
@@ -2142,10 +2155,10 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
case PP_DCEFCLK:
PP_ASSERT_WITH_CODE(
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_GetDpmClockFreq, (PPCLK_DCEFCLK << 16)) == 0,
+ PPSMC_MSG_GetDpmClockFreq, (PPCLK_DCEFCLK << 16),
+ &now) == 0,
"Attempt to get Current DCEFCLK Frequency Failed!",
return -EINVAL);
- now = smum_get_argument(hwmgr);
PP_ASSERT_WITH_CODE(
vega12_get_dcefclocks(hwmgr, &clocks) == 0,
@@ -2343,7 +2356,8 @@ static int vega12_set_uclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr,
dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinByFreq,
- (PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level)),
+ (PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level,
+ NULL)),
"[SetUclkToHightestDpmLevel] Set hard min uclk failed!",
return ret);
}
@@ -2357,7 +2371,8 @@ static int vega12_pre_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
int ret = 0;
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_NumOfDisplays, 0);
+ PPSMC_MSG_NumOfDisplays, 0,
+ NULL);
ret = vega12_set_uclk_to_highest_dpm_level(hwmgr,
&data->dpm_table.mem_table);
@@ -2383,7 +2398,8 @@ static int vega12_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
data->smu_features[GNLD_DPM_DCEFCLK].supported &&
data->smu_features[GNLD_DPM_SOCCLK].supported)
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_NumOfDisplays, hwmgr->display_config->num_display);
+ PPSMC_MSG_NumOfDisplays, hwmgr->display_config->num_display,
+ NULL);
return result;
}
@@ -2555,21 +2571,26 @@ static int vega12_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
{
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSystemVirtualDramAddrHigh,
- virtual_addr_hi);
+ virtual_addr_hi,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSystemVirtualDramAddrLow,
- virtual_addr_low);
+ virtual_addr_low,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DramLogSetDramAddrHigh,
- mc_addr_hi);
+ mc_addr_hi,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DramLogSetDramAddrLow,
- mc_addr_low);
+ mc_addr_low,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DramLogSetDramSize,
- size);
+ size,
+ NULL);
return 0;
}
@@ -2605,7 +2626,7 @@ static int vega12_enable_gfx_off(struct pp_hwmgr *hwmgr)
int ret = 0;
if (data->gfxoff_controlled_by_driver)
- ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_AllowGfxOff);
+ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_AllowGfxOff, NULL);
return ret;
}
@@ -2617,7 +2638,7 @@ static int vega12_disable_gfx_off(struct pp_hwmgr *hwmgr)
int ret = 0;
if (data->gfxoff_controlled_by_driver)
- ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisallowGfxOff);
+ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisallowGfxOff, NULL);
return ret;
}
@@ -2654,7 +2675,7 @@ static int vega12_set_mp1_state(struct pp_hwmgr *hwmgr,
return 0;
}
- PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg)) == 0,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg, NULL)) == 0,
"[PrepareMp1] Failed!",
return ret);
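
Worth noting for this file and the matching vega20 hunks further down: helpers that used to sanity-check the value fetched by smum_get_argument(), such as the *num_of_levels > 0 assertion in vega12_get_number_of_dpm_level(), lose that check in the conversion because the out parameter is now filled directly by the send call. If the validation is still wanted it has to move to the caller; a hedged sketch of what that could look like:

    /* Hypothetical caller-side validation after the converted helper. */
    uint32_t levels = 0;
    int ret = vega12_get_number_of_dpm_level(hwmgr, clk_id, &levels);

    if (!ret && levels == 0)
    	ret = -EINVAL;	/* SMU reported no DPM levels */
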
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c
index 904eb2c9155b..c85806a6f62e 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c
@@ -32,10 +32,10 @@
static int vega12_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm)
{
PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_GetCurrentRpm),
+ PPSMC_MSG_GetCurrentRpm,
+ current_rpm),
"Attempt to get current RPM from SMC Failed!",
return -EINVAL);
- *current_rpm = smum_get_argument(hwmgr);
return 0;
}
@@ -259,7 +259,8 @@ int vega12_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetFanTemperatureTarget,
- (uint32_t)table->FanTargetTemperature);
+ (uint32_t)table->FanTargetTemperature,
+ NULL);
return ret;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c
index 9b5e72bdceca..2a28c9df15a0 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c
@@ -91,16 +91,16 @@ int vega20_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data);
if(smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_EnterBaco, 0))
+ PPSMC_MSG_EnterBaco, 0, NULL))
return -EINVAL;
} else {
if(smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_EnterBaco, 1))
+ PPSMC_MSG_EnterBaco, 1, NULL))
return -EINVAL;
}
} else if (state == BACO_STATE_OUT) {
- if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ExitBaco))
+ if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ExitBaco, NULL))
return -EINVAL;
if (!soc15_baco_program_registers(hwmgr, clean_baco_tbl,
ARRAY_SIZE(clean_baco_tbl)))
@@ -118,5 +118,5 @@ int vega20_baco_apply_vdci_flush_workaround(struct pp_hwmgr *hwmgr)
if (ret)
return ret;
- return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_BacoWorkAroundFlushVDCI);
+ return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_BacoWorkAroundFlushVDCI, NULL);
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index 08b6ba39a6d7..9ff470f1b826 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -92,8 +92,7 @@ static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
*/
data->registry_data.disallowed_features = 0xE0041C00;
/* ECC feature should be disabled on old SMUs */
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion);
- hwmgr->smu_version = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion, &hwmgr->smu_version);
if (hwmgr->smu_version < 0x282100)
data->registry_data.disallowed_features |= FEATURE_ECC_MASK;
@@ -400,10 +399,8 @@ static void vega20_init_dpm_defaults(struct pp_hwmgr *hwmgr)
}
/* Get the SN to turn into a Unique ID */
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32);
- top32 = smum_get_argument(hwmgr);
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32);
- bottom32 = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32);
adev->unique_id = ((uint64_t)bottom32 << 32) | top32;
}
@@ -527,16 +524,12 @@ static int vega20_get_number_of_dpm_level(struct pp_hwmgr *hwmgr,
ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_GetDpmFreqByIndex,
- (clk_id << 16 | 0xFF));
+ (clk_id << 16 | 0xFF),
+ num_of_levels);
PP_ASSERT_WITH_CODE(!ret,
"[GetNumOfDpmLevel] failed to get dpm levels!",
return ret);
- *num_of_levels = smum_get_argument(hwmgr);
- PP_ASSERT_WITH_CODE(*num_of_levels > 0,
- "[GetNumOfDpmLevel] number of clk levels is invalid!",
- return -EINVAL);
-
return ret;
}
@@ -547,16 +540,12 @@ static int vega20_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_GetDpmFreqByIndex,
- (clk_id << 16 | index));
+ (clk_id << 16 | index),
+ clk);
PP_ASSERT_WITH_CODE(!ret,
"[GetDpmFreqByIndex] failed to get dpm freq by index!",
return ret);
- *clk = smum_get_argument(hwmgr);
- PP_ASSERT_WITH_CODE(*clk,
- "[GetDpmFreqByIndex] clk value is invalid!",
- return -EINVAL);
-
return ret;
}
@@ -813,7 +802,8 @@ static int vega20_init_smc_table(struct pp_hwmgr *hwmgr)
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetMinDeepSleepDcefclk,
- (uint32_t)(data->vbios_boot_state.dcef_clock / 100));
+ (uint32_t)(data->vbios_boot_state.dcef_clock / 100),
+ NULL);
memcpy(pp_table, pptable_information->smc_pptable, sizeof(PPTable_t));
@@ -868,7 +858,8 @@ static int vega20_override_pcie_parameters(struct pp_hwmgr *hwmgr)
*/
smu_pcie_arg = (1 << 16) | (pcie_gen << 8) | pcie_width;
ret = smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_OverridePcieParameters, smu_pcie_arg);
+ PPSMC_MSG_OverridePcieParameters, smu_pcie_arg,
+ NULL);
PP_ASSERT_WITH_CODE(!ret,
"[OverridePcieParameters] Attempt to override pcie params failed!",
return ret);
@@ -899,13 +890,13 @@ static int vega20_set_allowed_featuresmask(struct pp_hwmgr *hwmgr)
& 0xFFFFFFFF));
ret = smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetAllowedFeaturesMaskHigh, allowed_features_high);
+ PPSMC_MSG_SetAllowedFeaturesMaskHigh, allowed_features_high, NULL);
PP_ASSERT_WITH_CODE(!ret,
"[SetAllowedFeaturesMask] Attempt to set allowed features mask(high) failed!",
return ret);
ret = smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetAllowedFeaturesMaskLow, allowed_features_low);
+ PPSMC_MSG_SetAllowedFeaturesMaskLow, allowed_features_low, NULL);
PP_ASSERT_WITH_CODE(!ret,
"[SetAllowedFeaturesMask] Attempt to set allowed features mask (low) failed!",
return ret);
@@ -915,12 +906,12 @@ static int vega20_set_allowed_featuresmask(struct pp_hwmgr *hwmgr)
static int vega20_run_btc(struct pp_hwmgr *hwmgr)
{
- return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunBtc);
+ return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunBtc, NULL);
}
static int vega20_run_btc_afll(struct pp_hwmgr *hwmgr)
{
- return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAfllBtc);
+ return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAfllBtc, NULL);
}
static int vega20_enable_all_smu_features(struct pp_hwmgr *hwmgr)
@@ -933,7 +924,8 @@ static int vega20_enable_all_smu_features(struct pp_hwmgr *hwmgr)
int ret = 0;
PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_EnableAllSmuFeatures)) == 0,
+ PPSMC_MSG_EnableAllSmuFeatures,
+ NULL)) == 0,
"[EnableAllSMUFeatures] Failed to enable all smu features!",
return ret);
@@ -966,7 +958,8 @@ static int vega20_notify_smc_display_change(struct pp_hwmgr *hwmgr)
if (data->smu_features[GNLD_DPM_UCLK].enabled)
return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetUclkFastSwitch,
- 1);
+ 1,
+ NULL);
return 0;
}
@@ -978,7 +971,8 @@ static int vega20_send_clock_ratio(struct pp_hwmgr *hwmgr)
return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetFclkGfxClkRatio,
- data->registry_data.fclk_gfxclk_ratio);
+ data->registry_data.fclk_gfxclk_ratio,
+ NULL);
}
static int vega20_disable_all_smu_features(struct pp_hwmgr *hwmgr)
@@ -991,7 +985,8 @@ static int vega20_disable_all_smu_features(struct pp_hwmgr *hwmgr)
int ret = 0;
PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_DisableAllSmuFeatures)) == 0,
+ PPSMC_MSG_DisableAllSmuFeatures,
+ NULL)) == 0,
"[DisableAllSMUFeatures] Failed to disable all smu features!",
return ret);
@@ -1199,12 +1194,12 @@ static int vega20_od8_get_gfx_clock_base_voltage(
ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_GetAVFSVoltageByDpm,
- ((AVFS_CURVE << 24) | (OD8_HOTCURVE_TEMPERATURE << 16) | freq));
+ ((AVFS_CURVE << 24) | (OD8_HOTCURVE_TEMPERATURE << 16) | freq),
+ voltage);
PP_ASSERT_WITH_CODE(!ret,
"[GetBaseVoltage] failed to get GFXCLK AVFS voltage from SMU!",
return ret);
- *voltage = smum_get_argument(hwmgr);
*voltage = *voltage / VOLTAGE_SCALE;
return 0;
@@ -1560,19 +1555,19 @@ static int vega20_get_max_sustainable_clock(struct pp_hwmgr *hwmgr,
PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_GetDcModeMaxDpmFreq,
- (clock_select << 16))) == 0,
+ (clock_select << 16),
+ clock)) == 0,
"[GetMaxSustainableClock] Failed to get max DC clock from SMC!",
return ret);
- *clock = smum_get_argument(hwmgr);
/* if DC limit is zero, return AC limit */
if (*clock == 0) {
PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_GetMaxDpmFreq,
- (clock_select << 16))) == 0,
+ (clock_select << 16),
+ clock)) == 0,
"[GetMaxSustainableClock] failed to get max AC clock from SMC!",
return ret);
- *clock = smum_get_argument(hwmgr);
}
return 0;
@@ -1641,7 +1636,8 @@ static int vega20_enable_mgpu_fan_boost(struct pp_hwmgr *hwmgr)
int result;
result = smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_SetMGpuFanBoostLimitRpm);
+ PPSMC_MSG_SetMGpuFanBoostLimitRpm,
+ NULL);
PP_ASSERT_WITH_CODE(!result,
"[EnableMgpuFan] Failed to enable mgpu fan boost!",
return result);
@@ -1669,7 +1665,7 @@ static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
int result = 0;
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_NumOfDisplays, 0);
+ PPSMC_MSG_NumOfDisplays, 0, NULL);
result = vega20_set_allowed_featuresmask(hwmgr);
PP_ASSERT_WITH_CODE(!result,
@@ -1740,12 +1736,12 @@ static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
return result);
result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetPptLimit,
- POWER_SOURCE_AC << 16);
+ POWER_SOURCE_AC << 16, &hwmgr->default_power_limit);
PP_ASSERT_WITH_CODE(!result,
"[GetPptLimit] get default PPT limit failed!",
return result);
hwmgr->power_limit =
- hwmgr->default_power_limit = smum_get_argument(hwmgr);
+ hwmgr->default_power_limit;
return 0;
}
@@ -1806,7 +1802,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_
min_freq = data->dpm_table.gfx_table.dpm_state.soft_min_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMinByFreq,
- (PPCLK_GFXCLK << 16) | (min_freq & 0xffff))),
+ (PPCLK_GFXCLK << 16) | (min_freq & 0xffff),
+ NULL)),
"Failed to set soft min gfxclk !",
return ret);
}
@@ -1816,7 +1813,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_
min_freq = data->dpm_table.mem_table.dpm_state.soft_min_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMinByFreq,
- (PPCLK_UCLK << 16) | (min_freq & 0xffff))),
+ (PPCLK_UCLK << 16) | (min_freq & 0xffff),
+ NULL)),
"Failed to set soft min memclk !",
return ret);
}
@@ -1827,7 +1825,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMinByFreq,
- (PPCLK_VCLK << 16) | (min_freq & 0xffff))),
+ (PPCLK_VCLK << 16) | (min_freq & 0xffff),
+ NULL)),
"Failed to set soft min vclk!",
return ret);
@@ -1835,7 +1834,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMinByFreq,
- (PPCLK_DCLK << 16) | (min_freq & 0xffff))),
+ (PPCLK_DCLK << 16) | (min_freq & 0xffff),
+ NULL)),
"Failed to set soft min dclk!",
return ret);
}
@@ -1846,7 +1846,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMinByFreq,
- (PPCLK_ECLK << 16) | (min_freq & 0xffff))),
+ (PPCLK_ECLK << 16) | (min_freq & 0xffff),
+ NULL)),
"Failed to set soft min eclk!",
return ret);
}
@@ -1857,7 +1858,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMinByFreq,
- (PPCLK_SOCCLK << 16) | (min_freq & 0xffff))),
+ (PPCLK_SOCCLK << 16) | (min_freq & 0xffff),
+ NULL)),
"Failed to set soft min socclk!",
return ret);
}
@@ -1868,7 +1870,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMinByFreq,
- (PPCLK_FCLK << 16) | (min_freq & 0xffff))),
+ (PPCLK_FCLK << 16) | (min_freq & 0xffff),
+ NULL)),
"Failed to set soft min fclk!",
return ret);
}
@@ -1879,7 +1882,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetHardMinByFreq,
- (PPCLK_DCEFCLK << 16) | (min_freq & 0xffff))),
+ (PPCLK_DCEFCLK << 16) | (min_freq & 0xffff),
+ NULL)),
"Failed to set hard min dcefclk!",
return ret);
}
@@ -1900,7 +1904,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr, uint32_t feature_
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
- (PPCLK_GFXCLK << 16) | (max_freq & 0xffff))),
+ (PPCLK_GFXCLK << 16) | (max_freq & 0xffff),
+ NULL)),
"Failed to set soft max gfxclk!",
return ret);
}
@@ -1911,7 +1916,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr, uint32_t feature_
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
- (PPCLK_UCLK << 16) | (max_freq & 0xffff))),
+ (PPCLK_UCLK << 16) | (max_freq & 0xffff),
+ NULL)),
"Failed to set soft max memclk!",
return ret);
}
@@ -1922,14 +1928,16 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr, uint32_t feature_
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
- (PPCLK_VCLK << 16) | (max_freq & 0xffff))),
+ (PPCLK_VCLK << 16) | (max_freq & 0xffff),
+ NULL)),
"Failed to set soft max vclk!",
return ret);
max_freq = data->dpm_table.dclk_table.dpm_state.soft_max_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
- (PPCLK_DCLK << 16) | (max_freq & 0xffff))),
+ (PPCLK_DCLK << 16) | (max_freq & 0xffff),
+ NULL)),
"Failed to set soft max dclk!",
return ret);
}
@@ -1940,7 +1948,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr, uint32_t feature_
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
- (PPCLK_ECLK << 16) | (max_freq & 0xffff))),
+ (PPCLK_ECLK << 16) | (max_freq & 0xffff),
+ NULL)),
"Failed to set soft max eclk!",
return ret);
}
@@ -1951,7 +1960,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr, uint32_t feature_
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
- (PPCLK_SOCCLK << 16) | (max_freq & 0xffff))),
+ (PPCLK_SOCCLK << 16) | (max_freq & 0xffff),
+ NULL)),
"Failed to set soft max socclk!",
return ret);
}
@@ -1962,7 +1972,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr, uint32_t feature_
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
- (PPCLK_FCLK << 16) | (max_freq & 0xffff))),
+ (PPCLK_FCLK << 16) | (max_freq & 0xffff),
+ NULL)),
"Failed to set soft max fclk!",
return ret);
}
@@ -2006,17 +2017,17 @@ static int vega20_get_clock_ranges(struct pp_hwmgr *hwmgr,
if (max) {
PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_GetMaxDpmFreq, (clock_select << 16))) == 0,
+ PPSMC_MSG_GetMaxDpmFreq, (clock_select << 16),
+ clock)) == 0,
"[GetClockRanges] Failed to get max clock from SMC!",
return ret);
- *clock = smum_get_argument(hwmgr);
} else {
PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_GetMinDpmFreq,
- (clock_select << 16))) == 0,
+ (clock_select << 16),
+ clock)) == 0,
"[GetClockRanges] Failed to get min clock from SMC!",
return ret);
- *clock = smum_get_argument(hwmgr);
}
return 0;
@@ -2122,10 +2133,10 @@ static int vega20_get_current_clk_freq(struct pp_hwmgr *hwmgr,
*clk_freq = 0;
PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_GetDpmClockFreq, (clk_id << 16))) == 0,
+ PPSMC_MSG_GetDpmClockFreq, (clk_id << 16),
+ clk_freq)) == 0,
"[GetCurrentClkFreq] Attempt to get Current Frequency Failed!",
return ret);
- *clk_freq = smum_get_argument(hwmgr);
*clk_freq = *clk_freq * 100;
@@ -2276,7 +2287,8 @@ int vega20_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
clk_request = (clk_select << 16) | clk_freq;
result = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinByFreq,
- clk_request);
+ clk_request,
+ NULL);
}
}
@@ -2312,7 +2324,8 @@ static int vega20_notify_smc_display_config_after_ps_adjustment(
if (data->smu_features[GNLD_DS_DCEFCLK].supported)
PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk,
- min_clocks.dcefClockInSR / 100)) == 0,
+ min_clocks.dcefClockInSR / 100,
+ NULL)) == 0,
"Attempt to set divider for DCEFCLK Failed!",
return ret);
} else {
@@ -2324,7 +2337,8 @@ static int vega20_notify_smc_display_config_after_ps_adjustment(
dpm_table->dpm_state.hard_min_level = min_clocks.memoryClock / 100;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinByFreq,
- (PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level)),
+ (PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level,
+ NULL)),
"[SetHardMinFreq] Set hard min uclk failed!",
return ret);
}
@@ -2656,7 +2670,8 @@ static int vega20_force_clock_level(struct pp_hwmgr *hwmgr,
return -EINVAL;
ret = smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetMinLinkDpmByIndex, soft_min_level);
+ PPSMC_MSG_SetMinLinkDpmByIndex, soft_min_level,
+ NULL);
PP_ASSERT_WITH_CODE(!ret,
"Failed to set min link dpm level!",
return ret);
@@ -3140,7 +3155,7 @@ static int vega20_set_mp1_state(struct pp_hwmgr *hwmgr,
return 0;
}
- PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg)) == 0,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg, NULL)) == 0,
"[PrepareMp1] Failed!",
return ret);
@@ -3495,7 +3510,8 @@ static int vega20_set_uclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr,
dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinByFreq,
- (PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level)),
+ (PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level,
+ NULL)),
"[SetUclkToHightestDpmLevel] Set hard min uclk failed!",
return ret);
}
@@ -3520,7 +3536,8 @@ static int vega20_set_fclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr)
dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMinByFreq,
- (PPCLK_FCLK << 16 ) | dpm_table->dpm_state.soft_min_level)),
+ (PPCLK_FCLK << 16 ) | dpm_table->dpm_state.soft_min_level,
+ NULL)),
"[SetFclkToHightestDpmLevel] Set soft min fclk failed!",
return ret);
}
@@ -3534,7 +3551,7 @@ static int vega20_pre_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
int ret = 0;
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_NumOfDisplays, 0);
+ PPSMC_MSG_NumOfDisplays, 0, NULL);
ret = vega20_set_uclk_to_highest_dpm_level(hwmgr,
&data->dpm_table.mem_table);
@@ -3565,7 +3582,8 @@ static int vega20_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
data->smu_features[GNLD_DPM_SOCCLK].supported) {
result = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_NumOfDisplays,
- hwmgr->display_config->num_display);
+ hwmgr->display_config->num_display,
+ NULL);
}
return result;
@@ -4082,7 +4100,8 @@ out:
workload_type =
conv_power_profile_to_pplib_workload(power_profile_mode);
smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
- 1 << workload_type);
+ 1 << workload_type,
+ NULL);
hwmgr->power_profile_mode = power_profile_mode;
@@ -4098,21 +4117,26 @@ static int vega20_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
{
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSystemVirtualDramAddrHigh,
- virtual_addr_hi);
+ virtual_addr_hi,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSystemVirtualDramAddrLow,
- virtual_addr_low);
+ virtual_addr_low,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DramLogSetDramAddrHigh,
- mc_addr_hi);
+ mc_addr_hi,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DramLogSetDramAddrLow,
- mc_addr_low);
+ mc_addr_low,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DramLogSetDramSize,
- size);
+ size,
+ NULL);
return 0;
}
@@ -4153,7 +4177,8 @@ static int vega20_smu_i2c_bus_access(struct pp_hwmgr *hwmgr, bool acquire)
(acquire ?
PPSMC_MSG_RequestI2CBus :
PPSMC_MSG_ReleaseI2CBus),
- 0);
+ 0,
+ NULL);
PP_ASSERT_WITH_CODE(!res, "[SmuI2CAccessBus] Failed to access bus!", return res);
return res;
@@ -4170,7 +4195,8 @@ static int vega20_set_df_cstate(struct pp_hwmgr *hwmgr,
return -EINVAL;
}
- ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DFCstateControl, state);
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DFCstateControl, state,
+ NULL);
if (ret)
pr_err("SetDfCstate failed!\n");
@@ -4184,7 +4210,8 @@ static int vega20_set_xgmi_pstate(struct pp_hwmgr *hwmgr,
ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetXgmiMode,
- pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3);
+ pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3,
+ NULL);
if (ret)
pr_err("SetXgmiPstate failed!\n");
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_powertune.c
index a0bfb65cc5d6..d7cc3d2d9e17 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_powertune.c
@@ -36,7 +36,8 @@ int vega20_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
if (data->smu_features[GNLD_PPT].enabled)
return smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetPptLimit, n);
+ PPSMC_MSG_SetPptLimit, n,
+ NULL);
return 0;
}
@@ -51,7 +52,8 @@ static int vega20_set_overdrive_target_percentage(struct pp_hwmgr *hwmgr,
uint32_t adjust_percent)
{
return smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_OverDriveSetPercentage, adjust_percent);
+ PPSMC_MSG_OverDriveSetPercentage, adjust_percent,
+ NULL);
}
int vega20_power_control_set_level(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c
index ede54e87e287..7add2f60f49c 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c
@@ -106,10 +106,10 @@ static int vega20_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm)
int ret = 0;
PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_GetCurrentRpm)) == 0,
+ PPSMC_MSG_GetCurrentRpm,
+ current_rpm)) == 0,
"Attempt to get current RPM from SMC Failed!",
return ret);
- *current_rpm = smum_get_argument(hwmgr);
return 0;
}
@@ -329,7 +329,8 @@ static int vega20_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetFanTemperatureTarget,
- (uint32_t)table->FanTargetTemperature);
+ (uint32_t)table->FanTargetTemperature,
+ NULL);
return ret;
}
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index ae2c318dd6fa..4d1c2a44a8b6 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -405,7 +405,9 @@ struct smu_context
bool pm_enabled;
bool is_apu;
- uint32_t smc_if_version;
+ uint32_t smc_driver_if_version;
+ uint32_t smc_fw_if_version;
+ uint32_t smc_fw_version;
bool uploading_custom_pp_table;
bool dc_controlled_by_gpio;
@@ -489,6 +491,7 @@ struct pptable_funcs {
int (*get_dpm_clk_limited)(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t dpm_level, uint32_t *freq);
int (*set_df_cstate)(struct smu_context *smu, enum pp_df_cstate state);
+ int (*allow_xgmi_power_down)(struct smu_context *smu, bool en);
int (*update_pcie_parameters)(struct smu_context *smu, uint32_t pcie_gen_cap, uint32_t pcie_width_cap);
int (*i2c_eeprom_init)(struct i2c_adapter *control);
void (*i2c_eeprom_fini)(struct i2c_adapter *control);
@@ -580,11 +583,6 @@ int smu_check_fw_status(struct smu_context *smu);
int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
-#define smu_i2c_eeprom_init(smu, control) \
- ((smu)->ppt_funcs->i2c_eeprom_init ? (smu)->ppt_funcs->i2c_eeprom_init((control)) : -EINVAL)
-#define smu_i2c_eeprom_fini(smu, control) \
- ((smu)->ppt_funcs->i2c_eeprom_fini ? (smu)->ppt_funcs->i2c_eeprom_fini((control)) : -EINVAL)
-
int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed);
int smu_get_power_limit(struct smu_context *smu,
@@ -734,6 +732,7 @@ int smu_set_mp1_state(struct smu_context *smu,
enum pp_mp1_state mp1_state);
int smu_set_df_cstate(struct smu_context *smu,
enum pp_df_cstate state);
+int smu_allow_xgmi_power_down(struct smu_context *smu, bool en);
int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
struct pp_smu_nv_clock_table *max_clocks);
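
The new allow_xgmi_power_down hook and its smu_allow_xgmi_power_down() wrapper follow the usual ppt_funcs dispatch shape; on arcturus it presumably maps onto the PPSMC_MSG_GmiPwrDnControl message added in the next file. The wrapper body itself lives in amdgpu_smu.c and is not part of this excerpt, so the version below is only a plausible sketch assuming the smu->mutex serialization used by the other wrappers:

    /* Sketch only; the real wrapper in amdgpu_smu.c may differ in detail. */
    int smu_allow_xgmi_power_down(struct smu_context *smu, bool en)
    {
    	int ret = 0;

    	if (!smu->pm_enabled)
    		return ret;

    	mutex_lock(&smu->mutex);
    	if (smu->ppt_funcs->allow_xgmi_power_down)
    		ret = smu->ppt_funcs->allow_xgmi_power_down(smu, en);
    	mutex_unlock(&smu->mutex);

    	return ret;
    }
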
diff --git a/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h
index f736d773f9d6..e07478b6ac04 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h
@@ -114,7 +114,8 @@
#define PPSMC_MSG_SetNumBadHbmPagesRetired 0x3A
#define PPSMC_MSG_DFCstateControl 0x3B
-#define PPSMC_Message_Count 0x3C
+#define PPSMC_MSG_GmiPwrDnControl 0x3D
+#define PPSMC_Message_Count 0x3E
typedef uint32_t PPSMC_Result;
typedef uint32_t PPSMC_Msg;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index 2ffb666b97e6..15ed6cbdf366 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -743,6 +743,7 @@ struct pp_hwmgr {
bool pm_en;
bool pp_one_vf;
struct mutex smu_lock;
+ struct mutex msg_lock;
uint32_t pp_table_version;
void *device;
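
The new msg_lock in struct pp_hwmgr is presumably the other half of this refactor: with the reply now read back inside the send helper itself, the smumgr can hold one lock across writing the message and argument registers, waiting for completion, and reading the argument register back, instead of exposing the unsynchronized smum_get_argument() step to callers. A sketch of that critical section, with the register helpers as named placeholders only:

    /* Illustrative only: the accessors below are placeholders for the
     * ASIC-specific MP1 mailbox reads/writes used by the real smumgr. */
    static int example_send_msg_locked(struct pp_hwmgr *hwmgr, uint16_t msg,
    				   uint32_t param, uint32_t *resp)
    {
    	int ret;

    	mutex_lock(&hwmgr->msg_lock);

    	write_argument_register(hwmgr, param);		/* placeholder */
    	write_message_register(hwmgr, msg);		/* placeholder */
    	ret = wait_for_response(hwmgr);			/* placeholder */

    	if (!ret && resp)
    		*resp = read_argument_register(hwmgr);	/* placeholder */

    	mutex_unlock(&hwmgr->msg_lock);
    	return ret;
    }
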
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h
index ce5b5011c122..8b82059d97e7 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h
@@ -82,8 +82,8 @@
// Other
#define FEATURE_OUT_OF_BAND_MONITOR_BIT 24
#define FEATURE_TEMP_DEPENDENT_VMIN_BIT 25
+#define FEATURE_PER_PART_VMIN_BIT 26
-#define FEATURE_SPARE_26_BIT 26
#define FEATURE_SPARE_27_BIT 27
#define FEATURE_SPARE_28_BIT 28
#define FEATURE_SPARE_29_BIT 29
@@ -154,6 +154,7 @@
#define FEATURE_OUT_OF_BAND_MONITOR_MASK (1 << FEATURE_OUT_OF_BAND_MONITOR_BIT )
#define FEATURE_TEMP_DEPENDENT_VMIN_MASK (1 << FEATURE_TEMP_DEPENDENT_VMIN_BIT )
+#define FEATURE_PER_PART_VMIN_MASK (1 << FEATURE_PER_PART_VMIN_BIT )
//FIXME need updating
@@ -628,8 +629,14 @@ typedef struct {
uint16_t BasePerformanceFrequencyCap; //In Mhz
uint16_t MaxPerformanceFrequencyCap; //In Mhz
+ // Per-Part Vmin
+ uint16_t VDDGFX_VminLow; // mv Q2
+ uint16_t VDDGFX_TVminLow; //Celsius
+ uint16_t VDDGFX_VminLow_HiTemp; // mv Q2
+ uint16_t VDDGFX_VminLow_LoTemp; // mv Q2
+
// SECTION: Reserved
- uint32_t Reserved[9];
+ uint32_t Reserved[7];
// SECTION: BOARD PARAMETERS
@@ -869,6 +876,10 @@ typedef struct {
uint8_t Mem_DownHystLimit;
uint16_t Mem_Fps;
+ uint32_t BusyThreshold; // Q16
+ uint32_t BusyHyst;
+ uint32_t IdleHyst;
+
uint32_t MmHubPadding[8]; // SMU internal use
} DpmActivityMonitorCoeffInt_t;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu12_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/smu12_driver_if.h
index 2f85a34c0591..e9315eb5b48e 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu12_driver_if.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu12_driver_if.h
@@ -27,7 +27,7 @@
// *** IMPORTANT ***
// SMU TEAM: Always increment the interface version if
// any structure is changed in this file
-#define SMU12_DRIVER_IF_VERSION 11
+#define SMU12_DRIVER_IF_VERSION 14
typedef struct {
int32_t value;
@@ -154,15 +154,19 @@ typedef enum {
} CLOCK_IDs_e;
// Throttler Status Bitmask
-#define THROTTLER_STATUS_BIT_SPL 0
-#define THROTTLER_STATUS_BIT_FPPT 1
-#define THROTTLER_STATUS_BIT_SPPT 2
-#define THROTTLER_STATUS_BIT_SPPT_APU 3
-#define THROTTLER_STATUS_BIT_THM_CORE 4
-#define THROTTLER_STATUS_BIT_THM_GFX 5
-#define THROTTLER_STATUS_BIT_THM_SOC 6
-#define THROTTLER_STATUS_BIT_TDC_VDD 7
-#define THROTTLER_STATUS_BIT_TDC_SOC 8
+#define THROTTLER_STATUS_BIT_SPL 0
+#define THROTTLER_STATUS_BIT_FPPT 1
+#define THROTTLER_STATUS_BIT_SPPT 2
+#define THROTTLER_STATUS_BIT_SPPT_APU 3
+#define THROTTLER_STATUS_BIT_THM_CORE 4
+#define THROTTLER_STATUS_BIT_THM_GFX 5
+#define THROTTLER_STATUS_BIT_THM_SOC 6
+#define THROTTLER_STATUS_BIT_TDC_VDD 7
+#define THROTTLER_STATUS_BIT_TDC_SOC 8
+#define THROTTLER_STATUS_BIT_PROCHOT_CPU 9
+#define THROTTLER_STATUS_BIT_PROCHOT_GFX 10
+#define THROTTLER_STATUS_BIT_EDC_CPU 11
+#define THROTTLER_STATUS_BIT_EDC_GFX 12
typedef struct {
uint16_t ClockFrequency[CLOCK_COUNT]; //[MHz]
@@ -180,7 +184,7 @@ typedef struct {
uint16_t Power[2]; //[mW] indices: VDDCR_VDD, VDDCR_SOC
uint16_t FanPwm; //[milli]
- uint16_t CurrentSocketPower; //[mW]
+ uint16_t CurrentSocketPower; //[W]
uint16_t CoreFrequency[8]; //[MHz]
uint16_t CorePower[8]; //[mW]
@@ -193,10 +197,16 @@ typedef struct {
uint16_t ThrottlerStatus;
uint16_t spare;
- uint16_t StapmOriginalLimit; //[mW]
- uint16_t StapmCurrentLimit; //[mW]
- uint16_t ApuPower; //[mW]
- uint16_t dGpuPower; //[mW]
+ uint16_t StapmOriginalLimit; //[W]
+ uint16_t StapmCurrentLimit; //[W]
+ uint16_t ApuPower; //[W]
+ uint16_t dGpuPower; //[W]
+
+ uint16_t VddTdcValue; //[mA]
+ uint16_t SocTdcValue; //[mA]
+ uint16_t VddEdcValue; //[mA]
+ uint16_t SocEdcValue; //[mA]
+ uint16_t reserve[2];
} SmuMetrics_t;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_types.h b/drivers/gpu/drm/amd/powerplay/inc/smu_types.h
index a5b4df146713..ee7dac4693d4 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_types.h
@@ -170,6 +170,7 @@
__SMU_DUMMY_MAP(SetSoftMinJpeg), \
__SMU_DUMMY_MAP(SetHardMinFclkByFreq), \
__SMU_DUMMY_MAP(DFCstateControl), \
+ __SMU_DUMMY_MAP(GmiPwrDnControl), \
__SMU_DUMMY_MAP(DAL_DISABLE_DUMMY_PSTATE_CHANGE), \
__SMU_DUMMY_MAP(DAL_ENABLE_DUMMY_PSTATE_CHANGE), \
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
index 674e426ed59b..6b3b451a8018 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
@@ -27,8 +27,8 @@
#define SMU11_DRIVER_IF_VERSION_INV 0xFFFFFFFF
#define SMU11_DRIVER_IF_VERSION_VG20 0x13
-#define SMU11_DRIVER_IF_VERSION_ARCT 0x12
-#define SMU11_DRIVER_IF_VERSION_NV10 0x35
+#define SMU11_DRIVER_IF_VERSION_ARCT 0x14
+#define SMU11_DRIVER_IF_VERSION_NV10 0x36
#define SMU11_DRIVER_IF_VERSION_NV12 0x33
#define SMU11_DRIVER_IF_VERSION_NV14 0x36
@@ -37,7 +37,6 @@
#define MP0_SRAM 0x03900000
#define MP1_Public 0x03b00000
#define MP1_SRAM 0x03c00004
-#define MP1_SMC_SIZE 0x40000
/* address block */
#define smnMP1_FIRMWARE_FLAGS 0x3010024
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
index c5288831aa15..ad100b533d04 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
@@ -81,16 +81,15 @@ enum SMU10_TABLE_ID {
SMU10_CLOCKTABLE,
};
-extern uint32_t smum_get_argument(struct pp_hwmgr *hwmgr);
-
extern int smum_download_powerplay_table(struct pp_hwmgr *hwmgr, void **table);
extern int smum_upload_powerplay_table(struct pp_hwmgr *hwmgr);
-extern int smum_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg);
+extern int smum_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t *resp);
extern int smum_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
- uint16_t msg, uint32_t parameter);
+ uint16_t msg, uint32_t parameter,
+ uint32_t *resp);
extern int smum_update_sclk_threshold(struct pp_hwmgr *hwmgr);
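With smum_get_argument() removed from the public interface, callers that need the SMC's reply now pass a pointer through the send helpers instead of issuing a separate read. A sketch of the resulting calling pattern (the message names come from hunks elsewhere in this patch and are illustrative here):

    uint32_t speed;
    int ret;

    /* response wanted: it is fetched under the same lock as the send */
    ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentRpm, &speed);
    if (ret)
        return ret;

    /* response not needed: pass NULL */
    ret = smum_send_msg_to_smc_with_parameter(hwmgr,
                PPSMC_MSG_SetFanTemperatureTarget,
                (uint32_t)table->FanTargetTemperature,
                NULL);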
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index 15030284b444..0c9be864d072 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -423,6 +423,7 @@ static int navi10_append_powerplay_table(struct smu_context *smu)
struct smu_table_context *table_context = &smu->smu_table;
PPTable_t *smc_pptable = table_context->driver_pptable;
struct atom_smc_dpm_info_v4_5 *smc_dpm_table;
+ struct atom_smc_dpm_info_v4_7 *smc_dpm_table_v4_7;
int index, ret;
index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
@@ -433,77 +434,33 @@ static int navi10_append_powerplay_table(struct smu_context *smu)
if (ret)
return ret;
- memcpy(smc_pptable->I2cControllers, smc_dpm_table->I2cControllers,
- sizeof(I2cControllerConfig_t) * NUM_I2C_CONTROLLERS);
-
- /* SVI2 Board Parameters */
- smc_pptable->MaxVoltageStepGfx = smc_dpm_table->MaxVoltageStepGfx;
- smc_pptable->MaxVoltageStepSoc = smc_dpm_table->MaxVoltageStepSoc;
- smc_pptable->VddGfxVrMapping = smc_dpm_table->VddGfxVrMapping;
- smc_pptable->VddSocVrMapping = smc_dpm_table->VddSocVrMapping;
- smc_pptable->VddMem0VrMapping = smc_dpm_table->VddMem0VrMapping;
- smc_pptable->VddMem1VrMapping = smc_dpm_table->VddMem1VrMapping;
- smc_pptable->GfxUlvPhaseSheddingMask = smc_dpm_table->GfxUlvPhaseSheddingMask;
- smc_pptable->SocUlvPhaseSheddingMask = smc_dpm_table->SocUlvPhaseSheddingMask;
- smc_pptable->ExternalSensorPresent = smc_dpm_table->ExternalSensorPresent;
- smc_pptable->Padding8_V = smc_dpm_table->Padding8_V;
-
- /* Telemetry Settings */
- smc_pptable->GfxMaxCurrent = smc_dpm_table->GfxMaxCurrent;
- smc_pptable->GfxOffset = smc_dpm_table->GfxOffset;
- smc_pptable->Padding_TelemetryGfx = smc_dpm_table->Padding_TelemetryGfx;
- smc_pptable->SocMaxCurrent = smc_dpm_table->SocMaxCurrent;
- smc_pptable->SocOffset = smc_dpm_table->SocOffset;
- smc_pptable->Padding_TelemetrySoc = smc_dpm_table->Padding_TelemetrySoc;
- smc_pptable->Mem0MaxCurrent = smc_dpm_table->Mem0MaxCurrent;
- smc_pptable->Mem0Offset = smc_dpm_table->Mem0Offset;
- smc_pptable->Padding_TelemetryMem0 = smc_dpm_table->Padding_TelemetryMem0;
- smc_pptable->Mem1MaxCurrent = smc_dpm_table->Mem1MaxCurrent;
- smc_pptable->Mem1Offset = smc_dpm_table->Mem1Offset;
- smc_pptable->Padding_TelemetryMem1 = smc_dpm_table->Padding_TelemetryMem1;
-
- /* GPIO Settings */
- smc_pptable->AcDcGpio = smc_dpm_table->AcDcGpio;
- smc_pptable->AcDcPolarity = smc_dpm_table->AcDcPolarity;
- smc_pptable->VR0HotGpio = smc_dpm_table->VR0HotGpio;
- smc_pptable->VR0HotPolarity = smc_dpm_table->VR0HotPolarity;
- smc_pptable->VR1HotGpio = smc_dpm_table->VR1HotGpio;
- smc_pptable->VR1HotPolarity = smc_dpm_table->VR1HotPolarity;
- smc_pptable->GthrGpio = smc_dpm_table->GthrGpio;
- smc_pptable->GthrPolarity = smc_dpm_table->GthrPolarity;
-
- /* LED Display Settings */
- smc_pptable->LedPin0 = smc_dpm_table->LedPin0;
- smc_pptable->LedPin1 = smc_dpm_table->LedPin1;
- smc_pptable->LedPin2 = smc_dpm_table->LedPin2;
- smc_pptable->padding8_4 = smc_dpm_table->padding8_4;
-
- /* GFXCLK PLL Spread Spectrum */
- smc_pptable->PllGfxclkSpreadEnabled = smc_dpm_table->PllGfxclkSpreadEnabled;
- smc_pptable->PllGfxclkSpreadPercent = smc_dpm_table->PllGfxclkSpreadPercent;
- smc_pptable->PllGfxclkSpreadFreq = smc_dpm_table->PllGfxclkSpreadFreq;
-
- /* GFXCLK DFLL Spread Spectrum */
- smc_pptable->DfllGfxclkSpreadEnabled = smc_dpm_table->DfllGfxclkSpreadEnabled;
- smc_pptable->DfllGfxclkSpreadPercent = smc_dpm_table->DfllGfxclkSpreadPercent;
- smc_pptable->DfllGfxclkSpreadFreq = smc_dpm_table->DfllGfxclkSpreadFreq;
-
- /* UCLK Spread Spectrum */
- smc_pptable->UclkSpreadEnabled = smc_dpm_table->UclkSpreadEnabled;
- smc_pptable->UclkSpreadPercent = smc_dpm_table->UclkSpreadPercent;
- smc_pptable->UclkSpreadFreq = smc_dpm_table->UclkSpreadFreq;
-
- /* SOCCLK Spread Spectrum */
- smc_pptable->SoclkSpreadEnabled = smc_dpm_table->SoclkSpreadEnabled;
- smc_pptable->SocclkSpreadPercent = smc_dpm_table->SocclkSpreadPercent;
- smc_pptable->SocclkSpreadFreq = smc_dpm_table->SocclkSpreadFreq;
-
- /* Total board power */
- smc_pptable->TotalBoardPower = smc_dpm_table->TotalBoardPower;
- smc_pptable->BoardPadding = smc_dpm_table->BoardPadding;
-
- /* Mvdd Svi2 Div Ratio Setting */
- smc_pptable->MvddRatio = smc_dpm_table->MvddRatio;
+ pr_info("smc_dpm_info table revision(format.content): %d.%d\n",
+ smc_dpm_table->table_header.format_revision,
+ smc_dpm_table->table_header.content_revision);
+
+ if (smc_dpm_table->table_header.format_revision != 4) {
+ pr_err("smc_dpm_info table format revision is not 4!\n");
+ return -EINVAL;
+ }
+
+ switch (smc_dpm_table->table_header.content_revision) {
+ case 5: /* nv10 and nv14 */
+ memcpy(smc_pptable->I2cControllers, smc_dpm_table->I2cControllers,
+ sizeof(*smc_dpm_table) - sizeof(smc_dpm_table->table_header));
+ break;
+ case 7: /* nv12 */
+ ret = smu_get_atom_data_table(smu, index, NULL, NULL, NULL,
+ (uint8_t **)&smc_dpm_table_v4_7);
+ if (ret)
+ return ret;
+ memcpy(smc_pptable->I2cControllers, smc_dpm_table_v4_7->I2cControllers,
+ sizeof(*smc_dpm_table_v4_7) - sizeof(smc_dpm_table_v4_7->table_header));
+ break;
+ default:
+ pr_err("smc_dpm_info with unsupported content revision %d!\n",
+ smc_dpm_table->table_header.content_revision);
+ return -EINVAL;
+ }
if (adev->pm.pp_feature & PP_GFXOFF_MASK) {
/* TODO: remove it once SMU fw fix it */
@@ -1336,8 +1293,6 @@ static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, u
}
if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
- if (size < 0)
- return -EINVAL;
ret = smu_update_table(smu,
SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
@@ -1860,7 +1815,8 @@ static int navi10_get_power_limit(struct smu_context *smu,
int power_src;
if (!smu->power_limit) {
- if (smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
+ if (smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT) &&
+ !amdgpu_sriov_vf(smu->adev)) {
power_src = smu_power_get_index(smu, SMU_POWER_SOURCE_AC);
if (power_src < 0)
return -EINVAL;
@@ -2003,6 +1959,9 @@ static int navi10_set_default_od_settings(struct smu_context *smu, bool initiali
OverDriveTable_t *od_table, *boot_od_table;
int ret = 0;
+ if (amdgpu_sriov_vf(smu->adev))
+ return 0;
+
ret = smu_v11_0_set_default_od_settings(smu, initialize, sizeof(OverDriveTable_t));
if (ret)
return ret;
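A note on the rewritten navi10_append_powerplay_table() above: the long field-by-field copy is replaced by one memcpy per smc_dpm_info content revision. That relies on the atom table (v4_5 for Navi10/Navi14, v4_7 for Navi12) and PPTable_t laying out the board-parameters block identically starting at I2cControllers, so the copy length is simply the atom structure minus its header:

    /* everything after the atom header maps 1:1 onto the driver pptable,
     * starting at the I2cControllers member */
    memcpy(smc_pptable->I2cControllers, smc_dpm_table->I2cControllers,
           sizeof(*smc_dpm_table) - sizeof(smc_dpm_table->table_header));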
diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
index b0ed1b3fe79a..67476047c067 100644
--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
@@ -296,6 +296,8 @@ static int renoir_print_clk_levels(struct smu_context *smu,
for (i = 0; i < count; i++) {
GET_DPM_CUR_FREQ(clk_table, clk_type, i, value);
+ if (!value)
+ continue;
size += sprintf(buf + size, "%d: %uMhz %s\n", i, value,
cur_value == value ? "*" : "");
if (cur_value == value)
@@ -847,7 +849,7 @@ static int renoir_get_power_profile_mode(struct smu_context *smu,
uint32_t i, size = 0;
int16_t workload_type = 0;
- if (!smu->pm_enabled || !buf)
+ if (!buf)
return -EINVAL;
for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
@@ -898,7 +900,7 @@ static bool renoir_is_dpm_running(struct smu_context *smu)
struct amdgpu_device *adev = smu->adev;
/*
- * Util now, the pmfw hasn't exported the interface of SMU
+ * Until now, the pmfw hasn't exported the interface of SMU
* feature mask to APU SKU so just force on all the feature
* at early initial stage.
*/
@@ -955,6 +957,6 @@ static const struct pptable_funcs renoir_ppt_funcs = {
void renoir_set_ppt_funcs(struct smu_context *smu)
{
smu->ppt_funcs = &renoir_ppt_funcs;
- smu->smc_if_version = SMU12_DRIVER_IF_VERSION;
+ smu->smc_driver_if_version = SMU12_DRIVER_IF_VERSION;
smu->is_apu = true;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smu_internal.h b/drivers/gpu/drm/amd/powerplay/smu_internal.h
index 40c35bcc5a0a..c97444841abc 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_internal.h
+++ b/drivers/gpu/drm/amd/powerplay/smu_internal.h
@@ -214,4 +214,9 @@ static inline int smu_send_smc_msg(struct smu_context *smu, enum smu_message_typ
#define smu_set_power_source(smu, power_src) \
((smu)->ppt_funcs->set_power_source ? (smu)->ppt_funcs->set_power_source((smu), (power_src)) : 0)
+#define smu_i2c_eeprom_init(smu, control) \
+ ((smu)->ppt_funcs->i2c_eeprom_init ? (smu)->ppt_funcs->i2c_eeprom_init((control)) : 0)
+#define smu_i2c_eeprom_fini(smu, control) \
+ ((smu)->ppt_funcs->i2c_eeprom_fini ? (smu)->ppt_funcs->i2c_eeprom_fini((control)) : 0)
+
#endif
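Relocating the smu_i2c_eeprom_* macros into smu_internal.h also changes their behaviour when an ASIC provides no hook: they now return 0 instead of -EINVAL, so generic code can call them unconditionally. A hedged sketch of the caller side (the adapter field name below is illustrative, not taken from this patch):

    struct i2c_adapter *control = &adev->pm.smu_i2c;
    int ret;

    ret = smu_i2c_eeprom_init(smu, control);
    if (ret)
        return ret;   /* a real failure reported by the ASIC-specific hook */
    /* ret == 0 also covers ASICs that simply have no EEPROM I2C hook */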
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index 655ba4fb05dc..aa76c2cea747 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -23,6 +23,7 @@
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/reboot.h>
#define SMU_11_0_PARTIAL_PPTABLE
@@ -57,7 +58,7 @@ static int smu_v11_0_send_msg_without_waiting(struct smu_context *smu,
uint16_t msg)
{
struct amdgpu_device *adev = smu->adev;
- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
+ WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
return 0;
}
@@ -65,7 +66,7 @@ static int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg)
{
struct amdgpu_device *adev = smu->adev;
- *arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
+ *arg = RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_82);
return 0;
}
@@ -75,7 +76,7 @@ static int smu_v11_0_wait_for_response(struct smu_context *smu)
uint32_t cur_value, i, timeout = adev->usec_timeout * 10;
for (i = 0; i < timeout; i++) {
- cur_value = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
+ cur_value = RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90);
if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
return cur_value == 0x1 ? 0 : -EIO;
@@ -83,7 +84,10 @@ static int smu_v11_0_wait_for_response(struct smu_context *smu)
}
/* timeout means wrong logic */
- return -ETIME;
+ if (i == timeout)
+ return -ETIME;
+
+ return RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO;
}
int
@@ -107,9 +111,9 @@ smu_v11_0_send_msg_with_param(struct smu_context *smu,
goto out;
}
- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
+ WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
+ WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
smu_v11_0_send_msg_without_waiting(smu, (uint16_t)index);
@@ -119,6 +123,7 @@ smu_v11_0_send_msg_with_param(struct smu_context *smu,
smu_get_message_name(smu, msg), index, param, ret);
goto out;
}
+
if (read_arg) {
ret = smu_v11_0_read_arg(smu, read_arg);
if (ret) {
@@ -201,13 +206,15 @@ int smu_v11_0_load_microcode(struct smu_context *smu)
const struct smc_firmware_header_v1_0 *hdr;
uint32_t addr_start = MP1_SRAM;
uint32_t i;
+ uint32_t smc_fw_size;
uint32_t mp1_fw_flags;
hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
src = (const uint32_t *)(adev->pm.fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ smc_fw_size = hdr->header.ucode_size_bytes;
- for (i = 1; i < MP1_SMC_SIZE/4 - 1; i++) {
+ for (i = 1; i < smc_fw_size/4 - 1; i++) {
WREG32_PCIE(addr_start, src[i]);
addr_start += 4;
}
@@ -264,23 +271,23 @@ int smu_v11_0_check_fw_version(struct smu_context *smu)
switch (smu->adev->asic_type) {
case CHIP_VEGA20:
- smu->smc_if_version = SMU11_DRIVER_IF_VERSION_VG20;
+ smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_VG20;
break;
case CHIP_ARCTURUS:
- smu->smc_if_version = SMU11_DRIVER_IF_VERSION_ARCT;
+ smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_ARCT;
break;
case CHIP_NAVI10:
- smu->smc_if_version = SMU11_DRIVER_IF_VERSION_NV10;
+ smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV10;
break;
case CHIP_NAVI12:
- smu->smc_if_version = SMU11_DRIVER_IF_VERSION_NV12;
+ smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV12;
break;
case CHIP_NAVI14:
- smu->smc_if_version = SMU11_DRIVER_IF_VERSION_NV14;
+ smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV14;
break;
default:
pr_err("smu unsupported asic type:%d.\n", smu->adev->asic_type);
- smu->smc_if_version = SMU11_DRIVER_IF_VERSION_INV;
+ smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_INV;
break;
}
@@ -292,10 +299,10 @@ int smu_v11_0_check_fw_version(struct smu_context *smu)
* Considering above, we just leave user a warning message instead
* of halt driver loading.
*/
- if (if_version != smu->smc_if_version) {
+ if (if_version != smu->smc_driver_if_version) {
pr_info("smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
"smu fw version = 0x%08x (%d.%d.%d)\n",
- smu->smc_if_version, if_version,
+ smu->smc_driver_if_version, if_version,
smu_version, smu_major, smu_minor, smu_debug);
pr_warn("SMU driver if version not matched\n");
}
@@ -479,8 +486,6 @@ int smu_v11_0_init_power(struct smu_context *smu)
{
struct smu_power_context *smu_power = &smu->smu_power;
- if (!smu->pm_enabled)
- return 0;
if (smu_power->power_context || smu_power->power_context_size != 0)
return -EINVAL;
@@ -497,8 +502,6 @@ int smu_v11_0_fini_power(struct smu_context *smu)
{
struct smu_power_context *smu_power = &smu->smu_power;
- if (!smu->pm_enabled)
- return 0;
if (!smu_power->power_context || smu_power->power_context_size == 0)
return -EINVAL;
@@ -730,8 +733,9 @@ int smu_v11_0_parse_pptable(struct smu_context *smu)
struct smu_table_context *table_context = &smu->smu_table;
struct smu_table *table = &table_context->tables[SMU_TABLE_PPTABLE];
+ /* during TDR we need to free and alloc the pptable */
if (table_context->driver_pptable)
- return -EINVAL;
+ kfree(table_context->driver_pptable);
table_context->driver_pptable = kzalloc(table->size, GFP_KERNEL);
@@ -771,6 +775,9 @@ int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
{
int ret;
+ if (amdgpu_sriov_vf(smu->adev))
+ return 0;
+
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetMinDeepSleepDcefclk, clk, NULL);
if (ret)
@@ -783,8 +790,6 @@ int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu)
{
struct smu_table_context *table_context = &smu->smu_table;
- if (!smu->pm_enabled)
- return 0;
if (!table_context)
return -EINVAL;
@@ -816,6 +821,9 @@ int smu_v11_0_set_tool_table_location(struct smu_context *smu)
int ret = 0;
struct smu_table *tool_table = &smu->smu_table.tables[SMU_TABLE_PMSTATUSLOG];
+ if (amdgpu_sriov_vf(smu->adev))
+ return 0;
+
if (tool_table->mc_address) {
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetToolsDramAddrHigh,
@@ -835,6 +843,9 @@ int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count)
{
int ret = 0;
+ if (amdgpu_sriov_vf(smu->adev))
+ return 0;
+
if (!smu->pm_enabled)
return ret;
@@ -849,6 +860,9 @@ int smu_v11_0_set_allowed_mask(struct smu_context *smu)
int ret = 0;
uint32_t feature_mask[2];
+ if (amdgpu_sriov_vf(smu->adev))
+ return 0;
+
mutex_lock(&feature->mutex);
if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) || feature->feature_num < 64)
goto failed;
@@ -877,6 +891,9 @@ int smu_v11_0_get_enabled_mask(struct smu_context *smu,
struct smu_feature *feature = &smu->smu_feature;
int ret = 0;
+ if (amdgpu_sriov_vf(smu->adev) && !amdgpu_sriov_is_pp_one_vf(smu->adev))
+ return 0;
+
if (!feature_mask || num < 2)
return -EINVAL;
@@ -932,8 +949,12 @@ int smu_v11_0_notify_display_change(struct smu_context *smu)
{
int ret = 0;
+ if (amdgpu_sriov_vf(smu->adev))
+ return 0;
+
if (!smu->pm_enabled)
return ret;
+
if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM)
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1, NULL);
@@ -948,9 +969,6 @@ smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
int ret = 0;
int clk_id;
- if (!smu->pm_enabled)
- return ret;
-
if ((smu_msg_get_index(smu, SMU_MSG_GetDcModeMaxDpmFreq) < 0) ||
(smu_msg_get_index(smu, SMU_MSG_GetMaxDpmFreq) < 0))
return 0;
@@ -1096,6 +1114,9 @@ int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
int ret = 0;
uint32_t max_power_limit;
+ if (amdgpu_sriov_vf(smu->adev))
+ return 0;
+
max_power_limit = smu_v11_0_get_max_power_limit(smu);
if (n > max_power_limit) {
@@ -1205,9 +1226,6 @@ int smu_v11_0_start_thermal_control(struct smu_context *smu)
struct smu_temperature_range range;
struct amdgpu_device *adev = smu->adev;
- if (!smu->pm_enabled)
- return ret;
-
memcpy(&range, &smu11_thermal_policy[0], sizeof(struct smu_temperature_range));
ret = smu_get_thermal_temperature_range(smu, &range);
@@ -1321,9 +1339,6 @@ smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
enum smu_clk_type clk_select = 0;
uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
- if (!smu->pm_enabled)
- return -EINVAL;
-
if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) ||
smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
switch (clk_type) {
@@ -1533,39 +1548,65 @@ static int smu_v11_0_ack_ac_dc_interrupt(struct smu_context *smu)
#define THM_11_0__SRCID__THM_DIG_THERM_L2H 0 /* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH */
#define THM_11_0__SRCID__THM_DIG_THERM_H2L 1 /* ASIC_TEMP < CG_THERMAL_INT.DIG_THERM_INTL */
+#define SMUIO_11_0__SRCID__SMUIO_GPIO19 83
+
static int smu_v11_0_irq_process(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
uint32_t client_id = entry->client_id;
uint32_t src_id = entry->src_id;
+ /*
+ * ctxid is used to distinguish different
+ * events for SMCToHost interrupt.
+ */
+ uint32_t ctxid = entry->src_data[0];
+ uint32_t data;
if (client_id == SOC15_IH_CLIENTID_THM) {
switch (src_id) {
case THM_11_0__SRCID__THM_DIG_THERM_L2H:
- pr_warn("GPU over temperature range detected on PCIe %d:%d.%d!\n",
- PCI_BUS_NUM(adev->pdev->devfn),
- PCI_SLOT(adev->pdev->devfn),
- PCI_FUNC(adev->pdev->devfn));
+ dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
+ /*
+ * SW CTF just occurred.
+ * Try to do a graceful shutdown to prevent further damage.
+ */
+ dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
+ orderly_poweroff(true);
break;
case THM_11_0__SRCID__THM_DIG_THERM_H2L:
- pr_warn("GPU under temperature range detected on PCIe %d:%d.%d!\n",
- PCI_BUS_NUM(adev->pdev->devfn),
- PCI_SLOT(adev->pdev->devfn),
- PCI_FUNC(adev->pdev->devfn));
+ dev_emerg(adev->dev, "ERROR: GPU under temperature range detected\n");
break;
default:
- pr_warn("GPU under temperature range unknown src id (%d), detected on PCIe %d:%d.%d!\n",
- src_id,
- PCI_BUS_NUM(adev->pdev->devfn),
- PCI_SLOT(adev->pdev->devfn),
- PCI_FUNC(adev->pdev->devfn));
+ dev_emerg(adev->dev, "ERROR: GPU under temperature range unknown src id (%d)\n",
+ src_id);
break;
-
}
+ } else if (client_id == SOC15_IH_CLIENTID_ROM_SMUIO) {
+ dev_emerg(adev->dev, "ERROR: GPU HW Critical Temperature Fault(aka CTF) detected!\n");
+ /*
+ * HW CTF just occurred. Shutdown to prevent further damage.
+ */
+ dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU HW CTF!\n");
+ orderly_poweroff(true);
} else if (client_id == SOC15_IH_CLIENTID_MP1) {
- if (src_id == 0xfe)
- smu_v11_0_ack_ac_dc_interrupt(&adev->smu);
+ if (src_id == 0xfe) {
+ /* ACK SMUToHost interrupt */
+ data = RREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL);
+ data = REG_SET_FIELD(data, MP1_SMN_IH_SW_INT_CTRL, INT_ACK, 1);
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL, data);
+
+ switch (ctxid) {
+ case 0x3:
+ dev_dbg(adev->dev, "Switched to AC mode!\n");
+ smu_v11_0_ack_ac_dc_interrupt(&adev->smu);
+ break;
+ case 0x4:
+ dev_dbg(adev->dev, "Switched to DC mode!\n");
+ smu_v11_0_ack_ac_dc_interrupt(&adev->smu);
+ break;
+ }
+ }
}
return 0;
@@ -1605,6 +1646,13 @@ int smu_v11_0_register_irq_handler(struct smu_context *smu)
if (ret)
return ret;
+ /* Register CTF(GPIO_19) interrupt */
+ ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_ROM_SMUIO,
+ SMUIO_11_0__SRCID__SMUIO_GPIO19,
+ irq_src);
+ if (ret)
+ return ret;
+
ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1,
0xfe,
irq_src);
@@ -1833,6 +1881,9 @@ int smu_v11_0_override_pcie_parameters(struct smu_context *smu)
uint32_t pcie_gen = 0, pcie_width = 0;
int ret;
+ if (amdgpu_sriov_vf(smu->adev))
+ return 0;
+
if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
pcie_gen = 3;
else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
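Two behavioural points in the smu_v11_0.c changes above are worth spelling out. First, the message-path register accesses switch to the _NO_KIQ accessors so that, under SR-IOV, the MP1 mailbox registers are written directly instead of going through the KIQ ring. Second, both the SW CTF (thermal L2H) interrupt and the new HW CTF (SMUIO GPIO19) interrupt now request a graceful shutdown rather than only logging a warning; orderly_poweroff() from <linux/reboot.h> (the include added above) asks userspace to power the machine off and, with force set, falls back to a forced poweroff if that fails:

    /* on a thermal CTF event, shut down cleanly before the GPU overheats
     * further; 'true' forces a poweroff even if the userspace helper fails */
    orderly_poweroff(true);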
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
index 169ebdad87b8..4023d10fb49b 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
@@ -32,13 +32,15 @@
#include "asic_reg/mp/mp_12_0_0_offset.h"
#include "asic_reg/mp/mp_12_0_0_sh_mask.h"
+#include "asic_reg/smuio/smuio_12_0_0_offset.h"
+#include "asic_reg/smuio/smuio_12_0_0_sh_mask.h"
-#define smnMP1_FIRMWARE_FLAGS 0x3010024
+// because some SMU12 based ASICs use older ip offset tables
+// we should undefine this register from the smuio12 header
+// to prevent confusion down the road
+#undef mmPWR_MISC_CNTL_STATUS
-#define mmSMUIO_GFX_MISC_CNTL 0x00c8
-#define mmSMUIO_GFX_MISC_CNTL_BASE_IDX 0
-#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK 0x00000006L
-#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT 0x1
+#define smnMP1_FIRMWARE_FLAGS 0x3010024
int smu_v12_0_send_msg_without_waiting(struct smu_context *smu,
uint16_t msg)
@@ -158,10 +160,10 @@ int smu_v12_0_check_fw_version(struct smu_context *smu)
* Considering above, we just leave user a warning message instead
* of halt driver loading.
*/
- if (if_version != smu->smc_if_version) {
+ if (if_version != smu->smc_driver_if_version) {
pr_info("smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
"smu fw version = 0x%08x (%d.%d.%d)\n",
- smu->smc_if_version, if_version,
+ smu->smc_driver_if_version, if_version,
smu_version, smu_major, smu_minor, smu_debug);
pr_warn("SMU driver if version not matched\n");
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
index 868e2d5f6e62..85e5b1ed22c2 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
@@ -2780,7 +2780,7 @@ static int ci_update_dpm_settings(struct pp_hwmgr *hwmgr,
if (setting->bupdate_sclk) {
if (!data->sclk_dpm_key_disabled)
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel, NULL);
for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
if (levels[i].ActivityLevel !=
cpu_to_be16(setting->sclk_activity)) {
@@ -2810,12 +2810,12 @@ static int ci_update_dpm_settings(struct pp_hwmgr *hwmgr,
}
}
if (!data->sclk_dpm_key_disabled)
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel, NULL);
}
if (setting->bupdate_mclk) {
if (!data->mclk_dpm_key_disabled)
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel, NULL);
for (i = 0; i < smu_data->smc_state_table.MemoryDpmLevelCount; i++) {
if (mclk_levels[i].ActivityLevel !=
cpu_to_be16(setting->mclk_activity)) {
@@ -2845,7 +2845,7 @@ static int ci_update_dpm_settings(struct pp_hwmgr *hwmgr,
}
}
if (!data->mclk_dpm_key_disabled)
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel, NULL);
}
return 0;
}
@@ -2881,8 +2881,9 @@ static int ci_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
if (hwmgr->dpm_level & profile_mode_mask || !PP_CAP(PHM_PlatformCaps_UVDDPM))
break;
}
- ci_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_UVDDPM_SetEnabledMask,
- data->dpm_level_enable_mask.uvd_dpm_enable_mask);
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_UVDDPM_SetEnabledMask,
+ data->dpm_level_enable_mask.uvd_dpm_enable_mask,
+ NULL);
return 0;
}
@@ -2912,8 +2913,9 @@ static int ci_update_vce_smc_table(struct pp_hwmgr *hwmgr)
if (hwmgr->dpm_level & profile_mode_mask || !PP_CAP(PHM_PlatformCaps_VCEDPM))
break;
}
- ci_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_VCEDPM_SetEnabledMask,
- data->dpm_level_enable_mask.vce_dpm_enable_mask);
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_VCEDPM_SetEnabledMask,
+ data->dpm_level_enable_mask.vce_dpm_enable_mask,
+ NULL);
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
index 32ebb383c456..ecb9ee46d6b3 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
@@ -137,9 +137,7 @@ static int fiji_start_smu_in_protection_mode(struct pp_hwmgr *hwmgr)
PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, RCU_UC_EVENTS,
INTERRUPTS_ENABLED, 1);
- cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, 0x20000);
- cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test);
- PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_Test, 0x20000, NULL);
/* Wait for done bit to be set */
PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND,
@@ -203,8 +201,9 @@ static int fiji_start_avfs_btc(struct pp_hwmgr *hwmgr)
struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
if (0 != smu_data->avfs_btc_param) {
- if (0 != smu7_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_PerformBtc, smu_data->avfs_btc_param)) {
+ if (0 != smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_PerformBtc, smu_data->avfs_btc_param,
+ NULL)) {
pr_info("[AVFS][Fiji_PerformBtc] PerformBTC SMU msg failed");
result = -EINVAL;
}
@@ -1913,7 +1912,8 @@ static int fiji_setup_dpm_led_config(struct pp_hwmgr *hwmgr)
if (mask)
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_LedConfig,
- mask);
+ mask,
+ NULL);
return 0;
}
@@ -2220,14 +2220,16 @@ static int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
res = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetFanMinPwm,
hwmgr->thermal_controller.
- advanceFanControlParameters.ucMinimumPWMLimit);
+ advanceFanControlParameters.ucMinimumPWMLimit,
+ NULL);
if (!res && hwmgr->thermal_controller.
advanceFanControlParameters.ulMinFanSCLKAcousticLimit)
res = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetFanSclkTarget,
hwmgr->thermal_controller.
- advanceFanControlParameters.ulMinFanSCLKAcousticLimit);
+ advanceFanControlParameters.ulMinFanSCLKAcousticLimit,
+ NULL);
if (res)
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
@@ -2242,7 +2244,7 @@ static int fiji_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
if (!hwmgr->avfs_supported)
return 0;
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs, NULL);
return 0;
}
@@ -2390,7 +2392,8 @@ static int fiji_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
PHM_PlatformCaps_StablePState))
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_UVDDPM_SetEnabledMask,
- (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
+ (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel),
+ NULL);
return 0;
}
@@ -2422,7 +2425,8 @@ static int fiji_update_vce_smc_table(struct pp_hwmgr *hwmgr)
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_VCEDPM_SetEnabledMask,
- (uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
+ (uint32_t)1 << smu_data->smc_state_table.VceBootLevel,
+ NULL);
return 0;
}
@@ -2569,7 +2573,7 @@ static int fiji_update_dpm_settings(struct pp_hwmgr *hwmgr,
if (setting->bupdate_sclk) {
if (!data->sclk_dpm_key_disabled)
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel, NULL);
for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
if (levels[i].ActivityLevel !=
cpu_to_be16(setting->sclk_activity)) {
@@ -2599,12 +2603,12 @@ static int fiji_update_dpm_settings(struct pp_hwmgr *hwmgr,
}
}
if (!data->sclk_dpm_key_disabled)
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel, NULL);
}
if (setting->bupdate_mclk) {
if (!data->mclk_dpm_key_disabled)
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel, NULL);
for (i = 0; i < smu_data->smc_state_table.MemoryDpmLevelCount; i++) {
if (mclk_levels[i].ActivityLevel !=
cpu_to_be16(setting->mclk_activity)) {
@@ -2634,7 +2638,7 @@ static int fiji_update_dpm_settings(struct pp_hwmgr *hwmgr,
}
}
if (!data->mclk_dpm_key_disabled)
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel, NULL);
}
return 0;
}
@@ -2649,6 +2653,7 @@ const struct pp_smumgr_func fiji_smu_funcs = {
.request_smu_load_specific_fw = NULL,
.send_msg_to_smc = &smu7_send_msg_to_smc,
.send_msg_to_smc_with_parameter = &smu7_send_msg_to_smc_with_parameter,
+ .get_argument = smu7_get_argument,
.download_pptable_settings = NULL,
.upload_pptable_settings = NULL,
.update_smc_table = fiji_update_smc_table,
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
index 732005c03a82..431ad2fd38df 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
@@ -2669,6 +2669,7 @@ const struct pp_smumgr_func iceland_smu_funcs = {
.request_smu_load_specific_fw = &iceland_request_smu_load_specific_fw,
.send_msg_to_smc = &smu7_send_msg_to_smc,
.send_msg_to_smc_with_parameter = &smu7_send_msg_to_smc_with_parameter,
+ .get_argument = smu7_get_argument,
.download_pptable_settings = NULL,
.upload_pptable_settings = NULL,
.get_offsetof = iceland_get_offsetof,
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
index 23c12018dbc1..c3d2e6dcf62a 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -99,7 +99,8 @@ static int polaris10_perform_btc(struct pp_hwmgr *hwmgr)
struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
if (0 != smu_data->avfs_btc_param) {
- if (0 != smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_PerformBtc, smu_data->avfs_btc_param)) {
+ if (0 != smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_PerformBtc, smu_data->avfs_btc_param,
+ NULL)) {
pr_info("[AVFS][SmuPolaris10_PerformBtc] PerformBTC SMU msg failed");
result = -1;
}
@@ -2049,15 +2050,16 @@ int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
return 0;
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetGBDroopSettings, data->avfs_vdroop_override_setting);
+ PPSMC_MSG_SetGBDroopSettings, data->avfs_vdroop_override_setting,
+ NULL);
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs, NULL);
/* Apply avfs cks-off voltages to avoid the overshoot
* when switching to the highest sclk frequency
*/
if (data->apply_avfs_cks_off_voltage)
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ApplyAvfsCksOffVoltage);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ApplyAvfsCksOffVoltage, NULL);
return 0;
}
@@ -2158,14 +2160,16 @@ static int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
res = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetFanMinPwm,
hwmgr->thermal_controller.
- advanceFanControlParameters.ucMinimumPWMLimit);
+ advanceFanControlParameters.ucMinimumPWMLimit,
+ NULL);
if (!res && hwmgr->thermal_controller.
advanceFanControlParameters.ulMinFanSCLKAcousticLimit)
res = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetFanSclkTarget,
hwmgr->thermal_controller.
- advanceFanControlParameters.ulMinFanSCLKAcousticLimit);
+ advanceFanControlParameters.ulMinFanSCLKAcousticLimit,
+ NULL);
if (res)
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
@@ -2202,7 +2206,8 @@ static int polaris10_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
PHM_PlatformCaps_StablePState))
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_UVDDPM_SetEnabledMask,
- (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
+ (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel),
+ NULL);
return 0;
}
@@ -2234,7 +2239,8 @@ static int polaris10_update_vce_smc_table(struct pp_hwmgr *hwmgr)
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_VCEDPM_SetEnabledMask,
- (uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
+ (uint32_t)1 << smu_data->smc_state_table.VceBootLevel,
+ NULL);
return 0;
}
@@ -2485,7 +2491,7 @@ static int polaris10_update_dpm_settings(struct pp_hwmgr *hwmgr,
if (setting->bupdate_sclk) {
if (!data->sclk_dpm_key_disabled)
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel, NULL);
for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
if (levels[i].ActivityLevel !=
cpu_to_be16(setting->sclk_activity)) {
@@ -2515,12 +2521,12 @@ static int polaris10_update_dpm_settings(struct pp_hwmgr *hwmgr,
}
}
if (!data->sclk_dpm_key_disabled)
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel, NULL);
}
if (setting->bupdate_mclk) {
if (!data->mclk_dpm_key_disabled)
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel, NULL);
for (i = 0; i < smu_data->smc_state_table.MemoryDpmLevelCount; i++) {
if (mclk_levels[i].ActivityLevel !=
cpu_to_be16(setting->mclk_activity)) {
@@ -2550,7 +2556,7 @@ static int polaris10_update_dpm_settings(struct pp_hwmgr *hwmgr,
}
}
if (!data->mclk_dpm_key_disabled)
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel, NULL);
}
return 0;
}
@@ -2565,6 +2571,7 @@ const struct pp_smumgr_func polaris10_smu_funcs = {
.request_smu_load_specific_fw = NULL,
.send_msg_to_smc = smu7_send_msg_to_smc,
.send_msg_to_smc_with_parameter = smu7_send_msg_to_smc_with_parameter,
+ .get_argument = smu7_get_argument,
.download_pptable_settings = NULL,
.upload_pptable_settings = NULL,
.update_smc_table = polaris10_update_smc_table,
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
index 2319400a3fcb..ea2279bb8cbf 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
@@ -126,15 +126,18 @@ static int smu10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
"Invalid SMU Table version!", return -EINVAL;);
PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
"Invalid SMU Table Length!", return -EINVAL;);
- smu10_send_msg_to_smc_with_parameter(hwmgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
- upper_32_bits(priv->smu_tables.entry[table_id].mc_addr));
- smu10_send_msg_to_smc_with_parameter(hwmgr,
+ upper_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+ NULL);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrLow,
- lower_32_bits(priv->smu_tables.entry[table_id].mc_addr));
- smu10_send_msg_to_smc_with_parameter(hwmgr,
+ lower_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+ NULL);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_TransferTableSmu2Dram,
- priv->smu_tables.entry[table_id].table_id);
+ priv->smu_tables.entry[table_id].table_id,
+ NULL);
/* flush hdp cache */
amdgpu_asic_flush_hdp(adev, NULL);
@@ -164,15 +167,18 @@ static int smu10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
amdgpu_asic_flush_hdp(adev, NULL);
- smu10_send_msg_to_smc_with_parameter(hwmgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
- upper_32_bits(priv->smu_tables.entry[table_id].mc_addr));
- smu10_send_msg_to_smc_with_parameter(hwmgr,
+ upper_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+ NULL);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrLow,
- lower_32_bits(priv->smu_tables.entry[table_id].mc_addr));
- smu10_send_msg_to_smc_with_parameter(hwmgr,
+ lower_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+ NULL);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_TransferTableDram2Smu,
- priv->smu_tables.entry[table_id].table_id);
+ priv->smu_tables.entry[table_id].table_id,
+ NULL);
return 0;
}
@@ -181,9 +187,9 @@ static int smu10_verify_smc_interface(struct pp_hwmgr *hwmgr)
{
uint32_t smc_driver_if_version;
- smu10_send_msg_to_smc(hwmgr,
- PPSMC_MSG_GetDriverIfVersion);
- smc_driver_if_version = smu10_read_arg_from_smc(hwmgr);
+ smum_send_msg_to_smc(hwmgr,
+ PPSMC_MSG_GetDriverIfVersion,
+ &smc_driver_if_version);
if ((smc_driver_if_version != SMU10_DRIVER_IF_VERSION) &&
(smc_driver_if_version != SMU10_DRIVER_IF_VERSION + 1)) {
@@ -217,11 +223,11 @@ static int smu10_start_smu(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = hwmgr->adev;
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion);
- hwmgr->smu_version = smu10_read_arg_from_smc(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion, &hwmgr->smu_version);
adev->pm.fw_version = hwmgr->smu_version >> 8;
- if (adev->rev_id < 0x8 && adev->pdev->device != 0x15d8 &&
+ if (!(adev->apu_flags & AMD_APU_IS_RAVEN2) &&
+ (adev->apu_flags & AMD_APU_IS_RAVEN) &&
adev->pm.fw_version < 0x1e45)
adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
index 3f51d545e8ff..aae25243eb10 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
@@ -191,13 +191,6 @@ int smu7_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
return 0;
}
-int smu7_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr, uint16_t msg)
-{
- cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg);
-
- return 0;
-}
-
int smu7_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter)
{
PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);
@@ -207,25 +200,14 @@ int smu7_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg, ui
return smu7_send_msg_to_smc(hwmgr, msg);
}
-int smu7_send_msg_to_smc_with_parameter_without_waiting(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter)
+uint32_t smu7_get_argument(struct pp_hwmgr *hwmgr)
{
- cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, parameter);
-
- return smu7_send_msg_to_smc_without_waiting(hwmgr, msg);
+ return cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
}
int smu7_send_msg_to_smc_offset(struct pp_hwmgr *hwmgr)
{
- cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, 0x20000);
-
- cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test);
-
- PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);
-
- if (1 != PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP))
- pr_info("Failed to send Message.\n");
-
- return 0;
+ return smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_Test, 0x20000, NULL);
}
enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type)
@@ -353,12 +335,14 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr)
if (hwmgr->chip_id > CHIP_TOPAZ) { /* add support for Topaz */
if (hwmgr->not_vf) {
- smu7_send_msg_to_smc_with_parameter(hwmgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SMU_DRAM_ADDR_HI,
- upper_32_bits(smu_data->smu_buffer.mc_addr));
- smu7_send_msg_to_smc_with_parameter(hwmgr,
+ upper_32_bits(smu_data->smu_buffer.mc_addr),
+ NULL);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SMU_DRAM_ADDR_LO,
- lower_32_bits(smu_data->smu_buffer.mc_addr));
+ lower_32_bits(smu_data->smu_buffer.mc_addr),
+ NULL);
}
fw_to_load = UCODE_ID_RLC_G_MASK
+ UCODE_ID_SDMA0_MASK
@@ -423,10 +407,16 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr)
}
memcpy_toio(smu_data->header_buffer.kaddr, smu_data->toc,
sizeof(struct SMU_DRAMData_TOC));
- smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, upper_32_bits(smu_data->header_buffer.mc_addr));
- smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, lower_32_bits(smu_data->header_buffer.mc_addr));
-
- smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_LoadUcodes, fw_to_load);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_DRV_DRAM_ADDR_HI,
+ upper_32_bits(smu_data->header_buffer.mc_addr),
+ NULL);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_DRV_DRAM_ADDR_LO,
+ lower_32_bits(smu_data->header_buffer.mc_addr),
+ NULL);
+
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_LoadUcodes, fw_to_load, NULL);
r = smu7_check_fw_load_finish(hwmgr, fw_to_load);
if (!r)
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
index 01f0538fba6b..e7303dc8c260 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
@@ -60,11 +60,9 @@ int smu7_copy_bytes_to_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address,
int smu7_program_jump_on_start(struct pp_hwmgr *hwmgr);
bool smu7_is_smc_ram_running(struct pp_hwmgr *hwmgr);
int smu7_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg);
-int smu7_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr, uint16_t msg);
int smu7_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg,
uint32_t parameter);
-int smu7_send_msg_to_smc_with_parameter_without_waiting(struct pp_hwmgr *hwmgr,
- uint16_t msg, uint32_t parameter);
+uint32_t smu7_get_argument(struct pp_hwmgr *hwmgr);
int smu7_send_msg_to_smc_offset(struct pp_hwmgr *hwmgr);
enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
index 7dca04a89217..76d4f12ceedf 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
@@ -610,18 +610,21 @@ static int smu8_download_pptable_settings(struct pp_hwmgr *hwmgr, void **table)
*table = (struct SMU8_Fusion_ClkTable *)smu8_smu->scratch_buffer[i].kaddr;
- smu8_send_msg_to_smc_with_parameter(hwmgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetClkTableAddrHi,
- upper_32_bits(smu8_smu->scratch_buffer[i].mc_addr));
+ upper_32_bits(smu8_smu->scratch_buffer[i].mc_addr),
+ NULL);
- smu8_send_msg_to_smc_with_parameter(hwmgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetClkTableAddrLo,
- lower_32_bits(smu8_smu->scratch_buffer[i].mc_addr));
+ lower_32_bits(smu8_smu->scratch_buffer[i].mc_addr),
+ NULL);
- smu8_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
- smu8_smu->toc_entry_clock_table);
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
+ smu8_smu->toc_entry_clock_table,
+ NULL);
- smu8_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToDram);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToDram, NULL);
return 0;
}
@@ -637,18 +640,21 @@ static int smu8_upload_pptable_settings(struct pp_hwmgr *hwmgr)
break;
}
- smu8_send_msg_to_smc_with_parameter(hwmgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetClkTableAddrHi,
- upper_32_bits(smu8_smu->scratch_buffer[i].mc_addr));
+ upper_32_bits(smu8_smu->scratch_buffer[i].mc_addr),
+ NULL);
- smu8_send_msg_to_smc_with_parameter(hwmgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetClkTableAddrLo,
- lower_32_bits(smu8_smu->scratch_buffer[i].mc_addr));
+ lower_32_bits(smu8_smu->scratch_buffer[i].mc_addr),
+ NULL);
- smu8_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
- smu8_smu->toc_entry_clock_table);
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
+ smu8_smu->toc_entry_clock_table,
+ NULL);
- smu8_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToSmu);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToSmu, NULL);
return 0;
}
@@ -671,25 +677,30 @@ static int smu8_request_smu_load_fw(struct pp_hwmgr *hwmgr)
smu8_write_smc_sram_dword(hwmgr, smc_address, 0, smc_address+4);
- smu8_send_msg_to_smc_with_parameter(hwmgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DriverDramAddrHi,
- upper_32_bits(smu8_smu->toc_buffer.mc_addr));
+ upper_32_bits(smu8_smu->toc_buffer.mc_addr),
+ NULL);
- smu8_send_msg_to_smc_with_parameter(hwmgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DriverDramAddrLo,
- lower_32_bits(smu8_smu->toc_buffer.mc_addr));
+ lower_32_bits(smu8_smu->toc_buffer.mc_addr),
+ NULL);
- smu8_send_msg_to_smc(hwmgr, PPSMC_MSG_InitJobs);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_InitJobs, NULL);
- smu8_send_msg_to_smc_with_parameter(hwmgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_ExecuteJob,
- smu8_smu->toc_entry_aram);
- smu8_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
- smu8_smu->toc_entry_power_profiling_index);
+ smu8_smu->toc_entry_aram,
+ NULL);
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
+ smu8_smu->toc_entry_power_profiling_index,
+ NULL);
- smu8_send_msg_to_smc_with_parameter(hwmgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_ExecuteJob,
- smu8_smu->toc_entry_initialize_index);
+ smu8_smu->toc_entry_initialize_index,
+ NULL);
fw_to_check = UCODE_ID_RLC_G_MASK |
UCODE_ID_SDMA0_MASK |
@@ -860,11 +871,13 @@ static bool smu8_dpm_check_smu_features(struct pp_hwmgr *hwmgr,
unsigned long check_feature)
{
int result;
- unsigned long features;
+ uint32_t features;
- result = smu8_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetFeatureStatus, 0);
+ result = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_GetFeatureStatus,
+ 0,
+ &features);
if (result == 0) {
- features = smum_get_argument(hwmgr);
if (features & check_feature)
return true;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
index 4240aeec9000..b6fb48066841 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
@@ -103,14 +103,6 @@ int smum_process_firmware_header(struct pp_hwmgr *hwmgr)
return 0;
}
-uint32_t smum_get_argument(struct pp_hwmgr *hwmgr)
-{
- if (NULL != hwmgr->smumgr_funcs->get_argument)
- return hwmgr->smumgr_funcs->get_argument(hwmgr);
-
- return 0;
-}
-
uint32_t smum_get_mac_definition(struct pp_hwmgr *hwmgr, uint32_t value)
{
if (NULL != hwmgr->smumgr_funcs->get_mac_definition)
@@ -135,22 +127,58 @@ int smum_upload_powerplay_table(struct pp_hwmgr *hwmgr)
return 0;
}
-int smum_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
+int smum_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t *resp)
{
- if (hwmgr == NULL || hwmgr->smumgr_funcs->send_msg_to_smc == NULL)
+ int ret = 0;
+
+ if (hwmgr == NULL ||
+ hwmgr->smumgr_funcs->send_msg_to_smc == NULL ||
+ (resp && !hwmgr->smumgr_funcs->get_argument))
return -EINVAL;
- return hwmgr->smumgr_funcs->send_msg_to_smc(hwmgr, msg);
+ mutex_lock(&hwmgr->msg_lock);
+
+ ret = hwmgr->smumgr_funcs->send_msg_to_smc(hwmgr, msg);
+ if (ret) {
+ mutex_unlock(&hwmgr->msg_lock);
+ return ret;
+ }
+
+ if (resp)
+ *resp = hwmgr->smumgr_funcs->get_argument(hwmgr);
+
+ mutex_unlock(&hwmgr->msg_lock);
+
+ return ret;
}
int smum_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
- uint16_t msg, uint32_t parameter)
+ uint16_t msg,
+ uint32_t parameter,
+ uint32_t *resp)
{
+ int ret = 0;
+
if (hwmgr == NULL ||
- hwmgr->smumgr_funcs->send_msg_to_smc_with_parameter == NULL)
+ hwmgr->smumgr_funcs->send_msg_to_smc_with_parameter == NULL ||
+ (resp && !hwmgr->smumgr_funcs->get_argument))
return -EINVAL;
- return hwmgr->smumgr_funcs->send_msg_to_smc_with_parameter(
+
+ mutex_lock(&hwmgr->msg_lock);
+
+ ret = hwmgr->smumgr_funcs->send_msg_to_smc_with_parameter(
hwmgr, msg, parameter);
+ if (ret) {
+ mutex_unlock(&hwmgr->msg_lock);
+ return ret;
+ }
+
+ if (resp)
+ *resp = hwmgr->smumgr_funcs->get_argument(hwmgr);
+
+ mutex_unlock(&hwmgr->msg_lock);
+
+ return ret;
}
int smum_init_smc_table(struct pp_hwmgr *hwmgr)
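The reworked wrappers above are the heart of this patch for the powerplay/hwmgr path: holding msg_lock across both the send and the argument read closes the race where one thread could pick up another thread's SMC reply. In caller terms:

    /* before: two unlocked steps, the second could read a stale or
     * foreign response
     *
     *     smum_send_msg_to_smc_with_parameter(hwmgr,
     *                     PPSMC_MSG_GetFeatureStatus, 0);
     *     features = smum_get_argument(hwmgr);
     *
     * after: the send and the response read happen under one lock */
    ret = smum_send_msg_to_smc_with_parameter(hwmgr,
                    PPSMC_MSG_GetFeatureStatus, 0, &features);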
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
index f19bac7ef7ba..398e7e3587de 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
@@ -2702,7 +2702,8 @@ static int tonga_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
PHM_PlatformCaps_StablePState))
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_UVDDPM_SetEnabledMask,
- (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
+ (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel),
+ NULL);
return 0;
}
@@ -2733,7 +2734,8 @@ static int tonga_update_vce_smc_table(struct pp_hwmgr *hwmgr)
PHM_PlatformCaps_StablePState))
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_VCEDPM_SetEnabledMask,
- (uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
+ (uint32_t)1 << smu_data->smc_state_table.VceBootLevel,
+ NULL);
return 0;
}
@@ -3168,7 +3170,7 @@ static int tonga_update_dpm_settings(struct pp_hwmgr *hwmgr,
if (setting->bupdate_sclk) {
if (!data->sclk_dpm_key_disabled)
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel, NULL);
for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
if (levels[i].ActivityLevel !=
cpu_to_be16(setting->sclk_activity)) {
@@ -3198,12 +3200,12 @@ static int tonga_update_dpm_settings(struct pp_hwmgr *hwmgr,
}
}
if (!data->sclk_dpm_key_disabled)
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel, NULL);
}
if (setting->bupdate_mclk) {
if (!data->mclk_dpm_key_disabled)
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel, NULL);
for (i = 0; i < smu_data->smc_state_table.MemoryDpmLevelCount; i++) {
if (mclk_levels[i].ActivityLevel !=
cpu_to_be16(setting->mclk_activity)) {
@@ -3233,7 +3235,7 @@ static int tonga_update_dpm_settings(struct pp_hwmgr *hwmgr,
}
}
if (!data->mclk_dpm_key_disabled)
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel, NULL);
}
return 0;
}
@@ -3248,6 +3250,7 @@ const struct pp_smumgr_func tonga_smu_funcs = {
.request_smu_load_specific_fw = NULL,
.send_msg_to_smc = &smu7_send_msg_to_smc,
.send_msg_to_smc_with_parameter = &smu7_send_msg_to_smc_with_parameter,
+ .get_argument = smu7_get_argument,
.download_pptable_settings = NULL,
.upload_pptable_settings = NULL,
.update_smc_table = tonga_update_smc_table,
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
index 715564009089..1e222c5d91a4 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
@@ -47,15 +47,18 @@ static int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
"Invalid SMU Table version!", return -EINVAL);
PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
"Invalid SMU Table Length!", return -EINVAL);
- smu9_send_msg_to_smc_with_parameter(hwmgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
- upper_32_bits(priv->smu_tables.entry[table_id].mc_addr));
- smu9_send_msg_to_smc_with_parameter(hwmgr,
+ upper_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+ NULL);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrLow,
- lower_32_bits(priv->smu_tables.entry[table_id].mc_addr));
- smu9_send_msg_to_smc_with_parameter(hwmgr,
+ lower_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+ NULL);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_TransferTableSmu2Dram,
- priv->smu_tables.entry[table_id].table_id);
+ priv->smu_tables.entry[table_id].table_id,
+ NULL);
/* flush hdp cache */
amdgpu_asic_flush_hdp(adev, NULL);
@@ -90,15 +93,18 @@ static int vega10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
amdgpu_asic_flush_hdp(adev, NULL);
- smu9_send_msg_to_smc_with_parameter(hwmgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
- upper_32_bits(priv->smu_tables.entry[table_id].mc_addr));
- smu9_send_msg_to_smc_with_parameter(hwmgr,
+ upper_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+ NULL);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrLow,
- lower_32_bits(priv->smu_tables.entry[table_id].mc_addr));
- smu9_send_msg_to_smc_with_parameter(hwmgr,
+ lower_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+ NULL);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_TransferTableDram2Smu,
- priv->smu_tables.entry[table_id].table_id);
+ priv->smu_tables.entry[table_id].table_id,
+ NULL);
return 0;
}
@@ -118,17 +124,21 @@ int vega10_enable_smc_features(struct pp_hwmgr *hwmgr,
return 0;
return smum_send_msg_to_smc_with_parameter(hwmgr,
- msg, feature_mask);
+ msg, feature_mask, NULL);
}
int vega10_get_enabled_smc_features(struct pp_hwmgr *hwmgr,
uint64_t *features_enabled)
{
+ uint32_t enabled_features;
+
if (features_enabled == NULL)
return -EINVAL;
- smu9_send_msg_to_smc(hwmgr, PPSMC_MSG_GetEnabledSmuFeatures);
- *features_enabled = smu9_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr,
+ PPSMC_MSG_GetEnabledSmuFeatures,
+ &enabled_features);
+ *features_enabled = enabled_features;
return 0;
}
@@ -150,12 +160,14 @@ static int vega10_set_tools_address(struct pp_hwmgr *hwmgr)
struct vega10_smumgr *priv = hwmgr->smu_backend;
if (priv->smu_tables.entry[TOOLSTABLE].mc_addr) {
- smu9_send_msg_to_smc_with_parameter(hwmgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetToolsDramAddrHigh,
- upper_32_bits(priv->smu_tables.entry[TOOLSTABLE].mc_addr));
- smu9_send_msg_to_smc_with_parameter(hwmgr,
+ upper_32_bits(priv->smu_tables.entry[TOOLSTABLE].mc_addr),
+ NULL);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetToolsDramAddrLow,
- lower_32_bits(priv->smu_tables.entry[TOOLSTABLE].mc_addr));
+ lower_32_bits(priv->smu_tables.entry[TOOLSTABLE].mc_addr),
+ NULL);
}
return 0;
}
@@ -167,11 +179,11 @@ static int vega10_verify_smc_interface(struct pp_hwmgr *hwmgr)
uint32_t dev_id;
uint32_t rev_id;
- PP_ASSERT_WITH_CODE(!smu9_send_msg_to_smc(hwmgr,
- PPSMC_MSG_GetDriverIfVersion),
+ PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
+ PPSMC_MSG_GetDriverIfVersion,
+ &smc_driver_if_version),
"Attempt to get SMC IF Version Number Failed!",
return -EINVAL);
- smc_driver_if_version = smu9_get_argument(hwmgr);
dev_id = adev->pdev->device;
rev_id = adev->pdev->revision;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
index 275dbf65f1a0..f54df76537e4 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
@@ -50,18 +50,21 @@ static int vega12_copy_table_from_smc(struct pp_hwmgr *hwmgr,
"Invalid SMU Table version!", return -EINVAL);
PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
"Invalid SMU Table Length!", return -EINVAL);
- PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
- upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
+ upper_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+ NULL) == 0,
"[CopyTableFromSMC] Attempt to Set Dram Addr High Failed!", return -EINVAL);
- PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrLow,
- lower_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
+ lower_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+ NULL) == 0,
"[CopyTableFromSMC] Attempt to Set Dram Addr Low Failed!",
return -EINVAL);
- PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_TransferTableSmu2Dram,
- table_id) == 0,
+ table_id,
+ NULL) == 0,
"[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!",
return -EINVAL);
@@ -98,19 +101,22 @@ static int vega12_copy_table_to_smc(struct pp_hwmgr *hwmgr,
amdgpu_asic_flush_hdp(adev, NULL);
- PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
- upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
+ upper_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+ NULL) == 0,
"[CopyTableToSMC] Attempt to Set Dram Addr High Failed!",
return -EINVAL;);
- PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrLow,
- lower_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
+ lower_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+ NULL) == 0,
"[CopyTableToSMC] Attempt to Set Dram Addr Low Failed!",
return -EINVAL);
- PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_TransferTableDram2Smu,
- table_id) == 0,
+ table_id,
+ NULL) == 0,
"[CopyTableToSMC] Attempt to Transfer Table To SMU Failed!",
return -EINVAL);
@@ -126,21 +132,21 @@ int vega12_enable_smc_features(struct pp_hwmgr *hwmgr,
smu_features_high = (uint32_t)((feature_mask & SMU_FEATURES_HIGH_MASK) >> SMU_FEATURES_HIGH_SHIFT);
if (enable) {
- PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_EnableSmuFeaturesLow, smu_features_low) == 0,
+ PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_EnableSmuFeaturesLow, smu_features_low, NULL) == 0,
"[EnableDisableSMCFeatures] Attempt to enable SMU features Low failed!",
return -EINVAL);
- PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_EnableSmuFeaturesHigh, smu_features_high) == 0,
+ PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_EnableSmuFeaturesHigh, smu_features_high, NULL) == 0,
"[EnableDisableSMCFeatures] Attempt to enable SMU features High failed!",
return -EINVAL);
} else {
- PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_DisableSmuFeaturesLow, smu_features_low) == 0,
+ PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_DisableSmuFeaturesLow, smu_features_low, NULL) == 0,
"[EnableDisableSMCFeatures] Attempt to disable SMU features Low failed!",
return -EINVAL);
- PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_DisableSmuFeaturesHigh, smu_features_high) == 0,
+ PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_DisableSmuFeaturesHigh, smu_features_high, NULL) == 0,
"[EnableDisableSMCFeatures] Attempt to disable SMU features High failed!",
return -EINVAL);
}
@@ -156,17 +162,17 @@ int vega12_get_enabled_smc_features(struct pp_hwmgr *hwmgr,
if (features_enabled == NULL)
return -EINVAL;
- PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc(hwmgr,
- PPSMC_MSG_GetEnabledSmuFeaturesLow) == 0,
+ PP_ASSERT_WITH_CODE(smum_send_msg_to_smc(hwmgr,
+ PPSMC_MSG_GetEnabledSmuFeaturesLow,
+ &smc_features_low) == 0,
"[GetEnabledSMCFeatures] Attempt to get SMU features Low failed!",
return -EINVAL);
- smc_features_low = smu9_get_argument(hwmgr);
- PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc(hwmgr,
- PPSMC_MSG_GetEnabledSmuFeaturesHigh) == 0,
+ PP_ASSERT_WITH_CODE(smum_send_msg_to_smc(hwmgr,
+ PPSMC_MSG_GetEnabledSmuFeaturesHigh,
+ &smc_features_high) == 0,
"[GetEnabledSMCFeatures] Attempt to get SMU features High failed!",
return -EINVAL);
- smc_features_high = smu9_get_argument(hwmgr);
*features_enabled = ((((uint64_t)smc_features_low << SMU_FEATURES_LOW_SHIFT) & SMU_FEATURES_LOW_MASK) |
(((uint64_t)smc_features_high << SMU_FEATURES_HIGH_SHIFT) & SMU_FEATURES_HIGH_MASK));
@@ -192,12 +198,14 @@ static int vega12_set_tools_address(struct pp_hwmgr *hwmgr)
(struct vega12_smumgr *)(hwmgr->smu_backend);
if (priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr) {
- if (!smu9_send_msg_to_smc_with_parameter(hwmgr,
+ if (!smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetToolsDramAddrHigh,
- upper_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr)))
- smu9_send_msg_to_smc_with_parameter(hwmgr,
+ upper_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr),
+ NULL))
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetToolsDramAddrLow,
- lower_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr));
+ lower_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr),
+ NULL);
}
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
index 16aa171971d3..2fb97554134f 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
@@ -175,18 +175,20 @@ static int vega20_copy_table_from_smc(struct pp_hwmgr *hwmgr,
PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
"Invalid SMU Table Length!", return -EINVAL);
- PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
- upper_32_bits(priv->smu_tables.entry[table_id].mc_addr))) == 0,
+ upper_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+ NULL)) == 0,
"[CopyTableFromSMC] Attempt to Set Dram Addr High Failed!",
return ret);
- PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrLow,
- lower_32_bits(priv->smu_tables.entry[table_id].mc_addr))) == 0,
+ lower_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+ NULL)) == 0,
"[CopyTableFromSMC] Attempt to Set Dram Addr Low Failed!",
return ret);
- PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_TransferTableSmu2Dram, table_id)) == 0,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_TransferTableSmu2Dram, table_id, NULL)) == 0,
"[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!",
return ret);
@@ -224,18 +226,20 @@ static int vega20_copy_table_to_smc(struct pp_hwmgr *hwmgr,
amdgpu_asic_flush_hdp(adev, NULL);
- PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
- upper_32_bits(priv->smu_tables.entry[table_id].mc_addr))) == 0,
+ upper_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+ NULL)) == 0,
"[CopyTableToSMC] Attempt to Set Dram Addr High Failed!",
return ret);
- PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrLow,
- lower_32_bits(priv->smu_tables.entry[table_id].mc_addr))) == 0,
+ lower_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+ NULL)) == 0,
"[CopyTableToSMC] Attempt to Set Dram Addr Low Failed!",
return ret);
- PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_TransferTableDram2Smu, table_id)) == 0,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_TransferTableDram2Smu, table_id, NULL)) == 0,
"[CopyTableToSMC] Attempt to Transfer Table To SMU Failed!",
return ret);
@@ -255,18 +259,22 @@ int vega20_set_activity_monitor_coeff(struct pp_hwmgr *hwmgr,
amdgpu_asic_flush_hdp(adev, NULL);
- PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
- upper_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr))) == 0,
+ upper_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr),
+ NULL)) == 0,
"[SetActivityMonitor] Attempt to Set Dram Addr High Failed!",
return ret);
- PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrLow,
- lower_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr))) == 0,
+ lower_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr),
+ NULL)) == 0,
"[SetActivityMonitor] Attempt to Set Dram Addr Low Failed!",
return ret);
- PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_TransferTableDram2Smu, TABLE_ACTIVITY_MONITOR_COEFF | (workload_type << 16))) == 0,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_TransferTableDram2Smu,
+ TABLE_ACTIVITY_MONITOR_COEFF | (workload_type << 16),
+ NULL)) == 0,
"[SetActivityMonitor] Attempt to Transfer Table To SMU Failed!",
return ret);
@@ -281,19 +289,21 @@ int vega20_get_activity_monitor_coeff(struct pp_hwmgr *hwmgr,
struct amdgpu_device *adev = hwmgr->adev;
int ret = 0;
- PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
- upper_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr))) == 0,
+ upper_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr),
+ NULL)) == 0,
"[GetActivityMonitor] Attempt to Set Dram Addr High Failed!",
return ret);
- PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrLow,
- lower_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr))) == 0,
+ lower_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr),
+ NULL)) == 0,
"[GetActivityMonitor] Attempt to Set Dram Addr Low Failed!",
return ret);
- PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_TransferTableSmu2Dram,
- TABLE_ACTIVITY_MONITOR_COEFF | (workload_type << 16))) == 0,
+ TABLE_ACTIVITY_MONITOR_COEFF | (workload_type << 16), NULL)) == 0,
"[GetActivityMonitor] Attempt to Transfer Table From SMU Failed!",
return ret);
@@ -316,21 +326,21 @@ int vega20_enable_smc_features(struct pp_hwmgr *hwmgr,
smu_features_high = (uint32_t)((feature_mask & SMU_FEATURES_HIGH_MASK) >> SMU_FEATURES_HIGH_SHIFT);
if (enable) {
- PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_EnableSmuFeaturesLow, smu_features_low)) == 0,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_EnableSmuFeaturesLow, smu_features_low, NULL)) == 0,
"[EnableDisableSMCFeatures] Attempt to enable SMU features Low failed!",
return ret);
- PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_EnableSmuFeaturesHigh, smu_features_high)) == 0,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_EnableSmuFeaturesHigh, smu_features_high, NULL)) == 0,
"[EnableDisableSMCFeatures] Attempt to enable SMU features High failed!",
return ret);
} else {
- PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_DisableSmuFeaturesLow, smu_features_low)) == 0,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_DisableSmuFeaturesLow, smu_features_low, NULL)) == 0,
"[EnableDisableSMCFeatures] Attempt to disable SMU features Low failed!",
return ret);
- PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_DisableSmuFeaturesHigh, smu_features_high)) == 0,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_DisableSmuFeaturesHigh, smu_features_high, NULL)) == 0,
"[EnableDisableSMCFeatures] Attempt to disable SMU features High failed!",
return ret);
}
@@ -347,16 +357,16 @@ int vega20_get_enabled_smc_features(struct pp_hwmgr *hwmgr,
if (features_enabled == NULL)
return -EINVAL;
- PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc(hwmgr,
- PPSMC_MSG_GetEnabledSmuFeaturesLow)) == 0,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr,
+ PPSMC_MSG_GetEnabledSmuFeaturesLow,
+ &smc_features_low)) == 0,
"[GetEnabledSMCFeatures] Attempt to get SMU features Low failed!",
return ret);
- smc_features_low = vega20_get_argument(hwmgr);
- PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc(hwmgr,
- PPSMC_MSG_GetEnabledSmuFeaturesHigh)) == 0,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr,
+ PPSMC_MSG_GetEnabledSmuFeaturesHigh,
+ &smc_features_high)) == 0,
"[GetEnabledSMCFeatures] Attempt to get SMU features High failed!",
return ret);
- smc_features_high = vega20_get_argument(hwmgr);
*features_enabled = ((((uint64_t)smc_features_low << SMU_FEATURES_LOW_SHIFT) & SMU_FEATURES_LOW_MASK) |
(((uint64_t)smc_features_high << SMU_FEATURES_HIGH_SHIFT) & SMU_FEATURES_HIGH_MASK));
@@ -371,13 +381,15 @@ static int vega20_set_tools_address(struct pp_hwmgr *hwmgr)
int ret = 0;
if (priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr) {
- ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetToolsDramAddrHigh,
- upper_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr));
+ upper_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr),
+ NULL);
if (!ret)
- ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetToolsDramAddrLow,
- lower_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr));
+ lower_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr),
+ NULL);
}
return ret;
@@ -389,14 +401,16 @@ int vega20_set_pptable_driver_address(struct pp_hwmgr *hwmgr)
(struct vega20_smumgr *)(hwmgr->smu_backend);
int ret = 0;
- PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
- upper_32_bits(priv->smu_tables.entry[TABLE_PPTABLE].mc_addr))) == 0,
+ upper_32_bits(priv->smu_tables.entry[TABLE_PPTABLE].mc_addr),
+ NULL)) == 0,
"[SetPPtabeDriverAddress] Attempt to Set Dram Addr High Failed!",
return ret);
- PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrLow,
- lower_32_bits(priv->smu_tables.entry[TABLE_PPTABLE].mc_addr))) == 0,
+ lower_32_bits(priv->smu_tables.entry[TABLE_PPTABLE].mc_addr),
+ NULL)) == 0,
"[SetPPtabeDriverAddress] Attempt to Set Dram Addr Low Failed!",
return ret);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
index b0e0d67cd54b..3da71a088b92 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
@@ -356,7 +356,8 @@ static int vegam_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
PHM_PlatformCaps_StablePState))
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_UVDDPM_SetEnabledMask,
- (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
+ (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel),
+ NULL);
return 0;
}
@@ -388,7 +389,8 @@ static int vegam_update_vce_smc_table(struct pp_hwmgr *hwmgr)
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_VCEDPM_SetEnabledMask,
- (uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
+ (uint32_t)1 << smu_data->smc_state_table.VceBootLevel,
+ NULL);
return 0;
}
@@ -1906,7 +1908,8 @@ static int vegam_enable_reconfig_cus(struct pp_hwmgr *hwmgr)
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_EnableModeSwitchRLCNotification,
- adev->gfx.cu_info.number);
+ adev->gfx.cu_info.number,
+ NULL);
return 0;
}
@@ -2060,7 +2063,7 @@ static int vegam_init_smc_table(struct pp_hwmgr *hwmgr)
table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_AutomaticDCTransition) &&
- !smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UseNewGPIOScheme))
+ !smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UseNewGPIOScheme, NULL))
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme);
} else {
@@ -2250,10 +2253,12 @@ int vegam_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
if (!hwmgr->avfs_supported)
return 0;
- ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs);
+ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs, NULL);
if (!ret) {
if (data->apply_avfs_cks_off_voltage)
- ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ApplyAvfsCksOffVoltage);
+ ret = smum_send_msg_to_smc(hwmgr,
+ PPSMC_MSG_ApplyAvfsCksOffVoltage,
+ NULL);
}
return ret;
@@ -2279,6 +2284,7 @@ const struct pp_smumgr_func vegam_smu_funcs = {
.request_smu_load_specific_fw = NULL,
.send_msg_to_smc = smu7_send_msg_to_smc,
.send_msg_to_smc_with_parameter = smu7_send_msg_to_smc_with_parameter,
+ .get_argument = smu7_get_argument,
.process_firmware_header = vegam_process_firmware_header,
.is_dpm_running = vegam_is_dpm_running,
.get_mac_definition = vegam_get_mac_definition,
diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
index 3f1044326dcb..61923530b2e4 100644
--- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
@@ -1796,7 +1796,7 @@ static int vega20_get_power_profile_mode(struct smu_context *smu, char *buf)
"PD_Data_error_rate_coeff"};
int result = 0;
- if (!smu->pm_enabled || !buf)
+ if (!buf)
return -EINVAL;
size += sprintf(buf + size, "%16s %s %s %s %s %s %s %s %s %s %s\n",
@@ -1887,8 +1887,6 @@ static int vega20_set_power_profile_mode(struct smu_context *smu, long *input, u
smu->power_profile_mode = input[size];
- if (!smu->pm_enabled)
- return ret;
if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
pr_err("Invalid power profile mode %d\n", smu->power_profile_mode);
return -EINVAL;
diff --git a/drivers/gpu/drm/arc/arcpgu_drv.c b/drivers/gpu/drm/arc/arcpgu_drv.c
index d6a6692db0ac..c05d001163e0 100644
--- a/drivers/gpu/drm/arc/arcpgu_drv.c
+++ b/drivers/gpu/drm/arc/arcpgu_drv.c
@@ -137,10 +137,11 @@ static struct drm_info_list arcpgu_debugfs_list[] = {
{ "clocks", arcpgu_show_pxlclock, 0 },
};
-static int arcpgu_debugfs_init(struct drm_minor *minor)
+static void arcpgu_debugfs_init(struct drm_minor *minor)
{
- return drm_debugfs_create_files(arcpgu_debugfs_list,
- ARRAY_SIZE(arcpgu_debugfs_list), minor->debugfs_root, minor);
+ drm_debugfs_create_files(arcpgu_debugfs_list,
+ ARRAY_SIZE(arcpgu_debugfs_list),
+ minor->debugfs_root, minor);
}
#endif
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
index 442d4656150a..6b85d5f4caa8 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
@@ -14,6 +14,7 @@
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_irq.h>
+#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -260,17 +261,16 @@ static void komeda_kms_mode_config_init(struct komeda_kms_dev *kms,
struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev)
{
- struct komeda_kms_dev *kms = kzalloc(sizeof(*kms), GFP_KERNEL);
+ struct komeda_kms_dev *kms;
struct drm_device *drm;
int err;
- if (!kms)
- return ERR_PTR(-ENOMEM);
+ kms = devm_drm_dev_alloc(mdev->dev, &komeda_kms_driver,
+ struct komeda_kms_dev, base);
+ if (IS_ERR(kms))
+ return kms;
drm = &kms->base;
- err = drm_dev_init(drm, &komeda_kms_driver, mdev->dev);
- if (err)
- goto free_kms;
drm->dev_private = mdev;
@@ -327,9 +327,6 @@ cleanup_mode_config:
drm_mode_config_cleanup(drm);
komeda_kms_cleanup_private_objs(kms);
drm->dev_private = NULL;
- drm_dev_put(drm);
-free_kms:
- kfree(kms);
return ERR_PTR(err);
}
@@ -346,5 +343,4 @@ void komeda_kms_detach(struct komeda_kms_dev *kms)
drm_mode_config_cleanup(drm);
komeda_kms_cleanup_private_objs(kms);
drm->dev_private = NULL;
- drm_dev_put(drm);
}
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
index 2e053815b54a..194419f47c5e 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.c
+++ b/drivers/gpu/drm/arm/hdlcd_drv.c
@@ -224,10 +224,11 @@ static struct drm_info_list hdlcd_debugfs_list[] = {
{ "clocks", hdlcd_show_pxlclock, 0 },
};
-static int hdlcd_debugfs_init(struct drm_minor *minor)
+static void hdlcd_debugfs_init(struct drm_minor *minor)
{
- return drm_debugfs_create_files(hdlcd_debugfs_list,
- ARRAY_SIZE(hdlcd_debugfs_list), minor->debugfs_root, minor);
+ drm_debugfs_create_files(hdlcd_debugfs_list,
+ ARRAY_SIZE(hdlcd_debugfs_list),
+ minor->debugfs_root, minor);
}
#endif
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index 37d92a06318e..def8c9ffafca 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -548,7 +548,7 @@ static const struct file_operations malidp_debugfs_fops = {
.release = single_release,
};
-static int malidp_debugfs_init(struct drm_minor *minor)
+static void malidp_debugfs_init(struct drm_minor *minor)
{
struct malidp_drm *malidp = minor->dev->dev_private;
@@ -557,7 +557,6 @@ static int malidp_debugfs_init(struct drm_minor *minor)
spin_lock_init(&malidp->errors_lock);
debugfs_create_file("debug", S_IRUGO | S_IWUSR, minor->debugfs_root,
minor->dev, &malidp_debugfs_fops);
- return 0;
}
#endif //CONFIG_DEBUG_FS
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index 197dca3fc84c..5fc25c3f445c 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -12,6 +12,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_ioctl.h>
+#include <drm/drm_managed.h>
#include <drm/drm_prime.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_fb_helper.h>
@@ -103,6 +104,7 @@ static int armada_drm_bind(struct device *dev)
kfree(priv);
return ret;
}
+ drmm_add_final_kfree(&priv->drm, priv);
/* Remove early framebuffers */
ret = drm_fb_helper_remove_conflicting_framebuffers(NULL,
@@ -311,7 +313,7 @@ static void __exit armada_drm_exit(void)
}
module_exit(armada_drm_exit);
-MODULE_AUTHOR("Russell King <rmk+kernel@arm.linux.org.uk>");
+MODULE_AUTHOR("Russell King <rmk+kernel@armlinux.org.uk>");
MODULE_DESCRIPTION("Armada DRM Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:armada-drm");
diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx.h b/drivers/gpu/drm/aspeed/aspeed_gfx.h
index a10358bb61ec..e7ca95827ae8 100644
--- a/drivers/gpu/drm/aspeed/aspeed_gfx.h
+++ b/drivers/gpu/drm/aspeed/aspeed_gfx.h
@@ -5,6 +5,7 @@
#include <drm/drm_simple_kms_helper.h>
struct aspeed_gfx {
+ struct drm_device drm;
void __iomem *base;
struct clk *clk;
struct reset_control *rst;
@@ -12,8 +13,8 @@ struct aspeed_gfx {
struct drm_simple_display_pipe pipe;
struct drm_connector connector;
- struct drm_fbdev_cma *fbdev;
};
+#define to_aspeed_gfx(x) container_of(x, struct aspeed_gfx, drm)
int aspeed_gfx_create_pipe(struct drm_device *drm);
int aspeed_gfx_create_output(struct drm_device *drm);
diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c b/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c
index 2184b8be6fd4..e54686c31a90 100644
--- a/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c
+++ b/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c
@@ -231,7 +231,7 @@ static const uint32_t aspeed_gfx_formats[] = {
int aspeed_gfx_create_pipe(struct drm_device *drm)
{
- struct aspeed_gfx *priv = drm->dev_private;
+ struct aspeed_gfx *priv = to_aspeed_gfx(drm);
return drm_simple_display_pipe_init(drm, &priv->pipe, &aspeed_gfx_funcs,
aspeed_gfx_formats,
diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
index ada2f6aca906..6b27242b9ee3 100644
--- a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
+++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
@@ -77,7 +77,7 @@ static void aspeed_gfx_setup_mode_config(struct drm_device *drm)
static irqreturn_t aspeed_gfx_irq_handler(int irq, void *data)
{
struct drm_device *drm = data;
- struct aspeed_gfx *priv = drm->dev_private;
+ struct aspeed_gfx *priv = to_aspeed_gfx(drm);
u32 reg;
reg = readl(priv->base + CRT_CTRL1);
@@ -96,15 +96,10 @@ static irqreturn_t aspeed_gfx_irq_handler(int irq, void *data)
static int aspeed_gfx_load(struct drm_device *drm)
{
struct platform_device *pdev = to_platform_device(drm->dev);
- struct aspeed_gfx *priv;
+ struct aspeed_gfx *priv = to_aspeed_gfx(drm);
struct resource *res;
int ret;
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
- drm->dev_private = priv;
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
priv->base = devm_ioremap_resource(drm->dev, res);
if (IS_ERR(priv->base))
@@ -187,8 +182,6 @@ static void aspeed_gfx_unload(struct drm_device *drm)
{
drm_kms_helper_poll_fini(drm);
drm_mode_config_cleanup(drm);
-
- drm->dev_private = NULL;
}
DEFINE_DRM_GEM_CMA_FOPS(fops);
@@ -216,27 +209,26 @@ static const struct of_device_id aspeed_gfx_match[] = {
static int aspeed_gfx_probe(struct platform_device *pdev)
{
- struct drm_device *drm;
+ struct aspeed_gfx *priv;
int ret;
- drm = drm_dev_alloc(&aspeed_gfx_driver, &pdev->dev);
- if (IS_ERR(drm))
- return PTR_ERR(drm);
+ priv = devm_drm_dev_alloc(&pdev->dev, &aspeed_gfx_driver,
+ struct aspeed_gfx, drm);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
- ret = aspeed_gfx_load(drm);
+ ret = aspeed_gfx_load(&priv->drm);
if (ret)
- goto err_free;
+ return ret;
- ret = drm_dev_register(drm, 0);
+ ret = drm_dev_register(&priv->drm, 0);
if (ret)
goto err_unload;
return 0;
err_unload:
- aspeed_gfx_unload(drm);
-err_free:
- drm_dev_put(drm);
+ aspeed_gfx_unload(&priv->drm);
return ret;
}
@@ -247,7 +239,6 @@ static int aspeed_gfx_remove(struct platform_device *pdev)
drm_dev_unregister(drm);
aspeed_gfx_unload(drm);
- drm_dev_put(drm);
return 0;
}
diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_out.c b/drivers/gpu/drm/aspeed/aspeed_gfx_out.c
index 67ee5fa10055..6759cb88415a 100644
--- a/drivers/gpu/drm/aspeed/aspeed_gfx_out.c
+++ b/drivers/gpu/drm/aspeed/aspeed_gfx_out.c
@@ -28,7 +28,7 @@ static const struct drm_connector_funcs aspeed_gfx_connector_funcs = {
int aspeed_gfx_create_output(struct drm_device *drm)
{
- struct aspeed_gfx *priv = drm->dev_private;
+ struct aspeed_gfx *priv = to_aspeed_gfx(drm);
int ret;
priv->connector.dpms = DRM_MODE_DPMS_OFF;
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 30aa73a5d9b7..b7ba22dddcad 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -32,6 +32,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_probe_helper.h>
@@ -111,6 +112,8 @@ static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto err_ast_driver_unload;
+ drm_fbdev_generic_setup(dev, 32);
+
return 0;
err_ast_driver_unload:
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 18a0a4ce00f6..e5398e3dabe7 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -30,7 +30,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
-#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_vram_helper.h>
@@ -512,10 +511,6 @@ int ast_driver_load(struct drm_device *dev, unsigned long flags)
drm_mode_config_reset(dev);
- ret = drm_fbdev_generic_setup(dev, 32);
- if (ret)
- goto out_free;
-
return 0;
out_free:
kfree(ast);
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index cdd6c46d6557..3a3a511670c9 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -226,6 +226,7 @@ static void ast_set_vbios_color_reg(struct ast_private *ast,
case 3:
case 4:
color_index = TrueCModeIndex;
+ break;
default:
return;
}
@@ -561,8 +562,9 @@ static int ast_primary_plane_helper_atomic_check(struct drm_plane *plane,
return 0;
}
-void ast_primary_plane_helper_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+static void
+ast_primary_plane_helper_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
{
struct ast_private *ast = plane->dev->dev_private;
struct drm_plane_state *state = plane->state;
@@ -801,6 +803,9 @@ static int ast_crtc_helper_atomic_check(struct drm_crtc *crtc,
return -EINVAL;
}
+ if (!state->enable)
+ return 0; /* no mode checks if CRTC is being disabled */
+
ast_state = to_ast_crtc_state(state);
format = ast_state->format;
@@ -881,6 +886,17 @@ static const struct drm_crtc_helper_funcs ast_crtc_helper_funcs = {
.atomic_disable = ast_crtc_helper_atomic_disable,
};
+static void ast_crtc_reset(struct drm_crtc *crtc)
+{
+ struct ast_crtc_state *ast_state =
+ kzalloc(sizeof(*ast_state), GFP_KERNEL);
+
+ if (crtc->state)
+ crtc->funcs->atomic_destroy_state(crtc, crtc->state);
+
+ __drm_atomic_helper_crtc_reset(crtc, &ast_state->base);
+}
+
static void ast_crtc_destroy(struct drm_crtc *crtc)
{
drm_crtc_cleanup(crtc);
@@ -919,8 +935,7 @@ static void ast_crtc_atomic_destroy_state(struct drm_crtc *crtc,
}
static const struct drm_crtc_funcs ast_crtc_funcs = {
- .reset = drm_atomic_helper_crtc_reset,
- .set_config = drm_crtc_helper_set_config,
+ .reset = ast_crtc_reset,
.gamma_set = drm_atomic_helper_legacy_gamma_set,
.destroy = ast_crtc_destroy,
.set_config = drm_atomic_helper_set_config,
@@ -1069,7 +1084,6 @@ static void ast_connector_destroy(struct drm_connector *connector)
{
struct ast_connector *ast_connector = to_ast_connector(connector);
ast_i2c_destroy(ast_connector->i2c);
- drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(connector);
}
@@ -1112,8 +1126,6 @@ static int ast_connector_init(struct drm_device *dev)
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
- drm_connector_register(connector);
-
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
encoder = list_first_entry(&dev->mode_config.encoder_list, struct drm_encoder, head);
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
index e2019fe97fff..43bc709e3523 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
@@ -11,9 +11,10 @@
#include <linux/media-bus-format.h>
#include <linux/of_graph.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_encoder.h>
#include <drm/drm_of.h>
-#include <drm/drm_bridge.h>
+#include <drm/drm_simple_kms_helper.h>
#include "atmel_hlcdc_dc.h"
@@ -22,10 +23,6 @@ struct atmel_hlcdc_rgb_output {
int bus_fmt;
};
-static const struct drm_encoder_funcs atmel_hlcdc_panel_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static struct atmel_hlcdc_rgb_output *
atmel_hlcdc_encoder_to_rgb_output(struct drm_encoder *encoder)
{
@@ -98,9 +95,8 @@ static int atmel_hlcdc_attach_endpoint(struct drm_device *dev, int endpoint)
return -EINVAL;
}
- ret = drm_encoder_init(dev, &output->encoder,
- &atmel_hlcdc_panel_encoder_funcs,
- DRM_MODE_ENCODER_NONE, NULL);
+ ret = drm_simple_encoder_init(dev, &output->encoder,
+ DRM_MODE_ENCODER_NONE);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/bochs/bochs.h b/drivers/gpu/drm/bochs/bochs.h
index 917767173ee6..e5bd1d517a18 100644
--- a/drivers/gpu/drm/bochs/bochs.h
+++ b/drivers/gpu/drm/bochs/bochs.h
@@ -92,7 +92,6 @@ void bochs_mm_fini(struct bochs_device *bochs);
/* bochs_kms.c */
int bochs_kms_init(struct bochs_device *bochs);
-void bochs_kms_fini(struct bochs_device *bochs);
/* bochs_fbdev.c */
extern const struct drm_mode_config_funcs bochs_mode_funcs;
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
index addb0568c1af..e18c51de1196 100644
--- a/drivers/gpu/drm/bochs/bochs_drv.c
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -7,6 +7,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_managed.h>
#include "bochs.h"
@@ -21,10 +22,7 @@ static void bochs_unload(struct drm_device *dev)
{
struct bochs_device *bochs = dev->dev_private;
- bochs_kms_fini(bochs);
bochs_mm_fini(bochs);
- kfree(bochs);
- dev->dev_private = NULL;
}
static int bochs_load(struct drm_device *dev)
@@ -32,7 +30,7 @@ static int bochs_load(struct drm_device *dev)
struct bochs_device *bochs;
int ret;
- bochs = kzalloc(sizeof(*bochs), GFP_KERNEL);
+ bochs = drmm_kzalloc(dev, sizeof(*bochs), GFP_KERNEL);
if (bochs == NULL)
return -ENOMEM;
dev->dev_private = bochs;
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index 8066d7d370d5..05d8373888e8 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -104,7 +104,6 @@ static void bochs_connector_init(struct drm_device *dev)
DRM_MODE_CONNECTOR_VIRTUAL);
drm_connector_helper_add(connector,
&bochs_connector_connector_helper_funcs);
- drm_connector_register(connector);
bochs_hw_load_edid(bochs);
if (bochs->edid) {
@@ -134,7 +133,11 @@ const struct drm_mode_config_funcs bochs_mode_funcs = {
int bochs_kms_init(struct bochs_device *bochs)
{
- drm_mode_config_init(bochs->dev);
+ int ret;
+
+ ret = drmm_mode_config_init(bochs->dev);
+ if (ret)
+ return ret;
bochs->dev->mode_config.max_width = 8192;
bochs->dev->mode_config.max_height = 8192;
@@ -160,12 +163,3 @@ int bochs_kms_init(struct bochs_device *bochs)
return 0;
}
-
-void bochs_kms_fini(struct bochs_device *bochs)
-{
- if (!bochs->dev->mode_config.num_connector)
- return;
-
- drm_atomic_helper_shutdown(bochs->dev);
- drm_mode_config_cleanup(bochs->dev);
-}
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index aaed2347ace9..04f876e985de 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -27,6 +27,16 @@ config DRM_CDNS_DSI
Support Cadence DPI to DSI bridge. This is an internal
bridge and is meant to be directly embedded in a SoC.
+config DRM_CHRONTEL_CH7033
+ tristate "Chrontel CH7033 Video Encoder"
+ depends on OF
+ select DRM_KMS_HELPER
+ help
+ Enable support for the Chrontel CH7033 VGA/DVI/HDMI Encoder, as
+ found in the Dell Wyse 3020 thin client.
+
+ If in doubt, say "N".
+
config DRM_DISPLAY_CONNECTOR
tristate "Display connector support"
depends on OF
@@ -58,6 +68,22 @@ config DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW
to DP++. This is used with the i.MX6 imx-ldb
driver. You are likely to say N here.
+config DRM_NWL_MIPI_DSI
+ tristate "Northwest Logic MIPI DSI Host controller"
+ depends on DRM
+ depends on COMMON_CLK
+ depends on OF && HAS_IOMEM
+ select DRM_KMS_HELPER
+ select DRM_MIPI_DSI
+ select DRM_PANEL_BRIDGE
+ select GENERIC_PHY_MIPI_DPHY
+ select MFD_SYSCON
+ select MULTIPLEXER
+ select REGMAP_MMIO
+ help
+ This enables the Northwest Logic MIPI DSI Host controller as
+ for example found on NXP's i.MX8 Processors.
+
config DRM_NXP_PTN3460
tristate "NXP PTN3460 DP/LVDS bridge"
depends on OF
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index 6fb062b5b0f0..d63d4b7e4347 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_DRM_CDNS_DSI) += cdns-dsi.o
+obj-$(CONFIG_DRM_CHRONTEL_CH7033) += chrontel-ch7033.o
obj-$(CONFIG_DRM_DISPLAY_CONNECTOR) += display-connector.o
obj-$(CONFIG_DRM_LVDS_CODEC) += lvds-codec.o
obj-$(CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW) += megachips-stdpxxxx-ge-b850v3-fw.o
@@ -18,6 +19,7 @@ obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511/
obj-$(CONFIG_DRM_TI_SN65DSI86) += ti-sn65dsi86.o
obj-$(CONFIG_DRM_TI_TFP410) += ti-tfp410.o
obj-$(CONFIG_DRM_TI_TPD12S015) += ti-tpd12s015.o
+obj-$(CONFIG_DRM_NWL_MIPI_DSI) += nwl-dsi.o
obj-y += analogix/
obj-y += synopsys/
diff --git a/drivers/gpu/drm/bridge/adv7511/Kconfig b/drivers/gpu/drm/bridge/adv7511/Kconfig
index 47d4eb9e845d..f46a5e26b5dd 100644
--- a/drivers/gpu/drm/bridge/adv7511/Kconfig
+++ b/drivers/gpu/drm/bridge/adv7511/Kconfig
@@ -6,7 +6,7 @@ config DRM_I2C_ADV7511
select REGMAP_I2C
select DRM_MIPI_DSI
help
- Support for the Analog Device ADV7511(W)/13/33/35 HDMI encoders.
+ Support for the Analog Devices ADV7511(W)/13/33/35 HDMI encoders.
config DRM_I2C_ADV7511_AUDIO
bool "ADV7511 HDMI Audio driver"
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
index a428185be2c1..f101dd2819b5 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
@@ -19,13 +19,15 @@ static void adv7511_calc_cts_n(unsigned int f_tmds, unsigned int fs,
{
switch (fs) {
case 32000:
- *n = 4096;
+ case 48000:
+ case 96000:
+ case 192000:
+ *n = fs * 128 / 1000;
break;
case 44100:
- *n = 6272;
- break;
- case 48000:
- *n = 6144;
+ case 88200:
+ case 176400:
+ *n = fs * 128 / 900;
break;
}
@@ -119,6 +121,9 @@ int adv7511_hdmi_hw_params(struct device *dev, void *data,
audio_source = ADV7511_AUDIO_SOURCE_I2S;
i2s_format = ADV7511_I2S_FORMAT_LEFT_J;
break;
+ case HDMI_SPDIF:
+ audio_source = ADV7511_AUDIO_SOURCE_SPDIF;
+ break;
default:
return -EINVAL;
}
@@ -175,11 +180,21 @@ static int audio_startup(struct device *dev, void *data)
/* use Audio infoframe updated info */
regmap_update_bits(adv7511->regmap, ADV7511_REG_GC(1),
BIT(5), 0);
+ /* enable SPDIF receiver */
+ if (adv7511->audio_source == ADV7511_AUDIO_SOURCE_SPDIF)
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CONFIG,
+ BIT(7), BIT(7));
+
return 0;
}
static void audio_shutdown(struct device *dev, void *data)
{
+ struct adv7511 *adv7511 = dev_get_drvdata(dev);
+
+ if (adv7511->audio_source == ADV7511_AUDIO_SOURCE_SPDIF)
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CONFIG,
+ BIT(7), 0);
}
static int adv7511_hdmi_i2s_get_dai_id(struct snd_soc_component *component,
@@ -213,6 +228,7 @@ static const struct hdmi_codec_pdata codec_data = {
.ops = &adv7511_codec_ops,
.max_i2s_channels = 2,
.i2s = 1,
+ .spdif = 1,
};
int adv7511_audio_init(struct device *dev, struct adv7511 *adv7511)
diff --git a/drivers/gpu/drm/bridge/chrontel-ch7033.c b/drivers/gpu/drm/bridge/chrontel-ch7033.c
new file mode 100644
index 000000000000..f8675d82974b
--- /dev/null
+++ b/drivers/gpu/drm/bridge/chrontel-ch7033.c
@@ -0,0 +1,620 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Chrontel CH7033 Video Encoder Driver
+ *
+ * Copyright (C) 2019,2020 Lubomir Rintel
+ */
+
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_of.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+
+/* Page 0, Register 0x07 */
+enum {
+ DRI_PD = BIT(3),
+ IO_PD = BIT(5),
+};
+
+/* Page 0, Register 0x08 */
+enum {
+ DRI_PDDRI = GENMASK(7, 4),
+ PDDAC = GENMASK(3, 1),
+ PANEN = BIT(0),
+};
+
+/* Page 0, Register 0x09 */
+enum {
+ DPD = BIT(7),
+ GCKOFF = BIT(6),
+ TV_BP = BIT(5),
+ SCLPD = BIT(4),
+ SDPD = BIT(3),
+ VGA_PD = BIT(2),
+ HDBKPD = BIT(1),
+ HDMI_PD = BIT(0),
+};
+
+/* Page 0, Register 0x0a */
+enum {
+ MEMINIT = BIT(7),
+ MEMIDLE = BIT(6),
+ MEMPD = BIT(5),
+ STOP = BIT(4),
+ LVDS_PD = BIT(3),
+ HD_DVIB = BIT(2),
+ HDCP_PD = BIT(1),
+ MCU_PD = BIT(0),
+};
+
+/* Page 0, Register 0x18 */
+enum {
+ IDF = GENMASK(7, 4),
+ INTEN = BIT(3),
+ SWAP = GENMASK(2, 0),
+};
+
+enum {
+ BYTE_SWAP_RGB = 0,
+ BYTE_SWAP_RBG = 1,
+ BYTE_SWAP_GRB = 2,
+ BYTE_SWAP_GBR = 3,
+ BYTE_SWAP_BRG = 4,
+ BYTE_SWAP_BGR = 5,
+};
+
+/* Page 0, Register 0x19 */
+enum {
+ HPO_I = BIT(5),
+ VPO_I = BIT(4),
+ DEPO_I = BIT(3),
+ CRYS_EN = BIT(2),
+ GCLKFREQ = GENMASK(2, 0),
+};
+
+/* Page 0, Register 0x2e */
+enum {
+ HFLIP = BIT(7),
+ VFLIP = BIT(6),
+ DEPO_O = BIT(5),
+ HPO_O = BIT(4),
+ VPO_O = BIT(3),
+ TE = GENMASK(2, 0),
+};
+
+/* Page 0, Register 0x2b */
+enum {
+ SWAPS = GENMASK(7, 4),
+ VFMT = GENMASK(3, 0),
+};
+
+/* Page 0, Register 0x54 */
+enum {
+ COMP_BP = BIT(7),
+ DAC_EN_T = BIT(6),
+ HWO_HDMI_HI = GENMASK(5, 3),
+ HOO_HDMI_HI = GENMASK(2, 0),
+};
+
+/* Page 0, Register 0x57 */
+enum {
+ FLDSEN = BIT(7),
+ VWO_HDMI_HI = GENMASK(5, 3),
+ VOO_HDMI_HI = GENMASK(2, 0),
+};
+
+/* Page 0, Register 0x7e */
+enum {
+ HDMI_LVDS_SEL = BIT(7),
+ DE_GEN = BIT(6),
+ PWM_INDEX_HI = BIT(5),
+ USE_DE = BIT(4),
+ R_INT = GENMASK(3, 0),
+};
+
+/* Page 1, Register 0x07 */
+enum {
+ BPCKSEL = BIT(7),
+ DRI_CMFB_EN = BIT(6),
+ CEC_PUEN = BIT(5),
+ CEC_T = BIT(3),
+ CKINV = BIT(2),
+ CK_TVINV = BIT(1),
+ DRI_CKS2 = BIT(0),
+};
+
+/* Page 1, Register 0x08 */
+enum {
+ DACG = BIT(6),
+ DACKTST = BIT(5),
+ DEDGEB = BIT(4),
+ SYO = BIT(3),
+ DRI_IT_LVDS = GENMASK(2, 1),
+ DISPON = BIT(0),
+};
+
+/* Page 1, Register 0x0c */
+enum {
+ DRI_PLL_CP = GENMASK(7, 6),
+ DRI_PLL_DIVSEL = BIT(5),
+ DRI_PLL_N1_1 = BIT(4),
+ DRI_PLL_N1_0 = BIT(3),
+ DRI_PLL_N3_1 = BIT(2),
+ DRI_PLL_N3_0 = BIT(1),
+ DRI_PLL_CKTSTEN = BIT(0),
+};
+
+/* Page 1, Register 0x6b */
+enum {
+ VCO3CS = GENMASK(7, 6),
+ ICPGBK2_0 = GENMASK(5, 3),
+ DRI_VCO357SC = BIT(2),
+ PDPLL2 = BIT(1),
+ DRI_PD_SER = BIT(0),
+};
+
+/* Page 1, Register 0x6c */
+enum {
+ PLL2N11 = GENMASK(7, 4),
+ PLL2N5_4 = BIT(3),
+ PLL2N5_TOP = BIT(2),
+ DRI_PLL_PD = BIT(1),
+ PD_I2CM = BIT(0),
+};
+
+/* Page 3, Register 0x28 */
+enum {
+ DIFF_EN = GENMASK(7, 6),
+ CORREC_EN = GENMASK(5, 4),
+ VGACLK_BP = BIT(3),
+ HM_LV_SEL = BIT(2),
+ HD_VGA_SEL = BIT(1),
+};
+
+/* Page 3, Register 0x2a */
+enum {
+ LVDSCLK_BP = BIT(7),
+ HDTVCLK_BP = BIT(6),
+ HDMICLK_BP = BIT(5),
+ HDTV_BP = BIT(4),
+ HDMI_BP = BIT(3),
+ THRWL = GENMASK(2, 0),
+};
+
+/* Page 4, Register 0x52 */
+enum {
+ PGM_ARSTB = BIT(7),
+ MCU_ARSTB = BIT(6),
+ MCU_RETB = BIT(2),
+ RESETIB = BIT(1),
+ RESETDB = BIT(0),
+};
+
+struct ch7033_priv {
+ struct regmap *regmap;
+ struct drm_bridge *next_bridge;
+ struct drm_bridge bridge;
+ struct drm_connector connector;
+};
+
+#define conn_to_ch7033_priv(x) \
+ container_of(x, struct ch7033_priv, connector)
+#define bridge_to_ch7033_priv(x) \
+ container_of(x, struct ch7033_priv, bridge)
+
+
+static enum drm_connector_status ch7033_connector_detect(
+ struct drm_connector *connector, bool force)
+{
+ struct ch7033_priv *priv = conn_to_ch7033_priv(connector);
+
+ return drm_bridge_detect(priv->next_bridge);
+}
+
+static const struct drm_connector_funcs ch7033_connector_funcs = {
+ .reset = drm_atomic_helper_connector_reset,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .detect = ch7033_connector_detect,
+ .destroy = drm_connector_cleanup,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int ch7033_connector_get_modes(struct drm_connector *connector)
+{
+ struct ch7033_priv *priv = conn_to_ch7033_priv(connector);
+ struct edid *edid;
+ int ret;
+
+ edid = drm_bridge_get_edid(priv->next_bridge, connector);
+ drm_connector_update_edid_property(connector, edid);
+ if (edid) {
+ ret = drm_add_edid_modes(connector, edid);
+ kfree(edid);
+ } else {
+ ret = drm_add_modes_noedid(connector, 1920, 1080);
+ drm_set_preferred_mode(connector, 1024, 768);
+ }
+
+ return ret;
+}
+
+static struct drm_encoder *ch7033_connector_best_encoder(
+ struct drm_connector *connector)
+{
+ struct ch7033_priv *priv = conn_to_ch7033_priv(connector);
+
+ return priv->bridge.encoder;
+}
+
+static const struct drm_connector_helper_funcs ch7033_connector_helper_funcs = {
+ .get_modes = ch7033_connector_get_modes,
+ .best_encoder = ch7033_connector_best_encoder,
+};
+
+static void ch7033_hpd_event(void *arg, enum drm_connector_status status)
+{
+ struct ch7033_priv *priv = arg;
+
+ if (priv->bridge.dev)
+ drm_helper_hpd_irq_event(priv->connector.dev);
+}
+
+static int ch7033_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct ch7033_priv *priv = bridge_to_ch7033_priv(bridge);
+ struct drm_connector *connector = &priv->connector;
+ int ret;
+
+ ret = drm_bridge_attach(bridge->encoder, priv->next_bridge, bridge,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (ret)
+ return ret;
+
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
+ return 0;
+
+ if (priv->next_bridge->ops & DRM_BRIDGE_OP_DETECT) {
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ } else {
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
+ }
+
+ if (priv->next_bridge->ops & DRM_BRIDGE_OP_HPD) {
+ drm_bridge_hpd_enable(priv->next_bridge, ch7033_hpd_event,
+ priv);
+ }
+
+ drm_connector_helper_add(connector,
+ &ch7033_connector_helper_funcs);
+ ret = drm_connector_init_with_ddc(bridge->dev, &priv->connector,
+ &ch7033_connector_funcs,
+ priv->next_bridge->type,
+ priv->next_bridge->ddc);
+ if (ret) {
+ DRM_ERROR("Failed to initialize connector\n");
+ return ret;
+ }
+
+ return drm_connector_attach_encoder(&priv->connector, bridge->encoder);
+}
+
+static void ch7033_bridge_detach(struct drm_bridge *bridge)
+{
+ struct ch7033_priv *priv = bridge_to_ch7033_priv(bridge);
+
+ if (priv->next_bridge->ops & DRM_BRIDGE_OP_HPD)
+ drm_bridge_hpd_disable(priv->next_bridge);
+ drm_connector_cleanup(&priv->connector);
+}
+
+static enum drm_mode_status ch7033_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode)
+{
+ if (mode->clock > 165000)
+ return MODE_CLOCK_HIGH;
+ if (mode->hdisplay >= 1920)
+ return MODE_BAD_HVALUE;
+ if (mode->vdisplay >= 1080)
+ return MODE_BAD_VVALUE;
+ return MODE_OK;
+}
+
+static void ch7033_bridge_disable(struct drm_bridge *bridge)
+{
+ struct ch7033_priv *priv = bridge_to_ch7033_priv(bridge);
+
+ regmap_write(priv->regmap, 0x03, 0x04);
+ regmap_update_bits(priv->regmap, 0x52, RESETDB, 0x00);
+}
+
+static void ch7033_bridge_enable(struct drm_bridge *bridge)
+{
+ struct ch7033_priv *priv = bridge_to_ch7033_priv(bridge);
+
+ regmap_write(priv->regmap, 0x03, 0x04);
+ regmap_update_bits(priv->regmap, 0x52, RESETDB, RESETDB);
+}
+
+static void ch7033_bridge_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
+{
+ struct ch7033_priv *priv = bridge_to_ch7033_priv(bridge);
+ int hbporch = mode->hsync_start - mode->hdisplay;
+ int hsynclen = mode->hsync_end - mode->hsync_start;
+ int vbporch = mode->vsync_start - mode->vdisplay;
+ int vsynclen = mode->vsync_end - mode->vsync_start;
+
+ /*
+ * Page 4
+ */
+ regmap_write(priv->regmap, 0x03, 0x04);
+
+ /* Turn everything off to set all the registers to their defaults. */
+ regmap_write(priv->regmap, 0x52, 0x00);
+ /* Bring I/O block up. */
+ regmap_write(priv->regmap, 0x52, RESETIB);
+
+ /*
+ * Page 0
+ */
+ regmap_write(priv->regmap, 0x03, 0x00);
+
+ /* Bring up parts we need from the power down. */
+ regmap_update_bits(priv->regmap, 0x07, DRI_PD | IO_PD, 0);
+ regmap_update_bits(priv->regmap, 0x08, DRI_PDDRI | PDDAC | PANEN, 0);
+ regmap_update_bits(priv->regmap, 0x09, DPD | GCKOFF |
+ HDMI_PD | VGA_PD, 0);
+ regmap_update_bits(priv->regmap, 0x0a, HD_DVIB, 0);
+
+ /* Horizontal input timing. */
+ regmap_write(priv->regmap, 0x0b, (mode->htotal >> 8) << 3 |
+ (mode->hdisplay >> 8));
+ regmap_write(priv->regmap, 0x0c, mode->hdisplay);
+ regmap_write(priv->regmap, 0x0d, mode->htotal);
+ regmap_write(priv->regmap, 0x0e, (hsynclen >> 8) << 3 |
+ (hbporch >> 8));
+ regmap_write(priv->regmap, 0x0f, hbporch);
+ regmap_write(priv->regmap, 0x10, hsynclen);
+
+ /* Vertical input timing. */
+ regmap_write(priv->regmap, 0x11, (mode->vtotal >> 8) << 3 |
+ (mode->vdisplay >> 8));
+ regmap_write(priv->regmap, 0x12, mode->vdisplay);
+ regmap_write(priv->regmap, 0x13, mode->vtotal);
+ regmap_write(priv->regmap, 0x14, ((vsynclen >> 8) << 3) |
+ (vbporch >> 8));
+ regmap_write(priv->regmap, 0x15, vbporch);
+ regmap_write(priv->regmap, 0x16, vsynclen);
+
+ /* Input color swap. */
+ regmap_update_bits(priv->regmap, 0x18, SWAP, BYTE_SWAP_BGR);
+
+ /* Input clock and sync polarity. */
+ regmap_update_bits(priv->regmap, 0x19, 0x1, mode->clock >> 16);
+ regmap_update_bits(priv->regmap, 0x19, HPO_I | VPO_I | GCLKFREQ,
+ (mode->flags & DRM_MODE_FLAG_PHSYNC) ? HPO_I : 0 |
+ (mode->flags & DRM_MODE_FLAG_PVSYNC) ? VPO_I : 0 |
+ mode->clock >> 16);
+ regmap_write(priv->regmap, 0x1a, mode->clock >> 8);
+ regmap_write(priv->regmap, 0x1b, mode->clock);
+
+ /* Horizontal output timing. */
+ regmap_write(priv->regmap, 0x1f, (mode->htotal >> 8) << 3 |
+ (mode->hdisplay >> 8));
+ regmap_write(priv->regmap, 0x20, mode->hdisplay);
+ regmap_write(priv->regmap, 0x21, mode->htotal);
+
+ /* Vertical output timing. */
+ regmap_write(priv->regmap, 0x25, (mode->vtotal >> 8) << 3 |
+ (mode->vdisplay >> 8));
+ regmap_write(priv->regmap, 0x26, mode->vdisplay);
+ regmap_write(priv->regmap, 0x27, mode->vtotal);
+
+ /* VGA channel bypass */
+ regmap_update_bits(priv->regmap, 0x2b, VFMT, 9);
+
+ /* Output sync polarity. */
+ regmap_update_bits(priv->regmap, 0x2e, HPO_O | VPO_O,
+ (mode->flags & DRM_MODE_FLAG_PHSYNC) ? HPO_O : 0 |
+ (mode->flags & DRM_MODE_FLAG_PVSYNC) ? VPO_O : 0);
+
+ /* HDMI horizontal output timing. */
+ regmap_update_bits(priv->regmap, 0x54, HWO_HDMI_HI | HOO_HDMI_HI,
+ (hsynclen >> 8) << 3 |
+ (hbporch >> 8));
+ regmap_write(priv->regmap, 0x55, hbporch);
+ regmap_write(priv->regmap, 0x56, hsynclen);
+
+ /* HDMI vertical output timing. */
+ regmap_update_bits(priv->regmap, 0x57, VWO_HDMI_HI | VOO_HDMI_HI,
+ (vsynclen >> 8) << 3 |
+ (vbporch >> 8));
+ regmap_write(priv->regmap, 0x58, vbporch);
+ regmap_write(priv->regmap, 0x59, vsynclen);
+
+ /* Pick HDMI, not LVDS. */
+ regmap_update_bits(priv->regmap, 0x7e, HDMI_LVDS_SEL, HDMI_LVDS_SEL);
+
+ /*
+ * Page 1
+ */
+ regmap_write(priv->regmap, 0x03, 0x01);
+
+ /* No idea what these do, but VGA is wobbly and blinky without them. */
+ regmap_update_bits(priv->regmap, 0x07, CKINV, CKINV);
+ regmap_update_bits(priv->regmap, 0x08, DISPON, DISPON);
+
+ /* DRI PLL */
+ regmap_update_bits(priv->regmap, 0x0c, DRI_PLL_DIVSEL, DRI_PLL_DIVSEL);
+ if (mode->clock <= 40000) {
+ regmap_update_bits(priv->regmap, 0x0c, DRI_PLL_N1_1 |
+ DRI_PLL_N1_0 |
+ DRI_PLL_N3_1 |
+ DRI_PLL_N3_0,
+ 0);
+ } else if (mode->clock < 80000) {
+ regmap_update_bits(priv->regmap, 0x0c, DRI_PLL_N1_1 |
+ DRI_PLL_N1_0 |
+ DRI_PLL_N3_1 |
+ DRI_PLL_N3_0,
+ DRI_PLL_N3_0 |
+ DRI_PLL_N1_0);
+ } else {
+ regmap_update_bits(priv->regmap, 0x0c, DRI_PLL_N1_1 |
+ DRI_PLL_N1_0 |
+ DRI_PLL_N3_1 |
+ DRI_PLL_N3_0,
+ DRI_PLL_N3_1 |
+ DRI_PLL_N1_1);
+ }
+
+ /* This seems to be color calibration for VGA. */
+ regmap_write(priv->regmap, 0x64, 0x29); /* LSB Blue */
+ regmap_write(priv->regmap, 0x65, 0x29); /* LSB Green */
+ regmap_write(priv->regmap, 0x66, 0x29); /* LSB Red */
+ regmap_write(priv->regmap, 0x67, 0x00); /* MSB Blue */
+ regmap_write(priv->regmap, 0x68, 0x00); /* MSB Green */
+ regmap_write(priv->regmap, 0x69, 0x00); /* MSB Red */
+
+ regmap_update_bits(priv->regmap, 0x6b, DRI_PD_SER, 0x00);
+ regmap_update_bits(priv->regmap, 0x6c, DRI_PLL_PD, 0x00);
+
+ /*
+ * Page 3
+ */
+ regmap_write(priv->regmap, 0x03, 0x03);
+
+ /* More bypasses and apparently another HDMI/LVDS selector. */
+ regmap_update_bits(priv->regmap, 0x28, VGACLK_BP | HM_LV_SEL,
+ VGACLK_BP | HM_LV_SEL);
+ regmap_update_bits(priv->regmap, 0x2a, HDMICLK_BP | HDMI_BP,
+ HDMICLK_BP | HDMI_BP);
+
+ /*
+ * Page 4
+ */
+ regmap_write(priv->regmap, 0x03, 0x04);
+
+ /* Output clock. */
+ regmap_write(priv->regmap, 0x10, mode->clock >> 16);
+ regmap_write(priv->regmap, 0x11, mode->clock >> 8);
+ regmap_write(priv->regmap, 0x12, mode->clock);
+}
+
+static const struct drm_bridge_funcs ch7033_bridge_funcs = {
+ .attach = ch7033_bridge_attach,
+ .detach = ch7033_bridge_detach,
+ .mode_valid = ch7033_bridge_mode_valid,
+ .disable = ch7033_bridge_disable,
+ .enable = ch7033_bridge_enable,
+ .mode_set = ch7033_bridge_mode_set,
+};
+
+static const struct regmap_config ch7033_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x7f,
+};
+
+static int ch7033_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct ch7033_priv *priv;
+ unsigned int val;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, priv);
+
+ ret = drm_of_find_panel_or_bridge(dev->of_node, 1, -1, NULL,
+ &priv->next_bridge);
+ if (ret)
+ return ret;
+
+ priv->regmap = devm_regmap_init_i2c(client, &ch7033_regmap_config);
+ if (IS_ERR(priv->regmap)) {
+ dev_err(&client->dev, "regmap init failed\n");
+ return PTR_ERR(priv->regmap);
+ }
+
+ ret = regmap_read(priv->regmap, 0x00, &val);
+ if (ret < 0) {
+ dev_err(&client->dev, "error reading the model id: %d\n", ret);
+ return ret;
+ }
+ if ((val & 0xf7) != 0x56) {
+ dev_err(&client->dev, "the device is not a ch7033\n");
+ return -ENODEV;
+ }
+
+ regmap_write(priv->regmap, 0x03, 0x04);
+ ret = regmap_read(priv->regmap, 0x51, &val);
+ if (ret < 0) {
+ dev_err(&client->dev, "error reading the model id: %d\n", ret);
+ return ret;
+ }
+ if ((val & 0x0f) != 3) {
+ dev_err(&client->dev, "unknown revision %u\n", val);
+ return -ENODEV;
+ }
+
+ INIT_LIST_HEAD(&priv->bridge.list);
+ priv->bridge.funcs = &ch7033_bridge_funcs;
+ priv->bridge.of_node = dev->of_node;
+ drm_bridge_add(&priv->bridge);
+
+ dev_info(dev, "Chrontel CH7033 Video Encoder\n");
+ return 0;
+}
+
+static int ch7033_remove(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct ch7033_priv *priv = dev_get_drvdata(dev);
+
+ drm_bridge_remove(&priv->bridge);
+
+ return 0;
+}
+
+static const struct of_device_id ch7033_dt_ids[] = {
+ { .compatible = "chrontel,ch7033", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ch7033_dt_ids);
+
+static const struct i2c_device_id ch7033_ids[] = {
+ { "ch7033", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ch7033_ids);
+
+static struct i2c_driver ch7033_driver = {
+ .probe = ch7033_probe,
+ .remove = ch7033_remove,
+ .driver = {
+ .name = "ch7033",
+ .of_match_table = of_match_ptr(ch7033_dt_ids),
+ },
+ .id_table = ch7033_ids,
+};
+
+module_i2c_driver(ch7033_driver);
+
+MODULE_AUTHOR("Lubomir Rintel <lkundrak@v3.sk>");
+MODULE_DESCRIPTION("Chrontel CH7033 Video Encoder Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c
new file mode 100644
index 000000000000..b14d725bf609
--- /dev/null
+++ b/drivers/gpu/drm/bridge/nwl-dsi.c
@@ -0,0 +1,1213 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * i.MX8 NWL MIPI DSI host driver
+ *
+ * Copyright (C) 2017 NXP
+ * Copyright (C) 2020 Purism SPC
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/irq.h>
+#include <linux/math64.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/mux/consumer.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/phy/phy.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/sys_soc.h>
+#include <linux/time64.h>
+
+#include <drm/drm_bridge.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
+#include <video/mipi_display.h>
+
+#include "nwl-dsi.h"
+
+#define DRV_NAME "nwl-dsi"
+
+/* i.MX8 NWL quirks */
+/* i.MX8MQ errata E11418 */
+#define E11418_HS_MODE_QUIRK BIT(0)
+
+#define NWL_DSI_MIPI_FIFO_TIMEOUT msecs_to_jiffies(500)
+
+enum transfer_direction {
+ DSI_PACKET_SEND,
+ DSI_PACKET_RECEIVE,
+};
+
+#define NWL_DSI_ENDPOINT_LCDIF 0
+#define NWL_DSI_ENDPOINT_DCSS 1
+
+struct nwl_dsi_plat_clk_config {
+ const char *id;
+ struct clk *clk;
+ bool present;
+};
+
+struct nwl_dsi_transfer {
+ const struct mipi_dsi_msg *msg;
+ struct mipi_dsi_packet packet;
+ struct completion completed;
+
+ int status; /* status of transmission */
+ enum transfer_direction direction;
+ bool need_bta;
+ u8 cmd;
+ u16 rx_word_count;
+ size_t tx_len; /* in bytes */
+ size_t rx_len; /* in bytes */
+};
+
+struct nwl_dsi {
+ struct drm_bridge bridge;
+ struct mipi_dsi_host dsi_host;
+ struct drm_bridge *panel_bridge;
+ struct device *dev;
+ struct phy *phy;
+ union phy_configure_opts phy_cfg;
+ unsigned int quirks;
+
+ struct regmap *regmap;
+ int irq;
+ /*
+ * The DSI host controller needs this reset sequence according to NWL:
+ * 1. Deassert pclk reset to get access to DSI regs
+ * 2. Configure DSI Host and DPHY and enable DPHY
+ * 3. Deassert ESC and BYTE resets to allow host TX operations
+ * 4. Send DSI cmds to configure peripheral (handled by panel drv)
+ * 5. Deassert DPI reset so DPI receives pixels and starts sending
+ * DSI data
+ *
+ * TODO: Since panel_bridges do their DSI setup in enable we
+ * currently have 4. and 5. swapped.
+ */
+ struct reset_control *rst_byte;
+ struct reset_control *rst_esc;
+ struct reset_control *rst_dpi;
+ struct reset_control *rst_pclk;
+ struct mux_control *mux;
+
+ /* DSI clocks */
+ struct clk *phy_ref_clk;
+ struct clk *rx_esc_clk;
+ struct clk *tx_esc_clk;
+ struct clk *core_clk;
+ /*
+ * hardware bug: the i.MX8MQ needs this clock on during reset
+ * even when not using LCDIF.
+ */
+ struct clk *lcdif_clk;
+
+ /* dsi lanes */
+ u32 lanes;
+ enum mipi_dsi_pixel_format format;
+ struct drm_display_mode mode;
+ unsigned long dsi_mode_flags;
+ int error;
+
+ struct nwl_dsi_transfer *xfer;
+};
+
+static const struct regmap_config nwl_dsi_regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = NWL_DSI_IRQ_MASK2,
+ .name = DRV_NAME,
+};
+
+static inline struct nwl_dsi *bridge_to_dsi(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct nwl_dsi, bridge);
+}
+
+static int nwl_dsi_clear_error(struct nwl_dsi *dsi)
+{
+ int ret = dsi->error;
+
+ dsi->error = 0;
+ return ret;
+}
+
+static void nwl_dsi_write(struct nwl_dsi *dsi, unsigned int reg, u32 val)
+{
+ int ret;
+
+ if (dsi->error)
+ return;
+
+ ret = regmap_write(dsi->regmap, reg, val);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dsi->dev,
+ "Failed to write NWL DSI reg 0x%x: %d\n", reg,
+ ret);
+ dsi->error = ret;
+ }
+}
+
+static u32 nwl_dsi_read(struct nwl_dsi *dsi, u32 reg)
+{
+ unsigned int val;
+ int ret;
+
+ if (dsi->error)
+ return 0;
+
+ ret = regmap_read(dsi->regmap, reg, &val);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dsi->dev, "Failed to read NWL DSI reg 0x%x: %d\n",
+ reg, ret);
+ dsi->error = ret;
+ }
+ return val;
+}
+
+static int nwl_dsi_get_dpi_pixel_format(enum mipi_dsi_pixel_format format)
+{
+ switch (format) {
+ case MIPI_DSI_FMT_RGB565:
+ return NWL_DSI_PIXEL_FORMAT_16;
+ case MIPI_DSI_FMT_RGB666:
+ return NWL_DSI_PIXEL_FORMAT_18L;
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ return NWL_DSI_PIXEL_FORMAT_18;
+ case MIPI_DSI_FMT_RGB888:
+ return NWL_DSI_PIXEL_FORMAT_24;
+ default:
+ return -EINVAL;
+ }
+}
+
+/*
+ * ps2bc - Picoseconds to byte clock cycles
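+ *
+ * mode.clock is in kHz, so the combined ps -> s (1e12) and kHz -> Hz (1e3)
+ * conversion factors reduce to a single division by NSEC_PER_SEC.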
+ */
+static u32 ps2bc(struct nwl_dsi *dsi, unsigned long long ps)
+{
+ u32 bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
+
+ return DIV64_U64_ROUND_UP(ps * dsi->mode.clock * bpp,
+ dsi->lanes * 8 * NSEC_PER_SEC);
+}
+
+/*
+ * ui2bc - UI time periods to byte clock cycles
+ */
+static u32 ui2bc(struct nwl_dsi *dsi, unsigned long long ui)
+{
+ u32 bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
+
+ return DIV64_U64_ROUND_UP(ui * dsi->lanes,
+ dsi->mode.clock * 1000 * bpp);
+}
+
+/*
+ * us2lp - microseconds to LP clock cycles
+ */
+static u32 us2lp(u32 lp_clk_rate, unsigned long us)
+{
+ return DIV_ROUND_UP(us * lp_clk_rate, USEC_PER_SEC);
+}
+
+static int nwl_dsi_config_host(struct nwl_dsi *dsi)
+{
+ u32 cycles;
+ struct phy_configure_opts_mipi_dphy *cfg = &dsi->phy_cfg.mipi_dphy;
+
+ if (dsi->lanes < 1 || dsi->lanes > 4)
+ return -EINVAL;
+
+ DRM_DEV_DEBUG_DRIVER(dsi->dev, "DSI Lanes %d\n", dsi->lanes);
+ nwl_dsi_write(dsi, NWL_DSI_CFG_NUM_LANES, dsi->lanes - 1);
+
+ if (dsi->dsi_mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) {
+ nwl_dsi_write(dsi, NWL_DSI_CFG_NONCONTINUOUS_CLK, 0x01);
+ nwl_dsi_write(dsi, NWL_DSI_CFG_AUTOINSERT_EOTP, 0x01);
+ } else {
+ nwl_dsi_write(dsi, NWL_DSI_CFG_NONCONTINUOUS_CLK, 0x00);
+ nwl_dsi_write(dsi, NWL_DSI_CFG_AUTOINSERT_EOTP, 0x00);
+ }
+
+ /* values in byte clock cycles */
+ cycles = ui2bc(dsi, cfg->clk_pre);
+ DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_t_pre: 0x%x\n", cycles);
+ nwl_dsi_write(dsi, NWL_DSI_CFG_T_PRE, cycles);
+ cycles = ps2bc(dsi, cfg->lpx + cfg->clk_prepare + cfg->clk_zero);
+ DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_tx_gap (pre): 0x%x\n", cycles);
+ cycles += ui2bc(dsi, cfg->clk_pre);
+ DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_t_post: 0x%x\n", cycles);
+ nwl_dsi_write(dsi, NWL_DSI_CFG_T_POST, cycles);
+ cycles = ps2bc(dsi, cfg->hs_exit);
+ DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_tx_gap: 0x%x\n", cycles);
+ nwl_dsi_write(dsi, NWL_DSI_CFG_TX_GAP, cycles);
+
+ nwl_dsi_write(dsi, NWL_DSI_CFG_EXTRA_CMDS_AFTER_EOTP, 0x01);
+ nwl_dsi_write(dsi, NWL_DSI_CFG_HTX_TO_COUNT, 0x00);
+ nwl_dsi_write(dsi, NWL_DSI_CFG_LRX_H_TO_COUNT, 0x00);
+ nwl_dsi_write(dsi, NWL_DSI_CFG_BTA_H_TO_COUNT, 0x00);
+ /* In LP clock cycles */
+ cycles = us2lp(cfg->lp_clk_rate, cfg->wakeup);
+ DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_twakeup: 0x%x\n", cycles);
+ nwl_dsi_write(dsi, NWL_DSI_CFG_TWAKEUP, cycles);
+
+ return nwl_dsi_clear_error(dsi);
+}
+
+static int nwl_dsi_config_dpi(struct nwl_dsi *dsi)
+{
+ u32 mode;
+ int color_format;
+ bool burst_mode;
+ int hfront_porch, hback_porch, vfront_porch, vback_porch;
+ int hsync_len, vsync_len;
+
+ hfront_porch = dsi->mode.hsync_start - dsi->mode.hdisplay;
+ hsync_len = dsi->mode.hsync_end - dsi->mode.hsync_start;
+ hback_porch = dsi->mode.htotal - dsi->mode.hsync_end;
+
+ vfront_porch = dsi->mode.vsync_start - dsi->mode.vdisplay;
+ vsync_len = dsi->mode.vsync_end - dsi->mode.vsync_start;
+ vback_porch = dsi->mode.vtotal - dsi->mode.vsync_end;
+
+ DRM_DEV_DEBUG_DRIVER(dsi->dev, "hfront_porch = %d\n", hfront_porch);
+ DRM_DEV_DEBUG_DRIVER(dsi->dev, "hback_porch = %d\n", hback_porch);
+ DRM_DEV_DEBUG_DRIVER(dsi->dev, "hsync_len = %d\n", hsync_len);
+ DRM_DEV_DEBUG_DRIVER(dsi->dev, "hdisplay = %d\n", dsi->mode.hdisplay);
+ DRM_DEV_DEBUG_DRIVER(dsi->dev, "vfront_porch = %d\n", vfront_porch);
+ DRM_DEV_DEBUG_DRIVER(dsi->dev, "vback_porch = %d\n", vback_porch);
+ DRM_DEV_DEBUG_DRIVER(dsi->dev, "vsync_len = %d\n", vsync_len);
+ DRM_DEV_DEBUG_DRIVER(dsi->dev, "vactive = %d\n", dsi->mode.vdisplay);
+ DRM_DEV_DEBUG_DRIVER(dsi->dev, "clock = %d kHz\n", dsi->mode.clock);
+
+ color_format = nwl_dsi_get_dpi_pixel_format(dsi->format);
+ if (color_format < 0) {
+ DRM_DEV_ERROR(dsi->dev, "Invalid color format 0x%x\n",
+ dsi->format);
+ return color_format;
+ }
+ DRM_DEV_DEBUG_DRIVER(dsi->dev, "pixel fmt = %d\n", dsi->format);
+
+ nwl_dsi_write(dsi, NWL_DSI_INTERFACE_COLOR_CODING, NWL_DSI_DPI_24_BIT);
+ nwl_dsi_write(dsi, NWL_DSI_PIXEL_FORMAT, color_format);
+ /*
+ * Adjusting input polarity based on the video mode results in
+ * a black screen so always pick active low:
+ */
+ nwl_dsi_write(dsi, NWL_DSI_VSYNC_POLARITY,
+ NWL_DSI_VSYNC_POLARITY_ACTIVE_LOW);
+ nwl_dsi_write(dsi, NWL_DSI_HSYNC_POLARITY,
+ NWL_DSI_HSYNC_POLARITY_ACTIVE_LOW);
+
+ burst_mode = (dsi->dsi_mode_flags & MIPI_DSI_MODE_VIDEO_BURST) &&
+ !(dsi->dsi_mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE);
+
+ if (burst_mode) {
+ nwl_dsi_write(dsi, NWL_DSI_VIDEO_MODE, NWL_DSI_VM_BURST_MODE);
+ nwl_dsi_write(dsi, NWL_DSI_PIXEL_FIFO_SEND_LEVEL, 256);
+ } else {
+ mode = ((dsi->dsi_mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) ?
+ NWL_DSI_VM_BURST_MODE_WITH_SYNC_PULSES :
+ NWL_DSI_VM_NON_BURST_MODE_WITH_SYNC_EVENTS);
+ nwl_dsi_write(dsi, NWL_DSI_VIDEO_MODE, mode);
+ nwl_dsi_write(dsi, NWL_DSI_PIXEL_FIFO_SEND_LEVEL,
+ dsi->mode.hdisplay);
+ }
+
+ nwl_dsi_write(dsi, NWL_DSI_HFP, hfront_porch);
+ nwl_dsi_write(dsi, NWL_DSI_HBP, hback_porch);
+ nwl_dsi_write(dsi, NWL_DSI_HSA, hsync_len);
+
+ nwl_dsi_write(dsi, NWL_DSI_ENABLE_MULT_PKTS, 0x0);
+ nwl_dsi_write(dsi, NWL_DSI_BLLP_MODE, 0x1);
+ nwl_dsi_write(dsi, NWL_DSI_USE_NULL_PKT_BLLP, 0x0);
+ nwl_dsi_write(dsi, NWL_DSI_VC, 0x0);
+
+ nwl_dsi_write(dsi, NWL_DSI_PIXEL_PAYLOAD_SIZE, dsi->mode.hdisplay);
+ nwl_dsi_write(dsi, NWL_DSI_VACTIVE, dsi->mode.vdisplay - 1);
+ nwl_dsi_write(dsi, NWL_DSI_VBP, vback_porch);
+ nwl_dsi_write(dsi, NWL_DSI_VFP, vfront_porch);
+
+ return nwl_dsi_clear_error(dsi);
+}
+
+static int nwl_dsi_init_interrupts(struct nwl_dsi *dsi)
+{
+ u32 irq_enable;
+
+ nwl_dsi_write(dsi, NWL_DSI_IRQ_MASK, 0xffffffff);
+ nwl_dsi_write(dsi, NWL_DSI_IRQ_MASK2, 0x7);
+
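+ /*
+  * A bit set in IRQ_MASK masks the corresponding interrupt: leave
+  * everything masked except the packet-done, RX-header, TX-FIFO-overflow
+  * and HS-TX-timeout events acted on by nwl_dsi_irq_handler().
+  */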
+ irq_enable = ~(u32)(NWL_DSI_TX_PKT_DONE_MASK |
+ NWL_DSI_RX_PKT_HDR_RCVD_MASK |
+ NWL_DSI_TX_FIFO_OVFLW_MASK |
+ NWL_DSI_HS_TX_TIMEOUT_MASK);
+
+ nwl_dsi_write(dsi, NWL_DSI_IRQ_MASK, irq_enable);
+
+ return nwl_dsi_clear_error(dsi);
+}
+
+static int nwl_dsi_host_attach(struct mipi_dsi_host *dsi_host,
+ struct mipi_dsi_device *device)
+{
+ struct nwl_dsi *dsi = container_of(dsi_host, struct nwl_dsi, dsi_host);
+ struct device *dev = dsi->dev;
+
+ DRM_DEV_INFO(dev, "lanes=%u, format=0x%x flags=0x%lx\n", device->lanes,
+ device->format, device->mode_flags);
+
+ if (device->lanes < 1 || device->lanes > 4)
+ return -EINVAL;
+
+ dsi->lanes = device->lanes;
+ dsi->format = device->format;
+ dsi->dsi_mode_flags = device->mode_flags;
+
+ return 0;
+}
+
+static bool nwl_dsi_read_packet(struct nwl_dsi *dsi, u32 status)
+{
+ struct device *dev = dsi->dev;
+ struct nwl_dsi_transfer *xfer = dsi->xfer;
+ int err;
+ u8 *payload = xfer->msg->rx_buf;
+ u32 val;
+ u16 word_count;
+ u8 channel;
+ u8 data_type;
+
+ xfer->status = 0;
+
+ if (xfer->rx_word_count == 0) {
+ if (!(status & NWL_DSI_RX_PKT_HDR_RCVD))
+ return false;
+ /* Get the RX header and parse it */
+ val = nwl_dsi_read(dsi, NWL_DSI_RX_PKT_HEADER);
+ err = nwl_dsi_clear_error(dsi);
+ if (err)
+ xfer->status = err;
+ word_count = NWL_DSI_WC(val);
+ channel = NWL_DSI_RX_VC(val);
+ data_type = NWL_DSI_RX_DT(val);
+
+ if (channel != xfer->msg->channel) {
+ DRM_DEV_ERROR(dev,
+ "[%02X] Channel mismatch (%u != %u)\n",
+ xfer->cmd, channel, xfer->msg->channel);
+ xfer->status = -EINVAL;
+ return true;
+ }
+
+ switch (data_type) {
+ case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
+ fallthrough;
+ case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
+ if (xfer->msg->rx_len > 1) {
+ /* read second byte */
+ payload[1] = word_count >> 8;
+ ++xfer->rx_len;
+ }
+ fallthrough;
+ case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
+ fallthrough;
+ case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
+ if (xfer->msg->rx_len > 0) {
+ /* read first byte */
+ payload[0] = word_count & 0xff;
+ ++xfer->rx_len;
+ }
+ xfer->status = xfer->rx_len;
+ return true;
+ case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
+ word_count &= 0xff;
+ DRM_DEV_ERROR(dev, "[%02X] DSI error report: 0x%02x\n",
+ xfer->cmd, word_count);
+ xfer->status = -EPROTO;
+ return true;
+ }
+
+ if (word_count > xfer->msg->rx_len) {
+ DRM_DEV_ERROR(dev,
+ "[%02X] Receive buffer too small: %zu (< %u)\n",
+ xfer->cmd, xfer->msg->rx_len, word_count);
+ xfer->status = -EINVAL;
+ return true;
+ }
+
+ xfer->rx_word_count = word_count;
+ } else {
+ /* Set word_count from previous header read */
+ word_count = xfer->rx_word_count;
+ }
+
+ /* If RX payload is not yet received, wait for it */
+ if (!(status & NWL_DSI_RX_PKT_PAYLOAD_DATA_RCVD))
+ return false;
+
+ /* Read the RX payload */
+ while (word_count >= 4) {
+ val = nwl_dsi_read(dsi, NWL_DSI_RX_PAYLOAD);
+ payload[0] = (val >> 0) & 0xff;
+ payload[1] = (val >> 8) & 0xff;
+ payload[2] = (val >> 16) & 0xff;
+ payload[3] = (val >> 24) & 0xff;
+ payload += 4;
+ xfer->rx_len += 4;
+ word_count -= 4;
+ }
+
+ if (word_count > 0) {
+ val = nwl_dsi_read(dsi, NWL_DSI_RX_PAYLOAD);
+ switch (word_count) {
+ case 3:
+ payload[2] = (val >> 16) & 0xff;
+ ++xfer->rx_len;
+ fallthrough;
+ case 2:
+ payload[1] = (val >> 8) & 0xff;
+ ++xfer->rx_len;
+ fallthrough;
+ case 1:
+ payload[0] = (val >> 0) & 0xff;
+ ++xfer->rx_len;
+ break;
+ }
+ }
+
+ xfer->status = xfer->rx_len;
+ err = nwl_dsi_clear_error(dsi);
+ if (err)
+ xfer->status = err;
+
+ return true;
+}
+
+static void nwl_dsi_finish_transmission(struct nwl_dsi *dsi, u32 status)
+{
+ struct nwl_dsi_transfer *xfer = dsi->xfer;
+ bool end_packet = false;
+
+ if (!xfer)
+ return;
+
+ if (xfer->direction == DSI_PACKET_SEND &&
+ status & NWL_DSI_TX_PKT_DONE) {
+ xfer->status = xfer->tx_len;
+ end_packet = true;
+ } else if (status & NWL_DSI_DPHY_DIRECTION &&
+ ((status & (NWL_DSI_RX_PKT_HDR_RCVD |
+ NWL_DSI_RX_PKT_PAYLOAD_DATA_RCVD)))) {
+ end_packet = nwl_dsi_read_packet(dsi, status);
+ }
+
+ if (end_packet)
+ complete(&xfer->completed);
+}
+
+static void nwl_dsi_begin_transmission(struct nwl_dsi *dsi)
+{
+ struct nwl_dsi_transfer *xfer = dsi->xfer;
+ struct mipi_dsi_packet *pkt = &xfer->packet;
+ const u8 *payload;
+ size_t length;
+ u16 word_count;
+ u8 hs_mode;
+ u32 val;
+ u32 hs_workaround = 0;
+
+ /* Send the payload, if any */
+ length = pkt->payload_length;
+ payload = pkt->payload;
+
+ while (length >= 4) {
+ val = *(u32 *)payload;
+ hs_workaround |= !(val & 0xFFFF00);
+ nwl_dsi_write(dsi, NWL_DSI_TX_PAYLOAD, val);
+ payload += 4;
+ length -= 4;
+ }
+ /* Send the rest of the payload */
+ val = 0;
+ switch (length) {
+ case 3:
+ val |= payload[2] << 16;
+ fallthrough;
+ case 2:
+ val |= payload[1] << 8;
+ hs_workaround |= !(val & 0xFFFF00);
+ fallthrough;
+ case 1:
+ val |= payload[0];
+ nwl_dsi_write(dsi, NWL_DSI_TX_PAYLOAD, val);
+ break;
+ }
+ xfer->tx_len = pkt->payload_length;
+
+ /*
+ * Send the header
+ * header[0] = Virtual Channel + Data Type
+ * header[1] = Word Count LSB (LP) or first param (SP)
+ * header[2] = Word Count MSB (LP) or second param (SP)
+ */
+ word_count = pkt->header[1] | (pkt->header[2] << 8);
+ if (hs_workaround && (dsi->quirks & E11418_HS_MODE_QUIRK)) {
+ DRM_DEV_DEBUG_DRIVER(dsi->dev,
+ "Using hs mode workaround for cmd 0x%x\n",
+ xfer->cmd);
+ hs_mode = 1;
+ } else {
+ hs_mode = (xfer->msg->flags & MIPI_DSI_MSG_USE_LPM) ? 0 : 1;
+ }
+ val = NWL_DSI_WC(word_count) | NWL_DSI_TX_VC(xfer->msg->channel) |
+ NWL_DSI_TX_DT(xfer->msg->type) | NWL_DSI_HS_SEL(hs_mode) |
+ NWL_DSI_BTA_TX(xfer->need_bta);
+ nwl_dsi_write(dsi, NWL_DSI_PKT_CONTROL, val);
+
+ /* Send packet command */
+ nwl_dsi_write(dsi, NWL_DSI_SEND_PACKET, 0x1);
+}
+
+static ssize_t nwl_dsi_host_transfer(struct mipi_dsi_host *dsi_host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct nwl_dsi *dsi = container_of(dsi_host, struct nwl_dsi, dsi_host);
+ struct nwl_dsi_transfer xfer;
+ ssize_t ret = 0;
+
+ /* Create packet to be sent */
+ dsi->xfer = &xfer;
+ ret = mipi_dsi_create_packet(&xfer.packet, msg);
+ if (ret < 0) {
+ dsi->xfer = NULL;
+ return ret;
+ }
+
+ if ((msg->type & MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM ||
+ msg->type & MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM ||
+ msg->type & MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM ||
+ msg->type & MIPI_DSI_DCS_READ) &&
+ msg->rx_len > 0 && msg->rx_buf)
+ xfer.direction = DSI_PACKET_RECEIVE;
+ else
+ xfer.direction = DSI_PACKET_SEND;
+
+ xfer.need_bta = (xfer.direction == DSI_PACKET_RECEIVE);
+ xfer.need_bta |= (msg->flags & MIPI_DSI_MSG_REQ_ACK) ? 1 : 0;
+ xfer.msg = msg;
+ xfer.status = -ETIMEDOUT;
+ xfer.rx_word_count = 0;
+ xfer.rx_len = 0;
+ xfer.cmd = 0x00;
+ if (msg->tx_len > 0)
+ xfer.cmd = ((u8 *)(msg->tx_buf))[0];
+ init_completion(&xfer.completed);
+
+ ret = clk_prepare_enable(dsi->rx_esc_clk);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dsi->dev, "Failed to enable rx_esc clk: %zd\n",
+ ret);
+ return ret;
+ }
+ DRM_DEV_DEBUG_DRIVER(dsi->dev, "Enabled rx_esc clk @%lu Hz\n",
+ clk_get_rate(dsi->rx_esc_clk));
+
+ /* Initiate the DSI packet transmission */
+ nwl_dsi_begin_transmission(dsi);
+
+ if (!wait_for_completion_timeout(&xfer.completed,
+ NWL_DSI_MIPI_FIFO_TIMEOUT)) {
+ DRM_DEV_ERROR(dsi_host->dev, "[%02X] DSI transfer timed out\n",
+ xfer.cmd);
+ ret = -ETIMEDOUT;
+ } else {
+ ret = xfer.status;
+ }
+
+ clk_disable_unprepare(dsi->rx_esc_clk);
+
+ return ret;
+}
+
+static const struct mipi_dsi_host_ops nwl_dsi_host_ops = {
+ .attach = nwl_dsi_host_attach,
+ .transfer = nwl_dsi_host_transfer,
+};
+
+static irqreturn_t nwl_dsi_irq_handler(int irq, void *data)
+{
+ u32 irq_status;
+ struct nwl_dsi *dsi = data;
+
+ irq_status = nwl_dsi_read(dsi, NWL_DSI_IRQ_STATUS);
+
+ if (irq_status & NWL_DSI_TX_FIFO_OVFLW)
+ DRM_DEV_ERROR_RATELIMITED(dsi->dev, "tx fifo overflow\n");
+
+ if (irq_status & NWL_DSI_HS_TX_TIMEOUT)
+ DRM_DEV_ERROR_RATELIMITED(dsi->dev, "HS tx timeout\n");
+
+ if (irq_status & NWL_DSI_TX_PKT_DONE ||
+ irq_status & NWL_DSI_RX_PKT_HDR_RCVD ||
+ irq_status & NWL_DSI_RX_PKT_PAYLOAD_DATA_RCVD)
+ nwl_dsi_finish_transmission(dsi, irq_status);
+
+ return IRQ_HANDLED;
+}
+
+static int nwl_dsi_enable(struct nwl_dsi *dsi)
+{
+ struct device *dev = dsi->dev;
+ union phy_configure_opts *phy_cfg = &dsi->phy_cfg;
+ int ret;
+
+ if (!dsi->lanes) {
+ DRM_DEV_ERROR(dev, "Need DSI lanes: %d\n", dsi->lanes);
+ return -EINVAL;
+ }
+
+ ret = phy_init(dsi->phy);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "Failed to init DSI phy: %d\n", ret);
+ return ret;
+ }
+
+ ret = phy_configure(dsi->phy, phy_cfg);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "Failed to configure DSI phy: %d\n", ret);
+ goto uninit_phy;
+ }
+
+ ret = clk_prepare_enable(dsi->tx_esc_clk);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dsi->dev, "Failed to enable tx_esc clk: %d\n",
+ ret);
+ goto uninit_phy;
+ }
+ DRM_DEV_DEBUG_DRIVER(dsi->dev, "Enabled tx_esc clk @%lu Hz\n",
+ clk_get_rate(dsi->tx_esc_clk));
+
+ ret = nwl_dsi_config_host(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "Failed to set up DSI: %d", ret);
+ goto disable_clock;
+ }
+
+ ret = nwl_dsi_config_dpi(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "Failed to set up DPI: %d", ret);
+ goto disable_clock;
+ }
+
+ ret = phy_power_on(dsi->phy);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "Failed to power on DPHY (%d)\n", ret);
+ goto disable_clock;
+ }
+
+ ret = nwl_dsi_init_interrupts(dsi);
+ if (ret < 0)
+ goto power_off_phy;
+
+ return ret;
+
+power_off_phy:
+ phy_power_off(dsi->phy);
+disable_clock:
+ clk_disable_unprepare(dsi->tx_esc_clk);
+uninit_phy:
+ phy_exit(dsi->phy);
+
+ return ret;
+}
+
+static int nwl_dsi_disable(struct nwl_dsi *dsi)
+{
+ struct device *dev = dsi->dev;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "Disabling clocks and phy\n");
+
+ phy_power_off(dsi->phy);
+ phy_exit(dsi->phy);
+
+ /* Disabling the clock before the phy breaks enabling dsi again */
+ clk_disable_unprepare(dsi->tx_esc_clk);
+
+ return 0;
+}
+
+static void nwl_dsi_bridge_disable(struct drm_bridge *bridge)
+{
+ struct nwl_dsi *dsi = bridge_to_dsi(bridge);
+ int ret;
+
+ nwl_dsi_disable(dsi);
+
+ ret = reset_control_assert(dsi->rst_dpi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dsi->dev, "Failed to assert DPI: %d\n", ret);
+ return;
+ }
+ ret = reset_control_assert(dsi->rst_byte);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dsi->dev, "Failed to assert ESC: %d\n", ret);
+ return;
+ }
+ ret = reset_control_assert(dsi->rst_esc);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dsi->dev, "Failed to assert BYTE: %d\n", ret);
+ return;
+ }
+ ret = reset_control_assert(dsi->rst_pclk);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dsi->dev, "Failed to assert PCLK: %d\n", ret);
+ return;
+ }
+
+ clk_disable_unprepare(dsi->core_clk);
+ clk_disable_unprepare(dsi->lcdif_clk);
+
+ pm_runtime_put(dsi->dev);
+}
+
+static int nwl_dsi_get_dphy_params(struct nwl_dsi *dsi,
+ const struct drm_display_mode *mode,
+ union phy_configure_opts *phy_opts)
+{
+ unsigned long rate;
+ int ret;
+
+ if (dsi->lanes < 1 || dsi->lanes > 4)
+ return -EINVAL;
+
+ /*
+ * So far the DPHY spec minimal timings work for both mixel
+ * dphy and nwl dsi host
+ */
+ ret = phy_mipi_dphy_get_default_config(mode->clock * 1000,
+ mipi_dsi_pixel_format_to_bpp(dsi->format), dsi->lanes,
+ &phy_opts->mipi_dphy);
+ if (ret < 0)
+ return ret;
+
+ rate = clk_get_rate(dsi->tx_esc_clk);
+ DRM_DEV_DEBUG_DRIVER(dsi->dev, "LP clk is @%lu Hz\n", rate);
+ phy_opts->mipi_dphy.lp_clk_rate = rate;
+
+ return 0;
+}
+
+static bool nwl_dsi_bridge_mode_fixup(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ /* At least LCDIF + NWL needs active high sync */
+ adjusted_mode->flags |= (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
+ adjusted_mode->flags &= ~(DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC);
+
+ return true;
+}
+
+static enum drm_mode_status
+nwl_dsi_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode)
+{
+ struct nwl_dsi *dsi = bridge_to_dsi(bridge);
+ int bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
+
+ if (mode->clock * bpp > 15000000 * dsi->lanes)
+ return MODE_CLOCK_HIGH;
+
+ if (mode->clock * bpp < 80000 * dsi->lanes)
+ return MODE_CLOCK_LOW;
+
+ return MODE_OK;
+}
+
+static void
+nwl_dsi_bridge_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
+{
+ struct nwl_dsi *dsi = bridge_to_dsi(bridge);
+ struct device *dev = dsi->dev;
+ union phy_configure_opts new_cfg;
+ unsigned long phy_ref_rate;
+ int ret;
+
+ ret = nwl_dsi_get_dphy_params(dsi, adjusted_mode, &new_cfg);
+ if (ret < 0)
+ return;
+
+ /*
+ * If hs clock is unchanged, we're all good - all parameters are
+ * derived from it atm.
+ */
+ if (new_cfg.mipi_dphy.hs_clk_rate == dsi->phy_cfg.mipi_dphy.hs_clk_rate)
+ return;
+
+ phy_ref_rate = clk_get_rate(dsi->phy_ref_clk);
+ DRM_DEV_DEBUG_DRIVER(dev, "PHY at ref rate: %lu\n", phy_ref_rate);
+ /* Save the new desired phy config */
+ memcpy(&dsi->phy_cfg, &new_cfg, sizeof(new_cfg));
+
+ memcpy(&dsi->mode, adjusted_mode, sizeof(dsi->mode));
+ drm_mode_debug_printmodeline(adjusted_mode);
+}
+
+static void nwl_dsi_bridge_pre_enable(struct drm_bridge *bridge)
+{
+ struct nwl_dsi *dsi = bridge_to_dsi(bridge);
+ int ret;
+
+ pm_runtime_get_sync(dsi->dev);
+
+ if (clk_prepare_enable(dsi->lcdif_clk) < 0)
+ return;
+ if (clk_prepare_enable(dsi->core_clk) < 0)
+ return;
+
+ /* Step 1 from DSI reset-out instructions */
+ ret = reset_control_deassert(dsi->rst_pclk);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dsi->dev, "Failed to deassert PCLK: %d\n", ret);
+ return;
+ }
+
+ /* Step 2 from DSI reset-out instructions */
+ nwl_dsi_enable(dsi);
+
+ /* Step 3 from DSI reset-out instructions */
+ ret = reset_control_deassert(dsi->rst_esc);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dsi->dev, "Failed to deassert ESC: %d\n", ret);
+ return;
+ }
+ ret = reset_control_deassert(dsi->rst_byte);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dsi->dev, "Failed to deassert BYTE: %d\n", ret);
+ return;
+ }
+}
+
+static void nwl_dsi_bridge_enable(struct drm_bridge *bridge)
+{
+ struct nwl_dsi *dsi = bridge_to_dsi(bridge);
+ int ret;
+
+ /* Step 5 from DSI reset-out instructions */
+ ret = reset_control_deassert(dsi->rst_dpi);
+ if (ret < 0)
+ DRM_DEV_ERROR(dsi->dev, "Failed to deassert DPI: %d\n", ret);
+}
+
+static int nwl_dsi_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct nwl_dsi *dsi = bridge_to_dsi(bridge);
+ struct drm_bridge *panel_bridge;
+ struct drm_panel *panel;
+ int ret;
+
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
+
+ ret = drm_of_find_panel_or_bridge(dsi->dev->of_node, 1, 0, &panel,
+ &panel_bridge);
+ if (ret)
+ return ret;
+
+ if (panel) {
+ panel_bridge = drm_panel_bridge_add(panel);
+ if (IS_ERR(panel_bridge))
+ return PTR_ERR(panel_bridge);
+ }
+ dsi->panel_bridge = panel_bridge;
+
+ if (!dsi->panel_bridge)
+ return -EPROBE_DEFER;
+
+ return drm_bridge_attach(bridge->encoder, dsi->panel_bridge, bridge,
+ flags);
+}
+
+static void nwl_dsi_bridge_detach(struct drm_bridge *bridge)
+{
+ struct nwl_dsi *dsi = bridge_to_dsi(bridge);
+
+ drm_of_panel_bridge_remove(dsi->dev->of_node, 1, 0);
+}
+
+static const struct drm_bridge_funcs nwl_dsi_bridge_funcs = {
+ .pre_enable = nwl_dsi_bridge_pre_enable,
+ .enable = nwl_dsi_bridge_enable,
+ .disable = nwl_dsi_bridge_disable,
+ .mode_fixup = nwl_dsi_bridge_mode_fixup,
+ .mode_set = nwl_dsi_bridge_mode_set,
+ .mode_valid = nwl_dsi_bridge_mode_valid,
+ .attach = nwl_dsi_bridge_attach,
+ .detach = nwl_dsi_bridge_detach,
+};
+
+static int nwl_dsi_parse_dt(struct nwl_dsi *dsi)
+{
+ struct platform_device *pdev = to_platform_device(dsi->dev);
+ struct clk *clk;
+ void __iomem *base;
+ int ret;
+
+ dsi->phy = devm_phy_get(dsi->dev, "dphy");
+ if (IS_ERR(dsi->phy)) {
+ ret = PTR_ERR(dsi->phy);
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dsi->dev, "Could not get PHY: %d\n", ret);
+ return ret;
+ }
+
+ clk = devm_clk_get(dsi->dev, "lcdif");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ DRM_DEV_ERROR(dsi->dev, "Failed to get lcdif clock: %d\n",
+ ret);
+ return ret;
+ }
+ dsi->lcdif_clk = clk;
+
+ clk = devm_clk_get(dsi->dev, "core");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ DRM_DEV_ERROR(dsi->dev, "Failed to get core clock: %d\n",
+ ret);
+ return ret;
+ }
+ dsi->core_clk = clk;
+
+ clk = devm_clk_get(dsi->dev, "phy_ref");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ DRM_DEV_ERROR(dsi->dev, "Failed to get phy_ref clock: %d\n",
+ ret);
+ return ret;
+ }
+ dsi->phy_ref_clk = clk;
+
+ clk = devm_clk_get(dsi->dev, "rx_esc");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ DRM_DEV_ERROR(dsi->dev, "Failed to get rx_esc clock: %d\n",
+ ret);
+ return ret;
+ }
+ dsi->rx_esc_clk = clk;
+
+ clk = devm_clk_get(dsi->dev, "tx_esc");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ DRM_DEV_ERROR(dsi->dev, "Failed to get tx_esc clock: %d\n",
+ ret);
+ return ret;
+ }
+ dsi->tx_esc_clk = clk;
+
+ dsi->mux = devm_mux_control_get(dsi->dev, NULL);
+ if (IS_ERR(dsi->mux)) {
+ ret = PTR_ERR(dsi->mux);
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dsi->dev, "Failed to get mux: %d\n", ret);
+ return ret;
+ }
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ dsi->regmap =
+ devm_regmap_init_mmio(dsi->dev, base, &nwl_dsi_regmap_config);
+ if (IS_ERR(dsi->regmap)) {
+ ret = PTR_ERR(dsi->regmap);
+ DRM_DEV_ERROR(dsi->dev, "Failed to create NWL DSI regmap: %d\n",
+ ret);
+ return ret;
+ }
+
+ dsi->irq = platform_get_irq(pdev, 0);
+ if (dsi->irq < 0) {
+ DRM_DEV_ERROR(dsi->dev, "Failed to get device IRQ: %d\n",
+ dsi->irq);
+ return dsi->irq;
+ }
+
+ dsi->rst_pclk = devm_reset_control_get_exclusive(dsi->dev, "pclk");
+ if (IS_ERR(dsi->rst_pclk)) {
+ DRM_DEV_ERROR(dsi->dev, "Failed to get pclk reset: %ld\n",
+ PTR_ERR(dsi->rst_pclk));
+ return PTR_ERR(dsi->rst_pclk);
+ }
+ dsi->rst_byte = devm_reset_control_get_exclusive(dsi->dev, "byte");
+ if (IS_ERR(dsi->rst_byte)) {
+ DRM_DEV_ERROR(dsi->dev, "Failed to get byte reset: %ld\n",
+ PTR_ERR(dsi->rst_byte));
+ return PTR_ERR(dsi->rst_byte);
+ }
+ dsi->rst_esc = devm_reset_control_get_exclusive(dsi->dev, "esc");
+ if (IS_ERR(dsi->rst_esc)) {
+ DRM_DEV_ERROR(dsi->dev, "Failed to get esc reset: %ld\n",
+ PTR_ERR(dsi->rst_esc));
+ return PTR_ERR(dsi->rst_esc);
+ }
+ dsi->rst_dpi = devm_reset_control_get_exclusive(dsi->dev, "dpi");
+ if (IS_ERR(dsi->rst_dpi)) {
+ DRM_DEV_ERROR(dsi->dev, "Failed to get dpi reset: %ld\n",
+ PTR_ERR(dsi->rst_dpi));
+ return PTR_ERR(dsi->rst_dpi);
+ }
+ return 0;
+}
+
+static int nwl_dsi_select_input(struct nwl_dsi *dsi)
+{
+ struct device_node *remote;
+ u32 use_dcss = 1;
+ int ret;
+
+ remote = of_graph_get_remote_node(dsi->dev->of_node, 0,
+ NWL_DSI_ENDPOINT_LCDIF);
+ if (remote) {
+ use_dcss = 0;
+ } else {
+ remote = of_graph_get_remote_node(dsi->dev->of_node, 0,
+ NWL_DSI_ENDPOINT_DCSS);
+ if (!remote) {
+ DRM_DEV_ERROR(dsi->dev,
+ "No valid input endpoint found\n");
+ return -EINVAL;
+ }
+ }
+
+ DRM_DEV_INFO(dsi->dev, "Using %s as input source\n",
+ (use_dcss) ? "DCSS" : "LCDIF");
+ ret = mux_control_try_select(dsi->mux, use_dcss);
+ if (ret < 0)
+ DRM_DEV_ERROR(dsi->dev, "Failed to select input: %d\n", ret);
+
+ of_node_put(remote);
+ return ret;
+}
+
+static int nwl_dsi_deselect_input(struct nwl_dsi *dsi)
+{
+ int ret;
+
+ ret = mux_control_deselect(dsi->mux);
+ if (ret < 0)
+ DRM_DEV_ERROR(dsi->dev, "Failed to deselect input: %d\n", ret);
+
+ return ret;
+}
+
+static const struct drm_bridge_timings nwl_dsi_timings = {
+ .input_bus_flags = DRM_BUS_FLAG_DE_LOW,
+};
+
+static const struct of_device_id nwl_dsi_dt_ids[] = {
+ { .compatible = "fsl,imx8mq-nwl-dsi", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, nwl_dsi_dt_ids);
+
+static const struct soc_device_attribute nwl_dsi_quirks_match[] = {
+ { .soc_id = "i.MX8MQ", .revision = "2.0",
+ .data = (void *)E11418_HS_MODE_QUIRK },
+ { /* sentinel. */ },
+};
+
+static int nwl_dsi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct soc_device_attribute *attr;
+ struct nwl_dsi *dsi;
+ int ret;
+
+ dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
+ if (!dsi)
+ return -ENOMEM;
+
+ dsi->dev = dev;
+
+ ret = nwl_dsi_parse_dt(dsi);
+ if (ret)
+ return ret;
+
+ ret = devm_request_irq(dev, dsi->irq, nwl_dsi_irq_handler, 0,
+ dev_name(dev), dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "Failed to request IRQ %d: %d\n", dsi->irq,
+ ret);
+ return ret;
+ }
+
+ dsi->dsi_host.ops = &nwl_dsi_host_ops;
+ dsi->dsi_host.dev = dev;
+ ret = mipi_dsi_host_register(&dsi->dsi_host);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "Failed to register MIPI host: %d\n", ret);
+ return ret;
+ }
+
+ attr = soc_device_match(nwl_dsi_quirks_match);
+ if (attr)
+ dsi->quirks = (uintptr_t)attr->data;
+
+ dsi->bridge.driver_private = dsi;
+ dsi->bridge.funcs = &nwl_dsi_bridge_funcs;
+ dsi->bridge.of_node = dev->of_node;
+ dsi->bridge.timings = &nwl_dsi_timings;
+
+ dev_set_drvdata(dev, dsi);
+ pm_runtime_enable(dev);
+
+ ret = nwl_dsi_select_input(dsi);
+ if (ret < 0) {
+ mipi_dsi_host_unregister(&dsi->dsi_host);
+ return ret;
+ }
+
+ drm_bridge_add(&dsi->bridge);
+ return 0;
+}
+
+static int nwl_dsi_remove(struct platform_device *pdev)
+{
+ struct nwl_dsi *dsi = platform_get_drvdata(pdev);
+
+ nwl_dsi_deselect_input(dsi);
+ mipi_dsi_host_unregister(&dsi->dsi_host);
+ drm_bridge_remove(&dsi->bridge);
+ pm_runtime_disable(&pdev->dev);
+ return 0;
+}
+
+static struct platform_driver nwl_dsi_driver = {
+ .probe = nwl_dsi_probe,
+ .remove = nwl_dsi_remove,
+ .driver = {
+ .of_match_table = nwl_dsi_dt_ids,
+ .name = DRV_NAME,
+ },
+};
+
+module_platform_driver(nwl_dsi_driver);
+
+MODULE_AUTHOR("NXP Semiconductor");
+MODULE_AUTHOR("Purism SPC");
+MODULE_DESCRIPTION("Northwest Logic MIPI-DSI driver");
+MODULE_LICENSE("GPL"); /* GPLv2 or later */
diff --git a/drivers/gpu/drm/bridge/nwl-dsi.h b/drivers/gpu/drm/bridge/nwl-dsi.h
new file mode 100644
index 000000000000..a247a8a11c7c
--- /dev/null
+++ b/drivers/gpu/drm/bridge/nwl-dsi.h
@@ -0,0 +1,144 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * NWL MIPI DSI host driver
+ *
+ * Copyright (C) 2017 NXP
+ * Copyright (C) 2019 Purism SPC
+ */
+#ifndef __NWL_DSI_H__
+#define __NWL_DSI_H__
+
+/* DSI HOST registers */
+#define NWL_DSI_CFG_NUM_LANES 0x0
+#define NWL_DSI_CFG_NONCONTINUOUS_CLK 0x4
+#define NWL_DSI_CFG_T_PRE 0x8
+#define NWL_DSI_CFG_T_POST 0xc
+#define NWL_DSI_CFG_TX_GAP 0x10
+#define NWL_DSI_CFG_AUTOINSERT_EOTP 0x14
+#define NWL_DSI_CFG_EXTRA_CMDS_AFTER_EOTP 0x18
+#define NWL_DSI_CFG_HTX_TO_COUNT 0x1c
+#define NWL_DSI_CFG_LRX_H_TO_COUNT 0x20
+#define NWL_DSI_CFG_BTA_H_TO_COUNT 0x24
+#define NWL_DSI_CFG_TWAKEUP 0x28
+#define NWL_DSI_CFG_STATUS_OUT 0x2c
+#define NWL_DSI_RX_ERROR_STATUS 0x30
+
+/* DSI DPI registers */
+#define NWL_DSI_PIXEL_PAYLOAD_SIZE 0x200
+#define NWL_DSI_PIXEL_FIFO_SEND_LEVEL 0x204
+#define NWL_DSI_INTERFACE_COLOR_CODING 0x208
+#define NWL_DSI_PIXEL_FORMAT 0x20c
+#define NWL_DSI_VSYNC_POLARITY 0x210
+#define NWL_DSI_VSYNC_POLARITY_ACTIVE_LOW 0
+#define NWL_DSI_VSYNC_POLARITY_ACTIVE_HIGH BIT(1)
+
+#define NWL_DSI_HSYNC_POLARITY 0x214
+#define NWL_DSI_HSYNC_POLARITY_ACTIVE_LOW 0
+#define NWL_DSI_HSYNC_POLARITY_ACTIVE_HIGH BIT(1)
+
+#define NWL_DSI_VIDEO_MODE 0x218
+#define NWL_DSI_HFP 0x21c
+#define NWL_DSI_HBP 0x220
+#define NWL_DSI_HSA 0x224
+#define NWL_DSI_ENABLE_MULT_PKTS 0x228
+#define NWL_DSI_VBP 0x22c
+#define NWL_DSI_VFP 0x230
+#define NWL_DSI_BLLP_MODE 0x234
+#define NWL_DSI_USE_NULL_PKT_BLLP 0x238
+#define NWL_DSI_VACTIVE 0x23c
+#define NWL_DSI_VC 0x240
+
+/* DSI APB PKT control */
+#define NWL_DSI_TX_PAYLOAD 0x280
+#define NWL_DSI_PKT_CONTROL 0x284
+#define NWL_DSI_SEND_PACKET 0x288
+#define NWL_DSI_PKT_STATUS 0x28c
+#define NWL_DSI_PKT_FIFO_WR_LEVEL 0x290
+#define NWL_DSI_PKT_FIFO_RD_LEVEL 0x294
+#define NWL_DSI_RX_PAYLOAD 0x298
+#define NWL_DSI_RX_PKT_HEADER 0x29c
+
+/* DSI IRQ handling */
+#define NWL_DSI_IRQ_STATUS 0x2a0
+#define NWL_DSI_SM_NOT_IDLE BIT(0)
+#define NWL_DSI_TX_PKT_DONE BIT(1)
+#define NWL_DSI_DPHY_DIRECTION BIT(2)
+#define NWL_DSI_TX_FIFO_OVFLW BIT(3)
+#define NWL_DSI_TX_FIFO_UDFLW BIT(4)
+#define NWL_DSI_RX_FIFO_OVFLW BIT(5)
+#define NWL_DSI_RX_FIFO_UDFLW BIT(6)
+#define NWL_DSI_RX_PKT_HDR_RCVD BIT(7)
+#define NWL_DSI_RX_PKT_PAYLOAD_DATA_RCVD BIT(8)
+#define NWL_DSI_BTA_TIMEOUT BIT(29)
+#define NWL_DSI_LP_RX_TIMEOUT BIT(30)
+#define NWL_DSI_HS_TX_TIMEOUT BIT(31)
+
+#define NWL_DSI_IRQ_STATUS2 0x2a4
+#define NWL_DSI_SINGLE_BIT_ECC_ERR BIT(0)
+#define NWL_DSI_MULTI_BIT_ECC_ERR BIT(1)
+#define NWL_DSI_CRC_ERR BIT(2)
+
+#define NWL_DSI_IRQ_MASK 0x2a8
+#define NWL_DSI_SM_NOT_IDLE_MASK BIT(0)
+#define NWL_DSI_TX_PKT_DONE_MASK BIT(1)
+#define NWL_DSI_DPHY_DIRECTION_MASK BIT(2)
+#define NWL_DSI_TX_FIFO_OVFLW_MASK BIT(3)
+#define NWL_DSI_TX_FIFO_UDFLW_MASK BIT(4)
+#define NWL_DSI_RX_FIFO_OVFLW_MASK BIT(5)
+#define NWL_DSI_RX_FIFO_UDFLW_MASK BIT(6)
+#define NWL_DSI_RX_PKT_HDR_RCVD_MASK BIT(7)
+#define NWL_DSI_RX_PKT_PAYLOAD_DATA_RCVD_MASK BIT(8)
+#define NWL_DSI_BTA_TIMEOUT_MASK BIT(29)
+#define NWL_DSI_LP_RX_TIMEOUT_MASK BIT(30)
+#define NWL_DSI_HS_TX_TIMEOUT_MASK BIT(31)
+
+#define NWL_DSI_IRQ_MASK2 0x2ac
+#define NWL_DSI_SINGLE_BIT_ECC_ERR_MASK BIT(0)
+#define NWL_DSI_MULTI_BIT_ECC_ERR_MASK BIT(1)
+#define NWL_DSI_CRC_ERR_MASK BIT(2)
+
+/*
+ * PKT_CONTROL format:
+ * [15: 0] - word count
+ * [17:16] - virtual channel
+ * [23:18] - data type
+ * [24] - LP or HS select (0 - LP, 1 - HS)
+ * [25] - perform BTA after packet is sent
+ * [26] - perform BTA only, no packet tx
+ */
+#define NWL_DSI_WC(x) FIELD_PREP(GENMASK(15, 0), (x))
+#define NWL_DSI_TX_VC(x) FIELD_PREP(GENMASK(17, 16), (x))
+#define NWL_DSI_TX_DT(x) FIELD_PREP(GENMASK(23, 18), (x))
+#define NWL_DSI_HS_SEL(x) FIELD_PREP(GENMASK(24, 24), (x))
+#define NWL_DSI_BTA_TX(x) FIELD_PREP(GENMASK(25, 25), (x))
+#define NWL_DSI_BTA_NO_TX(x) FIELD_PREP(GENMASK(26, 26), (x))
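+
+/*
+ * For example, a 4-byte DCS long write on virtual channel 0, sent in HS
+ * mode with a BTA request, is encoded as NWL_DSI_WC(4) | NWL_DSI_TX_VC(0) |
+ * NWL_DSI_TX_DT(MIPI_DSI_DCS_LONG_WRITE) | NWL_DSI_HS_SEL(1) |
+ * NWL_DSI_BTA_TX(1), i.e. 0x03e40004.
+ */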
+
+/*
+ * RX_PKT_HEADER format:
+ * [15: 0] - word count
+ * [21:16] - data type
+ * [23:22] - virtual channel
+ */
+#define NWL_DSI_RX_DT(x) FIELD_GET(GENMASK(21, 16), (x))
+#define NWL_DSI_RX_VC(x) FIELD_GET(GENMASK(23, 22), (x))
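+
+/*
+ * Note that for short read responses the word count field carries the one
+ * or two returned data bytes themselves rather than a payload length; see
+ * nwl_dsi_read_packet().
+ */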
+
+/* DSI Video mode */
+#define NWL_DSI_VM_BURST_MODE_WITH_SYNC_PULSES 0
+#define NWL_DSI_VM_NON_BURST_MODE_WITH_SYNC_EVENTS BIT(0)
+#define NWL_DSI_VM_BURST_MODE BIT(1)
+
+/* DPI color coding */
+#define NWL_DSI_DPI_16_BIT_565_PACKED 0
+#define NWL_DSI_DPI_16_BIT_565_ALIGNED 1
+#define NWL_DSI_DPI_16_BIT_565_SHIFTED 2
+#define NWL_DSI_DPI_18_BIT_PACKED 3
+#define NWL_DSI_DPI_18_BIT_ALIGNED 4
+#define NWL_DSI_DPI_24_BIT 5
+
+/* DPI Pixel format */
+#define NWL_DSI_PIXEL_FORMAT_16 0
+#define NWL_DSI_PIXEL_FORMAT_18 BIT(0)
+#define NWL_DSI_PIXEL_FORMAT_18L BIT(1)
+#define NWL_DSI_PIXEL_FORMAT_24 (BIT(0) | BIT(1))
+
+#endif /* __NWL_DSI_H__ */
diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
index 8461ee8304ba..1e63ed6b18aa 100644
--- a/drivers/gpu/drm/bridge/panel.c
+++ b/drivers/gpu/drm/bridge/panel.c
@@ -166,7 +166,7 @@ static const struct drm_bridge_funcs panel_bridge_bridge_funcs = {
*
* The connector type is set to @panel->connector_type, which must be set to a
* known type. Calling this function with a panel whose connector type is
- * DRM_MODE_CONNECTOR_Unknown will return NULL.
+ * DRM_MODE_CONNECTOR_Unknown will return ERR_PTR(-EINVAL).
*
* See devm_drm_panel_bridge_add() for an automatically managed version of this
* function.
@@ -174,7 +174,7 @@ static const struct drm_bridge_funcs panel_bridge_bridge_funcs = {
struct drm_bridge *drm_panel_bridge_add(struct drm_panel *panel)
{
if (WARN_ON(panel->connector_type == DRM_MODE_CONNECTOR_Unknown))
- return NULL;
+ return ERR_PTR(-EINVAL);
return drm_panel_bridge_add_typed(panel, panel->connector_type);
}
@@ -265,7 +265,7 @@ struct drm_bridge *devm_drm_panel_bridge_add(struct device *dev,
struct drm_panel *panel)
{
if (WARN_ON(panel->connector_type == DRM_MODE_CONNECTOR_Unknown))
- return NULL;
+ return ERR_PTR(-EINVAL);
return devm_drm_panel_bridge_add_typed(dev, panel,
panel->connector_type);
@@ -311,6 +311,7 @@ EXPORT_SYMBOL(devm_drm_panel_bridge_add_typed);
/**
* drm_panel_bridge_connector - return the connector for the panel bridge
+ * @bridge: The drm_bridge.
*
* drm_panel_bridge creates the connector.
* This function gives external access to the connector.
diff --git a/drivers/gpu/drm/bridge/parade-ps8640.c b/drivers/gpu/drm/bridge/parade-ps8640.c
index d3a53442d449..4b099196afeb 100644
--- a/drivers/gpu/drm/bridge/parade-ps8640.c
+++ b/drivers/gpu/drm/bridge/parade-ps8640.c
@@ -268,8 +268,6 @@ static int ps8640_probe(struct i2c_client *client)
if (!panel)
return -ENODEV;
- panel->connector_type = DRM_MODE_CONNECTOR_eDP;
-
ps_bridge->panel_bridge = devm_drm_panel_bridge_add(dev, panel);
if (IS_ERR(ps_bridge->panel_bridge))
return PTR_ERR(ps_bridge->panel_bridge);
diff --git a/drivers/gpu/drm/bridge/sii9234.c b/drivers/gpu/drm/bridge/sii9234.c
index f81f81b7051f..b1258f0ed205 100644
--- a/drivers/gpu/drm/bridge/sii9234.c
+++ b/drivers/gpu/drm/bridge/sii9234.c
@@ -836,7 +836,8 @@ static int sii9234_init_resources(struct sii9234 *ctx,
ctx->supplies[3].supply = "cvcc12";
ret = devm_regulator_bulk_get(ctx->dev, 4, ctx->supplies);
if (ret) {
- dev_err(ctx->dev, "regulator_bulk failed\n");
+ if (ret != -EPROBE_DEFER)
+ dev_err(ctx->dev, "regulator_bulk failed\n");
return ret;
}
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
index dd56996fe9c7..d0db1acf11d7 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
@@ -630,7 +630,7 @@ static struct platform_driver snd_dw_hdmi_driver = {
module_platform_driver(snd_dw_hdmi_driver);
-MODULE_AUTHOR("Russell King <rmk+kernel@arm.linux.org.uk>");
+MODULE_AUTHOR("Russell King <rmk+kernel@armlinux.org.uk>");
MODULE_DESCRIPTION("Synopsis Designware HDMI AHB ALSA interface");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index 383b1073d7de..30681398cfb0 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -92,6 +92,12 @@ static const u16 csc_coeff_rgb_in_eitu709[3][4] = {
{ 0x6756, 0x78ab, 0x2000, 0x0200 }
};
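+
+/*
+ * Full-range RGB to limited-range RGB: the diagonal gain 0x1b7c is roughly
+ * 219/255 (with 0x2000 representing 1.0), compressing 0..255 input towards
+ * the 16..235 CEA range; the last column holds the corresponding offsets.
+ */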
+static const u16 csc_coeff_rgb_full_to_rgb_limited[3][4] = {
+ { 0x1b7c, 0x0000, 0x0000, 0x0020 },
+ { 0x0000, 0x1b7c, 0x0000, 0x0020 },
+ { 0x0000, 0x0000, 0x1b7c, 0x0020 }
+};
+
struct hdmi_vmode {
bool mdataenablepolarity;
@@ -109,6 +115,7 @@ struct hdmi_data_info {
unsigned int pix_repet_factor;
unsigned int hdcp_enable;
struct hdmi_vmode video_mode;
+ bool rgb_limited_range;
};
struct dw_hdmi_i2c {
@@ -956,7 +963,14 @@ static void hdmi_video_sample(struct dw_hdmi *hdmi)
static int is_color_space_conversion(struct dw_hdmi *hdmi)
{
- return hdmi->hdmi_data.enc_in_bus_format != hdmi->hdmi_data.enc_out_bus_format;
+ struct hdmi_data_info *hdmi_data = &hdmi->hdmi_data;
+ bool is_input_rgb, is_output_rgb;
+
+ is_input_rgb = hdmi_bus_fmt_is_rgb(hdmi_data->enc_in_bus_format);
+ is_output_rgb = hdmi_bus_fmt_is_rgb(hdmi_data->enc_out_bus_format);
+
+ return (is_input_rgb != is_output_rgb) ||
+ (is_input_rgb && is_output_rgb && hdmi_data->rgb_limited_range);
}
static int is_color_space_decimation(struct dw_hdmi *hdmi)
@@ -983,28 +997,37 @@ static int is_color_space_interpolation(struct dw_hdmi *hdmi)
return 0;
}
+static bool is_csc_needed(struct dw_hdmi *hdmi)
+{
+ return is_color_space_conversion(hdmi) ||
+ is_color_space_decimation(hdmi) ||
+ is_color_space_interpolation(hdmi);
+}
+
static void dw_hdmi_update_csc_coeffs(struct dw_hdmi *hdmi)
{
const u16 (*csc_coeff)[3][4] = &csc_coeff_default;
+ bool is_input_rgb, is_output_rgb;
unsigned i;
u32 csc_scale = 1;
- if (is_color_space_conversion(hdmi)) {
- if (hdmi_bus_fmt_is_rgb(hdmi->hdmi_data.enc_out_bus_format)) {
- if (hdmi->hdmi_data.enc_out_encoding ==
- V4L2_YCBCR_ENC_601)
- csc_coeff = &csc_coeff_rgb_out_eitu601;
- else
- csc_coeff = &csc_coeff_rgb_out_eitu709;
- } else if (hdmi_bus_fmt_is_rgb(
- hdmi->hdmi_data.enc_in_bus_format)) {
- if (hdmi->hdmi_data.enc_out_encoding ==
- V4L2_YCBCR_ENC_601)
- csc_coeff = &csc_coeff_rgb_in_eitu601;
- else
- csc_coeff = &csc_coeff_rgb_in_eitu709;
- csc_scale = 0;
- }
+ is_input_rgb = hdmi_bus_fmt_is_rgb(hdmi->hdmi_data.enc_in_bus_format);
+ is_output_rgb = hdmi_bus_fmt_is_rgb(hdmi->hdmi_data.enc_out_bus_format);
+
+ if (!is_input_rgb && is_output_rgb) {
+ if (hdmi->hdmi_data.enc_out_encoding == V4L2_YCBCR_ENC_601)
+ csc_coeff = &csc_coeff_rgb_out_eitu601;
+ else
+ csc_coeff = &csc_coeff_rgb_out_eitu709;
+ } else if (is_input_rgb && !is_output_rgb) {
+ if (hdmi->hdmi_data.enc_out_encoding == V4L2_YCBCR_ENC_601)
+ csc_coeff = &csc_coeff_rgb_in_eitu601;
+ else
+ csc_coeff = &csc_coeff_rgb_in_eitu709;
+ csc_scale = 0;
+ } else if (is_input_rgb && is_output_rgb &&
+ hdmi->hdmi_data.rgb_limited_range) {
+ csc_coeff = &csc_coeff_rgb_full_to_rgb_limited;
}
/* The CSC registers are sequential, alternating MSB then LSB */
@@ -1614,6 +1637,18 @@ static void hdmi_config_AVI(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
drm_hdmi_avi_infoframe_from_display_mode(&frame,
&hdmi->connector, mode);
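+	/*
+	 * For RGB output the AVI infoframe advertises full vs. limited
+	 * quantization range as selected in dw_hdmi_setup(); YCbCr output
+	 * is always signalled as limited range.
+	 */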
+ if (hdmi_bus_fmt_is_rgb(hdmi->hdmi_data.enc_out_bus_format)) {
+ drm_hdmi_avi_infoframe_quant_range(&frame, &hdmi->connector,
+ mode,
+ hdmi->hdmi_data.rgb_limited_range ?
+ HDMI_QUANTIZATION_RANGE_LIMITED :
+ HDMI_QUANTIZATION_RANGE_FULL);
+ } else {
+ frame.quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT;
+ frame.ycc_quantization_range =
+ HDMI_YCC_QUANTIZATION_RANGE_LIMITED;
+ }
+
if (hdmi_bus_fmt_is_yuv444(hdmi->hdmi_data.enc_out_bus_format))
frame.colorspace = HDMI_COLORSPACE_YUV444;
else if (hdmi_bus_fmt_is_yuv422(hdmi->hdmi_data.enc_out_bus_format))
@@ -1654,8 +1689,6 @@ static void hdmi_config_AVI(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
}
- frame.scan_mode = HDMI_SCAN_MODE_NONE;
-
/*
* The Designware IP uses a different byte format from standard
* AVI info frames, though generally the bits are in the correct
@@ -2010,18 +2043,19 @@ static void dw_hdmi_enable_video_path(struct dw_hdmi *hdmi)
hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS);
/* Enable csc path */
- if (is_color_space_conversion(hdmi)) {
+ if (is_csc_needed(hdmi)) {
hdmi->mc_clkdis &= ~HDMI_MC_CLKDIS_CSCCLK_DISABLE;
hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS);
- }
- /* Enable color space conversion if needed */
- if (is_color_space_conversion(hdmi))
hdmi_writeb(hdmi, HDMI_MC_FLOWCTRL_FEED_THROUGH_OFF_CSC_IN_PATH,
HDMI_MC_FLOWCTRL);
- else
+ } else {
+ hdmi->mc_clkdis |= HDMI_MC_CLKDIS_CSCCLK_DISABLE;
+ hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS);
+
hdmi_writeb(hdmi, HDMI_MC_FLOWCTRL_FEED_THROUGH_OFF_CSC_BYPASS,
HDMI_MC_FLOWCTRL);
+ }
}
/* Workaround to clear the overflow condition */
@@ -2119,6 +2153,10 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
if (hdmi->hdmi_data.enc_out_bus_format == MEDIA_BUS_FMT_FIXED)
hdmi->hdmi_data.enc_out_bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+ hdmi->hdmi_data.rgb_limited_range = hdmi->sink_is_hdmi &&
+ drm_default_rgb_quant_range(mode) ==
+ HDMI_QUANTIZATION_RANGE_LIMITED;
+
hdmi->hdmi_data.pix_repet_factor = 0;
hdmi->hdmi_data.hdcp_enable = 0;
hdmi->hdmi_data.video_mode.mdataenablepolarity = true;
diff --git a/drivers/gpu/drm/bridge/tc358768.c b/drivers/gpu/drm/bridge/tc358768.c
index 1b39e8d37834..6650fe4cfc20 100644
--- a/drivers/gpu/drm/bridge/tc358768.c
+++ b/drivers/gpu/drm/bridge/tc358768.c
@@ -178,6 +178,8 @@ static int tc358768_clear_error(struct tc358768_priv *priv)
static void tc358768_write(struct tc358768_priv *priv, u32 reg, u32 val)
{
+ /* work around https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */
+ int tmpval = val;
size_t count = 2;
if (priv->error)
@@ -187,7 +189,7 @@ static void tc358768_write(struct tc358768_priv *priv, u32 reg, u32 val)
if (reg < 0x100 || reg >= 0x600)
count = 1;
- priv->error = regmap_bulk_write(priv->regmap, reg, &val, count);
+ priv->error = regmap_bulk_write(priv->regmap, reg, &tmpval, count);
}
static void tc358768_read(struct tc358768_priv *priv, u32 reg, u32 *val)
diff --git a/drivers/gpu/drm/cirrus/Kconfig b/drivers/gpu/drm/cirrus/Kconfig
deleted file mode 100644
index c6bbd988b0e5..000000000000
--- a/drivers/gpu/drm/cirrus/Kconfig
+++ /dev/null
@@ -1,19 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-config DRM_CIRRUS_QEMU
- tristate "Cirrus driver for QEMU emulated device"
- depends on DRM && PCI && MMU
- select DRM_KMS_HELPER
- select DRM_GEM_SHMEM_HELPER
- help
- This is a KMS driver for emulated cirrus device in qemu.
- It is *NOT* intended for real cirrus devices. This requires
- the modesetting userspace X.org driver.
-
- Cirrus is obsolete, the hardware was designed in the 90ies
- and can't keep up with todays needs. More background:
- https://www.kraxel.org/blog/2014/10/qemu-using-cirrus-considered-harmful/
-
- Better alternatives are:
- - stdvga (DRM_BOCHS, qemu -vga std, default in qemu 2.2+)
- - qxl (DRM_QXL, qemu -vga qxl, works best with spice)
- - virtio (DRM_VIRTIO_GPU), qemu -vga virtio)
diff --git a/drivers/gpu/drm/cirrus/Makefile b/drivers/gpu/drm/cirrus/Makefile
deleted file mode 100644
index 0c1ed3f99725..000000000000
--- a/drivers/gpu/drm/cirrus/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus.o
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 9ccfbf213d72..965173fd0ac2 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -1641,10 +1641,10 @@ static const struct drm_info_list drm_atomic_debugfs_list[] = {
{"state", drm_state_info, 0},
};
-int drm_atomic_debugfs_init(struct drm_minor *minor)
+void drm_atomic_debugfs_init(struct drm_minor *minor)
{
- return drm_debugfs_create_files(drm_atomic_debugfs_list,
- ARRAY_SIZE(drm_atomic_debugfs_list),
- minor->debugfs_root, minor);
+ drm_debugfs_create_files(drm_atomic_debugfs_list,
+ ARRAY_SIZE(drm_atomic_debugfs_list),
+ minor->debugfs_root, minor);
}
#endif
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index 531b876d0ed8..800ac39f3213 100644
--- a/drivers/gpu/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
@@ -135,6 +135,7 @@ static int drm_set_master(struct drm_device *dev, struct drm_file *fpriv,
}
}
+ fpriv->was_master = (ret == 0);
return ret;
}
@@ -174,17 +175,77 @@ out_err:
return ret;
}
+/*
+ * In the olden days the SET/DROP_MASTER ioctls used to return EACCES when
+ * CAP_SYS_ADMIN was not set. This was used to prevent rogue applications
+ * from becoming master and/or failing to release it.
+ *
+ * At the same time, the first client (for a given VT) is _always_ master.
+ * Thus in order for the ioctls to succeed, one had to _explicitly_ run the
+ * application as root or flip the setuid bit.
+ *
+ * If the CAP_SYS_ADMIN was missing, no other client could become master...
+ * EVER :-( Leading to a) the graphics session dying badly or b) a completely
+ * locked session.
+ *
+ *
+ * At some point systemd-logind was introduced to orchestrate and delegate
+ * master as applicable. It does so by opening the fd and passing it to users
+ * while logind itself a) does the set/drop master per the user's request and
+ * b) implicitly drops master on VT switch.
+ *
+ * Even though logind looks like the future, there are a few issues:
+ * - some platforms don't have an equivalent (Android, CrOS, some BSDs) so
+ * root is required _solely_ for SET/DROP MASTER.
+ * - applications may not be updated to use it,
+ * - any client which fails to drop master* can DoS the application using
+ * logind, to a varying degree.
+ *
+ * * Either due to missing CAP_SYS_ADMIN or simply not calling DROP_MASTER.
+ *
+ *
+ * Here we implement the next best thing:
+ * - ensure the logind style of fd passing works unchanged, and
+ * - allow a client to drop/set master, iff it is/was master at a given point
+ * in time.
+ *
+ * Note: DROP_MASTER cannot be free for all, as an arbitrary user could:
+ * - DoS/crash the arbitrator - details would be implementation specific
+ * - open the node, become master implicitly and cause issues
+ *
+ * As a result this fixes the following when using a root-less build w/o logind:
+ * - startx
+ * - weston
+ * - various compositors based on wlroots
+ */
+static int
+drm_master_check_perm(struct drm_device *dev, struct drm_file *file_priv)
+{
+ if (file_priv->pid == task_pid(current) && file_priv->was_master)
+ return 0;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ return 0;
+}
+
int drm_setmaster_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
int ret = 0;
mutex_lock(&dev->master_mutex);
+
+ ret = drm_master_check_perm(dev, file_priv);
+ if (ret)
+ goto out_unlock;
+
if (drm_is_current_master(file_priv))
goto out_unlock;
if (dev->master) {
- ret = -EINVAL;
+ ret = -EBUSY;
goto out_unlock;
}
@@ -224,6 +285,12 @@ int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
int ret = -EINVAL;
mutex_lock(&dev->master_mutex);
+
+ ret = drm_master_check_perm(dev, file_priv);
+ if (ret)
+ goto out_unlock;
+
+ ret = -EINVAL;
if (!drm_is_current_master(file_priv))
goto out_unlock;
diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c
index 121481f6aa71..f1dcad96f341 100644
--- a/drivers/gpu/drm/drm_blend.c
+++ b/drivers/gpu/drm/drm_blend.c
@@ -135,7 +135,9 @@
* are underneath planes with higher Z position values. Two planes with the
* same Z position value have undefined ordering. Note that the Z position
* value can also be immutable, to inform userspace about the hard-coded
- * stacking of planes, see drm_plane_create_zpos_immutable_property().
+ * stacking of planes, see drm_plane_create_zpos_immutable_property(). If
+ * any plane has a zpos property (either mutable or immutable), then all
+ * planes shall have a zpos property.
*
* pixel blend mode:
* Pixel blend mode is set up with drm_plane_create_blend_mode_property().
@@ -183,6 +185,12 @@
* plane does not expose the "alpha" property, then this is
* assumed to be 1.0
*
+ * IN_FORMATS:
+ * Blob property which contains the set of buffer format and modifier
+ * pairs supported by this plane. The blob is a drm_format_modifier_blob
+ * struct. Without this property the plane doesn't support buffers with
+ * modifiers. Userspace cannot change this property.
+ *
* Note that all the property extensions described here apply either to the
* plane or the CRTC (e.g. for the background color, which currently is not
* exposed and assumed to be black).
@@ -338,10 +346,10 @@ EXPORT_SYMBOL(drm_rotation_simplify);
* should be set to 0 and max to maximal number of planes for given crtc - 1.
*
* If zpos of some planes cannot be changed (like fixed background or
- * cursor/topmost planes), driver should adjust min/max values and assign those
- * planes immutable zpos property with lower or higher values (for more
+ * cursor/topmost planes), drivers shall adjust the min/max values and assign
+ * those planes immutable zpos properties with lower or higher values (for more
* information, see drm_plane_create_zpos_immutable_property() function). In such
- * case driver should also assign proper initial zpos values for all planes in
+ * case drivers shall also assign proper initial zpos values for all planes in
* its plane_reset() callback, so the planes will be always sorted properly.
*
* See also drm_atomic_normalize_zpos().
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index dcabf5698333..ef26ac57f039 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -33,6 +33,7 @@
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/nospec.h>
+#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
@@ -43,7 +44,6 @@
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
-#include <drm/drm_pci.h>
#include <drm/drm_print.h>
#include "drm_legacy.h"
diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
index 6b0c6ef8b9b3..8cb93f5209a4 100644
--- a/drivers/gpu/drm/drm_client.c
+++ b/drivers/gpu/drm/drm_client.c
@@ -457,10 +457,10 @@ static const struct drm_info_list drm_client_debugfs_list[] = {
{ "internal_clients", drm_client_debugfs_internal_clients, 0 },
};
-int drm_client_debugfs_init(struct drm_minor *minor)
+void drm_client_debugfs_init(struct drm_minor *minor)
{
- return drm_debugfs_create_files(drm_client_debugfs_list,
- ARRAY_SIZE(drm_client_debugfs_list),
- minor->debugfs_root, minor);
+ drm_debugfs_create_files(drm_client_debugfs_list,
+ ARRAY_SIZE(drm_client_debugfs_list),
+ minor->debugfs_root, minor);
}
#endif
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 644f0ad10671..d877ddc6dc57 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -27,6 +27,7 @@
#include <drm/drm_print.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
+#include <drm/drm_sysfs.h>
#include <linux/uaccess.h>
@@ -523,6 +524,10 @@ int drm_connector_register(struct drm_connector *connector)
drm_mode_object_register(connector->dev, &connector->base);
connector->registration_state = DRM_CONNECTOR_REGISTERED;
+
+ /* Let userspace know we have a new connector */
+ drm_sysfs_hotplug_event(connector->dev);
+
goto unlock;
err_debugfs:
@@ -1970,6 +1975,8 @@ int drm_connector_update_edid_property(struct drm_connector *connector,
else
drm_reset_display_info(connector);
+ drm_update_tile_info(connector, edid);
+
drm_object_property_set_value(&connector->base,
dev->mode_config.non_desktop_property,
connector->display_info.non_desktop);
@@ -2392,7 +2399,7 @@ EXPORT_SYMBOL(drm_mode_put_tile_group);
* tile group or NULL if not found.
*/
struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev,
- char topology[8])
+ const char topology[8])
{
struct drm_tile_group *tg;
int id;
@@ -2422,7 +2429,7 @@ EXPORT_SYMBOL(drm_mode_get_tile_group);
* new tile group or NULL.
*/
struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
- char topology[8])
+ const char topology[8])
{
struct drm_tile_group *tg;
int ret;
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index 16f2413403aa..da96b2f64d7e 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -82,6 +82,7 @@ int drm_mode_setcrtc(struct drm_device *dev,
/* drm_mode_config.c */
int drm_modeset_register_all(struct drm_device *dev);
void drm_modeset_unregister_all(struct drm_device *dev);
+void drm_mode_config_validate(struct drm_device *dev);
/* drm_modes.c */
const char *drm_get_mode_status_name(enum drm_mode_status status);
@@ -224,7 +225,7 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
/* drm_atomic.c */
#ifdef CONFIG_DEBUG_FS
struct drm_minor;
-int drm_atomic_debugfs_init(struct drm_minor *minor);
+void drm_atomic_debugfs_init(struct drm_minor *minor);
#endif
int __drm_atomic_helper_disable_plane(struct drm_plane *plane,
@@ -278,3 +279,4 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
void drm_mode_fixup_1366x768(struct drm_display_mode *mode);
void drm_reset_display_info(struct drm_connector *connector);
u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edid);
+void drm_update_tile_info(struct drm_connector *connector, const struct edid *edid);
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 4e673d318503..2bea22130703 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -172,8 +172,8 @@ static const struct file_operations drm_debugfs_fops = {
* &struct drm_info_list in the given root directory. These files will be removed
* automatically on drm_debugfs_cleanup().
*/
-int drm_debugfs_create_files(const struct drm_info_list *files, int count,
- struct dentry *root, struct drm_minor *minor)
+void drm_debugfs_create_files(const struct drm_info_list *files, int count,
+ struct dentry *root, struct drm_minor *minor)
{
struct drm_device *dev = minor->dev;
struct drm_info_node *tmp;
@@ -199,7 +199,6 @@ int drm_debugfs_create_files(const struct drm_info_list *files, int count,
list_add(&tmp->list, &minor->debugfs_list);
mutex_unlock(&minor->debugfs_lock);
}
- return 0;
}
EXPORT_SYMBOL(drm_debugfs_create_files);
@@ -208,52 +207,28 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id,
{
struct drm_device *dev = minor->dev;
char name[64];
- int ret;
INIT_LIST_HEAD(&minor->debugfs_list);
mutex_init(&minor->debugfs_lock);
sprintf(name, "%d", minor_id);
minor->debugfs_root = debugfs_create_dir(name, root);
- ret = drm_debugfs_create_files(drm_debugfs_list, DRM_DEBUGFS_ENTRIES,
- minor->debugfs_root, minor);
- if (ret) {
- debugfs_remove(minor->debugfs_root);
- minor->debugfs_root = NULL;
- DRM_ERROR("Failed to create core drm debugfs files\n");
- return ret;
- }
+ drm_debugfs_create_files(drm_debugfs_list, DRM_DEBUGFS_ENTRIES,
+ minor->debugfs_root, minor);
if (drm_drv_uses_atomic_modeset(dev)) {
- ret = drm_atomic_debugfs_init(minor);
- if (ret) {
- DRM_ERROR("Failed to create atomic debugfs files\n");
- return ret;
- }
+ drm_atomic_debugfs_init(minor);
}
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- ret = drm_framebuffer_debugfs_init(minor);
- if (ret) {
- DRM_ERROR("Failed to create framebuffer debugfs file\n");
- return ret;
- }
+ drm_framebuffer_debugfs_init(minor);
- ret = drm_client_debugfs_init(minor);
- if (ret) {
- DRM_ERROR("Failed to create client debugfs file\n");
- return ret;
- }
+ drm_client_debugfs_init(minor);
}
- if (dev->driver->debugfs_init) {
- ret = dev->driver->debugfs_init(minor);
- if (ret) {
- DRM_ERROR("DRM: Driver failed to initialize "
- "/sys/kernel/debug/dri.\n");
- return ret;
- }
- }
+ if (dev->driver->debugfs_init)
+ dev->driver->debugfs_init(minor);
+
return 0;
}
diff --git a/drivers/gpu/drm/drm_dma.c b/drivers/gpu/drm/drm_dma.c
index a7add55a85b4..d07ba54ec945 100644
--- a/drivers/gpu/drm/drm_dma.c
+++ b/drivers/gpu/drm/drm_dma.c
@@ -34,9 +34,9 @@
*/
#include <linux/export.h>
+#include <linux/pci.h>
#include <drm/drm_drv.h>
-#include <drm/drm_pci.h>
#include <drm/drm_print.h>
#include "drm_legacy.h"
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index c6fbe6e6bc9d..19c99dddcb99 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -1238,6 +1238,8 @@ static const struct dpcd_quirk dpcd_quirk_list[] = {
{ OUI(0x00, 0x00, 0x00), DEVICE_ID('C', 'H', '7', '5', '1', '1'), false, BIT(DP_DPCD_QUIRK_NO_SINK_COUNT) },
/* Synaptics DP1.4 MST hubs can support DSC without virtual DPCD */
{ OUI(0x90, 0xCC, 0x24), DEVICE_ID_ANY, true, BIT(DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) },
+ /* Apple MacBookPro 2017 15 inch eDP Retina panel reports too low DP_MAX_LINK_RATE */
+ { OUI(0x00, 0x10, 0xfa), DEVICE_ID(101, 68, 21, 101, 98, 97), false, BIT(DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS) },
};
#undef OUI
@@ -1313,6 +1315,7 @@ static const struct edid_quirk edid_quirk_list[] = {
{ MFG(0x06, 0xaf), PROD_ID(0xeb, 0x41), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
{ MFG(0x4d, 0x10), PROD_ID(0xc7, 0x14), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
{ MFG(0x4d, 0x10), PROD_ID(0xe6, 0x14), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
+ { MFG(0x4c, 0x83), PROD_ID(0x47, 0x41), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
};
#undef MFG
@@ -1533,3 +1536,271 @@ int drm_dp_dsc_sink_supported_input_bpcs(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_S
return num_bpc;
}
EXPORT_SYMBOL(drm_dp_dsc_sink_supported_input_bpcs);
+
+/**
+ * drm_dp_get_phy_test_pattern() - get the requested pattern from the sink.
+ * @aux: DisplayPort AUX channel
+ * @data: DP phy compliance test parameters.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_get_phy_test_pattern(struct drm_dp_aux *aux,
+ struct drm_dp_phy_test_params *data)
+{
+ int err;
+ u8 rate, lanes;
+
+ err = drm_dp_dpcd_readb(aux, DP_TEST_LINK_RATE, &rate);
+ if (err < 0)
+ return err;
+ data->link_rate = drm_dp_bw_code_to_link_rate(rate);
+
+ err = drm_dp_dpcd_readb(aux, DP_TEST_LANE_COUNT, &lanes);
+ if (err < 0)
+ return err;
+ data->num_lanes = lanes & DP_MAX_LANE_COUNT_MASK;
+
+ if (lanes & DP_ENHANCED_FRAME_CAP)
+ data->enhanced_frame_cap = true;
+
+ err = drm_dp_dpcd_readb(aux, DP_PHY_TEST_PATTERN, &data->phy_pattern);
+ if (err < 0)
+ return err;
+
+ switch (data->phy_pattern) {
+ case DP_PHY_TEST_PATTERN_80BIT_CUSTOM:
+ err = drm_dp_dpcd_read(aux, DP_TEST_80BIT_CUSTOM_PATTERN_7_0,
+ &data->custom80, sizeof(data->custom80));
+ if (err < 0)
+ return err;
+
+ break;
+ case DP_PHY_TEST_PATTERN_CP2520:
+ err = drm_dp_dpcd_read(aux, DP_TEST_HBR2_SCRAMBLER_RESET,
+ &data->hbr2_reset,
+ sizeof(data->hbr2_reset));
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_get_phy_test_pattern);
+
+/**
+ * drm_dp_set_phy_test_pattern() - set the pattern to the sink.
+ * @aux: DisplayPort AUX channel
+ * @data: DP phy compliance test parameters.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_set_phy_test_pattern(struct drm_dp_aux *aux,
+ struct drm_dp_phy_test_params *data, u8 dp_rev)
+{
+ int err, i;
+ u8 link_config[2];
+ u8 test_pattern;
+
+ link_config[0] = drm_dp_link_rate_to_bw_code(data->link_rate);
+ link_config[1] = data->num_lanes;
+ if (data->enhanced_frame_cap)
+ link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+ err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, link_config, 2);
+ if (err < 0)
+ return err;
+
+ test_pattern = data->phy_pattern;
+ if (dp_rev < 0x12) {
+ test_pattern = (test_pattern << 2) &
+ DP_LINK_QUAL_PATTERN_11_MASK;
+ err = drm_dp_dpcd_writeb(aux, DP_TRAINING_PATTERN_SET,
+ test_pattern);
+ if (err < 0)
+ return err;
+ } else {
+ for (i = 0; i < data->num_lanes; i++) {
+ err = drm_dp_dpcd_writeb(aux,
+ DP_LINK_QUAL_LANE0_SET + i,
+ test_pattern);
+ if (err < 0)
+ return err;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_set_phy_test_pattern);
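A hedged usage sketch (not part of this patch) of how a source driver might pair the two new helpers when servicing a PHY compliance test request; the link retraining step is driver specific and only hinted at here:

#include <drm/drm_dp_helper.h>

static int example_service_phy_test(struct drm_dp_aux *aux, u8 dpcd_rev)
{
	struct drm_dp_phy_test_params data = {};
	int ret;

	/* read the requested rate, lane count and pattern from the sink */
	ret = drm_dp_get_phy_test_pattern(aux, &data);
	if (ret)
		return ret;

	/* retrain the link at data.link_rate / data.num_lanes here */

	/* then program the requested pattern back to the sink */
	return drm_dp_set_phy_test_pattern(aux, &data, dpcd_rev);
}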
+
+static const char *dp_pixelformat_get_name(enum dp_pixelformat pixelformat)
+{
+ if (pixelformat < 0 || pixelformat > DP_PIXELFORMAT_RESERVED)
+ return "Invalid";
+
+ switch (pixelformat) {
+ case DP_PIXELFORMAT_RGB:
+ return "RGB";
+ case DP_PIXELFORMAT_YUV444:
+ return "YUV444";
+ case DP_PIXELFORMAT_YUV422:
+ return "YUV422";
+ case DP_PIXELFORMAT_YUV420:
+ return "YUV420";
+ case DP_PIXELFORMAT_Y_ONLY:
+ return "Y_ONLY";
+ case DP_PIXELFORMAT_RAW:
+ return "RAW";
+ default:
+ return "Reserved";
+ }
+}
+
+static const char *dp_colorimetry_get_name(enum dp_pixelformat pixelformat,
+ enum dp_colorimetry colorimetry)
+{
+ if (pixelformat < 0 || pixelformat > DP_PIXELFORMAT_RESERVED)
+ return "Invalid";
+
+ switch (colorimetry) {
+ case DP_COLORIMETRY_DEFAULT:
+ switch (pixelformat) {
+ case DP_PIXELFORMAT_RGB:
+ return "sRGB";
+ case DP_PIXELFORMAT_YUV444:
+ case DP_PIXELFORMAT_YUV422:
+ case DP_PIXELFORMAT_YUV420:
+ return "BT.601";
+ case DP_PIXELFORMAT_Y_ONLY:
+ return "DICOM PS3.14";
+ case DP_PIXELFORMAT_RAW:
+ return "Custom Color Profile";
+ default:
+ return "Reserved";
+ }
+ case DP_COLORIMETRY_RGB_WIDE_FIXED: /* and DP_COLORIMETRY_BT709_YCC */
+ switch (pixelformat) {
+ case DP_PIXELFORMAT_RGB:
+ return "Wide Fixed";
+ case DP_PIXELFORMAT_YUV444:
+ case DP_PIXELFORMAT_YUV422:
+ case DP_PIXELFORMAT_YUV420:
+ return "BT.709";
+ default:
+ return "Reserved";
+ }
+ case DP_COLORIMETRY_RGB_WIDE_FLOAT: /* and DP_COLORIMETRY_XVYCC_601 */
+ switch (pixelformat) {
+ case DP_PIXELFORMAT_RGB:
+ return "Wide Float";
+ case DP_PIXELFORMAT_YUV444:
+ case DP_PIXELFORMAT_YUV422:
+ case DP_PIXELFORMAT_YUV420:
+ return "xvYCC 601";
+ default:
+ return "Reserved";
+ }
+ case DP_COLORIMETRY_OPRGB: /* and DP_COLORIMETRY_XVYCC_709 */
+ switch (pixelformat) {
+ case DP_PIXELFORMAT_RGB:
+ return "OpRGB";
+ case DP_PIXELFORMAT_YUV444:
+ case DP_PIXELFORMAT_YUV422:
+ case DP_PIXELFORMAT_YUV420:
+ return "xvYCC 709";
+ default:
+ return "Reserved";
+ }
+ case DP_COLORIMETRY_DCI_P3_RGB: /* and DP_COLORIMETRY_SYCC_601 */
+ switch (pixelformat) {
+ case DP_PIXELFORMAT_RGB:
+ return "DCI-P3";
+ case DP_PIXELFORMAT_YUV444:
+ case DP_PIXELFORMAT_YUV422:
+ case DP_PIXELFORMAT_YUV420:
+ return "sYCC 601";
+ default:
+ return "Reserved";
+ }
+ case DP_COLORIMETRY_RGB_CUSTOM: /* and DP_COLORIMETRY_OPYCC_601 */
+ switch (pixelformat) {
+ case DP_PIXELFORMAT_RGB:
+ return "Custom Profile";
+ case DP_PIXELFORMAT_YUV444:
+ case DP_PIXELFORMAT_YUV422:
+ case DP_PIXELFORMAT_YUV420:
+ return "OpYCC 601";
+ default:
+ return "Reserved";
+ }
+ case DP_COLORIMETRY_BT2020_RGB: /* and DP_COLORIMETRY_BT2020_CYCC */
+ switch (pixelformat) {
+ case DP_PIXELFORMAT_RGB:
+ return "BT.2020 RGB";
+ case DP_PIXELFORMAT_YUV444:
+ case DP_PIXELFORMAT_YUV422:
+ case DP_PIXELFORMAT_YUV420:
+ return "BT.2020 CYCC";
+ default:
+ return "Reserved";
+ }
+ case DP_COLORIMETRY_BT2020_YCC:
+ switch (pixelformat) {
+ case DP_PIXELFORMAT_YUV444:
+ case DP_PIXELFORMAT_YUV422:
+ case DP_PIXELFORMAT_YUV420:
+ return "BT.2020 YCC";
+ default:
+ return "Reserved";
+ }
+ default:
+ return "Invalid";
+ }
+}
+
+static const char *dp_dynamic_range_get_name(enum dp_dynamic_range dynamic_range)
+{
+ switch (dynamic_range) {
+ case DP_DYNAMIC_RANGE_VESA:
+ return "VESA range";
+ case DP_DYNAMIC_RANGE_CTA:
+ return "CTA range";
+ default:
+ return "Invalid";
+ }
+}
+
+static const char *dp_content_type_get_name(enum dp_content_type content_type)
+{
+ switch (content_type) {
+ case DP_CONTENT_TYPE_NOT_DEFINED:
+ return "Not defined";
+ case DP_CONTENT_TYPE_GRAPHICS:
+ return "Graphics";
+ case DP_CONTENT_TYPE_PHOTO:
+ return "Photo";
+ case DP_CONTENT_TYPE_VIDEO:
+ return "Video";
+ case DP_CONTENT_TYPE_GAME:
+ return "Game";
+ default:
+ return "Reserved";
+ }
+}
+
+void drm_dp_vsc_sdp_log(const char *level, struct device *dev,
+ const struct drm_dp_vsc_sdp *vsc)
+{
+#define DP_SDP_LOG(fmt, ...) dev_printk(level, dev, fmt, ##__VA_ARGS__)
+ DP_SDP_LOG("DP SDP: %s, revision %u, length %u\n", "VSC",
+ vsc->revision, vsc->length);
+ DP_SDP_LOG(" pixelformat: %s\n",
+ dp_pixelformat_get_name(vsc->pixelformat));
+ DP_SDP_LOG(" colorimetry: %s\n",
+ dp_colorimetry_get_name(vsc->pixelformat, vsc->colorimetry));
+ DP_SDP_LOG(" bpc: %u\n", vsc->bpc);
+ DP_SDP_LOG(" dynamic range: %s\n",
+ dp_dynamic_range_get_name(vsc->dynamic_range));
+ DP_SDP_LOG(" content type: %s\n",
+ dp_content_type_get_name(vsc->content_type));
+#undef DP_SDP_LOG
+}
+EXPORT_SYMBOL(drm_dp_vsc_sdp_log);
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 9d89ebf3a749..1e26b89628f9 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -27,6 +27,7 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
+#include <linux/iopoll.h>
#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
#include <linux/stacktrace.h>
@@ -687,51 +688,45 @@ static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *
raw->cur_len = idx;
}
-/* this adds a chunk of msg to the builder to get the final msg */
-static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
- u8 *replybuf, u8 replybuflen, bool hdr)
+static bool drm_dp_sideband_msg_set_header(struct drm_dp_sideband_msg_rx *msg,
+ struct drm_dp_sideband_msg_hdr *hdr,
+ u8 hdrlen)
{
- int ret;
- u8 crc4;
+ /*
+ * ignore out-of-order messages or messages that are part of a
+ * failed transaction
+ */
+ if (!hdr->somt && !msg->have_somt)
+ return false;
- if (hdr) {
- u8 hdrlen;
- struct drm_dp_sideband_msg_hdr recv_hdr;
- ret = drm_dp_decode_sideband_msg_hdr(&recv_hdr, replybuf, replybuflen, &hdrlen);
- if (ret == false) {
- print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16, 1, replybuf, replybuflen, false);
- return false;
- }
+ /* get length contained in this portion */
+ msg->curchunk_idx = 0;
+ msg->curchunk_len = hdr->msg_len;
+ msg->curchunk_hdrlen = hdrlen;
- /*
- * ignore out-of-order messages or messages that are part of a
- * failed transaction
- */
- if (!recv_hdr.somt && !msg->have_somt)
- return false;
+ /* we have already gotten an somt - don't bother parsing */
+ if (hdr->somt && msg->have_somt)
+ return false;
- /* get length contained in this portion */
- msg->curchunk_len = recv_hdr.msg_len;
- msg->curchunk_hdrlen = hdrlen;
+ if (hdr->somt) {
+ memcpy(&msg->initial_hdr, hdr,
+ sizeof(struct drm_dp_sideband_msg_hdr));
+ msg->have_somt = true;
+ }
+ if (hdr->eomt)
+ msg->have_eomt = true;
- /* we have already gotten an somt - don't bother parsing */
- if (recv_hdr.somt && msg->have_somt)
- return false;
+ return true;
+}
- if (recv_hdr.somt) {
- memcpy(&msg->initial_hdr, &recv_hdr, sizeof(struct drm_dp_sideband_msg_hdr));
- msg->have_somt = true;
- }
- if (recv_hdr.eomt)
- msg->have_eomt = true;
+/* this adds a chunk of msg to the builder to get the final msg */
+static bool drm_dp_sideband_append_payload(struct drm_dp_sideband_msg_rx *msg,
+ u8 *replybuf, u8 replybuflen)
+{
+ u8 crc4;
- /* copy the bytes for the remainder of this header chunk */
- msg->curchunk_idx = min(msg->curchunk_len, (u8)(replybuflen - hdrlen));
- memcpy(&msg->chunk[0], replybuf + hdrlen, msg->curchunk_idx);
- } else {
- memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
- msg->curchunk_idx += replybuflen;
- }
+ memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
+ msg->curchunk_idx += replybuflen;
if (msg->curchunk_idx >= msg->curchunk_len) {
/* do CRC */
@@ -1060,13 +1055,12 @@ static void build_link_address(struct drm_dp_sideband_msg_tx *msg)
drm_dp_encode_sideband_req(&req, msg);
}
-static int build_clear_payload_id_table(struct drm_dp_sideband_msg_tx *msg)
+static void build_clear_payload_id_table(struct drm_dp_sideband_msg_tx *msg)
{
struct drm_dp_sideband_msg_req_body req;
req.req_type = DP_CLEAR_PAYLOAD_ID_TABLE;
drm_dp_encode_sideband_req(&req, msg);
- return 0;
}
static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg,
@@ -1203,16 +1197,8 @@ static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
/* remove from q */
if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
- txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND) {
+ txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND)
list_del(&txmsg->next);
- }
-
- if (txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
- txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
- mstb->tx_slots[txmsg->seqno] = NULL;
- }
- mgr->is_waiting_for_dwn_reply = false;
-
}
out:
if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) {
@@ -2691,22 +2677,6 @@ static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
struct drm_dp_mst_branch *mstb = txmsg->dst;
u8 req_type;
- /* both msg slots are full */
- if (txmsg->seqno == -1) {
- if (mstb->tx_slots[0] && mstb->tx_slots[1]) {
- DRM_DEBUG_KMS("%s: failed to find slot\n", __func__);
- return -EAGAIN;
- }
- if (mstb->tx_slots[0] == NULL && mstb->tx_slots[1] == NULL) {
- txmsg->seqno = mstb->last_seqno;
- mstb->last_seqno ^= 1;
- } else if (mstb->tx_slots[0] == NULL)
- txmsg->seqno = 0;
- else
- txmsg->seqno = 1;
- mstb->tx_slots[txmsg->seqno] = txmsg;
- }
-
req_type = txmsg->msg[0] & 0x7f;
if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
req_type == DP_RESOURCE_STATUS_NOTIFY)
@@ -2718,7 +2688,7 @@ static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
hdr->lcr = mstb->lct - 1;
if (mstb->lct > 1)
memcpy(hdr->rad, mstb->rad, mstb->lct / 2);
- hdr->seqno = txmsg->seqno;
+
return 0;
}
/*
@@ -2733,15 +2703,15 @@ static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
int len, space, idx, tosend;
int ret;
+ if (txmsg->state == DRM_DP_SIDEBAND_TX_SENT)
+ return 0;
+
memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));
- if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) {
- txmsg->seqno = -1;
+ if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED)
txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;
- }
- /* make hdr from dst mst - for replies use seqno
- otherwise assign one */
+ /* make hdr from dst mst */
ret = set_hdr_from_dst_qlock(&hdr, txmsg);
if (ret < 0)
return ret;
@@ -2794,42 +2764,17 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
if (list_empty(&mgr->tx_msg_downq))
return;
- txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next);
+ txmsg = list_first_entry(&mgr->tx_msg_downq,
+ struct drm_dp_sideband_msg_tx, next);
ret = process_single_tx_qlock(mgr, txmsg, false);
- if (ret == 1) {
- /* txmsg is sent it should be in the slots now */
- mgr->is_waiting_for_dwn_reply = true;
- list_del(&txmsg->next);
- } else if (ret) {
+ if (ret < 0) {
DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
- mgr->is_waiting_for_dwn_reply = false;
list_del(&txmsg->next);
- if (txmsg->seqno != -1)
- txmsg->dst->tx_slots[txmsg->seqno] = NULL;
txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
wake_up_all(&mgr->tx_waitq);
}
}
-/* called holding qlock */
-static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_sideband_msg_tx *txmsg)
-{
- int ret;
-
- /* construct a chunk from the first msg in the tx_msg queue */
- ret = process_single_tx_qlock(mgr, txmsg, true);
-
- if (ret != 1)
- DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
-
- if (txmsg->seqno != -1) {
- WARN_ON((unsigned int)txmsg->seqno >
- ARRAY_SIZE(txmsg->dst->tx_slots));
- txmsg->dst->tx_slots[txmsg->seqno] = NULL;
- }
-}
-
static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_sideband_msg_tx *txmsg)
{
@@ -2842,8 +2787,7 @@ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
}
- if (list_is_singular(&mgr->tx_msg_downq) &&
- !mgr->is_waiting_for_dwn_reply)
+ if (list_is_singular(&mgr->tx_msg_downq))
process_single_down_tx_qlock(mgr);
mutex_unlock(&mgr->qlock);
}
@@ -3467,7 +3411,7 @@ static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req
static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb,
- int req_type, int seqno, bool broadcast)
+ int req_type, bool broadcast)
{
struct drm_dp_sideband_msg_tx *txmsg;
@@ -3476,13 +3420,11 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
return -ENOMEM;
txmsg->dst = mstb;
- txmsg->seqno = seqno;
drm_dp_encode_up_ack_reply(txmsg, req_type);
mutex_lock(&mgr->qlock);
-
- process_single_up_tx_qlock(mgr, txmsg);
-
+ /* construct a chunk from the first msg in the tx_msg queue */
+ process_single_tx_qlock(mgr, txmsg, true);
mutex_unlock(&mgr->qlock);
kfree(txmsg);
@@ -3707,31 +3649,63 @@ out_fail:
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
-static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
+static bool
+drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up,
+ struct drm_dp_mst_branch **mstb)
{
int len;
u8 replyblock[32];
int replylen, curreply;
int ret;
- struct drm_dp_sideband_msg_rx *msg;
- int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE : DP_SIDEBAND_MSG_DOWN_REP_BASE;
- msg = up ? &mgr->up_req_recv : &mgr->down_rep_recv;
+ u8 hdrlen;
+ struct drm_dp_sideband_msg_hdr hdr;
+ struct drm_dp_sideband_msg_rx *msg =
+ up ? &mgr->up_req_recv : &mgr->down_rep_recv;
+ int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE :
+ DP_SIDEBAND_MSG_DOWN_REP_BASE;
+
+ if (!up)
+ *mstb = NULL;
len = min(mgr->max_dpcd_transaction_bytes, 16);
- ret = drm_dp_dpcd_read(mgr->aux, basereg,
- replyblock, len);
+ ret = drm_dp_dpcd_read(mgr->aux, basereg, replyblock, len);
if (ret != len) {
DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
return false;
}
- ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
+
+ ret = drm_dp_decode_sideband_msg_hdr(&hdr, replyblock, len, &hdrlen);
+ if (ret == false) {
+ print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16,
+ 1, replyblock, len, false);
+ DRM_DEBUG_KMS("ERROR: failed header\n");
+ return false;
+ }
+
+ if (!up) {
+ /* Caller is responsible for giving back this reference */
+ *mstb = drm_dp_get_mst_branch_device(mgr, hdr.lct, hdr.rad);
+ if (!*mstb) {
+ DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
+ hdr.lct);
+ return false;
+ }
+ }
+
+ if (!drm_dp_sideband_msg_set_header(msg, &hdr, hdrlen)) {
+ DRM_DEBUG_KMS("sideband msg set header failed %d\n",
+ replyblock[0]);
+ return false;
+ }
+
+ replylen = min(msg->curchunk_len, (u8)(len - hdrlen));
+ ret = drm_dp_sideband_append_payload(msg, replyblock + hdrlen, replylen);
if (!ret) {
DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
return false;
}
- replylen = msg->curchunk_len + msg->curchunk_hdrlen;
- replylen -= len;
+ replylen = msg->curchunk_len + msg->curchunk_hdrlen - len;
curreply = len;
while (replylen > 0) {
len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
@@ -3743,7 +3717,7 @@ static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
return false;
}
- ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
+ ret = drm_dp_sideband_append_payload(msg, replyblock, len);
if (!ret) {
DRM_DEBUG_KMS("failed to build sideband msg\n");
return false;
@@ -3758,67 +3732,60 @@ static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
{
struct drm_dp_sideband_msg_tx *txmsg;
- struct drm_dp_mst_branch *mstb;
- struct drm_dp_sideband_msg_hdr *hdr = &mgr->down_rep_recv.initial_hdr;
- int slot = -1;
-
- if (!drm_dp_get_one_sb_msg(mgr, false))
- goto clear_down_rep_recv;
+ struct drm_dp_mst_branch *mstb = NULL;
+ struct drm_dp_sideband_msg_rx *msg = &mgr->down_rep_recv;
- if (!mgr->down_rep_recv.have_eomt)
- return 0;
+ if (!drm_dp_get_one_sb_msg(mgr, false, &mstb))
+ goto out;
- mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad);
- if (!mstb) {
- DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
- hdr->lct);
- goto clear_down_rep_recv;
- }
+ /* Multi-packet message transmission, don't clear the reply */
+ if (!msg->have_eomt)
+ goto out;
/* find the message */
- slot = hdr->seqno;
mutex_lock(&mgr->qlock);
- txmsg = mstb->tx_slots[slot];
- /* remove from slots */
+ txmsg = list_first_entry_or_null(&mgr->tx_msg_downq,
+ struct drm_dp_sideband_msg_tx, next);
mutex_unlock(&mgr->qlock);
- if (!txmsg) {
+ /* Were we actually expecting a response, and from this mstb? */
+ if (!txmsg || txmsg->dst != mstb) {
+ struct drm_dp_sideband_msg_hdr *hdr;
+ hdr = &msg->initial_hdr;
DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
mstb, hdr->seqno, hdr->lct, hdr->rad[0],
- mgr->down_rep_recv.msg[0]);
- goto no_msg;
+ msg->msg[0]);
+ goto out_clear_reply;
}
- drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply);
+ drm_dp_sideband_parse_reply(msg, &txmsg->reply);
- if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
+ if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
DRM_DEBUG_KMS("Got NAK reply: req 0x%02x (%s), reason 0x%02x (%s), nak data 0x%02x\n",
txmsg->reply.req_type,
drm_dp_mst_req_type_str(txmsg->reply.req_type),
txmsg->reply.u.nak.reason,
drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason),
txmsg->reply.u.nak.nak_data);
+ }
- memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+ memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx));
drm_dp_mst_topology_put_mstb(mstb);
mutex_lock(&mgr->qlock);
txmsg->state = DRM_DP_SIDEBAND_TX_RX;
- mstb->tx_slots[slot] = NULL;
- mgr->is_waiting_for_dwn_reply = false;
+ list_del(&txmsg->next);
mutex_unlock(&mgr->qlock);
wake_up_all(&mgr->tx_waitq);
return 0;
-no_msg:
- drm_dp_mst_topology_put_mstb(mstb);
-clear_down_rep_recv:
- mutex_lock(&mgr->qlock);
- mgr->is_waiting_for_dwn_reply = false;
- mutex_unlock(&mgr->qlock);
- memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+out_clear_reply:
+ memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx));
+out:
+ if (mstb)
+ drm_dp_mst_topology_put_mstb(mstb);
return 0;
}
@@ -3894,11 +3861,9 @@ static void drm_dp_mst_up_req_work(struct work_struct *work)
static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
{
- struct drm_dp_sideband_msg_hdr *hdr = &mgr->up_req_recv.initial_hdr;
struct drm_dp_pending_up_req *up_req;
- bool seqno;
- if (!drm_dp_get_one_sb_msg(mgr, true))
+ if (!drm_dp_get_one_sb_msg(mgr, true, NULL))
goto out;
if (!mgr->up_req_recv.have_eomt)
@@ -3911,7 +3876,6 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
}
INIT_LIST_HEAD(&up_req->next);
- seqno = hdr->seqno;
drm_dp_sideband_parse_req(&mgr->up_req_recv, &up_req->msg);
if (up_req->msg.req_type != DP_CONNECTION_STATUS_NOTIFY &&
@@ -3923,7 +3887,7 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
}
drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, up_req->msg.req_type,
- seqno, false);
+ false);
if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
const struct drm_dp_connection_status_notify *conn_stat =
@@ -3945,7 +3909,7 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
res_stat->available_pbn);
}
- up_req->hdr = *hdr;
+ up_req->hdr = mgr->up_req_recv.initial_hdr;
mutex_lock(&mgr->up_req_lock);
list_add_tail(&up_req->next, &mgr->up_req_list);
mutex_unlock(&mgr->up_req_lock);
@@ -4051,27 +4015,6 @@ out:
EXPORT_SYMBOL(drm_dp_mst_detect_port);
/**
- * drm_dp_mst_port_has_audio() - Check whether port has audio capability or not
- * @mgr: manager for this port
- * @port: unverified pointer to a port.
- *
- * This returns whether the port supports audio or not.
- */
-bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_port *port)
-{
- bool ret = false;
-
- port = drm_dp_mst_topology_get_port_validated(mgr, port);
- if (!port)
- return ret;
- ret = port->has_audio;
- drm_dp_mst_topology_put_port(port);
- return ret;
-}
-EXPORT_SYMBOL(drm_dp_mst_port_has_audio);
-
-/**
* drm_dp_mst_get_edid() - get EDID for an MST port
* @connector: toplevel connector to get EDID for
* @mgr: manager for this port
@@ -4448,42 +4391,58 @@ fail:
return ret;
}
+static int do_get_act_status(struct drm_dp_aux *aux)
+{
+ int ret;
+ u8 status;
+
+ ret = drm_dp_dpcd_readb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
+ if (ret < 0)
+ return ret;
+
+ return status;
+}
/**
- * drm_dp_check_act_status() - Check ACT handled status.
+ * drm_dp_check_act_status() - Polls for ACT handled status.
* @mgr: manager to use
*
- * Check the payload status bits in the DPCD for ACT handled completion.
+ * Tries waiting for the MST hub to finish updating its payload table by
+ * polling for the ACT handled bit for up to 3 seconds (yes, some hubs really
+ * take that long).
+ *
+ * Returns:
+ * 0 if the ACT was handled in time, negative error code on failure.
*/
int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
{
- u8 status;
- int ret;
- int count = 0;
-
- do {
- ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
-
- if (ret < 0) {
- DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
- goto fail;
- }
-
- if (status & DP_PAYLOAD_ACT_HANDLED)
- break;
- count++;
- udelay(100);
-
- } while (count < 30);
-
- if (!(status & DP_PAYLOAD_ACT_HANDLED)) {
- DRM_DEBUG_KMS("failed to get ACT bit %d after %d retries\n", status, count);
- ret = -EINVAL;
- goto fail;
+ /*
+ * There doesn't seem to be any recommended retry count or timeout in
+ * the MST specification. Since some hubs have been observed to take
+ * over 1 second to update their payload allocations under certain
+ * conditions, we use a rather large timeout value.
+ */
+ const int timeout_ms = 3000;
+ int ret, status;
+
+ ret = readx_poll_timeout(do_get_act_status, mgr->aux, status,
+ status & DP_PAYLOAD_ACT_HANDLED || status < 0,
+ 200, timeout_ms * USEC_PER_MSEC);
+ if (ret < 0 && status >= 0) {
+ DRM_ERROR("Failed to get ACT after %dms, last status: %02x\n",
+ timeout_ms, status);
+ return -EINVAL;
+ } else if (status < 0) {
+ /*
+ * Failure here isn't unexpected - the hub may have
+ * just been unplugged
+ */
+ DRM_DEBUG_KMS("Failed to read payload table status: %d\n",
+ status);
+ return status;
}
+
return 0;
-fail:
- return ret;
}
EXPORT_SYMBOL(drm_dp_check_act_status);
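A rough sketch of the intended call sequence (assumed, not taken from this patch): after the second payload update step, a driver waits for the ACT before enabling the stream.

#include <drm/drm_dp_mst_helper.h>

static int example_finish_payload_update(struct drm_dp_mst_topology_mgr *mgr)
{
	int ret;

	ret = drm_dp_update_payload_part2(mgr);
	if (ret)
		return ret;

	/* may block for up to 3 seconds while the hub applies the table */
	return drm_dp_check_act_status(mgr);
}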
@@ -4674,28 +4633,18 @@ static void drm_dp_tx_work(struct work_struct *work)
struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
mutex_lock(&mgr->qlock);
- if (!list_empty(&mgr->tx_msg_downq) && !mgr->is_waiting_for_dwn_reply)
+ if (!list_empty(&mgr->tx_msg_downq))
process_single_down_tx_qlock(mgr);
mutex_unlock(&mgr->qlock);
}
-static inline void drm_dp_destroy_connector(struct drm_dp_mst_port *port)
+static inline void
+drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port)
{
- if (!port->connector)
- return;
-
- if (port->mgr->cbs->destroy_connector) {
- port->mgr->cbs->destroy_connector(port->mgr, port->connector);
- } else {
+ if (port->connector) {
drm_connector_unregister(port->connector);
drm_connector_put(port->connector);
}
-}
-
-static inline void
-drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port)
-{
- drm_dp_destroy_connector(port);
drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE, port->mcs);
drm_dp_mst_put_port_malloc(port);
@@ -4705,26 +4654,25 @@ static inline void
drm_dp_delayed_destroy_mstb(struct drm_dp_mst_branch *mstb)
{
struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
- struct drm_dp_mst_port *port, *tmp;
+ struct drm_dp_mst_port *port, *port_tmp;
+ struct drm_dp_sideband_msg_tx *txmsg, *txmsg_tmp;
bool wake_tx = false;
mutex_lock(&mgr->lock);
- list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
+ list_for_each_entry_safe(port, port_tmp, &mstb->ports, next) {
list_del(&port->next);
drm_dp_mst_topology_put_port(port);
}
mutex_unlock(&mgr->lock);
- /* drop any tx slots msg */
+ /* drop any queued tx messages destined for this mstb */
mutex_lock(&mstb->mgr->qlock);
- if (mstb->tx_slots[0]) {
- mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
- mstb->tx_slots[0] = NULL;
- wake_tx = true;
- }
- if (mstb->tx_slots[1]) {
- mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
- mstb->tx_slots[1] = NULL;
+ list_for_each_entry_safe(txmsg, txmsg_tmp, &mgr->tx_msg_downq, next) {
+ if (txmsg->dst != mstb)
+ continue;
+
+ txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
+ list_del(&txmsg->next);
wake_tx = true;
}
mutex_unlock(&mstb->mgr->qlock);
@@ -5499,7 +5447,7 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
{
struct drm_dp_mst_port *immediate_upstream_port;
struct drm_dp_mst_port *fec_port;
- struct drm_dp_desc desc = { 0 };
+ struct drm_dp_desc desc = { };
u8 endpoint_fec;
u8 endpoint_dsc;
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 7b1a628d1f6e..bc38322f306e 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -39,6 +39,7 @@
#include <drm/drm_color_mgmt.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
+#include <drm/drm_managed.h>
#include <drm/drm_mode_object.h>
#include <drm/drm_print.h>
@@ -92,13 +93,27 @@ static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
}
}
+static void drm_minor_alloc_release(struct drm_device *dev, void *data)
+{
+ struct drm_minor *minor = data;
+ unsigned long flags;
+
+ WARN_ON(dev != minor->dev);
+
+ put_device(minor->kdev);
+
+ spin_lock_irqsave(&drm_minor_lock, flags);
+ idr_remove(&drm_minors_idr, minor->index);
+ spin_unlock_irqrestore(&drm_minor_lock, flags);
+}
+
static int drm_minor_alloc(struct drm_device *dev, unsigned int type)
{
struct drm_minor *minor;
unsigned long flags;
int r;
- minor = kzalloc(sizeof(*minor), GFP_KERNEL);
+ minor = drmm_kzalloc(dev, sizeof(*minor), GFP_KERNEL);
if (!minor)
return -ENOMEM;
@@ -116,46 +131,20 @@ static int drm_minor_alloc(struct drm_device *dev, unsigned int type)
idr_preload_end();
if (r < 0)
- goto err_free;
+ return r;
minor->index = r;
+ r = drmm_add_action_or_reset(dev, drm_minor_alloc_release, minor);
+ if (r)
+ return r;
+
minor->kdev = drm_sysfs_minor_alloc(minor);
- if (IS_ERR(minor->kdev)) {
- r = PTR_ERR(minor->kdev);
- goto err_index;
- }
+ if (IS_ERR(minor->kdev))
+ return PTR_ERR(minor->kdev);
*drm_minor_get_slot(dev, type) = minor;
return 0;
-
-err_index:
- spin_lock_irqsave(&drm_minor_lock, flags);
- idr_remove(&drm_minors_idr, minor->index);
- spin_unlock_irqrestore(&drm_minor_lock, flags);
-err_free:
- kfree(minor);
- return r;
-}
-
-static void drm_minor_free(struct drm_device *dev, unsigned int type)
-{
- struct drm_minor **slot, *minor;
- unsigned long flags;
-
- slot = drm_minor_get_slot(dev, type);
- minor = *slot;
- if (!minor)
- return;
-
- put_device(minor->kdev);
-
- spin_lock_irqsave(&drm_minor_lock, flags);
- idr_remove(&drm_minors_idr, minor->index);
- spin_unlock_irqrestore(&drm_minor_lock, flags);
-
- kfree(minor);
- *slot = NULL;
}
static int drm_minor_register(struct drm_device *dev, unsigned int type)
@@ -270,17 +259,22 @@ void drm_minor_release(struct drm_minor *minor)
* any other resources allocated at device initialization and drop the driver's
* reference to &drm_device using drm_dev_put().
*
- * Note that the lifetime rules for &drm_device instance has still a lot of
- * historical baggage. Hence use the reference counting provided by
- * drm_dev_get() and drm_dev_put() only carefully.
+ * Note that any allocation or resource which is visible to userspace must be
+ * released only when the final drm_dev_put() is called, and not when the
+ * driver is unbound from the underlying physical struct &device. Best to use
+ * &drm_device managed resources with drmm_add_action(), drmm_kmalloc() and
+ * related functions.
+ *
+ * devres managed resources like devm_kmalloc() can only be used for resources
+ * directly related to the underlying hardware device, and only used in code
+ * paths fully protected by drm_dev_enter() and drm_dev_exit().
*
* Display driver example
* ~~~~~~~~~~~~~~~~~~~~~~
*
* The following example shows a typical structure of a DRM display driver.
* The example focus on the probe() function and the other functions that is
- * almost always present and serves as a demonstration of devm_drm_dev_init()
- * usage with its accompanying drm_driver->release callback.
+ * almost always present and serves as a demonstration of devm_drm_dev_init().
*
* .. code-block:: c
*
@@ -290,19 +284,8 @@ void drm_minor_release(struct drm_minor *minor)
* struct clk *pclk;
* };
*
- * static void driver_drm_release(struct drm_device *drm)
- * {
- * struct driver_device *priv = container_of(...);
- *
- * drm_mode_config_cleanup(drm);
- * drm_dev_fini(drm);
- * kfree(priv->userspace_facing);
- * kfree(priv);
- * }
- *
* static struct drm_driver driver_drm_driver = {
* [...]
- * .release = driver_drm_release,
* };
*
* static int driver_probe(struct platform_device *pdev)
@@ -322,13 +305,16 @@ void drm_minor_release(struct drm_minor *minor)
*
* ret = devm_drm_dev_init(&pdev->dev, drm, &driver_drm_driver);
* if (ret) {
- * kfree(drm);
+ * kfree(priv);
* return ret;
* }
+ * drmm_add_final_kfree(drm, priv);
*
- * drm_mode_config_init(drm);
+ * ret = drmm_mode_config_init(drm);
+ * if (ret)
+ * return ret;
*
- * priv->userspace_facing = kzalloc(..., GFP_KERNEL);
+ * priv->userspace_facing = drmm_kzalloc(..., GFP_KERNEL);
* if (!priv->userspace_facing)
* return -ENOMEM;
*
@@ -580,6 +566,23 @@ static void drm_fs_inode_free(struct inode *inode)
* used.
*/
+static void drm_dev_init_release(struct drm_device *dev, void *res)
+{
+ drm_legacy_ctxbitmap_cleanup(dev);
+ drm_legacy_remove_map_hash(dev);
+ drm_fs_inode_free(dev->anon_inode);
+
+ put_device(dev->dev);
+ /* Prevent use-after-free in drm_managed_release when debugging is
+ * enabled. Slightly awkward, but can't really be helped. */
+ dev->dev = NULL;
+ mutex_destroy(&dev->master_mutex);
+ mutex_destroy(&dev->clientlist_mutex);
+ mutex_destroy(&dev->filelist_mutex);
+ mutex_destroy(&dev->struct_mutex);
+ drm_legacy_destroy_members(dev);
+}
+
/**
* drm_dev_init - Initialise new DRM device
* @dev: DRM device
@@ -608,6 +611,9 @@ static void drm_fs_inode_free(struct inode *inode)
* arbitrary offset, you must supply a &drm_driver.release callback and control
* the finalization explicitly.
*
+ * Note that drivers must call drmm_add_final_kfree() after this function has
+ * completed successfully.
+ *
* RETURNS:
* 0 on success, or error code on failure.
*/
@@ -629,6 +635,9 @@ int drm_dev_init(struct drm_device *dev,
dev->dev = get_device(parent);
dev->driver = driver;
+ INIT_LIST_HEAD(&dev->managed.resources);
+ spin_lock_init(&dev->managed.lock);
+
/* no per-device feature limits by default */
dev->driver_features = ~0u;
@@ -644,26 +653,30 @@ int drm_dev_init(struct drm_device *dev,
mutex_init(&dev->clientlist_mutex);
mutex_init(&dev->master_mutex);
+ ret = drmm_add_action(dev, drm_dev_init_release, NULL);
+ if (ret)
+ return ret;
+
dev->anon_inode = drm_fs_inode_new();
if (IS_ERR(dev->anon_inode)) {
ret = PTR_ERR(dev->anon_inode);
DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret);
- goto err_free;
+ goto err;
}
if (drm_core_check_feature(dev, DRIVER_RENDER)) {
ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
if (ret)
- goto err_minors;
+ goto err;
}
ret = drm_minor_alloc(dev, DRM_MINOR_PRIMARY);
if (ret)
- goto err_minors;
+ goto err;
ret = drm_legacy_create_map_hash(dev);
if (ret)
- goto err_minors;
+ goto err;
drm_legacy_ctxbitmap_init(dev);
@@ -671,33 +684,19 @@ int drm_dev_init(struct drm_device *dev,
ret = drm_gem_init(dev);
if (ret) {
DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
- goto err_ctxbitmap;
+ goto err;
}
}
ret = drm_dev_set_unique(dev, dev_name(parent));
if (ret)
- goto err_setunique;
+ goto err;
return 0;
-err_setunique:
- if (drm_core_check_feature(dev, DRIVER_GEM))
- drm_gem_destroy(dev);
-err_ctxbitmap:
- drm_legacy_ctxbitmap_cleanup(dev);
- drm_legacy_remove_map_hash(dev);
-err_minors:
- drm_minor_free(dev, DRM_MINOR_PRIMARY);
- drm_minor_free(dev, DRM_MINOR_RENDER);
- drm_fs_inode_free(dev->anon_inode);
-err_free:
- put_device(dev->dev);
- mutex_destroy(&dev->master_mutex);
- mutex_destroy(&dev->clientlist_mutex);
- mutex_destroy(&dev->filelist_mutex);
- mutex_destroy(&dev->struct_mutex);
- drm_legacy_destroy_members(dev);
+err:
+ drm_managed_release(dev);
+
return ret;
}
EXPORT_SYMBOL(drm_dev_init);
@@ -714,8 +713,10 @@ static void devm_drm_dev_init_release(void *data)
* @driver: DRM driver
*
* Managed drm_dev_init(). The DRM device initialized with this function is
- * automatically put on driver detach using drm_dev_put(). You must supply a
- * &drm_driver.release callback to control the finalization explicitly.
+ * automatically put on driver detach using drm_dev_put().
+ *
+ * Note that drivers must call drmm_add_final_kfree() after this function has
+ * completed successfully.
*
* RETURNS:
* 0 on success, or error code on failure.
@@ -726,9 +727,6 @@ int devm_drm_dev_init(struct device *parent,
{
int ret;
- if (WARN_ON(!driver->release))
- return -EINVAL;
-
ret = drm_dev_init(dev, driver, parent);
if (ret)
return ret;
@@ -741,42 +739,28 @@ int devm_drm_dev_init(struct device *parent,
}
EXPORT_SYMBOL(devm_drm_dev_init);
-/**
- * drm_dev_fini - Finalize a dead DRM device
- * @dev: DRM device
- *
- * Finalize a dead DRM device. This is the converse to drm_dev_init() and
- * frees up all data allocated by it. All driver private data should be
- * finalized first. Note that this function does not free the @dev, that is
- * left to the caller.
- *
- * The ref-count of @dev must be zero, and drm_dev_fini() should only be called
- * from a &drm_driver.release callback.
- */
-void drm_dev_fini(struct drm_device *dev)
+void *__devm_drm_dev_alloc(struct device *parent, struct drm_driver *driver,
+ size_t size, size_t offset)
{
- drm_vblank_cleanup(dev);
-
- if (drm_core_check_feature(dev, DRIVER_GEM))
- drm_gem_destroy(dev);
-
- drm_legacy_ctxbitmap_cleanup(dev);
- drm_legacy_remove_map_hash(dev);
- drm_fs_inode_free(dev->anon_inode);
+ void *container;
+ struct drm_device *drm;
+ int ret;
- drm_minor_free(dev, DRM_MINOR_PRIMARY);
- drm_minor_free(dev, DRM_MINOR_RENDER);
+ container = kzalloc(size, GFP_KERNEL);
+ if (!container)
+ return ERR_PTR(-ENOMEM);
- put_device(dev->dev);
+ drm = container + offset;
+ ret = devm_drm_dev_init(parent, drm, driver);
+ if (ret) {
+ kfree(container);
+ return ERR_PTR(ret);
+ }
+ drmm_add_final_kfree(drm, container);
- mutex_destroy(&dev->master_mutex);
- mutex_destroy(&dev->clientlist_mutex);
- mutex_destroy(&dev->filelist_mutex);
- mutex_destroy(&dev->struct_mutex);
- drm_legacy_destroy_members(dev);
- kfree(dev->unique);
+ return container;
}
-EXPORT_SYMBOL(drm_dev_fini);
+EXPORT_SYMBOL(__devm_drm_dev_alloc);
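A minimal sketch of the allocation pattern __devm_drm_dev_alloc() backs, assuming the devm_drm_dev_alloc() convenience macro that wraps it; all names other than the DRM helpers are made up for illustration:

#include <linux/platform_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_mode_config.h>

struct example_device {
	struct drm_device drm;
	void *userspace_facing;
};

static struct drm_driver example_drm_driver = {
	/* fops, name, feature flags etc. elided in this sketch */
};

static int example_probe(struct platform_device *pdev)
{
	struct example_device *priv;
	int ret;

	/* allocates the container and ties drm_dev_put() to driver detach */
	priv = devm_drm_dev_alloc(&pdev->dev, &example_drm_driver,
				  struct example_device, drm);
	if (IS_ERR(priv))
		return PTR_ERR(priv);

	ret = drmm_mode_config_init(&priv->drm);
	if (ret)
		return ret;

	return drm_dev_register(&priv->drm, 0);
}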
/**
* drm_dev_alloc - Allocate new DRM device
@@ -816,6 +800,8 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
return ERR_PTR(ret);
}
+ drmm_add_final_kfree(dev, dev);
+
return dev;
}
EXPORT_SYMBOL(drm_dev_alloc);
@@ -824,12 +810,13 @@ static void drm_dev_release(struct kref *ref)
{
struct drm_device *dev = container_of(ref, struct drm_device, ref);
- if (dev->driver->release) {
+ if (dev->driver->release)
dev->driver->release(dev);
- } else {
- drm_dev_fini(dev);
- kfree(dev);
- }
+
+ drm_managed_release(dev);
+
+ if (dev->managed.final_kfree)
+ kfree(dev->managed.final_kfree);
}
/**
@@ -946,6 +933,11 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
struct drm_driver *driver = dev->driver;
int ret;
+ if (!driver->load)
+ drm_mode_config_validate(dev);
+
+ WARN_ON(!dev->managed.final_kfree);
+
if (drm_dev_needs_global_mutex(dev))
mutex_lock(&drm_global_mutex);
@@ -1046,8 +1038,8 @@ EXPORT_SYMBOL(drm_dev_unregister);
*/
int drm_dev_set_unique(struct drm_device *dev, const char *name)
{
- kfree(dev->unique);
- dev->unique = kstrdup(name, GFP_KERNEL);
+ drmm_kfree(dev, dev->unique);
+ dev->unique = drmm_kstrdup(dev, name, GFP_KERNEL);
return dev->unique ? 0 : -ENOMEM;
}
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index d96e3ce3e535..fed653f13c26 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -1584,8 +1584,6 @@ module_param_named(edid_fixup, edid_fixup, int, 0400);
MODULE_PARM_DESC(edid_fixup,
"Minimum number of valid EDID header bytes (0-8, default 6)");
-static void drm_get_displayid(struct drm_connector *connector,
- struct edid *edid);
static int validate_displayid(u8 *displayid, int length, int idx);
static int drm_edid_block_checksum(const u8 *raw_edid)
@@ -2019,18 +2017,13 @@ EXPORT_SYMBOL(drm_probe_ddc);
struct edid *drm_get_edid(struct drm_connector *connector,
struct i2c_adapter *adapter)
{
- struct edid *edid;
-
if (connector->force == DRM_FORCE_OFF)
return NULL;
if (connector->force == DRM_FORCE_UNSPECIFIED && !drm_probe_ddc(adapter))
return NULL;
- edid = drm_do_get_edid(connector, drm_do_probe_ddc_edid, adapter);
- if (edid)
- drm_get_displayid(connector, edid);
- return edid;
+ return drm_do_get_edid(connector, drm_do_probe_ddc_edid, adapter);
}
EXPORT_SYMBOL(drm_get_edid);
@@ -2388,6 +2381,14 @@ bad_std_timing(u8 a, u8 b)
(a == 0x20 && b == 0x20);
}
+static int drm_mode_hsync(const struct drm_display_mode *mode)
+{
+ if (mode->htotal <= 0)
+ return 0;
+
+ return DIV_ROUND_CLOSEST(mode->clock, mode->htotal);
+}
+
/**
* drm_mode_std - convert standard mode info (width, height, refresh) into mode
* @connector: connector of for the EDID block
@@ -3213,16 +3214,33 @@ static u8 *drm_find_edid_extension(const struct edid *edid, int ext_id)
}
-static u8 *drm_find_displayid_extension(const struct edid *edid)
+static u8 *drm_find_displayid_extension(const struct edid *edid,
+ int *length, int *idx)
{
- return drm_find_edid_extension(edid, DISPLAYID_EXT);
+ u8 *displayid = drm_find_edid_extension(edid, DISPLAYID_EXT);
+ struct displayid_hdr *base;
+ int ret;
+
+ if (!displayid)
+ return NULL;
+
+ /* EDID extensions block checksum isn't for us */
+ *length = EDID_LENGTH - 1;
+ *idx = 1;
+
+ ret = validate_displayid(displayid, *length, *idx);
+ if (ret)
+ return NULL;
+
+ base = (struct displayid_hdr *)&displayid[*idx];
+ *length = *idx + sizeof(*base) + base->bytes;
+
+ return displayid;
}
static u8 *drm_find_cea_extension(const struct edid *edid)
{
- int ret;
- int idx = 1;
- int length = EDID_LENGTH;
+ int length, idx;
struct displayid_block *block;
u8 *cea;
u8 *displayid;
@@ -3233,14 +3251,10 @@ static u8 *drm_find_cea_extension(const struct edid *edid)
return cea;
/* CEA blocks can also be found embedded in a DisplayID block */
- displayid = drm_find_displayid_extension(edid);
+ displayid = drm_find_displayid_extension(edid, &length, &idx);
if (!displayid)
return NULL;
- ret = validate_displayid(displayid, length, idx);
- if (ret)
- return NULL;
-
idx += sizeof(struct displayid_hdr);
for_each_displayid_db(displayid, block, idx, length) {
if (block->tag == DATA_BLOCK_CTA) {
@@ -5085,7 +5099,7 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi
static int validate_displayid(u8 *displayid, int length, int idx)
{
- int i;
+ int i, dispid_length;
u8 csum = 0;
struct displayid_hdr *base;
@@ -5094,15 +5108,18 @@ static int validate_displayid(u8 *displayid, int length, int idx)
DRM_DEBUG_KMS("base revision 0x%x, length %d, %d %d\n",
base->rev, base->bytes, base->prod_id, base->ext_count);
- if (base->bytes + 5 > length - idx)
+ /* +1 for DispID checksum */
+ dispid_length = sizeof(*base) + base->bytes + 1;
+ if (dispid_length > length - idx)
return -EINVAL;
- for (i = idx; i <= base->bytes + 5; i++) {
- csum += displayid[i];
- }
+
+ for (i = 0; i < dispid_length; i++)
+ csum += displayid[idx + i];
if (csum) {
DRM_NOTE("DisplayID checksum invalid, remainder is %d\n", csum);
return -EINVAL;
}
+
return 0;
}
@@ -5181,20 +5198,14 @@ static int add_displayid_detailed_modes(struct drm_connector *connector,
struct edid *edid)
{
u8 *displayid;
- int ret;
- int idx = 1;
- int length = EDID_LENGTH;
+ int length, idx;
struct displayid_block *block;
int num_modes = 0;
- displayid = drm_find_displayid_extension(edid);
+ displayid = drm_find_displayid_extension(edid, &length, &idx);
if (!displayid)
return 0;
- ret = validate_displayid(displayid, length, idx);
- if (ret)
- return 0;
-
idx += sizeof(struct displayid_hdr);
for_each_displayid_db(displayid, block, idx, length) {
switch (block->tag) {
@@ -5783,9 +5794,9 @@ drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
EXPORT_SYMBOL(drm_hdmi_vendor_infoframe_from_display_mode);
static int drm_parse_tiled_block(struct drm_connector *connector,
- struct displayid_block *block)
+ const struct displayid_block *block)
{
- struct displayid_tiled_block *tile = (struct displayid_tiled_block *)block;
+ const struct displayid_tiled_block *tile = (struct displayid_tiled_block *)block;
u16 w, h;
u8 tile_v_loc, tile_h_loc;
u8 num_v_tile, num_h_tile;
@@ -5836,22 +5847,12 @@ static int drm_parse_tiled_block(struct drm_connector *connector,
return 0;
}
-static int drm_parse_display_id(struct drm_connector *connector,
- u8 *displayid, int length,
- bool is_edid_extension)
+static int drm_displayid_parse_tiled(struct drm_connector *connector,
+ const u8 *displayid, int length, int idx)
{
- /* if this is an EDID extension the first byte will be 0x70 */
- int idx = 0;
- struct displayid_block *block;
+ const struct displayid_block *block;
int ret;
- if (is_edid_extension)
- idx = 1;
-
- ret = validate_displayid(displayid, length, idx);
- if (ret)
- return ret;
-
idx += sizeof(struct displayid_hdr);
for_each_displayid_db(displayid, block, idx, length) {
DRM_DEBUG_KMS("block id 0x%x, rev %d, len %d\n",
@@ -5863,12 +5864,6 @@ static int drm_parse_display_id(struct drm_connector *connector,
if (ret)
return ret;
break;
- case DATA_BLOCK_TYPE_1_DETAILED_TIMING:
- /* handled in mode gathering code. */
- break;
- case DATA_BLOCK_CTA:
- /* handled in the cea parser code. */
- break;
default:
DRM_DEBUG_KMS("found DisplayID tag 0x%x, unhandled\n", block->tag);
break;
@@ -5877,19 +5872,21 @@ static int drm_parse_display_id(struct drm_connector *connector,
return 0;
}
-static void drm_get_displayid(struct drm_connector *connector,
- struct edid *edid)
+void drm_update_tile_info(struct drm_connector *connector,
+ const struct edid *edid)
{
- void *displayid = NULL;
+ const void *displayid = NULL;
+ int length, idx;
int ret;
+
connector->has_tile = false;
- displayid = drm_find_displayid_extension(edid);
+ displayid = drm_find_displayid_extension(edid, &length, &idx);
if (!displayid) {
/* drop reference to any tile group we had */
goto out_drop_ref;
}
- ret = drm_parse_display_id(connector, displayid, EDID_LENGTH, true);
+ ret = drm_displayid_parse_tiled(connector, displayid, length, idx);
if (ret < 0)
goto out_drop_ref;
if (!connector->has_tile)
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index 9801c0333eca..cb2349ad338d 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -2,7 +2,7 @@
/*
* drm kms/fb cma (contiguous memory allocator) helper functions
*
- * Copyright (C) 2012 Analog Device Inc.
+ * Copyright (C) 2012 Analog Devices Inc.
* Author: Lars-Peter Clausen <lars@metafoo.de>
*
* Based on udl_fbdev.c
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index a9771de4d17e..170aa7689110 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -307,13 +307,13 @@ static void drm_fb_helper_sysrq(int dummy1)
schedule_work(&drm_fb_helper_restore_work);
}
-static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = {
+static const struct sysrq_key_op sysrq_drm_fb_helper_restore_op = {
.handler = drm_fb_helper_sysrq,
.help_msg = "force-fb(V)",
.action_msg = "Restore framebuffer console",
};
#else
-static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { };
+static const struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { };
#endif
static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
@@ -514,6 +514,14 @@ struct fb_info *drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper)
if (ret)
goto err_release;
+ /*
+ * TODO: We really should be smarter here and alloc an aperture
+ * for each IORESOURCE_MEM resource helper->dev->dev has and also
+ * init the ranges of the apertures based on the resources.
+ * Note some drivers currently count on there being only 1 empty
+ * aperture and fill this themselves; these will need to be dealt
+ * with somehow when fixing this.
+ */
info->apertures = alloc_apertures(1);
if (!info->apertures) {
ret = -ENOMEM;
@@ -2162,6 +2170,8 @@ static const struct drm_client_funcs drm_fbdev_client_funcs = {
*
* This function sets up generic fbdev emulation for drivers that supports
* dumb buffers with a virtual address and that can be mmap'ed.
+ * drm_fbdev_generic_setup() shall be called after the DRM driver has
+ * registered the new DRM device with drm_dev_register().
*
* Restore, hotplug events and teardown are all taken care of. Drivers that do
* suspend/resume need to call drm_fb_helper_set_suspend_unlocked() themselves.
@@ -2178,29 +2188,30 @@ static const struct drm_client_funcs drm_fbdev_client_funcs = {
* Setup will be retried on the next hotplug event.
*
* The fbdev is destroyed by drm_dev_unregister().
- *
- * Returns:
- * Zero on success or negative error code on failure.
*/
-int drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
+void drm_fbdev_generic_setup(struct drm_device *dev,
+ unsigned int preferred_bpp)
{
struct drm_fb_helper *fb_helper;
int ret;
- WARN(dev->fb_helper, "fb_helper is already set!\n");
+ drm_WARN(dev, !dev->registered, "Device has not been registered.\n");
+ drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n");
if (!drm_fbdev_emulation)
- return 0;
+ return;
fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL);
- if (!fb_helper)
- return -ENOMEM;
+ if (!fb_helper) {
+ drm_err(dev, "Failed to allocate fb_helper\n");
+ return;
+ }
ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_client_funcs);
if (ret) {
kfree(fb_helper);
drm_err(dev, "Failed to register client: %d\n", ret);
- return ret;
+ return;
}
if (!preferred_bpp)
@@ -2214,8 +2225,6 @@ int drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
drm_client_register(&fb_helper->client);
-
- return 0;
}
EXPORT_SYMBOL(drm_fbdev_generic_setup);
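With the void return type, a driver's registration path would now look roughly like this (assumed driver-side snippet, not from this patch); fbdev setup failures are only logged and never fail the probe:

#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>

static int example_register(struct drm_device *drm)
{
	int ret;

	ret = drm_dev_register(drm, 0);
	if (ret)
		return ret;

	/* must come after drm_dev_register(); errors are only logged */
	drm_fbdev_generic_setup(drm, 32);

	return 0;
}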
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index eb009d3ab48f..2f12b8c1d01c 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -569,9 +569,6 @@ ssize_t drm_read(struct file *filp, char __user *buffer,
struct drm_device *dev = file_priv->minor->dev;
ssize_t ret;
- if (!access_ok(buffer, count))
- return -EFAULT;
-
ret = mutex_lock_interruptible(&file_priv->event_read_lock);
if (ret)
return ret;
@@ -613,7 +610,8 @@ put_back_event:
file_priv->event_space -= length;
list_add(&e->link, &file_priv->event_list);
spin_unlock_irq(&dev->event_lock);
- wake_up_interruptible(&file_priv->event_wait);
+ wake_up_interruptible_poll(&file_priv->event_wait,
+ EPOLLIN | EPOLLRDNORM);
break;
}
@@ -809,7 +807,8 @@ void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
list_del(&e->pending_link);
list_add_tail(&e->link,
&e->file_priv->event_list);
- wake_up_interruptible(&e->file_priv->event_wait);
+ wake_up_interruptible_poll(&e->file_priv->event_wait,
+ EPOLLIN | EPOLLRDNORM);
}
EXPORT_SYMBOL(drm_send_event_locked);
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index 57ac94ce9b9e..0375b3d7f8d0 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -1207,10 +1207,10 @@ static const struct drm_info_list drm_framebuffer_debugfs_list[] = {
{ "framebuffer", drm_framebuffer_info, 0 },
};
-int drm_framebuffer_debugfs_init(struct drm_minor *minor)
+void drm_framebuffer_debugfs_init(struct drm_minor *minor)
{
- return drm_debugfs_create_files(drm_framebuffer_debugfs_list,
- ARRAY_SIZE(drm_framebuffer_debugfs_list),
- minor->debugfs_root, minor);
+ drm_debugfs_create_files(drm_framebuffer_debugfs_list,
+ ARRAY_SIZE(drm_framebuffer_debugfs_list),
+ minor->debugfs_root, minor);
}
#endif
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 37627d06fb06..7bf628e13023 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -44,6 +44,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
+#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <drm/drm_vma_manager.h>
@@ -77,6 +78,12 @@
* up at a later date, and as our interface with shmfs for memory allocation.
*/
+static void
+drm_gem_init_release(struct drm_device *dev, void *ptr)
+{
+ drm_vma_offset_manager_destroy(dev->vma_offset_manager);
+}
+
/**
* drm_gem_init - Initialize the GEM device fields
* @dev: drm_devic structure to initialize
@@ -89,7 +96,8 @@ drm_gem_init(struct drm_device *dev)
mutex_init(&dev->object_name_lock);
idr_init_base(&dev->object_name_idr, 1);
- vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
+ vma_offset_manager = drmm_kzalloc(dev, sizeof(*vma_offset_manager),
+ GFP_KERNEL);
if (!vma_offset_manager) {
DRM_ERROR("out of memory\n");
return -ENOMEM;
@@ -100,16 +108,7 @@ drm_gem_init(struct drm_device *dev)
DRM_FILE_PAGE_OFFSET_START,
DRM_FILE_PAGE_OFFSET_SIZE);
- return 0;
-}
-
-void
-drm_gem_destroy(struct drm_device *dev)
-{
-
- drm_vma_offset_manager_destroy(dev->vma_offset_manager);
- kfree(dev->vma_offset_manager);
- dev->vma_offset_manager = NULL;
+ return drmm_add_action(dev, drm_gem_init_release, NULL);
}
/**
@@ -432,7 +431,7 @@ err_unref:
* drm_gem_handle_create - create a gem handle for an object
* @file_priv: drm file-private structure to register the handle for
* @obj: object to register
- * @handlep: pionter to return the created handle to the caller
+ * @handlep: pointer to return the created handle to the caller
*
* Create a handle for this object. This adds a handle reference to the object,
* which includes a regular reference count. Callers will likely want to
diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
index 3a7ace19a902..ccc2c71fa491 100644
--- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c
+++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
@@ -21,6 +21,13 @@
#include <drm/drm_modeset_helper.h>
#include <drm/drm_simple_kms_helper.h>
+#define AFBC_HEADER_SIZE 16
+#define AFBC_TH_LAYOUT_ALIGNMENT 8
+#define AFBC_HDR_ALIGN 64
+#define AFBC_SUPERBLOCK_PIXELS 256
+#define AFBC_SUPERBLOCK_ALIGNMENT 128
+#define AFBC_TH_BODY_START_ALIGNMENT 4096
+
/**
* DOC: overview
*
@@ -54,32 +61,25 @@ struct drm_gem_object *drm_gem_fb_get_obj(struct drm_framebuffer *fb,
}
EXPORT_SYMBOL_GPL(drm_gem_fb_get_obj);
-static struct drm_framebuffer *
-drm_gem_fb_alloc(struct drm_device *dev,
+static int
+drm_gem_fb_init(struct drm_device *dev,
+ struct drm_framebuffer *fb,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object **obj, unsigned int num_planes,
const struct drm_framebuffer_funcs *funcs)
{
- struct drm_framebuffer *fb;
int ret, i;
- fb = kzalloc(sizeof(*fb), GFP_KERNEL);
- if (!fb)
- return ERR_PTR(-ENOMEM);
-
drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
for (i = 0; i < num_planes; i++)
fb->obj[i] = obj[i];
ret = drm_framebuffer_init(dev, fb, funcs);
- if (ret) {
+ if (ret)
drm_err(dev, "Failed to init framebuffer: %d\n", ret);
- kfree(fb);
- return ERR_PTR(ret);
- }
- return fb;
+ return ret;
}
/**
@@ -123,10 +123,13 @@ int drm_gem_fb_create_handle(struct drm_framebuffer *fb, struct drm_file *file,
EXPORT_SYMBOL(drm_gem_fb_create_handle);
/**
- * drm_gem_fb_create_with_funcs() - Helper function for the
- * &drm_mode_config_funcs.fb_create
- * callback
+ * drm_gem_fb_init_with_funcs() - Helper function for implementing
+ * &drm_mode_config_funcs.fb_create
+ * callback in cases when the driver
+ * allocates a subclass of
+ * struct drm_framebuffer
* @dev: DRM device
+ * @fb: framebuffer object
* @file: DRM file that holds the GEM handle(s) backing the framebuffer
* @mode_cmd: Metadata from the userspace framebuffer creation request
* @funcs: vtable to be used for the new framebuffer object
@@ -134,23 +137,26 @@ EXPORT_SYMBOL(drm_gem_fb_create_handle);
* This function can be used to set &drm_framebuffer_funcs for drivers that need
* custom framebuffer callbacks. Use drm_gem_fb_create() if you don't need to
* change &drm_framebuffer_funcs. The function does buffer size validation.
+ * The buffer size validation covers only the general case, so callers should
+ * make sure the checks are appropriate for their use case, or at the very
+ * least do not conflict with it.
*
* Returns:
- * Pointer to a &drm_framebuffer on success or an error pointer on failure.
+ * Zero or a negative error code.
*/
-struct drm_framebuffer *
-drm_gem_fb_create_with_funcs(struct drm_device *dev, struct drm_file *file,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- const struct drm_framebuffer_funcs *funcs)
+int drm_gem_fb_init_with_funcs(struct drm_device *dev,
+ struct drm_framebuffer *fb,
+ struct drm_file *file,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ const struct drm_framebuffer_funcs *funcs)
{
const struct drm_format_info *info;
struct drm_gem_object *objs[4];
- struct drm_framebuffer *fb;
int ret, i;
info = drm_get_format_info(dev, mode_cmd);
if (!info)
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
for (i = 0; i < info->num_planes; i++) {
unsigned int width = mode_cmd->width / (i ? info->hsub : 1);
@@ -175,19 +181,55 @@ drm_gem_fb_create_with_funcs(struct drm_device *dev, struct drm_file *file,
}
}
- fb = drm_gem_fb_alloc(dev, mode_cmd, objs, i, funcs);
- if (IS_ERR(fb)) {
- ret = PTR_ERR(fb);
+ ret = drm_gem_fb_init(dev, fb, mode_cmd, objs, i, funcs);
+ if (ret)
goto err_gem_object_put;
- }
- return fb;
+ return 0;
err_gem_object_put:
for (i--; i >= 0; i--)
drm_gem_object_put_unlocked(objs[i]);
- return ERR_PTR(ret);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(drm_gem_fb_init_with_funcs);
+
+/**
+ * drm_gem_fb_create_with_funcs() - Helper function for the
+ * &drm_mode_config_funcs.fb_create
+ * callback
+ * @dev: DRM device
+ * @file: DRM file that holds the GEM handle(s) backing the framebuffer
+ * @mode_cmd: Metadata from the userspace framebuffer creation request
+ * @funcs: vtable to be used for the new framebuffer object
+ *
+ * This function can be used to set &drm_framebuffer_funcs for drivers that need
+ * custom framebuffer callbacks. Use drm_gem_fb_create() if you don't need to
+ * change &drm_framebuffer_funcs. The function does buffer size validation.
+ *
+ * Returns:
+ * Pointer to a &drm_framebuffer on success or an error pointer on failure.
+ */
+struct drm_framebuffer *
+drm_gem_fb_create_with_funcs(struct drm_device *dev, struct drm_file *file,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ const struct drm_framebuffer_funcs *funcs)
+{
+ struct drm_framebuffer *fb;
+ int ret;
+
+ fb = kzalloc(sizeof(*fb), GFP_KERNEL);
+ if (!fb)
+ return ERR_PTR(-ENOMEM);
+
+ ret = drm_gem_fb_init_with_funcs(dev, fb, file, mode_cmd, funcs);
+ if (ret) {
+ kfree(fb);
+ return ERR_PTR(ret);
+ }
+
+ return fb;
}
EXPORT_SYMBOL_GPL(drm_gem_fb_create_with_funcs);
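For drivers that embed struct drm_framebuffer in their own framebuffer type, the new split suggests a pattern like the following sketch (foo_framebuffer, foo_fb_create() and foo_fb_funcs are illustrative assumptions, not part of the patch).

struct foo_framebuffer {
        struct drm_framebuffer base;
        /* driver-private bookkeeping ... */
};

static struct drm_framebuffer *
foo_fb_create(struct drm_device *dev, struct drm_file *file,
              const struct drm_mode_fb_cmd2 *mode_cmd)
{
        struct foo_framebuffer *foo_fb;
        int ret;

        foo_fb = kzalloc(sizeof(*foo_fb), GFP_KERNEL);
        if (!foo_fb)
                return ERR_PTR(-ENOMEM);

        /* fills the embedded drm_framebuffer and takes the GEM references */
        ret = drm_gem_fb_init_with_funcs(dev, &foo_fb->base, file, mode_cmd,
                                         &foo_fb_funcs);
        if (ret) {
                kfree(foo_fb);
                return ERR_PTR(ret);
        }

        return &foo_fb->base;
}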
@@ -265,6 +307,132 @@ drm_gem_fb_create_with_dirty(struct drm_device *dev, struct drm_file *file,
}
EXPORT_SYMBOL_GPL(drm_gem_fb_create_with_dirty);
+static __u32 drm_gem_afbc_get_bpp(struct drm_device *dev,
+ const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ const struct drm_format_info *info;
+
+ info = drm_get_format_info(dev, mode_cmd);
+
+ /* use whatever a driver has set */
+ if (info->cpp[0])
+ return info->cpp[0] * 8;
+
+ /* guess otherwise */
+ switch (info->format) {
+ case DRM_FORMAT_YUV420_8BIT:
+ return 12;
+ case DRM_FORMAT_YUV420_10BIT:
+ return 15;
+ case DRM_FORMAT_VUY101010:
+ return 30;
+ default:
+ break;
+ }
+
+ /* all attempts failed */
+ return 0;
+}
+
+static int drm_gem_afbc_min_size(struct drm_device *dev,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_afbc_framebuffer *afbc_fb)
+{
+ __u32 n_blocks, w_alignment, h_alignment, hdr_alignment;
+ /* remove bpp when all users properly encode cpp in drm_format_info */
+ __u32 bpp;
+
+ switch (mode_cmd->modifier[0] & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK) {
+ case AFBC_FORMAT_MOD_BLOCK_SIZE_16x16:
+ afbc_fb->block_width = 16;
+ afbc_fb->block_height = 16;
+ break;
+ case AFBC_FORMAT_MOD_BLOCK_SIZE_32x8:
+ afbc_fb->block_width = 32;
+ afbc_fb->block_height = 8;
+ break;
+ /* no user exists yet - fall through */
+ case AFBC_FORMAT_MOD_BLOCK_SIZE_64x4:
+ case AFBC_FORMAT_MOD_BLOCK_SIZE_32x8_64x4:
+ default:
+ drm_dbg_kms(dev, "Invalid AFBC_FORMAT_MOD_BLOCK_SIZE: %lld.\n",
+ mode_cmd->modifier[0]
+ & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK);
+ return -EINVAL;
+ }
+
+ /* tiled header afbc */
+ w_alignment = afbc_fb->block_width;
+ h_alignment = afbc_fb->block_height;
+ hdr_alignment = AFBC_HDR_ALIGN;
+ if (mode_cmd->modifier[0] & AFBC_FORMAT_MOD_TILED) {
+ w_alignment *= AFBC_TH_LAYOUT_ALIGNMENT;
+ h_alignment *= AFBC_TH_LAYOUT_ALIGNMENT;
+ hdr_alignment = AFBC_TH_BODY_START_ALIGNMENT;
+ }
+
+ afbc_fb->aligned_width = ALIGN(mode_cmd->width, w_alignment);
+ afbc_fb->aligned_height = ALIGN(mode_cmd->height, h_alignment);
+ afbc_fb->offset = mode_cmd->offsets[0];
+
+ bpp = drm_gem_afbc_get_bpp(dev, mode_cmd);
+ if (!bpp) {
+ drm_dbg_kms(dev, "Invalid AFBC bpp value: %d\n", bpp);
+ return -EINVAL;
+ }
+
+ n_blocks = (afbc_fb->aligned_width * afbc_fb->aligned_height)
+ / AFBC_SUPERBLOCK_PIXELS;
+ afbc_fb->afbc_size = ALIGN(n_blocks * AFBC_HEADER_SIZE, hdr_alignment);
+ afbc_fb->afbc_size += n_blocks * ALIGN(bpp * AFBC_SUPERBLOCK_PIXELS / 8,
+ AFBC_SUPERBLOCK_ALIGNMENT);
+
+ return 0;
+}
+
+/**
+ * drm_gem_fb_afbc_init() - Helper function for drivers using afbc to
+ * fill and validate all the afbc-specific
+ * struct drm_afbc_framebuffer members
+ *
+ * @dev: DRM device
+ * @mode_cmd: Metadata from the userspace framebuffer creation request
+ * @afbc_fb: afbc-specific framebuffer to fill and validate
+ *
+ * This function can be used by drivers which support afbc to complete
+ * the preparation of struct drm_afbc_framebuffer. It must be called after
+ * allocating the said struct and calling drm_gem_fb_init_with_funcs().
+ * It is the caller's responsibility to put the afbc_fb->base.obj objects in case
+ * the call is unsuccessful.
+ *
+ * Returns:
+ * Zero on success or a negative error value on failure.
+ */
+int drm_gem_fb_afbc_init(struct drm_device *dev,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_afbc_framebuffer *afbc_fb)
+{
+ const struct drm_format_info *info;
+ struct drm_gem_object **objs;
+ int ret;
+
+ objs = afbc_fb->base.obj;
+ info = drm_get_format_info(dev, mode_cmd);
+ if (!info)
+ return -EINVAL;
+
+ ret = drm_gem_afbc_min_size(dev, mode_cmd, afbc_fb);
+ if (ret < 0)
+ return ret;
+
+ if (objs[0]->size < afbc_fb->afbc_size)
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(drm_gem_fb_afbc_init);
+
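To make the size check concrete (an editorial note, not part of the patch): with the 16x16 superblock layout, no tiled header, a 1920x1080 framebuffer and 32 bpp (e.g. cpp[0] = 4), the dimensions round up to 1920x1088, i.e. 8160 superblocks, so afbc_size = ALIGN(8160 * 16, 64) + 8160 * ALIGN(32 * 256 / 8, 128) = 130560 + 8355840 = 8486400 bytes, and obj[0] must be at least that large. The call order required by the kerneldoc could look roughly like the sketch below; foo_afbc_fb_create() and foo_fb_funcs are placeholders and the error unwinding is deliberately minimal.

static struct drm_framebuffer *
foo_afbc_fb_create(struct drm_device *dev, struct drm_file *file,
                   const struct drm_mode_fb_cmd2 *mode_cmd)
{
        struct drm_afbc_framebuffer *afbc_fb;
        int ret;

        afbc_fb = kzalloc(sizeof(*afbc_fb), GFP_KERNEL);
        if (!afbc_fb)
                return ERR_PTR(-ENOMEM);

        /* Fill the embedded drm_framebuffer first ... */
        ret = drm_gem_fb_init_with_funcs(dev, &afbc_fb->base, file, mode_cmd,
                                         &foo_fb_funcs);
        if (ret)
                goto err_free;

        /* ... then compute and validate the AFBC layout. */
        ret = drm_gem_fb_afbc_init(dev, mode_cmd, afbc_fb);
        if (ret)
                goto err_cleanup;

        return &afbc_fb->base;

err_cleanup:
        /* per the kerneldoc, the afbc_fb->base.obj references must be put here too */
        drm_framebuffer_cleanup(&afbc_fb->base);
err_free:
        kfree(afbc_fb);
        return ERR_PTR(ret);
}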
/**
* drm_gem_fb_prepare_fb() - Prepare a GEM backed framebuffer
* @plane: Plane
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
index 92a11bb42365..8b2d5c945c95 100644
--- a/drivers/gpu/drm/drm_gem_vram_helper.c
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -1,10 +1,13 @@
// SPDX-License-Identifier: GPL-2.0-or-later
+#include <linux/module.h>
+
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_mode.h>
@@ -18,13 +21,93 @@ static const struct drm_gem_object_funcs drm_gem_vram_object_funcs;
/**
* DOC: overview
*
- * This library provides a GEM buffer object that is backed by video RAM
- * (VRAM). It can be used for framebuffer devices with dedicated memory.
+ * This library provides &struct drm_gem_vram_object (GEM VRAM), a GEM
+ * buffer object that is backed by video RAM (VRAM). It can be used for
+ * framebuffer devices with dedicated memory.
*
* The data structure &struct drm_vram_mm and its helpers implement a memory
- * manager for simple framebuffer devices with dedicated video memory. Buffer
- * objects are either placed in video RAM or evicted to system memory. The rsp.
- * buffer object is provided by &struct drm_gem_vram_object.
+ * manager for simple framebuffer devices with dedicated video memory. GEM
+ * VRAM buffer objects are either placed in the video memory or remain evicted
+ * to system memory.
+ *
+ * With the GEM interface userspace applications create, manage and destroy
+ * graphics buffers, such as an on-screen framebuffer. GEM does not provide
+ * an implementation of these interfaces. It's up to the DRM driver to
+ * provide an implementation that suits the hardware. If the hardware device
+ * contains dedicated video memory, the DRM driver can use the VRAM helper
+ * library. Each active buffer object is stored in video RAM. Active
+ * buffers are used for drawing the current frame, typically something like
+ * the frame's scanout buffer or the cursor image. If there's no more space
+ * left in VRAM, inactive GEM objects can be moved to system memory.
+ *
+ * The easiest way to use the VRAM helper library is to call
+ * drm_vram_helper_alloc_mm(). The function allocates and initializes an
+ * instance of &struct drm_vram_mm in &struct drm_device.vram_mm . Use
+ * &DRM_GEM_VRAM_DRIVER to initialize &struct drm_driver and
+ * &DRM_VRAM_MM_FILE_OPERATIONS to initialize &struct file_operations;
+ * as illustrated below.
+ *
+ * .. code-block:: c
+ *
+ * struct file_operations fops ={
+ * .owner = THIS_MODULE,
+ * DRM_VRAM_MM_FILE_OPERATION
+ * };
+ * struct drm_driver drv = {
+ * .driver_feature = DRM_ ... ,
+ * .fops = &fops,
+ * DRM_GEM_VRAM_DRIVER
+ * };
+ *
+ * int init_drm_driver()
+ * {
+ * struct drm_device *dev;
+ * uint64_t vram_base;
+ * unsigned long vram_size;
+ * int ret;
+ *
+ * // setup device, vram base and size
+ * // ...
+ *
+ * ret = drm_vram_helper_alloc_mm(dev, vram_base, vram_size);
+ * if (ret)
+ * return ret;
+ * return 0;
+ * }
+ *
+ * This creates an instance of &struct drm_vram_mm, exports DRM userspace
+ * interfaces for GEM buffer management and initializes file operations to
+ * allow for accessing created GEM buffers. With this setup, the DRM driver
+ * manages an area of video RAM with VRAM MM and provides GEM VRAM objects
+ * to userspace.
+ *
+ * To clean up the VRAM memory management, call drm_vram_helper_release_mm()
+ * in the driver's clean-up code.
+ *
+ * .. code-block:: c
+ *
+ * void fini_drm_driver()
+ * {
+ * struct drm_device *dev = ...;
+ *
+ * drm_vram_helper_release_mm(dev);
+ * }
+ *
+ * For drawing or scanout operations, buffer objects have to be pinned in video
+ * RAM. Call drm_gem_vram_pin() with &DRM_GEM_VRAM_PL_FLAG_VRAM or
+ * &DRM_GEM_VRAM_PL_FLAG_SYSTEM to pin a buffer object in video RAM or system
+ * memory. Call drm_gem_vram_unpin() to release the pinned object afterwards.
+ *
+ * A buffer object that is pinned in video RAM has a fixed address within that
+ * memory region. Call drm_gem_vram_offset() to retrieve this value. Typically
+ * it's used to program the hardware's scanout engine for framebuffers, set
+ * the cursor overlay's image for a mouse cursor, or use it as input to the
+ * hardware's drawing engine.
+ *
+ * To access a buffer object's memory from the DRM driver, call
+ * drm_gem_vram_kmap(). It (optionally) maps the buffer into kernel address
+ * space and returns the memory address. Use drm_gem_vram_kunmap() to
+ * release the mapping.
*/
/*
@@ -670,9 +753,9 @@ EXPORT_SYMBOL(drm_gem_vram_driver_dumb_mmap_offset);
* @plane: a DRM plane
* @new_state: the plane's new state
*
- * During plane updates, this function pins the GEM VRAM
- * objects of the plane's new framebuffer to VRAM. Call
- * drm_gem_vram_plane_helper_cleanup_fb() to unpin them.
+ * During plane updates, this function sets the plane's fence and
+ * pins the GEM VRAM objects of the plane's new framebuffer to VRAM.
+ * Call drm_gem_vram_plane_helper_cleanup_fb() to unpin them.
*
* Returns:
* 0 on success, or
@@ -698,6 +781,10 @@ drm_gem_vram_plane_helper_prepare_fb(struct drm_plane *plane,
goto err_drm_gem_vram_unpin;
}
+ ret = drm_gem_fb_prepare_fb(plane, new_state);
+ if (ret)
+ goto err_drm_gem_vram_unpin;
+
return 0;
err_drm_gem_vram_unpin:
@@ -1018,7 +1105,6 @@ static struct ttm_bo_driver bo_driver = {
* struct drm_vram_mm
*/
-#if defined(CONFIG_DEBUG_FS)
static int drm_vram_mm_debugfs(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -1035,27 +1121,18 @@ static int drm_vram_mm_debugfs(struct seq_file *m, void *data)
static const struct drm_info_list drm_vram_mm_debugfs_list[] = {
{ "vram-mm", drm_vram_mm_debugfs, 0, NULL },
};
-#endif
/**
* drm_vram_mm_debugfs_init() - Register VRAM MM debugfs file.
*
* @minor: drm minor device.
*
- * Returns:
- * 0 on success, or
- * a negative error code otherwise.
*/
-int drm_vram_mm_debugfs_init(struct drm_minor *minor)
+void drm_vram_mm_debugfs_init(struct drm_minor *minor)
{
- int ret = 0;
-
-#if defined(CONFIG_DEBUG_FS)
- ret = drm_debugfs_create_files(drm_vram_mm_debugfs_list,
- ARRAY_SIZE(drm_vram_mm_debugfs_list),
- minor->debugfs_root, minor);
-#endif
- return ret;
+ drm_debugfs_create_files(drm_vram_mm_debugfs_list,
+ ARRAY_SIZE(drm_vram_mm_debugfs_list),
+ minor->debugfs_root, minor);
}
EXPORT_SYMBOL(drm_vram_mm_debugfs_init);
@@ -1202,3 +1279,6 @@ drm_vram_helper_mode_valid(struct drm_device *dev,
return drm_vram_helper_mode_valid_internal(dev, mode, max_bpp);
}
EXPORT_SYMBOL(drm_vram_helper_mode_valid);
+
+MODULE_DESCRIPTION("DRM VRAM memory-management helpers");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 5714a78365ac..2470a352730b 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -89,9 +89,11 @@ void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpr
struct drm_minor *drm_minor_acquire(unsigned int minor_id);
void drm_minor_release(struct drm_minor *minor);
+/* drm_managed.c */
+void drm_managed_release(struct drm_device *dev);
+
/* drm_vblank.c */
void drm_vblank_disable_and_save(struct drm_device *dev, unsigned int pipe);
-void drm_vblank_cleanup(struct drm_device *dev);
/* IOCTLS */
int drm_wait_vblank_ioctl(struct drm_device *dev, void *data,
@@ -141,7 +143,6 @@ void drm_sysfs_lease_event(struct drm_device *dev);
/* drm_gem.c */
struct drm_gem_object;
int drm_gem_init(struct drm_device *dev);
-void drm_gem_destroy(struct drm_device *dev);
int drm_gem_handle_create_tail(struct drm_file *file_priv,
struct drm_gem_object *obj,
u32 *handlep);
@@ -235,4 +236,4 @@ int drm_syncobj_query_ioctl(struct drm_device *dev, void *data,
/* drm_framebuffer.c */
void drm_framebuffer_print_info(struct drm_printer *p, unsigned int indent,
const struct drm_framebuffer *fb);
-int drm_framebuffer_debugfs_init(struct drm_minor *minor);
+void drm_framebuffer_debugfs_init(struct drm_minor *minor);
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 9e41972c4bbc..328502aafaf7 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -599,8 +599,8 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_legacy_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_legacy_getsareactx, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, 0),
DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_legacy_addctx, DRM_AUTH|DRM_ROOT_ONLY),
DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_legacy_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -741,7 +741,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
* };
*
* Please make sure that you follow all the best practices from
- * ``Documentation/ioctl/botching-up-ioctls.rst``. Note that drm_ioctl()
+ * ``Documentation/process/botching-up-ioctls.rst``. Note that drm_ioctl()
* automatically zero-extends structures, hence make sure you can add more stuff
* at the end, i.e. don't put a variable sized array there.
*
diff --git a/drivers/gpu/drm/drm_managed.c b/drivers/gpu/drm/drm_managed.c
new file mode 100644
index 000000000000..9cebfe370a65
--- /dev/null
+++ b/drivers/gpu/drm/drm_managed.c
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Intel
+ *
+ * Based on drivers/base/devres.c
+ */
+
+#include <drm/drm_managed.h>
+
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_print.h>
+
+/**
+ * DOC: managed resources
+ *
+ * Inspired by struct &device managed resources, but tied to the lifetime of
+ * struct &drm_device, which can outlive the underlying physical device, usually
+ * when userspace still has some open files and other handles to resources.
+ *
+ * Release actions can be added with drmm_add_action(), memory allocations can
+ * be done directly with drmm_kmalloc() and the related functions. Everything
+ * will be released on the final drm_dev_put() in reverse order of how the
+ * release actions have been added and memory has been allocated since driver
+ * loading started with drm_dev_init().
+ *
+ * Note that release actions and managed memory can also be added and removed
+ * during the lifetime of the driver, all the functions are fully concurrent
+ * safe. But it is recommended to use managed resources only for resources that
+ * change rarely, if ever, during the lifetime of the &drm_device instance.
+ */
+
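A hedged usage sketch of this interface (editorial, not part of the patch): a driver allocates private state with drmm_kmalloc() and registers a matching release action, both of which are torn down automatically on the final drm_dev_put(). The foo_* names are placeholders.

static void foo_fini_hw(struct drm_device *dev, void *ptr)
{
        struct foo_private *priv = ptr; /* assumed driver-private type */

        /* undo whatever foo_init_hw() programmed */
}

static int foo_init_hw(struct drm_device *dev)
{
        struct foo_private *priv;

        priv = drmm_kmalloc(dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        /* ... program the hardware ... */

        /* runs foo_fini_hw() immediately if the action cannot be added */
        return drmm_add_action_or_reset(dev, foo_fini_hw, priv);
}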
+struct drmres_node {
+ struct list_head entry;
+ drmres_release_t release;
+ const char *name;
+ size_t size;
+};
+
+struct drmres {
+ struct drmres_node node;
+ /*
+ * Some archs want to perform DMA into kmalloc caches
+ * and need a guaranteed alignment larger than
+ * the alignment of a 64-bit integer.
+ * Thus we use ARCH_KMALLOC_MINALIGN here and get exactly the same
+ * buffer alignment as if it was allocated by plain kmalloc().
+ */
+ u8 __aligned(ARCH_KMALLOC_MINALIGN) data[];
+};
+
+static void free_dr(struct drmres *dr)
+{
+ kfree_const(dr->node.name);
+ kfree(dr);
+}
+
+void drm_managed_release(struct drm_device *dev)
+{
+ struct drmres *dr, *tmp;
+
+ drm_dbg_drmres(dev, "drmres release begin\n");
+ list_for_each_entry_safe(dr, tmp, &dev->managed.resources, node.entry) {
+ drm_dbg_drmres(dev, "REL %p %s (%zu bytes)\n",
+ dr, dr->node.name, dr->node.size);
+
+ if (dr->node.release)
+ dr->node.release(dev, dr->node.size ? *(void **)&dr->data : NULL);
+
+ list_del(&dr->node.entry);
+ free_dr(dr);
+ }
+ drm_dbg_drmres(dev, "drmres release end\n");
+}
+
+/*
+ * Always inline so that kmalloc_track_caller tracks the actual interesting
+ * caller outside of drm_managed.c.
+ */
+static __always_inline struct drmres * alloc_dr(drmres_release_t release,
+ size_t size, gfp_t gfp, int nid)
+{
+ size_t tot_size;
+ struct drmres *dr;
+
+ /* We must catch any near-SIZE_MAX cases that could overflow. */
+ if (unlikely(check_add_overflow(sizeof(*dr), size, &tot_size)))
+ return NULL;
+
+ dr = kmalloc_node_track_caller(tot_size, gfp, nid);
+ if (unlikely(!dr))
+ return NULL;
+
+ memset(dr, 0, offsetof(struct drmres, data));
+
+ INIT_LIST_HEAD(&dr->node.entry);
+ dr->node.release = release;
+ dr->node.size = size;
+
+ return dr;
+}
+
+static void del_dr(struct drm_device *dev, struct drmres *dr)
+{
+ list_del_init(&dr->node.entry);
+
+ drm_dbg_drmres(dev, "DEL %p %s (%lu bytes)\n",
+ dr, dr->node.name, (unsigned long) dr->node.size);
+}
+
+static void add_dr(struct drm_device *dev, struct drmres *dr)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->managed.lock, flags);
+ list_add(&dr->node.entry, &dev->managed.resources);
+ spin_unlock_irqrestore(&dev->managed.lock, flags);
+
+ drm_dbg_drmres(dev, "ADD %p %s (%lu bytes)\n",
+ dr, dr->node.name, (unsigned long) dr->node.size);
+}
+
+/**
+ * drmm_add_final_kfree - add release action for the final kfree()
+ * @dev: DRM device
+ * @container: pointer to the kmalloc allocation containing @dev
+ *
+ * Since the allocation containing the struct &drm_device must be allocated
+ * before it can be initialized with drm_dev_init() there's no way to allocate
+ * that memory with drmm_kmalloc(). To side-step this chicken-egg problem the
+ * pointer for this final kfree() must be specified by calling this function. It
+ * will be released in the final drm_dev_put() for @dev, after all other release
+ * actions installed through drmm_add_action() have been processed.
+ */
+void drmm_add_final_kfree(struct drm_device *dev, void *container)
+{
+ WARN_ON(dev->managed.final_kfree);
+ WARN_ON(dev < (struct drm_device *) container);
+ WARN_ON(dev + 1 > (struct drm_device *) (container + ksize(container)));
+ dev->managed.final_kfree = container;
+}
+EXPORT_SYMBOL(drmm_add_final_kfree);
+
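The chicken-and-egg pattern described above typically looks like the sketch below (illustrative only; struct foo_device, foo_driver and foo_device_create() are assumptions, not taken from the patch).

struct foo_device {
        struct drm_device drm;
        /* further driver state ... */
};

static struct foo_device *foo_device_create(struct device *parent)
{
        struct foo_device *fdev;
        int ret;

        fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
        if (!fdev)
                return ERR_PTR(-ENOMEM);

        ret = drm_dev_init(&fdev->drm, &foo_driver, parent);
        if (ret) {
                kfree(fdev);
                return ERR_PTR(ret);
        }

        /* from here on the final drm_dev_put() frees the whole container */
        drmm_add_final_kfree(&fdev->drm, fdev);

        return fdev;
}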
+int __drmm_add_action(struct drm_device *dev,
+ drmres_release_t action,
+ void *data, const char *name)
+{
+ struct drmres *dr;
+ void **void_ptr;
+
+ dr = alloc_dr(action, data ? sizeof(void*) : 0,
+ GFP_KERNEL | __GFP_ZERO,
+ dev_to_node(dev->dev));
+ if (!dr) {
+ drm_dbg_drmres(dev, "failed to add action %s for %p\n",
+ name, data);
+ return -ENOMEM;
+ }
+
+ dr->node.name = kstrdup_const(name, GFP_KERNEL);
+ if (data) {
+ void_ptr = (void **)&dr->data;
+ *void_ptr = data;
+ }
+
+ add_dr(dev, dr);
+
+ return 0;
+}
+EXPORT_SYMBOL(__drmm_add_action);
+
+int __drmm_add_action_or_reset(struct drm_device *dev,
+ drmres_release_t action,
+ void *data, const char *name)
+{
+ int ret;
+
+ ret = __drmm_add_action(dev, action, data, name);
+ if (ret)
+ action(dev, data);
+
+ return ret;
+}
+EXPORT_SYMBOL(__drmm_add_action_or_reset);
+
+/**
+ * drmm_kmalloc - &drm_device managed kmalloc()
+ * @dev: DRM device
+ * @size: size of the memory allocation
+ * @gfp: GFP allocation flags
+ *
+ * This is a &drm_device managed version of kmalloc(). The allocated memory is
+ * automatically freed on the final drm_dev_put(). Memory can also be freed
+ * before the final drm_dev_put() by calling drmm_kfree().
+ */
+void *drmm_kmalloc(struct drm_device *dev, size_t size, gfp_t gfp)
+{
+ struct drmres *dr;
+
+ dr = alloc_dr(NULL, size, gfp, dev_to_node(dev->dev));
+ if (!dr) {
+ drm_dbg_drmres(dev, "failed to allocate %zu bytes, %u flags\n",
+ size, gfp);
+ return NULL;
+ }
+ dr->node.name = kstrdup_const("kmalloc", GFP_KERNEL);
+
+ add_dr(dev, dr);
+
+ return dr->data;
+}
+EXPORT_SYMBOL(drmm_kmalloc);
+
+/**
+ * drmm_kstrdup - &drm_device managed kstrdup()
+ * @dev: DRM device
+ * @s: 0-terminated string to be duplicated
+ * @gfp: GFP allocation flags
+ *
+ * This is a &drm_device managed version of kstrdup(). The allocated memory is
+ * automatically freed on the final drm_dev_put() and works exactly like a
+ * memory allocation obtained by drmm_kmalloc().
+ */
+char *drmm_kstrdup(struct drm_device *dev, const char *s, gfp_t gfp)
+{
+ size_t size;
+ char *buf;
+
+ if (!s)
+ return NULL;
+
+ size = strlen(s) + 1;
+ buf = drmm_kmalloc(dev, size, gfp);
+ if (buf)
+ memcpy(buf, s, size);
+ return buf;
+}
+EXPORT_SYMBOL_GPL(drmm_kstrdup);
+
+/**
+ * drmm_kfree - &drm_device managed kfree()
+ * @dev: DRM device
+ * @data: memory allocation to be freed
+ *
+ * This is a &drm_device managed version of kfree() which can be used to
+ * release memory allocated through drmm_kmalloc() or any of its related
+ * functions before the final drm_dev_put() of @dev.
+ */
+void drmm_kfree(struct drm_device *dev, void *data)
+{
+ struct drmres *dr_match = NULL, *dr;
+ unsigned long flags;
+
+ if (!data)
+ return;
+
+ spin_lock_irqsave(&dev->managed.lock, flags);
+ list_for_each_entry(dr, &dev->managed.resources, node.entry) {
+ if (dr->data == data) {
+ dr_match = dr;
+ del_dr(dev, dr_match);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&dev->managed.lock, flags);
+
+ if (WARN_ON(!dr_match))
+ return;
+
+ free_dr(dr_match);
+}
+EXPORT_SYMBOL(drmm_kfree);
diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c
index 558baf989f5a..bb27c82757f1 100644
--- a/drivers/gpu/drm/drm_mipi_dbi.c
+++ b/drivers/gpu/drm/drm_mipi_dbi.c
@@ -169,7 +169,8 @@ int mipi_dbi_command_buf(struct mipi_dbi *dbi, u8 cmd, u8 *data, size_t len)
EXPORT_SYMBOL(mipi_dbi_command_buf);
/* This should only be used by mipi_dbi_command() */
-int mipi_dbi_command_stackbuf(struct mipi_dbi *dbi, u8 cmd, u8 *data, size_t len)
+int mipi_dbi_command_stackbuf(struct mipi_dbi *dbi, u8 cmd, const u8 *data,
+ size_t len)
{
u8 *buf;
int ret;
@@ -510,6 +511,10 @@ int mipi_dbi_dev_init_with_formats(struct mipi_dbi_dev *dbidev,
if (!dbidev->dbi.command)
return -EINVAL;
+ ret = drmm_mode_config_init(drm);
+ if (ret)
+ return ret;
+
dbidev->tx_buf = devm_kmalloc(drm->dev, tx_buf_size, GFP_KERNEL);
if (!dbidev->tx_buf)
return -ENOMEM;
@@ -579,26 +584,6 @@ int mipi_dbi_dev_init(struct mipi_dbi_dev *dbidev,
EXPORT_SYMBOL(mipi_dbi_dev_init);
/**
- * mipi_dbi_release - DRM driver release helper
- * @drm: DRM device
- *
- * This function finalizes and frees &mipi_dbi.
- *
- * Drivers can use this as their &drm_driver->release callback.
- */
-void mipi_dbi_release(struct drm_device *drm)
-{
- struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(drm);
-
- DRM_DEBUG_DRIVER("\n");
-
- drm_mode_config_cleanup(drm);
- drm_dev_fini(drm);
- kfree(dbidev);
-}
-EXPORT_SYMBOL(mipi_dbi_release);
-
-/**
* mipi_dbi_hw_reset - Hardware reset of controller
* @dbi: MIPI DBI structure
*
@@ -1308,10 +1293,8 @@ static const struct file_operations mipi_dbi_debugfs_command_fops = {
* controller or getting the read command values.
* Drivers can use this as their &drm_driver->debugfs_init callback.
*
- * Returns:
- * Zero on success, negative error code on failure.
*/
-int mipi_dbi_debugfs_init(struct drm_minor *minor)
+void mipi_dbi_debugfs_init(struct drm_minor *minor)
{
struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(minor->dev);
umode_t mode = S_IFREG | S_IWUSR;
@@ -1320,8 +1303,6 @@ int mipi_dbi_debugfs_init(struct drm_minor *minor)
mode |= S_IRUGO;
debugfs_create_file("command", mode, minor->debugfs_root, dbidev,
&mipi_dbi_debugfs_command_fops);
-
- return 0;
}
EXPORT_SYMBOL(mipi_dbi_debugfs_init);
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 8981abe8b7c9..f4ca1ff80af9 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -212,20 +212,6 @@ static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
&drm_mm_interval_tree_augment);
}
-#define RB_INSERT(root, member, expr) do { \
- struct rb_node **link = &root.rb_node, *rb = NULL; \
- u64 x = expr(node); \
- while (*link) { \
- rb = *link; \
- if (x < expr(rb_entry(rb, struct drm_mm_node, member))) \
- link = &rb->rb_left; \
- else \
- link = &rb->rb_right; \
- } \
- rb_link_node(&node->member, rb, link); \
- rb_insert_color(&node->member, &root); \
-} while (0)
-
#define HOLE_SIZE(NODE) ((NODE)->hole_size)
#define HOLE_ADDR(NODE) (__drm_mm_hole_node_start(NODE))
@@ -255,16 +241,42 @@ static void insert_hole_size(struct rb_root_cached *root,
rb_insert_color_cached(&node->rb_hole_size, root, first);
}
+RB_DECLARE_CALLBACKS_MAX(static, augment_callbacks,
+ struct drm_mm_node, rb_hole_addr,
+ u64, subtree_max_hole, HOLE_SIZE)
+
+static void insert_hole_addr(struct rb_root *root, struct drm_mm_node *node)
+{
+ struct rb_node **link = &root->rb_node, *rb_parent = NULL;
+ u64 start = HOLE_ADDR(node), subtree_max_hole = node->subtree_max_hole;
+ struct drm_mm_node *parent;
+
+ while (*link) {
+ rb_parent = *link;
+ parent = rb_entry(rb_parent, struct drm_mm_node, rb_hole_addr);
+ if (parent->subtree_max_hole < subtree_max_hole)
+ parent->subtree_max_hole = subtree_max_hole;
+ if (start < HOLE_ADDR(parent))
+ link = &parent->rb_hole_addr.rb_left;
+ else
+ link = &parent->rb_hole_addr.rb_right;
+ }
+
+ rb_link_node(&node->rb_hole_addr, rb_parent, link);
+ rb_insert_augmented(&node->rb_hole_addr, root, &augment_callbacks);
+}
+
static void add_hole(struct drm_mm_node *node)
{
struct drm_mm *mm = node->mm;
node->hole_size =
__drm_mm_hole_node_end(node) - __drm_mm_hole_node_start(node);
+ node->subtree_max_hole = node->hole_size;
DRM_MM_BUG_ON(!drm_mm_hole_follows(node));
insert_hole_size(&mm->holes_size, node);
- RB_INSERT(mm->holes_addr, rb_hole_addr, HOLE_ADDR);
+ insert_hole_addr(&mm->holes_addr, node);
list_add(&node->hole_stack, &mm->hole_stack);
}
@@ -275,8 +287,10 @@ static void rm_hole(struct drm_mm_node *node)
list_del(&node->hole_stack);
rb_erase_cached(&node->rb_hole_size, &node->mm->holes_size);
- rb_erase(&node->rb_hole_addr, &node->mm->holes_addr);
+ rb_erase_augmented(&node->rb_hole_addr, &node->mm->holes_addr,
+ &augment_callbacks);
node->hole_size = 0;
+ node->subtree_max_hole = 0;
DRM_MM_BUG_ON(drm_mm_hole_follows(node));
}
@@ -361,9 +375,90 @@ first_hole(struct drm_mm *mm,
}
}
+/**
+ * next_hole_high_addr - returns next hole for a DRM_MM_INSERT_HIGH mode request
+ * @entry: previously selected drm_mm_node
+ * @size: size of the hole needed for the request
+ *
+ * This function will verify whether the left subtree of @entry has a hole big
+ * enough to fit the requested size. If so, it will return the previous node of
+ * @entry; otherwise it will return the parent node of @entry.
+ *
+ * It will also skip the complete left subtree if the subtree_max_hole of that
+ * subtree is the same as the subtree_max_hole of the @entry.
+ *
+ * Returns:
+ * previous node of @entry if left subtree of @entry can serve the request or
+ * else return parent of @entry
+ */
+static struct drm_mm_node *
+next_hole_high_addr(struct drm_mm_node *entry, u64 size)
+{
+ struct rb_node *rb_node, *left_rb_node, *parent_rb_node;
+ struct drm_mm_node *left_node;
+
+ if (!entry)
+ return NULL;
+
+ rb_node = &entry->rb_hole_addr;
+ if (rb_node->rb_left) {
+ left_rb_node = rb_node->rb_left;
+ parent_rb_node = rb_parent(rb_node);
+ left_node = rb_entry(left_rb_node,
+ struct drm_mm_node, rb_hole_addr);
+ if ((left_node->subtree_max_hole < size ||
+ entry->size == entry->subtree_max_hole) &&
+ parent_rb_node && parent_rb_node->rb_left != rb_node)
+ return rb_hole_addr_to_node(parent_rb_node);
+ }
+
+ return rb_hole_addr_to_node(rb_prev(rb_node));
+}
+
+/**
+ * next_hole_low_addr - returns next hole for a DRM_MM_INSERT_LOW mode request
+ * @entry: previously selected drm_mm_node
+ * @size: size of the hole needed for the request
+ *
+ * This function will verify whether the right subtree of @entry has a hole big
+ * enough to fit the requested size. If so, it will return the next node of
+ * @entry; otherwise it will return the parent node of @entry.
+ *
+ * It will also skip the complete right subtree if the subtree_max_hole of that
+ * subtree is the same as the subtree_max_hole of the @entry.
+ *
+ * Returns:
+ * next node of @entry if right subtree of @entry can serve the request or
+ * else return parent of @entry
+ */
+static struct drm_mm_node *
+next_hole_low_addr(struct drm_mm_node *entry, u64 size)
+{
+ struct rb_node *rb_node, *right_rb_node, *parent_rb_node;
+ struct drm_mm_node *right_node;
+
+ if (!entry)
+ return NULL;
+
+ rb_node = &entry->rb_hole_addr;
+ if (rb_node->rb_right) {
+ right_rb_node = rb_node->rb_right;
+ parent_rb_node = rb_parent(rb_node);
+ right_node = rb_entry(right_rb_node,
+ struct drm_mm_node, rb_hole_addr);
+ if ((right_node->subtree_max_hole < size ||
+ entry->size == entry->subtree_max_hole) &&
+ parent_rb_node && parent_rb_node->rb_right != rb_node)
+ return rb_hole_addr_to_node(parent_rb_node);
+ }
+
+ return rb_hole_addr_to_node(rb_next(rb_node));
+}
+
static struct drm_mm_node *
next_hole(struct drm_mm *mm,
struct drm_mm_node *node,
+ u64 size,
enum drm_mm_insert_mode mode)
{
switch (mode) {
@@ -372,10 +467,10 @@ next_hole(struct drm_mm *mm,
return rb_hole_size_to_node(rb_prev(&node->rb_hole_size));
case DRM_MM_INSERT_LOW:
- return rb_hole_addr_to_node(rb_next(&node->rb_hole_addr));
+ return next_hole_low_addr(node, size);
case DRM_MM_INSERT_HIGH:
- return rb_hole_addr_to_node(rb_prev(&node->rb_hole_addr));
+ return next_hole_high_addr(node, size);
case DRM_MM_INSERT_EVICT:
node = list_next_entry(node, hole_stack);
@@ -489,7 +584,7 @@ int drm_mm_insert_node_in_range(struct drm_mm * const mm,
remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
for (hole = first_hole(mm, range_start, range_end, size, mode);
hole;
- hole = once ? NULL : next_hole(mm, hole, mode)) {
+ hole = once ? NULL : next_hole(mm, hole, size, mode)) {
u64 hole_start = __drm_mm_hole_node_start(hole);
u64 hole_end = hole_start + hole->hole_size;
u64 adj_start, adj_end;
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index 08e6eff6a179..5761f838a057 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -25,6 +25,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_encoder.h>
#include <drm/drm_file.h>
+#include <drm/drm_managed.h>
#include <drm/drm_mode_config.h>
#include <drm/drm_print.h>
#include <linux/dma-resv.h>
@@ -373,8 +374,14 @@ static int drm_mode_create_standard_properties(struct drm_device *dev)
return 0;
}
+static void drm_mode_config_init_release(struct drm_device *dev, void *ptr)
+{
+ drm_mode_config_cleanup(dev);
+}
+
/**
- * drm_mode_config_init - initialize DRM mode_configuration structure
+ * drmm_mode_config_init - managed DRM mode_configuration structure
+ * initialization
* @dev: DRM device
*
* Initialize @dev's mode_config structure, used for tracking the graphics
@@ -384,8 +391,12 @@ static int drm_mode_create_standard_properties(struct drm_device *dev)
* problem, since this should happen single threaded at init time. It is the
* driver's problem to ensure this guarantee.
*
+ * Cleanup is automatically handled through registering drm_mode_config_cleanup
+ * with drmm_add_action().
+ *
+ * Returns: 0 on success, negative error value on failure.
*/
-void drm_mode_config_init(struct drm_device *dev)
+int drmm_mode_config_init(struct drm_device *dev)
{
mutex_init(&dev->mode_config.mutex);
drm_modeset_lock_init(&dev->mode_config.connection_mutex);
@@ -443,8 +454,11 @@ void drm_mode_config_init(struct drm_device *dev)
drm_modeset_acquire_fini(&modeset_ctx);
dma_resv_fini(&resv);
}
+
+ return drmm_add_action_or_reset(dev, drm_mode_config_init_release,
+ NULL);
}
-EXPORT_SYMBOL(drm_mode_config_init);
+EXPORT_SYMBOL(drmm_mode_config_init);
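What a caller gains from the managed variant, as a sketch (editorial, not part of the patch; foo_modeset_init() and the mode_config limits are assumptions): the return value is checked once, and the explicit drm_mode_config_cleanup() call disappears from the teardown path.

static int foo_modeset_init(struct drm_device *dev)
{
        int ret;

        ret = drmm_mode_config_init(dev);
        if (ret)
                return ret;

        dev->mode_config.min_width = 0;
        dev->mode_config.min_height = 0;
        dev->mode_config.max_width = 4096;      /* assumed hardware limit */
        dev->mode_config.max_height = 4096;

        /* ... create planes, CRTCs, encoders and connectors ... */

        return 0;
}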
/**
* drm_mode_config_cleanup - free up DRM mode_config info
@@ -456,6 +470,9 @@ EXPORT_SYMBOL(drm_mode_config_init);
* Note that since this /should/ happen single-threaded at driver/device
* teardown time, no locking is required. It's the driver's job to ensure that
* this guarantee actually holds true.
+ *
+ * FIXME: With the managed drmm_mode_config_init() it is no longer necessary for
+ * drivers to explicitly call this function.
*/
void drm_mode_config_cleanup(struct drm_device *dev)
{
@@ -532,3 +549,90 @@ void drm_mode_config_cleanup(struct drm_device *dev)
drm_modeset_lock_fini(&dev->mode_config.connection_mutex);
}
EXPORT_SYMBOL(drm_mode_config_cleanup);
+
+static u32 full_encoder_mask(struct drm_device *dev)
+{
+ struct drm_encoder *encoder;
+ u32 encoder_mask = 0;
+
+ drm_for_each_encoder(encoder, dev)
+ encoder_mask |= drm_encoder_mask(encoder);
+
+ return encoder_mask;
+}
+
+/*
+ * For some reason we want the encoder itself included in
+ * possible_clones. Make life easy for drivers by allowing them
+ * to leave possible_clones unset if no cloning is possible.
+ */
+static void fixup_encoder_possible_clones(struct drm_encoder *encoder)
+{
+ if (encoder->possible_clones == 0)
+ encoder->possible_clones = drm_encoder_mask(encoder);
+}
+
+static void validate_encoder_possible_clones(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ u32 encoder_mask = full_encoder_mask(dev);
+ struct drm_encoder *other;
+
+ drm_for_each_encoder(other, dev) {
+ WARN(!!(encoder->possible_clones & drm_encoder_mask(other)) !=
+ !!(other->possible_clones & drm_encoder_mask(encoder)),
+ "possible_clones mismatch: "
+ "[ENCODER:%d:%s] mask=0x%x possible_clones=0x%x vs. "
+ "[ENCODER:%d:%s] mask=0x%x possible_clones=0x%x\n",
+ encoder->base.id, encoder->name,
+ drm_encoder_mask(encoder), encoder->possible_clones,
+ other->base.id, other->name,
+ drm_encoder_mask(other), other->possible_clones);
+ }
+
+ WARN((encoder->possible_clones & drm_encoder_mask(encoder)) == 0 ||
+ (encoder->possible_clones & ~encoder_mask) != 0,
+ "Bogus possible_clones: "
+ "[ENCODER:%d:%s] possible_clones=0x%x (full encoder mask=0x%x)\n",
+ encoder->base.id, encoder->name,
+ encoder->possible_clones, encoder_mask);
+}
+
+static u32 full_crtc_mask(struct drm_device *dev)
+{
+ struct drm_crtc *crtc;
+ u32 crtc_mask = 0;
+
+ drm_for_each_crtc(crtc, dev)
+ crtc_mask |= drm_crtc_mask(crtc);
+
+ return crtc_mask;
+}
+
+static void validate_encoder_possible_crtcs(struct drm_encoder *encoder)
+{
+ u32 crtc_mask = full_crtc_mask(encoder->dev);
+
+ WARN((encoder->possible_crtcs & crtc_mask) == 0 ||
+ (encoder->possible_crtcs & ~crtc_mask) != 0,
+ "Bogus possible_crtcs: "
+ "[ENCODER:%d:%s] possible_crtcs=0x%x (full crtc mask=0x%x)\n",
+ encoder->base.id, encoder->name,
+ encoder->possible_crtcs, crtc_mask);
+}
+
+void drm_mode_config_validate(struct drm_device *dev)
+{
+ struct drm_encoder *encoder;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return;
+
+ drm_for_each_encoder(encoder, dev)
+ fixup_encoder_possible_clones(encoder);
+
+ drm_for_each_encoder(encoder, dev) {
+ validate_encoder_possible_clones(encoder);
+ validate_encoder_possible_crtcs(encoder);
+ }
+}
diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c
index 35c2719407a8..901b078abf40 100644
--- a/drivers/gpu/drm/drm_mode_object.c
+++ b/drivers/gpu/drm/drm_mode_object.c
@@ -402,12 +402,13 @@ int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
{
struct drm_mode_obj_get_properties *arg = data;
struct drm_mode_object *obj;
+ struct drm_modeset_acquire_ctx ctx;
int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EOPNOTSUPP;
- drm_modeset_lock_all(dev);
+ DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);
obj = drm_mode_object_find(dev, file_priv, arg->obj_id, arg->obj_type);
if (!obj) {
@@ -427,7 +428,7 @@ int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
out_unref:
drm_mode_object_put(obj);
out:
- drm_modeset_unlock_all(dev);
+ DRM_MODESET_LOCK_ALL_END(ctx, ret);
return ret;
}
@@ -449,12 +450,13 @@ static int set_property_legacy(struct drm_mode_object *obj,
{
struct drm_device *dev = prop->dev;
struct drm_mode_object *ref;
+ struct drm_modeset_acquire_ctx ctx;
int ret = -EINVAL;
if (!drm_property_change_valid_get(prop, prop_value, &ref))
return -EINVAL;
- drm_modeset_lock_all(dev);
+ DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);
switch (obj->type) {
case DRM_MODE_OBJECT_CONNECTOR:
ret = drm_connector_set_obj_prop(obj, prop, prop_value);
@@ -468,7 +470,7 @@ static int set_property_legacy(struct drm_mode_object *obj,
break;
}
drm_property_change_valid_put(prop, ref);
- drm_modeset_unlock_all(dev);
+ DRM_MODESET_LOCK_ALL_END(ctx, ret);
return ret;
}
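Both hunks above convert to the same general shape; a standalone sketch of that pattern (not from the patch; foo_locked_op() is a placeholder): the BEGIN/END macros handle the acquire context, deadlock backoff and retry that drm_modeset_lock_all() used to hide.

static int foo_locked_op(struct drm_device *dev)
{
        struct drm_modeset_acquire_ctx ctx;
        int ret = 0;

        DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);

        /* ... touch modeset state under all CRTC/plane/connector locks ... */

        DRM_MODESET_LOCK_ALL_END(ctx, ret);

        return ret;
}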
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index d4d64518e11b..fec1c33b3045 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -748,32 +748,6 @@ void drm_mode_set_name(struct drm_display_mode *mode)
EXPORT_SYMBOL(drm_mode_set_name);
/**
- * drm_mode_hsync - get the hsync of a mode
- * @mode: mode
- *
- * Returns:
- * @modes's hsync rate in kHz, rounded to the nearest integer. Calculates the
- * value first if it is not yet set.
- */
-int drm_mode_hsync(const struct drm_display_mode *mode)
-{
- unsigned int calc_val;
-
- if (mode->hsync)
- return mode->hsync;
-
- if (mode->htotal <= 0)
- return 0;
-
- calc_val = (mode->clock * 1000) / mode->htotal; /* hsync in Hz */
- calc_val += 500; /* round to 1000Hz */
- calc_val /= 1000; /* truncate to kHz */
-
- return calc_val;
-}
-EXPORT_SYMBOL(drm_mode_hsync);
-
-/**
* drm_mode_vrefresh - get the vrefresh of a mode
* @mode: mode
*
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 81aa21561982..75e2b7053f35 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -30,12 +30,13 @@
#include <drm/drm.h>
#include <drm/drm_agpsupport.h>
#include <drm/drm_drv.h>
-#include <drm/drm_pci.h>
#include <drm/drm_print.h>
#include "drm_internal.h"
#include "drm_legacy.h"
+#ifdef CONFIG_DRM_LEGACY
+
/**
* drm_pci_alloc - Allocate a PCI consistent memory block, for DMA.
* @dev: DRM device
@@ -93,6 +94,7 @@ void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
}
EXPORT_SYMBOL(drm_pci_free);
+#endif
static int drm_get_pci_domain(struct drm_device *dev)
{
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index d6ad60ab0d38..4af173ced327 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -289,6 +289,8 @@ EXPORT_SYMBOL(drm_universal_plane_init);
int drm_plane_register_all(struct drm_device *dev)
{
+ unsigned int num_planes = 0;
+ unsigned int num_zpos = 0;
struct drm_plane *plane;
int ret = 0;
@@ -297,8 +299,15 @@ int drm_plane_register_all(struct drm_device *dev)
ret = plane->funcs->late_register(plane);
if (ret)
return ret;
+
+ if (plane->zpos_property)
+ num_zpos++;
+ num_planes++;
}
+ drm_WARN(dev, num_zpos && num_planes != num_zpos,
+ "Mixing planes with and without zpos property is invalid\n");
+
return 0;
}
diff --git a/drivers/gpu/drm/drm_scatter.c b/drivers/gpu/drm/drm_scatter.c
index ca520028b2cb..f4e6184d1877 100644
--- a/drivers/gpu/drm/drm_scatter.c
+++ b/drivers/gpu/drm/drm_scatter.c
@@ -43,15 +43,6 @@
#define DEBUG_SCATTER 0
-static inline void *drm_vmalloc_dma(unsigned long size)
-{
-#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
- return __vmalloc(size, GFP_KERNEL, pgprot_noncached_wc(PAGE_KERNEL));
-#else
- return vmalloc_32(size);
-#endif
-}
-
static void drm_sg_cleanup(struct drm_sg_mem * entry)
{
struct page *page;
@@ -126,7 +117,7 @@ int drm_legacy_sg_alloc(struct drm_device *dev, void *data,
return -ENOMEM;
}
- entry->virtual = drm_vmalloc_dma(pages << PAGE_SHIFT);
+ entry->virtual = vmalloc_32(pages << PAGE_SHIFT);
if (!entry->virtual) {
kfree(entry->busaddr);
kfree(entry->pagelist);
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 939f0032aab1..f0336c804639 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -291,9 +291,6 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
return PTR_ERR(connector->kdev);
}
- /* Let userspace know we have a new connector */
- drm_sysfs_hotplug_event(dev);
-
if (connector->ddc)
return sysfs_create_link(&connector->kdev->kobj,
&connector->ddc->dev.kobj, "ddc");
diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
index da7b0b0c1090..2d5ce690d214 100644
--- a/drivers/gpu/drm/drm_vblank.c
+++ b/drivers/gpu/drm/drm_vblank.c
@@ -30,6 +30,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_drv.h>
#include <drm/drm_framebuffer.h>
+#include <drm/drm_managed.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
@@ -40,6 +41,69 @@
/**
* DOC: vblank handling
*
+ * From the computer's perspective, every time the monitor displays
+ * a new frame the scanout engine has "scanned out" the display image
+ * from top to bottom, one row of pixels at a time. The current row
+ * of pixels is referred to as the current scanline.
+ *
+ * In addition to the display's visible area, there are usually a couple of
+ * extra scanlines which aren't actually displayed on the screen.
+ * These extra scanlines don't contain image data and are occasionally used
+ * for features like audio and infoframes. The region made up of these
+ * scanlines is referred to as the vertical blanking region, or vblank for
+ * short.
+ *
+ * For historical reference, the vertical blanking period was designed to
+ * give the electron gun (on CRTs) enough time to move back to the top of
+ * the screen to start scanning out the next frame. Similar for horizontal
+ * blanking periods. They were designed to give the electron gun enough
+ * time to move back to the other side of the screen to start scanning the
+ * next scanline.
+ *
+ * ::
+ *
+ *
+ * physical → ⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽
+ * top of | |
+ * display | |
+ * | New frame |
+ * | |
+ * |↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓|
+ * |~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~| ← Scanline,
+ * |↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓| updates the
+ * | | frame as it
+ * | | travels down
+ * | | ("scan out")
+ * | Old frame |
+ * | |
+ * | |
+ * | |
+ * | | physical
+ * | | bottom of
+ * vertical |⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽| ← display
+ * blanking ┆xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx┆
+ * region → ┆xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx┆
+ * ┆xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx┆
+ * start of → ⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽
+ * new frame
+ *
+ * "Physical top of display" is the reference point for the high-precision/
+ * corrected timestamp.
+ *
+ * On a lot of display hardware, programming needs to take effect during the
+ * vertical blanking period so that settings like gamma, the image buffer
+ * to be scanned out, etc. can safely be changed without showing
+ * any visual artifacts on the screen. In some unforgiving hardware, some of
+ * this programming has to both start and end in the same vblank. To help
+ * with the timing of the hardware programming, an interrupt is usually
+ * available to notify the driver when it can start the updating of registers.
+ * The interrupt is in this context named the vblank interrupt.
+ *
+ * The vblank interrupt may be fired at different points depending on the
+ * hardware. Some hardware implementations will fire the interrupt when the
+ * new frame starts, while other implementations will fire the interrupt at
+ * different points in time.
+ *
* Vertical blanking plays a major role in graphics rendering. To achieve
* tear-free display, users must synchronize page flips and/or rendering to
* vertical blanking. The DRM API offers ioctls to perform page flips
@@ -278,8 +342,8 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
DRM_DEBUG_VBL("updating vblank count on crtc %u:"
" current=%llu, diff=%u, hw=%u hw_last=%u\n",
- pipe, atomic64_read(&vblank->count), diff,
- cur_vblank, vblank->last);
+ pipe, (unsigned long long)atomic64_read(&vblank->count),
+ diff, cur_vblank, vblank->last);
if (diff == 0) {
WARN_ON_ONCE(cur_vblank != vblank->last);
@@ -425,14 +489,10 @@ static void vblank_disable_fn(struct timer_list *t)
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
-void drm_vblank_cleanup(struct drm_device *dev)
+static void drm_vblank_init_release(struct drm_device *dev, void *ptr)
{
unsigned int pipe;
- /* Bail if the driver didn't call drm_vblank_init() */
- if (dev->num_crtcs == 0)
- return;
-
for (pipe = 0; pipe < dev->num_crtcs; pipe++) {
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
@@ -441,10 +501,6 @@ void drm_vblank_cleanup(struct drm_device *dev)
del_timer_sync(&vblank->disable_timer);
}
-
- kfree(dev->vblank);
-
- dev->num_crtcs = 0;
}
/**
@@ -453,25 +509,29 @@ void drm_vblank_cleanup(struct drm_device *dev)
* @num_crtcs: number of CRTCs supported by @dev
*
* This function initializes vblank support for @num_crtcs display pipelines.
- * Cleanup is handled by the DRM core, or through calling drm_dev_fini() for
- * drivers with a &drm_driver.release callback.
+ * Cleanup is handled automatically through a cleanup function added with
+ * drmm_add_action().
*
* Returns:
* Zero on success or a negative error code on failure.
*/
int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs)
{
- int ret = -ENOMEM;
+ int ret;
unsigned int i;
spin_lock_init(&dev->vbl_lock);
spin_lock_init(&dev->vblank_time_lock);
+ dev->vblank = drmm_kcalloc(dev, num_crtcs, sizeof(*dev->vblank), GFP_KERNEL);
+ if (!dev->vblank)
+ return -ENOMEM;
+
dev->num_crtcs = num_crtcs;
- dev->vblank = kcalloc(num_crtcs, sizeof(*dev->vblank), GFP_KERNEL);
- if (!dev->vblank)
- goto err;
+ ret = drmm_add_action(dev, drm_vblank_init_release, NULL);
+ if (ret)
+ return ret;
for (i = 0; i < num_crtcs; i++) {
struct drm_vblank_crtc *vblank = &dev->vblank[i];
@@ -486,10 +546,6 @@ int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs)
DRM_INFO("Supports vblank timestamp caching Rev 2 (21.10.2013).\n");
return 0;
-
-err:
- dev->num_crtcs = 0;
- return ret;
}
EXPORT_SYMBOL(drm_vblank_init);
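An illustrative call site (not part of the patch; foo_init_vblank() is a placeholder): with the managed allocation and release action above, nothing vblank-related needs to be freed on the driver's teardown path anymore.

static int foo_init_vblank(struct drm_device *dev)
{
        int ret;

        ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
        if (ret)
                return ret;

        /* no drm_vblank_cleanup()/kfree() needed on unwind anymore */
        return 0;
}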
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index aa88911bbc06..4391e242356d 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -37,6 +37,7 @@
#include <linux/pci.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
+#include <linux/pgtable.h>
#if defined(__ia64__)
#include <linux/efi.h>
@@ -44,7 +45,6 @@
#endif
#include <linux/mem_encrypt.h>
-#include <asm/pgtable.h>
#include <drm/drm_agpsupport.h>
#include <drm/drm_device.h>
@@ -595,8 +595,8 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
vma->vm_ops = &drm_vm_ops;
break;
}
+ fallthrough; /* to _DRM_FRAME_BUFFER... */
#endif
- /* fall through - to _DRM_FRAME_BUFFER... */
case _DRM_FRAME_BUFFER:
case _DRM_REGISTERS:
offset = drm_core_get_reg_ofs(dev);
@@ -621,7 +621,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
vma->vm_end - vma->vm_start, vma->vm_page_prot))
return -EAGAIN;
vma->vm_page_prot = drm_dma_prot(map->type, vma);
- /* fall through - to _DRM_SHM */
+ fallthrough; /* to _DRM_SHM */
case _DRM_SHM:
vma->vm_ops = &drm_vm_shm_ops;
vma->vm_private_data = (void *)map;
diff --git a/drivers/gpu/drm/drm_vram_helper_common.c b/drivers/gpu/drm/drm_vram_helper_common.c
deleted file mode 100644
index 2000d9b33fd5..000000000000
--- a/drivers/gpu/drm/drm_vram_helper_common.c
+++ /dev/null
@@ -1,94 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#include <linux/module.h>
-
-/**
- * DOC: overview
- *
- * This library provides &struct drm_gem_vram_object (GEM VRAM), a GEM
- * buffer object that is backed by video RAM. It can be used for
- * framebuffer devices with dedicated memory. The video RAM is managed
- * by &struct drm_vram_mm (VRAM MM).
- *
- * With the GEM interface userspace applications create, manage and destroy
- * graphics buffers, such as an on-screen framebuffer. GEM does not provide
- * an implementation of these interfaces. It's up to the DRM driver to
- * provide an implementation that suits the hardware. If the hardware device
- * contains dedicated video memory, the DRM driver can use the VRAM helper
- * library. Each active buffer object is stored in video RAM. Active
- * buffer are used for drawing the current frame, typically something like
- * the frame's scanout buffer or the cursor image. If there's no more space
- * left in VRAM, inactive GEM objects can be moved to system memory.
- *
- * The easiest way to use the VRAM helper library is to call
- * drm_vram_helper_alloc_mm(). The function allocates and initializes an
- * instance of &struct drm_vram_mm in &struct drm_device.vram_mm . Use
- * &DRM_GEM_VRAM_DRIVER to initialize &struct drm_driver and
- * &DRM_VRAM_MM_FILE_OPERATIONS to initialize &struct file_operations;
- * as illustrated below.
- *
- * .. code-block:: c
- *
- * struct file_operations fops ={
- * .owner = THIS_MODULE,
- * DRM_VRAM_MM_FILE_OPERATION
- * };
- * struct drm_driver drv = {
- * .driver_feature = DRM_ ... ,
- * .fops = &fops,
- * DRM_GEM_VRAM_DRIVER
- * };
- *
- * int init_drm_driver()
- * {
- * struct drm_device *dev;
- * uint64_t vram_base;
- * unsigned long vram_size;
- * int ret;
- *
- * // setup device, vram base and size
- * // ...
- *
- * ret = drm_vram_helper_alloc_mm(dev, vram_base, vram_size);
- * if (ret)
- * return ret;
- * return 0;
- * }
- *
- * This creates an instance of &struct drm_vram_mm, exports DRM userspace
- * interfaces for GEM buffer management and initializes file operations to
- * allow for accessing created GEM buffers. With this setup, the DRM driver
- * manages an area of video RAM with VRAM MM and provides GEM VRAM objects
- * to userspace.
- *
- * To clean up the VRAM memory management, call drm_vram_helper_release_mm()
- * in the driver's clean-up code.
- *
- * .. code-block:: c
- *
- * void fini_drm_driver()
- * {
- * struct drm_device *dev = ...;
- *
- * drm_vram_helper_release_mm(dev);
- * }
- *
- * For drawing or scanout operations, buffer object have to be pinned in video
- * RAM. Call drm_gem_vram_pin() with &DRM_GEM_VRAM_PL_FLAG_VRAM or
- * &DRM_GEM_VRAM_PL_FLAG_SYSTEM to pin a buffer object in video RAM or system
- * memory. Call drm_gem_vram_unpin() to release the pinned object afterwards.
- *
- * A buffer object that is pinned in video RAM has a fixed address within that
- * memory region. Call drm_gem_vram_offset() to retrieve this value. Typically
- * it's used to program the hardware's scanout engine for framebuffers, set
- * the cursor overlay's image for a mouse cursor, or use it as input to the
- * hardware's drawing engine.
- *
- * To access a buffer object's memory from the DRM driver, call
- * drm_gem_vram_kmap(). It (optionally) maps the buffer into kernel address
- * space and returns the memory address. Use drm_gem_vram_kunmap() to
- * release the mapping.
- */
-
-MODULE_DESCRIPTION("DRM VRAM memory-management helpers");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index a8685b2e1803..f9afe11c50f0 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -231,21 +231,11 @@ static struct drm_info_list etnaviv_debugfs_list[] = {
{"ring", show_each_gpu, 0, etnaviv_ring_show},
};
-static int etnaviv_debugfs_init(struct drm_minor *minor)
+static void etnaviv_debugfs_init(struct drm_minor *minor)
{
- struct drm_device *dev = minor->dev;
- int ret;
-
- ret = drm_debugfs_create_files(etnaviv_debugfs_list,
- ARRAY_SIZE(etnaviv_debugfs_list),
- minor->debugfs_root, minor);
-
- if (ret) {
- dev_err(dev->dev, "could not install etnaviv_debugfs_list\n");
- return ret;
- }
-
- return ret;
+ drm_debugfs_create_files(etnaviv_debugfs_list,
+ ARRAY_SIZE(etnaviv_debugfs_list),
+ minor->debugfs_root, minor);
}
#endif
@@ -736,7 +726,7 @@ static void __exit etnaviv_exit(void)
module_exit(etnaviv_exit);
MODULE_AUTHOR("Christian Gmeiner <christian.gmeiner@gmail.com>");
-MODULE_AUTHOR("Russell King <rmk+kernel@arm.linux.org.uk>");
+MODULE_AUTHOR("Russell King <rmk+kernel@armlinux.org.uk>");
MODULE_AUTHOR("Lucas Stach <l.stach@pengutronix.de>");
MODULE_DESCRIPTION("etnaviv DRM Driver");
MODULE_LICENSE("GPL v2");
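With drm_debugfs_create_files() no longer returning a value that needs checking, the ->debugfs_init hook becomes a void callback, as the hunk above shows. A sketch of how such a hook is wired into struct drm_driver, reusing the etnaviv names from the hunk (the surrounding driver fields are illustrative only):

    #if defined(CONFIG_DEBUG_FS)
    static void etnaviv_debugfs_init(struct drm_minor *minor)
    {
            drm_debugfs_create_files(etnaviv_debugfs_list,
                                     ARRAY_SIZE(etnaviv_debugfs_list),
                                     minor->debugfs_root, minor);
    }
    #endif

    static struct drm_driver example_driver = {
            .driver_features = DRIVER_GEM | DRIVER_RENDER,
    #if defined(CONFIG_DEBUG_FS)
            .debugfs_init = etnaviv_debugfs_init,
    #endif
            /* ... */
    };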
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
index 648cf0207309..706af0304ca4 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_dump.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
@@ -154,8 +154,8 @@ void etnaviv_core_dump(struct etnaviv_gem_submit *submit)
file_size += sizeof(*iter.hdr) * n_obj;
/* Allocate the file in vmalloc memory, it's likely to be big */
- iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
- PAGE_KERNEL);
+ iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN |
+ __GFP_NORETRY);
if (!iter.start) {
mutex_unlock(&gpu->mmu_context->lock);
dev_warn(gpu->dev, "failed to allocate devcoredump file\n");
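The hunk above reflects the removal of the pgprot argument from __vmalloc(): allocations are now always PAGE_KERNEL, so callers pass only the size and GFP flags. A hedged sketch of the updated call pattern (file_size is a placeholder):

    void *buf;

    buf = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
    if (!buf)
            return;         /* allocation failure is non-fatal for a coredump */
    /* ... fill the buffer ... */
    vfree(buf);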
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index dc9ef302f517..701f3995f621 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -661,7 +661,7 @@ static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
struct etnaviv_gem_userptr *userptr = &etnaviv_obj->userptr;
int ret, pinned = 0, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
- might_lock_read(&current->mm->mmap_sem);
+ might_lock_read(&current->mm->mmap_lock);
if (userptr->mm != current->mm)
return -EPERM;
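mmap_sem was renamed to mmap_lock in the same release, together with a wrapper API, so annotations such as might_lock_read() now reference the new field. A small sketch of the accessor style, assuming the 5.8 mmap locking helpers from <linux/mmap_lock.h>:

    mmap_read_lock(current->mm);
    /* ... look up VMAs or pin user pages under the read lock ... */
    mmap_read_unlock(current->mm);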
diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c
index 5ee090691390..9ac51b6ab34b 100644
--- a/drivers/gpu/drm/exynos/exynos_dp.c
+++ b/drivers/gpu/drm/exynos/exynos_dp.c
@@ -25,6 +25,7 @@
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_crtc.h"
@@ -135,10 +136,6 @@ static const struct drm_encoder_helper_funcs exynos_dp_encoder_helper_funcs = {
.disable = exynos_dp_nop,
};
-static const struct drm_encoder_funcs exynos_dp_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static int exynos_dp_dt_parse_panel(struct exynos_dp_device *dp)
{
int ret;
@@ -167,8 +164,7 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
return ret;
}
- drm_encoder_init(drm_dev, encoder, &exynos_dp_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
+ drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(encoder, &exynos_dp_encoder_helper_funcs);
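This hunk, and many of the following ones (exynos, gma500, hisilicon, fsl-dcu, tda998x), drop per-driver drm_encoder_funcs whose only callback was drm_encoder_cleanup() and call drm_simple_encoder_init() from <drm/drm_simple_kms_helper.h>, which supplies that default destroy hook. A sketch of the before/after, with foo_encoder_funcs as a placeholder name:

    /* before: boilerplate funcs struct just to call drm_encoder_cleanup() */
    static const struct drm_encoder_funcs foo_encoder_funcs = {
            .destroy = drm_encoder_cleanup,
    };

    ret = drm_encoder_init(drm_dev, encoder, &foo_encoder_funcs,
                           DRM_MODE_ENCODER_TMDS, NULL);

    /* after: the simple-KMS helper provides the default destroy callback */
    ret = drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_TMDS);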
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
index 43fa0f26c052..7ba5354e7d94 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
@@ -14,6 +14,7 @@
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include <video/of_videomode.h>
#include <video/videomode.h>
@@ -149,10 +150,6 @@ static const struct drm_encoder_helper_funcs exynos_dpi_encoder_helper_funcs = {
.disable = exynos_dpi_disable,
};
-static const struct drm_encoder_funcs exynos_dpi_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
enum {
FIMD_PORT_IN0,
FIMD_PORT_IN1,
@@ -201,8 +198,7 @@ int exynos_dpi_bind(struct drm_device *dev, struct drm_encoder *encoder)
{
int ret;
- drm_encoder_init(dev, encoder, &exynos_dpi_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
+ drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(encoder, &exynos_dpi_encoder_helper_funcs);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 57defeb44522..dbd80f1e4c78 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -76,7 +76,6 @@ static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
}
static const struct vm_operations_struct exynos_drm_gem_vm_ops = {
- .fault = exynos_drm_gem_fault,
.open = drm_gem_vm_open,
.close = drm_gem_vm_close,
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index e080aa92338c..ee96a95fb6be 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -30,6 +30,7 @@
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "exynos_drm_crtc.h"
#include "exynos_drm_drv.h"
@@ -211,7 +212,7 @@
#define OLD_SCLK_MIPI_CLK_NAME "pll_clk"
-static char *clk_names[5] = { "bus_clk", "sclk_mipi",
+static const char *const clk_names[5] = { "bus_clk", "sclk_mipi",
"phyclk_mipidphy0_bitclkdiv8", "phyclk_mipidphy0_rxclkesc0",
"sclk_rgb_vclk_to_dsim0" };
@@ -1523,10 +1524,6 @@ static const struct drm_encoder_helper_funcs exynos_dsi_encoder_helper_funcs = {
.disable = exynos_dsi_disable,
};
-static const struct drm_encoder_funcs exynos_dsi_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
MODULE_DEVICE_TABLE(of, exynos_dsi_of_match);
static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
@@ -1704,8 +1701,7 @@ static int exynos_dsi_bind(struct device *dev, struct device *master,
struct drm_bridge *in_bridge;
int ret;
- drm_encoder_init(drm_dev, encoder, &exynos_dsi_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
+ drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(encoder, &exynos_dsi_encoder_helper_funcs);
@@ -1763,10 +1759,6 @@ static int exynos_dsi_probe(struct platform_device *pdev)
dsi->dev = dev;
dsi->driver_data = of_device_get_match_data(dev);
- ret = exynos_dsi_parse_dt(dsi);
- if (ret)
- return ret;
-
dsi->supplies[0].supply = "vddcore";
dsi->supplies[1].supply = "vddio";
ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(dsi->supplies),
@@ -1813,10 +1805,8 @@ static int exynos_dsi_probe(struct platform_device *pdev)
}
dsi->irq = platform_get_irq(pdev, 0);
- if (dsi->irq < 0) {
- dev_err(dev, "failed to request dsi irq resource\n");
+ if (dsi->irq < 0)
return dsi->irq;
- }
irq_set_status_flags(dsi->irq, IRQ_NOAUTOEN);
ret = devm_request_threaded_irq(dev, dsi->irq, NULL,
@@ -1827,11 +1817,25 @@ static int exynos_dsi_probe(struct platform_device *pdev)
return ret;
}
+ ret = exynos_dsi_parse_dt(dsi);
+ if (ret)
+ return ret;
+
platform_set_drvdata(pdev, &dsi->encoder);
pm_runtime_enable(dev);
- return component_add(dev, &exynos_dsi_component_ops);
+ ret = component_add(dev, &exynos_dsi_component_ops);
+ if (ret)
+ goto err_disable_runtime;
+
+ return 0;
+
+err_disable_runtime:
+ pm_runtime_disable(dev);
+ of_node_put(dsi->in_bridge_node);
+
+ return ret;
}
static int exynos_dsi_remove(struct platform_device *pdev)
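The probe reordering above gives exynos_dsi_probe() a clean unwind path: device-tree parsing (which takes a reference on the bridge node) happens after the IRQ is set up, and a failing component_add() now disables runtime PM and drops that reference instead of leaking them. A sketch of the pattern under those assumptions, with foo_* names as placeholders:

    pm_runtime_enable(dev);

    ret = component_add(dev, &foo_component_ops);
    if (ret)
            goto err_disable_runtime;

    return 0;

    err_disable_runtime:
    pm_runtime_disable(dev);
    of_node_put(foo->bridge_node);
    return ret;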
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index e6ceaf36fb04..56a2b47e1af7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -76,7 +76,6 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
struct fb_info *fbi;
struct drm_framebuffer *fb = helper->fb;
unsigned int size = fb->width * fb->height * fb->format->cpp[0];
- unsigned int nr_pages;
unsigned long offset;
fbi = drm_fb_helper_alloc_fbi(helper);
@@ -90,16 +89,6 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
drm_fb_helper_fill_info(fbi, helper, sizes);
- nr_pages = exynos_gem->size >> PAGE_SHIFT;
-
- exynos_gem->kvaddr = (void __iomem *) vmap(exynos_gem->pages, nr_pages,
- VM_MAP, pgprot_writecombine(PAGE_KERNEL));
- if (!exynos_gem->kvaddr) {
- DRM_DEV_ERROR(to_dma_dev(helper->dev),
- "failed to map pages to kernel space.\n");
- return -EIO;
- }
-
offset = fbi->var.xoffset * fb->format->cpp[0];
offset += fbi->var.yoffset * fb->pitches[0];
@@ -133,18 +122,7 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
size = mode_cmd.pitches[0] * mode_cmd.height;
- exynos_gem = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG, size);
- /*
- * If physically contiguous memory allocation fails and if IOMMU is
- * supported then try to get buffer from non physically contiguous
- * memory area.
- */
- if (IS_ERR(exynos_gem) && is_drm_iommu_supported(dev)) {
- dev_warn(dev->dev, "contiguous FB allocation failed, falling back to non-contiguous\n");
- exynos_gem = exynos_drm_gem_create(dev, EXYNOS_BO_NONCONTIG,
- size);
- }
-
+ exynos_gem = exynos_drm_gem_create(dev, EXYNOS_BO_WC, size, true);
if (IS_ERR(exynos_gem))
return PTR_ERR(exynos_gem);
@@ -229,12 +207,8 @@ err_init:
static void exynos_drm_fbdev_destroy(struct drm_device *dev,
struct drm_fb_helper *fb_helper)
{
- struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(fb_helper);
- struct exynos_drm_gem *exynos_gem = exynos_fbd->exynos_gem;
struct drm_framebuffer *fb;
- vunmap(exynos_gem->kvaddr);
-
/* release drm framebuffer and real buffer */
if (fb_helper->fb && fb_helper->fb->funcs) {
fb = fb_helper->fb;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index d734d9d51762..0df57ee34144 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -17,28 +17,23 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
-static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
+static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem, bool kvmap)
{
struct drm_device *dev = exynos_gem->base.dev;
- unsigned long attr;
- unsigned int nr_pages;
- struct sg_table sgt;
- int ret = -ENOMEM;
+ unsigned long attr = 0;
if (exynos_gem->dma_addr) {
DRM_DEV_DEBUG_KMS(to_dma_dev(dev), "already allocated.\n");
return 0;
}
- exynos_gem->dma_attrs = 0;
-
/*
* if EXYNOS_BO_CONTIG, fully physically contiguous memory
* region will be allocated else physically contiguous
* as possible.
*/
if (!(exynos_gem->flags & EXYNOS_BO_NONCONTIG))
- exynos_gem->dma_attrs |= DMA_ATTR_FORCE_CONTIGUOUS;
+ attr |= DMA_ATTR_FORCE_CONTIGUOUS;
/*
* if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, writecombine mapping
@@ -46,61 +41,29 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
*/
if (exynos_gem->flags & EXYNOS_BO_WC ||
!(exynos_gem->flags & EXYNOS_BO_CACHABLE))
- attr = DMA_ATTR_WRITE_COMBINE;
+ attr |= DMA_ATTR_WRITE_COMBINE;
else
- attr = DMA_ATTR_NON_CONSISTENT;
-
- exynos_gem->dma_attrs |= attr;
- exynos_gem->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
+ attr |= DMA_ATTR_NON_CONSISTENT;
- nr_pages = exynos_gem->size >> PAGE_SHIFT;
-
- exynos_gem->pages = kvmalloc_array(nr_pages, sizeof(struct page *),
- GFP_KERNEL | __GFP_ZERO);
- if (!exynos_gem->pages) {
- DRM_DEV_ERROR(to_dma_dev(dev), "failed to allocate pages.\n");
- return -ENOMEM;
- }
+ /* FBDev emulation requires kernel mapping */
+ if (!kvmap)
+ attr |= DMA_ATTR_NO_KERNEL_MAPPING;
+ exynos_gem->dma_attrs = attr;
exynos_gem->cookie = dma_alloc_attrs(to_dma_dev(dev), exynos_gem->size,
&exynos_gem->dma_addr, GFP_KERNEL,
exynos_gem->dma_attrs);
if (!exynos_gem->cookie) {
DRM_DEV_ERROR(to_dma_dev(dev), "failed to allocate buffer.\n");
- goto err_free;
- }
-
- ret = dma_get_sgtable_attrs(to_dma_dev(dev), &sgt, exynos_gem->cookie,
- exynos_gem->dma_addr, exynos_gem->size,
- exynos_gem->dma_attrs);
- if (ret < 0) {
- DRM_DEV_ERROR(to_dma_dev(dev), "failed to get sgtable.\n");
- goto err_dma_free;
- }
-
- if (drm_prime_sg_to_page_addr_arrays(&sgt, exynos_gem->pages, NULL,
- nr_pages)) {
- DRM_DEV_ERROR(to_dma_dev(dev), "invalid sgtable.\n");
- ret = -EINVAL;
- goto err_sgt_free;
+ return -ENOMEM;
}
- sg_free_table(&sgt);
+ if (kvmap)
+ exynos_gem->kvaddr = exynos_gem->cookie;
DRM_DEV_DEBUG_KMS(to_dma_dev(dev), "dma_addr(0x%lx), size(0x%lx)\n",
(unsigned long)exynos_gem->dma_addr, exynos_gem->size);
-
return 0;
-
-err_sgt_free:
- sg_free_table(&sgt);
-err_dma_free:
- dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
- exynos_gem->dma_addr, exynos_gem->dma_attrs);
-err_free:
- kvfree(exynos_gem->pages);
-
- return ret;
}
static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem)
@@ -118,8 +81,6 @@ static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem)
dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
(dma_addr_t)exynos_gem->dma_addr,
exynos_gem->dma_attrs);
-
- kvfree(exynos_gem->pages);
}
static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
@@ -203,7 +164,8 @@ static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
unsigned int flags,
- unsigned long size)
+ unsigned long size,
+ bool kvmap)
{
struct exynos_drm_gem *exynos_gem;
int ret;
@@ -237,7 +199,7 @@ struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
/* set memory type and cache attribute from user side. */
exynos_gem->flags = flags;
- ret = exynos_drm_alloc_buf(exynos_gem);
+ ret = exynos_drm_alloc_buf(exynos_gem, kvmap);
if (ret < 0) {
drm_gem_object_release(&exynos_gem->base);
kfree(exynos_gem);
@@ -254,7 +216,7 @@ int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
struct exynos_drm_gem *exynos_gem;
int ret;
- exynos_gem = exynos_drm_gem_create(dev, args->flags, args->size);
+ exynos_gem = exynos_drm_gem_create(dev, args->flags, args->size, false);
if (IS_ERR(exynos_gem))
return PTR_ERR(exynos_gem);
@@ -365,7 +327,7 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
else
flags = EXYNOS_BO_CONTIG | EXYNOS_BO_WC;
- exynos_gem = exynos_drm_gem_create(dev, flags, args->size);
+ exynos_gem = exynos_drm_gem_create(dev, flags, args->size, false);
if (IS_ERR(exynos_gem)) {
dev_warn(dev->dev, "FB allocation failed.\n");
return PTR_ERR(exynos_gem);
@@ -381,26 +343,6 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
return 0;
}
-vm_fault_t exynos_drm_gem_fault(struct vm_fault *vmf)
-{
- struct vm_area_struct *vma = vmf->vma;
- struct drm_gem_object *obj = vma->vm_private_data;
- struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
- unsigned long pfn;
- pgoff_t page_offset;
-
- page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
-
- if (page_offset >= (exynos_gem->size >> PAGE_SHIFT)) {
- DRM_ERROR("invalid page offset\n");
- return VM_FAULT_SIGBUS;
- }
-
- pfn = page_to_pfn(exynos_gem->pages[page_offset]);
- return vmf_insert_mixed(vma, vmf->address,
- __pfn_to_pfn_t(pfn, PFN_DEV));
-}
-
static int exynos_drm_gem_mmap_obj(struct drm_gem_object *obj,
struct vm_area_struct *vma)
{
@@ -462,11 +404,24 @@ struct drm_gem_object *exynos_drm_gem_prime_import(struct drm_device *dev,
struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
- int npages;
+ struct drm_device *drm_dev = obj->dev;
+ struct sg_table *sgt;
+ int ret;
- npages = exynos_gem->size >> PAGE_SHIFT;
+ sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt)
+ return ERR_PTR(-ENOMEM);
- return drm_prime_pages_to_sg(exynos_gem->pages, npages);
+ ret = dma_get_sgtable_attrs(to_dma_dev(drm_dev), sgt, exynos_gem->cookie,
+ exynos_gem->dma_addr, exynos_gem->size,
+ exynos_gem->dma_attrs);
+ if (ret) {
+ DRM_ERROR("failed to get sgtable, %d\n", ret);
+ kfree(sgt);
+ return ERR_PTR(ret);
+ }
+
+ return sgt;
}
struct drm_gem_object *
@@ -475,52 +430,47 @@ exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
struct sg_table *sgt)
{
struct exynos_drm_gem *exynos_gem;
- int npages;
- int ret;
-
- exynos_gem = exynos_drm_gem_init(dev, attach->dmabuf->size);
- if (IS_ERR(exynos_gem)) {
- ret = PTR_ERR(exynos_gem);
- return ERR_PTR(ret);
- }
- exynos_gem->dma_addr = sg_dma_address(sgt->sgl);
+ if (sgt->nents < 1)
+ return ERR_PTR(-EINVAL);
- npages = exynos_gem->size >> PAGE_SHIFT;
- exynos_gem->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
- if (!exynos_gem->pages) {
- ret = -ENOMEM;
- goto err;
+ /*
+ * Check if the provided buffer has been mapped as contiguous
+ * into DMA address space.
+ */
+ if (sgt->nents > 1) {
+ dma_addr_t next_addr = sg_dma_address(sgt->sgl);
+ struct scatterlist *s;
+ unsigned int i;
+
+ for_each_sg(sgt->sgl, s, sgt->nents, i) {
+ if (!sg_dma_len(s))
+ break;
+ if (sg_dma_address(s) != next_addr) {
+ DRM_ERROR("buffer chunks must be mapped contiguously");
+ return ERR_PTR(-EINVAL);
+ }
+ next_addr = sg_dma_address(s) + sg_dma_len(s);
+ }
}
- ret = drm_prime_sg_to_page_addr_arrays(sgt, exynos_gem->pages, NULL,
- npages);
- if (ret < 0)
- goto err_free_large;
-
- exynos_gem->sgt = sgt;
+ exynos_gem = exynos_drm_gem_init(dev, attach->dmabuf->size);
+ if (IS_ERR(exynos_gem))
+ return ERR_CAST(exynos_gem);
- if (sgt->nents == 1) {
- /* always physically continuous memory if sgt->nents is 1. */
- exynos_gem->flags |= EXYNOS_BO_CONTIG;
- } else {
- /*
- * this case could be CONTIG or NONCONTIG type but for now
- * sets NONCONTIG.
- * TODO. we have to find a way that exporter can notify
- * the type of its own buffer to importer.
- */
+ /*
+ * Buffer has been mapped as contiguous into DMA address space,
+ * but if there is IOMMU, it can be either CONTIG or NONCONTIG.
+ * We assume a simplified logic below:
+ */
+ if (is_drm_iommu_supported(dev))
exynos_gem->flags |= EXYNOS_BO_NONCONTIG;
- }
+ else
+ exynos_gem->flags |= EXYNOS_BO_CONTIG;
+ exynos_gem->dma_addr = sg_dma_address(sgt->sgl);
+ exynos_gem->sgt = sgt;
return &exynos_gem->base;
-
-err_free_large:
- kvfree(exynos_gem->pages);
-err:
- drm_gem_object_release(&exynos_gem->base);
- kfree(exynos_gem);
- return ERR_PTR(ret);
}
void *exynos_drm_gem_prime_vmap(struct drm_gem_object *obj)
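The exynos GEM rework above removes the driver's private pages[] array: buffers now come straight from dma_alloc_attrs(), with DMA_ATTR_NO_KERNEL_MAPPING set unless fbdev emulation needs a kernel view, and the PRIME sg_table is rebuilt on demand from the DMA cookie via dma_get_sgtable_attrs(). A hedged sketch of the allocation side (need_kernel_mapping and dma_dev are placeholders):

    unsigned long attrs = DMA_ATTR_WRITE_COMBINE;
    dma_addr_t dma_addr;
    void *cookie;

    if (!need_kernel_mapping)
            attrs |= DMA_ATTR_NO_KERNEL_MAPPING;

    cookie = dma_alloc_attrs(dma_dev, size, &dma_addr, GFP_KERNEL, attrs);
    if (!cookie)
            return -ENOMEM;

    /*
     * With DMA_ATTR_NO_KERNEL_MAPPING the cookie is opaque; without it,
     * the cookie doubles as the kernel virtual address (used as kvaddr
     * for fbdev in the hunk above).
     */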
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 42ec67bc262d..6ef001f890aa 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -21,20 +21,15 @@
* @base: a gem object.
* - a new handle to this gem object would be created
* by drm_gem_handle_create().
- * @buffer: a pointer to exynos_drm_gem_buffer object.
- * - contain the information to memory region allocated
- * by user request or at framebuffer creation.
- * continuous memory region allocated by user request
- * or at framebuffer creation.
 * @flags: indicate memory type to allocated buffer and cache attribute.
* @size: size requested from user, in bytes and this size is aligned
* in page unit.
* @cookie: cookie returned by dma_alloc_attrs
- * @kvaddr: kernel virtual address to allocated memory region.
+ * @kvaddr: kernel virtual address to allocated memory region (for fbdev)
* @dma_addr: bus address(accessed by dma) to allocated memory region.
* - this address could be physical address without IOMMU and
* device address with IOMMU.
- * @pages: Array of backing pages.
+ * @dma_attrs: attrs passed dma mapping framework
* @sgt: Imported sg_table.
*
* P.S. this object would be transferred to user as kms_bo.handle so
@@ -48,7 +43,6 @@ struct exynos_drm_gem {
void __iomem *kvaddr;
dma_addr_t dma_addr;
unsigned long dma_attrs;
- struct page **pages;
struct sg_table *sgt;
};
@@ -58,7 +52,8 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem *exynos_gem);
/* create a new buffer with gem object */
struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
unsigned int flags,
- unsigned long size);
+ unsigned long size,
+ bool kvmap);
/*
* request gem object creation and buffer allocation as the size
@@ -101,9 +96,6 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
-/* page fault handler and mmap fault address(virtual) to physical memory. */
-vm_fault_t exynos_drm_gem_fault(struct vm_fault *vmf);
-
/* set vm_flags and we can change the vm attribute to other one at here. */
int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c
index f41d75923557..a86abc173605 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_mic.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c
@@ -88,7 +88,7 @@
#define MIC_BS_SIZE_2D(x) ((x) & 0x3fff)
-static char *clk_names[] = { "pclk_mic0", "sclk_rgb_vclk_to_mic0" };
+static const char *const clk_names[] = { "pclk_mic0", "sclk_rgb_vclk_to_mic0" };
#define NUM_CLKS ARRAY_SIZE(clk_names)
static DEFINE_MUTEX(mic_mutex);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index dafa87b82052..2d94afba031e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -293,10 +293,8 @@ static int rotator_probe(struct platform_device *pdev)
return PTR_ERR(rot->regs);
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "failed to get irq\n");
+ if (irq < 0)
return irq;
- }
ret = devm_request_irq(dev, irq, rotator_irq_handler, 0, dev_name(dev),
rot);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
index 93c43c8d914e..ce1857138f89 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
@@ -502,10 +502,8 @@ static int scaler_probe(struct platform_device *pdev)
return PTR_ERR(scaler->regs);
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "failed to get irq\n");
+ if (irq < 0)
return irq;
- }
ret = devm_request_threaded_irq(dev, irq, NULL, scaler_irq_handler,
IRQF_ONESHOT, "drm_scaler", scaler);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index b320b3a21ad4..e5662bdcbbde 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -14,6 +14,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_vblank.h>
#include <drm/exynos_drm.h>
@@ -213,6 +214,12 @@ static ssize_t vidi_store_connection(struct device *dev,
static DEVICE_ATTR(connection, 0644, vidi_show_connection,
vidi_store_connection);
+static struct attribute *vidi_attrs[] = {
+ &dev_attr_connection.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(vidi);
+
int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
struct drm_file *file_priv)
{
@@ -369,10 +376,6 @@ static const struct drm_encoder_helper_funcs exynos_vidi_encoder_helper_funcs =
.disable = exynos_vidi_disable,
};
-static const struct drm_encoder_funcs exynos_vidi_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static int vidi_bind(struct device *dev, struct device *master, void *data)
{
struct vidi_context *ctx = dev_get_drvdata(dev);
@@ -406,8 +409,7 @@ static int vidi_bind(struct device *dev, struct device *master, void *data)
return PTR_ERR(ctx->crtc);
}
- drm_encoder_init(drm_dev, encoder, &exynos_vidi_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
+ drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(encoder, &exynos_vidi_encoder_helper_funcs);
@@ -443,7 +445,6 @@ static int vidi_probe(struct platform_device *pdev)
{
struct vidi_context *ctx;
struct device *dev = &pdev->dev;
- int ret;
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
@@ -457,23 +458,7 @@ static int vidi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ctx);
- ret = device_create_file(dev, &dev_attr_connection);
- if (ret < 0) {
- DRM_DEV_ERROR(dev,
- "failed to create connection sysfs.\n");
- return ret;
- }
-
- ret = component_add(dev, &vidi_component_ops);
- if (ret)
- goto err_remove_file;
-
- return ret;
-
-err_remove_file:
- device_remove_file(dev, &dev_attr_connection);
-
- return ret;
+ return component_add(dev, &vidi_component_ops);
}
static int vidi_remove(struct platform_device *pdev)
@@ -498,5 +483,6 @@ struct platform_driver vidi_driver = {
.driver = {
.name = "exynos-drm-vidi",
.owner = THIS_MODULE,
+ .dev_groups = vidi_groups,
},
};
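The vidi changes replace a manual device_create_file() call in probe with driver-core managed sysfs groups: ATTRIBUTE_GROUPS(vidi) generates vidi_groups from vidi_attrs, and .dev_groups lets the core create and remove the files around probe/remove. Roughly, the macro expands to something like the sketch below (simplified from <linux/sysfs.h>):

    static const struct attribute_group vidi_group = {
            .attrs = vidi_attrs,
    };

    static const struct attribute_group *vidi_groups[] = {
            &vidi_group,
            NULL,
    };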
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 1a7c828fc41d..95dd399aa9cc 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -38,6 +38,7 @@
#include <drm/drm_edid.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "exynos_drm_crtc.h"
#include "regs-hdmi.h"
@@ -1559,10 +1560,6 @@ static const struct drm_encoder_helper_funcs exynos_hdmi_encoder_helper_funcs =
.disable = hdmi_disable,
};
-static const struct drm_encoder_funcs exynos_hdmi_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static void hdmi_audio_shutdown(struct device *dev, void *data)
{
struct hdmi_context *hdata = dev_get_drvdata(dev);
@@ -1843,8 +1840,7 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
hdata->phy_clk.enable = hdmiphy_clk_enable;
- drm_encoder_init(drm_dev, encoder, &exynos_hdmi_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
+ drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(encoder, &exynos_hdmi_encoder_helper_funcs);
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 21b726baedea..c7e2e2ebc327 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -1244,9 +1244,11 @@ static int mixer_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ctx);
+ pm_runtime_enable(dev);
+
ret = component_add(&pdev->dev, &mixer_component_ops);
- if (!ret)
- pm_runtime_enable(dev);
+ if (ret)
+ pm_runtime_disable(dev);
return ret;
}
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
index cff344367f81..9b0c4736c21a 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
@@ -13,19 +13,11 @@
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "fsl_dcu_drm_drv.h"
#include "fsl_tcon.h"
-static void fsl_dcu_drm_encoder_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
-}
-
-static const struct drm_encoder_funcs encoder_funcs = {
- .destroy = fsl_dcu_drm_encoder_destroy,
-};
-
int fsl_dcu_drm_encoder_create(struct fsl_dcu_drm_device *fsl_dev,
struct drm_crtc *crtc)
{
@@ -38,8 +30,8 @@ int fsl_dcu_drm_encoder_create(struct fsl_dcu_drm_device *fsl_dev,
if (fsl_dev->tcon)
fsl_tcon_bypass_enable(fsl_dev->tcon);
- ret = drm_encoder_init(fsl_dev->drm, encoder, &encoder_funcs,
- DRM_MODE_ENCODER_LVDS, NULL);
+ ret = drm_simple_encoder_init(fsl_dev->drm, encoder,
+ DRM_MODE_ENCODER_LVDS);
if (ret < 0)
return ret;
diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c
index 29c36d63b20e..88535f5aacc5 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_crt.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_crt.c
@@ -28,6 +28,8 @@
#include <linux/i2c.h>
#include <linux/pm_runtime.h>
+#include <drm/drm_simple_kms_helper.h>
+
#include "cdv_device.h"
#include "intel_bios.h"
#include "power.h"
@@ -237,15 +239,6 @@ static const struct drm_connector_helper_funcs
.best_encoder = gma_best_encoder,
};
-static void cdv_intel_crt_enc_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
-}
-
-static const struct drm_encoder_funcs cdv_intel_crt_enc_funcs = {
- .destroy = cdv_intel_crt_enc_destroy,
-};
-
void cdv_intel_crt_init(struct drm_device *dev,
struct psb_intel_mode_device *mode_dev)
{
@@ -271,8 +264,7 @@ void cdv_intel_crt_init(struct drm_device *dev,
&cdv_intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
encoder = &gma_encoder->base;
- drm_encoder_init(dev, encoder,
- &cdv_intel_crt_enc_funcs, DRM_MODE_ENCODER_DAC, NULL);
+ drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_DAC);
gma_connector_attach_encoder(gma_connector, gma_encoder);
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index 5772b2dce0d6..f41cbb753bb4 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -32,6 +32,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_dp_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "gma_display.h"
#include "psb_drv.h"
@@ -1271,37 +1272,8 @@ cdv_intel_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_SIZ
return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
}
-
-#if 0
-static char *voltage_names[] = {
- "0.4V", "0.6V", "0.8V", "1.2V"
-};
-static char *pre_emph_names[] = {
- "0dB", "3.5dB", "6dB", "9.5dB"
-};
-static char *link_train_names[] = {
- "pattern 1", "pattern 2", "idle", "off"
-};
-#endif
-
#define CDV_DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_LEVEL_3
-/*
-static uint8_t
-cdv_intel_dp_pre_emphasis_max(uint8_t voltage_swing)
-{
- switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
- case DP_TRAIN_VOLTAGE_SWING_400:
- return DP_TRAIN_PRE_EMPHASIS_6;
- case DP_TRAIN_VOLTAGE_SWING_600:
- return DP_TRAIN_PRE_EMPHASIS_6;
- case DP_TRAIN_VOLTAGE_SWING_800:
- return DP_TRAIN_PRE_EMPHASIS_3_5;
- case DP_TRAIN_VOLTAGE_SWING_1200:
- default:
- return DP_TRAIN_PRE_EMPHASIS_0;
- }
-}
-*/
+
static void
cdv_intel_get_adjust_train(struct gma_encoder *encoder)
{
@@ -1908,11 +1880,6 @@ cdv_intel_dp_destroy(struct drm_connector *connector)
kfree(connector);
}
-static void cdv_intel_dp_encoder_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
-}
-
static const struct drm_encoder_helper_funcs cdv_intel_dp_helper_funcs = {
.dpms = cdv_intel_dp_dpms,
.mode_fixup = cdv_intel_dp_mode_fixup,
@@ -1935,11 +1902,6 @@ static const struct drm_connector_helper_funcs cdv_intel_dp_connector_helper_fun
.best_encoder = gma_best_encoder,
};
-static const struct drm_encoder_funcs cdv_intel_dp_enc_funcs = {
- .destroy = cdv_intel_dp_encoder_destroy,
-};
-
-
static void cdv_intel_dp_add_properties(struct drm_connector *connector)
{
cdv_intel_attach_force_audio_property(connector);
@@ -2016,8 +1978,7 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev
encoder = &gma_encoder->base;
drm_connector_init(dev, connector, &cdv_intel_dp_connector_funcs, type);
- drm_encoder_init(dev, encoder, &cdv_intel_dp_enc_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
+ drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_TMDS);
gma_connector_attach_encoder(gma_connector, gma_encoder);
@@ -2120,7 +2081,7 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev
if (ret == 0) {
/* if this fails, presume the device is a ghost */
DRM_INFO("failed to retrieve link info, disabling eDP\n");
- cdv_intel_dp_encoder_destroy(encoder);
+ drm_encoder_cleanup(encoder);
cdv_intel_dp_destroy(connector);
goto err_priv;
} else {
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index 1711a41acc16..0d12c6ffbc40 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -32,6 +32,7 @@
#include <drm/drm.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
+#include <drm/drm_simple_kms_helper.h>
#include "cdv_device.h"
#include "psb_drv.h"
@@ -311,8 +312,7 @@ void cdv_hdmi_init(struct drm_device *dev,
&cdv_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_DVID);
- drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
+ drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_TMDS);
gma_connector_attach_encoder(gma_connector, gma_encoder);
gma_encoder->type = INTEL_OUTPUT_HDMI;
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index ea0a5d9a0acc..eaaf4efec217 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -12,6 +12,8 @@
#include <linux/i2c.h>
#include <linux/pm_runtime.h>
+#include <drm/drm_simple_kms_helper.h>
+
#include "cdv_device.h"
#include "intel_bios.h"
#include "power.h"
@@ -72,89 +74,6 @@ static u32 cdv_intel_lvds_get_max_backlight(struct drm_device *dev)
return retval;
}
-#if 0
-/*
- * Set LVDS backlight level by I2C command
- */
-static int cdv_lvds_i2c_set_brightness(struct drm_device *dev,
- unsigned int level)
-{
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct psb_intel_i2c_chan *lvds_i2c_bus = dev_priv->lvds_i2c_bus;
- u8 out_buf[2];
- unsigned int blc_i2c_brightness;
-
- struct i2c_msg msgs[] = {
- {
- .addr = lvds_i2c_bus->slave_addr,
- .flags = 0,
- .len = 2,
- .buf = out_buf,
- }
- };
-
- blc_i2c_brightness = BRIGHTNESS_MASK & ((unsigned int)level *
- BRIGHTNESS_MASK /
- BRIGHTNESS_MAX_LEVEL);
-
- if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE)
- blc_i2c_brightness = BRIGHTNESS_MASK - blc_i2c_brightness;
-
- out_buf[0] = dev_priv->lvds_bl->brightnesscmd;
- out_buf[1] = (u8)blc_i2c_brightness;
-
- if (i2c_transfer(&lvds_i2c_bus->adapter, msgs, 1) == 1)
- return 0;
-
- DRM_ERROR("I2C transfer error\n");
- return -1;
-}
-
-
-static int cdv_lvds_pwm_set_brightness(struct drm_device *dev, int level)
-{
- struct drm_psb_private *dev_priv = dev->dev_private;
-
- u32 max_pwm_blc;
- u32 blc_pwm_duty_cycle;
-
- max_pwm_blc = cdv_intel_lvds_get_max_backlight(dev);
-
- /*BLC_PWM_CTL Should be initiated while backlight device init*/
- BUG_ON((max_pwm_blc & PSB_BLC_MAX_PWM_REG_FREQ) == 0);
-
- blc_pwm_duty_cycle = level * max_pwm_blc / BRIGHTNESS_MAX_LEVEL;
-
- if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE)
- blc_pwm_duty_cycle = max_pwm_blc - blc_pwm_duty_cycle;
-
- blc_pwm_duty_cycle &= PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR;
- REG_WRITE(BLC_PWM_CTL,
- (max_pwm_blc << PSB_BACKLIGHT_PWM_CTL_SHIFT) |
- (blc_pwm_duty_cycle));
-
- return 0;
-}
-
-/*
- * Set LVDS backlight level either by I2C or PWM
- */
-void cdv_intel_lvds_set_brightness(struct drm_device *dev, int level)
-{
- struct drm_psb_private *dev_priv = dev->dev_private;
-
- if (!dev_priv->lvds_bl) {
- DRM_ERROR("NO LVDS Backlight Info\n");
- return;
- }
-
- if (dev_priv->lvds_bl->type == BLC_I2C_TYPE)
- cdv_lvds_i2c_set_brightness(dev, level);
- else
- cdv_lvds_pwm_set_brightness(dev, level);
-}
-#endif
-
/**
* Sets the backlight level.
*
@@ -499,16 +418,6 @@ static const struct drm_connector_funcs cdv_intel_lvds_connector_funcs = {
.destroy = cdv_intel_lvds_destroy,
};
-
-static void cdv_intel_lvds_enc_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
-}
-
-static const struct drm_encoder_funcs cdv_intel_lvds_enc_funcs = {
- .destroy = cdv_intel_lvds_enc_destroy,
-};
-
/*
* Enumerate the child dev array parsed from VBT to check whether
* the LVDS is present.
@@ -616,10 +525,7 @@ void cdv_intel_lvds_init(struct drm_device *dev,
&cdv_intel_lvds_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
- drm_encoder_init(dev, encoder,
- &cdv_intel_lvds_enc_funcs,
- DRM_MODE_ENCODER_LVDS, NULL);
-
+ drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_LVDS);
gma_connector_attach_encoder(gma_connector, gma_encoder);
gma_encoder->type = INTEL_OUTPUT_LVDS;
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 1d8f67e4795a..23a78d755382 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -577,31 +577,31 @@ static void psb_setup_outputs(struct drm_device *dev)
break;
case INTEL_OUTPUT_SDVO:
crtc_mask = dev_priv->ops->sdvo_mask;
- clone_mask = (1 << INTEL_OUTPUT_SDVO);
+ clone_mask = 0;
break;
case INTEL_OUTPUT_LVDS:
- crtc_mask = dev_priv->ops->lvds_mask;
- clone_mask = (1 << INTEL_OUTPUT_LVDS);
+ crtc_mask = dev_priv->ops->lvds_mask;
+ clone_mask = 0;
break;
case INTEL_OUTPUT_MIPI:
crtc_mask = (1 << 0);
- clone_mask = (1 << INTEL_OUTPUT_MIPI);
+ clone_mask = 0;
break;
case INTEL_OUTPUT_MIPI2:
crtc_mask = (1 << 2);
- clone_mask = (1 << INTEL_OUTPUT_MIPI2);
+ clone_mask = 0;
break;
case INTEL_OUTPUT_HDMI:
- crtc_mask = dev_priv->ops->hdmi_mask;
+ crtc_mask = dev_priv->ops->hdmi_mask;
clone_mask = (1 << INTEL_OUTPUT_HDMI);
break;
case INTEL_OUTPUT_DISPLAYPORT:
crtc_mask = (1 << 0) | (1 << 1);
- clone_mask = (1 << INTEL_OUTPUT_DISPLAYPORT);
+ clone_mask = 0;
break;
case INTEL_OUTPUT_EDP:
crtc_mask = (1 << 1);
- clone_mask = (1 << INTEL_OUTPUT_EDP);
+ clone_mask = 0;
}
encoder->possible_crtcs = crtc_mask;
encoder->possible_clones =
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
index d4c65f268922..c976a9dd9240 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
@@ -27,6 +27,8 @@
#include <linux/delay.h>
+#include <drm/drm_simple_kms_helper.h>
+
#include "mdfld_dsi_dpi.h"
#include "mdfld_dsi_pkg_sender.h"
#include "mdfld_output.h"
@@ -993,10 +995,7 @@ struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev,
/*create drm encoder object*/
connector = &dsi_connector->base.base;
encoder = &dpi_output->base.base.base;
- drm_encoder_init(dev,
- encoder,
- p_funcs->encoder_funcs,
- DRM_MODE_ENCODER_LVDS, NULL);
+ drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_LVDS);
drm_encoder_helper_add(encoder,
p_funcs->encoder_helper_funcs);
@@ -1006,10 +1005,10 @@ struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev,
/*set possible crtcs and clones*/
if (dsi_connector->pipe) {
encoder->possible_crtcs = (1 << 2);
- encoder->possible_clones = (1 << 1);
+ encoder->possible_clones = 0;
} else {
encoder->possible_crtcs = (1 << 0);
- encoder->possible_clones = (1 << 0);
+ encoder->possible_clones = 0;
}
dsi_connector->base.encoder = &dpi_output->base.base;
diff --git a/drivers/gpu/drm/gma500/mdfld_intel_display.c b/drivers/gpu/drm/gma500/mdfld_intel_display.c
index 4fff110c4921..aae2d358364c 100644
--- a/drivers/gpu/drm/gma500/mdfld_intel_display.c
+++ b/drivers/gpu/drm/gma500/mdfld_intel_display.c
@@ -658,16 +658,6 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
dev_dbg(dev->dev, "pipe = 0x%x\n", pipe);
-#if 0
- if (pipe == 1) {
- if (!gma_power_begin(dev, true))
- return 0;
- android_hdmi_crtc_mode_set(crtc, mode, adjusted_mode,
- x, y, old_fb);
- goto mrst_crtc_mode_set_exit;
- }
-#endif
-
ret = check_fb(crtc->primary->fb);
if (ret)
return ret;
@@ -918,14 +908,6 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
}
dpll = 0;
-#if 0 /* FIXME revisit later */
- if (ksel == KSEL_CRYSTAL_19 || ksel == KSEL_BYPASS_19 ||
- ksel == KSEL_BYPASS_25)
- dpll &= ~MDFLD_INPUT_REF_SEL;
- else if (ksel == KSEL_BYPASS_83_100)
- dpll |= MDFLD_INPUT_REF_SEL;
-#endif /* FIXME revisit later */
-
if (is_hdmi)
dpll |= MDFLD_VCO_SEL;
@@ -935,20 +917,7 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
/* compute bitmask from p1 value */
dpll |= (1 << (clock.p1 - 2)) << 17;
-#if 0 /* 1080p30 & 720p */
- dpll = 0x00050000;
- fp = 0x000001be;
-#endif
-#if 0 /* 480p */
- dpll = 0x02010000;
- fp = 0x000000d2;
-#endif
} else {
-#if 0 /*DBI_TPO_480x864*/
- dpll = 0x00020000;
- fp = 0x00000156;
-#endif /* DBI_TPO_480x864 */ /* get from spec. */
-
dpll = 0x00800000;
fp = 0x000000c1;
}
diff --git a/drivers/gpu/drm/gma500/mdfld_output.h b/drivers/gpu/drm/gma500/mdfld_output.h
index ab2b27c0f037..17a944d70add 100644
--- a/drivers/gpu/drm/gma500/mdfld_output.h
+++ b/drivers/gpu/drm/gma500/mdfld_output.h
@@ -51,7 +51,6 @@ struct panel_info {
};
struct panel_funcs {
- const struct drm_encoder_funcs *encoder_funcs;
const struct drm_encoder_helper_funcs *encoder_helper_funcs;
struct drm_display_mode * (*get_config_mode)(struct drm_device *);
int (*get_panel_info)(struct drm_device *, int, struct panel_info *);
diff --git a/drivers/gpu/drm/gma500/mdfld_tmd_vid.c b/drivers/gpu/drm/gma500/mdfld_tmd_vid.c
index 49c92debb7b2..25e897b98f86 100644
--- a/drivers/gpu/drm/gma500/mdfld_tmd_vid.c
+++ b/drivers/gpu/drm/gma500/mdfld_tmd_vid.c
@@ -188,13 +188,7 @@ static const struct drm_encoder_helper_funcs
.commit = mdfld_dsi_dpi_commit,
};
-/*TPO DPI encoder funcs*/
-static const struct drm_encoder_funcs mdfld_tpo_dpi_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
const struct panel_funcs mdfld_tmd_vid_funcs = {
- .encoder_funcs = &mdfld_tpo_dpi_encoder_funcs,
.encoder_helper_funcs = &mdfld_tpo_dpi_encoder_helper_funcs,
.get_config_mode = &tmd_vid_get_config_mode,
.get_panel_info = tmd_vid_get_panel_info,
diff --git a/drivers/gpu/drm/gma500/mdfld_tpo_vid.c b/drivers/gpu/drm/gma500/mdfld_tpo_vid.c
index a9420bf9a419..11845978fb0a 100644
--- a/drivers/gpu/drm/gma500/mdfld_tpo_vid.c
+++ b/drivers/gpu/drm/gma500/mdfld_tpo_vid.c
@@ -76,13 +76,7 @@ static const struct drm_encoder_helper_funcs
.commit = mdfld_dsi_dpi_commit,
};
-/*TPO DPI encoder funcs*/
-static const struct drm_encoder_funcs mdfld_tpo_dpi_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
const struct panel_funcs mdfld_tpo_vid_funcs = {
- .encoder_funcs = &mdfld_tpo_dpi_encoder_funcs,
.encoder_helper_funcs = &mdfld_tpo_dpi_encoder_helper_funcs,
.get_config_mode = &tpo_vid_get_config_mode,
.get_panel_info = tpo_vid_get_panel_info,
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index f4370232767d..a097a59a9eae 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -27,6 +27,7 @@
#include <linux/delay.h>
#include <drm/drm.h>
+#include <drm/drm_simple_kms_helper.h>
#include "psb_drv.h"
#include "psb_intel_drv.h"
@@ -620,15 +621,6 @@ static const struct drm_connector_funcs oaktrail_hdmi_connector_funcs = {
.destroy = oaktrail_hdmi_destroy,
};
-static void oaktrail_hdmi_enc_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
-}
-
-static const struct drm_encoder_funcs oaktrail_hdmi_enc_funcs = {
- .destroy = oaktrail_hdmi_enc_destroy,
-};
-
void oaktrail_hdmi_init(struct drm_device *dev,
struct psb_intel_mode_device *mode_dev)
{
@@ -651,9 +643,7 @@ void oaktrail_hdmi_init(struct drm_device *dev,
&oaktrail_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_DVID);
- drm_encoder_init(dev, encoder,
- &oaktrail_hdmi_enc_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
+ drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_TMDS);
gma_connector_attach_encoder(gma_connector, gma_encoder);
@@ -673,11 +663,6 @@ failed_connector:
kfree(gma_encoder);
}
-static const struct pci_device_id hdmi_ids[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080d) },
- { 0 }
-};
-
void oaktrail_hdmi_setup(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
index 582e09597500..2828360153d1 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -13,6 +13,8 @@
#include <asm/intel-mid.h>
+#include <drm/drm_simple_kms_helper.h>
+
#include "intel_bios.h"
#include "power.h"
#include "psb_drv.h"
@@ -311,8 +313,7 @@ void oaktrail_lvds_init(struct drm_device *dev,
&psb_intel_lvds_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
- drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs,
- DRM_MODE_ENCODER_LVDS, NULL);
+ drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_LVDS);
gma_connector_attach_encoder(gma_connector, gma_encoder);
gma_encoder->type = INTEL_OUTPUT_LVDS;
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
index 16c6136f778b..fb601983cef0 100644
--- a/drivers/gpu/drm/gma500/psb_intel_drv.h
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -252,7 +252,6 @@ extern int psb_intel_lvds_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t value);
extern void psb_intel_lvds_destroy(struct drm_connector *connector);
-extern const struct drm_encoder_funcs psb_intel_lvds_enc_funcs;
/* intel_gmbus.c */
extern void gma_intel_i2c_reset(struct drm_device *dev);
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index afaebab7bc17..063c66bb946d 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -11,6 +11,8 @@
#include <linux/i2c.h>
#include <linux/pm_runtime.h>
+#include <drm/drm_simple_kms_helper.h>
+
#include "intel_bios.h"
#include "power.h"
#include "psb_drv.h"
@@ -621,18 +623,6 @@ const struct drm_connector_funcs psb_intel_lvds_connector_funcs = {
.destroy = psb_intel_lvds_destroy,
};
-
-static void psb_intel_lvds_enc_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
-}
-
-const struct drm_encoder_funcs psb_intel_lvds_enc_funcs = {
- .destroy = psb_intel_lvds_enc_destroy,
-};
-
-
-
/**
* psb_intel_lvds_init - setup LVDS connectors on this device
* @dev: drm device
@@ -683,9 +673,7 @@ void psb_intel_lvds_init(struct drm_device *dev,
&psb_intel_lvds_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
- drm_encoder_init(dev, encoder,
- &psb_intel_lvds_enc_funcs,
- DRM_MODE_ENCODER_LVDS, NULL);
+ drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_LVDS);
gma_connector_attach_encoder(gma_connector, gma_encoder);
gma_encoder->type = INTEL_OUTPUT_LVDS;
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index 264d7ad004b4..68fb3d7c172b 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -864,36 +864,6 @@ static bool psb_intel_sdvo_set_avi_infoframe(struct psb_intel_sdvo *psb_intel_sd
DRM_INFO("HDMI is not supported yet");
return false;
-#if 0
- struct dip_infoframe avi_if = {
- .type = DIP_TYPE_AVI,
- .ver = DIP_VERSION_AVI,
- .len = DIP_LEN_AVI,
- };
- uint8_t tx_rate = SDVO_HBUF_TX_VSYNC;
- uint8_t set_buf_index[2] = { 1, 0 };
- uint64_t *data = (uint64_t *)&avi_if;
- unsigned i;
-
- intel_dip_infoframe_csum(&avi_if);
-
- if (!psb_intel_sdvo_set_value(psb_intel_sdvo,
- SDVO_CMD_SET_HBUF_INDEX,
- set_buf_index, 2))
- return false;
-
- for (i = 0; i < sizeof(avi_if); i += 8) {
- if (!psb_intel_sdvo_set_value(psb_intel_sdvo,
- SDVO_CMD_SET_HBUF_DATA,
- data, 8))
- return false;
- data++;
- }
-
- return psb_intel_sdvo_set_value(psb_intel_sdvo,
- SDVO_CMD_SET_HBUF_TXRATE,
- &tx_rate, 1);
-#endif
}
static bool psb_intel_sdvo_set_tv_format(struct psb_intel_sdvo *psb_intel_sdvo)
@@ -1227,75 +1197,6 @@ static bool psb_intel_sdvo_get_capabilities(struct psb_intel_sdvo *psb_intel_sdv
return true;
}
-/* No use! */
-#if 0
-struct drm_connector* psb_intel_sdvo_find(struct drm_device *dev, int sdvoB)
-{
- struct drm_connector *connector = NULL;
- struct psb_intel_sdvo *iout = NULL;
- struct psb_intel_sdvo *sdvo;
-
- /* find the sdvo connector */
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- iout = to_psb_intel_sdvo(connector);
-
- if (iout->type != INTEL_OUTPUT_SDVO)
- continue;
-
- sdvo = iout->dev_priv;
-
- if (sdvo->sdvo_reg == SDVOB && sdvoB)
- return connector;
-
- if (sdvo->sdvo_reg == SDVOC && !sdvoB)
- return connector;
-
- }
-
- return NULL;
-}
-
-int psb_intel_sdvo_supports_hotplug(struct drm_connector *connector)
-{
- u8 response[2];
- u8 status;
- struct psb_intel_sdvo *psb_intel_sdvo;
- DRM_DEBUG_KMS("\n");
-
- if (!connector)
- return 0;
-
- psb_intel_sdvo = to_psb_intel_sdvo(connector);
-
- return psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
- &response, 2) && response[0];
-}
-
-void psb_intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
-{
- u8 response[2];
- u8 status;
- struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(connector);
-
- psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
- psb_intel_sdvo_read_response(psb_intel_sdvo, &response, 2);
-
- if (on) {
- psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
- status = psb_intel_sdvo_read_response(psb_intel_sdvo, &response, 2);
-
- psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
- } else {
- response[0] = 0;
- response[1] = 0;
- psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
- }
-
- psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
- psb_intel_sdvo_read_response(psb_intel_sdvo, &response, 2);
-}
-#endif
-
static bool
psb_intel_sdvo_multifunc_encoder(struct psb_intel_sdvo *psb_intel_sdvo)
{
diff --git a/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c b/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c
index 9e8224456ea2..e5bdd99ad453 100644
--- a/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c
+++ b/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c
@@ -747,11 +747,11 @@ static int cmi_lcd_hack_create_device(void)
return -EINVAL;
}
- client = i2c_new_device(adapter, &info);
- if (!client) {
- pr_err("%s: i2c_new_device() failed\n", __func__);
+ client = i2c_new_client_device(adapter, &info);
+ if (IS_ERR(client)) {
+ pr_err("%s: creating I2C device failed\n", __func__);
i2c_put_adapter(adapter);
- return -EINVAL;
+ return PTR_ERR(client);
}
return 0;
@@ -765,12 +765,7 @@ static const struct drm_encoder_helper_funcs tc35876x_encoder_helper_funcs = {
.commit = mdfld_dsi_dpi_commit,
};
-static const struct drm_encoder_funcs tc35876x_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
const struct panel_funcs mdfld_tc35876x_funcs = {
- .encoder_funcs = &tc35876x_encoder_funcs,
.encoder_helper_funcs = &tc35876x_encoder_helper_funcs,
.get_config_mode = tc35876x_get_config_mode,
.get_panel_info = tc35876x_get_panel_info,
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
index 55b46a7150a5..cc70e836522f 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
@@ -94,6 +94,10 @@ static int hibmc_plane_atomic_check(struct drm_plane *plane,
return -EINVAL;
}
+ if (state->fb->pitches[0] % 128 != 0) {
+ DRM_DEBUG_ATOMIC("wrong stride with 128-byte aligned\n");
+ return -EINVAL;
+ }
return 0;
}
@@ -119,11 +123,8 @@ static void hibmc_plane_atomic_update(struct drm_plane *plane,
writel(gpu_addr, priv->mmio + HIBMC_CRT_FB_ADDRESS);
reg = state->fb->width * (state->fb->format->cpp[0]);
- /* now line_pad is 16 */
- reg = PADDING(16, reg);
- line_l = state->fb->width * state->fb->format->cpp[0];
- line_l = PADDING(16, line_l);
+ line_l = state->fb->pitches[0];
writel(HIBMC_FIELD(HIBMC_CRT_FB_WIDTH_WIDTH, reg) |
HIBMC_FIELD(HIBMC_CRT_FB_WIDTH_OFFS, line_l),
priv->mmio + HIBMC_CRT_FB_WIDTH);
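The hibmc plane check now rejects framebuffers whose pitch is not a multiple of 128 bytes, matching the 128-byte pitch alignment used for dumb-buffer creation further down, and programs the scanout stride from fb->pitches[0] instead of recomputing it from the width. A hedged sketch of such a check in an ->atomic_check hook (the message wording is illustrative):

    if (state->fb->pitches[0] % 128 != 0) {
            DRM_DEBUG_ATOMIC("pitch %u is not 128-byte aligned\n",
                             state->fb->pitches[0]);
            return -EINVAL;
    }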
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
index 222356a4f9a8..a6fd0c29e5b8 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
@@ -94,7 +94,7 @@ static int hibmc_kms_init(struct hibmc_drm_private *priv)
priv->dev->mode_config.max_height = 1200;
priv->dev->mode_config.fb_base = priv->fb_base;
- priv->dev->mode_config.preferred_depth = 24;
+ priv->dev->mode_config.preferred_depth = 32;
priv->dev->mode_config.prefer_shadow = 1;
priv->dev->mode_config.funcs = (void *)&hibmc_mode_funcs;
@@ -307,11 +307,7 @@ static int hibmc_load(struct drm_device *dev)
/* reset all the states of crtc/plane/encoder/connector */
drm_mode_config_reset(dev);
- ret = drm_fbdev_generic_setup(dev, 16);
- if (ret) {
- DRM_ERROR("failed to initialize fbdev: %d\n", ret);
- goto err;
- }
+ drm_fbdev_generic_setup(dev, dev->mode_config.preferred_depth);
return 0;
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
index 99397ac3b363..322bd542e89d 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
@@ -50,7 +50,7 @@ void hibmc_mm_fini(struct hibmc_drm_private *hibmc)
int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
- return drm_gem_vram_fill_create_dumb(file, dev, 0, 16, args);
+ return drm_gem_vram_fill_create_dumb(file, dev, 0, 128, args);
}
const struct drm_mode_config_funcs hibmc_mode_funcs = {
diff --git a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
index f31068d74b18..00e87c290796 100644
--- a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
+++ b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
@@ -20,11 +20,11 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_device.h>
-#include <drm/drm_encoder_slave.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "dw_dsi_reg.h"
@@ -696,10 +696,6 @@ static const struct drm_encoder_helper_funcs dw_encoder_helper_funcs = {
.disable = dsi_encoder_disable
};
-static const struct drm_encoder_funcs dw_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static int dw_drm_encoder_init(struct device *dev,
struct drm_device *drm_dev,
struct drm_encoder *encoder)
@@ -713,8 +709,7 @@ static int dw_drm_encoder_init(struct device *dev,
}
encoder->possible_crtcs = crtc_mask;
- ret = drm_encoder_init(drm_dev, encoder, &dw_encoder_funcs,
- DRM_MODE_ENCODER_DSI, NULL);
+ ret = drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_DSI);
if (ret) {
DRM_ERROR("failed to init dsi encoder\n");
return ret;
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
index 86000127d4ee..c339e632522a 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
@@ -940,7 +940,6 @@ static struct drm_driver ade_driver = {
};
struct kirin_drm_data ade_driver_data = {
- .register_connects = false,
.num_planes = ADE_CH_NUM,
.prim_plane = ADE_CH1,
.channel_formats = channel_formats,
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
index d3145ae877d7..4349da3e2379 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
@@ -219,40 +219,6 @@ static int kirin_drm_kms_cleanup(struct drm_device *dev)
return 0;
}
-static int kirin_drm_connectors_register(struct drm_device *dev)
-{
- struct drm_connector *connector;
- struct drm_connector *failed_connector;
- struct drm_connector_list_iter conn_iter;
- int ret;
-
- mutex_lock(&dev->mode_config.mutex);
- drm_connector_list_iter_begin(dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- ret = drm_connector_register(connector);
- if (ret) {
- failed_connector = connector;
- goto err;
- }
- }
- drm_connector_list_iter_end(&conn_iter);
- mutex_unlock(&dev->mode_config.mutex);
-
- return 0;
-
-err:
- drm_connector_list_iter_begin(dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- if (failed_connector == connector)
- break;
- drm_connector_unregister(connector);
- }
- drm_connector_list_iter_end(&conn_iter);
- mutex_unlock(&dev->mode_config.mutex);
-
- return ret;
-}
-
static int kirin_drm_bind(struct device *dev)
{
struct kirin_drm_data *driver_data;
@@ -279,17 +245,8 @@ static int kirin_drm_bind(struct device *dev)
drm_fbdev_generic_setup(drm_dev, 32);
- /* connectors should be registered after drm device register */
- if (driver_data->register_connects) {
- ret = kirin_drm_connectors_register(drm_dev);
- if (ret)
- goto err_drm_dev_unregister;
- }
-
return 0;
-err_drm_dev_unregister:
- drm_dev_unregister(drm_dev);
err_kms_cleanup:
kirin_drm_kms_cleanup(drm_dev);
err_drm_dev_put:
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h
index 4d5c05a24065..dee8ec2f7f2e 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h
@@ -37,7 +37,6 @@ struct kirin_drm_data {
u32 channel_formats_cnt;
int config_max_width;
int config_max_height;
- bool register_connects;
u32 num_planes;
u32 prim_plane;
diff --git a/drivers/gpu/drm/i2c/sil164_drv.c b/drivers/gpu/drm/i2c/sil164_drv.c
index a839f78a4c8a..741886b54419 100644
--- a/drivers/gpu/drm/i2c/sil164_drv.c
+++ b/drivers/gpu/drm/i2c/sil164_drv.c
@@ -393,7 +393,7 @@ sil164_detect_slave(struct i2c_client *client)
return NULL;
}
- return i2c_new_device(adap, &info);
+ return i2c_new_client_device(adap, &info);
}
static int
@@ -402,6 +402,7 @@ sil164_encoder_init(struct i2c_client *client,
struct drm_encoder_slave *encoder)
{
struct sil164_priv *priv;
+ struct i2c_client *slave_client;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -410,7 +411,9 @@ sil164_encoder_init(struct i2c_client *client,
encoder->slave_priv = priv;
encoder->slave_funcs = &sil164_encoder_funcs;
- priv->duallink_slave = sil164_detect_slave(client);
+ slave_client = sil164_detect_slave(client);
+ if (!IS_ERR(slave_client))
+ priv->duallink_slave = slave_client;
return 0;
}
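
Editorial note: the sil164 hunk above (and the tda998x hunk that follows) converts i2c_new_device() to i2c_new_client_device(); the two differ in their error convention (NULL versus ERR_PTR()), which is why the callers grow IS_ERR()/PTR_ERR() handling. A hedged, driver-agnostic sketch of the caller-side pattern, with a hypothetical foo_* name:

#include <linux/err.h>
#include <linux/i2c.h>

static struct i2c_client *foo_attach_client(struct i2c_adapter *adap,
					    const struct i2c_board_info *info)
{
	struct i2c_client *client;

	/* i2c_new_client_device() returns an ERR_PTR() on failure,
	 * unlike the deprecated i2c_new_device(), which returned NULL. */
	client = i2c_new_client_device(adap, info);
	if (IS_ERR(client))
		return NULL; /* or propagate PTR_ERR(client) to the caller */

	return client;
}
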
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index c3332209f27a..9517f522dcb9 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -19,6 +19,7 @@
#include <drm/drm_of.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include <drm/i2c/tda998x.h>
#include <media/cec-notifier.h>
@@ -1132,7 +1133,8 @@ static void tda998x_audio_shutdown(struct device *dev, void *data)
mutex_unlock(&priv->audio_mutex);
}
-int tda998x_audio_digital_mute(struct device *dev, void *data, bool enable)
+static int tda998x_audio_digital_mute(struct device *dev, void *data,
+ bool enable)
{
struct tda998x_priv *priv = dev_get_drvdata(dev);
@@ -1949,9 +1951,9 @@ static int tda998x_create(struct device *dev)
cec_info.platform_data = &priv->cec_glue;
cec_info.irq = client->irq;
- priv->cec = i2c_new_device(client->adapter, &cec_info);
- if (!priv->cec) {
- ret = -ENODEV;
+ priv->cec = i2c_new_client_device(client->adapter, &cec_info);
+ if (IS_ERR(priv->cec)) {
+ ret = PTR_ERR(priv->cec);
goto fail;
}
@@ -1997,15 +1999,6 @@ err_irq:
/* DRM encoder functions */
-static void tda998x_encoder_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
-}
-
-static const struct drm_encoder_funcs tda998x_encoder_funcs = {
- .destroy = tda998x_encoder_destroy,
-};
-
static int tda998x_encoder_init(struct device *dev, struct drm_device *drm)
{
struct tda998x_priv *priv = dev_get_drvdata(dev);
@@ -2023,8 +2016,8 @@ static int tda998x_encoder_init(struct device *dev, struct drm_device *drm)
priv->encoder.possible_crtcs = crtcs;
- ret = drm_encoder_init(drm, &priv->encoder, &tda998x_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
+ ret = drm_simple_encoder_init(drm, &priv->encoder,
+ DRM_MODE_ENCODER_TMDS);
if (ret)
goto err_encoder;
diff --git a/drivers/gpu/drm/i915/Kconfig.profile b/drivers/gpu/drm/i915/Kconfig.profile
index 0bfd276c19fe..35bbe2b80596 100644
--- a/drivers/gpu/drm/i915/Kconfig.profile
+++ b/drivers/gpu/drm/i915/Kconfig.profile
@@ -1,3 +1,15 @@
+config DRM_I915_FENCE_TIMEOUT
+ int "Timeout for unsignaled foreign fences (ms, jiffy granularity)"
+ default 10000 # milliseconds
+ help
+ When listening to a foreign fence, we install a supplementary timer
+ to ensure that we are always signaled and our userspace is able to
+ make forward progress. This value specifies the timeout used for an
+ unsignaled foreign fence.
+
+ May be 0 to disable the timeout, and rely on the foreign fence being
+ eventually signaled.
+
config DRM_I915_USERFAULT_AUTOSUSPEND
int "Runtime autosuspend delay for userspace GGTT mmaps (ms)"
default 250 # milliseconds
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 6cd1f6253814..b0da6ea6e3f1 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -22,6 +22,7 @@ subdir-ccflags-y += $(call cc-disable-warning, sign-compare)
subdir-ccflags-y += $(call cc-disable-warning, sometimes-uninitialized)
subdir-ccflags-y += $(call cc-disable-warning, initializer-overrides)
subdir-ccflags-y += $(call cc-disable-warning, uninitialized)
+subdir-ccflags-y += $(call cc-disable-warning, frame-address)
subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror
# Fine grained warnings disable
@@ -34,6 +35,7 @@ subdir-ccflags-y += -I$(srctree)/$(src)
# core driver code
i915-y += i915_drv.o \
+ i915_config.o \
i915_irq.o \
i915_getparam.o \
i915_params.o \
@@ -86,10 +88,12 @@ gt-y += \
gt/intel_engine_cs.o \
gt/intel_engine_heartbeat.o \
gt/intel_engine_pm.o \
- gt/intel_engine_pool.o \
gt/intel_engine_user.o \
gt/intel_ggtt.o \
+ gt/intel_ggtt_fencing.o \
gt/intel_gt.o \
+ gt/intel_gt_buffer_pool.o \
+ gt/intel_gt_clock_utils.o \
gt/intel_gt_irq.o \
gt/intel_gt_pm.o \
gt/intel_gt_pm_irq.o \
@@ -108,6 +112,7 @@ gt-y += \
gt/intel_sseu.o \
gt/intel_timeline.o \
gt/intel_workarounds.o \
+ gt/shmem_utils.o \
gt/sysfs_engines.o
# autogenerated null render state
gt-y += \
@@ -150,7 +155,6 @@ i915-y += \
i915_buddy.o \
i915_cmd_parser.o \
i915_gem_evict.o \
- i915_gem_fence_reg.o \
i915_gem_gtt.o \
i915_gem.o \
i915_globals.o \
@@ -164,14 +168,18 @@ i915-y += \
# general-purpose microcontroller (GuC) support
i915-y += gt/uc/intel_uc.o \
+ gt/uc/intel_uc_debugfs.o \
gt/uc/intel_uc_fw.o \
gt/uc/intel_guc.o \
gt/uc/intel_guc_ads.o \
gt/uc/intel_guc_ct.o \
+ gt/uc/intel_guc_debugfs.o \
gt/uc/intel_guc_fw.o \
gt/uc/intel_guc_log.o \
+ gt/uc/intel_guc_log_debugfs.o \
gt/uc/intel_guc_submission.o \
gt/uc/intel_huc.o \
+ gt/uc/intel_huc_debugfs.o \
gt/uc/intel_huc_fw.o
# modesetting core code
@@ -240,23 +248,6 @@ i915-y += \
display/vlv_dsi.o \
display/vlv_dsi_pll.o
-# perf code
-i915-y += \
- oa/i915_oa_hsw.o \
- oa/i915_oa_bdw.o \
- oa/i915_oa_chv.o \
- oa/i915_oa_sklgt2.o \
- oa/i915_oa_sklgt3.o \
- oa/i915_oa_sklgt4.o \
- oa/i915_oa_bxt.o \
- oa/i915_oa_kblgt2.o \
- oa/i915_oa_kblgt3.o \
- oa/i915_oa_glk.o \
- oa/i915_oa_cflgt2.o \
- oa/i915_oa_cflgt3.o \
- oa/i915_oa_cnl.o \
- oa/i915_oa_icl.o \
- oa/i915_oa_tgl.o
i915-y += i915_perf.o
# Post-mortem debug and GPU hang state capture
@@ -270,7 +261,8 @@ i915-$(CONFIG_DRM_I915_SELFTEST) += \
selftests/igt_live_test.o \
selftests/igt_mmap.o \
selftests/igt_reset.o \
- selftests/igt_spinner.o
+ selftests/igt_spinner.o \
+ selftests/librapl.o
# virtual gpu code
i915-y += i915_vgpu.o
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index 17cee6f80d8b..4fec5bd64920 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -36,15 +36,15 @@
#include "intel_panel.h"
#include "intel_vdsc.h"
-static inline int header_credits_available(struct drm_i915_private *dev_priv,
- enum transcoder dsi_trans)
+static int header_credits_available(struct drm_i915_private *dev_priv,
+ enum transcoder dsi_trans)
{
return (intel_de_read(dev_priv, DSI_CMD_TXCTL(dsi_trans)) & FREE_HEADER_CREDIT_MASK)
>> FREE_HEADER_CREDIT_SHIFT;
}
-static inline int payload_credits_available(struct drm_i915_private *dev_priv,
- enum transcoder dsi_trans)
+static int payload_credits_available(struct drm_i915_private *dev_priv,
+ enum transcoder dsi_trans)
{
return (intel_de_read(dev_priv, DSI_CMD_TXCTL(dsi_trans)) & FREE_PLOAD_CREDIT_MASK)
>> FREE_PLOAD_CREDIT_SHIFT;
@@ -186,16 +186,19 @@ static int dsi_send_pkt_hdr(struct intel_dsi_host *host,
static int dsi_send_pkt_payld(struct intel_dsi_host *host,
struct mipi_dsi_packet pkt)
{
+ struct intel_dsi *intel_dsi = host->intel_dsi;
+ struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
+
/* payload queue can accept *256 bytes*, check limit */
if (pkt.payload_length > MAX_PLOAD_CREDIT * 4) {
- DRM_ERROR("payload size exceeds max queue limit\n");
+ drm_err(&i915->drm, "payload size exceeds max queue limit\n");
return -1;
}
/* load data into command payload queue */
if (!add_payld_to_queue(host, pkt.payload,
pkt.payload_length)) {
- DRM_ERROR("adding payload to queue failed\n");
+ drm_err(&i915->drm, "adding payload to queue failed\n");
return -1;
}
@@ -744,6 +747,18 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
tmp |= VIDEO_MODE_SYNC_PULSE;
break;
}
+ } else {
+ /*
+ * FIXME: Retrieve this info from VBT.
+ * As per the spec when dsi transcoder is operating
+ * in TE GATE mode, TE comes from GPIO
+ * which is UTIL PIN for DSI 0.
+ * It is also assumed that this GPIO is not used
+ * for any other purpose.
+ */
+ tmp &= ~OP_MODE_MASK;
+ tmp |= CMD_MODE_TE_GATE;
+ tmp |= TE_SOURCE_GPIO;
}
intel_de_write(dev_priv, DSI_TRANS_FUNC_CONF(dsi_trans), tmp);
@@ -837,14 +852,33 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
}
hactive = adjusted_mode->crtc_hdisplay;
- htotal = DIV_ROUND_UP(adjusted_mode->crtc_htotal * mul, div);
+
+ if (is_vid_mode(intel_dsi))
+ htotal = DIV_ROUND_UP(adjusted_mode->crtc_htotal * mul, div);
+ else
+ htotal = DIV_ROUND_UP((hactive + 160) * mul, div);
+
hsync_start = DIV_ROUND_UP(adjusted_mode->crtc_hsync_start * mul, div);
hsync_end = DIV_ROUND_UP(adjusted_mode->crtc_hsync_end * mul, div);
hsync_size = hsync_end - hsync_start;
hback_porch = (adjusted_mode->crtc_htotal -
adjusted_mode->crtc_hsync_end);
vactive = adjusted_mode->crtc_vdisplay;
- vtotal = adjusted_mode->crtc_vtotal;
+
+ if (is_vid_mode(intel_dsi)) {
+ vtotal = adjusted_mode->crtc_vtotal;
+ } else {
+ int bpp, line_time_us, byte_clk_period_ns;
+
+ if (crtc_state->dsc.compression_enable)
+ bpp = crtc_state->dsc.compressed_bpp;
+ else
+ bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
+
+ byte_clk_period_ns = 1000000 / afe_clk(encoder, crtc_state);
+ line_time_us = (htotal * (bpp / 8) * byte_clk_period_ns) / (1000 * intel_dsi->lane_count);
+ vtotal = vactive + DIV_ROUND_UP(400, line_time_us);
+ }
vsync_start = adjusted_mode->crtc_vsync_start;
vsync_end = adjusted_mode->crtc_vsync_end;
vsync_shift = hsync_start - htotal / 2;
@@ -873,7 +907,7 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
}
/* TRANS_HSYNC register to be programmed only for video mode */
- if (intel_dsi->operation_mode == INTEL_DSI_VIDEO_MODE) {
+ if (is_vid_mode(intel_dsi)) {
if (intel_dsi->video_mode_format ==
VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE) {
/* BSPEC: hsync size should be atleast 16 pixels */
@@ -916,22 +950,27 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
if (vsync_start < vactive)
drm_err(&dev_priv->drm, "vsync_start less than vactive\n");
- /* program TRANS_VSYNC register */
- for_each_dsi_port(port, intel_dsi->ports) {
- dsi_trans = dsi_port_to_transcoder(port);
- intel_de_write(dev_priv, VSYNC(dsi_trans),
- (vsync_start - 1) | ((vsync_end - 1) << 16));
+ /* program TRANS_VSYNC register for video mode only */
+ if (is_vid_mode(intel_dsi)) {
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ intel_de_write(dev_priv, VSYNC(dsi_trans),
+ (vsync_start - 1) | ((vsync_end - 1) << 16));
+ }
}
/*
- * FIXME: It has to be programmed only for interlaced
+ * FIXME: It has to be programmed only for video modes and interlaced
* modes. Put the check condition here once interlaced
* info available as described above.
* program TRANS_VSYNCSHIFT register
*/
- for_each_dsi_port(port, intel_dsi->ports) {
- dsi_trans = dsi_port_to_transcoder(port);
- intel_de_write(dev_priv, VSYNCSHIFT(dsi_trans), vsync_shift);
+ if (is_vid_mode(intel_dsi)) {
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ intel_de_write(dev_priv, VSYNCSHIFT(dsi_trans),
+ vsync_shift);
+ }
}
/* program TRANS_VBLANK register, should be same as vtotal programmed */
@@ -1016,6 +1055,32 @@ static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder,
}
}
+static void gen11_dsi_config_util_pin(struct intel_encoder *encoder,
+ bool enable)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ u32 tmp;
+
+ /*
+ * The UTIL pin is used as the TE input for DSI0;
+ * for dual link/DSI1, TE comes from the slave DSI1
+ * through GPIO.
+ */
+ if (is_vid_mode(intel_dsi) || (intel_dsi->ports & BIT(PORT_B)))
+ return;
+
+ tmp = intel_de_read(dev_priv, UTIL_PIN_CTL);
+
+ if (enable) {
+ tmp |= UTIL_PIN_DIRECTION_INPUT;
+ tmp |= UTIL_PIN_ENABLE;
+ } else {
+ tmp &= ~UTIL_PIN_ENABLE;
+ }
+ intel_de_write(dev_priv, UTIL_PIN_CTL, tmp);
+}
+
static void
gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
@@ -1037,6 +1102,9 @@ gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder,
/* setup D-PHY timings */
gen11_dsi_setup_dphy_timings(encoder, crtc_state);
+ /* Since transcoder is configured to take events from GPIO */
+ gen11_dsi_config_util_pin(encoder, true);
+
/* step 4h: setup DSI protocol timeouts */
gen11_dsi_setup_timeouts(encoder, crtc_state);
@@ -1088,7 +1156,8 @@ static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
wait_for_cmds_dispatched_to_panel(encoder);
}
-static void gen11_dsi_pre_pll_enable(struct intel_encoder *encoder,
+static void gen11_dsi_pre_pll_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
@@ -1099,7 +1168,8 @@ static void gen11_dsi_pre_pll_enable(struct intel_encoder *encoder,
gen11_dsi_program_esc_clk_div(encoder, crtc_state);
}
-static void gen11_dsi_pre_enable(struct intel_encoder *encoder,
+static void gen11_dsi_pre_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -1118,13 +1188,14 @@ static void gen11_dsi_pre_enable(struct intel_encoder *encoder,
gen11_dsi_set_transcoder_timings(encoder, pipe_config);
}
-static void gen11_dsi_enable(struct intel_encoder *encoder,
+static void gen11_dsi_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
- WARN_ON(crtc_state->has_pch_encoder);
+ drm_WARN_ON(state->base.dev, crtc_state->has_pch_encoder);
/* step6d: enable dsi transcoder */
gen11_dsi_enable_transcoder(encoder);
@@ -1180,6 +1251,15 @@ static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder)
enum transcoder dsi_trans;
u32 tmp;
+ /* disable periodic update mode */
+ if (is_cmd_mode(intel_dsi)) {
+ for_each_dsi_port(port, intel_dsi->ports) {
+ tmp = intel_de_read(dev_priv, DSI_CMD_FRMCTL(port));
+ tmp &= ~DSI_PERIODIC_FRAME_UPDATE_ENABLE;
+ intel_de_write(dev_priv, DSI_CMD_FRMCTL(port), tmp);
+ }
+ }
+
/* put dsi link in ULPS */
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
@@ -1264,7 +1344,8 @@ static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
}
}
-static void gen11_dsi_disable(struct intel_encoder *encoder,
+static void gen11_dsi_disable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -1286,11 +1367,14 @@ static void gen11_dsi_disable(struct intel_encoder *encoder,
/* step3: disable port */
gen11_dsi_disable_port(encoder);
+ gen11_dsi_config_util_pin(encoder, false);
+
/* step4: disable IO power */
gen11_dsi_disable_io_power(encoder);
}
-static void gen11_dsi_post_disable(struct intel_encoder *encoder,
+static void gen11_dsi_post_disable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -1347,6 +1431,22 @@ static void gen11_dsi_get_timings(struct intel_encoder *encoder,
adjusted_mode->crtc_vblank_end = adjusted_mode->crtc_vtotal;
}
+static bool gen11_dsi_is_periodic_cmd_mode(struct intel_dsi *intel_dsi)
+{
+ struct drm_device *dev = intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ enum transcoder dsi_trans;
+ u32 val;
+
+ if (intel_dsi->ports == BIT(PORT_B))
+ dsi_trans = TRANSCODER_DSI_1;
+ else
+ dsi_trans = TRANSCODER_DSI_0;
+
+ val = intel_de_read(dev_priv, DSI_TRANS_FUNC_CONF(dsi_trans));
+ return (val & DSI_PERIODIC_FRAME_UPDATE_ENABLE);
+}
+
static void gen11_dsi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
@@ -1367,6 +1467,10 @@ static void gen11_dsi_get_config(struct intel_encoder *encoder,
gen11_dsi_get_timings(encoder, pipe_config);
pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI);
pipe_config->pipe_bpp = bdw_get_pipemisc_bpp(crtc);
+
+ if (gen11_dsi_is_periodic_cmd_mode(intel_dsi))
+ pipe_config->hw.adjusted_mode.private_flags |=
+ I915_MODE_FLAG_DSI_PERIODIC_CMD_MODE;
}
static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder,
@@ -1417,18 +1521,22 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
base);
struct intel_connector *intel_connector = intel_dsi->attached_connector;
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
const struct drm_display_mode *fixed_mode =
- intel_connector->panel.fixed_mode;
+ intel_connector->panel.fixed_mode;
struct drm_display_mode *adjusted_mode =
- &pipe_config->hw.adjusted_mode;
+ &pipe_config->hw.adjusted_mode;
+ int ret;
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
intel_fixed_panel_mode(fixed_mode, adjusted_mode);
- intel_pch_panel_fitting(crtc, pipe_config, conn_state->scaling_mode);
+
+ ret = intel_pch_panel_fitting(pipe_config, conn_state);
+ if (ret)
+ return ret;
adjusted_mode->flags = 0;
@@ -1446,10 +1554,32 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder,
pipe_config->clock_set = true;
if (gen11_dsi_dsc_compute_config(encoder, pipe_config))
- DRM_DEBUG_KMS("Attempting to use DSC failed\n");
+ drm_dbg_kms(&i915->drm, "Attempting to use DSC failed\n");
pipe_config->port_clock = afe_clk(encoder, pipe_config) / 5;
+ /* We would not operate in periodic command mode */
+ pipe_config->hw.adjusted_mode.private_flags &=
+ ~I915_MODE_FLAG_DSI_PERIODIC_CMD_MODE;
+
+ /*
+ * In case of TE GATE cmd mode, we
+ * receive TE from the slave if
+ * dual link is enabled
+ */
+ if (is_cmd_mode(intel_dsi)) {
+ if (intel_dsi->ports == (BIT(PORT_B) | BIT(PORT_A)))
+ pipe_config->hw.adjusted_mode.private_flags |=
+ I915_MODE_FLAG_DSI_USE_TE1 |
+ I915_MODE_FLAG_DSI_USE_TE0;
+ else if (intel_dsi->ports == BIT(PORT_B))
+ pipe_config->hw.adjusted_mode.private_flags |=
+ I915_MODE_FLAG_DSI_USE_TE1;
+ else
+ pipe_config->hw.adjusted_mode.private_flags |=
+ I915_MODE_FLAG_DSI_USE_TE0;
+ }
+
return 0;
}
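
Editorial note: for command-mode panels, the icl_dsi.c timing change above derives vtotal from a target blanking time rather than taking it from the mode. A standalone restatement of that arithmetic with the units spelled out — a sketch mirroring the diff, where afe_clk is assumed to be in kHz as in the driver:

static unsigned int cmd_mode_vtotal(unsigned int vactive, unsigned int htotal,
				    unsigned int bpp, unsigned int lane_count,
				    unsigned int afe_clk_khz)
{
	/* Byte-clock period in nanoseconds. */
	unsigned int byte_clk_period_ns = 1000000 / afe_clk_khz;
	/* Time to transmit one line, in microseconds. */
	unsigned int line_time_us =
		(htotal * (bpp / 8) * byte_clk_period_ns) / (1000 * lane_count);

	/* Pad the vertical total with enough lines to cover ~400 us of blanking. */
	return vactive + (400 + line_time_us - 1) / line_time_us;
}
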
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index 457b258683d3..79032701873a 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -125,7 +125,7 @@ intel_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct intel_plane_state *plane_state = to_intel_plane_state(state);
- WARN_ON(plane_state->vma);
+ drm_WARN_ON(plane->dev, plane_state->vma);
__drm_atomic_helper_plane_destroy_state(&plane_state->uapi);
if (plane_state->hw.fb)
@@ -264,6 +264,20 @@ void intel_plane_copy_uapi_to_hw_state(struct intel_plane_state *plane_state,
plane_state->hw.color_range = from_plane_state->uapi.color_range;
}
+void intel_plane_set_invisible(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+
+ crtc_state->active_planes &= ~BIT(plane->id);
+ crtc_state->nv12_planes &= ~BIT(plane->id);
+ crtc_state->c8_planes &= ~BIT(plane->id);
+ crtc_state->data_rate[plane->id] = 0;
+ crtc_state->min_cdclk[plane->id] = 0;
+
+ plane_state->uapi.visible = false;
+}
+
int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *new_crtc_state,
const struct intel_plane_state *old_plane_state,
@@ -273,12 +287,7 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
const struct drm_framebuffer *fb = new_plane_state->hw.fb;
int ret;
- new_crtc_state->active_planes &= ~BIT(plane->id);
- new_crtc_state->nv12_planes &= ~BIT(plane->id);
- new_crtc_state->c8_planes &= ~BIT(plane->id);
- new_crtc_state->data_rate[plane->id] = 0;
- new_crtc_state->min_cdclk[plane->id] = 0;
- new_plane_state->uapi.visible = false;
+ intel_plane_set_invisible(new_crtc_state, new_plane_state);
if (!new_plane_state->hw.crtc && !old_plane_state->hw.crtc)
return 0;
@@ -387,7 +396,7 @@ skl_next_plane_to_commit(struct intel_atomic_state *state,
}
/* should never happen */
- WARN_ON(1);
+ drm_WARN_ON(state->base.dev, 1);
return NULL;
}
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.h b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
index a6bbf42bae1f..59dd1fbb02ea 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.h
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
@@ -52,5 +52,7 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
int intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
struct intel_plane *plane,
bool *need_cdclk_calc);
+void intel_plane_set_invisible(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state);
#endif /* __INTEL_ATOMIC_PLANE_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index 62f234f641de..ad4aa66fd676 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -252,14 +252,16 @@ static u32 audio_config_hdmi_pixel_clock(const struct intel_crtc_state *crtc_sta
i = ARRAY_SIZE(hdmi_audio_clock);
if (i == ARRAY_SIZE(hdmi_audio_clock)) {
- DRM_DEBUG_KMS("HDMI audio pixel clock setting for %d not found, falling back to defaults\n",
- adjusted_mode->crtc_clock);
+ drm_dbg_kms(&dev_priv->drm,
+ "HDMI audio pixel clock setting for %d not found, falling back to defaults\n",
+ adjusted_mode->crtc_clock);
i = 1;
}
- DRM_DEBUG_KMS("Configuring HDMI audio for pixel clock %d (0x%08x)\n",
- hdmi_audio_clock[i].clock,
- hdmi_audio_clock[i].config);
+ drm_dbg_kms(&dev_priv->drm,
+ "Configuring HDMI audio for pixel clock %d (0x%08x)\n",
+ hdmi_audio_clock[i].clock,
+ hdmi_audio_clock[i].config);
return hdmi_audio_clock[i].config;
}
@@ -512,6 +514,124 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder,
mutex_unlock(&dev_priv->av_mutex);
}
+static unsigned int calc_hblank_early_prog(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ unsigned int link_clks_available, link_clks_required;
+ unsigned int tu_data, tu_line, link_clks_active;
+ unsigned int h_active, h_total, hblank_delta, pixel_clk;
+ unsigned int fec_coeff, cdclk, vdsc_bpp;
+ unsigned int link_clk, lanes;
+ unsigned int hblank_rise;
+
+ h_active = crtc_state->hw.adjusted_mode.crtc_hdisplay;
+ h_total = crtc_state->hw.adjusted_mode.crtc_htotal;
+ pixel_clk = crtc_state->hw.adjusted_mode.crtc_clock;
+ vdsc_bpp = crtc_state->dsc.compressed_bpp;
+ cdclk = i915->cdclk.hw.cdclk;
+ /* fec= 0.972261, using rounding multiplier of 1000000 */
+ fec_coeff = 972261;
+ link_clk = crtc_state->port_clock;
+ lanes = crtc_state->lane_count;
+
+ drm_dbg_kms(&i915->drm, "h_active = %u link_clk = %u :"
+ "lanes = %u vdsc_bpp = %u cdclk = %u\n",
+ h_active, link_clk, lanes, vdsc_bpp, cdclk);
+
+ if (WARN_ON(!link_clk || !pixel_clk || !lanes || !vdsc_bpp || !cdclk))
+ return 0;
+
+ link_clks_available = (h_total - h_active) * link_clk / pixel_clk - 28;
+ link_clks_required = DIV_ROUND_UP(192000 * h_total, 1000 * pixel_clk) * (48 / lanes + 2);
+
+ if (link_clks_available > link_clks_required)
+ hblank_delta = 32;
+ else
+ hblank_delta = DIV64_U64_ROUND_UP(mul_u32_u32(5 * (link_clk + cdclk), pixel_clk),
+ mul_u32_u32(link_clk, cdclk));
+
+ tu_data = div64_u64(mul_u32_u32(pixel_clk * vdsc_bpp * 8, 1000000),
+ mul_u32_u32(link_clk * lanes, fec_coeff));
+ tu_line = div64_u64(h_active * mul_u32_u32(link_clk, fec_coeff),
+ mul_u32_u32(64 * pixel_clk, 1000000));
+ link_clks_active = (tu_line - 1) * 64 + tu_data;
+
+ hblank_rise = (link_clks_active + 6 * DIV_ROUND_UP(link_clks_active, 250) + 4) * pixel_clk / link_clk;
+
+ return h_active - hblank_rise + hblank_delta;
+}
+
+static unsigned int calc_samples_room(const struct intel_crtc_state *crtc_state)
+{
+ unsigned int h_active, h_total, pixel_clk;
+ unsigned int link_clk, lanes;
+
+ h_active = crtc_state->hw.adjusted_mode.hdisplay;
+ h_total = crtc_state->hw.adjusted_mode.htotal;
+ pixel_clk = crtc_state->hw.adjusted_mode.clock;
+ link_clk = crtc_state->port_clock;
+ lanes = crtc_state->lane_count;
+
+ return ((h_total - h_active) * link_clk - 12 * pixel_clk) /
+ (pixel_clk * (48 / lanes + 2));
+}
+
+static void enable_audio_dsc_wa(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ enum pipe pipe = crtc->pipe;
+ unsigned int hblank_early_prog, samples_room;
+ unsigned int val;
+
+ if (INTEL_GEN(i915) < 11)
+ return;
+
+ val = intel_de_read(i915, AUD_CONFIG_BE);
+
+ if (INTEL_GEN(i915) == 11)
+ val |= HBLANK_EARLY_ENABLE_ICL(pipe);
+ else if (INTEL_GEN(i915) >= 12)
+ val |= HBLANK_EARLY_ENABLE_TGL(pipe);
+
+ if (crtc_state->dsc.compression_enable &&
+ (crtc_state->hw.adjusted_mode.hdisplay >= 3840 &&
+ crtc_state->hw.adjusted_mode.vdisplay >= 2160)) {
+ /* Get hblank early enable value required */
+ hblank_early_prog = calc_hblank_early_prog(encoder, crtc_state);
+ if (hblank_early_prog < 32) {
+ val &= ~HBLANK_START_COUNT_MASK(pipe);
+ val |= HBLANK_START_COUNT(pipe, HBLANK_START_COUNT_32);
+ } else if (hblank_early_prog < 64) {
+ val &= ~HBLANK_START_COUNT_MASK(pipe);
+ val |= HBLANK_START_COUNT(pipe, HBLANK_START_COUNT_64);
+ } else if (hblank_early_prog < 96) {
+ val &= ~HBLANK_START_COUNT_MASK(pipe);
+ val |= HBLANK_START_COUNT(pipe, HBLANK_START_COUNT_96);
+ } else {
+ val &= ~HBLANK_START_COUNT_MASK(pipe);
+ val |= HBLANK_START_COUNT(pipe, HBLANK_START_COUNT_128);
+ }
+
+ /* Get samples room value required */
+ samples_room = calc_samples_room(crtc_state);
+ if (samples_room < 3) {
+ val &= ~NUMBER_SAMPLES_PER_LINE_MASK(pipe);
+ val |= NUMBER_SAMPLES_PER_LINE(pipe, samples_room);
+ } else {
+ /* Program 0 i.e "All Samples available in buffer" */
+ val &= ~NUMBER_SAMPLES_PER_LINE_MASK(pipe);
+ val |= NUMBER_SAMPLES_PER_LINE(pipe, 0x0);
+ }
+ }
+
+ intel_de_write(i915, AUD_CONFIG_BE, val);
+}
+
+#undef ROUNDING_FACTOR
+
static void hsw_audio_codec_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
@@ -529,6 +649,10 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder,
mutex_lock(&dev_priv->av_mutex);
+ /* Enable Audio WA for 4k DSC usecases */
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP))
+ enable_audio_dsc_wa(encoder, crtc_state);
+
/* Enable audio presence detect, invalidate ELD */
tmp = intel_de_read(dev_priv, HSW_AUD_PIN_ELD_CP_VLD);
tmp |= AUDIO_OUTPUT_ENABLE(cpu_transcoder);
@@ -891,7 +1015,7 @@ static unsigned long i915_audio_component_get_power(struct device *kdev)
ret = intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
if (dev_priv->audio_power_refcount++ == 0) {
- if (IS_TIGERLAKE(dev_priv) || IS_ICELAKE(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 9) {
intel_de_write(dev_priv, AUD_FREQ_CNTRL,
dev_priv->audio_freq_cntrl);
drm_dbg_kms(&dev_priv->drm,
@@ -931,7 +1055,7 @@ static void i915_audio_component_codec_wake_override(struct device *kdev,
unsigned long cookie;
u32 tmp;
- if (!IS_GEN(dev_priv, 9))
+ if (INTEL_GEN(dev_priv) < 9)
return;
cookie = i915_audio_component_get_power(kdev);
@@ -1136,6 +1260,10 @@ static void i915_audio_component_unbind(struct device *i915_kdev,
drm_modeset_unlock_all(&dev_priv->drm);
device_link_remove(hda_kdev, i915_kdev);
+
+ if (dev_priv->audio_power_refcount)
+ drm_err(&dev_priv->drm, "audio power refcount %d after unbind\n",
+ dev_priv->audio_power_refcount);
}
static const struct component_ops i915_audio_component_bind_ops = {
@@ -1173,7 +1301,7 @@ static void i915_audio_component_init(struct drm_i915_private *dev_priv)
return;
}
- if (IS_TIGERLAKE(dev_priv) || IS_ICELAKE(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 9) {
dev_priv->audio_freq_cntrl = intel_de_read(dev_priv,
AUD_FREQ_CNTRL);
drm_dbg_kms(&dev_priv->drm,
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index 58b264bc318d..fef04e2d954e 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -8,6 +8,9 @@
#include "intel_bw.h"
#include "intel_display_types.h"
#include "intel_sideband.h"
+#include "intel_atomic.h"
+#include "intel_pm.h"
+
/* Parameters for Qclk Geyserville (QGV) */
struct intel_qgv_point {
@@ -113,6 +116,26 @@ static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv,
return 0;
}
+int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
+ u32 points_mask)
+{
+ int ret;
+
+ /* bspec says to keep retrying for at least 1 ms */
+ ret = skl_pcode_request(dev_priv, ICL_PCODE_SAGV_DE_MEM_SS_CONFIG,
+ points_mask,
+ ICL_PCODE_POINTS_RESTRICTED_MASK,
+ ICL_PCODE_POINTS_RESTRICTED,
+ 1);
+
+ if (ret < 0) {
+ drm_err(&dev_priv->drm, "Failed to disable qgv points (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
struct intel_qgv_info *qi)
{
@@ -240,6 +263,16 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
break;
}
+ /*
+ * If SAGV is disabled in the BIOS, we always get a single
+ * SAGV point, but we can't send PCode commands to restrict it
+ * as they would fail and be pointless anyway.
+ */
+ if (qi.num_points == 1)
+ dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
+ else
+ dev_priv->sagv_status = I915_SAGV_ENABLED;
+
return 0;
}
@@ -248,6 +281,11 @@ static unsigned int icl_max_bw(struct drm_i915_private *dev_priv,
{
int i;
+ /*
+ * Let's return max bw for 0 planes
+ */
+ num_planes = max(1, num_planes);
+
for (i = 0; i < ARRAY_SIZE(dev_priv->max_bw); i++) {
const struct intel_bw_info *bi =
&dev_priv->max_bw[i];
@@ -277,34 +315,6 @@ void intel_bw_init_hw(struct drm_i915_private *dev_priv)
icl_get_bw_info(dev_priv, &icl_sa_info);
}
-static unsigned int intel_max_data_rate(struct drm_i915_private *dev_priv,
- int num_planes)
-{
- if (INTEL_GEN(dev_priv) >= 11) {
- /*
- * Any bw group has same amount of QGV points
- */
- const struct intel_bw_info *bi =
- &dev_priv->max_bw[0];
- unsigned int min_bw = UINT_MAX;
- int i;
-
- /*
- * FIXME with SAGV disabled maybe we can assume
- * point 1 will always be used? Seems to match
- * the behaviour observed in the wild.
- */
- for (i = 0; i < bi->num_qgv_points; i++) {
- unsigned int bw = icl_max_bw(dev_priv, num_planes, i);
-
- min_bw = min(bw, min_bw);
- }
- return min_bw;
- } else {
- return UINT_MAX;
- }
-}
-
static unsigned int intel_bw_crtc_num_active_planes(const struct intel_crtc_state *crtc_state)
{
/*
@@ -338,16 +348,17 @@ void intel_bw_crtc_update(struct intel_bw_state *bw_state,
const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
bw_state->data_rate[crtc->pipe] =
intel_bw_crtc_data_rate(crtc_state);
bw_state->num_active_planes[crtc->pipe] =
intel_bw_crtc_num_active_planes(crtc_state);
- DRM_DEBUG_KMS("pipe %c data rate %u num active planes %u\n",
- pipe_name(crtc->pipe),
- bw_state->data_rate[crtc->pipe],
- bw_state->num_active_planes[crtc->pipe]);
+ drm_dbg_kms(&i915->drm, "pipe %c data rate %u num active planes %u\n",
+ pipe_name(crtc->pipe),
+ bw_state->data_rate[crtc->pipe],
+ bw_state->num_active_planes[crtc->pipe]);
}
static unsigned int intel_bw_num_active_planes(struct drm_i915_private *dev_priv,
@@ -374,7 +385,29 @@ static unsigned int intel_bw_data_rate(struct drm_i915_private *dev_priv,
return data_rate;
}
-static struct intel_bw_state *
+struct intel_bw_state *
+intel_atomic_get_old_bw_state(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_global_state *bw_state;
+
+ bw_state = intel_atomic_get_old_global_obj_state(state, &dev_priv->bw_obj);
+
+ return to_intel_bw_state(bw_state);
+}
+
+struct intel_bw_state *
+intel_atomic_get_new_bw_state(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_global_state *bw_state;
+
+ bw_state = intel_atomic_get_new_global_obj_state(state, &dev_priv->bw_obj);
+
+ return to_intel_bw_state(bw_state);
+}
+
+struct intel_bw_state *
intel_atomic_get_bw_state(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
@@ -391,11 +424,16 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *new_crtc_state, *old_crtc_state;
- struct intel_bw_state *bw_state = NULL;
- unsigned int data_rate, max_data_rate;
+ struct intel_bw_state *new_bw_state = NULL;
+ const struct intel_bw_state *old_bw_state = NULL;
+ unsigned int data_rate;
unsigned int num_active_planes;
struct intel_crtc *crtc;
int i, ret;
+ u32 allowed_points = 0;
+ unsigned int max_bw_point = 0, max_bw = 0;
+ unsigned int num_qgv_points = dev_priv->max_bw[0].num_qgv_points;
+ u32 mask = (1 << num_qgv_points) - 1;
/* FIXME earlier gens need some checks too */
if (INTEL_GEN(dev_priv) < 11)
@@ -420,41 +458,93 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
old_active_planes == new_active_planes)
continue;
- bw_state = intel_atomic_get_bw_state(state);
- if (IS_ERR(bw_state))
- return PTR_ERR(bw_state);
+ new_bw_state = intel_atomic_get_bw_state(state);
+ if (IS_ERR(new_bw_state))
+ return PTR_ERR(new_bw_state);
- bw_state->data_rate[crtc->pipe] = new_data_rate;
- bw_state->num_active_planes[crtc->pipe] = new_active_planes;
+ new_bw_state->data_rate[crtc->pipe] = new_data_rate;
+ new_bw_state->num_active_planes[crtc->pipe] = new_active_planes;
drm_dbg_kms(&dev_priv->drm,
"pipe %c data rate %u num active planes %u\n",
pipe_name(crtc->pipe),
- bw_state->data_rate[crtc->pipe],
- bw_state->num_active_planes[crtc->pipe]);
+ new_bw_state->data_rate[crtc->pipe],
+ new_bw_state->num_active_planes[crtc->pipe]);
}
- if (!bw_state)
+ if (!new_bw_state)
return 0;
- ret = intel_atomic_lock_global_state(&bw_state->base);
+ ret = intel_atomic_lock_global_state(&new_bw_state->base);
if (ret)
return ret;
- data_rate = intel_bw_data_rate(dev_priv, bw_state);
- num_active_planes = intel_bw_num_active_planes(dev_priv, bw_state);
+ data_rate = intel_bw_data_rate(dev_priv, new_bw_state);
+ data_rate = DIV_ROUND_UP(data_rate, 1000);
- max_data_rate = intel_max_data_rate(dev_priv, num_active_planes);
+ num_active_planes = intel_bw_num_active_planes(dev_priv, new_bw_state);
- data_rate = DIV_ROUND_UP(data_rate, 1000);
+ for (i = 0; i < num_qgv_points; i++) {
+ unsigned int max_data_rate;
- if (data_rate > max_data_rate) {
- drm_dbg_kms(&dev_priv->drm,
- "Bandwidth %u MB/s exceeds max available %d MB/s (%d active planes)\n",
- data_rate, max_data_rate, num_active_planes);
+ max_data_rate = icl_max_bw(dev_priv, num_active_planes, i);
+ /*
+ * We need to know which QGV point gives us
+ * maximum bandwidth in order to disable SAGV
+ * if we find that we exceed SAGV block time
+ * with watermarks. By that point the watermarks
+ * have already been computed, earlier in
+ * intel_atomic_check().
+ */
+ if (max_data_rate > max_bw) {
+ max_bw_point = i;
+ max_bw = max_data_rate;
+ }
+ if (max_data_rate >= data_rate)
+ allowed_points |= BIT(i);
+ drm_dbg_kms(&dev_priv->drm, "QGV point %d: max bw %d required %d\n",
+ i, max_data_rate, data_rate);
+ }
+
+ /*
+ * BSpec states that we should always have at least one allowed point
+ * left, so if we don't, simply reject the configuration for obvious
+ * reasons.
+ */
+ if (allowed_points == 0) {
+ drm_dbg_kms(&dev_priv->drm, "No QGV points provide sufficient memory"
+ " bandwidth %d for display configuration(%d active planes).\n",
+ data_rate, num_active_planes);
return -EINVAL;
}
+ /*
+ * Leave only a single point with the highest bandwidth if
+ * we can't enable SAGV, due to the increased memory latency it may
+ * cause.
+ */
+ if (!intel_can_enable_sagv(dev_priv, new_bw_state)) {
+ allowed_points = BIT(max_bw_point);
+ drm_dbg_kms(&dev_priv->drm, "No SAGV, using single QGV point %d\n",
+ max_bw_point);
+ }
+ /*
+ * We store the ones which need to be masked as that is what PCode
+ * actually accepts as a parameter.
+ */
+ new_bw_state->qgv_points_mask = ~allowed_points & mask;
+
+ old_bw_state = intel_atomic_get_old_bw_state(state);
+ /*
+ * If the actual mask has changed, we need to make sure that
+ * the commits are serialized (in case this is a nomodeset, nonblocking).
+ */
+ if (new_bw_state->qgv_points_mask != old_bw_state->qgv_points_mask) {
+ ret = intel_atomic_serialize_global_state(&new_bw_state->base);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_bw.h b/drivers/gpu/drm/i915/display/intel_bw.h
index a8aa7624c5aa..bbcaaa73ec1b 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.h
+++ b/drivers/gpu/drm/i915/display/intel_bw.h
@@ -18,16 +18,43 @@ struct intel_crtc_state;
struct intel_bw_state {
struct intel_global_state base;
+ /*
+ * Contains a bit mask used to determine whether the corresponding
+ * pipe allows SAGV or not.
+ */
+ u8 pipe_sagv_reject;
+
+ /*
+ * Current QGV points mask, which restricts
+ * some particular SAGV states; not to be confused
+ * with pipe_sagv_mask.
+ */
+ u8 qgv_points_mask;
+
unsigned int data_rate[I915_MAX_PIPES];
u8 num_active_planes[I915_MAX_PIPES];
+
+ /* bitmask of active pipes */
+ u8 active_pipes;
};
#define to_intel_bw_state(x) container_of((x), struct intel_bw_state, base)
+struct intel_bw_state *
+intel_atomic_get_old_bw_state(struct intel_atomic_state *state);
+
+struct intel_bw_state *
+intel_atomic_get_new_bw_state(struct intel_atomic_state *state);
+
+struct intel_bw_state *
+intel_atomic_get_bw_state(struct intel_atomic_state *state);
+
void intel_bw_init_hw(struct drm_i915_private *dev_priv);
int intel_bw_init(struct drm_i915_private *dev_priv);
int intel_bw_atomic_check(struct intel_atomic_state *state);
void intel_bw_crtc_update(struct intel_bw_state *bw_state,
const struct intel_crtc_state *crtc_state);
+int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
+ u32 points_mask);
#endif /* __INTEL_BW_H__ */
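
Editorial note: the reworked bandwidth check in intel_bw.c above reduces to a small bitmask computation over the QGV points — keep every point whose bandwidth covers the requested data rate, fall back to the single highest-bandwidth point when SAGV cannot be enabled, and hand PCode the inverted mask. A simplified, self-contained sketch of that selection logic (hypothetical names; not the driver code itself):

#include <stdint.h>

/* Stand-in for icl_max_bw(): bandwidth a QGV point sustains for the
 * current number of active planes. */
typedef unsigned int (*max_bw_fn)(int point);

static uint32_t qgv_points_to_mask(int num_points, unsigned int required_bw,
				   int sagv_possible, max_bw_fn max_bw_for_point)
{
	uint32_t allowed = 0, mask = (1u << num_points) - 1;
	unsigned int best_bw = 0;
	int best_point = 0, i;

	for (i = 0; i < num_points; i++) {
		unsigned int bw = max_bw_for_point(i);

		/* Track the point with the highest bandwidth. */
		if (bw > best_bw) {
			best_bw = bw;
			best_point = i;
		}
		/* Keep every point that can carry the requested data rate. */
		if (bw >= required_bw)
			allowed |= 1u << i;
	}

	/* If allowed == 0 the caller should reject the configuration. */

	/* Without SAGV, keep only the single highest-bandwidth point. */
	if (!sagv_possible)
		allowed = 1u << best_point;

	/* PCode is given the points to mask *off*, hence the inversion. */
	return ~allowed & mask;
}
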
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index c1cce93a1c25..98ece9cd7cdd 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -460,6 +460,16 @@ static void ilk_lut_10_pack(struct drm_color_lut *entry, u32 val)
entry->blue = intel_color_lut_pack(REG_FIELD_GET(PREC_PALETTE_BLUE_MASK, val), 10);
}
+static void icl_lut_multi_seg_pack(struct drm_color_lut *entry, u32 ldw, u32 udw)
+{
+ entry->red = REG_FIELD_GET(PAL_PREC_MULTI_SEG_RED_UDW_MASK, udw) << 6 |
+ REG_FIELD_GET(PAL_PREC_MULTI_SEG_RED_LDW_MASK, ldw);
+ entry->green = REG_FIELD_GET(PAL_PREC_MULTI_SEG_GREEN_UDW_MASK, udw) << 6 |
+ REG_FIELD_GET(PAL_PREC_MULTI_SEG_GREEN_LDW_MASK, ldw);
+ entry->blue = REG_FIELD_GET(PAL_PREC_MULTI_SEG_BLUE_UDW_MASK, udw) << 6 |
+ REG_FIELD_GET(PAL_PREC_MULTI_SEG_BLUE_LDW_MASK, ldw);
+}
+
static void i9xx_color_commit(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -893,7 +903,7 @@ icl_load_gcmax(const struct intel_crtc_state *crtc_state,
struct intel_dsb *dsb = intel_dsb_get(crtc);
enum pipe pipe = crtc->pipe;
- /* Fixme: LUT entries are 16 bit only, so we can prog 0xFFFF max */
+ /* FIXME LUT entries are 16 bit only, so we can prog 0xFFFF max */
intel_dsb_reg_write(dsb, PREC_PAL_GC_MAX(pipe, 0), color->red);
intel_dsb_reg_write(dsb, PREC_PAL_GC_MAX(pipe, 1), color->green);
intel_dsb_reg_write(dsb, PREC_PAL_GC_MAX(pipe, 2), color->blue);
@@ -1630,6 +1640,24 @@ static int glk_gamma_precision(const struct intel_crtc_state *crtc_state)
}
}
+static int icl_gamma_precision(const struct intel_crtc_state *crtc_state)
+{
+ if ((crtc_state->gamma_mode & POST_CSC_GAMMA_ENABLE) == 0)
+ return 0;
+
+ switch (crtc_state->gamma_mode & GAMMA_MODE_MODE_MASK) {
+ case GAMMA_MODE_MODE_8BIT:
+ return 8;
+ case GAMMA_MODE_MODE_10BIT:
+ return 10;
+ case GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED:
+ return 16;
+ default:
+ MISSING_CASE(crtc_state->gamma_mode);
+ return 0;
+ }
+}
+
int intel_color_get_gamma_bit_precision(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -1641,7 +1669,9 @@ int intel_color_get_gamma_bit_precision(const struct intel_crtc_state *crtc_stat
else
return i9xx_gamma_precision(crtc_state);
} else {
- if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 11)
+ return icl_gamma_precision(crtc_state);
+ else if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
return glk_gamma_precision(crtc_state);
else if (IS_IRONLAKE(dev_priv))
return ilk_gamma_precision(crtc_state);
@@ -1658,9 +1688,9 @@ static bool err_check(struct drm_color_lut *lut1,
((abs((long)lut2->green - lut1->green)) <= err);
}
-static bool intel_color_lut_entry_equal(struct drm_color_lut *lut1,
- struct drm_color_lut *lut2,
- int lut_size, u32 err)
+static bool intel_color_lut_entries_equal(struct drm_color_lut *lut1,
+ struct drm_color_lut *lut2,
+ int lut_size, u32 err)
{
int i;
@@ -1690,16 +1720,8 @@ bool intel_color_lut_equal(struct drm_property_blob *blob1,
lut_size2 = drm_color_lut_size(blob2);
/* check sw and hw lut size */
- switch (gamma_mode) {
- case GAMMA_MODE_MODE_8BIT:
- case GAMMA_MODE_MODE_10BIT:
- if (lut_size1 != lut_size2)
- return false;
- break;
- default:
- MISSING_CASE(gamma_mode);
- return false;
- }
+ if (lut_size1 != lut_size2)
+ return false;
lut1 = blob1->data;
lut2 = blob2->data;
@@ -1707,11 +1729,16 @@ bool intel_color_lut_equal(struct drm_property_blob *blob1,
err = 0xffff >> bit_precision;
/* check sw and hw lut entry to be equal */
- switch (gamma_mode) {
+ switch (gamma_mode & GAMMA_MODE_MODE_MASK) {
case GAMMA_MODE_MODE_8BIT:
case GAMMA_MODE_MODE_10BIT:
- if (!intel_color_lut_entry_equal(lut1, lut2,
- lut_size2, err))
+ if (!intel_color_lut_entries_equal(lut1, lut2,
+ lut_size2, err))
+ return false;
+ break;
+ case GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED:
+ if (!intel_color_lut_entries_equal(lut1, lut2,
+ 9, err))
return false;
break;
default:
@@ -1946,6 +1973,63 @@ static void glk_read_luts(struct intel_crtc_state *crtc_state)
crtc_state->hw.gamma_lut = glk_read_lut_10(crtc, PAL_PREC_INDEX_VALUE(0));
}
+static struct drm_property_blob *
+icl_read_lut_multi_segment(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ int i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
+ enum pipe pipe = crtc->pipe;
+ struct drm_property_blob *blob;
+ struct drm_color_lut *lut;
+
+ blob = drm_property_create_blob(&dev_priv->drm,
+ sizeof(struct drm_color_lut) * lut_size,
+ NULL);
+ if (IS_ERR(blob))
+ return NULL;
+
+ lut = blob->data;
+
+ intel_de_write(dev_priv, PREC_PAL_MULTI_SEG_INDEX(pipe),
+ PAL_PREC_AUTO_INCREMENT);
+
+ for (i = 0; i < 9; i++) {
+ u32 ldw = intel_de_read(dev_priv, PREC_PAL_MULTI_SEG_DATA(pipe));
+ u32 udw = intel_de_read(dev_priv, PREC_PAL_MULTI_SEG_DATA(pipe));
+
+ icl_lut_multi_seg_pack(&lut[i], ldw, udw);
+ }
+
+ intel_de_write(dev_priv, PREC_PAL_MULTI_SEG_INDEX(pipe), 0);
+
+ /*
+ * FIXME: readouts from the PAL_PREC_DATA register aren't giving
+ * correct values for the fine and coarse segments.
+ * Restrict readouts to the super fine segment only for now.
+ */
+
+ return blob;
+}
+
+static void icl_read_luts(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ if ((crtc_state->gamma_mode & POST_CSC_GAMMA_ENABLE) == 0)
+ return;
+
+ switch (crtc_state->gamma_mode & GAMMA_MODE_MODE_MASK) {
+ case GAMMA_MODE_MODE_8BIT:
+ crtc_state->hw.gamma_lut = ilk_read_lut_8(crtc);
+ break;
+ case GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED:
+ crtc_state->hw.gamma_lut = icl_read_lut_multi_segment(crtc);
+ break;
+ default:
+ crtc_state->hw.gamma_lut = glk_read_lut_10(crtc, PAL_PREC_INDEX_VALUE(0));
+ }
+}
+
void intel_color_init(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -1989,6 +2073,7 @@ void intel_color_init(struct intel_crtc *crtc)
if (INTEL_GEN(dev_priv) >= 11) {
dev_priv->display.load_luts = icl_load_luts;
+ dev_priv->display.read_luts = icl_read_luts;
} else if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) {
dev_priv->display.load_luts = glk_load_luts;
dev_priv->display.read_luts = glk_read_luts;
diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c
index 903e49659f56..406e96785c76 100644
--- a/drivers/gpu/drm/i915/display/intel_connector.c
+++ b/drivers/gpu/drm/i915/display/intel_connector.c
@@ -33,6 +33,7 @@
#include "i915_drv.h"
#include "intel_connector.h"
+#include "intel_display_debugfs.h"
#include "intel_display_types.h"
#include "intel_hdcp.h"
@@ -123,6 +124,8 @@ int intel_connector_register(struct drm_connector *connector)
goto err_backlight;
}
+ intel_connector_debugfs_add(connector);
+
return 0;
err_backlight:
@@ -290,7 +293,7 @@ intel_attach_colorspace_property(struct drm_connector *connector)
return;
break;
default:
- DRM_DEBUG_KMS("Colorspace property not supported\n");
+ MISSING_CASE(connector->connector_type);
return;
}
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index 78f9b6cde810..2f5b9a4baafd 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -203,27 +203,31 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder,
intel_de_write(dev_priv, crt->adpa_reg, adpa);
}
-static void intel_disable_crt(struct intel_encoder *encoder,
+static void intel_disable_crt(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
intel_crt_set_dpms(encoder, old_crtc_state, DRM_MODE_DPMS_OFF);
}
-static void pch_disable_crt(struct intel_encoder *encoder,
+static void pch_disable_crt(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
}
-static void pch_post_disable_crt(struct intel_encoder *encoder,
+static void pch_post_disable_crt(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- intel_disable_crt(encoder, old_crtc_state, old_conn_state);
+ intel_disable_crt(state, encoder, old_crtc_state, old_conn_state);
}
-static void hsw_disable_crt(struct intel_encoder *encoder,
+static void hsw_disable_crt(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -234,7 +238,8 @@ static void hsw_disable_crt(struct intel_encoder *encoder,
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
}
-static void hsw_post_disable_crt(struct intel_encoder *encoder,
+static void hsw_post_disable_crt(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -250,19 +255,20 @@ static void hsw_post_disable_crt(struct intel_encoder *encoder,
intel_ddi_disable_pipe_clock(old_crtc_state);
- pch_post_disable_crt(encoder, old_crtc_state, old_conn_state);
+ pch_post_disable_crt(state, encoder, old_crtc_state, old_conn_state);
lpt_disable_pch_transcoder(dev_priv);
lpt_disable_iclkip(dev_priv);
- intel_ddi_fdi_post_disable(encoder, old_crtc_state, old_conn_state);
+ intel_ddi_fdi_post_disable(state, encoder, old_crtc_state, old_conn_state);
drm_WARN_ON(&dev_priv->drm, !old_crtc_state->has_pch_encoder);
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
}
-static void hsw_pre_pll_enable_crt(struct intel_encoder *encoder,
+static void hsw_pre_pll_enable_crt(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
@@ -273,7 +279,8 @@ static void hsw_pre_pll_enable_crt(struct intel_encoder *encoder,
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
}
-static void hsw_pre_enable_crt(struct intel_encoder *encoder,
+static void hsw_pre_enable_crt(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
@@ -287,10 +294,11 @@ static void hsw_pre_enable_crt(struct intel_encoder *encoder,
hsw_fdi_link_train(encoder, crtc_state);
- intel_ddi_enable_pipe_clock(crtc_state);
+ intel_ddi_enable_pipe_clock(encoder, crtc_state);
}
-static void hsw_enable_crt(struct intel_encoder *encoder,
+static void hsw_enable_crt(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
@@ -300,6 +308,8 @@ static void hsw_enable_crt(struct intel_encoder *encoder,
drm_WARN_ON(&dev_priv->drm, !crtc_state->has_pch_encoder);
+ intel_ddi_enable_transcoder_func(encoder, crtc_state);
+
intel_enable_pipe(crtc_state);
lpt_pch_enable(crtc_state);
@@ -314,7 +324,8 @@ static void hsw_enable_crt(struct intel_encoder *encoder,
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
}
-static void intel_enable_crt(struct intel_encoder *encoder,
+static void intel_enable_crt(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
@@ -594,7 +605,8 @@ static struct edid *intel_crt_get_edid(struct drm_connector *connector,
edid = drm_get_edid(connector, i2c);
if (!edid && !intel_gmbus_is_forced_bit(i2c)) {
- DRM_DEBUG_KMS("CRT GMBUS EDID read failed, retry using GPIO bit-banging\n");
+ drm_dbg_kms(connector->dev,
+ "CRT GMBUS EDID read failed, retry using GPIO bit-banging\n");
intel_gmbus_force_bit(i2c, true);
edid = drm_get_edid(connector, i2c);
intel_gmbus_force_bit(i2c, false);
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 52db7852827b..aa22465bb56e 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -568,7 +568,7 @@ static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_hdmi[] = {
{ 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */
};
-static const struct cnl_ddi_buf_trans ehl_combo_phy_ddi_translations_hbr2_hbr3[] = {
+static const struct cnl_ddi_buf_trans ehl_combo_phy_ddi_translations_dp[] = {
/* NT mV Trans mV db */
{ 0xA, 0x33, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
{ 0xA, 0x47, 0x36, 0x00, 0x09 }, /* 350 500 3.1 */
@@ -583,23 +583,51 @@ static const struct cnl_ddi_buf_trans ehl_combo_phy_ddi_translations_hbr2_hbr3[]
};
struct icl_mg_phy_ddi_buf_trans {
- u32 cri_txdeemph_override_5_0;
u32 cri_txdeemph_override_11_6;
+ u32 cri_txdeemph_override_5_0;
u32 cri_txdeemph_override_17_12;
};
-static const struct icl_mg_phy_ddi_buf_trans icl_mg_phy_ddi_translations[] = {
+static const struct icl_mg_phy_ddi_buf_trans icl_mg_phy_ddi_translations_rbr_hbr[] = {
+ /* Voltage swing pre-emphasis */
+ { 0x18, 0x00, 0x00 }, /* 0 0 */
+ { 0x1D, 0x00, 0x05 }, /* 0 1 */
+ { 0x24, 0x00, 0x0C }, /* 0 2 */
+ { 0x2B, 0x00, 0x14 }, /* 0 3 */
+ { 0x21, 0x00, 0x00 }, /* 1 0 */
+ { 0x2B, 0x00, 0x08 }, /* 1 1 */
+ { 0x30, 0x00, 0x0F }, /* 1 2 */
+ { 0x31, 0x00, 0x03 }, /* 2 0 */
+ { 0x34, 0x00, 0x0B }, /* 2 1 */
+ { 0x3F, 0x00, 0x00 }, /* 3 0 */
+};
+
+static const struct icl_mg_phy_ddi_buf_trans icl_mg_phy_ddi_translations_hbr2_hbr3[] = {
/* Voltage swing pre-emphasis */
- { 0x0, 0x1B, 0x00 }, /* 0 0 */
- { 0x0, 0x23, 0x08 }, /* 0 1 */
- { 0x0, 0x2D, 0x12 }, /* 0 2 */
- { 0x0, 0x00, 0x00 }, /* 0 3 */
- { 0x0, 0x23, 0x00 }, /* 1 0 */
- { 0x0, 0x2B, 0x09 }, /* 1 1 */
- { 0x0, 0x2E, 0x11 }, /* 1 2 */
- { 0x0, 0x2F, 0x00 }, /* 2 0 */
- { 0x0, 0x33, 0x0C }, /* 2 1 */
- { 0x0, 0x00, 0x00 }, /* 3 0 */
+ { 0x18, 0x00, 0x00 }, /* 0 0 */
+ { 0x1D, 0x00, 0x05 }, /* 0 1 */
+ { 0x24, 0x00, 0x0C }, /* 0 2 */
+ { 0x2B, 0x00, 0x14 }, /* 0 3 */
+ { 0x26, 0x00, 0x00 }, /* 1 0 */
+ { 0x2C, 0x00, 0x07 }, /* 1 1 */
+ { 0x33, 0x00, 0x0C }, /* 1 2 */
+ { 0x2E, 0x00, 0x00 }, /* 2 0 */
+ { 0x36, 0x00, 0x09 }, /* 2 1 */
+ { 0x3F, 0x00, 0x00 }, /* 3 0 */
+};
+
+static const struct icl_mg_phy_ddi_buf_trans icl_mg_phy_ddi_translations_hdmi[] = {
+ /* HDMI Preset VS Pre-emph */
+ { 0x1A, 0x0, 0x0 }, /* 1 400mV 0dB */
+ { 0x20, 0x0, 0x0 }, /* 2 500mV 0dB */
+ { 0x29, 0x0, 0x0 }, /* 3 650mV 0dB */
+ { 0x32, 0x0, 0x0 }, /* 4 800mV 0dB */
+ { 0x3F, 0x0, 0x0 }, /* 5 1000mV 0dB */
+ { 0x3A, 0x0, 0x5 }, /* 6 Full -1.5 dB */
+ { 0x39, 0x0, 0x6 }, /* 7 Full -1.8 dB */
+ { 0x38, 0x0, 0x7 }, /* 8 Full -2 dB */
+ { 0x37, 0x0, 0x8 }, /* 9 Full -2.5 dB */
+ { 0x36, 0x0, 0x9 }, /* 10 Full -3 dB */
};
struct tgl_dkl_phy_ddi_buf_trans {
@@ -943,14 +971,29 @@ icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
return icl_combo_phy_ddi_translations_dp_hbr2;
}
+static const struct icl_mg_phy_ddi_buf_trans *
+icl_get_mg_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
+ int *n_entries)
+{
+ if (type == INTEL_OUTPUT_HDMI) {
+ *n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations_hdmi);
+ return icl_mg_phy_ddi_translations_hdmi;
+ } else if (rate > 270000) {
+ *n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations_hbr2_hbr3);
+ return icl_mg_phy_ddi_translations_hbr2_hbr3;
+ }
+
+ *n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations_rbr_hbr);
+ return icl_mg_phy_ddi_translations_rbr_hbr;
+}
+
static const struct cnl_ddi_buf_trans *
ehl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
int *n_entries)
{
- if (type != INTEL_OUTPUT_HDMI && type != INTEL_OUTPUT_EDP &&
- rate > 270000) {
- *n_entries = ARRAY_SIZE(ehl_combo_phy_ddi_translations_hbr2_hbr3);
- return ehl_combo_phy_ddi_translations_hbr2_hbr3;
+ if (type != INTEL_OUTPUT_HDMI && type != INTEL_OUTPUT_EDP) {
+ *n_entries = ARRAY_SIZE(ehl_combo_phy_ddi_translations_dp);
+ return ehl_combo_phy_ddi_translations_dp;
}
return icl_get_combo_buf_trans(dev_priv, type, rate, n_entries);
@@ -989,7 +1032,8 @@ static int intel_ddi_hdmi_level(struct intel_encoder *encoder)
icl_get_combo_buf_trans(dev_priv, INTEL_OUTPUT_HDMI,
0, &n_entries);
else
- n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations);
+ icl_get_mg_buf_trans(dev_priv, INTEL_OUTPUT_HDMI, 0,
+ &n_entries);
default_entry = n_entries - 1;
} else if (IS_CANNONLAKE(dev_priv)) {
cnl_get_buf_trans_hdmi(dev_priv, &n_entries);
@@ -1103,7 +1147,8 @@ static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
if (intel_de_read(dev_priv, reg) & DDI_BUF_IS_IDLE)
return;
}
- DRM_ERROR("Timeout waiting for DDI BUF %c idle bit\n", port_name(port));
+ drm_err(&dev_priv->drm, "Timeout waiting for DDI BUF %c idle bit\n",
+ port_name(port));
}
static u32 hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll)
@@ -1216,7 +1261,10 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
for (i = 0; i < ARRAY_SIZE(hsw_ddi_translations_fdi) * 2; i++) {
/* Configure DP_TP_CTL with auto-training */
intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
- DP_TP_CTL_FDI_AUTOTRAIN | DP_TP_CTL_ENHANCED_FRAME_ENABLE | DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_ENABLE);
+ DP_TP_CTL_FDI_AUTOTRAIN |
+ DP_TP_CTL_ENHANCED_FRAME_ENABLE |
+ DP_TP_CTL_LINK_TRAIN_PAT1 |
+ DP_TP_CTL_ENABLE);
/* Configure and enable DDI_BUF_CTL for DDI E with next voltage.
* DDI E does not support port reversal, the functionality is
@@ -1250,7 +1298,8 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
temp = intel_de_read(dev_priv, DP_TP_STATUS(PORT_E));
if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
- DRM_DEBUG_KMS("FDI link training done on step %d\n", i);
+ drm_dbg_kms(&dev_priv->drm,
+ "FDI link training done on step %d\n", i);
break;
}
@@ -1259,7 +1308,7 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
* Results in less fireworks from the state checker.
*/
if (i == ARRAY_SIZE(hsw_ddi_translations_fdi) * 2 - 1) {
- DRM_ERROR("FDI link training failed!\n");
+ drm_err(&dev_priv->drm, "FDI link training failed!\n");
break;
}
@@ -1291,7 +1340,10 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
/* Enable normal pixel sending for FDI */
intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
- DP_TP_CTL_FDI_AUTOTRAIN | DP_TP_CTL_LINK_TRAIN_NORMAL | DP_TP_CTL_ENHANCED_FRAME_ENABLE | DP_TP_CTL_ENABLE);
+ DP_TP_CTL_FDI_AUTOTRAIN |
+ DP_TP_CTL_LINK_TRAIN_NORMAL |
+ DP_TP_CTL_ENHANCED_FRAME_ENABLE |
+ DP_TP_CTL_ENABLE);
}
static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder)
@@ -1305,27 +1357,6 @@ static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder)
intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count);
}
-static struct intel_encoder *
-intel_ddi_get_crtc_encoder(struct intel_crtc *crtc)
-{
- struct drm_device *dev = crtc->base.dev;
- struct intel_encoder *encoder, *ret = NULL;
- int num_encoders = 0;
-
- for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
- ret = encoder;
- num_encoders++;
- }
-
- if (num_encoders != 1)
- drm_WARN(dev, 1, "%d encoders on crtc for pipe %c\n",
- num_encoders,
- pipe_name(crtc->pipe));
-
- BUG_ON(ret == NULL);
- return ret;
-}
-
static int icl_calc_tbt_pll_link(struct drm_i915_private *dev_priv,
enum port port)
{
@@ -1451,6 +1482,14 @@ void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
intel_de_write(dev_priv, TRANS_MSA_MISC(cpu_transcoder), temp);
}
+static u32 bdw_trans_port_sync_master_select(enum transcoder master_transcoder)
+{
+ if (master_transcoder == TRANSCODER_EDP)
+ return 0;
+ else
+ return master_transcoder + 1;
+}
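[Editor's note, not part of the patch: the helper above encodes the master transcoder into the port-sync master-select field as 0 for the EDP transcoder and index plus one for everything else; the readout path added further down decodes it in reverse. A hedged standalone sketch of that round trip, using placeholder enum values rather than the driver's:]
#include <assert.h>
/* Placeholder transcoder values, for illustration only. */
enum { XCODER_A, XCODER_B, XCODER_C, XCODER_D, XCODER_EDP };
static unsigned int encode_master_select(int master)
{
	return master == XCODER_EDP ? 0 : (unsigned int)master + 1;
}
static int decode_master_select(unsigned int field)
{
	return field == 0 ? XCODER_EDP : (int)field - 1;
}
int main(void)
{
	for (int t = XCODER_A; t <= XCODER_EDP; t++)
		assert(decode_master_select(encode_master_select(t)) == t);
	return 0;
}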
+
/*
* Returns the TRANS_DDI_FUNC_CTL value based on CRTC state.
*
@@ -1458,10 +1497,10 @@ void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
* intel_ddi_config_transcoder_func().
*/
static u32
-intel_ddi_transcoder_func_reg_val_get(const struct intel_crtc_state *crtc_state)
+intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct intel_encoder *encoder = intel_ddi_get_crtc_encoder(crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
@@ -1551,20 +1590,46 @@ intel_ddi_transcoder_func_reg_val_get(const struct intel_crtc_state *crtc_state)
temp |= DDI_PORT_WIDTH(crtc_state->lane_count);
}
+ if (IS_GEN_RANGE(dev_priv, 8, 10) &&
+ crtc_state->master_transcoder != INVALID_TRANSCODER) {
+ u8 master_select =
+ bdw_trans_port_sync_master_select(crtc_state->master_transcoder);
+
+ temp |= TRANS_DDI_PORT_SYNC_ENABLE |
+ TRANS_DDI_PORT_SYNC_MASTER_SELECT(master_select);
+ }
+
return temp;
}
-void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
+void intel_ddi_enable_transcoder_func(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- u32 temp;
+ u32 ctl;
+
+ if (INTEL_GEN(dev_priv) >= 11) {
+ enum transcoder master_transcoder = crtc_state->master_transcoder;
+ u32 ctl2 = 0;
+
+ if (master_transcoder != INVALID_TRANSCODER) {
+ u8 master_select =
+ bdw_trans_port_sync_master_select(master_transcoder);
+
+ ctl2 |= PORT_SYNC_MODE_ENABLE |
+ PORT_SYNC_MODE_MASTER_SELECT(master_select);
+ }
- temp = intel_ddi_transcoder_func_reg_val_get(crtc_state);
+ intel_de_write(dev_priv,
+ TRANS_DDI_FUNC_CTL2(cpu_transcoder), ctl2);
+ }
+
+ ctl = intel_ddi_transcoder_func_reg_val_get(encoder, crtc_state);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST))
- temp |= TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
- intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
+ ctl |= TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
+ intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), ctl);
}
/*
@@ -1572,16 +1637,17 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
* bit.
*/
static void
-intel_ddi_config_transcoder_func(const struct intel_crtc_state *crtc_state)
+intel_ddi_config_transcoder_func(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- u32 temp;
+ u32 ctl;
- temp = intel_ddi_transcoder_func_reg_val_get(crtc_state);
- temp &= ~TRANS_DDI_FUNC_ENABLE;
- intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
+ ctl = intel_ddi_transcoder_func_reg_val_get(encoder, crtc_state);
+ ctl &= ~TRANS_DDI_FUNC_ENABLE;
+ intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), ctl);
}
void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state)
@@ -1589,24 +1655,35 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- u32 val;
+ u32 ctl;
+
+ if (INTEL_GEN(dev_priv) >= 11)
+ intel_de_write(dev_priv,
+ TRANS_DDI_FUNC_CTL2(cpu_transcoder), 0);
- val = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
- val &= ~TRANS_DDI_FUNC_ENABLE;
+ ctl = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
+
+ ctl &= ~TRANS_DDI_FUNC_ENABLE;
+
+ if (IS_GEN_RANGE(dev_priv, 8, 10))
+ ctl &= ~(TRANS_DDI_PORT_SYNC_ENABLE |
+ TRANS_DDI_PORT_SYNC_MASTER_SELECT_MASK);
if (INTEL_GEN(dev_priv) >= 12) {
if (!intel_dp_mst_is_master_trans(crtc_state)) {
- val &= ~(TGL_TRANS_DDI_PORT_MASK |
+ ctl &= ~(TGL_TRANS_DDI_PORT_MASK |
TRANS_DDI_MODE_SELECT_MASK);
}
} else {
- val &= ~(TRANS_DDI_PORT_MASK | TRANS_DDI_MODE_SELECT_MASK);
+ ctl &= ~(TRANS_DDI_PORT_MASK | TRANS_DDI_MODE_SELECT_MASK);
}
- intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), val);
+
+ intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), ctl);
if (dev_priv->quirks & QUIRK_INCREASE_DDI_DISABLED_TIME &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
- DRM_DEBUG_KMS("Quirk Increase DDI disabled time\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Quirk Increase DDI disabled time\n");
/* Quirk time at 100ms for reliable operation */
msleep(100);
}
@@ -1667,7 +1744,7 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
goto out;
}
- if (HAS_TRANSCODER_EDP(dev_priv) && port == PORT_A)
+ if (HAS_TRANSCODER(dev_priv, TRANSCODER_EDP) && port == PORT_A)
cpu_transcoder = TRANSCODER_EDP;
else
cpu_transcoder = (enum transcoder) pipe;
@@ -1729,7 +1806,7 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
if (!(tmp & DDI_BUF_CTL_ENABLE))
goto out;
- if (HAS_TRANSCODER_EDP(dev_priv) && port == PORT_A) {
+ if (HAS_TRANSCODER(dev_priv, TRANSCODER_EDP) && port == PORT_A) {
tmp = intel_de_read(dev_priv,
TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
@@ -1787,20 +1864,23 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
}
if (!*pipe_mask)
- DRM_DEBUG_KMS("No pipe for [ENCODER:%d:%s] found\n",
- encoder->base.base.id, encoder->base.name);
+ drm_dbg_kms(&dev_priv->drm,
+ "No pipe for [ENCODER:%d:%s] found\n",
+ encoder->base.base.id, encoder->base.name);
if (!mst_pipe_mask && hweight8(*pipe_mask) > 1) {
- DRM_DEBUG_KMS("Multiple pipes for [ENCODER:%d:%s] (pipe_mask %02x)\n",
- encoder->base.base.id, encoder->base.name,
- *pipe_mask);
+ drm_dbg_kms(&dev_priv->drm,
+ "Multiple pipes for [ENCODER:%d:%s] (pipe_mask %02x)\n",
+ encoder->base.base.id, encoder->base.name,
+ *pipe_mask);
*pipe_mask = BIT(ffs(*pipe_mask) - 1);
}
if (mst_pipe_mask && mst_pipe_mask != *pipe_mask)
- DRM_DEBUG_KMS("Conflicting MST and non-MST state for [ENCODER:%d:%s] (pipe_mask %02x mst_pipe_mask %02x)\n",
- encoder->base.base.id, encoder->base.name,
- *pipe_mask, mst_pipe_mask);
+ drm_dbg_kms(&dev_priv->drm,
+ "Conflicting MST and non-MST state for [ENCODER:%d:%s] (pipe_mask %02x mst_pipe_mask %02x)\n",
+ encoder->base.base.id, encoder->base.name,
+ *pipe_mask, mst_pipe_mask);
else
*is_dp_mst = mst_pipe_mask;
@@ -1810,9 +1890,9 @@ out:
if ((tmp & (BXT_PHY_CMNLANE_POWERDOWN_ACK |
BXT_PHY_LANE_POWERDOWN_ACK |
BXT_PHY_LANE_ENABLED)) != BXT_PHY_LANE_ENABLED)
- DRM_ERROR("[ENCODER:%d:%s] enabled but PHY powered down? "
- "(PHY_CTL %08x)\n", encoder->base.base.id,
- encoder->base.name, tmp);
+ drm_err(&dev_priv->drm,
+ "[ENCODER:%d:%s] enabled but PHY powered down? (PHY_CTL %08x)\n",
+ encoder->base.base.id, encoder->base.name, tmp);
}
intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
@@ -1834,7 +1914,7 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
return true;
}
-static inline enum intel_display_power_domain
+static enum intel_display_power_domain
intel_ddi_main_link_aux_domain(struct intel_digital_port *dig_port)
{
/* CNL+ HW requires corresponding AUX IOs to be powered up for PSR with
@@ -1893,11 +1973,11 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
intel_dsc_power_domain(crtc_state));
}
-void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state)
+void intel_ddi_enable_pipe_clock(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_encoder *encoder = intel_ddi_get_crtc_encoder(crtc);
enum port port = encoder->port;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
@@ -1978,7 +2058,7 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder,
/* Make sure that the requested I_boost is valid */
if (iboost && iboost != 0x1 && iboost != 0x3 && iboost != 0x7) {
- DRM_ERROR("Invalid I_boost value %u\n", iboost);
+ drm_err(&dev_priv->drm, "Invalid I_boost value %u\n", iboost);
return;
}
@@ -2037,7 +2117,8 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
icl_get_combo_buf_trans(dev_priv, encoder->type,
intel_dp->link_rate, &n_entries);
else
- n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations);
+ icl_get_mg_buf_trans(dev_priv, encoder->type,
+ intel_dp->link_rate, &n_entries);
} else if (IS_CANNONLAKE(dev_priv)) {
if (encoder->type == INTEL_OUTPUT_EDP)
cnl_get_buf_trans_edp(dev_priv, &n_entries);
@@ -2237,7 +2318,9 @@ static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
return;
if (level >= n_entries) {
- DRM_DEBUG_KMS("DDI translation not found for level %d. Using %d instead.", level, n_entries - 1);
+ drm_dbg_kms(&dev_priv->drm,
+ "DDI translation not found for level %d. Using %d instead.",
+ level, n_entries - 1);
level = n_entries - 1;
}
@@ -2350,21 +2433,28 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
}
static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
- int link_clock,
- u32 level)
+ int link_clock, u32 level,
+ enum intel_output_type type)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port);
const struct icl_mg_phy_ddi_buf_trans *ddi_translations;
u32 n_entries, val;
- int ln;
+ int ln, rate = 0;
+
+ if (type != INTEL_OUTPUT_HDMI) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ rate = intel_dp->link_rate;
+ }
- n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations);
- ddi_translations = icl_mg_phy_ddi_translations;
+ ddi_translations = icl_get_mg_buf_trans(dev_priv, type, rate,
+ &n_entries);
/* The table does not have values for level 3 and level 9. */
if (level >= n_entries || level == 3 || level == 9) {
- DRM_DEBUG_KMS("DDI translation not found for level %d. Using %d instead.",
- level, n_entries - 2);
+ drm_dbg_kms(&dev_priv->drm,
+ "DDI translation not found for level %d. Using %d instead.",
+ level, n_entries - 2);
level = n_entries - 2;
}
@@ -2483,7 +2573,8 @@ static void icl_ddi_vswing_sequence(struct intel_encoder *encoder,
if (intel_phy_is_combo(dev_priv, phy))
icl_combo_phy_ddi_vswing_sequence(encoder, level, type);
else
- icl_mg_phy_ddi_vswing_sequence(encoder, link_clock, level);
+ icl_mg_phy_ddi_vswing_sequence(encoder, link_clock, level,
+ type);
}
static void
@@ -2550,8 +2641,9 @@ static void tgl_ddi_vswing_sequence(struct intel_encoder *encoder,
tgl_dkl_phy_ddi_vswing_sequence(encoder, link_clock, level);
}
-static u32 translate_signal_level(int signal_levels)
+static u32 translate_signal_level(struct intel_dp *intel_dp, int signal_levels)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
int i;
for (i = 0; i < ARRAY_SIZE(index_to_dp_signal_levels); i++) {
@@ -2559,8 +2651,9 @@ static u32 translate_signal_level(int signal_levels)
return i;
}
- WARN(1, "Unsupported voltage swing/pre-emphasis level: 0x%x\n",
- signal_levels);
+ drm_WARN(&i915->drm, 1,
+ "Unsupported voltage swing/pre-emphasis level: 0x%x\n",
+ signal_levels);
return 0;
}
@@ -2571,46 +2664,73 @@ static u32 intel_ddi_dp_level(struct intel_dp *intel_dp)
int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
DP_TRAIN_PRE_EMPHASIS_MASK);
- return translate_signal_level(signal_levels);
+ return translate_signal_level(intel_dp, signal_levels);
}
-u32 bxt_signal_levels(struct intel_dp *intel_dp)
+static void
+tgl_set_signal_levels(struct intel_dp *intel_dp)
{
- struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
- struct intel_encoder *encoder = &dport->base;
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
int level = intel_ddi_dp_level(intel_dp);
- if (INTEL_GEN(dev_priv) >= 12)
- tgl_ddi_vswing_sequence(encoder, intel_dp->link_rate,
- level, encoder->type);
- else if (INTEL_GEN(dev_priv) >= 11)
- icl_ddi_vswing_sequence(encoder, intel_dp->link_rate,
- level, encoder->type);
- else if (IS_CANNONLAKE(dev_priv))
- cnl_ddi_vswing_sequence(encoder, level, encoder->type);
- else
- bxt_ddi_vswing_sequence(encoder, level, encoder->type);
+ tgl_ddi_vswing_sequence(encoder, intel_dp->link_rate,
+ level, encoder->type);
+}
- return 0;
+static void
+icl_set_signal_levels(struct intel_dp *intel_dp)
+{
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ int level = intel_ddi_dp_level(intel_dp);
+
+ icl_ddi_vswing_sequence(encoder, intel_dp->link_rate,
+ level, encoder->type);
}
-u32 ddi_signal_levels(struct intel_dp *intel_dp)
+static void
+cnl_set_signal_levels(struct intel_dp *intel_dp)
{
- struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
- struct intel_encoder *encoder = &dport->base;
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
int level = intel_ddi_dp_level(intel_dp);
+ cnl_ddi_vswing_sequence(encoder, level, encoder->type);
+}
+
+static void
+bxt_set_signal_levels(struct intel_dp *intel_dp)
+{
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ int level = intel_ddi_dp_level(intel_dp);
+
+ bxt_ddi_vswing_sequence(encoder, level, encoder->type);
+}
+
+static void
+hsw_set_signal_levels(struct intel_dp *intel_dp)
+{
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ int level = intel_ddi_dp_level(intel_dp);
+ enum port port = encoder->port;
+ u32 signal_levels;
+
+ signal_levels = DDI_BUF_TRANS_SELECT(level);
+
+ drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
+ signal_levels);
+
+ intel_dp->DP &= ~DDI_BUF_EMP_MASK;
+ intel_dp->DP |= signal_levels;
+
if (IS_GEN9_BC(dev_priv))
skl_ddi_set_iboost(encoder, level, encoder->type);
- return DDI_BUF_TRANS_SELECT(level);
+ intel_de_write(dev_priv, DDI_BUF_CTL(port), intel_dp->DP);
+ intel_de_posting_read(dev_priv, DDI_BUF_CTL(port));
}
-static inline
-u32 icl_dpclka_cfgcr0_clk_off(struct drm_i915_private *dev_priv,
- enum phy phy)
+static u32 icl_dpclka_cfgcr0_clk_off(struct drm_i915_private *dev_priv,
+ enum phy phy)
{
if (intel_phy_is_combo(dev_priv, phy)) {
return ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
@@ -2698,8 +2818,9 @@ static void icl_sanitize_port_clk_off(struct drm_i915_private *dev_priv,
if (drm_WARN_ON(&dev_priv->drm, ddi_clk_needed))
continue;
- DRM_NOTE("PHY %c is disabled/in DSI mode with an ungated DDI clock, gate it\n",
- phy_name(phy));
+ drm_notice(&dev_priv->drm,
+ "PHY %c is disabled/in DSI mode with an ungated DDI clock, gate it\n",
+ phy_name(phy));
val |= icl_dpclka_cfgcr0_clk_off(dev_priv, phy);
intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val);
}
@@ -2936,11 +3057,14 @@ icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port,
static void intel_dp_sink_set_fec_ready(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
if (!crtc_state->fec_enable)
return;
if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_FEC_CONFIGURATION, DP_FEC_READY) <= 0)
- DRM_DEBUG_KMS("Failed to set FEC_READY in the sink\n");
+ drm_dbg_kms(&i915->drm,
+ "Failed to set FEC_READY in the sink\n");
}
static void intel_ddi_enable_fec(struct intel_encoder *encoder,
@@ -2960,7 +3084,8 @@ static void intel_ddi_enable_fec(struct intel_encoder *encoder,
if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
DP_TP_STATUS_FEC_ENABLE_LIVE, 1))
- DRM_ERROR("Timed out waiting for FEC Enable Status\n");
+ drm_err(&dev_priv->drm,
+ "Timed out waiting for FEC Enable Status\n");
}
static void intel_ddi_disable_fec_state(struct intel_encoder *encoder,
@@ -2980,7 +3105,8 @@ static void intel_ddi_disable_fec_state(struct intel_encoder *encoder,
intel_de_posting_read(dev_priv, intel_dp->regs.dp_tp_ctl);
}
-static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder,
+static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
@@ -3048,13 +3174,13 @@ static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder,
* 7.a Configure Transcoder Clock Select to direct the Port clock to the
* Transcoder.
*/
- intel_ddi_enable_pipe_clock(crtc_state);
+ intel_ddi_enable_pipe_clock(encoder, crtc_state);
/*
* 7.b Configure TRANS_DDI_FUNC_CTL DDI Select, DDI Mode Select & MST
* Transport Select
*/
- intel_ddi_config_transcoder_func(crtc_state);
+ intel_ddi_config_transcoder_func(encoder, crtc_state);
/*
* 7.c Configure & enable DP_TP_CTL with link training pattern 1
@@ -3120,7 +3246,8 @@ static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder,
intel_dsc_enable(encoder, crtc_state);
}
-static void hsw_ddi_pre_enable_dp(struct intel_encoder *encoder,
+static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
@@ -3185,21 +3312,22 @@ static void hsw_ddi_pre_enable_dp(struct intel_encoder *encoder,
intel_ddi_enable_fec(encoder, crtc_state);
if (!is_mst)
- intel_ddi_enable_pipe_clock(crtc_state);
+ intel_ddi_enable_pipe_clock(encoder, crtc_state);
intel_dsc_enable(encoder, crtc_state);
}
-static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
+static void intel_ddi_pre_enable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
if (INTEL_GEN(dev_priv) >= 12)
- tgl_ddi_pre_enable_dp(encoder, crtc_state, conn_state);
+ tgl_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state);
else
- hsw_ddi_pre_enable_dp(encoder, crtc_state, conn_state);
+ hsw_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state);
/* MST will call a setting of MSA after an allocating of Virtual Channel
* from MST encoder pre_enable callback.
@@ -3211,7 +3339,8 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
}
}
-static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
+static void intel_ddi_pre_enable_hdmi(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
@@ -3244,14 +3373,15 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
if (IS_GEN9_BC(dev_priv))
skl_ddi_set_iboost(encoder, level, INTEL_OUTPUT_HDMI);
- intel_ddi_enable_pipe_clock(crtc_state);
+ intel_ddi_enable_pipe_clock(encoder, crtc_state);
intel_dig_port->set_infoframes(encoder,
crtc_state->has_infoframe,
crtc_state, conn_state);
}
-static void intel_ddi_pre_enable(struct intel_encoder *encoder,
+static void intel_ddi_pre_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
@@ -3280,12 +3410,14 @@ static void intel_ddi_pre_enable(struct intel_encoder *encoder,
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
- intel_ddi_pre_enable_hdmi(encoder, crtc_state, conn_state);
+ intel_ddi_pre_enable_hdmi(state, encoder, crtc_state,
+ conn_state);
} else {
struct intel_lspcon *lspcon =
enc_to_intel_lspcon(encoder);
- intel_ddi_pre_enable_dp(encoder, crtc_state, conn_state);
+ intel_ddi_pre_enable_dp(state, encoder, crtc_state,
+ conn_state);
if (lspcon->active) {
struct intel_digital_port *dig_port =
enc_to_dig_port(encoder);
@@ -3328,7 +3460,8 @@ static void intel_disable_ddi_buf(struct intel_encoder *encoder,
intel_wait_ddi_buf_idle(dev_priv, port);
}
-static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
+static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -3339,6 +3472,8 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
INTEL_OUTPUT_DP_MST);
enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+ intel_dp_set_infoframes(encoder, false, old_crtc_state, old_conn_state);
+
/*
* Power down sink before disabling the port, otherwise we end
* up getting interrupts from the sink on detecting link loss.
@@ -3384,7 +3519,8 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
intel_ddi_clk_disable(encoder);
}
-static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder,
+static void intel_ddi_post_disable_hdmi(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -3407,22 +3543,8 @@ static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder,
intel_dp_dual_mode_set_tmds_output(intel_hdmi, false);
}
-static void icl_disable_transcoder_port_sync(const struct intel_crtc_state *old_crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
- if (old_crtc_state->master_transcoder == INVALID_TRANSCODER)
- return;
-
- DRM_DEBUG_KMS("Disabling Transcoder Port Sync on Slave Transcoder %s\n",
- transcoder_name(old_crtc_state->cpu_transcoder));
-
- intel_de_write(dev_priv,
- TRANS_DDI_FUNC_CTL2(old_crtc_state->cpu_transcoder), 0);
-}
-
-static void intel_ddi_post_disable(struct intel_encoder *encoder,
+static void intel_ddi_post_disable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -3436,9 +3558,6 @@ static void intel_ddi_post_disable(struct intel_encoder *encoder,
intel_disable_pipe(old_crtc_state);
- if (INTEL_GEN(dev_priv) >= 11)
- icl_disable_transcoder_port_sync(old_crtc_state);
-
intel_ddi_disable_transcoder_func(old_crtc_state);
intel_dsc_disable(old_crtc_state);
@@ -3463,11 +3582,11 @@ static void intel_ddi_post_disable(struct intel_encoder *encoder,
*/
if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_HDMI))
- intel_ddi_post_disable_hdmi(encoder,
- old_crtc_state, old_conn_state);
+ intel_ddi_post_disable_hdmi(state, encoder, old_crtc_state,
+ old_conn_state);
else
- intel_ddi_post_disable_dp(encoder,
- old_crtc_state, old_conn_state);
+ intel_ddi_post_disable_dp(state, encoder, old_crtc_state,
+ old_conn_state);
if (INTEL_GEN(dev_priv) >= 11)
icl_unmap_plls_to_ports(encoder);
@@ -3480,7 +3599,8 @@ static void intel_ddi_post_disable(struct intel_encoder *encoder,
intel_tc_port_put_link(dig_port);
}
-void intel_ddi_fdi_post_disable(struct intel_encoder *encoder,
+void intel_ddi_fdi_post_disable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -3514,7 +3634,43 @@ void intel_ddi_fdi_post_disable(struct intel_encoder *encoder,
intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val);
}
-static void intel_enable_ddi_dp(struct intel_encoder *encoder,
+static void trans_port_sync_stop_link_train(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ const struct drm_connector_state *conn_state;
+ struct drm_connector *conn;
+ int i;
+
+ if (!crtc_state->sync_mode_slaves_mask)
+ return;
+
+ for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
+ struct intel_encoder *slave_encoder =
+ to_intel_encoder(conn_state->best_encoder);
+ struct intel_crtc *slave_crtc = to_intel_crtc(conn_state->crtc);
+ const struct intel_crtc_state *slave_crtc_state;
+
+ if (!slave_crtc)
+ continue;
+
+ slave_crtc_state =
+ intel_atomic_get_new_crtc_state(state, slave_crtc);
+
+ if (slave_crtc_state->master_transcoder !=
+ crtc_state->cpu_transcoder)
+ continue;
+
+ intel_dp_stop_link_train(enc_to_intel_dp(slave_encoder));
+ }
+
+ usleep_range(200, 400);
+
+ intel_dp_stop_link_train(enc_to_intel_dp(encoder));
+}
+
+static void intel_enable_ddi_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
@@ -3526,13 +3682,14 @@ static void intel_enable_ddi_dp(struct intel_encoder *encoder,
intel_dp_stop_link_train(intel_dp);
intel_edp_backlight_on(crtc_state, conn_state);
- intel_psr_enable(intel_dp, crtc_state);
- intel_dp_vsc_enable(intel_dp, crtc_state, conn_state);
- intel_dp_hdr_metadata_enable(intel_dp, crtc_state, conn_state);
+ intel_psr_enable(intel_dp, crtc_state, conn_state);
+ intel_dp_set_infoframes(encoder, true, crtc_state, conn_state);
intel_edp_drrs_enable(intel_dp, crtc_state);
if (crtc_state->has_audio)
intel_audio_codec_enable(encoder, crtc_state, conn_state);
+
+ trans_port_sync_stop_link_train(state, encoder, crtc_state);
}
static i915_reg_t
@@ -3555,7 +3712,8 @@ gen9_chicken_trans_reg_by_port(struct drm_i915_private *dev_priv,
return CHICKEN_TRANS(trans[port]);
}
-static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
+static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
@@ -3567,9 +3725,9 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
if (!intel_hdmi_handle_sink_scrambling(encoder, connector,
crtc_state->hdmi_high_tmds_clock_ratio,
crtc_state->hdmi_scrambling))
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Failed to configure sink "
- "scrambling/TMDS bit clock ratio\n",
- connector->base.id, connector->name);
+ drm_dbg_kms(&dev_priv->drm,
+ "[CONNECTOR:%d:%s] Failed to configure sink scrambling/TMDS bit clock ratio\n",
+ connector->base.id, connector->name);
/* Display WA #1143: skl,kbl,cfl */
if (IS_GEN9_BC(dev_priv)) {
@@ -3617,20 +3775,23 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
intel_audio_codec_enable(encoder, crtc_state, conn_state);
}
-static void intel_enable_ddi(struct intel_encoder *encoder,
+static void intel_enable_ddi(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- WARN_ON(crtc_state->has_pch_encoder);
+ drm_WARN_ON(state->base.dev, crtc_state->has_pch_encoder);
+
+ intel_ddi_enable_transcoder_func(encoder, crtc_state);
intel_enable_pipe(crtc_state);
intel_crtc_vblank_on(crtc_state);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- intel_enable_ddi_hdmi(encoder, crtc_state, conn_state);
+ intel_enable_ddi_hdmi(state, encoder, crtc_state, conn_state);
else
- intel_enable_ddi_dp(encoder, crtc_state, conn_state);
+ intel_enable_ddi_dp(state, encoder, crtc_state, conn_state);
/* Enable hdcp if it's desired */
if (conn_state->content_protection ==
@@ -3640,7 +3801,8 @@ static void intel_enable_ddi(struct intel_encoder *encoder,
(u8)conn_state->hdcp_content_type);
}
-static void intel_disable_ddi_dp(struct intel_encoder *encoder,
+static void intel_disable_ddi_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -3660,10 +3822,12 @@ static void intel_disable_ddi_dp(struct intel_encoder *encoder,
false);
}
-static void intel_disable_ddi_hdmi(struct intel_encoder *encoder,
+static void intel_disable_ddi_hdmi(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct drm_connector *connector = old_conn_state->connector;
if (old_crtc_state->has_audio)
@@ -3672,23 +3836,28 @@ static void intel_disable_ddi_hdmi(struct intel_encoder *encoder,
if (!intel_hdmi_handle_sink_scrambling(encoder, connector,
false, false))
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Failed to reset sink scrambling/TMDS bit clock ratio\n",
- connector->base.id, connector->name);
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] Failed to reset sink scrambling/TMDS bit clock ratio\n",
+ connector->base.id, connector->name);
}
-static void intel_disable_ddi(struct intel_encoder *encoder,
+static void intel_disable_ddi(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
intel_hdcp_disable(to_intel_connector(old_conn_state->connector));
if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_HDMI))
- intel_disable_ddi_hdmi(encoder, old_crtc_state, old_conn_state);
+ intel_disable_ddi_hdmi(state, encoder, old_crtc_state,
+ old_conn_state);
else
- intel_disable_ddi_dp(encoder, old_crtc_state, old_conn_state);
+ intel_disable_ddi_dp(state, encoder, old_crtc_state,
+ old_conn_state);
}
-static void intel_ddi_update_pipe_dp(struct intel_encoder *encoder,
+static void intel_ddi_update_pipe_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
@@ -3696,21 +3865,24 @@ static void intel_ddi_update_pipe_dp(struct intel_encoder *encoder,
intel_ddi_set_dp_msa(crtc_state, conn_state);
- intel_psr_update(intel_dp, crtc_state);
+ intel_psr_update(intel_dp, crtc_state, conn_state);
+ intel_dp_set_infoframes(encoder, true, crtc_state, conn_state);
intel_edp_drrs_enable(intel_dp, crtc_state);
- intel_panel_update_backlight(encoder, crtc_state, conn_state);
+ intel_panel_update_backlight(state, encoder, crtc_state, conn_state);
}
-static void intel_ddi_update_pipe(struct intel_encoder *encoder,
+static void intel_ddi_update_pipe(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- intel_ddi_update_pipe_dp(encoder, crtc_state, conn_state);
+ intel_ddi_update_pipe_dp(state, encoder, crtc_state,
+ conn_state);
- intel_hdcp_update_pipe(encoder, crtc_state, conn_state);
+ intel_hdcp_update_pipe(state, encoder, crtc_state, conn_state);
}
static void
@@ -3722,7 +3894,7 @@ intel_ddi_update_prepare(struct intel_atomic_state *state,
crtc ? intel_atomic_get_new_crtc_state(state, crtc) : NULL;
int required_lanes = crtc_state ? crtc_state->lane_count : 1;
- WARN_ON(crtc && crtc->active);
+ drm_WARN_ON(state->base.dev, crtc && crtc->active);
intel_tc_port_get_link(enc_to_dig_port(encoder),
required_lanes);
@@ -3739,7 +3911,8 @@ intel_ddi_update_complete(struct intel_atomic_state *state,
}
static void
-intel_ddi_pre_pll_enable(struct intel_encoder *encoder,
+intel_ddi_pre_pll_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
@@ -3813,6 +3986,74 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
udelay(600);
}
+static void intel_ddi_set_link_train(struct intel_dp *intel_dp,
+ u8 dp_train_pat)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u8 train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd);
+ enum port port = dp_to_dig_port(intel_dp)->base.port;
+ u32 temp;
+
+ temp = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl);
+
+ if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
+ temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
+ else
+ temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
+
+ temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
+ switch (dp_train_pat & train_pat_mask) {
+ case DP_TRAINING_PATTERN_DISABLE:
+ temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
+ break;
+ case DP_TRAINING_PATTERN_1:
+ temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
+ break;
+ case DP_TRAINING_PATTERN_2:
+ temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
+ break;
+ case DP_TRAINING_PATTERN_3:
+ temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
+ break;
+ case DP_TRAINING_PATTERN_4:
+ temp |= DP_TP_CTL_LINK_TRAIN_PAT4;
+ break;
+ }
+
+ intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, temp);
+
+ intel_de_write(dev_priv, DDI_BUF_CTL(port), intel_dp->DP);
+ intel_de_posting_read(dev_priv, DDI_BUF_CTL(port));
+}
+
+static void intel_ddi_set_idle_link_train(struct intel_dp *intel_dp)
+{
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+ u32 val;
+
+ val = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl);
+ val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
+ val |= DP_TP_CTL_LINK_TRAIN_IDLE;
+ intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, val);
+
+ /*
+ * Until TGL on PORT_A we can have only eDP in SST mode. There the only
+ * reason we need to set idle transmission mode is to work around a HW
+ * issue where we enable the pipe while not in idle link-training mode.
+ * In this case there is a requirement to wait for a minimum number of
+ * idle patterns to be sent.
+ */
+ if (port == PORT_A && INTEL_GEN(dev_priv) < 12)
+ return;
+
+ if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
+ DP_TP_STATUS_IDLE_DONE, 1))
+ drm_err(&dev_priv->drm,
+ "Timed out waiting for DP idle patterns\n");
+}
+
static bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder)
{
@@ -3839,6 +4080,66 @@ void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
crtc_state->min_voltage_level = 2;
}
+static enum transcoder bdw_transcoder_master_readout(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder)
+{
+ u32 master_select;
+
+ if (INTEL_GEN(dev_priv) >= 11) {
+ u32 ctl2 = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL2(cpu_transcoder));
+
+ if ((ctl2 & PORT_SYNC_MODE_ENABLE) == 0)
+ return INVALID_TRANSCODER;
+
+ master_select = REG_FIELD_GET(PORT_SYNC_MODE_MASTER_SELECT_MASK, ctl2);
+ } else {
+ u32 ctl = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
+
+ if ((ctl & TRANS_DDI_PORT_SYNC_ENABLE) == 0)
+ return INVALID_TRANSCODER;
+
+ master_select = REG_FIELD_GET(TRANS_DDI_PORT_SYNC_MASTER_SELECT_MASK, ctl);
+ }
+
+ if (master_select == 0)
+ return TRANSCODER_EDP;
+ else
+ return master_select - 1;
+}
+
+static void bdw_get_trans_port_sync_config(struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ u32 transcoders = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
+ BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
+ enum transcoder cpu_transcoder;
+
+ crtc_state->master_transcoder =
+ bdw_transcoder_master_readout(dev_priv, crtc_state->cpu_transcoder);
+
+ for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
+ enum intel_display_power_domain power_domain;
+ intel_wakeref_t trans_wakeref;
+
+ power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
+ trans_wakeref = intel_display_power_get_if_enabled(dev_priv,
+ power_domain);
+
+ if (!trans_wakeref)
+ continue;
+
+ if (bdw_transcoder_master_readout(dev_priv, cpu_transcoder) ==
+ crtc_state->cpu_transcoder)
+ crtc_state->sync_mode_slaves_mask |= BIT(cpu_transcoder);
+
+ intel_display_power_put(dev_priv, power_domain, trans_wakeref);
+ }
+
+ drm_WARN_ON(&dev_priv->drm,
+ crtc_state->master_transcoder != INVALID_TRANSCODER &&
+ crtc_state->sync_mode_slaves_mask);
+}
+
void intel_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
@@ -3930,11 +4231,15 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
pipe_config->fec_enable =
intel_de_read(dev_priv, dp_tp_ctl) & DP_TP_CTL_FEC_ENABLE;
- DRM_DEBUG_KMS("[ENCODER:%d:%s] Fec status: %u\n",
- encoder->base.base.id, encoder->base.name,
- pipe_config->fec_enable);
+ drm_dbg_kms(&dev_priv->drm,
+ "[ENCODER:%d:%s] Fec status: %u\n",
+ encoder->base.base.id, encoder->base.name,
+ pipe_config->fec_enable);
}
+ pipe_config->infoframes.enable |=
+ intel_hdmi_infoframes_enabled(encoder, pipe_config);
+
break;
case TRANS_DDI_MODE_SELECT_DP_MST:
pipe_config->output_types |= BIT(INTEL_OUTPUT_DP_MST);
@@ -3946,6 +4251,9 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
REG_FIELD_GET(TRANS_DDI_MST_TRANSPORT_SELECT_MASK, temp);
intel_dp_get_m_n(intel_crtc, pipe_config);
+
+ pipe_config->infoframes.enable |=
+ intel_hdmi_infoframes_enabled(encoder, pipe_config);
break;
default:
break;
@@ -3969,8 +4277,9 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
* up by the BIOS, and thus we can't get the mode at module
* load.
*/
- DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
- pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
+ drm_dbg_kms(&dev_priv->drm,
+ "pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
+ pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
}
@@ -3996,6 +4305,12 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
intel_read_infoframe(encoder, pipe_config,
HDMI_INFOFRAME_TYPE_DRM,
&pipe_config->infoframes.drm);
+
+ if (INTEL_GEN(dev_priv) >= 8)
+ bdw_get_trans_port_sync_config(pipe_config);
+
+ intel_read_dp_sdp(encoder, pipe_config, HDMI_PACKET_TYPE_GAMUT_METADATA);
+ intel_read_dp_sdp(encoder, pipe_config, DP_SDP_VSC);
}
static enum intel_output_type
@@ -4025,7 +4340,7 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
enum port port = encoder->port;
int ret;
- if (HAS_TRANSCODER_EDP(dev_priv) && port == PORT_A)
+ if (HAS_TRANSCODER(dev_priv, TRANSCODER_EDP) && port == PORT_A)
pipe_config->cpu_transcoder = TRANSCODER_EDP;
if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI)) {
@@ -4097,7 +4412,11 @@ intel_ddi_port_sync_transcoders(const struct intel_crtc_state *ref_crtc_state,
u8 transcoders = 0;
int i;
- if (INTEL_GEN(dev_priv) < 11)
+ /*
+ * We don't enable port sync on BDW due to missing w/as and
+ * due to not having adjusted the modeset sequence appropriately.
+ */
+ if (INTEL_GEN(dev_priv) < 9)
return 0;
if (!intel_crtc_has_type(ref_crtc_state, INTEL_OUTPUT_DP))
@@ -4129,12 +4448,13 @@ static int intel_ddi_compute_config_late(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct drm_connector *connector = conn_state->connector;
u8 port_sync_transcoders = 0;
- DRM_DEBUG_KMS("[ENCODER:%d:%s] [CRTC:%d:%s]",
- encoder->base.base.id, encoder->base.name,
- crtc_state->uapi.crtc->base.id, crtc_state->uapi.crtc->name);
+ drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] [CRTC:%d:%s]",
+ encoder->base.base.id, encoder->base.name,
+ crtc_state->uapi.crtc->base.id, crtc_state->uapi.crtc->name);
if (connector->has_tile)
port_sync_transcoders = intel_ddi_port_sync_transcoders(crtc_state,
@@ -4187,6 +4507,20 @@ intel_ddi_init_dp_connector(struct intel_digital_port *intel_dig_port)
intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);
intel_dig_port->dp.prepare_link_retrain =
intel_ddi_prepare_link_retrain;
+ intel_dig_port->dp.set_link_train = intel_ddi_set_link_train;
+ intel_dig_port->dp.set_idle_link_train = intel_ddi_set_idle_link_train;
+
+ if (INTEL_GEN(dev_priv) >= 12)
+ intel_dig_port->dp.set_signal_levels = tgl_set_signal_levels;
+ else if (INTEL_GEN(dev_priv) >= 11)
+ intel_dig_port->dp.set_signal_levels = icl_set_signal_levels;
+ else if (IS_CANNONLAKE(dev_priv))
+ intel_dig_port->dp.set_signal_levels = cnl_set_signal_levels;
+ else if (IS_GEN9_LP(dev_priv))
+ intel_dig_port->dp.set_signal_levels = bxt_set_signal_levels;
+ else
+ intel_dig_port->dp.set_signal_levels = hsw_set_signal_levels;
+
if (INTEL_GEN(dev_priv) < 12) {
intel_dig_port->dp.regs.dp_tp_ctl = DP_TP_CTL(port);
intel_dig_port->dp.regs.dp_tp_status = DP_TP_STATUS(port);
@@ -4278,7 +4612,8 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder,
ret = drm_scdc_readb(adapter, SCDC_TMDS_CONFIG, &config);
if (ret < 0) {
- DRM_ERROR("Failed to read TMDS config: %d\n", ret);
+ drm_err(&dev_priv->drm, "Failed to read TMDS config: %d\n",
+ ret);
return 0;
}
@@ -4302,15 +4637,17 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder,
static enum intel_hotplug_state
intel_ddi_hotplug(struct intel_encoder *encoder,
- struct intel_connector *connector,
- bool irq_received)
+ struct intel_connector *connector)
{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+ bool is_tc = intel_phy_is_tc(i915, phy);
struct drm_modeset_acquire_ctx ctx;
enum intel_hotplug_state state;
int ret;
- state = intel_encoder_hotplug(encoder, connector, irq_received);
+ state = intel_encoder_hotplug(encoder, connector);
drm_modeset_acquire_init(&ctx, 0);
@@ -4348,14 +4685,45 @@ intel_ddi_hotplug(struct intel_encoder *encoder,
* valid EDID. To solve this schedule another detection cycle if this
* time around we didn't detect any change in the sink's connection
* status.
+ *
+ * Type-c connectors which get their HPD signal deasserted then
+ * reasserted, without unplugging/replugging the sink from the
+ * connector, introduce a delay until the AUX channel communication
+ * becomes functional. Retry the detection for 5 seconds on type-c
+ * connectors to account for this delay.
*/
- if (state == INTEL_HOTPLUG_UNCHANGED && irq_received &&
+ if (state == INTEL_HOTPLUG_UNCHANGED &&
+ connector->hotplug_retries < (is_tc ? 5 : 1) &&
!dig_port->dp.is_mst)
state = INTEL_HOTPLUG_RETRY;
return state;
}
+static bool lpt_digital_port_connected(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ u32 bit = dev_priv->hotplug.pch_hpd[encoder->hpd_pin];
+
+ return intel_de_read(dev_priv, SDEISR) & bit;
+}
+
+static bool hsw_digital_port_connected(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ u32 bit = dev_priv->hotplug.hpd[encoder->hpd_pin];
+
+ return intel_de_read(dev_priv, DEISR) & bit;
+}
+
+static bool bdw_digital_port_connected(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ u32 bit = dev_priv->hotplug.hpd[encoder->hpd_pin];
+
+ return intel_de_read(dev_priv, GEN8_DE_PORT_ISR) & bit;
+}
+
static struct intel_connector *
intel_ddi_init_hdmi_connector(struct intel_digital_port *intel_dig_port)
{
@@ -4424,7 +4792,8 @@ intel_ddi_max_lanes(struct intel_digital_port *intel_dport)
* so we use the proper lane count for our calculations.
*/
if (intel_ddi_a_force_4_lanes(intel_dport)) {
- DRM_DEBUG_KMS("Forcing DDI_A_4_LANES for port A\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Forcing DDI_A_4_LANES for port A\n");
intel_dport->saved_port_bits |= DDI_A_4_LANES;
max_lanes = 4;
}
@@ -4452,12 +4821,14 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
init_dp = true;
init_lspcon = true;
init_hdmi = false;
- DRM_DEBUG_KMS("VBT says port %c has lspcon\n", port_name(port));
+ drm_dbg_kms(&dev_priv->drm, "VBT says port %c has lspcon\n",
+ port_name(port));
}
if (!init_dp && !init_hdmi) {
- DRM_DEBUG_KMS("VBT says port %c is not DVI/HDMI/DP compatible, respect it\n",
- port_name(port));
+ drm_dbg_kms(&dev_priv->drm,
+ "VBT says port %c is not DVI/HDMI/DP compatible, respect it\n",
+ port_name(port));
return;
}
@@ -4536,17 +4907,36 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
if (init_lspcon) {
if (lspcon_init(intel_dig_port))
/* TODO: handle hdmi info frame part */
- DRM_DEBUG_KMS("LSPCON init success on port %c\n",
- port_name(port));
+ drm_dbg_kms(&dev_priv->drm,
+ "LSPCON init success on port %c\n",
+ port_name(port));
else
/*
* LSPCON init failed, but DP init was successful, so
* let's try to drive as DP++ port.
*/
- DRM_ERROR("LSPCON init failed on port %c\n",
+ drm_err(&dev_priv->drm,
+ "LSPCON init failed on port %c\n",
port_name(port));
}
+ if (INTEL_GEN(dev_priv) >= 11) {
+ if (intel_phy_is_tc(dev_priv, phy))
+ intel_dig_port->connected = intel_tc_port_connected;
+ else
+ intel_dig_port->connected = lpt_digital_port_connected;
+ } else if (INTEL_GEN(dev_priv) >= 8) {
+ if (port == PORT_A || IS_GEN9_LP(dev_priv))
+ intel_dig_port->connected = bdw_digital_port_connected;
+ else
+ intel_dig_port->connected = lpt_digital_port_connected;
+ } else {
+ if (port == PORT_A)
+ intel_dig_port->connected = hsw_digital_port_connected;
+ else
+ intel_dig_port->connected = lpt_digital_port_connected;
+ }
+
intel_infoframe_init(intel_dig_port);
return;
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h
index 55fd72b901fe..fbdf8ddde486 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.h
+++ b/drivers/gpu/drm/i915/display/intel_ddi.h
@@ -17,16 +17,19 @@ struct intel_dp;
struct intel_dpll_hw_state;
struct intel_encoder;
-void intel_ddi_fdi_post_disable(struct intel_encoder *intel_encoder,
+void intel_ddi_fdi_post_disable(struct intel_atomic_state *state,
+ struct intel_encoder *intel_encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state);
void hsw_fdi_link_train(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port);
bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
-void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state);
+void intel_ddi_enable_transcoder_func(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state);
-void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state);
+void intel_ddi_enable_pipe_clock(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state);
void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 346846609f45..9ea1a397d1b5 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -238,9 +238,9 @@ static void intel_update_czclk(struct drm_i915_private *dev_priv)
dev_priv->czclk_freq);
}
-static inline u32 /* units of 100MHz */
-intel_fdi_link_freq(struct drm_i915_private *dev_priv,
- const struct intel_crtc_state *pipe_config)
+/* units of 100MHz */
+static u32 intel_fdi_link_freq(struct drm_i915_private *dev_priv,
+ const struct intel_crtc_state *pipe_config)
{
if (HAS_DDI(dev_priv))
return pipe_config->port_clock; /* SPLL */
@@ -525,7 +525,7 @@ skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
}
-/* Wa_2006604312:icl */
+/* Wa_2006604312:icl,ehl */
static void
icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
bool enable)
@@ -544,17 +544,23 @@ needs_modeset(const struct intel_crtc_state *state)
return drm_atomic_crtc_needs_modeset(&state->uapi);
}
-bool
-is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
+static bool
+is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
{
- return (crtc_state->master_transcoder != INVALID_TRANSCODER ||
- crtc_state->sync_mode_slaves_mask);
+ return crtc_state->master_transcoder != INVALID_TRANSCODER;
}
static bool
-is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
+is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
{
- return crtc_state->master_transcoder != INVALID_TRANSCODER;
+ return crtc_state->sync_mode_slaves_mask != 0;
+}
+
+bool
+is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
+{
+ return is_trans_port_sync_master(crtc_state) ||
+ is_trans_port_sync_slave(crtc_state);
}
/*
@@ -620,45 +626,43 @@ int chv_calc_dpll_params(int refclk, struct dpll *clock)
return clock->dot / 5;
}
-#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
-
/*
* Returns whether the given set of divisors are valid for a given refclk with
* the given connectors.
*/
-static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
+static bool intel_pll_is_valid(struct drm_i915_private *dev_priv,
const struct intel_limit *limit,
const struct dpll *clock)
{
- if (clock->n < limit->n.min || limit->n.max < clock->n)
- INTELPllInvalid("n out of range\n");
- if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
- INTELPllInvalid("p1 out of range\n");
- if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
- INTELPllInvalid("m2 out of range\n");
- if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
- INTELPllInvalid("m1 out of range\n");
+ if (clock->n < limit->n.min || limit->n.max < clock->n)
+ return false;
+ if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
+ return false;
+ if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
+ return false;
+ if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
+ return false;
if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
!IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
if (clock->m1 <= clock->m2)
- INTELPllInvalid("m1 <= m2\n");
+ return false;
if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
!IS_GEN9_LP(dev_priv)) {
if (clock->p < limit->p.min || limit->p.max < clock->p)
- INTELPllInvalid("p out of range\n");
+ return false;
if (clock->m < limit->m.min || limit->m.max < clock->m)
- INTELPllInvalid("m out of range\n");
+ return false;
}
if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
- INTELPllInvalid("vco out of range\n");
+ return false;
/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
* connector, etc., rather than just a single range.
*/
if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
- INTELPllInvalid("dot out of range\n");
+ return false;
return true;
}
@@ -725,7 +729,7 @@ i9xx_find_best_dpll(const struct intel_limit *limit,
int this_err;
i9xx_calc_dpll_params(refclk, &clock);
- if (!intel_PLL_is_valid(to_i915(dev),
+ if (!intel_pll_is_valid(to_i915(dev),
limit,
&clock))
continue;
@@ -781,7 +785,7 @@ pnv_find_best_dpll(const struct intel_limit *limit,
int this_err;
pnv_calc_dpll_params(refclk, &clock);
- if (!intel_PLL_is_valid(to_i915(dev),
+ if (!intel_pll_is_valid(to_i915(dev),
limit,
&clock))
continue;
@@ -842,7 +846,7 @@ g4x_find_best_dpll(const struct intel_limit *limit,
int this_err;
i9xx_calc_dpll_params(refclk, &clock);
- if (!intel_PLL_is_valid(to_i915(dev),
+ if (!intel_pll_is_valid(to_i915(dev),
limit,
&clock))
continue;
@@ -939,7 +943,7 @@ vlv_find_best_dpll(const struct intel_limit *limit,
vlv_calc_dpll_params(refclk, &clock);
- if (!intel_PLL_is_valid(to_i915(dev),
+ if (!intel_pll_is_valid(to_i915(dev),
limit,
&clock))
continue;
@@ -1008,7 +1012,7 @@ chv_find_best_dpll(const struct intel_limit *limit,
chv_calc_dpll_params(refclk, &clock);
- if (!intel_PLL_is_valid(to_i915(dev), limit, &clock))
+ if (!intel_pll_is_valid(to_i915(dev), limit, &clock))
continue;
if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
@@ -1969,16 +1973,16 @@ static bool is_aux_plane(const struct drm_framebuffer *fb, int plane)
static int main_to_ccs_plane(const struct drm_framebuffer *fb, int main_plane)
{
- WARN_ON(!is_ccs_modifier(fb->modifier) ||
- (main_plane && main_plane >= fb->format->num_planes / 2));
+ drm_WARN_ON(fb->dev, !is_ccs_modifier(fb->modifier) ||
+ (main_plane && main_plane >= fb->format->num_planes / 2));
return fb->format->num_planes / 2 + main_plane;
}
static int ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane)
{
- WARN_ON(!is_ccs_modifier(fb->modifier) ||
- ccs_plane < fb->format->num_planes / 2);
+ drm_WARN_ON(fb->dev, !is_ccs_modifier(fb->modifier) ||
+ ccs_plane < fb->format->num_planes / 2);
return ccs_plane - fb->format->num_planes / 2;
}
@@ -2910,6 +2914,7 @@ intel_fb_plane_get_subsampling(int *hsub, int *vsub,
static int
intel_fb_check_ccs_xy(struct drm_framebuffer *fb, int ccs_plane, int x, int y)
{
+ struct drm_i915_private *i915 = to_i915(fb->dev);
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
int main_plane;
int hsub, vsub;
@@ -2938,7 +2943,8 @@ intel_fb_check_ccs_xy(struct drm_framebuffer *fb, int ccs_plane, int x, int y)
* x/y offsets must match between CCS and the main surface.
*/
if (main_x != ccs_x || main_y != ccs_y) {
- DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
+ drm_dbg_kms(&i915->drm,
+ "Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
main_x, main_y,
ccs_x, ccs_y,
intel_fb->normal[main_plane].x,
@@ -2986,7 +2992,7 @@ setup_fb_rotation(int plane, const struct intel_remapped_plane_info *plane_info,
fb->modifier != I915_FORMAT_MOD_Yf_TILED)
return 0;
- if (WARN_ON(plane >= ARRAY_SIZE(rot_info->plane)))
+ if (drm_WARN_ON(fb->dev, plane >= ARRAY_SIZE(rot_info->plane)))
return 0;
rot_info->plane[plane] = *plane_info;
@@ -3336,6 +3342,8 @@ int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
return DRM_FORMAT_RGB565;
case PLANE_CTL_FORMAT_NV12:
return DRM_FORMAT_NV12;
+ case PLANE_CTL_FORMAT_XYUV:
+ return DRM_FORMAT_XYUV8888;
case PLANE_CTL_FORMAT_P010:
return DRM_FORMAT_P010;
case PLANE_CTL_FORMAT_P012:
@@ -4580,6 +4588,8 @@ static u32 skl_plane_ctl_format(u32 pixel_format)
case DRM_FORMAT_XRGB16161616F:
case DRM_FORMAT_ARGB16161616F:
return PLANE_CTL_FORMAT_XRGB_16161616F;
+ case DRM_FORMAT_XYUV8888:
+ return PLANE_CTL_FORMAT_XYUV;
case DRM_FORMAT_YUYV:
return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
case DRM_FORMAT_YVYU:
@@ -4998,37 +5008,6 @@ static void icl_set_pipe_chicken(struct intel_crtc *crtc)
intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
}
-static void icl_enable_trans_port_sync(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 trans_ddi_func_ctl2_val;
- u8 master_select;
-
- /*
- * Configure the master select and enable Transcoder Port Sync for
- * Slave CRTCs transcoder.
- */
- if (crtc_state->master_transcoder == INVALID_TRANSCODER)
- return;
-
- if (crtc_state->master_transcoder == TRANSCODER_EDP)
- master_select = 0;
- else
- master_select = crtc_state->master_transcoder + 1;
-
- /* Set the master select bits for Tranascoder Port Sync */
- trans_ddi_func_ctl2_val = (PORT_SYNC_MODE_MASTER_SELECT(master_select) &
- PORT_SYNC_MODE_MASTER_SELECT_MASK) <<
- PORT_SYNC_MODE_MASTER_SELECT_SHIFT;
- /* Enable Transcoder Port Sync */
- trans_ddi_func_ctl2_val |= PORT_SYNC_MODE_ENABLE;
-
- intel_de_write(dev_priv,
- TRANS_DDI_FUNC_CTL2(crtc_state->cpu_transcoder),
- trans_ddi_func_ctl2_val);
-}
-
static void intel_fdi_normal_train(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
@@ -6110,30 +6089,26 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
return 0;
}
-/**
- * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
- *
- * @state: crtc's scaler state
- *
- * Return
- * 0 - scaler_usage updated successfully
- * error - requested scaling cannot be supported or other error condition
- */
-int skl_update_scaler_crtc(struct intel_crtc_state *state)
+static int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state)
{
- const struct drm_display_mode *adjusted_mode = &state->hw.adjusted_mode;
- bool need_scaler = false;
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+ int width, height;
- if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
- state->pch_pfit.enabled)
- need_scaler = true;
+ if (crtc_state->pch_pfit.enabled) {
+ width = drm_rect_width(&crtc_state->pch_pfit.dst);
+ height = drm_rect_height(&crtc_state->pch_pfit.dst);
+ } else {
+ width = adjusted_mode->crtc_hdisplay;
+ height = adjusted_mode->crtc_vdisplay;
+ }
- return skl_update_scaler(state, !state->hw.active, SKL_CRTC_INDEX,
- &state->scaler_state.scaler_id,
- state->pipe_src_w, state->pipe_src_h,
- adjusted_mode->crtc_hdisplay,
- adjusted_mode->crtc_vdisplay, NULL, 0,
- need_scaler);
+ return skl_update_scaler(crtc_state, !crtc_state->hw.active,
+ SKL_CRTC_INDEX,
+ &crtc_state->scaler_state.scaler_id,
+ crtc_state->pipe_src_w, crtc_state->pipe_src_h,
+ width, height, NULL, 0,
+ crtc_state->pch_pfit.enabled);
}
/**
@@ -6200,6 +6175,7 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
case DRM_FORMAT_UYVY:
case DRM_FORMAT_VYUY:
case DRM_FORMAT_NV12:
+ case DRM_FORMAT_XYUV8888:
case DRM_FORMAT_P010:
case DRM_FORMAT_P012:
case DRM_FORMAT_P016:
@@ -6241,70 +6217,80 @@ static void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
const struct intel_crtc_scaler_state *scaler_state =
&crtc_state->scaler_state;
+ struct drm_rect src = {
+ .x2 = crtc_state->pipe_src_w << 16,
+ .y2 = crtc_state->pipe_src_h << 16,
+ };
+ const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
+ u16 uv_rgb_hphase, uv_rgb_vphase;
+ enum pipe pipe = crtc->pipe;
+ int width = drm_rect_width(dst);
+ int height = drm_rect_height(dst);
+ int x = dst->x1;
+ int y = dst->y1;
+ int hscale, vscale;
+ unsigned long irqflags;
+ int id;
- if (crtc_state->pch_pfit.enabled) {
- u16 uv_rgb_hphase, uv_rgb_vphase;
- int pfit_w, pfit_h, hscale, vscale;
- unsigned long irqflags;
- int id;
-
- if (drm_WARN_ON(&dev_priv->drm,
- crtc_state->scaler_state.scaler_id < 0))
- return;
+ if (!crtc_state->pch_pfit.enabled)
+ return;
- pfit_w = (crtc_state->pch_pfit.size >> 16) & 0xFFFF;
- pfit_h = crtc_state->pch_pfit.size & 0xFFFF;
+ if (drm_WARN_ON(&dev_priv->drm,
+ crtc_state->scaler_state.scaler_id < 0))
+ return;
- hscale = (crtc_state->pipe_src_w << 16) / pfit_w;
- vscale = (crtc_state->pipe_src_h << 16) / pfit_h;
+ hscale = drm_rect_calc_hscale(&src, dst, 0, INT_MAX);
+ vscale = drm_rect_calc_vscale(&src, dst, 0, INT_MAX);
- uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
- uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
+ uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
+ uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
- id = scaler_state->scaler_id;
+ id = scaler_state->scaler_id;
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
- PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
- intel_de_write_fw(dev_priv, SKL_PS_VPHASE(pipe, id),
- PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
- intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, id),
- PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
- intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, id),
- crtc_state->pch_pfit.pos);
- intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, id),
- crtc_state->pch_pfit.size);
+ intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
+ PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
+ intel_de_write_fw(dev_priv, SKL_PS_VPHASE(pipe, id),
+ PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
+ intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, id),
+ PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
+ intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, id),
+ x << 16 | y);
+ intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, id),
+ width << 16 | height);
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
- }
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
enum pipe pipe = crtc->pipe;
+ int width = drm_rect_width(dst);
+ int height = drm_rect_height(dst);
+ int x = dst->x1;
+ int y = dst->y1;
- if (crtc_state->pch_pfit.enabled) {
- /* Force use of hard-coded filter coefficients
- * as some pre-programmed values are broken,
- * e.g. x201.
- */
- if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
- intel_de_write(dev_priv, PF_CTL(pipe),
- PF_ENABLE | PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
- else
- intel_de_write(dev_priv, PF_CTL(pipe),
- PF_ENABLE | PF_FILTER_MED_3x3);
- intel_de_write(dev_priv, PF_WIN_POS(pipe),
- crtc_state->pch_pfit.pos);
- intel_de_write(dev_priv, PF_WIN_SZ(pipe),
- crtc_state->pch_pfit.size);
- }
+ if (!crtc_state->pch_pfit.enabled)
+ return;
+
+ /* Force use of hard-coded filter coefficients
+ * as some pre-programmed values are broken,
+ * e.g. x201.
+ */
+ if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
+ intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
+ PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
+ else
+ intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
+ PF_FILTER_MED_3x3);
+ intel_de_write(dev_priv, PF_WIN_POS(pipe), x << 16 | y);
+ intel_de_write(dev_priv, PF_WIN_SZ(pipe), width << 16 | height);
}
void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
@@ -6463,8 +6449,8 @@ static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- /* Wa_2006604312:icl */
- if (crtc_state->scaler_state.scaler_users > 0 && IS_ICELAKE(dev_priv))
+ /* Wa_2006604312:icl,ehl */
+ if (crtc_state->scaler_state.scaler_users > 0 && IS_GEN(dev_priv, 11))
return true;
return false;
@@ -6534,7 +6520,7 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
needs_nv12_wa(new_crtc_state))
skl_wa_827(dev_priv, pipe, true);
- /* Wa_2006604312:icl */
+ /* Wa_2006604312:icl,ehl */
if (!needs_scalerclk_wa(old_crtc_state) &&
needs_scalerclk_wa(new_crtc_state))
icl_wa_scalerclkgating(dev_priv, pipe, true);
@@ -6646,7 +6632,7 @@ intel_connector_primary_encoder(struct intel_connector *connector)
return &dp_to_dig_port(connector->mst_port)->base;
encoder = intel_attached_encoder(connector);
- WARN_ON(!encoder);
+ drm_WARN_ON(connector->base.dev, !encoder);
return encoder;
}
@@ -6720,7 +6706,8 @@ static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
continue;
if (encoder->pre_pll_enable)
- encoder->pre_pll_enable(encoder, crtc_state, conn_state);
+ encoder->pre_pll_enable(state, encoder,
+ crtc_state, conn_state);
}
}
@@ -6741,7 +6728,8 @@ static void intel_encoders_pre_enable(struct intel_atomic_state *state,
continue;
if (encoder->pre_enable)
- encoder->pre_enable(encoder, crtc_state, conn_state);
+ encoder->pre_enable(state, encoder,
+ crtc_state, conn_state);
}
}
@@ -6762,7 +6750,8 @@ static void intel_encoders_enable(struct intel_atomic_state *state,
continue;
if (encoder->enable)
- encoder->enable(encoder, crtc_state, conn_state);
+ encoder->enable(state, encoder,
+ crtc_state, conn_state);
intel_opregion_notify_encoder(encoder, true);
}
}
@@ -6785,7 +6774,8 @@ static void intel_encoders_disable(struct intel_atomic_state *state,
intel_opregion_notify_encoder(encoder, false);
if (encoder->disable)
- encoder->disable(encoder, old_crtc_state, old_conn_state);
+ encoder->disable(state, encoder,
+ old_crtc_state, old_conn_state);
}
}
@@ -6806,7 +6796,8 @@ static void intel_encoders_post_disable(struct intel_atomic_state *state,
continue;
if (encoder->post_disable)
- encoder->post_disable(encoder, old_crtc_state, old_conn_state);
+ encoder->post_disable(state, encoder,
+ old_crtc_state, old_conn_state);
}
}
@@ -6827,7 +6818,8 @@ static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
continue;
if (encoder->post_pll_disable)
- encoder->post_pll_disable(encoder, old_crtc_state, old_conn_state);
+ encoder->post_pll_disable(state, encoder,
+ old_crtc_state, old_conn_state);
}
}
@@ -6848,7 +6840,8 @@ static void intel_encoders_update_pipe(struct intel_atomic_state *state,
continue;
if (encoder->update_pipe)
- encoder->update_pipe(encoder, crtc_state, conn_state);
+ encoder->update_pipe(state, encoder,
+ crtc_state, conn_state);
}
}
@@ -7037,9 +7030,6 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
if (!transcoder_is_dsi(cpu_transcoder))
intel_set_pipe_timings(new_crtc_state);
- if (INTEL_GEN(dev_priv) >= 11)
- icl_enable_trans_port_sync(new_crtc_state);
-
intel_set_pipe_src_size(new_crtc_state);
if (cpu_transcoder != TRANSCODER_EDP &&
@@ -7087,9 +7077,6 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
if (INTEL_GEN(dev_priv) >= 11)
icl_set_pipe_chicken(crtc);
- if (!transcoder_is_dsi(cpu_transcoder))
- intel_ddi_enable_transcoder_func(new_crtc_state);
-
if (dev_priv->display.initial_watermarks)
dev_priv->display.initial_watermarks(state, crtc);
@@ -7120,11 +7107,12 @@ void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
/* To avoid upsetting the power well on haswell only disable the pfit if
* it's in use. The hw state code will make sure we get this right. */
- if (old_crtc_state->pch_pfit.enabled) {
- intel_de_write(dev_priv, PF_CTL(pipe), 0);
- intel_de_write(dev_priv, PF_WIN_POS(pipe), 0);
- intel_de_write(dev_priv, PF_WIN_SZ(pipe), 0);
- }
+ if (!old_crtc_state->pch_pfit.enabled)
+ return;
+
+ intel_de_write(dev_priv, PF_CTL(pipe), 0);
+ intel_de_write(dev_priv, PF_WIN_POS(pipe), 0);
+ intel_de_write(dev_priv, PF_WIN_SZ(pipe), 0);
}
static void ilk_crtc_disable(struct intel_atomic_state *state,
@@ -7312,7 +7300,17 @@ intel_aux_power_domain(struct intel_digital_port *dig_port)
}
}
- switch (dig_port->aux_ch) {
+ return intel_legacy_aux_to_power_domain(dig_port->aux_ch);
+}
+
+/*
+ * Converts aux_ch to power_domain without caring about TBT ports; for that,
+ * use intel_aux_power_domain().
+ */
+enum intel_display_power_domain
+intel_legacy_aux_to_power_domain(enum aux_ch aux_ch)
+{
+ switch (aux_ch) {
case AUX_CH_A:
return POWER_DOMAIN_AUX_A;
case AUX_CH_B:
@@ -7328,7 +7326,7 @@ intel_aux_power_domain(struct intel_digital_port *dig_port)
case AUX_CH_G:
return POWER_DOMAIN_AUX_G;
default:
- MISSING_CASE(dig_port->aux_ch);
+ MISSING_CASE(aux_ch);
return POWER_DOMAIN_AUX_A;
}
}
@@ -7942,39 +7940,36 @@ static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
(crtc->pipe == PIPE_A || IS_I915G(dev_priv));
}
-static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
+static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state)
{
- u32 pixel_rate;
-
- pixel_rate = pipe_config->hw.adjusted_mode.crtc_clock;
+ u32 pixel_rate = crtc_state->hw.adjusted_mode.crtc_clock;
+ unsigned int pipe_w, pipe_h, pfit_w, pfit_h;
/*
* We only use IF-ID interlacing. If we ever use
* PF-ID we'll need to adjust the pixel_rate here.
*/
- if (pipe_config->pch_pfit.enabled) {
- u64 pipe_w, pipe_h, pfit_w, pfit_h;
- u32 pfit_size = pipe_config->pch_pfit.size;
+ if (!crtc_state->pch_pfit.enabled)
+ return pixel_rate;
- pipe_w = pipe_config->pipe_src_w;
- pipe_h = pipe_config->pipe_src_h;
+ pipe_w = crtc_state->pipe_src_w;
+ pipe_h = crtc_state->pipe_src_h;
- pfit_w = (pfit_size >> 16) & 0xFFFF;
- pfit_h = pfit_size & 0xFFFF;
- if (pipe_w < pfit_w)
- pipe_w = pfit_w;
- if (pipe_h < pfit_h)
- pipe_h = pfit_h;
+ pfit_w = drm_rect_width(&crtc_state->pch_pfit.dst);
+ pfit_h = drm_rect_height(&crtc_state->pch_pfit.dst);
- if (WARN_ON(!pfit_w || !pfit_h))
- return pixel_rate;
+ if (pipe_w < pfit_w)
+ pipe_w = pfit_w;
+ if (pipe_h < pfit_h)
+ pipe_h = pfit_h;
- pixel_rate = div_u64(mul_u32_u32(pixel_rate, pipe_w * pipe_h),
- pfit_w * pfit_h);
- }
+ if (drm_WARN_ON(crtc_state->uapi.crtc->dev,
+ !pfit_w || !pfit_h))
+ return pixel_rate;
- return pixel_rate;
+ return div_u64(mul_u32_u32(pixel_rate, pipe_w * pipe_h),
+ pfit_w * pfit_h);
}
static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
@@ -8143,7 +8138,7 @@ static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
}
}
-static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
+static bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
{
if (i915_modparams.panel_use_ssc >= 0)
return i915_modparams.panel_use_ssc != 0;
@@ -8891,7 +8886,6 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
mode->clock = pipe_config->hw.adjusted_mode.crtc_clock;
- mode->hsync = drm_mode_hsync(mode);
mode->vrefresh = drm_mode_vrefresh(mode);
drm_mode_set_name(mode);
}
@@ -9168,9 +9162,9 @@ static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
}
-static void i9xx_get_pfit_config(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
+static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 tmp;
@@ -9190,9 +9184,9 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc,
return;
}
- pipe_config->gmch_pfit.control = tmp;
- pipe_config->gmch_pfit.pgm_ratios = intel_de_read(dev_priv,
- PFIT_PGM_RATIOS);
+ crtc_state->gmch_pfit.control = tmp;
+ crtc_state->gmch_pfit.pgm_ratios =
+ intel_de_read(dev_priv, PFIT_PGM_RATIOS);
}
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
@@ -9398,7 +9392,6 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = NULL;
- pipe_config->master_transcoder = INVALID_TRANSCODER;
ret = false;
@@ -9443,7 +9436,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
intel_get_pipe_timings(crtc, pipe_config);
intel_get_pipe_src_size(crtc, pipe_config);
- i9xx_get_pfit_config(crtc, pipe_config);
+ i9xx_get_pfit_config(pipe_config);
if (INTEL_GEN(dev_priv) >= 4) {
/* No way to read it out on pipes B and C */
@@ -10413,37 +10406,47 @@ static void ilk_get_fdi_m_n_config(struct intel_crtc *crtc,
&pipe_config->fdi_m_n, NULL);
}
-static void skl_get_pfit_config(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
+static void ilk_get_pfit_pos_size(struct intel_crtc_state *crtc_state,
+ u32 pos, u32 size)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
- u32 ps_ctrl = 0;
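+	/* WIN_POS/WIN_SZ pack x/width in bits 31:16 and y/height in bits 15:0 */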
+ drm_rect_init(&crtc_state->pch_pfit.dst,
+ pos >> 16, pos & 0xffff,
+ size >> 16, size & 0xffff);
+}
+
+static void skl_get_pfit_config(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
int id = -1;
int i;
/* find scaler attached to this pipe */
for (i = 0; i < crtc->num_scalers; i++) {
- ps_ctrl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i));
- if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
- id = i;
- pipe_config->pch_pfit.enabled = true;
- pipe_config->pch_pfit.pos = intel_de_read(dev_priv,
- SKL_PS_WIN_POS(crtc->pipe, i));
- pipe_config->pch_pfit.size = intel_de_read(dev_priv,
- SKL_PS_WIN_SZ(crtc->pipe, i));
- scaler_state->scalers[i].in_use = true;
- break;
- }
+ u32 ctl, pos, size;
+
+ ctl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i));
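+		/* Skip scalers that are disabled or assigned to a plane */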
+ if ((ctl & (PS_SCALER_EN | PS_PLANE_SEL_MASK)) != PS_SCALER_EN)
+ continue;
+
+ id = i;
+ crtc_state->pch_pfit.enabled = true;
+
+ pos = intel_de_read(dev_priv, SKL_PS_WIN_POS(crtc->pipe, i));
+ size = intel_de_read(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, i));
+
+ ilk_get_pfit_pos_size(crtc_state, pos, size);
+
+ scaler_state->scalers[i].in_use = true;
+ break;
}
scaler_state->scaler_id = id;
- if (id >= 0) {
+ if (id >= 0)
scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
- } else {
+ else
scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
- }
}
static void
@@ -10579,30 +10582,30 @@ error:
kfree(intel_fb);
}
-static void ilk_get_pfit_config(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
+static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- u32 tmp;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 ctl, pos, size;
- tmp = intel_de_read(dev_priv, PF_CTL(crtc->pipe));
-
- if (tmp & PF_ENABLE) {
- pipe_config->pch_pfit.enabled = true;
- pipe_config->pch_pfit.pos = intel_de_read(dev_priv,
- PF_WIN_POS(crtc->pipe));
- pipe_config->pch_pfit.size = intel_de_read(dev_priv,
- PF_WIN_SZ(crtc->pipe));
-
- /* We currently do not free assignements of panel fitters on
- * ivb/hsw (since we don't use the higher upscaling modes which
- * differentiates them) so just WARN about this case for now. */
- if (IS_GEN(dev_priv, 7)) {
- drm_WARN_ON(dev, (tmp & PF_PIPE_SEL_MASK_IVB) !=
- PF_PIPE_SEL_IVB(crtc->pipe));
- }
- }
+ ctl = intel_de_read(dev_priv, PF_CTL(crtc->pipe));
+ if ((ctl & PF_ENABLE) == 0)
+ return;
+
+ crtc_state->pch_pfit.enabled = true;
+
+ pos = intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe));
+ size = intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe));
+
+ ilk_get_pfit_pos_size(crtc_state, pos, size);
+
+ /*
+	 * We currently do not free assignments of panel fitters on
+	 * ivb/hsw (since we don't use the higher upscaling modes which
+	 * differentiate them), so just WARN about this case for now.
+ */
+ drm_WARN_ON(&dev_priv->drm, IS_GEN(dev_priv, 7) &&
+ (ctl & PF_PIPE_SEL_MASK_IVB) != PF_PIPE_SEL_IVB(crtc->pipe));
}
static bool ilk_get_pipe_config(struct intel_crtc *crtc,
@@ -10622,7 +10625,6 @@ static bool ilk_get_pipe_config(struct intel_crtc *crtc,
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = NULL;
- pipe_config->master_transcoder = INVALID_TRANSCODER;
ret = false;
tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
@@ -10714,7 +10716,7 @@ static bool ilk_get_pipe_config(struct intel_crtc *crtc,
intel_get_pipe_timings(crtc, pipe_config);
intel_get_pipe_src_size(crtc, pipe_config);
- ilk_get_pfit_config(crtc, pipe_config);
+ ilk_get_pfit_config(pipe_config);
ret = true;
@@ -10891,7 +10893,7 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
panel_transcoder_mask |=
BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
- if (HAS_TRANSCODER_EDP(dev_priv))
+ if (HAS_TRANSCODER(dev_priv, TRANSCODER_EDP))
panel_transcoder_mask |= BIT(TRANSCODER_EDP);
/*
@@ -11085,61 +11087,6 @@ static void hsw_get_ddi_port_state(struct intel_crtc *crtc,
}
}
-static enum transcoder transcoder_master_readout(struct drm_i915_private *dev_priv,
- enum transcoder cpu_transcoder)
-{
- u32 trans_port_sync, master_select;
-
- trans_port_sync = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL2(cpu_transcoder));
-
- if ((trans_port_sync & PORT_SYNC_MODE_ENABLE) == 0)
- return INVALID_TRANSCODER;
-
- master_select = trans_port_sync &
- PORT_SYNC_MODE_MASTER_SELECT_MASK;
- if (master_select == 0)
- return TRANSCODER_EDP;
- else
- return master_select - 1;
-}
-
-static void icl_get_trans_port_sync_config(struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- u32 transcoders;
- enum transcoder cpu_transcoder;
-
- crtc_state->master_transcoder = transcoder_master_readout(dev_priv,
- crtc_state->cpu_transcoder);
-
- transcoders = BIT(TRANSCODER_A) |
- BIT(TRANSCODER_B) |
- BIT(TRANSCODER_C) |
- BIT(TRANSCODER_D);
- for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
- enum intel_display_power_domain power_domain;
- intel_wakeref_t trans_wakeref;
-
- power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
- trans_wakeref = intel_display_power_get_if_enabled(dev_priv,
- power_domain);
-
- if (!trans_wakeref)
- continue;
-
- if (transcoder_master_readout(dev_priv, cpu_transcoder) ==
- crtc_state->cpu_transcoder)
- crtc_state->sync_mode_slaves_mask |= BIT(cpu_transcoder);
-
- intel_display_power_put(dev_priv, power_domain, trans_wakeref);
- }
-
- drm_WARN_ON(&dev_priv->drm,
- crtc_state->master_transcoder != INVALID_TRANSCODER &&
- crtc_state->sync_mode_slaves_mask);
-}
-
static bool hsw_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
@@ -11243,9 +11190,9 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
power_domain_mask |= BIT_ULL(power_domain);
if (INTEL_GEN(dev_priv) >= 9)
- skl_get_pfit_config(crtc, pipe_config);
+ skl_get_pfit_config(pipe_config);
else
- ilk_get_pfit_config(crtc, pipe_config);
+ ilk_get_pfit_config(pipe_config);
}
if (hsw_crtc_supports_ips(crtc)) {
@@ -11271,10 +11218,6 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
pipe_config->pixel_multiplier = 1;
}
- if (INTEL_GEN(dev_priv) >= 11 &&
- !transcoder_is_dsi(pipe_config->cpu_transcoder))
- icl_get_trans_port_sync_config(pipe_config);
-
out:
for_each_power_domain(power_domain, power_domain_mask)
intel_display_power_put(dev_priv,
@@ -12377,10 +12320,8 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
* only combine the results from all planes in the current place?
*/
if (!is_crtc_enabled) {
- plane_state->uapi.visible = visible = false;
- crtc_state->active_planes &= ~BIT(plane->id);
- crtc_state->data_rate[plane->id] = 0;
- crtc_state->min_cdclk[plane->id] = 0;
+ intel_plane_set_invisible(crtc_state, plane_state);
+ visible = false;
}
if (!was_visible && !visible)
@@ -12510,8 +12451,10 @@ static int icl_add_linked_planes(struct intel_atomic_state *state)
if (IS_ERR(linked_plane_state))
return PTR_ERR(linked_plane_state);
- WARN_ON(linked_plane_state->planar_linked_plane != plane);
- WARN_ON(linked_plane_state->planar_slave == plane_state->planar_slave);
+ drm_WARN_ON(state->base.dev,
+ linked_plane_state->planar_linked_plane != plane);
+ drm_WARN_ON(state->base.dev,
+ linked_plane_state->planar_slave == plane_state->planar_slave);
}
return 0;
@@ -12886,19 +12829,20 @@ compute_baseline_pipe_bpp(struct intel_crtc *crtc,
return 0;
}
-static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
+static void intel_dump_crtc_timings(struct drm_i915_private *i915,
+ const struct drm_display_mode *mode)
{
- DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
- "type: 0x%x flags: 0x%x\n",
- mode->crtc_clock,
- mode->crtc_hdisplay, mode->crtc_hsync_start,
- mode->crtc_hsync_end, mode->crtc_htotal,
- mode->crtc_vdisplay, mode->crtc_vsync_start,
- mode->crtc_vsync_end, mode->crtc_vtotal,
- mode->type, mode->flags);
+ drm_dbg_kms(&i915->drm, "crtc timings: %d %d %d %d %d %d %d %d %d, "
+ "type: 0x%x flags: 0x%x\n",
+ mode->crtc_clock,
+ mode->crtc_hdisplay, mode->crtc_hsync_start,
+ mode->crtc_hsync_end, mode->crtc_htotal,
+ mode->crtc_vdisplay, mode->crtc_vsync_start,
+ mode->crtc_vsync_end, mode->crtc_vtotal,
+ mode->type, mode->flags);
}
-static inline void
+static void
intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
const char *id, unsigned int lane_count,
const struct intel_link_m_n *m_n)
@@ -12922,6 +12866,16 @@ intel_dump_infoframe(struct drm_i915_private *dev_priv,
hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
}
+static void
+intel_dump_dp_vsc_sdp(struct drm_i915_private *dev_priv,
+ const struct drm_dp_vsc_sdp *vsc)
+{
+ if (!drm_debug_enabled(DRM_UT_KMS))
+ return;
+
+ drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, vsc);
+}
+
#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x
static const char * const output_type_str[] = {
@@ -13042,6 +12996,11 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
transcoder_name(pipe_config->cpu_transcoder),
pipe_config->pipe_bpp, pipe_config->dither);
+ drm_dbg_kms(&dev_priv->drm,
+ "port sync: master transcoder: %s, slave transcoder bitmask = 0x%x\n",
+ transcoder_name(pipe_config->master_transcoder),
+ pipe_config->sync_mode_slaves_mask);
+
if (pipe_config->has_pch_encoder)
intel_dump_m_n_config(pipe_config, "fdi",
pipe_config->fdi_lanes,
@@ -13074,12 +13033,21 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);
+ if (pipe_config->infoframes.enable &
+ intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_DRM))
+ intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
+ if (pipe_config->infoframes.enable &
+ intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA))
+ intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
+ if (pipe_config->infoframes.enable &
+ intel_hdmi_infoframe_enable(DP_SDP_VSC))
+ intel_dump_dp_vsc_sdp(dev_priv, &pipe_config->infoframes.vsc);
drm_dbg_kms(&dev_priv->drm, "requested mode:\n");
drm_mode_debug_printmodeline(&pipe_config->hw.mode);
drm_dbg_kms(&dev_priv->drm, "adjusted mode:\n");
drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode);
- intel_dump_crtc_timings(&pipe_config->hw.adjusted_mode);
+ intel_dump_crtc_timings(dev_priv, &pipe_config->hw.adjusted_mode);
drm_dbg_kms(&dev_priv->drm,
"port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
pipe_config->port_clock,
@@ -13104,9 +13072,8 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
pipe_config->gmch_pfit.lvds_border_bits);
else
drm_dbg_kms(&dev_priv->drm,
- "pch pfit: pos: 0x%08x, size: 0x%08x, %s, force thru: %s\n",
- pipe_config->pch_pfit.pos,
- pipe_config->pch_pfit.size,
+ "pch pfit: " DRM_RECT_FMT ", %s, force thru: %s\n",
+ DRM_RECT_ARG(&pipe_config->pch_pfit.dst),
enableddisabled(pipe_config->pch_pfit.enabled),
yesno(pipe_config->pch_pfit.force_thru));
@@ -13228,7 +13195,8 @@ static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state
{
crtc_state->uapi.enable = crtc_state->hw.enable;
crtc_state->uapi.active = crtc_state->hw.active;
- WARN_ON(drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);
+ drm_WARN_ON(crtc_state->uapi.crtc->dev,
+ drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);
crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;
@@ -13521,6 +13489,13 @@ intel_compare_infoframe(const union hdmi_infoframe *a,
return memcmp(a, b, sizeof(*a)) == 0;
}
+static bool
+intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a,
+ const struct drm_dp_vsc_sdp *b)
+{
+ return memcmp(a, b, sizeof(*a)) == 0;
+}
+
static void
pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
bool fastset, const char *name,
@@ -13546,6 +13521,31 @@ pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
}
}
+static void
+pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *dev_priv,
+ bool fastset, const char *name,
+ const struct drm_dp_vsc_sdp *a,
+ const struct drm_dp_vsc_sdp *b)
+{
+ if (fastset) {
+ if (!drm_debug_enabled(DRM_UT_KMS))
+ return;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "fastset mismatch in %s dp sdp\n", name);
+ drm_dbg_kms(&dev_priv->drm, "expected:\n");
+ drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, a);
+ drm_dbg_kms(&dev_priv->drm, "found:\n");
+ drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, b);
+ } else {
+ drm_err(&dev_priv->drm, "mismatch in %s dp sdp\n", name);
+ drm_err(&dev_priv->drm, "expected:\n");
+ drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, a);
+ drm_err(&dev_priv->drm, "found:\n");
+ drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, b);
+ }
+}
+
static void __printf(4, 5)
pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
const char *name, const char *format, ...)
@@ -13747,6 +13747,17 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
} \
} while (0)
+#define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \
+ if (!current_config->has_psr && !pipe_config->has_psr && \
+ !intel_compare_dp_vsc_sdp(&current_config->infoframes.name, \
+ &pipe_config->infoframes.name)) { \
+ pipe_config_dp_vsc_sdp_mismatch(dev_priv, fastset, __stringify(name), \
+ &current_config->infoframes.name, \
+ &pipe_config->infoframes.name); \
+ ret = false; \
+ } \
+} while (0)
+
#define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \
if (current_config->name1 != pipe_config->name1) { \
pipe_config_mismatch(fastset, crtc, __stringify(name1), \
@@ -13847,8 +13858,10 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
if (current_config->pch_pfit.enabled) {
- PIPE_CONF_CHECK_X(pch_pfit.pos);
- PIPE_CONF_CHECK_X(pch_pfit.size);
+ PIPE_CONF_CHECK_I(pch_pfit.dst.x1);
+ PIPE_CONF_CHECK_I(pch_pfit.dst.y1);
+ PIPE_CONF_CHECK_I(pch_pfit.dst.x2);
+ PIPE_CONF_CHECK_I(pch_pfit.dst.y2);
}
PIPE_CONF_CHECK_I(scaler_state.scaler_id);
@@ -13922,6 +13935,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_INFOFRAME(spd);
PIPE_CONF_CHECK_INFOFRAME(hdmi);
PIPE_CONF_CHECK_INFOFRAME(drm);
+ PIPE_CONF_CHECK_DP_VSC_SDP(vsc);
PIPE_CONF_CHECK_X(sync_mode_slaves_mask);
PIPE_CONF_CHECK_I(master_transcoder);
@@ -14010,7 +14024,9 @@ static void verify_wm_state(struct intel_crtc *crtc,
/* Watermarks */
for (level = 0; level <= max_level; level++) {
if (skl_wm_level_equals(&hw_plane_wm->wm[level],
- &sw_plane_wm->wm[level]))
+ &sw_plane_wm->wm[level]) ||
+ (level == 0 && skl_wm_level_equals(&hw_plane_wm->wm[level],
+ &sw_plane_wm->sagv_wm0)))
continue;
drm_err(&dev_priv->drm,
@@ -14065,7 +14081,9 @@ static void verify_wm_state(struct intel_crtc *crtc,
/* Watermarks */
for (level = 0; level <= max_level; level++) {
if (skl_wm_level_equals(&hw_plane_wm->wm[level],
- &sw_plane_wm->wm[level]))
+ &sw_plane_wm->wm[level]) ||
+ (level == 0 && skl_wm_level_equals(&hw_plane_wm->wm[level],
+ &sw_plane_wm->sagv_wm0)))
continue;
drm_err(&dev_priv->drm,
@@ -14999,11 +15017,13 @@ static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
}
static void commit_pipe_config(struct intel_atomic_state *state,
- struct intel_crtc_state *old_crtc_state,
- struct intel_crtc_state *new_crtc_state)
+ struct intel_crtc *crtc)
{
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
bool modeset = needs_modeset(new_crtc_state);
/*
@@ -15029,22 +15049,35 @@ static void commit_pipe_config(struct intel_atomic_state *state,
dev_priv->display.atomic_update_watermarks(state, crtc);
}
-static void intel_update_crtc(struct intel_crtc *crtc,
- struct intel_atomic_state *state,
- struct intel_crtc_state *old_crtc_state,
- struct intel_crtc_state *new_crtc_state)
+static void intel_enable_crtc(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- bool modeset = needs_modeset(new_crtc_state);
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
- if (modeset) {
- intel_crtc_update_active_timings(new_crtc_state);
+ if (!needs_modeset(new_crtc_state))
+ return;
- dev_priv->display.crtc_enable(state, crtc);
+ intel_crtc_update_active_timings(new_crtc_state);
- /* vblanks work again, re-enable pipe CRC. */
- intel_crtc_enable_pipe_crc(crtc);
- } else {
+ dev_priv->display.crtc_enable(state, crtc);
+
+ /* vblanks work again, re-enable pipe CRC. */
+ intel_crtc_enable_pipe_crc(crtc);
+}
+
+static void intel_update_crtc(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ bool modeset = needs_modeset(new_crtc_state);
+
+ if (!modeset) {
if (new_crtc_state->preload_luts &&
(new_crtc_state->uapi.color_mgmt_changed ||
new_crtc_state->update_pipe))
@@ -15064,7 +15097,7 @@ static void intel_update_crtc(struct intel_crtc *crtc,
/* Perform vblank evasion around commit operation */
intel_pipe_update_start(new_crtc_state);
- commit_pipe_config(state, old_crtc_state, new_crtc_state);
+ commit_pipe_config(state, crtc);
if (INTEL_GEN(dev_priv) >= 9)
skl_update_planes_on_crtc(state, crtc);
@@ -15084,18 +15117,6 @@ static void intel_update_crtc(struct intel_crtc *crtc,
intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
}
-static struct intel_crtc *intel_get_slave_crtc(const struct intel_crtc_state *new_crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
- enum transcoder slave_transcoder;
-
- drm_WARN_ON(&dev_priv->drm,
- !is_power_of_2(new_crtc_state->sync_mode_slaves_mask));
-
- slave_transcoder = ffs(new_crtc_state->sync_mode_slaves_mask) - 1;
- return intel_get_crtc_for_pipe(dev_priv,
- (enum pipe)slave_transcoder);
-}
static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
struct intel_crtc_state *old_crtc_state,
@@ -15171,129 +15192,19 @@ static void intel_commit_modeset_disables(struct intel_atomic_state *state)
static void intel_commit_modeset_enables(struct intel_atomic_state *state)
{
+ struct intel_crtc_state *new_crtc_state;
struct intel_crtc *crtc;
- struct intel_crtc_state *old_crtc_state, *new_crtc_state;
int i;
- for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
if (!new_crtc_state->hw.active)
continue;
- intel_update_crtc(crtc, state, old_crtc_state,
- new_crtc_state);
+ intel_enable_crtc(state, crtc);
+ intel_update_crtc(state, crtc);
}
}
-static void intel_crtc_enable_trans_port_sync(struct intel_crtc *crtc,
- struct intel_atomic_state *state,
- struct intel_crtc_state *new_crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
-
- intel_crtc_update_active_timings(new_crtc_state);
- dev_priv->display.crtc_enable(state, crtc);
- intel_crtc_enable_pipe_crc(crtc);
-}
-
-static void intel_set_dp_tp_ctl_normal(struct intel_crtc *crtc,
- struct intel_atomic_state *state)
-{
- struct drm_connector *uninitialized_var(conn);
- struct drm_connector_state *conn_state;
- struct intel_dp *intel_dp;
- int i;
-
- for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
- if (conn_state->crtc == &crtc->base)
- break;
- }
- intel_dp = intel_attached_dp(to_intel_connector(conn));
- intel_dp_stop_link_train(intel_dp);
-}
-
-/*
- * TODO: This is only called from port sync and it is identical to what will be
- * executed again in intel_update_crtc() over port sync pipes
- */
-static void intel_post_crtc_enable_updates(struct intel_crtc *crtc,
- struct intel_atomic_state *state)
-{
- struct intel_crtc_state *new_crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
- struct intel_crtc_state *old_crtc_state =
- intel_atomic_get_old_crtc_state(state, crtc);
- bool modeset = needs_modeset(new_crtc_state);
-
- if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
- intel_fbc_disable(crtc);
- else
- intel_fbc_enable(state, crtc);
-
- /* Perform vblank evasion around commit operation */
- intel_pipe_update_start(new_crtc_state);
- commit_pipe_config(state, old_crtc_state, new_crtc_state);
- skl_update_planes_on_crtc(state, crtc);
- intel_pipe_update_end(new_crtc_state);
-
- /*
- * We usually enable FIFO underrun interrupts as part of the
- * CRTC enable sequence during modesets. But when we inherit a
- * valid pipe configuration from the BIOS we need to take care
- * of enabling them on the CRTC's first fastset.
- */
- if (new_crtc_state->update_pipe && !modeset &&
- old_crtc_state->hw.mode.private_flags & I915_MODE_FLAG_INHERITED)
- intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
-}
-
-static void intel_update_trans_port_sync_crtcs(struct intel_crtc *crtc,
- struct intel_atomic_state *state,
- struct intel_crtc_state *old_crtc_state,
- struct intel_crtc_state *new_crtc_state)
-{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- struct intel_crtc *slave_crtc = intel_get_slave_crtc(new_crtc_state);
- struct intel_crtc_state *new_slave_crtc_state =
- intel_atomic_get_new_crtc_state(state, slave_crtc);
- struct intel_crtc_state *old_slave_crtc_state =
- intel_atomic_get_old_crtc_state(state, slave_crtc);
-
- drm_WARN_ON(&i915->drm, !slave_crtc || !new_slave_crtc_state ||
- !old_slave_crtc_state);
-
- drm_dbg_kms(&i915->drm,
- "Updating Transcoder Port Sync Master CRTC = %d %s and Slave CRTC %d %s\n",
- crtc->base.base.id, crtc->base.name,
- slave_crtc->base.base.id, slave_crtc->base.name);
-
- /* Enable seq for slave with with DP_TP_CTL left Idle until the
- * master is ready
- */
- intel_crtc_enable_trans_port_sync(slave_crtc,
- state,
- new_slave_crtc_state);
-
- /* Enable seq for master with with DP_TP_CTL left Idle */
- intel_crtc_enable_trans_port_sync(crtc,
- state,
- new_crtc_state);
-
- /* Set Slave's DP_TP_CTL to Normal */
- intel_set_dp_tp_ctl_normal(slave_crtc,
- state);
-
- /* Set Master's DP_TP_CTL To Normal */
- usleep_range(200, 400);
- intel_set_dp_tp_ctl_normal(crtc,
- state);
-
- /* Now do the post crtc enable for all master and slaves */
- intel_post_crtc_enable_updates(slave_crtc,
- state);
- intel_post_crtc_enable_updates(crtc,
- state);
-}
-
static void icl_dbuf_slice_pre_update(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
@@ -15365,8 +15276,7 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
entries[pipe] = new_crtc_state->wm.skl.ddb;
update_pipes &= ~BIT(pipe);
- intel_update_crtc(crtc, state, old_crtc_state,
- new_crtc_state);
+ intel_update_crtc(state, crtc);
/*
* If this is an already active pipe, it's DDB changed,
@@ -15381,67 +15291,62 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
}
}
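+	/*
+	 * Remember the modeset pipes in update_pipes as well, so the final
+	 * loop below can do their plane updates once they have been enabled.
+	 */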
+ update_pipes = modeset_pipes;
+
/*
	 * Enable all pipes that need a modeset and do not depend on other
* pipes
*/
- for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
- new_crtc_state, i) {
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
enum pipe pipe = crtc->pipe;
if ((modeset_pipes & BIT(pipe)) == 0)
continue;
if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
- is_trans_port_sync_slave(new_crtc_state))
+ is_trans_port_sync_master(new_crtc_state))
continue;
- drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
- entries, I915_MAX_PIPES, pipe));
-
- entries[pipe] = new_crtc_state->wm.skl.ddb;
modeset_pipes &= ~BIT(pipe);
- if (is_trans_port_sync_mode(new_crtc_state)) {
- struct intel_crtc *slave_crtc;
+ intel_enable_crtc(state, crtc);
+ }
- intel_update_trans_port_sync_crtcs(crtc, state,
- old_crtc_state,
- new_crtc_state);
+ /*
+ * Then we enable all remaining pipes that depend on other
+ * pipes: MST slaves and port sync masters.
+ */
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ enum pipe pipe = crtc->pipe;
- slave_crtc = intel_get_slave_crtc(new_crtc_state);
- /* TODO: update entries[] of slave */
- modeset_pipes &= ~BIT(slave_crtc->pipe);
+ if ((modeset_pipes & BIT(pipe)) == 0)
+ continue;
- } else {
- intel_update_crtc(crtc, state, old_crtc_state,
- new_crtc_state);
- }
+ modeset_pipes &= ~BIT(pipe);
+
+ intel_enable_crtc(state, crtc);
}
/*
- * Finally enable all pipes that needs a modeset and depends on
- * other pipes, right now it is only MST slaves as both port sync slave
- * and master are enabled together
+ * Finally we do the plane updates/etc. for all pipes that got enabled.
*/
- for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
- new_crtc_state, i) {
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
enum pipe pipe = crtc->pipe;
- if ((modeset_pipes & BIT(pipe)) == 0)
+ if ((update_pipes & BIT(pipe)) == 0)
continue;
drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
entries, I915_MAX_PIPES, pipe));
entries[pipe] = new_crtc_state->wm.skl.ddb;
- modeset_pipes &= ~BIT(pipe);
+ update_pipes &= ~BIT(pipe);
- intel_update_crtc(crtc, state, old_crtc_state, new_crtc_state);
+ intel_update_crtc(state, crtc);
}
drm_WARN_ON(&dev_priv->drm, modeset_pipes);
-
+ drm_WARN_ON(&dev_priv->drm, update_pipes);
}
static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
@@ -15540,16 +15445,11 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
intel_set_cdclk_pre_plane_update(state);
- /*
- * SKL workaround: bspec recommends we disable the SAGV when we
- * have more then one pipe enabled
- */
- if (!intel_can_enable_sagv(state))
- intel_disable_sagv(dev_priv);
-
intel_modeset_verify_disabled(dev_priv, state);
}
+ intel_sagv_pre_plane_update(state);
+
/* Complete the events for pipes that have now been disabled */
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
bool modeset = needs_modeset(new_crtc_state);
@@ -15645,8 +15545,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
if (state->modeset)
intel_verify_planes(state);
- if (state->modeset && intel_can_enable_sagv(state))
- intel_enable_sagv(dev_priv);
+ intel_sagv_post_plane_update(state);
drm_atomic_helper_commit_hw_done(&state->base);
@@ -15982,7 +15881,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
if (new_plane_state->uapi.fence) { /* explicit fencing */
ret = i915_sw_fence_await_dma_fence(&state->commit_ready,
new_plane_state->uapi.fence,
- I915_FENCE_TIMEOUT,
+ i915_fence_timeout(dev_priv),
GFP_KERNEL);
if (ret < 0)
return ret;
@@ -16009,7 +15908,8 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
ret = i915_sw_fence_await_reservation(&state->commit_ready,
obj->base.resv, NULL,
- false, I915_FENCE_TIMEOUT,
+ false,
+ i915_fence_timeout(dev_priv),
GFP_KERNEL);
if (ret < 0)
goto unpin_fb;
@@ -18261,11 +18161,12 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
best_encoder = connector->base.state->best_encoder;
connector->base.state->best_encoder = &encoder->base;
+ /* FIXME NULL atomic state passed! */
if (encoder->disable)
- encoder->disable(encoder, crtc_state,
+ encoder->disable(NULL, encoder, crtc_state,
connector->base.state);
if (encoder->post_disable)
- encoder->post_disable(encoder, crtc_state,
+ encoder->post_disable(NULL, encoder, crtc_state,
connector->base.state);
connector->base.state->best_encoder = best_encoder;
@@ -18802,15 +18703,6 @@ void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
-static bool
-has_transcoder(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder)
-{
- if (cpu_transcoder == TRANSCODER_EDP)
- return HAS_TRANSCODER_EDP(dev_priv);
- else
- return INTEL_INFO(dev_priv)->pipe_mask & BIT(cpu_transcoder);
-}
-
struct intel_display_error_state {
u32 power_well_driver;
@@ -18919,7 +18811,7 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv)
for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
enum transcoder cpu_transcoder = transcoders[i];
- if (!has_transcoder(dev_priv, cpu_transcoder))
+ if (!HAS_TRANSCODER(dev_priv, cpu_transcoder))
continue;
error->transcoder[i].available = true;
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index adb1225a3480..efb4da205ea2 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -320,9 +320,13 @@ enum phy_fia {
for_each_pipe(__dev_priv, __p) \
for_each_if((__mask) & BIT(__p))
-#define for_each_cpu_transcoder_masked(__dev_priv, __t, __mask) \
+#define for_each_cpu_transcoder(__dev_priv, __t) \
for ((__t) = 0; (__t) < I915_MAX_TRANSCODERS; (__t)++) \
- for_each_if ((__mask) & (1 << (__t)))
+ for_each_if (INTEL_INFO(__dev_priv)->cpu_transcoder_mask & BIT(__t))
+
+#define for_each_cpu_transcoder_masked(__dev_priv, __t, __mask) \
+ for_each_cpu_transcoder(__dev_priv, __t) \
+ for_each_if ((__mask) & BIT(__t))
#define for_each_universal_plane(__dev_priv, __pipe, __p) \
for ((__p) = 0; \
@@ -579,13 +583,14 @@ void hsw_disable_ips(const struct intel_crtc_state *crtc_state);
enum intel_display_power_domain intel_port_to_power_domain(enum port port);
enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port);
+enum intel_display_power_domain
+intel_legacy_aux_to_power_domain(enum aux_ch aux_ch);
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
struct intel_crtc_state *pipe_config);
void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state);
u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_center);
-int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state);
void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state);
u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index 1e6eb7f2f72d..70525623bcdf 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -9,6 +9,7 @@
#include "i915_debugfs.h"
#include "intel_csr.h"
#include "intel_display_debugfs.h"
+#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_fbc.h"
@@ -631,15 +632,9 @@ static void intel_dp_info(struct seq_file *m,
}
static void intel_dp_mst_info(struct seq_file *m,
- struct intel_connector *intel_connector)
+ struct intel_connector *intel_connector)
{
- struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
- struct intel_dp_mst_encoder *intel_mst =
- enc_to_mst(intel_encoder);
- struct intel_digital_port *intel_dig_port = intel_mst->primary;
- struct intel_dp *intel_dp = &intel_dig_port->dp;
- bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
- intel_connector->port);
+ bool has_audio = intel_connector->port->has_audio;
seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
}
@@ -1149,6 +1144,51 @@ static int i915_drrs_status(struct seq_file *m, void *unused)
return 0;
}
+#define LPSP_STATUS(COND) (COND ? seq_puts(m, "LPSP: enabled\n") : \
+ seq_puts(m, "LPSP: disabled\n"))
+
+static bool
+intel_lpsp_power_well_enabled(struct drm_i915_private *i915,
+ enum i915_power_well_id power_well_id)
+{
+ intel_wakeref_t wakeref;
+ bool is_enabled;
+
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ is_enabled = intel_display_power_well_is_enabled(i915,
+ power_well_id);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+
+ return is_enabled;
+}
+
+static int i915_lpsp_status(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *i915 = node_to_i915(m->private);
+
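+	/* LPSP is in use when the relevant display power well is powered down */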
+ switch (INTEL_GEN(i915)) {
+ case 12:
+ case 11:
+ LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, ICL_DISP_PW_3));
+ break;
+ case 10:
+ case 9:
+ LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, SKL_DISP_PW_2));
+ break;
+ default:
+ /*
+ * Apart from HASWELL/BROADWELL other legacy platform doesn't
+		 * Apart from HASWELL/BROADWELL, no other legacy platform
+		 * supports LPSP.
+ if (IS_HASWELL(i915) || IS_BROADWELL(i915))
+ LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, HSW_DISP_PW_GLOBAL));
+ else
+ seq_puts(m, "LPSP: not supported\n");
+ }
+
+ return 0;
+}
+
static int i915_dp_mst_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -1326,6 +1366,16 @@ static int i915_displayport_test_data_show(struct seq_file *m, void *data)
intel_dp->compliance.test_data.vdisplay);
seq_printf(m, "bpc: %u\n",
intel_dp->compliance.test_data.bpc);
+ } else if (intel_dp->compliance.test_type ==
+ DP_TEST_LINK_PHY_TEST_PATTERN) {
+ seq_printf(m, "pattern: %d\n",
+ intel_dp->compliance.test_data.phytest.phy_pattern);
+ seq_printf(m, "Number of lanes: %d\n",
+ intel_dp->compliance.test_data.phytest.num_lanes);
+ seq_printf(m, "Link Rate: %d\n",
+ intel_dp->compliance.test_data.phytest.link_rate);
+ seq_printf(m, "level: %02x\n",
+ intel_dp->train_set[0]);
}
} else
seq_puts(m, "0");
@@ -1358,7 +1408,7 @@ static int i915_displayport_test_type_show(struct seq_file *m, void *data)
if (encoder && connector->status == connector_status_connected) {
intel_dp = enc_to_intel_dp(encoder);
- seq_printf(m, "%02lx", intel_dp->compliance.test_type);
+ seq_printf(m, "%02lx\n", intel_dp->compliance.test_type);
} else
seq_puts(m, "0");
}
@@ -1906,6 +1956,7 @@ static const struct drm_info_list intel_display_debugfs_list[] = {
{"i915_dp_mst_info", i915_dp_mst_info, 0},
{"i915_ddb_info", i915_ddb_info, 0},
{"i915_drrs_status", i915_drrs_status, 0},
+ {"i915_lpsp_status", i915_lpsp_status, 0},
};
static const struct {
@@ -1927,7 +1978,7 @@ static const struct {
{"i915_edp_psr_debug", &i915_edp_psr_debug_fops},
};
-int intel_display_debugfs_register(struct drm_i915_private *i915)
+void intel_display_debugfs_register(struct drm_i915_private *i915)
{
struct drm_minor *minor = i915->drm.primary;
int i;
@@ -1940,9 +1991,9 @@ int intel_display_debugfs_register(struct drm_i915_private *i915)
intel_display_debugfs_files[i].fops);
}
- return drm_debugfs_create_files(intel_display_debugfs_list,
- ARRAY_SIZE(intel_display_debugfs_list),
- minor->debugfs_root, minor);
+ drm_debugfs_create_files(intel_display_debugfs_list,
+ ARRAY_SIZE(intel_display_debugfs_list),
+ minor->debugfs_root, minor);
}
static int i915_panel_show(struct seq_file *m, void *data)
@@ -1987,6 +2038,48 @@ static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
}
DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
+#define LPSP_CAPABLE(COND) (COND ? seq_puts(m, "LPSP: capable\n") : \
+ seq_puts(m, "LPSP: incapable\n"))
+
+static int i915_lpsp_capability_show(struct seq_file *m, void *data)
+{
+ struct drm_connector *connector = m->private;
+ struct intel_encoder *encoder =
+ intel_attached_encoder(to_intel_connector(connector));
+ struct drm_i915_private *i915 = to_i915(connector->dev);
+
+ if (connector->status != connector_status_connected)
+ return -ENODEV;
+
+ switch (INTEL_GEN(i915)) {
+ case 12:
+ /*
+		 * TGL can actually drive LPSP on ports up to DDI_C, but no TGL
+		 * SKU has DDI_C physically connected, and the driver does not
+		 * even initialize the DDI_C port on gen12.
+ */
+ LPSP_CAPABLE(encoder->port <= PORT_B);
+ break;
+ case 11:
+ LPSP_CAPABLE(connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
+ connector->connector_type == DRM_MODE_CONNECTOR_eDP);
+ break;
+ case 10:
+ case 9:
+ LPSP_CAPABLE(encoder->port == PORT_A &&
+ (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
+ connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+ connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort));
+ break;
+ default:
+ if (IS_HASWELL(i915) || IS_BROADWELL(i915))
+ LPSP_CAPABLE(connector->connector_type == DRM_MODE_CONNECTOR_eDP);
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(i915_lpsp_capability);
+
static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
{
struct drm_connector *connector = m->private;
@@ -2130,5 +2223,16 @@ int intel_connector_debugfs_add(struct drm_connector *connector)
debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
connector, &i915_dsc_fec_support_fops);
+	/* Legacy panels don't support LPSP on any platform */
+ if ((INTEL_GEN(dev_priv) >= 9 || IS_HASWELL(dev_priv) ||
+ IS_BROADWELL(dev_priv)) &&
+ (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
+ connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+ connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+ connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+ connector->connector_type == DRM_MODE_CONNECTOR_HDMIB))
+ debugfs_create_file("i915_lpsp_capability", 0444, root,
+ connector, &i915_lpsp_capability_fops);
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.h b/drivers/gpu/drm/i915/display/intel_display_debugfs.h
index a3bea1ce04c2..c922c1745bfe 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.h
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.h
@@ -10,10 +10,10 @@ struct drm_connector;
struct drm_i915_private;
#ifdef CONFIG_DEBUG_FS
-int intel_display_debugfs_register(struct drm_i915_private *i915);
+void intel_display_debugfs_register(struct drm_i915_private *i915);
int intel_connector_debugfs_add(struct drm_connector *connector);
#else
-static inline int intel_display_debugfs_register(struct drm_i915_private *i915) { return 0; }
+static inline void intel_display_debugfs_register(struct drm_i915_private *i915) {}
static inline int intel_connector_debugfs_add(struct drm_connector *connector) { return 0; }
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index 84ecf8e58523..49998906cc61 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -151,6 +151,8 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
return "GT_IRQ";
case POWER_DOMAIN_DPLL_DC_OFF:
return "DPLL_DC_OFF";
+ case POWER_DOMAIN_TC_COLD_OFF:
+ return "TC_COLD_OFF";
default:
MISSING_CASE(domain);
return "?";
@@ -282,8 +284,51 @@ static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
}
+#define ICL_AUX_PW_TO_CH(pw_idx) \
+ ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
+
+#define ICL_TBT_AUX_PW_TO_CH(pw_idx) \
+ ((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C)
+
+static enum aux_ch icl_tc_phy_aux_ch(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ int pw_idx = power_well->desc->hsw.idx;
+
+ return power_well->desc->hsw.is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) :
+ ICL_AUX_PW_TO_CH(pw_idx);
+}
+
+static struct intel_digital_port *
+aux_ch_to_digital_port(struct drm_i915_private *dev_priv,
+ enum aux_ch aux_ch)
+{
+ struct intel_digital_port *dig_port = NULL;
+ struct intel_encoder *encoder;
+
+ for_each_intel_encoder(&dev_priv->drm, encoder) {
+ /* We'll check the MST primary port */
+ if (encoder->type == INTEL_OUTPUT_DP_MST)
+ continue;
+
+ dig_port = enc_to_dig_port(encoder);
+ if (!dig_port)
+ continue;
+
+ if (dig_port->aux_ch != aux_ch) {
+ dig_port = NULL;
+ continue;
+ }
+
+ break;
+ }
+
+ return dig_port;
+}
+
static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
+ struct i915_power_well *power_well,
+ bool timeout_expected)
{
const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
int pw_idx = power_well->desc->hsw.idx;
@@ -294,8 +339,8 @@ static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
drm_dbg_kms(&dev_priv->drm, "%s power well enable timeout\n",
power_well->desc->name);
- /* An AUX timeout is expected if the TBT DP tunnel is down. */
- drm_WARN_ON(&dev_priv->drm, !power_well->desc->hsw.is_tc_tbt);
+ drm_WARN_ON(&dev_priv->drm, !timeout_expected);
+
}
}
@@ -358,11 +403,11 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
{
const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
int pw_idx = power_well->desc->hsw.idx;
- bool wait_fuses = power_well->desc->hsw.has_fuses;
- enum skl_power_gate uninitialized_var(pg);
u32 val;
- if (wait_fuses) {
+ if (power_well->desc->hsw.has_fuses) {
+ enum skl_power_gate pg;
+
pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
SKL_PW_CTL_IDX_TO_PG(pw_idx);
/*
@@ -379,19 +424,27 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
val = intel_de_read(dev_priv, regs->driver);
intel_de_write(dev_priv, regs->driver,
val | HSW_PWR_WELL_CTL_REQ(pw_idx));
- hsw_wait_for_power_well_enable(dev_priv, power_well);
+
+ hsw_wait_for_power_well_enable(dev_priv, power_well, false);
/* Display WA #1178: cnl */
if (IS_CANNONLAKE(dev_priv) &&
pw_idx >= GLK_PW_CTL_IDX_AUX_B &&
pw_idx <= CNL_PW_CTL_IDX_AUX_F) {
+ u32 val;
+
val = intel_de_read(dev_priv, CNL_AUX_ANAOVRD1(pw_idx));
val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS;
intel_de_write(dev_priv, CNL_AUX_ANAOVRD1(pw_idx), val);
}
- if (wait_fuses)
+ if (power_well->desc->hsw.has_fuses) {
+ enum skl_power_gate pg;
+
+ pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
+ SKL_PW_CTL_IDX_TO_PG(pw_idx);
gen9_wait_for_power_well_fuses(dev_priv, pg);
+ }
hsw_power_well_post_enable(dev_priv,
power_well->desc->hsw.irq_pipe_mask,
@@ -437,7 +490,7 @@ icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
val | ICL_LANE_ENABLE_AUX);
}
- hsw_wait_for_power_well_enable(dev_priv, power_well);
+ hsw_wait_for_power_well_enable(dev_priv, power_well, false);
/* Display WA #1178: icl */
if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
@@ -470,21 +523,6 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
hsw_wait_for_power_well_disable(dev_priv, power_well);
}
-#define ICL_AUX_PW_TO_CH(pw_idx) \
- ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
-
-#define ICL_TBT_AUX_PW_TO_CH(pw_idx) \
- ((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C)
-
-static enum aux_ch icl_tc_phy_aux_ch(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- int pw_idx = power_well->desc->hsw.idx;
-
- return power_well->desc->hsw.is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) :
- ICL_AUX_PW_TO_CH(pw_idx);
-}
-
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
static u64 async_put_domains_mask(struct i915_power_domains *power_domains);
@@ -501,51 +539,28 @@ static int power_well_async_ref_count(struct drm_i915_private *dev_priv,
}
static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
+ struct i915_power_well *power_well,
+ struct intel_digital_port *dig_port)
{
- enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
- struct intel_digital_port *dig_port = NULL;
- struct intel_encoder *encoder;
-
/* Bypass the check if all references are released asynchronously */
if (power_well_async_ref_count(dev_priv, power_well) ==
power_well->count)
return;
- aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
-
- for_each_intel_encoder(&dev_priv->drm, encoder) {
- enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
-
- if (!intel_phy_is_tc(dev_priv, phy))
- continue;
-
- /* We'll check the MST primary port */
- if (encoder->type == INTEL_OUTPUT_DP_MST)
- continue;
-
- dig_port = enc_to_dig_port(encoder);
- if (drm_WARN_ON(&dev_priv->drm, !dig_port))
- continue;
-
- if (dig_port->aux_ch != aux_ch) {
- dig_port = NULL;
- continue;
- }
-
- break;
- }
-
if (drm_WARN_ON(&dev_priv->drm, !dig_port))
return;
+ if (INTEL_GEN(dev_priv) == 11 && dig_port->tc_legacy_port)
+ return;
+
drm_WARN_ON(&dev_priv->drm, !intel_tc_port_ref_held(dig_port));
}
#else
static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
+ struct i915_power_well *power_well,
+ struct intel_digital_port *dig_port)
{
}
@@ -553,24 +568,65 @@ static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
#define TGL_AUX_PW_TO_TC_PORT(pw_idx) ((pw_idx) - TGL_PW_CTL_IDX_AUX_TC1)
+static void icl_tc_cold_exit(struct drm_i915_private *i915)
+{
+ int ret, tries = 0;
+
+ while (1) {
+ ret = sandybridge_pcode_write_timeout(i915,
+ ICL_PCODE_EXIT_TCCOLD,
+ 0, 250, 1);
+ if (ret != -EAGAIN || ++tries == 3)
+ break;
+ msleep(1);
+ }
+
+ /* Spec states that TC cold exit can take up to 1ms to complete */
+ if (!ret)
+ msleep(1);
+
+ /* TODO: turn failure into an error as soon as i915 CI updates the ICL IFWI */
+ drm_dbg_kms(&i915->drm, "TC cold block %s\n", ret ? "failed" :
+ "succeeded");
+}
+
static void
icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
+ struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);
+ const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
+ bool is_tbt = power_well->desc->hsw.is_tc_tbt;
+ bool timeout_expected;
u32 val;
- icl_tc_port_assert_ref_held(dev_priv, power_well);
+ icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);
val = intel_de_read(dev_priv, DP_AUX_CH_CTL(aux_ch));
val &= ~DP_AUX_CH_CTL_TBT_IO;
- if (power_well->desc->hsw.is_tc_tbt)
+ if (is_tbt)
val |= DP_AUX_CH_CTL_TBT_IO;
intel_de_write(dev_priv, DP_AUX_CH_CTL(aux_ch), val);
- hsw_power_well_enable(dev_priv, power_well);
+ val = intel_de_read(dev_priv, regs->driver);
+ intel_de_write(dev_priv, regs->driver,
+ val | HSW_PWR_WELL_CTL_REQ(power_well->desc->hsw.idx));
+
+ /*
+ * An AUX timeout is expected if the TBT DP tunnel is down,
+ * or when we need to enable AUX on a legacy TypeC port as part of the
+ * TC-cold exit sequence.
+ */
+ timeout_expected = is_tbt;
+ if (INTEL_GEN(dev_priv) == 11 && dig_port->tc_legacy_port) {
+ icl_tc_cold_exit(dev_priv);
+ timeout_expected = true;
+ }
+
+ hsw_wait_for_power_well_enable(dev_priv, power_well, timeout_expected);
- if (INTEL_GEN(dev_priv) >= 12 && !power_well->desc->hsw.is_tc_tbt) {
+ if (INTEL_GEN(dev_priv) >= 12 && !is_tbt) {
enum tc_port tc_port;
tc_port = TGL_AUX_PW_TO_TC_PORT(power_well->desc->hsw.idx);
@@ -588,11 +644,48 @@ static void
icl_tc_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- icl_tc_port_assert_ref_held(dev_priv, power_well);
+ enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
+ struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);
+
+ icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);
hsw_power_well_disable(dev_priv, power_well);
}
+static void
+icl_aux_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ int pw_idx = power_well->desc->hsw.idx;
+ enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx); /* non-TBT only */
+ bool is_tbt = power_well->desc->hsw.is_tc_tbt;
+
+ if (is_tbt || intel_phy_is_tc(dev_priv, phy))
+ return icl_tc_phy_aux_power_well_enable(dev_priv, power_well);
+ else if (IS_ICELAKE(dev_priv))
+ return icl_combo_phy_aux_power_well_enable(dev_priv,
+ power_well);
+ else
+ return hsw_power_well_enable(dev_priv, power_well);
+}
+
+static void
+icl_aux_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ int pw_idx = power_well->desc->hsw.idx;
+ enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx); /* non-TBT only */
+ bool is_tbt = power_well->desc->hsw.is_tc_tbt;
+
+ if (is_tbt || intel_phy_is_tc(dev_priv, phy))
+ return icl_tc_phy_aux_power_well_disable(dev_priv, power_well);
+ else if (IS_ICELAKE(dev_priv))
+ return icl_combo_phy_aux_power_well_disable(dev_priv,
+ power_well);
+ else
+ return hsw_power_well_disable(dev_priv, power_well);
+}
+
/*
* We should only use the power well if we explicitly asked the hardware to
* enable it, so check if it's enabled and also check if we've requested it to
@@ -943,7 +1036,7 @@ static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
/* Power wells at this level and above must be disabled for DC5 entry */
if (INTEL_GEN(dev_priv) >= 12)
- high_pg = TGL_DISP_PW_3;
+ high_pg = ICL_DISP_PW_3;
else
high_pg = SKL_DISP_PW_2;
@@ -1873,20 +1966,27 @@ __async_put_domains_state_ok(struct i915_power_domains *power_domains)
static void print_power_domains(struct i915_power_domains *power_domains,
const char *prefix, u64 mask)
{
+ struct drm_i915_private *i915 = container_of(power_domains,
+ struct drm_i915_private,
+ power_domains);
enum intel_display_power_domain domain;
- DRM_DEBUG_DRIVER("%s (%lu):\n", prefix, hweight64(mask));
+ drm_dbg(&i915->drm, "%s (%lu):\n", prefix, hweight64(mask));
for_each_power_domain(domain, mask)
- DRM_DEBUG_DRIVER("%s use_count %d\n",
- intel_display_power_domain_str(domain),
- power_domains->domain_use_count[domain]);
+ drm_dbg(&i915->drm, "%s use_count %d\n",
+ intel_display_power_domain_str(domain),
+ power_domains->domain_use_count[domain]);
}
static void
print_async_put_domains_state(struct i915_power_domains *power_domains)
{
- DRM_DEBUG_DRIVER("async_put_wakeref %u\n",
- power_domains->async_put_wakeref);
+ struct drm_i915_private *i915 = container_of(power_domains,
+ struct drm_i915_private,
+ power_domains);
+
+ drm_dbg(&i915->drm, "async_put_wakeref %u\n",
+ power_domains->async_put_wakeref);
print_power_domains(power_domains, "async_put_domains[0]",
power_domains->async_put_domains[0]);
@@ -2798,6 +2898,21 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
#define TGL_AUX_I_TBT6_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_I_TBT))
+#define TGL_TC_COLD_OFF_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_D) | \
+ BIT_ULL(POWER_DOMAIN_AUX_E) | \
+ BIT_ULL(POWER_DOMAIN_AUX_F) | \
+ BIT_ULL(POWER_DOMAIN_AUX_G) | \
+ BIT_ULL(POWER_DOMAIN_AUX_H) | \
+ BIT_ULL(POWER_DOMAIN_AUX_I) | \
+ BIT_ULL(POWER_DOMAIN_AUX_D_TBT) | \
+ BIT_ULL(POWER_DOMAIN_AUX_E_TBT) | \
+ BIT_ULL(POWER_DOMAIN_AUX_F_TBT) | \
+ BIT_ULL(POWER_DOMAIN_AUX_G_TBT) | \
+ BIT_ULL(POWER_DOMAIN_AUX_H_TBT) | \
+ BIT_ULL(POWER_DOMAIN_AUX_I_TBT) | \
+ BIT_ULL(POWER_DOMAIN_TC_COLD_OFF))
+
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
.sync_hw = i9xx_power_well_sync_hw_noop,
.enable = i9xx_always_on_power_well_noop,
@@ -3496,17 +3611,10 @@ static const struct i915_power_well_desc cnl_power_wells[] = {
},
};
-static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = {
- .sync_hw = hsw_power_well_sync_hw,
- .enable = icl_combo_phy_aux_power_well_enable,
- .disable = icl_combo_phy_aux_power_well_disable,
- .is_enabled = hsw_power_well_enabled,
-};
-
-static const struct i915_power_well_ops icl_tc_phy_aux_power_well_ops = {
+static const struct i915_power_well_ops icl_aux_power_well_ops = {
.sync_hw = hsw_power_well_sync_hw,
- .enable = icl_tc_phy_aux_power_well_enable,
- .disable = icl_tc_phy_aux_power_well_disable,
+ .enable = icl_aux_power_well_enable,
+ .disable = icl_aux_power_well_disable,
.is_enabled = hsw_power_well_enabled,
};
@@ -3564,7 +3672,7 @@ static const struct i915_power_well_desc icl_power_wells[] = {
.name = "power well 3",
.domains = ICL_PW_3_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
+ .id = ICL_DISP_PW_3,
{
.hsw.regs = &hsw_power_well_regs,
.hsw.idx = ICL_PW_CTL_IDX_PW_3,
@@ -3636,7 +3744,7 @@ static const struct i915_power_well_desc icl_power_wells[] = {
{
.name = "AUX A",
.domains = ICL_AUX_A_IO_POWER_DOMAINS,
- .ops = &icl_combo_phy_aux_power_well_ops,
+ .ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -3646,7 +3754,7 @@ static const struct i915_power_well_desc icl_power_wells[] = {
{
.name = "AUX B",
.domains = ICL_AUX_B_IO_POWER_DOMAINS,
- .ops = &icl_combo_phy_aux_power_well_ops,
+ .ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -3656,7 +3764,7 @@ static const struct i915_power_well_desc icl_power_wells[] = {
{
.name = "AUX C TC1",
.domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS,
- .ops = &icl_tc_phy_aux_power_well_ops,
+ .ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -3667,7 +3775,7 @@ static const struct i915_power_well_desc icl_power_wells[] = {
{
.name = "AUX D TC2",
.domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS,
- .ops = &icl_tc_phy_aux_power_well_ops,
+ .ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -3678,7 +3786,7 @@ static const struct i915_power_well_desc icl_power_wells[] = {
{
.name = "AUX E TC3",
.domains = ICL_AUX_E_TC3_IO_POWER_DOMAINS,
- .ops = &icl_tc_phy_aux_power_well_ops,
+ .ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -3689,7 +3797,7 @@ static const struct i915_power_well_desc icl_power_wells[] = {
{
.name = "AUX F TC4",
.domains = ICL_AUX_F_TC4_IO_POWER_DOMAINS,
- .ops = &icl_tc_phy_aux_power_well_ops,
+ .ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -3700,7 +3808,7 @@ static const struct i915_power_well_desc icl_power_wells[] = {
{
.name = "AUX C TBT1",
.domains = ICL_AUX_C_TBT1_IO_POWER_DOMAINS,
- .ops = &icl_tc_phy_aux_power_well_ops,
+ .ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -3711,7 +3819,7 @@ static const struct i915_power_well_desc icl_power_wells[] = {
{
.name = "AUX D TBT2",
.domains = ICL_AUX_D_TBT2_IO_POWER_DOMAINS,
- .ops = &icl_tc_phy_aux_power_well_ops,
+ .ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -3722,7 +3830,7 @@ static const struct i915_power_well_desc icl_power_wells[] = {
{
.name = "AUX E TBT3",
.domains = ICL_AUX_E_TBT3_IO_POWER_DOMAINS,
- .ops = &icl_tc_phy_aux_power_well_ops,
+ .ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -3733,7 +3841,7 @@ static const struct i915_power_well_desc icl_power_wells[] = {
{
.name = "AUX F TBT4",
.domains = ICL_AUX_F_TBT4_IO_POWER_DOMAINS,
- .ops = &icl_tc_phy_aux_power_well_ops,
+ .ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -3755,149 +3863,89 @@ static const struct i915_power_well_desc icl_power_wells[] = {
},
};
-static const struct i915_power_well_desc ehl_power_wells[] = {
- {
- .name = "always-on",
- .always_on = true,
- .domains = POWER_DOMAIN_MASK,
- .ops = &i9xx_always_on_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
- {
- .name = "power well 1",
- /* Handled by the DMC firmware */
- .always_on = true,
- .domains = 0,
- .ops = &hsw_power_well_ops,
- .id = SKL_DISP_PW_1,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_PW_1,
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "DC off",
- .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
- .ops = &gen9_dc_off_power_well_ops,
- .id = SKL_DISP_DC_OFF,
- },
- {
- .name = "power well 2",
- .domains = ICL_PW_2_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = SKL_DISP_PW_2,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_PW_2,
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "power well 3",
- .domains = ICL_PW_3_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_PW_3,
- .hsw.irq_pipe_mask = BIT(PIPE_B),
- .hsw.has_vga = true,
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "DDI A IO",
- .domains = ICL_DDI_IO_A_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_ddi_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_DDI_A,
- },
- },
- {
- .name = "DDI B IO",
- .domains = ICL_DDI_IO_B_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_ddi_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_DDI_B,
- },
- },
- {
- .name = "DDI C IO",
- .domains = ICL_DDI_IO_C_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_ddi_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_DDI_C,
- },
- },
- {
- .name = "DDI D IO",
- .domains = ICL_DDI_IO_D_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_ddi_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_DDI_D,
- },
- },
- {
- .name = "AUX A",
- .domains = ICL_AUX_A_IO_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_AUX_A,
- },
- },
- {
- .name = "AUX B",
- .domains = ICL_AUX_B_IO_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_AUX_B,
- },
- },
- {
- .name = "AUX C",
- .domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_AUX_C,
- },
- },
- {
- .name = "AUX D",
- .domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_AUX_D,
- },
- },
- {
- .name = "power well 4",
- .domains = ICL_PW_4_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_PW_4,
- .hsw.has_fuses = true,
- .hsw.irq_pipe_mask = BIT(PIPE_C),
- },
- },
+static void
+tgl_tc_cold_request(struct drm_i915_private *i915, bool block)
+{
+ u8 tries = 0;
+ int ret;
+
+ while (1) {
+ u32 low_val = 0, high_val;
+
+ if (block)
+ high_val = TGL_PCODE_EXIT_TCCOLD_DATA_H_BLOCK_REQ;
+ else
+ high_val = TGL_PCODE_EXIT_TCCOLD_DATA_H_UNBLOCK_REQ;
+
+ /*
+ * The spec states that we should time out the request after 200us,
+ * but the function below will time out after 500us
+ */
+ ret = sandybridge_pcode_read(i915, TGL_PCODE_TCCOLD, &low_val,
+ &high_val);
+ if (ret == 0) {
+ if (block &&
+ (low_val & TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED))
+ ret = -EIO;
+ else
+ break;
+ }
+
+ if (++tries == 3)
+ break;
+
+ if (ret == -EAGAIN)
+ msleep(1);
+ }
+
+ if (ret)
+ drm_err(&i915->drm, "TC cold %sblock failed\n",
+ block ? "" : "un");
+ else
+ drm_dbg_kms(&i915->drm, "TC cold %sblock succeeded\n",
+ block ? "" : "un");
+}
+
+static void
+tgl_tc_cold_off_power_well_enable(struct drm_i915_private *i915,
+ struct i915_power_well *power_well)
+{
+ tgl_tc_cold_request(i915, true);
+}
+
+static void
+tgl_tc_cold_off_power_well_disable(struct drm_i915_private *i915,
+ struct i915_power_well *power_well)
+{
+ tgl_tc_cold_request(i915, false);
+}
+
+static void
+tgl_tc_cold_off_power_well_sync_hw(struct drm_i915_private *i915,
+ struct i915_power_well *power_well)
+{
+ if (power_well->count > 0)
+ tgl_tc_cold_off_power_well_enable(i915, power_well);
+ else
+ tgl_tc_cold_off_power_well_disable(i915, power_well);
+}
+
+static bool
+tgl_tc_cold_off_power_well_is_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ /*
+ * Not the correct implementation, but there is no way to just read it
+ * from PCODE, so return the refcount to avoid state mismatch errors
+ */
+ return power_well->count;
+}
+
+static const struct i915_power_well_ops tgl_tc_cold_off_ops = {
+ .sync_hw = tgl_tc_cold_off_power_well_sync_hw,
+ .enable = tgl_tc_cold_off_power_well_enable,
+ .disable = tgl_tc_cold_off_power_well_disable,
+ .is_enabled = tgl_tc_cold_off_power_well_is_enabled,
};
static const struct i915_power_well_desc tgl_power_wells[] = {
@@ -3942,7 +3990,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
.name = "power well 3",
.domains = TGL_PW_3_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
- .id = TGL_DISP_PW_3,
+ .id = ICL_DISP_PW_3,
{
.hsw.regs = &hsw_power_well_regs,
.hsw.idx = ICL_PW_CTL_IDX_PW_3,
@@ -4044,7 +4092,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
{
.name = "AUX A",
.domains = TGL_AUX_A_IO_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
+ .ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -4054,7 +4102,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
{
.name = "AUX B",
.domains = TGL_AUX_B_IO_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
+ .ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -4064,7 +4112,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
{
.name = "AUX C",
.domains = TGL_AUX_C_IO_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
+ .ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -4074,7 +4122,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
{
.name = "AUX D TC1",
.domains = TGL_AUX_D_TC1_IO_POWER_DOMAINS,
- .ops = &icl_tc_phy_aux_power_well_ops,
+ .ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -4085,7 +4133,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
{
.name = "AUX E TC2",
.domains = TGL_AUX_E_TC2_IO_POWER_DOMAINS,
- .ops = &icl_tc_phy_aux_power_well_ops,
+ .ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -4096,7 +4144,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
{
.name = "AUX F TC3",
.domains = TGL_AUX_F_TC3_IO_POWER_DOMAINS,
- .ops = &icl_tc_phy_aux_power_well_ops,
+ .ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -4107,7 +4155,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
{
.name = "AUX G TC4",
.domains = TGL_AUX_G_TC4_IO_POWER_DOMAINS,
- .ops = &icl_tc_phy_aux_power_well_ops,
+ .ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -4118,7 +4166,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
{
.name = "AUX H TC5",
.domains = TGL_AUX_H_TC5_IO_POWER_DOMAINS,
- .ops = &icl_tc_phy_aux_power_well_ops,
+ .ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -4129,7 +4177,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
{
.name = "AUX I TC6",
.domains = TGL_AUX_I_TC6_IO_POWER_DOMAINS,
- .ops = &icl_tc_phy_aux_power_well_ops,
+ .ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -4140,7 +4188,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
{
.name = "AUX D TBT1",
.domains = TGL_AUX_D_TBT1_IO_POWER_DOMAINS,
- .ops = &icl_tc_phy_aux_power_well_ops,
+ .ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -4151,7 +4199,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
{
.name = "AUX E TBT2",
.domains = TGL_AUX_E_TBT2_IO_POWER_DOMAINS,
- .ops = &icl_tc_phy_aux_power_well_ops,
+ .ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -4162,7 +4210,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
{
.name = "AUX F TBT3",
.domains = TGL_AUX_F_TBT3_IO_POWER_DOMAINS,
- .ops = &icl_tc_phy_aux_power_well_ops,
+ .ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -4173,7 +4221,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
{
.name = "AUX G TBT4",
.domains = TGL_AUX_G_TBT4_IO_POWER_DOMAINS,
- .ops = &icl_tc_phy_aux_power_well_ops,
+ .ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -4184,7 +4232,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
{
.name = "AUX H TBT5",
.domains = TGL_AUX_H_TBT5_IO_POWER_DOMAINS,
- .ops = &icl_tc_phy_aux_power_well_ops,
+ .ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -4195,7 +4243,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
{
.name = "AUX I TBT6",
.domains = TGL_AUX_I_TBT6_IO_POWER_DOMAINS,
- .ops = &icl_tc_phy_aux_power_well_ops,
+ .ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -4227,6 +4275,12 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
.hsw.irq_pipe_mask = BIT(PIPE_D),
},
},
+ {
+ .name = "TC cold off",
+ .domains = TGL_TC_COLD_OFF_POWER_DOMAINS,
+ .ops = &tgl_tc_cold_off_ops,
+ .id = DISP_PW_ID_NONE,
+ },
};
static int
@@ -4376,8 +4430,6 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
*/
if (IS_GEN(dev_priv, 12)) {
err = set_power_wells(power_domains, tgl_power_wells);
- } else if (IS_ELKHARTLAKE(dev_priv)) {
- err = set_power_wells(power_domains, ehl_power_wells);
} else if (IS_GEN(dev_priv, 11)) {
err = set_power_wells(power_domains, icl_power_wells);
} else if (IS_CANNONLAKE(dev_priv)) {
@@ -4439,9 +4491,8 @@ static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
mutex_unlock(&power_domains->lock);
}
-static inline
-bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
- i915_reg_t reg, bool enable)
+static bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
+ i915_reg_t reg, bool enable)
{
u32 val, status;
@@ -4480,7 +4531,8 @@ void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
drm_WARN(&dev_priv->drm, hweight8(req_slices) > max_slices,
"Invalid number of dbuf slices requested\n");
- DRM_DEBUG_KMS("Updating dbuf slices to 0x%x\n", req_slices);
+ drm_dbg_kms(&dev_priv->drm, "Updating dbuf slices to 0x%x\n",
+ req_slices);
/*
* Might be running this in parallel to gen9_dc_off_power_well_enable
@@ -5016,7 +5068,7 @@ static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
const struct buddy_page_mask *table;
int i;
- if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0))
+ if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_B0))
/* Wa_1409767108: tgl */
table = wa_1409767108_buddy_page_masks;
else
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h
index da64a5edae7a..6c917699293b 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power.h
@@ -76,6 +76,7 @@ enum intel_display_power_domain {
POWER_DOMAIN_MODESET,
POWER_DOMAIN_GT_IRQ,
POWER_DOMAIN_DPLL_DC_OFF,
+ POWER_DOMAIN_TC_COLD_OFF,
POWER_DOMAIN_INIT,
POWER_DOMAIN_NUM,
@@ -100,7 +101,7 @@ enum i915_power_well_id {
SKL_DISP_PW_MISC_IO,
SKL_DISP_PW_1,
SKL_DISP_PW_2,
- TGL_DISP_PW_3,
+ ICL_DISP_PW_3,
SKL_DISP_DC_OFF,
};
@@ -266,6 +267,8 @@ intel_display_power_domain_str(enum intel_display_power_domain domain);
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
+bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
+ enum i915_power_well_id power_well_id);
bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 5e00e611f077..2bf3d4cb4ea9 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -132,8 +132,7 @@ struct intel_encoder {
u16 cloneable;
u8 pipe_mask;
enum intel_hotplug_state (*hotplug)(struct intel_encoder *encoder,
- struct intel_connector *connector,
- bool irq_received);
+ struct intel_connector *connector);
enum intel_output_type (*compute_output_type)(struct intel_encoder *,
struct intel_crtc_state *,
struct drm_connector_state *);
@@ -146,28 +145,35 @@ struct intel_encoder {
void (*update_prepare)(struct intel_atomic_state *,
struct intel_encoder *,
struct intel_crtc *);
- void (*pre_pll_enable)(struct intel_encoder *,
+ void (*pre_pll_enable)(struct intel_atomic_state *,
+ struct intel_encoder *,
const struct intel_crtc_state *,
const struct drm_connector_state *);
- void (*pre_enable)(struct intel_encoder *,
+ void (*pre_enable)(struct intel_atomic_state *,
+ struct intel_encoder *,
const struct intel_crtc_state *,
const struct drm_connector_state *);
- void (*enable)(struct intel_encoder *,
+ void (*enable)(struct intel_atomic_state *,
+ struct intel_encoder *,
const struct intel_crtc_state *,
const struct drm_connector_state *);
void (*update_complete)(struct intel_atomic_state *,
struct intel_encoder *,
struct intel_crtc *);
- void (*disable)(struct intel_encoder *,
+ void (*disable)(struct intel_atomic_state *,
+ struct intel_encoder *,
const struct intel_crtc_state *,
const struct drm_connector_state *);
- void (*post_disable)(struct intel_encoder *,
+ void (*post_disable)(struct intel_atomic_state *,
+ struct intel_encoder *,
const struct intel_crtc_state *,
const struct drm_connector_state *);
- void (*post_pll_disable)(struct intel_encoder *,
+ void (*post_pll_disable)(struct intel_atomic_state *,
+ struct intel_encoder *,
const struct intel_crtc_state *,
const struct drm_connector_state *);
- void (*update_pipe)(struct intel_encoder *,
+ void (*update_pipe)(struct intel_atomic_state *,
+ struct intel_encoder *,
const struct intel_crtc_state *,
const struct drm_connector_state *);
/* Read out the current hw state of this connector, returning true if
@@ -425,11 +431,14 @@ struct intel_connector {
struct edid *edid;
struct edid *detect_edid;
+ /* Number of times hotplug detection was tried after an HPD interrupt */
+ int hotplug_retries;
+
/* since POLL and HPD connectors may use the same HPD line keep the native
state of connector->polled in case hotplug storm detection changes it */
u8 polled;
- void *port; /* store this opaque as its illegal to dereference it */
+ struct drm_dp_mst_port *port;
struct intel_dp *mst_port;
@@ -640,6 +649,16 @@ struct intel_crtc_scaler_state {
#define I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP (1<<1)
/* Flag to use the scanline counter instead of the pixel counter */
#define I915_MODE_FLAG_USE_SCANLINE_COUNTER (1<<2)
+/*
+ * TE0 or TE1 flag is set if the crtc has a DSI encoder which
+ * is operating in command mode.
+ * Flag to use TE from DSI0 instead of VBI in command mode
+ */
+#define I915_MODE_FLAG_DSI_USE_TE0 (1<<3)
+/* Flag to use TE from DSI1 instead of VBI in command mode */
+#define I915_MODE_FLAG_DSI_USE_TE1 (1<<4)
+/* Flag to indicate mipi dsi periodic command mode where we do not get TE */
+#define I915_MODE_FLAG_DSI_PERIODIC_CMD_MODE (1<<5)
struct intel_wm_level {
bool enable;
@@ -669,11 +688,13 @@ struct skl_plane_wm {
struct skl_wm_level wm[8];
struct skl_wm_level uv_wm[8];
struct skl_wm_level trans_wm;
+ struct skl_wm_level sagv_wm0;
bool is_planar;
};
struct skl_pipe_wm {
struct skl_plane_wm planes[I915_MAX_PLANES];
+ bool use_sagv_wm;
};
enum vlv_wm_level {
@@ -955,8 +976,7 @@ struct intel_crtc_state {
/* Panel fitter placement and size for Ironlake+ */
struct {
- u32 pos;
- u32 size;
+ struct drm_rect dst;
bool enabled;
bool force_thru;
} pch_pfit;
@@ -1015,6 +1035,7 @@ struct intel_crtc_state {
union hdmi_infoframe spd;
union hdmi_infoframe hdmi;
union hdmi_infoframe drm;
+ struct drm_dp_vsc_sdp vsc;
} infoframes;
/* HDMI scrambling status */
@@ -1238,6 +1259,7 @@ struct intel_dp_compliance_data {
u8 video_pattern;
u16 hdisplay, vdisplay;
u8 bpc;
+ struct drm_dp_phy_test_params phytest;
};
struct intel_dp_compliance {
@@ -1347,6 +1369,9 @@ struct intel_dp {
/* This is called before link training is started */
void (*prepare_link_retrain)(struct intel_dp *intel_dp);
+ void (*set_link_train)(struct intel_dp *intel_dp, u8 dp_train_pat);
+ void (*set_idle_link_train)(struct intel_dp *intel_dp);
+ void (*set_signal_levels)(struct intel_dp *intel_dp);
/* Displayport compliance testing */
struct intel_dp_compliance compliance;
@@ -1401,6 +1426,7 @@ struct intel_digital_port {
const struct drm_connector_state *conn_state);
u32 (*infoframes_enabled)(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config);
+ bool (*connected)(struct intel_encoder *encoder);
};
struct intel_dp_mst_encoder {
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index a2fafd4499f2..ed9e53c373a7 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -48,7 +48,6 @@
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
-#include "intel_display_debugfs.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
@@ -164,6 +163,17 @@ static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
};
int i, max_rate;
+ if (drm_dp_has_quirk(&intel_dp->desc, 0,
+ DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS)) {
+ /* Needed, e.g., for Apple MBP 2017, 15 inch eDP Retina panel */
+ static const int quirk_rates[] = { 162000, 270000, 324000 };
+
+ memcpy(intel_dp->sink_rates, quirk_rates, sizeof(quirk_rates));
+ intel_dp->num_sink_rates = ARRAY_SIZE(quirk_rates);
+
+ return;
+ }
+
max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);
for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
@@ -452,6 +462,7 @@ static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
int link_rate, u8 lane_count)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
int index;
index = intel_dp_rate_index(intel_dp->common_rates,
@@ -462,7 +473,8 @@ int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
!intel_dp_can_link_train_fallback_for_edp(intel_dp,
intel_dp->common_rates[index - 1],
lane_count)) {
- DRM_DEBUG_KMS("Retrying Link training for eDP with same parameters\n");
+ drm_dbg_kms(&i915->drm,
+ "Retrying Link training for eDP with same parameters\n");
return 0;
}
intel_dp->max_link_rate = intel_dp->common_rates[index - 1];
@@ -472,13 +484,14 @@ int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
!intel_dp_can_link_train_fallback_for_edp(intel_dp,
intel_dp_max_common_rate(intel_dp),
lane_count >> 1)) {
- DRM_DEBUG_KMS("Retrying Link training for eDP with same parameters\n");
+ drm_dbg_kms(&i915->drm,
+ "Retrying Link training for eDP with same parameters\n");
return 0;
}
intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
intel_dp->max_link_lane_count = lane_count >> 1;
} else {
- DRM_ERROR("Link Training Unsuccessful\n");
+ drm_err(&i915->drm, "Link Training Unsuccessful\n");
return -1;
}
@@ -553,6 +566,7 @@ static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
int mode_clock, int mode_hdisplay)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u8 min_slice_count, i;
int max_slice_width;
@@ -565,8 +579,9 @@ static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
- DRM_DEBUG_KMS("Unsupported slice width %d by DP DSC Sink device\n",
- max_slice_width);
+ drm_dbg_kms(&i915->drm,
+ "Unsupported slice width %d by DP DSC Sink device\n",
+ max_slice_width);
return 0;
}
/* Also take into account max slice width */
@@ -584,7 +599,8 @@ static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
return valid_dsc_slicecount[i];
}
- DRM_DEBUG_KMS("Unsupported Slice Count %d\n", min_slice_count);
+ drm_dbg_kms(&i915->drm, "Unsupported Slice Count %d\n",
+ min_slice_count);
return 0;
}
@@ -1343,8 +1359,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
bool is_tc_port = intel_phy_is_tc(i915, phy);
i915_reg_t ch_ctl, ch_data[5];
u32 aux_clock_divider;
- enum intel_display_power_domain aux_domain =
- intel_aux_power_domain(intel_dig_port);
+ enum intel_display_power_domain aux_domain;
intel_wakeref_t aux_wakeref;
intel_wakeref_t pps_wakeref;
int i, ret, recv_bytes;
@@ -1359,6 +1374,8 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
if (is_tc_port)
intel_tc_port_lock(intel_dig_port);
+ aux_domain = intel_aux_power_domain(intel_dig_port);
+
aux_wakeref = intel_display_power_get(i915, aux_domain);
pps_wakeref = pps_lock(intel_dp);
@@ -1832,6 +1849,7 @@ static void snprintf_int_array(char *str, size_t len,
static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
char str[128]; /* FIXME: too big for stack? */
if (!drm_debug_enabled(DRM_UT_KMS))
@@ -1839,15 +1857,15 @@ static void intel_dp_print_rates(struct intel_dp *intel_dp)
snprintf_int_array(str, sizeof(str),
intel_dp->source_rates, intel_dp->num_source_rates);
- DRM_DEBUG_KMS("source rates: %s\n", str);
+ drm_dbg_kms(&i915->drm, "source rates: %s\n", str);
snprintf_int_array(str, sizeof(str),
intel_dp->sink_rates, intel_dp->num_sink_rates);
- DRM_DEBUG_KMS("sink rates: %s\n", str);
+ drm_dbg_kms(&i915->drm, "sink rates: %s\n", str);
snprintf_int_array(str, sizeof(str),
intel_dp->common_rates, intel_dp->num_common_rates);
- DRM_DEBUG_KMS("common rates: %s\n", str);
+ drm_dbg_kms(&i915->drm, "common rates: %s\n", str);
}
int
@@ -1954,6 +1972,8 @@ intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
struct intel_crtc_state *pipe_config,
struct link_config_limits *limits)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
/* For DP Compliance we override the computed bpp for the pipe */
if (intel_dp->compliance.test_data.bpc != 0) {
int bpp = 3 * intel_dp->compliance.test_data.bpc;
@@ -1961,7 +1981,7 @@ intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
limits->min_bpp = limits->max_bpp = bpp;
pipe_config->dither_force_disable = bpp == 6 * 3;
- DRM_DEBUG_KMS("Setting pipe_bpp to %d\n", bpp);
+ drm_dbg_kms(&i915->drm, "Setting pipe_bpp to %d\n", bpp);
}
/* Use values requested by Compliance Test Request */
@@ -2055,6 +2075,7 @@ static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
u8 line_buf_depth;
@@ -2089,7 +2110,8 @@ static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
line_buf_depth = drm_dp_dsc_sink_line_buf_depth(intel_dp->dsc_dpcd);
if (!line_buf_depth) {
- DRM_DEBUG_KMS("DSC Sink Line Buffer Depth invalid\n");
+ drm_dbg_kms(&i915->drm,
+ "DSC Sink Line Buffer Depth invalid\n");
return -EINVAL;
}
@@ -2114,7 +2136,8 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
+ const struct drm_display_mode *adjusted_mode =
+ &pipe_config->hw.adjusted_mode;
u8 dsc_max_bpc;
int pipe_bpp;
int ret;
@@ -2229,7 +2252,9 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
- struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ const struct drm_display_mode *adjusted_mode =
+ &pipe_config->hw.adjusted_mode;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct link_config_limits limits;
int common_len;
@@ -2264,11 +2289,11 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits);
- DRM_DEBUG_KMS("DP link computation with max lane count %i "
- "max rate %d max bpp %d pixel clock %iKHz\n",
- limits.max_lane_count,
- intel_dp->common_rates[limits.max_clock],
- limits.max_bpp, adjusted_mode->crtc_clock);
+ drm_dbg_kms(&i915->drm, "DP link computation with max lane count %i "
+ "max rate %d max bpp %d pixel clock %iKHz\n",
+ limits.max_lane_count,
+ intel_dp->common_rates[limits.max_clock],
+ limits.max_bpp, adjusted_mode->crtc_clock);
/*
* Optimize for slow and wide. This is the place to add alternative
@@ -2277,7 +2302,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);
/* enable compression if the mode doesn't fit available BW */
- DRM_DEBUG_KMS("Force DSC en = %d\n", intel_dp->force_dsc_en);
+ drm_dbg_kms(&i915->drm, "Force DSC en = %d\n", intel_dp->force_dsc_en);
if (ret || intel_dp->force_dsc_en) {
ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
conn_state, &limits);
@@ -2286,40 +2311,42 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
}
if (pipe_config->dsc.compression_enable) {
- DRM_DEBUG_KMS("DP lane count %d clock %d Input bpp %d Compressed bpp %d\n",
- pipe_config->lane_count, pipe_config->port_clock,
- pipe_config->pipe_bpp,
- pipe_config->dsc.compressed_bpp);
-
- DRM_DEBUG_KMS("DP link rate required %i available %i\n",
- intel_dp_link_required(adjusted_mode->crtc_clock,
- pipe_config->dsc.compressed_bpp),
- intel_dp_max_data_rate(pipe_config->port_clock,
- pipe_config->lane_count));
+ drm_dbg_kms(&i915->drm,
+ "DP lane count %d clock %d Input bpp %d Compressed bpp %d\n",
+ pipe_config->lane_count, pipe_config->port_clock,
+ pipe_config->pipe_bpp,
+ pipe_config->dsc.compressed_bpp);
+
+ drm_dbg_kms(&i915->drm,
+ "DP link rate required %i available %i\n",
+ intel_dp_link_required(adjusted_mode->crtc_clock,
+ pipe_config->dsc.compressed_bpp),
+ intel_dp_max_data_rate(pipe_config->port_clock,
+ pipe_config->lane_count));
} else {
- DRM_DEBUG_KMS("DP lane count %d clock %d bpp %d\n",
- pipe_config->lane_count, pipe_config->port_clock,
- pipe_config->pipe_bpp);
+ drm_dbg_kms(&i915->drm, "DP lane count %d clock %d bpp %d\n",
+ pipe_config->lane_count, pipe_config->port_clock,
+ pipe_config->pipe_bpp);
- DRM_DEBUG_KMS("DP link rate required %i available %i\n",
- intel_dp_link_required(adjusted_mode->crtc_clock,
- pipe_config->pipe_bpp),
- intel_dp_max_data_rate(pipe_config->port_clock,
- pipe_config->lane_count));
+ drm_dbg_kms(&i915->drm,
+ "DP link rate required %i available %i\n",
+ intel_dp_link_required(adjusted_mode->crtc_clock,
+ pipe_config->pipe_bpp),
+ intel_dp_max_data_rate(pipe_config->port_clock,
+ pipe_config->lane_count));
}
return 0;
}
static int
intel_dp_ycbcr420_config(struct intel_dp *intel_dp,
- struct drm_connector *connector,
- struct intel_crtc_state *crtc_state)
+ struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
+ struct drm_connector *connector = conn_state->connector;
const struct drm_display_info *info = &connector->display_info;
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- int ret;
if (!drm_mode_is_420_only(info, adjusted_mode) ||
!intel_dp_get_colorimetry_status(intel_dp) ||
@@ -2328,16 +2355,7 @@ intel_dp_ycbcr420_config(struct intel_dp *intel_dp,
crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR420;
- /* YCBCR 420 output conversion needs a scaler */
- ret = skl_update_scaler_crtc(crtc_state);
- if (ret) {
- DRM_DEBUG_KMS("Scaler allocation for output failed\n");
- return ret;
- }
-
- intel_pch_panel_fitting(crtc, crtc_state, DRM_MODE_SCALE_FULLSCREEN);
-
- return 0;
+ return intel_pch_panel_fitting(crtc_state, conn_state);
}
bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
@@ -2384,6 +2402,164 @@ static bool intel_dp_port_has_audio(struct drm_i915_private *dev_priv,
return true;
}
+static void intel_dp_compute_vsc_colorimetry(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state,
+ struct drm_dp_vsc_sdp *vsc)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ /*
+ * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118
+ * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/
+ * Colorimetry Format indication.
+ */
+ vsc->revision = 0x5;
+ vsc->length = 0x13;
+
+ /* DP 1.4a spec, Table 2-120 */
+ switch (crtc_state->output_format) {
+ case INTEL_OUTPUT_FORMAT_YCBCR444:
+ vsc->pixelformat = DP_PIXELFORMAT_YUV444;
+ break;
+ case INTEL_OUTPUT_FORMAT_YCBCR420:
+ vsc->pixelformat = DP_PIXELFORMAT_YUV420;
+ break;
+ case INTEL_OUTPUT_FORMAT_RGB:
+ default:
+ vsc->pixelformat = DP_PIXELFORMAT_RGB;
+ }
+
+ switch (conn_state->colorspace) {
+ case DRM_MODE_COLORIMETRY_BT709_YCC:
+ vsc->colorimetry = DP_COLORIMETRY_BT709_YCC;
+ break;
+ case DRM_MODE_COLORIMETRY_XVYCC_601:
+ vsc->colorimetry = DP_COLORIMETRY_XVYCC_601;
+ break;
+ case DRM_MODE_COLORIMETRY_XVYCC_709:
+ vsc->colorimetry = DP_COLORIMETRY_XVYCC_709;
+ break;
+ case DRM_MODE_COLORIMETRY_SYCC_601:
+ vsc->colorimetry = DP_COLORIMETRY_SYCC_601;
+ break;
+ case DRM_MODE_COLORIMETRY_OPYCC_601:
+ vsc->colorimetry = DP_COLORIMETRY_OPYCC_601;
+ break;
+ case DRM_MODE_COLORIMETRY_BT2020_CYCC:
+ vsc->colorimetry = DP_COLORIMETRY_BT2020_CYCC;
+ break;
+ case DRM_MODE_COLORIMETRY_BT2020_RGB:
+ vsc->colorimetry = DP_COLORIMETRY_BT2020_RGB;
+ break;
+ case DRM_MODE_COLORIMETRY_BT2020_YCC:
+ vsc->colorimetry = DP_COLORIMETRY_BT2020_YCC;
+ break;
+ case DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65:
+ case DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER:
+ vsc->colorimetry = DP_COLORIMETRY_DCI_P3_RGB;
+ break;
+ default:
+ /*
+ * RGB->YCBCR color conversion uses the BT.709
+ * color space.
+ */
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
+ vsc->colorimetry = DP_COLORIMETRY_BT709_YCC;
+ else
+ vsc->colorimetry = DP_COLORIMETRY_DEFAULT;
+ break;
+ }
+
+ vsc->bpc = crtc_state->pipe_bpp / 3;
+
+ /* only RGB pixelformat supports 6 bpc */
+ drm_WARN_ON(&dev_priv->drm,
+ vsc->bpc == 6 && vsc->pixelformat != DP_PIXELFORMAT_RGB);
+
+ /* all YCbCr are always limited range */
+ vsc->dynamic_range = DP_DYNAMIC_RANGE_CTA;
+ vsc->content_type = DP_CONTENT_TYPE_NOT_DEFINED;
+}
+
+static void intel_dp_compute_vsc_sdp(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_dp_vsc_sdp *vsc = &crtc_state->infoframes.vsc;
+
+ /* When a crtc state has PSR, VSC SDP will be handled by the PSR routine */
+ if (crtc_state->has_psr)
+ return;
+
+ if (!intel_dp_needs_vsc_sdp(crtc_state, conn_state))
+ return;
+
+ crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
+ vsc->sdp_type = DP_SDP_VSC;
+ intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
+ &crtc_state->infoframes.vsc);
+}
+
+void intel_dp_compute_psr_vsc_sdp(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state,
+ struct drm_dp_vsc_sdp *vsc)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ vsc->sdp_type = DP_SDP_VSC;
+
+ if (dev_priv->psr.psr2_enabled) {
+ if (dev_priv->psr.colorimetry_support &&
+ intel_dp_needs_vsc_sdp(crtc_state, conn_state)) {
+ /* [PSR2, +Colorimetry] */
+ intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
+ vsc);
+ } else {
+ /*
+ * [PSR2, -Colorimetry]
+ * Prepare VSC Header for SU as per eDP 1.4 spec, Table 6-11
+ * 3D stereo + PSR/PSR2 + Y-coordinate.
+ */
+ vsc->revision = 0x4;
+ vsc->length = 0xe;
+ }
+ } else {
+ /*
+ * [PSR1]
+ * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118
+ * VSC SDP supporting 3D stereo + PSR (applies to eDP v1.3 or
+ * higher).
+ */
+ vsc->revision = 0x2;
+ vsc->length = 0x8;
+ }
+}
+
+static void
+intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ int ret;
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct hdmi_drm_infoframe *drm_infoframe = &crtc_state->infoframes.drm.drm;
+
+ if (!conn_state->hdr_output_metadata)
+ return;
+
+ ret = drm_hdmi_infoframe_set_hdr_metadata(drm_infoframe, conn_state);
+
+ if (ret) {
+ drm_dbg_kms(&dev_priv->drm, "couldn't set HDR metadata in infoframe\n");
+ return;
+ }
+
+ crtc_state->infoframes.enable |=
+ intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA);
+}
+
int
intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
@@ -2394,7 +2570,6 @@ intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_lspcon *lspcon = enc_to_intel_lspcon(encoder);
enum port port = encoder->port;
- struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct intel_connector *intel_connector = intel_dp->attached_connector;
struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(conn_state);
@@ -2410,9 +2585,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
if (lspcon->active)
lspcon_ycbcr420_config(&intel_connector->base, pipe_config);
else
- ret = intel_dp_ycbcr420_config(intel_dp, &intel_connector->base,
- pipe_config);
-
+ ret = intel_dp_ycbcr420_config(intel_dp, pipe_config,
+ conn_state);
if (ret)
return ret;
@@ -2428,18 +2602,12 @@ intel_dp_compute_config(struct intel_encoder *encoder,
intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
adjusted_mode);
- if (INTEL_GEN(dev_priv) >= 9) {
- ret = skl_update_scaler_crtc(pipe_config);
- if (ret)
- return ret;
- }
-
if (HAS_GMCH(dev_priv))
- intel_gmch_panel_fitting(intel_crtc, pipe_config,
- conn_state->scaling_mode);
+ ret = intel_gmch_panel_fitting(pipe_config, conn_state);
else
- intel_pch_panel_fitting(intel_crtc, pipe_config,
- conn_state->scaling_mode);
+ ret = intel_pch_panel_fitting(pipe_config, conn_state);
+ if (ret)
+ return ret;
}
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
@@ -2489,6 +2657,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
intel_dp_set_clock(encoder, pipe_config);
intel_psr_compute_config(intel_dp, pipe_config);
+ intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state);
+ intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state);
return 0;
}
@@ -2630,22 +2800,27 @@ static void wait_panel_status(struct intel_dp *intel_dp,
static void wait_panel_on(struct intel_dp *intel_dp)
{
- DRM_DEBUG_KMS("Wait for panel power on\n");
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ drm_dbg_kms(&i915->drm, "Wait for panel power on\n");
wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
static void wait_panel_off(struct intel_dp *intel_dp)
{
- DRM_DEBUG_KMS("Wait for panel power off time\n");
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ drm_dbg_kms(&i915->drm, "Wait for panel power off time\n");
wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
ktime_t panel_power_on_time;
s64 panel_power_off_duration;
- DRM_DEBUG_KMS("Wait for panel power cycle\n");
+ drm_dbg_kms(&i915->drm, "Wait for panel power cycle\n");
/* take the difference of current time and panel power off time
* and then make panel wait for t11_t12 if needed. */
@@ -3009,11 +3184,12 @@ void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(conn_state->best_encoder));
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
if (!intel_dp_is_edp(intel_dp))
return;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&i915->drm, "\n");
intel_panel_enable_backlight(crtc_state, conn_state);
_intel_edp_backlight_on(intel_dp);
@@ -3047,11 +3223,12 @@ static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder));
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
if (!intel_dp_is_edp(intel_dp))
return;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&i915->drm, "\n");
_intel_edp_backlight_off(intel_dp);
intel_panel_disable_backlight(old_conn_state);
@@ -3064,6 +3241,7 @@ void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
static void intel_edp_backlight_power(struct intel_connector *connector,
bool enable)
{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_dp *intel_dp = intel_attached_dp(connector);
intel_wakeref_t wakeref;
bool is_enabled;
@@ -3074,8 +3252,8 @@ static void intel_edp_backlight_power(struct intel_connector *connector,
if (is_enabled == enable)
return;
- DRM_DEBUG_KMS("panel power control backlight %s\n",
- enable ? "enable" : "disable");
+ drm_dbg_kms(&i915->drm, "panel power control backlight %s\n",
+ enable ? "enable" : "disable");
if (enable)
_intel_edp_backlight_on(intel_dp);
@@ -3185,6 +3363,7 @@ void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
bool enable)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
int ret;
if (!crtc_state->dsc.compression_enable)
@@ -3193,13 +3372,15 @@ void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE,
enable ? DP_DECOMPRESSION_EN : 0);
if (ret < 0)
- DRM_DEBUG_KMS("Failed to %s sink decompression state\n",
- enable ? "enable" : "disable");
+ drm_dbg_kms(&i915->drm,
+ "Failed to %s sink decompression state\n",
+ enable ? "enable" : "disable");
}
/* If the sink supports it, try to set the power state appropriately */
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
int ret, i;
/* Should have a valid DPCD by this point */
@@ -3232,8 +3413,8 @@ void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
}
if (ret != 1)
- DRM_DEBUG_KMS("failed to %s sink power state\n",
- mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
+ drm_dbg_kms(&i915->drm, "failed to %s sink power state\n",
+ mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
}
static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv,
@@ -3390,7 +3571,8 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
}
}
-static void intel_disable_dp(struct intel_encoder *encoder,
+static void intel_disable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -3410,21 +3592,24 @@ static void intel_disable_dp(struct intel_encoder *encoder,
intel_edp_panel_off(intel_dp);
}
-static void g4x_disable_dp(struct intel_encoder *encoder,
+static void g4x_disable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- intel_disable_dp(encoder, old_crtc_state, old_conn_state);
+ intel_disable_dp(state, encoder, old_crtc_state, old_conn_state);
}
-static void vlv_disable_dp(struct intel_encoder *encoder,
+static void vlv_disable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- intel_disable_dp(encoder, old_crtc_state, old_conn_state);
+ intel_disable_dp(state, encoder, old_crtc_state, old_conn_state);
}
-static void g4x_post_disable_dp(struct intel_encoder *encoder,
+static void g4x_post_disable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -3444,14 +3629,16 @@ static void g4x_post_disable_dp(struct intel_encoder *encoder,
ilk_edp_pll_off(intel_dp, old_crtc_state);
}
-static void vlv_post_disable_dp(struct intel_encoder *encoder,
+static void vlv_post_disable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
intel_dp_link_down(encoder, old_crtc_state);
}
-static void chv_post_disable_dp(struct intel_encoder *encoder,
+static void chv_post_disable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -3468,90 +3655,63 @@ static void chv_post_disable_dp(struct intel_encoder *encoder,
}
static void
-_intel_dp_set_link_train(struct intel_dp *intel_dp,
- u32 *DP,
- u8 dp_train_pat)
+cpt_set_link_train(struct intel_dp *intel_dp,
+ u8 dp_train_pat)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- enum port port = intel_dig_port->base.port;
- u8 train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd);
-
- if (dp_train_pat & train_pat_mask)
- drm_dbg_kms(&dev_priv->drm,
- "Using DP training pattern TPS%d\n",
- dp_train_pat & train_pat_mask);
-
- if (HAS_DDI(dev_priv)) {
- u32 temp = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl);
+ u32 *DP = &intel_dp->DP;
- if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
- temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
- else
- temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
-
- temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
- switch (dp_train_pat & train_pat_mask) {
- case DP_TRAINING_PATTERN_DISABLE:
- temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
+ *DP &= ~DP_LINK_TRAIN_MASK_CPT;
- break;
- case DP_TRAINING_PATTERN_1:
- temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
- break;
- case DP_TRAINING_PATTERN_2:
- temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
- break;
- case DP_TRAINING_PATTERN_3:
- temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
- break;
- case DP_TRAINING_PATTERN_4:
- temp |= DP_TP_CTL_LINK_TRAIN_PAT4;
- break;
- }
- intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, temp);
+ switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
+ case DP_TRAINING_PATTERN_DISABLE:
+ *DP |= DP_LINK_TRAIN_OFF_CPT;
+ break;
+ case DP_TRAINING_PATTERN_1:
+ *DP |= DP_LINK_TRAIN_PAT_1_CPT;
+ break;
+ case DP_TRAINING_PATTERN_2:
+ *DP |= DP_LINK_TRAIN_PAT_2_CPT;
+ break;
+ case DP_TRAINING_PATTERN_3:
+ drm_dbg_kms(&dev_priv->drm,
+ "TPS3 not supported, using TPS2 instead\n");
+ *DP |= DP_LINK_TRAIN_PAT_2_CPT;
+ break;
+ }
- } else if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
- (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
- *DP &= ~DP_LINK_TRAIN_MASK_CPT;
+ intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
+}
- switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
- case DP_TRAINING_PATTERN_DISABLE:
- *DP |= DP_LINK_TRAIN_OFF_CPT;
- break;
- case DP_TRAINING_PATTERN_1:
- *DP |= DP_LINK_TRAIN_PAT_1_CPT;
- break;
- case DP_TRAINING_PATTERN_2:
- *DP |= DP_LINK_TRAIN_PAT_2_CPT;
- break;
- case DP_TRAINING_PATTERN_3:
- drm_dbg_kms(&dev_priv->drm,
- "TPS3 not supported, using TPS2 instead\n");
- *DP |= DP_LINK_TRAIN_PAT_2_CPT;
- break;
- }
+static void
+g4x_set_link_train(struct intel_dp *intel_dp,
+ u8 dp_train_pat)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u32 *DP = &intel_dp->DP;
- } else {
- *DP &= ~DP_LINK_TRAIN_MASK;
+ *DP &= ~DP_LINK_TRAIN_MASK;
- switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
- case DP_TRAINING_PATTERN_DISABLE:
- *DP |= DP_LINK_TRAIN_OFF;
- break;
- case DP_TRAINING_PATTERN_1:
- *DP |= DP_LINK_TRAIN_PAT_1;
- break;
- case DP_TRAINING_PATTERN_2:
- *DP |= DP_LINK_TRAIN_PAT_2;
- break;
- case DP_TRAINING_PATTERN_3:
- drm_dbg_kms(&dev_priv->drm,
- "TPS3 not supported, using TPS2 instead\n");
- *DP |= DP_LINK_TRAIN_PAT_2;
- break;
- }
+ switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
+ case DP_TRAINING_PATTERN_DISABLE:
+ *DP |= DP_LINK_TRAIN_OFF;
+ break;
+ case DP_TRAINING_PATTERN_1:
+ *DP |= DP_LINK_TRAIN_PAT_1;
+ break;
+ case DP_TRAINING_PATTERN_2:
+ *DP |= DP_LINK_TRAIN_PAT_2;
+ break;
+ case DP_TRAINING_PATTERN_3:
+ drm_dbg_kms(&dev_priv->drm,
+ "TPS3 not supported, using TPS2 instead\n");
+ *DP |= DP_LINK_TRAIN_PAT_2;
+ break;
}
+
+ intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
}
static void intel_dp_enable_port(struct intel_dp *intel_dp,
@@ -3577,7 +3737,8 @@ static void intel_dp_enable_port(struct intel_dp *intel_dp,
intel_de_posting_read(dev_priv, intel_dp->output_reg);
}
-static void intel_enable_dp(struct intel_encoder *encoder,
+static void intel_enable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -3623,22 +3784,25 @@ static void intel_enable_dp(struct intel_encoder *encoder,
}
}
-static void g4x_enable_dp(struct intel_encoder *encoder,
+static void g4x_enable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- intel_enable_dp(encoder, pipe_config, conn_state);
+ intel_enable_dp(state, encoder, pipe_config, conn_state);
intel_edp_backlight_on(pipe_config, conn_state);
}
-static void vlv_enable_dp(struct intel_encoder *encoder,
+static void vlv_enable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
intel_edp_backlight_on(pipe_config, conn_state);
}
-static void g4x_pre_enable_dp(struct intel_encoder *encoder,
+static void g4x_pre_enable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -3758,16 +3922,18 @@ static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
intel_dp_init_panel_power_sequencer_registers(intel_dp, true);
}
-static void vlv_pre_enable_dp(struct intel_encoder *encoder,
+static void vlv_pre_enable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
vlv_phy_pre_encoder_enable(encoder, pipe_config);
- intel_enable_dp(encoder, pipe_config, conn_state);
+ intel_enable_dp(state, encoder, pipe_config, conn_state);
}
-static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder,
+static void vlv_dp_pre_pll_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -3776,19 +3942,21 @@ static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder,
vlv_phy_pre_pll_enable(encoder, pipe_config);
}
-static void chv_pre_enable_dp(struct intel_encoder *encoder,
+static void chv_pre_enable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
chv_phy_pre_encoder_enable(encoder, pipe_config);
- intel_enable_dp(encoder, pipe_config, conn_state);
+ intel_enable_dp(state, encoder, pipe_config, conn_state);
/* Second common lane will stay alive on its own now */
chv_phy_release_cl2_override(encoder);
}
-static void chv_dp_pre_pll_enable(struct intel_encoder *encoder,
+static void chv_dp_pre_pll_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -3797,7 +3965,8 @@ static void chv_dp_pre_pll_enable(struct intel_encoder *encoder,
chv_phy_pre_pll_enable(encoder, pipe_config);
}
-static void chv_dp_post_pll_disable(struct intel_encoder *encoder,
+static void chv_dp_post_pll_disable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -3881,7 +4050,7 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, u8 voltage_swing)
}
}
-static u32 vlv_signal_levels(struct intel_dp *intel_dp)
+static void vlv_set_signal_levels(struct intel_dp *intel_dp)
{
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
unsigned long demph_reg_value, preemph_reg_value,
@@ -3909,7 +4078,7 @@ static u32 vlv_signal_levels(struct intel_dp *intel_dp)
uniqtranscale_reg_value = 0x5598DA3A;
break;
default:
- return 0;
+ return;
}
break;
case DP_TRAIN_PRE_EMPH_LEVEL_1:
@@ -3928,7 +4097,7 @@ static u32 vlv_signal_levels(struct intel_dp *intel_dp)
uniqtranscale_reg_value = 0x55ADDA3A;
break;
default:
- return 0;
+ return;
}
break;
case DP_TRAIN_PRE_EMPH_LEVEL_2:
@@ -3943,7 +4112,7 @@ static u32 vlv_signal_levels(struct intel_dp *intel_dp)
uniqtranscale_reg_value = 0x55ADDA3A;
break;
default:
- return 0;
+ return;
}
break;
case DP_TRAIN_PRE_EMPH_LEVEL_3:
@@ -3954,20 +4123,18 @@ static u32 vlv_signal_levels(struct intel_dp *intel_dp)
uniqtranscale_reg_value = 0x55ADDA3A;
break;
default:
- return 0;
+ return;
}
break;
default:
- return 0;
+ return;
}
vlv_set_phy_signal_level(encoder, demph_reg_value, preemph_reg_value,
uniqtranscale_reg_value, 0);
-
- return 0;
}
-static u32 chv_signal_levels(struct intel_dp *intel_dp)
+static void chv_set_signal_levels(struct intel_dp *intel_dp)
{
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
u32 deemph_reg_value, margin_reg_value;
@@ -3995,7 +4162,7 @@ static u32 chv_signal_levels(struct intel_dp *intel_dp)
uniq_trans_scale = true;
break;
default:
- return 0;
+ return;
}
break;
case DP_TRAIN_PRE_EMPH_LEVEL_1:
@@ -4013,7 +4180,7 @@ static u32 chv_signal_levels(struct intel_dp *intel_dp)
margin_reg_value = 154;
break;
default:
- return 0;
+ return;
}
break;
case DP_TRAIN_PRE_EMPH_LEVEL_2:
@@ -4027,7 +4194,7 @@ static u32 chv_signal_levels(struct intel_dp *intel_dp)
margin_reg_value = 154;
break;
default:
- return 0;
+ return;
}
break;
case DP_TRAIN_PRE_EMPH_LEVEL_3:
@@ -4037,21 +4204,18 @@ static u32 chv_signal_levels(struct intel_dp *intel_dp)
margin_reg_value = 154;
break;
default:
- return 0;
+ return;
}
break;
default:
- return 0;
+ return;
}
chv_set_phy_signal_level(encoder, deemph_reg_value,
margin_reg_value, uniq_trans_scale);
-
- return 0;
}
-static u32
-g4x_signal_levels(u8 train_set)
+static u32 g4x_signal_levels(u8 train_set)
{
u32 signal_levels = 0;
@@ -4088,12 +4252,31 @@ g4x_signal_levels(u8 train_set)
return signal_levels;
}
+static void
+g4x_set_signal_levels(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u8 train_set = intel_dp->train_set[0];
+ u32 signal_levels;
+
+ signal_levels = g4x_signal_levels(train_set);
+
+ drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
+ signal_levels);
+
+ intel_dp->DP &= ~(DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK);
+ intel_dp->DP |= signal_levels;
+
+ intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
+}
+
/* SNB CPU eDP voltage swing and pre-emphasis control */
-static u32
-snb_cpu_edp_signal_levels(u8 train_set)
+static u32 snb_cpu_edp_signal_levels(u8 train_set)
{
- int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
- DP_TRAIN_PRE_EMPHASIS_MASK);
+ u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+ DP_TRAIN_PRE_EMPHASIS_MASK);
+
switch (signal_levels) {
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
@@ -4116,12 +4299,31 @@ snb_cpu_edp_signal_levels(u8 train_set)
}
}
+static void
+snb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u8 train_set = intel_dp->train_set[0];
+ u32 signal_levels;
+
+ signal_levels = snb_cpu_edp_signal_levels(train_set);
+
+ drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
+ signal_levels);
+
+ intel_dp->DP &= ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
+ intel_dp->DP |= signal_levels;
+
+ intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
+}
+
/* IVB CPU eDP voltage swing and pre-emphasis control */
-static u32
-ivb_cpu_edp_signal_levels(u8 train_set)
+static u32 ivb_cpu_edp_signal_levels(u8 train_set)
{
- int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
- DP_TRAIN_PRE_EMPHASIS_MASK);
+ u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+ DP_TRAIN_PRE_EMPHASIS_MASK);
+
switch (signal_levels) {
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
return EDP_LINK_TRAIN_400MV_0DB_IVB;
@@ -4147,38 +4349,29 @@ ivb_cpu_edp_signal_levels(u8 train_set)
}
}
-void
-intel_dp_set_signal_levels(struct intel_dp *intel_dp)
+static void
+ivb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- enum port port = intel_dig_port->base.port;
- u32 signal_levels, mask = 0;
u8 train_set = intel_dp->train_set[0];
+ u32 signal_levels;
- if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
- signal_levels = bxt_signal_levels(intel_dp);
- } else if (HAS_DDI(dev_priv)) {
- signal_levels = ddi_signal_levels(intel_dp);
- mask = DDI_BUF_EMP_MASK;
- } else if (IS_CHERRYVIEW(dev_priv)) {
- signal_levels = chv_signal_levels(intel_dp);
- } else if (IS_VALLEYVIEW(dev_priv)) {
- signal_levels = vlv_signal_levels(intel_dp);
- } else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
- signal_levels = ivb_cpu_edp_signal_levels(train_set);
- mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
- } else if (IS_GEN(dev_priv, 6) && port == PORT_A) {
- signal_levels = snb_cpu_edp_signal_levels(train_set);
- mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
- } else {
- signal_levels = g4x_signal_levels(train_set);
- mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
- }
+ signal_levels = ivb_cpu_edp_signal_levels(train_set);
+
+ drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
+ signal_levels);
+
+ intel_dp->DP &= ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
+ intel_dp->DP |= signal_levels;
- if (mask)
- drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
- signal_levels);
+ intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
+}
+
+void intel_dp_set_signal_levels(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u8 train_set = intel_dp->train_set[0];
drm_dbg_kms(&dev_priv->drm, "Using vswing level %d%s\n",
train_set & DP_TRAIN_VOLTAGE_SWING_MASK,
@@ -4189,55 +4382,28 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp)
train_set & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ?
" (max)" : "");
- intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;
-
- intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
- intel_de_posting_read(dev_priv, intel_dp->output_reg);
+ intel_dp->set_signal_levels(intel_dp);
}
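
The structural point of this hunk is that the former per-platform if/else ladder in intel_dp_set_signal_levels() disappears; the platform choice is expected to be made once at encoder init and the hot path becomes a single indirect call. A minimal standalone sketch of that hook-dispatch pattern (all names below are hypothetical illustrations, not the actual i915 symbols):

/*
 * Illustrative sketch only -- hypothetical names, not the i915 API.
 * Pick the per-platform implementation once, then dispatch through a
 * function pointer instead of re-checking the platform on every call.
 */
#include <stdio.h>

struct example_dp {
	int is_chv;                                       /* pretend platform flag */
	void (*set_signal_levels)(struct example_dp *dp); /* per-platform hook */
};

static void example_g4x_set_signal_levels(struct example_dp *dp)
{
	printf("g4x: program voltage swing / pre-emphasis bits\n");
}

static void example_chv_set_signal_levels(struct example_dp *dp)
{
	printf("chv: program the Cherryview PHY registers\n");
}

static void example_dp_init_hooks(struct example_dp *dp)
{
	/* the platform check happens exactly once, at init time */
	dp->set_signal_levels = dp->is_chv ? example_chv_set_signal_levels
					   : example_g4x_set_signal_levels;
}

int main(void)
{
	struct example_dp dp = { .is_chv = 1 };

	example_dp_init_hooks(&dp);
	dp.set_signal_levels(&dp); /* single indirect call on the hot path */
	return 0;
}
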
void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
u8 dp_train_pat)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv =
- to_i915(intel_dig_port->base.base.dev);
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u8 train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd);
- _intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);
+ if (dp_train_pat & train_pat_mask)
+ drm_dbg_kms(&dev_priv->drm,
+ "Using DP training pattern TPS%d\n",
+ dp_train_pat & train_pat_mask);
- intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
- intel_de_posting_read(dev_priv, intel_dp->output_reg);
+ intel_dp->set_link_train(intel_dp, dp_train_pat);
}
void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- enum port port = intel_dig_port->base.port;
- u32 val;
-
- if (!HAS_DDI(dev_priv))
- return;
-
- val = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl);
- val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
- val |= DP_TP_CTL_LINK_TRAIN_IDLE;
- intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, val);
-
- /*
- * Until TGL on PORT_A we can have only eDP in SST mode. There the only
- * reason we need to set idle transmission mode is to work around a HW
- * issue where we enable the pipe while not in idle link-training mode.
- * In this case there is requirement to wait for a minimum number of
- * idle patterns to be sent.
- */
- if (port == PORT_A && INTEL_GEN(dev_priv) < 12)
- return;
-
- if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
- DP_TP_STATUS_IDLE_DONE, 1))
- drm_err(&dev_priv->drm,
- "Timed out waiting for DP idle patterns\n");
+ if (intel_dp->set_idle_link_train)
+ intel_dp->set_idle_link_train(intel_dp);
}
static void
@@ -4316,6 +4482,7 @@ intel_dp_link_down(struct intel_encoder *encoder,
static void
intel_dp_extended_receiver_capabilities(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u8 dpcd_ext[6];
/*
@@ -4331,20 +4498,22 @@ intel_dp_extended_receiver_capabilities(struct intel_dp *intel_dp)
if (drm_dp_dpcd_read(&intel_dp->aux, DP_DP13_DPCD_REV,
&dpcd_ext, sizeof(dpcd_ext)) != sizeof(dpcd_ext)) {
- DRM_ERROR("DPCD failed read at extended capabilities\n");
+ drm_err(&i915->drm,
+ "DPCD failed read at extended capabilities\n");
return;
}
if (intel_dp->dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) {
- DRM_DEBUG_KMS("DPCD extended DPCD rev less than base DPCD rev\n");
+ drm_dbg_kms(&i915->drm,
+ "DPCD extended DPCD rev less than base DPCD rev\n");
return;
}
if (!memcmp(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext)))
return;
- DRM_DEBUG_KMS("Base DPCD: %*ph\n",
- (int)sizeof(intel_dp->dpcd), intel_dp->dpcd);
+ drm_dbg_kms(&i915->drm, "Base DPCD: %*ph\n",
+ (int)sizeof(intel_dp->dpcd), intel_dp->dpcd);
memcpy(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext));
}
@@ -4352,13 +4521,16 @@ intel_dp_extended_receiver_capabilities(struct intel_dp *intel_dp)
bool
intel_dp_read_dpcd(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
sizeof(intel_dp->dpcd)) < 0)
return false; /* aux transfer failed */
intel_dp_extended_receiver_capabilities(intel_dp);
- DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
+ drm_dbg_kms(&i915->drm, "DPCD: %*ph\n", (int)sizeof(intel_dp->dpcd),
+ intel_dp->dpcd);
return intel_dp->dpcd[DP_DPCD_REV] != 0;
}
@@ -4375,6 +4547,8 @@ bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
/*
* Clear the cached register set to avoid using stale values
* for the sinks that do not support DSC.
@@ -4390,20 +4564,23 @@ static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp)
if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT,
intel_dp->dsc_dpcd,
sizeof(intel_dp->dsc_dpcd)) < 0)
- DRM_ERROR("Failed to read DPCD register 0x%x\n",
- DP_DSC_SUPPORT);
+ drm_err(&i915->drm,
+ "Failed to read DPCD register 0x%x\n",
+ DP_DSC_SUPPORT);
- DRM_DEBUG_KMS("DSC DPCD: %*ph\n",
- (int)sizeof(intel_dp->dsc_dpcd),
- intel_dp->dsc_dpcd);
+ drm_dbg_kms(&i915->drm, "DSC DPCD: %*ph\n",
+ (int)sizeof(intel_dp->dsc_dpcd),
+ intel_dp->dsc_dpcd);
/* FEC is supported only on DP 1.4 */
if (!intel_dp_is_edp(intel_dp) &&
drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY,
&intel_dp->fec_capable) < 0)
- DRM_ERROR("Failed to read FEC DPCD register\n");
+ drm_err(&i915->drm,
+ "Failed to read FEC DPCD register\n");
- DRM_DEBUG_KMS("FEC CAPABILITY: %x\n", intel_dp->fec_capable);
+ drm_dbg_kms(&i915->drm, "FEC CAPABILITY: %x\n",
+ intel_dp->fec_capable);
}
}
@@ -4577,14 +4754,16 @@ intel_dp_can_mst(struct intel_dp *intel_dp)
static void
intel_dp_configure_mst(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct intel_encoder *encoder =
&dp_to_dig_port(intel_dp)->base;
bool sink_can_mst = intel_dp_sink_can_mst(intel_dp);
- DRM_DEBUG_KMS("[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n",
- encoder->base.base.id, encoder->base.name,
- yesno(intel_dp->can_mst), yesno(sink_can_mst),
- yesno(i915_modparams.enable_dp_mst));
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n",
+ encoder->base.base.id, encoder->base.name,
+ yesno(intel_dp->can_mst), yesno(sink_can_mst),
+ yesno(i915_modparams.enable_dp_mst));
if (!intel_dp->can_mst)
return;
@@ -4630,158 +4809,92 @@ intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
return false;
}
-static void
-intel_dp_setup_vsc_sdp(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+static ssize_t intel_dp_vsc_sdp_pack(const struct drm_dp_vsc_sdp *vsc,
+ struct dp_sdp *sdp, size_t size)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct dp_sdp vsc_sdp = {};
+ size_t length = sizeof(struct dp_sdp);
- /* Prepare VSC Header for SU as per DP 1.4a spec, Table 2-119 */
- vsc_sdp.sdp_header.HB0 = 0;
- vsc_sdp.sdp_header.HB1 = 0x7;
+ if (size < length)
+ return -ENOSPC;
+
+ memset(sdp, 0, size);
/*
- * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/
- * Colorimetry Format indication.
+ * Prepare VSC Header for SU as per DP 1.4a spec, Table 2-119
+ * VSC SDP Header Bytes
*/
- vsc_sdp.sdp_header.HB2 = 0x5;
+ sdp->sdp_header.HB0 = 0; /* Secondary-Data Packet ID = 0 */
+ sdp->sdp_header.HB1 = vsc->sdp_type; /* Secondary-data Packet Type */
+ sdp->sdp_header.HB2 = vsc->revision; /* Revision Number */
+ sdp->sdp_header.HB3 = vsc->length; /* Number of Valid Data Bytes */
/*
- * VSC SDP supporting 3D stereo, + PSR2, + Pixel Encoding/
- * Colorimetry Format indication (HB2 = 05h).
+ * Only revision 0x5 supports Pixel Encoding/Colorimetry Format as
+ * per DP 1.4a spec.
*/
- vsc_sdp.sdp_header.HB3 = 0x13;
-
- /* DP 1.4a spec, Table 2-120 */
- switch (crtc_state->output_format) {
- case INTEL_OUTPUT_FORMAT_YCBCR444:
- vsc_sdp.db[16] = 0x1 << 4; /* YCbCr 444 : DB16[7:4] = 1h */
- break;
- case INTEL_OUTPUT_FORMAT_YCBCR420:
- vsc_sdp.db[16] = 0x3 << 4; /* YCbCr 420 : DB16[7:4] = 3h */
- break;
- case INTEL_OUTPUT_FORMAT_RGB:
- default:
- /* RGB: DB16[7:4] = 0h */
- break;
- }
+ if (vsc->revision != 0x5)
+ goto out;
- switch (conn_state->colorspace) {
- case DRM_MODE_COLORIMETRY_BT709_YCC:
- vsc_sdp.db[16] |= 0x1;
- break;
- case DRM_MODE_COLORIMETRY_XVYCC_601:
- vsc_sdp.db[16] |= 0x2;
- break;
- case DRM_MODE_COLORIMETRY_XVYCC_709:
- vsc_sdp.db[16] |= 0x3;
- break;
- case DRM_MODE_COLORIMETRY_SYCC_601:
- vsc_sdp.db[16] |= 0x4;
- break;
- case DRM_MODE_COLORIMETRY_OPYCC_601:
- vsc_sdp.db[16] |= 0x5;
- break;
- case DRM_MODE_COLORIMETRY_BT2020_CYCC:
- case DRM_MODE_COLORIMETRY_BT2020_RGB:
- vsc_sdp.db[16] |= 0x6;
- break;
- case DRM_MODE_COLORIMETRY_BT2020_YCC:
- vsc_sdp.db[16] |= 0x7;
- break;
- case DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65:
- case DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER:
- vsc_sdp.db[16] |= 0x4; /* DCI-P3 (SMPTE RP 431-2) */
- break;
- default:
- /* sRGB (IEC 61966-2-1) / ITU-R BT.601: DB16[0:3] = 0h */
+ /* VSC SDP Payload for DB16 through DB18 */
+ /* Pixel Encoding and Colorimetry Formats */
+ sdp->db[16] = (vsc->pixelformat & 0xf) << 4; /* DB16[7:4] */
+ sdp->db[16] |= vsc->colorimetry & 0xf; /* DB16[3:0] */
- /* RGB->YCBCR color conversion uses the BT.709 color space. */
- if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
- vsc_sdp.db[16] |= 0x1; /* 0x1, ITU-R BT.709 */
+ switch (vsc->bpc) {
+ case 6:
+ /* 6bpc: 0x0 */
break;
- }
-
- /*
- * For pixel encoding formats YCbCr444, YCbCr422, YCbCr420, and Y Only,
- * the following Component Bit Depth values are defined:
- * 001b = 8bpc.
- * 010b = 10bpc.
- * 011b = 12bpc.
- * 100b = 16bpc.
- */
- switch (crtc_state->pipe_bpp) {
- case 24: /* 8bpc */
- vsc_sdp.db[17] = 0x1;
+ case 8:
+ sdp->db[17] = 0x1; /* DB17[3:0] */
break;
- case 30: /* 10bpc */
- vsc_sdp.db[17] = 0x2;
+ case 10:
+ sdp->db[17] = 0x2;
break;
- case 36: /* 12bpc */
- vsc_sdp.db[17] = 0x3;
+ case 12:
+ sdp->db[17] = 0x3;
break;
- case 48: /* 16bpc */
- vsc_sdp.db[17] = 0x4;
+ case 16:
+ sdp->db[17] = 0x4;
break;
default:
- MISSING_CASE(crtc_state->pipe_bpp);
+ MISSING_CASE(vsc->bpc);
break;
}
+ /* Dynamic Range and Component Bit Depth */
+ if (vsc->dynamic_range == DP_DYNAMIC_RANGE_CTA)
+ sdp->db[17] |= 0x80; /* DB17[7] */
- /*
- * Dynamic Range (Bit 7)
- * 0 = VESA range, 1 = CTA range.
- * all YCbCr are always limited range
- */
- vsc_sdp.db[17] |= 0x80;
-
- /*
- * Content Type (Bits 2:0)
- * 000b = Not defined.
- * 001b = Graphics.
- * 010b = Photo.
- * 011b = Video.
- * 100b = Game
- * All other values are RESERVED.
- * Note: See CTA-861-G for the definition and expected
- * processing by a stream sink for the above contect types.
- */
- vsc_sdp.db[18] = 0;
+ /* Content Type */
+ sdp->db[18] = vsc->content_type & 0x7;
- intel_dig_port->write_infoframe(&intel_dig_port->base,
- crtc_state, DP_SDP_VSC, &vsc_sdp, sizeof(vsc_sdp));
+out:
+ return length;
}
-static void
-intel_dp_setup_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+static ssize_t
+intel_dp_hdr_metadata_infoframe_sdp_pack(const struct hdmi_drm_infoframe *drm_infoframe,
+ struct dp_sdp *sdp,
+ size_t size)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct dp_sdp infoframe_sdp = {};
- struct hdmi_drm_infoframe drm_infoframe = {};
+ size_t length = sizeof(struct dp_sdp);
const int infoframe_size = HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE;
unsigned char buf[HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE];
ssize_t len;
- int ret;
- ret = drm_hdmi_infoframe_set_hdr_metadata(&drm_infoframe, conn_state);
- if (ret) {
- DRM_DEBUG_KMS("couldn't set HDR metadata in infoframe\n");
- return;
- }
+ if (size < length)
+ return -ENOSPC;
+
+ memset(sdp, 0, size);
- len = hdmi_drm_infoframe_pack_only(&drm_infoframe, buf, sizeof(buf));
+ len = hdmi_drm_infoframe_pack_only(drm_infoframe, buf, sizeof(buf));
if (len < 0) {
DRM_DEBUG_KMS("buffer size is smaller than hdr metadata infoframe\n");
- return;
+ return -ENOSPC;
}
if (len != infoframe_size) {
DRM_DEBUG_KMS("wrong static hdr metadata size\n");
- return;
+ return -ENOSPC;
}
/*
@@ -4790,34 +4903,37 @@ intel_dp_setup_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
* Table 2-100 and Table 2-101
*/
- /* Packet ID, 00h for non-Audio INFOFRAME */
- infoframe_sdp.sdp_header.HB0 = 0;
+ /* Secondary-Data Packet ID, 00h for non-Audio INFOFRAME */
+ sdp->sdp_header.HB0 = 0;
/*
* Packet Type 80h + Non-audio INFOFRAME Type value
- * HDMI_INFOFRAME_TYPE_DRM: 0x87,
+ * HDMI_INFOFRAME_TYPE_DRM: 0x87
+ * - 80h + Non-audio INFOFRAME Type value
+ * - InfoFrame Type: 0x07
+ * [CTA-861-G Table-42 Dynamic Range and Mastering InfoFrame]
*/
- infoframe_sdp.sdp_header.HB1 = drm_infoframe.type;
+ sdp->sdp_header.HB1 = drm_infoframe->type;
/*
* Least Significant Eight Bits of (Data Byte Count – 1)
- * infoframe_size - 1,
+ * infoframe_size - 1
*/
- infoframe_sdp.sdp_header.HB2 = 0x1D;
+ sdp->sdp_header.HB2 = 0x1D;
/* INFOFRAME SDP Version Number */
- infoframe_sdp.sdp_header.HB3 = (0x13 << 2);
+ sdp->sdp_header.HB3 = (0x13 << 2);
/* CTA Header Byte 2 (INFOFRAME Version Number) */
- infoframe_sdp.db[0] = drm_infoframe.version;
+ sdp->db[0] = drm_infoframe->version;
/* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */
- infoframe_sdp.db[1] = drm_infoframe.length;
+ sdp->db[1] = drm_infoframe->length;
/*
* Copy HDMI_DRM_INFOFRAME_SIZE size from a buffer after
* HDMI_INFOFRAME_HEADER_SIZE
*/
- BUILD_BUG_ON(sizeof(infoframe_sdp.db) < HDMI_DRM_INFOFRAME_SIZE + 2);
- memcpy(&infoframe_sdp.db[2], &buf[HDMI_INFOFRAME_HEADER_SIZE],
+ BUILD_BUG_ON(sizeof(sdp->db) < HDMI_DRM_INFOFRAME_SIZE + 2);
+ memcpy(&sdp->db[2], &buf[HDMI_INFOFRAME_HEADER_SIZE],
HDMI_DRM_INFOFRAME_SIZE);
/*
- * Size of DP infoframe sdp packet for HDR static metadata is consist of
+ * Size of DP infoframe sdp packet for HDR static metadata consists of
* - DP SDP Header(struct dp_sdp_header): 4 bytes
* - Two Data Blocks: 2 bytes
* CTA Header Byte2 (INFOFRAME Version Number)
@@ -4828,36 +4944,289 @@ intel_dp_setup_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
* infoframe size. But GEN11+ has larger than that size, write_infoframe
* will pad rest of the size.
*/
- intel_dig_port->write_infoframe(&intel_dig_port->base, crtc_state,
- HDMI_PACKET_TYPE_GAMUT_METADATA,
- &infoframe_sdp,
- sizeof(struct dp_sdp_header) + 2 + HDMI_DRM_INFOFRAME_SIZE);
+ return sizeof(struct dp_sdp_header) + 2 + HDMI_DRM_INFOFRAME_SIZE;
}
-void intel_dp_vsc_enable(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+static void intel_write_dp_sdp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type)
{
- if (!intel_dp_needs_vsc_sdp(crtc_state, conn_state))
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct dp_sdp sdp = {};
+ ssize_t len;
+
+ if ((crtc_state->infoframes.enable &
+ intel_hdmi_infoframe_enable(type)) == 0)
return;
- intel_dp_setup_vsc_sdp(intel_dp, crtc_state, conn_state);
+ switch (type) {
+ case DP_SDP_VSC:
+ len = intel_dp_vsc_sdp_pack(&crtc_state->infoframes.vsc, &sdp,
+ sizeof(sdp));
+ break;
+ case HDMI_PACKET_TYPE_GAMUT_METADATA:
+ len = intel_dp_hdr_metadata_infoframe_sdp_pack(&crtc_state->infoframes.drm.drm,
+ &sdp, sizeof(sdp));
+ break;
+ default:
+ MISSING_CASE(type);
+ return;
+ }
+
+ if (drm_WARN_ON(&dev_priv->drm, len < 0))
+ return;
+
+ intel_dig_port->write_infoframe(encoder, crtc_state, type, &sdp, len);
}
-void intel_dp_hdr_metadata_enable(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+void intel_write_dp_vsc_sdp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ struct drm_dp_vsc_sdp *vsc)
{
- if (!conn_state->hdr_output_metadata)
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct dp_sdp sdp = {};
+ ssize_t len;
+
+ len = intel_dp_vsc_sdp_pack(vsc, &sdp, sizeof(sdp));
+
+ if (drm_WARN_ON(&dev_priv->drm, len < 0))
+ return;
+
+ intel_dig_port->write_infoframe(encoder, crtc_state, DP_SDP_VSC,
+ &sdp, len);
+}
+
+void intel_dp_set_infoframes(struct intel_encoder *encoder,
+ bool enable,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder);
+ u32 dip_enable = VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_GCP_HSW |
+ VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW |
+ VIDEO_DIP_ENABLE_SPD_HSW | VIDEO_DIP_ENABLE_DRM_GLK;
+ u32 val = intel_de_read(dev_priv, reg);
+
+ /* TODO: Add DSC case (DIP_ENABLE_PPS) */
+ /* When PSR is enabled, this routine doesn't disable VSC DIP */
+ if (intel_psr_enabled(intel_dp))
+ val &= ~dip_enable;
+ else
+ val &= ~(dip_enable | VIDEO_DIP_ENABLE_VSC_HSW);
+
+ if (!enable) {
+ intel_de_write(dev_priv, reg, val);
+ intel_de_posting_read(dev_priv, reg);
+ return;
+ }
+
+ intel_de_write(dev_priv, reg, val);
+ intel_de_posting_read(dev_priv, reg);
+
+ /* When PSR is enabled, VSC SDP is handled by PSR routine */
+ if (!intel_psr_enabled(intel_dp))
+ intel_write_dp_sdp(encoder, crtc_state, DP_SDP_VSC);
+
+ intel_write_dp_sdp(encoder, crtc_state, HDMI_PACKET_TYPE_GAMUT_METADATA);
+}
+
+static int intel_dp_vsc_sdp_unpack(struct drm_dp_vsc_sdp *vsc,
+ const void *buffer, size_t size)
+{
+ const struct dp_sdp *sdp = buffer;
+
+ if (size < sizeof(struct dp_sdp))
+ return -EINVAL;
+
+ memset(vsc, 0, size);
+
+ if (sdp->sdp_header.HB0 != 0)
+ return -EINVAL;
+
+ if (sdp->sdp_header.HB1 != DP_SDP_VSC)
+ return -EINVAL;
+
+ vsc->sdp_type = sdp->sdp_header.HB1;
+ vsc->revision = sdp->sdp_header.HB2;
+ vsc->length = sdp->sdp_header.HB3;
+
+ if ((sdp->sdp_header.HB2 == 0x2 && sdp->sdp_header.HB3 == 0x8) ||
+ (sdp->sdp_header.HB2 == 0x4 && sdp->sdp_header.HB3 == 0xe)) {
+ /*
+ * - HB2 = 0x2, HB3 = 0x8
+ * VSC SDP supporting 3D stereo + PSR
+ * - HB2 = 0x4, HB3 = 0xe
+ * VSC SDP supporting 3D stereo + PSR2 with Y-coordinate of
+ * first scan line of the SU region (applies to eDP v1.4b
+ * and higher).
+ */
+ return 0;
+ } else if (sdp->sdp_header.HB2 == 0x5 && sdp->sdp_header.HB3 == 0x13) {
+ /*
+ * - HB2 = 0x5, HB3 = 0x13
+ * VSC SDP supporting 3D stereo + PSR2 + Pixel Encoding/Colorimetry
+ * Format.
+ */
+ vsc->pixelformat = (sdp->db[16] >> 4) & 0xf;
+ vsc->colorimetry = sdp->db[16] & 0xf;
+ vsc->dynamic_range = (sdp->db[17] >> 7) & 0x1;
+
+ switch (sdp->db[17] & 0x7) {
+ case 0x0:
+ vsc->bpc = 6;
+ break;
+ case 0x1:
+ vsc->bpc = 8;
+ break;
+ case 0x2:
+ vsc->bpc = 10;
+ break;
+ case 0x3:
+ vsc->bpc = 12;
+ break;
+ case 0x4:
+ vsc->bpc = 16;
+ break;
+ default:
+ MISSING_CASE(sdp->db[17] & 0x7);
+ return -EINVAL;
+ }
+
+ vsc->content_type = sdp->db[18] & 0x7;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
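
As a concrete feel for the DB16-DB18 layout that the pack and unpack helpers above agree on, here is a standalone arithmetic sketch (plain user-space C, not kernel code); the chosen stream parameters are only an example:

#include <stdint.h>
#include <stdio.h>

/* Standalone illustration of the VSC SDP DB16/DB17/DB18 byte layout. */
int main(void)
{
	uint8_t pixelformat  = 0x1; /* YCbCr 4:4:4, DB16[7:4] */
	uint8_t colorimetry  = 0x1; /* ITU-R BT.709, DB16[3:0] */
	uint8_t bpc_code     = 0x2; /* 10 bpc, low bits of DB17 */
	uint8_t cta_range    = 1;   /* CTA dynamic range, DB17[7] */
	uint8_t content_type = 0x3; /* Video, DB18[2:0] */

	uint8_t db16 = (uint8_t)(((pixelformat & 0xf) << 4) | (colorimetry & 0xf));
	uint8_t db17 = (uint8_t)(((cta_range & 0x1) << 7) | (bpc_code & 0x7));
	uint8_t db18 = (uint8_t)(content_type & 0x7);

	/* Expected: DB16=0x11 DB17=0x82 DB18=0x03 */
	printf("DB16=0x%02x DB17=0x%02x DB18=0x%02x\n", db16, db17, db18);
	return 0;
}
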
+
+static int
+intel_dp_hdr_metadata_infoframe_sdp_unpack(struct hdmi_drm_infoframe *drm_infoframe,
+ const void *buffer, size_t size)
+{
+ int ret;
+
+ const struct dp_sdp *sdp = buffer;
+
+ if (size < sizeof(struct dp_sdp))
+ return -EINVAL;
+
+ if (sdp->sdp_header.HB0 != 0)
+ return -EINVAL;
+
+ if (sdp->sdp_header.HB1 != HDMI_INFOFRAME_TYPE_DRM)
+ return -EINVAL;
+
+ /*
+ * Least Significant Eight Bits of (Data Byte Count – 1)
+ * 1Dh (i.e., Data Byte Count = 30 bytes).
+ */
+ if (sdp->sdp_header.HB2 != 0x1D)
+ return -EINVAL;
+
+ /* Most Significant Two Bits of (Data Byte Count – 1), Clear to 00b. */
+ if ((sdp->sdp_header.HB3 & 0x3) != 0)
+ return -EINVAL;
+
+ /* INFOFRAME SDP Version Number */
+ if (((sdp->sdp_header.HB3 >> 2) & 0x3f) != 0x13)
+ return -EINVAL;
+
+ /* CTA Header Byte 2 (INFOFRAME Version Number) */
+ if (sdp->db[0] != 1)
+ return -EINVAL;
+
+ /* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */
+ if (sdp->db[1] != HDMI_DRM_INFOFRAME_SIZE)
+ return -EINVAL;
+
+ ret = hdmi_drm_infoframe_unpack_only(drm_infoframe, &sdp->db[2],
+ HDMI_DRM_INFOFRAME_SIZE);
+
+ return ret;
+}
+
+static void intel_read_dp_vsc_sdp(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_dp_vsc_sdp *vsc)
+{
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ unsigned int type = DP_SDP_VSC;
+ struct dp_sdp sdp = {};
+ int ret;
+
+ /* When PSR is enabled, VSC SDP is handled by PSR routine */
+ if (intel_psr_enabled(intel_dp))
+ return;
+
+ if ((crtc_state->infoframes.enable &
+ intel_hdmi_infoframe_enable(type)) == 0)
+ return;
+
+ intel_dig_port->read_infoframe(encoder, crtc_state, type, &sdp, sizeof(sdp));
+
+ ret = intel_dp_vsc_sdp_unpack(vsc, &sdp, sizeof(sdp));
+
+ if (ret)
+ drm_dbg_kms(&dev_priv->drm, "Failed to unpack DP VSC SDP\n");
+}
+
+static void intel_read_dp_hdr_metadata_infoframe_sdp(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct hdmi_drm_infoframe *drm_infoframe)
+{
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ unsigned int type = HDMI_PACKET_TYPE_GAMUT_METADATA;
+ struct dp_sdp sdp = {};
+ int ret;
+
+ if ((crtc_state->infoframes.enable &
+ intel_hdmi_infoframe_enable(type)) == 0)
return;
- intel_dp_setup_hdr_metadata_infoframe_sdp(intel_dp,
- crtc_state,
- conn_state);
+ intel_dig_port->read_infoframe(encoder, crtc_state, type, &sdp,
+ sizeof(sdp));
+
+ ret = intel_dp_hdr_metadata_infoframe_sdp_unpack(drm_infoframe, &sdp,
+ sizeof(sdp));
+
+ if (ret)
+ drm_dbg_kms(&dev_priv->drm,
+ "Failed to unpack DP HDR Metadata Infoframe SDP\n");
+}
+
+void intel_read_dp_sdp(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ unsigned int type)
+{
+ if (encoder->type != INTEL_OUTPUT_DDI)
+ return;
+
+ switch (type) {
+ case DP_SDP_VSC:
+ intel_read_dp_vsc_sdp(encoder, crtc_state,
+ &crtc_state->infoframes.vsc);
+ break;
+ case HDMI_PACKET_TYPE_GAMUT_METADATA:
+ intel_read_dp_hdr_metadata_infoframe_sdp(encoder, crtc_state,
+ &crtc_state->infoframes.drm.drm);
+ break;
+ default:
+ MISSING_CASE(type);
+ break;
+ }
}
static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
int status = 0;
int test_link_rate;
u8 test_lane_count, test_link_bw;
@@ -4869,7 +5238,7 @@ static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
&test_lane_count);
if (status <= 0) {
- DRM_DEBUG_KMS("Lane count read failed\n");
+ drm_dbg_kms(&i915->drm, "Lane count read failed\n");
return DP_TEST_NAK;
}
test_lane_count &= DP_MAX_LANE_COUNT_MASK;
@@ -4877,7 +5246,7 @@ static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE,
&test_link_bw);
if (status <= 0) {
- DRM_DEBUG_KMS("Link Rate read failed\n");
+ drm_dbg_kms(&i915->drm, "Link Rate read failed\n");
return DP_TEST_NAK;
}
test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw);
@@ -4895,6 +5264,7 @@ static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u8 test_pattern;
u8 test_misc;
__be16 h_width, v_height;
@@ -4904,7 +5274,7 @@ static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN,
&test_pattern);
if (status <= 0) {
- DRM_DEBUG_KMS("Test pattern read failed\n");
+ drm_dbg_kms(&i915->drm, "Test pattern read failed\n");
return DP_TEST_NAK;
}
if (test_pattern != DP_COLOR_RAMP)
@@ -4913,21 +5283,21 @@ static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI,
&h_width, 2);
if (status <= 0) {
- DRM_DEBUG_KMS("H Width read failed\n");
+ drm_dbg_kms(&i915->drm, "H Width read failed\n");
return DP_TEST_NAK;
}
status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI,
&v_height, 2);
if (status <= 0) {
- DRM_DEBUG_KMS("V Height read failed\n");
+ drm_dbg_kms(&i915->drm, "V Height read failed\n");
return DP_TEST_NAK;
}
status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0,
&test_misc);
if (status <= 0) {
- DRM_DEBUG_KMS("TEST MISC read failed\n");
+ drm_dbg_kms(&i915->drm, "TEST MISC read failed\n");
return DP_TEST_NAK;
}
if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB)
@@ -4956,6 +5326,7 @@ static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u8 test_result = DP_TEST_ACK;
struct intel_connector *intel_connector = intel_dp->attached_connector;
struct drm_connector *connector = &intel_connector->base;
@@ -4972,9 +5343,10 @@ static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp)
*/
if (intel_dp->aux.i2c_nack_count > 0 ||
intel_dp->aux.i2c_defer_count > 0)
- DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
- intel_dp->aux.i2c_nack_count,
- intel_dp->aux.i2c_defer_count);
+ drm_dbg_kms(&i915->drm,
+ "EDID read had %d NACKs, %d DEFERs\n",
+ intel_dp->aux.i2c_nack_count,
+ intel_dp->aux.i2c_defer_count);
intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE;
} else {
struct edid *block = intel_connector->detect_edid;
@@ -4986,7 +5358,8 @@ static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp)
if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM,
block->checksum) <= 0)
- DRM_DEBUG_KMS("Failed to write EDID checksum\n");
+ drm_dbg_kms(&i915->drm,
+ "Failed to write EDID checksum\n");
test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED;
@@ -4998,43 +5371,217 @@ static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp)
return test_result;
}
+static u8 intel_dp_prepare_phytest(struct intel_dp *intel_dp)
+{
+ struct drm_dp_phy_test_params *data =
+ &intel_dp->compliance.test_data.phytest;
+
+ if (drm_dp_get_phy_test_pattern(&intel_dp->aux, data)) {
+ DRM_DEBUG_KMS("DP Phy Test pattern AUX read failure\n");
+ return DP_TEST_NAK;
+ }
+
+ /*
+ * link_mst is set to false to avoid executing mst related code
+ * during compliance testing.
+ */
+ intel_dp->link_mst = false;
+
+ return DP_TEST_ACK;
+}
+
+static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_dp_phy_test_params *data =
+ &intel_dp->compliance.test_data.phytest;
+ struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
+ enum pipe pipe = crtc->pipe;
+ u32 pattern_val;
+
+ switch (data->phy_pattern) {
+ case DP_PHY_TEST_PATTERN_NONE:
+ DRM_DEBUG_KMS("Disable Phy Test Pattern\n");
+ intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 0x0);
+ break;
+ case DP_PHY_TEST_PATTERN_D10_2:
+ DRM_DEBUG_KMS("Set D10.2 Phy Test Pattern\n");
+ intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
+ DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_D10_2);
+ break;
+ case DP_PHY_TEST_PATTERN_ERROR_COUNT:
+ DRM_DEBUG_KMS("Set Error Count Phy Test Pattern\n");
+ intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
+ DDI_DP_COMP_CTL_ENABLE |
+ DDI_DP_COMP_CTL_SCRAMBLED_0);
+ break;
+ case DP_PHY_TEST_PATTERN_PRBS7:
+ DRM_DEBUG_KMS("Set PRBS7 Phy Test Pattern\n");
+ intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
+ DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_PRBS7);
+ break;
+ case DP_PHY_TEST_PATTERN_80BIT_CUSTOM:
+ /*
+ * FIXME: Ideally pattern should come from DPCD 0x250. As
+ * current firmware of DPR-100 could not set it, so hardcoding
+ * now for complaince test.
+ */
+ DRM_DEBUG_KMS("Set 80Bit Custom Phy Test Pattern 0x3e0f83e0 0x0f83e0f8 0x0000f83e\n");
+ pattern_val = 0x3e0f83e0;
+ intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 0), pattern_val);
+ pattern_val = 0x0f83e0f8;
+ intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 1), pattern_val);
+ pattern_val = 0x0000f83e;
+ intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 2), pattern_val);
+ intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
+ DDI_DP_COMP_CTL_ENABLE |
+ DDI_DP_COMP_CTL_CUSTOM80);
+ break;
+ case DP_PHY_TEST_PATTERN_CP2520:
+ /*
+ * FIXME: Ideally the pattern should come from DPCD 0x24A. As the
+ * current firmware of the DPR-100 cannot set it, the pattern is
+ * hardcoded for now for compliance testing.
+ */
+ DRM_DEBUG_KMS("Set HBR2 compliance Phy Test Pattern\n");
+ pattern_val = 0xFB;
+ intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
+ DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_HBR2 |
+ pattern_val);
+ break;
+ default:
+ WARN(1, "Invalid Phy Test Pattern\n");
+ }
+}
+
+static void
+intel_dp_autotest_phy_ddi_disable(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
+ enum pipe pipe = crtc->pipe;
+ u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value;
+
+ trans_ddi_func_ctl_value = intel_de_read(dev_priv,
+ TRANS_DDI_FUNC_CTL(pipe));
+ trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe));
+ dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe));
+
+ trans_ddi_func_ctl_value &= ~(TRANS_DDI_FUNC_ENABLE |
+ TGL_TRANS_DDI_PORT_MASK);
+ trans_conf_value &= ~PIPECONF_ENABLE;
+ dp_tp_ctl_value &= ~DP_TP_CTL_ENABLE;
+
+ intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value);
+ intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe),
+ trans_ddi_func_ctl_value);
+ intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value);
+}
+
+static void
+intel_dp_autotest_phy_ddi_enable(struct intel_dp *intel_dp, uint8_t lane_cnt)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ enum port port = intel_dig_port->base.port;
+ struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
+ enum pipe pipe = crtc->pipe;
+ u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value;
+
+ trans_ddi_func_ctl_value = intel_de_read(dev_priv,
+ TRANS_DDI_FUNC_CTL(pipe));
+ trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe));
+ dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe));
+
+ trans_ddi_func_ctl_value |= TRANS_DDI_FUNC_ENABLE |
+ TGL_TRANS_DDI_SELECT_PORT(port);
+ trans_conf_value |= PIPECONF_ENABLE;
+ dp_tp_ctl_value |= DP_TP_CTL_ENABLE;
+
+ intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value);
+ intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value);
+ intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe),
+ trans_ddi_func_ctl_value);
+}
+
+void intel_dp_process_phy_request(struct intel_dp *intel_dp)
+{
+ struct drm_dp_phy_test_params *data =
+ &intel_dp->compliance.test_data.phytest;
+ u8 link_status[DP_LINK_STATUS_SIZE];
+
+ if (!intel_dp_get_link_status(intel_dp, link_status)) {
+ DRM_DEBUG_KMS("failed to get link status\n");
+ return;
+ }
+
+ /* retrieve vswing & pre-emphasis setting */
+ intel_dp_get_adjust_train(intel_dp, link_status);
+
+ intel_dp_autotest_phy_ddi_disable(intel_dp);
+
+ intel_dp_set_signal_levels(intel_dp);
+
+ intel_dp_phy_pattern_update(intel_dp);
+
+ intel_dp_autotest_phy_ddi_enable(intel_dp, data->num_lanes);
+
+ drm_dp_set_phy_test_pattern(&intel_dp->aux, data,
+ link_status[DP_DPCD_REV]);
+}
+
static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
{
- u8 test_result = DP_TEST_NAK;
+ u8 test_result;
+
+ test_result = intel_dp_prepare_phytest(intel_dp);
+ if (test_result != DP_TEST_ACK)
+ DRM_ERROR("Phy test preparation failed\n");
+
+ intel_dp_process_phy_request(intel_dp);
+
return test_result;
}
static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u8 response = DP_TEST_NAK;
u8 request = 0;
int status;
status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request);
if (status <= 0) {
- DRM_DEBUG_KMS("Could not read test request from sink\n");
+ drm_dbg_kms(&i915->drm,
+ "Could not read test request from sink\n");
goto update_status;
}
switch (request) {
case DP_TEST_LINK_TRAINING:
- DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
+ drm_dbg_kms(&i915->drm, "LINK_TRAINING test requested\n");
response = intel_dp_autotest_link_training(intel_dp);
break;
case DP_TEST_LINK_VIDEO_PATTERN:
- DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
+ drm_dbg_kms(&i915->drm, "TEST_PATTERN test requested\n");
response = intel_dp_autotest_video_pattern(intel_dp);
break;
case DP_TEST_LINK_EDID_READ:
- DRM_DEBUG_KMS("EDID test requested\n");
+ drm_dbg_kms(&i915->drm, "EDID test requested\n");
response = intel_dp_autotest_edid(intel_dp);
break;
case DP_TEST_LINK_PHY_TEST_PATTERN:
- DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
+ drm_dbg_kms(&i915->drm, "PHY_PATTERN test requested\n");
response = intel_dp_autotest_phy_pattern(intel_dp);
break;
default:
- DRM_DEBUG_KMS("Invalid test request '%02x'\n", request);
+ drm_dbg_kms(&i915->drm, "Invalid test request '%02x'\n",
+ request);
break;
}
@@ -5044,64 +5591,59 @@ static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
update_status:
status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response);
if (status <= 0)
- DRM_DEBUG_KMS("Could not write test response to sink\n");
+ drm_dbg_kms(&i915->drm,
+ "Could not write test response to sink\n");
}
static int
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
- bool bret;
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ bool need_retrain = false;
- if (intel_dp->is_mst) {
- u8 esi[DP_DPRX_ESI_LEN] = { 0 };
- int ret = 0;
+ if (!intel_dp->is_mst)
+ return -EINVAL;
+
+ WARN_ON_ONCE(intel_dp->active_mst_links < 0);
+
+ for (;;) {
+ u8 esi[DP_DPRX_ESI_LEN] = {};
+ bool bret, handled;
int retry;
- bool handled;
- WARN_ON_ONCE(intel_dp->active_mst_links < 0);
bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
-go_again:
- if (bret == true) {
-
- /* check link status - esi[10] = 0x200c */
- if (intel_dp->active_mst_links > 0 &&
- !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
- DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
- intel_dp_start_link_train(intel_dp);
- intel_dp_stop_link_train(intel_dp);
- }
+ if (!bret) {
+ drm_dbg_kms(&i915->drm,
+ "failed to get ESI - device may have failed\n");
+ return -EINVAL;
+ }
- DRM_DEBUG_KMS("got esi %3ph\n", esi);
- ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
-
- if (handled) {
- for (retry = 0; retry < 3; retry++) {
- int wret;
- wret = drm_dp_dpcd_write(&intel_dp->aux,
- DP_SINK_COUNT_ESI+1,
- &esi[1], 3);
- if (wret == 3) {
- break;
- }
- }
+ /* check link status - esi[10] = 0x200c */
+ if (intel_dp->active_mst_links > 0 && !need_retrain &&
+ !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
+ drm_dbg_kms(&i915->drm,
+ "channel EQ not ok, retraining\n");
+ need_retrain = true;
+ }
- bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
- if (bret == true) {
- DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
- goto go_again;
- }
- } else
- ret = 0;
+ drm_dbg_kms(&i915->drm, "got esi %3ph\n", esi);
- return ret;
- } else {
- DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
- intel_dp->is_mst = false;
- drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
- intel_dp->is_mst);
+ drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
+ if (!handled)
+ break;
+
+ for (retry = 0; retry < 3; retry++) {
+ int wret;
+
+ wret = drm_dp_dpcd_write(&intel_dp->aux,
+ DP_SINK_COUNT_ESI+1,
+ &esi[1], 3);
+ if (wret == 3)
+ break;
}
}
- return -EINVAL;
+
+ return need_retrain;
}
static bool
@@ -5138,20 +5680,102 @@ intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
}
+static bool intel_dp_has_connector(struct intel_dp *intel_dp,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_encoder *encoder;
+ enum pipe pipe;
+
+ if (!conn_state->best_encoder)
+ return false;
+
+ /* SST */
+ encoder = &dp_to_dig_port(intel_dp)->base;
+ if (conn_state->best_encoder == &encoder->base)
+ return true;
+
+ /* MST */
+ for_each_pipe(i915, pipe) {
+ encoder = &intel_dp->mst_encoders[pipe]->base;
+ if (conn_state->best_encoder == &encoder->base)
+ return true;
+ }
+
+ return false;
+}
+
+static int intel_dp_prep_link_retrain(struct intel_dp *intel_dp,
+ struct drm_modeset_acquire_ctx *ctx,
+ u32 *crtc_mask)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct drm_connector_list_iter conn_iter;
+ struct intel_connector *connector;
+ int ret = 0;
+
+ *crtc_mask = 0;
+
+ if (!intel_dp_needs_link_retrain(intel_dp))
+ return 0;
+
+ drm_connector_list_iter_begin(&i915->drm, &conn_iter);
+ for_each_intel_connector_iter(connector, &conn_iter) {
+ struct drm_connector_state *conn_state =
+ connector->base.state;
+ struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+
+ if (!intel_dp_has_connector(intel_dp, conn_state))
+ continue;
+
+ crtc = to_intel_crtc(conn_state->crtc);
+ if (!crtc)
+ continue;
+
+ ret = drm_modeset_lock(&crtc->base.mutex, ctx);
+ if (ret)
+ break;
+
+ crtc_state = to_intel_crtc_state(crtc->base.state);
+
+ drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state));
+
+ if (!crtc_state->hw.active)
+ continue;
+
+ if (conn_state->commit &&
+ !try_wait_for_completion(&conn_state->commit->hw_done))
+ continue;
+
+ *crtc_mask |= drm_crtc_mask(&crtc->base);
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ if (!intel_dp_needs_link_retrain(intel_dp))
+ *crtc_mask = 0;
+
+ return ret;
+}
+
+static bool intel_dp_is_connected(struct intel_dp *intel_dp)
+{
+ struct intel_connector *connector = intel_dp->attached_connector;
+
+ return connector->base.status == connector_status_connected ||
+ intel_dp->is_mst;
+}
+
int intel_dp_retrain_link(struct intel_encoder *encoder,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct intel_connector *connector = intel_dp->attached_connector;
- struct drm_connector_state *conn_state;
- struct intel_crtc_state *crtc_state;
struct intel_crtc *crtc;
+ u32 crtc_mask;
int ret;
- /* FIXME handle the MST connectors as well */
-
- if (!connector || connector->base.status != connector_status_connected)
+ if (!intel_dp_is_connected(intel_dp))
return 0;
ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
@@ -5159,46 +5783,42 @@ int intel_dp_retrain_link(struct intel_encoder *encoder,
if (ret)
return ret;
- conn_state = connector->base.state;
-
- crtc = to_intel_crtc(conn_state->crtc);
- if (!crtc)
- return 0;
-
- ret = drm_modeset_lock(&crtc->base.mutex, ctx);
+ ret = intel_dp_prep_link_retrain(intel_dp, ctx, &crtc_mask);
if (ret)
return ret;
- crtc_state = to_intel_crtc_state(crtc->base.state);
-
- drm_WARN_ON(&dev_priv->drm, !intel_crtc_has_dp_encoder(crtc_state));
-
- if (!crtc_state->hw.active)
+ if (crtc_mask == 0)
return 0;
- if (conn_state->commit &&
- !try_wait_for_completion(&conn_state->commit->hw_done))
- return 0;
+ drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] retraining link\n",
+ encoder->base.base.id, encoder->base.name);
- if (!intel_dp_needs_link_retrain(intel_dp))
- return 0;
+ for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
- /* Suppress underruns caused by re-training */
- intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
- if (crtc_state->has_pch_encoder)
- intel_set_pch_fifo_underrun_reporting(dev_priv,
- intel_crtc_pch_transcoder(crtc), false);
+ /* Suppress underruns caused by re-training */
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
+ if (crtc_state->has_pch_encoder)
+ intel_set_pch_fifo_underrun_reporting(dev_priv,
+ intel_crtc_pch_transcoder(crtc), false);
+ }
intel_dp_start_link_train(intel_dp);
intel_dp_stop_link_train(intel_dp);
- /* Keep underrun reporting disabled until things are stable */
- intel_wait_for_vblank(dev_priv, crtc->pipe);
+ for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
+ /* Keep underrun reporting disabled until things are stable */
+ intel_wait_for_vblank(dev_priv, crtc->pipe);
- intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
- if (crtc_state->has_pch_encoder)
- intel_set_pch_fifo_underrun_reporting(dev_priv,
- intel_crtc_pch_transcoder(crtc), true);
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
+ if (crtc_state->has_pch_encoder)
+ intel_set_pch_fifo_underrun_reporting(dev_priv,
+ intel_crtc_pch_transcoder(crtc), true);
+ }
return 0;
}
@@ -5217,14 +5837,13 @@ int intel_dp_retrain_link(struct intel_encoder *encoder,
*/
static enum intel_hotplug_state
intel_dp_hotplug(struct intel_encoder *encoder,
- struct intel_connector *connector,
- bool irq_received)
+ struct intel_connector *connector)
{
struct drm_modeset_acquire_ctx ctx;
enum intel_hotplug_state state;
int ret;
- state = intel_encoder_hotplug(encoder, connector, irq_received);
+ state = intel_encoder_hotplug(encoder, connector);
drm_modeset_acquire_init(&ctx, 0);
@@ -5248,7 +5867,7 @@ intel_dp_hotplug(struct intel_encoder *encoder,
* Keeping it consistent with intel_ddi_hotplug() and
* intel_hdmi_hotplug().
*/
- if (state == INTEL_HOTPLUG_UNCHANGED && irq_received)
+ if (state == INTEL_HOTPLUG_UNCHANGED && !connector->hotplug_retries)
state = INTEL_HOTPLUG_RETRY;
return state;
@@ -5256,6 +5875,7 @@ intel_dp_hotplug(struct intel_encoder *encoder,
static void intel_dp_check_service_irq(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u8 val;
if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
@@ -5274,7 +5894,7 @@ static void intel_dp_check_service_irq(struct intel_dp *intel_dp)
intel_hdcp_handle_cp_irq(intel_dp->attached_connector);
if (val & DP_SINK_SPECIFIC_IRQ)
- DRM_DEBUG_DRIVER("Sink specific irq unhandled\n");
+ drm_dbg_kms(&i915->drm, "Sink specific irq unhandled\n");
}
/*
@@ -5341,6 +5961,7 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
u8 *dpcd = intel_dp->dpcd;
u8 type;
@@ -5388,7 +6009,7 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
}
/* Anything else is out of spec, warn and ignore */
- DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
+ drm_dbg_kms(&i915->drm, "Broken DP branch device, ignoring\n");
return connector_status_disconnected;
}
@@ -5401,64 +6022,7 @@ edp_detect(struct intel_dp *intel_dp)
static bool ibx_digital_port_connected(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 bit;
-
- switch (encoder->hpd_pin) {
- case HPD_PORT_B:
- bit = SDE_PORTB_HOTPLUG;
- break;
- case HPD_PORT_C:
- bit = SDE_PORTC_HOTPLUG;
- break;
- case HPD_PORT_D:
- bit = SDE_PORTD_HOTPLUG;
- break;
- default:
- MISSING_CASE(encoder->hpd_pin);
- return false;
- }
-
- return intel_de_read(dev_priv, SDEISR) & bit;
-}
-
-static bool cpt_digital_port_connected(struct intel_encoder *encoder)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 bit;
-
- switch (encoder->hpd_pin) {
- case HPD_PORT_B:
- bit = SDE_PORTB_HOTPLUG_CPT;
- break;
- case HPD_PORT_C:
- bit = SDE_PORTC_HOTPLUG_CPT;
- break;
- case HPD_PORT_D:
- bit = SDE_PORTD_HOTPLUG_CPT;
- break;
- default:
- MISSING_CASE(encoder->hpd_pin);
- return false;
- }
-
- return intel_de_read(dev_priv, SDEISR) & bit;
-}
-
-static bool spt_digital_port_connected(struct intel_encoder *encoder)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 bit;
-
- switch (encoder->hpd_pin) {
- case HPD_PORT_A:
- bit = SDE_PORTA_HOTPLUG_SPT;
- break;
- case HPD_PORT_E:
- bit = SDE_PORTE_HOTPLUG_SPT;
- break;
- default:
- return cpt_digital_port_connected(encoder);
- }
+ u32 bit = dev_priv->hotplug.pch_hpd[encoder->hpd_pin];
return intel_de_read(dev_priv, SDEISR) & bit;
}
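
The rewrite above (and the ilk variant further down) drops the per-PCH switch statements in favour of a per-pin bit table filled in once at hotplug setup, so the detect path becomes a single array index. A minimal standalone sketch of that table-driven lookup, with invented pin names and bit values:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	enum hpd_pin { HPD_PORT_A, HPD_PORT_B, HPD_PORT_C, HPD_PORT_D, HPD_NUM_PINS };

	struct hotplug_map { uint32_t live_status_bit[HPD_NUM_PINS]; };

	/* one lookup instead of a per-platform switch */
	static bool port_connected(const struct hotplug_map *map,
				   enum hpd_pin pin, uint32_t isr)
	{
		return isr & map->live_status_bit[pin];
	}

	int main(void)
	{
		struct hotplug_map map = { .live_status_bit = {
			[HPD_PORT_B] = 1u << 4, [HPD_PORT_C] = 1u << 5 } };

		printf("port B connected: %d\n",
		       port_connected(&map, HPD_PORT_B, 1u << 4));
		return 0;
	}
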
@@ -5512,89 +6076,9 @@ static bool gm45_digital_port_connected(struct intel_encoder *encoder)
static bool ilk_digital_port_connected(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ u32 bit = dev_priv->hotplug.hpd[encoder->hpd_pin];
- if (encoder->hpd_pin == HPD_PORT_A)
- return intel_de_read(dev_priv, DEISR) & DE_DP_A_HOTPLUG;
- else
- return ibx_digital_port_connected(encoder);
-}
-
-static bool snb_digital_port_connected(struct intel_encoder *encoder)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
- if (encoder->hpd_pin == HPD_PORT_A)
- return intel_de_read(dev_priv, DEISR) & DE_DP_A_HOTPLUG;
- else
- return cpt_digital_port_connected(encoder);
-}
-
-static bool ivb_digital_port_connected(struct intel_encoder *encoder)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
- if (encoder->hpd_pin == HPD_PORT_A)
- return intel_de_read(dev_priv, DEISR) & DE_DP_A_HOTPLUG_IVB;
- else
- return cpt_digital_port_connected(encoder);
-}
-
-static bool bdw_digital_port_connected(struct intel_encoder *encoder)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
- if (encoder->hpd_pin == HPD_PORT_A)
- return intel_de_read(dev_priv, GEN8_DE_PORT_ISR) & GEN8_PORT_DP_A_HOTPLUG;
- else
- return cpt_digital_port_connected(encoder);
-}
-
-static bool bxt_digital_port_connected(struct intel_encoder *encoder)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 bit;
-
- switch (encoder->hpd_pin) {
- case HPD_PORT_A:
- bit = BXT_DE_PORT_HP_DDIA;
- break;
- case HPD_PORT_B:
- bit = BXT_DE_PORT_HP_DDIB;
- break;
- case HPD_PORT_C:
- bit = BXT_DE_PORT_HP_DDIC;
- break;
- default:
- MISSING_CASE(encoder->hpd_pin);
- return false;
- }
-
- return intel_de_read(dev_priv, GEN8_DE_PORT_ISR) & bit;
-}
-
-static bool intel_combo_phy_connected(struct drm_i915_private *dev_priv,
- enum phy phy)
-{
- if (HAS_PCH_MCC(dev_priv) && phy == PHY_C)
- return intel_de_read(dev_priv, SDEISR) & SDE_TC_HOTPLUG_ICP(PORT_TC1);
-
- return intel_de_read(dev_priv, SDEISR) & SDE_DDI_HOTPLUG_ICP(phy);
-}
-
-static bool icp_digital_port_connected(struct intel_encoder *encoder)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
-
- if (intel_phy_is_combo(dev_priv, phy))
- return intel_combo_phy_connected(dev_priv, phy);
- else if (intel_phy_is_tc(dev_priv, phy))
- return intel_tc_port_connected(dig_port);
- else
- MISSING_CASE(encoder->hpd_pin);
-
- return false;
+ return intel_de_read(dev_priv, DEISR) & bit;
}
/*
@@ -5608,44 +6092,15 @@ static bool icp_digital_port_connected(struct intel_encoder *encoder)
*
* Return %true if port is connected, %false otherwise.
*/
-static bool __intel_digital_port_connected(struct intel_encoder *encoder)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
- if (HAS_GMCH(dev_priv)) {
- if (IS_GM45(dev_priv))
- return gm45_digital_port_connected(encoder);
- else
- return g4x_digital_port_connected(encoder);
- }
-
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
- return icp_digital_port_connected(encoder);
- else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
- return spt_digital_port_connected(encoder);
- else if (IS_GEN9_LP(dev_priv))
- return bxt_digital_port_connected(encoder);
- else if (IS_GEN(dev_priv, 8))
- return bdw_digital_port_connected(encoder);
- else if (IS_GEN(dev_priv, 7))
- return ivb_digital_port_connected(encoder);
- else if (IS_GEN(dev_priv, 6))
- return snb_digital_port_connected(encoder);
- else if (IS_GEN(dev_priv, 5))
- return ilk_digital_port_connected(encoder);
-
- MISSING_CASE(INTEL_GEN(dev_priv));
- return false;
-}
-
bool intel_digital_port_connected(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
bool is_connected = false;
intel_wakeref_t wakeref;
with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref)
- is_connected = __intel_digital_port_connected(encoder);
+ is_connected = dig_port->connected(encoder);
return is_connected;
}
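
intel_digital_port_connected() now calls through dig_port->connected, a hook chosen once when the port is registered (see the intel_dp_init() hunk further down) instead of re-deciding the platform on every detect. A hedged, standalone illustration of that init-time dispatch; none of these names are the driver's:

	#include <stdbool.h>
	#include <stdio.h>

	struct port;
	typedef bool (*connected_fn)(const struct port *port);

	struct port {
		const char *name;
		connected_fn connected;	/* picked once at init time */
	};

	static bool probe_gmch(const struct port *p)  { (void)p; return true;  }
	static bool probe_south(const struct port *p) { (void)p; return false; }

	static void port_init(struct port *p, const char *name, bool has_gmch)
	{
		p->name = name;
		p->connected = has_gmch ? probe_gmch : probe_south;
	}

	int main(void)
	{
		struct port a, b;

		port_init(&a, "A", true);
		port_init(&b, "B", false);
		printf("%s: %d, %s: %d\n",
		       a.name, a.connected(&a), b.name, b.connected(&b));
		return 0;
	}
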
@@ -5860,6 +6315,7 @@ static int intel_dp_get_modes(struct drm_connector *connector)
static int
intel_dp_connector_register(struct drm_connector *connector)
{
+ struct drm_i915_private *i915 = to_i915(connector->dev);
struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
int ret;
@@ -5867,10 +6323,8 @@ intel_dp_connector_register(struct drm_connector *connector)
if (ret)
return ret;
- intel_connector_debugfs_add(connector);
-
- DRM_DEBUG_KMS("registering %s bus for %s\n",
- intel_dp->aux.name, connector->kdev->kobj.name);
+ drm_dbg_kms(&i915->drm, "registering %s bus for %s\n",
+ intel_dp->aux.name, connector->kdev->kobj.name);
intel_dp->aux.dev = connector->kdev;
ret = drm_dp_aux_register(&intel_dp->aux);
@@ -5956,6 +6410,7 @@ static
int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
u8 *an)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(&intel_dig_port->base.base));
static const struct drm_dp_aux_msg msg = {
.request = DP_AUX_NATIVE_WRITE,
@@ -5970,8 +6425,9 @@ int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
dpcd_ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux, DP_AUX_HDCP_AN,
an, DRM_HDCP_AN_LEN);
if (dpcd_ret != DRM_HDCP_AN_LEN) {
- DRM_DEBUG_KMS("Failed to write An over DP/AUX (%zd)\n",
- dpcd_ret);
+ drm_dbg_kms(&i915->drm,
+ "Failed to write An over DP/AUX (%zd)\n",
+ dpcd_ret);
return dpcd_ret >= 0 ? -EIO : dpcd_ret;
}
@@ -5987,17 +6443,19 @@ int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
rxbuf, sizeof(rxbuf),
DP_AUX_CH_CTL_AUX_AKSV_SELECT);
if (ret < 0) {
- DRM_DEBUG_KMS("Write Aksv over DP/AUX failed (%d)\n", ret);
+ drm_dbg_kms(&i915->drm,
+ "Write Aksv over DP/AUX failed (%d)\n", ret);
return ret;
} else if (ret == 0) {
- DRM_DEBUG_KMS("Aksv write over DP/AUX was empty\n");
+ drm_dbg_kms(&i915->drm, "Aksv write over DP/AUX was empty\n");
return -EIO;
}
reply = (rxbuf[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK;
if (reply != DP_AUX_NATIVE_REPLY_ACK) {
- DRM_DEBUG_KMS("Aksv write: no DP_AUX_NATIVE_REPLY_ACK %x\n",
- reply);
+ drm_dbg_kms(&i915->drm,
+ "Aksv write: no DP_AUX_NATIVE_REPLY_ACK %x\n",
+ reply);
return -EIO;
}
return 0;
@@ -6006,11 +6464,14 @@ int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
static int intel_dp_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
u8 *bksv)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
ssize_t ret;
+
ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv,
DRM_HDCP_KSV_LEN);
if (ret != DRM_HDCP_KSV_LEN) {
- DRM_DEBUG_KMS("Read Bksv from DP/AUX failed (%zd)\n", ret);
+ drm_dbg_kms(&i915->drm,
+ "Read Bksv from DP/AUX failed (%zd)\n", ret);
return ret >= 0 ? -EIO : ret;
}
return 0;
@@ -6019,7 +6480,9 @@ static int intel_dp_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
u8 *bstatus)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
ssize_t ret;
+
/*
* For some reason the HDMI and DP HDCP specs call this register
* definition by different names. In the HDMI spec, it's called BSTATUS,
@@ -6028,7 +6491,8 @@ static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BINFO,
bstatus, DRM_HDCP_BSTATUS_LEN);
if (ret != DRM_HDCP_BSTATUS_LEN) {
- DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
+ drm_dbg_kms(&i915->drm,
+ "Read bstatus from DP/AUX failed (%zd)\n", ret);
return ret >= 0 ? -EIO : ret;
}
return 0;
@@ -6038,12 +6502,14 @@ static
int intel_dp_hdcp_read_bcaps(struct intel_digital_port *intel_dig_port,
u8 *bcaps)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
ssize_t ret;
ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BCAPS,
bcaps, 1);
if (ret != 1) {
- DRM_DEBUG_KMS("Read bcaps from DP/AUX failed (%zd)\n", ret);
+ drm_dbg_kms(&i915->drm,
+ "Read bcaps from DP/AUX failed (%zd)\n", ret);
return ret >= 0 ? -EIO : ret;
}
@@ -6069,11 +6535,14 @@ static
int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port,
u8 *ri_prime)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
ssize_t ret;
+
ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME,
ri_prime, DRM_HDCP_RI_LEN);
if (ret != DRM_HDCP_RI_LEN) {
- DRM_DEBUG_KMS("Read Ri' from DP/AUX failed (%zd)\n", ret);
+ drm_dbg_kms(&i915->drm, "Read Ri' from DP/AUX failed (%zd)\n",
+ ret);
return ret >= 0 ? -EIO : ret;
}
return 0;
@@ -6083,12 +6552,15 @@ static
int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port,
bool *ksv_ready)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
ssize_t ret;
u8 bstatus;
+
ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
&bstatus, 1);
if (ret != 1) {
- DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
+ drm_dbg_kms(&i915->drm,
+ "Read bstatus from DP/AUX failed (%zd)\n", ret);
return ret >= 0 ? -EIO : ret;
}
*ksv_ready = bstatus & DP_BSTATUS_READY;
@@ -6099,6 +6571,7 @@ static
int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port,
int num_downstream, u8 *ksv_fifo)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
ssize_t ret;
int i;
@@ -6110,8 +6583,9 @@ int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port,
ksv_fifo + i * DRM_HDCP_KSV_LEN,
len);
if (ret != len) {
- DRM_DEBUG_KMS("Read ksv[%d] from DP/AUX failed (%zd)\n",
- i, ret);
+ drm_dbg_kms(&i915->drm,
+ "Read ksv[%d] from DP/AUX failed (%zd)\n",
+ i, ret);
return ret >= 0 ? -EIO : ret;
}
}
@@ -6122,6 +6596,7 @@ static
int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
int i, u32 *part)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
ssize_t ret;
if (i >= DRM_HDCP_V_PRIME_NUM_PARTS)
@@ -6131,7 +6606,8 @@ int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
DP_AUX_HDCP_V_PRIME(i), part,
DRM_HDCP_V_PRIME_PART_LEN);
if (ret != DRM_HDCP_V_PRIME_PART_LEN) {
- DRM_DEBUG_KMS("Read v'[%d] from DP/AUX failed (%zd)\n", i, ret);
+ drm_dbg_kms(&i915->drm,
+ "Read v'[%d] from DP/AUX failed (%zd)\n", i, ret);
return ret >= 0 ? -EIO : ret;
}
return 0;
@@ -6148,13 +6624,15 @@ int intel_dp_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
static
bool intel_dp_hdcp_check_link(struct intel_digital_port *intel_dig_port)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
ssize_t ret;
u8 bstatus;
ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
&bstatus, 1);
if (ret != 1) {
- DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
+ drm_dbg_kms(&i915->drm,
+ "Read bstatus from DP/AUX failed (%zd)\n", ret);
return false;
}
@@ -6225,17 +6703,19 @@ static const struct hdcp2_dp_msg_data hdcp2_dp_msg_data[] = {
0, 0 },
};
-static inline
-int intel_dp_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port,
- u8 *rx_status)
+static int
+intel_dp_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port,
+ u8 *rx_status)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
ssize_t ret;
ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
DP_HDCP_2_2_REG_RXSTATUS_OFFSET, rx_status,
HDCP_2_2_DP_RXSTATUS_LEN);
if (ret != HDCP_2_2_DP_RXSTATUS_LEN) {
- DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
+ drm_dbg_kms(&i915->drm,
+ "Read bstatus from DP/AUX failed (%zd)\n", ret);
return ret >= 0 ? -EIO : ret;
}
@@ -6279,6 +6759,7 @@ static ssize_t
intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
const struct hdcp2_dp_msg_data *hdcp2_msg_data)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
struct intel_dp *dp = &intel_dig_port->dp;
struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
u8 msg_id = hdcp2_msg_data->msg_id;
@@ -6310,8 +6791,9 @@ intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
}
if (ret)
- DRM_DEBUG_KMS("msg_id %d, ret %d, timeout(mSec): %d\n",
- hdcp2_msg_data->msg_id, ret, timeout);
+ drm_dbg_kms(&i915->drm,
+ "msg_id %d, ret %d, timeout(mSec): %d\n",
+ hdcp2_msg_data->msg_id, ret, timeout);
return ret;
}
@@ -6397,6 +6879,7 @@ static
int intel_dp_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
u8 msg_id, void *buf, size_t size)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
unsigned int offset;
u8 *byte = buf;
ssize_t ret, bytes_to_recv, len;
@@ -6430,7 +6913,8 @@ int intel_dp_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, offset,
(void *)byte, len);
if (ret < 0) {
- DRM_DEBUG_KMS("msg_id %d, ret %zd\n", msg_id, ret);
+ drm_dbg_kms(&i915->drm, "msg_id %d, ret %zd\n",
+ msg_id, ret);
return ret;
}
@@ -6721,7 +7205,11 @@ static int intel_dp_connector_atomic_check(struct drm_connector *conn,
if (ret)
return ret;
- if (INTEL_GEN(dev_priv) < 11)
+ /*
+ * We don't enable port sync on BDW due to missing w/as and
+ * due to not having adjusted the modeset sequence appropriately.
+ */
+ if (INTEL_GEN(dev_priv) < 9)
return 0;
if (!intel_connector_needs_modeset(state, conn))
@@ -6760,28 +7248,45 @@ static const struct drm_encoder_funcs intel_dp_enc_funcs = {
.destroy = intel_dp_encoder_destroy,
};
+static bool intel_edp_have_power(struct intel_dp *intel_dp)
+{
+ intel_wakeref_t wakeref;
+ bool have_power = false;
+
+ with_pps_lock(intel_dp, wakeref) {
+ have_power = edp_have_panel_power(intel_dp) &&
+ edp_have_panel_vdd(intel_dp);
+ }
+
+ return have_power;
+}
+
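
intel_edp_have_power() above leans on the with_pps_lock() for-loop macro, which acquires the lock (and a wakeref) on entry and releases it when the body exits. A toy, illustrative-only version of that scoped-acquire idiom:

	#include <stdbool.h>
	#include <stdio.h>

	static int acquire(void) { puts("acquire"); return 1; }
	static void release(int ref) { (void)ref; puts("release"); }

	/* run the body once with the resource held, release on the way out */
	#define with_resource(ref) \
		for ((ref) = acquire(); (ref); release(ref), (ref) = 0)

	int main(void)
	{
		int ref;
		bool have_power = false;

		with_resource(ref) {
			have_power = true;	/* checks done under the lock */
		}
		printf("have_power=%d\n", have_power);
		return 0;
	}
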
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
struct intel_dp *intel_dp = &intel_dig_port->dp;
- if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
+ if (intel_dig_port->base.type == INTEL_OUTPUT_EDP &&
+ (long_hpd || !intel_edp_have_power(intel_dp))) {
/*
- * vdd off can generate a long pulse on eDP which
+ * vdd off can generate a long/short pulse on eDP which
* would require vdd on to handle it, and thus we
* would end up in an endless cycle of
- * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
+ * "vdd off -> long/short hpd -> vdd on -> detect -> vdd off -> ..."
*/
- DRM_DEBUG_KMS("ignoring long hpd on eDP [ENCODER:%d:%s]\n",
- intel_dig_port->base.base.base.id,
- intel_dig_port->base.base.name);
+ drm_dbg_kms(&i915->drm,
+ "ignoring %s hpd on eDP [ENCODER:%d:%s]\n",
+ long_hpd ? "long" : "short",
+ intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name);
return IRQ_HANDLED;
}
- DRM_DEBUG_KMS("got hpd irq on [ENCODER:%d:%s] - %s\n",
- intel_dig_port->base.base.base.id,
- intel_dig_port->base.base.name,
- long_hpd ? "long" : "short");
+ drm_dbg_kms(&i915->drm, "got hpd irq on [ENCODER:%d:%s] - %s\n",
+ intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name,
+ long_hpd ? "long" : "short");
if (long_hpd) {
intel_dp->reset_link_params = true;
@@ -6789,18 +7294,25 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
}
if (intel_dp->is_mst) {
- if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
+ switch (intel_dp_check_mst_status(intel_dp)) {
+ case -EINVAL:
/*
* If we were in MST mode, and device is not
* there, get out of MST mode
*/
- DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
- intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
+ drm_dbg_kms(&i915->drm,
+ "MST device may have disappeared %d vs %d\n",
+ intel_dp->is_mst,
+ intel_dp->mst_mgr.mst_state);
intel_dp->is_mst = false;
drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
intel_dp->is_mst);
return IRQ_NONE;
+ case 1:
+ return IRQ_NONE;
+ default:
+ break;
}
}
@@ -7831,6 +8343,23 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
intel_encoder->post_disable = g4x_post_disable_dp;
}
+ if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
+ (HAS_PCH_CPT(dev_priv) && port != PORT_A))
+ intel_dig_port->dp.set_link_train = cpt_set_link_train;
+ else
+ intel_dig_port->dp.set_link_train = g4x_set_link_train;
+
+ if (IS_CHERRYVIEW(dev_priv))
+ intel_dig_port->dp.set_signal_levels = chv_set_signal_levels;
+ else if (IS_VALLEYVIEW(dev_priv))
+ intel_dig_port->dp.set_signal_levels = vlv_set_signal_levels;
+ else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
+ intel_dig_port->dp.set_signal_levels = ivb_cpu_edp_set_signal_levels;
+ else if (IS_GEN(dev_priv, 6) && port == PORT_A)
+ intel_dig_port->dp.set_signal_levels = snb_cpu_edp_set_signal_levels;
+ else
+ intel_dig_port->dp.set_signal_levels = g4x_set_signal_levels;
+
intel_dig_port->dp.output_reg = output_reg;
intel_dig_port->max_lanes = 4;
intel_dig_port->dp.regs.dp_tp_ctl = DP_TP_CTL(port);
@@ -7851,6 +8380,18 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
+ if (HAS_GMCH(dev_priv)) {
+ if (IS_GM45(dev_priv))
+ intel_dig_port->connected = gm45_digital_port_connected;
+ else
+ intel_dig_port->connected = g4x_digital_port_connected;
+ } else {
+ if (port == PORT_A)
+ intel_dig_port->connected = ilk_digital_port_connected;
+ else
+ intel_dig_port->connected = ibx_digital_port_connected;
+ }
+
if (port != PORT_A)
intel_infoframe_init(intel_dig_port);
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index 0c7be8ed1423..1702959ca079 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -16,6 +16,7 @@ struct drm_connector_state;
struct drm_encoder;
struct drm_i915_private;
struct drm_modeset_acquire_ctx;
+struct drm_dp_vsc_sdp;
struct intel_connector;
struct intel_crtc_state;
struct intel_digital_port;
@@ -108,13 +109,21 @@ int intel_dp_link_required(int pixel_clock, int bpp);
int intel_dp_max_data_rate(int max_link_clock, int max_lanes);
bool intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
-void intel_dp_vsc_enable(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state);
-void intel_dp_hdr_metadata_enable(struct intel_dp *intel_dp,
+void intel_dp_compute_psr_vsc_sdp(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state);
+ const struct drm_connector_state *conn_state,
+ struct drm_dp_vsc_sdp *vsc);
+void intel_write_dp_vsc_sdp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ struct drm_dp_vsc_sdp *vsc);
+void intel_dp_set_infoframes(struct intel_encoder *encoder, bool enable,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+void intel_read_dp_sdp(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ unsigned int type);
bool intel_digital_port_connected(struct intel_encoder *encoder);
+void intel_dp_process_phy_request(struct intel_dp *intel_dp);
static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
{
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index dbfa6895795b..0722540d64ad 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -27,6 +27,7 @@
static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u8 reg_val = 0;
/* Early return when display use other mechanism to enable backlight. */
@@ -35,8 +36,8 @@ static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
if (drm_dp_dpcd_readb(&intel_dp->aux, DP_EDP_DISPLAY_CONTROL_REGISTER,
&reg_val) < 0) {
- DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n",
- DP_EDP_DISPLAY_CONTROL_REGISTER);
+ drm_dbg_kms(&i915->drm, "Failed to read DPCD register 0x%x\n",
+ DP_EDP_DISPLAY_CONTROL_REGISTER);
return;
}
if (enable)
@@ -46,8 +47,8 @@ static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_EDP_DISPLAY_CONTROL_REGISTER,
reg_val) != 1) {
- DRM_DEBUG_KMS("Failed to %s aux backlight\n",
- enable ? "enable" : "disable");
+ drm_dbg_kms(&i915->drm, "Failed to %s aux backlight\n",
+ enable ? "enable" : "disable");
}
}
@@ -58,6 +59,7 @@ static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
static u32 intel_dp_aux_get_backlight(struct intel_connector *connector)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u8 read_val[2] = { 0x0 };
u8 mode_reg;
u16 level = 0;
@@ -65,8 +67,9 @@ static u32 intel_dp_aux_get_backlight(struct intel_connector *connector)
if (drm_dp_dpcd_readb(&intel_dp->aux,
DP_EDP_BACKLIGHT_MODE_SET_REGISTER,
&mode_reg) != 1) {
- DRM_DEBUG_KMS("Failed to read the DPCD register 0x%x\n",
- DP_EDP_BACKLIGHT_MODE_SET_REGISTER);
+ drm_dbg_kms(&i915->drm,
+ "Failed to read the DPCD register 0x%x\n",
+ DP_EDP_BACKLIGHT_MODE_SET_REGISTER);
return 0;
}
@@ -80,8 +83,8 @@ static u32 intel_dp_aux_get_backlight(struct intel_connector *connector)
if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB,
&read_val, sizeof(read_val)) < 0) {
- DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n",
- DP_EDP_BACKLIGHT_BRIGHTNESS_MSB);
+ drm_dbg_kms(&i915->drm, "Failed to read DPCD register 0x%x\n",
+ DP_EDP_BACKLIGHT_BRIGHTNESS_MSB);
return 0;
}
level = read_val[0];
@@ -100,6 +103,7 @@ intel_dp_aux_set_backlight(const struct drm_connector_state *conn_state, u32 lev
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u8 vals[2] = { 0x0 };
vals[0] = level;
@@ -111,7 +115,8 @@ intel_dp_aux_set_backlight(const struct drm_connector_state *conn_state, u32 lev
}
if (drm_dp_dpcd_write(&intel_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB,
vals, sizeof(vals)) < 0) {
- DRM_DEBUG_KMS("Failed to write aux backlight level\n");
+ drm_dbg_kms(&i915->drm,
+ "Failed to write aux backlight level\n");
return;
}
}
@@ -133,7 +138,8 @@ static bool intel_dp_aux_set_pwm_freq(struct intel_connector *connector)
freq = dev_priv->vbt.backlight.pwm_freq_hz;
if (!freq) {
- DRM_DEBUG_KMS("Use panel default backlight frequency\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Use panel default backlight frequency\n");
return false;
}
@@ -146,13 +152,14 @@ static bool intel_dp_aux_set_pwm_freq(struct intel_connector *connector)
fxp_max = DIV_ROUND_CLOSEST(fxp * 5, 4);
if (fxp_min > fxp_actual || fxp_actual > fxp_max) {
- DRM_DEBUG_KMS("Actual frequency out of range\n");
+ drm_dbg_kms(&dev_priv->drm, "Actual frequency out of range\n");
return false;
}
if (drm_dp_dpcd_writeb(&intel_dp->aux,
DP_EDP_BACKLIGHT_FREQ_SET, (u8) f) < 0) {
- DRM_DEBUG_KMS("Failed to write aux backlight freq\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Failed to write aux backlight freq\n");
return false;
}
return true;
@@ -163,13 +170,14 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct intel_panel *panel = &connector->panel;
u8 dpcd_buf, new_dpcd_buf, edp_backlight_mode;
if (drm_dp_dpcd_readb(&intel_dp->aux,
DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &dpcd_buf) != 1) {
- DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n",
- DP_EDP_BACKLIGHT_MODE_SET_REGISTER);
+ drm_dbg_kms(&i915->drm, "Failed to read DPCD register 0x%x\n",
+ DP_EDP_BACKLIGHT_MODE_SET_REGISTER);
return;
}
@@ -186,7 +194,8 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st
if (drm_dp_dpcd_writeb(&intel_dp->aux,
DP_EDP_PWMGEN_BIT_COUNT,
panel->backlight.pwmgen_bit_count) < 0)
- DRM_DEBUG_KMS("Failed to write aux pwmgen bit count\n");
+ drm_dbg_kms(&i915->drm,
+ "Failed to write aux pwmgen bit count\n");
break;
@@ -203,7 +212,8 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st
if (new_dpcd_buf != dpcd_buf) {
if (drm_dp_dpcd_writeb(&intel_dp->aux,
DP_EDP_BACKLIGHT_MODE_SET_REGISTER, new_dpcd_buf) < 0) {
- DRM_DEBUG_KMS("Failed to write aux backlight mode\n");
+ drm_dbg_kms(&i915->drm,
+ "Failed to write aux backlight mode\n");
}
}
@@ -237,9 +247,11 @@ static u32 intel_dp_aux_calc_max_backlight(struct intel_connector *connector)
* minimum value will applied automatically. So no need to check that.
*/
freq = i915->vbt.backlight.pwm_freq_hz;
- DRM_DEBUG_KMS("VBT defined backlight frequency %u Hz\n", freq);
+ drm_dbg_kms(&i915->drm, "VBT defined backlight frequency %u Hz\n",
+ freq);
if (!freq) {
- DRM_DEBUG_KMS("Use panel default backlight frequency\n");
+ drm_dbg_kms(&i915->drm,
+ "Use panel default backlight frequency\n");
return max_backlight;
}
@@ -254,12 +266,14 @@ static u32 intel_dp_aux_calc_max_backlight(struct intel_connector *connector)
*/
if (drm_dp_dpcd_readb(&intel_dp->aux,
DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN, &pn_min) != 1) {
- DRM_DEBUG_KMS("Failed to read pwmgen bit count cap min\n");
+ drm_dbg_kms(&i915->drm,
+ "Failed to read pwmgen bit count cap min\n");
return max_backlight;
}
if (drm_dp_dpcd_readb(&intel_dp->aux,
DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX, &pn_max) != 1) {
- DRM_DEBUG_KMS("Failed to read pwmgen bit count cap max\n");
+ drm_dbg_kms(&i915->drm,
+ "Failed to read pwmgen bit count cap max\n");
return max_backlight;
}
pn_min &= DP_EDP_PWMGEN_BIT_COUNT_MASK;
@@ -268,7 +282,8 @@ static u32 intel_dp_aux_calc_max_backlight(struct intel_connector *connector)
fxp_min = DIV_ROUND_CLOSEST(fxp * 3, 4);
fxp_max = DIV_ROUND_CLOSEST(fxp * 5, 4);
if (fxp_min < (1 << pn_min) || (255 << pn_max) < fxp_max) {
- DRM_DEBUG_KMS("VBT defined backlight frequency out of range\n");
+ drm_dbg_kms(&i915->drm,
+ "VBT defined backlight frequency out of range\n");
return max_backlight;
}
@@ -279,10 +294,11 @@ static u32 intel_dp_aux_calc_max_backlight(struct intel_connector *connector)
break;
}
- DRM_DEBUG_KMS("Using eDP pwmgen bit count of %d\n", pn);
+ drm_dbg_kms(&i915->drm, "Using eDP pwmgen bit count of %d\n", pn);
if (drm_dp_dpcd_writeb(&intel_dp->aux,
DP_EDP_PWMGEN_BIT_COUNT, pn) < 0) {
- DRM_DEBUG_KMS("Failed to write aux pwmgen bit count\n");
+ drm_dbg_kms(&i915->drm,
+ "Failed to write aux pwmgen bit count\n");
return max_backlight;
}
panel->backlight.pwmgen_bit_count = pn;
@@ -312,6 +328,7 @@ static bool
intel_dp_aux_display_control_capable(struct intel_connector *connector)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
/* Check the eDP Display control capabilities registers to determine if
* the panel can support backlight control over the aux channel
@@ -319,7 +336,7 @@ intel_dp_aux_display_control_capable(struct intel_connector *connector)
if (intel_dp->edp_dpcd[1] & DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP &&
(intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_AUX_SET_CAP) &&
!(intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_PWM_PIN_CAP)) {
- DRM_DEBUG_KMS("AUX Backlight Control Supported!\n");
+ drm_dbg_kms(&i915->drm, "AUX Backlight Control Supported!\n");
return true;
}
return false;
@@ -329,8 +346,7 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector)
{
struct intel_panel *panel = &intel_connector->panel;
struct intel_dp *intel_dp = enc_to_intel_dp(intel_connector->encoder);
- struct drm_device *dev = intel_connector->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
if (i915_modparams.enable_dpcd_backlight == 0 ||
!intel_dp_aux_display_control_capable(intel_connector))
@@ -340,18 +356,18 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector)
* There are a lot of machines that don't advertise the backlight
* control interface to use properly in their VBIOS, :\
*/
- if (dev_priv->vbt.backlight.type !=
+ if (i915->vbt.backlight.type !=
INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE &&
i915_modparams.enable_dpcd_backlight != 1 &&
!drm_dp_has_quirk(&intel_dp->desc, intel_dp->edid_quirks,
DP_QUIRK_FORCE_DPCD_BACKLIGHT)) {
- DRM_DEV_INFO(dev->dev,
- "Panel advertises DPCD backlight support, but "
- "VBT disagrees. If your backlight controls "
- "don't work try booting with "
- "i915.enable_dpcd_backlight=1. If your machine "
- "needs this, please file a _new_ bug report on "
- "drm/i915, see " FDO_BUG_URL " for details.\n");
+ drm_info(&i915->drm,
+ "Panel advertises DPCD backlight support, but "
+ "VBT disagrees. If your backlight controls "
+ "don't work try booting with "
+ "i915.enable_dpcd_backlight=1. If your machine "
+ "needs this, please file a _new_ bug report on "
+ "drm/i915, see " FDO_BUG_URL " for details.\n");
return -ENODEV;
}
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index a7defb37ab00..e4f1843170b7 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -34,9 +34,8 @@ intel_dp_dump_link_status(const u8 link_status[DP_LINK_STATUS_SIZE])
link_status[3], link_status[4], link_status[5]);
}
-static void
-intel_get_adjust_train(struct intel_dp *intel_dp,
- const u8 link_status[DP_LINK_STATUS_SIZE])
+void intel_dp_get_adjust_train(struct intel_dp *intel_dp,
+ const u8 link_status[DP_LINK_STATUS_SIZE])
{
u8 v = 0;
u8 p = 0;
@@ -219,7 +218,7 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
/* Update training set as requested by target */
- intel_get_adjust_train(intel_dp, link_status);
+ intel_dp_get_adjust_train(intel_dp, link_status);
if (!intel_dp_update_link_train(intel_dp)) {
drm_err(&i915->drm,
"failed to update link training\n");
@@ -338,7 +337,7 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
}
/* Update training set as requested by target */
- intel_get_adjust_train(intel_dp, link_status);
+ intel_dp_get_adjust_train(intel_dp, link_status);
if (!intel_dp_update_link_train(intel_dp)) {
drm_err(&i915->drm,
"failed to update link training\n");
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.h b/drivers/gpu/drm/i915/display/intel_dp_link_training.h
index 174566adcc92..01f1dabbb060 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.h
@@ -6,8 +6,12 @@
#ifndef __INTEL_DP_LINK_TRAINING_H__
#define __INTEL_DP_LINK_TRAINING_H__
+#include <drm/drm_dp_helper.h>
+
struct intel_dp;
+void intel_dp_get_adjust_train(struct intel_dp *intel_dp,
+ const u8 link_status[DP_LINK_STATUS_SIZE]);
void intel_dp_start_link_train(struct intel_dp *intel_dp);
void intel_dp_stop_link_train(struct intel_dp *intel_dp);
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 44f3fd251ca1..d18b406f2a7d 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -47,9 +47,9 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
struct intel_dp *intel_dp = &intel_mst->primary->dp;
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
- void *port = connector->port;
bool constant_n = drm_dp_has_quirk(&intel_dp->desc, 0,
DP_DPCD_QUIRK_CONSTANT_N);
int bpp, slots = -EINVAL;
@@ -65,7 +65,8 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
false);
slots = drm_dp_atomic_find_vcpi_slots(state, &intel_dp->mst_mgr,
- port, crtc_state->pbn, 0);
+ connector->port,
+ crtc_state->pbn, 0);
if (slots == -EDEADLK)
return slots;
if (slots >= 0)
@@ -73,7 +74,8 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
}
if (slots < 0) {
- DRM_DEBUG_KMS("failed finding vcpi slots:%d\n", slots);
+ drm_dbg_kms(&i915->drm, "failed finding vcpi slots:%d\n",
+ slots);
return slots;
}
@@ -88,56 +90,10 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
return 0;
}
-/*
- * Iterate over all connectors and return the smallest transcoder in the MST
- * stream
- */
-static enum transcoder
-intel_dp_mst_master_trans_compute(struct intel_atomic_state *state,
- struct intel_dp *mst_port)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct intel_digital_connector_state *conn_state;
- struct intel_connector *connector;
- enum pipe ret = I915_MAX_PIPES;
- int i;
-
- if (INTEL_GEN(dev_priv) < 12)
- return INVALID_TRANSCODER;
-
- for_each_new_intel_connector_in_state(state, connector, conn_state, i) {
- struct intel_crtc_state *crtc_state;
- struct intel_crtc *crtc;
-
- if (connector->mst_port != mst_port || !conn_state->base.crtc)
- continue;
-
- crtc = to_intel_crtc(conn_state->base.crtc);
- crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
- if (!crtc_state->uapi.active)
- continue;
-
- /*
- * Using crtc->pipe because crtc_state->cpu_transcoder is
- * computed, so others CRTCs could have non-computed
- * cpu_transcoder
- */
- if (crtc->pipe < ret)
- ret = crtc->pipe;
- }
-
- if (ret == I915_MAX_PIPES)
- return INVALID_TRANSCODER;
-
- /* Simple cast works because TGL don't have a eDP transcoder */
- return (enum transcoder)ret;
-}
-
static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
- struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
struct intel_dp *intel_dp = &intel_mst->primary->dp;
@@ -147,7 +103,6 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
to_intel_digital_connector_state(conn_state);
const struct drm_display_mode *adjusted_mode =
&pipe_config->hw.adjusted_mode;
- void *port = connector->port;
struct link_config_limits limits;
int ret;
@@ -158,8 +113,7 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
pipe_config->has_pch_encoder = false;
if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
- pipe_config->has_audio =
- drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, port);
+ pipe_config->has_audio = connector->port->has_audio;
else
pipe_config->has_audio =
intel_conn_state->force_audio == HDMI_AUDIO_ON;
@@ -201,7 +155,56 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
intel_ddi_compute_min_voltage_level(dev_priv, pipe_config);
- pipe_config->mst_master_transcoder = intel_dp_mst_master_trans_compute(state, intel_dp);
+ return 0;
+}
+
+/*
+ * Iterate over all connectors and return a mask of
+ * all CPU transcoders streaming over the same DP link.
+ */
+static unsigned int
+intel_dp_mst_transcoder_mask(struct intel_atomic_state *state,
+ struct intel_dp *mst_port)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ const struct intel_digital_connector_state *conn_state;
+ struct intel_connector *connector;
+ u8 transcoders = 0;
+ int i;
+
+ if (INTEL_GEN(dev_priv) < 12)
+ return 0;
+
+ for_each_new_intel_connector_in_state(state, connector, conn_state, i) {
+ const struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+
+ if (connector->mst_port != mst_port || !conn_state->base.crtc)
+ continue;
+
+ crtc = to_intel_crtc(conn_state->base.crtc);
+ crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
+
+ if (!crtc_state->hw.active)
+ continue;
+
+ transcoders |= BIT(crtc_state->cpu_transcoder);
+ }
+
+ return transcoders;
+}
+
+static int intel_dp_mst_compute_config_late(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state);
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
+ struct intel_dp *intel_dp = &intel_mst->primary->dp;
+
+ /* lowest numbered transcoder will be designated master */
+ crtc_state->mst_master_transcoder =
+ ffs(intel_dp_mst_transcoder_mask(state, intel_dp)) - 1;
return 0;
}
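
intel_dp_mst_compute_config_late() collects one bit per CPU transcoder streaming over the same MST link and then takes the lowest set bit as the master transcoder via ffs() - 1. The same arithmetic in a small standalone program, with invented transcoder numbers:

	#include <stdio.h>
	#include <strings.h>	/* ffs() */

	int main(void)
	{
		unsigned int mask = 0;
		int active[] = { 3, 1, 2 };	/* pretend these transcoders are active */

		for (unsigned int i = 0; i < sizeof(active) / sizeof(active[0]); i++)
			mask |= 1u << active[i];

		/* ffs() is 1-based, so subtract one; -1 would mean "none" */
		printf("mask=0x%x master=%d\n", mask, ffs(mask) - 1);
		return 0;
	}
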
@@ -313,7 +316,8 @@ intel_dp_mst_atomic_check(struct drm_connector *connector,
return ret;
}
-static void intel_mst_disable_dp(struct intel_encoder *encoder,
+static void intel_mst_disable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -322,22 +326,25 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder,
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct intel_connector *connector =
to_intel_connector(old_conn_state->connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
int ret;
- DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
+ drm_dbg_kms(&i915->drm, "active links %d\n",
+ intel_dp->active_mst_links);
drm_dp_mst_reset_vcpi_slots(&intel_dp->mst_mgr, connector->port);
ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
if (ret) {
- DRM_DEBUG_KMS("failed to update payload %d\n", ret);
+ drm_dbg_kms(&i915->drm, "failed to update payload %d\n", ret);
}
if (old_crtc_state->has_audio)
intel_audio_codec_disable(encoder,
old_crtc_state, old_conn_state);
}
-static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
+static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -371,7 +378,8 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
DP_TP_STATUS_ACT_SENT, 1))
- DRM_ERROR("Timed out waiting for ACT sent when disabling\n");
+ drm_err(&dev_priv->drm,
+ "Timed out waiting for ACT sent when disabling\n");
drm_dp_check_act_status(&intel_dp->mst_mgr);
drm_dp_mst_deallocate_vcpi(&intel_dp->mst_mgr, connector->port);
@@ -402,13 +410,15 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
intel_mst->connector = NULL;
if (last_mst_stream)
- intel_dig_port->base.post_disable(&intel_dig_port->base,
+ intel_dig_port->base.post_disable(state, &intel_dig_port->base,
old_crtc_state, NULL);
- DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
+ drm_dbg_kms(&dev_priv->drm, "active links %d\n",
+ intel_dp->active_mst_links);
}
-static void intel_mst_pre_pll_enable_dp(struct intel_encoder *encoder,
+static void intel_mst_pre_pll_enable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -417,11 +427,12 @@ static void intel_mst_pre_pll_enable_dp(struct intel_encoder *encoder,
struct intel_dp *intel_dp = &intel_dig_port->dp;
if (intel_dp->active_mst_links == 0)
- intel_dig_port->base.pre_pll_enable(&intel_dig_port->base,
+ intel_dig_port->base.pre_pll_enable(state, &intel_dig_port->base,
pipe_config, NULL);
}
-static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
+static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -445,7 +456,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
INTEL_GEN(dev_priv) >= 12 && first_mst_stream &&
!intel_dp_mst_is_master_trans(pipe_config));
- DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
+ drm_dbg_kms(&dev_priv->drm, "active links %d\n",
+ intel_dp->active_mst_links);
if (first_mst_stream)
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
@@ -453,7 +465,7 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port, true);
if (first_mst_stream)
- intel_dig_port->base.pre_enable(&intel_dig_port->base,
+ intel_dig_port->base.pre_enable(state, &intel_dig_port->base,
pipe_config, NULL);
ret = drm_dp_mst_allocate_vcpi(&intel_dp->mst_mgr,
@@ -461,7 +473,7 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
pipe_config->pbn,
pipe_config->dp_m_n.tu);
if (!ret)
- DRM_ERROR("failed to allocate vcpi\n");
+ drm_err(&dev_priv->drm, "failed to allocate vcpi\n");
intel_dp->active_mst_links++;
temp = intel_de_read(dev_priv, intel_dp->regs.dp_tp_status);
@@ -477,14 +489,15 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
* here for the following ones.
*/
if (INTEL_GEN(dev_priv) < 12 || !first_mst_stream)
- intel_ddi_enable_pipe_clock(pipe_config);
+ intel_ddi_enable_pipe_clock(encoder, pipe_config);
intel_ddi_set_dp_msa(pipe_config, conn_state);
intel_dp_set_m_n(pipe_config, M1_N1);
}
-static void intel_mst_enable_dp(struct intel_encoder *encoder,
+static void intel_mst_enable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -495,19 +508,23 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder,
drm_WARN_ON(&dev_priv->drm, pipe_config->has_pch_encoder);
- intel_enable_pipe(pipe_config);
-
- intel_crtc_vblank_on(pipe_config);
+ intel_ddi_enable_transcoder_func(encoder, pipe_config);
- DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
+ drm_dbg_kms(&dev_priv->drm, "active links %d\n",
+ intel_dp->active_mst_links);
if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
DP_TP_STATUS_ACT_SENT, 1))
- DRM_ERROR("Timed out waiting for ACT sent\n");
+ drm_err(&dev_priv->drm, "Timed out waiting for ACT sent\n");
drm_dp_check_act_status(&intel_dp->mst_mgr);
drm_dp_update_payload_part2(&intel_dp->mst_mgr);
+
+ intel_enable_pipe(pipe_config);
+
+ intel_crtc_vblank_on(pipe_config);
+
if (pipe_config->has_audio)
intel_audio_codec_enable(encoder, pipe_config, conn_state);
}
@@ -786,6 +803,7 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum
intel_encoder->pipe_mask = ~0;
intel_encoder->compute_config = intel_dp_mst_compute_config;
+ intel_encoder->compute_config_late = intel_dp_mst_compute_config_late;
intel_encoder->disable = intel_mst_disable_dp;
intel_encoder->post_disable = intel_mst_post_disable_dp;
intel_encoder->pre_pll_enable = intel_mst_pre_pll_enable_dp;
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index 2d47f1f756a2..b45185b80bec 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -80,7 +80,7 @@ intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
{
struct intel_atomic_state *state = to_intel_atomic_state(s);
- WARN_ON(!drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
+ drm_WARN_ON(s->dev, !drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
if (!state->dpll_set) {
state->dpll_set = true;
@@ -979,7 +979,7 @@ hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- if (WARN_ON(crtc_state->port_clock / 2 != 135000))
+ if (drm_WARN_ON(crtc->base.dev, crtc_state->port_clock / 2 != 135000))
return NULL;
crtc_state->dpll_hw_state.spll = SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz |
@@ -1616,7 +1616,7 @@ static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
dco_freq += ((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
ref_clock / 0x8000;
- if (WARN_ON(p0 == 0 || p1 == 0 || p2 == 0))
+ if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
return 0;
return dco_freq / (p0 * p1 * p2 * 5);
@@ -2074,7 +2074,7 @@ bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
clk_div->p1 = best_clock.p1;
clk_div->p2 = best_clock.p2;
- WARN_ON(best_clock.m1 != 2);
+ drm_WARN_ON(&i915->drm, best_clock.m1 != 2);
clk_div->n = best_clock.n;
clk_div->m2_int = best_clock.m2 >> 22;
clk_div->m2_frac = best_clock.m2 & ((1 << 22) - 1);
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
index d7a6bf2277df..29fec6a92d17 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.c
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -34,7 +34,7 @@
#define DSB_BYTE_EN_SHIFT 20
#define DSB_REG_VALUE_MASK 0xfffff
-static inline bool is_dsb_busy(struct intel_dsb *dsb)
+static bool is_dsb_busy(struct intel_dsb *dsb)
{
struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -43,7 +43,7 @@ static inline bool is_dsb_busy(struct intel_dsb *dsb)
return DSB_STATUS & intel_de_read(dev_priv, DSB_CTRL(pipe, dsb->id));
}
-static inline bool intel_dsb_enable_engine(struct intel_dsb *dsb)
+static bool intel_dsb_enable_engine(struct intel_dsb *dsb)
{
struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -63,7 +63,7 @@ static inline bool intel_dsb_enable_engine(struct intel_dsb *dsb)
return true;
}
-static inline bool intel_dsb_disable_engine(struct intel_dsb *dsb)
+static bool intel_dsb_disable_engine(struct intel_dsb *dsb)
{
struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
diff --git a/drivers/gpu/drm/i915/display/intel_dsi.c b/drivers/gpu/drm/i915/display/intel_dsi.c
index a2a937109a5a..afa4e6817e8c 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi.c
@@ -31,20 +31,21 @@ int intel_dsi_tlpx_ns(const struct intel_dsi *intel_dsi)
int intel_dsi_get_modes(struct drm_connector *connector)
{
+ struct drm_i915_private *i915 = to_i915(connector->dev);
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_display_mode *mode;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&i915->drm, "\n");
if (!intel_connector->panel.fixed_mode) {
- DRM_DEBUG_KMS("no fixed mode\n");
+ drm_dbg_kms(&i915->drm, "no fixed mode\n");
return 0;
}
mode = drm_mode_duplicate(connector->dev,
intel_connector->panel.fixed_mode);
if (!mode) {
- DRM_DEBUG_KMS("drm_mode_duplicate failed\n");
+ drm_dbg_kms(&i915->drm, "drm_mode_duplicate failed\n");
return 0;
}
@@ -60,7 +61,7 @@ enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector,
const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&dev_priv->drm, "\n");
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
index 574dcfec9577..eed037ec0b29 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
@@ -121,7 +121,7 @@ struct i2c_adapter_lookup {
#define ICL_GPIO_DDPA_CTRLCLK_2 8
#define ICL_GPIO_DDPA_CTRLDATA_2 9
-static inline enum port intel_dsi_seq_port_to_port(u8 port)
+static enum port intel_dsi_seq_port_to_port(u8 port)
{
return port ? PORT_C : PORT_A;
}
@@ -453,8 +453,7 @@ static inline void i2c_acpi_find_adapter(struct intel_dsi *intel_dsi,
static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data)
{
- struct drm_device *drm_dev = intel_dsi->base.base.dev;
- struct device *dev = &drm_dev->pdev->dev;
+ struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
struct i2c_adapter *adapter;
struct i2c_msg msg;
int ret;
@@ -471,7 +470,7 @@ static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data)
adapter = i2c_get_adapter(intel_dsi->i2c_bus_num);
if (!adapter) {
- DRM_DEV_ERROR(dev, "Cannot find a valid i2c bus for xfer\n");
+ drm_err(&i915->drm, "Cannot find a valid i2c bus for xfer\n");
goto err_bus;
}
@@ -489,9 +488,9 @@ static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data)
ret = i2c_transfer(adapter, &msg, 1);
if (ret < 0)
- DRM_DEV_ERROR(dev,
- "Failed to xfer payload of size (%u) to reg (%u)\n",
- payload_size, reg_offset);
+ drm_err(&i915->drm,
+ "Failed to xfer payload of size (%u) to reg (%u)\n",
+ payload_size, reg_offset);
kfree(payload_data);
err_alloc:
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
index 341d5ce8b062..5cd09034519b 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.c
+++ b/drivers/gpu/drm/i915/display/intel_dvo.c
@@ -183,7 +183,8 @@ static void intel_dvo_get_config(struct intel_encoder *encoder,
pipe_config->hw.adjusted_mode.crtc_clock = pipe_config->port_clock;
}
-static void intel_disable_dvo(struct intel_encoder *encoder,
+static void intel_disable_dvo(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -197,7 +198,8 @@ static void intel_disable_dvo(struct intel_encoder *encoder,
intel_de_read(dev_priv, dvo_reg);
}
-static void intel_enable_dvo(struct intel_encoder *encoder,
+static void intel_enable_dvo(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -272,7 +274,8 @@ static int intel_dvo_compute_config(struct intel_encoder *encoder,
return 0;
}
-static void intel_dvo_pre_enable(struct intel_encoder *encoder,
+static void intel_dvo_pre_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index c125ca9ab9b3..1c26673acb2d 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -104,7 +104,7 @@ static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
/* Wait for compressing bit to clear */
if (intel_de_wait_for_clear(dev_priv, FBC_STATUS,
FBC_STAT_COMPRESSING, 10)) {
- DRM_DEBUG_KMS("FBC idle timed out\n");
+ drm_dbg_kms(&dev_priv->drm, "FBC idle timed out\n");
return;
}
}
@@ -485,7 +485,8 @@ static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv,
if (!ret)
goto err_llb;
else if (ret > 1) {
- DRM_INFO_ONCE("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
+ drm_info_once(&dev_priv->drm,
+ "Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
}
fbc->threshold = ret;
@@ -520,8 +521,9 @@ static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv,
dev_priv->dsm.start + compressed_llb->start);
}
- DRM_DEBUG_KMS("reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n",
- fbc->compressed_fb.size, fbc->threshold);
+ drm_dbg_kms(&dev_priv->drm,
+ "reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n",
+ fbc->compressed_fb.size, fbc->threshold);
return 0;
@@ -530,7 +532,7 @@ err_fb:
i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);
err_llb:
if (drm_mm_initialized(&dev_priv->mm.stolen))
- pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
+ drm_info_once(&dev_priv->drm, "not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
return -ENOSPC;
}
@@ -538,6 +540,9 @@ static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
{
struct intel_fbc *fbc = &dev_priv->fbc;
+ if (WARN_ON(intel_fbc_hw_is_active(dev_priv)))
+ return;
+
if (!drm_mm_node_allocated(&fbc->compressed_fb))
return;
@@ -562,7 +567,7 @@ void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
}
static bool stride_is_valid(struct drm_i915_private *dev_priv,
- unsigned int stride)
+ u64 modifier, unsigned int stride)
{
/* This should have been caught earlier. */
if (drm_WARN_ON_ONCE(&dev_priv->drm, (stride & (64 - 1)) != 0))
@@ -578,6 +583,11 @@ static bool stride_is_valid(struct drm_i915_private *dev_priv,
if (IS_GEN(dev_priv, 4) && !IS_G4X(dev_priv) && stride < 2048)
return false;
+ /* Display WA #1105: skl,bxt,kbl,cfl,glk */
+ if (IS_GEN(dev_priv, 9) &&
+ modifier == DRM_FORMAT_MOD_LINEAR && stride & 511)
+ return false;
+
if (stride > 16384)
return false;
@@ -605,6 +615,19 @@ static bool pixel_format_is_valid(struct drm_i915_private *dev_priv,
}
}
+static bool rotation_is_valid(struct drm_i915_private *dev_priv,
+ u32 pixel_format, unsigned int rotation)
+{
+ if (INTEL_GEN(dev_priv) >= 9 && pixel_format == DRM_FORMAT_RGB565 &&
+ drm_rotation_90_or_270(rotation))
+ return false;
+ else if (INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv) &&
+ rotation != DRM_MODE_ROTATE_0)
+ return false;
+
+ return true;
+}
+
/*
* For some reason, the hardware tracking starts looking at whatever we
* programmed as the display plane base address register. It does not look at
@@ -639,6 +662,22 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
return effective_w <= max_w && effective_h <= max_h;
}
+static bool tiling_is_valid(struct drm_i915_private *dev_priv,
+ uint64_t modifier)
+{
+ switch (modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ if (INTEL_GEN(dev_priv) >= 9)
+ return true;
+ return false;
+ case I915_FORMAT_MOD_X_TILED:
+ case I915_FORMAT_MOD_Y_TILED:
+ return true;
+ default:
+ return false;
+ }
+}
+
static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
@@ -672,6 +711,7 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
cache->fb.format = fb->format;
cache->fb.stride = fb->pitches[0];
+ cache->fb.modifier = fb->modifier;
drm_WARN_ON(&dev_priv->drm, plane_state->flags & PLANE_HAS_FENCE &&
!plane_state->vma->fence);
@@ -745,30 +785,40 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
return false;
}
- /* The use of a CPU fence is mandatory in order to detect writes
- * by the CPU to the scanout and trigger updates to the FBC.
+ /* The use of a CPU fence is one of two ways to detect writes by the
+ * CPU to the scanout and trigger updates to the FBC.
+ *
+ * The other method is by software tracking (see
+ * intel_fbc_invalidate/flush()), it will manually notify FBC and nuke
+ * the current compressed buffer and recompress it.
*
* Note that is possible for a tiled surface to be unmappable (and
- * so have no fence associated with it) due to aperture constaints
+ * so have no fence associated with it) due to aperture constraints
* at the time of pinning.
*
* FIXME with 90/270 degree rotation we should use the fence on
* the normal GTT view (the rotated view doesn't even have a
* fence). Would need changes to the FBC fence Y offset as well.
- * For now this will effecively disable FBC with 90/270 degree
+ * For now this will effectively disable FBC with 90/270 degree
* rotation.
*/
- if (cache->fence_id < 0) {
+ if (INTEL_GEN(dev_priv) < 9 && cache->fence_id < 0) {
fbc->no_fbc_reason = "framebuffer not tiled or fenced";
return false;
}
- if (INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv) &&
- cache->plane.rotation != DRM_MODE_ROTATE_0) {
+
+ if (!rotation_is_valid(dev_priv, cache->fb.format->format,
+ cache->plane.rotation)) {
fbc->no_fbc_reason = "rotation unsupported";
return false;
}
- if (!stride_is_valid(dev_priv, cache->fb.stride)) {
+ if (!tiling_is_valid(dev_priv, cache->fb.modifier)) {
+ fbc->no_fbc_reason = "tiling unsupported";
+ return false;
+ }
+
+ if (!stride_is_valid(dev_priv, cache->fb.modifier, cache->fb.stride)) {
fbc->no_fbc_reason = "framebuffer stride not supported";
return false;
}
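/*
 * [Editor's illustration -- not part of the patch] The rewritten checks above
 * relax the CPU-fence requirement: from gen9 the driver can rely on its
 * software frontbuffer tracking (intel_fbc_invalidate()/intel_fbc_flush()) to
 * catch CPU writes to the scanout, so a missing fence only blocks FBC on
 * older hardware. The new policy, condensed into a hypothetical helper:
 */
static bool fbc_blocked_by_missing_fence(struct drm_i915_private *i915,
					 int fence_id)
{
	/* pre-gen9 hardware has no other way to notice CPU scanout writes */
	return INTEL_GEN(i915) < 9 && fence_id < 0;
}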
@@ -947,7 +997,8 @@ static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
drm_WARN_ON(&dev_priv->drm, !fbc->crtc);
drm_WARN_ON(&dev_priv->drm, fbc->active);
- DRM_DEBUG_KMS("Disabling FBC on pipe %c\n", pipe_name(crtc->pipe));
+ drm_dbg_kms(&dev_priv->drm, "Disabling FBC on pipe %c\n",
+ pipe_name(crtc->pipe));
__intel_fbc_cleanup_cfb(dev_priv);
@@ -1175,7 +1226,8 @@ void intel_fbc_enable(struct intel_atomic_state *state,
else
cache->gen9_wa_cfb_stride = 0;
- DRM_DEBUG_KMS("Enabling FBC on pipe %c\n", pipe_name(crtc->pipe));
+ drm_dbg_kms(&dev_priv->drm, "Enabling FBC on pipe %c\n",
+ pipe_name(crtc->pipe));
fbc->no_fbc_reason = "FBC enabled but not active yet\n";
fbc->crtc = crtc;
@@ -1237,7 +1289,7 @@ static void intel_fbc_underrun_work_fn(struct work_struct *work)
if (fbc->underrun_detected || !fbc->crtc)
goto out;
- DRM_DEBUG_KMS("Disabling FBC due to FIFO underrun.\n");
+ drm_dbg_kms(&dev_priv->drm, "Disabling FBC due to FIFO underrun.\n");
fbc->underrun_detected = true;
intel_fbc_deactivate(dev_priv, "FIFO underrun");
@@ -1263,7 +1315,8 @@ int intel_fbc_reset_underrun(struct drm_i915_private *dev_priv)
return ret;
if (dev_priv->fbc.underrun_detected) {
- DRM_DEBUG_KMS("Re-allowing FBC after fifo underrun\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Re-allowing FBC after fifo underrun\n");
dev_priv->fbc.no_fbc_reason = "FIFO underrun cleared";
}
@@ -1334,7 +1387,8 @@ static bool need_fbc_vtd_wa(struct drm_i915_private *dev_priv)
/* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */
if (intel_vtd_active() &&
(IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))) {
- DRM_INFO("Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n");
+ drm_info(&dev_priv->drm,
+ "Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n");
return true;
}
@@ -1362,8 +1416,8 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
mkwrite_device_info(dev_priv)->display.has_fbc = false;
i915_modparams.enable_fbc = intel_sanitize_fbc_option(dev_priv);
- DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n",
- i915_modparams.enable_fbc);
+ drm_dbg_kms(&dev_priv->drm, "Sanitized enable_fbc value: %d\n",
+ i915_modparams.enable_fbc);
if (!HAS_FBC(dev_priv)) {
fbc->no_fbc_reason = "unsupported by this chipset";
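/*
 * [Editor's note -- not part of the patch] Most of the remaining hunks in
 * this series are mechanical logging conversions from the global
 * DRM_DEBUG_KMS()/DRM_ERROR()/DRM_INFO() macros to the struct drm_device
 * aware drm_dbg_kms()/drm_err()/drm_info() variants, which tag each message
 * with the originating device and keep multi-GPU logs attributable, e.g.:
 *
 *	DRM_DEBUG_KMS("Disabling FBC on pipe %c\n", pipe_name(crtc->pipe));
 * becomes
 *	drm_dbg_kms(&dev_priv->drm, "Disabling FBC on pipe %c\n",
 *		    pipe_name(crtc->pipe));
 */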
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index 3bc804212a99..bd39eb6a21b8 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -146,7 +146,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
if (IS_ERR(obj))
obj = i915_gem_object_create_shmem(dev_priv, size);
if (IS_ERR(obj)) {
- DRM_ERROR("failed to allocate framebuffer\n");
+ drm_err(&dev_priv->drm, "failed to allocate framebuffer\n");
return PTR_ERR(obj);
}
@@ -183,21 +183,23 @@ static int intelfb_create(struct drm_fb_helper *helper,
if (intel_fb &&
(sizes->fb_width > intel_fb->base.width ||
sizes->fb_height > intel_fb->base.height)) {
- DRM_DEBUG_KMS("BIOS fb too small (%dx%d), we require (%dx%d),"
- " releasing it\n",
- intel_fb->base.width, intel_fb->base.height,
- sizes->fb_width, sizes->fb_height);
+ drm_dbg_kms(&dev_priv->drm,
+ "BIOS fb too small (%dx%d), we require (%dx%d),"
+ " releasing it\n",
+ intel_fb->base.width, intel_fb->base.height,
+ sizes->fb_width, sizes->fb_height);
drm_framebuffer_put(&intel_fb->base);
intel_fb = ifbdev->fb = NULL;
}
if (!intel_fb || drm_WARN_ON(dev, !intel_fb_obj(&intel_fb->base))) {
- DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "no BIOS fb, allocating a new one\n");
ret = intelfb_alloc(helper, sizes);
if (ret)
return ret;
intel_fb = ifbdev->fb;
} else {
- DRM_DEBUG_KMS("re-using BIOS fb\n");
+ drm_dbg_kms(&dev_priv->drm, "re-using BIOS fb\n");
prealloc = true;
sizes->fb_width = intel_fb->base.width;
sizes->fb_height = intel_fb->base.height;
@@ -220,7 +222,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
info = drm_fb_helper_alloc_fbi(helper);
if (IS_ERR(info)) {
- DRM_ERROR("Failed to allocate fb_info\n");
+ drm_err(&dev_priv->drm, "Failed to allocate fb_info\n");
ret = PTR_ERR(info);
goto out_unpin;
}
@@ -240,7 +242,8 @@ static int intelfb_create(struct drm_fb_helper *helper,
vaddr = i915_vma_pin_iomap(vma);
if (IS_ERR(vaddr)) {
- DRM_ERROR("Failed to remap framebuffer into virtual memory\n");
+ drm_err(&dev_priv->drm,
+ "Failed to remap framebuffer into virtual memory\n");
ret = PTR_ERR(vaddr);
goto out_unpin;
}
@@ -258,9 +261,9 @@ static int intelfb_create(struct drm_fb_helper *helper,
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
- DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x\n",
- ifbdev->fb->base.width, ifbdev->fb->base.height,
- i915_ggtt_offset(vma));
+ drm_dbg_kms(&dev_priv->drm, "allocated %dx%d fb: 0x%08x\n",
+ ifbdev->fb->base.width, ifbdev->fb->base.height,
+ i915_ggtt_offset(vma));
ifbdev->vma = vma;
ifbdev->vma_flags = flags;
@@ -309,6 +312,7 @@ static void intel_fbdev_destroy(struct intel_fbdev *ifbdev)
static bool intel_fbdev_init_bios(struct drm_device *dev,
struct intel_fbdev *ifbdev)
{
+ struct drm_i915_private *i915 = to_i915(dev);
struct intel_framebuffer *fb = NULL;
struct drm_crtc *crtc;
struct intel_crtc *intel_crtc;
@@ -321,21 +325,24 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
intel_crtc = to_intel_crtc(crtc);
if (!crtc->state->active || !obj) {
- DRM_DEBUG_KMS("pipe %c not active or no fb, skipping\n",
- pipe_name(intel_crtc->pipe));
+ drm_dbg_kms(&i915->drm,
+ "pipe %c not active or no fb, skipping\n",
+ pipe_name(intel_crtc->pipe));
continue;
}
if (obj->base.size > max_size) {
- DRM_DEBUG_KMS("found possible fb from plane %c\n",
- pipe_name(intel_crtc->pipe));
+ drm_dbg_kms(&i915->drm,
+ "found possible fb from plane %c\n",
+ pipe_name(intel_crtc->pipe));
fb = to_intel_framebuffer(crtc->primary->state->fb);
max_size = obj->base.size;
}
}
if (!fb) {
- DRM_DEBUG_KMS("no active fbs found, not using BIOS config\n");
+ drm_dbg_kms(&i915->drm,
+ "no active fbs found, not using BIOS config\n");
goto out;
}
@@ -346,13 +353,14 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
intel_crtc = to_intel_crtc(crtc);
if (!crtc->state->active) {
- DRM_DEBUG_KMS("pipe %c not active, skipping\n",
- pipe_name(intel_crtc->pipe));
+ drm_dbg_kms(&i915->drm,
+ "pipe %c not active, skipping\n",
+ pipe_name(intel_crtc->pipe));
continue;
}
- DRM_DEBUG_KMS("checking plane %c for BIOS fb\n",
- pipe_name(intel_crtc->pipe));
+ drm_dbg_kms(&i915->drm, "checking plane %c for BIOS fb\n",
+ pipe_name(intel_crtc->pipe));
/*
* See if the plane fb we found above will fit on this
@@ -362,9 +370,10 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
cur_size = crtc->state->adjusted_mode.crtc_hdisplay;
cur_size = cur_size * fb->base.format->cpp[0];
if (fb->base.pitches[0] < cur_size) {
- DRM_DEBUG_KMS("fb not wide enough for plane %c (%d vs %d)\n",
- pipe_name(intel_crtc->pipe),
- cur_size, fb->base.pitches[0]);
+ drm_dbg_kms(&i915->drm,
+ "fb not wide enough for plane %c (%d vs %d)\n",
+ pipe_name(intel_crtc->pipe),
+ cur_size, fb->base.pitches[0]);
fb = NULL;
break;
}
@@ -372,28 +381,32 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
cur_size = crtc->state->adjusted_mode.crtc_vdisplay;
cur_size = intel_fb_align_height(&fb->base, 0, cur_size);
cur_size *= fb->base.pitches[0];
- DRM_DEBUG_KMS("pipe %c area: %dx%d, bpp: %d, size: %d\n",
- pipe_name(intel_crtc->pipe),
- crtc->state->adjusted_mode.crtc_hdisplay,
- crtc->state->adjusted_mode.crtc_vdisplay,
- fb->base.format->cpp[0] * 8,
- cur_size);
+ drm_dbg_kms(&i915->drm,
+ "pipe %c area: %dx%d, bpp: %d, size: %d\n",
+ pipe_name(intel_crtc->pipe),
+ crtc->state->adjusted_mode.crtc_hdisplay,
+ crtc->state->adjusted_mode.crtc_vdisplay,
+ fb->base.format->cpp[0] * 8,
+ cur_size);
if (cur_size > max_size) {
- DRM_DEBUG_KMS("fb not big enough for plane %c (%d vs %d)\n",
- pipe_name(intel_crtc->pipe),
- cur_size, max_size);
+ drm_dbg_kms(&i915->drm,
+ "fb not big enough for plane %c (%d vs %d)\n",
+ pipe_name(intel_crtc->pipe),
+ cur_size, max_size);
fb = NULL;
break;
}
- DRM_DEBUG_KMS("fb big enough for plane %c (%d >= %d)\n",
- pipe_name(intel_crtc->pipe),
- max_size, cur_size);
+ drm_dbg_kms(&i915->drm,
+ "fb big enough for plane %c (%d >= %d)\n",
+ pipe_name(intel_crtc->pipe),
+ max_size, cur_size);
}
if (!fb) {
- DRM_DEBUG_KMS("BIOS fb not suitable for all pipes, not using\n");
+ drm_dbg_kms(&i915->drm,
+ "BIOS fb not suitable for all pipes, not using\n");
goto out;
}
@@ -415,7 +428,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
}
- DRM_DEBUG_KMS("using BIOS fb for initial console\n");
+ drm_dbg_kms(&i915->drm, "using BIOS fb for initial console\n");
return true;
out:
@@ -522,8 +535,9 @@ void intel_fbdev_fini(struct drm_i915_private *dev_priv)
* processing, fbdev will perform a full connector reprobe if a hotplug event
* was received while HPD was suspended.
*/
-static void intel_fbdev_hpd_set_suspend(struct intel_fbdev *ifbdev, int state)
+static void intel_fbdev_hpd_set_suspend(struct drm_i915_private *i915, int state)
{
+ struct intel_fbdev *ifbdev = i915->fbdev;
bool send_hpd = false;
mutex_lock(&ifbdev->hpd_lock);
@@ -533,7 +547,7 @@ static void intel_fbdev_hpd_set_suspend(struct intel_fbdev *ifbdev, int state)
mutex_unlock(&ifbdev->hpd_lock);
if (send_hpd) {
- DRM_DEBUG_KMS("Handling delayed fbcon HPD event\n");
+ drm_dbg_kms(&i915->drm, "Handling delayed fbcon HPD event\n");
drm_fb_helper_hotplug_event(&ifbdev->helper);
}
}
@@ -588,7 +602,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
drm_fb_helper_set_suspend(&ifbdev->helper, state);
console_unlock();
- intel_fbdev_hpd_set_suspend(ifbdev, state);
+ intel_fbdev_hpd_set_suspend(dev_priv, state);
}
void intel_fbdev_output_poll_changed(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
index 6cb02c912acc..2979ed2588eb 100644
--- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
@@ -302,12 +302,14 @@ void intel_frontbuffer_track(struct intel_frontbuffer *old,
BITS_PER_TYPE(atomic_t));
if (old) {
- WARN_ON(!(atomic_read(&old->bits) & frontbuffer_bits));
+ drm_WARN_ON(old->obj->base.dev,
+ !(atomic_read(&old->bits) & frontbuffer_bits));
atomic_andnot(frontbuffer_bits, &old->bits);
}
if (new) {
- WARN_ON(atomic_read(&new->bits) & frontbuffer_bits);
+ drm_WARN_ON(new->obj->base.dev,
+ atomic_read(&new->bits) & frontbuffer_bits);
atomic_or(frontbuffer_bits, &new->bits);
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_global_state.c b/drivers/gpu/drm/i915/display/intel_global_state.c
index a0cc894c3868..7a19215ad844 100644
--- a/drivers/gpu/drm/i915/display/intel_global_state.c
+++ b/drivers/gpu/drm/i915/display/intel_global_state.c
@@ -10,6 +10,28 @@
#include "intel_display_types.h"
#include "intel_global_state.h"
+static void __intel_atomic_global_state_free(struct kref *kref)
+{
+ struct intel_global_state *obj_state =
+ container_of(kref, struct intel_global_state, ref);
+ struct intel_global_obj *obj = obj_state->obj;
+
+ obj->funcs->atomic_destroy_state(obj, obj_state);
+}
+
+static void intel_atomic_global_state_put(struct intel_global_state *obj_state)
+{
+ kref_put(&obj_state->ref, __intel_atomic_global_state_free);
+}
+
+static struct intel_global_state *
+intel_atomic_global_state_get(struct intel_global_state *obj_state)
+{
+ kref_get(&obj_state->ref);
+
+ return obj_state;
+}
+
void intel_atomic_global_obj_init(struct drm_i915_private *dev_priv,
struct intel_global_obj *obj,
struct intel_global_state *state,
@@ -17,6 +39,10 @@ void intel_atomic_global_obj_init(struct drm_i915_private *dev_priv,
{
memset(obj, 0, sizeof(*obj));
+ state->obj = obj;
+
+ kref_init(&state->ref);
+
obj->state = state;
obj->funcs = funcs;
list_add_tail(&obj->head, &dev_priv->global_obj_list);
@@ -28,7 +54,9 @@ void intel_atomic_global_obj_cleanup(struct drm_i915_private *dev_priv)
list_for_each_entry_safe(obj, next, &dev_priv->global_obj_list, head) {
list_del(&obj->head);
- obj->funcs->atomic_destroy_state(obj, obj->state);
+
+ drm_WARN_ON(&dev_priv->drm, kref_read(&obj->state->ref) != 1);
+ intel_atomic_global_state_put(obj->state);
}
}
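/*
 * [Editor's illustration -- not part of the patch] The intel_global_state
 * hunks in this file add reference counting so that the old and new states
 * captured by an atomic commit stay valid until both the commit and the
 * global object have dropped their references. The lifetime scheme is the
 * standard kref pattern; a minimal generic sketch with hypothetical names:
 */
struct demo_state {
	struct kref ref;
	/* ... payload ... */
};

static void demo_state_release(struct kref *kref)
{
	kfree(container_of(kref, struct demo_state, ref));
}

/* kref_init(&state->ref) at creation time, then: */
static void demo_state_get(struct demo_state *state)
{
	kref_get(&state->ref);
}

static void demo_state_put(struct demo_state *state)
{
	kref_put(&state->ref, demo_state_release);
}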
@@ -64,13 +92,14 @@ static void assert_global_state_read_locked(struct intel_atomic_state *state)
return;
}
- WARN(1, "Global state not read locked\n");
+ drm_WARN(&dev_priv->drm, 1, "Global state not read locked\n");
}
struct intel_global_state *
intel_atomic_get_global_obj_state(struct intel_atomic_state *state,
struct intel_global_obj *obj)
{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
int index, num_objs, i;
size_t size;
struct __intel_global_objs_state *arr;
@@ -96,18 +125,22 @@ intel_atomic_get_global_obj_state(struct intel_atomic_state *state,
if (!obj_state)
return ERR_PTR(-ENOMEM);
+ obj_state->obj = obj;
obj_state->changed = false;
+ kref_init(&obj_state->ref);
+
state->global_objs[index].state = obj_state;
- state->global_objs[index].old_state = obj->state;
+ state->global_objs[index].old_state =
+ intel_atomic_global_state_get(obj->state);
state->global_objs[index].new_state = obj_state;
state->global_objs[index].ptr = obj;
obj_state->state = state;
state->num_global_objs = num_objs;
- DRM_DEBUG_ATOMIC("Added new global object %p state %p to %p\n",
- obj, obj_state, state);
+ drm_dbg_atomic(&i915->drm, "Added new global object %p state %p to %p\n",
+ obj, obj_state, state);
return obj_state;
}
@@ -147,7 +180,7 @@ void intel_atomic_swap_global_state(struct intel_atomic_state *state)
for_each_oldnew_global_obj_in_state(state, obj, old_obj_state,
new_obj_state, i) {
- WARN_ON(obj->state != old_obj_state);
+ drm_WARN_ON(&dev_priv->drm, obj->state != old_obj_state);
/*
* If the new state wasn't modified (and properly
@@ -162,7 +195,9 @@ void intel_atomic_swap_global_state(struct intel_atomic_state *state)
new_obj_state->state = NULL;
state->global_objs[i].state = old_obj_state;
- obj->state = new_obj_state;
+
+ intel_atomic_global_state_put(obj->state);
+ obj->state = intel_atomic_global_state_get(new_obj_state);
}
}
@@ -171,10 +206,9 @@ void intel_atomic_clear_global_state(struct intel_atomic_state *state)
int i;
for (i = 0; i < state->num_global_objs; i++) {
- struct intel_global_obj *obj = state->global_objs[i].ptr;
+ intel_atomic_global_state_put(state->global_objs[i].old_state);
+ intel_atomic_global_state_put(state->global_objs[i].new_state);
- obj->funcs->atomic_destroy_state(obj,
- state->global_objs[i].state);
state->global_objs[i].ptr = NULL;
state->global_objs[i].state = NULL;
state->global_objs[i].old_state = NULL;
diff --git a/drivers/gpu/drm/i915/display/intel_global_state.h b/drivers/gpu/drm/i915/display/intel_global_state.h
index e6163a469029..1f16fa3073c9 100644
--- a/drivers/gpu/drm/i915/display/intel_global_state.h
+++ b/drivers/gpu/drm/i915/display/intel_global_state.h
@@ -6,6 +6,7 @@
#ifndef __INTEL_GLOBAL_STATE_H__
#define __INTEL_GLOBAL_STATE_H__
+#include <linux/kref.h>
#include <linux/list.h>
struct drm_i915_private;
@@ -54,7 +55,9 @@ struct intel_global_obj {
for_each_if(obj)
struct intel_global_state {
+ struct intel_global_obj *obj;
struct intel_atomic_state *state;
+ struct kref ref;
bool changed;
};
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c
index 1fd3a5a6296b..a8d119b6b45c 100644
--- a/drivers/gpu/drm/i915/display/intel_gmbus.c
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.c
@@ -379,8 +379,7 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv)
return ret;
}
-static inline
-unsigned int gmbus_max_xfer_size(struct drm_i915_private *dev_priv)
+static unsigned int gmbus_max_xfer_size(struct drm_i915_private *dev_priv)
{
return INTEL_GEN(dev_priv) >= 9 ? GEN9_GMBUS_BYTE_COUNT_MAX :
GMBUS_BYTE_COUNT_MAX;
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index ee0f27ea2810..2cbc4619b4ce 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -109,18 +109,16 @@ bool intel_hdcp2_capable(struct intel_connector *connector)
return capable;
}
-static inline
-bool intel_hdcp_in_use(struct drm_i915_private *dev_priv,
- enum transcoder cpu_transcoder, enum port port)
+static bool intel_hdcp_in_use(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder, enum port port)
{
return intel_de_read(dev_priv,
HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
HDCP_STATUS_ENC;
}
-static inline
-bool intel_hdcp2_in_use(struct drm_i915_private *dev_priv,
- enum transcoder cpu_transcoder, enum port port)
+static bool intel_hdcp2_in_use(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder, enum port port)
{
return intel_de_read(dev_priv,
HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
@@ -853,8 +851,7 @@ static int _intel_hdcp_enable(struct intel_connector *connector)
return ret;
}
-static inline
-struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
+static struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
{
return container_of(hdcp, struct intel_connector, hdcp);
}
@@ -1391,6 +1388,7 @@ static
int hdcp2_propagate_stream_management_info(struct intel_connector *connector)
{
struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
union {
struct hdcp2_rep_stream_manage stream_manage;
@@ -1431,7 +1429,7 @@ int hdcp2_propagate_stream_management_info(struct intel_connector *connector)
hdcp->seq_num_m++;
if (hdcp->seq_num_m > HDCP_2_2_SEQ_NUM_MAX) {
- DRM_DEBUG_KMS("seq_num_m roll over.\n");
+ drm_dbg_kms(&i915->drm, "seq_num_m roll over.\n");
return -1;
}
@@ -1855,8 +1853,7 @@ static const struct component_ops i915_hdcp_component_ops = {
.unbind = i915_hdcp_component_unbind,
};
-static inline
-enum mei_fw_ddi intel_get_mei_fw_ddi_index(enum port port)
+static enum mei_fw_ddi intel_get_mei_fw_ddi_index(enum port port)
{
switch (port) {
case PORT_A:
@@ -1868,8 +1865,7 @@ enum mei_fw_ddi intel_get_mei_fw_ddi_index(enum port port)
}
}
-static inline
-enum mei_fw_tc intel_get_mei_fw_tc(enum transcoder cpu_transcoder)
+static enum mei_fw_tc intel_get_mei_fw_tc(enum transcoder cpu_transcoder)
{
switch (cpu_transcoder) {
case TRANSCODER_A ... TRANSCODER_D:
@@ -1879,8 +1875,8 @@ enum mei_fw_tc intel_get_mei_fw_tc(enum transcoder cpu_transcoder)
}
}
-static inline int initialize_hdcp_port_data(struct intel_connector *connector,
- const struct intel_hdcp_shim *shim)
+static int initialize_hdcp_port_data(struct intel_connector *connector,
+ const struct intel_hdcp_shim *shim)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
@@ -2075,7 +2071,8 @@ int intel_hdcp_disable(struct intel_connector *connector)
return ret;
}
-void intel_hdcp_update_pipe(struct intel_encoder *encoder,
+void intel_hdcp_update_pipe(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.h b/drivers/gpu/drm/i915/display/intel_hdcp.h
index 7c12ad609b1f..86bbaec120cc 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.h
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.h
@@ -11,6 +11,7 @@
struct drm_connector;
struct drm_connector_state;
struct drm_i915_private;
+struct intel_atomic_state;
struct intel_connector;
struct intel_crtc_state;
struct intel_encoder;
@@ -26,7 +27,8 @@ int intel_hdcp_init(struct intel_connector *connector,
int intel_hdcp_enable(struct intel_connector *connector,
enum transcoder cpu_transcoder, u8 content_type);
int intel_hdcp_disable(struct intel_connector *connector);
-void intel_hdcp_update_pipe(struct intel_encoder *encoder,
+void intel_hdcp_update_pipe(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port);
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index 821411b93dac..010f37240710 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -44,7 +44,6 @@
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
-#include "intel_display_debugfs.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dpio_phy.h"
@@ -707,13 +706,15 @@ void intel_read_infoframe(struct intel_encoder *encoder,
/* see comment above for the reason for this offset */
ret = hdmi_infoframe_unpack(frame, buffer + 1, sizeof(buffer) - 1);
if (ret) {
- DRM_DEBUG_KMS("Failed to unpack infoframe type 0x%02x\n", type);
+ drm_dbg_kms(encoder->base.dev,
+ "Failed to unpack infoframe type 0x%02x\n", type);
return;
}
if (frame->any.type != type)
- DRM_DEBUG_KMS("Found the wrong infoframe type 0x%x (expected 0x%02x)\n",
- frame->any.type, type);
+ drm_dbg_kms(encoder->base.dev,
+ "Found the wrong infoframe type 0x%x (expected 0x%02x)\n",
+ frame->any.type, type);
}
static bool
@@ -853,7 +854,8 @@ intel_hdmi_compute_drm_infoframe(struct intel_encoder *encoder,
ret = drm_hdmi_infoframe_set_hdr_metadata(frame, conn_state);
if (ret < 0) {
- DRM_DEBUG_KMS("couldn't set HDR metadata in infoframe\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "couldn't set HDR metadata in infoframe\n");
return false;
}
@@ -893,8 +895,9 @@ static void g4x_set_infoframes(struct intel_encoder *encoder,
if (!(val & VIDEO_DIP_ENABLE))
return;
if (port != (val & VIDEO_DIP_PORT_MASK)) {
- DRM_DEBUG_KMS("video DIP still enabled on port %c\n",
- (val & VIDEO_DIP_PORT_MASK) >> 29);
+ drm_dbg_kms(&dev_priv->drm,
+ "video DIP still enabled on port %c\n",
+ (val & VIDEO_DIP_PORT_MASK) >> 29);
return;
}
val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
@@ -906,8 +909,9 @@ static void g4x_set_infoframes(struct intel_encoder *encoder,
if (port != (val & VIDEO_DIP_PORT_MASK)) {
if (val & VIDEO_DIP_ENABLE) {
- DRM_DEBUG_KMS("video DIP already enabled on port %c\n",
- (val & VIDEO_DIP_PORT_MASK) >> 29);
+ drm_dbg_kms(&dev_priv->drm,
+ "video DIP already enabled on port %c\n",
+ (val & VIDEO_DIP_PORT_MASK) >> 29);
return;
}
val &= ~VIDEO_DIP_PORT_MASK;
@@ -1264,8 +1268,8 @@ void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable)
if (hdmi->dp_dual_mode.type < DRM_DP_DUAL_MODE_TYPE2_DVI)
return;
- DRM_DEBUG_KMS("%s DP dual mode adaptor TMDS output\n",
- enable ? "Enabling" : "Disabling");
+ drm_dbg_kms(&dev_priv->drm, "%s DP dual mode adaptor TMDS output\n",
+ enable ? "Enabling" : "Disabling");
drm_dp_dual_mode_set_tmds_output(hdmi->dp_dual_mode.type,
adapter, enable);
@@ -1346,13 +1350,14 @@ int intel_hdmi_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
ret = intel_hdmi_hdcp_write(intel_dig_port, DRM_HDCP_DDC_AN, an,
DRM_HDCP_AN_LEN);
if (ret) {
- DRM_DEBUG_KMS("Write An over DDC failed (%d)\n", ret);
+ drm_dbg_kms(&i915->drm, "Write An over DDC failed (%d)\n",
+ ret);
return ret;
}
ret = intel_gmbus_output_aksv(adapter);
if (ret < 0) {
- DRM_DEBUG_KMS("Failed to output aksv (%d)\n", ret);
+ drm_dbg_kms(&i915->drm, "Failed to output aksv (%d)\n", ret);
return ret;
}
return 0;
@@ -1361,11 +1366,14 @@ int intel_hdmi_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
static int intel_hdmi_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
u8 *bksv)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+
int ret;
ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BKSV, bksv,
DRM_HDCP_KSV_LEN);
if (ret)
- DRM_DEBUG_KMS("Read Bksv over DDC failed (%d)\n", ret);
+ drm_dbg_kms(&i915->drm, "Read Bksv over DDC failed (%d)\n",
+ ret);
return ret;
}
@@ -1373,11 +1381,14 @@ static
int intel_hdmi_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
u8 *bstatus)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+
int ret;
ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BSTATUS,
bstatus, DRM_HDCP_BSTATUS_LEN);
if (ret)
- DRM_DEBUG_KMS("Read bstatus over DDC failed (%d)\n", ret);
+ drm_dbg_kms(&i915->drm, "Read bstatus over DDC failed (%d)\n",
+ ret);
return ret;
}
@@ -1385,12 +1396,14 @@ static
int intel_hdmi_hdcp_repeater_present(struct intel_digital_port *intel_dig_port,
bool *repeater_present)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
int ret;
u8 val;
ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BCAPS, &val, 1);
if (ret) {
- DRM_DEBUG_KMS("Read bcaps over DDC failed (%d)\n", ret);
+ drm_dbg_kms(&i915->drm, "Read bcaps over DDC failed (%d)\n",
+ ret);
return ret;
}
*repeater_present = val & DRM_HDCP_DDC_BCAPS_REPEATER_PRESENT;
@@ -1401,11 +1414,14 @@ static
int intel_hdmi_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port,
u8 *ri_prime)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+
int ret;
ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_RI_PRIME,
ri_prime, DRM_HDCP_RI_LEN);
if (ret)
- DRM_DEBUG_KMS("Read Ri' over DDC failed (%d)\n", ret);
+ drm_dbg_kms(&i915->drm, "Read Ri' over DDC failed (%d)\n",
+ ret);
return ret;
}
@@ -1413,12 +1429,14 @@ static
int intel_hdmi_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port,
bool *ksv_ready)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
int ret;
u8 val;
ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BCAPS, &val, 1);
if (ret) {
- DRM_DEBUG_KMS("Read bcaps over DDC failed (%d)\n", ret);
+ drm_dbg_kms(&i915->drm, "Read bcaps over DDC failed (%d)\n",
+ ret);
return ret;
}
*ksv_ready = val & DRM_HDCP_DDC_BCAPS_KSV_FIFO_READY;
@@ -1429,11 +1447,13 @@ static
int intel_hdmi_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port,
int num_downstream, u8 *ksv_fifo)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
int ret;
ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_KSV_FIFO,
ksv_fifo, num_downstream * DRM_HDCP_KSV_LEN);
if (ret) {
- DRM_DEBUG_KMS("Read ksv fifo over DDC failed (%d)\n", ret);
+ drm_dbg_kms(&i915->drm,
+ "Read ksv fifo over DDC failed (%d)\n", ret);
return ret;
}
return 0;
@@ -1443,6 +1463,7 @@ static
int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
int i, u32 *part)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
int ret;
if (i >= DRM_HDCP_V_PRIME_NUM_PARTS)
@@ -1451,7 +1472,8 @@ int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_V_PRIME(i),
part, DRM_HDCP_V_PRIME_PART_LEN);
if (ret)
- DRM_DEBUG_KMS("Read V'[%d] over DDC failed (%d)\n", i, ret);
+ drm_dbg_kms(&i915->drm, "Read V'[%d] over DDC failed (%d)\n",
+ i, ret);
return ret;
}
@@ -1474,12 +1496,14 @@ static int kbl_repositioning_enc_en_signal(struct intel_connector *connector)
ret = intel_ddi_toggle_hdcp_signalling(&intel_dig_port->base, false);
if (ret) {
- DRM_ERROR("Disable HDCP signalling failed (%d)\n", ret);
+ drm_err(&dev_priv->drm,
+ "Disable HDCP signalling failed (%d)\n", ret);
return ret;
}
ret = intel_ddi_toggle_hdcp_signalling(&intel_dig_port->base, true);
if (ret) {
- DRM_ERROR("Enable HDCP signalling failed (%d)\n", ret);
+ drm_err(&dev_priv->drm,
+ "Enable HDCP signalling failed (%d)\n", ret);
return ret;
}
@@ -1500,8 +1524,8 @@ int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
ret = intel_ddi_toggle_hdcp_signalling(&intel_dig_port->base, enable);
if (ret) {
- DRM_ERROR("%s HDCP signalling failed (%d)\n",
- enable ? "Enable" : "Disable", ret);
+ drm_err(&dev_priv->drm, "%s HDCP signalling failed (%d)\n",
+ enable ? "Enable" : "Disable", ret);
return ret;
}
@@ -1539,8 +1563,10 @@ bool intel_hdmi_hdcp_check_link(struct intel_digital_port *intel_dig_port)
if (wait_for((intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)) &
(HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC)) ==
(HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1)) {
- DRM_ERROR("Ri' mismatch detected, link check failed (%x)\n",
- intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)));
+ drm_err(&i915->drm,
+ "Ri' mismatch detected, link check failed (%x)\n",
+ intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder,
+ port)));
return false;
}
return true;
@@ -1588,17 +1614,19 @@ static int get_hdcp2_msg_timeout(u8 msg_id, bool is_paired)
return -EINVAL;
}
-static inline
-int hdcp2_detect_msg_availability(struct intel_digital_port *intel_digital_port,
- u8 msg_id, bool *msg_ready,
- ssize_t *msg_sz)
+static int
+hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port,
+ u8 msg_id, bool *msg_ready,
+ ssize_t *msg_sz)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
u8 rx_status[HDCP_2_2_HDMI_RXSTATUS_LEN];
int ret;
- ret = intel_hdmi_hdcp2_read_rx_status(intel_digital_port, rx_status);
+ ret = intel_hdmi_hdcp2_read_rx_status(intel_dig_port, rx_status);
if (ret < 0) {
- DRM_DEBUG_KMS("rx_status read failed. Err %d\n", ret);
+ drm_dbg_kms(&i915->drm, "rx_status read failed. Err %d\n",
+ ret);
return ret;
}
@@ -1618,6 +1646,7 @@ static ssize_t
intel_hdmi_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
u8 msg_id, bool paired)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
bool msg_ready = false;
int timeout, ret;
ssize_t msg_sz = 0;
@@ -1632,8 +1661,8 @@ intel_hdmi_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
!ret && msg_ready && msg_sz, timeout * 1000,
1000, 5 * 1000);
if (ret)
- DRM_DEBUG_KMS("msg_id: %d, ret: %d, timeout: %d\n",
- msg_id, ret, timeout);
+ drm_dbg_kms(&i915->drm, "msg_id: %d, ret: %d, timeout: %d\n",
+ msg_id, ret, timeout);
return ret ? ret : msg_sz;
}
@@ -1652,6 +1681,7 @@ static
int intel_hdmi_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
u8 msg_id, void *buf, size_t size)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
struct intel_hdmi *hdmi = &intel_dig_port->hdmi;
struct intel_hdcp *hdcp = &hdmi->attached_connector->hdcp;
unsigned int offset;
@@ -1667,15 +1697,17 @@ int intel_hdmi_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
* available buffer.
*/
if (ret > size) {
- DRM_DEBUG_KMS("msg_sz(%zd) is more than exp size(%zu)\n",
- ret, size);
+ drm_dbg_kms(&i915->drm,
+ "msg_sz(%zd) is more than exp size(%zu)\n",
+ ret, size);
return -1;
}
offset = HDCP_2_2_HDMI_REG_RD_MSG_OFFSET;
ret = intel_hdmi_hdcp_read(intel_dig_port, offset, buf, ret);
if (ret)
- DRM_DEBUG_KMS("Failed to read msg_id: %d(%zd)\n", msg_id, ret);
+ drm_dbg_kms(&i915->drm, "Failed to read msg_id: %d(%zd)\n",
+ msg_id, ret);
return ret;
}
@@ -1718,12 +1750,6 @@ int intel_hdmi_hdcp2_capable(struct intel_digital_port *intel_dig_port,
return ret;
}
-static inline
-enum hdcp_wired_protocol intel_hdmi_hdcp2_protocol(void)
-{
- return HDCP_PROTOCOL_HDMI;
-}
-
static const struct intel_hdcp_shim intel_hdmi_hdcp_shim = {
.write_an_aksv = intel_hdmi_hdcp_write_an_aksv,
.read_bksv = intel_hdmi_hdcp_read_bksv,
@@ -1871,15 +1897,17 @@ static void intel_enable_hdmi_audio(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
- drm_WARN_ON(encoder->base.dev, !pipe_config->has_hdmi_sink);
- DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
- pipe_name(crtc->pipe));
+ drm_WARN_ON(&i915->drm, !pipe_config->has_hdmi_sink);
+ drm_dbg_kms(&i915->drm, "Enabling HDMI audio on pipe %c\n",
+ pipe_name(crtc->pipe));
intel_audio_codec_enable(encoder, pipe_config, conn_state);
}
-static void g4x_enable_hdmi(struct intel_encoder *encoder,
+static void g4x_enable_hdmi(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -1901,7 +1929,8 @@ static void g4x_enable_hdmi(struct intel_encoder *encoder,
intel_enable_hdmi_audio(encoder, pipe_config, conn_state);
}
-static void ibx_enable_hdmi(struct intel_encoder *encoder,
+static void ibx_enable_hdmi(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -1952,7 +1981,8 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder,
intel_enable_hdmi_audio(encoder, pipe_config, conn_state);
}
-static void cpt_enable_hdmi(struct intel_encoder *encoder,
+static void cpt_enable_hdmi(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -2005,13 +2035,15 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder,
intel_enable_hdmi_audio(encoder, pipe_config, conn_state);
}
-static void vlv_enable_hdmi(struct intel_encoder *encoder,
+static void vlv_enable_hdmi(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
}
-static void intel_disable_hdmi(struct intel_encoder *encoder,
+static void intel_disable_hdmi(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -2069,7 +2101,8 @@ static void intel_disable_hdmi(struct intel_encoder *encoder,
intel_dp_dual_mode_set_tmds_output(intel_hdmi, false);
}
-static void g4x_disable_hdmi(struct intel_encoder *encoder,
+static void g4x_disable_hdmi(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -2077,10 +2110,11 @@ static void g4x_disable_hdmi(struct intel_encoder *encoder,
intel_audio_codec_disable(encoder,
old_crtc_state, old_conn_state);
- intel_disable_hdmi(encoder, old_crtc_state, old_conn_state);
+ intel_disable_hdmi(state, encoder, old_crtc_state, old_conn_state);
}
-static void pch_disable_hdmi(struct intel_encoder *encoder,
+static void pch_disable_hdmi(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -2089,11 +2123,12 @@ static void pch_disable_hdmi(struct intel_encoder *encoder,
old_crtc_state, old_conn_state);
}
-static void pch_post_disable_hdmi(struct intel_encoder *encoder,
+static void pch_post_disable_hdmi(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- intel_disable_hdmi(encoder, old_crtc_state, old_conn_state);
+ intel_disable_hdmi(state, encoder, old_crtc_state, old_conn_state);
}
static int intel_hdmi_source_max_tmds_clock(struct intel_encoder *encoder)
@@ -2286,29 +2321,27 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
return true;
}
-static bool
-intel_hdmi_ycbcr420_config(struct drm_connector *connector,
- struct intel_crtc_state *config)
+static int
+intel_hdmi_ycbcr420_config(struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(config->uapi.crtc);
-
- if (!connector->ycbcr_420_allowed) {
- DRM_ERROR("Platform doesn't support YCBCR420 output\n");
- return false;
- }
+ struct drm_connector *connector = conn_state->connector;
+ struct drm_i915_private *i915 = to_i915(connector->dev);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
- config->output_format = INTEL_OUTPUT_FORMAT_YCBCR420;
+ if (!drm_mode_is_420_only(&connector->display_info, adjusted_mode))
+ return 0;
- /* YCBCR 420 output conversion needs a scaler */
- if (skl_update_scaler_crtc(config)) {
- DRM_DEBUG_KMS("Scaler allocation for output failed\n");
- return false;
+ if (!connector->ycbcr_420_allowed) {
+ drm_err(&i915->drm,
+ "Platform doesn't support YCBCR420 output\n");
+ return -EINVAL;
}
- intel_pch_panel_fitting(intel_crtc, config,
- DRM_MODE_SCALE_FULLSCREEN);
+ crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR420;
- return true;
+ return intel_pch_panel_fitting(crtc_state, conn_state);
}
static int intel_hdmi_port_clock(int clock, int bpc)
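/*
 * [Editor's note -- not part of the patch] With this refactor
 * intel_hdmi_ycbcr420_config() owns the "is this mode 420-only?" decision and
 * reports failure as a normal errno, so the caller shrinks to the usual
 * kernel idiom (see the intel_hdmi_compute_config() hunk further down):
 *
 *	ret = intel_hdmi_ycbcr420_config(pipe_config, conn_state);
 *	if (ret)
 *		return ret;
 *
 * The explicit scaler allocation also disappears here; panel fitting is now
 * requested through intel_pch_panel_fitting(), whose updated signature is
 * part of the intel_panel.c changes below.
 */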
@@ -2342,6 +2375,7 @@ static int intel_hdmi_compute_bpc(struct intel_encoder *encoder,
static int intel_hdmi_compute_clock(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
@@ -2366,13 +2400,15 @@ static int intel_hdmi_compute_clock(struct intel_encoder *encoder,
if (crtc_state->pipe_bpp > bpc * 3)
crtc_state->pipe_bpp = bpc * 3;
- DRM_DEBUG_KMS("picking %d bpc for HDMI output (pipe bpp: %d)\n",
- bpc, crtc_state->pipe_bpp);
+ drm_dbg_kms(&i915->drm,
+ "picking %d bpc for HDMI output (pipe bpp: %d)\n",
+ bpc, crtc_state->pipe_bpp);
if (hdmi_port_clock_valid(intel_hdmi, crtc_state->port_clock,
false, crtc_state->has_hdmi_sink) != MODE_OK) {
- DRM_DEBUG_KMS("unsupported HDMI clock (%d kHz), rejecting mode\n",
- crtc_state->port_clock);
+ drm_dbg_kms(&i915->drm,
+ "unsupported HDMI clock (%d kHz), rejecting mode\n",
+ crtc_state->port_clock);
return -EINVAL;
}
@@ -2433,12 +2469,9 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
pipe_config->pixel_multiplier = 2;
- if (drm_mode_is_420_only(&connector->display_info, adjusted_mode)) {
- if (!intel_hdmi_ycbcr420_config(connector, pipe_config)) {
- DRM_ERROR("Can't support YCBCR420 output\n");
- return -EINVAL;
- }
- }
+ ret = intel_hdmi_ycbcr420_config(pipe_config, conn_state);
+ if (ret)
+ return ret;
pipe_config->limited_color_range =
intel_hdmi_limited_color_range(pipe_config, conn_state);
@@ -2475,25 +2508,26 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
}
}
- intel_hdmi_compute_gcp_infoframe(encoder, pipe_config, conn_state);
+ intel_hdmi_compute_gcp_infoframe(encoder, pipe_config,
+ conn_state);
if (!intel_hdmi_compute_avi_infoframe(encoder, pipe_config, conn_state)) {
- DRM_DEBUG_KMS("bad AVI infoframe\n");
+ drm_dbg_kms(&dev_priv->drm, "bad AVI infoframe\n");
return -EINVAL;
}
if (!intel_hdmi_compute_spd_infoframe(encoder, pipe_config, conn_state)) {
- DRM_DEBUG_KMS("bad SPD infoframe\n");
+ drm_dbg_kms(&dev_priv->drm, "bad SPD infoframe\n");
return -EINVAL;
}
if (!intel_hdmi_compute_hdmi_infoframe(encoder, pipe_config, conn_state)) {
- DRM_DEBUG_KMS("bad HDMI infoframe\n");
+ drm_dbg_kms(&dev_priv->drm, "bad HDMI infoframe\n");
return -EINVAL;
}
if (!intel_hdmi_compute_drm_infoframe(encoder, pipe_config, conn_state)) {
- DRM_DEBUG_KMS("bad DRM infoframe\n");
+ drm_dbg_kms(&dev_priv->drm, "bad DRM infoframe\n");
return -EINVAL;
}
@@ -2543,7 +2577,8 @@ intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector, bool has_edid)
*/
if (has_edid && !connector->override_edid &&
intel_bios_is_port_dp_dual_mode(dev_priv, port)) {
- DRM_DEBUG_KMS("Assuming DP dual mode adaptor presence based on VBT\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Assuming DP dual mode adaptor presence based on VBT\n");
type = DRM_DP_DUAL_MODE_TYPE1_DVI;
} else {
type = DRM_DP_DUAL_MODE_NONE;
@@ -2557,9 +2592,10 @@ intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector, bool has_edid)
hdmi->dp_dual_mode.max_tmds_clock =
drm_dp_dual_mode_max_tmds_clock(type, adapter);
- DRM_DEBUG_KMS("DP dual mode adaptor (%s) detected (max TMDS clock: %d kHz)\n",
- drm_dp_get_dual_mode_type_name(type),
- hdmi->dp_dual_mode.max_tmds_clock);
+ drm_dbg_kms(&dev_priv->drm,
+ "DP dual mode adaptor (%s) detected (max TMDS clock: %d kHz)\n",
+ drm_dp_get_dual_mode_type_name(type),
+ hdmi->dp_dual_mode.max_tmds_clock);
}
static bool
@@ -2579,7 +2615,8 @@ intel_hdmi_set_edid(struct drm_connector *connector)
edid = drm_get_edid(connector, i2c);
if (!edid && !intel_gmbus_is_forced_bit(i2c)) {
- DRM_DEBUG_KMS("HDMI GMBUS EDID read failed, retry using GPIO bit-banging\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "HDMI GMBUS EDID read failed, retry using GPIO bit-banging\n");
intel_gmbus_force_bit(i2c, true);
edid = drm_get_edid(connector, i2c);
intel_gmbus_force_bit(i2c, false);
@@ -2611,8 +2648,8 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
struct intel_encoder *encoder = &hdmi_to_dig_port(intel_hdmi)->base;
intel_wakeref_t wakeref;
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
- connector->base.id, connector->name);
+ drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
+ connector->base.id, connector->name);
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
@@ -2643,8 +2680,10 @@ out:
static void
intel_hdmi_force(struct drm_connector *connector)
{
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
- connector->base.id, connector->name);
+ struct drm_i915_private *i915 = to_i915(connector->dev);
+
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
+ connector->base.id, connector->name);
intel_hdmi_unset_edid(connector);
@@ -2665,7 +2704,8 @@ static int intel_hdmi_get_modes(struct drm_connector *connector)
return intel_connector_update_modes(connector, edid);
}
-static void intel_hdmi_pre_enable(struct intel_encoder *encoder,
+static void intel_hdmi_pre_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -2679,7 +2719,8 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder,
pipe_config, conn_state);
}
-static void vlv_hdmi_pre_enable(struct intel_encoder *encoder,
+static void vlv_hdmi_pre_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -2696,12 +2737,13 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder,
pipe_config->has_infoframe,
pipe_config, conn_state);
- g4x_enable_hdmi(encoder, pipe_config, conn_state);
+ g4x_enable_hdmi(state, encoder, pipe_config, conn_state);
vlv_wait_port_ready(dev_priv, dport, 0x0);
}
-static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder,
+static void vlv_hdmi_pre_pll_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -2710,7 +2752,8 @@ static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder,
vlv_phy_pre_pll_enable(encoder, pipe_config);
}
-static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder,
+static void chv_hdmi_pre_pll_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -2719,14 +2762,16 @@ static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder,
chv_phy_pre_pll_enable(encoder, pipe_config);
}
-static void chv_hdmi_post_pll_disable(struct intel_encoder *encoder,
+static void chv_hdmi_post_pll_disable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
chv_phy_post_pll_disable(encoder, old_crtc_state);
}
-static void vlv_hdmi_post_disable(struct intel_encoder *encoder,
+static void vlv_hdmi_post_disable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -2734,7 +2779,8 @@ static void vlv_hdmi_post_disable(struct intel_encoder *encoder,
vlv_phy_reset_lanes(encoder, old_crtc_state);
}
-static void chv_hdmi_post_disable(struct intel_encoder *encoder,
+static void chv_hdmi_post_disable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -2749,7 +2795,8 @@ static void chv_hdmi_post_disable(struct intel_encoder *encoder,
vlv_dpio_put(dev_priv);
}
-static void chv_hdmi_pre_enable(struct intel_encoder *encoder,
+static void chv_hdmi_pre_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -2767,7 +2814,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder,
pipe_config->has_infoframe,
pipe_config, conn_state);
- g4x_enable_hdmi(encoder, pipe_config, conn_state);
+ g4x_enable_hdmi(state, encoder, pipe_config, conn_state);
vlv_wait_port_ready(dev_priv, dport, 0x0);
@@ -2786,6 +2833,7 @@ intel_hdmi_get_i2c_adapter(struct drm_connector *connector)
static void intel_hdmi_create_i2c_symlink(struct drm_connector *connector)
{
+ struct drm_i915_private *i915 = to_i915(connector->dev);
struct i2c_adapter *adapter = intel_hdmi_get_i2c_adapter(connector);
struct kobject *i2c_kobj = &adapter->dev.kobj;
struct kobject *connector_kobj = &connector->kdev->kobj;
@@ -2793,7 +2841,7 @@ static void intel_hdmi_create_i2c_symlink(struct drm_connector *connector)
ret = sysfs_create_link(connector_kobj, i2c_kobj, i2c_kobj->name);
if (ret)
- DRM_ERROR("Failed to create i2c symlink (%d)\n", ret);
+ drm_err(&i915->drm, "Failed to create i2c symlink (%d)\n", ret);
}
static void intel_hdmi_remove_i2c_symlink(struct drm_connector *connector)
@@ -2814,8 +2862,6 @@ intel_hdmi_connector_register(struct drm_connector *connector)
if (ret)
return ret;
- intel_connector_debugfs_add(connector);
-
intel_hdmi_create_i2c_symlink(connector);
return ret;
@@ -2922,9 +2968,10 @@ bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder,
if (!sink_scrambling->supported)
return true;
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] scrambling=%s, TMDS bit clock ratio=1/%d\n",
- connector->base.id, connector->name,
- yesno(scrambling), high_tmds_clock_ratio ? 40 : 10);
+ drm_dbg_kms(&dev_priv->drm,
+ "[CONNECTOR:%d:%s] scrambling=%s, TMDS bit clock ratio=1/%d\n",
+ connector->base.id, connector->name,
+ yesno(scrambling), high_tmds_clock_ratio ? 40 : 10);
/* Set TMDS bit clock ratio to 1/40 or 1/10, and enable/disable scrambling */
return drm_scdc_set_high_tmds_clock_ratio(adapter,
@@ -3066,8 +3113,9 @@ static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder)
ddc_pin = intel_bios_alternate_ddc_pin(encoder);
if (ddc_pin) {
- DRM_DEBUG_KMS("Using DDC pin 0x%x for port %c (VBT)\n",
- ddc_pin, port_name(port));
+ drm_dbg_kms(&dev_priv->drm,
+ "Using DDC pin 0x%x for port %c (VBT)\n",
+ ddc_pin, port_name(port));
return ddc_pin;
}
@@ -3084,8 +3132,9 @@ static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder)
else
ddc_pin = g4x_port_to_ddc_pin(dev_priv, port);
- DRM_DEBUG_KMS("Using DDC pin 0x%x for port %c (platform default)\n",
- ddc_pin, port_name(port));
+ drm_dbg_kms(&dev_priv->drm,
+ "Using DDC pin 0x%x for port %c (platform default)\n",
+ ddc_pin, port_name(port));
return ddc_pin;
}
@@ -3142,8 +3191,9 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
enum port port = intel_encoder->port;
struct cec_connector_info conn_info;
- DRM_DEBUG_KMS("Adding HDMI connector on [ENCODER:%d:%s]\n",
- intel_encoder->base.base.id, intel_encoder->base.name);
+ drm_dbg_kms(&dev_priv->drm,
+ "Adding HDMI connector on [ENCODER:%d:%s]\n",
+ intel_encoder->base.base.id, intel_encoder->base.name);
if (INTEL_GEN(dev_priv) < 12 && drm_WARN_ON(dev, port == PORT_A))
return;
@@ -3187,7 +3237,8 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
int ret = intel_hdcp_init(intel_connector,
&intel_hdmi_hdcp_shim);
if (ret)
- DRM_DEBUG_KMS("HDCP init failed, skipping.\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "HDCP init failed, skipping.\n");
}
/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
@@ -3206,16 +3257,16 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
cec_notifier_conn_register(dev->dev, port_identifier(port),
&conn_info);
if (!intel_hdmi->cec_notifier)
- DRM_DEBUG_KMS("CEC notifier get failed\n");
+ drm_dbg_kms(&dev_priv->drm, "CEC notifier get failed\n");
}
static enum intel_hotplug_state
intel_hdmi_hotplug(struct intel_encoder *encoder,
- struct intel_connector *connector, bool irq_received)
+ struct intel_connector *connector)
{
enum intel_hotplug_state state;
- state = intel_encoder_hotplug(encoder, connector, irq_received);
+ state = intel_encoder_hotplug(encoder, connector);
/*
* On many platforms the HDMI live state signal is known to be
@@ -3229,7 +3280,7 @@ intel_hdmi_hotplug(struct intel_encoder *encoder,
* time around we didn't detect any change in the sink's connection
* status.
*/
- if (state == INTEL_HOTPLUG_UNCHANGED && irq_received)
+ if (state == INTEL_HOTPLUG_UNCHANGED && !connector->hotplug_retries)
state = INTEL_HOTPLUG_RETRY;
return state;
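/*
 * [Editor's illustration -- not part of the patch] The hotplug rework drops
 * the irq_received argument: i915_hotplug_work_func() now clears
 * connector->hotplug_retries when a real HPD IRQ bit is present and bumps it
 * on each retry pass, so intel_hdmi_hotplug() asks for a retry only on the
 * first, IRQ-driven pass. The retry decision, written out as a hypothetical
 * helper:
 */
static bool hdmi_hotplug_should_retry(const struct intel_connector *connector,
				      enum intel_hotplug_state state)
{
	return state == INTEL_HOTPLUG_UNCHANGED && !connector->hotplug_retries;
}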
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c
index a091442efba4..4f6f560e093e 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.c
@@ -270,8 +270,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
enum intel_hotplug_state
intel_encoder_hotplug(struct intel_encoder *encoder,
- struct intel_connector *connector,
- bool irq_received)
+ struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
enum drm_connector_status old_status;
@@ -392,12 +391,17 @@ static void i915_hotplug_work_func(struct work_struct *work)
struct intel_encoder *encoder =
intel_attached_encoder(connector);
+ if (hpd_event_bits & hpd_bit)
+ connector->hotplug_retries = 0;
+ else
+ connector->hotplug_retries++;
+
drm_dbg_kms(&dev_priv->drm,
- "Connector %s (pin %i) received hotplug event.\n",
- connector->base.name, pin);
+ "Connector %s (pin %i) received hotplug event. (retry %d)\n",
+ connector->base.name, pin,
+ connector->hotplug_retries);
- switch (encoder->hotplug(encoder, connector,
- hpd_event_bits & hpd_bit)) {
+ switch (encoder->hotplug(encoder, connector)) {
case INTEL_HOTPLUG_UNCHANGED:
break;
case INTEL_HOTPLUG_CHANGED:
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.h b/drivers/gpu/drm/i915/display/intel_hotplug.h
index 1e6b4fda2900..777b0743257e 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.h
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.h
@@ -15,8 +15,7 @@ enum port;
void intel_hpd_poll_init(struct drm_i915_private *dev_priv);
enum intel_hotplug_state intel_encoder_hotplug(struct intel_encoder *encoder,
- struct intel_connector *connector,
- bool irq_received);
+ struct intel_connector *connector);
void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
u32 pin_mask, u32 long_mask);
void intel_hpd_init(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c
index d807c5648c87..6ff7b226f0a1 100644
--- a/drivers/gpu/drm/i915/display/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.c
@@ -522,7 +522,7 @@ u32 lspcon_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
/* FIXME actually read this from the hw */
- return enc_to_intel_lspcon(encoder)->active;
+ return 0;
}
void lspcon_resume(struct intel_lspcon *lspcon)
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
index 9a067effcfa0..872f2a489339 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.c
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
@@ -220,7 +220,8 @@ static void intel_lvds_pps_init_hw(struct drm_i915_private *dev_priv,
REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, pps->divider) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(pps->t4, 1000) + 1));
}
-static void intel_pre_enable_lvds(struct intel_encoder *encoder,
+static void intel_pre_enable_lvds(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -301,7 +302,8 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder,
/*
* Sets the power state for the panel.
*/
-static void intel_enable_lvds(struct intel_encoder *encoder,
+static void intel_enable_lvds(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -323,7 +325,8 @@ static void intel_enable_lvds(struct intel_encoder *encoder,
intel_panel_enable_backlight(pipe_config, conn_state);
}
-static void intel_disable_lvds(struct intel_encoder *encoder,
+static void intel_disable_lvds(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -341,28 +344,31 @@ static void intel_disable_lvds(struct intel_encoder *encoder,
intel_de_posting_read(dev_priv, lvds_encoder->reg);
}
-static void gmch_disable_lvds(struct intel_encoder *encoder,
+static void gmch_disable_lvds(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
intel_panel_disable_backlight(old_conn_state);
- intel_disable_lvds(encoder, old_crtc_state, old_conn_state);
+ intel_disable_lvds(state, encoder, old_crtc_state, old_conn_state);
}
-static void pch_disable_lvds(struct intel_encoder *encoder,
+static void pch_disable_lvds(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
intel_panel_disable_backlight(old_conn_state);
}
-static void pch_post_disable_lvds(struct intel_encoder *encoder,
+static void pch_post_disable_lvds(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- intel_disable_lvds(encoder, old_crtc_state, old_conn_state);
+ intel_disable_lvds(state, encoder, old_crtc_state, old_conn_state);
}
static enum drm_mode_status
@@ -397,6 +403,7 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder,
struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
unsigned int lvds_bpp;
+ int ret;
/* Should never happen!! */
if (INTEL_GEN(dev_priv) < 4 && intel_crtc->pipe == 0) {
@@ -430,16 +437,15 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return -EINVAL;
- if (HAS_PCH_SPLIT(dev_priv)) {
+ if (HAS_PCH_SPLIT(dev_priv))
pipe_config->has_pch_encoder = true;
- intel_pch_panel_fitting(intel_crtc, pipe_config,
- conn_state->scaling_mode);
- } else {
- intel_gmch_panel_fitting(intel_crtc, pipe_config,
- conn_state->scaling_mode);
-
- }
+ if (HAS_GMCH(dev_priv))
+ ret = intel_gmch_panel_fitting(pipe_config, conn_state);
+ else
+ ret = intel_pch_panel_fitting(pipe_config, conn_state);
+ if (ret)
+ return ret;
/*
* XXX: It would be nice to support lower refresh rates on the
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index 481187223101..66711e62fa71 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -281,7 +281,7 @@ static void intel_overlay_flip_prepare(struct intel_overlay *overlay,
enum pipe pipe = overlay->crtc->pipe;
struct intel_frontbuffer *from = NULL, *to = NULL;
- WARN_ON(overlay->old_vma);
+ drm_WARN_ON(&overlay->i915->drm, overlay->old_vma);
if (overlay->vma)
from = intel_frontbuffer_get(overlay->vma->obj);
@@ -350,7 +350,7 @@ static void intel_overlay_release_old_vma(struct intel_overlay *overlay)
struct i915_vma *vma;
vma = fetch_and_zero(&overlay->old_vma);
- if (WARN_ON(!vma))
+ if (drm_WARN_ON(&overlay->i915->drm, !vma))
return;
intel_frontbuffer_flip_complete(overlay->i915,
@@ -396,7 +396,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
struct i915_request *rq;
u32 *cs, flip_addr = overlay->flip_addr;
- WARN_ON(!overlay->active);
+ drm_WARN_ON(&overlay->i915->drm, !overlay->active);
/* According to intel docs the overlay hw may hang (when switching
* off) without loading the filter coeffs. It is however unclear whether
@@ -1342,7 +1342,7 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv)
if (!HAS_OVERLAY(dev_priv))
return;
- engine = dev_priv->engine[RCS0];
+ engine = dev_priv->gt.engine[RCS0];
if (!engine || !engine->kernel_context)
return;
diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
index 276f43870802..3c5056dbf607 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.c
+++ b/drivers/gpu/drm/i915/display/intel_panel.c
@@ -176,24 +176,23 @@ intel_panel_vbt_fixed_mode(struct intel_connector *connector)
}
/* adjusted_mode has been preset to be the panel's fixed mode */
-void
-intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
- struct intel_crtc_state *pipe_config,
- int fitting_mode)
+int intel_pch_panel_fitting(struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
- const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
- int x = 0, y = 0, width = 0, height = 0;
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+ int x, y, width, height;
/* Native modes don't need fitting */
- if (adjusted_mode->crtc_hdisplay == pipe_config->pipe_src_w &&
- adjusted_mode->crtc_vdisplay == pipe_config->pipe_src_h &&
- pipe_config->output_format != INTEL_OUTPUT_FORMAT_YCBCR420)
- goto done;
+ if (adjusted_mode->crtc_hdisplay == crtc_state->pipe_src_w &&
+ adjusted_mode->crtc_vdisplay == crtc_state->pipe_src_h &&
+ crtc_state->output_format != INTEL_OUTPUT_FORMAT_YCBCR420)
+ return 0;
- switch (fitting_mode) {
+ switch (conn_state->scaling_mode) {
case DRM_MODE_SCALE_CENTER:
- width = pipe_config->pipe_src_w;
- height = pipe_config->pipe_src_h;
+ width = crtc_state->pipe_src_w;
+ height = crtc_state->pipe_src_h;
x = (adjusted_mode->crtc_hdisplay - width + 1)/2;
y = (adjusted_mode->crtc_vdisplay - height + 1)/2;
break;
@@ -202,18 +201,18 @@ intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
/* Scale but preserve the aspect ratio */
{
u32 scaled_width = adjusted_mode->crtc_hdisplay
- * pipe_config->pipe_src_h;
- u32 scaled_height = pipe_config->pipe_src_w
+ * crtc_state->pipe_src_h;
+ u32 scaled_height = crtc_state->pipe_src_w
* adjusted_mode->crtc_vdisplay;
if (scaled_width > scaled_height) { /* pillar */
- width = scaled_height / pipe_config->pipe_src_h;
+ width = scaled_height / crtc_state->pipe_src_h;
if (width & 1)
width++;
x = (adjusted_mode->crtc_hdisplay - width + 1) / 2;
y = 0;
height = adjusted_mode->crtc_vdisplay;
} else if (scaled_width < scaled_height) { /* letter */
- height = scaled_width / pipe_config->pipe_src_w;
+ height = scaled_width / crtc_state->pipe_src_w;
if (height & 1)
height++;
y = (adjusted_mode->crtc_vdisplay - height + 1) / 2;
@@ -227,6 +226,10 @@ intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
}
break;
+ case DRM_MODE_SCALE_NONE:
+ WARN_ON(adjusted_mode->crtc_hdisplay != crtc_state->pipe_src_w);
+ WARN_ON(adjusted_mode->crtc_vdisplay != crtc_state->pipe_src_h);
+ /* fall through */
case DRM_MODE_SCALE_FULLSCREEN:
x = y = 0;
width = adjusted_mode->crtc_hdisplay;
@@ -234,14 +237,15 @@ intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
break;
default:
- WARN(1, "bad panel fit mode: %d\n", fitting_mode);
- return;
+ MISSING_CASE(conn_state->scaling_mode);
+ return -EINVAL;
}
-done:
- pipe_config->pch_pfit.pos = (x << 16) | y;
- pipe_config->pch_pfit.size = (width << 16) | height;
- pipe_config->pch_pfit.enabled = pipe_config->pch_pfit.size != 0;
+ drm_rect_init(&crtc_state->pch_pfit.dst,
+ x, y, width, height);
+ crtc_state->pch_pfit.enabled = true;
+
+ return 0;
}
static void
@@ -287,7 +291,7 @@ centre_vertically(struct drm_display_mode *adjusted_mode,
adjusted_mode->crtc_vsync_end = adjusted_mode->crtc_vsync_start + sync_width;
}
-static inline u32 panel_fitter_scaling(u32 source, u32 target)
+static u32 panel_fitter_scaling(u32 source, u32 target)
{
/*
* Floating point operation is not supported. So the FACTOR
@@ -300,13 +304,14 @@ static inline u32 panel_fitter_scaling(u32 source, u32 target)
return (FACTOR * ratio + FACTOR/2) / FACTOR;
}
-static void i965_scale_aspect(struct intel_crtc_state *pipe_config,
+static void i965_scale_aspect(struct intel_crtc_state *crtc_state,
u32 *pfit_control)
{
- const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
u32 scaled_width = adjusted_mode->crtc_hdisplay *
- pipe_config->pipe_src_h;
- u32 scaled_height = pipe_config->pipe_src_w *
+ crtc_state->pipe_src_h;
+ u32 scaled_height = crtc_state->pipe_src_w *
adjusted_mode->crtc_vdisplay;
/* 965+ is easy, it does everything in hw */
@@ -316,18 +321,18 @@ static void i965_scale_aspect(struct intel_crtc_state *pipe_config,
else if (scaled_width < scaled_height)
*pfit_control |= PFIT_ENABLE |
PFIT_SCALING_LETTER;
- else if (adjusted_mode->crtc_hdisplay != pipe_config->pipe_src_w)
+ else if (adjusted_mode->crtc_hdisplay != crtc_state->pipe_src_w)
*pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO;
}
-static void i9xx_scale_aspect(struct intel_crtc_state *pipe_config,
+static void i9xx_scale_aspect(struct intel_crtc_state *crtc_state,
u32 *pfit_control, u32 *pfit_pgm_ratios,
u32 *border)
{
- struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
+ struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
u32 scaled_width = adjusted_mode->crtc_hdisplay *
- pipe_config->pipe_src_h;
- u32 scaled_height = pipe_config->pipe_src_w *
+ crtc_state->pipe_src_h;
+ u32 scaled_height = crtc_state->pipe_src_w *
adjusted_mode->crtc_vdisplay;
u32 bits;
@@ -339,11 +344,11 @@ static void i9xx_scale_aspect(struct intel_crtc_state *pipe_config,
if (scaled_width > scaled_height) { /* pillar */
centre_horizontally(adjusted_mode,
scaled_height /
- pipe_config->pipe_src_h);
+ crtc_state->pipe_src_h);
*border = LVDS_BORDER_ENABLE;
- if (pipe_config->pipe_src_h != adjusted_mode->crtc_vdisplay) {
- bits = panel_fitter_scaling(pipe_config->pipe_src_h,
+ if (crtc_state->pipe_src_h != adjusted_mode->crtc_vdisplay) {
+ bits = panel_fitter_scaling(crtc_state->pipe_src_h,
adjusted_mode->crtc_vdisplay);
*pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
@@ -355,11 +360,11 @@ static void i9xx_scale_aspect(struct intel_crtc_state *pipe_config,
} else if (scaled_width < scaled_height) { /* letter */
centre_vertically(adjusted_mode,
scaled_width /
- pipe_config->pipe_src_w);
+ crtc_state->pipe_src_w);
*border = LVDS_BORDER_ENABLE;
- if (pipe_config->pipe_src_w != adjusted_mode->crtc_hdisplay) {
- bits = panel_fitter_scaling(pipe_config->pipe_src_w,
+ if (crtc_state->pipe_src_w != adjusted_mode->crtc_hdisplay) {
+ bits = panel_fitter_scaling(crtc_state->pipe_src_w,
adjusted_mode->crtc_hdisplay);
*pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
@@ -377,35 +382,35 @@ static void i9xx_scale_aspect(struct intel_crtc_state *pipe_config,
}
}
-void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
- struct intel_crtc_state *pipe_config,
- int fitting_mode)
+int intel_gmch_panel_fitting(struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
- struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
+ struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
/* Native modes don't need fitting */
- if (adjusted_mode->crtc_hdisplay == pipe_config->pipe_src_w &&
- adjusted_mode->crtc_vdisplay == pipe_config->pipe_src_h)
+ if (adjusted_mode->crtc_hdisplay == crtc_state->pipe_src_w &&
+ adjusted_mode->crtc_vdisplay == crtc_state->pipe_src_h)
goto out;
- switch (fitting_mode) {
+ switch (conn_state->scaling_mode) {
case DRM_MODE_SCALE_CENTER:
/*
* For centered modes, we have to calculate border widths &
* heights and modify the values programmed into the CRTC.
*/
- centre_horizontally(adjusted_mode, pipe_config->pipe_src_w);
- centre_vertically(adjusted_mode, pipe_config->pipe_src_h);
+ centre_horizontally(adjusted_mode, crtc_state->pipe_src_w);
+ centre_vertically(adjusted_mode, crtc_state->pipe_src_h);
border = LVDS_BORDER_ENABLE;
break;
case DRM_MODE_SCALE_ASPECT:
/* Scale but preserve the aspect ratio */
if (INTEL_GEN(dev_priv) >= 4)
- i965_scale_aspect(pipe_config, &pfit_control);
+ i965_scale_aspect(crtc_state, &pfit_control);
else
- i9xx_scale_aspect(pipe_config, &pfit_control,
+ i9xx_scale_aspect(crtc_state, &pfit_control,
&pfit_pgm_ratios, &border);
break;
case DRM_MODE_SCALE_FULLSCREEN:
@@ -413,8 +418,8 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
* Full scaling, even if it changes the aspect ratio.
* Fortunately this is all done for us in hw.
*/
- if (pipe_config->pipe_src_h != adjusted_mode->crtc_vdisplay ||
- pipe_config->pipe_src_w != adjusted_mode->crtc_hdisplay) {
+ if (crtc_state->pipe_src_h != adjusted_mode->crtc_vdisplay ||
+ crtc_state->pipe_src_w != adjusted_mode->crtc_hdisplay) {
pfit_control |= PFIT_ENABLE;
if (INTEL_GEN(dev_priv) >= 4)
pfit_control |= PFIT_SCALING_AUTO;
@@ -426,15 +431,14 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
}
break;
default:
- drm_WARN(&dev_priv->drm, 1, "bad panel fit mode: %d\n",
- fitting_mode);
- return;
+ MISSING_CASE(conn_state->scaling_mode);
+ return -EINVAL;
}
/* 965+ wants fuzzy fitting */
/* FIXME: handle multiple panels by failing gracefully */
if (INTEL_GEN(dev_priv) >= 4)
- pfit_control |= PFIT_PIPE(intel_crtc->pipe) | PFIT_FILTER_FUZZY;
+ pfit_control |= PFIT_PIPE(crtc->pipe) | PFIT_FILTER_FUZZY;
out:
if ((pfit_control & PFIT_ENABLE) == 0) {
@@ -443,12 +447,14 @@ out:
}
/* Make sure pre-965 set dither correctly for 18bpp panels. */
- if (INTEL_GEN(dev_priv) < 4 && pipe_config->pipe_bpp == 18)
+ if (INTEL_GEN(dev_priv) < 4 && crtc_state->pipe_bpp == 18)
pfit_control |= PANEL_8TO6_DITHER_ENABLE;
- pipe_config->gmch_pfit.control = pfit_control;
- pipe_config->gmch_pfit.pgm_ratios = pfit_pgm_ratios;
- pipe_config->gmch_pfit.lvds_border_bits = border;
+ crtc_state->gmch_pfit.control = pfit_control;
+ crtc_state->gmch_pfit.pgm_ratios = pfit_pgm_ratios;
+ crtc_state->gmch_pfit.lvds_border_bits = border;
+
+ return 0;
}
/**
@@ -483,20 +489,10 @@ static u32 scale(u32 source_val,
return target_val;
}
-/* Scale user_level in range [0..user_max] to [hw_min..hw_max]. */
-static inline u32 scale_user_to_hw(struct intel_connector *connector,
- u32 user_level, u32 user_max)
-{
- struct intel_panel *panel = &connector->panel;
-
- return scale(user_level, 0, user_max,
- panel->backlight.min, panel->backlight.max);
-}
-
/* Scale user_level in range [0..user_max] to [0..hw_max], clamping the result
* to [hw_min..hw_max]. */
-static inline u32 clamp_user_to_hw(struct intel_connector *connector,
- u32 user_level, u32 user_max)
+static u32 clamp_user_to_hw(struct intel_connector *connector,
+ u32 user_level, u32 user_max)
{
struct intel_panel *panel = &connector->panel;
u32 hw_level;
@@ -508,8 +504,8 @@ static inline u32 clamp_user_to_hw(struct intel_connector *connector,
}
/* Scale hw_level in range [hw_min..hw_max] to [0..user_max]. */
-static inline u32 scale_hw_to_user(struct intel_connector *connector,
- u32 hw_level, u32 user_max)
+static u32 scale_hw_to_user(struct intel_connector *connector,
+ u32 hw_level, u32 user_max)
{
struct intel_panel *panel = &connector->panel;
@@ -684,9 +680,10 @@ static void
intel_panel_actually_set_backlight(const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level);
+ drm_dbg_kms(&i915->drm, "set backlight PWM = %d\n", level);
level = intel_panel_compute_brightness(connector, level);
panel->backlight.set(conn_state, level);
@@ -867,8 +864,8 @@ void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_st
* another client is not activated.
*/
if (dev_priv->drm.switch_power_state == DRM_SWITCH_POWER_CHANGING) {
- drm_dbg(&dev_priv->drm,
- "Skipping backlight disable on vga switch\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Skipping backlight disable on vga switch\n");
return;
}
@@ -1244,10 +1241,20 @@ static u32 intel_panel_get_backlight(struct intel_connector *connector)
mutex_unlock(&dev_priv->backlight_lock);
- drm_dbg(&dev_priv->drm, "get backlight PWM = %d\n", val);
+ drm_dbg_kms(&dev_priv->drm, "get backlight PWM = %d\n", val);
return val;
}
+/* Scale user_level in range [0..user_max] to [hw_min..hw_max]. */
+static u32 scale_user_to_hw(struct intel_connector *connector,
+ u32 user_level, u32 user_max)
+{
+ struct intel_panel *panel = &connector->panel;
+
+ return scale(user_level, 0, user_max,
+ panel->backlight.min, panel->backlight.max);
+}
+
/* set backlight brightness to level in range [0..max], scaling wrt hw min */
static void intel_panel_set_backlight(const struct drm_connector_state *conn_state,
u32 user_level, u32 user_max)
@@ -1335,6 +1342,7 @@ static const struct backlight_ops intel_backlight_device_ops = {
int intel_backlight_device_register(struct intel_connector *connector)
{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
struct backlight_properties props;
@@ -1374,14 +1382,15 @@ int intel_backlight_device_register(struct intel_connector *connector)
&intel_backlight_device_ops, &props);
if (IS_ERR(panel->backlight.device)) {
- DRM_ERROR("Failed to register backlight: %ld\n",
- PTR_ERR(panel->backlight.device));
+ drm_err(&i915->drm, "Failed to register backlight: %ld\n",
+ PTR_ERR(panel->backlight.device));
panel->backlight.device = NULL;
return -ENODEV;
}
- DRM_DEBUG_KMS("Connector %s backlight sysfs interface registered\n",
- connector->base.name);
+ drm_dbg_kms(&i915->drm,
+ "Connector %s backlight sysfs interface registered\n",
+ connector->base.name);
return 0;
}
@@ -1931,7 +1940,8 @@ static int pwm_setup_backlight(struct intel_connector *connector,
return 0;
}
-void intel_panel_update_backlight(struct intel_encoder *encoder,
+void intel_panel_update_backlight(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
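For context on the panel_fitter_scaling() helper shown earlier in this file's diff (only its "inline" qualifier is dropped here): it avoids floating point by scaling the source/target ratio by a power-of-two FACTOR and folding in a FACTOR/2 term before the final divide. Below is a minimal standalone sketch of that integer-only idiom; the ACCURACY value and the inputs in main() are assumptions for illustration, not values taken from the driver.

#include <stdint.h>
#include <stdio.h>

/* Standalone illustration of the integer-only scaling idiom; ACCURACY is assumed. */
#define ACCURACY 12
#define FACTOR (1 << ACCURACY)

static uint32_t fitter_scaling(uint32_t source, uint32_t target)
{
	uint32_t ratio = source * FACTOR / target;	/* fixed-point source/target */

	/* same shape as the driver's final expression; everything stays integral */
	return (FACTOR * ratio + FACTOR / 2) / FACTOR;
}

int main(void)
{
	/* e.g. a 1920-pixel-wide source fitted onto a 1280-pixel-wide timing */
	printf("ratio = %u (%.3f)\n", fitter_scaling(1920, 1280),
	       fitter_scaling(1920, 1280) / (double)FACTOR);	/* 6144, i.e. 1.500 */
	return 0;
}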
diff --git a/drivers/gpu/drm/i915/display/intel_panel.h b/drivers/gpu/drm/i915/display/intel_panel.h
index cedeea443336..968b95281cb4 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.h
+++ b/drivers/gpu/drm/i915/display/intel_panel.h
@@ -25,19 +25,18 @@ int intel_panel_init(struct intel_panel *panel,
void intel_panel_fini(struct intel_panel *panel);
void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
struct drm_display_mode *adjusted_mode);
-void intel_pch_panel_fitting(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config,
- int fitting_mode);
-void intel_gmch_panel_fitting(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config,
- int fitting_mode);
+int intel_pch_panel_fitting(struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+int intel_gmch_panel_fitting(struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
void intel_panel_set_backlight_acpi(const struct drm_connector_state *conn_state,
u32 level, u32 max);
int intel_panel_setup_backlight(struct drm_connector *connector,
enum pipe pipe);
void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
-void intel_panel_update_backlight(struct intel_encoder *encoder,
+void intel_panel_update_backlight(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_state);
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index fd9b146e3aba..b7a2c102648a 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -30,6 +30,7 @@
#include "intel_display_types.h"
#include "intel_psr.h"
#include "intel_sprite.h"
+#include "intel_hdmi.h"
/**
* DOC: Panel Self Refresh (PSR/SRD)
@@ -137,41 +138,42 @@ static void psr_irq_control(struct drm_i915_private *dev_priv)
intel_de_write(dev_priv, imr_reg, val);
}
-static void psr_event_print(u32 val, bool psr2_enabled)
+static void psr_event_print(struct drm_i915_private *i915,
+ u32 val, bool psr2_enabled)
{
- DRM_DEBUG_KMS("PSR exit events: 0x%x\n", val);
+ drm_dbg_kms(&i915->drm, "PSR exit events: 0x%x\n", val);
if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
- DRM_DEBUG_KMS("\tPSR2 watchdog timer expired\n");
+ drm_dbg_kms(&i915->drm, "\tPSR2 watchdog timer expired\n");
if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
- DRM_DEBUG_KMS("\tPSR2 disabled\n");
+ drm_dbg_kms(&i915->drm, "\tPSR2 disabled\n");
if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
- DRM_DEBUG_KMS("\tSU dirty FIFO underrun\n");
+ drm_dbg_kms(&i915->drm, "\tSU dirty FIFO underrun\n");
if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
- DRM_DEBUG_KMS("\tSU CRC FIFO underrun\n");
+ drm_dbg_kms(&i915->drm, "\tSU CRC FIFO underrun\n");
if (val & PSR_EVENT_GRAPHICS_RESET)
- DRM_DEBUG_KMS("\tGraphics reset\n");
+ drm_dbg_kms(&i915->drm, "\tGraphics reset\n");
if (val & PSR_EVENT_PCH_INTERRUPT)
- DRM_DEBUG_KMS("\tPCH interrupt\n");
+ drm_dbg_kms(&i915->drm, "\tPCH interrupt\n");
if (val & PSR_EVENT_MEMORY_UP)
- DRM_DEBUG_KMS("\tMemory up\n");
+ drm_dbg_kms(&i915->drm, "\tMemory up\n");
if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
- DRM_DEBUG_KMS("\tFront buffer modification\n");
+ drm_dbg_kms(&i915->drm, "\tFront buffer modification\n");
if (val & PSR_EVENT_WD_TIMER_EXPIRE)
- DRM_DEBUG_KMS("\tPSR watchdog timer expired\n");
+ drm_dbg_kms(&i915->drm, "\tPSR watchdog timer expired\n");
if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
- DRM_DEBUG_KMS("\tPIPE registers updated\n");
+ drm_dbg_kms(&i915->drm, "\tPIPE registers updated\n");
if (val & PSR_EVENT_REGISTER_UPDATE)
- DRM_DEBUG_KMS("\tRegister updated\n");
+ drm_dbg_kms(&i915->drm, "\tRegister updated\n");
if (val & PSR_EVENT_HDCP_ENABLE)
- DRM_DEBUG_KMS("\tHDCP enabled\n");
+ drm_dbg_kms(&i915->drm, "\tHDCP enabled\n");
if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
- DRM_DEBUG_KMS("\tKVMR session enabled\n");
+ drm_dbg_kms(&i915->drm, "\tKVMR session enabled\n");
if (val & PSR_EVENT_VBI_ENABLE)
- DRM_DEBUG_KMS("\tVBI enabled\n");
+ drm_dbg_kms(&i915->drm, "\tVBI enabled\n");
if (val & PSR_EVENT_LPSP_MODE_EXIT)
- DRM_DEBUG_KMS("\tLPSP mode exited\n");
+ drm_dbg_kms(&i915->drm, "\tLPSP mode exited\n");
if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
- DRM_DEBUG_KMS("\tPSR disabled\n");
+ drm_dbg_kms(&i915->drm, "\tPSR disabled\n");
}
void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
@@ -209,7 +211,7 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
intel_de_write(dev_priv, PSR_EVENT(cpu_transcoder),
val);
- psr_event_print(val, psr2_enabled);
+ psr_event_print(dev_priv, val, psr2_enabled);
}
}
@@ -249,18 +251,21 @@ static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u8 val = 8; /* assume the worst if we can't read the value */
if (drm_dp_dpcd_readb(&intel_dp->aux,
DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
else
- DRM_DEBUG_KMS("Unable to get sink synchronization latency, assuming 8 frames\n");
+ drm_dbg_kms(&i915->drm,
+ "Unable to get sink synchronization latency, assuming 8 frames\n");
return val;
}
static u16 intel_dp_get_su_x_granulartiy(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u16 val;
ssize_t r;
@@ -273,7 +278,8 @@ static u16 intel_dp_get_su_x_granulartiy(struct intel_dp *intel_dp)
r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &val, 2);
if (r != 2)
- DRM_DEBUG_KMS("Unable to read DP_PSR2_SU_X_GRANULARITY\n");
+ drm_dbg_kms(&i915->drm,
+ "Unable to read DP_PSR2_SU_X_GRANULARITY\n");
/*
* Spec says that if the value read is 0 the default granularity should
@@ -352,39 +358,6 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
}
}
-static void intel_psr_setup_vsc(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
-{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct dp_sdp psr_vsc;
-
- if (dev_priv->psr.psr2_enabled) {
- /* Prepare VSC Header for SU as per EDP 1.4 spec, Table 6.11 */
- memset(&psr_vsc, 0, sizeof(psr_vsc));
- psr_vsc.sdp_header.HB0 = 0;
- psr_vsc.sdp_header.HB1 = 0x7;
- if (dev_priv->psr.colorimetry_support) {
- psr_vsc.sdp_header.HB2 = 0x5;
- psr_vsc.sdp_header.HB3 = 0x13;
- } else {
- psr_vsc.sdp_header.HB2 = 0x4;
- psr_vsc.sdp_header.HB3 = 0xe;
- }
- } else {
- /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
- memset(&psr_vsc, 0, sizeof(psr_vsc));
- psr_vsc.sdp_header.HB0 = 0;
- psr_vsc.sdp_header.HB1 = 0x7;
- psr_vsc.sdp_header.HB2 = 0x2;
- psr_vsc.sdp_header.HB3 = 0x8;
- }
-
- intel_dig_port->write_infoframe(&intel_dig_port->base,
- crtc_state,
- DP_SDP_VSC, &psr_vsc, sizeof(psr_vsc));
-}
-
static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
@@ -751,6 +724,8 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
if (intel_dp != dev_priv->psr.dp)
return;
+ if (!psr_global_enabled(dev_priv))
+ return;
/*
* HSW spec explicitly says PSR is tied to port A.
* BDW+ platforms have an instance of PSR registers per transcoder but
@@ -793,6 +768,7 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
crtc_state->has_psr = true;
crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
+ crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
}
static void intel_psr_activate(struct intel_dp *intel_dp)
@@ -875,9 +851,12 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
}
static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
- const struct intel_crtc_state *crtc_state)
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
struct intel_dp *intel_dp = dev_priv->psr.dp;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct intel_encoder *encoder = &intel_dig_port->base;
u32 val;
drm_WARN_ON(&dev_priv->drm, dev_priv->psr.enabled);
@@ -916,7 +895,9 @@ static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
dev_priv->psr.psr2_enabled ? "2" : "1");
- intel_psr_setup_vsc(intel_dp, crtc_state);
+ intel_dp_compute_psr_vsc_sdp(intel_dp, crtc_state, conn_state,
+ &dev_priv->psr.vsc);
+ intel_write_dp_vsc_sdp(encoder, crtc_state, &dev_priv->psr.vsc);
intel_psr_enable_sink(intel_dp);
intel_psr_enable_source(intel_dp, crtc_state);
dev_priv->psr.enabled = true;
@@ -928,11 +909,13 @@ static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
* intel_psr_enable - Enable PSR
* @intel_dp: Intel DP
* @crtc_state: new CRTC state
+ * @conn_state: new CONNECTOR state
*
* This function can only be called after the pipe is fully trained and enabled.
*/
void intel_psr_enable(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
@@ -953,7 +936,7 @@ void intel_psr_enable(struct intel_dp *intel_dp,
goto unlock;
}
- intel_psr_enable_locked(dev_priv, crtc_state);
+ intel_psr_enable_locked(dev_priv, crtc_state, conn_state);
unlock:
mutex_unlock(&dev_priv->psr.lock);
@@ -1086,13 +1069,15 @@ static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv)
* intel_psr_update - Update PSR state
* @intel_dp: Intel DP
* @crtc_state: new CRTC state
+ * @conn_state: new CONNECTOR state
*
* This function will update PSR states, disabling, enabling or switching PSR
* version when executing fastsets. For full modeset, intel_psr_disable() and
* intel_psr_enable() should be called instead.
*/
void intel_psr_update(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct i915_psr *psr = &dev_priv->psr;
@@ -1129,7 +1114,7 @@ void intel_psr_update(struct intel_dp *intel_dp,
intel_psr_disable_locked(intel_dp);
if (enable)
- intel_psr_enable_locked(dev_priv, crtc_state);
+ intel_psr_enable_locked(dev_priv, crtc_state, conn_state);
unlock:
mutex_unlock(&dev_priv->psr.lock);
diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h
index 274fc6bb6221..b4515186d5f4 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.h
+++ b/drivers/gpu/drm/i915/display/intel_psr.h
@@ -17,11 +17,13 @@ struct intel_dp;
#define CAN_PSR(dev_priv) (HAS_PSR(dev_priv) && dev_priv->psr.sink_support)
void intel_psr_init_dpcd(struct intel_dp *intel_dp);
void intel_psr_enable(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state);
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
void intel_psr_disable(struct intel_dp *intel_dp,
const struct intel_crtc_state *old_crtc_state);
void intel_psr_update(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state);
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 value);
void intel_psr_invalidate(struct drm_i915_private *dev_priv,
unsigned frontbuffer_bits,
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index 637d8fe2f8c2..bc6c26818e15 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -1430,7 +1430,8 @@ static void intel_sdvo_update_props(struct intel_sdvo *intel_sdvo,
#undef UPDATE_PROPERTY
}
-static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
+static void intel_sdvo_pre_enable(struct intel_atomic_state *state,
+ struct intel_encoder *intel_encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
@@ -1727,7 +1728,8 @@ static void intel_sdvo_enable_audio(struct intel_sdvo *intel_sdvo,
SDVO_AUDIO_PRESENCE_DETECT);
}
-static void intel_disable_sdvo(struct intel_encoder *encoder,
+static void intel_disable_sdvo(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *conn_state)
{
@@ -1775,20 +1777,23 @@ static void intel_disable_sdvo(struct intel_encoder *encoder,
}
}
-static void pch_disable_sdvo(struct intel_encoder *encoder,
+static void pch_disable_sdvo(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
}
-static void pch_post_disable_sdvo(struct intel_encoder *encoder,
+static void pch_post_disable_sdvo(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- intel_disable_sdvo(encoder, old_crtc_state, old_conn_state);
+ intel_disable_sdvo(state, encoder, old_crtc_state, old_conn_state);
}
-static void intel_enable_sdvo(struct intel_encoder *encoder,
+static void intel_enable_sdvo(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -1934,12 +1939,11 @@ static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder)
static enum intel_hotplug_state
intel_sdvo_hotplug(struct intel_encoder *encoder,
- struct intel_connector *connector,
- bool irq_received)
+ struct intel_connector *connector)
{
intel_sdvo_enable_hotplug(encoder);
- return intel_encoder_hotplug(encoder, connector, irq_received);
+ return intel_encoder_hotplug(encoder, connector);
}
static bool
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index 33d886141138..0000ec7055f7 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -2503,6 +2503,7 @@ static const u32 skl_plane_formats[] = {
DRM_FORMAT_YVYU,
DRM_FORMAT_UYVY,
DRM_FORMAT_VYUY,
+ DRM_FORMAT_XYUV8888,
};
static const u32 skl_planar_formats[] = {
@@ -2521,6 +2522,7 @@ static const u32 skl_planar_formats[] = {
DRM_FORMAT_UYVY,
DRM_FORMAT_VYUY,
DRM_FORMAT_NV12,
+ DRM_FORMAT_XYUV8888,
};
static const u32 glk_planar_formats[] = {
@@ -2539,6 +2541,7 @@ static const u32 glk_planar_formats[] = {
DRM_FORMAT_UYVY,
DRM_FORMAT_VYUY,
DRM_FORMAT_NV12,
+ DRM_FORMAT_XYUV8888,
DRM_FORMAT_P010,
DRM_FORMAT_P012,
DRM_FORMAT_P016,
@@ -2562,6 +2565,7 @@ static const u32 icl_sdr_y_plane_formats[] = {
DRM_FORMAT_Y210,
DRM_FORMAT_Y212,
DRM_FORMAT_Y216,
+ DRM_FORMAT_XYUV8888,
DRM_FORMAT_XVYU2101010,
DRM_FORMAT_XVYU12_16161616,
DRM_FORMAT_XVYU16161616,
@@ -2589,6 +2593,7 @@ static const u32 icl_sdr_uv_plane_formats[] = {
DRM_FORMAT_Y210,
DRM_FORMAT_Y212,
DRM_FORMAT_Y216,
+ DRM_FORMAT_XYUV8888,
DRM_FORMAT_XVYU2101010,
DRM_FORMAT_XVYU12_16161616,
DRM_FORMAT_XVYU16161616,
@@ -2620,6 +2625,7 @@ static const u32 icl_hdr_plane_formats[] = {
DRM_FORMAT_Y210,
DRM_FORMAT_Y212,
DRM_FORMAT_Y216,
+ DRM_FORMAT_XYUV8888,
DRM_FORMAT_XVYU2101010,
DRM_FORMAT_XVYU12_16161616,
DRM_FORMAT_XVYU16161616,
@@ -2790,6 +2796,7 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
case DRM_FORMAT_UYVY:
case DRM_FORMAT_VYUY:
case DRM_FORMAT_NV12:
+ case DRM_FORMAT_XYUV8888:
case DRM_FORMAT_P010:
case DRM_FORMAT_P012:
case DRM_FORMAT_P016:
@@ -2860,6 +2867,7 @@ static bool gen12_plane_format_mod_supported(struct drm_plane *_plane,
case DRM_FORMAT_UYVY:
case DRM_FORMAT_VYUY:
case DRM_FORMAT_NV12:
+ case DRM_FORMAT_XYUV8888:
case DRM_FORMAT_P010:
case DRM_FORMAT_P012:
case DRM_FORMAT_P016:
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index 9b850c11aa78..b161c15baf86 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -34,6 +34,7 @@ tc_port_load_fia_params(struct drm_i915_private *i915,
if (INTEL_INFO(i915)->display.has_modular_fia) {
modular_fia = intel_uncore_read(&i915->uncore,
PORT_TX_DFLEXDPSP(FIA1));
+ drm_WARN_ON(&i915->drm, modular_fia == 0xffffffff);
modular_fia &= MODULAR_FIA_MASK;
} else {
modular_fia = 0;
@@ -52,6 +53,62 @@ tc_port_load_fia_params(struct drm_i915_private *i915,
}
}
+static enum intel_display_power_domain
+tc_cold_get_power_domain(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+
+ if (INTEL_GEN(i915) == 11)
+ return intel_legacy_aux_to_power_domain(dig_port->aux_ch);
+ else
+ return POWER_DOMAIN_TC_COLD_OFF;
+}
+
+static intel_wakeref_t
+tc_cold_block(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ enum intel_display_power_domain domain;
+
+ if (INTEL_GEN(i915) == 11 && !dig_port->tc_legacy_port)
+ return 0;
+
+ domain = tc_cold_get_power_domain(dig_port);
+ return intel_display_power_get(i915, domain);
+}
+
+static void
+tc_cold_unblock(struct intel_digital_port *dig_port, intel_wakeref_t wakeref)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ enum intel_display_power_domain domain;
+
+ /*
+ * wakeref == -1 means an error happened while saving save_depot_stack, but
+ * power should still be put down; 0 is an invalid save_depot_stack id, so
+ * it can be used to skip the put for non-TC-legacy ports.

+ */
+ if (wakeref == 0)
+ return;
+
+ domain = tc_cold_get_power_domain(dig_port);
+ intel_display_power_put_async(i915, domain, wakeref);
+}
+
+static void
+assert_tc_cold_blocked(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ bool enabled;
+
+ if (INTEL_GEN(i915) == 11 && !dig_port->tc_legacy_port)
+ return;
+
+ enabled = intel_display_power_is_enabled(i915,
+ tc_cold_get_power_domain(dig_port));
+ drm_WARN_ON(&i915->drm, !enabled);
+}
+
u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
@@ -62,6 +119,7 @@ u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia));
drm_WARN_ON(&i915->drm, lane_mask == 0xffffffff);
+ assert_tc_cold_blocked(dig_port);
lane_mask &= DP_LANE_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx);
return lane_mask >> DP_LANE_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx);
@@ -77,6 +135,7 @@ u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port)
PORT_TX_DFLEXPA1(dig_port->tc_phy_fia));
drm_WARN_ON(&i915->drm, pin_mask == 0xffffffff);
+ assert_tc_cold_blocked(dig_port);
return (pin_mask & DP_PIN_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx)) >>
DP_PIN_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx);
@@ -91,6 +150,8 @@ int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port)
if (dig_port->tc_mode != TC_PORT_DP_ALT)
return 4;
+ assert_tc_cold_blocked(dig_port);
+
lane_mask = 0;
with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref)
lane_mask = intel_tc_port_get_lane_mask(dig_port);
@@ -123,6 +184,8 @@ void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
drm_WARN_ON(&i915->drm,
lane_reversal && dig_port->tc_mode != TC_PORT_LEGACY);
+ assert_tc_cold_blocked(dig_port);
+
val = intel_uncore_read(uncore,
PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia));
val &= ~DFLEXDPMLE1_DPMLETC_MASK(dig_port->tc_phy_fia_idx);
@@ -152,6 +215,7 @@ void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
static void tc_port_fixup_legacy_flag(struct intel_digital_port *dig_port,
u32 live_status_mask)
{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
u32 valid_hpd_mask;
if (dig_port->tc_legacy_port)
@@ -164,8 +228,9 @@ static void tc_port_fixup_legacy_flag(struct intel_digital_port *dig_port,
return;
/* If live status mismatches the VBT flag, trust the live status. */
- DRM_ERROR("Port %s: live status %08x mismatches the legacy port flag, fix flag\n",
- dig_port->tc_port_name, live_status_mask);
+ drm_err(&i915->drm,
+ "Port %s: live status %08x mismatches the legacy port flag, fix flag\n",
+ dig_port->tc_port_name, live_status_mask);
dig_port->tc_legacy_port = !dig_port->tc_legacy_port;
}
@@ -173,8 +238,8 @@ static void tc_port_fixup_legacy_flag(struct intel_digital_port *dig_port,
static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
struct intel_uncore *uncore = &i915->uncore;
+ u32 isr_bit = i915->hotplug.pch_hpd[dig_port->base.hpd_pin];
u32 mask = 0;
u32 val;
@@ -193,7 +258,7 @@ static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port)
if (val & TC_LIVE_STATE_TC(dig_port->tc_phy_fia_idx))
mask |= BIT(TC_PORT_DP_ALT);
- if (intel_uncore_read(uncore, SDEISR) & SDE_TC_HOTPLUG_ICP(tc_port))
+ if (intel_uncore_read(uncore, SDEISR) & isr_bit)
mask |= BIT(TC_PORT_LEGACY);
/* The sink can be connected only in a single mode. */
@@ -233,8 +298,7 @@ static bool icl_tc_phy_set_safe_mode(struct intel_digital_port *dig_port,
if (val == 0xffffffff) {
drm_dbg_kms(&i915->drm,
"Port %s: PHY in TCCOLD, can't set safe-mode to %s\n",
- dig_port->tc_port_name,
- enableddisabled(enable));
+ dig_port->tc_port_name, enableddisabled(enable));
return false;
}
@@ -286,11 +350,12 @@ static bool icl_tc_phy_is_in_safe_mode(struct intel_digital_port *dig_port)
static void icl_tc_phy_connect(struct intel_digital_port *dig_port,
int required_lanes)
{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
int max_lanes;
if (!icl_tc_phy_status_complete(dig_port)) {
- DRM_DEBUG_KMS("Port %s: PHY not ready\n",
- dig_port->tc_port_name);
+ drm_dbg_kms(&i915->drm, "Port %s: PHY not ready\n",
+ dig_port->tc_port_name);
goto out_set_tbt_alt_mode;
}
@@ -311,15 +376,16 @@ static void icl_tc_phy_connect(struct intel_digital_port *dig_port,
* became disconnected. Not necessary for legacy mode.
*/
if (!(tc_port_live_status_mask(dig_port) & BIT(TC_PORT_DP_ALT))) {
- DRM_DEBUG_KMS("Port %s: PHY sudden disconnect\n",
- dig_port->tc_port_name);
+ drm_dbg_kms(&i915->drm, "Port %s: PHY sudden disconnect\n",
+ dig_port->tc_port_name);
goto out_set_safe_mode;
}
if (max_lanes < required_lanes) {
- DRM_DEBUG_KMS("Port %s: PHY max lanes %d < required lanes %d\n",
- dig_port->tc_port_name,
- max_lanes, required_lanes);
+ drm_dbg_kms(&i915->drm,
+ "Port %s: PHY max lanes %d < required lanes %d\n",
+ dig_port->tc_port_name,
+ max_lanes, required_lanes);
goto out_set_safe_mode;
}
@@ -357,15 +423,17 @@ static void icl_tc_phy_disconnect(struct intel_digital_port *dig_port)
static bool icl_tc_phy_is_connected(struct intel_digital_port *dig_port)
{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+
if (!icl_tc_phy_status_complete(dig_port)) {
- DRM_DEBUG_KMS("Port %s: PHY status not complete\n",
- dig_port->tc_port_name);
+ drm_dbg_kms(&i915->drm, "Port %s: PHY status not complete\n",
+ dig_port->tc_port_name);
return dig_port->tc_mode == TC_PORT_TBT_ALT;
}
if (icl_tc_phy_is_in_safe_mode(dig_port)) {
- DRM_DEBUG_KMS("Port %s: PHY still in safe mode\n",
- dig_port->tc_port_name);
+ drm_dbg_kms(&i915->drm, "Port %s: PHY still in safe mode\n",
+ dig_port->tc_port_name);
return false;
}
@@ -415,9 +483,14 @@ static void intel_tc_port_reset_mode(struct intel_digital_port *dig_port,
enum tc_port_mode old_tc_mode = dig_port->tc_mode;
intel_display_power_flush_work(i915);
- drm_WARN_ON(&i915->drm,
- intel_display_power_is_enabled(i915,
- intel_aux_power_domain(dig_port)));
+ if (INTEL_GEN(i915) != 11 || !dig_port->tc_legacy_port) {
+ enum intel_display_power_domain aux_domain;
+ bool aux_powered;
+
+ aux_domain = intel_aux_power_domain(dig_port);
+ aux_powered = intel_display_power_is_enabled(i915, aux_domain);
+ drm_WARN_ON(&i915->drm, aux_powered);
+ }
icl_tc_phy_disconnect(dig_port);
icl_tc_phy_connect(dig_port, required_lanes);
@@ -438,10 +511,13 @@ intel_tc_port_link_init_refcount(struct intel_digital_port *dig_port,
void intel_tc_port_sanitize(struct intel_digital_port *dig_port)
{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
struct intel_encoder *encoder = &dig_port->base;
+ intel_wakeref_t tc_cold_wref;
int active_links = 0;
mutex_lock(&dig_port->tc_lock);
+ tc_cold_wref = tc_cold_block(dig_port);
dig_port->tc_mode = intel_tc_port_get_current_mode(dig_port);
if (dig_port->dp.is_mst)
@@ -451,8 +527,9 @@ void intel_tc_port_sanitize(struct intel_digital_port *dig_port)
if (active_links) {
if (!icl_tc_phy_is_connected(dig_port))
- DRM_DEBUG_KMS("Port %s: PHY disconnected with %d active link(s)\n",
- dig_port->tc_port_name, active_links);
+ drm_dbg_kms(&i915->drm,
+ "Port %s: PHY disconnected with %d active link(s)\n",
+ dig_port->tc_port_name, active_links);
intel_tc_port_link_init_refcount(dig_port, active_links);
goto out;
@@ -462,10 +539,11 @@ void intel_tc_port_sanitize(struct intel_digital_port *dig_port)
icl_tc_phy_connect(dig_port, 1);
out:
- DRM_DEBUG_KMS("Port %s: sanitize mode (%s)\n",
- dig_port->tc_port_name,
- tc_port_mode_name(dig_port->tc_mode));
+ drm_dbg_kms(&i915->drm, "Port %s: sanitize mode (%s)\n",
+ dig_port->tc_port_name,
+ tc_port_mode_name(dig_port->tc_mode));
+ tc_cold_unblock(dig_port, tc_cold_wref);
mutex_unlock(&dig_port->tc_lock);
}
@@ -484,13 +562,19 @@ static bool intel_tc_port_needs_reset(struct intel_digital_port *dig_port)
* connected ports are usable, and avoids exposing to the users objects they
* can't really use.
*/
-bool intel_tc_port_connected(struct intel_digital_port *dig_port)
+bool intel_tc_port_connected(struct intel_encoder *encoder)
{
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
bool is_connected;
+ intel_wakeref_t tc_cold_wref;
intel_tc_port_lock(dig_port);
+ tc_cold_wref = tc_cold_block(dig_port);
+
is_connected = tc_port_live_status_mask(dig_port) &
BIT(dig_port->tc_mode);
+
+ tc_cold_unblock(dig_port, tc_cold_wref);
intel_tc_port_unlock(dig_port);
return is_connected;
@@ -506,9 +590,16 @@ static void __intel_tc_port_lock(struct intel_digital_port *dig_port,
mutex_lock(&dig_port->tc_lock);
- if (!dig_port->tc_link_refcount &&
- intel_tc_port_needs_reset(dig_port))
- intel_tc_port_reset_mode(dig_port, required_lanes);
+ if (!dig_port->tc_link_refcount) {
+ intel_wakeref_t tc_cold_wref;
+
+ tc_cold_wref = tc_cold_block(dig_port);
+
+ if (intel_tc_port_needs_reset(dig_port))
+ intel_tc_port_reset_mode(dig_port, required_lanes);
+
+ tc_cold_unblock(dig_port, tc_cold_wref);
+ }
drm_WARN_ON(&i915->drm, dig_port->tc_lock_wakeref);
dig_port->tc_lock_wakeref = wakeref;
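The tc_cold_block()/tc_cold_unblock() helpers added above bracket FIA register accesses with a display power reference so the PHY cannot drop into TCCOLD in between, and assert_tc_cold_blocked() checks that callers actually did so. A minimal standalone sketch of the same block/access/unblock shape follows; the names, the fake power counter, and the register value are invented for illustration and are not driver API.

#include <assert.h>
#include <stdio.h>

typedef unsigned long wakeref_t;	/* hypothetical stand-in for intel_wakeref_t */

static int power_refcount;		/* pretend power domain reference count */
static int port_is_legacy;		/* pretend dig_port->tc_legacy_port */

static wakeref_t cold_block(void)
{
	if (!port_is_legacy)
		return 0;		/* 0 == nothing to release later */
	power_refcount++;
	return 1;			/* opaque non-zero cookie */
}

static void cold_unblock(wakeref_t wakeref)
{
	if (!wakeref)			/* skipped for ports that never blocked */
		return;
	power_refcount--;
}

static unsigned int read_fia_register(void)
{
	assert(power_refcount > 0);	/* analogue of assert_tc_cold_blocked() */
	return 0xdeadbeef;		/* pretend MMIO read */
}

int main(void)
{
	wakeref_t wf;

	port_is_legacy = 1;
	wf = cold_block();
	printf("lane mask: 0x%x\n", read_fia_register());
	cold_unblock(wf);
	return 0;
}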
diff --git a/drivers/gpu/drm/i915/display/intel_tc.h b/drivers/gpu/drm/i915/display/intel_tc.h
index 463f1b3c836f..b619e4736f85 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.h
+++ b/drivers/gpu/drm/i915/display/intel_tc.h
@@ -10,8 +10,9 @@
#include <linux/types.h>
struct intel_digital_port;
+struct intel_encoder;
-bool intel_tc_port_connected(struct intel_digital_port *dig_port);
+bool intel_tc_port_connected(struct intel_encoder *encoder);
u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port);
u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port);
int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port);
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index d2e3a3a323e9..fbe12aad7d58 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -914,7 +914,8 @@ intel_tv_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe)
}
static void
-intel_enable_tv(struct intel_encoder *encoder,
+intel_enable_tv(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -930,7 +931,8 @@ intel_enable_tv(struct intel_encoder *encoder,
}
static void
-intel_disable_tv(struct intel_encoder *encoder,
+intel_disable_tv(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -1414,7 +1416,8 @@ static void set_color_conversion(struct drm_i915_private *dev_priv,
(color_conversion->bv << 16) | color_conversion->av);
}
-static void intel_tv_pre_enable(struct intel_encoder *encoder,
+static void intel_tv_pre_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -1698,13 +1701,13 @@ intel_tv_detect(struct drm_connector *connector,
struct drm_modeset_acquire_ctx *ctx,
bool force)
{
+ struct drm_i915_private *i915 = to_i915(connector->dev);
struct intel_tv *intel_tv = intel_attached_tv(to_intel_connector(connector));
enum drm_connector_status status;
int type;
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n",
- connector->base.id, connector->name,
- force);
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] force=%d\n",
+ connector->base.id, connector->name, force);
if (force) {
struct intel_load_detect_pipe tmp;
diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
index 05c7cbe32eb4..aef7fe932d1a 100644
--- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
@@ -462,7 +462,7 @@ struct bdb_general_definitions {
* number = (block_size - sizeof(bdb_general_definitions))/
* defs->child_dev_size;
*/
- u8 devices[0];
+ u8 devices[];
} __packed;
/*
@@ -839,7 +839,7 @@ struct bdb_mipi_config {
struct bdb_mipi_sequence {
u8 version;
- u8 data[0]; /* up to 6 variable length blocks */
+ u8 data[]; /* up to 6 variable length blocks */
} __packed;
/*
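The VBT structure changes above replace the old zero-length trailing arrays with C99 flexible array members, which contribute no size of their own; storage for the variable-length payload is added at allocation time. A minimal standalone sketch of the idiom; the struct layout and sizes are invented and are not the VBT format.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative only, not the real bdb_mipi_sequence layout. */
struct blob {
	uint8_t version;
	uint8_t data[];		/* flexible array member, was data[0] */
} __attribute__((packed));

int main(void)
{
	size_t payload = 6;
	struct blob *b = malloc(sizeof(*b) + payload);	/* header plus payload */

	if (!b)
		return 1;
	b->version = 3;
	memset(b->data, 0xab, payload);
	printf("header %zu bytes, total %zu bytes\n",
	       sizeof(*b), sizeof(*b) + payload);	/* header 1, total 7 */
	free(b);
	return 0;
}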
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index f4c362dc6e15..f582ab52f0b0 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -267,7 +267,6 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder,
struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
base);
struct intel_connector *intel_connector = intel_dsi->attached_connector;
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
int ret;
@@ -279,11 +278,11 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder,
intel_fixed_panel_mode(fixed_mode, adjusted_mode);
if (HAS_GMCH(dev_priv))
- intel_gmch_panel_fitting(crtc, pipe_config,
- conn_state->scaling_mode);
+ ret = intel_gmch_panel_fitting(pipe_config, conn_state);
else
- intel_pch_panel_fitting(crtc, pipe_config,
- conn_state->scaling_mode);
+ ret = intel_pch_panel_fitting(pipe_config, conn_state);
+ if (ret)
+ return ret;
}
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
@@ -759,7 +758,8 @@ static void intel_dsi_unprepare(struct intel_encoder *encoder);
* DSI port enable has to be done before pipe and plane enable, so we do it in
* the pre_enable hook instead of the enable hook.
*/
-static void intel_dsi_pre_enable(struct intel_encoder *encoder,
+static void intel_dsi_pre_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -858,11 +858,12 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON);
}
-static void bxt_dsi_enable(struct intel_encoder *encoder,
+static void bxt_dsi_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- WARN_ON(crtc_state->has_pch_encoder);
+ drm_WARN_ON(state->base.dev, crtc_state->has_pch_encoder);
intel_crtc_vblank_on(crtc_state);
}
@@ -871,14 +872,16 @@ static void bxt_dsi_enable(struct intel_encoder *encoder,
* DSI port disable has to be done after pipe and plane disable, so we do it in
* the post_disable hook.
*/
-static void intel_dsi_disable(struct intel_encoder *encoder,
+static void intel_dsi_disable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&i915->drm, "\n");
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_OFF);
intel_panel_disable_backlight(old_conn_state);
@@ -906,7 +909,8 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
vlv_dsi_clear_device_ready(encoder);
}
-static void intel_dsi_post_disable(struct intel_encoder *encoder,
+static void intel_dsi_post_disable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
index 34be4c0ee7c5..bc0223716906 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
@@ -108,7 +108,7 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
if (clflush) {
i915_sw_fence_await_reservation(&clflush->base.chain,
obj->base.resv, NULL, true,
- I915_FENCE_TIMEOUT,
+ i915_fence_timeout(to_i915(obj->base.dev)),
I915_FENCE_GFP);
dma_resv_add_excl_fence(obj->base.resv, &clflush->base.dma);
dma_fence_work_commit(&clflush->base);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
index 0598e5382a1d..d3a86a4d5c04 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
@@ -6,7 +6,6 @@
#include "i915_drv.h"
#include "gt/intel_context.h"
#include "gt/intel_engine_pm.h"
-#include "gt/intel_engine_pool.h"
#include "i915_gem_client_blt.h"
#include "i915_gem_object_blt.h"
@@ -289,8 +288,7 @@ int i915_gem_schedule_fill_pages_blt(struct drm_i915_gem_object *obj,
i915_gem_object_lock(obj);
err = i915_sw_fence_await_reservation(&work->wait,
- obj->base.resv, NULL,
- true, I915_FENCE_TIMEOUT,
+ obj->base.resv, NULL, true, 0,
I915_FENCE_GFP);
if (err < 0) {
dma_fence_set_error(&work->dma, err);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 68326ad3b2e0..30c229fcb404 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -130,9 +130,7 @@ static void lut_close(struct i915_gem_context *ctx)
if (&lut->obj_link != &obj->lut_list) {
i915_lut_handle_free(lut);
radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
- if (atomic_dec_and_test(&vma->open_count) &&
- !i915_vma_is_ggtt(vma))
- i915_vma_close(vma);
+ i915_vma_close(vma);
i915_gem_object_put(obj);
}
@@ -232,7 +230,7 @@ static void intel_context_set_gem(struct intel_context *ce,
ce->timeline = intel_timeline_get(ctx->timeline);
if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
- intel_engine_has_semaphores(ce->engine))
+ intel_engine_has_timeslices(ce->engine))
__set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
}
@@ -570,23 +568,19 @@ static void engines_idle_release(struct i915_gem_context *ctx,
engines->ctx = i915_gem_context_get(ctx);
for_each_gem_engine(ce, engines, it) {
- struct dma_fence *fence;
- int err = 0;
+ int err;
/* serialises with execbuf */
set_bit(CONTEXT_CLOSED_BIT, &ce->flags);
if (!intel_context_pin_if_active(ce))
continue;
- fence = i915_active_fence_get(&ce->timeline->last_request);
- if (fence) {
- err = i915_sw_fence_await_dma_fence(&engines->fence,
- fence, 0,
- GFP_KERNEL);
- dma_fence_put(fence);
- }
+ /* Wait until context is finally scheduled out and retired */
+ err = i915_sw_fence_await_active(&engines->fence,
+ &ce->active,
+ I915_ACTIVE_AWAIT_BARRIER);
intel_context_unpin(ce);
- if (err < 0)
+ if (err)
goto kill;
}
@@ -757,21 +751,46 @@ err_free:
return ERR_PTR(err);
}
+static inline struct i915_gem_engines *
+__context_engines_await(const struct i915_gem_context *ctx)
+{
+ struct i915_gem_engines *engines;
+
+ rcu_read_lock();
+ do {
+ engines = rcu_dereference(ctx->engines);
+ GEM_BUG_ON(!engines);
+
+ if (unlikely(!i915_sw_fence_await(&engines->fence)))
+ continue;
+
+ if (likely(engines == rcu_access_pointer(ctx->engines)))
+ break;
+
+ i915_sw_fence_complete(&engines->fence);
+ } while (1);
+ rcu_read_unlock();
+
+ return engines;
+}
+
static int
context_apply_all(struct i915_gem_context *ctx,
int (*fn)(struct intel_context *ce, void *data),
void *data)
{
struct i915_gem_engines_iter it;
+ struct i915_gem_engines *e;
struct intel_context *ce;
int err = 0;
- for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+ e = __context_engines_await(ctx);
+ for_each_gem_engine(ce, e, it) {
err = fn(ce, data);
if (err)
break;
}
- i915_gem_context_unlock_engines(ctx);
+ i915_sw_fence_complete(&e->fence);
return err;
}
@@ -786,11 +805,13 @@ static int __apply_ppgtt(struct intel_context *ce, void *vm)
static struct i915_address_space *
__set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm)
{
- struct i915_address_space *old = i915_gem_context_vm(ctx);
+ struct i915_address_space *old;
+ old = rcu_replace_pointer(ctx->vm,
+ i915_vm_open(vm),
+ lockdep_is_held(&ctx->mutex));
GEM_BUG_ON(old && i915_vm_is_4lvl(vm) != i915_vm_is_4lvl(old));
- rcu_assign_pointer(ctx->vm, i915_vm_open(vm));
context_apply_all(ctx, __apply_ppgtt, vm);
return old;
@@ -1069,30 +1090,6 @@ static void cb_retire(struct i915_active *base)
kfree(cb);
}
-static inline struct i915_gem_engines *
-__context_engines_await(const struct i915_gem_context *ctx)
-{
- struct i915_gem_engines *engines;
-
- rcu_read_lock();
- do {
- engines = rcu_dereference(ctx->engines);
- if (unlikely(!engines))
- break;
-
- if (unlikely(!i915_sw_fence_await(&engines->fence)))
- continue;
-
- if (likely(engines == rcu_access_pointer(ctx->engines)))
- break;
-
- i915_sw_fence_complete(&engines->fence);
- } while (1);
- rcu_read_unlock();
-
- return engines;
-}
-
I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault);
static int context_barrier_task(struct i915_gem_context *ctx,
intel_engine_mask_t engines,
@@ -1401,10 +1398,10 @@ static int get_ringsize(struct i915_gem_context *ctx,
return 0;
}
-static int
-user_to_context_sseu(struct drm_i915_private *i915,
- const struct drm_i915_gem_context_param_sseu *user,
- struct intel_sseu *context)
+int
+i915_gem_user_to_context_sseu(struct drm_i915_private *i915,
+ const struct drm_i915_gem_context_param_sseu *user,
+ struct intel_sseu *context)
{
const struct sseu_dev_info *device = &RUNTIME_INFO(i915)->sseu;
@@ -1539,7 +1536,7 @@ static int set_sseu(struct i915_gem_context *ctx,
goto out_ce;
}
- ret = user_to_context_sseu(i915, &user_sseu, &sseu);
+ ret = i915_gem_user_to_context_sseu(i915, &user_sseu, &sseu);
if (ret)
goto out_ce;
@@ -1924,11 +1921,6 @@ get_engines(struct i915_gem_context *ctx,
}
user = u64_to_user_ptr(args->value);
- if (!access_ok(user, size)) {
- err = -EFAULT;
- goto err_free;
- }
-
if (put_user(0, &user->extensions)) {
err = -EFAULT;
goto err_free;
@@ -1972,7 +1964,7 @@ static int __apply_priority(struct intel_context *ce, void *arg)
{
struct i915_gem_context *ctx = arg;
- if (!intel_engine_has_semaphores(ce->engine))
+ if (!intel_engine_has_timeslices(ce->engine))
return 0;
if (ctx->sched.priority >= I915_PRIORITY_NORMAL)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h
index f1d884d304bd..3702b2fb27ab 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h
@@ -225,4 +225,8 @@ i915_gem_engines_iter_next(struct i915_gem_engines_iter *it);
struct i915_lut_handle *i915_lut_handle_alloc(void);
void i915_lut_handle_free(struct i915_lut_handle *lut);
+int i915_gem_user_to_context_sseu(struct drm_i915_private *i915,
+ const struct drm_i915_gem_context_param_sseu *user,
+ struct intel_sseu *context);
+
#endif /* !__I915_GEM_CONTEXT_H__ */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index 4f96c8788a2e..7f76fc68f498 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -368,7 +368,7 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_vma *vma;
- if (!atomic_read(&obj->bind_count))
+ if (list_empty(&obj->vma.list))
return;
mutex_lock(&i915->ggtt.vm.mutex);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index b7440f06c5e2..db8eb1c6afe9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -15,8 +15,8 @@
#include "gem/i915_gem_ioctls.h"
#include "gt/intel_context.h"
-#include "gt/intel_engine_pool.h"
#include "gt/intel_gt.h"
+#include "gt/intel_gt_buffer_pool.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_ring.h"
@@ -40,6 +40,11 @@ struct eb_vma {
u32 handle;
};
+struct eb_vma_array {
+ struct kref kref;
+ struct eb_vma vma[];
+};
+
enum {
FORCE_CPU_RELOC = 1,
FORCE_GTT_RELOC,
@@ -52,7 +57,6 @@ enum {
#define __EXEC_OBJECT_NEEDS_MAP BIT(29)
#define __EXEC_OBJECT_NEEDS_BIAS BIT(28)
#define __EXEC_OBJECT_INTERNAL_FLAGS (~0u << 28) /* all of the above */
-#define __EXEC_OBJECT_RESERVED (__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_FENCE)
#define __EXEC_HAS_RELOC BIT(31)
#define __EXEC_INTERNAL_FLAGS (~0u << 31)
@@ -264,7 +268,9 @@ struct i915_execbuffer {
bool has_fence : 1;
bool needs_unfenced : 1;
+ struct i915_vma *target;
struct i915_request *rq;
+ struct i915_vma *rq_vma;
u32 *rq_cmd;
unsigned int rq_size;
} reloc_cache;
@@ -283,6 +289,7 @@ struct i915_execbuffer {
*/
int lut_size;
struct hlist_head *buckets; /** ht for relocation handles */
+ struct eb_vma_array *array;
};
static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
@@ -292,8 +299,62 @@ static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
eb->args->batch_len);
}
+static struct eb_vma_array *eb_vma_array_create(unsigned int count)
+{
+ struct eb_vma_array *arr;
+
+ arr = kvmalloc(struct_size(arr, vma, count), GFP_KERNEL | __GFP_NOWARN);
+ if (!arr)
+ return NULL;
+
+ kref_init(&arr->kref);
+ arr->vma[0].vma = NULL;
+
+ return arr;
+}
+
+static inline void eb_unreserve_vma(struct eb_vma *ev)
+{
+ struct i915_vma *vma = ev->vma;
+
+ if (unlikely(ev->flags & __EXEC_OBJECT_HAS_FENCE))
+ __i915_vma_unpin_fence(vma);
+
+ if (ev->flags & __EXEC_OBJECT_HAS_PIN)
+ __i915_vma_unpin(vma);
+
+ ev->flags &= ~(__EXEC_OBJECT_HAS_PIN |
+ __EXEC_OBJECT_HAS_FENCE);
+}
+
+static void eb_vma_array_destroy(struct kref *kref)
+{
+ struct eb_vma_array *arr = container_of(kref, typeof(*arr), kref);
+ struct eb_vma *ev = arr->vma;
+
+ while (ev->vma) {
+ eb_unreserve_vma(ev);
+ i915_vma_put(ev->vma);
+ ev++;
+ }
+
+ kvfree(arr);
+}
+
+static void eb_vma_array_put(struct eb_vma_array *arr)
+{
+ kref_put(&arr->kref, eb_vma_array_destroy);
+}
+
static int eb_create(struct i915_execbuffer *eb)
{
+ /* Allocate an extra slot for use by the command parser + sentinel */
+ eb->array = eb_vma_array_create(eb->buffer_count + 2);
+ if (!eb->array)
+ return -ENOMEM;
+
+ eb->vma = eb->array->vma;
+
if (!(eb->args->flags & I915_EXEC_HANDLE_LUT)) {
unsigned int size = 1 + ilog2(eb->buffer_count);
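
The eb_vma_array introduced above pairs a kref with a flexible array member so the execbuf vma list can be handed to asynchronous work and freed only when the last reference is dropped. An illustrative userspace sketch of the same lifetime pattern, with C11 atomics standing in for kref (all names here are hypothetical, not part of the patch):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct slot { void *ptr; };

struct slot_array {
	atomic_int refcount;          /* stands in for struct kref */
	size_t count;
	struct slot slots[];          /* flexible array member, sized at allocation */
};

static struct slot_array *slot_array_create(size_t count)
{
	struct slot_array *arr;

	arr = calloc(1, sizeof(*arr) + count * sizeof(arr->slots[0]));
	if (!arr)
		return NULL;

	atomic_init(&arr->refcount, 1);
	arr->count = count;
	return arr;
}

static struct slot_array *slot_array_get(struct slot_array *arr)
{
	atomic_fetch_add(&arr->refcount, 1);
	return arr;
}

static void slot_array_put(struct slot_array *arr)
{
	/* Free only when the last reference is dropped, like kref_put(). */
	if (atomic_fetch_sub(&arr->refcount, 1) == 1)
		free(arr);
}

int main(void)
{
	struct slot_array *arr = slot_array_create(4);

	if (!arr)
		return 1;

	slot_array_get(arr);   /* e.g. handed to deferred work */
	slot_array_put(arr);   /* deferred work finishes */
	slot_array_put(arr);   /* original owner drops its reference; array freed */
	printf("done\n");
	return 0;
}
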
@@ -327,8 +388,10 @@ static int eb_create(struct i915_execbuffer *eb)
break;
} while (--size);
- if (unlikely(!size))
+ if (unlikely(!size)) {
+ eb_vma_array_put(eb->array);
return -ENOMEM;
+ }
eb->lut_size = size;
} else {
@@ -368,6 +431,32 @@ eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry,
return false;
}
+static u64 eb_pin_flags(const struct drm_i915_gem_exec_object2 *entry,
+ unsigned int exec_flags)
+{
+ u64 pin_flags = 0;
+
+ if (exec_flags & EXEC_OBJECT_NEEDS_GTT)
+ pin_flags |= PIN_GLOBAL;
+
+ /*
+ * Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
+ * limit address to the first 4GBs for unflagged objects.
+ */
+ if (!(exec_flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
+ pin_flags |= PIN_ZONE_4G;
+
+ if (exec_flags & __EXEC_OBJECT_NEEDS_MAP)
+ pin_flags |= PIN_MAPPABLE;
+
+ if (exec_flags & EXEC_OBJECT_PINNED)
+ pin_flags |= entry->offset | PIN_OFFSET_FIXED;
+ else if (exec_flags & __EXEC_OBJECT_NEEDS_BIAS)
+ pin_flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
+
+ return pin_flags;
+}
+
static inline bool
eb_pin_vma(struct i915_execbuffer *eb,
const struct drm_i915_gem_exec_object2 *entry,
@@ -385,8 +474,19 @@ eb_pin_vma(struct i915_execbuffer *eb,
if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_GTT))
pin_flags |= PIN_GLOBAL;
- if (unlikely(i915_vma_pin(vma, 0, 0, pin_flags)))
- return false;
+ /* Attempt to reuse the current location if available */
+ if (unlikely(i915_vma_pin(vma, 0, 0, pin_flags))) {
+ if (entry->flags & EXEC_OBJECT_PINNED)
+ return false;
+
+ /* Failing that pick any _free_ space if suitable */
+ if (unlikely(i915_vma_pin(vma,
+ entry->pad_to_size,
+ entry->alignment,
+ eb_pin_flags(entry, ev->flags) |
+ PIN_USER | PIN_NOEVICT)))
+ return false;
+ }
if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_FENCE)) {
if (unlikely(i915_vma_pin_fence(vma))) {
@@ -402,26 +502,6 @@ eb_pin_vma(struct i915_execbuffer *eb,
return !eb_vma_misplaced(entry, vma, ev->flags);
}
-static inline void __eb_unreserve_vma(struct i915_vma *vma, unsigned int flags)
-{
- GEM_BUG_ON(!(flags & __EXEC_OBJECT_HAS_PIN));
-
- if (unlikely(flags & __EXEC_OBJECT_HAS_FENCE))
- __i915_vma_unpin_fence(vma);
-
- __i915_vma_unpin(vma);
-}
-
-static inline void
-eb_unreserve_vma(struct eb_vma *ev)
-{
- if (!(ev->flags & __EXEC_OBJECT_HAS_PIN))
- return;
-
- __eb_unreserve_vma(ev->vma, ev->flags);
- ev->flags &= ~__EXEC_OBJECT_RESERVED;
-}
-
static int
eb_validate_vma(struct i915_execbuffer *eb,
struct drm_i915_gem_exec_object2 *entry,
@@ -481,7 +561,7 @@ eb_add_vma(struct i915_execbuffer *eb,
GEM_BUG_ON(i915_vma_is_closed(vma));
- ev->vma = i915_vma_get(vma);
+ ev->vma = vma;
ev->exec = entry;
ev->flags = entry->flags;
@@ -547,28 +627,9 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb,
u64 pin_flags)
{
struct drm_i915_gem_exec_object2 *entry = ev->exec;
- unsigned int exec_flags = ev->flags;
struct i915_vma *vma = ev->vma;
int err;
- if (exec_flags & EXEC_OBJECT_NEEDS_GTT)
- pin_flags |= PIN_GLOBAL;
-
- /*
- * Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
- * limit address to the first 4GBs for unflagged objects.
- */
- if (!(exec_flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
- pin_flags |= PIN_ZONE_4G;
-
- if (exec_flags & __EXEC_OBJECT_NEEDS_MAP)
- pin_flags |= PIN_MAPPABLE;
-
- if (exec_flags & EXEC_OBJECT_PINNED)
- pin_flags |= entry->offset | PIN_OFFSET_FIXED;
- else if (exec_flags & __EXEC_OBJECT_NEEDS_BIAS)
- pin_flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
-
if (drm_mm_node_allocated(&vma->node) &&
eb_vma_misplaced(entry, vma, ev->flags)) {
err = i915_vma_unbind(vma);
@@ -578,7 +639,7 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb,
err = i915_vma_pin(vma,
entry->pad_to_size, entry->alignment,
- pin_flags);
+ eb_pin_flags(entry, ev->flags) | pin_flags);
if (err)
return err;
@@ -587,7 +648,7 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb,
eb->args->flags |= __EXEC_HAS_RELOC;
}
- if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) {
+ if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_FENCE)) {
err = i915_vma_pin_fence(vma);
if (unlikely(err)) {
i915_vma_unpin(vma);
@@ -595,10 +656,10 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb,
}
if (vma->fence)
- exec_flags |= __EXEC_OBJECT_HAS_FENCE;
+ ev->flags |= __EXEC_OBJECT_HAS_FENCE;
}
- ev->flags = exec_flags | __EXEC_OBJECT_HAS_PIN;
+ ev->flags |= __EXEC_OBJECT_HAS_PIN;
GEM_BUG_ON(eb_vma_misplaced(entry, vma, ev->flags));
return 0;
@@ -728,77 +789,117 @@ static int eb_select_context(struct i915_execbuffer *eb)
return 0;
}
-static int eb_lookup_vmas(struct i915_execbuffer *eb)
+static int __eb_add_lut(struct i915_execbuffer *eb,
+ u32 handle, struct i915_vma *vma)
{
- struct radix_tree_root *handles_vma = &eb->gem_context->handles_vma;
- struct drm_i915_gem_object *obj;
- unsigned int i, batch;
+ struct i915_gem_context *ctx = eb->gem_context;
+ struct i915_lut_handle *lut;
int err;
- if (unlikely(i915_gem_context_is_closed(eb->gem_context)))
- return -ENOENT;
+ lut = i915_lut_handle_alloc();
+ if (unlikely(!lut))
+ return -ENOMEM;
- INIT_LIST_HEAD(&eb->relocs);
- INIT_LIST_HEAD(&eb->unbound);
+ i915_vma_get(vma);
+ if (!atomic_fetch_inc(&vma->open_count))
+ i915_vma_reopen(vma);
+ lut->handle = handle;
+ lut->ctx = ctx;
+
+ /* Check that the context hasn't been closed in the meantime */
+ err = -EINTR;
+ if (!mutex_lock_interruptible(&ctx->mutex)) {
+ err = -ENOENT;
+ if (likely(!i915_gem_context_is_closed(ctx)))
+ err = radix_tree_insert(&ctx->handles_vma, handle, vma);
+ if (err == 0) { /* And nor has this handle */
+ struct drm_i915_gem_object *obj = vma->obj;
+
+ i915_gem_object_lock(obj);
+ if (idr_find(&eb->file->object_idr, handle) == obj) {
+ list_add(&lut->obj_link, &obj->lut_list);
+ } else {
+ radix_tree_delete(&ctx->handles_vma, handle);
+ err = -ENOENT;
+ }
+ i915_gem_object_unlock(obj);
+ }
+ mutex_unlock(&ctx->mutex);
+ }
+ if (unlikely(err))
+ goto err;
- batch = eb_batch_index(eb);
+ return 0;
- for (i = 0; i < eb->buffer_count; i++) {
- u32 handle = eb->exec[i].handle;
- struct i915_lut_handle *lut;
+err:
+ i915_vma_close(vma);
+ i915_vma_put(vma);
+ i915_lut_handle_free(lut);
+ return err;
+}
+
+static struct i915_vma *eb_lookup_vma(struct i915_execbuffer *eb, u32 handle)
+{
+ do {
+ struct drm_i915_gem_object *obj;
struct i915_vma *vma;
+ int err;
- vma = radix_tree_lookup(handles_vma, handle);
+ rcu_read_lock();
+ vma = radix_tree_lookup(&eb->gem_context->handles_vma, handle);
+ if (likely(vma))
+ vma = i915_vma_tryget(vma);
+ rcu_read_unlock();
if (likely(vma))
- goto add_vma;
+ return vma;
obj = i915_gem_object_lookup(eb->file, handle);
- if (unlikely(!obj)) {
- err = -ENOENT;
- goto err_vma;
- }
+ if (unlikely(!obj))
+ return ERR_PTR(-ENOENT);
vma = i915_vma_instance(obj, eb->context->vm, NULL);
if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto err_obj;
+ i915_gem_object_put(obj);
+ return vma;
}
- lut = i915_lut_handle_alloc();
- if (unlikely(!lut)) {
- err = -ENOMEM;
- goto err_obj;
- }
+ err = __eb_add_lut(eb, handle, vma);
+ if (likely(!err))
+ return vma;
- err = radix_tree_insert(handles_vma, handle, vma);
- if (unlikely(err)) {
- i915_lut_handle_free(lut);
- goto err_obj;
- }
+ i915_gem_object_put(obj);
+ if (err != -EEXIST)
+ return ERR_PTR(err);
+ } while (1);
+}
- /* transfer ref to lut */
- if (!atomic_fetch_inc(&vma->open_count))
- i915_vma_reopen(vma);
- lut->handle = handle;
- lut->ctx = eb->gem_context;
+static int eb_lookup_vmas(struct i915_execbuffer *eb)
+{
+ unsigned int batch = eb_batch_index(eb);
+ unsigned int i;
+ int err = 0;
- i915_gem_object_lock(obj);
- list_add(&lut->obj_link, &obj->lut_list);
- i915_gem_object_unlock(obj);
+ INIT_LIST_HEAD(&eb->relocs);
+ INIT_LIST_HEAD(&eb->unbound);
+
+ for (i = 0; i < eb->buffer_count; i++) {
+ struct i915_vma *vma;
+
+ vma = eb_lookup_vma(eb, eb->exec[i].handle);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ break;
+ }
-add_vma:
err = eb_validate_vma(eb, &eb->exec[i], vma);
- if (unlikely(err))
- goto err_vma;
+ if (unlikely(err)) {
+ i915_vma_put(vma);
+ break;
+ }
eb_add_vma(eb, i, batch, vma);
}
- return 0;
-
-err_obj:
- i915_gem_object_put(obj);
-err_vma:
eb->vma[i].vma = NULL;
return err;
}
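
The reworked eb_lookup_vma() above drops the context-wide mutex around the whole lookup and instead retries when its radix-tree insert loses a race (-EEXIST). Stripped of RCU and the vma reference counting, the control flow is a plain lookup-or-create loop; a small userspace sketch with hypothetical names, not part of the patch:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define TABLE_SIZE 64

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static void *table[TABLE_SIZE];          /* stands in for ctx->handles_vma */

static void *slow_create(unsigned int handle)
{
	/* stands in for i915_gem_object_lookup() + i915_vma_instance() */
	int *obj = malloc(sizeof(*obj));

	if (obj)
		*obj = (int)handle;
	return obj;
}

static int try_insert(unsigned int handle, void *obj)
{
	int err = 0;

	pthread_mutex_lock(&table_lock);
	if (table[handle])
		err = -EEXIST;                  /* someone else beat us to it */
	else
		table[handle] = obj;
	pthread_mutex_unlock(&table_lock);
	return err;
}

static void *lookup_or_create(unsigned int handle)
{
	do {
		void *obj;
		int err;

		/* Fast path: already published. */
		pthread_mutex_lock(&table_lock);
		obj = table[handle];
		pthread_mutex_unlock(&table_lock);
		if (obj)
			return obj;

		/* Slow path: create outside the lock, then try to publish. */
		obj = slow_create(handle);
		if (!obj)
			return NULL;

		err = try_insert(handle, obj);
		if (!err)
			return obj;

		free(obj);
		if (err != -EEXIST)
			return NULL;
		/* Lost the race: retry and reuse the winner's entry. */
	} while (1);
}

int main(void)
{
	void *a = lookup_or_create(3);
	void *b = lookup_or_create(3);

	printf("same entry: %s\n", a == b ? "yes" : "no");
	return 0;
}

The benefit mirrors the driver change: the expensive creation happens outside any lock, and concurrent callers converge on a single published entry.
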
@@ -823,31 +924,13 @@ eb_get_vma(const struct i915_execbuffer *eb, unsigned long handle)
}
}
-static void eb_release_vmas(const struct i915_execbuffer *eb)
-{
- const unsigned int count = eb->buffer_count;
- unsigned int i;
-
- for (i = 0; i < count; i++) {
- struct eb_vma *ev = &eb->vma[i];
- struct i915_vma *vma = ev->vma;
-
- if (!vma)
- break;
-
- eb->vma[i].vma = NULL;
-
- if (ev->flags & __EXEC_OBJECT_HAS_PIN)
- __eb_unreserve_vma(vma, ev->flags);
-
- i915_vma_put(vma);
- }
-}
-
static void eb_destroy(const struct i915_execbuffer *eb)
{
GEM_BUG_ON(eb->reloc_cache.rq);
+ if (eb->array)
+ eb_vma_array_put(eb->array);
+
if (eb->lut_size > 0)
kfree(eb->buckets);
}
@@ -872,7 +955,7 @@ static void reloc_cache_init(struct reloc_cache *cache,
cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment;
cache->node.flags = 0;
cache->rq = NULL;
- cache->rq_size = 0;
+ cache->target = NULL;
}
static inline void *unmask_page(unsigned long p)
@@ -894,29 +977,122 @@ static inline struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache)
return &i915->ggtt;
}
-static void reloc_gpu_flush(struct reloc_cache *cache)
+#define RELOC_TAIL 4
+
+static int reloc_gpu_chain(struct reloc_cache *cache)
{
- struct drm_i915_gem_object *obj = cache->rq->batch->obj;
+ struct intel_gt_buffer_pool_node *pool;
+ struct i915_request *rq = cache->rq;
+ struct i915_vma *batch;
+ u32 *cmd;
+ int err;
- GEM_BUG_ON(cache->rq_size >= obj->base.size / sizeof(u32));
- cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END;
+ pool = intel_gt_get_buffer_pool(rq->engine->gt, PAGE_SIZE);
+ if (IS_ERR(pool))
+ return PTR_ERR(pool);
- __i915_gem_object_flush_map(obj, 0, sizeof(u32) * (cache->rq_size + 1));
- i915_gem_object_unpin_map(obj);
+ batch = i915_vma_instance(pool->obj, rq->context->vm, NULL);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ goto out_pool;
+ }
- intel_gt_chipset_flush(cache->rq->engine->gt);
+ err = i915_vma_pin(batch, 0, 0, PIN_USER | PIN_NONBLOCK);
+ if (err)
+ goto out_pool;
- i915_request_add(cache->rq);
- cache->rq = NULL;
+ GEM_BUG_ON(cache->rq_size + RELOC_TAIL > PAGE_SIZE / sizeof(u32));
+ cmd = cache->rq_cmd + cache->rq_size;
+ *cmd++ = MI_ARB_CHECK;
+ if (cache->gen >= 8)
+ *cmd++ = MI_BATCH_BUFFER_START_GEN8;
+ else if (cache->gen >= 6)
+ *cmd++ = MI_BATCH_BUFFER_START;
+ else
+ *cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
+ *cmd++ = lower_32_bits(batch->node.start);
+ *cmd++ = upper_32_bits(batch->node.start); /* Always 0 for gen<8 */
+ i915_gem_object_flush_map(cache->rq_vma->obj);
+ i915_gem_object_unpin_map(cache->rq_vma->obj);
+ cache->rq_vma = NULL;
+
+ err = intel_gt_buffer_pool_mark_active(pool, rq);
+ if (err == 0) {
+ i915_vma_lock(batch);
+ err = i915_request_await_object(rq, batch->obj, false);
+ if (err == 0)
+ err = i915_vma_move_to_active(batch, rq, 0);
+ i915_vma_unlock(batch);
+ }
+ i915_vma_unpin(batch);
+ if (err)
+ goto out_pool;
+
+ cmd = i915_gem_object_pin_map(batch->obj,
+ cache->has_llc ?
+ I915_MAP_FORCE_WB :
+ I915_MAP_FORCE_WC);
+ if (IS_ERR(cmd)) {
+ err = PTR_ERR(cmd);
+ goto out_pool;
+ }
+
+ /* Return with batch mapping (cmd) still pinned */
+ cache->rq_cmd = cmd;
+ cache->rq_size = 0;
+ cache->rq_vma = batch;
+
+out_pool:
+ intel_gt_buffer_pool_put(pool);
+ return err;
+}
+
+static unsigned int reloc_bb_flags(const struct reloc_cache *cache)
+{
+ return cache->gen > 5 ? 0 : I915_DISPATCH_SECURE;
+}
+
+static int reloc_gpu_flush(struct reloc_cache *cache)
+{
+ struct i915_request *rq;
+ int err;
+
+ rq = fetch_and_zero(&cache->rq);
+ if (!rq)
+ return 0;
+
+ if (cache->rq_vma) {
+ struct drm_i915_gem_object *obj = cache->rq_vma->obj;
+
+ GEM_BUG_ON(cache->rq_size >= obj->base.size / sizeof(u32));
+ cache->rq_cmd[cache->rq_size++] = MI_BATCH_BUFFER_END;
+
+ __i915_gem_object_flush_map(obj,
+ 0, sizeof(u32) * cache->rq_size);
+ i915_gem_object_unpin_map(obj);
+ }
+
+ err = 0;
+ if (rq->engine->emit_init_breadcrumb)
+ err = rq->engine->emit_init_breadcrumb(rq);
+ if (!err)
+ err = rq->engine->emit_bb_start(rq,
+ rq->batch->node.start,
+ PAGE_SIZE,
+ reloc_bb_flags(cache));
+ if (err)
+ i915_request_set_error_once(rq, err);
+
+ intel_gt_chipset_flush(rq->engine->gt);
+ i915_request_add(rq);
+
+ return err;
}
static void reloc_cache_reset(struct reloc_cache *cache)
{
void *vaddr;
- if (cache->rq)
- reloc_gpu_flush(cache);
-
if (!cache->vaddr)
return;
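
reloc_gpu_chain() reserves RELOC_TAIL words at the end of each page-sized relocation batch so that, when space runs low, a jump to a freshly allocated buffer can always be emitted and writing continues there. A userspace sketch of that chained-buffer idea (the 0xdeadbeef marker stands in for MI_BATCH_BUFFER_START; names are illustrative, not part of the patch):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define CHUNK_WORDS   8           /* stands in for PAGE_SIZE / sizeof(u32) */
#define CHAIN_TAIL    1           /* slots kept free for the link, like RELOC_TAIL */

struct chunk {
	uint32_t words[CHUNK_WORDS];
	size_t used;
	struct chunk *next;
};

static struct chunk *chunk_new(void)
{
	return calloc(1, sizeof(struct chunk));
}

/* Append one word, chaining to a new chunk when only the tail is left. */
static struct chunk *emit(struct chunk *c, uint32_t word)
{
	if (c->used + CHAIN_TAIL >= CHUNK_WORDS) {
		struct chunk *next = chunk_new();

		if (!next)
			return NULL;
		/* In the driver this is where MI_BATCH_BUFFER_START is written. */
		c->words[c->used++] = 0xdeadbeef;   /* "jump to next chunk" marker */
		c->next = next;
		c = next;
	}
	c->words[c->used++] = word;
	return c;
}

int main(void)
{
	struct chunk *head = chunk_new(), *tail = head;

	for (uint32_t i = 0; i < 20 && tail; i++)
		tail = emit(tail, i);

	for (struct chunk *c = head; c; c = c->next)
		printf("chunk with %zu words\n", c->used);

	while (head) {
		struct chunk *next = head->next;

		free(head);
		head = next;
	}
	return 0;
}
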
@@ -1109,17 +1285,17 @@ static int reloc_move_to_gpu(struct i915_request *rq, struct i915_vma *vma)
}
static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
- struct i915_vma *vma,
+ struct intel_engine_cs *engine,
unsigned int len)
{
struct reloc_cache *cache = &eb->reloc_cache;
- struct intel_engine_pool_node *pool;
+ struct intel_gt_buffer_pool_node *pool;
struct i915_request *rq;
struct i915_vma *batch;
u32 *cmd;
int err;
- pool = intel_engine_get_pool(eb->engine, PAGE_SIZE);
+ pool = intel_gt_get_buffer_pool(engine->gt, PAGE_SIZE);
if (IS_ERR(pool))
return PTR_ERR(pool);
@@ -1132,7 +1308,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
goto out_pool;
}
- batch = i915_vma_instance(pool->obj, vma->vm, NULL);
+ batch = i915_vma_instance(pool->obj, eb->context->vm, NULL);
if (IS_ERR(batch)) {
err = PTR_ERR(batch);
goto err_unmap;
@@ -1142,26 +1318,32 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
if (err)
goto err_unmap;
- rq = i915_request_create(eb->context);
+ if (engine == eb->context->engine) {
+ rq = i915_request_create(eb->context);
+ } else {
+ struct intel_context *ce;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto err_unpin;
+ }
+
+ i915_vm_put(ce->vm);
+ ce->vm = i915_vm_get(eb->context->vm);
+
+ rq = intel_context_create_request(ce);
+ intel_context_put(ce);
+ }
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_unpin;
}
- err = intel_engine_pool_mark_active(pool, rq);
- if (err)
- goto err_request;
-
- err = reloc_move_to_gpu(rq, vma);
+ err = intel_gt_buffer_pool_mark_active(pool, rq);
if (err)
goto err_request;
- err = eb->engine->emit_bb_start(rq,
- batch->node.start, PAGE_SIZE,
- cache->gen > 5 ? 0 : I915_DISPATCH_SECURE);
- if (err)
- goto skip_request;
-
i915_vma_lock(batch);
err = i915_request_await_object(rq, batch->obj, false);
if (err == 0)
@@ -1176,6 +1358,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
cache->rq = rq;
cache->rq_cmd = cmd;
cache->rq_size = 0;
+ cache->rq_vma = batch;
/* Return with batch mapping (cmd) still pinned */
goto out_pool;
@@ -1189,124 +1372,206 @@ err_unpin:
err_unmap:
i915_gem_object_unpin_map(pool->obj);
out_pool:
- intel_engine_pool_put(pool);
+ intel_gt_buffer_pool_put(pool);
return err;
}
+static bool reloc_can_use_engine(const struct intel_engine_cs *engine)
+{
+ return engine->class != VIDEO_DECODE_CLASS || !IS_GEN(engine->i915, 6);
+}
+
static u32 *reloc_gpu(struct i915_execbuffer *eb,
struct i915_vma *vma,
unsigned int len)
{
struct reloc_cache *cache = &eb->reloc_cache;
u32 *cmd;
-
- if (cache->rq_size > PAGE_SIZE/sizeof(u32) - (len + 1))
- reloc_gpu_flush(cache);
+ int err;
if (unlikely(!cache->rq)) {
- int err;
+ struct intel_engine_cs *engine = eb->engine;
- if (!intel_engine_can_store_dword(eb->engine))
- return ERR_PTR(-ENODEV);
+ if (!reloc_can_use_engine(engine)) {
+ engine = engine->gt->engine_class[COPY_ENGINE_CLASS][0];
+ if (!engine)
+ return ERR_PTR(-ENODEV);
+ }
- err = __reloc_gpu_alloc(eb, vma, len);
+ err = __reloc_gpu_alloc(eb, engine, len);
if (unlikely(err))
return ERR_PTR(err);
}
+ if (vma != cache->target) {
+ err = reloc_move_to_gpu(cache->rq, vma);
+ if (unlikely(err)) {
+ i915_request_set_error_once(cache->rq, err);
+ return ERR_PTR(err);
+ }
+
+ cache->target = vma;
+ }
+
+ if (unlikely(cache->rq_size + len >
+ PAGE_SIZE / sizeof(u32) - RELOC_TAIL)) {
+ err = reloc_gpu_chain(cache);
+ if (unlikely(err)) {
+ i915_request_set_error_once(cache->rq, err);
+ return ERR_PTR(err);
+ }
+ }
+
+ GEM_BUG_ON(cache->rq_size + len >= PAGE_SIZE / sizeof(u32));
cmd = cache->rq_cmd + cache->rq_size;
cache->rq_size += len;
return cmd;
}
-static u64
-relocate_entry(struct i915_vma *vma,
- const struct drm_i915_gem_relocation_entry *reloc,
- struct i915_execbuffer *eb,
- const struct i915_vma *target)
+static inline bool use_reloc_gpu(struct i915_vma *vma)
{
- u64 offset = reloc->offset;
- u64 target_offset = relocation_target(reloc, target);
- bool wide = eb->reloc_cache.use_64bit_reloc;
- void *vaddr;
+ if (DBG_FORCE_RELOC == FORCE_GPU_RELOC)
+ return true;
- if (!eb->reloc_cache.vaddr &&
- (DBG_FORCE_RELOC == FORCE_GPU_RELOC ||
- !dma_resv_test_signaled_rcu(vma->resv, true))) {
- const unsigned int gen = eb->reloc_cache.gen;
- unsigned int len;
- u32 *batch;
- u64 addr;
-
- if (wide)
- len = offset & 7 ? 8 : 5;
- else if (gen >= 4)
- len = 4;
- else
- len = 3;
+ if (DBG_FORCE_RELOC)
+ return false;
- batch = reloc_gpu(eb, vma, len);
- if (IS_ERR(batch))
- goto repeat;
+ return !dma_resv_test_signaled_rcu(vma->resv, true);
+}
- addr = gen8_canonical_addr(vma->node.start + offset);
- if (wide) {
- if (offset & 7) {
- *batch++ = MI_STORE_DWORD_IMM_GEN4;
- *batch++ = lower_32_bits(addr);
- *batch++ = upper_32_bits(addr);
- *batch++ = lower_32_bits(target_offset);
-
- addr = gen8_canonical_addr(addr + 4);
-
- *batch++ = MI_STORE_DWORD_IMM_GEN4;
- *batch++ = lower_32_bits(addr);
- *batch++ = upper_32_bits(addr);
- *batch++ = upper_32_bits(target_offset);
- } else {
- *batch++ = (MI_STORE_DWORD_IMM_GEN4 | (1 << 21)) + 1;
- *batch++ = lower_32_bits(addr);
- *batch++ = upper_32_bits(addr);
- *batch++ = lower_32_bits(target_offset);
- *batch++ = upper_32_bits(target_offset);
- }
- } else if (gen >= 6) {
+static unsigned long vma_phys_addr(struct i915_vma *vma, u32 offset)
+{
+ struct page *page;
+ unsigned long addr;
+
+ GEM_BUG_ON(vma->pages != vma->obj->mm.pages);
+
+ page = i915_gem_object_get_page(vma->obj, offset >> PAGE_SHIFT);
+ addr = PFN_PHYS(page_to_pfn(page));
+ GEM_BUG_ON(overflows_type(addr, u32)); /* expected dma32 */
+
+ return addr + offset_in_page(offset);
+}
+
+static bool __reloc_entry_gpu(struct i915_execbuffer *eb,
+ struct i915_vma *vma,
+ u64 offset,
+ u64 target_addr)
+{
+ const unsigned int gen = eb->reloc_cache.gen;
+ unsigned int len;
+ u32 *batch;
+ u64 addr;
+
+ if (gen >= 8)
+ len = offset & 7 ? 8 : 5;
+ else if (gen >= 4)
+ len = 4;
+ else
+ len = 3;
+
+ batch = reloc_gpu(eb, vma, len);
+ if (IS_ERR(batch))
+ return false;
+
+ addr = gen8_canonical_addr(vma->node.start + offset);
+ if (gen >= 8) {
+ if (offset & 7) {
*batch++ = MI_STORE_DWORD_IMM_GEN4;
- *batch++ = 0;
- *batch++ = addr;
- *batch++ = target_offset;
- } else if (gen >= 4) {
- *batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
- *batch++ = 0;
- *batch++ = addr;
- *batch++ = target_offset;
+ *batch++ = lower_32_bits(addr);
+ *batch++ = upper_32_bits(addr);
+ *batch++ = lower_32_bits(target_addr);
+
+ addr = gen8_canonical_addr(addr + 4);
+
+ *batch++ = MI_STORE_DWORD_IMM_GEN4;
+ *batch++ = lower_32_bits(addr);
+ *batch++ = upper_32_bits(addr);
+ *batch++ = upper_32_bits(target_addr);
} else {
- *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
- *batch++ = addr;
- *batch++ = target_offset;
+ *batch++ = (MI_STORE_DWORD_IMM_GEN4 | (1 << 21)) + 1;
+ *batch++ = lower_32_bits(addr);
+ *batch++ = upper_32_bits(addr);
+ *batch++ = lower_32_bits(target_addr);
+ *batch++ = upper_32_bits(target_addr);
}
-
- goto out;
+ } else if (gen >= 6) {
+ *batch++ = MI_STORE_DWORD_IMM_GEN4;
+ *batch++ = 0;
+ *batch++ = addr;
+ *batch++ = target_addr;
+ } else if (IS_I965G(eb->i915)) {
+ *batch++ = MI_STORE_DWORD_IMM_GEN4;
+ *batch++ = 0;
+ *batch++ = vma_phys_addr(vma, offset);
+ *batch++ = target_addr;
+ } else if (gen >= 4) {
+ *batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *batch++ = 0;
+ *batch++ = addr;
+ *batch++ = target_addr;
+ } else if (gen >= 3 &&
+ !(IS_I915G(eb->i915) || IS_I915GM(eb->i915))) {
+ *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
+ *batch++ = addr;
+ *batch++ = target_addr;
+ } else {
+ *batch++ = MI_STORE_DWORD_IMM;
+ *batch++ = vma_phys_addr(vma, offset);
+ *batch++ = target_addr;
}
+ return true;
+}
+
+static bool reloc_entry_gpu(struct i915_execbuffer *eb,
+ struct i915_vma *vma,
+ u64 offset,
+ u64 target_addr)
+{
+ if (eb->reloc_cache.vaddr)
+ return false;
+
+ if (!use_reloc_gpu(vma))
+ return false;
+
+ return __reloc_entry_gpu(eb, vma, offset, target_addr);
+}
+
+static u64
+relocate_entry(struct i915_vma *vma,
+ const struct drm_i915_gem_relocation_entry *reloc,
+ struct i915_execbuffer *eb,
+ const struct i915_vma *target)
+{
+ u64 target_addr = relocation_target(reloc, target);
+ u64 offset = reloc->offset;
+
+ if (!reloc_entry_gpu(eb, vma, offset, target_addr)) {
+ bool wide = eb->reloc_cache.use_64bit_reloc;
+ void *vaddr;
+
repeat:
- vaddr = reloc_vaddr(vma->obj, &eb->reloc_cache, offset >> PAGE_SHIFT);
- if (IS_ERR(vaddr))
- return PTR_ERR(vaddr);
+ vaddr = reloc_vaddr(vma->obj,
+ &eb->reloc_cache,
+ offset >> PAGE_SHIFT);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
- clflush_write32(vaddr + offset_in_page(offset),
- lower_32_bits(target_offset),
- eb->reloc_cache.vaddr);
+ GEM_BUG_ON(!IS_ALIGNED(offset, sizeof(u32)));
+ clflush_write32(vaddr + offset_in_page(offset),
+ lower_32_bits(target_addr),
+ eb->reloc_cache.vaddr);
- if (wide) {
- offset += sizeof(u32);
- target_offset >>= 32;
- wide = false;
- goto repeat;
+ if (wide) {
+ offset += sizeof(u32);
+ target_addr >>= 32;
+ wide = false;
+ goto repeat;
+ }
}
-out:
return target->node.start | UPDATE;
}
@@ -1411,12 +1676,11 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
struct drm_i915_gem_relocation_entry stack[N_RELOC(512)];
- struct drm_i915_gem_relocation_entry __user *urelocs;
const struct drm_i915_gem_exec_object2 *entry = ev->exec;
- unsigned int remain;
+ struct drm_i915_gem_relocation_entry __user *urelocs =
+ u64_to_user_ptr(entry->relocs_ptr);
+ unsigned long remain = entry->relocation_count;
- urelocs = u64_to_user_ptr(entry->relocs_ptr);
- remain = entry->relocation_count;
if (unlikely(remain > N_RELOC(ULONG_MAX)))
return -EINVAL;
@@ -1425,13 +1689,13 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
* to read. However, if the array is not writable the user loses
* the updated relocation values.
*/
- if (unlikely(!access_ok(urelocs, remain*sizeof(*urelocs))))
+ if (unlikely(!access_ok(urelocs, remain * sizeof(*urelocs))))
return -EFAULT;
do {
struct drm_i915_gem_relocation_entry *r = stack;
unsigned int count =
- min_t(unsigned int, remain, ARRAY_SIZE(stack));
+ min_t(unsigned long, remain, ARRAY_SIZE(stack));
unsigned int copied;
/*
@@ -1494,9 +1758,7 @@ static int eb_relocate(struct i915_execbuffer *eb)
{
int err;
- mutex_lock(&eb->gem_context->mutex);
err = eb_lookup_vmas(eb);
- mutex_unlock(&eb->gem_context->mutex);
if (err)
return err;
@@ -1509,15 +1771,20 @@ static int eb_relocate(struct i915_execbuffer *eb)
/* The objects are in their final locations, apply the relocations. */
if (eb->args->flags & __EXEC_HAS_RELOC) {
struct eb_vma *ev;
+ int flush;
list_for_each_entry(ev, &eb->relocs, reloc_link) {
err = eb_relocate_vma(eb, ev);
if (err)
- return err;
+ break;
}
+
+ flush = reloc_gpu_flush(&eb->reloc_cache);
+ if (!err)
+ err = flush;
}
- return 0;
+ return err;
}
static int eb_move_to_gpu(struct i915_execbuffer *eb)
@@ -1597,19 +1864,15 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
err = i915_vma_move_to_active(vma, eb->request, flags);
i915_vma_unlock(vma);
-
- __eb_unreserve_vma(vma, flags);
- i915_vma_put(vma);
-
- ev->vma = NULL;
+ eb_unreserve_vma(ev);
}
ww_acquire_fini(&acquire);
+ eb_vma_array_put(fetch_and_zero(&eb->array));
+
if (unlikely(err))
goto err_skip;
- eb->exec = NULL;
-
/* Unconditionally flush any chipset caches (for streaming writes). */
intel_gt_chipset_flush(eb->engine->gt);
return 0;
@@ -1725,6 +1988,38 @@ static const struct dma_fence_work_ops eb_parse_ops = {
.release = __eb_parse_release,
};
+static inline int
+__parser_mark_active(struct i915_vma *vma,
+ struct intel_timeline *tl,
+ struct dma_fence *fence)
+{
+ struct intel_gt_buffer_pool_node *node = vma->private;
+
+ return i915_active_ref(&node->active, tl, fence);
+}
+
+static int
+parser_mark_active(struct eb_parse_work *pw, struct intel_timeline *tl)
+{
+ int err;
+
+ mutex_lock(&tl->mutex);
+
+ err = __parser_mark_active(pw->shadow, tl, &pw->base.dma);
+ if (err)
+ goto unlock;
+
+ if (pw->trampoline) {
+ err = __parser_mark_active(pw->trampoline, tl, &pw->base.dma);
+ if (err)
+ goto unlock;
+ }
+
+unlock:
+ mutex_unlock(&tl->mutex);
+ return err;
+}
+
static int eb_parse_pipeline(struct i915_execbuffer *eb,
struct i915_vma *shadow,
struct i915_vma *trampoline)
@@ -1759,20 +2054,25 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb,
pw->shadow = shadow;
pw->trampoline = trampoline;
+ /* Mark active refs early for this worker, in case we get interrupted */
+ err = parser_mark_active(pw, eb->context->timeline);
+ if (err)
+ goto err_commit;
+
err = dma_resv_lock_interruptible(pw->batch->resv, NULL);
if (err)
- goto err_trampoline;
+ goto err_commit;
err = dma_resv_reserve_shared(pw->batch->resv, 1);
if (err)
- goto err_batch_unlock;
+ goto err_commit_unlock;
/* Wait for all writes (and relocs) into the batch to complete */
err = i915_sw_fence_await_reservation(&pw->base.chain,
pw->batch->resv, NULL, false,
0, I915_FENCE_GFP);
if (err < 0)
- goto err_batch_unlock;
+ goto err_commit_unlock;
/* Keep the batch alive and unwritten as we parse */
dma_resv_add_shared_fence(pw->batch->resv, &pw->base.dma);
@@ -1784,14 +2084,16 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb,
dma_resv_add_excl_fence(shadow->resv, &pw->base.dma);
dma_resv_unlock(shadow->resv);
- dma_fence_work_commit(&pw->base);
+ dma_fence_work_commit_imm(&pw->base);
return 0;
-err_batch_unlock:
+err_commit_unlock:
dma_resv_unlock(pw->batch->resv);
-err_trampoline:
- if (trampoline)
- i915_active_release(&trampoline->active);
+err_commit:
+ i915_sw_fence_set_error_once(&pw->base.chain, err);
+ dma_fence_work_commit_imm(&pw->base);
+ return err;
+
err_shadow:
i915_active_release(&shadow->active);
err_batch:
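
The new error handling in eb_parse_pipeline() no longer unwinds by hand: on failure it records the error on the fence with i915_sw_fence_set_error_once() and commits the work immediately, letting the normal release path tear everything down. A userspace sketch of that "always commit, let the release path clean up" shape (hypothetical names, not part of the patch):

#include <stdio.h>

struct work {
	int err;                       /* first error wins, like the sw_fence error */
	void (*release)(struct work *);
};

static void set_error_once(struct work *w, int err)
{
	if (!w->err)
		w->err = err;
}

/* Committing always runs the release path, success or failure. */
static void commit(struct work *w)
{
	if (w->err)
		printf("work skipped, err=%d\n", w->err);
	else
		printf("work executed\n");
	w->release(w);
}

static void release(struct work *w)
{
	(void)w;
	printf("resources released\n");
}

static int submit(struct work *w, int simulate_failure)
{
	if (simulate_failure) {
		/* Error path: mark the work as failed and commit it anyway. */
		set_error_once(w, -12);
		commit(w);
		return w->err;
	}
	commit(w);
	return 0;
}

int main(void)
{
	struct work ok = { .release = release };
	struct work bad = { .release = release };

	submit(&ok, 0);
	submit(&bad, 1);
	return 0;
}
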
@@ -1804,7 +2106,7 @@ err_free:
static int eb_parse(struct i915_execbuffer *eb)
{
struct drm_i915_private *i915 = eb->i915;
- struct intel_engine_pool_node *pool;
+ struct intel_gt_buffer_pool_node *pool;
struct i915_vma *shadow, *trampoline;
unsigned int len;
int err;
@@ -1827,7 +2129,7 @@ static int eb_parse(struct i915_execbuffer *eb)
len += I915_CMD_PARSER_TRAMPOLINE_SIZE;
}
- pool = intel_engine_get_pool(eb->engine, len);
+ pool = intel_gt_get_buffer_pool(eb->engine->gt, len);
if (IS_ERR(pool))
return PTR_ERR(pool);
@@ -1837,6 +2139,7 @@ static int eb_parse(struct i915_execbuffer *eb)
goto err;
}
i915_gem_object_set_readonly(shadow->obj);
+ shadow->private = pool;
trampoline = NULL;
if (CMDPARSER_USES_GGTT(eb->i915)) {
@@ -1850,6 +2153,7 @@ static int eb_parse(struct i915_execbuffer *eb)
shadow = trampoline;
goto err_shadow;
}
+ shadow->private = pool;
eb->batch_flags |= I915_DISPATCH_SECURE;
}
@@ -1861,11 +2165,11 @@ static int eb_parse(struct i915_execbuffer *eb)
eb->vma[eb->buffer_count].vma = i915_vma_get(shadow);
eb->vma[eb->buffer_count].flags = __EXEC_OBJECT_HAS_PIN;
eb->batch = &eb->vma[eb->buffer_count++];
+ eb->vma[eb->buffer_count].vma = NULL;
eb->trampoline = trampoline;
eb->batch_start_offset = 0;
- shadow->private = pool;
return 0;
err_trampoline:
@@ -1874,7 +2178,7 @@ err_trampoline:
err_shadow:
i915_vma_unpin(shadow);
err:
- intel_engine_pool_put(pool);
+ intel_gt_buffer_pool_put(pool);
return err;
}
@@ -2318,39 +2622,13 @@ static void eb_request_add(struct i915_execbuffer *eb)
/* Check that the context wasn't destroyed before submission */
if (likely(!intel_context_is_closed(eb->context))) {
attr = eb->gem_context->sched;
-
- /*
- * Boost actual workloads past semaphores!
- *
- * With semaphores we spin on one engine waiting for another,
- * simply to reduce the latency of starting our work when
- * the signaler completes. However, if there is any other
- * work that we could be doing on this engine instead, that
- * is better utilisation and will reduce the overall duration
- * of the current work. To avoid PI boosting a semaphore
- * far in the distance past over useful work, we keep a history
- * of any semaphore use along our dependency chain.
- */
- if (!(rq->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN))
- attr.priority |= I915_PRIORITY_NOSEMAPHORE;
-
- /*
- * Boost priorities to new clients (new request flows).
- *
- * Allow interactive/synchronous clients to jump ahead of
- * the bulk clients. (FQ_CODEL)
- */
- if (list_empty(&rq->sched.signalers_list))
- attr.priority |= I915_PRIORITY_WAIT;
} else {
/* Serialise with context_close via the add_to_timeline */
i915_request_set_error_once(rq, -ENOENT);
__i915_request_skip(rq);
}
- local_bh_disable();
__i915_request_queue(rq, &attr);
- local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
/* Try to clean up the client's timeline after submitting the request */
if (prev)
@@ -2369,7 +2647,6 @@ i915_gem_do_execbuffer(struct drm_device *dev,
struct drm_i915_private *i915 = to_i915(dev);
struct i915_execbuffer eb;
struct dma_fence *in_fence = NULL;
- struct dma_fence *exec_fence = NULL;
struct sync_file *out_fence = NULL;
struct i915_vma *batch;
int out_fence_fd = -1;
@@ -2386,8 +2663,6 @@ i915_gem_do_execbuffer(struct drm_device *dev,
args->flags |= __EXEC_HAS_RELOC;
eb.exec = exec;
- eb.vma = (struct eb_vma *)(exec + args->buffer_count + 1);
- eb.vma[0].vma = NULL;
eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
reloc_cache_init(&eb.reloc_cache, eb.i915);
@@ -2414,30 +2689,22 @@ i915_gem_do_execbuffer(struct drm_device *dev,
if (args->flags & I915_EXEC_IS_PINNED)
eb.batch_flags |= I915_DISPATCH_PINNED;
- if (args->flags & I915_EXEC_FENCE_IN) {
+#define IN_FENCES (I915_EXEC_FENCE_IN | I915_EXEC_FENCE_SUBMIT)
+ if (args->flags & IN_FENCES) {
+ if ((args->flags & IN_FENCES) == IN_FENCES)
+ return -EINVAL;
+
in_fence = sync_file_get_fence(lower_32_bits(args->rsvd2));
if (!in_fence)
return -EINVAL;
}
-
- if (args->flags & I915_EXEC_FENCE_SUBMIT) {
- if (in_fence) {
- err = -EINVAL;
- goto err_in_fence;
- }
-
- exec_fence = sync_file_get_fence(lower_32_bits(args->rsvd2));
- if (!exec_fence) {
- err = -EINVAL;
- goto err_in_fence;
- }
- }
+#undef IN_FENCES
if (args->flags & I915_EXEC_FENCE_OUT) {
out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
if (out_fence_fd < 0) {
err = out_fence_fd;
- goto err_exec_fence;
+ goto err_in_fence;
}
}
@@ -2528,14 +2795,13 @@ i915_gem_do_execbuffer(struct drm_device *dev,
}
if (in_fence) {
- err = i915_request_await_dma_fence(eb.request, in_fence);
- if (err < 0)
- goto err_request;
- }
-
- if (exec_fence) {
- err = i915_request_await_execution(eb.request, exec_fence,
- eb.engine->bond_execute);
+ if (args->flags & I915_EXEC_FENCE_SUBMIT)
+ err = i915_request_await_execution(eb.request,
+ in_fence,
+ eb.engine->bond_execute);
+ else
+ err = i915_request_await_dma_fence(eb.request,
+ in_fence);
if (err < 0)
goto err_request;
}
@@ -2563,7 +2829,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
*/
eb.request->batch = batch;
if (batch->private)
- intel_engine_pool_mark_active(batch->private, eb.request);
+ intel_gt_buffer_pool_mark_active(batch->private, eb.request);
trace_i915_request_queue(eb.request, eb.batch_flags);
err = eb_submit(&eb, batch);
@@ -2592,10 +2858,8 @@ err_batch_unpin:
i915_vma_unpin(batch);
err_parse:
if (batch->private)
- intel_engine_pool_put(batch->private);
+ intel_gt_buffer_pool_put(batch->private);
err_vma:
- if (eb.exec)
- eb_release_vmas(&eb);
if (eb.trampoline)
i915_vma_unpin(eb.trampoline);
eb_unpin_engine(&eb);
@@ -2606,8 +2870,6 @@ err_destroy:
err_out_fence:
if (out_fence_fd != -1)
put_unused_fd(out_fence_fd);
-err_exec_fence:
- dma_fence_put(exec_fence);
err_in_fence:
dma_fence_put(in_fence);
return err;
@@ -2615,7 +2877,7 @@ err_in_fence:
static size_t eb_element_size(void)
{
- return sizeof(struct drm_i915_gem_exec_object2) + sizeof(struct eb_vma);
+ return sizeof(struct drm_i915_gem_exec_object2);
}
static bool check_buffer_count(size_t count)
@@ -2671,7 +2933,7 @@ i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
/* Copy in the exec list from userland */
exec_list = kvmalloc_array(count, sizeof(*exec_list),
__GFP_NOWARN | GFP_KERNEL);
- exec2_list = kvmalloc_array(count + 1, eb_element_size(),
+ exec2_list = kvmalloc_array(count, eb_element_size(),
__GFP_NOWARN | GFP_KERNEL);
if (exec_list == NULL || exec2_list == NULL) {
drm_dbg(&i915->drm,
@@ -2749,8 +3011,7 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
if (err)
return err;
- /* Allocate an extra slot for use by the command parser */
- exec2_list = kvmalloc_array(count + 1, eb_element_size(),
+ exec2_list = kvmalloc_array(count, eb_element_size(),
__GFP_NOWARN | GFP_KERNEL);
if (exec2_list == NULL) {
drm_dbg(&i915->drm, "Failed to allocate exec list for %zd buffers\n",
@@ -2794,7 +3055,8 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
* And this range already got effectively checked earlier
* when we did the "copy_from_user()" above.
*/
- if (!user_access_begin(user_exec_list, count * sizeof(*user_exec_list)))
+ if (!user_write_access_begin(user_exec_list,
+ count * sizeof(*user_exec_list)))
goto end;
for (i = 0; i < args->buffer_count; i++) {
@@ -2808,7 +3070,7 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
end_user);
}
end_user:
- user_access_end();
+ user_write_access_end();
end:;
}
@@ -2817,3 +3079,7 @@ end:;
kvfree(exec2_list);
return err;
}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/i915_gem_execbuffer.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_fence.c b/drivers/gpu/drm/i915/gem/i915_gem_fence.c
index 2f6100ec2608..8ab842c80f99 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_fence.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_fence.c
@@ -72,8 +72,8 @@ i915_gem_object_lock_fence(struct drm_i915_gem_object *obj)
0, 0);
if (i915_sw_fence_await_reservation(&stub->chain,
- obj->base.resv, NULL,
- true, I915_FENCE_TIMEOUT,
+ obj->base.resv, NULL, true,
+ i915_fence_timeout(to_i915(obj->base.dev)),
I915_FENCE_GFP) < 0)
goto err;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index b39c24dae64e..fe45bd4d63a5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -93,7 +93,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
- if (down_write_killable(&mm->mmap_sem)) {
+ if (mmap_write_lock_killable(mm)) {
addr = -EINTR;
goto err;
}
@@ -103,7 +103,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
else
addr = -ENOMEM;
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
if (IS_ERR_VALUE(addr))
goto err;
}
@@ -396,6 +396,38 @@ err:
return i915_error_to_vmf_fault(ret);
}
+static int
+vm_access(struct vm_area_struct *area, unsigned long addr,
+ void *buf, int len, int write)
+{
+ struct i915_mmap_offset *mmo = area->vm_private_data;
+ struct drm_i915_gem_object *obj = mmo->obj;
+ void *vaddr;
+
+ if (i915_gem_object_is_readonly(obj) && write)
+ return -EACCES;
+
+ addr -= area->vm_start;
+ if (addr >= obj->base.size)
+ return -EINVAL;
+
+ /* As this is primarily for debugging, let's focus on simplicity */
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_FORCE_WC);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
+
+ if (write) {
+ memcpy(vaddr + addr, buf, len);
+ __i915_gem_object_flush_map(obj, addr, len);
+ } else {
+ memcpy(buf, vaddr + addr, len);
+ }
+
+ i915_gem_object_unpin_map(obj);
+
+ return len;
+}
+
void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
{
struct i915_vma *vma;
@@ -745,12 +777,14 @@ static void vm_close(struct vm_area_struct *vma)
static const struct vm_operations_struct vm_ops_gtt = {
.fault = vm_fault_gtt,
+ .access = vm_access,
.open = vm_open,
.close = vm_close,
};
static const struct vm_operations_struct vm_ops_cpu = {
.fault = vm_fault_cpu,
+ .access = vm_access,
.open = vm_open,
.close = vm_close,
};
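
The vm_access() handler added above lets debuggers reach GEM mmaps through access_process_vm(), the path used by ptrace and /proc/<pid>/mem for mappings that get_user_pages cannot follow. A userspace illustration of exercising that path, shown here against an ordinary anonymous mapping (a real i915 CPU/GTT mmap would instead be serviced by vm_access; this is a sketch, not part of the patch):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	char *map = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	char buf[16] = { 0 };
	int fd;

	if (map == MAP_FAILED)
		return 1;
	strcpy(map, "hello");

	/* Reads of /proc/<pid>/mem go through access_process_vm(). */
	fd = open("/proc/self/mem", O_RDONLY);
	if (fd < 0)
		return 1;
	if (pread(fd, buf, sizeof(buf) - 1, (off_t)(uintptr_t)map) < 0)
		return 1;

	printf("read back: %s\n", buf);
	close(fd);
	munmap(map, 4096);
	return 0;
}
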
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 5da9f9e534b9..99356c00c19e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -135,9 +135,7 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
if (vma) {
GEM_BUG_ON(vma->obj != obj);
GEM_BUG_ON(!atomic_read(&vma->open_count));
- if (atomic_dec_and_test(&vma->open_count) &&
- !i915_vma_is_ggtt(vma))
- i915_vma_close(vma);
+ i915_vma_close(vma);
}
mutex_unlock(&ctx->mutex);
@@ -164,9 +162,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
struct llist_node *freed)
{
struct drm_i915_gem_object *obj, *on;
- intel_wakeref_t wakeref;
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
llist_for_each_entry_safe(obj, on, freed, freed) {
struct i915_mmap_offset *mmo, *mn;
@@ -206,7 +202,6 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
}
obj->mmo.offsets = RB_ROOT;
- GEM_BUG_ON(atomic_read(&obj->bind_count));
GEM_BUG_ON(obj->userfault_count);
GEM_BUG_ON(!list_empty(&obj->lut_list));
@@ -227,7 +222,6 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
cond_resched();
}
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
void i915_gem_flush_free_objects(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
index e00792158f13..f457d7130491 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
@@ -6,8 +6,8 @@
#include "i915_drv.h"
#include "gt/intel_context.h"
#include "gt/intel_engine_pm.h"
-#include "gt/intel_engine_pool.h"
#include "gt/intel_gt.h"
+#include "gt/intel_gt_buffer_pool.h"
#include "gt/intel_ring.h"
#include "i915_gem_clflush.h"
#include "i915_gem_object_blt.h"
@@ -18,7 +18,7 @@ struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce,
{
struct drm_i915_private *i915 = ce->vm->i915;
const u32 block_size = SZ_8M; /* ~1ms at 8GiB/s preemption delay */
- struct intel_engine_pool_node *pool;
+ struct intel_gt_buffer_pool_node *pool;
struct i915_vma *batch;
u64 offset;
u64 count;
@@ -33,7 +33,7 @@ struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce,
count = div_u64(round_up(vma->size, block_size), block_size);
size = (1 + 8 * count) * sizeof(u32);
size = round_up(size, PAGE_SIZE);
- pool = intel_engine_get_pool(ce->engine, size);
+ pool = intel_gt_get_buffer_pool(ce->engine->gt, size);
if (IS_ERR(pool)) {
err = PTR_ERR(pool);
goto out_pm;
@@ -78,10 +78,12 @@ struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce,
} while (rem);
*cmd = MI_BATCH_BUFFER_END;
- intel_gt_chipset_flush(ce->vm->gt);
+ i915_gem_object_flush_map(pool->obj);
i915_gem_object_unpin_map(pool->obj);
+ intel_gt_chipset_flush(ce->vm->gt);
+
batch = i915_vma_instance(pool->obj, ce->vm, NULL);
if (IS_ERR(batch)) {
err = PTR_ERR(batch);
@@ -96,7 +98,7 @@ struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce,
return batch;
out_put:
- intel_engine_pool_put(pool);
+ intel_gt_buffer_pool_put(pool);
out_pm:
intel_engine_pm_put(ce->engine);
return ERR_PTR(err);
@@ -114,13 +116,13 @@ int intel_emit_vma_mark_active(struct i915_vma *vma, struct i915_request *rq)
if (unlikely(err))
return err;
- return intel_engine_pool_mark_active(vma->private, rq);
+ return intel_gt_buffer_pool_mark_active(vma->private, rq);
}
void intel_emit_vma_release(struct intel_context *ce, struct i915_vma *vma)
{
i915_vma_unpin(vma);
- intel_engine_pool_put(vma->private);
+ intel_gt_buffer_pool_put(vma->private);
intel_engine_pm_put(ce->engine);
}
@@ -213,7 +215,7 @@ struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
{
struct drm_i915_private *i915 = ce->vm->i915;
const u32 block_size = SZ_8M; /* ~1ms at 8GiB/s preemption delay */
- struct intel_engine_pool_node *pool;
+ struct intel_gt_buffer_pool_node *pool;
struct i915_vma *batch;
u64 src_offset, dst_offset;
u64 count, rem;
@@ -228,7 +230,7 @@ struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
count = div_u64(round_up(dst->size, block_size), block_size);
size = (1 + 11 * count) * sizeof(u32);
size = round_up(size, PAGE_SIZE);
- pool = intel_engine_get_pool(ce->engine, size);
+ pool = intel_gt_get_buffer_pool(ce->engine->gt, size);
if (IS_ERR(pool)) {
err = PTR_ERR(pool);
goto out_pm;
@@ -289,10 +291,12 @@ struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
} while (rem);
*cmd = MI_BATCH_BUFFER_END;
- intel_gt_chipset_flush(ce->vm->gt);
+ i915_gem_object_flush_map(pool->obj);
i915_gem_object_unpin_map(pool->obj);
+ intel_gt_chipset_flush(ce->vm->gt);
+
batch = i915_vma_instance(pool->obj, ce->vm, NULL);
if (IS_ERR(batch)) {
err = PTR_ERR(batch);
@@ -307,7 +311,7 @@ struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
return batch;
out_put:
- intel_engine_pool_put(pool);
+ intel_gt_buffer_pool_put(pool);
out_pm:
intel_engine_pm_put(ce->engine);
return ERR_PTR(err);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.h b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.h
index 243a43a87824..8bcd336a90dc 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.h
@@ -10,7 +10,6 @@
#include "gt/intel_context.h"
#include "gt/intel_engine_pm.h"
-#include "gt/intel_engine_pool.h"
#include "i915_vma.h"
struct drm_i915_gem_object;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index a0b10bcd8d8a..54ee658bb168 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -179,9 +179,6 @@ struct drm_i915_gem_object {
#define TILING_MASK (FENCE_MINIMUM_STRIDE - 1)
#define STRIDE_MASK (~TILING_MASK)
- /** Count of VMA actually bound by this object */
- atomic_t bind_count;
-
struct {
/*
* Protects the pages and their use. Do not use directly, but
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 24f4cadea114..af9e48ee4a33 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -199,8 +199,6 @@ int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
if (i915_gem_object_has_pinned_pages(obj))
return -EBUSY;
- GEM_BUG_ON(atomic_read(&obj->bind_count));
-
/* May be called by shrinker from within get_pages() (on another bo) */
mutex_lock(&obj->mm.lock);
if (unlikely(atomic_read(&obj->mm.pages_pin_count))) {
@@ -393,6 +391,7 @@ void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
GEM_BUG_ON(range_overflows_t(typeof(obj->base.size),
offset, size, obj->base.size));
+ wmb(); /* let all previous writes be visible to coherent partners */
obj->mm.dirty = true;
if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)
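
The wmb() added to __i915_gem_object_flush_map() orders the data writes ahead of the bookkeeping that advertises them to coherent partners. The same publish-after-write discipline, expressed with C11 release/acquire instead of a kernel barrier (userspace sketch, not part of the patch):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int payload;
static atomic_int ready;

static void *producer(void *arg)
{
	(void)arg;
	payload = 42;                                   /* the data writes */
	/*
	 * Release ordering plays the role of the barrier here: the payload
	 * is guaranteed visible before the flag that advertises it.
	 */
	atomic_store_explicit(&ready, 1, memory_order_release);
	return NULL;
}

static void *consumer(void *arg)
{
	(void)arg;
	while (!atomic_load_explicit(&ready, memory_order_acquire))
		;                                       /* spin until published */
	printf("payload = %d\n", payload);
	return NULL;
}

int main(void)
{
	pthread_t p, c;

	pthread_create(&c, NULL, consumer, NULL);
	pthread_create(&p, NULL, producer, NULL);
	pthread_join(p, NULL);
	pthread_join(c, NULL);
	return 0;
}
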
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
index 698e22420dc5..7fe9831aa9ba 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -10,8 +10,6 @@
#include <drm/drm.h> /* for drm_legacy.h! */
#include <drm/drm_cache.h>
-#include <drm/drm_legacy.h> /* for drm_pci.h! */
-#include <drm/drm_pci.h>
#include "gt/intel_gt.h"
#include "i915_drv.h"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 5d5d7eef3f43..7aff3514d97a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -39,7 +39,6 @@ static int shmem_get_pages(struct drm_i915_gem_object *obj)
unsigned long last_pfn = 0; /* suppress gcc warning */
unsigned int max_segment = i915_sg_segment_size();
unsigned int sg_page_sizes;
- struct pagevec pvec;
gfp_t noreclaim;
int ret;
@@ -192,13 +191,17 @@ err_sg:
sg_mark_end(sg);
err_pages:
mapping_clear_unevictable(mapping);
- pagevec_init(&pvec);
- for_each_sgt_page(page, sgt_iter, st) {
- if (!pagevec_add(&pvec, page))
+ if (sg != st->sgl) {
+ struct pagevec pvec;
+
+ pagevec_init(&pvec);
+ for_each_sgt_page(page, sgt_iter, st) {
+ if (!pagevec_add(&pvec, page))
+ check_release_pagevec(&pvec);
+ }
+ if (pagevec_count(&pvec))
check_release_pagevec(&pvec);
}
- if (pagevec_count(&pvec))
- check_release_pagevec(&pvec);
sg_free_table(st);
kfree(st);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index 03e5eb4c99d1..5b65ce738b16 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -27,18 +27,6 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
return false;
/*
- * Only report true if by unbinding the object and putting its pages
- * we can actually make forward progress towards freeing physical
- * pages.
- *
- * If the pages are pinned for any other reason than being bound
- * to the GPU, simply unbinding from the GPU is not going to succeed
- * in releasing our pin count on the pages themselves.
- */
- if (atomic_read(&obj->mm.pages_pin_count) > atomic_read(&obj->bind_count))
- return false;
-
- /*
* We can only return physical pages to the system if we can either
* discard the contents (because the user has marked them as being
* purgeable) or if we can move their contents out to swap.
@@ -54,6 +42,8 @@ static bool unsafe_drop_pages(struct drm_i915_gem_object *obj,
flags = 0;
if (shrink & I915_SHRINK_ACTIVE)
flags = I915_GEM_OBJECT_UNBIND_ACTIVE;
+ if (!(shrink & I915_SHRINK_BOUND))
+ flags = I915_GEM_OBJECT_UNBIND_TEST;
if (i915_gem_object_unbind(obj, flags) == 0)
__i915_gem_object_put_pages(obj);
@@ -194,10 +184,6 @@ i915_gem_shrink(struct drm_i915_private *i915,
i915_gem_object_is_framebuffer(obj))
continue;
- if (!(shrink & I915_SHRINK_BOUND) &&
- atomic_read(&obj->bind_count))
- continue;
-
if (!can_release_pages(obj))
continue;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index 5557dfa83a7b..dc250278bd2c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -381,14 +381,14 @@ static int i915_gem_init_stolen(struct drm_i915_private *i915)
mutex_init(&i915->mm.stolen_lock);
if (intel_vgpu_active(i915)) {
- dev_notice(i915->drm.dev,
+ drm_notice(&i915->drm,
"%s, disabling use of stolen memory\n",
"iGVT-g active");
return 0;
}
if (intel_vtd_active() && INTEL_GEN(i915) < 8) {
- dev_notice(i915->drm.dev,
+ drm_notice(&i915->drm,
"%s, disabling use of stolen memory\n",
"DMAR active");
return 0;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 7ffd7afeb7a5..c31a6744daee 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -200,10 +200,10 @@ i915_mmu_notifier_find(struct i915_mm_struct *mm)
if (IS_ERR(mn))
err = PTR_ERR(mn);
- down_write(&mm->mm->mmap_sem);
+ mmap_write_lock(mm->mm);
mutex_lock(&mm->i915->mm_lock);
if (mm->mn == NULL && !err) {
- /* Protected by mmap_sem (write-lock) */
+ /* Protected by mmap_lock (write-lock) */
err = __mmu_notifier_register(&mn->mn, mm->mm);
if (!err) {
/* Protected by mm_lock */
@@ -217,7 +217,7 @@ i915_mmu_notifier_find(struct i915_mm_struct *mm)
err = 0;
}
mutex_unlock(&mm->i915->mm_lock);
- up_write(&mm->mm->mmap_sem);
+ mmap_write_unlock(mm->mm);
if (mn && !IS_ERR(mn))
kfree(mn);
@@ -468,10 +468,10 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
if (mmget_not_zero(mm)) {
while (pinned < npages) {
if (!locked) {
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
locked = 1;
}
- ret = get_user_pages_remote
+ ret = pin_user_pages_remote
(work->task, mm,
obj->userptr.ptr + pinned * PAGE_SIZE,
npages - pinned,
@@ -483,7 +483,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
pinned += ret;
}
if (locked)
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
mmput(mm);
}
}
@@ -507,7 +507,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
}
mutex_unlock(&obj->mm.lock);
- release_pages(pvec, pinned);
+ unpin_user_pages(pvec, pinned);
kvfree(pvec);
i915_gem_object_put(obj);
@@ -522,8 +522,8 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj)
/* Spawn a worker so that we can acquire the
* user pages without holding our mutex. Access
- * to the user pages requires mmap_sem, and we have
- * a strict lock ordering of mmap_sem, struct_mutex -
+ * to the user pages requires mmap_lock, and we have
+ * a strict lock ordering of mmap_lock, struct_mutex -
* we already hold struct_mutex here and so cannot
* call gup without encountering a lock inversion.
*
@@ -564,6 +564,7 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
struct sg_table *pages;
bool active;
int pinned;
+ unsigned int gup_flags = 0;
/* If userspace should engineer that these pages are replaced in
* the vma between us binding this page into the GTT and completion
@@ -598,11 +599,22 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
GFP_KERNEL |
__GFP_NORETRY |
__GFP_NOWARN);
- if (pvec) /* defer to worker if malloc fails */
- pinned = __get_user_pages_fast(obj->userptr.ptr,
- num_pages,
- !i915_gem_object_is_readonly(obj),
- pvec);
+ /*
+ * Using __get_user_pages_fast() with a read-only
+ * access is questionable. A read-only page may be
+ * COW-broken, and then this might end up giving
+ * the wrong side of the COW..
+ *
+ * We may or may not care.
+ */
+ if (pvec) {
+ /* defer to worker if malloc fails */
+ if (!i915_gem_object_is_readonly(obj))
+ gup_flags |= FOLL_WRITE;
+ pinned = pin_user_pages_fast_only(obj->userptr.ptr,
+ num_pages, gup_flags,
+ pvec);
+ }
}
active = false;
@@ -620,7 +632,7 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
__i915_gem_userptr_set_active(obj, true);
if (IS_ERR(pages))
- release_pages(pvec, pinned);
+ unpin_user_pages(pvec, pinned);
kvfree(pvec);
return PTR_ERR_OR_ZERO(pages);
@@ -675,7 +687,7 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
}
mark_page_accessed(page);
- put_page(page);
+ unpin_user_page(page);
}
obj->mm.dirty = false;
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
index fa16f2c3f3ac..2b46c6530da9 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
@@ -88,8 +88,7 @@ static void huge_put_pages(struct drm_i915_gem_object *obj,
}
static const struct drm_i915_gem_object_ops huge_ops = {
- .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
- I915_GEM_OBJECT_IS_SHRINKABLE,
+ .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
.get_pages = huge_get_pages,
.put_pages = huge_put_pages,
};
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index d4f94ca9ae0d..c9988b6d5c88 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -421,7 +421,7 @@ static int igt_mock_exhaust_device_supported_pages(void *arg)
err = i915_vma_pin(vma, 0, 0, PIN_USER);
if (err)
- goto out_close;
+ goto out_put;
err = igt_check_page_sizes(vma);
@@ -432,8 +432,6 @@ static int igt_mock_exhaust_device_supported_pages(void *arg)
}
i915_vma_unpin(vma);
- i915_vma_close(vma);
-
i915_gem_object_put(obj);
if (err)
@@ -443,8 +441,6 @@ static int igt_mock_exhaust_device_supported_pages(void *arg)
goto out_device;
-out_close:
- i915_vma_close(vma);
out_put:
i915_gem_object_put(obj);
out_device:
@@ -492,7 +488,7 @@ static int igt_mock_memory_region_huge_pages(void *arg)
err = i915_vma_pin(vma, 0, 0, PIN_USER);
if (err)
- goto out_close;
+ goto out_put;
err = igt_check_page_sizes(vma);
if (err)
@@ -515,8 +511,6 @@ static int igt_mock_memory_region_huge_pages(void *arg)
}
i915_vma_unpin(vma);
- i915_vma_close(vma);
-
__i915_gem_object_put_pages(obj);
i915_gem_object_put(obj);
}
@@ -526,8 +520,6 @@ static int igt_mock_memory_region_huge_pages(void *arg)
out_unpin:
i915_vma_unpin(vma);
-out_close:
- i915_vma_close(vma);
out_put:
i915_gem_object_put(obj);
out_region:
@@ -587,10 +579,8 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
}
err = i915_vma_pin(vma, 0, 0, flags);
- if (err) {
- i915_vma_close(vma);
+ if (err)
goto out_unpin;
- }
err = igt_check_page_sizes(vma);
@@ -603,10 +593,8 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
i915_vma_unpin(vma);
- if (err) {
- i915_vma_close(vma);
+ if (err)
goto out_unpin;
- }
/*
* Try all the other valid offsets until the next
@@ -615,16 +603,12 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
*/
for (offset = 4096; offset < page_size; offset += 4096) {
err = i915_vma_unbind(vma);
- if (err) {
- i915_vma_close(vma);
+ if (err)
goto out_unpin;
- }
err = i915_vma_pin(vma, 0, 0, flags | offset);
- if (err) {
- i915_vma_close(vma);
+ if (err)
goto out_unpin;
- }
err = igt_check_page_sizes(vma);
@@ -636,10 +620,8 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
i915_vma_unpin(vma);
- if (err) {
- i915_vma_close(vma);
+ if (err)
goto out_unpin;
- }
if (igt_timeout(end_time,
"%s timed out at offset %x with page-size %x\n",
@@ -647,8 +629,6 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
break;
}
- i915_vma_close(vma);
-
i915_gem_object_unpin_pages(obj);
__i915_gem_object_put_pages(obj);
i915_gem_object_put(obj);
@@ -670,12 +650,6 @@ static void close_object_list(struct list_head *objects,
struct drm_i915_gem_object *obj, *on;
list_for_each_entry_safe(obj, on, objects, st_link) {
- struct i915_vma *vma;
-
- vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
- if (!IS_ERR(vma))
- i915_vma_close(vma);
-
list_del(&obj->st_link);
i915_gem_object_unpin_pages(obj);
__i915_gem_object_put_pages(obj);
@@ -912,7 +886,7 @@ static int igt_mock_ppgtt_64K(void *arg)
err = i915_vma_pin(vma, 0, 0, flags);
if (err)
- goto out_vma_close;
+ goto out_object_unpin;
err = igt_check_page_sizes(vma);
if (err)
@@ -945,8 +919,6 @@ static int igt_mock_ppgtt_64K(void *arg)
}
i915_vma_unpin(vma);
- i915_vma_close(vma);
-
i915_gem_object_unpin_pages(obj);
__i915_gem_object_put_pages(obj);
i915_gem_object_put(obj);
@@ -957,8 +929,6 @@ static int igt_mock_ppgtt_64K(void *arg)
out_vma_unpin:
i915_vma_unpin(vma);
-out_vma_close:
- i915_vma_close(vma);
out_object_unpin:
i915_gem_object_unpin_pages(obj);
out_object_put:
@@ -1070,7 +1040,7 @@ static int __igt_write_huge(struct intel_context *ce,
err = i915_vma_unbind(vma);
if (err)
- goto out_vma_close;
+ return err;
err = i915_vma_pin(vma, size, 0, flags | offset);
if (err) {
@@ -1081,7 +1051,7 @@ static int __igt_write_huge(struct intel_context *ce,
if (err == -ENOSPC && i915_is_ggtt(ce->vm))
err = 0;
- goto out_vma_close;
+ return err;
}
err = igt_check_page_sizes(vma);
@@ -1102,8 +1072,6 @@ static int __igt_write_huge(struct intel_context *ce,
out_vma_unpin:
i915_vma_unpin(vma);
-out_vma_close:
- __i915_vma_put(vma);
return err;
}
@@ -1490,7 +1458,7 @@ static int igt_ppgtt_pin_update(void *arg)
err = i915_vma_pin(vma, SZ_2M, 0, flags);
if (err)
- goto out_close;
+ goto out_put;
if (vma->page_sizes.sg < page_size) {
pr_info("Unable to allocate page-size %x, finishing test early\n",
@@ -1527,8 +1495,6 @@ static int igt_ppgtt_pin_update(void *arg)
goto out_unpin;
i915_vma_unpin(vma);
- i915_vma_close(vma);
-
i915_gem_object_put(obj);
}
@@ -1546,7 +1512,7 @@ static int igt_ppgtt_pin_update(void *arg)
err = i915_vma_pin(vma, 0, 0, flags);
if (err)
- goto out_close;
+ goto out_put;
/*
* Make sure we don't end up with something like where the pde is still
@@ -1576,8 +1542,6 @@ static int igt_ppgtt_pin_update(void *arg)
out_unpin:
i915_vma_unpin(vma);
-out_close:
- i915_vma_close(vma);
out_put:
i915_gem_object_put(obj);
out_vm:
@@ -1629,13 +1593,11 @@ static int igt_tmpfs_fallback(void *arg)
err = i915_vma_pin(vma, 0, 0, PIN_USER);
if (err)
- goto out_close;
+ goto out_put;
err = igt_check_page_sizes(vma);
i915_vma_unpin(vma);
-out_close:
- i915_vma_close(vma);
out_put:
i915_gem_object_put(obj);
out_restore:
@@ -1682,7 +1644,7 @@ static int igt_shrink_thp(void *arg)
err = i915_vma_pin(vma, 0, 0, flags);
if (err)
- goto out_close;
+ goto out_put;
if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_2M) {
pr_info("failed to allocate THP, finishing test early\n");
@@ -1706,7 +1668,7 @@ static int igt_shrink_thp(void *arg)
i915_gem_context_unlock_engines(ctx);
i915_vma_unpin(vma);
if (err)
- goto out_close;
+ goto out_put;
/*
* Now that the pages are *unpinned* shrink-all should invoke
@@ -1716,18 +1678,18 @@ static int igt_shrink_thp(void *arg)
if (i915_gem_object_has_pages(obj)) {
pr_err("shrink-all didn't truncate the pages\n");
err = -EINVAL;
- goto out_close;
+ goto out_put;
}
if (obj->mm.page_sizes.sg || obj->mm.page_sizes.phys) {
pr_err("residual page-size bits left\n");
err = -EINVAL;
- goto out_close;
+ goto out_put;
}
err = i915_vma_pin(vma, 0, 0, flags);
if (err)
- goto out_close;
+ goto out_put;
while (n--) {
err = cpu_check(obj, n, 0xdeadbeaf);
@@ -1737,8 +1699,6 @@ static int igt_shrink_thp(void *arg)
out_unpin:
i915_vma_unpin(vma);
-out_close:
- i915_vma_close(vma);
out_put:
i915_gem_object_put(obj);
out_vm:
@@ -1777,21 +1737,20 @@ int i915_gem_huge_page_mock_selftests(void)
if (!i915_vm_is_4lvl(&ppgtt->vm)) {
pr_err("failed to create 48b PPGTT\n");
err = -EINVAL;
- goto out_close;
+ goto out_put;
}
/* If we ever hit this then it's time to mock the 64K scratch */
if (!i915_vm_has_scratch_64K(&ppgtt->vm)) {
pr_err("PPGTT missing 64K scratch page\n");
err = -EINVAL;
- goto out_close;
+ goto out_put;
}
err = i915_subtests(tests, ppgtt);
-out_close:
+out_put:
i915_vm_put(&ppgtt->vm);
-
out_unlock:
drm_dev_put(&dev_priv->drm);
return err;
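
The hunks above all make the same simplification: now that a vma's lifetime is tied to its object, the selftest error paths no longer need an explicit i915_vma_close(), and the out_close labels collapse into out_put. A minimal sketch of the resulting unwind shape, using the helpers named in the diff (illustrative only, not a buildable unit; dropping the last object reference is assumed to tear the vma down with it):

/* Sketch: the pin/use/unpin pattern after dropping i915_vma_close(). */
static int use_object(struct drm_i915_gem_object *obj,
		      struct i915_address_space *vm)
{
	struct i915_vma *vma;
	int err;

	vma = i915_vma_instance(obj, vm, NULL);	/* vma now lives with obj */
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto out_put;			/* no i915_vma_close() needed */

	/* ... exercise the binding ... */

	i915_vma_unpin(vma);
out_put:
	i915_gem_object_put(obj);		/* releasing obj releases the vma */
	return err;
}
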
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
index b972be165e85..8fe3ad2ee34e 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
@@ -7,9 +7,12 @@
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
+#include "gt/intel_gpu_commands.h"
+#include "gem/i915_gem_lmem.h"
#include "selftests/igt_flush_test.h"
#include "selftests/mock_drm.h"
+#include "selftests/i915_random.h"
#include "huge_gem_object.h"
#include "mock_context.h"
@@ -127,10 +130,573 @@ static int igt_client_fill(void *arg)
} while (1);
}
+#define WIDTH 512
+#define HEIGHT 32
+
+struct blit_buffer {
+ struct i915_vma *vma;
+ u32 start_val;
+ u32 tiling;
+};
+
+struct tiled_blits {
+ struct intel_context *ce;
+ struct blit_buffer buffers[3];
+ struct blit_buffer scratch;
+ struct i915_vma *batch;
+ u64 hole;
+ u32 width;
+ u32 height;
+};
+
+static int prepare_blit(const struct tiled_blits *t,
+ struct blit_buffer *dst,
+ struct blit_buffer *src,
+ struct drm_i915_gem_object *batch)
+{
+ const int gen = INTEL_GEN(to_i915(batch->base.dev));
+ bool use_64b_reloc = gen >= 8;
+ u32 src_pitch, dst_pitch;
+ u32 cmd, *cs;
+
+ cs = i915_gem_object_pin_map(batch, I915_MAP_WC);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = i915_mmio_reg_offset(BCS_SWCTRL);
+ cmd = (BCS_SRC_Y | BCS_DST_Y) << 16;
+ if (src->tiling == I915_TILING_Y)
+ cmd |= BCS_SRC_Y;
+ if (dst->tiling == I915_TILING_Y)
+ cmd |= BCS_DST_Y;
+ *cs++ = cmd;
+
+ cmd = MI_FLUSH_DW;
+ if (gen >= 8)
+ cmd++;
+ *cs++ = cmd;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+
+ cmd = XY_SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (8 - 2);
+ if (gen >= 8)
+ cmd += 2;
+
+ src_pitch = t->width * 4;
+ if (src->tiling) {
+ cmd |= XY_SRC_COPY_BLT_SRC_TILED;
+ src_pitch /= 4;
+ }
+
+ dst_pitch = t->width * 4;
+ if (dst->tiling) {
+ cmd |= XY_SRC_COPY_BLT_DST_TILED;
+ dst_pitch /= 4;
+ }
+
+ *cs++ = cmd;
+ *cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | dst_pitch;
+ *cs++ = 0;
+ *cs++ = t->height << 16 | t->width;
+ *cs++ = lower_32_bits(dst->vma->node.start);
+ if (use_64b_reloc)
+ *cs++ = upper_32_bits(dst->vma->node.start);
+ *cs++ = 0;
+ *cs++ = src_pitch;
+ *cs++ = lower_32_bits(src->vma->node.start);
+ if (use_64b_reloc)
+ *cs++ = upper_32_bits(src->vma->node.start);
+
+ *cs++ = MI_BATCH_BUFFER_END;
+
+ i915_gem_object_flush_map(batch);
+ i915_gem_object_unpin_map(batch);
+
+ return 0;
+}
+
+static void tiled_blits_destroy_buffers(struct tiled_blits *t)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(t->buffers); i++)
+ i915_vma_put(t->buffers[i].vma);
+
+ i915_vma_put(t->scratch.vma);
+ i915_vma_put(t->batch);
+}
+
+static struct i915_vma *
+__create_vma(struct tiled_blits *t, size_t size, bool lmem)
+{
+ struct drm_i915_private *i915 = t->ce->vm->i915;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+
+ if (lmem)
+ obj = i915_gem_object_create_lmem(i915, size, 0);
+ else
+ obj = i915_gem_object_create_shmem(i915, size);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ vma = i915_vma_instance(obj, t->ce->vm, NULL);
+ if (IS_ERR(vma))
+ i915_gem_object_put(obj);
+
+ return vma;
+}
+
+static struct i915_vma *create_vma(struct tiled_blits *t, bool lmem)
+{
+ return __create_vma(t, PAGE_ALIGN(t->width * t->height * 4), lmem);
+}
+
+static int tiled_blits_create_buffers(struct tiled_blits *t,
+ int width, int height,
+ struct rnd_state *prng)
+{
+ struct drm_i915_private *i915 = t->ce->engine->i915;
+ int i;
+
+ t->width = width;
+ t->height = height;
+
+ t->batch = __create_vma(t, PAGE_SIZE, false);
+ if (IS_ERR(t->batch))
+ return PTR_ERR(t->batch);
+
+ t->scratch.vma = create_vma(t, false);
+ if (IS_ERR(t->scratch.vma)) {
+ i915_vma_put(t->batch);
+ return PTR_ERR(t->scratch.vma);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(t->buffers); i++) {
+ struct i915_vma *vma;
+
+ vma = create_vma(t, HAS_LMEM(i915) && i % 2);
+ if (IS_ERR(vma)) {
+ tiled_blits_destroy_buffers(t);
+ return PTR_ERR(vma);
+ }
+
+ t->buffers[i].vma = vma;
+ t->buffers[i].tiling =
+ i915_prandom_u32_max_state(I915_TILING_Y + 1, prng);
+ }
+
+ return 0;
+}
+
+static void fill_scratch(struct tiled_blits *t, u32 *vaddr, u32 val)
+{
+ int i;
+
+ t->scratch.start_val = val;
+ for (i = 0; i < t->width * t->height; i++)
+ vaddr[i] = val++;
+
+ i915_gem_object_flush_map(t->scratch.vma->obj);
+}
+
+static u64 swizzle_bit(unsigned int bit, u64 offset)
+{
+ return (offset & BIT_ULL(bit)) >> (bit - 6);
+}
+
+static u64 tiled_offset(const struct intel_gt *gt,
+ u64 v,
+ unsigned int stride,
+ unsigned int tiling)
+{
+ unsigned int swizzle;
+ u64 x, y;
+
+ if (tiling == I915_TILING_NONE)
+ return v;
+
+ y = div64_u64_rem(v, stride, &x);
+
+ if (tiling == I915_TILING_X) {
+ v = div64_u64_rem(y, 8, &y) * stride * 8;
+ v += y * 512;
+ v += div64_u64_rem(x, 512, &x) << 12;
+ v += x;
+
+ swizzle = gt->ggtt->bit_6_swizzle_x;
+ } else {
+ const unsigned int ytile_span = 16;
+ const unsigned int ytile_height = 512;
+
+ v = div64_u64_rem(y, 32, &y) * stride * 32;
+ v += y * ytile_span;
+ v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
+ v += x;
+
+ swizzle = gt->ggtt->bit_6_swizzle_y;
+ }
+
+ switch (swizzle) {
+ case I915_BIT_6_SWIZZLE_9:
+ v ^= swizzle_bit(9, v);
+ break;
+ case I915_BIT_6_SWIZZLE_9_10:
+ v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v);
+ break;
+ case I915_BIT_6_SWIZZLE_9_11:
+ v ^= swizzle_bit(9, v) ^ swizzle_bit(11, v);
+ break;
+ case I915_BIT_6_SWIZZLE_9_10_11:
+ v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v) ^ swizzle_bit(11, v);
+ break;
+ }
+
+ return v;
+}
+
+static const char *repr_tiling(int tiling)
+{
+ switch (tiling) {
+ case I915_TILING_NONE: return "linear";
+ case I915_TILING_X: return "X";
+ case I915_TILING_Y: return "Y";
+ default: return "unknown";
+ }
+}
+
+static int verify_buffer(const struct tiled_blits *t,
+ struct blit_buffer *buf,
+ struct rnd_state *prng)
+{
+ const u32 *vaddr;
+ int ret = 0;
+ int x, y, p;
+
+ x = i915_prandom_u32_max_state(t->width, prng);
+ y = i915_prandom_u32_max_state(t->height, prng);
+ p = y * t->width + x;
+
+ vaddr = i915_gem_object_pin_map(buf->vma->obj, I915_MAP_WC);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
+
+ if (vaddr[0] != buf->start_val) {
+ ret = -EINVAL;
+ } else {
+ u64 v = tiled_offset(buf->vma->vm->gt,
+ p * 4, t->width * 4,
+ buf->tiling);
+
+ if (vaddr[v / sizeof(*vaddr)] != buf->start_val + p)
+ ret = -EINVAL;
+ }
+ if (ret) {
+ pr_err("Invalid %s tiling detected at (%d, %d), start_val %x\n",
+ repr_tiling(buf->tiling),
+ x, y, buf->start_val);
+ igt_hexdump(vaddr, 4096);
+ }
+
+ i915_gem_object_unpin_map(buf->vma->obj);
+ return ret;
+}
+
+static int move_to_active(struct i915_vma *vma,
+ struct i915_request *rq,
+ unsigned int flags)
+{
+ int err;
+
+ i915_vma_lock(vma);
+ err = i915_request_await_object(rq, vma->obj, false);
+ if (err == 0)
+ err = i915_vma_move_to_active(vma, rq, flags);
+ i915_vma_unlock(vma);
+
+ return err;
+}
+
+static int pin_buffer(struct i915_vma *vma, u64 addr)
+{
+ int err;
+
+ if (drm_mm_node_allocated(&vma->node) && vma->node.start != addr) {
+ err = i915_vma_unbind(vma);
+ if (err)
+ return err;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED | addr);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int
+tiled_blit(struct tiled_blits *t,
+ struct blit_buffer *dst, u64 dst_addr,
+ struct blit_buffer *src, u64 src_addr)
+{
+ struct i915_request *rq;
+ int err;
+
+ err = pin_buffer(src->vma, src_addr);
+ if (err) {
+ pr_err("Cannot pin src @ %llx\n", src_addr);
+ return err;
+ }
+
+ err = pin_buffer(dst->vma, dst_addr);
+ if (err) {
+ pr_err("Cannot pin dst @ %llx\n", dst_addr);
+ goto err_src;
+ }
+
+ err = i915_vma_pin(t->batch, 0, 0, PIN_USER | PIN_HIGH);
+ if (err) {
+ pr_err("cannot pin batch\n");
+ goto err_dst;
+ }
+
+ err = prepare_blit(t, dst, src, t->batch->obj);
+ if (err)
+ goto err_bb;
+
+ rq = intel_context_create_request(t->ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_bb;
+ }
+
+ err = move_to_active(t->batch, rq, 0);
+ if (!err)
+ err = move_to_active(src->vma, rq, 0);
+ if (!err)
+ err = move_to_active(dst->vma, rq, 0);
+ if (!err)
+ err = rq->engine->emit_bb_start(rq,
+ t->batch->node.start,
+ t->batch->node.size,
+ 0);
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (i915_request_wait(rq, 0, HZ / 2) < 0)
+ err = -ETIME;
+ i915_request_put(rq);
+
+ dst->start_val = src->start_val;
+err_bb:
+ i915_vma_unpin(t->batch);
+err_dst:
+ i915_vma_unpin(dst->vma);
+err_src:
+ i915_vma_unpin(src->vma);
+ return err;
+}
+
+static struct tiled_blits *
+tiled_blits_create(struct intel_engine_cs *engine, struct rnd_state *prng)
+{
+ struct drm_mm_node hole;
+ struct tiled_blits *t;
+ u64 hole_size;
+ int err;
+
+ t = kzalloc(sizeof(*t), GFP_KERNEL);
+ if (!t)
+ return ERR_PTR(-ENOMEM);
+
+ t->ce = intel_context_create(engine);
+ if (IS_ERR(t->ce)) {
+ err = PTR_ERR(t->ce);
+ goto err_free;
+ }
+
+ hole_size = 2 * PAGE_ALIGN(WIDTH * HEIGHT * 4);
+ hole_size *= 2; /* room to maneuver */
+ hole_size += 2 * I915_GTT_MIN_ALIGNMENT;
+
+ mutex_lock(&t->ce->vm->mutex);
+ memset(&hole, 0, sizeof(hole));
+ err = drm_mm_insert_node_in_range(&t->ce->vm->mm, &hole,
+ hole_size, 0, I915_COLOR_UNEVICTABLE,
+ 0, U64_MAX,
+ DRM_MM_INSERT_BEST);
+ if (!err)
+ drm_mm_remove_node(&hole);
+ mutex_unlock(&t->ce->vm->mutex);
+ if (err) {
+ err = -ENODEV;
+ goto err_put;
+ }
+
+ t->hole = hole.start + I915_GTT_MIN_ALIGNMENT;
+ pr_info("Using hole at %llx\n", t->hole);
+
+ err = tiled_blits_create_buffers(t, WIDTH, HEIGHT, prng);
+ if (err)
+ goto err_put;
+
+ return t;
+
+err_put:
+ intel_context_put(t->ce);
+err_free:
+ kfree(t);
+ return ERR_PTR(err);
+}
+
+static void tiled_blits_destroy(struct tiled_blits *t)
+{
+ tiled_blits_destroy_buffers(t);
+
+ intel_context_put(t->ce);
+ kfree(t);
+}
+
+static int tiled_blits_prepare(struct tiled_blits *t,
+ struct rnd_state *prng)
+{
+ u64 offset = PAGE_ALIGN(t->width * t->height * 4);
+ u32 *map;
+ int err;
+ int i;
+
+ map = i915_gem_object_pin_map(t->scratch.vma->obj, I915_MAP_WC);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
+
+ /* Use scratch to fill objects */
+ for (i = 0; i < ARRAY_SIZE(t->buffers); i++) {
+ fill_scratch(t, map, prandom_u32_state(prng));
+ GEM_BUG_ON(verify_buffer(t, &t->scratch, prng));
+
+ err = tiled_blit(t,
+ &t->buffers[i], t->hole + offset,
+ &t->scratch, t->hole);
+ if (err == 0)
+ err = verify_buffer(t, &t->buffers[i], prng);
+ if (err) {
+ pr_err("Failed to create buffer %d\n", i);
+ break;
+ }
+ }
+
+ i915_gem_object_unpin_map(t->scratch.vma->obj);
+ return err;
+}
+
+static int tiled_blits_bounce(struct tiled_blits *t, struct rnd_state *prng)
+{
+ u64 offset =
+ round_up(t->width * t->height * 4, 2 * I915_GTT_MIN_ALIGNMENT);
+ int err;
+
+ /* We want to check position invariant tiling across GTT eviction */
+
+ err = tiled_blit(t,
+ &t->buffers[1], t->hole + offset / 2,
+ &t->buffers[0], t->hole + 2 * offset);
+ if (err)
+ return err;
+
+ /* Reposition so that we overlap the old addresses, and slightly off */
+ err = tiled_blit(t,
+ &t->buffers[2], t->hole + I915_GTT_MIN_ALIGNMENT,
+ &t->buffers[1], t->hole + 3 * offset / 2);
+ if (err)
+ return err;
+
+ err = verify_buffer(t, &t->buffers[2], prng);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int __igt_client_tiled_blits(struct intel_engine_cs *engine,
+ struct rnd_state *prng)
+{
+ struct tiled_blits *t;
+ int err;
+
+ t = tiled_blits_create(engine, prng);
+ if (IS_ERR(t))
+ return PTR_ERR(t);
+
+ err = tiled_blits_prepare(t, prng);
+ if (err)
+ goto out;
+
+ err = tiled_blits_bounce(t, prng);
+ if (err)
+ goto out;
+
+out:
+ tiled_blits_destroy(t);
+ return err;
+}
+
+static bool has_bit17_swizzle(int sw)
+{
+ return (sw == I915_BIT_6_SWIZZLE_9_10_17 ||
+ sw == I915_BIT_6_SWIZZLE_9_17);
+}
+
+static bool bad_swizzling(struct drm_i915_private *i915)
+{
+ struct i915_ggtt *ggtt = &i915->ggtt;
+
+ if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
+ return true;
+
+ if (has_bit17_swizzle(ggtt->bit_6_swizzle_x) ||
+ has_bit17_swizzle(ggtt->bit_6_swizzle_y))
+ return true;
+
+ return false;
+}
+
+static int igt_client_tiled_blits(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ I915_RND_STATE(prng);
+ int inst = 0;
+
+ /* Test requires explicit BLT tiling controls */
+ if (INTEL_GEN(i915) < 4)
+ return 0;
+
+ if (bad_swizzling(i915)) /* Requires sane (sub-page) swizzling */
+ return 0;
+
+ do {
+ struct intel_engine_cs *engine;
+ int err;
+
+ engine = intel_engine_lookup_user(i915,
+ I915_ENGINE_CLASS_COPY,
+ inst++);
+ if (!engine)
+ return 0;
+
+ err = __igt_client_tiled_blits(engine, &prng);
+ if (err == -ENODEV)
+ err = 0;
+ if (err)
+ return err;
+ } while (1);
+}
+
int i915_gem_client_blt_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(igt_client_fill),
+ SUBTEST(igt_client_tiled_blits),
};
if (intel_gt_is_wedged(&i915->gt))
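
verify_buffer() samples only two dwords per buffer, so the heavy lifting is in tiled_offset(): it converts a linear byte offset into the X- or Y-tiled offset the blitter actually wrote and then folds in the GGTT bit-6 swizzle. Below is a small user-space replica of the X-tiled, I915_BIT_6_SWIZZLE_9 arithmetic, kept only as a worked example (the stride and coordinates are arbitrary; the kernel helper above remains the authoritative version):

#include <stdint.h>
#include <stdio.h>

static uint64_t swizzle_bit(unsigned int bit, uint64_t offset)
{
	return (offset & (1ull << bit)) >> (bit - 6);
}

/* X-tiling: 512-byte-wide, 8-row tiles, mirroring tiled_offset() above. */
static uint64_t x_tiled_offset(uint64_t linear, unsigned int stride)
{
	uint64_t x = linear % stride;
	uint64_t y = linear / stride;
	uint64_t v;

	v  = (y / 8) * stride * 8;	/* start of this row of tiles */
	v += (y % 8) * 512;		/* row inside the tile */
	v += (x / 512) << 12;		/* whole 4K tiles to the left */
	v += x % 512;			/* byte inside the tile row */

	return v ^ swizzle_bit(9, v);	/* I915_BIT_6_SWIZZLE_9 */
}

int main(void)
{
	const unsigned int stride = 512 * 4;		/* WIDTH * 4 bytes, as in the test */
	uint64_t linear = 37 * stride + 100 * 4;	/* pixel (100, 37) */

	printf("linear %llu -> x-tiled %llu\n",
	       (unsigned long long)linear,
	       (unsigned long long)x_tiled_offset(linear, stride));
	return 0;
}
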
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
index 3f6079e1dfb6..87d7d8aa080f 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
@@ -158,6 +158,8 @@ static int wc_set(struct context *ctx, unsigned long offset, u32 v)
return PTR_ERR(map);
map[offset / sizeof(*map)] = v;
+
+ __i915_gem_object_flush_map(ctx->obj, offset, sizeof(*map));
i915_gem_object_unpin_map(ctx->obj);
return 0;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index 54b86cf7f5d2..b81978890641 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -972,12 +972,6 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
goto err_batch;
}
- err = rq->engine->emit_bb_start(rq,
- batch->node.start, batch->node.size,
- 0);
- if (err)
- goto err_request;
-
i915_vma_lock(batch);
err = i915_request_await_object(rq, batch->obj, false);
if (err == 0)
@@ -994,6 +988,18 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
if (err)
goto skip_request;
+ if (rq->engine->emit_init_breadcrumb) {
+ err = rq->engine->emit_init_breadcrumb(rq);
+ if (err)
+ goto skip_request;
+ }
+
+ err = rq->engine->emit_bb_start(rq,
+ batch->node.start, batch->node.size,
+ 0);
+ if (err)
+ goto skip_request;
+
i915_vma_unpin_and_release(&batch, 0);
i915_vma_unpin(vma);
@@ -1005,7 +1011,6 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
skip_request:
i915_request_set_error_once(rq, err);
-err_request:
i915_request_add(rq);
err_batch:
i915_vma_unpin_and_release(&batch, 0);
@@ -1541,10 +1546,6 @@ static int write_to_scratch(struct i915_gem_context *ctx,
goto err_unpin;
}
- err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);
- if (err)
- goto err_request;
-
i915_vma_lock(vma);
err = i915_request_await_object(rq, vma->obj, false);
if (err == 0)
@@ -1553,6 +1554,16 @@ static int write_to_scratch(struct i915_gem_context *ctx,
if (err)
goto skip_request;
+ if (rq->engine->emit_init_breadcrumb) {
+ err = rq->engine->emit_init_breadcrumb(rq);
+ if (err)
+ goto skip_request;
+ }
+
+ err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);
+ if (err)
+ goto skip_request;
+
i915_vma_unpin(vma);
i915_request_add(rq);
@@ -1560,7 +1571,6 @@ static int write_to_scratch(struct i915_gem_context *ctx,
goto out_vm;
skip_request:
i915_request_set_error_once(rq, err);
-err_request:
i915_request_add(rq);
err_unpin:
i915_vma_unpin(vma);
@@ -1674,10 +1684,6 @@ static int read_from_scratch(struct i915_gem_context *ctx,
goto err_unpin;
}
- err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, flags);
- if (err)
- goto err_request;
-
i915_vma_lock(vma);
err = i915_request_await_object(rq, vma->obj, true);
if (err == 0)
@@ -1686,8 +1692,17 @@ static int read_from_scratch(struct i915_gem_context *ctx,
if (err)
goto skip_request;
+ if (rq->engine->emit_init_breadcrumb) {
+ err = rq->engine->emit_init_breadcrumb(rq);
+ if (err)
+ goto skip_request;
+ }
+
+ err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, flags);
+ if (err)
+ goto skip_request;
+
i915_vma_unpin(vma);
- i915_vma_close(vma);
i915_request_add(rq);
@@ -1709,7 +1724,6 @@ static int read_from_scratch(struct i915_gem_context *ctx,
goto out_vm;
skip_request:
i915_request_set_error_once(rq, err);
-err_request:
i915_request_add(rq);
err_unpin:
i915_vma_unpin(vma);
@@ -1925,7 +1939,7 @@ static int mock_context_barrier(void *arg)
goto out;
}
- rq = igt_request_alloc(ctx, i915->engine[RCS0]);
+ rq = igt_request_alloc(ctx, i915->gt.engine[RCS0]);
if (IS_ERR(rq)) {
pr_err("Request allocation failed!\n");
goto out;
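
All three hunks in this file apply the same reordering: emit_bb_start() is only emitted after the objects have been marked active on the request and after the engine's init breadcrumb (where one exists), and every failure from that point is funnelled through skip_request. A condensed sketch of the ordering, using the helpers and label exactly as they appear in the hunks (fragment only, not a standalone function):

	/* 1. publish the objects this request will touch */
	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj, false);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, 0);
	i915_vma_unlock(vma);
	if (err)
		goto skip_request;

	/* 2. emit the initial breadcrumb, if the backend needs one */
	if (rq->engine->emit_init_breadcrumb) {
		err = rq->engine->emit_init_breadcrumb(rq);
		if (err)
			goto skip_request;
	}

	/* 3. only now dispatch the batch */
	err = rq->engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);
	if (err)
		goto skip_request;
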
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c
new file mode 100644
index 000000000000..a49016f8ee0d
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include "i915_selftest.h"
+
+#include "gt/intel_engine_pm.h"
+#include "selftests/igt_flush_test.h"
+
+static u64 read_reloc(const u32 *map, int x, const u64 mask)
+{
+ u64 reloc;
+
+ memcpy(&reloc, &map[x], sizeof(reloc));
+ return reloc & mask;
+}
+
+static int __igt_gpu_reloc(struct i915_execbuffer *eb,
+ struct drm_i915_gem_object *obj)
+{
+ const unsigned int offsets[] = { 8, 3, 0 };
+ const u64 mask =
+ GENMASK_ULL(eb->reloc_cache.use_64bit_reloc ? 63 : 31, 0);
+ const u32 *map = page_mask_bits(obj->mm.mapping);
+ struct i915_request *rq;
+ struct i915_vma *vma;
+ int err;
+ int i;
+
+ vma = i915_vma_instance(obj, eb->context->vm, NULL);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_HIGH);
+ if (err)
+ return err;
+
+ /* 8-Byte aligned */
+ if (!__reloc_entry_gpu(eb, vma,
+ offsets[0] * sizeof(u32),
+ 0)) {
+ err = -EIO;
+ goto unpin_vma;
+ }
+
+ /* !8-Byte aligned */
+ if (!__reloc_entry_gpu(eb, vma,
+ offsets[1] * sizeof(u32),
+ 1)) {
+ err = -EIO;
+ goto unpin_vma;
+ }
+
+ /* Skip to the end of the cmd page */
+ i = PAGE_SIZE / sizeof(u32) - RELOC_TAIL - 1;
+ i -= eb->reloc_cache.rq_size;
+ memset32(eb->reloc_cache.rq_cmd + eb->reloc_cache.rq_size,
+ MI_NOOP, i);
+ eb->reloc_cache.rq_size += i;
+
+ /* Force batch chaining */
+ if (!__reloc_entry_gpu(eb, vma,
+ offsets[2] * sizeof(u32),
+ 2)) {
+ err = -EIO;
+ goto unpin_vma;
+ }
+
+ GEM_BUG_ON(!eb->reloc_cache.rq);
+ rq = i915_request_get(eb->reloc_cache.rq);
+ err = reloc_gpu_flush(&eb->reloc_cache);
+ if (err)
+ goto put_rq;
+ GEM_BUG_ON(eb->reloc_cache.rq);
+
+ err = i915_gem_object_wait(obj, I915_WAIT_INTERRUPTIBLE, HZ / 2);
+ if (err) {
+ intel_gt_set_wedged(eb->engine->gt);
+ goto put_rq;
+ }
+
+ if (!i915_request_completed(rq)) {
+ pr_err("%s: did not wait for relocations!\n", eb->engine->name);
+ err = -EINVAL;
+ goto put_rq;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(offsets); i++) {
+ u64 reloc = read_reloc(map, offsets[i], mask);
+
+ if (reloc != i) {
+ pr_err("%s[%d]: map[%d] %llx != %x\n",
+ eb->engine->name, i, offsets[i], reloc, i);
+ err = -EINVAL;
+ }
+ }
+ if (err)
+ igt_hexdump(map, 4096);
+
+put_rq:
+ i915_request_put(rq);
+unpin_vma:
+ i915_vma_unpin(vma);
+ return err;
+}
+
+static int igt_gpu_reloc(void *arg)
+{
+ struct i915_execbuffer eb;
+ struct drm_i915_gem_object *scratch;
+ int err = 0;
+ u32 *map;
+
+ eb.i915 = arg;
+
+ scratch = i915_gem_object_create_internal(eb.i915, 4096);
+ if (IS_ERR(scratch))
+ return PTR_ERR(scratch);
+
+ map = i915_gem_object_pin_map(scratch, I915_MAP_WC);
+ if (IS_ERR(map)) {
+ err = PTR_ERR(map);
+ goto err_scratch;
+ }
+
+ for_each_uabi_engine(eb.engine, eb.i915) {
+ reloc_cache_init(&eb.reloc_cache, eb.i915);
+ memset(map, POISON_INUSE, 4096);
+
+ intel_engine_pm_get(eb.engine);
+ eb.context = intel_context_create(eb.engine);
+ if (IS_ERR(eb.context)) {
+ err = PTR_ERR(eb.context);
+ goto err_pm;
+ }
+
+ err = intel_context_pin(eb.context);
+ if (err)
+ goto err_put;
+
+ err = __igt_gpu_reloc(&eb, scratch);
+
+ intel_context_unpin(eb.context);
+err_put:
+ intel_context_put(eb.context);
+err_pm:
+ intel_engine_pm_put(eb.engine);
+ if (err)
+ break;
+ }
+
+ if (igt_flush_test(eb.i915))
+ err = -EIO;
+
+err_scratch:
+ i915_gem_object_put(scratch);
+ return err;
+}
+
+int i915_gem_execbuffer_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_gpu_reloc),
+ };
+
+ if (intel_gt_is_wedged(&i915->gt))
+ return 0;
+
+ return i915_live_subtests(tests, i915);
+}
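
read_reloc() goes through memcpy() because the relocation target need not be 8-byte aligned (the test deliberately pokes dword offsets 8, 3 and 0) and the result is masked down to 32 bits when the engine does not use 64-bit relocations. A user-space sketch of the same read-and-mask, with a plain u32 array standing in for the pinned object map and a little-endian layout assumed, as on x86:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Low `bits` bits set, equivalent to GENMASK_ULL(bits - 1, 0). */
static uint64_t low_mask(unsigned int bits)
{
	return bits >= 64 ? ~0ull : (1ull << bits) - 1;
}

/* Mirror of read_reloc(): unaligned 64-bit load, then mask. */
static uint64_t read_reloc(const uint32_t *map, int x, uint64_t mask)
{
	uint64_t reloc;

	memcpy(&reloc, &map[x], sizeof(reloc));
	return reloc & mask;
}

int main(void)
{
	uint32_t map[16] = { 0 };
	int use_64bit_reloc = 1;	/* gen8+ in the selftest above */
	uint64_t mask = low_mask(use_64bit_reloc ? 64 : 32);

	map[3] = 0xdeadbeef;		/* pretend the GPU wrote the reloc here */
	map[4] = 0x1;

	printf("reloc at dword 3 = %llx\n",
	       (unsigned long long)read_reloc(map, 3, mask));
	return 0;
}
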
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 43912e9b683d..9c7402ce5bf9 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -952,6 +952,129 @@ static int igt_mmap(void *arg)
return 0;
}
+static const char *repr_mmap_type(enum i915_mmap_type type)
+{
+ switch (type) {
+ case I915_MMAP_TYPE_GTT: return "gtt";
+ case I915_MMAP_TYPE_WB: return "wb";
+ case I915_MMAP_TYPE_WC: return "wc";
+ case I915_MMAP_TYPE_UC: return "uc";
+ default: return "unknown";
+ }
+}
+
+static bool can_access(const struct drm_i915_gem_object *obj)
+{
+ unsigned int flags =
+ I915_GEM_OBJECT_HAS_STRUCT_PAGE | I915_GEM_OBJECT_HAS_IOMEM;
+
+ return i915_gem_object_type_has(obj, flags);
+}
+
+static int __igt_mmap_access(struct drm_i915_private *i915,
+ struct drm_i915_gem_object *obj,
+ enum i915_mmap_type type)
+{
+ struct i915_mmap_offset *mmo;
+ unsigned long __user *ptr;
+ unsigned long A, B;
+ unsigned long x, y;
+ unsigned long addr;
+ int err;
+
+ memset(&A, 0xAA, sizeof(A));
+ memset(&B, 0xBB, sizeof(B));
+
+ if (!can_mmap(obj, type) || !can_access(obj))
+ return 0;
+
+ mmo = mmap_offset_attach(obj, type, NULL);
+ if (IS_ERR(mmo))
+ return PTR_ERR(mmo);
+
+ addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED);
+ if (IS_ERR_VALUE(addr))
+ return addr;
+ ptr = (unsigned long __user *)addr;
+
+ err = __put_user(A, ptr);
+ if (err) {
+ pr_err("%s(%s): failed to write into user mmap\n",
+ obj->mm.region->name, repr_mmap_type(type));
+ goto out_unmap;
+ }
+
+ intel_gt_flush_ggtt_writes(&i915->gt);
+
+ err = access_process_vm(current, addr, &x, sizeof(x), 0);
+ if (err != sizeof(x)) {
+ pr_err("%s(%s): access_process_vm() read failed\n",
+ obj->mm.region->name, repr_mmap_type(type));
+ goto out_unmap;
+ }
+
+ err = access_process_vm(current, addr, &B, sizeof(B), FOLL_WRITE);
+ if (err != sizeof(B)) {
+ pr_err("%s(%s): access_process_vm() write failed\n",
+ obj->mm.region->name, repr_mmap_type(type));
+ goto out_unmap;
+ }
+
+ intel_gt_flush_ggtt_writes(&i915->gt);
+
+ err = __get_user(y, ptr);
+ if (err) {
+ pr_err("%s(%s): failed to read from user mmap\n",
+ obj->mm.region->name, repr_mmap_type(type));
+ goto out_unmap;
+ }
+
+ if (x != A || y != B) {
+ pr_err("%s(%s): failed to read/write values, found (%lx, %lx)\n",
+ obj->mm.region->name, repr_mmap_type(type),
+ x, y);
+ err = -EINVAL;
+ goto out_unmap;
+ }
+
+out_unmap:
+ vm_munmap(addr, obj->base.size);
+ return err;
+}
+
+static int igt_mmap_access(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_memory_region *mr;
+ enum intel_region_id id;
+
+ for_each_memory_region(mr, i915, id) {
+ struct drm_i915_gem_object *obj;
+ int err;
+
+ obj = i915_gem_object_create_region(mr, PAGE_SIZE, 0);
+ if (obj == ERR_PTR(-ENODEV))
+ continue;
+
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_GTT);
+ if (err == 0)
+ err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_WB);
+ if (err == 0)
+ err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_WC);
+ if (err == 0)
+ err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_UC);
+
+ i915_gem_object_put(obj);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
static int __igt_mmap_gpu(struct drm_i915_private *i915,
struct drm_i915_gem_object *obj,
enum i915_mmap_type type)
@@ -1156,9 +1279,6 @@ static int __igt_mmap_revoke(struct drm_i915_private *i915,
if (err)
goto out_unmap;
- GEM_BUG_ON(mmo->mmap_type == I915_MMAP_TYPE_GTT &&
- !atomic_read(&obj->bind_count));
-
err = check_present(addr, obj->base.size);
if (err) {
pr_err("%s: was not present\n", obj->mm.region->name);
@@ -1175,7 +1295,6 @@ static int __igt_mmap_revoke(struct drm_i915_private *i915,
pr_err("Failed to unbind object!\n");
goto out_unmap;
}
- GEM_BUG_ON(atomic_read(&obj->bind_count));
if (type != I915_MMAP_TYPE_GTT) {
__i915_gem_object_put_pages(obj);
@@ -1233,6 +1352,7 @@ int i915_gem_mman_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_smoke_tiling),
SUBTEST(igt_mmap_offset_exhaustion),
SUBTEST(igt_mmap),
+ SUBTEST(igt_mmap_access),
SUBTEST(igt_mmap_revoke),
SUBTEST(igt_mmap_gpu),
};
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c
index 2b6db6f799de..faa5b6d91795 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c
@@ -14,7 +14,7 @@ static int igt_gem_object(void *arg)
{
struct drm_i915_private *i915 = arg;
struct drm_i915_gem_object *obj;
- int err = -ENOMEM;
+ int err;
/* Basic test to ensure we can create an object */
diff --git a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
index 772d8cba7da9..e21b5023ca7d 100644
--- a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
+++ b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
@@ -83,6 +83,8 @@ igt_emit_store_dw(struct i915_vma *vma,
offset += PAGE_SIZE;
}
*cmd = MI_BATCH_BUFFER_END;
+
+ i915_gem_object_flush_map(obj);
i915_gem_object_unpin_map(obj);
intel_gt_chipset_flush(vma->vm->gt);
@@ -126,16 +128,6 @@ int igt_gpu_fill_dw(struct intel_context *ce,
goto err_batch;
}
- flags = 0;
- if (INTEL_GEN(ce->vm->i915) <= 5)
- flags |= I915_DISPATCH_SECURE;
-
- err = rq->engine->emit_bb_start(rq,
- batch->node.start, batch->node.size,
- flags);
- if (err)
- goto err_request;
-
i915_vma_lock(batch);
err = i915_request_await_object(rq, batch->obj, false);
if (err == 0)
@@ -152,15 +144,17 @@ int igt_gpu_fill_dw(struct intel_context *ce,
if (err)
goto skip_request;
- i915_request_add(rq);
-
- i915_vma_unpin_and_release(&batch, 0);
+ flags = 0;
+ if (INTEL_GEN(ce->vm->i915) <= 5)
+ flags |= I915_DISPATCH_SECURE;
- return 0;
+ err = rq->engine->emit_bb_start(rq,
+ batch->node.start, batch->node.size,
+ flags);
skip_request:
- i915_request_set_error_once(rq, err);
-err_request:
+ if (err)
+ i915_request_set_error_once(rq, err);
i915_request_add(rq);
err_batch:
i915_vma_unpin_and_release(&batch, 0);
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
index 9272bef57092..debaf7b18ab5 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
@@ -66,7 +66,7 @@ static void *mock_dmabuf_vmap(struct dma_buf *dma_buf)
{
struct mock_dmabuf *mock = to_mock(dma_buf);
- return vm_map_ram(mock->pages, mock->npages, 0, PAGE_KERNEL);
+ return vm_map_ram(mock->pages, mock->npages, 0);
}
static void mock_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
diff --git a/drivers/gpu/drm/i915/gt/debugfs_engines.c b/drivers/gpu/drm/i915/gt/debugfs_engines.c
index 6a5e9ab20b94..5e3725e62241 100644
--- a/drivers/gpu/drm/i915/gt/debugfs_engines.c
+++ b/drivers/gpu/drm/i915/gt/debugfs_engines.c
@@ -32,5 +32,5 @@ void debugfs_engines_register(struct intel_gt *gt, struct dentry *root)
{ "engines", &engines_fops },
};
- debugfs_gt_register_files(gt, root, files, ARRAY_SIZE(files));
+ intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), gt);
}
diff --git a/drivers/gpu/drm/i915/gt/debugfs_gt.c b/drivers/gpu/drm/i915/gt/debugfs_gt.c
index 75255aaacaed..1de5fbaa1cf9 100644
--- a/drivers/gpu/drm/i915/gt/debugfs_gt.c
+++ b/drivers/gpu/drm/i915/gt/debugfs_gt.c
@@ -9,6 +9,7 @@
#include "debugfs_engines.h"
#include "debugfs_gt.h"
#include "debugfs_gt_pm.h"
+#include "uc/intel_uc_debugfs.h"
#include "i915_drv.h"
void debugfs_gt_register(struct intel_gt *gt)
@@ -24,17 +25,19 @@ void debugfs_gt_register(struct intel_gt *gt)
debugfs_engines_register(gt, root);
debugfs_gt_pm_register(gt, root);
+
+ intel_uc_debugfs_register(&gt->uc, root);
}
-void debugfs_gt_register_files(struct intel_gt *gt,
- struct dentry *root,
- const struct debugfs_gt_file *files,
- unsigned long count)
+void intel_gt_debugfs_register_files(struct dentry *root,
+ const struct debugfs_gt_file *files,
+ unsigned long count, void *data)
{
while (count--) {
- if (!files->eval || files->eval(gt))
+ umode_t mode = files->fops->write ? 0644 : 0444;
+ if (!files->eval || files->eval(data))
debugfs_create_file(files->name,
- 0444, root, gt,
+ mode, root, data,
files->fops);
files++;
diff --git a/drivers/gpu/drm/i915/gt/debugfs_gt.h b/drivers/gpu/drm/i915/gt/debugfs_gt.h
index 4ea0f06cda8f..f77540f727e9 100644
--- a/drivers/gpu/drm/i915/gt/debugfs_gt.h
+++ b/drivers/gpu/drm/i915/gt/debugfs_gt.h
@@ -28,12 +28,11 @@ void debugfs_gt_register(struct intel_gt *gt);
struct debugfs_gt_file {
const char *name;
const struct file_operations *fops;
- bool (*eval)(const struct intel_gt *gt);
+ bool (*eval)(void *data);
};
-void debugfs_gt_register_files(struct intel_gt *gt,
- struct dentry *root,
- const struct debugfs_gt_file *files,
- unsigned long count);
+void intel_gt_debugfs_register_files(struct dentry *root,
+ const struct debugfs_gt_file *files,
+ unsigned long count, void *data);
#endif /* DEBUGFS_GT_H */
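
After the rename the registration helper no longer assumes its private data is an intel_gt: callers pass an opaque pointer, eval() receives that same pointer, and the file mode is derived from whether the fops provide a write handler. A sketch of a caller under the new interface, modelled on the debugfs_engines.c and debugfs_gt_pm.c hunks; example_fops, example_ctl_fops and example_register are placeholder names:

static bool example_eval(void *data)
{
	struct intel_gt *gt = data;

	return HAS_LLC(gt->i915);	/* only register where it makes sense */
}

static const struct debugfs_gt_file files[] = {
	{ "example", &example_fops },				/* no .write -> 0444 */
	{ "example_ctl", &example_ctl_fops, example_eval },	/* .write -> 0644 */
};

void example_register(struct intel_gt *gt, struct dentry *root)
{
	/* gt is handed through to eval() and becomes each file's private data */
	intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), gt);
}
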
diff --git a/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c b/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c
index 059c9e5c002e..174a24553322 100644
--- a/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c
@@ -10,6 +10,7 @@
#include "debugfs_gt_pm.h"
#include "i915_drv.h"
#include "intel_gt.h"
+#include "intel_gt_clock_utils.h"
#include "intel_llc.h"
#include "intel_rc6.h"
#include "intel_rps.h"
@@ -268,7 +269,7 @@ static int frequency_show(struct seq_file *m, void *unused)
yesno(rpmodectl & GEN6_RP_ENABLE));
seq_printf(m, "SW control enabled: %s\n",
yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
- GEN6_RP_MEDIA_SW_MODE));
+ GEN6_RP_MEDIA_SW_MODE));
vlv_punit_get(i915);
freq_sts = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
@@ -300,8 +301,9 @@ static int frequency_show(struct seq_file *m, void *unused)
u32 rp_state_cap;
u32 rpmodectl, rpinclimit, rpdeclimit;
u32 rpstat, cagf, reqf;
- u32 rpupei, rpcurup, rpprevup;
- u32 rpdownei, rpcurdown, rpprevdown;
+ u32 rpcurupei, rpcurup, rpprevup;
+ u32 rpcurdownei, rpcurdown, rpprevdown;
+ u32 rpupei, rpupt, rpdownei, rpdownt;
u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
int max_freq;
@@ -334,12 +336,19 @@ static int frequency_show(struct seq_file *m, void *unused)
rpdeclimit = intel_uncore_read(uncore, GEN6_RP_DOWN_THRESHOLD);
rpstat = intel_uncore_read(uncore, GEN6_RPSTAT1);
- rpupei = intel_uncore_read(uncore, GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
+ rpcurupei = intel_uncore_read(uncore, GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
rpcurup = intel_uncore_read(uncore, GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
rpprevup = intel_uncore_read(uncore, GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
- rpdownei = intel_uncore_read(uncore, GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
+ rpcurdownei = intel_uncore_read(uncore, GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
rpcurdown = intel_uncore_read(uncore, GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
rpprevdown = intel_uncore_read(uncore, GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
+
+ rpupei = intel_uncore_read(uncore, GEN6_RP_UP_EI);
+ rpupt = intel_uncore_read(uncore, GEN6_RP_UP_THRESHOLD);
+
+ rpdownei = intel_uncore_read(uncore, GEN6_RP_DOWN_EI);
+ rpdownt = intel_uncore_read(uncore, GEN6_RP_DOWN_THRESHOLD);
+
cagf = intel_rps_read_actual_frequency(rps);
intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
@@ -372,7 +381,7 @@ static int frequency_show(struct seq_file *m, void *unused)
yesno(rpmodectl & GEN6_RP_ENABLE));
seq_printf(m, "SW control enabled: %s\n",
yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
- GEN6_RP_MEDIA_SW_MODE));
+ GEN6_RP_MEDIA_SW_MODE));
seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
pm_ier, pm_imr, pm_mask);
@@ -394,23 +403,35 @@ static int frequency_show(struct seq_file *m, void *unused)
seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
seq_printf(m, "CAGF: %dMHz\n", cagf);
- seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
- rpupei, GT_PM_INTERVAL_TO_US(i915, rpupei));
- seq_printf(m, "RP CUR UP: %d (%dus)\n",
- rpcurup, GT_PM_INTERVAL_TO_US(i915, rpcurup));
- seq_printf(m, "RP PREV UP: %d (%dus)\n",
- rpprevup, GT_PM_INTERVAL_TO_US(i915, rpprevup));
+ seq_printf(m, "RP CUR UP EI: %d (%dns)\n",
+ rpcurupei,
+ intel_gt_pm_interval_to_ns(gt, rpcurupei));
+ seq_printf(m, "RP CUR UP: %d (%dns)\n",
+ rpcurup, intel_gt_pm_interval_to_ns(gt, rpcurup));
+ seq_printf(m, "RP PREV UP: %d (%dns)\n",
+ rpprevup, intel_gt_pm_interval_to_ns(gt, rpprevup));
seq_printf(m, "Up threshold: %d%%\n",
rps->power.up_threshold);
-
- seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
- rpdownei, GT_PM_INTERVAL_TO_US(i915, rpdownei));
- seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
- rpcurdown, GT_PM_INTERVAL_TO_US(i915, rpcurdown));
- seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
- rpprevdown, GT_PM_INTERVAL_TO_US(i915, rpprevdown));
+ seq_printf(m, "RP UP EI: %d (%dns)\n",
+ rpupei, intel_gt_pm_interval_to_ns(gt, rpupei));
+ seq_printf(m, "RP UP THRESHOLD: %d (%dns)\n",
+ rpupt, intel_gt_pm_interval_to_ns(gt, rpupt));
+
+ seq_printf(m, "RP CUR DOWN EI: %d (%dns)\n",
+ rpcurdownei,
+ intel_gt_pm_interval_to_ns(gt, rpcurdownei));
+ seq_printf(m, "RP CUR DOWN: %d (%dns)\n",
+ rpcurdown,
+ intel_gt_pm_interval_to_ns(gt, rpcurdown));
+ seq_printf(m, "RP PREV DOWN: %d (%dns)\n",
+ rpprevdown,
+ intel_gt_pm_interval_to_ns(gt, rpprevdown));
seq_printf(m, "Down threshold: %d%%\n",
rps->power.down_threshold);
+ seq_printf(m, "RP DOWN EI: %d (%dns)\n",
+ rpdownei, intel_gt_pm_interval_to_ns(gt, rpdownei));
+ seq_printf(m, "RP DOWN THRESHOLD: %d (%dns)\n",
+ rpdownt, intel_gt_pm_interval_to_ns(gt, rpdownt));
max_freq = (IS_GEN9_LP(i915) ? rp_state_cap >> 0 :
rp_state_cap >> 16) & 0xff;
@@ -506,8 +527,10 @@ static int llc_show(struct seq_file *m, void *data)
return 0;
}
-static bool llc_eval(const struct intel_gt *gt)
+static bool llc_eval(void *data)
{
+ struct intel_gt *gt = data;
+
return HAS_LLC(gt->i915);
}
@@ -533,7 +556,8 @@ static int rps_boost_show(struct seq_file *m, void *data)
struct drm_i915_private *i915 = gt->i915;
struct intel_rps *rps = &gt->rps;
- seq_printf(m, "RPS enabled? %d\n", rps->enabled);
+ seq_printf(m, "RPS enabled? %s\n", yesno(intel_rps_is_enabled(rps)));
+ seq_printf(m, "RPS active? %s\n", yesno(intel_rps_is_active(rps)));
seq_printf(m, "GPU busy? %s\n", yesno(gt->awake));
seq_printf(m, "Boosts outstanding? %d\n",
atomic_read(&rps->num_waiters));
@@ -553,7 +577,7 @@ static int rps_boost_show(struct seq_file *m, void *data)
seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));
- if (INTEL_GEN(i915) >= 6 && rps->enabled && gt->awake) {
+ if (INTEL_GEN(i915) >= 6 && intel_rps_is_active(rps)) {
struct intel_uncore *uncore = gt->uncore;
u32 rpup, rpupei;
u32 rpdown, rpdownei;
@@ -580,8 +604,10 @@ static int rps_boost_show(struct seq_file *m, void *data)
return 0;
}
-static bool rps_eval(const struct intel_gt *gt)
+static bool rps_eval(void *data)
{
+ struct intel_gt *gt = data;
+
return HAS_RPS(gt->i915);
}
@@ -597,5 +623,5 @@ void debugfs_gt_pm_register(struct intel_gt *gt, struct dentry *root)
{ "rps_boost", &rps_boost_fops, rps_eval },
};
- debugfs_gt_register_files(gt, root, files, ARRAY_SIZE(files));
+ intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), gt);
}
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
index 94e746af8926..699125928272 100644
--- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
@@ -389,6 +389,16 @@ static int gen8_ppgtt_alloc(struct i915_address_space *vm,
return err;
}
+static __always_inline void
+write_pte(gen8_pte_t *pte, const gen8_pte_t val)
+{
+ /* Magic delays? Or can we refine these to flush all in one pass? */
+ *pte = val;
+ wmb(); /* cpu to cache */
+ clflush(pte); /* cache to memory */
+ wmb(); /* visible to all */
+}
+
static __always_inline u64
gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
struct i915_page_directory *pdp,
@@ -405,7 +415,8 @@ gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
do {
GEM_BUG_ON(iter->sg->length < I915_GTT_PAGE_SIZE);
- vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma;
+ write_pte(&vaddr[gen8_pd_index(idx, 0)],
+ pte_encode | iter->dma);
iter->dma += I915_GTT_PAGE_SIZE;
if (iter->dma >= iter->max) {
@@ -487,7 +498,7 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
do {
GEM_BUG_ON(iter->sg->length < page_size);
- vaddr[index++] = encode | iter->dma;
+ write_pte(&vaddr[index++], encode | iter->dma);
start += page_size;
iter->dma += page_size;
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
index cbad7fe722ce..d907d538176e 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
@@ -64,7 +64,7 @@ static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b)
if (!--b->irq_enabled)
irq_disable(engine);
- b->irq_armed = false;
+ WRITE_ONCE(b->irq_armed, false);
intel_gt_pm_put_async(engine->gt);
}
@@ -73,7 +73,7 @@ void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
struct intel_breadcrumbs *b = &engine->breadcrumbs;
unsigned long flags;
- if (!b->irq_armed)
+ if (!READ_ONCE(b->irq_armed))
return;
spin_lock_irqsave(&b->irq_lock, flags);
@@ -142,6 +142,18 @@ static void add_retire(struct intel_breadcrumbs *b, struct intel_timeline *tl)
intel_engine_add_retire(engine, tl);
}
+static void __signal_request(struct i915_request *rq, struct list_head *signals)
+{
+ GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags));
+ clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
+
+ if (!__dma_fence_signal(&rq->fence))
+ return;
+
+ i915_request_get(rq);
+ list_add_tail(&rq->signal_link, signals);
+}
+
static void signal_irq_work(struct irq_work *work)
{
struct intel_breadcrumbs *b = container_of(work, typeof(*b), irq_work);
@@ -155,6 +167,8 @@ static void signal_irq_work(struct irq_work *work)
if (b->irq_armed && list_empty(&b->signalers))
__intel_breadcrumbs_disarm_irq(b);
+ list_splice_init(&b->signaled_requests, &signal);
+
list_for_each_entry_safe(ce, cn, &b->signalers, signal_link) {
GEM_BUG_ON(list_empty(&ce->signals));
@@ -163,24 +177,15 @@ static void signal_irq_work(struct irq_work *work)
list_entry(pos, typeof(*rq), signal_link);
GEM_BUG_ON(!check_signal_order(ce, rq));
-
if (!__request_completed(rq))
break;
- GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_SIGNAL,
- &rq->fence.flags));
- clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
-
- if (!__dma_fence_signal(&rq->fence))
- continue;
-
/*
* Queue for execution after dropping the signaling
* spinlock as the callback chain may end up adding
* more signalers to the same context or engine.
*/
- i915_request_get(rq);
- list_add_tail(&rq->signal_link, &signal);
+ __signal_request(rq, &signal);
}
/*
@@ -233,7 +238,7 @@ static bool __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
* which we can add a new waiter and avoid the cost of re-enabling
* the irq.
*/
- b->irq_armed = true;
+ WRITE_ONCE(b->irq_armed, true);
/*
* Since we are waiting on a request, the GPU should be busy
@@ -255,6 +260,7 @@ void intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
spin_lock_init(&b->irq_lock);
INIT_LIST_HEAD(&b->signalers);
+ INIT_LIST_HEAD(&b->signaled_requests);
init_irq_work(&b->irq_work, signal_irq_work);
}
@@ -274,6 +280,32 @@ void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
spin_unlock_irqrestore(&b->irq_lock, flags);
}
+void intel_engine_transfer_stale_breadcrumbs(struct intel_engine_cs *engine,
+ struct intel_context *ce)
+{
+ struct intel_breadcrumbs *b = &engine->breadcrumbs;
+ unsigned long flags;
+
+ spin_lock_irqsave(&b->irq_lock, flags);
+ if (!list_empty(&ce->signals)) {
+ struct i915_request *rq, *next;
+
+ /* Queue for executing the signal callbacks in the irq_work */
+ list_for_each_entry_safe(rq, next, &ce->signals, signal_link) {
+ GEM_BUG_ON(rq->engine != engine);
+ GEM_BUG_ON(!__request_completed(rq));
+
+ __signal_request(rq, &b->signaled_requests);
+ }
+
+ INIT_LIST_HEAD(&ce->signals);
+ list_del_init(&ce->signal_link);
+
+ irq_work_queue(&b->irq_work);
+ }
+ spin_unlock_irqrestore(&b->irq_lock, flags);
+}
+
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
{
}
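
__signal_request() and the new signaled_requests list let an engine signal fences for requests whose context is no longer on its signalers list; intel_engine_transfer_stale_breadcrumbs() is the entry point for that hand-over. A hedged sketch of a caller, assuming a context ce is being detached from old_engine (the real call site is added elsewhere in the series, most likely in the virtual-engine handling, and is not part of this diff):

	/*
	 * ce is leaving old_engine: move any already-completed requests from
	 * ce->signals onto the engine's signaled_requests list and kick the
	 * irq_work so their fence callbacks still run from that engine.
	 */
	intel_engine_transfer_stale_breadcrumbs(old_engine, ce);
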
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index aea992e46c42..e4aece20bc80 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -97,8 +97,6 @@ int __intel_context_do_pin(struct intel_context *ce)
{
int err;
- GEM_BUG_ON(intel_context_is_closed(ce));
-
if (unlikely(!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) {
err = intel_context_alloc_state(ce);
if (err)
@@ -114,6 +112,11 @@ int __intel_context_do_pin(struct intel_context *ce)
goto out_release;
}
+ if (unlikely(intel_context_is_closed(ce))) {
+ err = -ENOENT;
+ goto out_unlock;
+ }
+
if (likely(!atomic_add_unless(&ce->pin_count, 1, 0))) {
err = intel_context_active_acquire(ce);
if (unlikely(err))
diff --git a/drivers/gpu/drm/i915/gt/intel_context_sseu.c b/drivers/gpu/drm/i915/gt/intel_context_sseu.c
index 57a30956c922..487299cb91f2 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_sseu.c
+++ b/drivers/gpu/drm/i915/gt/intel_context_sseu.c
@@ -25,8 +25,7 @@ static int gen8_emit_rpcs_config(struct i915_request *rq,
return PTR_ERR(cs);
offset = i915_ggtt_offset(ce->state) +
- LRC_STATE_PN * PAGE_SIZE +
- CTX_R_PWR_CLK_STATE * 4;
+ LRC_STATE_OFFSET + CTX_R_PWR_CLK_STATE * 4;
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*cs++ = lower_32_bits(offset);
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index ca0d4f4f3615..4954b0df4864 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -102,6 +102,8 @@ struct intel_context {
/** sseu: Control eu/slice partitioning */
struct intel_sseu sseu;
+
+ u8 wa_bb_page; /* if set, page num reserved for context workarounds */
};
#endif /* __INTEL_CONTEXT_TYPES__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index a1aa0d3e8be1..9bf6d4989968 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -199,6 +199,8 @@ void intel_engine_cleanup(struct intel_engine_cs *engine);
int intel_engines_init_mmio(struct intel_gt *gt);
int intel_engines_init(struct intel_gt *gt);
+void intel_engine_free_request_pool(struct intel_engine_cs *engine);
+
void intel_engines_release(struct intel_gt *gt);
void intel_engines_free(struct intel_gt *gt);
@@ -236,22 +238,35 @@ intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine)
void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
+void intel_engine_transfer_stale_breadcrumbs(struct intel_engine_cs *engine,
+ struct intel_context *ce);
+
void intel_engine_print_breadcrumbs(struct intel_engine_cs *engine,
struct drm_printer *p);
-static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
+static inline u32 *__gen8_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset)
{
memset(batch, 0, 6 * sizeof(u32));
- batch[0] = GFX_OP_PIPE_CONTROL(6);
- batch[1] = flags;
+ batch[0] = GFX_OP_PIPE_CONTROL(6) | flags0;
+ batch[1] = flags1;
batch[2] = offset;
return batch + 6;
}
+static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
+{
+ return __gen8_emit_pipe_control(batch, 0, flags, offset);
+}
+
+static inline u32 *gen12_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset)
+{
+ return __gen8_emit_pipe_control(batch, flags0, flags1, offset);
+}
+
static inline u32 *
-gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
+__gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags0, u32 flags1)
{
/* We're using qword write, offset should be aligned to 8 bytes. */
GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
@@ -260,8 +275,8 @@ gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
* need a prior CS_STALL, which is emitted by the flush
* following the batch.
*/
- *cs++ = GFX_OP_PIPE_CONTROL(6);
- *cs++ = flags | PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_GLOBAL_GTT_IVB;
+ *cs++ = GFX_OP_PIPE_CONTROL(6) | flags0;
+ *cs++ = flags1 | PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_GLOBAL_GTT_IVB;
*cs++ = gtt_offset;
*cs++ = 0;
*cs++ = value;
@@ -271,6 +286,18 @@ gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
return cs;
}
+static inline u32*
+gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
+{
+ return __gen8_emit_ggtt_write_rcs(cs, value, gtt_offset, 0, flags);
+}
+
+static inline u32*
+gen12_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags0, u32 flags1)
+{
+ return __gen8_emit_ggtt_write_rcs(cs, value, gtt_offset, flags0, flags1);
+}
+
static inline u32 *
gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
{
@@ -308,9 +335,6 @@ void intel_engine_dump(struct intel_engine_cs *engine,
struct drm_printer *m,
const char *header, ...);
-int intel_enable_engine_stats(struct intel_engine_cs *engine);
-void intel_disable_engine_stats(struct intel_engine_cs *engine);
-
ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine);
struct i915_request *
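
The flags0/flags1 split exists because gen12 adds pipe-control bits that live in the command dword itself (flags0 is OR'ed into GFX_OP_PIPE_CONTROL(6) above) while the classic flags stay in the following dword. A sketch of a gen12 caller; GEN12_PIPE_CONTROL0_EXAMPLE_FLAG is a placeholder name, PIPE_CONTROL_CS_STALL is taken from the wider driver rather than from this hunk, and cs, scratch_offset, seqno and hwsp_offset are assumed locals:

	/* flags0 -> command dword, flags1 -> classic flags dword */
	cs = gen12_emit_pipe_control(cs,
				     GEN12_PIPE_CONTROL0_EXAMPLE_FLAG,
				     PIPE_CONTROL_CS_STALL,
				     scratch_offset);

	/* breadcrumb write with the same split */
	cs = gen12_emit_ggtt_write_rcs(cs, seqno, hwsp_offset,
				       0 /* flags0 */,
				       PIPE_CONTROL_CS_STALL);
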
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 883a9b7fe88d..da5b61085257 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -31,7 +31,6 @@
#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_pm.h"
-#include "intel_engine_pool.h"
#include "intel_engine_user.h"
#include "intel_gt.h"
#include "intel_gt_requests.h"
@@ -327,6 +326,8 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
if (INTEL_GEN(i915) == 12 && engine->class == RENDER_CLASS)
engine->props.preempt_timeout_ms = 0;
+ engine->defaults = engine->props; /* never to change again */
+
engine->context_size = intel_engine_context_size(gt, engine->class);
if (WARN_ON(engine->context_size > BIT(20)))
engine->context_size = 0;
@@ -347,8 +348,6 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
gt->engine_class[info->class][info->instance] = engine;
gt->engine[id] = engine;
- i915->engine[id] = engine;
-
return 0;
}
@@ -425,17 +424,27 @@ void intel_engines_release(struct intel_gt *gt)
engine->release = NULL;
memset(&engine->reset, 0, sizeof(engine->reset));
-
- gt->i915->engine[id] = NULL;
}
}
+void intel_engine_free_request_pool(struct intel_engine_cs *engine)
+{
+ if (!engine->request_pool)
+ return;
+
+ kmem_cache_free(i915_request_slab_cache(), engine->request_pool);
+}
+
void intel_engines_free(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ /* Free the requests! dma-resv keeps fences around for an eternity */
+ rcu_barrier();
+
for_each_engine(engine, gt, id) {
+ intel_engine_free_request_pool(engine);
kfree(engine);
gt->engine[id] = NULL;
}
@@ -623,8 +632,6 @@ static int engine_setup_common(struct intel_engine_cs *engine)
intel_engine_init__pm(engine);
intel_engine_init_retire(engine);
- intel_engine_pool_init(&engine->pool);
-
/* Use the whole device by default */
engine->sseu =
intel_sseu_from_device_info(&RUNTIME_INFO(engine->i915)->sseu);
@@ -821,12 +828,11 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
cleanup_status_page(engine);
intel_engine_fini_retire(engine);
- intel_engine_pool_fini(&engine->pool);
intel_engine_fini_breadcrumbs(engine);
intel_engine_cleanup_cmd_parser(engine);
if (engine->default_state)
- i915_gem_object_put(engine->default_state);
+ fput(engine->default_state);
if (engine->kernel_context) {
intel_context_unpin(engine->kernel_context);
@@ -1225,6 +1231,49 @@ static void print_request(struct drm_printer *m,
name);
}
+static struct intel_timeline *get_timeline(struct i915_request *rq)
+{
+ struct intel_timeline *tl;
+
+ /*
+ * Even though we are holding the engine->active.lock here, there
+ * is no control over the submission queue per-se and we are
+ * inspecting the active state at a random point in time, with an
+ * unknown queue. Play safe and make sure the timeline remains valid.
+ * (Only being used for pretty printing, one extra kref shouldn't
+ * cause a camel stampede!)
+ */
+ rcu_read_lock();
+ tl = rcu_dereference(rq->timeline);
+ if (!kref_get_unless_zero(&tl->kref))
+ tl = NULL;
+ rcu_read_unlock();
+
+ return tl;
+}
+
+static int print_ring(char *buf, int sz, struct i915_request *rq)
+{
+ int len = 0;
+
+ if (!i915_request_signaled(rq)) {
+ struct intel_timeline *tl = get_timeline(rq);
+
+ len = scnprintf(buf, sz,
+ "ring:{start:%08x, hwsp:%08x, seqno:%08x, runtime:%llums}, ",
+ i915_ggtt_offset(rq->ring->vma),
+ tl ? tl->hwsp_offset : 0,
+ hwsp_seqno(rq),
+ DIV_ROUND_CLOSEST_ULL(intel_context_get_total_runtime_ns(rq->context),
+ 1000 * 1000));
+
+ if (tl)
+ intel_timeline_put(tl);
+ }
+
+ return len;
+}
+
static void hexdump(struct drm_printer *m, const void *buf, size_t len)
{
const size_t rowsize = 8 * sizeof(u32);
@@ -1254,27 +1303,6 @@ static void hexdump(struct drm_printer *m, const void *buf, size_t len)
}
}
-static struct intel_timeline *get_timeline(struct i915_request *rq)
-{
- struct intel_timeline *tl;
-
- /*
- * Even though we are holding the engine->active.lock here, there
- * is no control over the submission queue per-se and we are
- * inspecting the active state at a random point in time, with an
- * unknown queue. Play safe and make sure the timeline remains valid.
- * (Only being used for pretty printing, one extra kref shouldn't
- * cause a camel stampede!)
- */
- rcu_read_lock();
- tl = rcu_dereference(rq->timeline);
- if (!kref_get_unless_zero(&tl->kref))
- tl = NULL;
- rcu_read_unlock();
-
- return tl;
-}
-
static const char *repr_timer(const struct timer_list *t)
{
if (!READ_ONCE(t->expires))
@@ -1393,39 +1421,24 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
int len;
len = scnprintf(hdr, sizeof(hdr),
- "\t\tActive[%d]: ",
- (int)(port - execlists->active));
- if (!i915_request_signaled(rq)) {
- struct intel_timeline *tl = get_timeline(rq);
-
- len += scnprintf(hdr + len, sizeof(hdr) - len,
- "ring:{start:%08x, hwsp:%08x, seqno:%08x, runtime:%llums}, ",
- i915_ggtt_offset(rq->ring->vma),
- tl ? tl->hwsp_offset : 0,
- hwsp_seqno(rq),
- DIV_ROUND_CLOSEST_ULL(intel_context_get_total_runtime_ns(rq->context),
- 1000 * 1000));
-
- if (tl)
- intel_timeline_put(tl);
- }
+ "\t\tActive[%d]: ccid:%08x, ",
+ (int)(port - execlists->active),
+ rq->context->lrc.ccid);
+ len += print_ring(hdr + len, sizeof(hdr) - len, rq);
scnprintf(hdr + len, sizeof(hdr) - len, "rq: ");
print_request(m, rq, hdr);
}
for (port = execlists->pending; (rq = *port); port++) {
- struct intel_timeline *tl = get_timeline(rq);
- char hdr[80];
-
- snprintf(hdr, sizeof(hdr),
- "\t\tPending[%d] ring:{start:%08x, hwsp:%08x, seqno:%08x}, rq: ",
- (int)(port - execlists->pending),
- i915_ggtt_offset(rq->ring->vma),
- tl ? tl->hwsp_offset : 0,
- hwsp_seqno(rq));
- print_request(m, rq, hdr);
+ char hdr[160];
+ int len;
- if (tl)
- intel_timeline_put(tl);
+ len = scnprintf(hdr, sizeof(hdr),
+ "\t\tPending[%d]: ccid:%08x, ",
+ (int)(port - execlists->pending),
+ rq->context->lrc.ccid);
+ len += print_ring(hdr + len, sizeof(hdr) - len, rq);
+ scnprintf(hdr + len, sizeof(hdr) - len, "rq: ");
+ print_request(m, rq, hdr);
}
rcu_read_unlock();
execlists_active_unlock_bh(execlists);
@@ -1574,58 +1587,6 @@ void intel_engine_dump(struct intel_engine_cs *engine,
intel_engine_print_breadcrumbs(engine, m);
}
-/**
- * intel_enable_engine_stats() - Enable engine busy tracking on engine
- * @engine: engine to enable stats collection
- *
- * Start collecting the engine busyness data for @engine.
- *
- * Returns 0 on success or a negative error code.
- */
-int intel_enable_engine_stats(struct intel_engine_cs *engine)
-{
- struct intel_engine_execlists *execlists = &engine->execlists;
- unsigned long flags;
- int err = 0;
-
- if (!intel_engine_supports_stats(engine))
- return -ENODEV;
-
- execlists_active_lock_bh(execlists);
- write_seqlock_irqsave(&engine->stats.lock, flags);
-
- if (unlikely(engine->stats.enabled == ~0)) {
- err = -EBUSY;
- goto unlock;
- }
-
- if (engine->stats.enabled++ == 0) {
- struct i915_request * const *port;
- struct i915_request *rq;
-
- engine->stats.enabled_at = ktime_get();
-
- /* XXX submission method oblivious? */
- for (port = execlists->active; (rq = *port); port++)
- engine->stats.active++;
-
- for (port = execlists->pending; (rq = *port); port++) {
- /* Exclude any contexts already counted in active */
- if (!intel_context_inflight_count(rq->context))
- engine->stats.active++;
- }
-
- if (engine->stats.active)
- engine->stats.start = engine->stats.enabled_at;
- }
-
-unlock:
- write_sequnlock_irqrestore(&engine->stats.lock, flags);
- execlists_active_unlock_bh(execlists);
-
- return err;
-}
-
static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine)
{
ktime_t total = engine->stats.total;
@@ -1634,7 +1595,7 @@ static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine)
* If the engine is executing something at the moment
* add it to the total.
*/
- if (engine->stats.active)
+ if (atomic_read(&engine->stats.active))
total = ktime_add(total,
ktime_sub(ktime_get(), engine->stats.start));
@@ -1660,28 +1621,6 @@ ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine)
return total;
}
-/**
- * intel_disable_engine_stats() - Disable engine busy tracking on engine
- * @engine: engine to disable stats collection
- *
- * Stops collecting the engine busyness data for @engine.
- */
-void intel_disable_engine_stats(struct intel_engine_cs *engine)
-{
- unsigned long flags;
-
- if (!intel_engine_supports_stats(engine))
- return;
-
- write_seqlock_irqsave(&engine->stats.lock, flags);
- WARN_ON_ONCE(engine->stats.enabled == 0);
- if (--engine->stats.enabled == 0) {
- engine->stats.total = __intel_engine_get_busy_time(engine);
- engine->stats.active = 0;
- }
- write_sequnlock_irqrestore(&engine->stats.lock, flags);
-}
-
static bool match_ring(struct i915_request *rq)
{
u32 ring = ENGINE_READ(rq->engine, RING_START);
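
The busyness accounting above now keeps stats.active as an atomic_t and only takes the seqlock to publish stats.total/stats.start, so readers never block the submission path. A minimal sketch of the matching read side, assuming the stats fields shown later in intel_engine_types.h (the in-tree intel_engine_get_busy_time() follows the same pattern):

    static ktime_t read_busy_time(struct intel_engine_cs *engine)
    {
            unsigned int seq;
            ktime_t total;

            /* Retry until no writer raced with us under the seqlock. */
            do {
                    seq = read_seqbegin(&engine->stats.lock);
                    total = __intel_engine_get_busy_time(engine);
            } while (read_seqretry(&engine->stats.lock, seq));

            return total;
    }
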
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
index dd825718e4e5..5136c8bf112d 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
@@ -31,7 +31,7 @@ static bool next_heartbeat(struct intel_engine_cs *engine)
delay = msecs_to_jiffies_timeout(delay);
if (delay >= HZ)
delay = round_jiffies_up_relative(delay);
- schedule_delayed_work(&engine->heartbeat.work, delay);
+ mod_delayed_work(system_wq, &engine->heartbeat.work, delay);
return true;
}
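
A short note on the helper swap above: schedule_delayed_work() is a no-op when the work item is already queued, so a freshly lowered heartbeat interval would only take effect once the old timer expired. mod_delayed_work() re-arms a pending timer (or queues the work if idle), so the new delay applies immediately. Sketch, using the generic workqueue API:

    static void rearm_heartbeat(struct delayed_work *dwork, unsigned long delay)
    {
            /*
             * schedule_delayed_work(dwork, delay) would do nothing here if the
             * work is already pending, keeping the old (possibly longer) expiry;
             * mod_delayed_work() always honours @delay.
             */
            mod_delayed_work(system_wq, dwork, delay);
    }
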
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index b6cf284e3a2d..d0a1078ef632 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -10,31 +10,22 @@
#include "intel_engine.h"
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
-#include "intel_engine_pool.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_rc6.h"
#include "intel_ring.h"
+#include "shmem_utils.h"
static int __engine_unpark(struct intel_wakeref *wf)
{
struct intel_engine_cs *engine =
container_of(wf, typeof(*engine), wakeref);
struct intel_context *ce;
- void *map;
ENGINE_TRACE(engine, "\n");
intel_gt_pm_get(engine->gt);
- /* Pin the default state for fast resets from atomic context. */
- map = NULL;
- if (engine->default_state)
- map = i915_gem_object_pin_map(engine->default_state,
- I915_MAP_WB);
- if (!IS_ERR_OR_NULL(map))
- engine->pinned_default_state = map;
-
/* Discard stale context state from across idling */
ce = engine->kernel_context;
if (ce) {
@@ -44,6 +35,7 @@ static int __engine_unpark(struct intel_wakeref *wf)
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && ce->state) {
struct drm_i915_gem_object *obj = ce->state->obj;
int type = i915_coherent_map_type(engine->i915);
+ void *map;
map = i915_gem_object_pin_map(obj, type);
if (!IS_ERR(map)) {
@@ -181,7 +173,7 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine)
* Ergo, if we put ourselves on the timelines.active_list
* (see intel_timeline_enter()) before we increment the

* engine->wakeref.count, we may see the request completion and retire
- * it causing an undeflow of the engine->wakeref.
+ * it causing an underflow of the engine->wakeref.
*/
flags = __timeline_mark_lock(ce);
GEM_BUG_ON(atomic_read(&ce->timeline->active_count) < 0);
@@ -255,7 +247,6 @@ static int __engine_park(struct intel_wakeref *wf)
intel_engine_park_heartbeat(engine);
intel_engine_disarm_breadcrumbs(engine);
- intel_engine_pool_park(&engine->pool);
/* Must be reset upon idling, or we may miss the busy wakeup. */
GEM_BUG_ON(engine->execlists.queue_priority_hint != INT_MIN);
@@ -263,11 +254,6 @@ static int __engine_park(struct intel_wakeref *wf)
if (engine->park)
engine->park(engine);
- if (engine->pinned_default_state) {
- i915_gem_object_unpin_map(engine->default_state);
- engine->pinned_default_state = NULL;
- }
-
engine->execlists.no_priolist = false;
/* While gt calls i915_vma_parked(), we have to break the lock cycle */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.h b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
index e52c2b0cb245..418df0a13145 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
@@ -37,6 +37,12 @@ static inline void intel_engine_pm_put_async(struct intel_engine_cs *engine)
intel_wakeref_put_async(&engine->wakeref);
}
+static inline void intel_engine_pm_put_delay(struct intel_engine_cs *engine,
+ unsigned long delay)
+{
+ intel_wakeref_put_delay(&engine->wakeref, delay);
+}
+
static inline void intel_engine_pm_flush(struct intel_engine_cs *engine)
{
intel_wakeref_unlock_wait(&engine->wakeref);
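
The new intel_engine_pm_put_delay() defers the final wakeref release instead of dropping it synchronously, which avoids ping-ponging the engine power state across back-to-back users. A hypothetical caller (the function name and delay value are illustrative only; the delay unit is whatever intel_wakeref_put_delay() expects):

    static void use_engine_briefly(struct intel_engine_cs *engine)
    {
            intel_engine_pm_get(engine);

            /* ... submit a short piece of work ... */

            /* Keep the engine awake a little longer in case another user follows. */
            intel_engine_pm_put_delay(engine, 2);
    }
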
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pool.h b/drivers/gpu/drm/i915/gt/intel_engine_pool.h
deleted file mode 100644
index 1bd89cadc3b7..000000000000
--- a/drivers/gpu/drm/i915/gt/intel_engine_pool.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2014-2018 Intel Corporation
- */
-
-#ifndef INTEL_ENGINE_POOL_H
-#define INTEL_ENGINE_POOL_H
-
-#include "intel_engine_pool_types.h"
-#include "i915_active.h"
-#include "i915_request.h"
-
-struct intel_engine_pool_node *
-intel_engine_get_pool(struct intel_engine_cs *engine, size_t size);
-
-static inline int
-intel_engine_pool_mark_active(struct intel_engine_pool_node *node,
- struct i915_request *rq)
-{
- return i915_active_add_request(&node->active, rq);
-}
-
-static inline void
-intel_engine_pool_put(struct intel_engine_pool_node *node)
-{
- i915_active_release(&node->active);
-}
-
-void intel_engine_pool_init(struct intel_engine_pool *pool);
-void intel_engine_pool_park(struct intel_engine_pool *pool);
-void intel_engine_pool_fini(struct intel_engine_pool *pool);
-
-#endif /* INTEL_ENGINE_POOL_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 0be674ae1cf6..2b6cdf47d428 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -22,7 +22,6 @@
#include "i915_pmu.h"
#include "i915_priolist_types.h"
#include "i915_selftest.h"
-#include "intel_engine_pool_types.h"
#include "intel_sseu.h"
#include "intel_timeline_types.h"
#include "intel_wakeref.h"
@@ -181,6 +180,11 @@ struct intel_engine_execlists {
u32 error_interrupt;
/**
+ * @reset_ccid: Active CCID [EXECLISTS_STATUS_HI] at the time of reset
+ */
+ u32 reset_ccid;
+
+ /**
* @no_priolist: priority lists disabled
*/
bool no_priolist;
@@ -321,6 +325,9 @@ struct intel_engine_cs {
struct list_head hold; /* ready requests, but on hold */
} active;
+ /* keep a request in reserve for a [pm] barrier under oom */
+ struct i915_request *request_pool;
+
struct llist_head barrier_tasks;
struct intel_context *kernel_context; /* pinned */
@@ -336,8 +343,7 @@ struct intel_engine_cs {
unsigned long wakeref_serial;
struct intel_wakeref wakeref;
- struct drm_i915_gem_object *default_state;
- void *pinned_default_state;
+ struct file *default_state;
struct {
struct intel_ring *ring;
@@ -371,6 +377,8 @@ struct intel_engine_cs {
spinlock_t irq_lock;
struct list_head signalers;
+ struct list_head signaled_requests;
+
struct irq_work irq_work; /* for use from inside irq_lock */
unsigned int irq_enabled;
@@ -402,13 +410,6 @@ struct intel_engine_cs {
struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_COUNT];
} pmu;
- /*
- * A pool of objects to use as shadow copies of client batch buffers
- * when the command parser is enabled. Prevents the client from
- * modifying the batch contents after software parsing.
- */
- struct intel_engine_pool pool;
-
struct intel_hw_status_page status_page;
struct i915_ctx_workarounds wa_ctx;
struct i915_wa_list ctx_wa_list;
@@ -420,6 +421,7 @@ struct intel_engine_cs {
void (*irq_enable)(struct intel_engine_cs *engine);
void (*irq_disable)(struct intel_engine_cs *engine);
+ void (*sanitize)(struct intel_engine_cs *engine);
int (*resume)(struct intel_engine_cs *engine);
struct {
@@ -529,34 +531,34 @@ struct intel_engine_cs {
struct {
/**
- * @lock: Lock protecting the below fields.
- */
- seqlock_t lock;
- /**
- * @enabled: Reference count indicating number of listeners.
+ * @active: Number of contexts currently scheduled in.
*/
- unsigned int enabled;
+ atomic_t active;
+
/**
- * @active: Number of contexts currently scheduled in.
+ * @lock: Lock protecting the below fields.
*/
- unsigned int active;
+ seqlock_t lock;
+
/**
- * @enabled_at: Timestamp when busy stats were enabled.
+ * @total: Total time this engine was busy.
+ *
+ * Accumulated time not counting the most recent block in cases
+ * where engine is currently busy (active > 0).
*/
- ktime_t enabled_at;
+ ktime_t total;
+
/**
* @start: Timestamp of the last idle to active transition.
*
* Idle is defined as active == 0, active is active > 0.
*/
ktime_t start;
+
/**
- * @total: Total time this engine was busy.
- *
- * Accumulated time not counting the most recent block in cases
- * where engine is currently busy (active > 0).
+ * @rps: Utilisation at last RPS sampling.
*/
- ktime_t total;
+ ktime_t rps;
} stats;
struct {
@@ -565,7 +567,7 @@ struct intel_engine_cs {
unsigned long preempt_timeout_ms;
unsigned long stop_timeout_ms;
unsigned long timeslice_duration_ms;
- } props;
+ } props, defaults;
};
static inline bool
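
On the props/defaults split above: keeping a second, untouched copy of the configurable timeouts lets run-time tuning be reverted without re-deriving the values. A hypothetical reset helper, assuming defaults is populated once during engine setup:

    static void engine_restore_default_props(struct intel_engine_cs *engine)
    {
            /* Plain struct assignment reverts every tunable in one go. */
            engine->props = engine->defaults;
    }
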
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index 4c5a209cb669..66165b10256e 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -65,7 +65,7 @@ static int ggtt_init_hw(struct i915_ggtt *ggtt)
ggtt->mappable_end);
}
- i915_ggtt_init_fences(ggtt);
+ intel_ggtt_init_fences(ggtt);
return 0;
}
@@ -715,11 +715,13 @@ static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
*/
void i915_ggtt_driver_release(struct drm_i915_private *i915)
{
+ struct i915_ggtt *ggtt = &i915->ggtt;
struct pagevec *pvec;
- fini_aliasing_ppgtt(&i915->ggtt);
+ fini_aliasing_ppgtt(ggtt);
- ggtt_cleanup_hw(&i915->ggtt);
+ intel_ggtt_fini_fences(ggtt);
+ ggtt_cleanup_hw(ggtt);
pvec = &i915->mm.wc_stash.pvec;
if (pvec->nr) {
@@ -784,13 +786,13 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
else
ggtt->gsm = ioremap_wc(phys_addr, size);
if (!ggtt->gsm) {
- DRM_ERROR("Failed to map the ggtt page table\n");
+ drm_err(&i915->drm, "Failed to map the ggtt page table\n");
return -ENOMEM;
}
ret = setup_scratch_page(&ggtt->vm, GFP_DMA32);
if (ret) {
- DRM_ERROR("Scratch setup failed\n");
+ drm_err(&i915->drm, "Scratch setup failed\n");
/* iounmap will also get called at remove, but meh */
iounmap(ggtt->gsm);
return ret;
@@ -838,7 +840,6 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
struct pci_dev *pdev = i915->drm.pdev;
unsigned int size;
u16 snb_gmch_ctl;
- int err;
/* TODO: We're not aware of mappable constraints on gen8 yet */
if (!IS_DGFX(i915)) {
@@ -846,12 +847,6 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
ggtt->mappable_end = resource_size(&ggtt->gmadr);
}
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(39));
- if (!err)
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39));
- if (err)
- DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
-
pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
if (IS_CHERRYVIEW(i915))
size = chv_get_total_gtt_size(snb_gmch_ctl);
@@ -987,7 +982,6 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
struct pci_dev *pdev = i915->drm.pdev;
unsigned int size;
u16 snb_gmch_ctl;
- int err;
ggtt->gmadr = pci_resource(pdev, 2);
ggtt->mappable_end = resource_size(&ggtt->gmadr);
@@ -997,15 +991,11 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
* just a coarse sanity check.
*/
if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
- DRM_ERROR("Unknown GMADR size (%pa)\n", &ggtt->mappable_end);
+ drm_err(&i915->drm, "Unknown GMADR size (%pa)\n",
+ &ggtt->mappable_end);
return -ENXIO;
}
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
- if (!err)
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
- if (err)
- DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
size = gen6_get_total_gtt_size(snb_gmch_ctl);
@@ -1052,7 +1042,7 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
ret = intel_gmch_probe(i915->bridge_dev, i915->drm.pdev, NULL);
if (!ret) {
- DRM_ERROR("failed to set up gmch\n");
+ drm_err(&i915->drm, "failed to set up gmch\n");
return -EIO;
}
@@ -1075,7 +1065,7 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.vma_ops.clear_pages = clear_pages;
if (unlikely(ggtt->do_idle_maps))
- dev_notice(i915->drm.dev,
+ drm_notice(&i915->drm,
"Applying Ironlake quirks for intel_iommu\n");
return 0;
@@ -1100,26 +1090,29 @@ static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt)
return ret;
if ((ggtt->vm.total - 1) >> 32) {
- DRM_ERROR("We never expected a Global GTT with more than 32bits"
- " of address space! Found %lldM!\n",
- ggtt->vm.total >> 20);
+ drm_err(&i915->drm,
+ "We never expected a Global GTT with more than 32bits"
+ " of address space! Found %lldM!\n",
+ ggtt->vm.total >> 20);
ggtt->vm.total = 1ULL << 32;
ggtt->mappable_end =
min_t(u64, ggtt->mappable_end, ggtt->vm.total);
}
if (ggtt->mappable_end > ggtt->vm.total) {
- DRM_ERROR("mappable aperture extends past end of GGTT,"
- " aperture=%pa, total=%llx\n",
- &ggtt->mappable_end, ggtt->vm.total);
+ drm_err(&i915->drm,
+ "mappable aperture extends past end of GGTT,"
+ " aperture=%pa, total=%llx\n",
+ &ggtt->mappable_end, ggtt->vm.total);
ggtt->mappable_end = ggtt->vm.total;
}
/* GMADR is the PCI mmio aperture into the global GTT. */
- DRM_DEBUG_DRIVER("GGTT size = %lluM\n", ggtt->vm.total >> 20);
- DRM_DEBUG_DRIVER("GMADR size = %lluM\n", (u64)ggtt->mappable_end >> 20);
- DRM_DEBUG_DRIVER("DSM size = %lluM\n",
- (u64)resource_size(&intel_graphics_stolen_res) >> 20);
+ drm_dbg(&i915->drm, "GGTT size = %lluM\n", ggtt->vm.total >> 20);
+ drm_dbg(&i915->drm, "GMADR size = %lluM\n",
+ (u64)ggtt->mappable_end >> 20);
+ drm_dbg(&i915->drm, "DSM size = %lluM\n",
+ (u64)resource_size(&intel_graphics_stolen_res) >> 20);
return 0;
}
@@ -1137,7 +1130,7 @@ int i915_ggtt_probe_hw(struct drm_i915_private *i915)
return ret;
if (intel_vtd_active())
- dev_info(i915->drm.dev, "VT-d active for gfx access\n");
+ drm_info(&i915->drm, "VT-d active for gfx access\n");
return 0;
}
@@ -1212,6 +1205,8 @@ void i915_ggtt_resume(struct i915_ggtt *ggtt)
if (INTEL_GEN(ggtt->vm.i915) >= 8)
setup_private_pat(ggtt->vm.gt->uncore);
+
+ intel_ggtt_restore_fences(ggtt);
}
static struct scatterlist *
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
index d152b648c73c..7fb36b12fe7a 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
@@ -68,8 +68,7 @@ static struct intel_uncore *fence_to_uncore(struct i915_fence_reg *fence)
return fence->ggtt->vm.gt->uncore;
}
-static void i965_write_fence_reg(struct i915_fence_reg *fence,
- struct i915_vma *vma)
+static void i965_write_fence_reg(struct i915_fence_reg *fence)
{
i915_reg_t fence_reg_lo, fence_reg_hi;
int fence_pitch_shift;
@@ -87,18 +86,16 @@ static void i965_write_fence_reg(struct i915_fence_reg *fence,
}
val = 0;
- if (vma) {
- unsigned int stride = i915_gem_object_get_stride(vma->obj);
+ if (fence->tiling) {
+ unsigned int stride = fence->stride;
- GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
- GEM_BUG_ON(!IS_ALIGNED(vma->node.start, I965_FENCE_PAGE));
- GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I965_FENCE_PAGE));
GEM_BUG_ON(!IS_ALIGNED(stride, 128));
- val = (vma->node.start + vma->fence_size - I965_FENCE_PAGE) << 32;
- val |= vma->node.start;
+ val = fence->start + fence->size - I965_FENCE_PAGE;
+ val <<= 32;
+ val |= fence->start;
val |= (u64)((stride / 128) - 1) << fence_pitch_shift;
- if (i915_gem_object_get_tiling(vma->obj) == I915_TILING_Y)
+ if (fence->tiling == I915_TILING_Y)
val |= BIT(I965_FENCE_TILING_Y_SHIFT);
val |= I965_FENCE_REG_VALID;
}
@@ -125,21 +122,15 @@ static void i965_write_fence_reg(struct i915_fence_reg *fence,
}
}
-static void i915_write_fence_reg(struct i915_fence_reg *fence,
- struct i915_vma *vma)
+static void i915_write_fence_reg(struct i915_fence_reg *fence)
{
u32 val;
val = 0;
- if (vma) {
- unsigned int tiling = i915_gem_object_get_tiling(vma->obj);
+ if (fence->tiling) {
+ unsigned int stride = fence->stride;
+ unsigned int tiling = fence->tiling;
bool is_y_tiled = tiling == I915_TILING_Y;
- unsigned int stride = i915_gem_object_get_stride(vma->obj);
-
- GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
- GEM_BUG_ON(vma->node.start & ~I915_FENCE_START_MASK);
- GEM_BUG_ON(!is_power_of_2(vma->fence_size));
- GEM_BUG_ON(!IS_ALIGNED(vma->node.start, vma->fence_size));
if (is_y_tiled && HAS_128_BYTE_Y_TILING(fence_to_i915(fence)))
stride /= 128;
@@ -147,10 +138,10 @@ static void i915_write_fence_reg(struct i915_fence_reg *fence,
stride /= 512;
GEM_BUG_ON(!is_power_of_2(stride));
- val = vma->node.start;
+ val = fence->start;
if (is_y_tiled)
val |= BIT(I830_FENCE_TILING_Y_SHIFT);
- val |= I915_FENCE_SIZE_BITS(vma->fence_size);
+ val |= I915_FENCE_SIZE_BITS(fence->size);
val |= ilog2(stride) << I830_FENCE_PITCH_SHIFT;
val |= I830_FENCE_REG_VALID;
@@ -165,25 +156,18 @@ static void i915_write_fence_reg(struct i915_fence_reg *fence,
}
}
-static void i830_write_fence_reg(struct i915_fence_reg *fence,
- struct i915_vma *vma)
+static void i830_write_fence_reg(struct i915_fence_reg *fence)
{
u32 val;
val = 0;
- if (vma) {
- unsigned int stride = i915_gem_object_get_stride(vma->obj);
-
- GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
- GEM_BUG_ON(vma->node.start & ~I830_FENCE_START_MASK);
- GEM_BUG_ON(!is_power_of_2(vma->fence_size));
- GEM_BUG_ON(!is_power_of_2(stride / 128));
- GEM_BUG_ON(!IS_ALIGNED(vma->node.start, vma->fence_size));
+ if (fence->tiling) {
+ unsigned int stride = fence->stride;
- val = vma->node.start;
- if (i915_gem_object_get_tiling(vma->obj) == I915_TILING_Y)
+ val = fence->start;
+ if (fence->tiling == I915_TILING_Y)
val |= BIT(I830_FENCE_TILING_Y_SHIFT);
- val |= I830_FENCE_SIZE_BITS(vma->fence_size);
+ val |= I830_FENCE_SIZE_BITS(fence->size);
val |= ilog2(stride / 128) << I830_FENCE_PITCH_SHIFT;
val |= I830_FENCE_REG_VALID;
}
@@ -197,8 +181,7 @@ static void i830_write_fence_reg(struct i915_fence_reg *fence,
}
}
-static void fence_write(struct i915_fence_reg *fence,
- struct i915_vma *vma)
+static void fence_write(struct i915_fence_reg *fence)
{
struct drm_i915_private *i915 = fence_to_i915(fence);
@@ -209,18 +192,21 @@ static void fence_write(struct i915_fence_reg *fence,
*/
if (IS_GEN(i915, 2))
- i830_write_fence_reg(fence, vma);
+ i830_write_fence_reg(fence);
else if (IS_GEN(i915, 3))
- i915_write_fence_reg(fence, vma);
+ i915_write_fence_reg(fence);
else
- i965_write_fence_reg(fence, vma);
+ i965_write_fence_reg(fence);
/*
* Access through the fenced region afterwards is
* ordered by the posting reads whilst writing the registers.
*/
+}
- fence->dirty = false;
+static bool gpu_uses_fence_registers(struct i915_fence_reg *fence)
+{
+ return INTEL_GEN(fence_to_i915(fence)) < 4;
}
static int fence_update(struct i915_fence_reg *fence,
@@ -232,27 +218,32 @@ static int fence_update(struct i915_fence_reg *fence,
struct i915_vma *old;
int ret;
+ fence->tiling = 0;
if (vma) {
+ GEM_BUG_ON(!i915_gem_object_get_stride(vma->obj) ||
+ !i915_gem_object_get_tiling(vma->obj));
+
if (!i915_vma_is_map_and_fenceable(vma))
return -EINVAL;
- if (drm_WARN(&uncore->i915->drm,
- !i915_gem_object_get_stride(vma->obj) ||
- !i915_gem_object_get_tiling(vma->obj),
- "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
- i915_gem_object_get_stride(vma->obj),
- i915_gem_object_get_tiling(vma->obj)))
- return -EINVAL;
+ if (gpu_uses_fence_registers(fence)) {
+ /* implicit 'unfenced' GPU blits */
+ ret = i915_vma_sync(vma);
+ if (ret)
+ return ret;
+ }
- ret = i915_vma_sync(vma);
- if (ret)
- return ret;
+ fence->start = vma->node.start;
+ fence->size = vma->fence_size;
+ fence->stride = i915_gem_object_get_stride(vma->obj);
+ fence->tiling = i915_gem_object_get_tiling(vma->obj);
}
+ WRITE_ONCE(fence->dirty, false);
old = xchg(&fence->vma, NULL);
if (old) {
/* XXX Ideally we would move the waiting to outside the mutex */
- ret = i915_vma_sync(old);
+ ret = i915_active_wait(&fence->active);
if (ret) {
fence->vma = old;
return ret;
@@ -276,7 +267,7 @@ static int fence_update(struct i915_fence_reg *fence,
/*
* We only need to update the register itself if the device is awake.
* If the device is currently powered down, we will defer the write
- * to the runtime resume, see i915_gem_restore_fences().
+ * to the runtime resume, see intel_ggtt_restore_fences().
*
* This only works for removing the fence register, on acquisition
* the caller must hold the rpm wakeref. The fence register must
@@ -290,7 +281,7 @@ static int fence_update(struct i915_fence_reg *fence,
}
WRITE_ONCE(fence->vma, vma);
- fence_write(fence, vma);
+ fence_write(fence);
if (vma) {
vma->fence = fence;
@@ -307,23 +298,26 @@ static int fence_update(struct i915_fence_reg *fence,
*
* This function force-removes any fence from the given object, which is useful
* if the kernel wants to do untiled GTT access.
- *
- * Returns:
- *
- * 0 on success, negative error code on failure.
*/
-int i915_vma_revoke_fence(struct i915_vma *vma)
+void i915_vma_revoke_fence(struct i915_vma *vma)
{
struct i915_fence_reg *fence = vma->fence;
+ intel_wakeref_t wakeref;
lockdep_assert_held(&vma->vm->mutex);
if (!fence)
- return 0;
+ return;
- if (atomic_read(&fence->pin_count))
- return -EBUSY;
+ GEM_BUG_ON(fence->vma != vma);
+ GEM_BUG_ON(!i915_active_is_idle(&fence->active));
+ GEM_BUG_ON(atomic_read(&fence->pin_count));
- return fence_update(fence, NULL);
+ fence->tiling = 0;
+ WRITE_ONCE(fence->vma, NULL);
+ vma->fence = NULL;
+
+ with_intel_runtime_pm_if_in_use(fence_to_uncore(fence)->rpm, wakeref)
+ fence_write(fence);
}
static struct i915_fence_reg *fence_find(struct i915_ggtt *ggtt)
@@ -487,34 +481,19 @@ void i915_unreserve_fence(struct i915_fence_reg *fence)
}
/**
- * i915_gem_restore_fences - restore fence state
+ * intel_ggtt_restore_fences - restore fence state
* @ggtt: Global GTT
*
* Restore the hw fence state to match the software tracking again, to be called
* after a gpu reset and on resume. Note that on runtime suspend we only cancel
* the fences, to be reacquired by the user later.
*/
-void i915_gem_restore_fences(struct i915_ggtt *ggtt)
+void intel_ggtt_restore_fences(struct i915_ggtt *ggtt)
{
int i;
- rcu_read_lock(); /* keep obj alive as we dereference */
- for (i = 0; i < ggtt->num_fences; i++) {
- struct i915_fence_reg *reg = &ggtt->fence_regs[i];
- struct i915_vma *vma = READ_ONCE(reg->vma);
-
- GEM_BUG_ON(vma && vma->fence != reg);
-
- /*
- * Commit delayed tiling changes if we have an object still
- * attached to the fence, otherwise just clear the fence.
- */
- if (vma && !i915_gem_object_is_tiled(vma->obj))
- vma = NULL;
-
- fence_write(reg, vma);
- }
- rcu_read_unlock();
+ for (i = 0; i < ggtt->num_fences; i++)
+ fence_write(&ggtt->fence_regs[i]);
}
/**
@@ -746,7 +725,7 @@ static void detect_bit_6_swizzle(struct i915_ggtt *ggtt)
* bit 17 of its physical address and therefore being interpreted differently
* by the GPU.
*/
-static void i915_gem_swizzle_page(struct page *page)
+static void swizzle_page(struct page *page)
{
char temp[64];
char *vaddr;
@@ -791,7 +770,7 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
for_each_sgt_page(page, sgt_iter, pages) {
char new_bit_17 = page_to_phys(page) >> 17;
if ((new_bit_17 & 0x1) != (test_bit(i, obj->bit_17) != 0)) {
- i915_gem_swizzle_page(page);
+ swizzle_page(page);
set_page_dirty(page);
}
i++;
@@ -836,7 +815,7 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
}
}
-void i915_ggtt_init_fences(struct i915_ggtt *ggtt)
+void intel_ggtt_init_fences(struct i915_ggtt *ggtt)
{
struct drm_i915_private *i915 = ggtt->vm.i915;
struct intel_uncore *uncore = ggtt->vm.gt->uncore;
@@ -864,18 +843,37 @@ void i915_ggtt_init_fences(struct i915_ggtt *ggtt)
if (intel_vgpu_active(i915))
num_fences = intel_uncore_read(uncore,
vgtif_reg(avail_rs.fence_num));
+ ggtt->fence_regs = kcalloc(num_fences,
+ sizeof(*ggtt->fence_regs),
+ GFP_KERNEL);
+ if (!ggtt->fence_regs)
+ num_fences = 0;
/* Initialize fence registers to zero */
for (i = 0; i < num_fences; i++) {
struct i915_fence_reg *fence = &ggtt->fence_regs[i];
+ i915_active_init(&fence->active, NULL, NULL);
fence->ggtt = ggtt;
fence->id = i;
list_add_tail(&fence->link, &ggtt->fence_list);
}
ggtt->num_fences = num_fences;
- i915_gem_restore_fences(ggtt);
+ intel_ggtt_restore_fences(ggtt);
+}
+
+void intel_ggtt_fini_fences(struct i915_ggtt *ggtt)
+{
+ int i;
+
+ for (i = 0; i < ggtt->num_fences; i++) {
+ struct i915_fence_reg *fence = &ggtt->fence_regs[i];
+
+ i915_active_fini(&fence->active);
+ }
+
+ kfree(ggtt->fence_regs);
}
void intel_gt_init_swizzling(struct intel_gt *gt)
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.h b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.h
index 7bd521cd7cd7..9eef679e1311 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.h
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.h
@@ -22,12 +22,14 @@
*
*/
-#ifndef __I915_FENCE_REG_H__
-#define __I915_FENCE_REG_H__
+#ifndef __INTEL_GGTT_FENCING_H__
+#define __INTEL_GGTT_FENCING_H__
#include <linux/list.h>
#include <linux/types.h>
+#include "i915_active.h"
+
struct drm_i915_gem_object;
struct i915_ggtt;
struct i915_vma;
@@ -41,6 +43,7 @@ struct i915_fence_reg {
struct i915_ggtt *ggtt;
struct i915_vma *vma;
atomic_t pin_count;
+ struct i915_active active;
int id;
/**
* Whether the tiling parameters for the currently
@@ -51,20 +54,24 @@ struct i915_fence_reg {
* command (such as BLT on gen2/3), as a "fence".
*/
bool dirty;
+ u32 start;
+ u32 size;
+ u32 tiling;
+ u32 stride;
};
-/* i915_gem_fence_reg.c */
struct i915_fence_reg *i915_reserve_fence(struct i915_ggtt *ggtt);
void i915_unreserve_fence(struct i915_fence_reg *fence);
-void i915_gem_restore_fences(struct i915_ggtt *ggtt);
+void intel_ggtt_restore_fences(struct i915_ggtt *ggtt);
void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
struct sg_table *pages);
void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
struct sg_table *pages);
-void i915_ggtt_init_fences(struct i915_ggtt *ggtt);
+void intel_ggtt_init_fences(struct i915_ggtt *ggtt);
+void intel_ggtt_fini_fences(struct i915_ggtt *ggtt);
void intel_gt_init_swizzling(struct intel_gt *gt);
diff --git a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
index f04214a54f75..534e435f20bc 100644
--- a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
+++ b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
@@ -138,7 +138,7 @@
*/
#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*(x)-1)
/* Gen11+. addr = base + (ctx_restore ? offset & GENMASK(12,2) : offset) */
-#define MI_LRI_CS_MMIO (1<<19)
+#define MI_LRI_LRM_CS_MMIO REG_BIT(19)
#define MI_LRI_FORCE_POSTED (1<<12)
#define MI_LOAD_REGISTER_IMM_MAX_REGS (126)
#define MI_STORE_REGISTER_MEM MI_INSTR(0x24, 1)
@@ -156,6 +156,7 @@
#define MI_LOAD_REGISTER_MEM MI_INSTR(0x29, 1)
#define MI_LOAD_REGISTER_MEM_GEN8 MI_INSTR(0x29, 2)
#define MI_LOAD_REGISTER_REG MI_INSTR(0x2A, 1)
+#define MI_LRR_SOURCE_CS_MMIO REG_BIT(18)
#define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
#define MI_BATCH_NON_SECURE (1)
/* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */
@@ -235,9 +236,8 @@
#define PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH (1<<12) /* gen6+ */
#define PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE (1<<11) /* MBZ on ILK */
#define PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE (1<<10) /* GM45+ only */
-#define PIPE_CONTROL_L3_RO_CACHE_INVALIDATE REG_BIT(10) /* gen12 */
#define PIPE_CONTROL_INDIRECT_STATE_DISABLE (1<<9)
-#define PIPE_CONTROL_HDC_PIPELINE_FLUSH REG_BIT(9) /* gen12 */
+#define PIPE_CONTROL0_HDC_PIPELINE_FLUSH REG_BIT(9) /* gen12 */
#define PIPE_CONTROL_NOTIFY (1<<8)
#define PIPE_CONTROL_FLUSH_ENABLE (1<<7) /* gen7+ */
#define PIPE_CONTROL_DC_FLUSH_ENABLE (1<<5)
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index d09f7596cb98..f069551e412f 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -7,6 +7,8 @@
#include "i915_drv.h"
#include "intel_context.h"
#include "intel_gt.h"
+#include "intel_gt_buffer_pool.h"
+#include "intel_gt_clock_utils.h"
#include "intel_gt_pm.h"
#include "intel_gt_requests.h"
#include "intel_mocs.h"
@@ -15,6 +17,7 @@
#include "intel_rps.h"
#include "intel_uncore.h"
#include "intel_pm.h"
+#include "shmem_utils.h"
void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
{
@@ -26,6 +29,7 @@ void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
INIT_LIST_HEAD(&gt->closed_vma);
spin_lock_init(&gt->closed_lock);
+ intel_gt_init_buffer_pool(gt);
intel_gt_init_reset(gt);
intel_gt_init_requests(gt);
intel_gt_init_timelines(gt);
@@ -370,18 +374,6 @@ static struct i915_address_space *kernel_vm(struct intel_gt *gt)
return i915_vm_get(&gt->ggtt->vm);
}
-static int __intel_context_flush_retire(struct intel_context *ce)
-{
- struct intel_timeline *tl;
-
- tl = intel_context_timeline_lock(ce);
- if (IS_ERR(tl))
- return PTR_ERR(tl);
-
- intel_context_timeline_unlock(tl);
- return 0;
-}
-
static int __engines_record_defaults(struct intel_gt *gt)
{
struct i915_request *requests[I915_NUM_ENGINES] = {};
@@ -447,8 +439,7 @@ err_rq:
for (id = 0; id < ARRAY_SIZE(requests); id++) {
struct i915_request *rq;
- struct i915_vma *state;
- void *vaddr;
+ struct file *state;
rq = requests[id];
if (!rq)
@@ -460,48 +451,16 @@ err_rq:
}
GEM_BUG_ON(!test_bit(CONTEXT_ALLOC_BIT, &rq->context->flags));
- state = rq->context->state;
- if (!state)
+ if (!rq->context->state)
continue;
- /* Serialise with retirement on another CPU */
- GEM_BUG_ON(!i915_request_completed(rq));
- err = __intel_context_flush_retire(rq->context);
- if (err)
- goto out;
-
- /* We want to be able to unbind the state from the GGTT */
- GEM_BUG_ON(intel_context_is_pinned(rq->context));
-
- /*
- * As we will hold a reference to the logical state, it will
- * not be torn down with the context, and importantly the
- * object will hold onto its vma (making it possible for a
- * stray GTT write to corrupt our defaults). Unmap the vma
- * from the GTT to prevent such accidents and reclaim the
- * space.
- */
- err = i915_vma_unbind(state);
- if (err)
- goto out;
-
- i915_gem_object_lock(state->obj);
- err = i915_gem_object_set_to_cpu_domain(state->obj, false);
- i915_gem_object_unlock(state->obj);
- if (err)
- goto out;
-
- i915_gem_object_set_cache_coherency(state->obj, I915_CACHE_LLC);
-
- /* Check we can acquire the image of the context state */
- vaddr = i915_gem_object_pin_map(state->obj, I915_MAP_FORCE_WB);
- if (IS_ERR(vaddr)) {
- err = PTR_ERR(vaddr);
+ /* Keep a copy of the state's backing pages; free the obj */
+ state = shmem_create_from_object(rq->context->state->obj);
+ if (IS_ERR(state)) {
+ err = PTR_ERR(state);
goto out;
}
-
- rq->engine->default_state = i915_gem_object_get(state->obj);
- i915_gem_object_unpin_map(state->obj);
+ rq->engine->default_state = state;
}
out:
@@ -576,6 +535,8 @@ int intel_gt_init(struct intel_gt *gt)
*/
intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
+ intel_gt_init_clock_frequency(gt);
+
err = intel_gt_init_scratch(gt, IS_GEN(gt->i915, 2) ? SZ_256K : SZ_4K);
if (err)
goto out_fw;
@@ -635,8 +596,7 @@ void intel_gt_driver_remove(struct intel_gt *gt)
{
__intel_gt_disable(gt);
- intel_uc_fini_hw(&gt->uc);
- intel_uc_fini(&gt->uc);
+ intel_uc_driver_remove(&gt->uc);
intel_engines_release(gt);
}
@@ -663,6 +623,7 @@ void intel_gt_driver_release(struct intel_gt *gt)
intel_gt_pm_fini(gt);
intel_gt_fini_scratch(gt);
+ intel_gt_fini_buffer_pool(gt);
}
void intel_gt_driver_late_release(struct intel_gt *gt)
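
With engine->default_state now a plain shmem file (see the struct file * change in intel_engine_types.h above), the default context image can be copied out on demand instead of keeping a pinned GEM object alive. A sketch of such a copy, assuming a shmem_read()-style helper from shmem_utils.h:

    static int copy_default_state(struct intel_engine_cs *engine,
                                  void *dst, size_t len)
    {
            if (!engine->default_state)
                    return -ENODEV;

            /* Read @len bytes of the saved default image into @dst. */
            return shmem_read(engine->default_state, 0, dst, len);
    }
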
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pool.c b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
index 397186818305..1495054a4305 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pool.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2014-2018 Intel Corporation
*/
@@ -8,15 +7,15 @@
#include "i915_drv.h"
#include "intel_engine_pm.h"
-#include "intel_engine_pool.h"
+#include "intel_gt_buffer_pool.h"
-static struct intel_engine_cs *to_engine(struct intel_engine_pool *pool)
+static struct intel_gt *to_gt(struct intel_gt_buffer_pool *pool)
{
- return container_of(pool, struct intel_engine_cs, pool);
+ return container_of(pool, struct intel_gt, buffer_pool);
}
static struct list_head *
-bucket_for_size(struct intel_engine_pool *pool, size_t sz)
+bucket_for_size(struct intel_gt_buffer_pool *pool, size_t sz)
{
int n;
@@ -32,16 +31,50 @@ bucket_for_size(struct intel_engine_pool *pool, size_t sz)
return &pool->cache_list[n];
}
-static void node_free(struct intel_engine_pool_node *node)
+static void node_free(struct intel_gt_buffer_pool_node *node)
{
i915_gem_object_put(node->obj);
i915_active_fini(&node->active);
kfree(node);
}
+static void pool_free_work(struct work_struct *wrk)
+{
+ struct intel_gt_buffer_pool *pool =
+ container_of(wrk, typeof(*pool), work.work);
+ struct intel_gt_buffer_pool_node *node, *next;
+ unsigned long old = jiffies - HZ;
+ bool active = false;
+ LIST_HEAD(stale);
+ int n;
+
+ /* Free buffers that have not been used in the past second */
+ spin_lock_irq(&pool->lock);
+ for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
+ struct list_head *list = &pool->cache_list[n];
+
+ /* Most recent at head; oldest at tail */
+ list_for_each_entry_safe_reverse(node, next, list, link) {
+ if (time_before(node->age, old))
+ break;
+
+ list_move(&node->link, &stale);
+ }
+ active |= !list_empty(list);
+ }
+ spin_unlock_irq(&pool->lock);
+
+ list_for_each_entry_safe(node, next, &stale, link)
+ node_free(node);
+
+ if (active)
+ schedule_delayed_work(&pool->work,
+ round_jiffies_up_relative(HZ));
+}
+
static int pool_active(struct i915_active *ref)
{
- struct intel_engine_pool_node *node =
+ struct intel_gt_buffer_pool_node *node =
container_of(ref, typeof(*node), active);
struct dma_resv *resv = node->obj->base.resv;
int err;
@@ -64,29 +97,31 @@ static int pool_active(struct i915_active *ref)
__i915_active_call
static void pool_retire(struct i915_active *ref)
{
- struct intel_engine_pool_node *node =
+ struct intel_gt_buffer_pool_node *node =
container_of(ref, typeof(*node), active);
- struct intel_engine_pool *pool = node->pool;
+ struct intel_gt_buffer_pool *pool = node->pool;
struct list_head *list = bucket_for_size(pool, node->obj->base.size);
unsigned long flags;
- GEM_BUG_ON(!intel_engine_pm_is_awake(to_engine(pool)));
-
i915_gem_object_unpin_pages(node->obj);
/* Return this object to the shrinker pool */
i915_gem_object_make_purgeable(node->obj);
spin_lock_irqsave(&pool->lock, flags);
+ node->age = jiffies;
list_add(&node->link, list);
spin_unlock_irqrestore(&pool->lock, flags);
+
+ schedule_delayed_work(&pool->work,
+ round_jiffies_up_relative(HZ));
}
-static struct intel_engine_pool_node *
-node_create(struct intel_engine_pool *pool, size_t sz)
+static struct intel_gt_buffer_pool_node *
+node_create(struct intel_gt_buffer_pool *pool, size_t sz)
{
- struct intel_engine_cs *engine = to_engine(pool);
- struct intel_engine_pool_node *node;
+ struct intel_gt *gt = to_gt(pool);
+ struct intel_gt_buffer_pool_node *node;
struct drm_i915_gem_object *obj;
node = kmalloc(sizeof(*node),
@@ -97,7 +132,7 @@ node_create(struct intel_engine_pool *pool, size_t sz)
node->pool = pool;
i915_active_init(&node->active, pool_active, pool_retire);
- obj = i915_gem_object_create_internal(engine->i915, sz);
+ obj = i915_gem_object_create_internal(gt->i915, sz);
if (IS_ERR(obj)) {
i915_active_fini(&node->active);
kfree(node);
@@ -110,26 +145,15 @@ node_create(struct intel_engine_pool *pool, size_t sz)
return node;
}
-static struct intel_engine_pool *lookup_pool(struct intel_engine_cs *engine)
+struct intel_gt_buffer_pool_node *
+intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size)
{
- if (intel_engine_is_virtual(engine))
- engine = intel_virtual_engine_get_sibling(engine, 0);
-
- GEM_BUG_ON(!engine);
- return &engine->pool;
-}
-
-struct intel_engine_pool_node *
-intel_engine_get_pool(struct intel_engine_cs *engine, size_t size)
-{
- struct intel_engine_pool *pool = lookup_pool(engine);
- struct intel_engine_pool_node *node;
+ struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
+ struct intel_gt_buffer_pool_node *node;
struct list_head *list;
unsigned long flags;
int ret;
- GEM_BUG_ON(!intel_engine_pm_is_awake(to_engine(pool)));
-
size = PAGE_ALIGN(size);
list = bucket_for_size(pool, size);
@@ -157,34 +181,48 @@ intel_engine_get_pool(struct intel_engine_cs *engine, size_t size)
return node;
}
-void intel_engine_pool_init(struct intel_engine_pool *pool)
+void intel_gt_init_buffer_pool(struct intel_gt *gt)
{
+ struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
int n;
spin_lock_init(&pool->lock);
for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
INIT_LIST_HEAD(&pool->cache_list[n]);
+ INIT_DELAYED_WORK(&pool->work, pool_free_work);
}
-void intel_engine_pool_park(struct intel_engine_pool *pool)
+static void pool_free_imm(struct intel_gt_buffer_pool *pool)
{
int n;
+ spin_lock_irq(&pool->lock);
for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
+ struct intel_gt_buffer_pool_node *node, *next;
struct list_head *list = &pool->cache_list[n];
- struct intel_engine_pool_node *node, *nn;
- list_for_each_entry_safe(node, nn, list, link)
+ list_for_each_entry_safe(node, next, list, link)
node_free(node);
-
INIT_LIST_HEAD(list);
}
+ spin_unlock_irq(&pool->lock);
+}
+
+void intel_gt_flush_buffer_pool(struct intel_gt *gt)
+{
+ struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
+
+ if (cancel_delayed_work_sync(&pool->work))
+ pool_free_imm(pool);
}
-void intel_engine_pool_fini(struct intel_engine_pool *pool)
+void intel_gt_fini_buffer_pool(struct intel_gt *gt)
{
+ struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
int n;
+ intel_gt_flush_buffer_pool(gt);
+
for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
GEM_BUG_ON(!list_empty(&pool->cache_list[n]));
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.h b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.h
new file mode 100644
index 000000000000..42cbac003e8a
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2014-2018 Intel Corporation
+ */
+
+#ifndef INTEL_GT_BUFFER_POOL_H
+#define INTEL_GT_BUFFER_POOL_H
+
+#include <linux/types.h>
+
+#include "i915_active.h"
+#include "intel_gt_buffer_pool_types.h"
+
+struct intel_gt;
+struct i915_request;
+
+struct intel_gt_buffer_pool_node *
+intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size);
+
+static inline int
+intel_gt_buffer_pool_mark_active(struct intel_gt_buffer_pool_node *node,
+ struct i915_request *rq)
+{
+ return i915_active_add_request(&node->active, rq);
+}
+
+static inline void
+intel_gt_buffer_pool_put(struct intel_gt_buffer_pool_node *node)
+{
+ i915_active_release(&node->active);
+}
+
+void intel_gt_init_buffer_pool(struct intel_gt *gt);
+void intel_gt_flush_buffer_pool(struct intel_gt *gt);
+void intel_gt_fini_buffer_pool(struct intel_gt *gt);
+
+#endif /* INTEL_GT_BUFFER_POOL_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pool_types.h b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool_types.h
index e31ee361b76f..e28bdda771ed 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pool_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool_types.h
@@ -4,26 +4,29 @@
* Copyright © 2014-2018 Intel Corporation
*/
-#ifndef INTEL_ENGINE_POOL_TYPES_H
-#define INTEL_ENGINE_POOL_TYPES_H
+#ifndef INTEL_GT_BUFFER_POOL_TYPES_H
+#define INTEL_GT_BUFFER_POOL_TYPES_H
#include <linux/list.h>
#include <linux/spinlock.h>
+#include <linux/workqueue.h>
#include "i915_active_types.h"
struct drm_i915_gem_object;
-struct intel_engine_pool {
+struct intel_gt_buffer_pool {
spinlock_t lock;
struct list_head cache_list[4];
+ struct delayed_work work;
};
-struct intel_engine_pool_node {
+struct intel_gt_buffer_pool_node {
struct i915_active active;
struct drm_i915_gem_object *obj;
struct list_head link;
- struct intel_engine_pool *pool;
+ struct intel_gt_buffer_pool *pool;
+ unsigned long age;
};
-#endif /* INTEL_ENGINE_POOL_TYPES_H */
+#endif /* INTEL_GT_BUFFER_POOL_TYPES_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
new file mode 100644
index 000000000000..999079686846
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "intel_gt.h"
+#include "intel_gt_clock_utils.h"
+
+#define MHZ_12 12000000 /* 12MHz (24MHz/2), 83.333ns */
+#define MHZ_12_5 12500000 /* 12.5MHz (25MHz/2), 80ns */
+#define MHZ_19_2 19200000 /* 19.2MHz, 52.083ns */
+
+static u32 read_clock_frequency(const struct intel_gt *gt)
+{
+ if (INTEL_GEN(gt->i915) >= 11) {
+ u32 config;
+
+ config = intel_uncore_read(gt->uncore, RPM_CONFIG0);
+ config &= GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK;
+ config >>= GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;
+
+ switch (config) {
+ case 0: return MHZ_12;
+ case 1:
+ case 2: return MHZ_19_2;
+ default:
+ case 3: return MHZ_12_5;
+ }
+ } else if (INTEL_GEN(gt->i915) >= 9) {
+ if (IS_GEN9_LP(gt->i915))
+ return MHZ_19_2;
+ else
+ return MHZ_12;
+ } else {
+ return MHZ_12_5;
+ }
+}
+
+void intel_gt_init_clock_frequency(struct intel_gt *gt)
+{
+ /*
+ * Note that on gen11+, the clock frequency may be reconfigured.
+ * We do not, and we assume nobody else does.
+ */
+ gt->clock_frequency = read_clock_frequency(gt);
+ GT_TRACE(gt,
+ "Using clock frequency: %dkHz\n",
+ gt->clock_frequency / 1000);
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
+void intel_gt_check_clock_frequency(const struct intel_gt *gt)
+{
+ if (gt->clock_frequency != read_clock_frequency(gt)) {
+ dev_err(gt->i915->drm.dev,
+ "GT clock frequency changed, was %uHz, now %uHz!\n",
+ gt->clock_frequency,
+ read_clock_frequency(gt));
+ }
+}
+#endif
+
+static u64 div_u64_roundup(u64 nom, u32 den)
+{
+ return div_u64(nom + den - 1, den);
+}
+
+u32 intel_gt_clock_interval_to_ns(const struct intel_gt *gt, u32 count)
+{
+ return div_u64_roundup(mul_u32_u32(count, 1000 * 1000 * 1000),
+ gt->clock_frequency);
+}
+
+u32 intel_gt_pm_interval_to_ns(const struct intel_gt *gt, u32 count)
+{
+ return intel_gt_clock_interval_to_ns(gt, 16 * count);
+}
+
+u32 intel_gt_ns_to_clock_interval(const struct intel_gt *gt, u32 ns)
+{
+ return div_u64_roundup(mul_u32_u32(gt->clock_frequency, ns),
+ 1000 * 1000 * 1000);
+}
+
+u32 intel_gt_ns_to_pm_interval(const struct intel_gt *gt, u32 ns)
+{
+ u32 val;
+
+ /*
+ * Make these a multiple of magic 25 to avoid SNB (eg. Dell XPS
+ * 8300) freezing up around GPU hangs. Looks as if even
+ * scheduling/timer interrupts start misbehaving if the RPS
+ * EI/thresholds are "bad", leading to a very sluggish or even
+ * frozen machine.
+ */
+ val = DIV_ROUND_UP(intel_gt_ns_to_clock_interval(gt, ns), 16);
+ if (IS_GEN(gt->i915, 6))
+ val = roundup(val, 25);
+
+ return val;
+}
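
A worked example of the conversions above, replicated in plain userspace C (19.2 MHz is just one of the crystal frequencies read_clock_frequency() can report); rounding up in both directions means a ns -> ticks -> ns round trip never shrinks an interval:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned long long freq = 19200000ULL;  /* 19.2 MHz GT clock */
            unsigned long long ns = 1000ULL;        /* interval to convert */

            unsigned long long ticks = DIV_ROUND_UP(freq * ns, 1000000000ULL);   /* 20 */
            unsigned long long pm = DIV_ROUND_UP(ticks, 16);                     /* 2; gen6 would round up to 25 */
            unsigned long long back = DIV_ROUND_UP(ticks * 1000000000ULL, freq); /* 1042 ns >= 1000 ns */

            printf("ticks=%llu pm=%llu ns=%llu\n", ticks, pm, back);
            return 0;
    }
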
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.h b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.h
new file mode 100644
index 000000000000..f793c89f2cbd
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __INTEL_GT_CLOCK_UTILS_H__
+#define __INTEL_GT_CLOCK_UTILS_H__
+
+#include <linux/types.h>
+
+struct intel_gt;
+
+void intel_gt_init_clock_frequency(struct intel_gt *gt);
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
+void intel_gt_check_clock_frequency(const struct intel_gt *gt);
+#else
+static inline void intel_gt_check_clock_frequency(const struct intel_gt *gt) {}
+#endif
+
+u32 intel_gt_clock_interval_to_ns(const struct intel_gt *gt, u32 count);
+u32 intel_gt_pm_interval_to_ns(const struct intel_gt *gt, u32 count);
+
+u32 intel_gt_ns_to_clock_interval(const struct intel_gt *gt, u32 ns);
+u32 intel_gt_ns_to_pm_interval(const struct intel_gt *gt, u32 ns);
+
+#endif /* __INTEL_GT_CLOCK_UTILS_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index 8b653c0f5e5f..6bdb434a442d 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -12,6 +12,7 @@
#include "intel_context.h"
#include "intel_engine_pm.h"
#include "intel_gt.h"
+#include "intel_gt_clock_utils.h"
#include "intel_gt_pm.h"
#include "intel_gt_requests.h"
#include "intel_llc.h"
@@ -138,6 +139,8 @@ static void gt_sanitize(struct intel_gt *gt, bool force)
wakeref = intel_runtime_pm_get(gt->uncore->rpm);
intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
+ intel_gt_check_clock_frequency(gt);
+
/*
* As we have just resumed the machine and woken the device up from
* deep PCI sleep (presumably D3_cold), assume the HW has been reset
@@ -155,6 +158,10 @@ static void gt_sanitize(struct intel_gt *gt, bool force)
intel_uc_reset_prepare(&gt->uc);
+ for_each_engine(engine, gt, id)
+ if (engine->sanitize)
+ engine->sanitize(engine);
+
if (reset_engines(gt) || force) {
for_each_engine(engine, gt, id)
__intel_engine_reset(engine, false);
@@ -164,6 +171,8 @@ static void gt_sanitize(struct intel_gt *gt, bool force)
if (engine->reset.finish)
engine->reset.finish(engine);
+ intel_rps_sanitize(&gt->rps);
+
intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
intel_runtime_pm_put(gt->uncore->rpm, wakeref);
}
@@ -191,11 +200,12 @@ int intel_gt_resume(struct intel_gt *gt)
* Only the kernel contexts should remain pinned over suspend,
* allowing us to fixup the user contexts on their first pin.
*/
+ gt_sanitize(gt, true);
+
intel_gt_pm_get(gt);
intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
intel_rc6_sanitize(&gt->rc6);
- gt_sanitize(gt, true);
if (intel_gt_is_wedged(gt)) {
err = -EIO;
goto out_fw;
@@ -204,7 +214,7 @@ int intel_gt_resume(struct intel_gt *gt)
/* Only when the HW is re-initialised, can we replay the requests */
err = intel_gt_init_hw(gt);
if (err) {
- dev_err(gt->i915->drm.dev,
+ drm_err(&gt->i915->drm,
"Failed to initialize GPU, declaring it wedged!\n");
goto err_wedged;
}
@@ -220,7 +230,7 @@ int intel_gt_resume(struct intel_gt *gt)
intel_engine_pm_put(engine);
if (err) {
- dev_err(gt->i915->drm.dev,
+ drm_err(&gt->i915->drm,
"Failed to restart %s (%d)\n",
engine->name, err);
goto err_wedged;
@@ -324,6 +334,7 @@ int intel_gt_runtime_resume(struct intel_gt *gt)
{
GT_TRACE(gt, "\n");
intel_gt_init_swizzling(gt);
+ intel_ggtt_restore_fences(gt->ggtt);
return intel_uc_runtime_resume(&gt->uc);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.c b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
index 24c99d0838af..16ff47c83bd5 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_requests.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
@@ -26,6 +26,11 @@ static bool retire_requests(struct intel_timeline *tl)
return !i915_active_fence_isset(&tl->last_request);
}
+static bool engine_active(const struct intel_engine_cs *engine)
+{
+ return !list_empty(&engine->kernel_context->timeline->requests);
+}
+
static bool flush_submission(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
@@ -37,8 +42,13 @@ static bool flush_submission(struct intel_gt *gt)
for_each_engine(engine, gt, id) {
intel_engine_flush_submission(engine);
- active |= flush_work(&engine->retire_work);
- active |= flush_work(&engine->wakeref.work);
+
+ /* Flush the background retirement and idle barriers */
+ flush_work(&engine->retire_work);
+ flush_delayed_work(&engine->wakeref.work);
+
+ /* Is the idle barrier still outstanding? */
+ active |= engine_active(engine);
}
return active;
@@ -162,7 +172,7 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
}
}
- if (!retire_requests(tl) || flush_submission(gt))
+ if (!retire_requests(tl))
active_count++;
mutex_unlock(&tl->mutex);
@@ -173,7 +183,6 @@ out_active: spin_lock(&timelines->lock);
if (atomic_dec_and_test(&tl->active_count))
list_del(&tl->link);
-
/* Defer the final release to after the spinlock */
if (refcount_dec_and_test(&tl->kref.refcount)) {
GEM_BUG_ON(atomic_read(&tl->active_count));
@@ -185,6 +194,9 @@ out_active: spin_lock(&timelines->lock);
list_for_each_entry_safe(tl, tn, &free, link)
__intel_timeline_free(&tl->kref);
+ if (flush_submission(gt)) /* Wait, there's more! */
+ active_count++;
+
return active_count ? timeout : 0;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
index 96890dd12b5f..0cc1d6b185dc 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -17,6 +17,7 @@
#include "i915_vma.h"
#include "intel_engine_types.h"
+#include "intel_gt_buffer_pool_types.h"
#include "intel_llc_types.h"
#include "intel_reset_types.h"
#include "intel_rc6_types.h"
@@ -61,6 +62,7 @@ struct intel_gt {
struct list_head closed_vma;
spinlock_t closed_lock; /* guards the list of closed_vma */
+ ktime_t last_init_time;
struct intel_reset reset;
/**
@@ -72,14 +74,12 @@ struct intel_gt {
*/
intel_wakeref_t awake;
+ u32 clock_frequency;
+
struct intel_llc llc;
struct intel_rc6 rc6;
struct intel_rps rps;
- ktime_t last_init_time;
-
- struct i915_vma *scratch;
-
spinlock_t irq_lock;
u32 gt_imr;
u32 pm_ier;
@@ -97,6 +97,18 @@ struct intel_gt {
* Reserved for exclusive use by the kernel.
*/
struct i915_address_space *vm;
+
+ /*
+ * A pool of objects to use as shadow copies of client batch buffers
+ * when the command parser is enabled. Prevents the client from
+ * modifying the batch contents after software parsing.
+ *
+ * Buffers older than 1s are periodically reaped from the pool,
+ * or may be reclaimed by the shrinker before then.
+ */
+ struct intel_gt_buffer_pool buffer_pool;
+
+ struct i915_vma *scratch;
};
enum intel_gt_scratch_field {
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
index b3116fe8d180..d93ebdf3fa0e 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -26,7 +26,6 @@
#include <drm/drm_mm.h>
#include "gt/intel_reset.h"
-#include "i915_gem_fence_reg.h"
#include "i915_selftest.h"
#include "i915_vma_types.h"
@@ -135,6 +134,8 @@ typedef u64 gen8_pte_t;
#define GEN8_PDE_IPS_64K BIT(11)
#define GEN8_PDE_PS_2M BIT(7)
+struct i915_fence_reg;
+
#define for_each_sgt_daddr(__dp, __iter, __sgt) \
__for_each_sgt_daddr(__dp, __iter, __sgt, I915_GTT_PAGE_SIZE)
@@ -333,7 +334,7 @@ struct i915_ggtt {
u32 pin_bias;
unsigned int num_fences;
- struct i915_fence_reg fence_regs[I915_MAX_NUM_FENCES];
+ struct i915_fence_reg *fence_regs;
struct list_head fence_list;
/**
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 2dfaddb8811e..87e6c5bdd2dc 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -147,6 +147,7 @@
#include "intel_reset.h"
#include "intel_ring.h"
#include "intel_workarounds.h"
+#include "shmem_utils.h"
#define RING_EXECLIST_QFULL (1 << 0x2)
#define RING_EXECLIST1_VALID (1 << 0x3)
@@ -216,7 +217,7 @@ struct virtual_engine {
/* And finally, which physical engines this virtual engine maps onto. */
unsigned int num_siblings;
- struct intel_engine_cs *siblings[0];
+ struct intel_engine_cs *siblings[];
};
static struct virtual_engine *to_virtual_engine(struct intel_engine_cs *engine)
@@ -238,6 +239,123 @@ __execlists_update_reg_state(const struct intel_context *ce,
const struct intel_engine_cs *engine,
u32 head);
+static int lrc_ring_mi_mode(const struct intel_engine_cs *engine)
+{
+ if (INTEL_GEN(engine->i915) >= 12)
+ return 0x60;
+ else if (INTEL_GEN(engine->i915) >= 9)
+ return 0x54;
+ else if (engine->class == RENDER_CLASS)
+ return 0x58;
+ else
+ return -1;
+}
+
+static int lrc_ring_gpr0(const struct intel_engine_cs *engine)
+{
+ if (INTEL_GEN(engine->i915) >= 12)
+ return 0x74;
+ else if (INTEL_GEN(engine->i915) >= 9)
+ return 0x68;
+ else if (engine->class == RENDER_CLASS)
+ return 0xd8;
+ else
+ return -1;
+}
+
+static int lrc_ring_wa_bb_per_ctx(const struct intel_engine_cs *engine)
+{
+ if (INTEL_GEN(engine->i915) >= 12)
+ return 0x12;
+ else if (INTEL_GEN(engine->i915) >= 9 || engine->class == RENDER_CLASS)
+ return 0x18;
+ else
+ return -1;
+}
+
+static int lrc_ring_indirect_ptr(const struct intel_engine_cs *engine)
+{
+ int x;
+
+ x = lrc_ring_wa_bb_per_ctx(engine);
+ if (x < 0)
+ return x;
+
+ return x + 2;
+}
+
+static int lrc_ring_indirect_offset(const struct intel_engine_cs *engine)
+{
+ int x;
+
+ x = lrc_ring_indirect_ptr(engine);
+ if (x < 0)
+ return x;
+
+ return x + 2;
+}
+
+static int lrc_ring_cmd_buf_cctl(const struct intel_engine_cs *engine)
+{
+ if (engine->class != RENDER_CLASS)
+ return -1;
+
+ if (INTEL_GEN(engine->i915) >= 12)
+ return 0xb6;
+ else if (INTEL_GEN(engine->i915) >= 11)
+ return 0xaa;
+ else
+ return -1;
+}
+
+static u32
+lrc_ring_indirect_offset_default(const struct intel_engine_cs *engine)
+{
+ switch (INTEL_GEN(engine->i915)) {
+ default:
+ MISSING_CASE(INTEL_GEN(engine->i915));
+ fallthrough;
+ case 12:
+ return GEN12_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
+ case 11:
+ return GEN11_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
+ case 10:
+ return GEN10_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
+ case 9:
+ return GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
+ case 8:
+ return GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
+ }
+}
+
+static void
+lrc_ring_setup_indirect_ctx(u32 *regs,
+ const struct intel_engine_cs *engine,
+ u32 ctx_bb_ggtt_addr,
+ u32 size)
+{
+ GEM_BUG_ON(!size);
+ GEM_BUG_ON(!IS_ALIGNED(size, CACHELINE_BYTES));
+ GEM_BUG_ON(lrc_ring_indirect_ptr(engine) == -1);
+ regs[lrc_ring_indirect_ptr(engine) + 1] =
+ ctx_bb_ggtt_addr | (size / CACHELINE_BYTES);
+
+ GEM_BUG_ON(lrc_ring_indirect_offset(engine) == -1);
+ regs[lrc_ring_indirect_offset(engine) + 1] =
+ lrc_ring_indirect_offset_default(engine) << 6;
+}
+
+static u32 intel_context_get_runtime(const struct intel_context *ce)
+{
+ /*
+ * We can use either ppHWSP[16] which is recorded before the context
+ * switch (and so excludes the cost of context switches) or use the
+ * value from the context image itself, which is saved/restored earlier
+ * and so includes the cost of the save.
+ */
+ return READ_ONCE(ce->lrc_reg_state[CTX_TIMESTAMP]);
+}
+
static void mark_eio(struct i915_request *rq)
{
if (i915_request_completed(rq))
@@ -311,18 +429,7 @@ static int effective_prio(const struct i915_request *rq)
if (i915_request_has_nopreempt(rq))
prio = I915_PRIORITY_UNPREEMPTABLE;
- /*
- * On unwinding the active request, we give it a priority bump
- * if it has completed waiting on any semaphore. If we know that
- * the request has already started, we can prevent an unwanted
- * preempt-to-idle cycle by taking that into account now.
- */
- if (__i915_request_has_started(rq))
- prio |= I915_PRIORITY_NOSEMAPHORE;
-
- /* Restrict mere WAIT boosts from triggering preemption */
- BUILD_BUG_ON(__NO_PREEMPTION & ~I915_PRIORITY_MASK); /* only internal */
- return prio | __NO_PREEMPTION;
+ return prio;
}
static int queue_prio(const struct intel_engine_execlists *execlists)
@@ -489,7 +596,7 @@ static void set_offsets(u32 *regs,
#define REG16(x) \
(((x) >> 9) | BIT(7) | BUILD_BUG_ON_ZERO(x >= 0x10000)), \
(((x) >> 2) & 0x7f)
-#define END(x) 0, (x)
+#define END(total_state_size) 0, (total_state_size)
{
const u32 base = engine->mmio_base;
@@ -512,7 +619,7 @@ static void set_offsets(u32 *regs,
if (flags & POSTED)
*regs |= MI_LRI_FORCE_POSTED;
if (INTEL_GEN(engine->i915) >= 11)
- *regs |= MI_LRI_CS_MMIO;
+ *regs |= MI_LRI_LRM_CS_MMIO;
regs++;
GEM_BUG_ON(!count);
@@ -897,8 +1004,63 @@ static const u8 gen12_rcs_offsets[] = {
NOP(6),
LRI(1, 0),
REG(0x0c8),
+ NOP(3 + 9 + 1),
+
+ LRI(51, POSTED),
+ REG16(0x588),
+ REG16(0x588),
+ REG16(0x588),
+ REG16(0x588),
+ REG16(0x588),
+ REG16(0x588),
+ REG(0x028),
+ REG(0x09c),
+ REG(0x0c0),
+ REG(0x178),
+ REG(0x17c),
+ REG16(0x358),
+ REG(0x170),
+ REG(0x150),
+ REG(0x154),
+ REG(0x158),
+ REG16(0x41c),
+ REG16(0x600),
+ REG16(0x604),
+ REG16(0x608),
+ REG16(0x60c),
+ REG16(0x610),
+ REG16(0x614),
+ REG16(0x618),
+ REG16(0x61c),
+ REG16(0x620),
+ REG16(0x624),
+ REG16(0x628),
+ REG16(0x62c),
+ REG16(0x630),
+ REG16(0x634),
+ REG16(0x638),
+ REG16(0x63c),
+ REG16(0x640),
+ REG16(0x644),
+ REG16(0x648),
+ REG16(0x64c),
+ REG16(0x650),
+ REG16(0x654),
+ REG16(0x658),
+ REG16(0x65c),
+ REG16(0x660),
+ REG16(0x664),
+ REG16(0x668),
+ REG16(0x66c),
+ REG16(0x670),
+ REG16(0x674),
+ REG16(0x678),
+ REG16(0x67c),
+ REG(0x068),
+ REG(0x084),
+ NOP(1),
- END(80)
+ END(192)
};
#undef END
@@ -1026,17 +1188,14 @@ static void intel_engine_context_in(struct intel_engine_cs *engine)
{
unsigned long flags;
- if (READ_ONCE(engine->stats.enabled) == 0)
+ if (atomic_add_unless(&engine->stats.active, 1, 0))
return;
write_seqlock_irqsave(&engine->stats.lock, flags);
-
- if (engine->stats.enabled > 0) {
- if (engine->stats.active++ == 0)
- engine->stats.start = ktime_get();
- GEM_BUG_ON(engine->stats.active == 0);
+ if (!atomic_add_unless(&engine->stats.active, 1, 0)) {
+ engine->stats.start = ktime_get();
+ atomic_inc(&engine->stats.active);
}
-
write_sequnlock_irqrestore(&engine->stats.lock, flags);
}
@@ -1044,51 +1203,20 @@ static void intel_engine_context_out(struct intel_engine_cs *engine)
{
unsigned long flags;
- if (READ_ONCE(engine->stats.enabled) == 0)
+ GEM_BUG_ON(!atomic_read(&engine->stats.active));
+
+ if (atomic_add_unless(&engine->stats.active, -1, 1))
return;
write_seqlock_irqsave(&engine->stats.lock, flags);
-
- if (engine->stats.enabled > 0) {
- ktime_t last;
-
- if (engine->stats.active && --engine->stats.active == 0) {
- /*
- * Decrement the active context count and in case GPU
- * is now idle add up to the running total.
- */
- last = ktime_sub(ktime_get(), engine->stats.start);
-
- engine->stats.total = ktime_add(engine->stats.total,
- last);
- } else if (engine->stats.active == 0) {
- /*
- * After turning on engine stats, context out might be
- * the first event in which case we account from the
- * time stats gathering was turned on.
- */
- last = ktime_sub(ktime_get(), engine->stats.enabled_at);
-
- engine->stats.total = ktime_add(engine->stats.total,
- last);
- }
+ if (atomic_dec_and_test(&engine->stats.active)) {
+ engine->stats.total =
+ ktime_add(engine->stats.total,
+ ktime_sub(ktime_get(), engine->stats.start));
}
-
write_sequnlock_irqrestore(&engine->stats.lock, flags);
}
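With the enabled/active bookkeeping now carried by an atomic counter, the seqlock only guards the start/total snapshot; a reader-side sketch (hypothetical helper name, assuming the same stats fields as above):

static ktime_t sketch_engine_busy_time(struct intel_engine_cs *engine)
{
	unsigned int seq;
	ktime_t total;

	do {
		seq = read_seqbegin(&engine->stats.lock);

		total = engine->stats.total;
		if (atomic_read(&engine->stats.active))	/* a context is still in */
			total = ktime_add(total,
					  ktime_sub(ktime_get(),
						    engine->stats.start));
	} while (read_seqretry(&engine->stats.lock, seq));

	return total;
}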
-static int lrc_ring_mi_mode(const struct intel_engine_cs *engine)
-{
- if (INTEL_GEN(engine->i915) >= 12)
- return 0x60;
- else if (INTEL_GEN(engine->i915) >= 9)
- return 0x54;
- else if (engine->class == RENDER_CLASS)
- return 0x58;
- else
- return -1;
-}
-
static void
execlists_check_context(const struct intel_context *ce,
const struct intel_engine_cs *engine)
@@ -1132,14 +1260,12 @@ execlists_check_context(const struct intel_context *ce,
static void restore_default_state(struct intel_context *ce,
struct intel_engine_cs *engine)
{
- u32 *regs = ce->lrc_reg_state;
+ u32 *regs;
- if (engine->pinned_default_state)
- memcpy(regs, /* skip restoring the vanilla PPHWSP */
- engine->pinned_default_state + LRC_STATE_PN * PAGE_SIZE,
- engine->context_size - PAGE_SIZE);
+ regs = memset(ce->lrc_reg_state, 0, engine->context_size - PAGE_SIZE);
+ execlists_init_reg_state(regs, ce, engine, ce->ring, true);
- execlists_init_reg_state(regs, ce, engine, ce->ring, false);
+ ce->runtime.last = intel_context_get_runtime(ce);
}
static void reset_active(struct i915_request *rq,
@@ -1181,17 +1307,6 @@ static void reset_active(struct i915_request *rq,
ce->lrc.desc |= CTX_DESC_FORCE_RESTORE;
}
-static u32 intel_context_get_runtime(const struct intel_context *ce)
-{
- /*
- * We can use either ppHWSP[16] which is recorded before the context
- * switch (and so excludes the cost of context switches) or use the
- * value from the context image itself, which is saved/restored earlier
- * and so includes the cost of the save.
- */
- return READ_ONCE(ce->lrc_reg_state[CTX_TIMESTAMP]);
-}
-
static void st_update_runtime_underflow(struct intel_context *ce, s32 dt)
{
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
@@ -1243,7 +1358,7 @@ __execlists_schedule_in(struct i915_request *rq)
ce->lrc.ccid = ce->tag;
} else {
/* We don't need a strict matching tag, just different values */
- unsigned int tag = ffs(engine->context_tag);
+ unsigned int tag = ffs(READ_ONCE(engine->context_tag));
GEM_BUG_ON(tag == 0 || tag >= BITS_PER_LONG);
clear_bit(tag - 1, &engine->context_tag);
@@ -1417,6 +1532,24 @@ static inline void write_desc(struct intel_engine_execlists *execlists, u64 desc
}
}
+static __maybe_unused char *
+dump_port(char *buf, int buflen, const char *prefix, struct i915_request *rq)
+{
+ if (!rq)
+ return "";
+
+ snprintf(buf, buflen, "%sccid:%x %llx:%lld%s prio %d",
+ prefix,
+ rq->context->lrc.ccid,
+ rq->fence.context, rq->fence.seqno,
+ i915_request_completed(rq) ? "!" :
+ i915_request_started(rq) ? "*" :
+ "",
+ rq_prio(rq));
+
+ return buf;
+}
+
static __maybe_unused void
trace_ports(const struct intel_engine_execlists *execlists,
const char *msg,
@@ -1424,18 +1557,14 @@ trace_ports(const struct intel_engine_execlists *execlists,
{
const struct intel_engine_cs *engine =
container_of(execlists, typeof(*engine), execlists);
+ char __maybe_unused p0[40], p1[40];
if (!ports[0])
return;
- ENGINE_TRACE(engine, "%s { %llx:%lld%s, %llx:%lld }\n", msg,
- ports[0]->fence.context,
- ports[0]->fence.seqno,
- i915_request_completed(ports[0]) ? "!" :
- i915_request_started(ports[0]) ? "*" :
- "",
- ports[1] ? ports[1]->fence.context : 0,
- ports[1] ? ports[1]->fence.seqno : 0);
+ ENGINE_TRACE(engine, "%s { %s%s }\n", msg,
+ dump_port(p0, sizeof(p0), "", ports[0]),
+ dump_port(p1, sizeof(p1), ", ", ports[1]));
}
static inline bool
@@ -1448,9 +1577,12 @@ static __maybe_unused bool
assert_pending_valid(const struct intel_engine_execlists *execlists,
const char *msg)
{
+ struct intel_engine_cs *engine =
+ container_of(execlists, typeof(*engine), execlists);
struct i915_request * const *port, *rq;
struct intel_context *ce = NULL;
bool sentinel = false;
+ u32 ccid = -1;
trace_ports(execlists, msg, execlists->pending);
@@ -1459,13 +1591,14 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
return true;
if (!execlists->pending[0]) {
- GEM_TRACE_ERR("Nothing pending for promotion!\n");
+ GEM_TRACE_ERR("%s: Nothing pending for promotion!\n",
+ engine->name);
return false;
}
if (execlists->pending[execlists_num_ports(execlists)]) {
- GEM_TRACE_ERR("Excess pending[%d] for promotion!\n",
- execlists_num_ports(execlists));
+ GEM_TRACE_ERR("%s: Excess pending[%d] for promotion!\n",
+ engine->name, execlists_num_ports(execlists));
return false;
}
@@ -1477,20 +1610,31 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
GEM_BUG_ON(!i915_request_is_active(rq));
if (ce == rq->context) {
- GEM_TRACE_ERR("Dup context:%llx in pending[%zd]\n",
+ GEM_TRACE_ERR("%s: Dup context:%llx in pending[%zd]\n",
+ engine->name,
ce->timeline->fence_context,
port - execlists->pending);
return false;
}
ce = rq->context;
+ if (ccid == ce->lrc.ccid) {
+ GEM_TRACE_ERR("%s: Dup ccid:%x context:%llx in pending[%zd]\n",
+ engine->name,
+ ccid, ce->timeline->fence_context,
+ port - execlists->pending);
+ return false;
+ }
+ ccid = ce->lrc.ccid;
+
/*
* Sentinels are supposed to be lonely so they flush the
* current execution off the HW. Check that they are the
* only request in the pending submission.
*/
if (sentinel) {
- GEM_TRACE_ERR("context:%llx after sentinel in pending[%zd]\n",
+ GEM_TRACE_ERR("%s: context:%llx after sentinel in pending[%zd]\n",
+ engine->name,
ce->timeline->fence_context,
port - execlists->pending);
return false;
@@ -1498,7 +1642,8 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
sentinel = i915_request_has_sentinel(rq);
if (sentinel && port != execlists->pending) {
- GEM_TRACE_ERR("sentinel context:%llx not in prime position[%zd]\n",
+ GEM_TRACE_ERR("%s: sentinel context:%llx not in prime position[%zd]\n",
+ engine->name,
ce->timeline->fence_context,
port - execlists->pending);
return false;
@@ -1513,7 +1658,8 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
if (i915_active_is_idle(&ce->active) &&
!intel_context_is_barrier(ce)) {
- GEM_TRACE_ERR("Inactive context:%llx in pending[%zd]\n",
+ GEM_TRACE_ERR("%s: Inactive context:%llx in pending[%zd]\n",
+ engine->name,
ce->timeline->fence_context,
port - execlists->pending);
ok = false;
@@ -1521,7 +1667,8 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
}
if (!i915_vma_is_pinned(ce->state)) {
- GEM_TRACE_ERR("Unpinned context:%llx in pending[%zd]\n",
+ GEM_TRACE_ERR("%s: Unpinned context:%llx in pending[%zd]\n",
+ engine->name,
ce->timeline->fence_context,
port - execlists->pending);
ok = false;
@@ -1529,7 +1676,8 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
}
if (!i915_vma_is_pinned(ce->ring->vma)) {
- GEM_TRACE_ERR("Unpinned ring:%llx in pending[%zd]\n",
+ GEM_TRACE_ERR("%s: Unpinned ring:%llx in pending[%zd]\n",
+ engine->name,
ce->timeline->fence_context,
port - execlists->pending);
ok = false;
@@ -1664,30 +1812,16 @@ static bool virtual_matches(const struct virtual_engine *ve,
return true;
}
-static void virtual_xfer_breadcrumbs(struct virtual_engine *ve,
- struct i915_request *rq)
+static void virtual_xfer_breadcrumbs(struct virtual_engine *ve)
{
- struct intel_engine_cs *old = ve->siblings[0];
-
- /* All unattached (rq->engine == old) must already be completed */
-
- spin_lock(&old->breadcrumbs.irq_lock);
- if (!list_empty(&ve->context.signal_link)) {
- list_del_init(&ve->context.signal_link);
-
- /*
- * We cannot acquire the new engine->breadcrumbs.irq_lock
- * (as we are holding a breadcrumbs.irq_lock already),
- * so attach this request to the signaler on submission.
- * The queued irq_work will occur when we finally drop
- * the engine->active.lock after dequeue.
- */
- set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags);
-
- /* Also transfer the pending irq_work for the old breadcrumb. */
- intel_engine_signal_breadcrumbs(rq->engine);
- }
- spin_unlock(&old->breadcrumbs.irq_lock);
+ /*
+ * All the outstanding signals on ve->siblings[0] must have
+ * been completed, just pending the interrupt handler. As those
+ * signals still refer to the old sibling (via rq->engine), we must
+ * transfer those to the old irq_worker to keep our locking
+ * consistent.
+ */
+ intel_engine_transfer_stale_breadcrumbs(ve->siblings[0], &ve->context);
}
#define for_each_waiter(p__, rq__) \
@@ -1729,7 +1863,8 @@ static void defer_request(struct i915_request *rq, struct list_head * const pl)
continue;
/* No waiter should start before its signaler */
- GEM_BUG_ON(i915_request_started(w) &&
+ GEM_BUG_ON(i915_request_has_initial_breadcrumb(w) &&
+ i915_request_started(w) &&
!i915_request_completed(rq));
GEM_BUG_ON(i915_request_is_active(w));
@@ -1831,16 +1966,25 @@ static unsigned long active_timeslice(const struct intel_engine_cs *engine)
static void set_timeslice(struct intel_engine_cs *engine)
{
+ unsigned long duration;
+
if (!intel_engine_has_timeslices(engine))
return;
- set_timer_ms(&engine->execlists.timer, active_timeslice(engine));
+ duration = active_timeslice(engine);
+ ENGINE_TRACE(engine, "bump timeslicing, interval:%lu", duration);
+
+ set_timer_ms(&engine->execlists.timer, duration);
}
static void start_timeslice(struct intel_engine_cs *engine)
{
struct intel_engine_execlists *execlists = &engine->execlists;
- int prio = queue_prio(execlists);
+ const int prio = queue_prio(execlists);
+ unsigned long duration;
+
+ if (!intel_engine_has_timeslices(engine))
+ return;
WRITE_ONCE(execlists->switch_priority_hint, prio);
if (prio == INT_MIN)
@@ -1849,7 +1993,12 @@ static void start_timeslice(struct intel_engine_cs *engine)
if (timer_pending(&execlists->timer))
return;
- set_timer_ms(&execlists->timer, timeslice(engine));
+ duration = timeslice(engine);
+ ENGINE_TRACE(engine,
+ "start timeslicing, prio:%d, interval:%lu",
+ prio, duration);
+
+ set_timer_ms(&execlists->timer, duration);
}
static void record_preemption(struct intel_engine_execlists *execlists)
@@ -1946,11 +2095,26 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* of trouble.
*/
active = READ_ONCE(execlists->active);
- while ((last = *active) && i915_request_completed(last))
- active++;
- if (last) {
+ /*
+ * In theory we can skip over completed contexts that have not
+ * yet been processed by events (as those events are in flight):
+ *
+ * while ((last = *active) && i915_request_completed(last))
+ * active++;
+ *
+ * However, the GPU cannot handle this as it will ultimately
+ * find itself trying to jump back into a context it has just
+ * completed and barf.
+ */
+
+ if ((last = *active)) {
if (need_preempt(engine, last, rb)) {
+ if (i915_request_completed(last)) {
+ tasklet_hi_schedule(&execlists->tasklet);
+ return;
+ }
+
ENGINE_TRACE(engine,
"preempting last=%llx:%lld, prio=%d, hint=%d\n",
last->fence.context,
@@ -1978,6 +2142,11 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
last = NULL;
} else if (need_timeslice(engine, last) &&
timeslice_expired(execlists, last)) {
+ if (i915_request_completed(last)) {
+ tasklet_hi_schedule(&execlists->tasklet);
+ return;
+ }
+
ENGINE_TRACE(engine,
"expired last=%llx:%lld, prio=%d, hint=%d, yield?=%s\n",
last->fence.context,
@@ -2087,7 +2256,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
engine);
if (!list_empty(&ve->context.signals))
- virtual_xfer_breadcrumbs(ve, rq);
+ virtual_xfer_breadcrumbs(ve);
/*
* Move the bound engine to the top of the list
@@ -2246,8 +2415,8 @@ done:
clear_ports(port + 1, last_port - port);
WRITE_ONCE(execlists->yield, -1);
- execlists_submit_ports(engine);
set_preempt_timeout(engine, *active);
+ execlists_submit_ports(engine);
} else {
skip_submit:
ring_set_paused(engine, 0);
@@ -2417,8 +2586,6 @@ static void process_csb(struct intel_engine_cs *engine)
if (promote) {
struct i915_request * const *old = execlists->active;
- GEM_BUG_ON(!assert_pending_valid(execlists, "promote"));
-
ring_set_paused(engine, 0);
/* Point active to the new ELSP; prevent overwriting */
@@ -2431,6 +2598,7 @@ static void process_csb(struct intel_engine_cs *engine)
execlists_schedule_out(*old++);
/* switch pending to inflight */
+ GEM_BUG_ON(!assert_pending_valid(execlists, "promote"));
memcpy(execlists->inflight,
execlists->pending,
execlists_num_ports(execlists) *
@@ -2449,17 +2617,21 @@ static void process_csb(struct intel_engine_cs *engine)
* We rely on the hardware being strongly
* ordered, that the breadcrumb write is
* coherent (visible from the CPU) before the
- * user interrupt and CSB is processed.
+ * user interrupt is processed. One might assume
+ * that, since the breadcrumb write is emitted before
+ * the user interrupt and the CS event for the context
+ * switch, it would therefore also be visible before
+ * that CS event...
*/
if (GEM_SHOW_DEBUG() &&
- !i915_request_completed(*execlists->active) &&
- !reset_in_progress(execlists)) {
- struct i915_request *rq __maybe_unused =
- *execlists->active;
+ !i915_request_completed(*execlists->active)) {
+ struct i915_request *rq = *execlists->active;
const u32 *regs __maybe_unused =
rq->context->lrc_reg_state;
ENGINE_TRACE(engine,
+ "context completed before request!\n");
+ ENGINE_TRACE(engine,
"ring:{start:0x%08x, head:%04x, tail:%04x, ctl:%08x, mode:%08x}\n",
ENGINE_READ(engine, RING_START),
ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR,
@@ -2478,8 +2650,6 @@ static void process_csb(struct intel_engine_cs *engine)
regs[CTX_RING_START],
regs[CTX_RING_HEAD],
regs[CTX_RING_TAIL]);
-
- GEM_BUG_ON("context completed before request");
}
execlists_schedule_out(*execlists->active++);
@@ -2769,6 +2939,45 @@ err_cap:
return NULL;
}
+static struct i915_request *
+active_context(struct intel_engine_cs *engine, u32 ccid)
+{
+ const struct intel_engine_execlists * const el = &engine->execlists;
+ struct i915_request * const *port, *rq;
+
+ /*
+ * Use the most recent result from process_csb(), but just in case
+ * we trigger an error (via interrupt) before the first CS event has
+ * been written, peek at the next submission.
+ */
+
+ for (port = el->active; (rq = *port); port++) {
+ if (rq->context->lrc.ccid == ccid) {
+ ENGINE_TRACE(engine,
+ "ccid found at active:%zd\n",
+ port - el->active);
+ return rq;
+ }
+ }
+
+ for (port = el->pending; (rq = *port); port++) {
+ if (rq->context->lrc.ccid == ccid) {
+ ENGINE_TRACE(engine,
+ "ccid found at pending:%zd\n",
+ port - el->pending);
+ return rq;
+ }
+ }
+
+ ENGINE_TRACE(engine, "ccid:%x not found\n", ccid);
+ return NULL;
+}
+
+static u32 active_ccid(struct intel_engine_cs *engine)
+{
+ return ENGINE_READ_FW(engine, RING_EXECLIST_STATUS_HI);
+}
+
static bool execlists_capture(struct intel_engine_cs *engine)
{
struct execlists_capture *cap;
@@ -2786,7 +2995,7 @@ static bool execlists_capture(struct intel_engine_cs *engine)
return true;
spin_lock_irq(&engine->active.lock);
- cap->rq = execlists_active(&engine->execlists);
+ cap->rq = active_context(engine, active_ccid(engine));
if (cap->rq) {
cap->rq = active_request(cap->rq->context->timeline, cap->rq);
cap->rq = i915_request_get_rcu(cap->rq);
@@ -2934,10 +3143,14 @@ static void __submit_queue_imm(struct intel_engine_cs *engine)
if (reset_in_progress(execlists))
return; /* defer until we restart the engine following reset */
- if (execlists->tasklet.func == execlists_submission_tasklet)
- __execlists_submission_tasklet(engine);
- else
- tasklet_hi_schedule(&execlists->tasklet);
+ /* Hopefully we clear execlists->pending[] to let us through */
+ if (READ_ONCE(execlists->pending[0]) &&
+ tasklet_trylock(&execlists->tasklet)) {
+ process_csb(engine);
+ tasklet_unlock(&execlists->tasklet);
+ }
+
+ __execlists_submission_tasklet(engine);
}
static void submit_queue(struct intel_engine_cs *engine,
@@ -3023,19 +3236,139 @@ check_redzone(const void *vaddr, const struct intel_engine_cs *engine)
vaddr += engine->context_size;
if (memchr_inv(vaddr, CONTEXT_REDZONE, I915_GTT_PAGE_SIZE))
- dev_err_once(engine->i915->drm.dev,
+ drm_err_once(&engine->i915->drm,
"%s context redzone overwritten!\n",
engine->name);
}
static void execlists_context_unpin(struct intel_context *ce)
{
- check_redzone((void *)ce->lrc_reg_state - LRC_STATE_PN * PAGE_SIZE,
+ check_redzone((void *)ce->lrc_reg_state - LRC_STATE_OFFSET,
ce->engine);
i915_gem_object_unpin_map(ce->state->obj);
}
+static u32 *
+gen12_emit_timestamp_wa(const struct intel_context *ce, u32 *cs)
+{
+ *cs++ = MI_LOAD_REGISTER_MEM_GEN8 |
+ MI_SRM_LRM_GLOBAL_GTT |
+ MI_LRI_LRM_CS_MMIO;
+ *cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0));
+ *cs++ = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET +
+ CTX_TIMESTAMP * sizeof(u32);
+ *cs++ = 0;
+
+ *cs++ = MI_LOAD_REGISTER_REG |
+ MI_LRR_SOURCE_CS_MMIO |
+ MI_LRI_LRM_CS_MMIO;
+ *cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0));
+ *cs++ = i915_mmio_reg_offset(RING_CTX_TIMESTAMP(0));
+
+ *cs++ = MI_LOAD_REGISTER_REG |
+ MI_LRR_SOURCE_CS_MMIO |
+ MI_LRI_LRM_CS_MMIO;
+ *cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0));
+ *cs++ = i915_mmio_reg_offset(RING_CTX_TIMESTAMP(0));
+
+ return cs;
+}
+
+static u32 *
+gen12_emit_restore_scratch(const struct intel_context *ce, u32 *cs)
+{
+ GEM_BUG_ON(lrc_ring_gpr0(ce->engine) == -1);
+
+ *cs++ = MI_LOAD_REGISTER_MEM_GEN8 |
+ MI_SRM_LRM_GLOBAL_GTT |
+ MI_LRI_LRM_CS_MMIO;
+ *cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0));
+ *cs++ = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET +
+ (lrc_ring_gpr0(ce->engine) + 1) * sizeof(u32);
+ *cs++ = 0;
+
+ return cs;
+}
+
+static u32 *
+gen12_emit_cmd_buf_wa(const struct intel_context *ce, u32 *cs)
+{
+ GEM_BUG_ON(lrc_ring_cmd_buf_cctl(ce->engine) == -1);
+
+ *cs++ = MI_LOAD_REGISTER_MEM_GEN8 |
+ MI_SRM_LRM_GLOBAL_GTT |
+ MI_LRI_LRM_CS_MMIO;
+ *cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0));
+ *cs++ = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET +
+ (lrc_ring_cmd_buf_cctl(ce->engine) + 1) * sizeof(u32);
+ *cs++ = 0;
+
+ *cs++ = MI_LOAD_REGISTER_REG |
+ MI_LRR_SOURCE_CS_MMIO |
+ MI_LRI_LRM_CS_MMIO;
+ *cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0));
+ *cs++ = i915_mmio_reg_offset(RING_CMD_BUF_CCTL(0));
+
+ return cs;
+}
+
+static u32 *
+gen12_emit_indirect_ctx_rcs(const struct intel_context *ce, u32 *cs)
+{
+ cs = gen12_emit_timestamp_wa(ce, cs);
+ cs = gen12_emit_cmd_buf_wa(ce, cs);
+ cs = gen12_emit_restore_scratch(ce, cs);
+
+ return cs;
+}
+
+static u32 *
+gen12_emit_indirect_ctx_xcs(const struct intel_context *ce, u32 *cs)
+{
+ cs = gen12_emit_timestamp_wa(ce, cs);
+ cs = gen12_emit_restore_scratch(ce, cs);
+
+ return cs;
+}
+
+static inline u32 context_wa_bb_offset(const struct intel_context *ce)
+{
+ return PAGE_SIZE * ce->wa_bb_page;
+}
+
+static u32 *context_indirect_bb(const struct intel_context *ce)
+{
+ void *ptr;
+
+ GEM_BUG_ON(!ce->wa_bb_page);
+
+ ptr = ce->lrc_reg_state;
+ ptr -= LRC_STATE_OFFSET; /* back to start of context image */
+ ptr += context_wa_bb_offset(ce);
+
+ return ptr;
+}
+
+static void
+setup_indirect_ctx_bb(const struct intel_context *ce,
+ const struct intel_engine_cs *engine,
+ u32 *(*emit)(const struct intel_context *, u32 *))
+{
+ u32 * const start = context_indirect_bb(ce);
+ u32 *cs;
+
+ cs = emit(ce, start);
+ GEM_BUG_ON(cs - start > I915_GTT_PAGE_SIZE / sizeof(*cs));
+ while ((unsigned long)cs % CACHELINE_BYTES)
+ *cs++ = MI_NOOP;
+
+ lrc_ring_setup_indirect_ctx(ce->lrc_reg_state, engine,
+ i915_ggtt_offset(ce->state) +
+ context_wa_bb_offset(ce),
+ (cs - start) * sizeof(*cs));
+}
+
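The emit callback simply writes CS dwords and returns the advanced pointer; setup_indirect_ctx_bb() then pads to a cacheline with MI_NOOP and programs the INDIRECT_CTX registers. A minimal, purely hypothetical callback to illustrate the contract:

static u32 *sketch_emit_empty_bb(const struct intel_context *ce, u32 *cs)
{
	*cs++ = MI_NOOP;	/* any commands legal in an indirect context bb */

	return cs;
}

which would be installed with setup_indirect_ctx_bb(ce, ce->engine, sketch_emit_empty_bb).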
static void
__execlists_update_reg_state(const struct intel_context *ce,
const struct intel_engine_cs *engine,
@@ -3059,6 +3392,18 @@ __execlists_update_reg_state(const struct intel_context *ce,
i915_oa_init_reg_state(ce, engine);
}
+
+ if (ce->wa_bb_page) {
+ u32 *(*fn)(const struct intel_context *ce, u32 *cs);
+
+ fn = gen12_emit_indirect_ctx_xcs;
+ if (ce->engine->class == RENDER_CLASS)
+ fn = gen12_emit_indirect_ctx_rcs;
+
+ /* Mutually exclusive wrt to global indirect bb */
+ GEM_BUG_ON(engine->wa_ctx.indirect_ctx.size);
+ setup_indirect_ctx_bb(ce, engine, fn);
+ }
}
static int
@@ -3077,7 +3422,7 @@ __execlists_context_pin(struct intel_context *ce,
return PTR_ERR(vaddr);
ce->lrc.lrca = lrc_descriptor(ce, engine) | CTX_DESC_FORCE_RESTORE;
- ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
+ ce->lrc_reg_state = vaddr + LRC_STATE_OFFSET;
__execlists_update_reg_state(ce, engine, ce->ring->tail);
return 0;
@@ -3125,6 +3470,7 @@ static int gen8_emit_init_breadcrumb(struct i915_request *rq)
{
u32 *cs;
+ GEM_BUG_ON(i915_request_has_initial_breadcrumb(rq));
if (!i915_request_timeline(rq)->has_initial_breadcrumb)
return 0;
@@ -3151,6 +3497,56 @@ static int gen8_emit_init_breadcrumb(struct i915_request *rq)
/* Record the updated position of the request's payload */
rq->infix = intel_ring_offset(rq, cs);
+ __set_bit(I915_FENCE_FLAG_INITIAL_BREADCRUMB, &rq->fence.flags);
+
+ return 0;
+}
+
+static int emit_pdps(struct i915_request *rq)
+{
+ const struct intel_engine_cs * const engine = rq->engine;
+ struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(rq->context->vm);
+ int err, i;
+ u32 *cs;
+
+ GEM_BUG_ON(intel_vgpu_active(rq->i915));
+
+ /*
+ * Beware ye of the dragons, this sequence is magic!
+ *
+ * Small changes to this sequence can cause anything from
+ * GPU hangs to forcewake errors and machine lockups!
+ */
+
+ /* Flush any residual operations from the context load */
+ err = engine->emit_flush(rq, EMIT_FLUSH);
+ if (err)
+ return err;
+
+ /* Magic required to prevent forcewake errors! */
+ err = engine->emit_flush(rq, EMIT_INVALIDATE);
+ if (err)
+ return err;
+
+ cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ /* Ensure the LRI have landed before we invalidate & continue */
+ *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED;
+ for (i = GEN8_3LVL_PDPES; i--; ) {
+ const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
+ u32 base = engine->mmio_base;
+
+ *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i));
+ *cs++ = upper_32_bits(pd_daddr);
+ *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i));
+ *cs++ = lower_32_bits(pd_daddr);
+ }
+ *cs++ = MI_NOOP;
+
+ intel_ring_advance(rq, cs);
+
return 0;
}
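A quick check of the reservation above: the LRI header, four dwords per PDP entry (UDW offset/value and LDW offset/value) and the trailing MI_NOOP come to 1 + 4 * GEN8_3LVL_PDPES + 1 = 18 dwords for GEN8_3LVL_PDPES == 4, matching the 4 * GEN8_3LVL_PDPES + 2 passed to intel_ring_begin() and keeping the count even as the ring requires.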
@@ -3175,6 +3571,12 @@ static int execlists_request_alloc(struct i915_request *request)
* to cancel/unwind this request now.
*/
+ if (!i915_vm_is_4lvl(request->context->vm)) {
+ ret = emit_pdps(request);
+ if (ret)
+ return ret;
+ }
+
/* Unconditionally invalidate GPU caches and TLBs. */
ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
if (ret)
@@ -3475,7 +3877,8 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
ret = lrc_setup_wa_ctx(engine);
if (ret) {
- DRM_DEBUG_DRIVER("Failed to setup context WA page: %d\n", ret);
+ drm_dbg(&engine->i915->drm,
+ "Failed to setup context WA page: %d\n", ret);
return ret;
}
@@ -3508,6 +3911,72 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
return ret;
}
+static void reset_csb_pointers(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ const unsigned int reset_value = execlists->csb_size - 1;
+
+ ring_set_paused(engine, 0);
+
+ /*
+ * Sometimes Icelake forgets to reset its pointers on a GPU reset.
+ * Bludgeon them with a mmio update to be sure.
+ */
+ ENGINE_WRITE(engine, RING_CONTEXT_STATUS_PTR,
+ 0xffff << 16 | reset_value << 8 | reset_value);
+ ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR);
+
+ /*
+ * After a reset, the HW starts writing into CSB entry [0]. We
+ * therefore have to set our HEAD pointer back one entry so that
+ * the *first* entry we check is entry 0. To complicate this further,
+ * as we don't wait for the first interrupt after reset, we have to
+ * fake the HW write to point back to the last entry so that our
+ * inline comparison of our cached head position against the last HW
+ * write works even before the first interrupt.
+ */
+ execlists->csb_head = reset_value;
+ WRITE_ONCE(*execlists->csb_write, reset_value);
+ wmb(); /* Make sure this is visible to HW (paranoia?) */
+
+ invalidate_csb_entries(&execlists->csb_status[0],
+ &execlists->csb_status[reset_value]);
+
+ /* Once more for luck and our trusty paranoia */
+ ENGINE_WRITE(engine, RING_CONTEXT_STATUS_PTR,
+ 0xffff << 16 | reset_value << 8 | reset_value);
+ ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR);
+
+ GEM_BUG_ON(READ_ONCE(*execlists->csb_write) != reset_value);
+}
+
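After this, process_csb() simply chases *csb_write from the cached head, wrapping at csb_size; a simplified consumer sketch (hypothetical helper, assuming the u8 indices process_csb() uses; the real loop also decodes each event):

static void sketch_advance_csb(struct intel_engine_execlists *execlists)
{
	u8 head = execlists->csb_head;
	const u8 tail = READ_ONCE(*execlists->csb_write);

	while (head != tail) {
		if (++head == execlists->csb_size)
			head = 0;
		/* decode execlists->csb_status[head] here */
	}

	execlists->csb_head = head;
}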
+static void execlists_sanitize(struct intel_engine_cs *engine)
+{
+ /*
+ * Poison residual state on resume, in case the suspend didn't!
+ *
+ * We have to assume that across suspend/resume (or other loss
+ * of control) the contents of our pinned buffers have been
+ * lost, replaced by garbage. Since this doesn't always happen,
+ * let's poison such state so that we more quickly spot when
+ * we falsely assume it has been preserved.
+ */
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ memset(engine->status_page.addr, POISON_INUSE, PAGE_SIZE);
+
+ reset_csb_pointers(engine);
+
+ /*
+ * The kernel_context HWSP is stored in the status_page. As above,
+ * that may be lost on resume/initialisation, and so we need to
+ * reset the value in the HWSP.
+ */
+ intel_timeline_reset_seqno(engine->kernel_context->timeline);
+
+ /* And scrub the dirty cachelines for the HWSP */
+ clflush_cache_range(engine->status_page.addr, PAGE_SIZE);
+}
+
static void enable_error_interrupt(struct intel_engine_cs *engine)
{
u32 status;
@@ -3518,7 +3987,7 @@ static void enable_error_interrupt(struct intel_engine_cs *engine)
status = ENGINE_READ(engine, RING_ESR);
if (unlikely(status)) {
- dev_err(engine->i915->drm.dev,
+ drm_err(&engine->i915->drm,
"engine '%s' resumed still in error: %08x\n",
engine->name, status);
__intel_gt_reset(engine->gt, engine->mask);
@@ -3582,7 +4051,8 @@ static bool unexpected_starting_state(struct intel_engine_cs *engine)
bool unexpected = false;
if (ENGINE_READ_FW(engine, RING_MI_MODE) & STOP_RING) {
- DRM_DEBUG_DRIVER("STOP_RING still set in RING_MI_MODE\n");
+ drm_dbg(&engine->i915->drm,
+ "STOP_RING still set in RING_MI_MODE\n");
unexpected = true;
}
@@ -3642,39 +4112,10 @@ static void execlists_reset_prepare(struct intel_engine_cs *engine)
*
* FIXME: Wa for more modern gens needs to be validated
*/
+ ring_set_paused(engine, 1);
intel_engine_stop_cs(engine);
-}
-
-static void reset_csb_pointers(struct intel_engine_cs *engine)
-{
- struct intel_engine_execlists * const execlists = &engine->execlists;
- const unsigned int reset_value = execlists->csb_size - 1;
-
- ring_set_paused(engine, 0);
-
- /*
- * After a reset, the HW starts writing into CSB entry [0]. We
- * therefore have to set our HEAD pointer back one entry so that
- * the *first* entry we check is entry 0. To complicate this further,
- * as we don't wait for the first interrupt after reset, we have to
- * fake the HW write to point back to the last entry so that our
- * inline comparison of our cached head position against the last HW
- * write works even before the first interrupt.
- */
- execlists->csb_head = reset_value;
- WRITE_ONCE(*execlists->csb_write, reset_value);
- wmb(); /* Make sure this is visible to HW (paranoia?) */
- /*
- * Sometimes Icelake forgets to reset its pointers on a GPU reset.
- * Bludgeon them with a mmio update to be sure.
- */
- ENGINE_WRITE(engine, RING_CONTEXT_STATUS_PTR,
- reset_value << 8 | reset_value);
- ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR);
-
- invalidate_csb_entries(&execlists->csb_status[0],
- &execlists->csb_status[reset_value]);
+ engine->execlists.reset_ccid = active_ccid(engine);
}
static void __reset_stop_ring(u32 *regs, const struct intel_engine_cs *engine)
@@ -3717,7 +4158,7 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
* its request, it was still running at the time of the
* reset and will have been clobbered.
*/
- rq = execlists_active(execlists);
+ rq = active_context(engine, engine->execlists.reset_ccid);
if (!rq)
goto unwind;
@@ -3767,8 +4208,6 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
* image back to the expected values to skip over the guilty request.
*/
__i915_request_reset(rq, stalled);
- if (!stalled)
- goto out_replay;
/*
* We want a simple context + ring to execute the breadcrumb update.
@@ -3778,9 +4217,6 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
* future request will be after userspace has had the opportunity
* to recreate its own state.
*/
- GEM_BUG_ON(!intel_context_is_pinned(ce));
- restore_default_state(ce, engine);
-
out_replay:
ENGINE_TRACE(engine, "replay {head:%04x, tail:%04x}\n",
head, ce->ring->tail);
@@ -4146,6 +4582,42 @@ static u32 preparser_disable(bool state)
return MI_ARB_CHECK | 1 << 8 | state;
}
+static i915_reg_t aux_inv_reg(const struct intel_engine_cs *engine)
+{
+ static const i915_reg_t vd[] = {
+ GEN12_VD0_AUX_NV,
+ GEN12_VD1_AUX_NV,
+ GEN12_VD2_AUX_NV,
+ GEN12_VD3_AUX_NV,
+ };
+
+ static const i915_reg_t ve[] = {
+ GEN12_VE0_AUX_NV,
+ GEN12_VE1_AUX_NV,
+ };
+
+ if (engine->class == VIDEO_DECODE_CLASS)
+ return vd[engine->instance];
+
+ if (engine->class == VIDEO_ENHANCEMENT_CLASS)
+ return ve[engine->instance];
+
+ GEM_BUG_ON("unknown aux_inv_reg\n");
+
+ return INVALID_MMIO_REG;
+}
+
+static u32 *
+gen12_emit_aux_table_inv(const i915_reg_t inv_reg, u32 *cs)
+{
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = i915_mmio_reg_offset(inv_reg);
+ *cs++ = AUX_INV;
+ *cs++ = MI_NOOP;
+
+ return cs;
+}
+
static int gen12_emit_flush_render(struct i915_request *request,
u32 mode)
{
@@ -4154,13 +4626,13 @@ static int gen12_emit_flush_render(struct i915_request *request,
u32 *cs;
flags |= PIPE_CONTROL_TILE_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_FLUSH_L3;
flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
/* Wa_1409600907:tgl */
flags |= PIPE_CONTROL_DEPTH_STALL;
flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
flags |= PIPE_CONTROL_FLUSH_ENABLE;
- flags |= PIPE_CONTROL_HDC_PIPELINE_FLUSH;
flags |= PIPE_CONTROL_STORE_DATA_INDEX;
flags |= PIPE_CONTROL_QW_WRITE;
@@ -4171,7 +4643,9 @@ static int gen12_emit_flush_render(struct i915_request *request,
if (IS_ERR(cs))
return PTR_ERR(cs);
- cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
+ cs = gen12_emit_pipe_control(cs,
+ PIPE_CONTROL0_HDC_PIPELINE_FLUSH,
+ flags, LRC_PPHWSP_SCRATCH_ADDR);
intel_ring_advance(request, cs);
}
@@ -4186,14 +4660,13 @@ static int gen12_emit_flush_render(struct i915_request *request,
flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_L3_RO_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_STORE_DATA_INDEX;
flags |= PIPE_CONTROL_QW_WRITE;
flags |= PIPE_CONTROL_CS_STALL;
- cs = intel_ring_begin(request, 8);
+ cs = intel_ring_begin(request, 8 + 4);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -4206,6 +4679,9 @@ static int gen12_emit_flush_render(struct i915_request *request,
cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
+ /* hsdes: 1809175790 */
+ cs = gen12_emit_aux_table_inv(GEN12_GFX_CCS_AUX_NV, cs);
+
*cs++ = preparser_disable(false);
intel_ring_advance(request, cs);
}
@@ -4213,6 +4689,56 @@ static int gen12_emit_flush_render(struct i915_request *request,
return 0;
}
+static int gen12_emit_flush(struct i915_request *request, u32 mode)
+{
+ intel_engine_mask_t aux_inv = 0;
+ u32 cmd, *cs;
+
+ if (mode & EMIT_INVALIDATE)
+ aux_inv = request->engine->mask & ~BIT(BCS0);
+
+ cs = intel_ring_begin(request,
+ 4 + (aux_inv ? 2 * hweight8(aux_inv) + 2 : 0));
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ cmd = MI_FLUSH_DW + 1;
+
+ /* We always require a command barrier so that subsequent
+ * commands, such as breadcrumb interrupts, are strictly ordered
+ * wrt the contents of the write cache being flushed to memory
+ * (and thus being coherent from the CPU).
+ */
+ cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
+
+ if (mode & EMIT_INVALIDATE) {
+ cmd |= MI_INVALIDATE_TLB;
+ if (request->engine->class == VIDEO_DECODE_CLASS)
+ cmd |= MI_INVALIDATE_BSD;
+ }
+
+ *cs++ = cmd;
+ *cs++ = LRC_PPHWSP_SCRATCH_ADDR;
+ *cs++ = 0; /* upper addr */
+ *cs++ = 0; /* value */
+
+ if (aux_inv) { /* hsdes: 1809175790 */
+ struct intel_engine_cs *engine;
+ unsigned int tmp;
+
+ *cs++ = MI_LOAD_REGISTER_IMM(hweight8(aux_inv));
+ for_each_engine_masked(engine, request->engine->gt,
+ aux_inv, tmp) {
+ *cs++ = i915_mmio_reg_offset(aux_inv_reg(engine));
+ *cs++ = AUX_INV;
+ }
+ *cs++ = MI_NOOP;
+ }
+ intel_ring_advance(request, cs);
+
+ return 0;
+}
+
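On the reservation math above: the flush itself is four dwords (command, address, upper address, value); when aux_inv is set, the LRI header, two dwords per engine and the closing MI_NOOP add 2 * hweight8(aux_inv) + 2 more, e.g. 4 + (2 * 1 + 2) = 8 dwords for a request on a single video engine, keeping the count even as intel_ring_begin() expects.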
/*
* Reserve space for 2 NOOPs at the end of each request to be
* used as a workaround for not being allowed to do lite
@@ -4242,8 +4768,7 @@ static u32 *emit_preempt_busywait(struct i915_request *request, u32 *cs)
}
static __always_inline u32*
-gen8_emit_fini_breadcrumb_footer(struct i915_request *request,
- u32 *cs)
+gen8_emit_fini_breadcrumb_tail(struct i915_request *request, u32 *cs)
{
*cs++ = MI_USER_INTERRUPT;
@@ -4257,14 +4782,16 @@ gen8_emit_fini_breadcrumb_footer(struct i915_request *request,
return gen8_emit_wa_tail(request, cs);
}
-static u32 *gen8_emit_fini_breadcrumb(struct i915_request *request, u32 *cs)
+static u32 *emit_xcs_breadcrumb(struct i915_request *request, u32 *cs)
{
- cs = gen8_emit_ggtt_write(cs,
- request->fence.seqno,
- i915_request_active_timeline(request)->hwsp_offset,
- 0);
+ u32 addr = i915_request_active_timeline(request)->hwsp_offset;
+
+ return gen8_emit_ggtt_write(cs, request->fence.seqno, addr, 0);
+}
- return gen8_emit_fini_breadcrumb_footer(request, cs);
+static u32 *gen8_emit_fini_breadcrumb(struct i915_request *rq, u32 *cs)
+{
+ return gen8_emit_fini_breadcrumb_tail(rq, emit_xcs_breadcrumb(rq, cs));
}
static u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
@@ -4282,7 +4809,7 @@ static u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
PIPE_CONTROL_FLUSH_ENABLE |
PIPE_CONTROL_CS_STALL);
- return gen8_emit_fini_breadcrumb_footer(request, cs);
+ return gen8_emit_fini_breadcrumb_tail(request, cs);
}
static u32 *
@@ -4298,7 +4825,7 @@ gen11_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
PIPE_CONTROL_DC_FLUSH_ENABLE |
PIPE_CONTROL_FLUSH_ENABLE);
- return gen8_emit_fini_breadcrumb_footer(request, cs);
+ return gen8_emit_fini_breadcrumb_tail(request, cs);
}
/*
@@ -4336,7 +4863,7 @@ static u32 *gen12_emit_preempt_busywait(struct i915_request *request, u32 *cs)
}
static __always_inline u32*
-gen12_emit_fini_breadcrumb_footer(struct i915_request *request, u32 *cs)
+gen12_emit_fini_breadcrumb_tail(struct i915_request *request, u32 *cs)
{
*cs++ = MI_USER_INTERRUPT;
@@ -4350,33 +4877,29 @@ gen12_emit_fini_breadcrumb_footer(struct i915_request *request, u32 *cs)
return gen8_emit_wa_tail(request, cs);
}
-static u32 *gen12_emit_fini_breadcrumb(struct i915_request *request, u32 *cs)
+static u32 *gen12_emit_fini_breadcrumb(struct i915_request *rq, u32 *cs)
{
- cs = gen8_emit_ggtt_write(cs,
- request->fence.seqno,
- i915_request_active_timeline(request)->hwsp_offset,
- 0);
-
- return gen12_emit_fini_breadcrumb_footer(request, cs);
+ return gen12_emit_fini_breadcrumb_tail(rq, emit_xcs_breadcrumb(rq, cs));
}
static u32 *
gen12_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
{
- cs = gen8_emit_ggtt_write_rcs(cs,
- request->fence.seqno,
- i915_request_active_timeline(request)->hwsp_offset,
- PIPE_CONTROL_CS_STALL |
- PIPE_CONTROL_TILE_CACHE_FLUSH |
- PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
- PIPE_CONTROL_DEPTH_CACHE_FLUSH |
- /* Wa_1409600907:tgl */
- PIPE_CONTROL_DEPTH_STALL |
- PIPE_CONTROL_DC_FLUSH_ENABLE |
- PIPE_CONTROL_FLUSH_ENABLE |
- PIPE_CONTROL_HDC_PIPELINE_FLUSH);
+ cs = gen12_emit_ggtt_write_rcs(cs,
+ request->fence.seqno,
+ i915_request_active_timeline(request)->hwsp_offset,
+ PIPE_CONTROL0_HDC_PIPELINE_FLUSH,
+ PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_TILE_CACHE_FLUSH |
+ PIPE_CONTROL_FLUSH_L3 |
+ PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
+ PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+ /* Wa_1409600907:tgl */
+ PIPE_CONTROL_DEPTH_STALL |
+ PIPE_CONTROL_DC_FLUSH_ENABLE |
+ PIPE_CONTROL_FLUSH_ENABLE);
- return gen12_emit_fini_breadcrumb_footer(request, cs);
+ return gen12_emit_fini_breadcrumb_tail(request, cs);
}
static void execlists_park(struct intel_engine_cs *engine)
@@ -4428,6 +4951,8 @@ static void execlists_shutdown(struct intel_engine_cs *engine)
static void execlists_release(struct intel_engine_cs *engine)
{
+ engine->sanitize = NULL; /* no longer in control, nothing to sanitize */
+
execlists_shutdown(engine);
intel_engine_cleanup_common(engine);
@@ -4447,9 +4972,10 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
engine->emit_flush = gen8_emit_flush;
engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb;
engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb;
- if (INTEL_GEN(engine->i915) >= 12)
+ if (INTEL_GEN(engine->i915) >= 12) {
engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb;
-
+ engine->emit_flush = gen12_emit_flush;
+ }
engine->set_default_submission = intel_execlists_set_default_submission;
if (INTEL_GEN(engine->i915) < 11) {
@@ -4530,7 +5056,7 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)
* because we only expect rare glitches but nothing
* critical to prevent us from using GPU
*/
- DRM_ERROR("WA batch buffer initialization failed\n");
+ drm_err(&i915->drm, "WA batch buffer initialization failed\n");
if (HAS_LOGICAL_RING_ELSQ(i915)) {
execlists->submit_reg = uncore->regs +
@@ -4558,48 +5084,13 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)
execlists->ccid |= engine->class << (GEN11_ENGINE_CLASS_SHIFT - 32);
}
- reset_csb_pointers(engine);
-
/* Finally, take ownership and responsibility for cleanup! */
+ engine->sanitize = execlists_sanitize;
engine->release = execlists_release;
return 0;
}
-static u32 intel_lr_indirect_ctx_offset(const struct intel_engine_cs *engine)
-{
- u32 indirect_ctx_offset;
-
- switch (INTEL_GEN(engine->i915)) {
- default:
- MISSING_CASE(INTEL_GEN(engine->i915));
- /* fall through */
- case 12:
- indirect_ctx_offset =
- GEN12_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
- break;
- case 11:
- indirect_ctx_offset =
- GEN11_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
- break;
- case 10:
- indirect_ctx_offset =
- GEN10_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
- break;
- case 9:
- indirect_ctx_offset =
- GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
- break;
- case 8:
- indirect_ctx_offset =
- GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
- break;
- }
-
- return indirect_ctx_offset;
-}
-
-
static void init_common_reg_state(u32 * const regs,
const struct intel_engine_cs *engine,
const struct intel_ring *ring,
@@ -4617,30 +5108,27 @@ static void init_common_reg_state(u32 * const regs,
regs[CTX_CONTEXT_CONTROL] = ctl;
regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID;
+ regs[CTX_TIMESTAMP] = 0;
}
static void init_wa_bb_reg_state(u32 * const regs,
- const struct intel_engine_cs *engine,
- u32 pos_bb_per_ctx)
+ const struct intel_engine_cs *engine)
{
const struct i915_ctx_workarounds * const wa_ctx = &engine->wa_ctx;
if (wa_ctx->per_ctx.size) {
const u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);
- regs[pos_bb_per_ctx] =
+ GEM_BUG_ON(lrc_ring_wa_bb_per_ctx(engine) == -1);
+ regs[lrc_ring_wa_bb_per_ctx(engine) + 1] =
(ggtt_offset + wa_ctx->per_ctx.offset) | 0x01;
}
if (wa_ctx->indirect_ctx.size) {
- const u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);
-
- regs[pos_bb_per_ctx + 2] =
- (ggtt_offset + wa_ctx->indirect_ctx.offset) |
- (wa_ctx->indirect_ctx.size / CACHELINE_BYTES);
-
- regs[pos_bb_per_ctx + 4] =
- intel_lr_indirect_ctx_offset(engine) << 6;
+ lrc_ring_setup_indirect_ctx(regs, engine,
+ i915_ggtt_offset(wa_ctx->vma) +
+ wa_ctx->indirect_ctx.offset,
+ wa_ctx->indirect_ctx.size);
}
}
@@ -4689,10 +5177,7 @@ static void execlists_init_reg_state(u32 *regs,
init_common_reg_state(regs, engine, ring, inhibit);
init_ppgtt_reg_state(regs, vm_alias(ce->vm));
- init_wa_bb_reg_state(regs, engine,
- INTEL_GEN(engine->i915) >= 12 ?
- GEN12_CTX_BB_PER_CTX_PTR :
- CTX_BB_PER_CTX_PTR);
+ init_wa_bb_reg_state(regs, engine);
__reset_stop_ring(regs, engine);
}
@@ -4705,29 +5190,18 @@ populate_lr_context(struct intel_context *ce,
{
bool inhibit = true;
void *vaddr;
- int ret;
vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB);
if (IS_ERR(vaddr)) {
- ret = PTR_ERR(vaddr);
- DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
- return ret;
+ drm_dbg(&engine->i915->drm, "Could not map object pages!\n");
+ return PTR_ERR(vaddr);
}
set_redzone(vaddr, engine);
if (engine->default_state) {
- void *defaults;
-
- defaults = i915_gem_object_pin_map(engine->default_state,
- I915_MAP_WB);
- if (IS_ERR(defaults)) {
- ret = PTR_ERR(defaults);
- goto err_unpin_ctx;
- }
-
- memcpy(vaddr, defaults, engine->context_size);
- i915_gem_object_unpin_map(engine->default_state);
+ shmem_read(engine->default_state, 0,
+ vaddr, engine->context_size);
__set_bit(CONTEXT_VALID_BIT, &ce->flags);
inhibit = false;
}
@@ -4739,14 +5213,12 @@ populate_lr_context(struct intel_context *ce,
* The second page of the context object contains some registers which
* must be set up prior to the first execution.
*/
- execlists_init_reg_state(vaddr + LRC_STATE_PN * PAGE_SIZE,
+ execlists_init_reg_state(vaddr + LRC_STATE_OFFSET,
ce, engine, ring, inhibit);
- ret = 0;
-err_unpin_ctx:
__i915_gem_object_flush_map(ctx_obj, 0, engine->context_size);
i915_gem_object_unpin_map(ctx_obj);
- return ret;
+ return 0;
}
static int __execlists_context_alloc(struct intel_context *ce,
@@ -4764,6 +5236,11 @@ static int __execlists_context_alloc(struct intel_context *ce,
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
context_size += I915_GTT_PAGE_SIZE; /* for redzone */
+ if (INTEL_GEN(engine->i915) == 12) {
+ ce->wa_bb_page = context_size / PAGE_SIZE;
+ context_size += PAGE_SIZE;
+ }
+
ctx_obj = i915_gem_object_create_shmem(engine->i915, context_size);
if (IS_ERR(ctx_obj))
return PTR_ERR(ctx_obj);
@@ -4803,7 +5280,8 @@ static int __execlists_context_alloc(struct intel_context *ce,
ret = populate_lr_context(ce, ctx_obj, engine, ring);
if (ret) {
- DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
+ drm_dbg(&engine->i915->drm,
+ "Failed to populate LRC: %d\n", ret);
goto error_ring_free;
}
@@ -4856,6 +5334,8 @@ static void virtual_context_destroy(struct kref *kref)
__execlists_context_fini(&ve->context);
intel_context_fini(&ve->context);
+ intel_engine_free_request_pool(&ve->base);
+
kfree(ve->bonds);
kfree(ve);
}
@@ -4980,12 +5460,15 @@ static void virtual_submission_tasklet(unsigned long data)
return;
local_irq_disable();
- for (n = 0; READ_ONCE(ve->request) && n < ve->num_siblings; n++) {
- struct intel_engine_cs *sibling = ve->siblings[n];
+ for (n = 0; n < ve->num_siblings; n++) {
+ struct intel_engine_cs *sibling = READ_ONCE(ve->siblings[n]);
struct ve_node * const node = &ve->nodes[sibling->id];
struct rb_node **parent, *rb;
bool first;
+ if (!READ_ONCE(ve->request))
+ break; /* already handled by a sibling's tasklet */
+
if (unlikely(!(mask & sibling->mask))) {
if (!RB_EMPTY_NODE(&node->rb)) {
spin_lock(&sibling->active.lock);
@@ -5036,10 +5519,8 @@ static void virtual_submission_tasklet(unsigned long data)
submit_engine:
GEM_BUG_ON(RB_EMPTY_NODE(&node->rb));
node->prio = prio;
- if (first && prio > sibling->execlists.queue_priority_hint) {
- sibling->execlists.queue_priority_hint = prio;
+ if (first && prio > sibling->execlists.queue_priority_hint)
tasklet_hi_schedule(&sibling->execlists.tasklet);
- }
spin_unlock(&sibling->active.lock);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.h b/drivers/gpu/drm/i915/gt/intel_lrc.h
index dfbc214e14f5..91fd8e452d9b 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.h
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.h
@@ -90,6 +90,7 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine);
#define LRC_PPHWSP_SZ (1)
/* After the PPHWSP we have the logical state for the context */
#define LRC_STATE_PN (LRC_PPHWSP_PN + LRC_PPHWSP_SZ)
+#define LRC_STATE_OFFSET (LRC_STATE_PN * PAGE_SIZE)
/* Space within PPHWSP reserved to be used as scratch */
#define LRC_PPHWSP_SCRATCH 0x34
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
index d39b72590e40..93cb6c460508 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
+++ b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
@@ -9,14 +9,13 @@
#include <linux/types.h>
-/* GEN8 to GEN11 Reg State Context */
+/* GEN8 to GEN12 Reg State Context */
#define CTX_CONTEXT_CONTROL (0x02 + 1)
#define CTX_RING_HEAD (0x04 + 1)
#define CTX_RING_TAIL (0x06 + 1)
#define CTX_RING_START (0x08 + 1)
#define CTX_RING_CTL (0x0a + 1)
#define CTX_BB_STATE (0x10 + 1)
-#define CTX_BB_PER_CTX_PTR (0x18 + 1)
#define CTX_TIMESTAMP (0x22 + 1)
#define CTX_PDP3_UDW (0x24 + 1)
#define CTX_PDP3_LDW (0x26 + 1)
@@ -30,9 +29,6 @@
#define GEN9_CTX_RING_MI_MODE 0x54
-/* GEN12+ Reg State Context */
-#define GEN12_CTX_BB_PER_CTX_PTR (0x12 + 1)
-
#define ASSIGN_CTX_PDP(ppgtt, reg_state, n) do { \
u32 *reg_state__ = (reg_state); \
const u64 addr__ = i915_page_dir_dma_addr((ppgtt), (n)); \
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
index 3847ee44b181..ab675d35030d 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
@@ -113,7 +113,6 @@ static void gen9_rc6_enable(struct intel_rc6 *rc6)
struct intel_uncore *uncore = rc6_to_uncore(rc6);
struct intel_engine_cs *engine;
enum intel_engine_id id;
- u32 rc6_mode;
/* 2b: Program RC6 thresholds.*/
if (INTEL_GEN(rc6_to_i915(rc6)) >= 10) {
@@ -165,16 +164,11 @@ static void gen9_rc6_enable(struct intel_rc6 *rc6)
/* 3a: Enable RC6 */
set(uncore, GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
- /* WaRsUseTimeoutMode:cnl (pre-prod) */
- if (IS_CNL_REVID(rc6_to_i915(rc6), CNL_REVID_A0, CNL_REVID_C0))
- rc6_mode = GEN7_RC_CTL_TO_MODE;
- else
- rc6_mode = GEN6_RC_CTL_EI_MODE(1);
rc6->ctl_enable =
GEN6_RC_CTL_HW_ENABLE |
GEN6_RC_CTL_RC6_ENABLE |
- rc6_mode;
+ GEN6_RC_CTL_EI_MODE(1);
/*
* WaRsDisableCoarsePowerGating:skl,cnl
@@ -246,16 +240,18 @@ static void gen6_rc6_enable(struct intel_rc6 *rc6)
ret = sandybridge_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS,
&rc6vids, NULL);
if (IS_GEN(i915, 6) && ret) {
- DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
+ drm_dbg(&i915->drm, "Couldn't check for BIOS workaround\n");
} else if (IS_GEN(i915, 6) &&
(GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
- DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
- GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
+ drm_dbg(&i915->drm,
+ "You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
+ GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
rc6vids &= 0xffff00;
rc6vids |= GEN6_ENCODE_RC6_VID(450);
ret = sandybridge_pcode_write(i915, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
if (ret)
- DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
+ drm_err(&i915->drm,
+ "Couldn't fix incorrect rc6 voltage\n");
}
}
@@ -263,14 +259,15 @@ static void gen6_rc6_enable(struct intel_rc6 *rc6)
static int chv_rc6_init(struct intel_rc6 *rc6)
{
struct intel_uncore *uncore = rc6_to_uncore(rc6);
+ struct drm_i915_private *i915 = rc6_to_i915(rc6);
resource_size_t pctx_paddr, paddr;
resource_size_t pctx_size = 32 * SZ_1K;
u32 pcbr;
pcbr = intel_uncore_read(uncore, VLV_PCBR);
if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
- DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
- paddr = rc6_to_i915(rc6)->dsm.end + 1 - pctx_size;
+ drm_dbg(&i915->drm, "BIOS didn't set up PCBR, fixing up\n");
+ paddr = i915->dsm.end + 1 - pctx_size;
GEM_BUG_ON(paddr > U32_MAX);
pctx_paddr = (paddr & ~4095);
@@ -304,7 +301,7 @@ static int vlv_rc6_init(struct intel_rc6 *rc6)
goto out;
}
- DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
+ drm_dbg(&i915->drm, "BIOS didn't set up PCBR, fixing up\n");
/*
* From the Gunit register HAS:
@@ -316,7 +313,8 @@ static int vlv_rc6_init(struct intel_rc6 *rc6)
*/
pctx = i915_gem_object_create_stolen(i915, pctx_size);
if (IS_ERR(pctx)) {
- DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
+ drm_dbg(&i915->drm,
+ "not enough stolen space for PCTX, disabling\n");
return PTR_ERR(pctx);
}
@@ -398,14 +396,14 @@ static bool bxt_check_bios_rc6_setup(struct intel_rc6 *rc6)
rc_sw_target = intel_uncore_read(uncore, GEN6_RC_STATE);
rc_sw_target &= RC_SW_TARGET_STATE_MASK;
rc_sw_target >>= RC_SW_TARGET_STATE_SHIFT;
- DRM_DEBUG_DRIVER("BIOS enabled RC states: "
+ drm_dbg(&i915->drm, "BIOS enabled RC states: "
"HW_CTRL %s HW_RC6 %s SW_TARGET_STATE %x\n",
onoff(rc_ctl & GEN6_RC_CTL_HW_ENABLE),
onoff(rc_ctl & GEN6_RC_CTL_RC6_ENABLE),
rc_sw_target);
if (!(intel_uncore_read(uncore, RC6_LOCATION) & RC6_CTX_IN_DRAM)) {
- DRM_DEBUG_DRIVER("RC6 Base location not set properly.\n");
+ drm_dbg(&i915->drm, "RC6 Base location not set properly.\n");
enable_rc6 = false;
}
@@ -417,7 +415,7 @@ static bool bxt_check_bios_rc6_setup(struct intel_rc6 *rc6)
intel_uncore_read(uncore, RC6_CTX_BASE) & RC6_CTX_BASE_MASK;
if (!(rc6_ctx_base >= i915->dsm_reserved.start &&
rc6_ctx_base + PAGE_SIZE < i915->dsm_reserved.end)) {
- DRM_DEBUG_DRIVER("RC6 Base address not as expected.\n");
+ drm_dbg(&i915->drm, "RC6 Base address not as expected.\n");
enable_rc6 = false;
}
@@ -425,24 +423,25 @@ static bool bxt_check_bios_rc6_setup(struct intel_rc6 *rc6)
(intel_uncore_read(uncore, PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1 &&
(intel_uncore_read(uncore, PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1 &&
(intel_uncore_read(uncore, PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1)) {
- DRM_DEBUG_DRIVER("Engine Idle wait time not set properly.\n");
+ drm_dbg(&i915->drm,
+ "Engine Idle wait time not set properly.\n");
enable_rc6 = false;
}
if (!intel_uncore_read(uncore, GEN8_PUSHBUS_CONTROL) ||
!intel_uncore_read(uncore, GEN8_PUSHBUS_ENABLE) ||
!intel_uncore_read(uncore, GEN8_PUSHBUS_SHIFT)) {
- DRM_DEBUG_DRIVER("Pushbus not setup properly.\n");
+ drm_dbg(&i915->drm, "Pushbus not setup properly.\n");
enable_rc6 = false;
}
if (!intel_uncore_read(uncore, GEN6_GFXPAUSE)) {
- DRM_DEBUG_DRIVER("GFX pause not setup properly.\n");
+ drm_dbg(&i915->drm, "GFX pause not setup properly.\n");
enable_rc6 = false;
}
if (!intel_uncore_read(uncore, GEN8_MISC_CTRL0)) {
- DRM_DEBUG_DRIVER("GPM control not setup properly.\n");
+ drm_dbg(&i915->drm, "GPM control not setup properly.\n");
enable_rc6 = false;
}
@@ -463,7 +462,7 @@ static bool rc6_supported(struct intel_rc6 *rc6)
return false;
if (IS_GEN9_LP(i915) && !bxt_check_bios_rc6_setup(rc6)) {
- dev_notice(i915->drm.dev,
+ drm_notice(&i915->drm,
"RC6 and powersaving disabled by BIOS\n");
return false;
}
@@ -495,7 +494,7 @@ static bool pctx_corrupted(struct intel_rc6 *rc6)
if (intel_uncore_read(rc6_to_uncore(rc6), GEN8_RC6_CTX_INFO))
return false;
- dev_notice(i915->drm.dev,
+ drm_notice(&i915->drm,
"RC6 context corruption, disabling runtime power management\n");
return true;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_renderstate.c b/drivers/gpu/drm/i915/gt/intel_renderstate.c
index 5954ecc3207f..f59e7875cc5e 100644
--- a/drivers/gpu/drm/i915/gt/intel_renderstate.c
+++ b/drivers/gpu/drm/i915/gt/intel_renderstate.c
@@ -102,7 +102,7 @@ static int render_state_setup(struct intel_renderstate *so,
}
if (rodata->reloc[reloc_index] != -1) {
- DRM_ERROR("only %d relocs resolved\n", reloc_index);
+ drm_err(&i915->drm, "only %d relocs resolved\n", reloc_index);
goto err;
}
@@ -194,7 +194,7 @@ int intel_renderstate_init(struct intel_renderstate *so,
err = i915_vma_pin(so->vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
if (err)
- goto err_vma;
+ goto err_obj;
err = render_state_setup(so, engine->i915);
if (err)
@@ -204,8 +204,6 @@ int intel_renderstate_init(struct intel_renderstate *so,
err_unpin:
i915_vma_unpin(so->vma);
-err_vma:
- i915_vma_close(so->vma);
err_obj:
i915_gem_object_put(obj);
so->vma = NULL;
@@ -221,6 +219,14 @@ int intel_renderstate_emit(struct intel_renderstate *so,
if (!so->vma)
return 0;
+ i915_vma_lock(so->vma);
+ err = i915_request_await_object(rq, so->vma->obj, false);
+ if (err == 0)
+ err = i915_vma_move_to_active(so->vma, rq, 0);
+ i915_vma_unlock(so->vma);
+ if (err)
+ return err;
+
err = engine->emit_bb_start(rq,
so->batch_offset, so->batch_size,
I915_DISPATCH_SECURE);
@@ -235,13 +241,7 @@ int intel_renderstate_emit(struct intel_renderstate *so,
return err;
}
- i915_vma_lock(so->vma);
- err = i915_request_await_object(rq, so->vma->obj, false);
- if (err == 0)
- err = i915_vma_move_to_active(so->vma, rq, 0);
- i915_vma_unlock(so->vma);
-
- return err;
+ return 0;
}
void intel_renderstate_fini(struct intel_renderstate *so)
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 80db3c9d785e..39070b514e65 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -109,7 +109,7 @@ static bool mark_guilty(struct i915_request *rq)
goto out;
}
- dev_notice(ctx->i915->drm.dev,
+ drm_notice(&ctx->i915->drm,
"%s context reset due to GPU hang\n",
ctx->name);
@@ -755,7 +755,7 @@ static int gt_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
for_each_engine(engine, gt, id)
__intel_engine_reset(engine, stalled_mask & engine->mask);
- i915_gem_restore_fences(gt->ggtt);
+ intel_ggtt_restore_fences(gt->ggtt);
return err;
}
@@ -1031,7 +1031,7 @@ void intel_gt_reset(struct intel_gt *gt,
goto unlock;
if (reason)
- dev_notice(gt->i915->drm.dev,
+ drm_notice(&gt->i915->drm,
"Resetting chip for %s\n", reason);
atomic_inc(&gt->i915->gpu_error.reset_count);
@@ -1039,7 +1039,7 @@ void intel_gt_reset(struct intel_gt *gt,
if (!intel_has_gpu_reset(gt)) {
if (i915_modparams.reset)
- dev_err(gt->i915->drm.dev, "GPU reset not supported\n");
+ drm_err(&gt->i915->drm, "GPU reset not supported\n");
else
drm_dbg(&gt->i915->drm, "GPU reset disabled\n");
goto error;
@@ -1049,7 +1049,7 @@ void intel_gt_reset(struct intel_gt *gt,
intel_runtime_pm_disable_interrupts(gt->i915);
if (do_reset(gt, stalled_mask)) {
- dev_err(gt->i915->drm.dev, "Failed to reset chip\n");
+ drm_err(&gt->i915->drm, "Failed to reset chip\n");
goto taint;
}
@@ -1111,7 +1111,7 @@ static inline int intel_gt_reset_engine(struct intel_engine_cs *engine)
/**
* intel_engine_reset - reset GPU engine to recover from a hang
* @engine: engine to reset
- * @msg: reason for GPU reset; or NULL for no dev_notice()
+ * @msg: reason for GPU reset; or NULL for no drm_notice()
*
* Reset a specific GPU engine. Useful if a hang is detected.
* Returns zero on successful reset or otherwise an error code.
@@ -1136,7 +1136,7 @@ int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
reset_prepare_engine(engine);
if (msg)
- dev_notice(engine->i915->drm.dev,
+ drm_notice(&engine->i915->drm,
"Resetting %s for %s\n", engine->name, msg);
atomic_inc(&engine->i915->gpu_error.reset_engine_count[engine->uabi_class]);
@@ -1381,7 +1381,7 @@ static void intel_wedge_me(struct work_struct *work)
{
struct intel_wedge_me *w = container_of(work, typeof(*w), work.work);
- dev_err(w->gt->i915->drm.dev,
+ drm_err(&w->gt->i915->drm,
"%s timed out, cancelling all in-flight rendering.\n",
w->name);
intel_gt_set_wedged(w->gt);
diff --git a/drivers/gpu/drm/i915/gt/intel_ring.h b/drivers/gpu/drm/i915/gt/intel_ring.h
index 5bdce24994aa..cc0ebca65167 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring.h
+++ b/drivers/gpu/drm/i915/gt/intel_ring.h
@@ -88,6 +88,8 @@ static inline u32 intel_ring_offset(const struct i915_request *rq, void *addr)
static inline void
assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
{
+ unsigned int head = READ_ONCE(ring->head);
+
GEM_BUG_ON(!intel_ring_offset_valid(ring, tail));
/*
@@ -105,8 +107,7 @@ assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
* into the same cacheline as ring->head.
*/
#define cacheline(a) round_down(a, CACHELINE_BYTES)
- GEM_BUG_ON(cacheline(tail) == cacheline(ring->head) &&
- tail < ring->head);
+ GEM_BUG_ON(cacheline(tail) == cacheline(head) && tail < head);
#undef cacheline
}
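
The point of the hunk above is that ring->head may be updated concurrently, so the assertion now takes a single READ_ONCE() snapshot and uses it on both sides of the comparison instead of reloading the field twice. A stand-alone user-space analogue of the same pattern, using C11 atomics in place of READ_ONCE(); the 64-byte cacheline constant is illustrative only:

#include <assert.h>
#include <stdatomic.h>

static _Atomic unsigned int ring_head;

static void assert_tail_valid(unsigned int tail)
{
	/*
	 * One load; both checks below see the same value even if a
	 * writer advances ring_head in between.
	 */
	unsigned int head = atomic_load_explicit(&ring_head,
						 memory_order_relaxed);

	assert(!(head / 64 == tail / 64 && tail < head));
}
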
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index fdc3f10e12aa..ca7286e58409 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -42,6 +42,7 @@
#include "intel_reset.h"
#include "intel_ring.h"
#include "intel_workarounds.h"
+#include "shmem_utils.h"
/* Rough estimate of the typical request size, performing a flush,
* set-context and then emitting the batch.
@@ -577,8 +578,9 @@ static void flush_cs_tlb(struct intel_engine_cs *engine)
RING_INSTPM(engine->mmio_base),
INSTPM_SYNC_FLUSH, 0,
1000))
- DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
- engine->name);
+ drm_err(&dev_priv->drm,
+ "%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
+ engine->name);
}
static void ring_setup_status_page(struct intel_engine_cs *engine)
@@ -601,8 +603,9 @@ static bool stop_ring(struct intel_engine_cs *engine)
MODE_IDLE,
MODE_IDLE,
1000)) {
- DRM_ERROR("%s : timed out trying to stop ring\n",
- engine->name);
+ drm_err(&dev_priv->drm,
+ "%s : timed out trying to stop ring\n",
+ engine->name);
/*
* Sometimes we observe that the idle flag is not
@@ -661,22 +664,23 @@ static int xcs_resume(struct intel_engine_cs *engine)
/* WaClearRingBufHeadRegAtInit:ctg,elk */
if (!stop_ring(engine)) {
/* G45 ring initialization often fails to reset head to zero */
- DRM_DEBUG_DRIVER("%s head not reset to zero "
+ drm_dbg(&dev_priv->drm, "%s head not reset to zero "
+ "ctl %08x head %08x tail %08x start %08x\n",
+ engine->name,
+ ENGINE_READ(engine, RING_CTL),
+ ENGINE_READ(engine, RING_HEAD),
+ ENGINE_READ(engine, RING_TAIL),
+ ENGINE_READ(engine, RING_START));
+
+ if (!stop_ring(engine)) {
+ drm_err(&dev_priv->drm,
+ "failed to set %s head to zero "
"ctl %08x head %08x tail %08x start %08x\n",
engine->name,
ENGINE_READ(engine, RING_CTL),
ENGINE_READ(engine, RING_HEAD),
ENGINE_READ(engine, RING_TAIL),
ENGINE_READ(engine, RING_START));
-
- if (!stop_ring(engine)) {
- DRM_ERROR("failed to set %s head to zero "
- "ctl %08x head %08x tail %08x start %08x\n",
- engine->name,
- ENGINE_READ(engine, RING_CTL),
- ENGINE_READ(engine, RING_HEAD),
- ENGINE_READ(engine, RING_TAIL),
- ENGINE_READ(engine, RING_START));
ret = -EIO;
goto out;
}
@@ -719,7 +723,7 @@ static int xcs_resume(struct intel_engine_cs *engine)
RING_CTL(engine->mmio_base),
RING_VALID, RING_VALID,
50)) {
- DRM_ERROR("%s initialization failed "
+ drm_err(&dev_priv->drm, "%s initialization failed "
"ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
engine->name,
ENGINE_READ(engine, RING_CTL),
@@ -1238,7 +1242,7 @@ alloc_context_vma(struct intel_engine_cs *engine)
i915_gem_object_set_cache_coherency(obj, I915_CACHE_L3_LLC);
if (engine->default_state) {
- void *defaults, *vaddr;
+ void *vaddr;
vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
if (IS_ERR(vaddr)) {
@@ -1246,15 +1250,8 @@ alloc_context_vma(struct intel_engine_cs *engine)
goto err_obj;
}
- defaults = i915_gem_object_pin_map(engine->default_state,
- I915_MAP_WB);
- if (IS_ERR(defaults)) {
- err = PTR_ERR(defaults);
- goto err_map;
- }
-
- memcpy(vaddr, defaults, engine->context_size);
- i915_gem_object_unpin_map(engine->default_state);
+ shmem_read(engine->default_state, 0,
+ vaddr, engine->context_size);
i915_gem_object_flush_map(obj);
i915_gem_object_unpin_map(obj);
@@ -1268,8 +1265,6 @@ alloc_context_vma(struct intel_engine_cs *engine)
return vma;
-err_map:
- i915_gem_object_unpin_map(obj);
err_obj:
i915_gem_object_put(obj);
return ERR_PTR(err);
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index 19542fd9e207..2f59fc6df3c2 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -8,12 +8,15 @@
#include "i915_drv.h"
#include "intel_gt.h"
+#include "intel_gt_clock_utils.h"
#include "intel_gt_irq.h"
#include "intel_gt_pm_irq.h"
#include "intel_rps.h"
#include "intel_sideband.h"
#include "../../../platform/x86/intel_ips.h"
+#define BUSY_MAX_EI 20u /* ms */
+
/*
* Lock protecting IPS related data structures
*/
@@ -44,6 +47,100 @@ static inline void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val)
intel_uncore_write_fw(uncore, reg, val);
}
+static void rps_timer(struct timer_list *t)
+{
+ struct intel_rps *rps = from_timer(rps, t, timer);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ s64 max_busy[3] = {};
+ ktime_t dt, last;
+
+ for_each_engine(engine, rps_to_gt(rps), id) {
+ s64 busy;
+ int i;
+
+ dt = intel_engine_get_busy_time(engine);
+ last = engine->stats.rps;
+ engine->stats.rps = dt;
+
+ busy = ktime_to_ns(ktime_sub(dt, last));
+ for (i = 0; i < ARRAY_SIZE(max_busy); i++) {
+ if (busy > max_busy[i])
+ swap(busy, max_busy[i]);
+ }
+ }
+
+ dt = ktime_get();
+ last = rps->pm_timestamp;
+ rps->pm_timestamp = dt;
+
+ if (intel_rps_is_active(rps)) {
+ s64 busy;
+ int i;
+
+ dt = ktime_sub(dt, last);
+
+ /*
+ * Our goal is to evaluate each engine independently, so we run
+ * at the lowest clocks required to sustain the heaviest
+ * workload. However, a task may be split into sequential
+ * dependent operations across a set of engines, such that
+ * the independent contributions do not account for high load,
+ * but overall the task is GPU bound. For example, consider
+ * video decode on vcs followed by colour post-processing
+ * on vecs, followed by general post-processing on rcs.
+ * Since multiple engines being active does not necessarily imply a
+ * single continuous workload across all engines, we hedge our
+ * bets by only contributing a factor of the distributed
+ * load into our busyness calculation.
+ */
+ busy = max_busy[0];
+ for (i = 1; i < ARRAY_SIZE(max_busy); i++) {
+ if (!max_busy[i])
+ break;
+
+ busy += div_u64(max_busy[i], 1 << i);
+ }
+ GT_TRACE(rps_to_gt(rps),
+ "busy:%lld [%d%%], max:[%lld, %lld, %lld], interval:%d\n",
+ busy, (int)div64_u64(100 * busy, dt),
+ max_busy[0], max_busy[1], max_busy[2],
+ rps->pm_interval);
+
+ if (100 * busy > rps->power.up_threshold * dt &&
+ rps->cur_freq < rps->max_freq_softlimit) {
+ rps->pm_iir |= GEN6_PM_RP_UP_THRESHOLD;
+ rps->pm_interval = 1;
+ schedule_work(&rps->work);
+ } else if (100 * busy < rps->power.down_threshold * dt &&
+ rps->cur_freq > rps->min_freq_softlimit) {
+ rps->pm_iir |= GEN6_PM_RP_DOWN_THRESHOLD;
+ rps->pm_interval = 1;
+ schedule_work(&rps->work);
+ } else {
+ rps->last_adj = 0;
+ }
+
+ mod_timer(&rps->timer,
+ jiffies + msecs_to_jiffies(rps->pm_interval));
+ rps->pm_interval = min(rps->pm_interval * 2, BUSY_MAX_EI);
+ }
+}
+
+static void rps_start_timer(struct intel_rps *rps)
+{
+ rps->pm_timestamp = ktime_sub(ktime_get(), rps->pm_timestamp);
+ rps->pm_interval = 1;
+ mod_timer(&rps->timer, jiffies + 1);
+}
+
+static void rps_stop_timer(struct intel_rps *rps)
+{
+ del_timer_sync(&rps->timer);
+ rps->pm_timestamp = ktime_sub(ktime_get(), rps->pm_timestamp);
+ cancel_work_sync(&rps->work);
+}
+
static u32 rps_pm_mask(struct intel_rps *rps, u8 val)
{
u32 mask = 0;
@@ -57,7 +154,7 @@ static u32 rps_pm_mask(struct intel_rps *rps, u8 val)
if (val < rps->max_freq_softlimit)
mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
- mask &= READ_ONCE(rps->pm_events);
+ mask &= rps->pm_events;
return rps_pm_sanitize_mask(rps, ~mask);
}
@@ -70,18 +167,11 @@ static void rps_reset_ei(struct intel_rps *rps)
static void rps_enable_interrupts(struct intel_rps *rps)
{
struct intel_gt *gt = rps_to_gt(rps);
- u32 events;
- rps_reset_ei(rps);
+ GT_TRACE(gt, "interrupts:on rps->pm_events: %x, rps_pm_mask:%x\n",
+ rps->pm_events, rps_pm_mask(rps, rps->last_freq));
- if (IS_VALLEYVIEW(gt->i915))
- /* WaGsvRC0ResidencyMethod:vlv */
- events = GEN6_PM_RP_UP_EI_EXPIRED;
- else
- events = (GEN6_PM_RP_UP_THRESHOLD |
- GEN6_PM_RP_DOWN_THRESHOLD |
- GEN6_PM_RP_DOWN_TIMEOUT);
- WRITE_ONCE(rps->pm_events, events);
+ rps_reset_ei(rps);
spin_lock_irq(&gt->irq_lock);
gen6_gt_pm_enable_irq(gt, rps->pm_events);
@@ -120,8 +210,6 @@ static void rps_disable_interrupts(struct intel_rps *rps)
{
struct intel_gt *gt = rps_to_gt(rps);
- WRITE_ONCE(rps->pm_events, 0);
-
intel_uncore_write(gt->uncore,
GEN6_PMINTRMSK, rps_pm_sanitize_mask(rps, ~0u));
@@ -140,6 +228,7 @@ static void rps_disable_interrupts(struct intel_rps *rps)
cancel_work_sync(&rps->work);
rps_reset_interrupts(rps);
+ GT_TRACE(gt, "interrupts:off\n");
}
static const struct cparams {
@@ -186,14 +275,12 @@ static void gen5_rps_init(struct intel_rps *rps)
fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
MEMMODE_FSTART_SHIFT;
- DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
- fmax, fmin, fstart);
+ drm_dbg(&i915->drm, "fmax: %d, fmin: %d, fstart: %d\n",
+ fmax, fmin, fstart);
rps->min_freq = fmax;
+ rps->efficient_freq = fstart;
rps->max_freq = fmin;
-
- rps->idle_freq = rps->min_freq;
- rps->cur_freq = rps->idle_freq;
}
static unsigned long
@@ -456,7 +543,8 @@ static bool gen5_rps_enable(struct intel_rps *rps)
if (wait_for_atomic((intel_uncore_read(uncore, MEMSWCTL) &
MEMCTL_CMD_STS) == 0, 10))
- DRM_ERROR("stuck trying to change perf mode\n");
+ drm_err(&uncore->i915->drm,
+ "stuck trying to change perf mode\n");
mdelay(1);
gen5_rps_set(rps, rps->cur_freq);
@@ -533,8 +621,8 @@ static u32 rps_limits(struct intel_rps *rps, u8 val)
static void rps_set_power(struct intel_rps *rps, int new_power)
{
- struct intel_uncore *uncore = rps_to_uncore(rps);
- struct drm_i915_private *i915 = rps_to_i915(rps);
+ struct intel_gt *gt = rps_to_gt(rps);
+ struct intel_uncore *uncore = gt->uncore;
u32 threshold_up = 0, threshold_down = 0; /* in % */
u32 ei_up = 0, ei_down = 0;
@@ -543,55 +631,49 @@ static void rps_set_power(struct intel_rps *rps, int new_power)
if (new_power == rps->power.mode)
return;
+ threshold_up = 95;
+ threshold_down = 85;
+
/* Note the units here are not exactly 1us, but 1280ns. */
switch (new_power) {
case LOW_POWER:
- /* Upclock if more than 95% busy over 16ms */
ei_up = 16000;
- threshold_up = 95;
-
- /* Downclock if less than 85% busy over 32ms */
ei_down = 32000;
- threshold_down = 85;
break;
case BETWEEN:
- /* Upclock if more than 90% busy over 13ms */
ei_up = 13000;
- threshold_up = 90;
-
- /* Downclock if less than 75% busy over 32ms */
ei_down = 32000;
- threshold_down = 75;
break;
case HIGH_POWER:
- /* Upclock if more than 85% busy over 10ms */
ei_up = 10000;
- threshold_up = 85;
-
- /* Downclock if less than 60% busy over 32ms */
ei_down = 32000;
- threshold_down = 60;
break;
}
/* When byt can survive without system hang with dynamic
* sw freq adjustments, this restriction can be lifted.
*/
- if (IS_VALLEYVIEW(i915))
+ if (IS_VALLEYVIEW(gt->i915))
goto skip_hw_write;
- set(uncore, GEN6_RP_UP_EI, GT_INTERVAL_FROM_US(i915, ei_up));
+ GT_TRACE(gt,
+ "changing power mode [%d], up %d%% @ %dus, down %d%% @ %dus\n",
+ new_power, threshold_up, ei_up, threshold_down, ei_down);
+
+ set(uncore, GEN6_RP_UP_EI,
+ intel_gt_ns_to_pm_interval(gt, ei_up * 1000));
set(uncore, GEN6_RP_UP_THRESHOLD,
- GT_INTERVAL_FROM_US(i915, ei_up * threshold_up / 100));
+ intel_gt_ns_to_pm_interval(gt, ei_up * threshold_up * 10));
- set(uncore, GEN6_RP_DOWN_EI, GT_INTERVAL_FROM_US(i915, ei_down));
+ set(uncore, GEN6_RP_DOWN_EI,
+ intel_gt_ns_to_pm_interval(gt, ei_down * 1000));
set(uncore, GEN6_RP_DOWN_THRESHOLD,
- GT_INTERVAL_FROM_US(i915, ei_down * threshold_down / 100));
+ intel_gt_ns_to_pm_interval(gt, ei_down * threshold_down * 10));
set(uncore, GEN6_RP_CONTROL,
- (INTEL_GEN(i915) > 9 ? 0 : GEN6_RP_MEDIA_TURBO) |
+ (INTEL_GEN(gt->i915) > 9 ? 0 : GEN6_RP_MEDIA_TURBO) |
GEN6_RP_MEDIA_HW_NORMAL_MODE |
GEN6_RP_MEDIA_IS_GFX |
GEN6_RP_ENABLE |
@@ -646,9 +728,11 @@ static void gen6_rps_set_thresholds(struct intel_rps *rps, u8 val)
void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive)
{
+ GT_TRACE(rps_to_gt(rps), "mark interactive: %s\n", yesno(interactive));
+
mutex_lock(&rps->power.mutex);
if (interactive) {
- if (!rps->power.interactive++ && READ_ONCE(rps->active))
+ if (!rps->power.interactive++ && intel_rps_is_active(rps))
rps_set_power(rps, HIGH_POWER);
} else {
GEM_BUG_ON(!rps->power.interactive);
@@ -673,6 +757,9 @@ static int gen6_rps_set(struct intel_rps *rps, u8 val)
GEN6_AGGRESSIVE_TURBO);
set(uncore, GEN6_RPNSWREQ, swreq);
+ GT_TRACE(rps_to_gt(rps), "set val:%x, freq:%d, swreq:%x\n",
+ val, intel_gpu_freq(rps, val), swreq);
+
return 0;
}
@@ -685,6 +772,9 @@ static int vlv_rps_set(struct intel_rps *rps, u8 val)
err = vlv_punit_write(i915, PUNIT_REG_GPU_FREQ_REQ, val);
vlv_punit_put(i915);
+ GT_TRACE(rps_to_gt(rps), "set val:%x, freq:%d\n",
+ val, intel_gpu_freq(rps, val));
+
return err;
}
@@ -715,29 +805,30 @@ static int rps_set(struct intel_rps *rps, u8 val, bool update)
void intel_rps_unpark(struct intel_rps *rps)
{
- u8 freq;
-
- if (!rps->enabled)
+ if (!intel_rps_is_enabled(rps))
return;
+ GT_TRACE(rps_to_gt(rps), "unpark:%x\n", rps->cur_freq);
+
/*
* Use the user's desired frequency as a guide, but for better
* performance, jump directly to RPe as our starting frequency.
*/
mutex_lock(&rps->lock);
- WRITE_ONCE(rps->active, true);
-
- freq = max(rps->cur_freq, rps->efficient_freq),
- freq = clamp(freq, rps->min_freq_softlimit, rps->max_freq_softlimit);
- intel_rps_set(rps, freq);
-
- rps->last_adj = 0;
+ intel_rps_set_active(rps);
+ intel_rps_set(rps,
+ clamp(rps->cur_freq,
+ rps->min_freq_softlimit,
+ rps->max_freq_softlimit));
mutex_unlock(&rps->lock);
- if (INTEL_GEN(rps_to_i915(rps)) >= 6)
+ rps->pm_iir = 0;
+ if (intel_rps_has_interrupts(rps))
rps_enable_interrupts(rps);
+ if (intel_rps_uses_timer(rps))
+ rps_start_timer(rps);
if (IS_GEN(rps_to_i915(rps), 5))
gen5_rps_update(rps);
@@ -745,15 +836,16 @@ void intel_rps_unpark(struct intel_rps *rps)
void intel_rps_park(struct intel_rps *rps)
{
- struct drm_i915_private *i915 = rps_to_i915(rps);
+ int adj;
- if (!rps->enabled)
+ if (!intel_rps_clear_active(rps))
return;
- if (INTEL_GEN(i915) >= 6)
+ if (intel_rps_uses_timer(rps))
+ rps_stop_timer(rps);
+ if (intel_rps_has_interrupts(rps))
rps_disable_interrupts(rps);
- WRITE_ONCE(rps->active, false);
if (rps->last_freq <= rps->idle_freq)
return;
@@ -784,8 +876,15 @@ void intel_rps_park(struct intel_rps *rps)
* (Note we accommodate Cherryview's limitation of only using an
* even bin by applying it to all.)
*/
- rps->cur_freq =
- max_t(int, round_down(rps->cur_freq - 1, 2), rps->min_freq);
+ adj = rps->last_adj;
+ if (adj < 0)
+ adj *= 2;
+ else /* CHV needs even encode values */
+ adj = -2;
+ rps->last_adj = adj;
+ rps->cur_freq = max_t(int, rps->cur_freq + adj, rps->min_freq);
+
+ GT_TRACE(rps_to_gt(rps), "park:%x\n", rps->cur_freq);
}
void intel_rps_boost(struct i915_request *rq)
@@ -793,7 +892,7 @@ void intel_rps_boost(struct i915_request *rq)
struct intel_rps *rps = &READ_ONCE(rq->engine)->gt->rps;
unsigned long flags;
- if (i915_request_signaled(rq) || !READ_ONCE(rps->active))
+ if (i915_request_signaled(rq) || !intel_rps_is_active(rps))
return;
/* Serializes with i915_request_retire() */
@@ -802,6 +901,9 @@ void intel_rps_boost(struct i915_request *rq)
!dma_fence_is_signaled_locked(&rq->fence)) {
set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags);
+ GT_TRACE(rps_to_gt(rps), "boost fence:%llx:%llx\n",
+ rq->fence.context, rq->fence.seqno);
+
if (!atomic_fetch_inc(&rps->num_waiters) &&
READ_ONCE(rps->cur_freq) < rps->boost_freq)
schedule_work(&rps->work);
@@ -819,7 +921,7 @@ int intel_rps_set(struct intel_rps *rps, u8 val)
GEM_BUG_ON(val > rps->max_freq);
GEM_BUG_ON(val < rps->min_freq);
- if (rps->active) {
+ if (intel_rps_is_active(rps)) {
err = rps_set(rps, val, true);
if (err)
return err;
@@ -828,7 +930,7 @@ int intel_rps_set(struct intel_rps *rps, u8 val)
* Make sure we continue to get interrupts
* until we hit the minimum or maximum frequencies.
*/
- if (INTEL_GEN(rps_to_i915(rps)) >= 6) {
+ if (intel_rps_has_interrupts(rps)) {
struct intel_uncore *uncore = rps_to_uncore(rps);
set(uncore,
@@ -896,12 +998,14 @@ static void gen6_rps_init(struct intel_rps *rps)
static bool rps_reset(struct intel_rps *rps)
{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+
/* force a reset */
rps->power.mode = -1;
rps->last_freq = -1;
if (rps_set(rps, rps->min_freq, true)) {
- DRM_ERROR("Failed to reset RPS to initial values\n");
+ drm_err(&i915->drm, "Failed to reset RPS to initial values\n");
return false;
}
@@ -912,20 +1016,18 @@ static bool rps_reset(struct intel_rps *rps)
/* See the Gen9_GT_PM_Programming_Guide doc for the below */
static bool gen9_rps_enable(struct intel_rps *rps)
{
- struct drm_i915_private *i915 = rps_to_i915(rps);
- struct intel_uncore *uncore = rps_to_uncore(rps);
+ struct intel_gt *gt = rps_to_gt(rps);
+ struct intel_uncore *uncore = gt->uncore;
/* Program defaults and thresholds for RPS */
- if (IS_GEN(i915, 9))
+ if (IS_GEN(gt->i915, 9))
intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ,
GEN9_FREQUENCY(rps->rp1_freq));
- /* 1 second timeout */
- intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT,
- GT_INTERVAL_FROM_US(i915, 1000000));
-
intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 0xa);
+ rps->pm_events = GEN6_PM_RP_UP_THRESHOLD | GEN6_PM_RP_DOWN_THRESHOLD;
+
return rps_reset(rps);
}
@@ -936,12 +1038,10 @@ static bool gen8_rps_enable(struct intel_rps *rps)
intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ,
HSW_FREQUENCY(rps->rp1_freq));
- /* NB: Docs say 1s, and 1000000 - which aren't equivalent */
- intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT,
- 100000000 / 128); /* 1 second timeout */
-
intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);
+ rps->pm_events = GEN6_PM_RP_UP_THRESHOLD | GEN6_PM_RP_DOWN_THRESHOLD;
+
return rps_reset(rps);
}
@@ -953,6 +1053,10 @@ static bool gen6_rps_enable(struct intel_rps *rps)
intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 50000);
intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);
+ rps->pm_events = (GEN6_PM_RP_UP_THRESHOLD |
+ GEN6_PM_RP_DOWN_THRESHOLD |
+ GEN6_PM_RP_DOWN_TIMEOUT);
+
return rps_reset(rps);
}
@@ -1038,6 +1142,10 @@ static bool chv_rps_enable(struct intel_rps *rps)
GEN6_RP_UP_BUSY_AVG |
GEN6_RP_DOWN_IDLE_AVG);
+ rps->pm_events = (GEN6_PM_RP_UP_THRESHOLD |
+ GEN6_PM_RP_DOWN_THRESHOLD |
+ GEN6_PM_RP_DOWN_TIMEOUT);
+
/* Setting Fixed Bias */
vlv_punit_get(i915);
@@ -1052,8 +1160,8 @@ static bool chv_rps_enable(struct intel_rps *rps)
drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0,
"GPLL not enabled\n");
- DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
- DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
+ drm_dbg(&i915->drm, "GPLL enabled? %s\n", yesno(val & GPLLENABLE));
+ drm_dbg(&i915->drm, "GPU status: 0x%08x\n", val);
return rps_reset(rps);
}
@@ -1136,6 +1244,9 @@ static bool vlv_rps_enable(struct intel_rps *rps)
GEN6_RP_UP_BUSY_AVG |
GEN6_RP_DOWN_IDLE_CONT);
+ /* WaGsvRC0ResidencyMethod:vlv */
+ rps->pm_events = GEN6_PM_RP_UP_EI_EXPIRED;
+
vlv_punit_get(i915);
/* Setting Fixed Bias */
@@ -1150,8 +1261,8 @@ static bool vlv_rps_enable(struct intel_rps *rps)
drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0,
"GPLL not enabled\n");
- DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
- DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
+ drm_dbg(&i915->drm, "GPLL enabled? %s\n", yesno(val & GPLLENABLE));
+ drm_dbg(&i915->drm, "GPU status: 0x%08x\n", val);
return rps_reset(rps);
}
@@ -1194,33 +1305,71 @@ static unsigned long __ips_gfx_val(struct intel_ips *ips)
return ips->gfx_power + state2;
}
+static bool has_busy_stats(struct intel_rps *rps)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, rps_to_gt(rps), id) {
+ if (!intel_engine_supports_stats(engine))
+ return false;
+ }
+
+ return true;
+}
+
void intel_rps_enable(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
struct intel_uncore *uncore = rps_to_uncore(rps);
+ bool enabled = false;
+
+ if (!HAS_RPS(i915))
+ return;
+
+ intel_gt_check_clock_frequency(rps_to_gt(rps));
intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
- if (IS_CHERRYVIEW(i915))
- rps->enabled = chv_rps_enable(rps);
+ if (rps->max_freq <= rps->min_freq)
+ /* leave disabled, no room for dynamic reclocking */;
+ else if (IS_CHERRYVIEW(i915))
+ enabled = chv_rps_enable(rps);
else if (IS_VALLEYVIEW(i915))
- rps->enabled = vlv_rps_enable(rps);
+ enabled = vlv_rps_enable(rps);
else if (INTEL_GEN(i915) >= 9)
- rps->enabled = gen9_rps_enable(rps);
+ enabled = gen9_rps_enable(rps);
else if (INTEL_GEN(i915) >= 8)
- rps->enabled = gen8_rps_enable(rps);
+ enabled = gen8_rps_enable(rps);
else if (INTEL_GEN(i915) >= 6)
- rps->enabled = gen6_rps_enable(rps);
+ enabled = gen6_rps_enable(rps);
else if (IS_IRONLAKE_M(i915))
- rps->enabled = gen5_rps_enable(rps);
+ enabled = gen5_rps_enable(rps);
+ else
+ MISSING_CASE(INTEL_GEN(i915));
intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
- if (!rps->enabled)
+ if (!enabled)
return;
- drm_WARN_ON(&i915->drm, rps->max_freq < rps->min_freq);
- drm_WARN_ON(&i915->drm, rps->idle_freq > rps->max_freq);
+ GT_TRACE(rps_to_gt(rps),
+ "min:%x, max:%x, freq:[%d, %d]\n",
+ rps->min_freq, rps->max_freq,
+ intel_gpu_freq(rps, rps->min_freq),
+ intel_gpu_freq(rps, rps->max_freq));
- drm_WARN_ON(&i915->drm, rps->efficient_freq < rps->min_freq);
- drm_WARN_ON(&i915->drm, rps->efficient_freq > rps->max_freq);
+ GEM_BUG_ON(rps->max_freq < rps->min_freq);
+ GEM_BUG_ON(rps->idle_freq > rps->max_freq);
+
+ GEM_BUG_ON(rps->efficient_freq < rps->min_freq);
+ GEM_BUG_ON(rps->efficient_freq > rps->max_freq);
+
+ if (has_busy_stats(rps))
+ intel_rps_set_timer(rps);
+ else if (INTEL_GEN(i915) >= 6)
+ intel_rps_set_interrupts(rps);
+ else
+ /* Ironlake currently uses intel_ips.ko */ {}
+
+ intel_rps_set_enabled(rps);
}
static void gen6_rps_disable(struct intel_rps *rps)
@@ -1232,7 +1381,9 @@ void intel_rps_disable(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
- rps->enabled = false;
+ intel_rps_clear_enabled(rps);
+ intel_rps_clear_interrupts(rps);
+ intel_rps_clear_timer(rps);
if (INTEL_GEN(i915) >= 6)
gen6_rps_disable(rps);
@@ -1308,7 +1459,8 @@ static void vlv_init_gpll_ref_freq(struct intel_rps *rps)
CCK_GPLL_CLOCK_CONTROL,
i915->czclk_freq);
- DRM_DEBUG_DRIVER("GPLL reference freq: %d kHz\n", rps->gpll_ref_freq);
+ drm_dbg(&i915->drm, "GPLL reference freq: %d kHz\n",
+ rps->gpll_ref_freq);
}
static void vlv_rps_init(struct intel_rps *rps)
@@ -1336,28 +1488,24 @@ static void vlv_rps_init(struct intel_rps *rps)
i915->mem_freq = 1333;
break;
}
- DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", i915->mem_freq);
+ drm_dbg(&i915->drm, "DDR speed: %d MHz\n", i915->mem_freq);
rps->max_freq = vlv_rps_max_freq(rps);
rps->rp0_freq = rps->max_freq;
- DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(rps, rps->max_freq),
- rps->max_freq);
+ drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n",
+ intel_gpu_freq(rps, rps->max_freq), rps->max_freq);
rps->efficient_freq = vlv_rps_rpe_freq(rps);
- DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(rps, rps->efficient_freq),
- rps->efficient_freq);
+ drm_dbg(&i915->drm, "RPe GPU freq: %d MHz (%u)\n",
+ intel_gpu_freq(rps, rps->efficient_freq), rps->efficient_freq);
rps->rp1_freq = vlv_rps_guar_freq(rps);
- DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(rps, rps->rp1_freq),
- rps->rp1_freq);
+ drm_dbg(&i915->drm, "RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
+ intel_gpu_freq(rps, rps->rp1_freq), rps->rp1_freq);
rps->min_freq = vlv_rps_min_freq(rps);
- DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(rps, rps->min_freq),
- rps->min_freq);
+ drm_dbg(&i915->drm, "min GPU freq: %d MHz (%u)\n",
+ intel_gpu_freq(rps, rps->min_freq), rps->min_freq);
vlv_iosf_sb_put(i915,
BIT(VLV_IOSF_SB_PUNIT) |
@@ -1387,28 +1535,24 @@ static void chv_rps_init(struct intel_rps *rps)
i915->mem_freq = 1600;
break;
}
- DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", i915->mem_freq);
+ drm_dbg(&i915->drm, "DDR speed: %d MHz\n", i915->mem_freq);
rps->max_freq = chv_rps_max_freq(rps);
rps->rp0_freq = rps->max_freq;
- DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(rps, rps->max_freq),
- rps->max_freq);
+ drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n",
+ intel_gpu_freq(rps, rps->max_freq), rps->max_freq);
rps->efficient_freq = chv_rps_rpe_freq(rps);
- DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(rps, rps->efficient_freq),
- rps->efficient_freq);
+ drm_dbg(&i915->drm, "RPe GPU freq: %d MHz (%u)\n",
+ intel_gpu_freq(rps, rps->efficient_freq), rps->efficient_freq);
rps->rp1_freq = chv_rps_guar_freq(rps);
- DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(rps, rps->rp1_freq),
- rps->rp1_freq);
+ drm_dbg(&i915->drm, "RP1(Guar) GPU freq: %d MHz (%u)\n",
+ intel_gpu_freq(rps, rps->rp1_freq), rps->rp1_freq);
rps->min_freq = chv_rps_min_freq(rps);
- DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(rps, rps->min_freq),
- rps->min_freq);
+ drm_dbg(&i915->drm, "min GPU freq: %d MHz (%u)\n",
+ intel_gpu_freq(rps, rps->min_freq), rps->min_freq);
vlv_iosf_sb_put(i915,
BIT(VLV_IOSF_SB_PUNIT) |
@@ -1471,12 +1615,13 @@ static void rps_work(struct work_struct *work)
{
struct intel_rps *rps = container_of(work, typeof(*rps), work);
struct intel_gt *gt = rps_to_gt(rps);
+ struct drm_i915_private *i915 = rps_to_i915(rps);
bool client_boost = false;
int new_freq, adj, min, max;
u32 pm_iir = 0;
spin_lock_irq(&gt->irq_lock);
- pm_iir = fetch_and_zero(&rps->pm_iir) & READ_ONCE(rps->pm_events);
+ pm_iir = fetch_and_zero(&rps->pm_iir) & rps->pm_events;
client_boost = atomic_read(&rps->num_waiters);
spin_unlock_irq(&gt->irq_lock);
@@ -1485,6 +1630,10 @@ static void rps_work(struct work_struct *work)
goto out;
mutex_lock(&rps->lock);
+ if (!intel_rps_is_active(rps)) {
+ mutex_unlock(&rps->lock);
+ return;
+ }
pm_iir |= vlv_wa_c0_ei(rps, pm_iir);
@@ -1494,6 +1643,12 @@ static void rps_work(struct work_struct *work)
max = rps->max_freq_softlimit;
if (client_boost)
max = rps->max_freq;
+
+ GT_TRACE(gt,
+ "pm_iir:%x, client_boost:%s, last:%d, cur:%x, min:%x, max:%x\n",
+ pm_iir, yesno(client_boost),
+ adj, new_freq, min, max);
+
if (client_boost && new_freq < rps->boost_freq) {
new_freq = rps->boost_freq;
adj = 0;
@@ -1525,30 +1680,18 @@ static void rps_work(struct work_struct *work)
adj = 0;
}
- rps->last_adj = adj;
-
/*
- * Limit deboosting and boosting to keep ourselves at the extremes
- * when in the respective power modes (i.e. slowly decrease frequencies
- * while in the HIGH_POWER zone and slowly increase frequencies while
- * in the LOW_POWER zone). On idle, we will hit the timeout and drop
- * to the next level quickly, and conversely if busy we expect to
- * hit a waitboost and rapidly switch into max power.
- */
- if ((adj < 0 && rps->power.mode == HIGH_POWER) ||
- (adj > 0 && rps->power.mode == LOW_POWER))
- rps->last_adj = 0;
-
- /* sysfs frequency interfaces may have snuck in while servicing the
- * interrupt
+ * sysfs frequency limits may have snuck in while
+ * servicing the interrupt
*/
new_freq += adj;
new_freq = clamp_t(int, new_freq, min, max);
if (intel_rps_set(rps, new_freq)) {
- DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n");
- rps->last_adj = 0;
+ drm_dbg(&i915->drm, "Failed to set new GPU frequency\n");
+ adj = 0;
}
+ rps->last_adj = adj;
mutex_unlock(&rps->lock);
@@ -1568,6 +1711,8 @@ void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
if (unlikely(!events))
return;
+ GT_TRACE(gt, "irq events:%x\n", events);
+
gen6_gt_pm_mask_irq(gt, events);
rps->pm_iir |= events;
@@ -1579,10 +1724,12 @@ void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
struct intel_gt *gt = rps_to_gt(rps);
u32 events;
- events = pm_iir & READ_ONCE(rps->pm_events);
+ events = pm_iir & rps->pm_events;
if (events) {
spin_lock(&gt->irq_lock);
+ GT_TRACE(gt, "irq events:%x\n", events);
+
gen6_gt_pm_mask_irq(gt, events);
rps->pm_iir |= events;
@@ -1640,6 +1787,7 @@ void intel_rps_init_early(struct intel_rps *rps)
mutex_init(&rps->power.mutex);
INIT_WORK(&rps->work, rps_work);
+ timer_setup(&rps->timer, rps_timer, 0);
atomic_set(&rps->num_waiters, 0);
}
@@ -1668,9 +1816,10 @@ void intel_rps_init(struct intel_rps *rps)
sandybridge_pcode_read(i915, GEN6_READ_OC_PARAMS,
&params, NULL);
if (params & BIT(31)) { /* OC supported */
- DRM_DEBUG_DRIVER("Overclocking supported, max: %dMHz, overclock: %dMHz\n",
- (rps->max_freq & 0xff) * 50,
- (params & 0xff) * 50);
+ drm_dbg(&i915->drm,
+ "Overclocking supported, max: %dMHz, overclock: %dMHz\n",
+ (rps->max_freq & 0xff) * 50,
+ (params & 0xff) * 50);
rps->max_freq = params & 0xff;
}
}
@@ -1678,7 +1827,9 @@ void intel_rps_init(struct intel_rps *rps)
/* Finally allow us to boost to max by default */
rps->boost_freq = rps->max_freq;
rps->idle_freq = rps->min_freq;
- rps->cur_freq = rps->idle_freq;
+
+ /* Start in the middle, from here we will autotune based on workload */
+ rps->cur_freq = rps->efficient_freq;
rps->pm_intrmsk_mbz = 0;
@@ -1695,6 +1846,12 @@ void intel_rps_init(struct intel_rps *rps)
rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
}
+void intel_rps_sanitize(struct intel_rps *rps)
+{
+ if (INTEL_GEN(rps_to_i915(rps)) >= 6)
+ rps_disable_interrupts(rps);
+}
+
u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
@@ -1722,7 +1879,7 @@ static u32 read_cagf(struct intel_rps *rps)
freq = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
vlv_punit_put(i915);
} else {
- freq = intel_uncore_read(rps_to_gt(rps)->uncore, GEN6_RPSTAT1);
+ freq = intel_uncore_read(rps_to_uncore(rps), GEN6_RPSTAT1);
}
return intel_rps_get_cagf(rps, freq);
@@ -1730,7 +1887,7 @@ static u32 read_cagf(struct intel_rps *rps)
u32 intel_rps_read_actual_frequency(struct intel_rps *rps)
{
- struct intel_runtime_pm *rpm = rps_to_gt(rps)->uncore->rpm;
+ struct intel_runtime_pm *rpm = rps_to_uncore(rps)->rpm;
intel_wakeref_t wakeref;
u32 freq = 0;
@@ -1930,3 +2087,7 @@ bool i915_gpu_turbo_disable(void)
return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_rps.c"
+#endif
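
The new rps_timer() evaluates busyness in software: it keeps the three busiest engines over the last interval and folds them together with decreasing weight (full, half, quarter) before comparing 100 * busy against the up/down thresholds scaled by the interval. A stand-alone sketch of that arithmetic with made-up sample numbers:

#include <stdio.h>

int main(void)
{
	long long engine_busy_ns[] = { 6000000, 9000000, 2000000, 500000 };
	long long max_busy[3] = { 0, 0, 0 };
	long long dt_ns = 10000000; /* evaluation interval */
	long long busy;
	unsigned int i, j;

	/* Track the three busiest engines, as the timer callback does. */
	for (i = 0; i < sizeof(engine_busy_ns) / sizeof(engine_busy_ns[0]); i++) {
		long long v = engine_busy_ns[i];

		for (j = 0; j < 3; j++) {
			if (v > max_busy[j]) {
				long long tmp = max_busy[j];

				max_busy[j] = v;
				v = tmp;
			}
		}
	}

	/* Fold in secondary engines at half and quarter weight. */
	busy = max_busy[0] + max_busy[1] / 2 + max_busy[2] / 4;

	printf("busy %lld ns of %lld ns (%lld%%)\n",
	       busy, dt_ns, 100 * busy / dt_ns);
	printf("upclock? %s (threshold 95%%)\n",
	       100 * busy > 95 * dt_ns ? "yes" : "no");
	return 0;
}
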
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.h b/drivers/gpu/drm/i915/gt/intel_rps.h
index dfa98194f3b2..8d3c9d663662 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.h
+++ b/drivers/gpu/drm/i915/gt/intel_rps.h
@@ -13,6 +13,7 @@ struct i915_request;
void intel_rps_init_early(struct intel_rps *rps);
void intel_rps_init(struct intel_rps *rps);
+void intel_rps_sanitize(struct intel_rps *rps);
void intel_rps_driver_register(struct intel_rps *rps);
void intel_rps_driver_unregister(struct intel_rps *rps);
@@ -36,4 +37,64 @@ void gen5_rps_irq_handler(struct intel_rps *rps);
void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir);
void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir);
+static inline bool intel_rps_is_enabled(const struct intel_rps *rps)
+{
+ return test_bit(INTEL_RPS_ENABLED, &rps->flags);
+}
+
+static inline void intel_rps_set_enabled(struct intel_rps *rps)
+{
+ set_bit(INTEL_RPS_ENABLED, &rps->flags);
+}
+
+static inline void intel_rps_clear_enabled(struct intel_rps *rps)
+{
+ clear_bit(INTEL_RPS_ENABLED, &rps->flags);
+}
+
+static inline bool intel_rps_is_active(const struct intel_rps *rps)
+{
+ return test_bit(INTEL_RPS_ACTIVE, &rps->flags);
+}
+
+static inline void intel_rps_set_active(struct intel_rps *rps)
+{
+ set_bit(INTEL_RPS_ACTIVE, &rps->flags);
+}
+
+static inline bool intel_rps_clear_active(struct intel_rps *rps)
+{
+ return test_and_clear_bit(INTEL_RPS_ACTIVE, &rps->flags);
+}
+
+static inline bool intel_rps_has_interrupts(const struct intel_rps *rps)
+{
+ return test_bit(INTEL_RPS_INTERRUPTS, &rps->flags);
+}
+
+static inline void intel_rps_set_interrupts(struct intel_rps *rps)
+{
+ set_bit(INTEL_RPS_INTERRUPTS, &rps->flags);
+}
+
+static inline void intel_rps_clear_interrupts(struct intel_rps *rps)
+{
+ clear_bit(INTEL_RPS_INTERRUPTS, &rps->flags);
+}
+
+static inline bool intel_rps_uses_timer(const struct intel_rps *rps)
+{
+ return test_bit(INTEL_RPS_TIMER, &rps->flags);
+}
+
+static inline void intel_rps_set_timer(struct intel_rps *rps)
+{
+ set_bit(INTEL_RPS_TIMER, &rps->flags);
+}
+
+static inline void intel_rps_clear_timer(struct intel_rps *rps)
+{
+ clear_bit(INTEL_RPS_TIMER, &rps->flags);
+}
+
#endif /* INTEL_RPS_H */
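
These helpers replace the old rps->enabled/rps->active booleans with a single flags word manipulated through the atomic bitops, so intel_rps_park() can use the test-and-clear form to know whether it actually raced with intel_rps_unpark(). A user-space sketch of the same scheme, with C11 atomics standing in for set_bit()/test_and_clear_bit(); the names mirror the enum added to intel_rps_types.h:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum { RPS_ENABLED, RPS_ACTIVE, RPS_INTERRUPTS, RPS_TIMER };

static _Atomic unsigned long rps_flags;

static bool rps_clear_active(void)
{
	/* Like test_and_clear_bit(): report whether we were active. */
	unsigned long old = atomic_fetch_and(&rps_flags,
					     ~(1ul << RPS_ACTIVE));

	return old & (1ul << RPS_ACTIVE);
}

int main(void)
{
	atomic_fetch_or(&rps_flags, 1ul << RPS_ENABLED);
	atomic_fetch_or(&rps_flags, 1ul << RPS_ACTIVE);

	printf("first park saw active: %d\n", rps_clear_active());
	printf("second park saw active: %d\n", rps_clear_active());
	return 0;
}
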
diff --git a/drivers/gpu/drm/i915/gt/intel_rps_types.h b/drivers/gpu/drm/i915/gt/intel_rps_types.h
index c2e279154bd5..38083f0402d9 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_rps_types.h
@@ -31,6 +31,13 @@ struct intel_rps_ei {
u32 media_c0;
};
+enum {
+ INTEL_RPS_ENABLED = 0,
+ INTEL_RPS_ACTIVE,
+ INTEL_RPS_INTERRUPTS,
+ INTEL_RPS_TIMER,
+};
+
struct intel_rps {
struct mutex lock; /* protects enabling and the worker */
@@ -38,9 +45,12 @@ struct intel_rps {
* work, interrupts_enabled and pm_iir are protected by
* dev_priv->irq_lock
*/
+ struct timer_list timer;
struct work_struct work;
- bool enabled;
- bool active;
+ unsigned long flags;
+
+ ktime_t pm_timestamp;
+ u32 pm_interval;
u32 pm_iir;
/* PM interrupt bits that should never be masked */
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c
index 74f793423231..d173271c7397 100644
--- a/drivers/gpu/drm/i915/gt/intel_sseu.c
+++ b/drivers/gpu/drm/i915/gt/intel_sseu.c
@@ -65,7 +65,6 @@ u32 intel_sseu_make_rpcs(struct drm_i915_private *i915,
{
const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
bool subslice_pg = sseu->has_subslice_pg;
- struct intel_sseu ctx_sseu;
u8 slices, subslices;
u32 rpcs = 0;
@@ -78,31 +77,13 @@ u32 intel_sseu_make_rpcs(struct drm_i915_private *i915,
/*
* If i915/perf is active, we want a stable powergating configuration
- * on the system.
- *
- * We could choose full enablement, but on ICL we know there are use
- * cases which disable slices for functional, apart for performance
- * reasons. So in this case we select a known stable subset.
+ * on the system. Use the configuration pinned by i915/perf.
*/
- if (!i915->perf.exclusive_stream) {
- ctx_sseu = *req_sseu;
- } else {
- ctx_sseu = intel_sseu_from_device_info(sseu);
-
- if (IS_GEN(i915, 11)) {
- /*
- * We only need subslice count so it doesn't matter
- * which ones we select - just turn off low bits in the
- * amount of half of all available subslices per slice.
- */
- ctx_sseu.subslice_mask =
- ~(~0 << (hweight8(ctx_sseu.subslice_mask) / 2));
- ctx_sseu.slice_mask = 0x1;
- }
- }
+ if (i915->perf.exclusive_stream)
+ req_sseu = &i915->perf.sseu;
- slices = hweight8(ctx_sseu.slice_mask);
- subslices = hweight8(ctx_sseu.subslice_mask);
+ slices = hweight8(req_sseu->slice_mask);
+ subslices = hweight8(req_sseu->subslice_mask);
/*
* Since the SScount bitfield in GEN8_R_PWR_CLK_STATE is only three bits
@@ -175,13 +156,13 @@ u32 intel_sseu_make_rpcs(struct drm_i915_private *i915,
if (sseu->has_eu_pg) {
u32 val;
- val = ctx_sseu.min_eus_per_subslice << GEN8_RPCS_EU_MIN_SHIFT;
+ val = req_sseu->min_eus_per_subslice << GEN8_RPCS_EU_MIN_SHIFT;
GEM_BUG_ON(val & ~GEN8_RPCS_EU_MIN_MASK);
val &= GEN8_RPCS_EU_MIN_MASK;
rpcs |= val;
- val = ctx_sseu.max_eus_per_subslice << GEN8_RPCS_EU_MAX_SHIFT;
+ val = req_sseu->max_eus_per_subslice << GEN8_RPCS_EU_MAX_SHIFT;
GEM_BUG_ON(val & ~GEN8_RPCS_EU_MAX_MASK);
val &= GEN8_RPCS_EU_MAX_MASK;
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
index 08b56d7ab4f4..4546284fede1 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -119,6 +119,15 @@ static void __idle_hwsp_free(struct intel_timeline_hwsp *hwsp, int cacheline)
spin_unlock_irqrestore(&gt->hwsp_lock, flags);
}
+static void __rcu_cacheline_free(struct rcu_head *rcu)
+{
+ struct intel_timeline_cacheline *cl =
+ container_of(rcu, typeof(*cl), rcu);
+
+ i915_active_fini(&cl->active);
+ kfree(cl);
+}
+
static void __idle_cacheline_free(struct intel_timeline_cacheline *cl)
{
GEM_BUG_ON(!i915_active_is_idle(&cl->active));
@@ -127,8 +136,7 @@ static void __idle_cacheline_free(struct intel_timeline_cacheline *cl)
i915_vma_put(cl->hwsp->vma);
__idle_hwsp_free(cl->hwsp, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS));
- i915_active_fini(&cl->active);
- kfree_rcu(cl, rcu);
+ call_rcu(&cl->rcu, __rcu_cacheline_free);
}
__i915_active_call
@@ -203,9 +211,9 @@ static void cacheline_free(struct intel_timeline_cacheline *cl)
i915_active_release(&cl->active);
}
-int intel_timeline_init(struct intel_timeline *timeline,
- struct intel_gt *gt,
- struct i915_vma *hwsp)
+static int intel_timeline_init(struct intel_timeline *timeline,
+ struct intel_gt *gt,
+ struct i915_vma *hwsp)
{
void *vaddr;
@@ -272,7 +280,7 @@ void intel_gt_init_timelines(struct intel_gt *gt)
INIT_LIST_HEAD(&timelines->hwsp_free_list);
}
-void intel_timeline_fini(struct intel_timeline *timeline)
+static void intel_timeline_fini(struct intel_timeline *timeline)
{
GEM_BUG_ON(atomic_read(&timeline->pin_count));
GEM_BUG_ON(!list_empty(&timeline->requests));
@@ -329,6 +337,13 @@ int intel_timeline_pin(struct intel_timeline *tl)
return 0;
}
+void intel_timeline_reset_seqno(const struct intel_timeline *tl)
+{
+ /* Must be pinned to be writable, and no requests in flight. */
+ GEM_BUG_ON(!atomic_read(&tl->pin_count));
+ WRITE_ONCE(*(u32 *)tl->hwsp_seqno, tl->seqno);
+}
+
void intel_timeline_enter(struct intel_timeline *tl)
{
struct intel_gt_timelines *timelines = &tl->gt->timelines;
@@ -357,8 +372,16 @@ void intel_timeline_enter(struct intel_timeline *tl)
return;
spin_lock(&timelines->lock);
- if (!atomic_fetch_inc(&tl->active_count))
+ if (!atomic_fetch_inc(&tl->active_count)) {
+ /*
+ * The HWSP is volatile, and may have been lost while inactive,
+ * e.g. across suspend/resume. Be paranoid, and ensure that
+ * the HWSP value matches our seqno so we don't proclaim
+ * the next request as already complete.
+ */
+ intel_timeline_reset_seqno(tl);
list_add_tail(&tl->link, &timelines->active_list);
+ }
spin_unlock(&timelines->lock);
}
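
The __rcu_cacheline_free() change works around a limit of kfree_rcu(): it can only free the memory, whereas here i915_active_fini() must also run once the grace period has elapsed, so both the destructor and kfree() move into a call_rcu() callback. A kernel-style sketch of that pattern (not buildable on its own; everything except call_rcu() and container_of() is illustrative):

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct cacheline_like {
	struct rcu_head rcu;
	/* ... members that RCU readers may still be dereferencing ... */
};

static void cacheline_free_rcu(struct rcu_head *rcu)
{
	struct cacheline_like *cl = container_of(rcu, typeof(*cl), rcu);

	/* Run the destructor only once no RCU reader can see cl. */
	/* e.g. i915_active_fini(&cl->active); */
	kfree(cl);
}

static void cacheline_release(struct cacheline_like *cl)
{
	call_rcu(&cl->rcu, cacheline_free_rcu);
}
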
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.h b/drivers/gpu/drm/i915/gt/intel_timeline.h
index f5b7eade3809..4298b9ac7327 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.h
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.h
@@ -31,11 +31,6 @@
#include "i915_syncmap.h"
#include "gt/intel_timeline_types.h"
-int intel_timeline_init(struct intel_timeline *tl,
- struct intel_gt *gt,
- struct i915_vma *hwsp);
-void intel_timeline_fini(struct intel_timeline *tl);
-
struct intel_timeline *
intel_timeline_create(struct intel_gt *gt, struct i915_vma *global_hwsp);
@@ -84,6 +79,8 @@ int intel_timeline_get_seqno(struct intel_timeline *tl,
void intel_timeline_exit(struct intel_timeline *tl);
void intel_timeline_unpin(struct intel_timeline *tl);
+void intel_timeline_reset_seqno(const struct intel_timeline *tl);
+
int intel_timeline_read_hwsp(struct i915_request *from,
struct i915_request *until,
u32 *hwsp_offset);
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 5176ad1a3976..90a2b9e399b0 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -485,25 +485,14 @@ static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine,
static void cnl_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
- struct drm_i915_private *i915 = engine->i915;
-
/* WaForceContextSaveRestoreNonCoherent:cnl */
WA_SET_BIT_MASKED(CNL_HDC_CHICKEN0,
HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT);
- /* WaThrottleEUPerfToAvoidTDBackPressure:cnl(pre-prod) */
- if (IS_CNL_REVID(i915, CNL_REVID_B0, CNL_REVID_B0))
- WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, THROTTLE_12_5);
-
/* WaDisableReplayBufferBankArbitrationOptimization:cnl */
WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
- /* WaDisableEnhancedSBEVertexCaching:cnl (pre-prod) */
- if (IS_CNL_REVID(i915, 0, CNL_REVID_B0))
- WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
- GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE);
-
/* WaPushConstantDereferenceHoldDisable:cnl */
WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2, PUSH_CONSTANT_DEREF_DISABLE);
@@ -837,7 +826,7 @@ wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
intel_uncore_read(&i915->uncore, GEN10_MIRROR_FUSE3) &
GEN10_L3BANK_MASK;
- DRM_DEBUG_DRIVER("L3 fuse = %x\n", l3_fuse);
+ drm_dbg(&i915->drm, "L3 fuse = %x\n", l3_fuse);
l3_en = ~(l3_fuse << GEN10_L3BANK_PAIR_COUNT | l3_fuse);
} else {
l3_en = ~0;
@@ -846,7 +835,8 @@ wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
slice = fls(sseu->slice_mask) - 1;
subslice = fls(l3_en & intel_sseu_get_subslices(sseu, slice));
if (!subslice) {
- DRM_WARN("No common index found between subslice mask %x and L3 bank mask %x!\n",
+ drm_warn(&i915->drm,
+ "No common index found between subslice mask %x and L3 bank mask %x!\n",
intel_sseu_get_subslices(sseu, slice), l3_en);
subslice = fls(l3_en);
drm_WARN_ON(&i915->drm, !subslice);
@@ -861,7 +851,7 @@ wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK;
}
- DRM_DEBUG_DRIVER("MCR slice/subslice = %x\n", mcr);
+ drm_dbg(&i915->drm, "MCR slice/subslice = %x\n", mcr);
wa_write_masked_or(wal, GEN8_MCR_SELECTOR, mcr_mask, mcr);
}
@@ -871,12 +861,6 @@ cnl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
wa_init_mcr(i915, wal);
- /* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */
- if (IS_CNL_REVID(i915, CNL_REVID_B0, CNL_REVID_B0))
- wa_write_or(wal,
- GAMT_CHKN_BIT_REG,
- GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT);
-
/* WaInPlaceDecompressionHang:cnl */
wa_write_or(wal,
GEN9_GAMT_ECO_REG_RW_IA,
@@ -933,15 +917,20 @@ icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
GAMT_CHKN_BIT_REG,
GAMT_CHKN_DISABLE_L3_COH_PIPE);
- /* Wa_1607087056:icl */
- wa_write_or(wal,
- SLICE_UNIT_LEVEL_CLKGATE,
- L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
+ /* Wa_1607087056:icl,ehl,jsl */
+ if (IS_ICELAKE(i915) ||
+ IS_EHL_REVID(i915, EHL_REVID_A0, EHL_REVID_A0)) {
+ wa_write_or(wal,
+ SLICE_UNIT_LEVEL_CLKGATE,
+ L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
+ }
}
static void
tgl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
+ wa_init_mcr(i915, wal);
+
/* Wa_1409420604:tgl */
if (IS_TGL_REVID(i915, TGL_REVID_A0, TGL_REVID_A0))
wa_write_or(wal,
@@ -1379,12 +1368,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GEN7_FF_THREAD_MODE,
GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
- /*
- * Wa_1409085225:tgl
- * Wa_14010229206:tgl
- */
- wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH);
-
/* Wa_1408615072:tgl */
wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
VSUNIT_CLKGATE_DIS_TGL);
@@ -1402,6 +1385,12 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
wa_masked_en(wal,
GEN9_CS_DEBUG_MODE1,
FF_DOP_CLOCK_GATE_DISABLE);
+
+ /*
+ * Wa_1409085225:tgl
+ * Wa_14010229206:tgl
+ */
+ wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH);
}
if (IS_GEN(i915, 11)) {
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index 4a53ded7c2dd..b8dd3cbc8696 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -28,7 +28,6 @@
#include "i915_drv.h"
#include "intel_context.h"
#include "intel_engine_pm.h"
-#include "intel_engine_pool.h"
#include "mock_engine.h"
#include "selftests/mock_request.h"
@@ -328,7 +327,6 @@ int mock_engine_init(struct intel_engine_cs *engine)
intel_engine_init_execlists(engine);
intel_engine_init__pm(engine);
intel_engine_init_retire(engine);
- intel_engine_pool_init(&engine->pool);
ce = create_kernel_context(engine);
if (IS_ERR(ce))
diff --git a/drivers/gpu/drm/i915/gt/selftest_context.c b/drivers/gpu/drm/i915/gt/selftest_context.c
index e874dfaa5316..52af1cee9a94 100644
--- a/drivers/gpu/drm/i915/gt/selftest_context.c
+++ b/drivers/gpu/drm/i915/gt/selftest_context.c
@@ -24,6 +24,7 @@ static int request_sync(struct i915_request *rq)
/* Opencode i915_request_add() so we can keep the timeline locked. */
__i915_request_commit(rq);
+ rq->sched.attr.priority = I915_PRIORITY_BARRIER;
__i915_request_queue(rq, NULL);
timeout = i915_request_wait(rq, 0, HZ / 10);
@@ -154,10 +155,7 @@ static int live_context_size(void *arg)
*/
for_each_engine(engine, gt, id) {
- struct {
- struct drm_i915_gem_object *state;
- void *pinned;
- } saved;
+ struct file *saved;
if (!engine->context_size)
continue;
@@ -171,8 +169,7 @@ static int live_context_size(void *arg)
* active state is sufficient, we are only checking that we
* don't use more than we planned.
*/
- saved.state = fetch_and_zero(&engine->default_state);
- saved.pinned = fetch_and_zero(&engine->pinned_default_state);
+ saved = fetch_and_zero(&engine->default_state);
/* Overlaps with the execlists redzone */
engine->context_size += I915_GTT_PAGE_SIZE;
@@ -181,8 +178,7 @@ static int live_context_size(void *arg)
engine->context_size -= I915_GTT_PAGE_SIZE;
- engine->pinned_default_state = saved.pinned;
- engine->default_state = saved.state;
+ engine->default_state = saved;
intel_engine_pm_put(engine);
diff --git a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
index 09ff8e4f88af..242181a5214c 100644
--- a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
@@ -7,6 +7,7 @@
#include "selftest_llc.h"
#include "selftest_rc6.h"
+#include "selftest_rps.h"
static int live_gt_resume(void *arg)
{
@@ -52,6 +53,13 @@ int intel_gt_pm_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(live_rc6_manual),
+ SUBTEST(live_rps_clock_interval),
+ SUBTEST(live_rps_control),
+ SUBTEST(live_rps_frequency_cs),
+ SUBTEST(live_rps_frequency_srm),
+ SUBTEST(live_rps_power),
+ SUBTEST(live_rps_interrupt),
+ SUBTEST(live_rps_dynamic),
SUBTEST(live_gt_resume),
};
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index f95ae15ce865..824f99c4cc7c 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -21,7 +21,8 @@
#include "gem/selftests/mock_context.h"
#define CS_GPR(engine, n) ((engine)->mmio_base + 0x600 + (n) * 4)
-#define NUM_GPR_DW (16 * 2) /* each GPR is 2 dwords */
+#define NUM_GPR 16
+#define NUM_GPR_DW (NUM_GPR * 2) /* each GPR is 2 dwords */
static struct i915_vma *create_scratch(struct intel_gt *gt)
{
@@ -68,26 +69,41 @@ static void engine_heartbeat_enable(struct intel_engine_cs *engine,
engine->props.heartbeat_interval_ms = saved;
}
+static bool is_active(struct i915_request *rq)
+{
+ if (i915_request_is_active(rq))
+ return true;
+
+ if (i915_request_on_hold(rq))
+ return true;
+
+ if (i915_request_started(rq))
+ return true;
+
+ return false;
+}
+
static int wait_for_submit(struct intel_engine_cs *engine,
struct i915_request *rq,
unsigned long timeout)
{
timeout += jiffies;
do {
- cond_resched();
- intel_engine_flush_submission(engine);
+ bool done = time_after(jiffies, timeout);
- if (READ_ONCE(engine->execlists.pending[0]))
- continue;
-
- if (i915_request_is_active(rq))
+ if (i915_request_completed(rq)) /* that was quick! */
return 0;
- if (i915_request_started(rq)) /* that was quick! */
+ /* Wait until the HW has acknowledged the submission (or err) */
+ intel_engine_flush_submission(engine);
+ if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq))
return 0;
- } while (time_before(jiffies, timeout));
- return -ETIME;
+ if (done)
+ return -ETIME;
+
+ cond_resched();
+ } while (1);
}
static int wait_for_reset(struct intel_engine_cs *engine,
@@ -634,9 +650,9 @@ static int live_error_interrupt(void *arg)
error_repr(p->error[i]));
if (!i915_request_started(client[i])) {
- pr_debug("%s: %s request not stated!\n",
- engine->name,
- error_repr(p->error[i]));
+ pr_err("%s: %s request not started!\n",
+ engine->name,
+ error_repr(p->error[i]));
err = -ETIME;
goto out;
}
@@ -644,9 +660,10 @@ static int live_error_interrupt(void *arg)
/* Kick the tasklet to process the error */
intel_engine_flush_submission(engine);
if (client[i]->fence.error != p->error[i]) {
- pr_err("%s: %s request completed with wrong error code: %d\n",
+ pr_err("%s: %s request (%s) with wrong error code: %d\n",
engine->name,
error_repr(p->error[i]),
+ i915_request_completed(client[i]) ? "completed" : "running",
client[i]->fence.error);
err = -EINVAL;
goto out;
@@ -1057,7 +1074,6 @@ static int live_timeslice_rewind(void *arg)
engine->name);
goto err;
}
- GEM_BUG_ON(!timer_pending(&engine->execlists.timer));
/* ELSP[] = { { A:rq1, A:rq2 }, { B:rq1 } } */
if (i915_request_is_active(rq[A2])) { /* semaphore yielded! */
@@ -1230,8 +1246,14 @@ static int live_timeslice_queue(void *arg)
if (err)
goto err_rq;
- intel_engine_flush_submission(engine);
+ /* Wait until we ack the release_queue and start timeslicing */
+ do {
+ cond_resched();
+ intel_engine_flush_submission(engine);
+ } while (READ_ONCE(engine->execlists.pending[0]));
+
if (!READ_ONCE(engine->execlists.timer.expires) &&
+ execlists_active(&engine->execlists) == rq &&
!i915_request_completed(rq)) {
struct drm_printer p =
drm_info_printer(gt->i915->drm.dev);
@@ -2032,6 +2054,9 @@ static int __cancel_hostile(struct live_preempt_cancel *arg)
if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT))
return 0;
+ if (!intel_has_reset_engine(arg->engine->gt))
+ return 0;
+
GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
rq = spinner_create_request(&arg->a.spin,
arg->a.ctx, arg->engine,
@@ -2632,7 +2657,7 @@ static int create_gang(struct intel_engine_cs *engine,
if (IS_ERR(rq))
goto err_obj;
- rq->batch = vma;
+ rq->batch = i915_vma_get(vma);
i915_request_get(rq);
i915_vma_lock(vma);
@@ -2656,6 +2681,7 @@ static int create_gang(struct intel_engine_cs *engine,
return 0;
err_rq:
+ i915_vma_put(rq->batch);
i915_request_put(rq);
err_obj:
i915_gem_object_put(obj);
@@ -2752,6 +2778,7 @@ static int live_preempt_gang(void *arg)
err = -ETIME;
}
+ i915_vma_put(rq->batch);
i915_request_put(rq);
rq = n;
}
@@ -2765,6 +2792,331 @@ static int live_preempt_gang(void *arg)
return 0;
}
+static struct i915_vma *
+create_gpr_user(struct intel_engine_cs *engine,
+ struct i915_vma *result,
+ unsigned int offset)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ u32 *cs;
+ int err;
+ int i;
+
+ obj = i915_gem_object_create_internal(engine->i915, 4096);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ vma = i915_vma_instance(obj, result->vm, NULL);
+ if (IS_ERR(vma)) {
+ i915_gem_object_put(obj);
+ return vma;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err) {
+ i915_vma_put(vma);
+ return ERR_PTR(err);
+ }
+
+ cs = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(cs)) {
+ i915_vma_put(vma);
+ return ERR_CAST(cs);
+ }
+
+ /* All GPR are clear for new contexts. We use GPR(0) as a constant */
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = CS_GPR(engine, 0);
+ *cs++ = 1;
+
+ for (i = 1; i < NUM_GPR; i++) {
+ u64 addr;
+
+ /*
+ * Perform: GPR[i]++
+ *
+ * As we read and write into the context saved GPR[i], if
+ * we restart this batch buffer from an earlier point, we
+ * will repeat the increment and store a value > 1.
+ */
+ *cs++ = MI_MATH(4);
+ *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(i));
+ *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(0));
+ *cs++ = MI_MATH_ADD;
+ *cs++ = MI_MATH_STORE(MI_MATH_REG(i), MI_MATH_REG_ACCU);
+
+ addr = result->node.start + offset + i * sizeof(*cs);
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8;
+ *cs++ = CS_GPR(engine, 2 * i);
+ *cs++ = lower_32_bits(addr);
+ *cs++ = upper_32_bits(addr);
+
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_GTE_SDD;
+ *cs++ = i;
+ *cs++ = lower_32_bits(result->node.start);
+ *cs++ = upper_32_bits(result->node.start);
+ }
+
+ *cs++ = MI_BATCH_BUFFER_END;
+ i915_gem_object_flush_map(obj);
+ i915_gem_object_unpin_map(obj);
+
+ return vma;
+}
+
+static struct i915_vma *create_global(struct intel_gt *gt, size_t sz)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int err;
+
+ obj = i915_gem_object_create_internal(gt->i915, sz);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ i915_gem_object_put(obj);
+ return vma;
+ }
+
+ err = i915_ggtt_pin(vma, 0, 0);
+ if (err) {
+ i915_vma_put(vma);
+ return ERR_PTR(err);
+ }
+
+ return vma;
+}
+
+static struct i915_request *
+create_gpr_client(struct intel_engine_cs *engine,
+ struct i915_vma *global,
+ unsigned int offset)
+{
+ struct i915_vma *batch, *vma;
+ struct intel_context *ce;
+ struct i915_request *rq;
+ int err;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return ERR_CAST(ce);
+
+ vma = i915_vma_instance(global->obj, ce->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_ce;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto out_ce;
+
+ batch = create_gpr_user(engine, vma, offset);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ goto out_vma;
+ }
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_batch;
+ }
+
+ i915_vma_lock(vma);
+ err = i915_request_await_object(rq, vma->obj, false);
+ if (!err)
+ err = i915_vma_move_to_active(vma, rq, 0);
+ i915_vma_unlock(vma);
+
+ i915_vma_lock(batch);
+ if (!err)
+ err = i915_request_await_object(rq, batch->obj, false);
+ if (!err)
+ err = i915_vma_move_to_active(batch, rq, 0);
+ if (!err)
+ err = rq->engine->emit_bb_start(rq,
+ batch->node.start,
+ PAGE_SIZE, 0);
+ i915_vma_unlock(batch);
+ i915_vma_unpin(batch);
+
+ if (!err)
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+out_batch:
+ i915_vma_put(batch);
+out_vma:
+ i915_vma_unpin(vma);
+out_ce:
+ intel_context_put(ce);
+ return err ? ERR_PTR(err) : rq;
+}
+
+static int preempt_user(struct intel_engine_cs *engine,
+ struct i915_vma *global,
+ int id)
+{
+ struct i915_sched_attr attr = {
+ .priority = I915_PRIORITY_MAX
+ };
+ struct i915_request *rq;
+ int err = 0;
+ u32 *cs;
+
+ rq = intel_engine_create_kernel_request(engine);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ return PTR_ERR(cs);
+ }
+
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = i915_ggtt_offset(global);
+ *cs++ = 0;
+ *cs++ = id;
+
+ intel_ring_advance(rq, cs);
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ engine->schedule(rq, &attr);
+
+ if (i915_request_wait(rq, 0, HZ / 2) < 0)
+ err = -ETIME;
+ i915_request_put(rq);
+
+ return err;
+}
+
+static int live_preempt_user(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ struct i915_vma *global;
+ enum intel_engine_id id;
+ u32 *result;
+ int err = 0;
+
+ if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
+ return 0;
+
+ /*
+ * In our other tests, we look at preemption in carefully
+ * controlled conditions in the ringbuffer. Since most of the
+ * time is spent in user batches, most of our preemptions naturally
+ * occur there. We want to verify that when we preempt inside a batch
+ * we continue on from the current instruction and do not roll back
+ * to the start, or another earlier arbitration point.
+ *
+ * To verify this, we create a batch which is a mixture of
+ * MI_MATH (gpr++) MI_SRM (gpr) and preemption points. Then with
+ * a few preempting contexts thrown into the mix, we look for any
+ * repeated instructions (which show up as incorrect values).
+ */
+
+ global = create_global(gt, 4096);
+ if (IS_ERR(global))
+ return PTR_ERR(global);
+
+ result = i915_gem_object_pin_map(global->obj, I915_MAP_WC);
+ if (IS_ERR(result)) {
+ i915_vma_unpin_and_release(&global, 0);
+ return PTR_ERR(result);
+ }
+
+ for_each_engine(engine, gt, id) {
+ struct i915_request *client[3] = {};
+ struct igt_live_test t;
+ int i;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ if (IS_GEN(gt->i915, 8) && engine->class != RENDER_CLASS)
+ continue; /* we need per-context GPR */
+
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
+ err = -EIO;
+ break;
+ }
+
+ memset(result, 0, 4096);
+
+ for (i = 0; i < ARRAY_SIZE(client); i++) {
+ struct i915_request *rq;
+
+ rq = create_gpr_client(engine, global,
+ NUM_GPR * i * sizeof(u32));
+ if (IS_ERR(rq))
+ goto end_test;
+
+ client[i] = rq;
+ }
+
+ /* Continuously preempt the set of 3 running contexts */
+ for (i = 1; i <= NUM_GPR; i++) {
+ err = preempt_user(engine, global, i);
+ if (err)
+ goto end_test;
+ }
+
+ if (READ_ONCE(result[0]) != NUM_GPR) {
+ pr_err("%s: Failed to release semaphore\n",
+ engine->name);
+ err = -EIO;
+ goto end_test;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(client); i++) {
+ int gpr;
+
+ if (i915_request_wait(client[i], 0, HZ / 2) < 0) {
+ err = -ETIME;
+ goto end_test;
+ }
+
+ for (gpr = 1; gpr < NUM_GPR; gpr++) {
+ if (result[NUM_GPR * i + gpr] != 1) {
+ pr_err("%s: Invalid result, client %d, gpr %d, result: %d\n",
+ engine->name,
+ i, gpr, result[NUM_GPR * i + gpr]);
+ err = -EINVAL;
+ goto end_test;
+ }
+ }
+ }
+
+end_test:
+ for (i = 0; i < ARRAY_SIZE(client); i++) {
+ if (!client[i])
+ break;
+
+ i915_request_put(client[i]);
+ }
+
+ /* Flush the semaphores on error */
+ smp_store_mb(result[0], -1);
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ if (err)
+ break;
+ }
+
+ i915_vma_unpin_and_release(&global, I915_VMA_RELEASE_MAP);
+ return err;
+}
+
static int live_preempt_timeout(void *arg)
{
struct intel_gt *gt = arg;
@@ -3972,6 +4324,7 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_chain_preempt),
SUBTEST(live_preempt_gang),
SUBTEST(live_preempt_timeout),
+ SUBTEST(live_preempt_user),
SUBTEST(live_preempt_smoke),
SUBTEST(live_virtual_engine),
SUBTEST(live_virtual_mask),
@@ -3989,35 +4342,6 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
return intel_gt_live_subtests(tests, &i915->gt);
}
-static void hexdump(const void *buf, size_t len)
-{
- const size_t rowsize = 8 * sizeof(u32);
- const void *prev = NULL;
- bool skip = false;
- size_t pos;
-
- for (pos = 0; pos < len; pos += rowsize) {
- char line[128];
-
- if (prev && !memcmp(prev, buf + pos, rowsize)) {
- if (!skip) {
- pr_info("*\n");
- skip = true;
- }
- continue;
- }
-
- WARN_ON_ONCE(hex_dump_to_buffer(buf + pos, len - pos,
- rowsize, sizeof(u32),
- line, sizeof(line),
- false) >= sizeof(line));
- pr_info("[%04zx] %s\n", pos, line);
-
- prev = buf + pos;
- skip = false;
- }
-}
-
static int emit_semaphore_signal(struct intel_context *ce, void *slot)
{
const u32 offset =
@@ -4099,13 +4423,12 @@ static int live_lrc_layout(void *arg)
if (!engine->default_state)
continue;
- hw = i915_gem_object_pin_map(engine->default_state,
- I915_MAP_WB);
+ hw = shmem_pin_map(engine->default_state);
if (IS_ERR(hw)) {
err = PTR_ERR(hw);
break;
}
- hw += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw);
+ hw += LRC_STATE_OFFSET / sizeof(*hw);
execlists_init_reg_state(memset(lrc, POISON_INUSE, PAGE_SIZE),
engine->kernel_context,
@@ -4166,13 +4489,13 @@ static int live_lrc_layout(void *arg)
if (err) {
pr_info("%s: HW register image:\n", engine->name);
- hexdump(hw, PAGE_SIZE);
+ igt_hexdump(hw, PAGE_SIZE);
pr_info("%s: SW register image:\n", engine->name);
- hexdump(lrc, PAGE_SIZE);
+ igt_hexdump(lrc, PAGE_SIZE);
}
- i915_gem_object_unpin_map(engine->default_state);
+ shmem_unpin_map(engine->default_state, hw);
if (err)
break;
}
@@ -4241,10 +4564,35 @@ static int live_lrc_fixed(void *arg)
"BB_STATE"
},
{
+ i915_mmio_reg_offset(RING_BB_PER_CTX_PTR(engine->mmio_base)),
+ lrc_ring_wa_bb_per_ctx(engine),
+ "RING_BB_PER_CTX_PTR"
+ },
+ {
+ i915_mmio_reg_offset(RING_INDIRECT_CTX(engine->mmio_base)),
+ lrc_ring_indirect_ptr(engine),
+ "RING_INDIRECT_CTX_PTR"
+ },
+ {
+ i915_mmio_reg_offset(RING_INDIRECT_CTX_OFFSET(engine->mmio_base)),
+ lrc_ring_indirect_offset(engine),
+ "RING_INDIRECT_CTX_OFFSET"
+ },
+ {
i915_mmio_reg_offset(RING_CTX_TIMESTAMP(engine->mmio_base)),
CTX_TIMESTAMP - 1,
"RING_CTX_TIMESTAMP"
},
+ {
+ i915_mmio_reg_offset(GEN8_RING_CS_GPR(engine->mmio_base, 0)),
+ lrc_ring_gpr0(engine),
+ "RING_CS_GPR0"
+ },
+ {
+ i915_mmio_reg_offset(RING_CMD_BUF_CCTL(engine->mmio_base)),
+ lrc_ring_cmd_buf_cctl(engine),
+ "RING_CMD_BUF_CCTL"
+ },
{ },
}, *t;
u32 *hw;
@@ -4252,13 +4600,12 @@ static int live_lrc_fixed(void *arg)
if (!engine->default_state)
continue;
- hw = i915_gem_object_pin_map(engine->default_state,
- I915_MAP_WB);
+ hw = shmem_pin_map(engine->default_state);
if (IS_ERR(hw)) {
err = PTR_ERR(hw);
break;
}
- hw += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw);
+ hw += LRC_STATE_OFFSET / sizeof(*hw);
for (t = tbl; t->name; t++) {
int dw = find_offset(hw, t->reg);
@@ -4274,7 +4621,7 @@ static int live_lrc_fixed(void *arg)
}
}
- i915_gem_object_unpin_map(engine->default_state);
+ shmem_unpin_map(engine->default_state, hw);
}
return err;
@@ -4830,6 +5177,7 @@ store_context(struct intel_context *ce, struct i915_vma *scratch)
{
struct i915_vma *batch;
u32 dw, x, *cs, *hw;
+ u32 *defaults;
batch = create_user_vma(ce->vm, SZ_64K);
if (IS_ERR(batch))
@@ -4841,10 +5189,17 @@ store_context(struct intel_context *ce, struct i915_vma *scratch)
return ERR_CAST(cs);
}
+ defaults = shmem_pin_map(ce->engine->default_state);
+ if (!defaults) {
+ i915_gem_object_unpin_map(batch->obj);
+ i915_vma_put(batch);
+ return ERR_PTR(-ENOMEM);
+ }
+
x = 0;
dw = 0;
- hw = ce->engine->pinned_default_state;
- hw += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw);
+ hw = defaults;
+ hw += LRC_STATE_OFFSET / sizeof(*hw);
do {
u32 len = hw[dw] & 0x7f;
@@ -4874,6 +5229,8 @@ store_context(struct intel_context *ce, struct i915_vma *scratch)
*cs++ = MI_BATCH_BUFFER_END;
+ shmem_unpin_map(ce->engine->default_state, defaults);
+
i915_gem_object_flush_map(batch->obj);
i915_gem_object_unpin_map(batch->obj);
@@ -4984,6 +5341,7 @@ static struct i915_vma *load_context(struct intel_context *ce, u32 poison)
{
struct i915_vma *batch;
u32 dw, *cs, *hw;
+ u32 *defaults;
batch = create_user_vma(ce->vm, SZ_64K);
if (IS_ERR(batch))
@@ -4995,9 +5353,16 @@ static struct i915_vma *load_context(struct intel_context *ce, u32 poison)
return ERR_CAST(cs);
}
+ defaults = shmem_pin_map(ce->engine->default_state);
+ if (!defaults) {
+ i915_gem_object_unpin_map(batch->obj);
+ i915_vma_put(batch);
+ return ERR_PTR(-ENOMEM);
+ }
+
dw = 0;
- hw = ce->engine->pinned_default_state;
- hw += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw);
+ hw = defaults;
+ hw += LRC_STATE_OFFSET / sizeof(*hw);
do {
u32 len = hw[dw] & 0x7f;
@@ -5024,6 +5389,8 @@ static struct i915_vma *load_context(struct intel_context *ce, u32 poison)
*cs++ = MI_BATCH_BUFFER_END;
+ shmem_unpin_map(ce->engine->default_state, defaults);
+
i915_gem_object_flush_map(batch->obj);
i915_gem_object_unpin_map(batch->obj);
@@ -5091,6 +5458,7 @@ static int compare_isolation(struct intel_engine_cs *engine,
{
u32 x, dw, *hw, *lrc;
u32 *A[2], *B[2];
+ u32 *defaults;
int err = 0;
A[0] = i915_gem_object_pin_map(ref[0]->obj, I915_MAP_WC);
@@ -5121,12 +5489,18 @@ static int compare_isolation(struct intel_engine_cs *engine,
err = PTR_ERR(lrc);
goto err_B1;
}
- lrc += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw);
+ lrc += LRC_STATE_OFFSET / sizeof(*hw);
+
+ defaults = shmem_pin_map(ce->engine->default_state);
+ if (!defaults) {
+ err = -ENOMEM;
+ goto err_lrc;
+ }
x = 0;
dw = 0;
- hw = engine->pinned_default_state;
- hw += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw);
+ hw = defaults;
+ hw += LRC_STATE_OFFSET / sizeof(*hw);
do {
u32 len = hw[dw] & 0x7f;
@@ -5157,7 +5531,6 @@ static int compare_isolation(struct intel_engine_cs *engine,
A[0][x], B[0][x], B[1][x],
poison, lrc[dw + 1]);
err = -EINVAL;
- break;
}
}
dw += 2;
@@ -5166,6 +5539,8 @@ static int compare_isolation(struct intel_engine_cs *engine,
} while (dw < PAGE_SIZE / sizeof(u32) &&
(hw[dw] & ~BIT(0)) != MI_BATCH_BUFFER_END);
+ shmem_unpin_map(ce->engine->default_state, defaults);
+err_lrc:
i915_gem_object_unpin_map(ce->state->obj);
err_B1:
i915_gem_object_unpin_map(result[1]->obj);
@@ -5296,6 +5671,7 @@ static int live_lrc_isolation(void *arg)
0xffffffff,
0xffff0000,
};
+ int err = 0;
/*
* Our goal is try and verify that per-context state cannot be
@@ -5306,7 +5682,6 @@ static int live_lrc_isolation(void *arg)
*/
for_each_engine(engine, gt, id) {
- int err = 0;
int i;
/* Just don't even ask */
@@ -5315,25 +5690,180 @@ static int live_lrc_isolation(void *arg)
continue;
intel_engine_pm_get(engine);
- if (engine->pinned_default_state) {
- for (i = 0; i < ARRAY_SIZE(poison); i++) {
- err = __lrc_isolation(engine, poison[i]);
- if (err)
- break;
+ for (i = 0; i < ARRAY_SIZE(poison); i++) {
+ int result;
- err = __lrc_isolation(engine, ~poison[i]);
- if (err)
- break;
- }
+ result = __lrc_isolation(engine, poison[i]);
+ if (result && !err)
+ err = result;
+
+ result = __lrc_isolation(engine, ~poison[i]);
+ if (result && !err)
+ err = result;
}
intel_engine_pm_put(engine);
+ if (igt_flush_test(gt->i915)) {
+ err = -EIO;
+ break;
+ }
+ }
+
+ return err;
+}
+
+static int indirect_ctx_submit_req(struct intel_context *ce)
+{
+ struct i915_request *rq;
+ int err = 0;
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ if (i915_request_wait(rq, 0, HZ / 5) < 0)
+ err = -ETIME;
+
+ i915_request_put(rq);
+
+ return err;
+}
+
+#define CTX_BB_CANARY_OFFSET (3 * 1024)
+#define CTX_BB_CANARY_INDEX (CTX_BB_CANARY_OFFSET / sizeof(u32))
+
+static u32 *
+emit_indirect_ctx_bb_canary(const struct intel_context *ce, u32 *cs)
+{
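+	/*
+	 * Emit an SRM of RING_START into the canary slot of this context's
+	 * wa_bb page, so that check_ring_start() can later compare it with
+	 * CTX_RING_START in the saved context image.
+	 */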
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8 |
+ MI_SRM_LRM_GLOBAL_GTT |
+ MI_LRI_LRM_CS_MMIO;
+ *cs++ = i915_mmio_reg_offset(RING_START(0));
+ *cs++ = i915_ggtt_offset(ce->state) +
+ context_wa_bb_offset(ce) +
+ CTX_BB_CANARY_OFFSET;
+ *cs++ = 0;
+
+ return cs;
+}
+
+static void
+indirect_ctx_bb_setup(struct intel_context *ce)
+{
+ u32 *cs = context_indirect_bb(ce);
+
+ cs[CTX_BB_CANARY_INDEX] = 0xdeadf00d;
+
+ setup_indirect_ctx_bb(ce, ce->engine, emit_indirect_ctx_bb_canary);
+}
+
+static bool check_ring_start(struct intel_context *ce)
+{
+ const u32 * const ctx_bb = (void *)(ce->lrc_reg_state) -
+ LRC_STATE_OFFSET + context_wa_bb_offset(ce);
+
+ if (ctx_bb[CTX_BB_CANARY_INDEX] == ce->lrc_reg_state[CTX_RING_START])
+ return true;
+
+ pr_err("ring start mismatch: canary 0x%08x vs state 0x%08x\n",
+ ctx_bb[CTX_BB_CANARY_INDEX],
+ ce->lrc_reg_state[CTX_RING_START]);
+
+ return false;
+}
+
+static int indirect_ctx_bb_check(struct intel_context *ce)
+{
+ int err;
+
+ err = indirect_ctx_submit_req(ce);
+ if (err)
+ return err;
+
+ if (!check_ring_start(ce))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int __live_lrc_indirect_ctx_bb(struct intel_engine_cs *engine)
+{
+ struct intel_context *a, *b;
+ int err;
+
+ a = intel_context_create(engine);
+ if (IS_ERR(a))
+ return PTR_ERR(a);
+ err = intel_context_pin(a);
+ if (err)
+ goto put_a;
+
+ b = intel_context_create(engine);
+ if (IS_ERR(b)) {
+ err = PTR_ERR(b);
+ goto unpin_a;
+ }
+ err = intel_context_pin(b);
+ if (err)
+ goto put_b;
+
+ /* We use the already reserved extra page in context state */
+ if (!a->wa_bb_page) {
+ GEM_BUG_ON(b->wa_bb_page);
+ GEM_BUG_ON(INTEL_GEN(engine->i915) == 12);
+ goto unpin_b;
+ }
+
+ /*
+	 * In order to test that our per-context bb is truly per context, and
+	 * executes at the intended spot in the context restore process, make
+	 * the batch store the ring start value to memory. As ring start is
+	 * restored prior to starting the indirect ctx bb, and as it will be
+	 * different for each context, it fits this purpose.
+ */
+ indirect_ctx_bb_setup(a);
+ indirect_ctx_bb_setup(b);
+
+ err = indirect_ctx_bb_check(a);
+ if (err)
+ goto unpin_b;
+
+ err = indirect_ctx_bb_check(b);
+
+unpin_b:
+ intel_context_unpin(b);
+put_b:
+ intel_context_put(b);
+unpin_a:
+ intel_context_unpin(a);
+put_a:
+ intel_context_put(a);
+
+ return err;
+}
+
+static int live_lrc_indirect_ctx_bb(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ for_each_engine(engine, gt, id) {
+ intel_engine_pm_get(engine);
+ err = __live_lrc_indirect_ctx_bb(engine);
+ intel_engine_pm_put(engine);
+
if (igt_flush_test(gt->i915))
err = -EIO;
+
if (err)
- return err;
+ break;
}
- return 0;
+ return err;
}
static void garbage_reset(struct intel_engine_cs *engine,
@@ -5367,7 +5897,7 @@ static struct i915_request *garbage(struct intel_context *ce,
prandom_bytes_state(prng,
ce->lrc_reg_state,
ce->engine->context_size -
- LRC_STATE_PN * PAGE_SIZE);
+ LRC_STATE_OFFSET);
rq = intel_context_create_request(ce);
if (IS_ERR(rq)) {
@@ -5571,6 +6101,7 @@ int intel_lrc_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_lrc_timestamp),
SUBTEST(live_lrc_garbage),
SUBTEST(live_pphwsp_runtime),
+ SUBTEST(live_lrc_indirect_ctx_bb),
};
if (!HAS_LOGICAL_RING_CONTEXTS(i915))
diff --git a/drivers/gpu/drm/i915/gt/selftest_rc6.c b/drivers/gpu/drm/i915/gt/selftest_rc6.c
index 95b165faeba7..2dc460624bbc 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rc6.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rc6.c
@@ -11,6 +11,7 @@
#include "selftest_rc6.h"
#include "selftests/i915_random.h"
+#include "selftests/librapl.h"
static u64 rc6_residency(struct intel_rc6 *rc6)
{
@@ -31,7 +32,9 @@ int live_rc6_manual(void *arg)
{
struct intel_gt *gt = arg;
struct intel_rc6 *rc6 = &gt->rc6;
+ u64 rc0_power, rc6_power;
intel_wakeref_t wakeref;
+ ktime_t dt;
u64 res[2];
int err = 0;
@@ -54,7 +57,12 @@ int live_rc6_manual(void *arg)
msleep(1); /* wakeup is not immediate, takes about 100us on icl */
res[0] = rc6_residency(rc6);
+
+ dt = ktime_get();
+ rc0_power = librapl_energy_uJ();
msleep(250);
+ rc0_power = librapl_energy_uJ() - rc0_power;
+ dt = ktime_sub(ktime_get(), dt);
res[1] = rc6_residency(rc6);
if ((res[1] - res[0]) >> 10) {
pr_err("RC6 residency increased by %lldus while disabled for 250ms!\n",
@@ -63,13 +71,24 @@ int live_rc6_manual(void *arg)
goto out_unlock;
}
+ rc0_power = div64_u64(NSEC_PER_SEC * rc0_power, ktime_to_ns(dt));
+ if (!rc0_power) {
+ pr_err("No power measured while in RC0\n");
+ err = -EINVAL;
+ goto out_unlock;
+ }
+
/* Manually enter RC6 */
intel_rc6_park(rc6);
res[0] = rc6_residency(rc6);
+ intel_uncore_forcewake_flush(rc6_to_uncore(rc6), FORCEWAKE_ALL);
+ dt = ktime_get();
+ rc6_power = librapl_energy_uJ();
msleep(100);
+ rc6_power = librapl_energy_uJ() - rc6_power;
+ dt = ktime_sub(ktime_get(), dt);
res[1] = rc6_residency(rc6);
-
if (res[1] == res[0]) {
pr_err("Did not enter RC6! RC6_STATE=%08x, RC6_CONTROL=%08x, residency=%lld\n",
intel_uncore_read_fw(gt->uncore, GEN6_RC_STATE),
@@ -78,6 +97,15 @@ int live_rc6_manual(void *arg)
err = -EINVAL;
}
+ rc6_power = div64_u64(NSEC_PER_SEC * rc6_power, ktime_to_ns(dt));
+ pr_info("GPU consumed %llduW in RC0 and %llduW in RC6\n",
+ rc0_power, rc6_power);
+ if (2 * rc6_power > rc0_power) {
+ pr_err("GPU leaked energy while in RC6!\n");
+ err = -EINVAL;
+ goto out_unlock;
+ }
+
/* Restore what should have been the original state! */
intel_rc6_unpark(rc6);
diff --git a/drivers/gpu/drm/i915/gt/selftest_ring_submission.c b/drivers/gpu/drm/i915/gt/selftest_ring_submission.c
index 9995faadd7e8..3350e7c995bc 100644
--- a/drivers/gpu/drm/i915/gt/selftest_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/selftest_ring_submission.c
@@ -54,6 +54,8 @@ static struct i915_vma *create_wally(struct intel_engine_cs *engine)
*cs++ = STACK_MAGIC;
*cs++ = MI_BATCH_BUFFER_END;
+
+ i915_gem_object_flush_map(obj);
i915_gem_object_unpin_map(obj);
vma->private = intel_context_create(engine); /* dummy residuals */
diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c
new file mode 100644
index 000000000000..6275d69aa9cc
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_rps.c
@@ -0,0 +1,1331 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/pm_qos.h>
+#include <linux/sort.h>
+
+#include "intel_engine_heartbeat.h"
+#include "intel_engine_pm.h"
+#include "intel_gpu_commands.h"
+#include "intel_gt_clock_utils.h"
+#include "intel_gt_pm.h"
+#include "intel_rc6.h"
+#include "selftest_rps.h"
+#include "selftests/igt_flush_test.h"
+#include "selftests/igt_spinner.h"
+#include "selftests/librapl.h"
+
+/* Try to isolate the impact of cstates from determining frequency response */
+#define CPU_LATENCY 0 /* -1 to disable pm_qos, 0 to disable cstates */
+
+static unsigned long engine_heartbeat_disable(struct intel_engine_cs *engine)
+{
+ unsigned long old;
+
+ old = fetch_and_zero(&engine->props.heartbeat_interval_ms);
+
+ intel_engine_pm_get(engine);
+ intel_engine_park_heartbeat(engine);
+
+ return old;
+}
+
+static void engine_heartbeat_enable(struct intel_engine_cs *engine,
+ unsigned long saved)
+{
+ intel_engine_pm_put(engine);
+
+ engine->props.heartbeat_interval_ms = saved;
+}
+
+static void dummy_rps_work(struct work_struct *wrk)
+{
+}
+
+static int cmp_u64(const void *A, const void *B)
+{
+ const u64 *a = A, *b = B;
+
+	if (*a < *b)
+		return -1;
+	else if (*a > *b)
+ return 1;
+ else
+ return 0;
+}
+
+static int cmp_u32(const void *A, const void *B)
+{
+ const u32 *a = A, *b = B;
+
+	if (*a < *b)
+		return -1;
+	else if (*a > *b)
+ return 1;
+ else
+ return 0;
+}
+
+static struct i915_vma *
+create_spin_counter(struct intel_engine_cs *engine,
+ struct i915_address_space *vm,
+ bool srm,
+ u32 **cancel,
+ u32 **counter)
+{
+ enum {
+ COUNT,
+ INC,
+ __NGPR__,
+ };
+#define CS_GPR(x) GEN8_RING_CS_GPR(engine->mmio_base, x)
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ unsigned long end;
+ u32 *base, *cs;
+ int loop, i;
+ int err;
+
+ obj = i915_gem_object_create_internal(vm->i915, 64 << 10);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ end = obj->base.size / sizeof(u32) - 1;
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma)) {
+ i915_gem_object_put(obj);
+ return vma;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err) {
+ i915_vma_put(vma);
+ return ERR_PTR(err);
+ }
+
+ base = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(base)) {
+ i915_gem_object_put(obj);
+ return ERR_CAST(base);
+ }
+ cs = base;
+
+ *cs++ = MI_LOAD_REGISTER_IMM(__NGPR__ * 2);
+ for (i = 0; i < __NGPR__; i++) {
+ *cs++ = i915_mmio_reg_offset(CS_GPR(i));
+ *cs++ = 0;
+ *cs++ = i915_mmio_reg_offset(CS_GPR(i)) + 4;
+ *cs++ = 0;
+ }
+
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = i915_mmio_reg_offset(CS_GPR(INC));
+ *cs++ = 1;
+
+ loop = cs - base;
+
+ /* Unroll the loop to avoid MI_BB_START stalls impacting measurements */
+ for (i = 0; i < 1024; i++) {
+ *cs++ = MI_MATH(4);
+ *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(COUNT));
+ *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(INC));
+ *cs++ = MI_MATH_ADD;
+ *cs++ = MI_MATH_STORE(MI_MATH_REG(COUNT), MI_MATH_REG_ACCU);
+
+ if (srm) {
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8;
+ *cs++ = i915_mmio_reg_offset(CS_GPR(COUNT));
+ *cs++ = lower_32_bits(vma->node.start + end * sizeof(*cs));
+ *cs++ = upper_32_bits(vma->node.start + end * sizeof(*cs));
+ }
+ }
+
+ *cs++ = MI_BATCH_BUFFER_START_GEN8;
+ *cs++ = lower_32_bits(vma->node.start + loop * sizeof(*cs));
+ *cs++ = upper_32_bits(vma->node.start + loop * sizeof(*cs));
+ GEM_BUG_ON(cs - base > end);
+
+ i915_gem_object_flush_map(obj);
+
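+	/*
+	 * cancel points at the first instruction of the unrolled loop, which
+	 * the MI_BATCH_BUFFER_START above jumps back to; the caller stops the
+	 * otherwise endless batch by overwriting it with MI_BATCH_BUFFER_END.
+	 * counter (SRM mode only) is the final dword of the buffer, into which
+	 * the loop stores the running count.
+	 */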
+ *cancel = base + loop;
+ *counter = srm ? memset32(base + end, 0, 1) : NULL;
+ return vma;
+}
+
+static u8 wait_for_freq(struct intel_rps *rps, u8 freq, int timeout_ms)
+{
+ u8 history[64], i;
+ unsigned long end;
+ int sleep;
+
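+	/*
+	 * Seeding the history with the target means we only give up early if
+	 * the actual frequency has been stable at some other value for a full
+	 * window of samples.
+	 */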
+ i = 0;
+ memset(history, freq, sizeof(history));
+ sleep = 20;
+
+ /* The PCU does not change instantly, but drifts towards the goal? */
+ end = jiffies + msecs_to_jiffies(timeout_ms);
+ do {
+ u8 act;
+
+ act = read_cagf(rps);
+ if (time_after(jiffies, end))
+ return act;
+
+ /* Target acquired */
+ if (act == freq)
+ return act;
+
+ /* Any change within the last N samples? */
+ if (!memchr_inv(history, act, sizeof(history)))
+ return act;
+
+ history[i] = act;
+ i = (i + 1) % ARRAY_SIZE(history);
+
+ usleep_range(sleep, 2 * sleep);
+ sleep *= 2;
+ if (sleep > timeout_ms * 20)
+ sleep = timeout_ms * 20;
+ } while (1);
+}
+
+static u8 rps_set_check(struct intel_rps *rps, u8 freq)
+{
+ mutex_lock(&rps->lock);
+ GEM_BUG_ON(!intel_rps_is_active(rps));
+ intel_rps_set(rps, freq);
+ GEM_BUG_ON(rps->last_freq != freq);
+ mutex_unlock(&rps->lock);
+
+ return wait_for_freq(rps, freq, 50);
+}
+
+static void show_pstate_limits(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+
+ if (IS_BROXTON(i915)) {
+ pr_info("P_STATE_CAP[%x]: 0x%08x\n",
+ i915_mmio_reg_offset(BXT_RP_STATE_CAP),
+ intel_uncore_read(rps_to_uncore(rps),
+ BXT_RP_STATE_CAP));
+ } else if (IS_GEN(i915, 9)) {
+ pr_info("P_STATE_LIMITS[%x]: 0x%08x\n",
+ i915_mmio_reg_offset(GEN9_RP_STATE_LIMITS),
+ intel_uncore_read(rps_to_uncore(rps),
+ GEN9_RP_STATE_LIMITS));
+ }
+}
+
+int live_rps_clock_interval(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_rps *rps = &gt->rps;
+ void (*saved_work)(struct work_struct *wrk);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct igt_spinner spin;
+ int err = 0;
+
+ if (!intel_rps_is_enabled(rps))
+ return 0;
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ intel_gt_pm_wait_for_idle(gt);
+ saved_work = rps->work.func;
+ rps->work.func = dummy_rps_work;
+
+ intel_gt_pm_get(gt);
+ intel_rps_disable(&gt->rps);
+
+ intel_gt_check_clock_frequency(gt);
+
+ for_each_engine(engine, gt, id) {
+ unsigned long saved_heartbeat;
+ struct i915_request *rq;
+ u32 cycles;
+ u64 dt;
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ saved_heartbeat = engine_heartbeat_disable(engine);
+
+ rq = igt_spinner_create_request(&spin,
+ engine->kernel_context,
+ MI_NOOP);
+ if (IS_ERR(rq)) {
+ engine_heartbeat_enable(engine, saved_heartbeat);
+ err = PTR_ERR(rq);
+ break;
+ }
+
+ i915_request_add(rq);
+
+ if (!igt_wait_for_spinner(&spin, rq)) {
+ pr_err("%s: RPS spinner did not start\n",
+ engine->name);
+ igt_spinner_end(&spin);
+ engine_heartbeat_enable(engine, saved_heartbeat);
+ intel_gt_set_wedged(engine->gt);
+ err = -EIO;
+ break;
+ }
+
+ intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
+
+ intel_uncore_write_fw(gt->uncore, GEN6_RP_CUR_UP_EI, 0);
+
+ /* Set the evaluation interval to infinity! */
+ intel_uncore_write_fw(gt->uncore,
+ GEN6_RP_UP_EI, 0xffffffff);
+ intel_uncore_write_fw(gt->uncore,
+ GEN6_RP_UP_THRESHOLD, 0xffffffff);
+
+ intel_uncore_write_fw(gt->uncore, GEN6_RP_CONTROL,
+ GEN6_RP_ENABLE | GEN6_RP_UP_BUSY_AVG);
+
+ if (wait_for(intel_uncore_read_fw(gt->uncore,
+ GEN6_RP_CUR_UP_EI),
+ 10)) {
+ /* Just skip the test; assume lack of HW support */
+ pr_notice("%s: rps evaluation interval not ticking\n",
+ engine->name);
+ err = -ENODEV;
+ } else {
+ ktime_t dt_[5];
+ u32 cycles_[5];
+ int i;
+
+ for (i = 0; i < 5; i++) {
+ preempt_disable();
+
+ dt_[i] = ktime_get();
+ cycles_[i] = -intel_uncore_read_fw(gt->uncore, GEN6_RP_CUR_UP_EI);
+
+ udelay(1000);
+
+ dt_[i] = ktime_sub(ktime_get(), dt_[i]);
+ cycles_[i] += intel_uncore_read_fw(gt->uncore, GEN6_RP_CUR_UP_EI);
+
+ preempt_enable();
+ }
+
+ /* Use the median of both cycle/dt; close enough */
+ sort(cycles_, 5, sizeof(*cycles_), cmp_u32, NULL);
+ cycles = (cycles_[1] + 2 * cycles_[2] + cycles_[3]) / 4;
+ sort(dt_, 5, sizeof(*dt_), cmp_u64, NULL);
+ dt = div_u64(dt_[1] + 2 * dt_[2] + dt_[3], 4);
+ }
+
+ intel_uncore_write_fw(gt->uncore, GEN6_RP_CONTROL, 0);
+ intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
+
+ igt_spinner_end(&spin);
+ engine_heartbeat_enable(engine, saved_heartbeat);
+
+ if (err == 0) {
+ u64 time = intel_gt_pm_interval_to_ns(gt, cycles);
+ u32 expected =
+ intel_gt_ns_to_pm_interval(gt, dt);
+
+ pr_info("%s: rps counted %d C0 cycles [%lldns] in %lldns [%d cycles], using GT clock frequency of %uKHz\n",
+ engine->name, cycles, time, dt, expected,
+ gt->clock_frequency / 1000);
+
+ if (10 * time < 8 * dt ||
+ 8 * time > 10 * dt) {
+ pr_err("%s: rps clock time does not match walltime!\n",
+ engine->name);
+ err = -EINVAL;
+ }
+
+ if (10 * expected < 8 * cycles ||
+ 8 * expected > 10 * cycles) {
+ pr_err("%s: walltime does not match rps clock ticks!\n",
+ engine->name);
+ err = -EINVAL;
+ }
+ }
+
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+
+ break; /* once is enough */
+ }
+
+ intel_rps_enable(&gt->rps);
+ intel_gt_pm_put(gt);
+
+ igt_spinner_fini(&spin);
+
+ intel_gt_pm_wait_for_idle(gt);
+ rps->work.func = saved_work;
+
+ if (err == -ENODEV) /* skipped, don't report a fail */
+ err = 0;
+
+ return err;
+}
+
+int live_rps_control(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_rps *rps = &gt->rps;
+ void (*saved_work)(struct work_struct *wrk);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct igt_spinner spin;
+ int err = 0;
+
+ /*
+ * Check that the actual frequency matches our requested frequency,
+ * to verify our control mechanism. We have to be careful that the
+	 * PCU may throttle the GPU, in which case the actual frequency used
+	 * will be lower than requested.
+ */
+
+ if (!intel_rps_is_enabled(rps))
+ return 0;
+
+ if (IS_CHERRYVIEW(gt->i915)) /* XXX fragile PCU */
+ return 0;
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ intel_gt_pm_wait_for_idle(gt);
+ saved_work = rps->work.func;
+ rps->work.func = dummy_rps_work;
+
+ intel_gt_pm_get(gt);
+ for_each_engine(engine, gt, id) {
+ unsigned long saved_heartbeat;
+ struct i915_request *rq;
+ ktime_t min_dt, max_dt;
+ int f, limit;
+ int min, max;
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ saved_heartbeat = engine_heartbeat_disable(engine);
+
+ rq = igt_spinner_create_request(&spin,
+ engine->kernel_context,
+ MI_NOOP);
+		if (IS_ERR(rq)) {
+			engine_heartbeat_enable(engine, saved_heartbeat);
+			err = PTR_ERR(rq);
+			break;
+		}
+
+ i915_request_add(rq);
+
+ if (!igt_wait_for_spinner(&spin, rq)) {
+ pr_err("%s: RPS spinner did not start\n",
+ engine->name);
+ igt_spinner_end(&spin);
+ engine_heartbeat_enable(engine, saved_heartbeat);
+ intel_gt_set_wedged(engine->gt);
+ err = -EIO;
+ break;
+ }
+
+ if (rps_set_check(rps, rps->min_freq) != rps->min_freq) {
+ pr_err("%s: could not set minimum frequency [%x], only %x!\n",
+ engine->name, rps->min_freq, read_cagf(rps));
+ igt_spinner_end(&spin);
+ engine_heartbeat_enable(engine, saved_heartbeat);
+ show_pstate_limits(rps);
+ err = -EINVAL;
+ break;
+ }
+
+ for (f = rps->min_freq + 1; f < rps->max_freq; f++) {
+ if (rps_set_check(rps, f) < f)
+ break;
+ }
+
+ limit = rps_set_check(rps, f);
+
+ if (rps_set_check(rps, rps->min_freq) != rps->min_freq) {
+ pr_err("%s: could not restore minimum frequency [%x], only %x!\n",
+ engine->name, rps->min_freq, read_cagf(rps));
+ igt_spinner_end(&spin);
+ engine_heartbeat_enable(engine, saved_heartbeat);
+ show_pstate_limits(rps);
+ err = -EINVAL;
+ break;
+ }
+
+ max_dt = ktime_get();
+ max = rps_set_check(rps, limit);
+ max_dt = ktime_sub(ktime_get(), max_dt);
+
+ min_dt = ktime_get();
+ min = rps_set_check(rps, rps->min_freq);
+ min_dt = ktime_sub(ktime_get(), min_dt);
+
+ igt_spinner_end(&spin);
+ engine_heartbeat_enable(engine, saved_heartbeat);
+
+ pr_info("%s: range:[%x:%uMHz, %x:%uMHz] limit:[%x:%uMHz], %x:%x response %lluns:%lluns\n",
+ engine->name,
+ rps->min_freq, intel_gpu_freq(rps, rps->min_freq),
+ rps->max_freq, intel_gpu_freq(rps, rps->max_freq),
+ limit, intel_gpu_freq(rps, limit),
+ min, max, ktime_to_ns(min_dt), ktime_to_ns(max_dt));
+
+ if (limit == rps->min_freq) {
+ pr_err("%s: GPU throttled to minimum!\n",
+ engine->name);
+ show_pstate_limits(rps);
+ err = -ENODEV;
+ break;
+ }
+
+ if (igt_flush_test(gt->i915)) {
+ err = -EIO;
+ break;
+ }
+ }
+ intel_gt_pm_put(gt);
+
+ igt_spinner_fini(&spin);
+
+ intel_gt_pm_wait_for_idle(gt);
+ rps->work.func = saved_work;
+
+ return err;
+}
+
+static void show_pcu_config(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ unsigned int max_gpu_freq, min_gpu_freq;
+ intel_wakeref_t wakeref;
+ int gpu_freq;
+
+ if (!HAS_LLC(i915))
+ return;
+
+ min_gpu_freq = rps->min_freq;
+ max_gpu_freq = rps->max_freq;
+ if (INTEL_GEN(i915) >= 9) {
+ /* Convert GT frequency to 50 HZ units */
+ min_gpu_freq /= GEN9_FREQ_SCALER;
+ max_gpu_freq /= GEN9_FREQ_SCALER;
+ }
+
+ wakeref = intel_runtime_pm_get(rps_to_uncore(rps)->rpm);
+
+ pr_info("%5s %5s %5s\n", "GPU", "eCPU", "eRing");
+ for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
+ int ia_freq = gpu_freq;
+
+ sandybridge_pcode_read(i915,
+ GEN6_PCODE_READ_MIN_FREQ_TABLE,
+ &ia_freq, NULL);
+
+ pr_info("%5d %5d %5d\n",
+ gpu_freq * 50,
+ ((ia_freq >> 0) & 0xff) * 100,
+ ((ia_freq >> 8) & 0xff) * 100);
+ }
+
+ intel_runtime_pm_put(rps_to_uncore(rps)->rpm, wakeref);
+}
+
+static u64 __measure_frequency(u32 *cntr, int duration_ms)
+{
+ u64 dc, dt;
+
+ dt = ktime_get();
+ dc = READ_ONCE(*cntr);
+ usleep_range(1000 * duration_ms, 2000 * duration_ms);
+ dc = READ_ONCE(*cntr) - dc;
+ dt = ktime_get() - dt;
+
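+	/* dc counter ticks over dt ns, scaled to ticks per millisecond (~kHz) */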
+ return div64_u64(1000 * 1000 * dc, dt);
+}
+
+static u64 measure_frequency_at(struct intel_rps *rps, u32 *cntr, int *freq)
+{
+ u64 x[5];
+ int i;
+
+ *freq = rps_set_check(rps, *freq);
+ for (i = 0; i < 5; i++)
+ x[i] = __measure_frequency(cntr, 2);
+ *freq = (*freq + read_cagf(rps)) / 2;
+
+ /* A simple triangle filter for better result stability */
+ sort(x, 5, sizeof(*x), cmp_u64, NULL);
+ return div_u64(x[1] + 2 * x[2] + x[3], 4);
+}
+
+static u64 __measure_cs_frequency(struct intel_engine_cs *engine,
+ int duration_ms)
+{
+ u64 dc, dt;
+
+ dt = ktime_get();
+ dc = intel_uncore_read_fw(engine->uncore, CS_GPR(0));
+ usleep_range(1000 * duration_ms, 2000 * duration_ms);
+ dc = intel_uncore_read_fw(engine->uncore, CS_GPR(0)) - dc;
+ dt = ktime_get() - dt;
+
+ return div64_u64(1000 * 1000 * dc, dt);
+}
+
+static u64 measure_cs_frequency_at(struct intel_rps *rps,
+ struct intel_engine_cs *engine,
+ int *freq)
+{
+ u64 x[5];
+ int i;
+
+ *freq = rps_set_check(rps, *freq);
+ for (i = 0; i < 5; i++)
+ x[i] = __measure_cs_frequency(engine, 2);
+ *freq = (*freq + read_cagf(rps)) / 2;
+
+ /* A simple triangle filter for better result stability */
+ sort(x, 5, sizeof(*x), cmp_u64, NULL);
+ return div_u64(x[1] + 2 * x[2] + x[3], 4);
+}
+
+static bool scaled_within(u64 x, u64 y, u32 f_n, u32 f_d)
+{
+ return f_d * x > f_n * y && f_n * x < f_d * y;
+}
+
+int live_rps_frequency_cs(void *arg)
+{
+ void (*saved_work)(struct work_struct *wrk);
+ struct intel_gt *gt = arg;
+ struct intel_rps *rps = &gt->rps;
+ struct intel_engine_cs *engine;
+ struct pm_qos_request qos;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /*
+	 * The premise is that the GPU does change frequency at our behest.
+ * Let's check there is a correspondence between the requested
+ * frequency, the actual frequency, and the observed clock rate.
+ */
+
+ if (!intel_rps_is_enabled(rps))
+ return 0;
+
+ if (INTEL_GEN(gt->i915) < 8) /* for CS simplicity */
+ return 0;
+
+ if (CPU_LATENCY >= 0)
+ cpu_latency_qos_add_request(&qos, CPU_LATENCY);
+
+ intel_gt_pm_wait_for_idle(gt);
+ saved_work = rps->work.func;
+ rps->work.func = dummy_rps_work;
+
+ for_each_engine(engine, gt, id) {
+ unsigned long saved_heartbeat;
+ struct i915_request *rq;
+ struct i915_vma *vma;
+ u32 *cancel, *cntr;
+ struct {
+ u64 count;
+ int freq;
+ } min, max;
+
+ saved_heartbeat = engine_heartbeat_disable(engine);
+
+ vma = create_spin_counter(engine,
+ engine->kernel_context->vm, false,
+ &cancel, &cntr);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ engine_heartbeat_enable(engine, saved_heartbeat);
+ break;
+ }
+
+ rq = intel_engine_create_kernel_request(engine);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_vma;
+ }
+
+ i915_vma_lock(vma);
+ err = i915_request_await_object(rq, vma->obj, false);
+ if (!err)
+ err = i915_vma_move_to_active(vma, rq, 0);
+ if (!err)
+ err = rq->engine->emit_bb_start(rq,
+ vma->node.start,
+ PAGE_SIZE, 0);
+ i915_vma_unlock(vma);
+ i915_request_add(rq);
+ if (err)
+ goto err_vma;
+
+ if (wait_for(intel_uncore_read(engine->uncore, CS_GPR(0)),
+ 10)) {
+ pr_err("%s: timed loop did not start\n",
+ engine->name);
+ goto err_vma;
+ }
+
+ min.freq = rps->min_freq;
+ min.count = measure_cs_frequency_at(rps, engine, &min.freq);
+
+ max.freq = rps->max_freq;
+ max.count = measure_cs_frequency_at(rps, engine, &max.freq);
+
+ pr_info("%s: min:%lluKHz @ %uMHz, max:%lluKHz @ %uMHz [%d%%]\n",
+ engine->name,
+ min.count, intel_gpu_freq(rps, min.freq),
+ max.count, intel_gpu_freq(rps, max.freq),
+ (int)DIV64_U64_ROUND_CLOSEST(100 * min.freq * max.count,
+ max.freq * min.count));
+
+ if (!scaled_within(max.freq * min.count,
+ min.freq * max.count,
+ 2, 3)) {
+ int f;
+
+ pr_err("%s: CS did not scale with frequency! scaled min:%llu, max:%llu\n",
+ engine->name,
+ max.freq * min.count,
+ min.freq * max.count);
+ show_pcu_config(rps);
+
+ for (f = min.freq + 1; f <= rps->max_freq; f++) {
+ int act = f;
+ u64 count;
+
+ count = measure_cs_frequency_at(rps, engine, &act);
+ if (act < f)
+ break;
+
+ pr_info("%s: %x:%uMHz: %lluKHz [%d%%]\n",
+ engine->name,
+ act, intel_gpu_freq(rps, act), count,
+ (int)DIV64_U64_ROUND_CLOSEST(100 * min.freq * count,
+ act * min.count));
+
+ f = act; /* may skip ahead [pcu granularity] */
+ }
+
+ err = -EINVAL;
+ }
+
+err_vma:
+ *cancel = MI_BATCH_BUFFER_END;
+ i915_gem_object_flush_map(vma->obj);
+ i915_gem_object_unpin_map(vma->obj);
+ i915_vma_unpin(vma);
+ i915_vma_put(vma);
+
+ engine_heartbeat_enable(engine, saved_heartbeat);
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+ if (err)
+ break;
+ }
+
+ intel_gt_pm_wait_for_idle(gt);
+ rps->work.func = saved_work;
+
+ if (CPU_LATENCY >= 0)
+ cpu_latency_qos_remove_request(&qos);
+
+ return err;
+}
+
+int live_rps_frequency_srm(void *arg)
+{
+ void (*saved_work)(struct work_struct *wrk);
+ struct intel_gt *gt = arg;
+ struct intel_rps *rps = &gt->rps;
+ struct intel_engine_cs *engine;
+ struct pm_qos_request qos;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /*
+	 * The premise is that the GPU does change frequency at our behest.
+ * Let's check there is a correspondence between the requested
+ * frequency, the actual frequency, and the observed clock rate.
+ */
+
+ if (!intel_rps_is_enabled(rps))
+ return 0;
+
+ if (INTEL_GEN(gt->i915) < 8) /* for CS simplicity */
+ return 0;
+
+ if (CPU_LATENCY >= 0)
+ cpu_latency_qos_add_request(&qos, CPU_LATENCY);
+
+ intel_gt_pm_wait_for_idle(gt);
+ saved_work = rps->work.func;
+ rps->work.func = dummy_rps_work;
+
+ for_each_engine(engine, gt, id) {
+ unsigned long saved_heartbeat;
+ struct i915_request *rq;
+ struct i915_vma *vma;
+ u32 *cancel, *cntr;
+ struct {
+ u64 count;
+ int freq;
+ } min, max;
+
+ saved_heartbeat = engine_heartbeat_disable(engine);
+
+ vma = create_spin_counter(engine,
+ engine->kernel_context->vm, true,
+ &cancel, &cntr);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ engine_heartbeat_enable(engine, saved_heartbeat);
+ break;
+ }
+
+ rq = intel_engine_create_kernel_request(engine);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_vma;
+ }
+
+ i915_vma_lock(vma);
+ err = i915_request_await_object(rq, vma->obj, false);
+ if (!err)
+ err = i915_vma_move_to_active(vma, rq, 0);
+ if (!err)
+ err = rq->engine->emit_bb_start(rq,
+ vma->node.start,
+ PAGE_SIZE, 0);
+ i915_vma_unlock(vma);
+ i915_request_add(rq);
+ if (err)
+ goto err_vma;
+
+ if (wait_for(READ_ONCE(*cntr), 10)) {
+ pr_err("%s: timed loop did not start\n",
+ engine->name);
+ goto err_vma;
+ }
+
+ min.freq = rps->min_freq;
+ min.count = measure_frequency_at(rps, cntr, &min.freq);
+
+ max.freq = rps->max_freq;
+ max.count = measure_frequency_at(rps, cntr, &max.freq);
+
+ pr_info("%s: min:%lluKHz @ %uMHz, max:%lluKHz @ %uMHz [%d%%]\n",
+ engine->name,
+ min.count, intel_gpu_freq(rps, min.freq),
+ max.count, intel_gpu_freq(rps, max.freq),
+ (int)DIV64_U64_ROUND_CLOSEST(100 * min.freq * max.count,
+ max.freq * min.count));
+
+ if (!scaled_within(max.freq * min.count,
+ min.freq * max.count,
+ 1, 2)) {
+ int f;
+
+ pr_err("%s: CS did not scale with frequency! scaled min:%llu, max:%llu\n",
+ engine->name,
+ max.freq * min.count,
+ min.freq * max.count);
+ show_pcu_config(rps);
+
+ for (f = min.freq + 1; f <= rps->max_freq; f++) {
+ int act = f;
+ u64 count;
+
+ count = measure_frequency_at(rps, cntr, &act);
+ if (act < f)
+ break;
+
+ pr_info("%s: %x:%uMHz: %lluKHz [%d%%]\n",
+ engine->name,
+ act, intel_gpu_freq(rps, act), count,
+ (int)DIV64_U64_ROUND_CLOSEST(100 * min.freq * count,
+ act * min.count));
+
+ f = act; /* may skip ahead [pcu granularity] */
+ }
+
+ err = -EINVAL;
+ }
+
+err_vma:
+ *cancel = MI_BATCH_BUFFER_END;
+ i915_gem_object_flush_map(vma->obj);
+ i915_gem_object_unpin_map(vma->obj);
+ i915_vma_unpin(vma);
+ i915_vma_put(vma);
+
+ engine_heartbeat_enable(engine, saved_heartbeat);
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+ if (err)
+ break;
+ }
+
+ intel_gt_pm_wait_for_idle(gt);
+ rps->work.func = saved_work;
+
+ if (CPU_LATENCY >= 0)
+ cpu_latency_qos_remove_request(&qos);
+
+ return err;
+}
+
+static void sleep_for_ei(struct intel_rps *rps, int timeout_us)
+{
+ /* Flush any previous EI */
+ usleep_range(timeout_us, 2 * timeout_us);
+
+ /* Reset the interrupt status */
+ rps_disable_interrupts(rps);
+ GEM_BUG_ON(rps->pm_iir);
+ rps_enable_interrupts(rps);
+
+ /* And then wait for the timeout, for real this time */
+ usleep_range(2 * timeout_us, 3 * timeout_us);
+}
+
+static int __rps_up_interrupt(struct intel_rps *rps,
+ struct intel_engine_cs *engine,
+ struct igt_spinner *spin)
+{
+ struct intel_uncore *uncore = engine->uncore;
+ struct i915_request *rq;
+ u32 timeout;
+
+ if (!intel_engine_can_store_dword(engine))
+ return 0;
+
+ rps_set_check(rps, rps->min_freq);
+
+ rq = igt_spinner_create_request(spin, engine->kernel_context, MI_NOOP);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ if (!igt_wait_for_spinner(spin, rq)) {
+ pr_err("%s: RPS spinner did not start\n",
+ engine->name);
+ i915_request_put(rq);
+ intel_gt_set_wedged(engine->gt);
+ return -EIO;
+ }
+
+ if (!intel_rps_is_active(rps)) {
+ pr_err("%s: RPS not enabled on starting spinner\n",
+ engine->name);
+ igt_spinner_end(spin);
+ i915_request_put(rq);
+ return -EINVAL;
+ }
+
+ if (!(rps->pm_events & GEN6_PM_RP_UP_THRESHOLD)) {
+ pr_err("%s: RPS did not register UP interrupt\n",
+ engine->name);
+ i915_request_put(rq);
+ return -EINVAL;
+ }
+
+ if (rps->last_freq != rps->min_freq) {
+ pr_err("%s: RPS did not program min frequency\n",
+ engine->name);
+ i915_request_put(rq);
+ return -EINVAL;
+ }
+
+ timeout = intel_uncore_read(uncore, GEN6_RP_UP_EI);
+ timeout = intel_gt_pm_interval_to_ns(engine->gt, timeout);
+ timeout = DIV_ROUND_UP(timeout, 1000);
+
+ sleep_for_ei(rps, timeout);
+ GEM_BUG_ON(i915_request_completed(rq));
+
+ igt_spinner_end(spin);
+ i915_request_put(rq);
+
+ if (rps->cur_freq != rps->min_freq) {
+ pr_err("%s: Frequency unexpectedly changed [up], now %d!\n",
+ engine->name, intel_rps_read_actual_frequency(rps));
+ return -EINVAL;
+ }
+
+ if (!(rps->pm_iir & GEN6_PM_RP_UP_THRESHOLD)) {
+ pr_err("%s: UP interrupt not recorded for spinner, pm_iir:%x, prev_up:%x, up_threshold:%x, up_ei:%x\n",
+ engine->name, rps->pm_iir,
+ intel_uncore_read(uncore, GEN6_RP_PREV_UP),
+ intel_uncore_read(uncore, GEN6_RP_UP_THRESHOLD),
+ intel_uncore_read(uncore, GEN6_RP_UP_EI));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __rps_down_interrupt(struct intel_rps *rps,
+ struct intel_engine_cs *engine)
+{
+ struct intel_uncore *uncore = engine->uncore;
+ u32 timeout;
+
+ rps_set_check(rps, rps->max_freq);
+
+ if (!(rps->pm_events & GEN6_PM_RP_DOWN_THRESHOLD)) {
+ pr_err("%s: RPS did not register DOWN interrupt\n",
+ engine->name);
+ return -EINVAL;
+ }
+
+ if (rps->last_freq != rps->max_freq) {
+ pr_err("%s: RPS did not program max frequency\n",
+ engine->name);
+ return -EINVAL;
+ }
+
+ timeout = intel_uncore_read(uncore, GEN6_RP_DOWN_EI);
+ timeout = intel_gt_pm_interval_to_ns(engine->gt, timeout);
+ timeout = DIV_ROUND_UP(timeout, 1000);
+
+ sleep_for_ei(rps, timeout);
+
+ if (rps->cur_freq != rps->max_freq) {
+ pr_err("%s: Frequency unexpectedly changed [down], now %d!\n",
+ engine->name,
+ intel_rps_read_actual_frequency(rps));
+ return -EINVAL;
+ }
+
+ if (!(rps->pm_iir & (GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT))) {
+ pr_err("%s: DOWN interrupt not recorded for idle, pm_iir:%x, prev_down:%x, down_threshold:%x, down_ei:%x [prev_up:%x, up_threshold:%x, up_ei:%x]\n",
+ engine->name, rps->pm_iir,
+ intel_uncore_read(uncore, GEN6_RP_PREV_DOWN),
+ intel_uncore_read(uncore, GEN6_RP_DOWN_THRESHOLD),
+ intel_uncore_read(uncore, GEN6_RP_DOWN_EI),
+ intel_uncore_read(uncore, GEN6_RP_PREV_UP),
+ intel_uncore_read(uncore, GEN6_RP_UP_THRESHOLD),
+ intel_uncore_read(uncore, GEN6_RP_UP_EI));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int live_rps_interrupt(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_rps *rps = &gt->rps;
+ void (*saved_work)(struct work_struct *wrk);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct igt_spinner spin;
+ u32 pm_events;
+ int err = 0;
+
+ /*
+ * First, let's check whether or not we are receiving interrupts.
+ */
+
+ if (!intel_rps_has_interrupts(rps))
+ return 0;
+
+ intel_gt_pm_get(gt);
+ pm_events = rps->pm_events;
+ intel_gt_pm_put(gt);
+ if (!pm_events) {
+ pr_err("No RPS PM events registered, but RPS is enabled?\n");
+ return -ENODEV;
+ }
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ intel_gt_pm_wait_for_idle(gt);
+ saved_work = rps->work.func;
+ rps->work.func = dummy_rps_work;
+
+ for_each_engine(engine, gt, id) {
+ /* Keep the engine busy with a spinner; expect an UP! */
+ if (pm_events & GEN6_PM_RP_UP_THRESHOLD) {
+ unsigned long saved_heartbeat;
+
+ intel_gt_pm_wait_for_idle(engine->gt);
+ GEM_BUG_ON(intel_rps_is_active(rps));
+
+ saved_heartbeat = engine_heartbeat_disable(engine);
+
+ err = __rps_up_interrupt(rps, engine, &spin);
+
+ engine_heartbeat_enable(engine, saved_heartbeat);
+ if (err)
+ goto out;
+
+ intel_gt_pm_wait_for_idle(engine->gt);
+ }
+
+ /* Keep the engine awake but idle and check for DOWN */
+ if (pm_events & GEN6_PM_RP_DOWN_THRESHOLD) {
+ unsigned long saved_heartbeat;
+
+ saved_heartbeat = engine_heartbeat_disable(engine);
+ intel_rc6_disable(&gt->rc6);
+
+ err = __rps_down_interrupt(rps, engine);
+
+ intel_rc6_enable(&gt->rc6);
+ engine_heartbeat_enable(engine, saved_heartbeat);
+ if (err)
+ goto out;
+ }
+ }
+
+out:
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+
+ igt_spinner_fini(&spin);
+
+ intel_gt_pm_wait_for_idle(gt);
+ rps->work.func = saved_work;
+
+ return err;
+}
+
+static u64 __measure_power(int duration_ms)
+{
+ u64 dE, dt;
+
+ dt = ktime_get();
+ dE = librapl_energy_uJ();
+ usleep_range(1000 * duration_ms, 2000 * duration_ms);
+ dE = librapl_energy_uJ() - dE;
+ dt = ktime_get() - dt;
+
+ return div64_u64(1000 * 1000 * dE, dt);
+}
+
+static u64 measure_power_at(struct intel_rps *rps, int *freq)
+{
+ u64 x[5];
+ int i;
+
+ *freq = rps_set_check(rps, *freq);
+ for (i = 0; i < 5; i++)
+ x[i] = __measure_power(5);
+ *freq = (*freq + read_cagf(rps)) / 2;
+
+ /* A simple triangle filter for better result stability */
+ sort(x, 5, sizeof(*x), cmp_u64, NULL);
+ return div_u64(x[1] + 2 * x[2] + x[3], 4);
+}
+
+int live_rps_power(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_rps *rps = &gt->rps;
+ void (*saved_work)(struct work_struct *wrk);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct igt_spinner spin;
+ int err = 0;
+
+ /*
+ * Our fundamental assumption is that running at lower frequency
+	 * actually saves power. Let's see if our RAPL measurements support
+	 * that theory.
+ */
+
+ if (!intel_rps_is_enabled(rps))
+ return 0;
+
+ if (!librapl_energy_uJ())
+ return 0;
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ intel_gt_pm_wait_for_idle(gt);
+ saved_work = rps->work.func;
+ rps->work.func = dummy_rps_work;
+
+ for_each_engine(engine, gt, id) {
+ unsigned long saved_heartbeat;
+ struct i915_request *rq;
+ struct {
+ u64 power;
+ int freq;
+ } min, max;
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ saved_heartbeat = engine_heartbeat_disable(engine);
+
+ rq = igt_spinner_create_request(&spin,
+ engine->kernel_context,
+ MI_NOOP);
+ if (IS_ERR(rq)) {
+ engine_heartbeat_enable(engine, saved_heartbeat);
+ err = PTR_ERR(rq);
+ break;
+ }
+
+ i915_request_add(rq);
+
+ if (!igt_wait_for_spinner(&spin, rq)) {
+ pr_err("%s: RPS spinner did not start\n",
+ engine->name);
+ igt_spinner_end(&spin);
+ engine_heartbeat_enable(engine, saved_heartbeat);
+ intel_gt_set_wedged(engine->gt);
+ err = -EIO;
+ break;
+ }
+
+ max.freq = rps->max_freq;
+ max.power = measure_power_at(rps, &max.freq);
+
+ min.freq = rps->min_freq;
+ min.power = measure_power_at(rps, &min.freq);
+
+ igt_spinner_end(&spin);
+ engine_heartbeat_enable(engine, saved_heartbeat);
+
+ pr_info("%s: min:%llumW @ %uMHz, max:%llumW @ %uMHz\n",
+ engine->name,
+ min.power, intel_gpu_freq(rps, min.freq),
+ max.power, intel_gpu_freq(rps, max.freq));
+
+ if (10 * min.freq >= 9 * max.freq) {
+			pr_notice("Could not control frequency, ran at [%d:%uMHz, %d:%uMHz]\n",
+ min.freq, intel_gpu_freq(rps, min.freq),
+ max.freq, intel_gpu_freq(rps, max.freq));
+ continue;
+ }
+
+ if (11 * min.power > 10 * max.power) {
+ pr_err("%s: did not conserve power when setting lower frequency!\n",
+ engine->name);
+ err = -EINVAL;
+ break;
+ }
+
+ if (igt_flush_test(gt->i915)) {
+ err = -EIO;
+ break;
+ }
+ }
+
+ igt_spinner_fini(&spin);
+
+ intel_gt_pm_wait_for_idle(gt);
+ rps->work.func = saved_work;
+
+ return err;
+}
+
+int live_rps_dynamic(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_rps *rps = &gt->rps;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct igt_spinner spin;
+ int err = 0;
+
+ /*
+	 * We've looked at the basics, and have established that we
+ * can change the clock frequency and that the HW will generate
+ * interrupts based on load. Now we check how we integrate those
+ * moving parts into dynamic reclocking based on load.
+ */
+
+ if (!intel_rps_is_enabled(rps))
+ return 0;
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ for_each_engine(engine, gt, id) {
+ struct i915_request *rq;
+ struct {
+ ktime_t dt;
+ u8 freq;
+ } min, max;
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ intel_gt_pm_wait_for_idle(gt);
+ GEM_BUG_ON(intel_rps_is_active(rps));
+ rps->cur_freq = rps->min_freq;
+
+ intel_engine_pm_get(engine);
+ intel_rc6_disable(&gt->rc6);
+ GEM_BUG_ON(rps->last_freq != rps->min_freq);
+
+ rq = igt_spinner_create_request(&spin,
+ engine->kernel_context,
+ MI_NOOP);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err;
+ }
+
+ i915_request_add(rq);
+
+ max.dt = ktime_get();
+ max.freq = wait_for_freq(rps, rps->max_freq, 500);
+ max.dt = ktime_sub(ktime_get(), max.dt);
+
+ igt_spinner_end(&spin);
+
+ min.dt = ktime_get();
+ min.freq = wait_for_freq(rps, rps->min_freq, 2000);
+ min.dt = ktime_sub(ktime_get(), min.dt);
+
+ pr_info("%s: dynamically reclocked to %u:%uMHz while busy in %lluns, and %u:%uMHz while idle in %lluns\n",
+ engine->name,
+ max.freq, intel_gpu_freq(rps, max.freq),
+ ktime_to_ns(max.dt),
+ min.freq, intel_gpu_freq(rps, min.freq),
+ ktime_to_ns(min.dt));
+ if (min.freq >= max.freq) {
+			pr_err("%s: dynamic reclocking of spinner failed!\n",
+ engine->name);
+ err = -EINVAL;
+ }
+
+err:
+ intel_rc6_enable(&gt->rc6);
+ intel_engine_pm_put(engine);
+
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+ if (err)
+ break;
+ }
+
+ igt_spinner_fini(&spin);
+
+ return err;
+}
diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.h b/drivers/gpu/drm/i915/gt/selftest_rps.h
new file mode 100644
index 000000000000..6e82a631cfa1
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_rps.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef SELFTEST_RPS_H
+#define SELFTEST_RPS_H
+
+int live_rps_control(void *arg);
+int live_rps_clock_interval(void *arg);
+int live_rps_frequency_cs(void *arg);
+int live_rps_frequency_srm(void *arg);
+int live_rps_power(void *arg);
+int live_rps_interrupt(void *arg);
+int live_rps_dynamic(void *arg);
+
+#endif /* SELFTEST_RPS_H */
diff --git a/drivers/gpu/drm/i915/gt/shmem_utils.c b/drivers/gpu/drm/i915/gt/shmem_utils.c
new file mode 100644
index 000000000000..43c7acbdc79d
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/shmem_utils.c
@@ -0,0 +1,173 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/shmem_fs.h>
+
+#include "gem/i915_gem_object.h"
+#include "shmem_utils.h"
+
+struct file *shmem_create_from_data(const char *name, void *data, size_t len)
+{
+ struct file *file;
+ int err;
+
+ file = shmem_file_setup(name, PAGE_ALIGN(len), VM_NORESERVE);
+ if (IS_ERR(file))
+ return file;
+
+ err = shmem_write(file, 0, data, len);
+ if (err) {
+ fput(file);
+ return ERR_PTR(err);
+ }
+
+ return file;
+}
+
+struct file *shmem_create_from_object(struct drm_i915_gem_object *obj)
+{
+ struct file *file;
+ void *ptr;
+
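+	/*
+	 * If the object is already shmem backed, reuse its file by taking an
+	 * extra reference instead of copying the contents into a new one.
+	 */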
+ if (obj->ops == &i915_gem_shmem_ops) {
+ file = obj->base.filp;
+ atomic_long_inc(&file->f_count);
+ return file;
+ }
+
+ ptr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(ptr))
+ return ERR_CAST(ptr);
+
+ file = shmem_create_from_data("", ptr, obj->base.size);
+ i915_gem_object_unpin_map(obj);
+
+ return file;
+}
+
+static size_t shmem_npte(struct file *file)
+{
+ return file->f_mapping->host->i_size >> PAGE_SHIFT;
+}
+
+static void __shmem_unpin_map(struct file *file, void *ptr, size_t n_pte)
+{
+ unsigned long pfn;
+
+ vunmap(ptr);
+
+ for (pfn = 0; pfn < n_pte; pfn++) {
+ struct page *page;
+
+ page = shmem_read_mapping_page_gfp(file->f_mapping, pfn,
+ GFP_KERNEL);
+ if (!WARN_ON(IS_ERR(page))) {
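+			/*
+			 * Drop both the lookup reference from above and the
+			 * original pin reference taken in shmem_pin_map().
+			 */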
+ put_page(page);
+ put_page(page);
+ }
+ }
+}
+
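+/*
+ * Pin every page of the shmem file and return a contiguous kernel mapping of
+ * them. The mapping (and the extra page references) persists until
+ * shmem_unpin_map(), and the backing pages are marked unevictable while
+ * pinned.
+ */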
+void *shmem_pin_map(struct file *file)
+{
+ const size_t n_pte = shmem_npte(file);
+ pte_t *stack[32], **ptes, **mem;
+ struct vm_struct *area;
+ unsigned long pfn;
+
+ mem = stack;
+ if (n_pte > ARRAY_SIZE(stack)) {
+ mem = kvmalloc_array(n_pte, sizeof(*mem), GFP_KERNEL);
+ if (!mem)
+ return NULL;
+ }
+
+ area = alloc_vm_area(n_pte << PAGE_SHIFT, mem);
+ if (!area) {
+ if (mem != stack)
+ kvfree(mem);
+ return NULL;
+ }
+
+ ptes = mem;
+ for (pfn = 0; pfn < n_pte; pfn++) {
+ struct page *page;
+
+ page = shmem_read_mapping_page_gfp(file->f_mapping, pfn,
+ GFP_KERNEL);
+ if (IS_ERR(page))
+ goto err_page;
+
+ **ptes++ = mk_pte(page, PAGE_KERNEL);
+ }
+
+ if (mem != stack)
+ kvfree(mem);
+
+ mapping_set_unevictable(file->f_mapping);
+ return area->addr;
+
+err_page:
+ if (mem != stack)
+ kvfree(mem);
+
+ __shmem_unpin_map(file, area->addr, pfn);
+ return NULL;
+}
+
+void shmem_unpin_map(struct file *file, void *ptr)
+{
+ mapping_clear_unevictable(file->f_mapping);
+ __shmem_unpin_map(file, ptr, shmem_npte(file));
+}
+
+static int __shmem_rw(struct file *file, loff_t off,
+ void *ptr, size_t len,
+ bool write)
+{
+ unsigned long pfn;
+
+ for (pfn = off >> PAGE_SHIFT; len; pfn++) {
+ unsigned int this =
+ min_t(size_t, PAGE_SIZE - offset_in_page(off), len);
+ struct page *page;
+ void *vaddr;
+
+ page = shmem_read_mapping_page_gfp(file->f_mapping, pfn,
+ GFP_KERNEL);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+
+ vaddr = kmap(page);
+ if (write)
+ memcpy(vaddr + offset_in_page(off), ptr, this);
+ else
+ memcpy(ptr, vaddr + offset_in_page(off), this);
+ kunmap(page);
+ put_page(page);
+
+ len -= this;
+ ptr += this;
+ off = 0;
+ }
+
+ return 0;
+}
+
+int shmem_read(struct file *file, loff_t off, void *dst, size_t len)
+{
+ return __shmem_rw(file, off, dst, len, false);
+}
+
+int shmem_write(struct file *file, loff_t off, void *src, size_t len)
+{
+ return __shmem_rw(file, off, src, len, true);
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "st_shmem_utils.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/shmem_utils.h b/drivers/gpu/drm/i915/gt/shmem_utils.h
new file mode 100644
index 000000000000..c1669170c351
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/shmem_utils.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef SHMEM_UTILS_H
+#define SHMEM_UTILS_H
+
+#include <linux/types.h>
+
+struct drm_i915_gem_object;
+struct file;
+
+struct file *shmem_create_from_data(const char *name, void *data, size_t len);
+struct file *shmem_create_from_object(struct drm_i915_gem_object *obj);
+
+void *shmem_pin_map(struct file *file);
+void shmem_unpin_map(struct file *file, void *ptr);
+
+int shmem_read(struct file *file, loff_t off, void *dst, size_t len);
+int shmem_write(struct file *file, loff_t off, void *src, size_t len);
+
+#endif /* SHMEM_UTILS_H */
diff --git a/drivers/gpu/drm/i915/gt/st_shmem_utils.c b/drivers/gpu/drm/i915/gt/st_shmem_utils.c
new file mode 100644
index 000000000000..b279fe88b70e
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/st_shmem_utils.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+/* Just a quick and casual check of the shmem_utils API */
+
+static int igt_shmem_basic(void *ignored)
+{
+ u32 datum = 0xdeadbeef, result;
+ struct file *file;
+ u32 *map;
+ int err;
+
+ file = shmem_create_from_data("mock", &datum, sizeof(datum));
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ result = 0;
+ err = shmem_read(file, 0, &result, sizeof(result));
+ if (err)
+ goto out_file;
+
+ if (result != datum) {
+ pr_err("Incorrect read back from shmemfs: %x != %x\n",
+ result, datum);
+ err = -EINVAL;
+ goto out_file;
+ }
+
+ result = 0xc0ffee;
+ err = shmem_write(file, 0, &result, sizeof(result));
+ if (err)
+ goto out_file;
+
+ map = shmem_pin_map(file);
+ if (!map) {
+ err = -ENOMEM;
+ goto out_file;
+ }
+
+ if (*map != result) {
+ pr_err("Incorrect read back via mmap of last write: %x != %x\n",
+ *map, result);
+ err = -EINVAL;
+ goto out_map;
+ }
+
+out_map:
+ shmem_unpin_map(file, map);
+out_file:
+ fput(file);
+ return err;
+}
+
+int shmem_utils_mock_selftests(void)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_shmem_basic),
+ };
+
+ return i915_subtests(tests, NULL);
+}
diff --git a/drivers/gpu/drm/i915/gt/sysfs_engines.c b/drivers/gpu/drm/i915/gt/sysfs_engines.c
index 8f9b2f33dbaf..535cc1169e54 100644
--- a/drivers/gpu/drm/i915/gt/sysfs_engines.c
+++ b/drivers/gpu/drm/i915/gt/sysfs_engines.c
@@ -192,6 +192,17 @@ static struct kobj_attribute max_spin_attr =
__ATTR(max_busywait_duration_ns, 0644, max_spin_show, max_spin_store);
static ssize_t
+max_spin_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+ return sprintf(buf, "%lu\n", engine->defaults.max_busywait_duration_ns);
+}
+
+static struct kobj_attribute max_spin_def =
+__ATTR(max_busywait_duration_ns, 0444, max_spin_default, NULL);
+
+static ssize_t
timeslice_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count)
{
@@ -234,6 +245,17 @@ static struct kobj_attribute timeslice_duration_attr =
__ATTR(timeslice_duration_ms, 0644, timeslice_show, timeslice_store);
static ssize_t
+timeslice_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+ return sprintf(buf, "%lu\n", engine->defaults.timeslice_duration_ms);
+}
+
+static struct kobj_attribute timeslice_duration_def =
+__ATTR(timeslice_duration_ms, 0444, timeslice_default, NULL);
+
+static ssize_t
stop_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count)
{
@@ -273,6 +295,17 @@ static struct kobj_attribute stop_timeout_attr =
__ATTR(stop_timeout_ms, 0644, stop_show, stop_store);
static ssize_t
+stop_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+ return sprintf(buf, "%lu\n", engine->defaults.stop_timeout_ms);
+}
+
+static struct kobj_attribute stop_timeout_def =
+__ATTR(stop_timeout_ms, 0444, stop_default, NULL);
+
+static ssize_t
preempt_timeout_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count)
{
@@ -317,6 +350,18 @@ static struct kobj_attribute preempt_timeout_attr =
__ATTR(preempt_timeout_ms, 0644, preempt_timeout_show, preempt_timeout_store);
static ssize_t
+preempt_timeout_default(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+ return sprintf(buf, "%lu\n", engine->defaults.preempt_timeout_ms);
+}
+
+static struct kobj_attribute preempt_timeout_def =
+__ATTR(preempt_timeout_ms, 0444, preempt_timeout_default, NULL);
+
+static ssize_t
heartbeat_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count)
{
@@ -359,6 +404,17 @@ heartbeat_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
static struct kobj_attribute heartbeat_interval_attr =
__ATTR(heartbeat_interval_ms, 0644, heartbeat_show, heartbeat_store);
+static ssize_t
+heartbeat_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+ return sprintf(buf, "%lu\n", engine->defaults.heartbeat_interval_ms);
+}
+
+static struct kobj_attribute heartbeat_interval_def =
+__ATTR(heartbeat_interval_ms, 0444, heartbeat_default, NULL);
+
static void kobj_engine_release(struct kobject *kobj)
{
kfree(kobj);
@@ -390,6 +446,42 @@ kobj_engine(struct kobject *dir, struct intel_engine_cs *engine)
return &ke->base;
}
+static void add_defaults(struct kobj_engine *parent)
+{
+ static const struct attribute *files[] = {
+ &max_spin_def.attr,
+ &stop_timeout_def.attr,
+#if CONFIG_DRM_I915_HEARTBEAT_INTERVAL
+ &heartbeat_interval_def.attr,
+#endif
+ NULL
+ };
+ struct kobj_engine *ke;
+
+ ke = kzalloc(sizeof(*ke), GFP_KERNEL);
+ if (!ke)
+ return;
+
+ kobject_init(&ke->base, &kobj_engine_type);
+ ke->engine = parent->engine;
+
+ if (kobject_add(&ke->base, &parent->base, "%s", ".defaults")) {
+ kobject_put(&ke->base);
+ return;
+ }
+
+ if (sysfs_create_files(&ke->base, files))
+ return;
+
+ if (intel_engine_has_timeslices(ke->engine) &&
+ sysfs_create_file(&ke->base, &timeslice_duration_def.attr))
+ return;
+
+ if (intel_engine_has_preempt_reset(ke->engine) &&
+ sysfs_create_file(&ke->base, &preempt_timeout_def.attr))
+ return;
+}
+
void intel_engines_add_sysfs(struct drm_i915_private *i915)
{
static const struct attribute *files[] = {
@@ -433,6 +525,8 @@ void intel_engines_add_sysfs(struct drm_i915_private *i915)
sysfs_create_file(kobj, &preempt_timeout_attr.attr))
goto err_engine;
+ add_defaults(container_of(kobj, struct kobj_engine, base));
+
if (0) {
err_object:
kobject_put(kobj);
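
The hunk above adds a read-only ".defaults" sub-directory per engine so userspace can recover the driver's original tunables after overriding them. A minimal sketch of a tool restoring one parameter from its default, assuming the conventional /sys/class/drm/card0/engine/<engine>/ layout (the path and helper names are illustrative, not part of the patch):

#include <stdio.h>

/* Illustrative only: read the immutable value exposed under .defaults/
 * and write it back to the tunable attribute of the same name. */
static int restore_engine_default(const char *engine_dir, const char *param)
{
	char path[256], value[64];
	FILE *f;

	snprintf(path, sizeof(path), "%s/.defaults/%s", engine_dir, param);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (!fgets(value, sizeof(value), f)) {
		fclose(f);
		return -1;
	}
	fclose(f);

	snprintf(path, sizeof(path), "%s/%s", engine_dir, param);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fputs(value, f);
	fclose(f);
	return 0;
}

int main(void)
{
	/* engine directory name is an assumption about a typical system */
	return restore_engine_default("/sys/class/drm/card0/engine/rcs0",
				      "timeslice_duration_ms");
}
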
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index 819f09ef51fc..861657897c0f 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -169,7 +169,7 @@ void intel_guc_init_early(struct intel_guc *guc)
{
struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
- intel_guc_fw_init_early(guc);
+ intel_uc_fw_init_early(&guc->fw, INTEL_UC_FW_TYPE_GUC);
intel_guc_ct_init_early(&guc->ct);
intel_guc_log_init_early(&guc->log);
intel_guc_submission_init_early(guc);
@@ -723,3 +723,47 @@ int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
return 0;
}
+
+/**
+ * intel_guc_load_status - dump information about GuC load status
+ * @guc: the GuC
+ * @p: the &drm_printer
+ *
+ * Pretty printer for GuC load status.
+ */
+void intel_guc_load_status(struct intel_guc *guc, struct drm_printer *p)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+ struct intel_uncore *uncore = gt->uncore;
+ intel_wakeref_t wakeref;
+
+ if (!intel_guc_is_supported(guc)) {
+ drm_printf(p, "GuC not supported\n");
+ return;
+ }
+
+ if (!intel_guc_is_wanted(guc)) {
+ drm_printf(p, "GuC disabled\n");
+ return;
+ }
+
+ intel_uc_fw_dump(&guc->fw, p);
+
+ with_intel_runtime_pm(uncore->rpm, wakeref) {
+ u32 status = intel_uncore_read(uncore, GUC_STATUS);
+ u32 i;
+
+ drm_printf(p, "\nGuC status 0x%08x:\n", status);
+ drm_printf(p, "\tBootrom status = 0x%x\n",
+ (status & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
+ drm_printf(p, "\tuKernel status = 0x%x\n",
+ (status & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
+ drm_printf(p, "\tMIA Core status = 0x%x\n",
+ (status & GS_MIA_MASK) >> GS_MIA_SHIFT);
+ drm_puts(p, "\nScratch registers:\n");
+ for (i = 0; i < 16; i++) {
+ drm_printf(p, "\t%2d: \t0x%x\n",
+ i, intel_uncore_read(uncore, SOFT_SCRATCH(i)));
+ }
+ }
+}
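
intel_guc_load_status() only formats into a drm_printer, so each caller chooses the sink. A hedged sketch of a hypothetical dmesg dump; the debugfs consumer added below uses drm_seq_file_printer() instead:

/* Hypothetical helper, not part of the patch: route the GuC load status
 * pretty printer to the kernel log via drm_info_printer(). */
static void guc_dump_load_status_to_log(struct intel_guc *guc)
{
	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
	struct drm_printer p = drm_info_printer(i915->drm.dev);

	intel_guc_load_status(guc, &p);
}
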
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index 4594ccbeaa34..e84ab67b317d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -74,6 +74,11 @@ struct intel_guc {
struct mutex send_mutex;
};
+static inline struct intel_guc *log_to_guc(struct intel_guc_log *log)
+{
+ return container_of(log, struct intel_guc, log);
+}
+
static
inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
{
@@ -190,4 +195,6 @@ static inline void intel_guc_disable_msg(struct intel_guc *guc, u32 mask)
int intel_guc_reset_engine(struct intel_guc *guc,
struct intel_engine_cs *engine);
+void intel_guc_load_status(struct intel_guc *guc, struct drm_printer *p);
+
#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.c
new file mode 100644
index 000000000000..fe7cb7b29a1e
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.c
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <drm/drm_print.h>
+
+#include "gt/debugfs_gt.h"
+#include "intel_guc.h"
+#include "intel_guc_debugfs.h"
+#include "intel_guc_log_debugfs.h"
+
+static int guc_info_show(struct seq_file *m, void *data)
+{
+ struct intel_guc *guc = m->private;
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ if (!intel_guc_is_supported(guc))
+ return -ENODEV;
+
+ intel_guc_load_status(guc, &p);
+ drm_puts(&p, "\n");
+ intel_guc_log_info(&guc->log, &p);
+
+ /* Add more as required ... */
+
+ return 0;
+}
+DEFINE_GT_DEBUGFS_ATTRIBUTE(guc_info);
+
+void intel_guc_debugfs_register(struct intel_guc *guc, struct dentry *root)
+{
+ static const struct debugfs_gt_file files[] = {
+ { "guc_info", &guc_info_fops, NULL },
+ };
+
+ if (!intel_guc_is_supported(guc))
+ return;
+
+ intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), guc);
+ intel_guc_log_debugfs_register(&guc->log, root);
+}
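
For readers without gt/debugfs_gt.h at hand, DEFINE_GT_DEBUGFS_ATTRIBUTE(guc_info) is expected to provide roughly the usual single_open() boilerplate sketched below (an assumption about the macro's shape, not copied from this patch):

/* Roughly what DEFINE_GT_DEBUGFS_ATTRIBUTE(name) supplies: a seq_file
 * wrapper around name##_show() plus a name##_fops to register it with. */
static int guc_info_open(struct inode *inode, struct file *file)
{
	return single_open(file, guc_info_show, inode->i_private);
}

static const struct file_operations guc_info_fops = {
	.owner = THIS_MODULE,
	.open = guc_info_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
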
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.h
new file mode 100644
index 000000000000..424c26665cf1
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef DEBUGFS_GUC_H
+#define DEBUGFS_GUC_H
+
+struct intel_guc;
+struct dentry;
+
+void intel_guc_debugfs_register(struct intel_guc *guc, struct dentry *root);
+
+#endif /* DEBUGFS_GUC_H */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
index 3a1c47d600ea..d4a87f4c9421 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
@@ -13,20 +13,6 @@
#include "intel_guc_fw.h"
#include "i915_drv.h"
-/**
- * intel_guc_fw_init_early() - initializes GuC firmware struct
- * @guc: intel_guc struct
- *
- * On platforms with GuC selects firmware for uploading
- */
-void intel_guc_fw_init_early(struct intel_guc *guc)
-{
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
-
- intel_uc_fw_init_early(&guc->fw, INTEL_UC_FW_TYPE_GUC, HAS_GT_UC(i915),
- INTEL_INFO(i915)->platform, INTEL_REVID(i915));
-}
-
static void guc_prepare_xfer(struct intel_uncore *uncore)
{
u32 shim_flags = GUC_DISABLE_SRAM_INIT_TO_ZEROES |
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.h
index b5ab639d7259..0b4d2a9c9435 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.h
@@ -8,7 +8,6 @@
struct intel_guc;
-void intel_guc_fw_init_early(struct intel_guc *guc);
int intel_guc_fw_upload(struct intel_guc *guc);
#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
index caed0d57e704..fb10f3597ea5 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
@@ -55,11 +55,6 @@ static int guc_action_control_log(struct intel_guc *guc, bool enable,
return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
-static inline struct intel_guc *log_to_guc(struct intel_guc_log *log)
-{
- return container_of(log, struct intel_guc, log);
-}
-
static void guc_log_enable_flush_events(struct intel_guc_log *log)
{
intel_guc_enable_msg(log_to_guc(log),
@@ -672,3 +667,95 @@ void intel_guc_log_handle_flush_event(struct intel_guc_log *log)
{
queue_work(system_highpri_wq, &log->relay.flush_work);
}
+
+static const char *
+stringify_guc_log_type(enum guc_log_buffer_type type)
+{
+ switch (type) {
+ case GUC_ISR_LOG_BUFFER:
+ return "ISR";
+ case GUC_DPC_LOG_BUFFER:
+ return "DPC";
+ case GUC_CRASH_DUMP_LOG_BUFFER:
+ return "CRASH";
+ default:
+ MISSING_CASE(type);
+ }
+
+ return "";
+}
+
+/**
+ * intel_guc_log_info - dump information about GuC log relay
+ * @log: the GuC log
+ * @p: the &drm_printer
+ *
+ * Pretty printer for GuC log info
+ */
+void intel_guc_log_info(struct intel_guc_log *log, struct drm_printer *p)
+{
+ enum guc_log_buffer_type type;
+
+ if (!intel_guc_log_relay_created(log)) {
+ drm_puts(p, "GuC log relay not created\n");
+ return;
+ }
+
+ drm_puts(p, "GuC logging stats:\n");
+
+ drm_printf(p, "\tRelay full count: %u\n", log->relay.full_count);
+
+ for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
+ drm_printf(p, "\t%s:\tflush count %10u, overflow count %10u\n",
+ stringify_guc_log_type(type),
+ log->stats[type].flush,
+ log->stats[type].sampled_overflow);
+ }
+}
+
+/**
+ * intel_guc_log_dump - dump the contents of the GuC log
+ * @log: the GuC log
+ * @p: the &drm_printer
+ * @dump_load_err: dump the log saved on GuC load error
+ *
+ * Pretty printer for the GuC log
+ */
+int intel_guc_log_dump(struct intel_guc_log *log, struct drm_printer *p,
+ bool dump_load_err)
+{
+ struct intel_guc *guc = log_to_guc(log);
+ struct intel_uc *uc = container_of(guc, struct intel_uc, guc);
+ struct drm_i915_gem_object *obj = NULL;
+ u32 *map;
+ int i = 0;
+
+ if (!intel_guc_is_supported(guc))
+ return -ENODEV;
+
+ if (dump_load_err)
+ obj = uc->load_err_log;
+ else if (guc->log.vma)
+ obj = guc->log.vma->obj;
+
+ if (!obj)
+ return 0;
+
+ map = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(map)) {
+ DRM_DEBUG("Failed to pin object\n");
+ drm_puts(p, "(log data inaccessible)\n");
+ return PTR_ERR(map);
+ }
+
+ for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
+ drm_printf(p, "0x%08x 0x%08x 0x%08x 0x%08x\n",
+ *(map + i), *(map + i + 1),
+ *(map + i + 2), *(map + i + 3));
+
+ drm_puts(p, "\n");
+
+ i915_gem_object_unpin_map(obj);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
index c252c022c5fc..11fccd0b2294 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
@@ -79,4 +79,8 @@ static inline u32 intel_guc_log_get_level(struct intel_guc_log *log)
return log->level;
}
+void intel_guc_log_info(struct intel_guc_log *log, struct drm_printer *p);
+int intel_guc_log_dump(struct intel_guc_log *log, struct drm_printer *p,
+ bool dump_load_err);
+
#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.c
new file mode 100644
index 000000000000..129e0cf7dfe2
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/fs.h>
+#include <drm/drm_print.h>
+
+#include "gt/debugfs_gt.h"
+#include "intel_guc.h"
+#include "intel_guc_log.h"
+#include "intel_guc_log_debugfs.h"
+
+static int guc_log_dump_show(struct seq_file *m, void *data)
+{
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ return intel_guc_log_dump(m->private, &p, false);
+}
+DEFINE_GT_DEBUGFS_ATTRIBUTE(guc_log_dump);
+
+static int guc_load_err_log_dump_show(struct seq_file *m, void *data)
+{
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ return intel_guc_log_dump(m->private, &p, true);
+}
+DEFINE_GT_DEBUGFS_ATTRIBUTE(guc_load_err_log_dump);
+
+static int guc_log_level_get(void *data, u64 *val)
+{
+ struct intel_guc_log *log = data;
+
+ if (!intel_guc_is_used(log_to_guc(log)))
+ return -ENODEV;
+
+ *val = intel_guc_log_get_level(log);
+
+ return 0;
+}
+
+static int guc_log_level_set(void *data, u64 val)
+{
+ struct intel_guc_log *log = data;
+
+ if (!intel_guc_is_used(log_to_guc(log)))
+ return -ENODEV;
+
+ return intel_guc_log_set_level(log, val);
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(guc_log_level_fops,
+ guc_log_level_get, guc_log_level_set,
+ "%lld\n");
+
+static int guc_log_relay_open(struct inode *inode, struct file *file)
+{
+ struct intel_guc_log *log = inode->i_private;
+
+ if (!intel_guc_is_ready(log_to_guc(log)))
+ return -ENODEV;
+
+ file->private_data = log;
+
+ return intel_guc_log_relay_open(log);
+}
+
+static ssize_t
+guc_log_relay_write(struct file *filp,
+ const char __user *ubuf,
+ size_t cnt,
+ loff_t *ppos)
+{
+ struct intel_guc_log *log = filp->private_data;
+ int val;
+ int ret;
+
+ ret = kstrtoint_from_user(ubuf, cnt, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Enable and start the GuC log relay when the written value is 1.
+ * Flush the log relay for any other value.
+ */
+ if (val == 1)
+ ret = intel_guc_log_relay_start(log);
+ else
+ intel_guc_log_relay_flush(log);
+
+ return ret ?: cnt;
+}
+
+static int guc_log_relay_release(struct inode *inode, struct file *file)
+{
+ struct intel_guc_log *log = inode->i_private;
+
+ intel_guc_log_relay_close(log);
+ return 0;
+}
+
+static const struct file_operations guc_log_relay_fops = {
+ .owner = THIS_MODULE,
+ .open = guc_log_relay_open,
+ .write = guc_log_relay_write,
+ .release = guc_log_relay_release,
+};
+
+void intel_guc_log_debugfs_register(struct intel_guc_log *log,
+ struct dentry *root)
+{
+ static const struct debugfs_gt_file files[] = {
+ { "guc_log_dump", &guc_log_dump_fops, NULL },
+ { "guc_load_err_log_dump", &guc_load_err_log_dump_fops, NULL },
+ { "guc_log_level", &guc_log_level_fops, NULL },
+ { "guc_log_relay", &guc_log_relay_fops, NULL },
+ };
+
+ if (!intel_guc_is_supported(log_to_guc(log)))
+ return;
+
+ intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), log);
+}
+
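
The guc_log_relay write handler above starts the relay when the written value is 1 and flushes it for any other value. A minimal userspace sketch driving it, assuming the files land under /sys/kernel/debug/dri/0/gt/uc/ (the path is an assumption, not defined by this patch):

#include <stdio.h>

/* Illustrative only: start the GuC log relay, later flush it. */
static int write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	fclose(f);
	return 0;
}

int main(void)
{
	const char *relay = "/sys/kernel/debug/dri/0/gt/uc/guc_log_relay";

	write_str(relay, "1");	/* enable and start relaying */
	/* ... collect data from the relay channel ... */
	write_str(relay, "0");	/* any other value flushes the relay */
	return 0;
}
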
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.h
new file mode 100644
index 000000000000..e8900e3d74ea
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef DEBUGFS_GUC_LOG_H
+#define DEBUGFS_GUC_LOG_H
+
+struct intel_guc_log;
+struct dentry;
+
+void intel_guc_log_debugfs_register(struct intel_guc_log *log,
+ struct dentry *root);
+
+#endif /* DEBUGFS_GUC_LOG_H */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index aa6d56e25a10..94eb63f309ce 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -258,7 +258,7 @@ static void guc_submit(struct intel_engine_cs *engine,
static inline int rq_prio(const struct i915_request *rq)
{
- return rq->sched.attr.priority | __NO_PREEMPTION;
+ return rq->sched.attr.priority;
}
static struct i915_request *schedule_in(struct i915_request *rq, int idx)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
index a74b65694512..65eeb44b397d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
@@ -41,7 +41,7 @@ void intel_huc_init_early(struct intel_huc *huc)
{
struct drm_i915_private *i915 = huc_to_gt(huc)->i915;
- intel_huc_fw_init_early(huc);
+ intel_uc_fw_init_early(&huc->fw, INTEL_UC_FW_TYPE_HUC);
if (INTEL_GEN(i915) >= 11) {
huc->status.reg = GEN11_HUC_KERNEL_LOAD_INFO;
@@ -200,9 +200,13 @@ fail:
* This function reads status register to verify if HuC
* firmware was successfully loaded.
*
- * Returns: 1 if HuC firmware is loaded and verified,
- * 0 if HuC firmware is not loaded and -ENODEV if HuC
- * is not present on this platform.
+ * Returns:
+ * * -ENODEV if HuC is not present on this platform,
+ * * -EOPNOTSUPP if HuC firmware is disabled,
+ * * -ENOPKG if HuC firmware was not installed,
+ * * -ENOEXEC if HuC firmware is invalid or mismatched,
+ * * 0 if HuC firmware is not running,
+ * * 1 if HuC firmware is authenticated and running.
*/
int intel_huc_check_status(struct intel_huc *huc)
{
@@ -210,11 +214,50 @@ int intel_huc_check_status(struct intel_huc *huc)
intel_wakeref_t wakeref;
u32 status = 0;
- if (!intel_huc_is_supported(huc))
+ switch (__intel_uc_fw_status(&huc->fw)) {
+ case INTEL_UC_FIRMWARE_NOT_SUPPORTED:
return -ENODEV;
+ case INTEL_UC_FIRMWARE_DISABLED:
+ return -EOPNOTSUPP;
+ case INTEL_UC_FIRMWARE_MISSING:
+ return -ENOPKG;
+ case INTEL_UC_FIRMWARE_ERROR:
+ return -ENOEXEC;
+ default:
+ break;
+ }
with_intel_runtime_pm(gt->uncore->rpm, wakeref)
status = intel_uncore_read(gt->uncore, huc->status.reg);
return (status & huc->status.mask) == huc->status.value;
}
+
+/**
+ * intel_huc_load_status - dump information about HuC load status
+ * @huc: the HuC
+ * @p: the &drm_printer
+ *
+ * Pretty printer for HuC load status.
+ */
+void intel_huc_load_status(struct intel_huc *huc, struct drm_printer *p)
+{
+ struct intel_gt *gt = huc_to_gt(huc);
+ intel_wakeref_t wakeref;
+
+ if (!intel_huc_is_supported(huc)) {
+ drm_printf(p, "HuC not supported\n");
+ return;
+ }
+
+ if (!intel_huc_is_wanted(huc)) {
+ drm_printf(p, "HuC disabled\n");
+ return;
+ }
+
+ intel_uc_fw_dump(&huc->fw, p);
+
+ with_intel_runtime_pm(gt->uncore->rpm, wakeref)
+ drm_printf(p, "HuC status: 0x%08x\n",
+ intel_uncore_read(gt->uncore, huc->status.reg));
+}
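
With the change above, intel_huc_check_status() distinguishes several failure modes instead of returning a plain -ENODEV. Userspace typically observes this through the I915_PARAM_HUC_STATUS getparam; a hedged sketch of such a query (ioctl plumbing only, with the error mapping documented in the kernel-doc above; the device node name is an assumption):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <drm/i915_drm.h>

/* Illustrative only: query HuC status; a negative errno from the ioctl now
 * carries the more specific codes listed in the kernel-doc above. */
int main(void)
{
	struct drm_i915_getparam gp = { 0 };
	int value = 0, fd;

	fd = open("/dev/dri/renderD128", O_RDWR);
	if (fd < 0)
		return 1;

	gp.param = I915_PARAM_HUC_STATUS;
	gp.value = &value;
	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
		perror("I915_PARAM_HUC_STATUS");
	else
		printf("HuC %s\n", value ? "authenticated and running"
					 : "not running");

	close(fd);
	return 0;
}
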
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.h b/drivers/gpu/drm/i915/gt/uc/intel_huc.h
index a40b9cfc6c22..daee43b661d4 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.h
@@ -57,4 +57,6 @@ static inline bool intel_huc_is_authenticated(struct intel_huc *huc)
return intel_uc_fw_is_running(&huc->fw);
}
+void intel_huc_load_status(struct intel_huc *huc, struct drm_printer *p);
+
#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_debugfs.c b/drivers/gpu/drm/i915/gt/uc/intel_huc_debugfs.c
new file mode 100644
index 000000000000..5733c15fd123
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_debugfs.c
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <drm/drm_print.h>
+
+#include "gt/debugfs_gt.h"
+#include "intel_huc.h"
+#include "intel_huc_debugfs.h"
+
+static int huc_info_show(struct seq_file *m, void *data)
+{
+ struct intel_huc *huc = m->private;
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ if (!intel_huc_is_supported(huc))
+ return -ENODEV;
+
+ intel_huc_load_status(huc, &p);
+
+ return 0;
+}
+DEFINE_GT_DEBUGFS_ATTRIBUTE(huc_info);
+
+void intel_huc_debugfs_register(struct intel_huc *huc, struct dentry *root)
+{
+ static const struct debugfs_gt_file files[] = {
+ { "huc_info", &huc_info_fops, NULL },
+ };
+
+ if (!intel_huc_is_supported(huc))
+ return;
+
+ intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), huc);
+}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_debugfs.h b/drivers/gpu/drm/i915/gt/uc/intel_huc_debugfs.h
new file mode 100644
index 000000000000..be79e992f976
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_debugfs.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef DEBUGFS_HUC_H
+#define DEBUGFS_HUC_H
+
+struct intel_huc;
+struct dentry;
+
+void intel_huc_debugfs_register(struct intel_huc *huc, struct dentry *root);
+
+#endif /* DEBUGFS_HUC_H */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
index 9cdf4cbe691c..e5ef509c70e8 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
@@ -8,23 +8,6 @@
#include "i915_drv.h"
/**
- * intel_huc_fw_init_early() - initializes HuC firmware struct
- * @huc: intel_huc struct
- *
- * On platforms with HuC selects firmware for uploading
- */
-void intel_huc_fw_init_early(struct intel_huc *huc)
-{
- struct intel_gt *gt = huc_to_gt(huc);
- struct intel_uc *uc = &gt->uc;
- struct drm_i915_private *i915 = gt->i915;
-
- intel_uc_fw_init_early(&huc->fw, INTEL_UC_FW_TYPE_HUC,
- intel_uc_wants_guc(uc),
- INTEL_INFO(i915)->platform, INTEL_REVID(i915));
-}
-
-/**
* intel_huc_fw_upload() - load HuC uCode to device
* @huc: intel_huc structure
*
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.h
index b791269ce923..12f264ee3e0b 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.h
@@ -8,7 +8,6 @@
struct intel_huc;
-void intel_huc_fw_init_early(struct intel_huc *huc);
int intel_huc_fw_upload(struct intel_huc *huc);
#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index a4cbe06e06bd..f518fe05c6f9 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -45,12 +45,12 @@ static void __confirm_options(struct intel_uc *uc)
{
struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
- DRM_DEV_DEBUG_DRIVER(i915->drm.dev,
- "enable_guc=%d (guc:%s submission:%s huc:%s)\n",
- i915_modparams.enable_guc,
- yesno(intel_uc_wants_guc(uc)),
- yesno(intel_uc_wants_guc_submission(uc)),
- yesno(intel_uc_wants_huc(uc)));
+ drm_dbg(&i915->drm,
+ "enable_guc=%d (guc:%s submission:%s huc:%s)\n",
+ i915_modparams.enable_guc,
+ yesno(intel_uc_wants_guc(uc)),
+ yesno(intel_uc_wants_guc_submission(uc)),
+ yesno(intel_uc_wants_huc(uc)));
if (i915_modparams.enable_guc == -1)
return;
@@ -63,25 +63,25 @@ static void __confirm_options(struct intel_uc *uc)
}
if (!intel_uc_supports_guc(uc))
- dev_info(i915->drm.dev,
+ drm_info(&i915->drm,
"Incompatible option enable_guc=%d - %s\n",
i915_modparams.enable_guc, "GuC is not supported!");
if (i915_modparams.enable_guc & ENABLE_GUC_LOAD_HUC &&
!intel_uc_supports_huc(uc))
- dev_info(i915->drm.dev,
+ drm_info(&i915->drm,
"Incompatible option enable_guc=%d - %s\n",
i915_modparams.enable_guc, "HuC is not supported!");
if (i915_modparams.enable_guc & ENABLE_GUC_SUBMISSION &&
!intel_uc_supports_guc_submission(uc))
- dev_info(i915->drm.dev,
+ drm_info(&i915->drm,
"Incompatible option enable_guc=%d - %s\n",
i915_modparams.enable_guc, "GuC submission is N/A");
if (i915_modparams.enable_guc & ~(ENABLE_GUC_SUBMISSION |
ENABLE_GUC_LOAD_HUC))
- dev_info(i915->drm.dev,
+ drm_info(&i915->drm,
"Incompatible option enable_guc=%d - %s\n",
i915_modparams.enable_guc, "undocumented flag");
}
@@ -131,6 +131,13 @@ static void __uc_free_load_err_log(struct intel_uc *uc)
i915_gem_object_put(log);
}
+void intel_uc_driver_remove(struct intel_uc *uc)
+{
+ intel_uc_fini_hw(uc);
+ intel_uc_fini(uc);
+ __uc_free_load_err_log(uc);
+}
+
static inline bool guc_communication_enabled(struct intel_guc *guc)
{
return intel_guc_ct_enabled(&guc->ct);
@@ -311,8 +318,6 @@ static void __uc_fini(struct intel_uc *uc)
{
intel_huc_fini(&uc->huc);
intel_guc_fini(&uc->guc);
-
- __uc_free_load_err_log(uc);
}
static int __uc_sanitize(struct intel_uc *uc)
@@ -475,14 +480,14 @@ static int __uc_init_hw(struct intel_uc *uc)
if (intel_uc_uses_guc_submission(uc))
intel_guc_submission_enable(guc);
- dev_info(i915->drm.dev, "%s firmware %s version %u.%u %s:%s\n",
+ drm_info(&i915->drm, "%s firmware %s version %u.%u %s:%s\n",
intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_GUC), guc->fw.path,
guc->fw.major_ver_found, guc->fw.minor_ver_found,
"submission",
enableddisabled(intel_uc_uses_guc_submission(uc)));
if (intel_uc_uses_huc(uc)) {
- dev_info(i915->drm.dev, "%s firmware %s version %u.%u %s:%s\n",
+ drm_info(&i915->drm, "%s firmware %s version %u.%u %s:%s\n",
intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_HUC),
huc->fw.path,
huc->fw.major_ver_found, huc->fw.minor_ver_found,
@@ -503,7 +508,7 @@ err_out:
__uc_sanitize(uc);
if (!ret) {
- dev_notice(i915->drm.dev, "GuC is uninitialized\n");
+ drm_notice(&i915->drm, "GuC is uninitialized\n");
/* We want to run without GuC submission */
return 0;
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.h b/drivers/gpu/drm/i915/gt/uc/intel_uc.h
index 5ae7b50b7dc1..9c954c589edf 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.h
@@ -34,6 +34,7 @@ struct intel_uc {
void intel_uc_init_early(struct intel_uc *uc);
void intel_uc_driver_late_release(struct intel_uc *uc);
+void intel_uc_driver_remove(struct intel_uc *uc);
void intel_uc_init_mmio(struct intel_uc *uc);
void intel_uc_reset_prepare(struct intel_uc *uc);
void intel_uc_suspend(struct intel_uc *uc);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c
new file mode 100644
index 000000000000..9d16b784aa0d
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/debugfs.h>
+
+#include "intel_guc_debugfs.h"
+#include "intel_huc_debugfs.h"
+#include "intel_uc.h"
+#include "intel_uc_debugfs.h"
+
+void intel_uc_debugfs_register(struct intel_uc *uc, struct dentry *gt_root)
+{
+ struct dentry *root;
+
+ if (!gt_root)
+ return;
+
+ /* GuC and HuC always go in a pair; no need to check both */
+ if (!intel_uc_supports_guc(uc))
+ return;
+
+ root = debugfs_create_dir("uc", gt_root);
+ if (IS_ERR(root))
+ return;
+
+ intel_guc_debugfs_register(&uc->guc, root);
+ intel_huc_debugfs_register(&uc->huc, root);
+}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.h
new file mode 100644
index 000000000000..010ce250d223
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef DEBUGFS_UC_H
+#define DEBUGFS_UC_H
+
+struct intel_uc;
+struct dentry;
+
+void intel_uc_debugfs_register(struct intel_uc *uc, struct dentry *gt_root);
+
+#endif /* DEBUGFS_UC_H */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index 18c755203688..e1caae93996d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -11,26 +11,32 @@
#include "intel_uc_fw_abi.h"
#include "i915_drv.h"
-static inline struct intel_gt *__uc_fw_to_gt(struct intel_uc_fw *uc_fw)
+static inline struct intel_gt *
+____uc_fw_to_gt(struct intel_uc_fw *uc_fw, enum intel_uc_fw_type type)
{
- GEM_BUG_ON(uc_fw->status == INTEL_UC_FIRMWARE_UNINITIALIZED);
- if (uc_fw->type == INTEL_UC_FW_TYPE_GUC)
+ if (type == INTEL_UC_FW_TYPE_GUC)
return container_of(uc_fw, struct intel_gt, uc.guc.fw);
- GEM_BUG_ON(uc_fw->type != INTEL_UC_FW_TYPE_HUC);
+ GEM_BUG_ON(type != INTEL_UC_FW_TYPE_HUC);
return container_of(uc_fw, struct intel_gt, uc.huc.fw);
}
+static inline struct intel_gt *__uc_fw_to_gt(struct intel_uc_fw *uc_fw)
+{
+ GEM_BUG_ON(uc_fw->status == INTEL_UC_FIRMWARE_UNINITIALIZED);
+ return ____uc_fw_to_gt(uc_fw, uc_fw->type);
+}
+
#ifdef CONFIG_DRM_I915_DEBUG_GUC
void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
enum intel_uc_fw_status status)
{
uc_fw->__status = status;
- DRM_DEV_DEBUG_DRIVER(__uc_fw_to_gt(uc_fw)->i915->drm.dev,
- "%s firmware -> %s\n",
- intel_uc_fw_type_repr(uc_fw->type),
- status == INTEL_UC_FIRMWARE_SELECTED ?
- uc_fw->path : intel_uc_fw_status_repr(status));
+ drm_dbg(&__uc_fw_to_gt(uc_fw)->i915->drm,
+ "%s firmware -> %s\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ status == INTEL_UC_FIRMWARE_SELECTED ?
+ uc_fw->path : intel_uc_fw_status_repr(status));
}
#endif
@@ -187,17 +193,15 @@ static void __uc_fw_user_override(struct intel_uc_fw *uc_fw)
* intel_uc_fw_init_early - initialize the uC object and select the firmware
* @uc_fw: uC firmware
* @type: type of uC
- * @supported: is uC support possible
- * @platform: platform identifier
- * @rev: hardware revision
*
* Initialize the state of our uC object and relevant tracking and select the
* firmware to fetch and load.
*/
void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw,
- enum intel_uc_fw_type type, bool supported,
- enum intel_platform platform, u8 rev)
+ enum intel_uc_fw_type type)
{
+ struct drm_i915_private *i915 = ____uc_fw_to_gt(uc_fw, type)->i915;
+
/*
* we use FIRMWARE_UNINITIALIZED to detect checks against uc_fw->status
* before we're looked at the HW caps to see if we have uc support
@@ -208,8 +212,10 @@ void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw,
uc_fw->type = type;
- if (supported) {
- __uc_fw_auto_select(uc_fw, platform, rev);
+ if (HAS_GT_UC(i915)) {
+ __uc_fw_auto_select(uc_fw,
+ INTEL_INFO(i915)->platform,
+ INTEL_REVID(i915));
__uc_fw_user_override(uc_fw);
}
@@ -290,7 +296,7 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
/* Check the size of the blob before examining buffer contents */
if (unlikely(fw->size < sizeof(struct uc_css_header))) {
- dev_warn(dev, "%s firmware %s: invalid size: %zu < %zu\n",
+ drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu < %zu\n",
intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
fw->size, sizeof(struct uc_css_header));
err = -ENODATA;
@@ -303,7 +309,7 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
size = (css->header_size_dw - css->key_size_dw - css->modulus_size_dw -
css->exponent_size_dw) * sizeof(u32);
if (unlikely(size != sizeof(struct uc_css_header))) {
- dev_warn(dev,
+ drm_warn(&i915->drm,
"%s firmware %s: unexpected header size: %zu != %zu\n",
intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
fw->size, sizeof(struct uc_css_header));
@@ -316,7 +322,7 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
/* now RSA */
if (unlikely(css->key_size_dw != UOS_RSA_SCRATCH_COUNT)) {
- dev_warn(dev, "%s firmware %s: unexpected key size: %u != %u\n",
+ drm_warn(&i915->drm, "%s firmware %s: unexpected key size: %u != %u\n",
intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
css->key_size_dw, UOS_RSA_SCRATCH_COUNT);
err = -EPROTO;
@@ -327,7 +333,7 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
/* At least, it should have header, uCode and RSA. Size of all three. */
size = sizeof(struct uc_css_header) + uc_fw->ucode_size + uc_fw->rsa_size;
if (unlikely(fw->size < size)) {
- dev_warn(dev, "%s firmware %s: invalid size: %zu < %zu\n",
+ drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu < %zu\n",
intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
fw->size, size);
err = -ENOEXEC;
@@ -337,7 +343,7 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
/* Sanity check whether this fw is not larger than whole WOPCM memory */
size = __intel_uc_fw_get_upload_size(uc_fw);
if (unlikely(size >= i915->wopcm.size)) {
- dev_warn(dev, "%s firmware %s: invalid size: %zu > %zu\n",
+ drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu > %zu\n",
intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
size, (size_t)i915->wopcm.size);
err = -E2BIG;
@@ -352,7 +358,7 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
if (uc_fw->major_ver_found != uc_fw->major_ver_wanted ||
uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) {
- dev_notice(dev, "%s firmware %s: unexpected version: %u.%u != %u.%u\n",
+ drm_notice(&i915->drm, "%s firmware %s: unexpected version: %u.%u != %u.%u\n",
intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
uc_fw->major_ver_found, uc_fw->minor_ver_found,
uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
@@ -380,9 +386,9 @@ fail:
INTEL_UC_FIRMWARE_MISSING :
INTEL_UC_FIRMWARE_ERROR);
- dev_notice(dev, "%s firmware %s: fetch failed with error %d\n",
+ drm_notice(&i915->drm, "%s firmware %s: fetch failed with error %d\n",
intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err);
- dev_info(dev, "%s firmware(s) can be downloaded from %s\n",
+ drm_info(&i915->drm, "%s firmware(s) can be downloaded from %s\n",
intel_uc_fw_type_repr(uc_fw->type), INTEL_UC_FIRMWARE_URL);
release_firmware(fw); /* OK even if fw is NULL */
@@ -467,7 +473,7 @@ static int uc_fw_xfer(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
/* Wait for DMA to finish */
ret = intel_wait_for_register_fw(uncore, DMA_CTRL, START_DMA, 0, 100);
if (ret)
- dev_err(gt->i915->drm.dev, "DMA for %s fw failed, DMA_CTRL=%u\n",
+ drm_err(&gt->i915->drm, "DMA for %s fw failed, DMA_CTRL=%u\n",
intel_uc_fw_type_repr(uc_fw->type),
intel_uncore_read_fw(uncore, DMA_CTRL));
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
index 888ff0de0244..23d3a423ac0f 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
@@ -239,8 +239,7 @@ static inline u32 intel_uc_fw_get_upload_size(struct intel_uc_fw *uc_fw)
}
void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw,
- enum intel_uc_fw_type type, bool supported,
- enum intel_platform platform, u8 rev);
+ enum intel_uc_fw_type type);
int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw);
void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw);
int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, u32 offset, u32 dma_flags);
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index 8b13f091cee2..0d6d59871308 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -35,7 +35,7 @@
*/
#include "i915_drv.h"
-#include "i915_gem_fence_reg.h"
+#include "gt/intel_ggtt_fencing.h"
#include "gvt.h"
static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index a3cc080a46c6..8b87f130f7f1 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -882,6 +882,47 @@ static int mocs_cmd_reg_handler(struct parser_exec_state *s,
return 0;
}
+static int is_cmd_update_pdps(unsigned int offset,
+ struct parser_exec_state *s)
+{
+ u32 base = s->workload->engine->mmio_base;
+ return i915_mmio_reg_equal(_MMIO(offset), GEN8_RING_PDP_UDW(base, 0));
+}
+
+static int cmd_pdp_mmio_update_handler(struct parser_exec_state *s,
+ unsigned int offset, unsigned int index)
+{
+ struct intel_vgpu *vgpu = s->vgpu;
+ struct intel_vgpu_mm *shadow_mm = s->workload->shadow_mm;
+ struct intel_vgpu_mm *mm;
+ u64 pdps[GEN8_3LVL_PDPES];
+
+ if (shadow_mm->ppgtt_mm.root_entry_type ==
+ GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
+ pdps[0] = (u64)cmd_val(s, 2) << 32;
+ pdps[0] |= cmd_val(s, 4);
+
+ mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
+ if (!mm) {
+ gvt_vgpu_err("failed to get the 4-level shadow vm\n");
+ return -EINVAL;
+ }
+ intel_vgpu_mm_get(mm);
+ list_add_tail(&mm->ppgtt_mm.link,
+ &s->workload->lri_shadow_mm);
+ *cmd_ptr(s, 2) = upper_32_bits(mm->ppgtt_mm.shadow_pdps[0]);
+ *cmd_ptr(s, 4) = lower_32_bits(mm->ppgtt_mm.shadow_pdps[0]);
+ } else {
+ /* All current guests use a 4-level (PML4) table; a guest with a
+ * 3-level table that updates its PPGTT via LRI does not exist, so
+ * this path is simply untestable. */
+ GEM_BUG_ON(1);
+ gvt_vgpu_err("invalid shared shadow vm type\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int cmd_reg_handler(struct parser_exec_state *s,
unsigned int offset, unsigned int index, char *cmd)
{
@@ -920,6 +961,10 @@ static int cmd_reg_handler(struct parser_exec_state *s,
patch_value(s, cmd_ptr(s, index), VGT_PVINFO_PAGE);
}
+ if (is_cmd_update_pdps(offset, s) &&
+ cmd_pdp_mmio_update_handler(s, offset, index))
+ return -EINVAL;
+
/* TODO
* In order to let workload with inhibit context to generate
* correct image data into memory, vregs values will be loaded to
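
The PDP handler above rewrites the LRI payload in place: the guest's 64-bit PDP[0] is reassembled from the UDW/LDW dwords, used to look up the matching shadow PPGTT, and the shadow PDP is split back into the command stream. A standalone restatement of that merge/split (helper names are illustrative, mirroring the hunk above):

/* Illustrative helpers, not part of the patch. */
static u64 lri_guest_pdp0(struct parser_exec_state *s)
{
	/* dword 2 carries the PDP0_UDW write, dword 4 the PDP0_LDW write */
	return ((u64)cmd_val(s, 2) << 32) | cmd_val(s, 4);
}

static void lri_patch_shadow_pdp0(struct parser_exec_state *s, u64 shadow_pdp)
{
	*cmd_ptr(s, 2) = upper_32_bits(shadow_pdp);
	*cmd_ptr(s, 4) = lower_32_bits(shadow_pdp);
}
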
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index dd25c3024370..158873f269b1 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -424,8 +424,6 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload)
ret = emulate_execlist_ctx_schedule_out(execlist, &workload->ctx_desc);
out:
- intel_vgpu_unpin_mm(workload->shadow_mm);
- intel_vgpu_destroy_workload(workload);
return ret;
}
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 2a4b23f8aa74..210016192ce7 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1900,6 +1900,7 @@ struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
INIT_LIST_HEAD(&mm->ppgtt_mm.list);
INIT_LIST_HEAD(&mm->ppgtt_mm.lru_list);
+ INIT_LIST_HEAD(&mm->ppgtt_mm.link);
if (root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
mm->ppgtt_mm.guest_pdps[0] = pdps[0];
@@ -2341,12 +2342,27 @@ int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
{
const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
int ret;
+ struct intel_vgpu_submission *s = &vgpu->submission;
+ struct intel_engine_cs *engine;
+ int i;
if (bytes != 4 && bytes != 8)
return -EINVAL;
off -= info->gtt_start_offset;
ret = emulate_ggtt_mmio_write(vgpu, off, p_data, bytes);
+
+ /* If the GGTT entry of the last submitted context is written,
+ * that context has probably been unpinned.
+ * Mark the last shadowed context as invalid.
+ */
+ for_each_engine(engine, vgpu->gvt->gt, i) {
+ if (!s->last_ctx[i].valid)
+ continue;
+
+ if (s->last_ctx[i].lrca == (off >> info->gtt_entry_size_shift))
+ s->last_ctx[i].valid = false;
+ }
return ret;
}
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index 88789316807d..320b8d6ad92f 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -160,6 +160,7 @@ struct intel_vgpu_mm {
struct list_head list;
struct list_head lru_list;
+ struct list_head link; /* possible LRI shadow mm list */
} ppgtt_mm;
struct {
void *virtual_ggtt;
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 9e1787867894..c7c561237883 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -31,7 +31,6 @@
*/
#include <linux/types.h>
-#include <xen/xen.h>
#include <linux/kthread.h>
#include "i915_drv.h"
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 58c2c7932e3f..a4a6db6b7f90 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -163,6 +163,11 @@ struct intel_vgpu_submission {
const struct intel_vgpu_submission_ops *ops;
int virtual_submission_interface;
bool active;
+ struct {
+ u32 lrca;
+ bool valid;
+ u64 ring_context_gpa;
+ } last_ctx[I915_NUM_ENGINES];
};
struct intel_vgpu {
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 2faf50e1b051..3e88e3b5c43a 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -2812,7 +2812,7 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt)
MMIO_D(GAMTARBMODE, D_BDW_PLUS);
#define RING_REG(base) _MMIO((base) + 0x270)
- MMIO_RING_F(RING_REG, 32, 0, 0, 0, D_BDW_PLUS, NULL, NULL);
+ MMIO_RING_F(RING_REG, 32, F_CMD_ACCESS, 0, 0, D_BDW_PLUS, NULL, NULL);
#undef RING_REG
MMIO_RING_GM_RDR(RING_HWS_PGA, D_BDW_PLUS, NULL, hws_pga_write);
diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h
index b17c4a1599cd..b79da5124f83 100644
--- a/drivers/gpu/drm/i915/gvt/hypercall.h
+++ b/drivers/gpu/drm/i915/gvt/hypercall.h
@@ -79,6 +79,4 @@ struct intel_gvt_mpt {
bool (*is_valid_gfn)(unsigned long handle, unsigned long gfn);
};
-extern struct intel_gvt_mpt xengt_mpt;
-
#endif /* _GVT_HYPERCALL_H_ */
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index eee530453aa6..ad8a9df49f29 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -31,7 +31,7 @@
#include <linux/init.h>
#include <linux/device.h>
#include <linux/mm.h>
-#include <linux/mmu_context.h>
+#include <linux/kthread.h>
#include <linux/sched/mm.h>
#include <linux/types.h>
#include <linux/list.h>
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index e92ed96c9b23..0fb1df71c637 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -58,10 +58,8 @@ static void set_context_pdp_root_pointer(
static void update_shadow_pdps(struct intel_vgpu_workload *workload)
{
- struct drm_i915_gem_object *ctx_obj =
- workload->req->context->state->obj;
struct execlist_ring_context *shadow_ring_context;
- struct page *page;
+ struct intel_context *ctx = workload->req->context;
if (WARN_ON(!workload->shadow_mm))
return;
@@ -69,11 +67,9 @@ static void update_shadow_pdps(struct intel_vgpu_workload *workload)
if (WARN_ON(!atomic_read(&workload->shadow_mm->pincount)))
return;
- page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
- shadow_ring_context = kmap(page);
+ shadow_ring_context = (struct execlist_ring_context *)ctx->lrc_reg_state;
set_context_pdp_root_pointer(shadow_ring_context,
(void *)workload->shadow_mm->ppgtt_mm.shadow_pdps);
- kunmap(page);
}
/*
@@ -128,16 +124,24 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_gvt *gvt = vgpu->gvt;
- struct drm_i915_gem_object *ctx_obj =
- workload->req->context->state->obj;
+ struct intel_context *ctx = workload->req->context;
struct execlist_ring_context *shadow_ring_context;
- struct page *page;
void *dst;
+ void *context_base;
unsigned long context_gpa, context_page_num;
+ unsigned long gpa_base; /* first gpa of consecutive GPAs */
+ unsigned long gpa_size; /* size of consecutive GPAs */
+ struct intel_vgpu_submission *s = &vgpu->submission;
int i;
+ bool skip = false;
+ int ring_id = workload->engine->id;
+
+ GEM_BUG_ON(!intel_context_is_pinned(ctx));
+
+ context_base = (void *) ctx->lrc_reg_state -
+ (LRC_STATE_PN << I915_GTT_PAGE_SHIFT);
- page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
- shadow_ring_context = kmap(page);
+ shadow_ring_context = (void *) ctx->lrc_reg_state;
sr_oa_regs(workload, (u32 *)shadow_ring_context, true);
#define COPY_REG(name) \
@@ -169,23 +173,43 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
sr_oa_regs(workload, (u32 *)shadow_ring_context, false);
- kunmap(page);
- if (IS_RESTORE_INHIBIT(shadow_ring_context->ctx_ctrl.val))
- return 0;
+ gvt_dbg_sched("ring %s workload lrca %x, ctx_id %x, ctx gpa %llx",
+ workload->engine->name, workload->ctx_desc.lrca,
+ workload->ctx_desc.context_id,
+ workload->ring_context_gpa);
- gvt_dbg_sched("ring %s workload lrca %x",
- workload->engine->name,
- workload->ctx_desc.lrca);
+ /* We only need to ensure this context is not pinned/unpinned between
+ * the last submission and this submission.
+ * Upon reaching this function, the currently submitted context is not
+ * supposed to get unpinned. If a misbehaving guest driver ever does
+ * this, it would corrupt itself.
+ */
+ if (s->last_ctx[ring_id].valid &&
+ (s->last_ctx[ring_id].lrca ==
+ workload->ctx_desc.lrca) &&
+ (s->last_ctx[ring_id].ring_context_gpa ==
+ workload->ring_context_gpa))
+ skip = true;
+
+ s->last_ctx[ring_id].lrca = workload->ctx_desc.lrca;
+ s->last_ctx[ring_id].ring_context_gpa = workload->ring_context_gpa;
+ if (IS_RESTORE_INHIBIT(shadow_ring_context->ctx_ctrl.val) || skip)
+ return 0;
+
+ s->last_ctx[ring_id].valid = false;
context_page_num = workload->engine->context_size;
context_page_num = context_page_num >> PAGE_SHIFT;
if (IS_BROADWELL(gvt->gt->i915) && workload->engine->id == RCS0)
context_page_num = 19;
- i = 2;
- while (i < context_page_num) {
+ /* Find runs of consecutive GPAs starting from the GMA, stopping each
+ * run at the first non-consecutive GPA; read each run into the dst
+ * virtual address. */
+ gpa_size = 0;
+ for (i = 2; i < context_page_num; i++) {
context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
(u32)((workload->ctx_desc.lrca + i) <<
I915_GTT_PAGE_SHIFT));
@@ -194,13 +218,26 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
return -EFAULT;
}
- page = i915_gem_object_get_page(ctx_obj, i);
- dst = kmap(page);
- intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
- I915_GTT_PAGE_SIZE);
- kunmap(page);
- i++;
+ if (gpa_size == 0) {
+ gpa_base = context_gpa;
+ dst = context_base + (i << I915_GTT_PAGE_SHIFT);
+ } else if (context_gpa != gpa_base + gpa_size)
+ goto read;
+
+ gpa_size += I915_GTT_PAGE_SIZE;
+
+ if (i == context_page_num - 1)
+ goto read;
+
+ continue;
+
+read:
+ intel_gvt_hypervisor_read_gpa(vgpu, gpa_base, dst, gpa_size);
+ gpa_base = context_gpa;
+ gpa_size = I915_GTT_PAGE_SIZE;
+ dst = context_base + (i << I915_GTT_PAGE_SHIFT);
}
+ s->last_ctx[ring_id].valid = true;
return 0;
}
@@ -599,10 +636,9 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
if (bb->va && !IS_ERR(bb->va))
i915_gem_object_unpin_map(bb->obj);
- if (bb->vma && !IS_ERR(bb->vma)) {
+ if (bb->vma && !IS_ERR(bb->vma))
i915_vma_unpin(bb->vma);
- i915_vma_close(bb->vma);
- }
+
i915_gem_object_put(bb->obj);
}
list_del(&bb->list);
@@ -610,10 +646,11 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
}
}
-static int prepare_workload(struct intel_vgpu_workload *workload)
+static int
+intel_vgpu_shadow_mm_pin(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
- struct intel_vgpu_submission *s = &vgpu->submission;
+ struct intel_vgpu_mm *m;
int ret = 0;
ret = intel_vgpu_pin_mm(workload->shadow_mm);
@@ -628,6 +665,52 @@ static int prepare_workload(struct intel_vgpu_workload *workload)
return -EINVAL;
}
+ if (!list_empty(&workload->lri_shadow_mm)) {
+ list_for_each_entry(m, &workload->lri_shadow_mm,
+ ppgtt_mm.link) {
+ ret = intel_vgpu_pin_mm(m);
+ if (ret) {
+ list_for_each_entry_from_reverse(m,
+ &workload->lri_shadow_mm,
+ ppgtt_mm.link)
+ intel_vgpu_unpin_mm(m);
+ gvt_vgpu_err("failed to pin LRI shadow ppgtt\n");
+ break;
+ }
+ }
+ }
+
+ if (ret)
+ intel_vgpu_unpin_mm(workload->shadow_mm);
+
+ return ret;
+}
+
+static void
+intel_vgpu_shadow_mm_unpin(struct intel_vgpu_workload *workload)
+{
+ struct intel_vgpu_mm *m;
+
+ if (!list_empty(&workload->lri_shadow_mm)) {
+ list_for_each_entry(m, &workload->lri_shadow_mm,
+ ppgtt_mm.link)
+ intel_vgpu_unpin_mm(m);
+ }
+ intel_vgpu_unpin_mm(workload->shadow_mm);
+}
+
+static int prepare_workload(struct intel_vgpu_workload *workload)
+{
+ struct intel_vgpu *vgpu = workload->vgpu;
+ struct intel_vgpu_submission *s = &vgpu->submission;
+ int ret = 0;
+
+ ret = intel_vgpu_shadow_mm_pin(workload);
+ if (ret) {
+ gvt_vgpu_err("failed to pin shadow mm\n");
+ return ret;
+ }
+
update_shadow_pdps(workload);
set_context_ppgtt_from_shadow(workload, s->shadow[workload->engine->id]);
@@ -674,7 +757,7 @@ err_shadow_wa_ctx:
err_shadow_batch:
release_shadow_batch_buffer(workload);
err_unpin_mm:
- intel_vgpu_unpin_mm(workload->shadow_mm);
+ intel_vgpu_shadow_mm_unpin(workload);
return ret;
}
@@ -784,15 +867,48 @@ out:
return workload;
}
+static void update_guest_pdps(struct intel_vgpu *vgpu,
+ u64 ring_context_gpa, u32 pdp[8])
+{
+ u64 gpa;
+ int i;
+
+ gpa = ring_context_gpa + RING_CTX_OFF(pdps[0].val);
+
+ for (i = 0; i < 8; i++)
+ intel_gvt_hypervisor_write_gpa(vgpu,
+ gpa + i * 8, &pdp[7 - i], 4);
+}
+
+static __maybe_unused bool
+check_shadow_context_ppgtt(struct execlist_ring_context *c, struct intel_vgpu_mm *m)
+{
+ if (m->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
+ u64 shadow_pdp = c->pdps[7].val | (u64) c->pdps[6].val << 32;
+
+ if (shadow_pdp != m->ppgtt_mm.shadow_pdps[0]) {
+ gvt_dbg_mm("4-level context ppgtt not match LRI command\n");
+ return false;
+ }
+ return true;
+ } else {
+ /* see comment in LRI handler in cmd_parser.c */
+ gvt_dbg_mm("invalid shadow mm type\n");
+ return false;
+ }
+}
+
static void update_guest_context(struct intel_vgpu_workload *workload)
{
struct i915_request *rq = workload->req;
struct intel_vgpu *vgpu = workload->vgpu;
- struct drm_i915_gem_object *ctx_obj = rq->context->state->obj;
struct execlist_ring_context *shadow_ring_context;
- struct page *page;
+ struct intel_context *ctx = workload->req->context;
+ void *context_base;
void *src;
unsigned long context_gpa, context_page_num;
+ unsigned long gpa_base; /* first gpa of consecutive GPAs */
+ unsigned long gpa_size; /* size of consecutive GPAs */
int i;
u32 ring_base;
u32 head, tail;
@@ -801,6 +917,8 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
gvt_dbg_sched("ring id %d workload lrca %x\n", rq->engine->id,
workload->ctx_desc.lrca);
+ GEM_BUG_ON(!intel_context_is_pinned(ctx));
+
head = workload->rb_head;
tail = workload->rb_tail;
wrap_count = workload->guest_rb_head >> RB_HEAD_WRAP_CNT_OFF;
@@ -824,9 +942,14 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
if (IS_BROADWELL(rq->i915) && rq->engine->id == RCS0)
context_page_num = 19;
- i = 2;
+ context_base = (void *) ctx->lrc_reg_state -
+ (LRC_STATE_PN << I915_GTT_PAGE_SHIFT);
- while (i < context_page_num) {
+ /* Find runs of consecutive GPAs starting from the GMA, stopping each
+ * run at the first non-consecutive GPA; write each run from the src
+ * virtual address. */
+ gpa_size = 0;
+ for (i = 2; i < context_page_num; i++) {
context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
(u32)((workload->ctx_desc.lrca + i) <<
I915_GTT_PAGE_SHIFT));
@@ -835,19 +958,39 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
return;
}
- page = i915_gem_object_get_page(ctx_obj, i);
- src = kmap(page);
- intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
- I915_GTT_PAGE_SIZE);
- kunmap(page);
- i++;
+ if (gpa_size == 0) {
+ gpa_base = context_gpa;
+ src = context_base + (i << I915_GTT_PAGE_SHIFT);
+ } else if (context_gpa != gpa_base + gpa_size)
+ goto write;
+
+ gpa_size += I915_GTT_PAGE_SIZE;
+
+ if (i == context_page_num - 1)
+ goto write;
+
+ continue;
+
+write:
+ intel_gvt_hypervisor_write_gpa(vgpu, gpa_base, src, gpa_size);
+ gpa_base = context_gpa;
+ gpa_size = I915_GTT_PAGE_SIZE;
+ src = context_base + (i << I915_GTT_PAGE_SHIFT);
}
intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa +
RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);
- page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
- shadow_ring_context = kmap(page);
+ shadow_ring_context = (void *) ctx->lrc_reg_state;
+
+ if (!list_empty(&workload->lri_shadow_mm)) {
+ struct intel_vgpu_mm *m = list_last_entry(&workload->lri_shadow_mm,
+ struct intel_vgpu_mm,
+ ppgtt_mm.link);
+ GEM_BUG_ON(!check_shadow_context_ppgtt(shadow_ring_context, m));
+ update_guest_pdps(vgpu, workload->ring_context_gpa,
+ (void *)m->ppgtt_mm.guest_pdps);
+ }
#define COPY_REG(name) \
intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
@@ -864,8 +1007,6 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
(void *)shadow_ring_context +
sizeof(*shadow_ring_context),
I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
-
- kunmap(page);
}
void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
@@ -959,6 +1100,9 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
workload->complete(workload);
+ intel_vgpu_shadow_mm_unpin(workload);
+ intel_vgpu_destroy_workload(workload);
+
atomic_dec(&s->running_workload_num);
wake_up(&scheduler->workload_complete_wq);
@@ -1264,6 +1408,8 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
atomic_set(&s->running_workload_num, 0);
bitmap_zero(s->tlb_handle_pending, I915_NUM_ENGINES);
+ memset(s->last_ctx, 0, sizeof(s->last_ctx));
+
i915_vm_put(&ppgtt->vm);
return 0;
@@ -1350,6 +1496,16 @@ void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
release_shadow_batch_buffer(workload);
release_shadow_wa_ctx(&workload->wa_ctx);
+ if (!list_empty(&workload->lri_shadow_mm)) {
+ struct intel_vgpu_mm *m, *mm;
+ list_for_each_entry_safe(m, mm, &workload->lri_shadow_mm,
+ ppgtt_mm.link) {
+ list_del(&m->ppgtt_mm.link);
+ intel_vgpu_mm_put(m);
+ }
+ }
+
+ GEM_BUG_ON(!list_empty(&workload->lri_shadow_mm));
if (workload->shadow_mm)
intel_vgpu_mm_put(workload->shadow_mm);
@@ -1368,6 +1524,7 @@ alloc_workload(struct intel_vgpu *vgpu)
INIT_LIST_HEAD(&workload->list);
INIT_LIST_HEAD(&workload->shadow_bb);
+ INIT_LIST_HEAD(&workload->lri_shadow_mm);
init_waitqueue_head(&workload->shadow_ctx_status_wq);
atomic_set(&workload->shadow_ctx_active, 0);
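
populate_shadow_context() and update_guest_context() now batch hypervisor accesses: consecutive guest pages are merged into a single read/write instead of one 4KiB access per page. A simplified standalone restatement of the coalescing loop (read direction shown; unlike the patch, the trailing flush here also covers the corner case where the final page starts a new run):

/* Simplified sketch, not the patch itself. */
static int copy_context_pages(struct intel_vgpu *vgpu, void *context_base,
			      u32 lrca, int context_page_num)
{
	unsigned long context_gpa, gpa_base = 0, gpa_size = 0;
	void *dst = NULL;
	int i;

	for (i = 2; i < context_page_num; i++) {
		context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
				(u32)((lrca + i) << I915_GTT_PAGE_SHIFT));
		if (context_gpa == INTEL_GVT_INVALID_ADDR)
			return -EFAULT;

		if (gpa_size == 0) {
			/* start a new run of consecutive pages */
			gpa_base = context_gpa;
			dst = context_base + (i << I915_GTT_PAGE_SHIFT);
		} else if (context_gpa != gpa_base + gpa_size) {
			/* run broken: flush it, then start a new one here */
			intel_gvt_hypervisor_read_gpa(vgpu, gpa_base,
						      dst, gpa_size);
			gpa_base = context_gpa;
			gpa_size = 0;
			dst = context_base + (i << I915_GTT_PAGE_SHIFT);
		}
		gpa_size += I915_GTT_PAGE_SIZE;
	}

	if (gpa_size)	/* flush the trailing run */
		intel_gvt_hypervisor_read_gpa(vgpu, gpa_base, dst, gpa_size);

	return 0;
}
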
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index bf7fc0ca4cb1..15d317f2a4a4 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -87,6 +87,7 @@ struct intel_vgpu_workload {
int status;
struct intel_vgpu_mm *shadow_mm;
+ struct list_head lri_shadow_mm; /* For PPGTT load cmd */
/* different submission model may need different handler */
int (*prepare)(struct intel_vgpu_workload *);
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 1d5ff88078bd..7d361623ff67 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -124,7 +124,7 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
*/
low_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
- num_types = sizeof(vgpu_types) / sizeof(vgpu_types[0]);
+ num_types = ARRAY_SIZE(vgpu_types);
gvt->types = kcalloc(num_types, sizeof(struct intel_vgpu_type),
GFP_KERNEL);
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index c4048628188a..d960d0be5bd2 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -496,7 +496,7 @@ static int flush_lazy_signals(struct i915_active *ref)
return err;
}
-int i915_active_wait(struct i915_active *ref)
+int __i915_active_wait(struct i915_active *ref, int state)
{
int err;
@@ -511,7 +511,9 @@ int i915_active_wait(struct i915_active *ref)
if (err)
return err;
- if (wait_var_event_interruptible(ref, i915_active_is_idle(ref)))
+ if (!i915_active_is_idle(ref) &&
+ ___wait_var_event(ref, i915_active_is_idle(ref),
+ state, 0, 0, schedule()))
return -EINTR;
flush_work(&ref->work);
@@ -540,34 +542,88 @@ static int __await_active(struct i915_active_fence *active,
return 0;
}
+struct wait_barrier {
+ struct wait_queue_entry base;
+ struct i915_active *ref;
+};
+
+static int
+barrier_wake(wait_queue_entry_t *wq, unsigned int mode, int flags, void *key)
+{
+ struct wait_barrier *wb = container_of(wq, typeof(*wb), base);
+
+ if (i915_active_is_idle(wb->ref)) {
+ list_del(&wq->entry);
+ i915_sw_fence_complete(wq->private);
+ kfree(wq);
+ }
+
+ return 0;
+}
+
+static int __await_barrier(struct i915_active *ref, struct i915_sw_fence *fence)
+{
+ struct wait_barrier *wb;
+
+ wb = kmalloc(sizeof(*wb), GFP_KERNEL);
+ if (unlikely(!wb))
+ return -ENOMEM;
+
+ GEM_BUG_ON(i915_active_is_idle(ref));
+ if (!i915_sw_fence_await(fence)) {
+ kfree(wb);
+ return -EINVAL;
+ }
+
+ wb->base.flags = 0;
+ wb->base.func = barrier_wake;
+ wb->base.private = fence;
+ wb->ref = ref;
+
+ add_wait_queue(__var_waitqueue(ref), &wb->base);
+ return 0;
+}
+
static int await_active(struct i915_active *ref,
unsigned int flags,
int (*fn)(void *arg, struct dma_fence *fence),
- void *arg)
+ void *arg, struct i915_sw_fence *barrier)
{
int err = 0;
- /* We must always wait for the exclusive fence! */
- if (rcu_access_pointer(ref->excl.fence)) {
+ if (!i915_active_acquire_if_busy(ref))
+ return 0;
+
+ if (flags & I915_ACTIVE_AWAIT_EXCL &&
+ rcu_access_pointer(ref->excl.fence)) {
err = __await_active(&ref->excl, fn, arg);
if (err)
- return err;
+ goto out;
}
- if (flags & I915_ACTIVE_AWAIT_ALL && i915_active_acquire_if_busy(ref)) {
+ if (flags & I915_ACTIVE_AWAIT_ACTIVE) {
struct active_node *it, *n;
rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
err = __await_active(&it->base, fn, arg);
if (err)
- break;
+ goto out;
}
- i915_active_release(ref);
+ }
+
+ if (flags & I915_ACTIVE_AWAIT_BARRIER) {
+ err = flush_lazy_signals(ref);
if (err)
- return err;
+ goto out;
+
+ err = __await_barrier(ref, barrier);
+ if (err)
+ goto out;
}
- return 0;
+out:
+ i915_active_release(ref);
+ return err;
}
static int rq_await_fence(void *arg, struct dma_fence *fence)
@@ -579,7 +635,7 @@ int i915_request_await_active(struct i915_request *rq,
struct i915_active *ref,
unsigned int flags)
{
- return await_active(ref, flags, rq_await_fence, rq);
+ return await_active(ref, flags, rq_await_fence, rq, &rq->submit);
}
static int sw_await_fence(void *arg, struct dma_fence *fence)
@@ -592,7 +648,7 @@ int i915_sw_fence_await_active(struct i915_sw_fence *fence,
struct i915_active *ref,
unsigned int flags)
{
- return await_active(ref, flags, sw_await_fence, fence);
+ return await_active(ref, flags, sw_await_fence, fence, fence);
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
@@ -818,7 +874,7 @@ void i915_active_acquire_barrier(struct i915_active *ref)
GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
llist_add(barrier_to_ll(node), &engine->barrier_tasks);
- intel_engine_pm_put(engine);
+ intel_engine_pm_put_delay(engine, 1);
}
}
@@ -937,6 +993,59 @@ void i915_active_noop(struct dma_fence *fence, struct dma_fence_cb *cb)
active_fence_cb(fence, cb);
}
+struct auto_active {
+ struct i915_active base;
+ struct kref ref;
+};
+
+struct i915_active *i915_active_get(struct i915_active *ref)
+{
+ struct auto_active *aa = container_of(ref, typeof(*aa), base);
+
+ kref_get(&aa->ref);
+ return &aa->base;
+}
+
+static void auto_release(struct kref *ref)
+{
+ struct auto_active *aa = container_of(ref, typeof(*aa), ref);
+
+ i915_active_fini(&aa->base);
+ kfree(aa);
+}
+
+void i915_active_put(struct i915_active *ref)
+{
+ struct auto_active *aa = container_of(ref, typeof(*aa), base);
+
+ kref_put(&aa->ref, auto_release);
+}
+
+static int auto_active(struct i915_active *ref)
+{
+ i915_active_get(ref);
+ return 0;
+}
+
+static void auto_retire(struct i915_active *ref)
+{
+ i915_active_put(ref);
+}
+
+struct i915_active *i915_active_create(void)
+{
+ struct auto_active *aa;
+
+ aa = kmalloc(sizeof(*aa), GFP_KERNEL);
+ if (!aa)
+ return NULL;
+
+ kref_init(&aa->ref);
+ i915_active_init(&aa->base, auto_active, auto_retire);
+
+ return &aa->base;
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_active.c"
#endif
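
The auto_active wrapper added above gives i915_active a heap-allocated, reference-counted flavour: i915_active_create() returns a ready-to-use tracker whose last i915_active_put() calls auto_release(), which runs i915_active_fini() and frees the allocation. A minimal usage sketch (editorial illustration, not part of the patch; the second owner is hypothetical):

	struct i915_active *ref;

	ref = i915_active_create();
	if (!ref)
		return -ENOMEM;

	/* Hand an extra reference to another owner, e.g. an async worker. */
	other_owner->ref = i915_active_get(ref);

	/* Each owner drops its reference; the final put calls auto_release(),
	 * which runs i915_active_fini() and kfree(). */
	i915_active_put(other_owner->ref);
	i915_active_put(ref);
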
diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h
index b3282ae7913c..cf4058150966 100644
--- a/drivers/gpu/drm/i915/i915_active.h
+++ b/drivers/gpu/drm/i915/i915_active.h
@@ -181,7 +181,11 @@ static inline bool i915_active_has_exclusive(struct i915_active *ref)
return rcu_access_pointer(ref->excl.fence);
}
-int i915_active_wait(struct i915_active *ref);
+int __i915_active_wait(struct i915_active *ref, int state);
+static inline int i915_active_wait(struct i915_active *ref)
+{
+ return __i915_active_wait(ref, TASK_INTERRUPTIBLE);
+}
int i915_sw_fence_await_active(struct i915_sw_fence *fence,
struct i915_active *ref,
@@ -189,7 +193,9 @@ int i915_sw_fence_await_active(struct i915_sw_fence *fence,
int i915_request_await_active(struct i915_request *rq,
struct i915_active *ref,
unsigned int flags);
-#define I915_ACTIVE_AWAIT_ALL BIT(0)
+#define I915_ACTIVE_AWAIT_EXCL BIT(0)
+#define I915_ACTIVE_AWAIT_ACTIVE BIT(1)
+#define I915_ACTIVE_AWAIT_BARRIER BIT(2)
int i915_active_acquire(struct i915_active *ref);
bool i915_active_acquire_if_busy(struct i915_active *ref);
@@ -221,4 +227,8 @@ void i915_request_add_active_barriers(struct i915_request *rq);
void i915_active_print(struct i915_active *ref, struct drm_printer *m);
void i915_active_unlock_wait(struct i915_active *ref);
+struct i915_active *i915_active_create(void);
+struct i915_active *i915_active_get(struct i915_active *ref);
+void i915_active_put(struct i915_active *ref);
+
#endif /* _I915_ACTIVE_H_ */
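
With the await flags split as above, callers now state exactly which classes of dependency they need. A hedged illustration of the request-side helper (rq and ref are assumed to exist in the caller, not taken from the patch):

	/* Wait for the exclusive fence and every active timeline, but do
	 * not install a barrier wakeup. */
	err = i915_request_await_active(rq, ref,
					I915_ACTIVE_AWAIT_EXCL |
					I915_ACTIVE_AWAIT_ACTIVE);
	if (err)
		return err;
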
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 189b573d02be..372354d33f55 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -572,6 +572,9 @@ struct drm_i915_reg_descriptor {
#define REG32(_reg, ...) \
{ .addr = (_reg), __VA_ARGS__ }
+#define REG32_IDX(_reg, idx) \
+ { .addr = _reg(idx) }
+
/*
* Convenience macro for adding 64-bit registers.
*
@@ -669,6 +672,7 @@ static const struct drm_i915_reg_descriptor gen9_blt_regs[] = {
REG64_IDX(RING_TIMESTAMP, BSD_RING_BASE),
REG32(BCS_SWCTRL),
REG64_IDX(RING_TIMESTAMP, BLT_RING_BASE),
+ REG32_IDX(RING_CTX_TIMESTAMP, BLT_RING_BASE),
REG64_IDX(BCS_GPR, 0),
REG64_IDX(BCS_GPR, 1),
REG64_IDX(BCS_GPR, 2),
diff --git a/drivers/gpu/drm/i915/i915_config.c b/drivers/gpu/drm/i915/i915_config.c
new file mode 100644
index 000000000000..b79b5f6d2cfa
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_config.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include "i915_drv.h"
+
+unsigned long
+i915_fence_context_timeout(const struct drm_i915_private *i915, u64 context)
+{
+ if (context && IS_ACTIVE(CONFIG_DRM_I915_FENCE_TIMEOUT))
+ return msecs_to_jiffies_timeout(CONFIG_DRM_I915_FENCE_TIMEOUT);
+
+ return 0;
+}
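
The new helper only applies the Kconfig-selected timeout when the option is enabled and the fence context is non-zero, so a zero context is exempt from the bound. A hedged sketch of a caller deriving a wait limit (the i915 and fence variables are assumptions for illustration):

	unsigned long timeout;

	timeout = i915_fence_context_timeout(i915, fence->context);
	if (!timeout)
		timeout = MAX_SCHEDULE_TIMEOUT;	/* no bound configured */

	/* ... use 'timeout' with a jiffies-based wait ... */
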
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 6ca797128aa1..bca036ac6621 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -32,12 +32,13 @@
#include <drm/drm_debugfs.h>
#include "gem/i915_gem_context.h"
+#include "gt/intel_gt_buffer_pool.h"
+#include "gt/intel_gt_clock_utils.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_requests.h"
#include "gt/intel_reset.h"
#include "gt/intel_rc6.h"
#include "gt/intel_rps.h"
-#include "gt/uc/intel_guc_submission.h"
#include "i915_debugfs.h"
#include "i915_debugfs_params.h"
@@ -218,7 +219,7 @@ i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
struct file_stats {
struct i915_address_space *vm;
unsigned long count;
- u64 total, unbound;
+ u64 total;
u64 active, inactive;
u64 closed;
};
@@ -234,8 +235,6 @@ static int per_file_stats(int id, void *ptr, void *data)
stats->count++;
stats->total += obj->base.size;
- if (!atomic_read(&obj->bind_count))
- stats->unbound += obj->base.size;
spin_lock(&obj->vma.lock);
if (!stats->vm) {
@@ -285,13 +284,12 @@ static int per_file_stats(int id, void *ptr, void *data)
#define print_file_stats(m, name, stats) do { \
if (stats.count) \
- seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu unbound, %llu closed)\n", \
+ seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu closed)\n", \
name, \
stats.count, \
stats.total, \
stats.active, \
stats.inactive, \
- stats.unbound, \
stats.closed); \
} while (0)
@@ -745,7 +743,7 @@ i915_error_state_write(struct file *filp,
if (!error)
return 0;
- DRM_DEBUG_DRIVER("Resetting error state\n");
+ drm_dbg(&error->i915->drm, "Resetting error state\n");
i915_reset_error_state(error->i915);
return cnt;
@@ -930,21 +928,30 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
seq_printf(m, "CAGF: %dMHz\n", cagf);
- seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
- rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
- seq_printf(m, "RP CUR UP: %d (%dus)\n",
- rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
- seq_printf(m, "RP PREV UP: %d (%dus)\n",
- rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
+ seq_printf(m, "RP CUR UP EI: %d (%dns)\n",
+ rpupei,
+ intel_gt_pm_interval_to_ns(&dev_priv->gt, rpupei));
+ seq_printf(m, "RP CUR UP: %d (%dun)\n",
+ rpcurup,
+ intel_gt_pm_interval_to_ns(&dev_priv->gt, rpcurup));
+ seq_printf(m, "RP PREV UP: %d (%dns)\n",
+ rpprevup,
+ intel_gt_pm_interval_to_ns(&dev_priv->gt, rpprevup));
seq_printf(m, "Up threshold: %d%%\n",
rps->power.up_threshold);
- seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
- rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
- seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
- rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
- seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
- rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
+ seq_printf(m, "RP CUR DOWN EI: %d (%dns)\n",
+ rpdownei,
+ intel_gt_pm_interval_to_ns(&dev_priv->gt,
+ rpdownei));
+ seq_printf(m, "RP CUR DOWN: %d (%dns)\n",
+ rpcurdown,
+ intel_gt_pm_interval_to_ns(&dev_priv->gt,
+ rpcurdown));
+ seq_printf(m, "RP PREV DOWN: %d (%dns)\n",
+ rpprevdown,
+ intel_gt_pm_interval_to_ns(&dev_priv->gt,
+ rpprevdown));
seq_printf(m, "Down threshold: %d%%\n",
rps->power.down_threshold);
@@ -1193,7 +1200,8 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_rps *rps = &dev_priv->gt.rps;
- seq_printf(m, "RPS enabled? %d\n", rps->enabled);
+ seq_printf(m, "RPS enabled? %s\n", yesno(intel_rps_is_enabled(rps)));
+ seq_printf(m, "RPS active? %s\n", yesno(intel_rps_is_active(rps)));
seq_printf(m, "GPU busy? %s\n", yesno(dev_priv->gt.awake));
seq_printf(m, "Boosts outstanding? %d\n",
atomic_read(&rps->num_waiters));
@@ -1213,7 +1221,7 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));
- if (INTEL_GEN(dev_priv) >= 6 && rps->enabled && dev_priv->gt.awake) {
+ if (INTEL_GEN(dev_priv) >= 6 && intel_rps_is_active(rps)) {
u32 rpup, rpupei;
u32 rpdown, rpdownei;
@@ -1251,286 +1259,6 @@ static int i915_llc(struct seq_file *m, void *data)
return 0;
}
-static int i915_huc_load_status_info(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- intel_wakeref_t wakeref;
- struct drm_printer p;
-
- if (!HAS_GT_UC(dev_priv))
- return -ENODEV;
-
- p = drm_seq_file_printer(m);
- intel_uc_fw_dump(&dev_priv->gt.uc.huc.fw, &p);
-
- with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
- seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
-
- return 0;
-}
-
-static int i915_guc_load_status_info(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- intel_wakeref_t wakeref;
- struct drm_printer p;
-
- if (!HAS_GT_UC(dev_priv))
- return -ENODEV;
-
- p = drm_seq_file_printer(m);
- intel_uc_fw_dump(&dev_priv->gt.uc.guc.fw, &p);
-
- with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
- u32 tmp = I915_READ(GUC_STATUS);
- u32 i;
-
- seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
- seq_printf(m, "\tBootrom status = 0x%x\n",
- (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
- seq_printf(m, "\tuKernel status = 0x%x\n",
- (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
- seq_printf(m, "\tMIA Core status = 0x%x\n",
- (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
- seq_puts(m, "\nScratch registers:\n");
- for (i = 0; i < 16; i++) {
- seq_printf(m, "\t%2d: \t0x%x\n",
- i, I915_READ(SOFT_SCRATCH(i)));
- }
- }
-
- return 0;
-}
-
-static const char *
-stringify_guc_log_type(enum guc_log_buffer_type type)
-{
- switch (type) {
- case GUC_ISR_LOG_BUFFER:
- return "ISR";
- case GUC_DPC_LOG_BUFFER:
- return "DPC";
- case GUC_CRASH_DUMP_LOG_BUFFER:
- return "CRASH";
- default:
- MISSING_CASE(type);
- }
-
- return "";
-}
-
-static void i915_guc_log_info(struct seq_file *m, struct intel_guc_log *log)
-{
- enum guc_log_buffer_type type;
-
- if (!intel_guc_log_relay_created(log)) {
- seq_puts(m, "GuC log relay not created\n");
- return;
- }
-
- seq_puts(m, "GuC logging stats:\n");
-
- seq_printf(m, "\tRelay full count: %u\n",
- log->relay.full_count);
-
- for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
- seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
- stringify_guc_log_type(type),
- log->stats[type].flush,
- log->stats[type].sampled_overflow);
- }
-}
-
-static int i915_guc_info(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct intel_uc *uc = &dev_priv->gt.uc;
-
- if (!intel_uc_uses_guc(uc))
- return -ENODEV;
-
- i915_guc_log_info(m, &uc->guc.log);
-
- /* Add more as required ... */
-
- return 0;
-}
-
-static int i915_guc_stage_pool(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct intel_uc *uc = &dev_priv->gt.uc;
- struct guc_stage_desc *desc = uc->guc.stage_desc_pool_vaddr;
- int index;
-
- if (!intel_uc_uses_guc_submission(uc))
- return -ENODEV;
-
- for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
- struct intel_engine_cs *engine;
-
- if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
- continue;
-
- seq_printf(m, "GuC stage descriptor %u:\n", index);
- seq_printf(m, "\tIndex: %u\n", desc->stage_id);
- seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
- seq_printf(m, "\tPriority: %d\n", desc->priority);
- seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
- seq_printf(m, "\tEngines used: 0x%x\n",
- desc->engines_used);
- seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
- desc->db_trigger_phy,
- desc->db_trigger_cpu,
- desc->db_trigger_uk);
- seq_printf(m, "\tProcess descriptor: 0x%x\n",
- desc->process_desc);
- seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
- desc->wq_addr, desc->wq_size);
- seq_putc(m, '\n');
-
- for_each_uabi_engine(engine, dev_priv) {
- u32 guc_engine_id = engine->guc_id;
- struct guc_execlist_context *lrc =
- &desc->lrc[guc_engine_id];
-
- seq_printf(m, "\t%s LRC:\n", engine->name);
- seq_printf(m, "\t\tContext desc: 0x%x\n",
- lrc->context_desc);
- seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
- seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
- seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
- seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
- seq_putc(m, '\n');
- }
- }
-
- return 0;
-}
-
-static int i915_guc_log_dump(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = m->private;
- struct drm_i915_private *dev_priv = node_to_i915(node);
- bool dump_load_err = !!node->info_ent->data;
- struct drm_i915_gem_object *obj = NULL;
- u32 *log;
- int i = 0;
-
- if (!HAS_GT_UC(dev_priv))
- return -ENODEV;
-
- if (dump_load_err)
- obj = dev_priv->gt.uc.load_err_log;
- else if (dev_priv->gt.uc.guc.log.vma)
- obj = dev_priv->gt.uc.guc.log.vma->obj;
-
- if (!obj)
- return 0;
-
- log = i915_gem_object_pin_map(obj, I915_MAP_WC);
- if (IS_ERR(log)) {
- DRM_DEBUG("Failed to pin object\n");
- seq_puts(m, "(log data unaccessible)\n");
- return PTR_ERR(log);
- }
-
- for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
- seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
- *(log + i), *(log + i + 1),
- *(log + i + 2), *(log + i + 3));
-
- seq_putc(m, '\n');
-
- i915_gem_object_unpin_map(obj);
-
- return 0;
-}
-
-static int i915_guc_log_level_get(void *data, u64 *val)
-{
- struct drm_i915_private *dev_priv = data;
- struct intel_uc *uc = &dev_priv->gt.uc;
-
- if (!intel_uc_uses_guc(uc))
- return -ENODEV;
-
- *val = intel_guc_log_get_level(&uc->guc.log);
-
- return 0;
-}
-
-static int i915_guc_log_level_set(void *data, u64 val)
-{
- struct drm_i915_private *dev_priv = data;
- struct intel_uc *uc = &dev_priv->gt.uc;
-
- if (!intel_uc_uses_guc(uc))
- return -ENODEV;
-
- return intel_guc_log_set_level(&uc->guc.log, val);
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
- i915_guc_log_level_get, i915_guc_log_level_set,
- "%lld\n");
-
-static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
-{
- struct drm_i915_private *i915 = inode->i_private;
- struct intel_guc *guc = &i915->gt.uc.guc;
- struct intel_guc_log *log = &guc->log;
-
- if (!intel_guc_is_ready(guc))
- return -ENODEV;
-
- file->private_data = log;
-
- return intel_guc_log_relay_open(log);
-}
-
-static ssize_t
-i915_guc_log_relay_write(struct file *filp,
- const char __user *ubuf,
- size_t cnt,
- loff_t *ppos)
-{
- struct intel_guc_log *log = filp->private_data;
- int val;
- int ret;
-
- ret = kstrtoint_from_user(ubuf, cnt, 0, &val);
- if (ret < 0)
- return ret;
-
- /*
- * Enable and start the guc log relay on value of 1.
- * Flush log relay for any other value.
- */
- if (val == 1)
- ret = intel_guc_log_relay_start(log);
- else
- intel_guc_log_relay_flush(log);
-
- return ret ?: cnt;
-}
-
-static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
-{
- struct drm_i915_private *i915 = inode->i_private;
- struct intel_guc *guc = &i915->gt.uc.guc;
-
- intel_guc_log_relay_close(&guc->log);
- return 0;
-}
-
-static const struct file_operations i915_guc_log_relay_fops = {
- .owner = THIS_MODULE,
- .open = i915_guc_log_relay_open,
- .write = i915_guc_log_relay_write,
- .release = i915_guc_log_relay_release,
-};
-
static int i915_runtime_pm_status(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -1576,8 +1304,8 @@ static int i915_engine_info(struct seq_file *m, void *unused)
seq_printf(m, "GT awake? %s [%d]\n",
yesno(dev_priv->gt.awake),
atomic_read(&dev_priv->gt.wakeref.count));
- seq_printf(m, "CS timestamp frequency: %u kHz\n",
- RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);
+ seq_printf(m, "CS timestamp frequency: %u Hz\n",
+ RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_hz);
p = drm_seq_file_printer(m);
for_each_uabi_engine(engine, dev_priv)
@@ -1676,13 +1404,12 @@ static int
i915_perf_noa_delay_set(void *data, u64 val)
{
struct drm_i915_private *i915 = data;
- const u32 clk = RUNTIME_INFO(i915)->cs_timestamp_frequency_khz;
/*
* This would lead to infinite waits as we're doing timestamp
* difference on the CS with only 32bits.
*/
- if (val > mul_u32_u32(U32_MAX, clk))
+ if (i915_cs_timestamp_ns_to_ticks(i915, val) > U32_MAX)
return -EINVAL;
atomic64_set(&i915->perf.noa_programming_delay, val);
@@ -1757,6 +1484,9 @@ gt_drop_caches(struct intel_gt *gt, u64 val)
if (val & DROP_RESET_ACTIVE && intel_gt_terminally_wedged(gt))
intel_gt_handle_error(gt, ALL_ENGINES, 0, NULL);
+ if (val & DROP_FREED)
+ intel_gt_flush_buffer_pool(gt);
+
return 0;
}
@@ -2139,12 +1869,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_gem_objects", i915_gem_object_info, 0},
{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
{"i915_gem_interrupt", i915_interrupt_info, 0},
- {"i915_guc_info", i915_guc_info, 0},
- {"i915_guc_load_status", i915_guc_load_status_info, 0},
- {"i915_guc_log_dump", i915_guc_log_dump, 0},
- {"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
- {"i915_guc_stage_pool", i915_guc_stage_pool, 0},
- {"i915_huc_load_status", i915_huc_load_status_info, 0},
{"i915_frequency_info", i915_frequency_info, 0},
{"i915_ring_freq_table", i915_ring_freq_table, 0},
{"i915_context_status", i915_context_status, 0},
@@ -2172,11 +1896,9 @@ static const struct i915_debugfs_files {
{"i915_error_state", &i915_error_state_fops},
{"i915_gpu_info", &i915_gpu_info_fops},
#endif
- {"i915_guc_log_level", &i915_guc_log_level_fops},
- {"i915_guc_log_relay", &i915_guc_log_relay_fops},
};
-int i915_debugfs_register(struct drm_i915_private *dev_priv)
+void i915_debugfs_register(struct drm_i915_private *dev_priv)
{
struct drm_minor *minor = dev_priv->drm.primary;
int i;
@@ -2193,7 +1915,7 @@ int i915_debugfs_register(struct drm_i915_private *dev_priv)
i915_debugfs_files[i].fops);
}
- return drm_debugfs_create_files(i915_debugfs_list,
- I915_DEBUGFS_ENTRIES,
- minor->debugfs_root, minor);
+ drm_debugfs_create_files(i915_debugfs_list,
+ I915_DEBUGFS_ENTRIES,
+ minor->debugfs_root, minor);
}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.h b/drivers/gpu/drm/i915/i915_debugfs.h
index 6da39c76ab5e..1de2736f1248 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.h
+++ b/drivers/gpu/drm/i915/i915_debugfs.h
@@ -12,10 +12,10 @@ struct drm_i915_private;
struct seq_file;
#ifdef CONFIG_DEBUG_FS
-int i915_debugfs_register(struct drm_i915_private *dev_priv);
+void i915_debugfs_register(struct drm_i915_private *dev_priv);
void i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj);
#else
-static inline int i915_debugfs_register(struct drm_i915_private *dev_priv) { return 0; }
+static inline void i915_debugfs_register(struct drm_i915_private *dev_priv) {}
static inline void i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) {}
#endif
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 81a4621853db..34ee12f3f02d 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -43,6 +43,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_irq.h>
+#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
#include "display/intel_acpi.h"
@@ -227,14 +228,14 @@ static int i915_driver_modeset_probe_noirq(struct drm_i915_private *i915)
ret = drm_vblank_init(&i915->drm,
INTEL_NUM_PIPES(i915));
if (ret)
- goto out;
+ return ret;
}
intel_bios_init(i915);
ret = intel_vga_register(i915);
if (ret)
- goto out;
+ goto cleanup_bios;
intel_power_domains_init_hw(i915, false);
@@ -242,13 +243,16 @@ static int i915_driver_modeset_probe_noirq(struct drm_i915_private *i915)
ret = intel_modeset_init_noirq(i915);
if (ret)
- goto cleanup_vga_client;
+ goto cleanup_vga_client_pw_domain_csr;
return 0;
-cleanup_vga_client:
+cleanup_vga_client_pw_domain_csr:
+ intel_csr_ucode_fini(i915);
+ intel_power_domains_driver_remove(i915);
intel_vga_unregister(i915);
-out:
+cleanup_bios:
+ intel_bios_driver_remove(i915);
return ret;
}
@@ -307,13 +311,13 @@ static void i915_driver_modeset_remove(struct drm_i915_private *i915)
/* part #2: call after irq uninstall */
static void i915_driver_modeset_remove_noirq(struct drm_i915_private *i915)
{
- intel_modeset_driver_remove_noirq(i915);
+ intel_csr_ucode_fini(i915);
- intel_bios_driver_remove(i915);
+ intel_power_domains_driver_remove(i915);
intel_vga_unregister(i915);
- intel_csr_ucode_fini(i915);
+ intel_bios_driver_remove(i915);
}
static void intel_init_dpio(struct drm_i915_private *dev_priv)
@@ -566,6 +570,62 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv)
}
/**
+ * i915_set_dma_info - set all relevant PCI dma info as configured for the
+ * platform
+ * @i915: valid i915 instance
+ *
+ * Set the dma max segment size, device and coherent masks. The dma mask set
+ * needs to occur before i915_ggtt_probe_hw.
+ *
+ * A couple of platforms have special needs. Address them as well.
+ *
+ */
+static int i915_set_dma_info(struct drm_i915_private *i915)
+{
+ struct pci_dev *pdev = i915->drm.pdev;
+ unsigned int mask_size = INTEL_INFO(i915)->dma_mask_size;
+ int ret;
+
+ GEM_BUG_ON(!mask_size);
+
+ /*
+ * We don't have a max segment size, so set it to the max so sg's
+ * debugging layer doesn't complain
+ */
+ dma_set_max_seg_size(&pdev->dev, UINT_MAX);
+
+ ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(mask_size));
+ if (ret)
+ goto mask_err;
+
+ /* overlay on gen2 is broken and can't address above 1G */
+ if (IS_GEN(i915, 2))
+ mask_size = 30;
+
+ /*
+ * 965GM sometimes incorrectly writes to hardware status page (HWS)
+ * using 32bit addressing, overwriting memory if HWS is located
+ * above 4GB.
+ *
+ * The documentation also mentions an issue with undefined
+ * behaviour if any general state is accessed within a page above 4GB,
+ * which also needs to be handled carefully.
+ */
+ if (IS_I965G(i915) || IS_I965GM(i915))
+ mask_size = 32;
+
+ ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(mask_size));
+ if (ret)
+ goto mask_err;
+
+ return 0;
+
+mask_err:
+ drm_err(&i915->drm, "Can't set DMA mask/consistent mask (%d)\n", ret);
+ return ret;
+}
+
+/**
* i915_driver_hw_probe - setup state requiring device access
* @dev_priv: device private
*
@@ -610,6 +670,10 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
/* needs to be done before ggtt probe */
intel_dram_edram_detect(dev_priv);
+ ret = i915_set_dma_info(dev_priv);
+ if (ret)
+ return ret;
+
i915_perf_init(dev_priv);
ret = i915_ggtt_probe_hw(dev_priv);
@@ -638,40 +702,6 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
pci_set_master(pdev);
- /*
- * We don't have a max segment size, so set it to the max so sg's
- * debugging layer doesn't complain
- */
- dma_set_max_seg_size(&pdev->dev, UINT_MAX);
-
- /* overlay on gen2 is broken and can't address above 1G */
- if (IS_GEN(dev_priv, 2)) {
- ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30));
- if (ret) {
- drm_err(&dev_priv->drm, "failed to set DMA mask\n");
-
- goto err_mem_regions;
- }
- }
-
- /* 965GM sometimes incorrectly writes to hardware status page (HWS)
- * using 32bit addressing, overwriting memory if HWS is located
- * above 4GB.
- *
- * The documentation also mentions an issue with undefined
- * behaviour if any general state is accessed within a page above 4GB,
- * which also needs to be handled carefully.
- */
- if (IS_I965G(dev_priv) || IS_I965GM(dev_priv)) {
- ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
-
- if (ret) {
- drm_err(&dev_priv->drm, "failed to set DMA mask\n");
-
- goto err_mem_regions;
- }
- }
-
cpu_latency_qos_add_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
intel_gt_init_workarounds(dev_priv);
@@ -876,17 +906,11 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
(struct intel_device_info *)ent->driver_data;
struct intel_device_info *device_info;
struct drm_i915_private *i915;
- int err;
-
- i915 = kzalloc(sizeof(*i915), GFP_KERNEL);
- if (!i915)
- return ERR_PTR(-ENOMEM);
- err = drm_dev_init(&i915->drm, &driver, &pdev->dev);
- if (err) {
- kfree(i915);
- return ERR_PTR(err);
- }
+ i915 = devm_drm_dev_alloc(&pdev->dev, &driver,
+ struct drm_i915_private, drm);
+ if (IS_ERR(i915))
+ return i915;
i915->drm.pdev = pdev;
pci_set_drvdata(pdev, i915);
@@ -901,17 +925,6 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
return i915;
}
-static void i915_driver_destroy(struct drm_i915_private *i915)
-{
- struct pci_dev *pdev = i915->drm.pdev;
-
- drm_dev_fini(&i915->drm);
- kfree(i915);
-
- /* And make sure we never chase our dangling pointer from pci_dev */
- pci_set_drvdata(pdev, NULL);
-}
-
/**
* i915_driver_probe - setup chip and create an initial config
* @pdev: PCI device
@@ -993,12 +1006,14 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
i915_welcome_messages(i915);
+ i915->do_release = true;
+
return 0;
out_cleanup_irq:
intel_irq_uninstall(i915);
out_cleanup_modeset:
- /* FIXME */
+ i915_driver_modeset_remove_noirq(i915);
out_cleanup_hw:
i915_driver_hw_remove(i915);
intel_memory_regions_driver_release(i915);
@@ -1012,7 +1027,6 @@ out_pci_disable:
pci_disable_device(pdev);
out_fini:
i915_probe_error(i915, "Device initialization failed (%d)\n", ret);
- i915_driver_destroy(i915);
return ret;
}
@@ -1035,12 +1049,12 @@ void i915_driver_remove(struct drm_i915_private *i915)
intel_irq_uninstall(i915);
- i915_driver_modeset_remove_noirq(i915);
+ intel_modeset_driver_remove_noirq(i915);
i915_reset_error_state(i915);
i915_gem_driver_remove(i915);
- intel_power_domains_driver_remove(i915);
+ i915_driver_modeset_remove_noirq(i915);
i915_driver_hw_remove(i915);
@@ -1052,6 +1066,9 @@ static void i915_driver_release(struct drm_device *dev)
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
+ if (!dev_priv->do_release)
+ return;
+
disable_rpm_wakeref_asserts(rpm);
i915_gem_driver_release(dev_priv);
@@ -1065,7 +1082,6 @@ static void i915_driver_release(struct drm_device *dev)
intel_runtime_pm_driver_release(rpm);
i915_driver_late_release(dev_priv);
- i915_driver_destroy(dev_priv);
}
static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
@@ -1286,7 +1302,6 @@ static int i915_drm_resume(struct drm_device *dev)
drm_err(&dev_priv->drm, "failed to re-enable GGTT\n");
i915_ggtt_resume(&dev_priv->ggtt);
- i915_gem_restore_fences(&dev_priv->ggtt);
intel_csr_ucode_resume(dev_priv);
@@ -1604,8 +1619,6 @@ static int intel_runtime_suspend(struct device *kdev)
intel_gt_runtime_resume(&dev_priv->gt);
- i915_gem_restore_fences(&dev_priv->ggtt);
-
enable_rpm_wakeref_asserts(rpm);
return ret;
@@ -1685,7 +1698,6 @@ static int intel_runtime_resume(struct device *kdev)
* we can do is to hope that things will still work (and disable RPM).
*/
intel_gt_runtime_resume(&dev_priv->gt);
- i915_gem_restore_fences(&dev_priv->ggtt);
/*
* On VLV/CHV display interrupts are part of the display
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 62b901ffabf9..adb9bf34cf97 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -92,7 +92,6 @@
#include "intel_wopcm.h"
#include "i915_gem.h"
-#include "i915_gem_fence_reg.h"
#include "i915_gem_gtt.h"
#include "i915_gpu_error.h"
#include "i915_perf_types.h"
@@ -109,8 +108,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20200313"
-#define DRIVER_TIMESTAMP 1584144591
+#define DRIVER_DATE "20200515"
+#define DRIVER_TIMESTAMP 1589543364
struct drm_i915_gem_object;
@@ -149,6 +148,8 @@ enum hpd_pin {
struct i915_hotplug {
struct delayed_work hotplug_work;
+ const u32 *hpd, *pch_hpd;
+
struct {
unsigned long last_jiffies;
int count;
@@ -417,6 +418,7 @@ struct intel_fbc {
struct {
const struct drm_format_info *format;
unsigned int stride;
+ u64 modifier;
} fb;
u16 gen9_wa_cfb_stride;
s8 fence_id;
@@ -510,6 +512,7 @@ struct i915_psr {
u32 dc3co_exit_delay;
struct delayed_work dc3co_work;
bool force_mode_changed;
+ struct drm_dp_vsc_sdp vsc;
};
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
@@ -540,7 +543,6 @@ struct i915_suspend_saved_registers {
u32 saveSWF0[16];
u32 saveSWF1[16];
u32 saveSWF3[3];
- u64 saveFENCE[I915_MAX_NUM_FENCES];
u32 savePCH_PORT_HOTPLUG;
u16 saveGCDGMBUS;
};
@@ -615,13 +617,14 @@ struct i915_gem_mm {
#define I915_IDLE_ENGINES_TIMEOUT (200) /* in ms */
-#define I915_RESET_TIMEOUT (10 * HZ) /* 10s */
-#define I915_FENCE_TIMEOUT (10 * HZ) /* 10s */
-
-#define I915_ENGINE_DEAD_TIMEOUT (4 * HZ) /* Seqno, head and subunits dead */
-#define I915_SEQNO_DEAD_TIMEOUT (12 * HZ) /* Seqno dead with active head */
+unsigned long i915_fence_context_timeout(const struct drm_i915_private *i915,
+ u64 context);
-#define I915_ENGINE_WEDGED_TIMEOUT (60 * HZ) /* Reset but no recovery? */
+static inline unsigned long
+i915_fence_timeout(const struct drm_i915_private *i915)
+{
+ return i915_fence_context_timeout(i915, U64_MAX);
+}
/* Amount of SAGV/QGV points, BSpec precisely defines this */
#define I915_NUM_QGV_POINTS 8
@@ -823,6 +826,9 @@ struct i915_selftest_stash {
struct drm_i915_private {
struct drm_device drm;
+ /* FIXME: Device release actions should all be moved to drmm_ */
+ bool do_release;
+
const struct intel_device_info __info; /* Use INTEL_INFO() to access. */
struct intel_runtime_info __runtime; /* Use RUNTIME_INFO() to access. */
struct intel_driver_caps caps;
@@ -885,7 +891,6 @@ struct drm_i915_private {
struct pci_dev *bridge_dev;
- struct intel_engine_cs *engine[I915_NUM_ENGINES];
struct rb_root uabi_engines;
struct resource mch_res;
@@ -1506,6 +1511,11 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define IS_ICL_REVID(p, since, until) \
(IS_ICELAKE(p) && IS_REVID(p, since, until))
+#define EHL_REVID_A0 0x0
+
+#define IS_EHL_REVID(p, since, until) \
+ (IS_ELKHARTLAKE(p) && IS_REVID(p, since, until))
+
#define TGL_REVID_A0 0x0
#define TGL_REVID_B0 0x1
#define TGL_REVID_C0 0x2
@@ -1606,7 +1616,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_DDI(dev_priv) (INTEL_INFO(dev_priv)->display.has_ddi)
#define HAS_FPGA_DBG_UNCLAIMED(dev_priv) (INTEL_INFO(dev_priv)->has_fpga_dbg)
#define HAS_PSR(dev_priv) (INTEL_INFO(dev_priv)->display.has_psr)
-#define HAS_TRANSCODER_EDP(dev_priv) (INTEL_INFO(dev_priv)->trans_offsets[TRANSCODER_EDP] != 0)
+#define HAS_TRANSCODER(dev_priv, trans) ((INTEL_INFO(dev_priv)->cpu_transcoder_mask & BIT(trans)) != 0)
#define HAS_RC6(dev_priv) (INTEL_INFO(dev_priv)->has_rc6)
#define HAS_RC6p(dev_priv) (INTEL_INFO(dev_priv)->has_rc6p)
@@ -1740,6 +1750,7 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
unsigned long flags);
#define I915_GEM_OBJECT_UNBIND_ACTIVE BIT(0)
#define I915_GEM_OBJECT_UNBIND_BARRIER BIT(1)
+#define I915_GEM_OBJECT_UNBIND_TEST BIT(2)
void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
@@ -1913,4 +1924,16 @@ i915_coherent_map_type(struct drm_i915_private *i915)
return HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
}
+static inline u64 i915_cs_timestamp_ns_to_ticks(struct drm_i915_private *i915, u64 val)
+{
+ return DIV_ROUND_UP_ULL(val * RUNTIME_INFO(i915)->cs_timestamp_frequency_hz,
+ 1000000000);
+}
+
+static inline u64 i915_cs_timestamp_ticks_to_ns(struct drm_i915_private *i915, u64 val)
+{
+ return div_u64(val * 1000000000,
+ RUNTIME_INFO(i915)->cs_timestamp_frequency_hz);
+}
+
#endif
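
The two inline helpers added above convert between nanoseconds and command-streamer timestamp ticks using the runtime-probed frequency in Hz. A worked example under an assumed 19.2 MHz clock (the frequency value is illustrative only, not from the patch):

	u64 freq_hz = 19200000;		/* assumed CS timestamp clock */
	u64 ns = 1000000;		/* 1 ms */

	/* ns -> ticks, as in i915_cs_timestamp_ns_to_ticks() */
	u64 ticks = DIV_ROUND_UP_ULL(ns * freq_hz, 1000000000);	/* 19200 */

	/* ticks -> ns, as in i915_cs_timestamp_ticks_to_ns() */
	u64 back = div_u64(ticks * 1000000000, freq_hz);		/* 1000000 */
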
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ca5420012a22..0cbcb9f54e7d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -118,7 +118,7 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
struct i915_vma *vma;
int ret;
- if (!atomic_read(&obj->bind_count))
+ if (list_empty(&obj->vma.list))
return 0;
/*
@@ -141,6 +141,11 @@ try_again:
if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK))
continue;
+ if (flags & I915_GEM_OBJECT_UNBIND_TEST) {
+ ret = -EBUSY;
+ break;
+ }
+
ret = -EAGAIN;
if (!i915_vm_tryopen(vm))
break;
@@ -993,18 +998,16 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
return ERR_PTR(ret);
}
+ ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
+ if (ret)
+ return ERR_PTR(ret);
+
if (vma->fence && !i915_gem_object_is_tiled(obj)) {
mutex_lock(&ggtt->vm.mutex);
- ret = i915_vma_revoke_fence(vma);
+ i915_vma_revoke_fence(vma);
mutex_unlock(&ggtt->vm.mutex);
- if (ret)
- return ERR_PTR(ret);
}
- ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
- if (ret)
- return ERR_PTR(ret);
-
ret = i915_vma_wait_for_bind(vma);
if (ret) {
i915_vma_unpin(vma);
@@ -1156,7 +1159,6 @@ err_unlock:
/* Minimal basic recovery for KMS */
ret = i915_ggtt_enable_hw(dev_priv);
i915_ggtt_resume(&dev_priv->ggtt);
- i915_gem_restore_fences(&dev_priv->ggtt);
intel_init_clock_gating(dev_priv);
}
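
Together with the switch to the list_empty() check, the new I915_GEM_OBJECT_UNBIND_TEST flag turns i915_gem_object_unbind() into a dry run: the first still-bound vma makes the walk bail out with -EBUSY instead of unbinding anything. A hedged usage sketch (editorial, not from the patch):

	/* Probe whether the object could be unbound without touching it. */
	if (i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_TEST) == -EBUSY)
		return false;	/* still bound somewhere */
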
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 02ad1acd117c..6501939929d5 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -226,7 +226,12 @@ found:
while (ret == 0 && (node = drm_mm_scan_color_evict(&scan))) {
vma = container_of(node, struct i915_vma, node);
- ret = __i915_vma_unbind(vma);
+
+ /* If we find any non-objects (!vma), we cannot evict them */
+ if (vma->node.color != I915_COLOR_UNEVICTABLE)
+ ret = __i915_vma_unbind(vma);
+ else
+ ret = -ENOSPC; /* XXX search failed, try again? */
}
return ret;
diff --git a/drivers/gpu/drm/i915/i915_getparam.c b/drivers/gpu/drm/i915/i915_getparam.c
index 54fce81d5724..d042644b9cd2 100644
--- a/drivers/gpu/drm/i915/i915_getparam.c
+++ b/drivers/gpu/drm/i915/i915_getparam.c
@@ -153,7 +153,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
return -ENODEV;
break;
case I915_PARAM_CS_TIMESTAMP_FREQUENCY:
- value = 1000 * RUNTIME_INFO(i915)->cs_timestamp_frequency_khz;
+ value = RUNTIME_INFO(i915)->cs_timestamp_frequency_hz;
break;
case I915_PARAM_MMAP_GTT_COHERENT:
value = INTEL_INFO(i915)->has_coherent_ggtt;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 5c8e51d2ba5b..eec292d06f11 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -467,14 +467,14 @@ static void error_print_request(struct drm_i915_error_state_buf *m,
if (!erq->seqno)
return;
- err_printf(m, "%s pid %d, seqno %8x:%08x%s%s, prio %d, start %08x, head %08x, tail %08x\n",
+ err_printf(m, "%s pid %d, seqno %8x:%08x%s%s, prio %d, head %08x, tail %08x\n",
prefix, erq->pid, erq->context, erq->seqno,
test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
&erq->flags) ? "!" : "",
test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
&erq->flags) ? "+" : "",
erq->sched_attr.priority,
- erq->start, erq->head, erq->tail);
+ erq->head, erq->tail);
}
static void error_print_context(struct drm_i915_error_state_buf *m,
@@ -1211,7 +1211,6 @@ static void record_request(const struct i915_request *request,
erq->context = request->fence.context;
erq->seqno = request->fence.seqno;
erq->sched_attr = request->sched.attr;
- erq->start = i915_ggtt_offset(request->ring->vma);
erq->head = request->head;
erq->tail = request->tail;
@@ -1321,26 +1320,6 @@ capture_user(struct intel_engine_capture_vma *capture,
return capture;
}
-static struct i915_vma_coredump *
-capture_object(const struct intel_gt *gt,
- struct drm_i915_gem_object *obj,
- const char *name,
- struct i915_vma_compress *compress)
-{
- if (obj && i915_gem_object_has_pages(obj)) {
- struct i915_vma fake = {
- .node = { .start = U64_MAX, .size = obj->base.size },
- .size = obj->base.size,
- .pages = obj->mm.pages,
- .obj = obj,
- };
-
- return i915_vma_coredump_create(gt, &fake, name, compress);
- } else {
- return NULL;
- }
-}
-
static void add_vma(struct intel_engine_coredump *ee,
struct i915_vma_coredump *vma)
{
@@ -1429,12 +1408,6 @@ intel_engine_coredump_add_vma(struct intel_engine_coredump *ee,
engine->wa_ctx.vma,
"WA context",
compress));
-
- add_vma(ee,
- capture_object(engine->gt,
- engine->default_state,
- "NULL context",
- compress));
}
static struct intel_engine_coredump *
@@ -1860,7 +1833,7 @@ void i915_error_state_store(struct i915_gpu_coredump *error)
return;
i915 = error->i915;
- dev_info(i915->drm.dev, "%s\n", error_msg(error));
+ drm_info(&i915->drm, "%s\n", error_msg(error));
if (error->simulated ||
cmpxchg(&i915->gpu_error.first_error, NULL, error))
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 0d1f6c8ff355..76b80fbfb7e9 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -42,7 +42,7 @@ struct i915_vma_coredump {
int num_pages;
int page_count;
int unused;
- u32 *pages[0];
+ u32 *pages[];
};
struct i915_request_coredump {
@@ -50,7 +50,6 @@ struct i915_request_coredump {
pid_t pid;
u32 context;
u32 seqno;
- u32 start;
u32 head;
u32 tail;
struct i915_sched_attr sched_attr;
diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
index 8e45ca3d2ede..55b97c3a3dde 100644
--- a/drivers/gpu/drm/i915/i915_ioc32.c
+++ b/drivers/gpu/drm/i915/i915_ioc32.c
@@ -47,20 +47,16 @@ static int compat_i915_getparam(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct drm_i915_getparam32 req32;
- drm_i915_getparam_t __user *request;
+ struct drm_i915_getparam req;
if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
return -EFAULT;
- request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(request, sizeof(*request)) ||
- __put_user(req32.param, &request->param) ||
- __put_user((void __user *)(unsigned long)req32.value,
- &request->value))
- return -EFAULT;
+ req.param = req32.param;
+ req.value = compat_ptr(req32.value);
- return drm_ioctl(file, DRM_IOCTL_I915_GETPARAM,
- (unsigned long)request);
+ return drm_ioctl_kernel(file, i915_getparam_ioctl, &req,
+ DRM_RENDER_ALLOW);
}
static drm_ioctl_compat_t *i915_compat_ioctls[] = {
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 8a2b83807ffc..4dc601dffc08 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -124,7 +124,6 @@ static const u32 hpd_status_i915[HPD_NUM_PINS] = {
[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
};
-/* BXT hpd list */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
@@ -168,6 +167,49 @@ static const u32 hpd_tgp[HPD_NUM_PINS] = {
[HPD_PORT_I] = SDE_TC_HOTPLUG_ICP(PORT_TC6),
};
+static void intel_hpd_init_pins(struct drm_i915_private *dev_priv)
+{
+ struct i915_hotplug *hpd = &dev_priv->hotplug;
+
+ if (HAS_GMCH(dev_priv)) {
+ if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
+ IS_CHERRYVIEW(dev_priv))
+ hpd->hpd = hpd_status_g4x;
+ else
+ hpd->hpd = hpd_status_i915;
+ return;
+ }
+
+ if (INTEL_GEN(dev_priv) >= 12)
+ hpd->hpd = hpd_gen12;
+ else if (INTEL_GEN(dev_priv) >= 11)
+ hpd->hpd = hpd_gen11;
+ else if (IS_GEN9_LP(dev_priv))
+ hpd->hpd = hpd_bxt;
+ else if (INTEL_GEN(dev_priv) >= 8)
+ hpd->hpd = hpd_bdw;
+ else if (INTEL_GEN(dev_priv) >= 7)
+ hpd->hpd = hpd_ivb;
+ else
+ hpd->hpd = hpd_ilk;
+
+ if (!HAS_PCH_SPLIT(dev_priv) || HAS_PCH_NOP(dev_priv))
+ return;
+
+ if (HAS_PCH_TGP(dev_priv) || HAS_PCH_JSP(dev_priv))
+ hpd->pch_hpd = hpd_tgp;
+ else if (HAS_PCH_ICP(dev_priv) || HAS_PCH_MCC(dev_priv))
+ hpd->pch_hpd = hpd_icp;
+ else if (HAS_PCH_CNP(dev_priv) || HAS_PCH_SPT(dev_priv))
+ hpd->pch_hpd = hpd_spt;
+ else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_CPT(dev_priv))
+ hpd->pch_hpd = hpd_cpt;
+ else if (HAS_PCH_IBX(dev_priv))
+ hpd->pch_hpd = hpd_ibx;
+ else
+ MISSING_CASE(INTEL_PCH_TYPE(dev_priv));
+}
+
static void
intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
{
@@ -1504,33 +1546,27 @@ static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
u32 hotplug_status)
{
u32 pin_mask = 0, long_mask = 0;
+ u32 hotplug_trigger;
- if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
- IS_CHERRYVIEW(dev_priv)) {
- u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
-
- if (hotplug_trigger) {
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
- hotplug_trigger, hotplug_trigger,
- hpd_status_g4x,
- i9xx_port_hotplug_long_detect);
+ if (IS_G4X(dev_priv) ||
+ IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
+ else
+ hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
- intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
- }
+ if (hotplug_trigger) {
+ intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ hotplug_trigger, hotplug_trigger,
+ dev_priv->hotplug.hpd,
+ i9xx_port_hotplug_long_detect);
- if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
- dp_aux_irq_handler(dev_priv);
- } else {
- u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
-
- if (hotplug_trigger) {
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
- hotplug_trigger, hotplug_trigger,
- hpd_status_i915,
- i9xx_port_hotplug_long_detect);
- intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
- }
+ intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}
+
+ if ((IS_G4X(dev_priv) ||
+ IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
+ dp_aux_irq_handler(dev_priv);
}
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
@@ -1696,8 +1732,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
}
static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
- u32 hotplug_trigger,
- const u32 hpd[HPD_NUM_PINS])
+ u32 hotplug_trigger)
{
u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
@@ -1720,8 +1755,9 @@ static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
if (!hotplug_trigger)
return;
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
- dig_hotplug_reg, hpd,
+ intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ hotplug_trigger, dig_hotplug_reg,
+ dev_priv->hotplug.pch_hpd,
pch_port_hotplug_long_detect);
intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
@@ -1732,7 +1768,7 @@ static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
enum pipe pipe;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
- ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);
+ ibx_hpd_irq_handler(dev_priv, hotplug_trigger);
if (pch_iir & SDE_AUDIO_POWER_MASK) {
int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
@@ -1820,7 +1856,7 @@ static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
enum pipe pipe;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
- ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt);
+ ibx_hpd_irq_handler(dev_priv, hotplug_trigger);
if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
@@ -1857,22 +1893,18 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
u32 ddi_hotplug_trigger, tc_hotplug_trigger;
u32 pin_mask = 0, long_mask = 0;
bool (*tc_port_hotplug_long_detect)(enum hpd_pin pin, u32 val);
- const u32 *pins;
if (HAS_PCH_TGP(dev_priv)) {
ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP;
tc_hotplug_trigger = pch_iir & SDE_TC_MASK_TGP;
tc_port_hotplug_long_detect = tgp_tc_port_hotplug_long_detect;
- pins = hpd_tgp;
} else if (HAS_PCH_JSP(dev_priv)) {
ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP;
tc_hotplug_trigger = 0;
- pins = hpd_tgp;
} else if (HAS_PCH_MCC(dev_priv)) {
ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_ICP(PORT_TC1);
tc_port_hotplug_long_detect = icp_tc_port_hotplug_long_detect;
- pins = hpd_icp;
} else {
drm_WARN(&dev_priv->drm, !HAS_PCH_ICP(dev_priv),
"Unrecognized PCH type 0x%x\n",
@@ -1881,7 +1913,6 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP;
tc_port_hotplug_long_detect = icp_tc_port_hotplug_long_detect;
- pins = hpd_icp;
}
if (ddi_hotplug_trigger) {
@@ -1891,8 +1922,8 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg);
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
- ddi_hotplug_trigger,
- dig_hotplug_reg, pins,
+ ddi_hotplug_trigger, dig_hotplug_reg,
+ dev_priv->hotplug.pch_hpd,
icp_ddi_port_hotplug_long_detect);
}
@@ -1903,8 +1934,8 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg);
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
- tc_hotplug_trigger,
- dig_hotplug_reg, pins,
+ tc_hotplug_trigger, dig_hotplug_reg,
+ dev_priv->hotplug.pch_hpd,
tc_port_hotplug_long_detect);
}
@@ -1929,7 +1960,8 @@ static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
- hotplug_trigger, dig_hotplug_reg, hpd_spt,
+ hotplug_trigger, dig_hotplug_reg,
+ dev_priv->hotplug.pch_hpd,
spt_port_hotplug_long_detect);
}
@@ -1940,7 +1972,8 @@ static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
- hotplug2_trigger, dig_hotplug_reg, hpd_spt,
+ hotplug2_trigger, dig_hotplug_reg,
+ dev_priv->hotplug.pch_hpd,
spt_port_hotplug2_long_detect);
}
@@ -1952,16 +1985,16 @@ static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
}
static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
- u32 hotplug_trigger,
- const u32 hpd[HPD_NUM_PINS])
+ u32 hotplug_trigger)
{
u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
- dig_hotplug_reg, hpd,
+ intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ hotplug_trigger, dig_hotplug_reg,
+ dev_priv->hotplug.hpd,
ilk_port_hotplug_long_detect);
intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
@@ -1974,7 +2007,7 @@ static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
if (hotplug_trigger)
- ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk);
+ ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
if (de_iir & DE_AUX_CHANNEL_A)
dp_aux_irq_handler(dev_priv);
@@ -2020,7 +2053,7 @@ static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
if (hotplug_trigger)
- ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb);
+ ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
if (de_iir & DE_ERR_INT_IVB)
ivb_err_int_handler(dev_priv);
@@ -2130,16 +2163,16 @@ static irqreturn_t ilk_irq_handler(int irq, void *arg)
}
static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
- u32 hotplug_trigger,
- const u32 hpd[HPD_NUM_PINS])
+ u32 hotplug_trigger)
{
u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
- dig_hotplug_reg, hpd,
+ intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ hotplug_trigger, dig_hotplug_reg,
+ dev_priv->hotplug.hpd,
bxt_port_hotplug_long_detect);
intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
@@ -2151,15 +2184,11 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;
long_pulse_detect_func long_pulse_detect;
- const u32 *hpd;
- if (INTEL_GEN(dev_priv) >= 12) {
+ if (INTEL_GEN(dev_priv) >= 12)
long_pulse_detect = gen12_port_hotplug_long_detect;
- hpd = hpd_gen12;
- } else {
+ else
long_pulse_detect = gen11_port_hotplug_long_detect;
- hpd = hpd_gen11;
- }
if (trigger_tc) {
u32 dig_hotplug_reg;
@@ -2167,8 +2196,10 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
dig_hotplug_reg = I915_READ(GEN11_TC_HOTPLUG_CTL);
I915_WRITE(GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg);
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tc,
- dig_hotplug_reg, hpd, long_pulse_detect);
+ intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ trigger_tc, dig_hotplug_reg,
+ dev_priv->hotplug.hpd,
+ long_pulse_detect);
}
if (trigger_tbt) {
@@ -2177,8 +2208,10 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
dig_hotplug_reg = I915_READ(GEN11_TBT_HOTPLUG_CTL);
I915_WRITE(GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg);
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tbt,
- dig_hotplug_reg, hpd, long_pulse_detect);
+ intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ trigger_tbt, dig_hotplug_reg,
+ dev_priv->hotplug.hpd,
+ long_pulse_detect);
}
if (pin_mask)
@@ -2309,15 +2342,13 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
if (IS_GEN9_LP(dev_priv)) {
tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
if (tmp_mask) {
- bxt_hpd_irq_handler(dev_priv, tmp_mask,
- hpd_bxt);
+ bxt_hpd_irq_handler(dev_priv, tmp_mask);
found = true;
}
} else if (IS_BROADWELL(dev_priv)) {
tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
if (tmp_mask) {
- ilk_hpd_irq_handler(dev_priv,
- tmp_mask, hpd_bdw);
+ ilk_hpd_irq_handler(dev_priv, tmp_mask);
found = true;
}
}
@@ -2870,6 +2901,14 @@ static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
GEN3_IRQ_RESET(uncore, SDE);
+
+ /* Wa_14010685332:icl */
+ if (INTEL_PCH_TYPE(dev_priv) == PCH_ICP) {
+ intel_uncore_rmw(uncore, SOUTH_CHICKEN1,
+ SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
+ intel_uncore_rmw(uncore, SOUTH_CHICKEN1,
+ SBCLK_RUN_REFCLK_DIS, 0);
+ }
}
static void gen11_irq_reset(struct drm_i915_private *dev_priv)
@@ -2989,13 +3028,12 @@ static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
u32 hotplug_irqs, enabled_irqs;
- if (HAS_PCH_IBX(dev_priv)) {
+ if (HAS_PCH_IBX(dev_priv))
hotplug_irqs = SDE_HOTPLUG_MASK;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
- } else {
+ else
hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
- }
+
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
@@ -3021,13 +3059,12 @@ static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv,
static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv,
u32 sde_ddi_mask, u32 sde_tc_mask,
- u32 ddi_enable_mask, u32 tc_enable_mask,
- const u32 *pins)
+ u32 ddi_enable_mask, u32 tc_enable_mask)
{
u32 hotplug_irqs, enabled_irqs;
hotplug_irqs = sde_ddi_mask | sde_tc_mask;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, pins);
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
@@ -3044,8 +3081,7 @@ static void mcc_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
icp_hpd_irq_setup(dev_priv,
SDE_DDI_MASK_ICP, SDE_TC_HOTPLUG_ICP(PORT_TC1),
- ICP_DDI_HPD_ENABLE_MASK, ICP_TC_HPD_ENABLE(PORT_TC1),
- hpd_icp);
+ ICP_DDI_HPD_ENABLE_MASK, ICP_TC_HPD_ENABLE(PORT_TC1));
}
/*
@@ -3057,8 +3093,7 @@ static void jsp_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
icp_hpd_irq_setup(dev_priv,
SDE_DDI_MASK_TGP, 0,
- TGP_DDI_HPD_ENABLE_MASK, 0,
- hpd_tgp);
+ TGP_DDI_HPD_ENABLE_MASK, 0);
}
static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv)
@@ -3083,11 +3118,9 @@ static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv)
static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
u32 hotplug_irqs, enabled_irqs;
- const u32 *hpd;
u32 val;
- hpd = INTEL_GEN(dev_priv) >= 12 ? hpd_gen12 : hpd_gen11;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd);
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
hotplug_irqs = GEN11_DE_TC_HOTPLUG_MASK | GEN11_DE_TBT_HOTPLUG_MASK;
val = I915_READ(GEN11_DE_HPD_IMR);
@@ -3099,12 +3132,10 @@ static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP)
icp_hpd_irq_setup(dev_priv, SDE_DDI_MASK_TGP, SDE_TC_MASK_TGP,
- TGP_DDI_HPD_ENABLE_MASK,
- TGP_TC_HPD_ENABLE_MASK, hpd_tgp);
+ TGP_DDI_HPD_ENABLE_MASK, TGP_TC_HPD_ENABLE_MASK);
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
icp_hpd_irq_setup(dev_priv, SDE_DDI_MASK_ICP, SDE_TC_MASK_ICP,
- ICP_DDI_HPD_ENABLE_MASK,
- ICP_TC_HPD_ENABLE_MASK, hpd_icp);
+ ICP_DDI_HPD_ENABLE_MASK, ICP_TC_HPD_ENABLE_MASK);
}
static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
@@ -3140,7 +3171,7 @@ static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
@@ -3169,17 +3200,17 @@ static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
if (INTEL_GEN(dev_priv) >= 8) {
hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw);
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
} else if (INTEL_GEN(dev_priv) >= 7) {
hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb);
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
} else {
hotplug_irqs = DE_DP_A_HOTPLUG;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk);
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
}
@@ -3230,7 +3261,7 @@ static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
u32 hotplug_irqs, enabled_irqs;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
@@ -3646,7 +3677,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);
if (iir & I915_USER_INTERRUPT)
- intel_engine_signal_breadcrumbs(dev_priv->engine[RCS0]);
+ intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]);
if (iir & I915_MASTER_ERROR_INTERRUPT)
i8xx_error_irq_handler(dev_priv, eir, eir_stuck);
@@ -3751,7 +3782,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
I915_WRITE(GEN2_IIR, iir);
if (iir & I915_USER_INTERRUPT)
- intel_engine_signal_breadcrumbs(dev_priv->engine[RCS0]);
+ intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]);
if (iir & I915_MASTER_ERROR_INTERRUPT)
i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
@@ -3893,10 +3924,10 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
I915_WRITE(GEN2_IIR, iir);
if (iir & I915_USER_INTERRUPT)
- intel_engine_signal_breadcrumbs(dev_priv->engine[RCS0]);
+ intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]);
if (iir & I915_BSD_USER_INTERRUPT)
- intel_engine_signal_breadcrumbs(dev_priv->engine[VCS0]);
+ intel_engine_signal_breadcrumbs(dev_priv->gt.engine[VCS0]);
if (iir & I915_MASTER_ERROR_INTERRUPT)
i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
@@ -3924,6 +3955,8 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
struct drm_device *dev = &dev_priv->drm;
int i;
+ intel_hpd_init_pins(dev_priv);
+
intel_hpd_init_work(dev_priv);
INIT_WORK(&dev_priv->l3_parity.error_work, ivb_parity_work);
diff --git a/drivers/gpu/drm/i915/i915_mm.c b/drivers/gpu/drm/i915/i915_mm.c
index b6376b25ef63..43039dc8c607 100644
--- a/drivers/gpu/drm/i915/i915_mm.c
+++ b/drivers/gpu/drm/i915/i915_mm.c
@@ -25,7 +25,6 @@
#include <linux/mm.h>
#include <linux/io-mapping.h>
-#include <asm/pgtable.h>
#include "i915_drv.h"
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index add00ec1f787..02559da61e6e 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -65,7 +65,7 @@ i915_param_named_unsafe(vbt_sdvo_panel_type, int, 0400,
"Override/Ignore selection of SDVO panel mode in the VBT "
"(-2=ignore, -1=auto [default], index in VBT BIOS table)");
-i915_param_named_unsafe(reset, int, 0600,
+i915_param_named_unsafe(reset, uint, 0600,
"Attempt GPU resets (0=disabled, 1=full gpu reset, 2=engine reset [default])");
i915_param_named_unsafe(vbt_firmware, charp, 0400,
@@ -173,7 +173,7 @@ i915_param_named(enable_gvt, bool, 0400,
#endif
#if IS_ENABLED(CONFIG_DRM_I915_UNSTABLE_FAKE_LMEM)
-i915_param_named_unsafe(fake_lmem_start, ulong, 0600,
+i915_param_named_unsafe(fake_lmem_start, ulong, 0400,
"Fake LMEM start offset (default: 0)");
#endif
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 45323732f099..4f21bfffbf0e 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -64,7 +64,7 @@ struct drm_printer;
param(int, mmio_debug, -IS_ENABLED(CONFIG_DRM_I915_DEBUG_MMIO), 0600) \
param(int, edp_vswing, 0, 0400) \
param(unsigned int, reset, 3, 0600) \
- param(unsigned int, inject_probe_failure, 0, 0600) \
+ param(unsigned int, inject_probe_failure, 0, 0) \
param(int, fastboot, -1, 0600) \
param(int, enable_dpcd_backlight, -1, 0600) \
param(char *, force_probe, CONFIG_DRM_I915_FORCE_PROBE, 0400) \
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 2c80a0194c80..eb0b5be7c35d 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -160,6 +160,7 @@
GEN(2), \
.is_mobile = 1, \
.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
+ .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
.display.has_overlay = 1, \
.display.cursor_needs_physical = 1, \
.display.overlay_needs_physical = 1, \
@@ -170,6 +171,7 @@
.engine_mask = BIT(RCS0), \
.has_snoop = true, \
.has_coherent_ggtt = false, \
+ .dma_mask_size = 32, \
I9XX_PIPE_OFFSETS, \
I9XX_CURSOR_OFFSETS, \
I9XX_COLORS, \
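
The new dma_mask_size field records each platform's DMA addressing width in bits (32, 36, 39 or 40 in these hunks). The code consuming it is not part of this hunk; presumably it feeds a DMA_BIT_MASK()-style mask at probe time. A minimal user-space model of that mask arithmetic, under that assumption:

#include <stdio.h>
#include <stdint.h>

/* Mirrors the kernel's DMA_BIT_MASK(n): all ones below bit n, with
 * n == 64 special-cased to avoid an undefined 64-bit shift. */
static uint64_t dma_bit_mask(unsigned int n)
{
	return n == 64 ? ~0ULL : (1ULL << n) - 1;
}

int main(void)
{
	/* The per-platform widths added in this patch. */
	static const unsigned int sizes[] = { 32, 36, 39, 40 };

	for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		uint64_t mask = dma_bit_mask(sizes[i]);

		printf("dma_mask_size=%u -> mask 0x%010llx (%llu GiB addressable)\n",
		       sizes[i], (unsigned long long)mask,
		       (unsigned long long)((mask + 1) >> 30));
	}
	return 0;
}
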
@@ -179,6 +181,7 @@
#define I845_FEATURES \
GEN(2), \
.pipe_mask = BIT(PIPE_A), \
+ .cpu_transcoder_mask = BIT(TRANSCODER_A), \
.display.has_overlay = 1, \
.display.overlay_needs_physical = 1, \
.display.has_gmch = 1, \
@@ -188,6 +191,7 @@
.engine_mask = BIT(RCS0), \
.has_snoop = true, \
.has_coherent_ggtt = false, \
+ .dma_mask_size = 32, \
I845_PIPE_OFFSETS, \
I845_CURSOR_OFFSETS, \
I9XX_COLORS, \
@@ -218,11 +222,13 @@ static const struct intel_device_info i865g_info = {
#define GEN3_FEATURES \
GEN(3), \
.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
+ .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
.display.has_gmch = 1, \
.gpu_reset_clobbers_display = true, \
.engine_mask = BIT(RCS0), \
.has_snoop = true, \
.has_coherent_ggtt = true, \
+ .dma_mask_size = 32, \
I9XX_PIPE_OFFSETS, \
I9XX_CURSOR_OFFSETS, \
I9XX_COLORS, \
@@ -283,6 +289,7 @@ static const struct intel_device_info g33_info = {
PLATFORM(INTEL_G33),
.display.has_hotplug = 1,
.display.has_overlay = 1,
+ .dma_mask_size = 36,
};
static const struct intel_device_info pnv_g_info = {
@@ -290,6 +297,7 @@ static const struct intel_device_info pnv_g_info = {
PLATFORM(INTEL_PINEVIEW),
.display.has_hotplug = 1,
.display.has_overlay = 1,
+ .dma_mask_size = 36,
};
static const struct intel_device_info pnv_m_info = {
@@ -298,17 +306,20 @@ static const struct intel_device_info pnv_m_info = {
.is_mobile = 1,
.display.has_hotplug = 1,
.display.has_overlay = 1,
+ .dma_mask_size = 36,
};
#define GEN4_FEATURES \
GEN(4), \
.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
+ .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
.display.has_hotplug = 1, \
.display.has_gmch = 1, \
.gpu_reset_clobbers_display = true, \
.engine_mask = BIT(RCS0), \
.has_snoop = true, \
.has_coherent_ggtt = true, \
+ .dma_mask_size = 36, \
I9XX_PIPE_OFFSETS, \
I9XX_CURSOR_OFFSETS, \
I965_COLORS, \
@@ -354,12 +365,14 @@ static const struct intel_device_info gm45_info = {
#define GEN5_FEATURES \
GEN(5), \
.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
+ .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
.display.has_hotplug = 1, \
.engine_mask = BIT(RCS0) | BIT(VCS0), \
.has_snoop = true, \
.has_coherent_ggtt = true, \
/* ilk does support rc6, but we do not implement [power] contexts */ \
.has_rc6 = 0, \
+ .dma_mask_size = 36, \
I9XX_PIPE_OFFSETS, \
I9XX_CURSOR_OFFSETS, \
ILK_COLORS, \
@@ -381,6 +394,7 @@ static const struct intel_device_info ilk_m_info = {
#define GEN6_FEATURES \
GEN(6), \
.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
+ .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
.display.has_hotplug = 1, \
.display.has_fbc = 1, \
.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
@@ -389,6 +403,7 @@ static const struct intel_device_info ilk_m_info = {
.has_rc6 = 1, \
.has_rc6p = 1, \
.has_rps = true, \
+ .dma_mask_size = 40, \
.ppgtt_type = INTEL_PPGTT_ALIASING, \
.ppgtt_size = 31, \
I9XX_PIPE_OFFSETS, \
@@ -430,6 +445,7 @@ static const struct intel_device_info snb_m_gt2_info = {
#define GEN7_FEATURES \
GEN(7), \
.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \
+ .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), \
.display.has_hotplug = 1, \
.display.has_fbc = 1, \
.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
@@ -438,6 +454,7 @@ static const struct intel_device_info snb_m_gt2_info = {
.has_rc6 = 1, \
.has_rc6p = 1, \
.has_rps = true, \
+ .dma_mask_size = 40, \
.ppgtt_type = INTEL_PPGTT_ALIASING, \
.ppgtt_size = 31, \
IVB_PIPE_OFFSETS, \
@@ -482,6 +499,7 @@ static const struct intel_device_info ivb_q_info = {
PLATFORM(INTEL_IVYBRIDGE),
.gt = 2,
.pipe_mask = 0, /* legal, last one wins */
+ .cpu_transcoder_mask = 0,
.has_l3_dpf = 1,
};
@@ -490,11 +508,13 @@ static const struct intel_device_info vlv_info = {
GEN(7),
.is_lp = 1,
.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B),
+ .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B),
.has_runtime_pm = 1,
.has_rc6 = 1,
.has_rps = true,
.display.has_gmch = 1,
.display.has_hotplug = 1,
+ .dma_mask_size = 40,
.ppgtt_type = INTEL_PPGTT_ALIASING,
.ppgtt_size = 31,
.has_snoop = true,
@@ -511,6 +531,8 @@ static const struct intel_device_info vlv_info = {
#define G75_FEATURES \
GEN7_FEATURES, \
.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
+ .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
+ BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP), \
.display.has_ddi = 1, \
.has_fpga_dbg = 1, \
.display.has_psr = 1, \
@@ -543,6 +565,7 @@ static const struct intel_device_info hsw_gt3_info = {
G75_FEATURES, \
GEN(8), \
.has_logical_ring_contexts = 1, \
+ .dma_mask_size = 39, \
.ppgtt_type = INTEL_PPGTT_FULL, \
.ppgtt_size = 48, \
.has_64bit_reloc = 1, \
@@ -581,6 +604,7 @@ static const struct intel_device_info chv_info = {
PLATFORM(INTEL_CHERRYVIEW),
GEN(8),
.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
+ .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C),
.display.has_hotplug = 1,
.is_lp = 1,
.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0),
@@ -590,7 +614,8 @@ static const struct intel_device_info chv_info = {
.has_rps = true,
.has_logical_ring_contexts = 1,
.display.has_gmch = 1,
- .ppgtt_type = INTEL_PPGTT_ALIASING,
+ .dma_mask_size = 39,
+ .ppgtt_type = INTEL_PPGTT_FULL,
.ppgtt_size = 32,
.has_reset_engine = 1,
.has_snoop = true,
@@ -656,6 +681,9 @@ static const struct intel_device_info skl_gt4_info = {
.display.has_hotplug = 1, \
.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \
+ .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
+ BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP) | \
+ BIT(TRANSCODER_DSI_A) | BIT(TRANSCODER_DSI_C), \
.has_64bit_reloc = 1, \
.display.has_ddi = 1, \
.has_fpga_dbg = 1, \
@@ -670,6 +698,7 @@ static const struct intel_device_info skl_gt4_info = {
.has_logical_ring_contexts = 1, \
.has_logical_ring_preemption = 1, \
.has_gt_uc = 1, \
+ .dma_mask_size = 39, \
.ppgtt_type = INTEL_PPGTT_FULL, \
.ppgtt_size = 48, \
.has_reset_engine = 1, \
@@ -759,6 +788,9 @@ static const struct intel_device_info cnl_info = {
#define GEN11_FEATURES \
GEN10_FEATURES, \
GEN11_DEFAULT_PAGE_SIZES, \
+ .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
+ BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP) | \
+ BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1), \
.pipe_offsets = { \
[TRANSCODER_A] = PIPE_A_OFFSET, \
[TRANSCODER_B] = PIPE_B_OFFSET, \
@@ -799,6 +831,10 @@ static const struct intel_device_info ehl_info = {
#define GEN12_FEATURES \
GEN11_FEATURES, \
GEN(12), \
+ .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), \
+ .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
+ BIT(TRANSCODER_C) | BIT(TRANSCODER_D) | \
+ BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1), \
.pipe_offsets = { \
[TRANSCODER_A] = PIPE_A_OFFSET, \
[TRANSCODER_B] = PIPE_B_OFFSET, \
@@ -822,7 +858,6 @@ static const struct intel_device_info ehl_info = {
static const struct intel_device_info tgl_info = {
GEN12_FEATURES,
PLATFORM(INTEL_TIGERLAKE),
- .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
.display.has_modular_fia = 1,
.engine_mask =
BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
@@ -920,8 +955,6 @@ static void i915_pci_remove(struct pci_dev *pdev)
i915_driver_remove(i915);
pci_set_drvdata(pdev, NULL);
-
- drm_dev_put(&i915->drm);
}
/* is device_id present in comma separated list of ids */
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index cf2c01f17da8..25329b7600c9 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -204,21 +204,6 @@
#include "i915_drv.h"
#include "i915_perf.h"
-#include "oa/i915_oa_hsw.h"
-#include "oa/i915_oa_bdw.h"
-#include "oa/i915_oa_chv.h"
-#include "oa/i915_oa_sklgt2.h"
-#include "oa/i915_oa_sklgt3.h"
-#include "oa/i915_oa_sklgt4.h"
-#include "oa/i915_oa_bxt.h"
-#include "oa/i915_oa_kblgt2.h"
-#include "oa/i915_oa_kblgt3.h"
-#include "oa/i915_oa_glk.h"
-#include "oa/i915_oa_cflgt2.h"
-#include "oa/i915_oa_cflgt3.h"
-#include "oa/i915_oa_cnl.h"
-#include "oa/i915_oa_icl.h"
-#include "oa/i915_oa_tgl.h"
/* HW requires this to be a power of two, between 128k and 16M, though driver
* is currently generally designed assuming the largest 16M size is used such
@@ -238,26 +223,17 @@
*
* Although this can be observed explicitly while copying reports to userspace
* by checking for a zeroed report-id field in tail reports, we want to account
- * for this earlier, as part of the oa_buffer_check to avoid lots of redundant
- * read() attempts.
- *
- * In effect we define a tail pointer for reading that lags the real tail
- * pointer by at least %OA_TAIL_MARGIN_NSEC nanoseconds, which gives enough
- * time for the corresponding reports to become visible to the CPU.
- *
- * To manage this we actually track two tail pointers:
- * 1) An 'aging' tail with an associated timestamp that is tracked until we
- * can trust the corresponding data is visible to the CPU; at which point
- * it is considered 'aged'.
- * 2) An 'aged' tail that can be used for read()ing.
- *
- * The two separate pointers let us decouple read()s from tail pointer aging.
- *
- * The tail pointers are checked and updated at a limited rate within a hrtimer
- * callback (the same callback that is used for delivering EPOLLIN events)
- *
- * Initially the tails are marked invalid with %INVALID_TAIL_PTR which
- * indicates that an updated tail pointer is needed.
+ * for this earlier, as part of the oa_buffer_check_unlocked to avoid lots of
+ * redundant read() attempts.
+ *
+ * We work around this issue in oa_buffer_check_unlocked() by reading the reports
+ * in the OA buffer, starting from the tail reported by the HW, until we find a
+ * report whose first 2 dwords are not 0, meaning the report before it is
+ * completely in memory and ready to be read. Those dwords are also set to 0
+ * once read and the whole buffer is cleared upon OA buffer initialization. The
+ * first dword is the reason for this report while the second is the timestamp,
+ * making it fairly unlikely that both fields read back as 0. A more
+ * detailed explanation is available in oa_buffer_check_unlocked().
*
* Most of the implementation details for this workaround are in
* oa_buffer_check_unlocked() and _append_oa_reports()
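
A minimal user-space model of the tail-verification walk described above. OA_TAKEN() is assumed to be the usual power-of-two circular distance, and the buffer/report sizes are toy values:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define OA_BUF_SIZE	4096u	/* toy size; the real buffer is up to 16M */
#define REPORT_SIZE	64u	/* smallest OA report format */
/* Assumed definition: circular distance between two buffer offsets. */
#define OA_TAKEN(tail, head)	(((tail) - (head)) & (OA_BUF_SIZE - 1))

static uint8_t oa_buf[OA_BUF_SIZE];	/* zeroed, like the real buffer at init */

/* Walk backwards from the HW tail until a report whose first two dwords
 * are non-zero is found; everything at or past that offset may not have
 * fully landed in memory yet and is excluded from reads. */
static uint32_t verified_tail(uint32_t hw_tail, uint32_t prev_tail)
{
	uint32_t tail = hw_tail;

	while (OA_TAKEN(tail, prev_tail) >= REPORT_SIZE) {
		const uint32_t *report32 = (const uint32_t *)(oa_buf + tail);

		if (report32[0] != 0 || report32[1] != 0)
			break;

		tail = (tail - REPORT_SIZE) & (OA_BUF_SIZE - 1);
	}

	return tail;
}

int main(void)
{
	/* Two reports are fully visible at offsets 0 and 64; the HW claims a
	 * third was written (hw_tail == 192) but its dwords are still 0. */
	memset(oa_buf + 0, 0xaa, REPORT_SIZE);
	memset(oa_buf + 64, 0xbb, REPORT_SIZE);

	/* Prints 64: only [0, 64) is trusted until the header of the report
	 * after offset 64 lands (or, in the driver, the aging timer fires). */
	printf("verified tail = %u\n", verified_tail(192, 0));
	return 0;
}
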
@@ -272,11 +248,11 @@
#define OA_TAIL_MARGIN_NSEC 100000ULL
#define INVALID_TAIL_PTR 0xffffffff
-/* frequency for checking whether the OA unit has written new reports to the
- * circular OA buffer...
+/* The default frequency for checking whether the OA unit has written new
+ * reports to the circular OA buffer...
*/
-#define POLL_FREQUENCY 200
-#define POLL_PERIOD (NSEC_PER_SEC / POLL_FREQUENCY)
+#define DEFAULT_POLL_FREQUENCY_HZ 200
+#define DEFAULT_POLL_PERIOD_NS (NSEC_PER_SEC / DEFAULT_POLL_FREQUENCY_HZ)
/* for sysctl proc_dointvec_minmax of dev.i915.perf_stream_paranoid */
static u32 i915_perf_stream_paranoid = true;
@@ -359,6 +335,12 @@ static const struct i915_oa_format gen12_oa_formats[I915_OA_FORMAT_MAX] = {
* @oa_periodic: Whether to enable periodic OA unit sampling
* @oa_period_exponent: The OA unit sampling period is derived from this
* @engine: The engine (typically rcs0) being monitored by the OA unit
+ * @has_sseu: Whether @sseu was specified by userspace
+ * @sseu: internal SSEU configuration computed either from the userspace
+ * specified configuration in the opening parameters or a default value
+ * (see get_default_sseu_config())
+ * @poll_oa_period: The period in nanoseconds at which the CPU will check for OA
+ * data availability
*
* As read_properties_unlocked() enumerates and validates the properties given
* to open a stream of metrics the configuration is built up in the structure
@@ -378,6 +360,11 @@ struct perf_open_properties {
int oa_period_exponent;
struct intel_engine_cs *engine;
+
+ bool has_sseu;
+ struct intel_sseu sseu;
+
+ u64 poll_oa_period;
};
struct i915_oa_config_bo {
@@ -409,10 +396,7 @@ i915_perf_get_oa_config(struct i915_perf *perf, int metrics_set)
struct i915_oa_config *oa_config;
rcu_read_lock();
- if (metrics_set == 1)
- oa_config = &perf->test_config;
- else
- oa_config = idr_find(&perf->metrics_idr, metrics_set);
+ oa_config = idr_find(&perf->metrics_idr, metrics_set);
if (oa_config)
oa_config = i915_oa_config_get(oa_config);
rcu_read_unlock();
@@ -465,8 +449,8 @@ static u32 gen7_oa_hw_tail_read(struct i915_perf_stream *stream)
* (See description of OA_TAIL_MARGIN_NSEC above for further details.)
*
* Besides returning true when there is data available to read() this function
- * also has the side effect of updating the oa_buffer.tails[], .aging_timestamp
- * and .aged_tail_idx state used for reading.
+ * also updates the tail, aging_tail and aging_timestamp in the oa_buffer
+ * object.
*
* Note: It's safe to read OA config state here unlocked, assuming that this is
* only called while the stream is enabled, while the global OA configuration
@@ -476,28 +460,19 @@ static u32 gen7_oa_hw_tail_read(struct i915_perf_stream *stream)
*/
static bool oa_buffer_check_unlocked(struct i915_perf_stream *stream)
{
+ u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
int report_size = stream->oa_buffer.format_size;
unsigned long flags;
- unsigned int aged_idx;
- u32 head, hw_tail, aged_tail, aging_tail;
+ bool pollin;
+ u32 hw_tail;
u64 now;
/* We have to consider the (unlikely) possibility that read() errors
- * could result in an OA buffer reset which might reset the head,
- * tails[] and aged_tail state.
+ * could result in an OA buffer reset which might reset the head and
+ * tail state.
*/
spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
- /* NB: The head we observe here might effectively be a little out of
- * date (between head and tails[aged_idx].offset if there is currently
- * a read() in progress.
- */
- head = stream->oa_buffer.head;
-
- aged_idx = stream->oa_buffer.aged_tail_idx;
- aged_tail = stream->oa_buffer.tails[aged_idx].offset;
- aging_tail = stream->oa_buffer.tails[!aged_idx].offset;
-
hw_tail = stream->perf->ops.oa_hw_tail_read(stream);
/* The tail pointer increases in 64 byte increments,
@@ -507,64 +482,63 @@ static bool oa_buffer_check_unlocked(struct i915_perf_stream *stream)
now = ktime_get_mono_fast_ns();
- /* Update the aged tail
- *
- * Flip the tail pointer available for read()s once the aging tail is
- * old enough to trust that the corresponding data will be visible to
- * the CPU...
- *
- * Do this before updating the aging pointer in case we may be able to
- * immediately start aging a new pointer too (if new data has become
- * available) without needing to wait for a later hrtimer callback.
- */
- if (aging_tail != INVALID_TAIL_PTR &&
- ((now - stream->oa_buffer.aging_timestamp) >
- OA_TAIL_MARGIN_NSEC)) {
-
- aged_idx ^= 1;
- stream->oa_buffer.aged_tail_idx = aged_idx;
+ if (hw_tail == stream->oa_buffer.aging_tail &&
+ (now - stream->oa_buffer.aging_timestamp) > OA_TAIL_MARGIN_NSEC) {
+ /* If the HW tail hasn't move since the last check and the HW
+ * tail has been aging for long enough, declare it the new
+ * tail.
+ */
+ stream->oa_buffer.tail = stream->oa_buffer.aging_tail;
+ } else {
+ u32 head, tail, aged_tail;
- aged_tail = aging_tail;
+ /* NB: The head we observe here might effectively be a little
+ * out of date. If a read() is in progress, the head could be
+ * anywhere between this head and stream->oa_buffer.tail.
+ */
+ head = stream->oa_buffer.head - gtt_offset;
+ aged_tail = stream->oa_buffer.tail - gtt_offset;
+
+ hw_tail -= gtt_offset;
+ tail = hw_tail;
+
+ /* Walk the stream backward until we find a report whose dwords 0
+ * and 1 are not both 0. Since the circular buffer pointers progress by
+ * increments of 64 bytes and reports can be up to 256
+ * bytes long, we can't tell whether a report has fully landed
+ * in memory before the first 2 dwords of the following report
+ * have effectively landed.
+ *
+ * This is assuming that the writes of the OA unit land in
+ * memory in the order they were written.
+ * If not : (╯°□°)╯︵ ┻━┻
+ */
+ while (OA_TAKEN(tail, aged_tail) >= report_size) {
+ u32 *report32 = (void *)(stream->oa_buffer.vaddr + tail);
- /* Mark that we need a new pointer to start aging... */
- stream->oa_buffer.tails[!aged_idx].offset = INVALID_TAIL_PTR;
- aging_tail = INVALID_TAIL_PTR;
- }
+ if (report32[0] != 0 || report32[1] != 0)
+ break;
- /* Update the aging tail
- *
- * We throttle aging tail updates until we have a new tail that
- * represents >= one report more data than is already available for
- * reading. This ensures there will be enough data for a successful
- * read once this new pointer has aged and ensures we will give the new
- * pointer time to age.
- */
- if (aging_tail == INVALID_TAIL_PTR &&
- (aged_tail == INVALID_TAIL_PTR ||
- OA_TAKEN(hw_tail, aged_tail) >= report_size)) {
- struct i915_vma *vma = stream->oa_buffer.vma;
- u32 gtt_offset = i915_ggtt_offset(vma);
-
- /* Be paranoid and do a bounds check on the pointer read back
- * from hardware, just in case some spurious hardware condition
- * could put the tail out of bounds...
- */
- if (hw_tail >= gtt_offset &&
- hw_tail < (gtt_offset + OA_BUFFER_SIZE)) {
- stream->oa_buffer.tails[!aged_idx].offset =
- aging_tail = hw_tail;
- stream->oa_buffer.aging_timestamp = now;
- } else {
- drm_err(&stream->perf->i915->drm,
- "Ignoring spurious out of range OA buffer tail pointer = %x\n",
- hw_tail);
+ tail = (tail - report_size) & (OA_BUFFER_SIZE - 1);
}
+
+ if (OA_TAKEN(hw_tail, tail) > report_size &&
+ __ratelimit(&stream->perf->tail_pointer_race))
+ DRM_NOTE("unlanded report(s) head=0x%x "
+ "tail=0x%x hw_tail=0x%x\n",
+ head, tail, hw_tail);
+
+ stream->oa_buffer.tail = gtt_offset + tail;
+ stream->oa_buffer.aging_tail = gtt_offset + hw_tail;
+ stream->oa_buffer.aging_timestamp = now;
}
+ pollin = OA_TAKEN(stream->oa_buffer.tail - gtt_offset,
+ stream->oa_buffer.head - gtt_offset) >= report_size;
+
spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
- return aged_tail == INVALID_TAIL_PTR ?
- false : OA_TAKEN(aged_tail, head) >= report_size;
+ return pollin;
}
/**
@@ -682,7 +656,6 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
u32 mask = (OA_BUFFER_SIZE - 1);
size_t start_offset = *offset;
unsigned long flags;
- unsigned int aged_tail_idx;
u32 head, tail;
u32 taken;
int ret = 0;
@@ -693,19 +666,11 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
head = stream->oa_buffer.head;
- aged_tail_idx = stream->oa_buffer.aged_tail_idx;
- tail = stream->oa_buffer.tails[aged_tail_idx].offset;
+ tail = stream->oa_buffer.tail;
spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
/*
- * An invalid tail pointer here means we're still waiting for the poll
- * hrtimer callback to give us a pointer
- */
- if (tail == INVALID_TAIL_PTR)
- return -EAGAIN;
-
- /*
* NB: oa_buffer.head/tail include the gtt_offset which we don't want
* while indexing relative to oa_buf_base.
*/
@@ -838,13 +803,11 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
}
/*
- * The above reason field sanity check is based on
- * the assumption that the OA buffer is initially
- * zeroed and we reset the field after copying so the
- * check is still meaningful once old reports start
- * being overwritten.
+ * Clear out the first 2 dwords as a means to detect unlanded
+ * reports.
*/
report32[0] = 0;
+ report32[1] = 0;
}
if (start_offset != *offset) {
@@ -985,7 +948,6 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
u32 mask = (OA_BUFFER_SIZE - 1);
size_t start_offset = *offset;
unsigned long flags;
- unsigned int aged_tail_idx;
u32 head, tail;
u32 taken;
int ret = 0;
@@ -996,17 +958,10 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
head = stream->oa_buffer.head;
- aged_tail_idx = stream->oa_buffer.aged_tail_idx;
- tail = stream->oa_buffer.tails[aged_tail_idx].offset;
+ tail = stream->oa_buffer.tail;
spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
- /* An invalid tail pointer here means we're still waiting for the poll
- * hrtimer callback to give us a pointer
- */
- if (tail == INVALID_TAIL_PTR)
- return -EAGAIN;
-
/* NB: oa_buffer.head/tail include the gtt_offset which we don't want
* while indexing relative to oa_buf_base.
*/
@@ -1064,13 +1019,11 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
if (ret)
break;
- /* The above report-id field sanity check is based on
- * the assumption that the OA buffer is initially
- * zeroed and we reset the field after copying so the
- * check is still meaningful once old reports start
- * being overwritten.
+ /* Clear out the first 2 dwords as a means to detect unlanded
+ * reports.
*/
report32[0] = 0;
+ report32[1] = 0;
}
if (start_offset != *offset) {
@@ -1447,8 +1400,8 @@ static void gen7_init_oa_buffer(struct i915_perf_stream *stream)
gtt_offset | OABUFFER_SIZE_16M);
/* Mark that we need updated tail pointers to read from... */
- stream->oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
- stream->oa_buffer.tails[1].offset = INVALID_TAIL_PTR;
+ stream->oa_buffer.aging_tail = INVALID_TAIL_PTR;
+ stream->oa_buffer.tail = gtt_offset;
spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
@@ -1470,8 +1423,6 @@ static void gen7_init_oa_buffer(struct i915_perf_stream *stream)
* memory...
*/
memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
-
- stream->pollin = false;
}
static void gen8_init_oa_buffer(struct i915_perf_stream *stream)
@@ -1501,8 +1452,8 @@ static void gen8_init_oa_buffer(struct i915_perf_stream *stream)
intel_uncore_write(uncore, GEN8_OATAILPTR, gtt_offset & GEN8_OATAILPTR_MASK);
/* Mark that we need updated tail pointers to read from... */
- stream->oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
- stream->oa_buffer.tails[1].offset = INVALID_TAIL_PTR;
+ stream->oa_buffer.aging_tail = INVALID_TAIL_PTR;
+ stream->oa_buffer.tail = gtt_offset;
/*
* Reset state used to recognise context switches, affecting which
@@ -1526,8 +1477,6 @@ static void gen8_init_oa_buffer(struct i915_perf_stream *stream)
* memory...
*/
memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
-
- stream->pollin = false;
}
static void gen12_init_oa_buffer(struct i915_perf_stream *stream)
@@ -1557,8 +1506,8 @@ static void gen12_init_oa_buffer(struct i915_perf_stream *stream)
gtt_offset & GEN12_OAG_OATAILPTR_MASK);
/* Mark that we need updated tail pointers to read from... */
- stream->oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
- stream->oa_buffer.tails[1].offset = INVALID_TAIL_PTR;
+ stream->oa_buffer.aging_tail = INVALID_TAIL_PTR;
+ stream->oa_buffer.tail = gtt_offset;
/*
* Reset state used to recognise context switches, affecting which
@@ -1583,8 +1532,6 @@ static void gen12_init_oa_buffer(struct i915_perf_stream *stream)
*/
memset(stream->oa_buffer.vaddr, 0,
stream->oa_buffer.vma->size);
-
- stream->pollin = false;
}
static int alloc_oa_buffer(struct i915_perf_stream *stream)
@@ -1665,10 +1612,7 @@ static int alloc_noa_wait(struct i915_perf_stream *stream)
struct drm_i915_gem_object *bo;
struct i915_vma *vma;
const u64 delay_ticks = 0xffffffffffffffff -
- DIV64_U64_ROUND_UP(
- atomic64_read(&stream->perf->noa_programming_delay) *
- RUNTIME_INFO(i915)->cs_timestamp_frequency_khz,
- 1000000ull);
+ i915_cs_timestamp_ns_to_ticks(i915, atomic64_read(&stream->perf->noa_programming_delay));
const u32 base = stream->engine->mmio_base;
#define CS_GPR(x) GEN8_RING_CS_GPR(base, x)
u32 *batch, *ts0, *cs, *jump;
@@ -1970,10 +1914,11 @@ out:
return i915_vma_get(oa_bo->vma);
}
-static struct i915_request *
+static int
emit_oa_config(struct i915_perf_stream *stream,
struct i915_oa_config *oa_config,
- struct intel_context *ce)
+ struct intel_context *ce,
+ struct i915_active *active)
{
struct i915_request *rq;
struct i915_vma *vma;
@@ -1981,7 +1926,7 @@ emit_oa_config(struct i915_perf_stream *stream,
vma = get_oa_vma(stream, oa_config);
if (IS_ERR(vma))
- return ERR_CAST(vma);
+ return PTR_ERR(vma);
err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
if (err)
@@ -1995,6 +1940,18 @@ emit_oa_config(struct i915_perf_stream *stream,
goto err_vma_unpin;
}
+ if (!IS_ERR_OR_NULL(active)) {
+ /* After all individual context modifications */
+ err = i915_request_await_active(rq, active,
+ I915_ACTIVE_AWAIT_ACTIVE);
+ if (err)
+ goto err_add_request;
+
+ err = i915_active_add_request(active, rq);
+ if (err)
+ goto err_add_request;
+ }
+
i915_vma_lock(vma);
err = i915_request_await_object(rq, vma->obj, 0);
if (!err)
@@ -2009,14 +1966,13 @@ emit_oa_config(struct i915_perf_stream *stream,
if (err)
goto err_add_request;
- i915_request_get(rq);
err_add_request:
i915_request_add(rq);
err_vma_unpin:
i915_vma_unpin(vma);
err_vma_put:
i915_vma_put(vma);
- return err ? ERR_PTR(err) : rq;
+ return err;
}
static struct intel_context *oa_context(struct i915_perf_stream *stream)
@@ -2024,8 +1980,9 @@ static struct intel_context *oa_context(struct i915_perf_stream *stream)
return stream->pinned_ctx ?: stream->engine->kernel_context;
}
-static struct i915_request *
-hsw_enable_metric_set(struct i915_perf_stream *stream)
+static int
+hsw_enable_metric_set(struct i915_perf_stream *stream,
+ struct i915_active *active)
{
struct intel_uncore *uncore = stream->uncore;
@@ -2044,7 +2001,9 @@ hsw_enable_metric_set(struct i915_perf_stream *stream)
intel_uncore_rmw(uncore, GEN6_UCGCTL1,
0, GEN6_CSUNIT_CLOCK_GATE_DISABLE);
- return emit_oa_config(stream, stream->oa_config, oa_context(stream));
+ return emit_oa_config(stream,
+ stream->oa_config, oa_context(stream),
+ active);
}
static void hsw_disable_metric_set(struct i915_perf_stream *stream)
@@ -2114,9 +2073,6 @@ gen8_update_reg_state_unlocked(const struct intel_context *ce,
for (i = 0; i < ARRAY_SIZE(flex_regs); i++)
reg_state[ctx_flexeu0 + i * 2 + 1] =
oa_config_flex_reg(stream->oa_config, flex_regs[i]);
-
- reg_state[CTX_R_PWR_CLK_STATE] =
- intel_sseu_make_rpcs(ce->engine->i915, &ce->sseu);
}
struct flex {
@@ -2137,7 +2093,7 @@ gen8_store_flex(struct i915_request *rq,
if (IS_ERR(cs))
return PTR_ERR(cs);
- offset = i915_ggtt_offset(ce->state) + LRC_STATE_PN * PAGE_SIZE;
+ offset = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET;
do {
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*cs++ = offset + flex->offset * sizeof(u32);
@@ -2194,8 +2150,10 @@ static int gen8_modify_context(struct intel_context *ce,
return err;
}
-static int gen8_modify_self(struct intel_context *ce,
- const struct flex *flex, unsigned int count)
+static int
+gen8_modify_self(struct intel_context *ce,
+ const struct flex *flex, unsigned int count,
+ struct i915_active *active)
{
struct i915_request *rq;
int err;
@@ -2206,8 +2164,17 @@ static int gen8_modify_self(struct intel_context *ce,
if (IS_ERR(rq))
return PTR_ERR(rq);
+ if (!IS_ERR_OR_NULL(active)) {
+ err = i915_active_add_request(active, rq);
+ if (err)
+ goto err_add_request;
+ }
+
err = gen8_load_flex(rq, ce, flex, count);
+ if (err)
+ goto err_add_request;
+err_add_request:
i915_request_add(rq);
return err;
}
@@ -2241,7 +2208,8 @@ static int gen8_configure_context(struct i915_gem_context *ctx,
return err;
}
-static int gen12_configure_oar_context(struct i915_perf_stream *stream, bool enable)
+static int gen12_configure_oar_context(struct i915_perf_stream *stream,
+ struct i915_active *active)
{
int err;
struct intel_context *ce = stream->pinned_ctx;
@@ -2250,7 +2218,7 @@ static int gen12_configure_oar_context(struct i915_perf_stream *stream, bool ena
{
GEN8_OACTXCONTROL,
stream->perf->ctx_oactxctrl_offset + 1,
- enable ? GEN8_OA_COUNTER_RESUME : 0,
+ active ? GEN8_OA_COUNTER_RESUME : 0,
},
};
/* Offsets in regs_lri are not used since this configuration is only
@@ -2262,13 +2230,13 @@ static int gen12_configure_oar_context(struct i915_perf_stream *stream, bool ena
GEN12_OAR_OACONTROL,
GEN12_OAR_OACONTROL_OFFSET + 1,
(format << GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT) |
- (enable ? GEN12_OAR_OACONTROL_COUNTER_ENABLE : 0)
+ (active ? GEN12_OAR_OACONTROL_COUNTER_ENABLE : 0)
},
{
RING_CONTEXT_CONTROL(ce->engine->mmio_base),
CTX_CONTEXT_CONTROL,
_MASKED_FIELD(GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE,
- enable ?
+ active ?
GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE :
0)
},
@@ -2285,7 +2253,7 @@ static int gen12_configure_oar_context(struct i915_perf_stream *stream, bool ena
return err;
/* Apply regs_lri using LRI with pinned context */
- return gen8_modify_self(ce, regs_lri, ARRAY_SIZE(regs_lri));
+ return gen8_modify_self(ce, regs_lri, ARRAY_SIZE(regs_lri), active);
}
/*
@@ -2313,9 +2281,11 @@ static int gen12_configure_oar_context(struct i915_perf_stream *stream, bool ena
* Note: it's only the RCS/Render context that has any OA state.
* Note: the first flex register passed must always be R_PWR_CLK_STATE
*/
-static int oa_configure_all_contexts(struct i915_perf_stream *stream,
- struct flex *regs,
- size_t num_regs)
+static int
+oa_configure_all_contexts(struct i915_perf_stream *stream,
+ struct flex *regs,
+ size_t num_regs,
+ struct i915_active *active)
{
struct drm_i915_private *i915 = stream->perf->i915;
struct intel_engine_cs *engine;
@@ -2372,7 +2342,7 @@ static int oa_configure_all_contexts(struct i915_perf_stream *stream,
regs[0].value = intel_sseu_make_rpcs(i915, &ce->sseu);
- err = gen8_modify_self(ce, regs, num_regs);
+ err = gen8_modify_self(ce, regs, num_regs, active);
if (err)
return err;
}
@@ -2380,8 +2350,10 @@ static int oa_configure_all_contexts(struct i915_perf_stream *stream,
return 0;
}
-static int gen12_configure_all_contexts(struct i915_perf_stream *stream,
- const struct i915_oa_config *oa_config)
+static int
+gen12_configure_all_contexts(struct i915_perf_stream *stream,
+ const struct i915_oa_config *oa_config,
+ struct i915_active *active)
{
struct flex regs[] = {
{
@@ -2390,11 +2362,15 @@ static int gen12_configure_all_contexts(struct i915_perf_stream *stream,
},
};
- return oa_configure_all_contexts(stream, regs, ARRAY_SIZE(regs));
+ return oa_configure_all_contexts(stream,
+ regs, ARRAY_SIZE(regs),
+ active);
}
-static int lrc_configure_all_contexts(struct i915_perf_stream *stream,
- const struct i915_oa_config *oa_config)
+static int
+lrc_configure_all_contexts(struct i915_perf_stream *stream,
+ const struct i915_oa_config *oa_config,
+ struct i915_active *active)
{
/* The MMIO offsets for Flex EU registers aren't contiguous */
const u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset;
@@ -2427,11 +2403,14 @@ static int lrc_configure_all_contexts(struct i915_perf_stream *stream,
for (i = 2; i < ARRAY_SIZE(regs); i++)
regs[i].value = oa_config_flex_reg(oa_config, regs[i].reg);
- return oa_configure_all_contexts(stream, regs, ARRAY_SIZE(regs));
+ return oa_configure_all_contexts(stream,
+ regs, ARRAY_SIZE(regs),
+ active);
}
-static struct i915_request *
-gen8_enable_metric_set(struct i915_perf_stream *stream)
+static int
+gen8_enable_metric_set(struct i915_perf_stream *stream,
+ struct i915_active *active)
{
struct intel_uncore *uncore = stream->uncore;
struct i915_oa_config *oa_config = stream->oa_config;
@@ -2471,11 +2450,13 @@ gen8_enable_metric_set(struct i915_perf_stream *stream)
* to make sure all slices/subslices are ON before writing to NOA
* registers.
*/
- ret = lrc_configure_all_contexts(stream, oa_config);
+ ret = lrc_configure_all_contexts(stream, oa_config, active);
if (ret)
- return ERR_PTR(ret);
+ return ret;
- return emit_oa_config(stream, oa_config, oa_context(stream));
+ return emit_oa_config(stream,
+ stream->oa_config, oa_context(stream),
+ active);
}
static u32 oag_report_ctx_switches(const struct i915_perf_stream *stream)
@@ -2485,8 +2466,9 @@ static u32 oag_report_ctx_switches(const struct i915_perf_stream *stream)
0 : GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS);
}
-static struct i915_request *
-gen12_enable_metric_set(struct i915_perf_stream *stream)
+static int
+gen12_enable_metric_set(struct i915_perf_stream *stream,
+ struct i915_active *active)
{
struct intel_uncore *uncore = stream->uncore;
struct i915_oa_config *oa_config = stream->oa_config;
@@ -2515,9 +2497,9 @@ gen12_enable_metric_set(struct i915_perf_stream *stream)
* to make sure all slices/subslices are ON before writing to NOA
* registers.
*/
- ret = gen12_configure_all_contexts(stream, oa_config);
+ ret = gen12_configure_all_contexts(stream, oa_config, active);
if (ret)
- return ERR_PTR(ret);
+ return ret;
/*
* For Gen12, performance counters are context
@@ -2525,12 +2507,14 @@ gen12_enable_metric_set(struct i915_perf_stream *stream)
* requested this.
*/
if (stream->ctx) {
- ret = gen12_configure_oar_context(stream, true);
+ ret = gen12_configure_oar_context(stream, active);
if (ret)
- return ERR_PTR(ret);
+ return ret;
}
- return emit_oa_config(stream, oa_config, oa_context(stream));
+ return emit_oa_config(stream,
+ stream->oa_config, oa_context(stream),
+ active);
}
static void gen8_disable_metric_set(struct i915_perf_stream *stream)
@@ -2538,7 +2522,7 @@ static void gen8_disable_metric_set(struct i915_perf_stream *stream)
struct intel_uncore *uncore = stream->uncore;
/* Reset all contexts' slices/subslices configurations. */
- lrc_configure_all_contexts(stream, NULL);
+ lrc_configure_all_contexts(stream, NULL, NULL);
intel_uncore_rmw(uncore, GDT_CHICKEN_BITS, GT_NOA_ENABLE, 0);
}
@@ -2548,7 +2532,7 @@ static void gen10_disable_metric_set(struct i915_perf_stream *stream)
struct intel_uncore *uncore = stream->uncore;
/* Reset all contexts' slices/subslices configurations. */
- lrc_configure_all_contexts(stream, NULL);
+ lrc_configure_all_contexts(stream, NULL, NULL);
/* Make sure we disable noa to save power. */
intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0);
@@ -2559,11 +2543,11 @@ static void gen12_disable_metric_set(struct i915_perf_stream *stream)
struct intel_uncore *uncore = stream->uncore;
/* Reset all contexts' slices/subslices configurations. */
- gen12_configure_all_contexts(stream, NULL);
+ gen12_configure_all_contexts(stream, NULL, NULL);
/* disable the context save/restore or OAR counters */
if (stream->ctx)
- gen12_configure_oar_context(stream, false);
+ gen12_configure_oar_context(stream, NULL);
/* Make sure we disable noa to save power. */
intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0);
@@ -2655,11 +2639,13 @@ static void gen12_oa_enable(struct i915_perf_stream *stream)
*/
static void i915_oa_stream_enable(struct i915_perf_stream *stream)
{
+ stream->pollin = false;
+
stream->perf->ops.oa_enable(stream);
if (stream->periodic)
hrtimer_start(&stream->poll_check_timer,
- ns_to_ktime(POLL_PERIOD),
+ ns_to_ktime(stream->poll_oa_period),
HRTIMER_MODE_REL_PINNED);
}
@@ -2735,16 +2721,52 @@ static const struct i915_perf_stream_ops i915_oa_stream_ops = {
static int i915_perf_stream_enable_sync(struct i915_perf_stream *stream)
{
- struct i915_request *rq;
+ struct i915_active *active;
+ int err;
- rq = stream->perf->ops.enable_metric_set(stream);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
+ active = i915_active_create();
+ if (!active)
+ return -ENOMEM;
- i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
- i915_request_put(rq);
+ err = stream->perf->ops.enable_metric_set(stream, active);
+ if (err == 0)
+ __i915_active_wait(active, TASK_UNINTERRUPTIBLE);
- return 0;
+ i915_active_put(active);
+ return err;
+}
+
+static void
+get_default_sseu_config(struct intel_sseu *out_sseu,
+ struct intel_engine_cs *engine)
+{
+ const struct sseu_dev_info *devinfo_sseu =
+ &RUNTIME_INFO(engine->i915)->sseu;
+
+ *out_sseu = intel_sseu_from_device_info(devinfo_sseu);
+
+ if (IS_GEN(engine->i915, 11)) {
+ /*
+ * We only need subslice count so it doesn't matter which ones
+ * we select - just turn off low bits in the amount of half of
+ * all available subslices per slice.
+ */
+ out_sseu->subslice_mask =
+ ~(~0 << (hweight8(out_sseu->subslice_mask) / 2));
+ out_sseu->slice_mask = 0x1;
+ }
+}
+
+static int
+get_sseu_config(struct intel_sseu *out_sseu,
+ struct intel_engine_cs *engine,
+ const struct drm_i915_gem_context_param_sseu *drm_sseu)
+{
+ if (drm_sseu->engine.engine_class != engine->uabi_class ||
+ drm_sseu->engine.engine_instance != engine->uabi_instance)
+ return -EINVAL;
+
+ return i915_gem_user_to_context_sseu(engine->i915, drm_sseu, out_sseu);
}
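
For the gen11 default above, the subslice mask is trimmed to half of the available subslices. A worked example of the bit arithmetic, using a plain scalar in place of the driver's intel_sseu mask:

#include <stdio.h>
#include <stdint.h>

/* Keep only the low half of the set subslices:
 * ~(~0 << (hweight(mask) / 2)) in the kernel code above. */
static uint8_t default_oa_subslice_mask(uint8_t subslice_mask)
{
	unsigned int count = __builtin_popcount(subslice_mask);

	return (uint8_t)~(~0u << (count / 2));
}

int main(void)
{
	/* e.g. a part exposing 8 subslices per slice */
	uint8_t full = 0xff;

	/* 0xff (8 subslices) -> 0x0f (4 subslices); slice_mask is forced to 0x1 */
	printf("full=0x%02x -> default=0x%02x\n",
	       full, default_oa_subslice_mask(full));
	return 0;
}
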
/**
@@ -2879,6 +2901,8 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
goto err_oa_buf_alloc;
stream->ops = &i915_oa_stream_ops;
+
+ perf->sseu = props->sseu;
WRITE_ONCE(perf->exclusive_stream, stream);
ret = i915_perf_stream_enable_sync(stream);
@@ -2930,10 +2954,6 @@ void i915_oa_init_reg_state(const struct intel_context *ce,
/* perf.exclusive_stream serialised by lrc_configure_all_contexts() */
stream = READ_ONCE(engine->i915->perf.exclusive_stream);
- /*
- * For gen12, only CTX_R_PWR_CLK_STATE needs update, but the caller
- * is already doing that, so nothing to be done for gen12 here.
- */
if (stream && INTEL_GEN(stream->perf->i915) < 12)
gen8_update_reg_state_unlocked(ce, stream);
}
@@ -3024,7 +3044,8 @@ static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer)
wake_up(&stream->poll_wq);
}
- hrtimer_forward_now(hrtimer, ns_to_ktime(POLL_PERIOD));
+ hrtimer_forward_now(hrtimer,
+ ns_to_ktime(stream->poll_oa_period));
return HRTIMER_RESTART;
}
@@ -3155,7 +3176,7 @@ static long i915_perf_config_locked(struct i915_perf_stream *stream,
return -EINVAL;
if (config != stream->oa_config) {
- struct i915_request *rq;
+ int err;
/*
* If OA is bound to a specific context, emit the
@@ -3166,13 +3187,11 @@ static long i915_perf_config_locked(struct i915_perf_stream *stream,
* When set globally, we use a low priority kernel context,
* so it will effectively take effect when idle.
*/
- rq = emit_oa_config(stream, config, oa_context(stream));
- if (!IS_ERR(rq)) {
+ err = emit_oa_config(stream, config, oa_context(stream), NULL);
+ if (!err)
config = xchg(&stream->oa_config, config);
- i915_request_put(rq);
- } else {
- ret = PTR_ERR(rq);
- }
+ else
+ ret = err;
}
i915_oa_config_put(config);
@@ -3385,13 +3404,21 @@ i915_perf_open_ioctl_locked(struct i915_perf *perf,
privileged_op = true;
}
+ /*
+ * Asking for SSEU configuration is a privileged operation.
+ */
+ if (props->has_sseu)
+ privileged_op = true;
+ else
+ get_default_sseu_config(&props->sseu, props->engine);
+
/* Similar to perf's kernel.perf_paranoid_cpu sysctl option
* we check a dev.i915.perf_stream_paranoid sysctl option
* to determine if it's ok to access system wide OA counters
- * without CAP_SYS_ADMIN privileges.
+ * without CAP_PERFMON or CAP_SYS_ADMIN privileges.
*/
if (privileged_op &&
- i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) {
+ i915_perf_stream_paranoid && !perfmon_capable()) {
DRM_DEBUG("Insufficient privileges to open i915 perf stream\n");
ret = -EACCES;
goto err_ctx;
@@ -3405,6 +3432,7 @@ i915_perf_open_ioctl_locked(struct i915_perf *perf,
stream->perf = perf;
stream->ctx = specific_ctx;
+ stream->poll_oa_period = props->poll_oa_period;
ret = i915_oa_stream_init(stream, param, props);
if (ret)
@@ -3454,8 +3482,7 @@ err:
static u64 oa_exponent_to_ns(struct i915_perf *perf, int exponent)
{
- return div64_u64(1000000000ULL * (2ULL << exponent),
- 1000ULL * RUNTIME_INFO(perf->i915)->cs_timestamp_frequency_khz);
+ return i915_cs_timestamp_ticks_to_ns(perf->i915, 2ULL << exponent);
}
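
The rewritten helper converts (2 << exponent) command-streamer timestamp ticks into nanoseconds. A worked example with an assumed 12 MHz timestamp clock (the real rate is per-platform and comes from RUNTIME_INFO):

#include <stdio.h>
#include <stdint.h>

/* Model of oa_exponent_to_ns(): the OA unit emits a periodic report
 * every (2 << exponent) CS timestamp ticks. */
static uint64_t oa_exponent_to_ns(uint64_t cs_timestamp_hz, unsigned int exponent)
{
	uint64_t ticks = 2ull << exponent;

	return ticks * 1000000000ull / cs_timestamp_hz;
}

int main(void)
{
	uint64_t hz = 12000000ull;	/* assumed 12 MHz; varies per platform */

	printf("exp=0  -> %llu ns\n",
	       (unsigned long long)oa_exponent_to_ns(hz, 0));	/* ~166 ns */
	printf("exp=16 -> %llu ns\n",
	       (unsigned long long)oa_exponent_to_ns(hz, 16));	/* ~10.9 ms */
	return 0;
}
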
/**
@@ -3480,8 +3507,10 @@ static int read_properties_unlocked(struct i915_perf *perf,
{
u64 __user *uprop = uprops;
u32 i;
+ int ret;
memset(props, 0, sizeof(struct perf_open_properties));
+ props->poll_oa_period = DEFAULT_POLL_PERIOD_NS;
if (!n_props) {
DRM_DEBUG("No i915 perf properties given\n");
@@ -3511,7 +3540,6 @@ static int read_properties_unlocked(struct i915_perf *perf,
for (i = 0; i < n_props; i++) {
u64 oa_period, oa_freq_hz;
u64 id, value;
- int ret;
ret = get_user(id, uprop);
if (ret)
@@ -3584,9 +3612,8 @@ static int read_properties_unlocked(struct i915_perf *perf,
} else
oa_freq_hz = 0;
- if (oa_freq_hz > i915_oa_max_sample_rate &&
- !capable(CAP_SYS_ADMIN)) {
- DRM_DEBUG("OA exponent would exceed the max sampling frequency (sysctl dev.i915.oa_max_sample_rate) %uHz without root privileges\n",
+ if (oa_freq_hz > i915_oa_max_sample_rate && !perfmon_capable()) {
+ DRM_DEBUG("OA exponent would exceed the max sampling frequency (sysctl dev.i915.oa_max_sample_rate) %uHz without CAP_PERFMON or CAP_SYS_ADMIN privileges\n",
i915_oa_max_sample_rate);
return -EACCES;
}
@@ -3597,6 +3624,32 @@ static int read_properties_unlocked(struct i915_perf *perf,
case DRM_I915_PERF_PROP_HOLD_PREEMPTION:
props->hold_preemption = !!value;
break;
+ case DRM_I915_PERF_PROP_GLOBAL_SSEU: {
+ struct drm_i915_gem_context_param_sseu user_sseu;
+
+ if (copy_from_user(&user_sseu,
+ u64_to_user_ptr(value),
+ sizeof(user_sseu))) {
+ DRM_DEBUG("Unable to copy global sseu parameter\n");
+ return -EFAULT;
+ }
+
+ ret = get_sseu_config(&props->sseu, props->engine, &user_sseu);
+ if (ret) {
+ DRM_DEBUG("Invalid SSEU configuration\n");
+ return ret;
+ }
+ props->has_sseu = true;
+ break;
+ }
+ case DRM_I915_PERF_PROP_POLL_OA_PERIOD:
+ if (value < 100000 /* 100us */) {
+ DRM_DEBUG("OA availability timer too small (%lluns < 100us)\n",
+ value);
+ return -EINVAL;
+ }
+ props->poll_oa_period = value;
+ break;
case DRM_I915_PERF_PROP_MAX:
MISSING_CASE(id);
return -EINVAL;
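
A hedged userspace sketch of the new property: open an OA stream and ask the kernel to check for new OA data every 10 ms instead of the 200 Hz default. It assumes the i915 uAPI header from libdrm; the metrics-set id is illustrative (it normally comes from sysfs) and error handling is omitted:

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

int open_oa_stream(int drm_fd)
{
	uint64_t properties[] = {
		DRM_I915_PERF_PROP_SAMPLE_OA,		1,
		DRM_I915_PERF_PROP_OA_METRICS_SET,	1,	/* illustrative */
		DRM_I915_PERF_PROP_OA_FORMAT,		I915_OA_FORMAT_A32u40_A4u32_B8_C8,
		DRM_I915_PERF_PROP_OA_EXPONENT,		16,
		/* must be >= 100us, per the check above */
		DRM_I915_PERF_PROP_POLL_OA_PERIOD,	10 * 1000 * 1000,
	};
	struct drm_i915_perf_open_param param = {
		.flags = I915_PERF_FLAG_FD_CLOEXEC,
		.num_properties = sizeof(properties) / (2 * sizeof(uint64_t)),
		.properties_ptr = (uintptr_t)properties,
	};

	return ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
}
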
@@ -3623,7 +3676,7 @@ static int read_properties_unlocked(struct i915_perf *perf,
* buffered data written by the GPU besides periodic OA metrics.
*
* Note we copy the properties from userspace outside of the i915 perf
- * mutex to avoid an awkward lockdep with mmap_sem.
+ * mutex to avoid an awkward lockdep with mmap_lock.
*
* Most of the implementation details are handled by
* i915_perf_open_ioctl_locked() after taking the &perf->lock
@@ -3679,7 +3732,6 @@ int i915_perf_open_ioctl(struct drm_device *dev, void *data,
void i915_perf_register(struct drm_i915_private *i915)
{
struct i915_perf *perf = &i915->perf;
- int ret;
if (!perf->i915)
return;
@@ -3693,64 +3745,7 @@ void i915_perf_register(struct drm_i915_private *i915)
perf->metrics_kobj =
kobject_create_and_add("metrics",
&i915->drm.primary->kdev->kobj);
- if (!perf->metrics_kobj)
- goto exit;
-
- sysfs_attr_init(&perf->test_config.sysfs_metric_id.attr);
-
- if (IS_TIGERLAKE(i915)) {
- i915_perf_load_test_config_tgl(i915);
- } else if (INTEL_GEN(i915) >= 11) {
- i915_perf_load_test_config_icl(i915);
- } else if (IS_CANNONLAKE(i915)) {
- i915_perf_load_test_config_cnl(i915);
- } else if (IS_COFFEELAKE(i915)) {
- if (IS_CFL_GT2(i915))
- i915_perf_load_test_config_cflgt2(i915);
- if (IS_CFL_GT3(i915))
- i915_perf_load_test_config_cflgt3(i915);
- } else if (IS_GEMINILAKE(i915)) {
- i915_perf_load_test_config_glk(i915);
- } else if (IS_KABYLAKE(i915)) {
- if (IS_KBL_GT2(i915))
- i915_perf_load_test_config_kblgt2(i915);
- else if (IS_KBL_GT3(i915))
- i915_perf_load_test_config_kblgt3(i915);
- } else if (IS_BROXTON(i915)) {
- i915_perf_load_test_config_bxt(i915);
- } else if (IS_SKYLAKE(i915)) {
- if (IS_SKL_GT2(i915))
- i915_perf_load_test_config_sklgt2(i915);
- else if (IS_SKL_GT3(i915))
- i915_perf_load_test_config_sklgt3(i915);
- else if (IS_SKL_GT4(i915))
- i915_perf_load_test_config_sklgt4(i915);
- } else if (IS_CHERRYVIEW(i915)) {
- i915_perf_load_test_config_chv(i915);
- } else if (IS_BROADWELL(i915)) {
- i915_perf_load_test_config_bdw(i915);
- } else if (IS_HASWELL(i915)) {
- i915_perf_load_test_config_hsw(i915);
- }
-
- if (perf->test_config.id == 0)
- goto sysfs_error;
-
- ret = sysfs_create_group(perf->metrics_kobj,
- &perf->test_config.sysfs_metric);
- if (ret)
- goto sysfs_error;
-
- perf->test_config.perf = perf;
- kref_init(&perf->test_config.ref);
-
- goto exit;
-
-sysfs_error:
- kobject_put(perf->metrics_kobj);
- perf->metrics_kobj = NULL;
-exit:
mutex_unlock(&perf->lock);
}
@@ -3770,9 +3765,6 @@ void i915_perf_unregister(struct drm_i915_private *i915)
if (!perf->metrics_kobj)
return;
- sysfs_remove_group(perf->metrics_kobj,
- &perf->test_config.sysfs_metric);
-
kobject_put(perf->metrics_kobj);
perf->metrics_kobj = NULL;
}
@@ -3904,9 +3896,6 @@ static struct i915_oa_reg *alloc_oa_regs(struct i915_perf *perf,
if (!n_regs)
return NULL;
- if (!access_ok(regs, n_regs * sizeof(u32) * 2))
- return ERR_PTR(-EFAULT);
-
/* No is_valid function means we're not allowing any register to be programmed. */
GEM_BUG_ON(!is_valid);
if (!is_valid)
@@ -4007,7 +3996,7 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
- if (i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) {
+ if (i915_perf_stream_paranoid && !perfmon_capable()) {
DRM_DEBUG("Insufficient privileges to add i915 OA config\n");
return -EACCES;
}
@@ -4154,7 +4143,7 @@ int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
return -ENOTSUPP;
}
- if (i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) {
+ if (i915_perf_stream_paranoid && !perfmon_capable()) {
DRM_DEBUG("Insufficient privileges to remove i915 OA config\n");
return -EACCES;
}
@@ -4347,8 +4336,8 @@ void i915_perf_init(struct drm_i915_private *i915)
if (perf->ops.enable_metric_set) {
mutex_init(&perf->lock);
- oa_sample_rate_hard_limit = 1000 *
- (RUNTIME_INFO(i915)->cs_timestamp_frequency_khz / 2);
+ oa_sample_rate_hard_limit =
+ RUNTIME_INFO(i915)->cs_timestamp_frequency_hz / 2;
mutex_init(&perf->metrics_lock);
idr_init(&perf->metrics_idr);
@@ -4371,6 +4360,11 @@ void i915_perf_init(struct drm_i915_private *i915)
ratelimit_set_flags(&perf->spurious_report_rs,
RATELIMIT_MSG_ON_RELEASE);
+ ratelimit_state_init(&perf->tail_pointer_race,
+ 5 * HZ, 10);
+ ratelimit_set_flags(&perf->tail_pointer_race,
+ RATELIMIT_MSG_ON_RELEASE);
+
atomic64_set(&perf->noa_programming_delay,
500 * 1000 /* 500us */);
@@ -4431,8 +4425,15 @@ int i915_perf_ioctl_version(void)
* preemption on a particular context so that performance data is
* accessible from a delta of MI_RPC reports without looking at the
* OA buffer.
+ *
+ * 4: Add DRM_I915_PERF_PROP_ALLOWED_SSEU to limit what contexts can
+ * be run for the duration of the performance recording based on
+ * their SSEU configuration.
+ *
+ * 5: Add DRM_I915_PERF_PROP_POLL_OA_PERIOD parameter that controls the
+ * interval for the hrtimer used to check for OA data.
*/
- return 3;
+ return 5;
}
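
Userspace would normally gate its use of the two new properties on this revision; a small sketch using the standard getparam ioctl (I915_PARAM_PERF_REVISION is assumed to be available in the uAPI header):

#include <sys/ioctl.h>
#include <drm/i915_drm.h>

int i915_perf_revision(int drm_fd)
{
	int value = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_PERF_REVISION,
		.value = &value,
	};

	if (ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return -1;

	return value;	/* 5 or more means the two properties above exist */
}
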
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/i915_perf_types.h b/drivers/gpu/drm/i915/i915_perf_types.h
index a0e22f00f6cf..a36a455ae336 100644
--- a/drivers/gpu/drm/i915/i915_perf_types.h
+++ b/drivers/gpu/drm/i915/i915_perf_types.h
@@ -16,11 +16,13 @@
#include <linux/uuid.h>
#include <linux/wait.h>
+#include "gt/intel_sseu.h"
#include "i915_reg.h"
#include "intel_wakeref.h"
struct drm_i915_private;
struct file;
+struct i915_active;
struct i915_gem_context;
struct i915_perf;
struct i915_vma;
@@ -272,21 +274,10 @@ struct i915_perf_stream {
spinlock_t ptr_lock;
/**
- * @tails: One 'aging' tail pointer and one 'aged' tail pointer ready to
- * used for reading.
- *
- * Initial values of 0xffffffff are invalid and imply that an
- * update is required (and should be ignored by an attempted
- * read)
- */
- struct {
- u32 offset;
- } tails[2];
-
- /**
- * @aged_tail_idx: Index for the aged tail ready to read() data up to.
+ * @aging_tail: The last tail value reported by the HW. The data
+ * might not have made it to memory yet though.
*/
- unsigned int aged_tail_idx;
+ u32 aging_tail;
/**
* @aging_timestamp: A monotonic timestamp for when the current aging tail pointer
@@ -302,6 +293,11 @@ struct i915_perf_stream {
* OA buffer data to userspace.
*/
u32 head;
+
+ /**
+ * @tail: The last verified tail that can be read by userspace.
+ */
+ u32 tail;
} oa_buffer;
/**
@@ -309,6 +305,12 @@ struct i915_perf_stream {
* reprogrammed.
*/
struct i915_vma *noa_wait;
+
+ /**
+ * @poll_oa_period: The period in nanoseconds at which the OA
+ * buffer should be checked for available data.
+ */
+ u64 poll_oa_period;
};
/**
@@ -339,8 +341,8 @@ struct i915_oa_ops {
* counter reports being sampled. May apply system constraints such as
* disabling EU clock gating as required.
*/
- struct i915_request *
- (*enable_metric_set)(struct i915_perf_stream *stream);
+ int (*enable_metric_set)(struct i915_perf_stream *stream,
+ struct i915_active *active);
/**
* @disable_metric_set: Remove system constraints associated with using
@@ -408,12 +410,22 @@ struct i915_perf {
struct i915_perf_stream *exclusive_stream;
/**
+ * @sseu: sseu configuration selected to run while perf is active,
+ * applies to all contexts.
+ */
+ struct intel_sseu sseu;
+
+ /**
* For rate limiting any notifications of spurious
* invalid OA reports
*/
struct ratelimit_state spurious_report_rs;
- struct i915_oa_config test_config;
+ /**
+ * For rate limiting any notifications of tail pointer
+ * race.
+ */
+ struct ratelimit_state tail_pointer_race;
u32 gen7_latched_oastatus1;
u32 ctx_oactxctrl_offset;
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 2c062534eac1..e991a707bdb7 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -439,29 +439,10 @@ static u64 count_interrupts(struct drm_i915_private *i915)
return sum;
}
-static void engine_event_destroy(struct perf_event *event)
-{
- struct drm_i915_private *i915 =
- container_of(event->pmu, typeof(*i915), pmu.base);
- struct intel_engine_cs *engine;
-
- engine = intel_engine_lookup_user(i915,
- engine_event_class(event),
- engine_event_instance(event));
- if (drm_WARN_ON_ONCE(&i915->drm, !engine))
- return;
-
- if (engine_event_sample(event) == I915_SAMPLE_BUSY &&
- intel_engine_supports_stats(engine))
- intel_disable_engine_stats(engine);
-}
-
static void i915_pmu_event_destroy(struct perf_event *event)
{
WARN_ON(event->parent);
-
- if (is_engine_event(event))
- engine_event_destroy(event);
+ module_put(THIS_MODULE);
}
static int
@@ -514,23 +495,13 @@ static int engine_event_init(struct perf_event *event)
struct drm_i915_private *i915 =
container_of(event->pmu, typeof(*i915), pmu.base);
struct intel_engine_cs *engine;
- u8 sample;
- int ret;
engine = intel_engine_lookup_user(i915, engine_event_class(event),
engine_event_instance(event));
if (!engine)
return -ENODEV;
- sample = engine_event_sample(event);
- ret = engine_event_status(engine, sample);
- if (ret)
- return ret;
-
- if (sample == I915_SAMPLE_BUSY && intel_engine_supports_stats(engine))
- ret = intel_enable_engine_stats(engine);
-
- return ret;
+ return engine_event_status(engine, engine_event_sample(event));
}
static int i915_pmu_event_init(struct perf_event *event)
@@ -563,8 +534,10 @@ static int i915_pmu_event_init(struct perf_event *event)
if (ret)
return ret;
- if (!event->parent)
+ if (!event->parent) {
+ __module_get(THIS_MODULE);
event->destroy = i915_pmu_event_destroy;
+ }
return 0;
}
@@ -1115,7 +1088,7 @@ void i915_pmu_register(struct drm_i915_private *i915)
int ret = -ENOMEM;
if (INTEL_GEN(i915) <= 2) {
- dev_info(i915->drm.dev, "PMU not supported for this GPU.");
+ drm_info(&i915->drm, "PMU not supported for this GPU.");
return;
}
@@ -1178,7 +1151,7 @@ err_name:
if (!is_igp(i915))
kfree(pmu->name);
err:
- dev_notice(i915->drm.dev, "Failed to register PMU!\n");
+ drm_notice(&i915->drm, "Failed to register PMU!\n");
}
void i915_pmu_unregister(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/i915_priolist_types.h b/drivers/gpu/drm/i915/i915_priolist_types.h
index 732aad148881..5003a71113cb 100644
--- a/drivers/gpu/drm/i915/i915_priolist_types.h
+++ b/drivers/gpu/drm/i915/i915_priolist_types.h
@@ -24,15 +24,12 @@ enum {
I915_PRIORITY_DISPLAY,
};
-#define I915_USER_PRIORITY_SHIFT 2
+#define I915_USER_PRIORITY_SHIFT 0
#define I915_USER_PRIORITY(x) ((x) << I915_USER_PRIORITY_SHIFT)
#define I915_PRIORITY_COUNT BIT(I915_USER_PRIORITY_SHIFT)
#define I915_PRIORITY_MASK (I915_PRIORITY_COUNT - 1)
-#define I915_PRIORITY_WAIT ((u8)BIT(0))
-#define I915_PRIORITY_NOSEMAPHORE ((u8)BIT(1))
-
/* Smallest priority value that cannot be bumped. */
#define I915_PRIORITY_INVALID (INT_MIN | (u8)I915_PRIORITY_MASK)
@@ -47,8 +44,6 @@ enum {
#define I915_PRIORITY_UNPREEMPTABLE INT_MAX
#define I915_PRIORITY_BARRIER INT_MAX
-#define __NO_PREEMPTION (I915_PRIORITY_WAIT)
-
struct i915_priolist {
struct list_head requests[I915_PRIORITY_COUNT];
struct rb_node node;
diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c
index ef25ce6e395e..e75c528ebbe0 100644
--- a/drivers/gpu/drm/i915/i915_query.c
+++ b/drivers/gpu/drm/i915/i915_query.c
@@ -25,10 +25,6 @@ static int copy_query_item(void *query_hdr, size_t query_sz,
query_sz))
return -EFAULT;
- if (!access_ok(u64_to_user_ptr(query_item->data_ptr),
- total_length))
- return -EFAULT;
-
return 0;
}
@@ -72,20 +68,20 @@ static int query_topology_info(struct drm_i915_private *dev_priv,
topo.eu_offset = slice_length + subslice_length;
topo.eu_stride = sseu->eu_stride;
- if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr),
+ if (copy_to_user(u64_to_user_ptr(query_item->data_ptr),
&topo, sizeof(topo)))
return -EFAULT;
- if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr + sizeof(topo)),
+ if (copy_to_user(u64_to_user_ptr(query_item->data_ptr + sizeof(topo)),
&sseu->slice_mask, slice_length))
return -EFAULT;
- if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr +
+ if (copy_to_user(u64_to_user_ptr(query_item->data_ptr +
sizeof(topo) + slice_length),
sseu->subslice_mask, subslice_length))
return -EFAULT;
- if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr +
+ if (copy_to_user(u64_to_user_ptr(query_item->data_ptr +
sizeof(topo) +
slice_length + subslice_length),
sseu->eu_mask, eu_length))
@@ -131,14 +127,14 @@ query_engine_info(struct drm_i915_private *i915,
info.engine.engine_instance = engine->uabi_instance;
info.capabilities = engine->uabi_capabilities;
- if (__copy_to_user(info_ptr, &info, sizeof(info)))
+ if (copy_to_user(info_ptr, &info, sizeof(info)))
return -EFAULT;
query.num_engines++;
info_ptr++;
}
- if (__copy_to_user(query_ptr, &query, sizeof(query)))
+ if (copy_to_user(query_ptr, &query, sizeof(query)))
return -EFAULT;
return len;
@@ -158,10 +154,6 @@ static int can_copy_perf_config_registers_or_number(u32 user_n_regs,
if (user_n_regs < kernel_n_regs)
return -EINVAL;
- if (!access_ok(u64_to_user_ptr(user_regs_ptr),
- 2 * sizeof(u32) * kernel_n_regs))
- return -EFAULT;
-
return 0;
}
@@ -170,6 +162,7 @@ static int copy_perf_config_registers_or_number(const struct i915_oa_reg *kernel
u64 user_regs_ptr,
u32 *user_n_regs)
{
+ u32 __user *p = u64_to_user_ptr(user_regs_ptr);
u32 r;
if (*user_n_regs == 0) {
@@ -179,25 +172,19 @@ static int copy_perf_config_registers_or_number(const struct i915_oa_reg *kernel
*user_n_regs = kernel_n_regs;
- for (r = 0; r < kernel_n_regs; r++) {
- u32 __user *user_reg_ptr =
- u64_to_user_ptr(user_regs_ptr + sizeof(u32) * r * 2);
- u32 __user *user_val_ptr =
- u64_to_user_ptr(user_regs_ptr + sizeof(u32) * r * 2 +
- sizeof(u32));
- int ret;
-
- ret = __put_user(i915_mmio_reg_offset(kernel_regs[r].addr),
- user_reg_ptr);
- if (ret)
- return -EFAULT;
+ if (!user_write_access_begin(p, 2 * sizeof(u32) * kernel_n_regs))
+ return -EFAULT;
- ret = __put_user(kernel_regs[r].value, user_val_ptr);
- if (ret)
- return -EFAULT;
+ for (r = 0; r < kernel_n_regs; r++, p += 2) {
+ unsafe_put_user(i915_mmio_reg_offset(kernel_regs[r].addr),
+ p, Efault);
+ unsafe_put_user(kernel_regs[r].value, p + 1, Efault);
}
-
+ user_write_access_end();
return 0;
+Efault:
+ user_write_access_end();
+ return -EFAULT;
}
static int query_perf_config_data(struct drm_i915_private *i915,
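
The conversion above follows the common begin/unsafe/end pattern: a single user_write_access_begin() covering the whole destination range, a run of unsafe_put_user() stores, and user_write_access_end() on both the success and the fault path. A condensed kernel-style sketch of that pattern (not the driver's actual helper):

/* Copy n (offset, value) pairs to userspace with one access check. */
static int copy_pairs_to_user(u32 __user *p, const u32 *vals, unsigned int n)
{
	unsigned int i;

	if (!user_write_access_begin(p, 2 * sizeof(u32) * n))
		return -EFAULT;

	for (i = 0; i < n; i++, p += 2) {
		unsafe_put_user(vals[2 * i], p, Efault);
		unsafe_put_user(vals[2 * i + 1], p + 1, Efault);
	}

	user_write_access_end();
	return 0;

Efault:
	user_write_access_end();
	return -EFAULT;
}
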
@@ -233,10 +220,7 @@ static int query_perf_config_data(struct drm_i915_private *i915,
return -EINVAL;
}
- if (!access_ok(user_query_config_ptr, total_size))
- return -EFAULT;
-
- if (__get_user(flags, &user_query_config_ptr->flags))
+ if (get_user(flags, &user_query_config_ptr->flags))
return -EFAULT;
if (flags != 0)
@@ -249,7 +233,7 @@ static int query_perf_config_data(struct drm_i915_private *i915,
BUILD_BUG_ON(sizeof(user_query_config_ptr->uuid) >= sizeof(uuid));
memset(&uuid, 0, sizeof(uuid));
- if (__copy_from_user(uuid, user_query_config_ptr->uuid,
+ if (copy_from_user(uuid, user_query_config_ptr->uuid,
sizeof(user_query_config_ptr->uuid)))
return -EFAULT;
@@ -263,7 +247,7 @@ static int query_perf_config_data(struct drm_i915_private *i915,
}
rcu_read_unlock();
} else {
- if (__get_user(config_id, &user_query_config_ptr->config))
+ if (get_user(config_id, &user_query_config_ptr->config))
return -EFAULT;
oa_config = i915_perf_get_oa_config(perf, config_id);
@@ -271,8 +255,7 @@ static int query_perf_config_data(struct drm_i915_private *i915,
if (!oa_config)
return -ENOENT;
- if (__copy_from_user(&user_config, user_config_ptr,
- sizeof(user_config))) {
+ if (copy_from_user(&user_config, user_config_ptr, sizeof(user_config))) {
ret = -EFAULT;
goto out;
}
@@ -318,8 +301,7 @@ static int query_perf_config_data(struct drm_i915_private *i915,
memcpy(user_config.uuid, oa_config->uuid, sizeof(user_config.uuid));
- if (__copy_to_user(user_config_ptr, &user_config,
- sizeof(user_config))) {
+ if (copy_to_user(user_config_ptr, &user_config, sizeof(user_config))) {
ret = -EFAULT;
goto out;
}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 6e12000c4b6b..7717581350bd 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -186,7 +186,7 @@ typedef struct {
#define INVALID_MMIO_REG _MMIO(0)
-static inline u32 i915_mmio_reg_offset(i915_reg_t reg)
+static __always_inline u32 i915_mmio_reg_offset(i915_reg_t reg)
{
return reg.reg;
}
@@ -561,6 +561,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
* Registers used only by the command parser
*/
#define BCS_SWCTRL _MMIO(0x22200)
+#define BCS_SRC_Y REG_BIT(0)
+#define BCS_DST_Y REG_BIT(1)
/* There are 16 GPR registers */
#define BCS_GPR(n) _MMIO(0x22600 + (n) * 8)
@@ -2555,6 +2557,14 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN10_PAT_INDEX(index) _MMIO(0x40e0 + (index) * 4)
#define GEN12_PAT_INDEX(index) _MMIO(0x4800 + (index) * 4)
#define BSD_HWS_PGA_GEN7 _MMIO(0x04180)
+#define GEN12_GFX_CCS_AUX_NV _MMIO(0x4208)
+#define GEN12_VD0_AUX_NV _MMIO(0x4218)
+#define GEN12_VD1_AUX_NV _MMIO(0x4228)
+#define GEN12_VD2_AUX_NV _MMIO(0x4298)
+#define GEN12_VD3_AUX_NV _MMIO(0x42A8)
+#define GEN12_VE0_AUX_NV _MMIO(0x4238)
+#define GEN12_VE1_AUX_NV _MMIO(0x42B8)
+#define AUX_INV REG_BIT(0)
#define BLT_HWS_PGA_GEN7 _MMIO(0x04280)
#define VEBOX_HWS_PGA_GEN7 _MMIO(0x04380)
#define RING_ACTHD(base) _MMIO((base) + 0x74)
@@ -2657,6 +2667,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define RING_DMA_FADD_UDW(base) _MMIO((base) + 0x60) /* gen8+ */
#define RING_INSTPM(base) _MMIO((base) + 0xc0)
#define RING_MI_MODE(base) _MMIO((base) + 0x9c)
+#define RING_CMD_BUF_CCTL(base) _MMIO((base) + 0x84)
#define INSTPS _MMIO(0x2070) /* 965+ only */
#define GEN4_INSTDONE1 _MMIO(0x207c) /* 965+ only, aka INSTDONE_2 on SNB */
#define ACTHD_I965 _MMIO(0x2074)
@@ -4013,31 +4024,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN6_RP_STATE_LIMITS _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5994)
#define GEN6_RP_STATE_CAP _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5998)
#define BXT_RP_STATE_CAP _MMIO(0x138170)
-
-/*
- * Make these a multiple of magic 25 to avoid SNB (eg. Dell XPS
- * 8300) freezing up around GPU hangs. Looks as if even
- * scheduling/timer interrupts start misbehaving if the RPS
- * EI/thresholds are "bad", leading to a very sluggish or even
- * frozen machine.
- */
-#define INTERVAL_1_28_US(us) roundup(((us) * 100) >> 7, 25)
-#define INTERVAL_1_33_US(us) (((us) * 3) >> 2)
-#define INTERVAL_0_833_US(us) (((us) * 6) / 5)
-#define GT_INTERVAL_FROM_US(dev_priv, us) (INTEL_GEN(dev_priv) >= 9 ? \
- (IS_GEN9_LP(dev_priv) ? \
- INTERVAL_0_833_US(us) : \
- INTERVAL_1_33_US(us)) : \
- INTERVAL_1_28_US(us))
-
-#define INTERVAL_1_28_TO_US(interval) (((interval) << 7) / 100)
-#define INTERVAL_1_33_TO_US(interval) (((interval) << 2) / 3)
-#define INTERVAL_0_833_TO_US(interval) (((interval) * 5) / 6)
-#define GT_PM_INTERVAL_TO_US(dev_priv, interval) (INTEL_GEN(dev_priv) >= 9 ? \
- (IS_GEN9_LP(dev_priv) ? \
- INTERVAL_0_833_TO_US(interval) : \
- INTERVAL_1_33_TO_US(interval)) : \
- INTERVAL_1_28_TO_US(interval))
+#define GEN9_RP_STATE_LIMITS _MMIO(0x138148)
/*
* Logical Context regs
@@ -4325,6 +4312,96 @@ enum {
#define EXITLINE_MASK REG_GENMASK(12, 0)
#define EXITLINE_SHIFT 0
+/* VRR registers */
+#define _TRANS_VRR_CTL_A 0x60420
+#define _TRANS_VRR_CTL_B 0x61420
+#define _TRANS_VRR_CTL_C 0x62420
+#define _TRANS_VRR_CTL_D 0x63420
+#define TRANS_VRR_CTL(trans) _MMIO_TRANS2(trans, _TRANS_VRR_CTL_A)
+#define VRR_CTL_VRR_ENABLE REG_BIT(31)
+#define VRR_CTL_IGN_MAX_SHIFT REG_BIT(30)
+#define VRR_CTL_FLIP_LINE_EN REG_BIT(29)
+#define VRR_CTL_LINE_COUNT_MASK REG_GENMASK(10, 3)
+#define VRR_CTL_SW_FULLLINE_COUNT REG_BIT(0)
+
+#define _TRANS_VRR_VMAX_A 0x60424
+#define _TRANS_VRR_VMAX_B 0x61424
+#define _TRANS_VRR_VMAX_C 0x62424
+#define _TRANS_VRR_VMAX_D 0x63424
+#define TRANS_VRR_VMAX(trans) _MMIO_TRANS2(trans, _TRANS_VRR_VMAX_A)
+#define VRR_VMAX_MASK REG_GENMASK(19, 0)
+
+#define _TRANS_VRR_VMIN_A 0x60434
+#define _TRANS_VRR_VMIN_B 0x61434
+#define _TRANS_VRR_VMIN_C 0x62434
+#define _TRANS_VRR_VMIN_D 0x63434
+#define TRANS_VRR_VMIN(trans) _MMIO_TRANS2(trans, _TRANS_VRR_VMIN_A)
+#define VRR_VMIN_MASK REG_GENMASK(15, 0)
+
+#define _TRANS_VRR_VMAXSHIFT_A 0x60428
+#define _TRANS_VRR_VMAXSHIFT_B 0x61428
+#define _TRANS_VRR_VMAXSHIFT_C 0x62428
+#define _TRANS_VRR_VMAXSHIFT_D 0x63428
+#define TRANS_VRR_VMAXSHIFT(trans) _MMIO_TRANS2(trans, \
+ _TRANS_VRR_VMAXSHIFT_A)
+#define VRR_VMAXSHIFT_DEC_MASK REG_GENMASK(29, 16)
+#define VRR_VMAXSHIFT_DEC REG_BIT(16)
+#define VRR_VMAXSHIFT_INC_MASK REG_GENMASK(12, 0)
+
+#define _TRANS_VRR_STATUS_A 0x6042C
+#define _TRANS_VRR_STATUS_B 0x6142C
+#define _TRANS_VRR_STATUS_C 0x6242C
+#define _TRANS_VRR_STATUS_D 0x6342C
+#define TRANS_VRR_STATUS(trans) _MMIO_TRANS2(trans, _TRANS_VRR_STATUS_A)
+#define VRR_STATUS_VMAX_REACHED REG_BIT(31)
+#define VRR_STATUS_NOFLIP_TILL_BNDR REG_BIT(30)
+#define VRR_STATUS_FLIP_BEF_BNDR REG_BIT(29)
+#define VRR_STATUS_NO_FLIP_FRAME REG_BIT(28)
+#define VRR_STATUS_VRR_EN_LIVE REG_BIT(27)
+#define VRR_STATUS_FLIPS_SERVICED REG_BIT(26)
+#define VRR_STATUS_VBLANK_MASK REG_GENMASK(22, 20)
+#define STATUS_FSM_IDLE REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 0)
+#define STATUS_FSM_WAIT_TILL_FDB REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 1)
+#define STATUS_FSM_WAIT_TILL_FS REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 2)
+#define STATUS_FSM_WAIT_TILL_FLIP REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 3)
+#define STATUS_FSM_PIPELINE_FILL REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 4)
+#define STATUS_FSM_ACTIVE REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 5)
+#define STATUS_FSM_LEGACY_VBLANK REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 6)
+
+#define _TRANS_VRR_VTOTAL_PREV_A 0x60480
+#define _TRANS_VRR_VTOTAL_PREV_B 0x61480
+#define _TRANS_VRR_VTOTAL_PREV_C 0x62480
+#define _TRANS_VRR_VTOTAL_PREV_D 0x63480
+#define TRANS_VRR_VTOTAL_PREV(trans) _MMIO_TRANS2(trans, \
+ _TRANS_VRR_VTOTAL_PREV_A)
+#define VRR_VTOTAL_FLIP_BEFR_BNDR REG_BIT(31)
+#define VRR_VTOTAL_FLIP_AFTER_BNDR REG_BIT(30)
+#define VRR_VTOTAL_FLIP_AFTER_DBLBUF REG_BIT(29)
+#define VRR_VTOTAL_PREV_FRAME_MASK REG_GENMASK(19, 0)
+
+#define _TRANS_VRR_FLIPLINE_A 0x60438
+#define _TRANS_VRR_FLIPLINE_B 0x61438
+#define _TRANS_VRR_FLIPLINE_C 0x62438
+#define _TRANS_VRR_FLIPLINE_D 0x63438
+#define TRANS_VRR_FLIPLINE(trans) _MMIO_TRANS2(trans, \
+ _TRANS_VRR_FLIPLINE_A)
+#define VRR_FLIPLINE_MASK REG_GENMASK(19, 0)
+
+#define _TRANS_VRR_STATUS2_A 0x6043C
+#define _TRANS_VRR_STATUS2_B 0x6143C
+#define _TRANS_VRR_STATUS2_C 0x6243C
+#define _TRANS_VRR_STATUS2_D 0x6343C
+#define TRANS_VRR_STATUS2(trans) _MMIO_TRANS2(trans, _TRANS_VRR_STATUS2_A)
+#define VRR_STATUS2_VERT_LN_CNT_MASK REG_GENMASK(19, 0)
+
+#define _TRANS_PUSH_A 0x60A70
+#define _TRANS_PUSH_B 0x61A70
+#define _TRANS_PUSH_C 0x62A70
+#define _TRANS_PUSH_D 0x63A70
+#define TRANS_PUSH(trans) _MMIO_TRANS2(trans, _TRANS_PUSH_A)
+#define TRANS_PUSH_EN REG_BIT(31)
+#define TRANS_PUSH_SEND REG_BIT(30)
+
/*
* HSW+ eDP PSR registers
*
@@ -6765,7 +6842,7 @@ enum {
#define PLANE_CTL_FORMAT_P012 (5 << 24)
#define PLANE_CTL_FORMAT_XRGB_16161616F (6 << 24)
#define PLANE_CTL_FORMAT_P016 (7 << 24)
-#define PLANE_CTL_FORMAT_AYUV (8 << 24)
+#define PLANE_CTL_FORMAT_XYUV (8 << 24)
#define PLANE_CTL_FORMAT_INDEXED (12 << 24)
#define PLANE_CTL_FORMAT_RGB_565 (14 << 24)
#define ICL_PLANE_CTL_FORMAT_MASK (0x1f << 23)
@@ -8504,6 +8581,7 @@ enum {
#define FDI_BC_BIFURCATION_SELECT (1 << 12)
#define CHASSIS_CLK_REQ_DURATION_MASK (0xf << 8)
#define CHASSIS_CLK_REQ_DURATION(x) ((x) << 8)
+#define SBCLK_RUN_REFCLK_DIS (1 << 7)
#define SPT_PWM_GRANULARITY (1 << 0)
#define SOUTH_CHICKEN2 _MMIO(0xc2004)
#define FDI_MPHY_IOSFSB_RESET_STATUS (1 << 13)
@@ -8995,6 +9073,7 @@ enum {
#define GEN7_PCODE_ILLEGAL_DATA 0x3
#define GEN11_PCODE_ILLEGAL_SUBCOMMAND 0x4
#define GEN11_PCODE_LOCKED 0x6
+#define GEN11_PCODE_REJECTED 0x11
#define GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE 0x10
#define GEN6_PCODE_WRITE_RC6VIDS 0x4
#define GEN6_PCODE_READ_RC6VIDS 0x5
@@ -9016,10 +9095,18 @@ enum {
#define ICL_PCODE_MEM_SUBSYSYSTEM_INFO 0xd
#define ICL_PCODE_MEM_SS_READ_GLOBAL_INFO (0x0 << 8)
#define ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point) (((point) << 16) | (0x1 << 8))
+#define ICL_PCODE_SAGV_DE_MEM_SS_CONFIG 0xe
+#define ICL_PCODE_POINTS_RESTRICTED 0x0
+#define ICL_PCODE_POINTS_RESTRICTED_MASK 0x1
#define GEN6_PCODE_READ_D_COMP 0x10
#define GEN6_PCODE_WRITE_D_COMP 0x11
+#define ICL_PCODE_EXIT_TCCOLD 0x12
#define HSW_PCODE_DE_WRITE_FREQ_REQ 0x17
#define DISPLAY_IPS_CONTROL 0x19
+#define TGL_PCODE_TCCOLD 0x26
+#define TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED REG_BIT(0)
+#define TGL_PCODE_EXIT_TCCOLD_DATA_H_BLOCK_REQ 0
+#define TGL_PCODE_EXIT_TCCOLD_DATA_H_UNBLOCK_REQ REG_BIT(0)
/* See also IPS_CTL */
#define IPS_PCODE_CONTROL (1 << 30)
#define HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL 0x1A
@@ -9306,6 +9393,22 @@ enum {
#define AUD_PIN_BUF_CTL _MMIO(0x48414)
#define AUD_PIN_BUF_ENABLE REG_BIT(31)
+/* Display Audio Config Reg */
+#define AUD_CONFIG_BE _MMIO(0x65ef0)
+#define HBLANK_EARLY_ENABLE_ICL(pipe) (0x1 << (20 - (pipe)))
+#define HBLANK_EARLY_ENABLE_TGL(pipe) (0x1 << (24 + (pipe)))
+#define HBLANK_START_COUNT_MASK(pipe) (0x7 << (3 + ((pipe) * 6)))
+#define HBLANK_START_COUNT(pipe, val) (((val) & 0x7) << (3 + ((pipe)) * 6))
+#define NUMBER_SAMPLES_PER_LINE_MASK(pipe) (0x3 << ((pipe) * 6))
+#define NUMBER_SAMPLES_PER_LINE(pipe, val) (((val) & 0x3) << ((pipe) * 6))
+
+#define HBLANK_START_COUNT_8 0
+#define HBLANK_START_COUNT_16 1
+#define HBLANK_START_COUNT_32 2
+#define HBLANK_START_COUNT_64 3
+#define HBLANK_START_COUNT_96 4
+#define HBLANK_START_COUNT_128 5
+
/*
* HSW - ICL power wells
*
@@ -9701,8 +9804,11 @@ enum skl_power_gate {
#define TRANS_DDI_BPC_10 (1 << 20)
#define TRANS_DDI_BPC_6 (2 << 20)
#define TRANS_DDI_BPC_12 (3 << 20)
+#define TRANS_DDI_PORT_SYNC_MASTER_SELECT_MASK REG_GENMASK(19, 18) /* bdw-cnl */
+#define TRANS_DDI_PORT_SYNC_MASTER_SELECT(x) REG_FIELD_PREP(TRANS_DDI_PORT_SYNC_MASTER_SELECT_MASK, (x))
#define TRANS_DDI_PVSYNC (1 << 17)
#define TRANS_DDI_PHSYNC (1 << 16)
+#define TRANS_DDI_PORT_SYNC_ENABLE REG_BIT(15) /* bdw-cnl */
#define TRANS_DDI_EDP_INPUT_MASK (7 << 12)
#define TRANS_DDI_EDP_INPUT_A_ON (0 << 12)
#define TRANS_DDI_EDP_INPUT_A_ONOFF (4 << 12)
@@ -9729,12 +9835,10 @@ enum skl_power_gate {
#define _TRANS_DDI_FUNC_CTL2_EDP 0x6f404
#define _TRANS_DDI_FUNC_CTL2_DSI0 0x6b404
#define _TRANS_DDI_FUNC_CTL2_DSI1 0x6bc04
-#define TRANS_DDI_FUNC_CTL2(tran) _MMIO_TRANS2(tran, \
- _TRANS_DDI_FUNC_CTL2_A)
-#define PORT_SYNC_MODE_ENABLE (1 << 4)
-#define PORT_SYNC_MODE_MASTER_SELECT(x) ((x) << 0)
-#define PORT_SYNC_MODE_MASTER_SELECT_MASK (0x7 << 0)
-#define PORT_SYNC_MODE_MASTER_SELECT_SHIFT 0
+#define TRANS_DDI_FUNC_CTL2(tran) _MMIO_TRANS2(tran, _TRANS_DDI_FUNC_CTL2_A)
+#define PORT_SYNC_MODE_ENABLE REG_BIT(4)
+#define PORT_SYNC_MODE_MASTER_SELECT_MASK REG_GENMASK(2, 0)
+#define PORT_SYNC_MODE_MASTER_SELECT(x) REG_FIELD_PREP(PORT_SYNC_MODE_MASTER_SELECT_MASK, (x))
/* DisplayPort Transport Control */
#define _DP_TP_CTL_A 0x64040
@@ -9795,6 +9899,24 @@ enum skl_power_gate {
#define DDI_BUF_BALANCE_LEG_ENABLE (1 << 31)
#define DDI_BUF_TRANS_HI(port, i) _MMIO(_PORT(port, _DDI_BUF_TRANS_A, _DDI_BUF_TRANS_B) + (i) * 8 + 4)
+/* DDI DP Compliance Control */
+#define _DDI_DP_COMP_CTL_A 0x605F0
+#define _DDI_DP_COMP_CTL_B 0x615F0
+#define DDI_DP_COMP_CTL(pipe) _MMIO_PIPE(pipe, _DDI_DP_COMP_CTL_A, _DDI_DP_COMP_CTL_B)
+#define DDI_DP_COMP_CTL_ENABLE (1 << 31)
+#define DDI_DP_COMP_CTL_D10_2 (0 << 28)
+#define DDI_DP_COMP_CTL_SCRAMBLED_0 (1 << 28)
+#define DDI_DP_COMP_CTL_PRBS7 (2 << 28)
+#define DDI_DP_COMP_CTL_CUSTOM80 (3 << 28)
+#define DDI_DP_COMP_CTL_HBR2 (4 << 28)
+#define DDI_DP_COMP_CTL_SCRAMBLED_1 (5 << 28)
+#define DDI_DP_COMP_CTL_HBR2_RESET (0xFC << 0)
+
+/* DDI DP Compliance Pattern */
+#define _DDI_DP_COMP_PAT_A 0x605F4
+#define _DDI_DP_COMP_PAT_B 0x615F4
+#define DDI_DP_COMP_PAT(pipe, i) _MMIO(_PIPE(pipe, _DDI_DP_COMP_PAT_A, _DDI_DP_COMP_PAT_B) + (i) * 4)
+
/* Sideband Interface (SBI) is programmed indirectly, via
* SBI_ADDR, which contains the register offset; and SBI_DATA,
* which contains the payload */
@@ -10742,6 +10864,12 @@ enum skl_power_gate {
#define _PAL_PREC_MULTI_SEG_DATA_A 0x4A40C
#define _PAL_PREC_MULTI_SEG_DATA_B 0x4AC0C
+#define PAL_PREC_MULTI_SEG_RED_LDW_MASK REG_GENMASK(29, 24)
+#define PAL_PREC_MULTI_SEG_RED_UDW_MASK REG_GENMASK(29, 20)
+#define PAL_PREC_MULTI_SEG_GREEN_LDW_MASK REG_GENMASK(19, 14)
+#define PAL_PREC_MULTI_SEG_GREEN_UDW_MASK REG_GENMASK(19, 10)
+#define PAL_PREC_MULTI_SEG_BLUE_LDW_MASK REG_GENMASK(9, 4)
+#define PAL_PREC_MULTI_SEG_BLUE_UDW_MASK REG_GENMASK(9, 0)
#define PREC_PAL_MULTI_SEG_INDEX(pipe) _MMIO_PIPE(pipe, \
_PAL_PREC_MULTI_SEG_INDEX_A, \
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index e2b78db685ea..def62100e666 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -23,6 +23,7 @@
*/
#include <linux/dma-fence-array.h>
+#include <linux/dma-fence-chain.h>
#include <linux/irq_work.h>
#include <linux/prefetch.h>
#include <linux/sched.h>
@@ -101,6 +102,11 @@ static signed long i915_fence_wait(struct dma_fence *fence,
timeout);
}
+struct kmem_cache *i915_request_slab_cache(void)
+{
+ return global.slab_requests;
+}
+
static void i915_fence_release(struct dma_fence *fence)
{
struct i915_request *rq = to_request(fence);
@@ -115,6 +121,41 @@ static void i915_fence_release(struct dma_fence *fence)
i915_sw_fence_fini(&rq->submit);
i915_sw_fence_fini(&rq->semaphore);
+ /*
+ * Keep one request on each engine for reserved use under mempressure
+ *
+ * We do not hold a reference to the engine here and so have to be
+ * very careful in what rq->engine we poke. The virtual engine is
+ * referenced via the rq->context and we released that ref during
+ * i915_request_retire(), ergo we must not dereference a virtual
+ * engine here. Not that we would want to, as the only consumer of
+ * the reserved engine->request_pool is the power management parking,
+ * which must-not-fail, and that is only run on the physical engines.
+ *
+ * Since the request must have been executed to have completed,
+ * we know that it will have been processed by the HW and will
+ * not be unsubmitted again, so rq->engine and rq->execution_mask
+ * at this point are stable. rq->execution_mask will be a single
+ * bit if the last and _only_ engine it could execute on was a
+ * physical engine; if it has multiple bits set then it started on,
+ * and could still be on, a virtual engine. Thus if the mask is not a
+ * power-of-two we assume that rq->engine may still be a virtual
+ * engine and so a dangling, invalid pointer that we cannot dereference.
+ *
+ * For example, consider the flow of a bonded request through a virtual
+ * engine. The request is created with a wide engine mask (all engines
+ * that we might execute on). On processing the bond, the request mask
+ * is reduced to one or more engines. If the request is subsequently
+ * bound to a single engine, it will then be constrained to only
+ * execute on that engine and never returned to the virtual engine
+ * after timeslicing away, see __unwind_incomplete_requests(). Thus we
+ * know that if the rq->execution_mask is a single bit, rq->engine
+ * can be a physical engine with the exact corresponding mask.
+ */
+ if (is_power_of_2(rq->execution_mask) &&
+ !cmpxchg(&rq->engine->request_pool, NULL, rq))
+ return;
+
kmem_cache_free(global.slab_requests, rq);
}
@@ -316,6 +357,53 @@ void i915_request_retire_upto(struct i915_request *rq)
} while (i915_request_retire(tmp) && tmp != rq);
}
+static struct i915_request * const *
+__engine_active(struct intel_engine_cs *engine)
+{
+ return READ_ONCE(engine->execlists.active);
+}
+
+static bool __request_in_flight(const struct i915_request *signal)
+{
+ struct i915_request * const *port, *rq;
+ bool inflight = false;
+
+ if (!i915_request_is_ready(signal))
+ return false;
+
+ /*
+ * Even if we have unwound the request, it may still be on
+ * the GPU (preempt-to-busy). If that request is inside an
+ * unpreemptible critical section, it will not be removed. Some
+ * GPU functions may even be stuck waiting for the paired request
+ * (__await_execution) to be submitted and cannot be preempted
+ * until the bond is executing.
+ *
+ * As we know that there are always preemption points between
+ * requests, we know that only the currently executing request
+ * may still be active even though we have cleared the flag.
+ * However, we can't rely on our tracking of ELSP[0] to know
+ * which request is currently active and so may be stuck, as
+ * the tracking may be an event behind. Instead assume that
+ * if the context is still inflight, then it is still active
+ * even if the active flag has been cleared.
+ */
+ if (!intel_context_inflight(signal->context))
+ return false;
+
+ rcu_read_lock();
+ for (port = __engine_active(signal->engine); (rq = *port); port++) {
+ if (rq->context == signal->context) {
+ inflight = i915_seqno_passed(rq->fence.seqno,
+ signal->fence.seqno);
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return inflight;
+}
+
static int
__await_execution(struct i915_request *rq,
struct i915_request *signal,
@@ -346,7 +434,7 @@ __await_execution(struct i915_request *rq,
}
spin_lock_irq(&signal->lock);
- if (i915_request_is_active(signal)) {
+ if (i915_request_is_active(signal) || __request_in_flight(signal)) {
if (hook) {
hook(rq, &signal->fence);
i915_request_put(signal);
@@ -358,8 +446,6 @@ __await_execution(struct i915_request *rq,
}
spin_unlock_irq(&signal->lock);
- /* Copy across semaphore status as we need the same behaviour */
- rq->sched.flags |= signal->sched.flags;
return 0;
}
@@ -527,10 +613,8 @@ void __i915_request_unsubmit(struct i915_request *request)
spin_unlock(&request->lock);
/* We've already spun, don't charge on resubmitting. */
- if (request->sched.semaphores && i915_request_started(request)) {
- request->sched.attr.priority |= I915_PRIORITY_NOSEMAPHORE;
+ if (request->sched.semaphores && i915_request_started(request))
request->sched.semaphores = 0;
- }
/*
* We don't need to wake_up any waiters on request->execute, they
@@ -588,15 +672,6 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
return NOTIFY_DONE;
}
-static void irq_semaphore_cb(struct irq_work *wrk)
-{
- struct i915_request *rq =
- container_of(wrk, typeof(*rq), semaphore_work);
-
- i915_schedule_bump_priority(rq, I915_PRIORITY_NOSEMAPHORE);
- i915_request_put(rq);
-}
-
static int __i915_sw_fence_call
semaphore_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
@@ -604,11 +679,6 @@ semaphore_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
switch (state) {
case FENCE_COMPLETE:
- if (!(READ_ONCE(rq->sched.attr.priority) & I915_PRIORITY_NOSEMAPHORE)) {
- i915_request_get(rq);
- init_irq_work(&rq->semaphore_work, irq_semaphore_cb);
- irq_work_queue(&rq->semaphore_work);
- }
break;
case FENCE_FREE:
@@ -629,14 +699,22 @@ static void retire_requests(struct intel_timeline *tl)
}
static noinline struct i915_request *
-request_alloc_slow(struct intel_timeline *tl, gfp_t gfp)
+request_alloc_slow(struct intel_timeline *tl,
+ struct i915_request **rsvd,
+ gfp_t gfp)
{
struct i915_request *rq;
- if (list_empty(&tl->requests))
- goto out;
+ /* If we cannot wait, dip into our reserves */
+ if (!gfpflags_allow_blocking(gfp)) {
+ rq = xchg(rsvd, NULL);
+ if (!rq) /* Use the normal failure path for one final WARN */
+ goto out;
+
+ return rq;
+ }
- if (!gfpflags_allow_blocking(gfp))
+ if (list_empty(&tl->requests))
goto out;
/* Move our oldest request to the slab-cache (if not in use!) */
@@ -721,7 +799,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
rq = kmem_cache_alloc(global.slab_requests,
gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
if (unlikely(!rq)) {
- rq = request_alloc_slow(tl, gfp);
+ rq = request_alloc_slow(tl, &ce->engine->request_pool, gfp);
if (!rq) {
ret = -ENOMEM;
goto err_unreserve;
@@ -933,6 +1011,7 @@ __emit_semaphore_wait(struct i915_request *to,
u32 *cs;
GEM_BUG_ON(INTEL_GEN(to->i915) < 8);
+ GEM_BUG_ON(i915_request_has_initial_breadcrumb(to));
/* We need to pin the signaler's HWSP until we are finished reading. */
err = intel_timeline_read_hwsp(from, to, &hwsp_offset);
@@ -978,13 +1057,26 @@ emit_semaphore_wait(struct i915_request *to,
gfp_t gfp)
{
const intel_engine_mask_t mask = READ_ONCE(from->engine)->mask;
+ struct i915_sw_fence *wait = &to->submit;
if (!intel_context_use_semaphores(to->context))
goto await_fence;
+ if (i915_request_has_initial_breadcrumb(to))
+ goto await_fence;
+
if (!rcu_access_pointer(from->hwsp_cacheline))
goto await_fence;
+ /*
+ * If this or its dependents are waiting on an external fence
+ * that may fail catastrophically, then we want to avoid using
+ * semaphores as they bypass the fence signaling metadata, and we
+ * lose the fence->error propagation.
+ */
+ if (from->sched.flags & I915_SCHED_HAS_EXTERNAL_CHAIN)
+ goto await_fence;
+
/* Just emit the first semaphore we see as request space is limited. */
if (already_busywaiting(to) & mask)
goto await_fence;
@@ -1000,118 +1092,14 @@ emit_semaphore_wait(struct i915_request *to,
goto await_fence;
to->sched.semaphores |= mask;
- to->sched.flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
- return 0;
+ wait = &to->semaphore;
await_fence:
- return i915_sw_fence_await_dma_fence(&to->submit,
+ return i915_sw_fence_await_dma_fence(wait,
&from->fence, 0,
I915_FENCE_GFP);
}
-static int
-i915_request_await_request(struct i915_request *to, struct i915_request *from)
-{
- int ret;
-
- GEM_BUG_ON(to == from);
- GEM_BUG_ON(to->timeline == from->timeline);
-
- if (i915_request_completed(from)) {
- i915_sw_fence_set_error_once(&to->submit, from->fence.error);
- return 0;
- }
-
- if (to->engine->schedule) {
- ret = i915_sched_node_add_dependency(&to->sched,
- &from->sched,
- I915_DEPENDENCY_EXTERNAL);
- if (ret < 0)
- return ret;
- }
-
- if (to->engine == from->engine)
- ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
- &from->submit,
- I915_FENCE_GFP);
- else
- ret = emit_semaphore_wait(to, from, I915_FENCE_GFP);
- if (ret < 0)
- return ret;
-
- if (to->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN) {
- ret = i915_sw_fence_await_dma_fence(&to->semaphore,
- &from->fence, 0,
- I915_FENCE_GFP);
- if (ret < 0)
- return ret;
- }
-
- return 0;
-}
-
-int
-i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
-{
- struct dma_fence **child = &fence;
- unsigned int nchild = 1;
- int ret;
-
- /*
- * Note that if the fence-array was created in signal-on-any mode,
- * we should *not* decompose it into its individual fences. However,
- * we don't currently store which mode the fence-array is operating
- * in. Fortunately, the only user of signal-on-any is private to
- * amdgpu and we should not see any incoming fence-array from
- * sync-file being in signal-on-any mode.
- */
- if (dma_fence_is_array(fence)) {
- struct dma_fence_array *array = to_dma_fence_array(fence);
-
- child = array->fences;
- nchild = array->num_fences;
- GEM_BUG_ON(!nchild);
- }
-
- do {
- fence = *child++;
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
- i915_sw_fence_set_error_once(&rq->submit, fence->error);
- continue;
- }
-
- /*
- * Requests on the same timeline are explicitly ordered, along
- * with their dependencies, by i915_request_add() which ensures
- * that requests are submitted in-order through each ring.
- */
- if (fence->context == rq->fence.context)
- continue;
-
- /* Squash repeated waits to the same timelines */
- if (fence->context &&
- intel_timeline_sync_is_later(i915_request_timeline(rq),
- fence))
- continue;
-
- if (dma_fence_is_i915(fence))
- ret = i915_request_await_request(rq, to_request(fence));
- else
- ret = i915_sw_fence_await_dma_fence(&rq->submit, fence,
- fence->context ? I915_FENCE_TIMEOUT : 0,
- I915_FENCE_GFP);
- if (ret < 0)
- return ret;
-
- /* Record the latest fence used against each timeline */
- if (fence->context)
- intel_timeline_sync_set(i915_request_timeline(rq),
- fence);
- } while (--nchild);
-
- return 0;
-}
-
static bool intel_timeline_sync_has_start(struct intel_timeline *tl,
struct dma_fence *fence)
{
@@ -1179,7 +1167,8 @@ __i915_request_await_execution(struct i915_request *to,
* immediate execution, and so we must wait until it reaches the
* active slot.
*/
- if (intel_engine_has_semaphores(to->engine)) {
+ if (intel_engine_has_semaphores(to->engine) &&
+ !i915_request_has_initial_breadcrumb(to)) {
err = __emit_semaphore_wait(to, from, from->fence.seqno - 1);
if (err < 0)
return err;
@@ -1198,6 +1187,55 @@ __i915_request_await_execution(struct i915_request *to,
&from->fence);
}
+static void mark_external(struct i915_request *rq)
+{
+ /*
+ * The downside of using semaphores is that we lose metadata passing
+ * along the signaling chain. This is particularly nasty when we
+ * need to pass along a fatal error such as EFAULT or EDEADLK. For
+ * fatal errors we want to scrub the request before it is executed,
+ * which means that we cannot preload the request onto HW and have
+ * it wait upon a semaphore.
+ */
+ rq->sched.flags |= I915_SCHED_HAS_EXTERNAL_CHAIN;
+}
+
+static int
+__i915_request_await_external(struct i915_request *rq, struct dma_fence *fence)
+{
+ mark_external(rq);
+ return i915_sw_fence_await_dma_fence(&rq->submit, fence,
+ i915_fence_context_timeout(rq->i915,
+ fence->context),
+ I915_FENCE_GFP);
+}
+
+static int
+i915_request_await_external(struct i915_request *rq, struct dma_fence *fence)
+{
+ struct dma_fence *iter;
+ int err = 0;
+
+ if (!to_dma_fence_chain(fence))
+ return __i915_request_await_external(rq, fence);
+
+ dma_fence_chain_for_each(iter, fence) {
+ struct dma_fence_chain *chain = to_dma_fence_chain(iter);
+
+ if (!dma_fence_is_i915(chain->fence)) {
+ err = __i915_request_await_external(rq, iter);
+ break;
+ }
+
+ err = i915_request_await_dma_fence(rq, chain->fence);
+ if (err < 0)
+ break;
+ }
+
+ dma_fence_put(iter);
+ return err;
+}
+
int
i915_request_await_execution(struct i915_request *rq,
struct dma_fence *fence,
@@ -1225,6 +1263,9 @@ i915_request_await_execution(struct i915_request *rq,
continue;
}
+ if (fence->context == rq->fence.context)
+ continue;
+
/*
* We don't squash repeated fence dependencies here as we
* want to run our callback in all cases.
@@ -1235,11 +1276,119 @@ i915_request_await_execution(struct i915_request *rq,
to_request(fence),
hook);
else
- ret = i915_sw_fence_await_dma_fence(&rq->submit, fence,
- I915_FENCE_TIMEOUT,
- GFP_KERNEL);
+ ret = i915_request_await_external(rq, fence);
+ if (ret < 0)
+ return ret;
+ } while (--nchild);
+
+ return 0;
+}
+
+static int
+await_request_submit(struct i915_request *to, struct i915_request *from)
+{
+ /*
+ * If we are waiting on a virtual engine, then it may be
+ * constrained to execute on a single engine *prior* to submission.
+ * When it is submitted, it will be first submitted to the virtual
+ * engine and then passed to the physical engine. We cannot allow
+ * the waiter to be submitted immediately to the physical engine
+ * as it may then bypass the virtual request.
+ */
+ if (to->engine == READ_ONCE(from->engine))
+ return i915_sw_fence_await_sw_fence_gfp(&to->submit,
+ &from->submit,
+ I915_FENCE_GFP);
+ else
+ return __i915_request_await_execution(to, from, NULL);
+}
+
+static int
+i915_request_await_request(struct i915_request *to, struct i915_request *from)
+{
+ int ret;
+
+ GEM_BUG_ON(to == from);
+ GEM_BUG_ON(to->timeline == from->timeline);
+
+ if (i915_request_completed(from)) {
+ i915_sw_fence_set_error_once(&to->submit, from->fence.error);
+ return 0;
+ }
+
+ if (to->engine->schedule) {
+ ret = i915_sched_node_add_dependency(&to->sched,
+ &from->sched,
+ I915_DEPENDENCY_EXTERNAL);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (is_power_of_2(to->execution_mask | READ_ONCE(from->execution_mask)))
+ ret = await_request_submit(to, from);
+ else
+ ret = emit_semaphore_wait(to, from, I915_FENCE_GFP);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+int
+i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
+{
+ struct dma_fence **child = &fence;
+ unsigned int nchild = 1;
+ int ret;
+
+ /*
+ * Note that if the fence-array was created in signal-on-any mode,
+ * we should *not* decompose it into its individual fences. However,
+ * we don't currently store which mode the fence-array is operating
+ * in. Fortunately, the only user of signal-on-any is private to
+ * amdgpu and we should not see any incoming fence-array from
+ * sync-file being in signal-on-any mode.
+ */
+ if (dma_fence_is_array(fence)) {
+ struct dma_fence_array *array = to_dma_fence_array(fence);
+
+ child = array->fences;
+ nchild = array->num_fences;
+ GEM_BUG_ON(!nchild);
+ }
+
+ do {
+ fence = *child++;
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+ i915_sw_fence_set_error_once(&rq->submit, fence->error);
+ continue;
+ }
+
+ /*
+ * Requests on the same timeline are explicitly ordered, along
+ * with their dependencies, by i915_request_add() which ensures
+ * that requests are submitted in-order through each ring.
+ */
+ if (fence->context == rq->fence.context)
+ continue;
+
+ /* Squash repeated waits to the same timelines */
+ if (fence->context &&
+ intel_timeline_sync_is_later(i915_request_timeline(rq),
+ fence))
+ continue;
+
+ if (dma_fence_is_i915(fence))
+ ret = i915_request_await_request(rq, to_request(fence));
+ else
+ ret = i915_request_await_external(rq, fence);
if (ret < 0)
return ret;
+
+ /* Record the latest fence used against each timeline */
+ if (fence->context)
+ intel_timeline_sync_set(i915_request_timeline(rq),
+ fence);
} while (--nchild);
return 0;
@@ -1445,14 +1594,7 @@ void i915_request_add(struct i915_request *rq)
attr = ctx->sched;
rcu_read_unlock();
- if (!(rq->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN))
- attr.priority |= I915_PRIORITY_NOSEMAPHORE;
- if (list_empty(&rq->sched.signalers_list))
- attr.priority |= I915_PRIORITY_WAIT;
-
- local_bh_disable();
__i915_request_queue(rq, &attr);
- local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
mutex_unlock(&tl->mutex);
}
@@ -1636,7 +1778,6 @@ long i915_request_wait(struct i915_request *rq,
if (flags & I915_WAIT_PRIORITY) {
if (!i915_request_started(rq) && INTEL_GEN(rq->i915) >= 6)
intel_rps_boost(rq);
- i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT);
}
wait.tsk = current;
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 3c552bfea67a..8ec7ee4dbadc 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -84,19 +84,26 @@ enum {
I915_FENCE_FLAG_PQUEUE,
/*
- * I915_FENCE_FLAG_SIGNAL - this request is currently on signal_list
+ * I915_FENCE_FLAG_HOLD - this request is currently on hold
*
- * Internal bookkeeping used by the breadcrumb code to track when
- * a request is on the various signal_list.
+ * This request has been suspended, pending an ongoing investigation.
*/
- I915_FENCE_FLAG_SIGNAL,
+ I915_FENCE_FLAG_HOLD,
/*
- * I915_FENCE_FLAG_HOLD - this request is currently on hold
+ * I915_FENCE_FLAG_INITIAL_BREADCRUMB - this request has the initial
+ * breadcrumb that marks the end of semaphore waits and start of the
+ * user payload.
+ */
+ I915_FENCE_FLAG_INITIAL_BREADCRUMB,
+
+ /*
+ * I915_FENCE_FLAG_SIGNAL - this request is currently on signal_list
*
- * This request has been suspended, pending an ongoing investigation.
+ * Internal bookkeeping used by the breadcrumb code to track when
+ * a request is on the various signal_list.
*/
- I915_FENCE_FLAG_HOLD,
+ I915_FENCE_FLAG_SIGNAL,
/*
* I915_FENCE_FLAG_NOPREEMPT - this request should not be preempted
@@ -209,7 +216,6 @@ struct i915_request {
};
struct list_head execute_cb;
struct i915_sw_fence semaphore;
- struct irq_work semaphore_work;
/*
* A list of everyone we wait upon, and everyone who waits upon us.
@@ -300,6 +306,8 @@ static inline bool dma_fence_is_i915(const struct dma_fence *fence)
return fence->ops == &i915_fence_ops;
}
+struct kmem_cache *i915_request_slab_cache(void);
+
struct i915_request * __must_check
__i915_request_create(struct intel_context *ce, gfp_t gfp);
struct i915_request * __must_check
@@ -388,6 +396,12 @@ static inline bool i915_request_in_priority_queue(const struct i915_request *rq)
return test_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
}
+static inline bool
+i915_request_has_initial_breadcrumb(const struct i915_request *rq)
+{
+ return test_bit(I915_FENCE_FLAG_INITIAL_BREADCRUMB, &rq->fence.flags);
+}
+
/**
* Returns true if seq1 is later than seq2.
*/
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index f0a9e8958ca0..cbb880b10c65 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -51,11 +51,11 @@ static void assert_priolists(struct intel_engine_execlists * const execlists)
GEM_BUG_ON(rb_first_cached(&execlists->queue) !=
rb_first(&execlists->queue.rb_root));
- last_prio = (INT_MAX >> I915_USER_PRIORITY_SHIFT) + 1;
+ last_prio = INT_MAX;
for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
const struct i915_priolist *p = to_priolist(rb);
- GEM_BUG_ON(p->priority >= last_prio);
+ GEM_BUG_ON(p->priority > last_prio);
last_prio = p->priority;
GEM_BUG_ON(!p->used);
@@ -174,7 +174,7 @@ sched_lock_engine(const struct i915_sched_node *node,
static inline int rq_prio(const struct i915_request *rq)
{
- return rq->sched.attr.priority | __NO_PREEMPTION;
+ return rq->sched.attr.priority;
}
static inline bool need_preempt(int prio, int active)
@@ -209,8 +209,6 @@ static void kick_submission(struct intel_engine_cs *engine,
if (!inflight)
goto unlock;
- engine->execlists.queue_priority_hint = prio;
-
/*
* If we are already the currently executing context, don't
* bother evaluating if we should preempt ourselves.
@@ -218,6 +216,14 @@ static void kick_submission(struct intel_engine_cs *engine,
if (inflight->context == rq->context)
goto unlock;
+ ENGINE_TRACE(engine,
+ "bumping queue-priority-hint:%d for rq:%llx:%lld, inflight:%llx:%lld prio %d\n",
+ prio,
+ rq->fence.context, rq->fence.seqno,
+ inflight->fence.context, inflight->fence.seqno,
+ inflight->sched.attr.priority);
+
+ engine->execlists.queue_priority_hint = prio;
if (need_preempt(prio, rq_prio(inflight)))
tasklet_hi_schedule(&engine->execlists.tasklet);
@@ -428,25 +434,12 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
dep->waiter = node;
dep->flags = flags;
- /* Keep track of whether anyone on this chain has a semaphore */
- if (signal->flags & I915_SCHED_HAS_SEMAPHORE_CHAIN &&
- !node_started(signal))
- node->flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
-
/* All set, now publish. Beware the lockless walkers. */
list_add_rcu(&dep->signal_link, &node->signalers_list);
list_add_rcu(&dep->wait_link, &signal->waiters_list);
- /*
- * As we do not allow WAIT to preempt inflight requests,
- * once we have executed a request, along with triggering
- * any execution callbacks, we must preserve its ordering
- * within the non-preemptible FIFO.
- */
- BUILD_BUG_ON(__NO_PREEMPTION & ~I915_PRIORITY_MASK);
- if (flags & I915_DEPENDENCY_EXTERNAL)
- __bump_priority(signal, __NO_PREEMPTION);
-
+ /* Propagate the chains */
+ node->flags |= signal->flags;
ret = true;
}
@@ -465,10 +458,14 @@ int i915_sched_node_add_dependency(struct i915_sched_node *node,
if (!dep)
return -ENOMEM;
+ local_bh_disable();
+
if (!__i915_sched_node_add_dependency(node, signal, dep,
flags | I915_DEPENDENCY_ALLOC))
i915_dependency_free(dep);
+ local_bh_enable(); /* kick submission tasklet */
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_scheduler_types.h b/drivers/gpu/drm/i915/i915_scheduler_types.h
index 7186875088a0..f72e6c397b08 100644
--- a/drivers/gpu/drm/i915/i915_scheduler_types.h
+++ b/drivers/gpu/drm/i915/i915_scheduler_types.h
@@ -65,7 +65,7 @@ struct i915_sched_node {
struct list_head link;
struct i915_sched_attr attr;
unsigned int flags;
-#define I915_SCHED_HAS_SEMAPHORE_CHAIN BIT(0)
+#define I915_SCHED_HAS_EXTERNAL_CHAIN BIT(0)
intel_engine_mask_t semaphores;
};
diff --git a/drivers/gpu/drm/i915/i915_selftest.h b/drivers/gpu/drm/i915/i915_selftest.h
index 98bcb6fa0ab4..d53d207ab6eb 100644
--- a/drivers/gpu/drm/i915/i915_selftest.h
+++ b/drivers/gpu/drm/i915/i915_selftest.h
@@ -133,4 +133,6 @@ bool __igt_timeout(unsigned long timeout, const char *fmt, ...);
#define igt_timeout(t, fmt, ...) \
__igt_timeout((t), KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
+void igt_hexdump(const void *buf, size_t len);
+
#endif /* !__I915_SELFTEST_H__ */
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index a3d38e089b6e..295b9829e2da 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -421,7 +421,7 @@ static void timer_i915_sw_fence_wake(struct timer_list *t)
if (!fence)
return;
- pr_notice("Asynchronous wait on fence %s:%s:%llx timed out (hint:%pS)\n",
+ pr_notice("Asynchronous wait on fence %s:%s:%llx timed out (hint:%ps)\n",
cb->dma->ops->get_driver_name(cb->dma),
cb->dma->ops->get_timeline_name(cb->dma),
cb->dma->seqno,
@@ -546,13 +546,11 @@ int __i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
cb->fence = fence;
i915_sw_fence_await(fence);
- ret = dma_fence_add_callback(dma, &cb->base, __dma_i915_sw_fence_wake);
- if (ret == 0) {
- ret = 1;
- } else {
+ ret = 1;
+ if (dma_fence_add_callback(dma, &cb->base, __dma_i915_sw_fence_wake)) {
+ /* fence already signaled */
__dma_i915_sw_fence_wake(dma, &cb->base);
- if (ret == -ENOENT) /* fence already signaled */
- ret = 0;
+ ret = 0;
}
return ret;
diff --git a/drivers/gpu/drm/i915/i915_sw_fence_work.c b/drivers/gpu/drm/i915/i915_sw_fence_work.c
index 997b2998f1f2..a3a81bb8f2c3 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence_work.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence_work.c
@@ -38,7 +38,10 @@ fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
if (!f->dma.error) {
dma_fence_get(&f->dma);
- queue_work(system_unbound_wq, &f->work);
+ if (test_bit(DMA_FENCE_WORK_IMM, &f->dma.flags))
+ fence_work(&f->work);
+ else
+ queue_work(system_unbound_wq, &f->work);
} else {
fence_complete(f);
}
diff --git a/drivers/gpu/drm/i915/i915_sw_fence_work.h b/drivers/gpu/drm/i915/i915_sw_fence_work.h
index 3a22b287e201..2c409f11c5c5 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence_work.h
+++ b/drivers/gpu/drm/i915/i915_sw_fence_work.h
@@ -32,6 +32,10 @@ struct dma_fence_work {
const struct dma_fence_work_ops *ops;
};
+enum {
+ DMA_FENCE_WORK_IMM = DMA_FENCE_FLAG_USER_BITS,
+};
+
void dma_fence_work_init(struct dma_fence_work *f,
const struct dma_fence_work_ops *ops);
int dma_fence_work_chain(struct dma_fence_work *f, struct dma_fence *signal);
@@ -41,4 +45,23 @@ static inline void dma_fence_work_commit(struct dma_fence_work *f)
i915_sw_fence_commit(&f->chain);
}
+/**
+ * dma_fence_work_commit_imm: Commit the fence, and if possible execute locally.
+ * @f: the fenced worker
+ *
+ * Instead of always scheduling a worker to execute the callback (see
+ * dma_fence_work_commit()), we try to execute the callback immediately in
+ * the local context. It is required that the fence be committed before it
+ * is published, and that no other threads try to tamper with the number
+ * of asynchronous waits on the fence (or else the callback will be
+ * executed in the wrong context, i.e. not the caller's).
+ */
+static inline void dma_fence_work_commit_imm(struct dma_fence_work *f)
+{
+ if (atomic_read(&f->chain.pending) <= 1)
+ __set_bit(DMA_FENCE_WORK_IMM, &f->dma.flags);
+
+ dma_fence_work_commit(f);
+}
+
#endif /* I915_SW_FENCE_WORK_H */
diff --git a/drivers/gpu/drm/i915/i915_switcheroo.c b/drivers/gpu/drm/i915/i915_switcheroo.c
index ed69b5d4a375..b3a24eac21f1 100644
--- a/drivers/gpu/drm/i915/i915_switcheroo.c
+++ b/drivers/gpu/drm/i915/i915_switcheroo.c
@@ -20,14 +20,14 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev,
}
if (state == VGA_SWITCHEROO_ON) {
- pr_info("switched on\n");
+ drm_info(&i915->drm, "switched on\n");
i915->drm.switch_power_state = DRM_SWITCH_POWER_CHANGING;
/* i915 resume handler doesn't set to D0 */
pci_set_power_state(pdev, PCI_D0);
i915_resume_switcheroo(i915);
i915->drm.switch_power_state = DRM_SWITCH_POWER_ON;
} else {
- pr_info("switched off\n");
+ drm_info(&i915->drm, "switched off\n");
i915->drm.switch_power_state = DRM_SWITCH_POWER_CHANGING;
i915_suspend_switcheroo(i915, pmm);
i915->drm.switch_power_state = DRM_SWITCH_POWER_OFF;
diff --git a/drivers/gpu/drm/i915/i915_utils.c b/drivers/gpu/drm/i915/i915_utils.c
index 029854ae65fc..e28eae4a8f70 100644
--- a/drivers/gpu/drm/i915/i915_utils.c
+++ b/drivers/gpu/drm/i915/i915_utils.c
@@ -101,5 +101,6 @@ void set_timer_ms(struct timer_list *t, unsigned long timeout)
*/
barrier();
- mod_timer(t, jiffies + timeout);
+ /* Keep t->expires = 0 reserved to indicate a canceled timer. */
+ mod_timer(t, jiffies + timeout ?: 1);
}
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 2cd7a7e87c0a..fc14ebf9a0b7 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -522,7 +522,6 @@ void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags)
GEM_BUG_ON(!obj);
i915_vma_unpin(vma);
- i915_vma_close(vma);
if (flags & I915_VMA_RELEASE_MAP)
i915_gem_object_unpin_map(obj);
@@ -610,18 +609,6 @@ bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
return true;
}
-static void assert_bind_count(const struct drm_i915_gem_object *obj)
-{
- /*
- * Combine the assertion that the object is bound and that we have
- * pinned its pages. But we should never have bound the object
- * more than we have pinned its pages. (For complete accuracy, we
- * assume that no else is pinning the pages, but as a rough assertion
- * that we will not run into problems later, this will do!)
- */
- GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < atomic_read(&obj->bind_count));
-}
-
/**
* i915_vma_insert - finds a slot for the vma in its address space
* @vma: the vma
@@ -740,12 +727,6 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));
- if (vma->obj) {
- struct drm_i915_gem_object *obj = vma->obj;
-
- atomic_inc(&obj->bind_count);
- assert_bind_count(obj);
- }
list_add_tail(&vma->vm_link, &vma->vm->bound_list);
return 0;
@@ -763,12 +744,6 @@ i915_vma_detach(struct i915_vma *vma)
* it to be reaped by the shrinker.
*/
list_del(&vma->vm_link);
- if (vma->obj) {
- struct drm_i915_gem_object *obj = vma->obj;
-
- assert_bind_count(obj);
- atomic_dec(&obj->bind_count);
- }
}
static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
@@ -915,11 +890,30 @@ int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
if (flags & PIN_GLOBAL)
wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
- /* No more allocations allowed once we hold vm->mutex */
- err = mutex_lock_interruptible(&vma->vm->mutex);
+ /*
+ * Differentiate between user/kernel vma inside the aliasing-ppgtt.
+ *
+ * We conflate the Global GTT with the user's vma when using the
+ * aliasing-ppgtt, but it is still vitally important to try and
+ * keep the use cases distinct. For example, userptr objects are
+ * not allowed inside the Global GTT as that will cause lock
+ * inversions when we have to evict them from the mmu_notifier callbacks -
+ * but they are allowed to be part of the user ppGTT which can never
+ * be mapped. As such we try to give the distinct users of the same
+ * mutex, distinct lockclasses [equivalent to how we keep i915_ggtt
+ * and i915_ppgtt separate].
+ *
+ * NB this may cause us to mask real lock inversions -- while the
+ * code is safe today, lockdep may not be able to spot future
+ * transgressions.
+ */
+ err = mutex_lock_interruptible_nested(&vma->vm->mutex,
+ !(flags & PIN_GLOBAL));
if (err)
goto err_fence;
+ /* No more allocations allowed now we hold vm->mutex */
+
if (unlikely(i915_vma_is_closed(vma))) {
err = -ENOENT;
goto err_unlock;
@@ -982,7 +976,7 @@ err_unlock:
mutex_unlock(&vma->vm->mutex);
err_fence:
if (work)
- dma_fence_work_commit(&work->base);
+ dma_fence_work_commit_imm(&work->base);
if (wakeref)
intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
err_pages:
@@ -1028,13 +1022,8 @@ int i915_ggtt_pin(struct i915_vma *vma, u32 align, unsigned int flags)
} while (1);
}
-void i915_vma_close(struct i915_vma *vma)
+static void __vma_close(struct i915_vma *vma, struct intel_gt *gt)
{
- struct intel_gt *gt = vma->vm->gt;
- unsigned long flags;
-
- GEM_BUG_ON(i915_vma_is_closed(vma));
-
/*
* We defer actually closing, unbinding and destroying the VMA until
* the next idle point, or if the object is freed in the meantime. By
@@ -1047,9 +1036,25 @@ void i915_vma_close(struct i915_vma *vma)
* causing us to rebind the VMA once more. This ends up being a lot
* of wasted work for the steady state.
*/
- spin_lock_irqsave(&gt->closed_lock, flags);
+ GEM_BUG_ON(i915_vma_is_closed(vma));
list_add(&vma->closed_link, &gt->closed_vma);
- spin_unlock_irqrestore(&gt->closed_lock, flags);
+}
+
+void i915_vma_close(struct i915_vma *vma)
+{
+ struct intel_gt *gt = vma->vm->gt;
+ unsigned long flags;
+
+ if (i915_vma_is_ggtt(vma))
+ return;
+
+ GEM_BUG_ON(!atomic_read(&vma->open_count));
+ if (atomic_dec_and_lock_irqsave(&vma->open_count,
+ &gt->closed_lock,
+ flags)) {
+ __vma_close(vma, gt);
+ spin_unlock_irqrestore(&gt->closed_lock, flags);
+ }
}
static void __i915_vma_remove_closed(struct i915_vma *vma)
@@ -1174,7 +1179,8 @@ int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
GEM_BUG_ON(!i915_vma_is_pinned(vma));
/* Wait for the vma to be bound before we start! */
- err = i915_request_await_active(rq, &vma->active, 0);
+ err = i915_request_await_active(rq, &vma->active,
+ I915_ACTIVE_AWAIT_EXCL);
if (err)
return err;
@@ -1215,6 +1221,10 @@ int i915_vma_move_to_active(struct i915_vma *vma,
dma_resv_add_shared_fence(vma->resv, &rq->fence);
obj->write_domain = 0;
}
+
+ if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence)
+ i915_active_add_request(&vma->fence->active, rq);
+
obj->read_domains |= I915_GEM_GPU_DOMAINS;
obj->mm.dirty = true;
@@ -1249,6 +1259,9 @@ int __i915_vma_unbind(struct i915_vma *vma)
GEM_BUG_ON(i915_vma_is_active(vma));
if (i915_vma_is_map_and_fenceable(vma)) {
+ /* Force a pagefault for domain tracking on next user access */
+ i915_vma_revoke_mmap(vma);
+
/*
* Check that we have flushed all writes through the GGTT
* before the unbind, other due to non-strict nature of those
@@ -1265,12 +1278,7 @@ int __i915_vma_unbind(struct i915_vma *vma)
i915_vma_flush_writes(vma);
/* release the fence reg _after_ flushing */
- ret = i915_vma_revoke_fence(vma);
- if (ret)
- return ret;
-
- /* Force a pagefault for domain tracking on next user access */
- i915_vma_revoke_mmap(vma);
+ i915_vma_revoke_fence(vma);
__i915_vma_iounmap(vma);
clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
@@ -1315,7 +1323,7 @@ int i915_vma_unbind(struct i915_vma *vma)
/* XXX not always required: nop_clear_range */
wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
- err = mutex_lock_interruptible(&vm->mutex);
+ err = mutex_lock_interruptible_nested(&vma->vm->mutex, !wakeref);
if (err)
goto out_rpm;
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index e1ced1df13e1..8ad1daabcd58 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -30,10 +30,10 @@
#include <drm/drm_mm.h>
+#include "gt/intel_ggtt_fencing.h"
#include "gem/i915_gem_object.h"
#include "i915_gem_gtt.h"
-#include "i915_gem_fence_reg.h"
#include "i915_active.h"
#include "i915_request.h"
@@ -326,7 +326,7 @@ static inline struct page *i915_vma_first_page(struct i915_vma *vma)
* True if the vma has a fence, false otherwise.
*/
int __must_check i915_vma_pin_fence(struct i915_vma *vma);
-int __must_check i915_vma_revoke_fence(struct i915_vma *vma);
+void i915_vma_revoke_fence(struct i915_vma *vma);
int __i915_vma_pin_fence(struct i915_vma *vma);
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index d7fe12734db8..8a635bd4d5d8 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -98,6 +98,7 @@ void intel_device_info_print_static(const struct intel_device_info *info,
drm_printf(p, "platform: %s\n", intel_platform_name(info->platform));
drm_printf(p, "ppgtt-size: %d\n", info->ppgtt_size);
drm_printf(p, "ppgtt-type: %d\n", info->ppgtt_type);
+ drm_printf(p, "dma_mask_size: %u\n", info->dma_mask_size);
#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->name));
DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
@@ -135,8 +136,8 @@ void intel_device_info_print_runtime(const struct intel_runtime_info *info,
sseu_dump(&info->sseu, p);
drm_printf(p, "rawclk rate: %u kHz\n", info->rawclk_freq);
- drm_printf(p, "CS timestamp frequency: %u kHz\n",
- info->cs_timestamp_frequency_khz);
+ drm_printf(p, "CS timestamp frequency: %u Hz\n",
+ info->cs_timestamp_frequency_hz);
}
static int sseu_eu_idx(const struct sseu_dev_info *sseu, int slice,
@@ -677,12 +678,12 @@ static u32 read_reference_ts_freq(struct drm_i915_private *dev_priv)
base_freq = ((ts_override & GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_MASK) >>
GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_SHIFT) + 1;
- base_freq *= 1000;
+ base_freq *= 1000000;
frac_freq = ((ts_override &
GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_MASK) >>
GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_SHIFT);
- frac_freq = 1000 / (frac_freq + 1);
+ frac_freq = 1000000 / (frac_freq + 1);
return base_freq + frac_freq;
}
@@ -690,8 +691,8 @@ static u32 read_reference_ts_freq(struct drm_i915_private *dev_priv)
static u32 gen10_get_crystal_clock_freq(struct drm_i915_private *dev_priv,
u32 rpm_config_reg)
{
- u32 f19_2_mhz = 19200;
- u32 f24_mhz = 24000;
+ u32 f19_2_mhz = 19200000;
+ u32 f24_mhz = 24000000;
u32 crystal_clock = (rpm_config_reg &
GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;
@@ -710,10 +711,10 @@ static u32 gen10_get_crystal_clock_freq(struct drm_i915_private *dev_priv,
static u32 gen11_get_crystal_clock_freq(struct drm_i915_private *dev_priv,
u32 rpm_config_reg)
{
- u32 f19_2_mhz = 19200;
- u32 f24_mhz = 24000;
- u32 f25_mhz = 25000;
- u32 f38_4_mhz = 38400;
+ u32 f19_2_mhz = 19200000;
+ u32 f24_mhz = 24000000;
+ u32 f25_mhz = 25000000;
+ u32 f38_4_mhz = 38400000;
u32 crystal_clock = (rpm_config_reg &
GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;
@@ -735,9 +736,9 @@ static u32 gen11_get_crystal_clock_freq(struct drm_i915_private *dev_priv,
static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
{
- u32 f12_5_mhz = 12500;
- u32 f19_2_mhz = 19200;
- u32 f24_mhz = 24000;
+ u32 f12_5_mhz = 12500000;
+ u32 f19_2_mhz = 19200000;
+ u32 f24_mhz = 24000000;
if (INTEL_GEN(dev_priv) <= 4) {
/* PRMs say:
@@ -746,7 +747,7 @@ static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
* hclks." (through the “Clocking Configuration”
* (“CLKCFG”) MCHBAR register)
*/
- return RUNTIME_INFO(dev_priv)->rawclk_freq / 16;
+ return RUNTIME_INFO(dev_priv)->rawclk_freq * 1000 / 16;
} else if (INTEL_GEN(dev_priv) <= 8) {
/* PRMs say:
*
@@ -980,35 +981,32 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
drm_info(&dev_priv->drm,
"Display fused off, disabling\n");
info->pipe_mask = 0;
+ info->cpu_transcoder_mask = 0;
} else if (fuse_strap & IVB_PIPE_C_DISABLE) {
drm_info(&dev_priv->drm, "PipeC fused off\n");
info->pipe_mask &= ~BIT(PIPE_C);
+ info->cpu_transcoder_mask &= ~BIT(TRANSCODER_C);
}
} else if (HAS_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 9) {
u32 dfsm = I915_READ(SKL_DFSM);
- u8 enabled_mask = info->pipe_mask;
-
- if (dfsm & SKL_DFSM_PIPE_A_DISABLE)
- enabled_mask &= ~BIT(PIPE_A);
- if (dfsm & SKL_DFSM_PIPE_B_DISABLE)
- enabled_mask &= ~BIT(PIPE_B);
- if (dfsm & SKL_DFSM_PIPE_C_DISABLE)
- enabled_mask &= ~BIT(PIPE_C);
- if (INTEL_GEN(dev_priv) >= 12 &&
- (dfsm & TGL_DFSM_PIPE_D_DISABLE))
- enabled_mask &= ~BIT(PIPE_D);
- /*
- * At least one pipe should be enabled and if there are
- * disabled pipes, they should be the last ones, with no holes
- * in the mask.
- */
- if (enabled_mask == 0 || !is_power_of_2(enabled_mask + 1))
- drm_err(&dev_priv->drm,
- "invalid pipe fuse configuration: enabled_mask=0x%x\n",
- enabled_mask);
- else
- info->pipe_mask = enabled_mask;
+ if (dfsm & SKL_DFSM_PIPE_A_DISABLE) {
+ info->pipe_mask &= ~BIT(PIPE_A);
+ info->cpu_transcoder_mask &= ~BIT(TRANSCODER_A);
+ }
+ if (dfsm & SKL_DFSM_PIPE_B_DISABLE) {
+ info->pipe_mask &= ~BIT(PIPE_B);
+ info->cpu_transcoder_mask &= ~BIT(TRANSCODER_B);
+ }
+ if (dfsm & SKL_DFSM_PIPE_C_DISABLE) {
+ info->pipe_mask &= ~BIT(PIPE_C);
+ info->cpu_transcoder_mask &= ~BIT(TRANSCODER_C);
+ }
+ if (INTEL_GEN(dev_priv) >= 12 &&
+ (dfsm & TGL_DFSM_PIPE_D_DISABLE)) {
+ info->pipe_mask &= ~BIT(PIPE_D);
+ info->cpu_transcoder_mask &= ~BIT(TRANSCODER_D);
+ }
if (dfsm & SKL_DFSM_DISPLAY_HDCP_DISABLE)
info->display.has_hdcp = 0;
@@ -1050,11 +1048,11 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
drm_dbg(&dev_priv->drm, "rawclk rate: %d kHz\n", runtime->rawclk_freq);
/* Initialize command stream timestamp frequency */
- runtime->cs_timestamp_frequency_khz =
+ runtime->cs_timestamp_frequency_hz =
read_timestamp_frequency(dev_priv);
- if (runtime->cs_timestamp_frequency_khz) {
+ if (runtime->cs_timestamp_frequency_hz) {
runtime->cs_timestamp_period_ns =
- div_u64(1e6, runtime->cs_timestamp_frequency_khz);
+ i915_cs_timestamp_ticks_to_ns(dev_priv, 1);
drm_dbg(&dev_priv->drm,
"CS timestamp wraparound in %lldms\n",
div_u64(mul_u32_u32(runtime->cs_timestamp_period_ns,
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index 1ecb9df2de91..62e03ffa377e 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -158,6 +158,8 @@ struct intel_device_info {
enum intel_platform platform;
+ unsigned int dma_mask_size; /* available DMA address bits */
+
enum intel_ppgtt_type ppgtt_type;
unsigned int ppgtt_size; /* log2, e.g. 31/32/48 bits */
@@ -168,6 +170,7 @@ struct intel_device_info {
u32 display_mmio_offset;
u8 pipe_mask;
+ u8 cpu_transcoder_mask;
#define DEFINE_FLAG(name) u8 name:1
DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG);
@@ -218,7 +221,7 @@ struct intel_runtime_info {
u32 rawclk_freq;
- u32 cs_timestamp_frequency_khz;
+ u32 cs_timestamp_frequency_hz;
u32 cs_timestamp_period_ns;
/* Media engine access to SFC per instance */
diff --git a/drivers/gpu/drm/i915/intel_dram.c b/drivers/gpu/drm/i915/intel_dram.c
index 6b922efb1d7c..8aa12cad93ce 100644
--- a/drivers/gpu/drm/i915/intel_dram.c
+++ b/drivers/gpu/drm/i915/intel_dram.c
@@ -495,6 +495,5 @@ void intel_dram_edram_detect(struct drm_i915_private *i915)
else
i915->edram_size_mb = gen9_edram_size_mb(i915, edram_cap);
- dev_info(i915->drm.dev,
- "Found %uMB of eDRAM\n", i915->edram_size_mb);
+ drm_info(&i915->drm, "Found %uMB of eDRAM\n", i915->edram_size_mb);
}
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index a52986a9e7a6..696491d71a1d 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -43,6 +43,7 @@
#include "i915_fixed.h"
#include "i915_irq.h"
#include "i915_trace.h"
+#include "display/intel_bw.h"
#include "intel_pm.h"
#include "intel_sideband.h"
#include "../../../platform/x86/intel_ips.h"
@@ -3637,10 +3638,6 @@ static bool skl_needs_memory_bw_wa(struct drm_i915_private *dev_priv)
static bool
intel_has_sagv(struct drm_i915_private *dev_priv)
{
- /* HACK! */
- if (IS_GEN(dev_priv, 12))
- return false;
-
return (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) &&
dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED;
}
@@ -3757,42 +3754,120 @@ intel_disable_sagv(struct drm_i915_private *dev_priv)
return 0;
}
-bool intel_can_enable_sagv(struct intel_atomic_state *state)
+void intel_sagv_pre_plane_update(struct intel_atomic_state *state)
{
- struct drm_device *dev = state->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *crtc;
- struct intel_plane *plane;
- struct intel_crtc_state *crtc_state;
- enum pipe pipe;
- int level, latency;
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ const struct intel_bw_state *new_bw_state;
+ const struct intel_bw_state *old_bw_state;
+ u32 new_mask = 0;
+ /*
+ * Just return if we can't control SAGV or don't have it.
+ * This is different from the situation where we have SAGV but can't
+ * afford it due to DBuf limitations: if SAGV is completely disabled
+ * in the BIOS, we are not even allowed to send a PCode request, as it
+ * would fail with an error. So we have to check for that here.
+ */
if (!intel_has_sagv(dev_priv))
- return false;
+ return;
+
+ new_bw_state = intel_atomic_get_new_bw_state(state);
+ if (!new_bw_state)
+ return;
+ if (INTEL_GEN(dev_priv) < 11 && !intel_can_enable_sagv(dev_priv, new_bw_state)) {
+ intel_disable_sagv(dev_priv);
+ return;
+ }
+
+ old_bw_state = intel_atomic_get_old_bw_state(state);
/*
- * If there are no active CRTCs, no additional checks need be performed
+ * Nothing to mask
*/
- if (hweight8(state->active_pipes) == 0)
- return true;
+ if (new_bw_state->qgv_points_mask == old_bw_state->qgv_points_mask)
+ return;
+
+ new_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask;
+
+ /*
+ * If the resulting mask is zero there is nothing to mask; at most we
+ * can unmask, which is handled in intel_sagv_post_plane_update().
+ */
+ if (!new_mask)
+ return;
+
+ /*
+ * Restrict required qgv points before updating the configuration.
+ * According to BSpec we can't mask and unmask qgv points at the same
+ * time. Also masking should be done before updating the configuration
+ * and unmasking afterwards.
+ */
+ icl_pcode_restrict_qgv_points(dev_priv, new_mask);
+}
+
+void intel_sagv_post_plane_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ const struct intel_bw_state *new_bw_state;
+ const struct intel_bw_state *old_bw_state;
+ u32 new_mask = 0;
+
+ /*
+ * Just return if we can't control SAGV or don't have it.
+ * This is different from the situation where we have SAGV but can't
+ * afford it due to DBuf limitations: if SAGV is completely disabled
+ * in the BIOS, we are not even allowed to send a PCode request, as it
+ * would fail with an error. So we have to check for that here.
+ */
+ if (!intel_has_sagv(dev_priv))
+ return;
+
+ new_bw_state = intel_atomic_get_new_bw_state(state);
+ if (!new_bw_state)
+ return;
+
+ if (INTEL_GEN(dev_priv) < 11 && intel_can_enable_sagv(dev_priv, new_bw_state)) {
+ intel_enable_sagv(dev_priv);
+ return;
+ }
+
+ old_bw_state = intel_atomic_get_old_bw_state(state);
+ /*
+ * Nothing to unmask
+ */
+ if (new_bw_state->qgv_points_mask == old_bw_state->qgv_points_mask)
+ return;
+
+ new_mask = new_bw_state->qgv_points_mask;
/*
- * SKL+ workaround: bspec recommends we disable SAGV when we have
- * more then one pipe enabled
+ * Allow required qgv points after updating the configuration.
+ * According to BSpec we can't mask and unmask qgv points at the same
+ * time. Also masking should be done before updating the configuration
+ * and unmasking afterwards.
*/
- if (hweight8(state->active_pipes) > 1)
+ icl_pcode_restrict_qgv_points(dev_priv, new_mask);
+}
+
+static bool skl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_plane *plane;
+ const struct intel_plane_state *plane_state;
+ int level, latency;
+
+ if (!intel_has_sagv(dev_priv))
return false;
- /* Since we're now guaranteed to only have one active CRTC... */
- pipe = ffs(state->active_pipes) - 1;
- crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
- crtc_state = to_intel_crtc_state(crtc->base.state);
+ if (!crtc_state->hw.active)
+ return true;
if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
return false;
- for_each_intel_plane_on_crtc(dev, crtc, plane) {
- struct skl_plane_wm *wm =
+ intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
+ const struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane->id];
/* Skip this plane if it's not enabled */
@@ -3807,7 +3882,7 @@ bool intel_can_enable_sagv(struct intel_atomic_state *state)
latency = dev_priv->wm.skl_latency[level];
if (skl_needs_memory_bw_wa(dev_priv) &&
- plane->base.state->fb->modifier ==
+ plane_state->uapi.fb->modifier ==
I915_FORMAT_MOD_X_TILED)
latency += 15;
@@ -3823,6 +3898,112 @@ bool intel_can_enable_sagv(struct intel_atomic_state *state)
return true;
}
+static bool tgl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ enum plane_id plane_id;
+
+ if (!crtc_state->hw.active)
+ return true;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ const struct skl_ddb_entry *plane_alloc =
+ &crtc_state->wm.skl.plane_ddb_y[plane_id];
+ const struct skl_plane_wm *wm =
+ &crtc_state->wm.skl.optimal.planes[plane_id];
+
+ if (skl_ddb_entry_size(plane_alloc) < wm->sagv_wm0.min_ddb_alloc)
+ return false;
+ }
+
+ return true;
+}
+
+static bool intel_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ if (INTEL_GEN(dev_priv) >= 12)
+ return tgl_crtc_can_enable_sagv(crtc_state);
+ else
+ return skl_crtc_can_enable_sagv(crtc_state);
+}
+
+bool intel_can_enable_sagv(struct drm_i915_private *dev_priv,
+ const struct intel_bw_state *bw_state)
+{
+ if (INTEL_GEN(dev_priv) < 11 &&
+ bw_state->active_pipes && !is_power_of_2(bw_state->active_pipes))
+ return false;
+
+ return bw_state->pipe_sagv_reject == 0;
+}
+
+static int intel_compute_sagv_mask(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ int ret;
+ struct intel_crtc *crtc;
+ struct intel_crtc_state *new_crtc_state;
+ struct intel_bw_state *new_bw_state = NULL;
+ const struct intel_bw_state *old_bw_state = NULL;
+ int i;
+
+ for_each_new_intel_crtc_in_state(state, crtc,
+ new_crtc_state, i) {
+ new_bw_state = intel_atomic_get_bw_state(state);
+ if (IS_ERR(new_bw_state))
+ return PTR_ERR(new_bw_state);
+
+ old_bw_state = intel_atomic_get_old_bw_state(state);
+
+ if (intel_crtc_can_enable_sagv(new_crtc_state))
+ new_bw_state->pipe_sagv_reject &= ~BIT(crtc->pipe);
+ else
+ new_bw_state->pipe_sagv_reject |= BIT(crtc->pipe);
+ }
+
+ if (!new_bw_state)
+ return 0;
+
+ new_bw_state->active_pipes =
+ intel_calc_active_pipes(state, old_bw_state->active_pipes);
+
+ if (new_bw_state->active_pipes != old_bw_state->active_pipes) {
+ ret = intel_atomic_lock_global_state(&new_bw_state->base);
+ if (ret)
+ return ret;
+ }
+
+ for_each_new_intel_crtc_in_state(state, crtc,
+ new_crtc_state, i) {
+ struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal;
+
+ /*
+ * We store use_sagv_wm in the crtc state rather than relying on
+ * that bw state since we have no convenient way to get at the
+ * latter from the plane commit hooks (especially in the legacy
+ * cursor case)
+ */
+ pipe_wm->use_sagv_wm = INTEL_GEN(dev_priv) >= 12 &&
+ intel_can_enable_sagv(dev_priv, new_bw_state);
+ }
+
+ if (intel_can_enable_sagv(dev_priv, new_bw_state) !=
+ intel_can_enable_sagv(dev_priv, old_bw_state)) {
+ ret = intel_atomic_serialize_global_state(&new_bw_state->base);
+ if (ret)
+ return ret;
+ } else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) {
+ ret = intel_atomic_lock_global_state(&new_bw_state->base);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
/*
* Calculate initial DBuf slice offset, based on slice size
 * and mask (i.e. if slice size is 1024 and second slice is enabled
@@ -4016,6 +4197,7 @@ static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
int color_plane);
static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
int level,
+ unsigned int latency,
const struct skl_wm_params *wp,
const struct skl_wm_level *result_prev,
struct skl_wm_level *result /* out */);
@@ -4038,7 +4220,9 @@ skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
drm_WARN_ON(&dev_priv->drm, ret);
for (level = 0; level <= max_level; level++) {
- skl_compute_plane_wm(crtc_state, level, &wp, &wm, &wm);
+ unsigned int latency = dev_priv->wm.skl_latency[level];
+
+ skl_compute_plane_wm(crtc_state, level, latency, &wp, &wm, &wm);
if (wm.min_ddb_alloc == U16_MAX)
break;
@@ -4544,6 +4728,20 @@ icl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
return total_data_rate;
}
+static const struct skl_wm_level *
+skl_plane_wm_level(const struct intel_crtc_state *crtc_state,
+ enum plane_id plane_id,
+ int level)
+{
+ const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
+ const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
+
+ if (level == 0 && pipe_wm->use_sagv_wm)
+ return &wm->sagv_wm0;
+
+ return &wm->wm[level];
+}
+
static int
skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state)
{
@@ -4580,7 +4778,6 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state)
plane_data_rate,
uv_plane_data_rate);
-
skl_ddb_get_pipe_allocation_limits(dev_priv, crtc_state, total_data_rate,
alloc, &num_active);
alloc_size = skl_ddb_entry_size(alloc);
@@ -4780,7 +4977,7 @@ skl_wm_method1(const struct drm_i915_private *dev_priv, u32 pixel_rate,
wm_intermediate_val = latency * pixel_rate * cpp;
ret = div_fixed16(wm_intermediate_val, 1000 * dbuf_block_size);
- if (INTEL_GEN(dev_priv) >= 10)
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
ret = add_fixed16_u32(ret, 1);
return ret;
@@ -4915,18 +5112,19 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
wp->y_min_scanlines,
wp->dbuf_block_size);
- if (INTEL_GEN(dev_priv) >= 10)
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
interm_pbpl++;
wp->plane_blocks_per_line = div_fixed16(interm_pbpl,
wp->y_min_scanlines);
- } else if (wp->x_tiled && IS_GEN(dev_priv, 9)) {
- interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line,
- wp->dbuf_block_size);
- wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
} else {
interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line,
- wp->dbuf_block_size) + 1;
+ wp->dbuf_block_size);
+
+ if (!wp->x_tiled ||
+ INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ interm_pbpl++;
+
wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
}
@@ -4972,12 +5170,12 @@ static bool skl_wm_has_lines(struct drm_i915_private *dev_priv, int level)
static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
int level,
+ unsigned int latency,
const struct skl_wm_params *wp,
const struct skl_wm_level *result_prev,
struct skl_wm_level *result /* out */)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- u32 latency = dev_priv->wm.skl_latency[level];
uint_fixed_16_16_t method1, method2;
uint_fixed_16_16_t selected_result;
u32 res_blocks, res_lines, min_ddb_alloc = 0;
@@ -5106,14 +5304,29 @@ skl_compute_wm_levels(const struct intel_crtc_state *crtc_state,
for (level = 0; level <= max_level; level++) {
struct skl_wm_level *result = &levels[level];
+ unsigned int latency = dev_priv->wm.skl_latency[level];
- skl_compute_plane_wm(crtc_state, level, wm_params,
- result_prev, result);
+ skl_compute_plane_wm(crtc_state, level, latency,
+ wm_params, result_prev, result);
result_prev = result;
}
}
+static void tgl_compute_sagv_wm(const struct intel_crtc_state *crtc_state,
+ const struct skl_wm_params *wm_params,
+ struct skl_plane_wm *plane_wm)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct skl_wm_level *sagv_wm = &plane_wm->sagv_wm0;
+ struct skl_wm_level *levels = plane_wm->wm;
+ unsigned int latency = dev_priv->wm.skl_latency[0] + dev_priv->sagv_block_time_us;
+
+ skl_compute_plane_wm(crtc_state, 0, latency,
+ wm_params, &levels[0],
+ sagv_wm);
+}
+
static void skl_compute_transition_wm(const struct intel_crtc_state *crtc_state,
const struct skl_wm_params *wp,
struct skl_plane_wm *wm)
@@ -5166,10 +5379,6 @@ static void skl_compute_transition_wm(const struct intel_crtc_state *crtc_state,
trans_offset_b;
} else {
res_blocks = wm0_sel_res_b + trans_offset_b;
-
- /* WA BUG:1938466 add one block for non y-tile planes */
- if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_A0))
- res_blocks += 1;
}
/*
@@ -5185,6 +5394,8 @@ static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
enum plane_id plane_id, int color_plane)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id];
struct skl_wm_params wm_params;
int ret;
@@ -5195,6 +5406,10 @@ static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
return ret;
skl_compute_wm_levels(crtc_state, &wm_params, wm->wm);
+
+ if (INTEL_GEN(dev_priv) >= 12)
+ tgl_compute_sagv_wm(crtc_state, &wm_params, wm);
+
skl_compute_transition_wm(crtc_state, &wm_params, wm);
return 0;
@@ -5354,8 +5569,12 @@ void skl_write_plane_wm(struct intel_plane *plane,
&crtc_state->wm.skl.plane_ddb_uv[plane_id];
for (level = 0; level <= max_level; level++) {
+ const struct skl_wm_level *wm_level;
+
+ wm_level = skl_plane_wm_level(crtc_state, plane_id, level);
+
skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane_id, level),
- &wm->wm[level]);
+ wm_level);
}
skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id),
&wm->trans_wm);
@@ -5388,8 +5607,12 @@ void skl_write_cursor_wm(struct intel_plane *plane,
&crtc_state->wm.skl.plane_ddb_y[plane_id];
for (level = 0; level <= max_level; level++) {
+ const struct skl_wm_level *wm_level;
+
+ wm_level = skl_plane_wm_level(crtc_state, plane_id, level);
+
skl_write_wm_level(dev_priv, CUR_WM(pipe, level),
- &wm->wm[level]);
+ wm_level);
}
skl_write_wm_level(dev_priv, CUR_WM_TRANS(pipe), &wm->trans_wm);
@@ -5424,8 +5647,8 @@ static bool skl_plane_wm_equals(struct drm_i915_private *dev_priv,
return skl_wm_level_equals(&wm1->trans_wm, &wm2->trans_wm);
}
-static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
- const struct skl_ddb_entry *b)
+static bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
+ const struct skl_ddb_entry *b)
{
return a->start < b->end && b->start < a->end;
}
@@ -5553,23 +5776,25 @@ skl_print_wm_changes(struct intel_atomic_state *state)
continue;
drm_dbg_kms(&dev_priv->drm,
- "[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm"
- " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm\n",
+ "[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm"
+ " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm\n",
plane->base.base.id, plane->base.name,
enast(old_wm->wm[0].plane_en), enast(old_wm->wm[1].plane_en),
enast(old_wm->wm[2].plane_en), enast(old_wm->wm[3].plane_en),
enast(old_wm->wm[4].plane_en), enast(old_wm->wm[5].plane_en),
enast(old_wm->wm[6].plane_en), enast(old_wm->wm[7].plane_en),
enast(old_wm->trans_wm.plane_en),
+ enast(old_wm->sagv_wm0.plane_en),
enast(new_wm->wm[0].plane_en), enast(new_wm->wm[1].plane_en),
enast(new_wm->wm[2].plane_en), enast(new_wm->wm[3].plane_en),
enast(new_wm->wm[4].plane_en), enast(new_wm->wm[5].plane_en),
enast(new_wm->wm[6].plane_en), enast(new_wm->wm[7].plane_en),
- enast(new_wm->trans_wm.plane_en));
+ enast(new_wm->trans_wm.plane_en),
+ enast(new_wm->sagv_wm0.plane_en));
drm_dbg_kms(&dev_priv->drm,
- "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d"
- " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d\n",
+ "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d"
+ " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d\n",
plane->base.base.id, plane->base.name,
enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].plane_res_l,
enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].plane_res_l,
@@ -5580,6 +5805,7 @@ skl_print_wm_changes(struct intel_atomic_state *state)
enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].plane_res_l,
enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].plane_res_l,
enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.plane_res_l,
+ enast(old_wm->sagv_wm0.ignore_lines), old_wm->sagv_wm0.plane_res_l,
enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].plane_res_l,
enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].plane_res_l,
@@ -5589,37 +5815,42 @@ skl_print_wm_changes(struct intel_atomic_state *state)
enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].plane_res_l,
enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].plane_res_l,
enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].plane_res_l,
- enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.plane_res_l);
+ enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.plane_res_l,
+ enast(new_wm->sagv_wm0.ignore_lines), new_wm->sagv_wm0.plane_res_l);
drm_dbg_kms(&dev_priv->drm,
- "[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d"
- " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n",
+ "[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d"
+ " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n",
plane->base.base.id, plane->base.name,
old_wm->wm[0].plane_res_b, old_wm->wm[1].plane_res_b,
old_wm->wm[2].plane_res_b, old_wm->wm[3].plane_res_b,
old_wm->wm[4].plane_res_b, old_wm->wm[5].plane_res_b,
old_wm->wm[6].plane_res_b, old_wm->wm[7].plane_res_b,
old_wm->trans_wm.plane_res_b,
+ old_wm->sagv_wm0.plane_res_b,
new_wm->wm[0].plane_res_b, new_wm->wm[1].plane_res_b,
new_wm->wm[2].plane_res_b, new_wm->wm[3].plane_res_b,
new_wm->wm[4].plane_res_b, new_wm->wm[5].plane_res_b,
new_wm->wm[6].plane_res_b, new_wm->wm[7].plane_res_b,
- new_wm->trans_wm.plane_res_b);
+ new_wm->trans_wm.plane_res_b,
+ new_wm->sagv_wm0.plane_res_b);
drm_dbg_kms(&dev_priv->drm,
- "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d"
- " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n",
+ "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d"
+ " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n",
plane->base.base.id, plane->base.name,
old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc,
old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc,
old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc,
old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc,
old_wm->trans_wm.min_ddb_alloc,
+ old_wm->sagv_wm0.min_ddb_alloc,
new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc,
new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc,
new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc,
new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc,
- new_wm->trans_wm.min_ddb_alloc);
+ new_wm->trans_wm.min_ddb_alloc,
+ new_wm->sagv_wm0.min_ddb_alloc);
}
}
}
@@ -5780,6 +6011,10 @@ skl_compute_wm(struct intel_atomic_state *state)
if (ret)
return ret;
+ ret = intel_compute_sagv_mask(state);
+ if (ret)
+ return ret;
+
/*
* skl_compute_ddb() will have adjusted the final watermarks
* based on how much ddb is available. Now we can actually
@@ -5876,8 +6111,7 @@ static void ilk_optimize_watermarks(struct intel_atomic_state *state,
mutex_unlock(&dev_priv->wm.wm_mutex);
}
-static inline void skl_wm_level_from_reg_val(u32 val,
- struct skl_wm_level *level)
+static void skl_wm_level_from_reg_val(u32 val, struct skl_wm_level *level)
{
level->plane_en = val & PLANE_WM_EN;
level->ignore_lines = val & PLANE_WM_IGNORE_LINES;
@@ -5909,6 +6143,9 @@ void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
skl_wm_level_from_reg_val(val, &wm->wm[level]);
}
+ if (INTEL_GEN(dev_priv) >= 12)
+ wm->sagv_wm0 = wm->wm[0];
+
if (plane_id != PLANE_CURSOR)
val = I915_READ(PLANE_WM_TRANS(pipe, plane_id));
else
@@ -6850,6 +7087,10 @@ static void tgl_init_clock_gating(struct drm_i915_private *dev_priv)
if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0))
I915_WRITE(GEN9_CLKGATE_DIS_3, I915_READ(GEN9_CLKGATE_DIS_3) |
TGL_VRH_GATING_DIS);
+
+ /* Wa_14011059788:tgl */
+ intel_uncore_rmw(&dev_priv->uncore, GEN10_DFR_RATIO_EN_AND_CHICKEN,
+ 0, DFR_DISABLE);
}
static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
@@ -6882,9 +7123,6 @@ static void cnl_init_clock_gating(struct drm_i915_private *dev_priv)
val = I915_READ(SLICE_UNIT_LEVEL_CLKGATE);
/* ReadHitWriteOnlyDisable:cnl */
val |= RCCUNIT_CLKGATE_DIS;
- /* WaSarbUnitClockGatingDisable:cnl (pre-prod) */
- if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_B0))
- val |= SARBUNIT_CLKGATE_DIS;
I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE, val);
/* Wa_2201832410:cnl */
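(Aside on the SAGV rework above: before the plane update the driver restricts the union of the old and new QGV point masks so that both configurations remain valid while the update is in flight, and afterwards it narrows the restriction to the new mask only. A minimal user-space sketch of that ordering; the helper names below are stand-ins, not i915 functions.)

/* Sketch of the mask ordering used by the SAGV pre/post plane update hooks.
 * Plain user-space C, illustrative only. */
#include <stdint.h>
#include <stdio.h>

static void restrict_qgv_points(uint32_t mask)
{
	/* stand-in for the PCode request that masks QGV points */
	printf("restrict qgv points: 0x%x\n", mask);
}

static void pre_plane_update(uint32_t old_mask, uint32_t new_mask)
{
	uint32_t combined = old_mask | new_mask;

	if (new_mask == old_mask || !combined)
		return;				/* nothing changed / nothing to mask */
	restrict_qgv_points(combined);		/* keep both configs valid during the update */
}

static void post_plane_update(uint32_t old_mask, uint32_t new_mask)
{
	if (new_mask == old_mask)
		return;				/* nothing to unmask */
	restrict_qgv_points(new_mask);		/* drop the points only the old config needed */
}

int main(void)
{
	pre_plane_update(0x3, 0x5);	/* masks 0x7 before the update */
	post_plane_update(0x3, 0x5);	/* relaxes to 0x5 afterwards */
	return 0;
}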
diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h
index d60a85421c5a..614ac7f8d4cc 100644
--- a/drivers/gpu/drm/i915/intel_pm.h
+++ b/drivers/gpu/drm/i915/intel_pm.h
@@ -9,6 +9,7 @@
#include <linux/types.h>
#include "i915_reg.h"
+#include "display/intel_bw.h"
struct drm_device;
struct drm_i915_private;
@@ -41,9 +42,12 @@ void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
struct skl_pipe_wm *out);
void g4x_wm_sanitize(struct drm_i915_private *dev_priv);
void vlv_wm_sanitize(struct drm_i915_private *dev_priv);
-bool intel_can_enable_sagv(struct intel_atomic_state *state);
+bool intel_can_enable_sagv(struct drm_i915_private *dev_priv,
+ const struct intel_bw_state *bw_state);
int intel_enable_sagv(struct drm_i915_private *dev_priv);
int intel_disable_sagv(struct drm_i915_private *dev_priv);
+void intel_sagv_pre_plane_update(struct intel_atomic_state *state);
+void intel_sagv_post_plane_update(struct intel_atomic_state *state);
bool skl_wm_level_equals(const struct skl_wm_level *l1,
const struct skl_wm_level *l2);
bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index ad719c9602af..9cb2d7548daa 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -549,7 +549,7 @@ void intel_runtime_pm_enable(struct intel_runtime_pm *rpm)
 * because the HDA driver may require us to enable the audio power
* domain during system suspend.
*/
- dev_pm_set_driver_flags(kdev, DPM_FLAG_NEVER_SKIP);
+ dev_pm_set_driver_flags(kdev, DPM_FLAG_NO_DIRECT_COMPLETE);
pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */
pm_runtime_mark_last_busy(kdev);
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
index 3f13baaef058..916ccd1c0e96 100644
--- a/drivers/gpu/drm/i915/intel_sideband.c
+++ b/drivers/gpu/drm/i915/intel_sideband.c
@@ -336,7 +336,7 @@ void intel_sbi_write(struct drm_i915_private *i915, u16 reg, u32 value,
intel_sbi_rw(i915, reg, destination, &value, false);
}
-static inline int gen6_check_mailbox_status(u32 mbox)
+static int gen6_check_mailbox_status(u32 mbox)
{
switch (mbox & GEN6_PCODE_ERROR_MASK) {
case GEN6_PCODE_SUCCESS:
@@ -356,7 +356,7 @@ static inline int gen6_check_mailbox_status(u32 mbox)
}
}
-static inline int gen7_check_mailbox_status(u32 mbox)
+static int gen7_check_mailbox_status(u32 mbox)
{
switch (mbox & GEN6_PCODE_ERROR_MASK) {
case GEN6_PCODE_SUCCESS:
@@ -371,6 +371,8 @@ static inline int gen7_check_mailbox_status(u32 mbox)
return -ENXIO;
case GEN11_PCODE_LOCKED:
return -EBUSY;
+ case GEN11_PCODE_REJECTED:
+ return -EACCES;
case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
return -EOVERFLOW;
default:
@@ -429,7 +431,7 @@ int sandybridge_pcode_read(struct drm_i915_private *i915, u32 mbox,
mutex_lock(&i915->sb_lock);
err = __sandybridge_pcode_rw(i915, mbox, val, val1,
- 500, 0,
+ 500, 20,
true);
mutex_unlock(&i915->sb_lock);
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index abb18b90d7c3..a61cb8ca4d50 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -665,7 +665,7 @@ void intel_uncore_forcewake_user_put(struct intel_uncore *uncore)
mmio_debug_resume(uncore->debug);
if (check_for_unclaimed_mmio(uncore))
- dev_info(uncore->i915->drm.dev,
+ drm_info(&uncore->i915->drm,
"Invalid mmio detected during user access\n");
spin_unlock(&uncore->debug->lock);
@@ -735,6 +735,28 @@ void intel_uncore_forcewake_put(struct intel_uncore *uncore,
}
/**
+ * intel_uncore_forcewake_flush - flush the delayed release
+ * @uncore: the intel_uncore structure
+ * @fw_domains: forcewake domains to flush
+ */
+void intel_uncore_forcewake_flush(struct intel_uncore *uncore,
+ enum forcewake_domains fw_domains)
+{
+ struct intel_uncore_forcewake_domain *domain;
+ unsigned int tmp;
+
+ if (!uncore->funcs.force_wake_put)
+ return;
+
+ fw_domains &= uncore->fw_domains;
+ for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
+ WRITE_ONCE(domain->active, false);
+ if (hrtimer_cancel(&domain->timer))
+ intel_uncore_fw_release_timer(&domain->timer);
+ }
+}
+
+/**
* intel_uncore_forcewake_put__locked - grab forcewake domain references
* @uncore: the intel_uncore structure
* @fw_domains: forcewake domains to get reference on
@@ -877,11 +899,6 @@ find_fw_domain(struct intel_uncore *uncore, u32 offset)
#define GEN_FW_RANGE(s, e, d) \
{ .start = (s), .end = (e), .domains = (d) }
-#define HAS_FWTABLE(dev_priv) \
- (INTEL_GEN(dev_priv) >= 9 || \
- IS_CHERRYVIEW(dev_priv) || \
- IS_VALLEYVIEW(dev_priv))
-
/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __vlv_fw_ranges[] = {
GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
@@ -1070,8 +1087,7 @@ static const struct intel_forcewake_range __gen9_fw_ranges[] = {
/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __gen11_fw_ranges[] = {
- GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
- GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
+ GEN_FW_RANGE(0x0, 0x1fff, 0), /* uncore range */
GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
@@ -1081,27 +1097,31 @@ static const struct intel_forcewake_range __gen11_fw_ranges[] = {
GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
- GEN_FW_RANGE(0x8500, 0x8bff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x8800, 0x8bff, 0),
GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
- GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER),
- GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_ALL),
- GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x8d00, 0x94cf, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x9560, 0x95ff, 0),
+ GEN_FW_RANGE(0x9600, 0xafff, FORCEWAKE_BLITTER),
GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
GEN_FW_RANGE(0xb480, 0xdeff, FORCEWAKE_BLITTER),
GEN_FW_RANGE(0xdf00, 0xe8ff, FORCEWAKE_RENDER),
GEN_FW_RANGE(0xe900, 0x16dff, FORCEWAKE_BLITTER),
GEN_FW_RANGE(0x16e00, 0x19fff, FORCEWAKE_RENDER),
- GEN_FW_RANGE(0x1a000, 0x243ff, FORCEWAKE_BLITTER),
- GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
- GEN_FW_RANGE(0x24800, 0x3ffff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x1a000, 0x23fff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x24000, 0x2407f, 0),
+ GEN_FW_RANGE(0x24080, 0x2417f, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x24180, 0x242ff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x24300, 0x243ff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x24400, 0x24fff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x25000, 0x3ffff, FORCEWAKE_BLITTER),
GEN_FW_RANGE(0x40000, 0x1bffff, 0),
GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0),
- GEN_FW_RANGE(0x1c4000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX1),
- GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0),
- GEN_FW_RANGE(0x1cc000, 0x1cffff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x1c4000, 0x1c7fff, 0),
+ GEN_FW_RANGE(0x1c8000, 0x1cffff, FORCEWAKE_MEDIA_VEBOX0),
GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2),
- GEN_FW_RANGE(0x1d4000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX3),
- GEN_FW_RANGE(0x1d8000, 0x1dbfff, FORCEWAKE_MEDIA_VEBOX1)
+ GEN_FW_RANGE(0x1d4000, 0x1dbfff, 0)
};
/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
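(Aside: the forcewake range tables must stay sorted because find_fw_domain() resolves an MMIO offset to a forcewake domain by searching them. A small user-space sketch of the same idea using bsearch() over a sorted range table; the ranges and domain values below are illustrative only.)

/* Sketch: resolving an MMIO offset to a forcewake domain via a sorted
 * range table. Plain user-space C. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct fw_range {
	uint32_t start;
	uint32_t end;		/* inclusive */
	uint32_t domains;
};

/* must be sorted by offset, just like the __gen11_fw_ranges table */
static const struct fw_range ranges[] = {
	{ 0x0000, 0x1fff, 0 },		/* uncore range, no forcewake needed */
	{ 0x2000, 0x26ff, 1 },		/* render */
	{ 0x2700, 0x2fff, 2 },		/* blitter */
};

static int cmp(const void *key, const void *elem)
{
	uint32_t offset = *(const uint32_t *)key;
	const struct fw_range *r = elem;

	if (offset < r->start)
		return -1;
	if (offset > r->end)
		return 1;
	return 0;
}

static uint32_t lookup_domains(uint32_t offset)
{
	const struct fw_range *r = bsearch(&offset, ranges,
					   sizeof(ranges) / sizeof(ranges[0]),
					   sizeof(ranges[0]), cmp);
	return r ? r->domains : 0;
}

int main(void)
{
	printf("0x2704 -> domains 0x%x\n", lookup_domains(0x2704)); /* prints 0x2 */
	return 0;
}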
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index dcfa243892c6..8d3aa8b9acf9 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -209,7 +209,11 @@ void intel_uncore_forcewake_get(struct intel_uncore *uncore,
enum forcewake_domains domains);
void intel_uncore_forcewake_put(struct intel_uncore *uncore,
enum forcewake_domains domains);
-/* Like above but the caller must manage the uncore.lock itself.
+void intel_uncore_forcewake_flush(struct intel_uncore *uncore,
+ enum forcewake_domains fw_domains);
+
+/*
+ * Like above but the caller must manage the uncore.lock itself.
* Must be used with I915_READ_FW and friends.
*/
void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
diff --git a/drivers/gpu/drm/i915/intel_wakeref.c b/drivers/gpu/drm/i915/intel_wakeref.c
index 8fbf6f4d3f26..dfd87d082218 100644
--- a/drivers/gpu/drm/i915/intel_wakeref.c
+++ b/drivers/gpu/drm/i915/intel_wakeref.c
@@ -70,11 +70,12 @@ unlock:
void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags)
{
- INTEL_WAKEREF_BUG_ON(work_pending(&wf->work));
+ INTEL_WAKEREF_BUG_ON(delayed_work_pending(&wf->work));
/* Assume we are not in process context and so cannot sleep. */
if (flags & INTEL_WAKEREF_PUT_ASYNC || !mutex_trylock(&wf->mutex)) {
- schedule_work(&wf->work);
+ mod_delayed_work(system_wq, &wf->work,
+ FIELD_GET(INTEL_WAKEREF_PUT_DELAY, flags));
return;
}
@@ -83,7 +84,7 @@ void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags)
static void __intel_wakeref_put_work(struct work_struct *wrk)
{
- struct intel_wakeref *wf = container_of(wrk, typeof(*wf), work);
+ struct intel_wakeref *wf = container_of(wrk, typeof(*wf), work.work);
if (atomic_add_unless(&wf->count, -1, 1))
return;
@@ -104,8 +105,9 @@ void __intel_wakeref_init(struct intel_wakeref *wf,
atomic_set(&wf->count, 0);
wf->wakeref = 0;
- INIT_WORK(&wf->work, __intel_wakeref_put_work);
- lockdep_init_map(&wf->work.lockdep_map, "wakeref.work", &key->work, 0);
+ INIT_DELAYED_WORK(&wf->work, __intel_wakeref_put_work);
+ lockdep_init_map(&wf->work.work.lockdep_map,
+ "wakeref.work", &key->work, 0);
}
int intel_wakeref_wait_for_idle(struct intel_wakeref *wf)
diff --git a/drivers/gpu/drm/i915/intel_wakeref.h b/drivers/gpu/drm/i915/intel_wakeref.h
index 7d1e676b71ef..545c8f277c46 100644
--- a/drivers/gpu/drm/i915/intel_wakeref.h
+++ b/drivers/gpu/drm/i915/intel_wakeref.h
@@ -8,6 +8,7 @@
#define INTEL_WAKEREF_H
#include <linux/atomic.h>
+#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
@@ -41,7 +42,7 @@ struct intel_wakeref {
struct intel_runtime_pm *rpm;
const struct intel_wakeref_ops *ops;
- struct work_struct work;
+ struct delayed_work work;
};
struct intel_wakeref_lockclass {
@@ -117,6 +118,11 @@ intel_wakeref_get_if_active(struct intel_wakeref *wf)
return atomic_inc_not_zero(&wf->count);
}
+enum {
+ INTEL_WAKEREF_PUT_ASYNC_BIT = 0,
+ __INTEL_WAKEREF_PUT_LAST_BIT__
+};
+
/**
* intel_wakeref_put_flags: Release the wakeref
* @wf: the wakeref
@@ -134,7 +140,9 @@ intel_wakeref_get_if_active(struct intel_wakeref *wf)
*/
static inline void
__intel_wakeref_put(struct intel_wakeref *wf, unsigned long flags)
-#define INTEL_WAKEREF_PUT_ASYNC BIT(0)
+#define INTEL_WAKEREF_PUT_ASYNC BIT(INTEL_WAKEREF_PUT_ASYNC_BIT)
+#define INTEL_WAKEREF_PUT_DELAY \
+ GENMASK(BITS_PER_LONG - 1, __INTEL_WAKEREF_PUT_LAST_BIT__)
{
INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
if (unlikely(!atomic_add_unless(&wf->count, -1, 1)))
@@ -154,6 +162,14 @@ intel_wakeref_put_async(struct intel_wakeref *wf)
__intel_wakeref_put(wf, INTEL_WAKEREF_PUT_ASYNC);
}
+static inline void
+intel_wakeref_put_delay(struct intel_wakeref *wf, unsigned long delay)
+{
+ __intel_wakeref_put(wf,
+ INTEL_WAKEREF_PUT_ASYNC |
+ FIELD_PREP(INTEL_WAKEREF_PUT_DELAY, delay));
+}
+
/**
* intel_wakeref_lock: Lock the wakeref (mutex)
* @wf: the wakeref
@@ -194,7 +210,7 @@ intel_wakeref_unlock_wait(struct intel_wakeref *wf)
{
mutex_lock(&wf->mutex);
mutex_unlock(&wf->mutex);
- flush_work(&wf->work);
+ flush_delayed_work(&wf->work);
}
/**
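(Aside on INTEL_WAKEREF_PUT_DELAY above: the put flags word keeps the async bit in bit 0 and the optional delay in the remaining bits, extracted with FIELD_GET() when queuing the delayed work. A user-space model of that packing with hypothetical helper names.)

/* Sketch of packing an async flag plus a delay into one flags word,
 * mirroring INTEL_WAKEREF_PUT_ASYNC / INTEL_WAKEREF_PUT_DELAY. */
#include <stdio.h>

#define PUT_ASYNC_BIT	0
#define PUT_ASYNC	(1UL << PUT_ASYNC_BIT)
#define PUT_DELAY_SHIFT	1
#define PUT_DELAY_MASK	(~0UL << PUT_DELAY_SHIFT)	/* all remaining bits */

static unsigned long make_flags(unsigned long delay)
{
	/* rough equivalent of FIELD_PREP(INTEL_WAKEREF_PUT_DELAY, delay) | ASYNC */
	return PUT_ASYNC | (delay << PUT_DELAY_SHIFT);
}

static unsigned long get_delay(unsigned long flags)
{
	/* rough equivalent of FIELD_GET(INTEL_WAKEREF_PUT_DELAY, flags) */
	return (flags & PUT_DELAY_MASK) >> PUT_DELAY_SHIFT;
}

int main(void)
{
	unsigned long flags = make_flags(250);	/* e.g. a 250 jiffy delay */

	printf("async=%lu delay=%lu\n", flags & PUT_ASYNC, get_delay(flags));
	return 0;
}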
diff --git a/drivers/gpu/drm/i915/intel_wopcm.c b/drivers/gpu/drm/i915/intel_wopcm.c
index 2bb9f9f9a50a..ec776591e1cf 100644
--- a/drivers/gpu/drm/i915/intel_wopcm.c
+++ b/drivers/gpu/drm/i915/intel_wopcm.c
@@ -86,10 +86,10 @@ void intel_wopcm_init_early(struct intel_wopcm *wopcm)
else
wopcm->size = GEN9_WOPCM_SIZE;
- DRM_DEV_DEBUG_DRIVER(i915->drm.dev, "WOPCM: %uK\n", wopcm->size / 1024);
+ drm_dbg(&i915->drm, "WOPCM: %uK\n", wopcm->size / 1024);
}
-static inline u32 context_reserved_size(struct drm_i915_private *i915)
+static u32 context_reserved_size(struct drm_i915_private *i915)
{
if (IS_GEN9_LP(i915))
return BXT_WOPCM_RC6_CTX_RESERVED;
@@ -99,8 +99,8 @@ static inline u32 context_reserved_size(struct drm_i915_private *i915)
return 0;
}
-static inline bool gen9_check_dword_gap(struct drm_i915_private *i915,
- u32 guc_wopcm_base, u32 guc_wopcm_size)
+static bool gen9_check_dword_gap(struct drm_i915_private *i915,
+ u32 guc_wopcm_base, u32 guc_wopcm_size)
{
u32 offset;
@@ -112,7 +112,7 @@ static inline bool gen9_check_dword_gap(struct drm_i915_private *i915,
offset = guc_wopcm_base + GEN9_GUC_WOPCM_OFFSET;
if (offset > guc_wopcm_size ||
(guc_wopcm_size - offset) < sizeof(u32)) {
- dev_err(i915->drm.dev,
+ drm_err(&i915->drm,
"WOPCM: invalid GuC region size: %uK < %uK\n",
guc_wopcm_size / SZ_1K,
(u32)(offset + sizeof(u32)) / SZ_1K);
@@ -122,8 +122,8 @@ static inline bool gen9_check_dword_gap(struct drm_i915_private *i915,
return true;
}
-static inline bool gen9_check_huc_fw_fits(struct drm_i915_private *i915,
- u32 guc_wopcm_size, u32 huc_fw_size)
+static bool gen9_check_huc_fw_fits(struct drm_i915_private *i915,
+ u32 guc_wopcm_size, u32 huc_fw_size)
{
/*
* On Gen9 & CNL A0, hardware requires the total available GuC WOPCM
@@ -131,7 +131,7 @@ static inline bool gen9_check_huc_fw_fits(struct drm_i915_private *i915,
* firmware uploading would fail.
*/
if (huc_fw_size > guc_wopcm_size - GUC_WOPCM_RESERVED) {
- dev_err(i915->drm.dev, "WOPCM: no space for %s: %uK < %uK\n",
+ drm_err(&i915->drm, "WOPCM: no space for %s: %uK < %uK\n",
intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_HUC),
(guc_wopcm_size - GUC_WOPCM_RESERVED) / SZ_1K,
huc_fw_size / 1024);
@@ -141,32 +141,31 @@ static inline bool gen9_check_huc_fw_fits(struct drm_i915_private *i915,
return true;
}
-static inline bool check_hw_restrictions(struct drm_i915_private *i915,
- u32 guc_wopcm_base, u32 guc_wopcm_size,
- u32 huc_fw_size)
+static bool check_hw_restrictions(struct drm_i915_private *i915,
+ u32 guc_wopcm_base, u32 guc_wopcm_size,
+ u32 huc_fw_size)
{
if (IS_GEN(i915, 9) && !gen9_check_dword_gap(i915, guc_wopcm_base,
guc_wopcm_size))
return false;
- if ((IS_GEN(i915, 9) ||
- IS_CNL_REVID(i915, CNL_REVID_A0, CNL_REVID_A0)) &&
+ if (IS_GEN(i915, 9) &&
!gen9_check_huc_fw_fits(i915, guc_wopcm_size, huc_fw_size))
return false;
return true;
}
-static inline bool __check_layout(struct drm_i915_private *i915, u32 wopcm_size,
- u32 guc_wopcm_base, u32 guc_wopcm_size,
- u32 guc_fw_size, u32 huc_fw_size)
+static bool __check_layout(struct drm_i915_private *i915, u32 wopcm_size,
+ u32 guc_wopcm_base, u32 guc_wopcm_size,
+ u32 guc_fw_size, u32 huc_fw_size)
{
const u32 ctx_rsvd = context_reserved_size(i915);
u32 size;
size = wopcm_size - ctx_rsvd;
if (unlikely(range_overflows(guc_wopcm_base, guc_wopcm_size, size))) {
- dev_err(i915->drm.dev,
+ drm_err(&i915->drm,
"WOPCM: invalid GuC region layout: %uK + %uK > %uK\n",
guc_wopcm_base / SZ_1K, guc_wopcm_size / SZ_1K,
size / SZ_1K);
@@ -175,7 +174,7 @@ static inline bool __check_layout(struct drm_i915_private *i915, u32 wopcm_size,
size = guc_fw_size + GUC_WOPCM_RESERVED + GUC_WOPCM_STACK_RESERVED;
if (unlikely(guc_wopcm_size < size)) {
- dev_err(i915->drm.dev, "WOPCM: no space for %s: %uK < %uK\n",
+ drm_err(&i915->drm, "WOPCM: no space for %s: %uK < %uK\n",
intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_GUC),
guc_wopcm_size / SZ_1K, size / SZ_1K);
return false;
@@ -183,7 +182,7 @@ static inline bool __check_layout(struct drm_i915_private *i915, u32 wopcm_size,
size = huc_fw_size + WOPCM_RESERVED_SIZE;
if (unlikely(guc_wopcm_base < size)) {
- dev_err(i915->drm.dev, "WOPCM: no space for %s: %uK < %uK\n",
+ drm_err(&i915->drm, "WOPCM: no space for %s: %uK < %uK\n",
intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_HUC),
guc_wopcm_base / SZ_1K, size / SZ_1K);
return false;
@@ -242,10 +241,8 @@ void intel_wopcm_init(struct intel_wopcm *wopcm)
return;
if (__wopcm_regs_locked(gt->uncore, &guc_wopcm_base, &guc_wopcm_size)) {
- DRM_DEV_DEBUG_DRIVER(i915->drm.dev,
- "GuC WOPCM is already locked [%uK, %uK)\n",
- guc_wopcm_base / SZ_1K,
- guc_wopcm_size / SZ_1K);
+ drm_dbg(&i915->drm, "GuC WOPCM is already locked [%uK, %uK)\n",
+ guc_wopcm_base / SZ_1K, guc_wopcm_size / SZ_1K);
goto check;
}
@@ -266,8 +263,8 @@ void intel_wopcm_init(struct intel_wopcm *wopcm)
guc_wopcm_size = wopcm->size - ctx_rsvd - guc_wopcm_base;
guc_wopcm_size &= GUC_WOPCM_SIZE_MASK;
- DRM_DEV_DEBUG_DRIVER(i915->drm.dev, "Calculated GuC WOPCM [%uK, %uK)\n",
- guc_wopcm_base / SZ_1K, guc_wopcm_size / SZ_1K);
+ drm_dbg(&i915->drm, "Calculated GuC WOPCM [%uK, %uK)\n",
+ guc_wopcm_base / SZ_1K, guc_wopcm_size / SZ_1K);
check:
if (__check_layout(i915, wopcm->size, guc_wopcm_base, guc_wopcm_size,
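(Aside: __check_layout() verifies that the GuC WOPCM region fits below the reserved context area and that the GuC and HuC firmware images fit in their respective halves. A simplified user-space model of those bounds checks, with made-up sizes and a single merged reserved constant per firmware.)

/* Sketch of the WOPCM layout checks. Plain user-space C, illustrative only. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool range_overflows(uint32_t start, uint32_t size, uint32_t max)
{
	return start > max || size > max - start;
}

static bool check_layout(uint32_t wopcm_size, uint32_t ctx_rsvd,
			 uint32_t guc_base, uint32_t guc_size,
			 uint32_t guc_fw_size, uint32_t huc_fw_size,
			 uint32_t guc_rsvd, uint32_t huc_rsvd)
{
	uint32_t usable = wopcm_size - ctx_rsvd;

	if (range_overflows(guc_base, guc_size, usable))
		return false;		/* GuC region spills into reserved space */
	if (guc_size < guc_fw_size + guc_rsvd)
		return false;		/* no room for GuC firmware */
	if (guc_base < huc_fw_size + huc_rsvd)
		return false;		/* no room for HuC firmware below GuC */
	return true;
}

int main(void)
{
	/* made-up sizes, in bytes */
	printf("layout ok: %d\n",
	       check_layout(2u << 20, 64u << 10, 1u << 20, 512u << 10,
			    256u << 10, 512u << 10, 32u << 10, 16u << 10));
	return 0;
}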
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_bdw.c b/drivers/gpu/drm/i915/oa/i915_oa_bdw.c
deleted file mode 100644
index 14da5c3b569d..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_bdw.c
+++ /dev/null
@@ -1,90 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#include <linux/sysfs.h>
-
-#include "i915_drv.h"
-#include "i915_oa_bdw.h"
-
-static const struct i915_oa_reg b_counter_config_test_oa[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2770), 0x00000004 },
- { _MMIO(0x2774), 0x00000000 },
- { _MMIO(0x2778), 0x00000003 },
- { _MMIO(0x277c), 0x00000000 },
- { _MMIO(0x2780), 0x00000007 },
- { _MMIO(0x2784), 0x00000000 },
- { _MMIO(0x2788), 0x00100002 },
- { _MMIO(0x278c), 0x0000fff7 },
- { _MMIO(0x2790), 0x00100002 },
- { _MMIO(0x2794), 0x0000ffcf },
- { _MMIO(0x2798), 0x00100082 },
- { _MMIO(0x279c), 0x0000ffef },
- { _MMIO(0x27a0), 0x001000c2 },
- { _MMIO(0x27a4), 0x0000ffe7 },
- { _MMIO(0x27a8), 0x00100001 },
- { _MMIO(0x27ac), 0x0000ffe7 },
-};
-
-static const struct i915_oa_reg flex_eu_config_test_oa[] = {
-};
-
-static const struct i915_oa_reg mux_config_test_oa[] = {
- { _MMIO(0x9840), 0x000000a0 },
- { _MMIO(0x9888), 0x198b0000 },
- { _MMIO(0x9888), 0x078b0066 },
- { _MMIO(0x9888), 0x118b0000 },
- { _MMIO(0x9888), 0x258b0000 },
- { _MMIO(0x9888), 0x21850008 },
- { _MMIO(0x9888), 0x0d834000 },
- { _MMIO(0x9888), 0x07844000 },
- { _MMIO(0x9888), 0x17804000 },
- { _MMIO(0x9888), 0x21800000 },
- { _MMIO(0x9888), 0x4f800000 },
- { _MMIO(0x9888), 0x41800000 },
- { _MMIO(0x9888), 0x31800000 },
- { _MMIO(0x9840), 0x00000080 },
-};
-
-static ssize_t
-show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "1\n");
-}
-
-void
-i915_perf_load_test_config_bdw(struct drm_i915_private *dev_priv)
-{
- strlcpy(dev_priv->perf.test_config.uuid,
- "d6de6f55-e526-4f79-a6a6-d7315c09044e",
- sizeof(dev_priv->perf.test_config.uuid));
- dev_priv->perf.test_config.id = 1;
-
- dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
-
- dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
-
- dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
-
- dev_priv->perf.test_config.sysfs_metric.name = "d6de6f55-e526-4f79-a6a6-d7315c09044e";
- dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
-
- dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
-
- dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
-}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_bdw.h b/drivers/gpu/drm/i915/oa/i915_oa_bdw.h
deleted file mode 100644
index 0cee3334f0a6..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_bdw.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_BDW_H__
-#define __I915_OA_BDW_H__
-
-struct drm_i915_private;
-
-void i915_perf_load_test_config_bdw(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_bxt.c b/drivers/gpu/drm/i915/oa/i915_oa_bxt.c
deleted file mode 100644
index 3e785bafcf99..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_bxt.c
+++ /dev/null
@@ -1,88 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#include <linux/sysfs.h>
-
-#include "i915_drv.h"
-#include "i915_oa_bxt.h"
-
-static const struct i915_oa_reg b_counter_config_test_oa[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2770), 0x00000004 },
- { _MMIO(0x2774), 0x00000000 },
- { _MMIO(0x2778), 0x00000003 },
- { _MMIO(0x277c), 0x00000000 },
- { _MMIO(0x2780), 0x00000007 },
- { _MMIO(0x2784), 0x00000000 },
- { _MMIO(0x2788), 0x00100002 },
- { _MMIO(0x278c), 0x0000fff7 },
- { _MMIO(0x2790), 0x00100002 },
- { _MMIO(0x2794), 0x0000ffcf },
- { _MMIO(0x2798), 0x00100082 },
- { _MMIO(0x279c), 0x0000ffef },
- { _MMIO(0x27a0), 0x001000c2 },
- { _MMIO(0x27a4), 0x0000ffe7 },
- { _MMIO(0x27a8), 0x00100001 },
- { _MMIO(0x27ac), 0x0000ffe7 },
-};
-
-static const struct i915_oa_reg flex_eu_config_test_oa[] = {
-};
-
-static const struct i915_oa_reg mux_config_test_oa[] = {
- { _MMIO(0x9840), 0x00000080 },
- { _MMIO(0x9888), 0x19800000 },
- { _MMIO(0x9888), 0x07800063 },
- { _MMIO(0x9888), 0x11800000 },
- { _MMIO(0x9888), 0x23810008 },
- { _MMIO(0x9888), 0x1d950400 },
- { _MMIO(0x9888), 0x0f922000 },
- { _MMIO(0x9888), 0x1f908000 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x55900000 },
- { _MMIO(0x9888), 0x47900000 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static ssize_t
-show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "1\n");
-}
-
-void
-i915_perf_load_test_config_bxt(struct drm_i915_private *dev_priv)
-{
- strlcpy(dev_priv->perf.test_config.uuid,
- "5ee72f5c-092f-421e-8b70-225f7c3e9612",
- sizeof(dev_priv->perf.test_config.uuid));
- dev_priv->perf.test_config.id = 1;
-
- dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
-
- dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
-
- dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
-
- dev_priv->perf.test_config.sysfs_metric.name = "5ee72f5c-092f-421e-8b70-225f7c3e9612";
- dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
-
- dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
-
- dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
-}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_bxt.h b/drivers/gpu/drm/i915/oa/i915_oa_bxt.h
deleted file mode 100644
index 0bdf391323ec..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_bxt.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_BXT_H__
-#define __I915_OA_BXT_H__
-
-struct drm_i915_private;
-
-void i915_perf_load_test_config_bxt(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.c b/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.c
deleted file mode 100644
index 0ea86f70a06c..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.c
+++ /dev/null
@@ -1,89 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#include <linux/sysfs.h>
-
-#include "i915_drv.h"
-#include "i915_oa_cflgt2.h"
-
-static const struct i915_oa_reg b_counter_config_test_oa[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2770), 0x00000004 },
- { _MMIO(0x2774), 0x00000000 },
- { _MMIO(0x2778), 0x00000003 },
- { _MMIO(0x277c), 0x00000000 },
- { _MMIO(0x2780), 0x00000007 },
- { _MMIO(0x2784), 0x00000000 },
- { _MMIO(0x2788), 0x00100002 },
- { _MMIO(0x278c), 0x0000fff7 },
- { _MMIO(0x2790), 0x00100002 },
- { _MMIO(0x2794), 0x0000ffcf },
- { _MMIO(0x2798), 0x00100082 },
- { _MMIO(0x279c), 0x0000ffef },
- { _MMIO(0x27a0), 0x001000c2 },
- { _MMIO(0x27a4), 0x0000ffe7 },
- { _MMIO(0x27a8), 0x00100001 },
- { _MMIO(0x27ac), 0x0000ffe7 },
-};
-
-static const struct i915_oa_reg flex_eu_config_test_oa[] = {
-};
-
-static const struct i915_oa_reg mux_config_test_oa[] = {
- { _MMIO(0x9840), 0x00000080 },
- { _MMIO(0x9888), 0x11810000 },
- { _MMIO(0x9888), 0x07810013 },
- { _MMIO(0x9888), 0x1f810000 },
- { _MMIO(0x9888), 0x1d810000 },
- { _MMIO(0x9888), 0x1b930040 },
- { _MMIO(0x9888), 0x07e54000 },
- { _MMIO(0x9888), 0x1f908000 },
- { _MMIO(0x9888), 0x11900000 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x45900000 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static ssize_t
-show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "1\n");
-}
-
-void
-i915_perf_load_test_config_cflgt2(struct drm_i915_private *dev_priv)
-{
- strlcpy(dev_priv->perf.test_config.uuid,
- "74fb4902-d3d3-4237-9e90-cbdc68d0a446",
- sizeof(dev_priv->perf.test_config.uuid));
- dev_priv->perf.test_config.id = 1;
-
- dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
-
- dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
-
- dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
-
- dev_priv->perf.test_config.sysfs_metric.name = "74fb4902-d3d3-4237-9e90-cbdc68d0a446";
- dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
-
- dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
-
- dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
-}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.h b/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.h
deleted file mode 100644
index 6b862280ab78..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_CFLGT2_H__
-#define __I915_OA_CFLGT2_H__
-
-struct drm_i915_private;
-
-void i915_perf_load_test_config_cflgt2(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.c b/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.c
deleted file mode 100644
index fc632dd890bf..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.c
+++ /dev/null
@@ -1,89 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#include <linux/sysfs.h>
-
-#include "i915_drv.h"
-#include "i915_oa_cflgt3.h"
-
-static const struct i915_oa_reg b_counter_config_test_oa[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2770), 0x00000004 },
- { _MMIO(0x2774), 0x00000000 },
- { _MMIO(0x2778), 0x00000003 },
- { _MMIO(0x277c), 0x00000000 },
- { _MMIO(0x2780), 0x00000007 },
- { _MMIO(0x2784), 0x00000000 },
- { _MMIO(0x2788), 0x00100002 },
- { _MMIO(0x278c), 0x0000fff7 },
- { _MMIO(0x2790), 0x00100002 },
- { _MMIO(0x2794), 0x0000ffcf },
- { _MMIO(0x2798), 0x00100082 },
- { _MMIO(0x279c), 0x0000ffef },
- { _MMIO(0x27a0), 0x001000c2 },
- { _MMIO(0x27a4), 0x0000ffe7 },
- { _MMIO(0x27a8), 0x00100001 },
- { _MMIO(0x27ac), 0x0000ffe7 },
-};
-
-static const struct i915_oa_reg flex_eu_config_test_oa[] = {
-};
-
-static const struct i915_oa_reg mux_config_test_oa[] = {
- { _MMIO(0x9840), 0x00000080 },
- { _MMIO(0x9888), 0x11810000 },
- { _MMIO(0x9888), 0x07810013 },
- { _MMIO(0x9888), 0x1f810000 },
- { _MMIO(0x9888), 0x1d810000 },
- { _MMIO(0x9888), 0x1b930040 },
- { _MMIO(0x9888), 0x07e54000 },
- { _MMIO(0x9888), 0x1f908000 },
- { _MMIO(0x9888), 0x11900000 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x45900000 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static ssize_t
-show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "1\n");
-}
-
-void
-i915_perf_load_test_config_cflgt3(struct drm_i915_private *dev_priv)
-{
- strlcpy(dev_priv->perf.test_config.uuid,
- "577e8e2c-3fa0-4875-8743-3538d585e3b0",
- sizeof(dev_priv->perf.test_config.uuid));
- dev_priv->perf.test_config.id = 1;
-
- dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
-
- dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
-
- dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
-
- dev_priv->perf.test_config.sysfs_metric.name = "577e8e2c-3fa0-4875-8743-3538d585e3b0";
- dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
-
- dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
-
- dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
-}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.h b/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.h
deleted file mode 100644
index 4ca9d8f89b2f..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_CFLGT3_H__
-#define __I915_OA_CFLGT3_H__
-
-struct drm_i915_private;
-
-void i915_perf_load_test_config_cflgt3(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_chv.c b/drivers/gpu/drm/i915/oa/i915_oa_chv.c
deleted file mode 100644
index 6cd4e9921a8a..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_chv.c
+++ /dev/null
@@ -1,89 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#include <linux/sysfs.h>
-
-#include "i915_drv.h"
-#include "i915_oa_chv.h"
-
-static const struct i915_oa_reg b_counter_config_test_oa[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2770), 0x00000004 },
- { _MMIO(0x2774), 0x00000000 },
- { _MMIO(0x2778), 0x00000003 },
- { _MMIO(0x277c), 0x00000000 },
- { _MMIO(0x2780), 0x00000007 },
- { _MMIO(0x2784), 0x00000000 },
- { _MMIO(0x2788), 0x00100002 },
- { _MMIO(0x278c), 0x0000fff7 },
- { _MMIO(0x2790), 0x00100002 },
- { _MMIO(0x2794), 0x0000ffcf },
- { _MMIO(0x2798), 0x00100082 },
- { _MMIO(0x279c), 0x0000ffef },
- { _MMIO(0x27a0), 0x001000c2 },
- { _MMIO(0x27a4), 0x0000ffe7 },
- { _MMIO(0x27a8), 0x00100001 },
- { _MMIO(0x27ac), 0x0000ffe7 },
-};
-
-static const struct i915_oa_reg flex_eu_config_test_oa[] = {
-};
-
-static const struct i915_oa_reg mux_config_test_oa[] = {
- { _MMIO(0x9840), 0x000000a0 },
- { _MMIO(0x9888), 0x59800000 },
- { _MMIO(0x9888), 0x59800001 },
- { _MMIO(0x9888), 0x338b0000 },
- { _MMIO(0x9888), 0x258b0066 },
- { _MMIO(0x9888), 0x058b0000 },
- { _MMIO(0x9888), 0x038b0000 },
- { _MMIO(0x9888), 0x03844000 },
- { _MMIO(0x9888), 0x47800080 },
- { _MMIO(0x9888), 0x57800000 },
- { _MMIO(0x1823a4), 0x00000000 },
- { _MMIO(0x9888), 0x59800000 },
- { _MMIO(0x9840), 0x00000080 },
-};
-
-static ssize_t
-show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "1\n");
-}
-
-void
-i915_perf_load_test_config_chv(struct drm_i915_private *dev_priv)
-{
- strlcpy(dev_priv->perf.test_config.uuid,
- "4a534b07-cba3-414d-8d60-874830e883aa",
- sizeof(dev_priv->perf.test_config.uuid));
- dev_priv->perf.test_config.id = 1;
-
- dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
-
- dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
-
- dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
-
- dev_priv->perf.test_config.sysfs_metric.name = "4a534b07-cba3-414d-8d60-874830e883aa";
- dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
-
- dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
-
- dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
-}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_chv.h b/drivers/gpu/drm/i915/oa/i915_oa_chv.h
deleted file mode 100644
index 3cac7bbc9c71..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_chv.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_CHV_H__
-#define __I915_OA_CHV_H__
-
-struct drm_i915_private;
-
-void i915_perf_load_test_config_chv(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_cnl.c b/drivers/gpu/drm/i915/oa/i915_oa_cnl.c
deleted file mode 100644
index 1041e8914993..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_cnl.c
+++ /dev/null
@@ -1,101 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#include <linux/sysfs.h>
-
-#include "i915_drv.h"
-#include "i915_oa_cnl.h"
-
-static const struct i915_oa_reg b_counter_config_test_oa[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2770), 0x00000004 },
- { _MMIO(0x2774), 0x0000ffff },
- { _MMIO(0x2778), 0x00000003 },
- { _MMIO(0x277c), 0x0000ffff },
- { _MMIO(0x2780), 0x00000007 },
- { _MMIO(0x2784), 0x0000ffff },
- { _MMIO(0x2788), 0x00100002 },
- { _MMIO(0x278c), 0x0000fff7 },
- { _MMIO(0x2790), 0x00100002 },
- { _MMIO(0x2794), 0x0000ffcf },
- { _MMIO(0x2798), 0x00100082 },
- { _MMIO(0x279c), 0x0000ffef },
- { _MMIO(0x27a0), 0x001000c2 },
- { _MMIO(0x27a4), 0x0000ffe7 },
- { _MMIO(0x27a8), 0x00100001 },
- { _MMIO(0x27ac), 0x0000ffe7 },
-};
-
-static const struct i915_oa_reg flex_eu_config_test_oa[] = {
-};
-
-static const struct i915_oa_reg mux_config_test_oa[] = {
- { _MMIO(0xd04), 0x00000200 },
- { _MMIO(0x9884), 0x00000007 },
- { _MMIO(0x9888), 0x17060000 },
- { _MMIO(0x9840), 0x00000000 },
- { _MMIO(0x9884), 0x00000007 },
- { _MMIO(0x9888), 0x13034000 },
- { _MMIO(0x9884), 0x00000007 },
- { _MMIO(0x9888), 0x07060066 },
- { _MMIO(0x9884), 0x00000007 },
- { _MMIO(0x9888), 0x05060000 },
- { _MMIO(0x9884), 0x00000007 },
- { _MMIO(0x9888), 0x0f080040 },
- { _MMIO(0x9884), 0x00000007 },
- { _MMIO(0x9888), 0x07091000 },
- { _MMIO(0x9884), 0x00000007 },
- { _MMIO(0x9888), 0x0f041000 },
- { _MMIO(0x9884), 0x00000007 },
- { _MMIO(0x9888), 0x1d004000 },
- { _MMIO(0x9884), 0x00000007 },
- { _MMIO(0x9888), 0x35000000 },
- { _MMIO(0x9884), 0x00000007 },
- { _MMIO(0x9888), 0x49000000 },
- { _MMIO(0x9884), 0x00000007 },
- { _MMIO(0x9888), 0x3d000000 },
- { _MMIO(0x9884), 0x00000007 },
- { _MMIO(0x9888), 0x31000000 },
-};
-
-static ssize_t
-show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "1\n");
-}
-
-void
-i915_perf_load_test_config_cnl(struct drm_i915_private *dev_priv)
-{
- strlcpy(dev_priv->perf.test_config.uuid,
- "db41edd4-d8e7-4730-ad11-b9a2d6833503",
- sizeof(dev_priv->perf.test_config.uuid));
- dev_priv->perf.test_config.id = 1;
-
- dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
-
- dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
-
- dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
-
- dev_priv->perf.test_config.sysfs_metric.name = "db41edd4-d8e7-4730-ad11-b9a2d6833503";
- dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
-
- dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
-
- dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
-}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_cnl.h b/drivers/gpu/drm/i915/oa/i915_oa_cnl.h
deleted file mode 100644
index db379f5fcbb9..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_cnl.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_CNL_H__
-#define __I915_OA_CNL_H__
-
-struct drm_i915_private;
-
-void i915_perf_load_test_config_cnl(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_glk.c b/drivers/gpu/drm/i915/oa/i915_oa_glk.c
deleted file mode 100644
index bd15ebe9aeeb..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_glk.c
+++ /dev/null
@@ -1,88 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#include <linux/sysfs.h>
-
-#include "i915_drv.h"
-#include "i915_oa_glk.h"
-
-static const struct i915_oa_reg b_counter_config_test_oa[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2770), 0x00000004 },
- { _MMIO(0x2774), 0x00000000 },
- { _MMIO(0x2778), 0x00000003 },
- { _MMIO(0x277c), 0x00000000 },
- { _MMIO(0x2780), 0x00000007 },
- { _MMIO(0x2784), 0x00000000 },
- { _MMIO(0x2788), 0x00100002 },
- { _MMIO(0x278c), 0x0000fff7 },
- { _MMIO(0x2790), 0x00100002 },
- { _MMIO(0x2794), 0x0000ffcf },
- { _MMIO(0x2798), 0x00100082 },
- { _MMIO(0x279c), 0x0000ffef },
- { _MMIO(0x27a0), 0x001000c2 },
- { _MMIO(0x27a4), 0x0000ffe7 },
- { _MMIO(0x27a8), 0x00100001 },
- { _MMIO(0x27ac), 0x0000ffe7 },
-};
-
-static const struct i915_oa_reg flex_eu_config_test_oa[] = {
-};
-
-static const struct i915_oa_reg mux_config_test_oa[] = {
- { _MMIO(0x9840), 0x00000080 },
- { _MMIO(0x9888), 0x19800000 },
- { _MMIO(0x9888), 0x07800063 },
- { _MMIO(0x9888), 0x11800000 },
- { _MMIO(0x9888), 0x23810008 },
- { _MMIO(0x9888), 0x1d950400 },
- { _MMIO(0x9888), 0x0f922000 },
- { _MMIO(0x9888), 0x1f908000 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x55900000 },
- { _MMIO(0x9888), 0x47900000 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static ssize_t
-show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "1\n");
-}
-
-void
-i915_perf_load_test_config_glk(struct drm_i915_private *dev_priv)
-{
- strlcpy(dev_priv->perf.test_config.uuid,
- "dd3fd789-e783-4204-8cd0-b671bbccb0cf",
- sizeof(dev_priv->perf.test_config.uuid));
- dev_priv->perf.test_config.id = 1;
-
- dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
-
- dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
-
- dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
-
- dev_priv->perf.test_config.sysfs_metric.name = "dd3fd789-e783-4204-8cd0-b671bbccb0cf";
- dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
-
- dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
-
- dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
-}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_glk.h b/drivers/gpu/drm/i915/oa/i915_oa_glk.h
deleted file mode 100644
index 779f343efd11..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_glk.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_GLK_H__
-#define __I915_OA_GLK_H__
-
-struct drm_i915_private;
-
-void i915_perf_load_test_config_glk(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_hsw.c b/drivers/gpu/drm/i915/oa/i915_oa_hsw.c
deleted file mode 100644
index 133721a8619f..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_hsw.c
+++ /dev/null
@@ -1,118 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#include <linux/sysfs.h>
-
-#include "i915_drv.h"
-#include "i915_oa_hsw.h"
-
-static const struct i915_oa_reg b_counter_config_render_basic[] = {
- { _MMIO(0x2724), 0x00800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2714), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
-};
-
-static const struct i915_oa_reg flex_eu_config_render_basic[] = {
-};
-
-static const struct i915_oa_reg mux_config_render_basic[] = {
- { _MMIO(0x9840), 0x00000080 },
- { _MMIO(0x253a4), 0x01600000 },
- { _MMIO(0x25440), 0x00100000 },
- { _MMIO(0x25128), 0x00000000 },
- { _MMIO(0x2691c), 0x00000800 },
- { _MMIO(0x26aa0), 0x01500000 },
- { _MMIO(0x26b9c), 0x00006000 },
- { _MMIO(0x2791c), 0x00000800 },
- { _MMIO(0x27aa0), 0x01500000 },
- { _MMIO(0x27b9c), 0x00006000 },
- { _MMIO(0x2641c), 0x00000400 },
- { _MMIO(0x25380), 0x00000010 },
- { _MMIO(0x2538c), 0x00000000 },
- { _MMIO(0x25384), 0x0800aaaa },
- { _MMIO(0x25400), 0x00000004 },
- { _MMIO(0x2540c), 0x06029000 },
- { _MMIO(0x25410), 0x00000002 },
- { _MMIO(0x25404), 0x5c30ffff },
- { _MMIO(0x25100), 0x00000016 },
- { _MMIO(0x25110), 0x00000400 },
- { _MMIO(0x25104), 0x00000000 },
- { _MMIO(0x26804), 0x00001211 },
- { _MMIO(0x26884), 0x00000100 },
- { _MMIO(0x26900), 0x00000002 },
- { _MMIO(0x26908), 0x00700000 },
- { _MMIO(0x26904), 0x00000000 },
- { _MMIO(0x26984), 0x00001022 },
- { _MMIO(0x26a04), 0x00000011 },
- { _MMIO(0x26a80), 0x00000006 },
- { _MMIO(0x26a88), 0x00000c02 },
- { _MMIO(0x26a84), 0x00000000 },
- { _MMIO(0x26b04), 0x00001000 },
- { _MMIO(0x26b80), 0x00000002 },
- { _MMIO(0x26b8c), 0x00000007 },
- { _MMIO(0x26b84), 0x00000000 },
- { _MMIO(0x27804), 0x00004844 },
- { _MMIO(0x27884), 0x00000400 },
- { _MMIO(0x27900), 0x00000002 },
- { _MMIO(0x27908), 0x0e000000 },
- { _MMIO(0x27904), 0x00000000 },
- { _MMIO(0x27984), 0x00004088 },
- { _MMIO(0x27a04), 0x00000044 },
- { _MMIO(0x27a80), 0x00000006 },
- { _MMIO(0x27a88), 0x00018040 },
- { _MMIO(0x27a84), 0x00000000 },
- { _MMIO(0x27b04), 0x00004000 },
- { _MMIO(0x27b80), 0x00000002 },
- { _MMIO(0x27b8c), 0x000000e0 },
- { _MMIO(0x27b84), 0x00000000 },
- { _MMIO(0x26104), 0x00002222 },
- { _MMIO(0x26184), 0x0c006666 },
- { _MMIO(0x26284), 0x04000000 },
- { _MMIO(0x26304), 0x04000000 },
- { _MMIO(0x26400), 0x00000002 },
- { _MMIO(0x26410), 0x000000a0 },
- { _MMIO(0x26404), 0x00000000 },
- { _MMIO(0x25420), 0x04108020 },
- { _MMIO(0x25424), 0x1284a420 },
- { _MMIO(0x2541c), 0x00000000 },
- { _MMIO(0x25428), 0x00042049 },
-};
-
-static ssize_t
-show_render_basic_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "1\n");
-}
-
-void
-i915_perf_load_test_config_hsw(struct drm_i915_private *dev_priv)
-{
- strlcpy(dev_priv->perf.test_config.uuid,
- "403d8832-1a27-4aa6-a64e-f5389ce7b212",
- sizeof(dev_priv->perf.test_config.uuid));
- dev_priv->perf.test_config.id = 1;
-
- dev_priv->perf.test_config.mux_regs = mux_config_render_basic;
- dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_render_basic);
-
- dev_priv->perf.test_config.b_counter_regs = b_counter_config_render_basic;
- dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_render_basic);
-
- dev_priv->perf.test_config.flex_regs = flex_eu_config_render_basic;
- dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_render_basic);
-
- dev_priv->perf.test_config.sysfs_metric.name = "403d8832-1a27-4aa6-a64e-f5389ce7b212";
- dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
-
- dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
-
- dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.test_config.sysfs_metric_id.show = show_render_basic_id;
-}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_hsw.h b/drivers/gpu/drm/i915/oa/i915_oa_hsw.h
deleted file mode 100644
index ba97f732f136..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_hsw.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_HSW_H__
-#define __I915_OA_HSW_H__
-
-struct drm_i915_private;
-
-void i915_perf_load_test_config_hsw(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_icl.c b/drivers/gpu/drm/i915/oa/i915_oa_icl.c
deleted file mode 100644
index 2d92041b754f..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_icl.c
+++ /dev/null
@@ -1,98 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#include <linux/sysfs.h>
-
-#include "i915_drv.h"
-#include "i915_oa_icl.h"
-
-static const struct i915_oa_reg b_counter_config_test_oa[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2770), 0x00000004 },
- { _MMIO(0x2774), 0x0000ffff },
- { _MMIO(0x2778), 0x00000003 },
- { _MMIO(0x277c), 0x0000ffff },
- { _MMIO(0x2780), 0x00000007 },
- { _MMIO(0x2784), 0x0000ffff },
- { _MMIO(0x2788), 0x00100002 },
- { _MMIO(0x278c), 0x0000fff7 },
- { _MMIO(0x2790), 0x00100002 },
- { _MMIO(0x2794), 0x0000ffcf },
- { _MMIO(0x2798), 0x00100082 },
- { _MMIO(0x279c), 0x0000ffef },
- { _MMIO(0x27a0), 0x001000c2 },
- { _MMIO(0x27a4), 0x0000ffe7 },
- { _MMIO(0x27a8), 0x00100001 },
- { _MMIO(0x27ac), 0x0000ffe7 },
-};
-
-static const struct i915_oa_reg flex_eu_config_test_oa[] = {
-};
-
-static const struct i915_oa_reg mux_config_test_oa[] = {
- { _MMIO(0xd04), 0x00000200 },
- { _MMIO(0x9840), 0x00000000 },
- { _MMIO(0x9884), 0x00000000 },
- { _MMIO(0x9888), 0x10060000 },
- { _MMIO(0x9888), 0x22060000 },
- { _MMIO(0x9888), 0x16060000 },
- { _MMIO(0x9888), 0x24060000 },
- { _MMIO(0x9888), 0x18060000 },
- { _MMIO(0x9888), 0x1a060000 },
- { _MMIO(0x9888), 0x12060000 },
- { _MMIO(0x9888), 0x14060000 },
- { _MMIO(0x9888), 0x10060000 },
- { _MMIO(0x9888), 0x22060000 },
- { _MMIO(0x9884), 0x00000003 },
- { _MMIO(0x9888), 0x16130000 },
- { _MMIO(0x9888), 0x24000001 },
- { _MMIO(0x9888), 0x0e130056 },
- { _MMIO(0x9888), 0x10130000 },
- { _MMIO(0x9888), 0x1a130000 },
- { _MMIO(0x9888), 0x541f0001 },
- { _MMIO(0x9888), 0x181f0000 },
- { _MMIO(0x9888), 0x4c1f0000 },
- { _MMIO(0x9888), 0x301f0000 },
-};
-
-static ssize_t
-show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "1\n");
-}
-
-void
-i915_perf_load_test_config_icl(struct drm_i915_private *dev_priv)
-{
- strlcpy(dev_priv->perf.test_config.uuid,
- "a291665e-244b-4b76-9b9a-01de9d3c8068",
- sizeof(dev_priv->perf.test_config.uuid));
- dev_priv->perf.test_config.id = 1;
-
- dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
-
- dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
-
- dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
-
- dev_priv->perf.test_config.sysfs_metric.name = "a291665e-244b-4b76-9b9a-01de9d3c8068";
- dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
-
- dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
-
- dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
-}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_icl.h b/drivers/gpu/drm/i915/oa/i915_oa_icl.h
deleted file mode 100644
index 5c64112d720e..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_icl.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_ICL_H__
-#define __I915_OA_ICL_H__
-
-struct drm_i915_private;
-
-void i915_perf_load_test_config_icl(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.c b/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.c
deleted file mode 100644
index 1c3a67c9cfe0..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.c
+++ /dev/null
@@ -1,89 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#include <linux/sysfs.h>
-
-#include "i915_drv.h"
-#include "i915_oa_kblgt2.h"
-
-static const struct i915_oa_reg b_counter_config_test_oa[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2770), 0x00000004 },
- { _MMIO(0x2774), 0x00000000 },
- { _MMIO(0x2778), 0x00000003 },
- { _MMIO(0x277c), 0x00000000 },
- { _MMIO(0x2780), 0x00000007 },
- { _MMIO(0x2784), 0x00000000 },
- { _MMIO(0x2788), 0x00100002 },
- { _MMIO(0x278c), 0x0000fff7 },
- { _MMIO(0x2790), 0x00100002 },
- { _MMIO(0x2794), 0x0000ffcf },
- { _MMIO(0x2798), 0x00100082 },
- { _MMIO(0x279c), 0x0000ffef },
- { _MMIO(0x27a0), 0x001000c2 },
- { _MMIO(0x27a4), 0x0000ffe7 },
- { _MMIO(0x27a8), 0x00100001 },
- { _MMIO(0x27ac), 0x0000ffe7 },
-};
-
-static const struct i915_oa_reg flex_eu_config_test_oa[] = {
-};
-
-static const struct i915_oa_reg mux_config_test_oa[] = {
- { _MMIO(0x9840), 0x00000080 },
- { _MMIO(0x9888), 0x11810000 },
- { _MMIO(0x9888), 0x07810013 },
- { _MMIO(0x9888), 0x1f810000 },
- { _MMIO(0x9888), 0x1d810000 },
- { _MMIO(0x9888), 0x1b930040 },
- { _MMIO(0x9888), 0x07e54000 },
- { _MMIO(0x9888), 0x1f908000 },
- { _MMIO(0x9888), 0x11900000 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x45900000 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static ssize_t
-show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "1\n");
-}
-
-void
-i915_perf_load_test_config_kblgt2(struct drm_i915_private *dev_priv)
-{
- strlcpy(dev_priv->perf.test_config.uuid,
- "baa3c7e4-52b6-4b85-801e-465a94b746dd",
- sizeof(dev_priv->perf.test_config.uuid));
- dev_priv->perf.test_config.id = 1;
-
- dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
-
- dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
-
- dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
-
- dev_priv->perf.test_config.sysfs_metric.name = "baa3c7e4-52b6-4b85-801e-465a94b746dd";
- dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
-
- dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
-
- dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
-}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.h b/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.h
deleted file mode 100644
index 810532fa6b63..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_KBLGT2_H__
-#define __I915_OA_KBLGT2_H__
-
-struct drm_i915_private;
-
-void i915_perf_load_test_config_kblgt2(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.c b/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.c
deleted file mode 100644
index ebbe5a9c9fdc..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.c
+++ /dev/null
@@ -1,89 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#include <linux/sysfs.h>
-
-#include "i915_drv.h"
-#include "i915_oa_kblgt3.h"
-
-static const struct i915_oa_reg b_counter_config_test_oa[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2770), 0x00000004 },
- { _MMIO(0x2774), 0x00000000 },
- { _MMIO(0x2778), 0x00000003 },
- { _MMIO(0x277c), 0x00000000 },
- { _MMIO(0x2780), 0x00000007 },
- { _MMIO(0x2784), 0x00000000 },
- { _MMIO(0x2788), 0x00100002 },
- { _MMIO(0x278c), 0x0000fff7 },
- { _MMIO(0x2790), 0x00100002 },
- { _MMIO(0x2794), 0x0000ffcf },
- { _MMIO(0x2798), 0x00100082 },
- { _MMIO(0x279c), 0x0000ffef },
- { _MMIO(0x27a0), 0x001000c2 },
- { _MMIO(0x27a4), 0x0000ffe7 },
- { _MMIO(0x27a8), 0x00100001 },
- { _MMIO(0x27ac), 0x0000ffe7 },
-};
-
-static const struct i915_oa_reg flex_eu_config_test_oa[] = {
-};
-
-static const struct i915_oa_reg mux_config_test_oa[] = {
- { _MMIO(0x9840), 0x00000080 },
- { _MMIO(0x9888), 0x11810000 },
- { _MMIO(0x9888), 0x07810013 },
- { _MMIO(0x9888), 0x1f810000 },
- { _MMIO(0x9888), 0x1d810000 },
- { _MMIO(0x9888), 0x1b930040 },
- { _MMIO(0x9888), 0x07e54000 },
- { _MMIO(0x9888), 0x1f908000 },
- { _MMIO(0x9888), 0x11900000 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x45900000 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static ssize_t
-show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "1\n");
-}
-
-void
-i915_perf_load_test_config_kblgt3(struct drm_i915_private *dev_priv)
-{
- strlcpy(dev_priv->perf.test_config.uuid,
- "f1792f32-6db2-4b50-b4b2-557128f1688d",
- sizeof(dev_priv->perf.test_config.uuid));
- dev_priv->perf.test_config.id = 1;
-
- dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
-
- dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
-
- dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
-
- dev_priv->perf.test_config.sysfs_metric.name = "f1792f32-6db2-4b50-b4b2-557128f1688d";
- dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
-
- dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
-
- dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
-}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.h b/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.h
deleted file mode 100644
index 13d70456fabd..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_KBLGT3_H__
-#define __I915_OA_KBLGT3_H__
-
-struct drm_i915_private;
-
-void i915_perf_load_test_config_kblgt3(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.c b/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.c
deleted file mode 100644
index 1bc359ed34e8..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.c
+++ /dev/null
@@ -1,88 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#include <linux/sysfs.h>
-
-#include "i915_drv.h"
-#include "i915_oa_sklgt2.h"
-
-static const struct i915_oa_reg b_counter_config_test_oa[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2770), 0x00000004 },
- { _MMIO(0x2774), 0x00000000 },
- { _MMIO(0x2778), 0x00000003 },
- { _MMIO(0x277c), 0x00000000 },
- { _MMIO(0x2780), 0x00000007 },
- { _MMIO(0x2784), 0x00000000 },
- { _MMIO(0x2788), 0x00100002 },
- { _MMIO(0x278c), 0x0000fff7 },
- { _MMIO(0x2790), 0x00100002 },
- { _MMIO(0x2794), 0x0000ffcf },
- { _MMIO(0x2798), 0x00100082 },
- { _MMIO(0x279c), 0x0000ffef },
- { _MMIO(0x27a0), 0x001000c2 },
- { _MMIO(0x27a4), 0x0000ffe7 },
- { _MMIO(0x27a8), 0x00100001 },
- { _MMIO(0x27ac), 0x0000ffe7 },
-};
-
-static const struct i915_oa_reg flex_eu_config_test_oa[] = {
-};
-
-static const struct i915_oa_reg mux_config_test_oa[] = {
- { _MMIO(0x9840), 0x00000080 },
- { _MMIO(0x9888), 0x11810000 },
- { _MMIO(0x9888), 0x07810016 },
- { _MMIO(0x9888), 0x1f810000 },
- { _MMIO(0x9888), 0x1d810000 },
- { _MMIO(0x9888), 0x1b930040 },
- { _MMIO(0x9888), 0x07e54000 },
- { _MMIO(0x9888), 0x1f908000 },
- { _MMIO(0x9888), 0x11900000 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x45900000 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static ssize_t
-show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "1\n");
-}
-
-void
-i915_perf_load_test_config_sklgt2(struct drm_i915_private *dev_priv)
-{
- strlcpy(dev_priv->perf.test_config.uuid,
- "1651949f-0ac0-4cb1-a06f-dafd74a407d1",
- sizeof(dev_priv->perf.test_config.uuid));
- dev_priv->perf.test_config.id = 1;
-
- dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
-
- dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
-
- dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
-
- dev_priv->perf.test_config.sysfs_metric.name = "1651949f-0ac0-4cb1-a06f-dafd74a407d1";
- dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
-
- dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
-
- dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
-}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.h b/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.h
deleted file mode 100644
index fda70c51a6ec..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_SKLGT2_H__
-#define __I915_OA_SKLGT2_H__
-
-struct drm_i915_private;
-
-void i915_perf_load_test_config_sklgt2(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.c b/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.c
deleted file mode 100644
index 6e352f881310..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.c
+++ /dev/null
@@ -1,89 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#include <linux/sysfs.h>
-
-#include "i915_drv.h"
-#include "i915_oa_sklgt3.h"
-
-static const struct i915_oa_reg b_counter_config_test_oa[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2770), 0x00000004 },
- { _MMIO(0x2774), 0x00000000 },
- { _MMIO(0x2778), 0x00000003 },
- { _MMIO(0x277c), 0x00000000 },
- { _MMIO(0x2780), 0x00000007 },
- { _MMIO(0x2784), 0x00000000 },
- { _MMIO(0x2788), 0x00100002 },
- { _MMIO(0x278c), 0x0000fff7 },
- { _MMIO(0x2790), 0x00100002 },
- { _MMIO(0x2794), 0x0000ffcf },
- { _MMIO(0x2798), 0x00100082 },
- { _MMIO(0x279c), 0x0000ffef },
- { _MMIO(0x27a0), 0x001000c2 },
- { _MMIO(0x27a4), 0x0000ffe7 },
- { _MMIO(0x27a8), 0x00100001 },
- { _MMIO(0x27ac), 0x0000ffe7 },
-};
-
-static const struct i915_oa_reg flex_eu_config_test_oa[] = {
-};
-
-static const struct i915_oa_reg mux_config_test_oa[] = {
- { _MMIO(0x9840), 0x00000080 },
- { _MMIO(0x9888), 0x11810000 },
- { _MMIO(0x9888), 0x07810013 },
- { _MMIO(0x9888), 0x1f810000 },
- { _MMIO(0x9888), 0x1d810000 },
- { _MMIO(0x9888), 0x1b930040 },
- { _MMIO(0x9888), 0x07e54000 },
- { _MMIO(0x9888), 0x1f908000 },
- { _MMIO(0x9888), 0x11900000 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x45900000 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static ssize_t
-show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "1\n");
-}
-
-void
-i915_perf_load_test_config_sklgt3(struct drm_i915_private *dev_priv)
-{
- strlcpy(dev_priv->perf.test_config.uuid,
- "2b985803-d3c9-4629-8a4f-634bfecba0e8",
- sizeof(dev_priv->perf.test_config.uuid));
- dev_priv->perf.test_config.id = 1;
-
- dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
-
- dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
-
- dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
-
- dev_priv->perf.test_config.sysfs_metric.name = "2b985803-d3c9-4629-8a4f-634bfecba0e8";
- dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
-
- dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
-
- dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
-}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.h b/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.h
deleted file mode 100644
index df74eba5799e..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_SKLGT3_H__
-#define __I915_OA_SKLGT3_H__
-
-struct drm_i915_private;
-
-void i915_perf_load_test_config_sklgt3(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.c b/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.c
deleted file mode 100644
index 8f345115a306..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.c
+++ /dev/null
@@ -1,89 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#include <linux/sysfs.h>
-
-#include "i915_drv.h"
-#include "i915_oa_sklgt4.h"
-
-static const struct i915_oa_reg b_counter_config_test_oa[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2770), 0x00000004 },
- { _MMIO(0x2774), 0x00000000 },
- { _MMIO(0x2778), 0x00000003 },
- { _MMIO(0x277c), 0x00000000 },
- { _MMIO(0x2780), 0x00000007 },
- { _MMIO(0x2784), 0x00000000 },
- { _MMIO(0x2788), 0x00100002 },
- { _MMIO(0x278c), 0x0000fff7 },
- { _MMIO(0x2790), 0x00100002 },
- { _MMIO(0x2794), 0x0000ffcf },
- { _MMIO(0x2798), 0x00100082 },
- { _MMIO(0x279c), 0x0000ffef },
- { _MMIO(0x27a0), 0x001000c2 },
- { _MMIO(0x27a4), 0x0000ffe7 },
- { _MMIO(0x27a8), 0x00100001 },
- { _MMIO(0x27ac), 0x0000ffe7 },
-};
-
-static const struct i915_oa_reg flex_eu_config_test_oa[] = {
-};
-
-static const struct i915_oa_reg mux_config_test_oa[] = {
- { _MMIO(0x9840), 0x00000080 },
- { _MMIO(0x9888), 0x11810000 },
- { _MMIO(0x9888), 0x07810013 },
- { _MMIO(0x9888), 0x1f810000 },
- { _MMIO(0x9888), 0x1d810000 },
- { _MMIO(0x9888), 0x1b930040 },
- { _MMIO(0x9888), 0x07e54000 },
- { _MMIO(0x9888), 0x1f908000 },
- { _MMIO(0x9888), 0x11900000 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x45900000 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static ssize_t
-show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "1\n");
-}
-
-void
-i915_perf_load_test_config_sklgt4(struct drm_i915_private *dev_priv)
-{
- strlcpy(dev_priv->perf.test_config.uuid,
- "882fa433-1f4a-4a67-a962-c741888fe5f5",
- sizeof(dev_priv->perf.test_config.uuid));
- dev_priv->perf.test_config.id = 1;
-
- dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
-
- dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
-
- dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
-
- dev_priv->perf.test_config.sysfs_metric.name = "882fa433-1f4a-4a67-a962-c741888fe5f5";
- dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
-
- dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
-
- dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
-}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.h b/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.h
deleted file mode 100644
index 378ab7ab78d5..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_SKLGT4_H__
-#define __I915_OA_SKLGT4_H__
-
-struct drm_i915_private;
-
-void i915_perf_load_test_config_sklgt4(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_tgl.c b/drivers/gpu/drm/i915/oa/i915_oa_tgl.c
deleted file mode 100644
index a29d93707345..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_tgl.c
+++ /dev/null
@@ -1,121 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2018 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#include <linux/sysfs.h>
-
-#include "i915_drv.h"
-#include "i915_oa_tgl.h"
-
-static const struct i915_oa_reg b_counter_config_test_oa[] = {
- { _MMIO(0xD920), 0x00000000 },
- { _MMIO(0xD900), 0x00000000 },
- { _MMIO(0xD904), 0xF0800000 },
- { _MMIO(0xD910), 0x00000000 },
- { _MMIO(0xD914), 0xF0800000 },
- { _MMIO(0xDC40), 0x00FF0000 },
- { _MMIO(0xD940), 0x00000004 },
- { _MMIO(0xD944), 0x0000FFFF },
- { _MMIO(0xDC00), 0x00000004 },
- { _MMIO(0xDC04), 0x0000FFFF },
- { _MMIO(0xD948), 0x00000003 },
- { _MMIO(0xD94C), 0x0000FFFF },
- { _MMIO(0xDC08), 0x00000003 },
- { _MMIO(0xDC0C), 0x0000FFFF },
- { _MMIO(0xD950), 0x00000007 },
- { _MMIO(0xD954), 0x0000FFFF },
- { _MMIO(0xDC10), 0x00000007 },
- { _MMIO(0xDC14), 0x0000FFFF },
- { _MMIO(0xD958), 0x00100002 },
- { _MMIO(0xD95C), 0x0000FFF7 },
- { _MMIO(0xDC18), 0x00100002 },
- { _MMIO(0xDC1C), 0x0000FFF7 },
- { _MMIO(0xD960), 0x00100002 },
- { _MMIO(0xD964), 0x0000FFCF },
- { _MMIO(0xDC20), 0x00100002 },
- { _MMIO(0xDC24), 0x0000FFCF },
- { _MMIO(0xD968), 0x00100082 },
- { _MMIO(0xD96C), 0x0000FFEF },
- { _MMIO(0xDC28), 0x00100082 },
- { _MMIO(0xDC2C), 0x0000FFEF },
- { _MMIO(0xD970), 0x001000C2 },
- { _MMIO(0xD974), 0x0000FFE7 },
- { _MMIO(0xDC30), 0x001000C2 },
- { _MMIO(0xDC34), 0x0000FFE7 },
- { _MMIO(0xD978), 0x00100001 },
- { _MMIO(0xD97C), 0x0000FFE7 },
- { _MMIO(0xDC38), 0x00100001 },
- { _MMIO(0xDC3C), 0x0000FFE7 },
-};
-
-static const struct i915_oa_reg flex_eu_config_test_oa[] = {
-};
-
-static const struct i915_oa_reg mux_config_test_oa[] = {
- { _MMIO(0x0D04), 0x00000200 },
- { _MMIO(0x9840), 0x00000000 },
- { _MMIO(0x9884), 0x00000000 },
- { _MMIO(0x9888), 0x280E0000 },
- { _MMIO(0x9888), 0x1E0E0147 },
- { _MMIO(0x9888), 0x180E0000 },
- { _MMIO(0x9888), 0x160E0000 },
- { _MMIO(0x9888), 0x1E0F1000 },
- { _MMIO(0x9888), 0x1E104000 },
- { _MMIO(0x9888), 0x2E020100 },
- { _MMIO(0x9888), 0x2C030004 },
- { _MMIO(0x9888), 0x38003000 },
- { _MMIO(0x9888), 0x1E0A8000 },
- { _MMIO(0x9884), 0x00000003 },
- { _MMIO(0x9888), 0x49110000 },
- { _MMIO(0x9888), 0x5D101400 },
- { _MMIO(0x9888), 0x1D140020 },
- { _MMIO(0x9888), 0x1D1103A3 },
- { _MMIO(0x9888), 0x01110000 },
- { _MMIO(0x9888), 0x61111000 },
- { _MMIO(0x9888), 0x1F128000 },
- { _MMIO(0x9888), 0x17100000 },
- { _MMIO(0x9888), 0x55100630 },
- { _MMIO(0x9888), 0x57100000 },
- { _MMIO(0x9888), 0x31100000 },
- { _MMIO(0x9884), 0x00000003 },
- { _MMIO(0x9888), 0x65100002 },
- { _MMIO(0x9884), 0x00000000 },
- { _MMIO(0x9888), 0x42000001 },
-};
-
-static ssize_t
-show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "1\n");
-}
-
-void
-i915_perf_load_test_config_tgl(struct drm_i915_private *dev_priv)
-{
- strlcpy(dev_priv->perf.test_config.uuid,
- "80a833f0-2504-4321-8894-e9277844ce7b",
- sizeof(dev_priv->perf.test_config.uuid));
- dev_priv->perf.test_config.id = 1;
-
- dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
-
- dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
-
- dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
-
- dev_priv->perf.test_config.sysfs_metric.name = "80a833f0-2504-4321-8894-e9277844ce7b";
- dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
-
- dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
-
- dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
-}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_tgl.h b/drivers/gpu/drm/i915/oa/i915_oa_tgl.h
deleted file mode 100644
index 4c25f0be825c..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_tgl.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2018 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_TGL_H__
-#define __I915_OA_TGL_H__
-
-struct drm_i915_private;
-
-void i915_perf_load_test_config_tgl(struct drm_i915_private *dev_priv);
-
-#endif
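With the last of the per-platform OA test configs deleted above, the perf selftest changes further down (see the drivers/gpu/drm/i915/selftests/i915_perf.c hunk) create a throwaway config at runtime instead and pass references to it around with i915_oa_config_get()/i915_oa_config_put(). A minimal userspace restatement of that get/put lifetime pattern, using C11 atomics rather than the kernel's kref and with an invented struct layout, looks like:

#include <stdatomic.h>
#include <stdlib.h>
#include <string.h>

/* Invented illustration of the kref-style get/put used by the selftests. */
struct oa_config_sketch {
	atomic_int ref;
	char uuid[40];
};

static struct oa_config_sketch *config_get(struct oa_config_sketch *c)
{
	atomic_fetch_add(&c->ref, 1);
	return c;
}

static void config_put(struct oa_config_sketch *c)
{
	if (atomic_fetch_sub(&c->ref, 1) == 1)	/* last reference frees */
		free(c);
}

int main(void)
{
	struct oa_config_sketch *c = calloc(1, sizeof(*c));

	if (!c)
		return 1;
	atomic_init(&c->ref, 1);
	strcpy(c->uuid, "12345678-1234-1234-1234-1234567890ab");

	config_get(c);	/* a stream takes its own reference ...        */
	config_put(c);	/* ... and drops it when the stream is closed  */
	config_put(c);	/* final put, object is freed                   */
	return 0;
}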
diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c
index 68bbb1580162..4002c984c2e0 100644
--- a/drivers/gpu/drm/i915/selftests/i915_active.c
+++ b/drivers/gpu/drm/i915/selftests/i915_active.c
@@ -153,7 +153,7 @@ static int live_active_wait(void *arg)
if (IS_ERR(active))
return PTR_ERR(active);
- i915_active_wait(&active->base);
+ __i915_active_wait(&active->base, TASK_UNINTERRUPTIBLE);
if (!READ_ONCE(active->retired)) {
struct drm_printer p = drm_err_printer(__func__);
@@ -228,11 +228,11 @@ static int live_active_barrier(void *arg)
}
i915_active_release(&active->base);
+ if (err)
+ goto out;
- if (err == 0)
- err = i915_active_wait(&active->base);
-
- if (err == 0 && !READ_ONCE(active->retired)) {
+ __i915_active_wait(&active->base, TASK_UNINTERRUPTIBLE);
+ if (!READ_ONCE(active->retired)) {
pr_err("i915_active not retired after flushing barriers!\n");
err = -EINVAL;
}
@@ -277,7 +277,7 @@ static struct intel_engine_cs *node_to_barrier(struct active_node *it)
void i915_active_print(struct i915_active *ref, struct drm_printer *m)
{
- drm_printf(m, "active %pS:%pS\n", ref->active, ref->retire);
+ drm_printf(m, "active %ps:%ps\n", ref->active, ref->retire);
drm_printf(m, "\tcount: %d\n", atomic_read(&ref->count));
drm_printf(m, "\tpreallocated barriers? %s\n",
yesno(!llist_empty(&ref->preallocated_barriers)));
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c
index 623759b73bb4..88d400b9df88 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -125,8 +125,6 @@ static void pm_resume(struct drm_i915_private *i915)
*/
with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
i915_ggtt_resume(&i915->ggtt);
- i915_gem_restore_fences(&i915->ggtt);
-
i915_gem_resume(i915);
}
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index 06ef88510209..028baae9631f 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -45,8 +45,8 @@ static void quirk_add(struct drm_i915_gem_object *obj,
static int populate_ggtt(struct i915_ggtt *ggtt, struct list_head *objects)
{
- unsigned long unbound, bound, count;
struct drm_i915_gem_object *obj;
+ unsigned long count;
count = 0;
do {
@@ -72,30 +72,6 @@ static int populate_ggtt(struct i915_ggtt *ggtt, struct list_head *objects)
pr_debug("Filled GGTT with %lu pages [%llu total]\n",
count, ggtt->vm.total / PAGE_SIZE);
- bound = 0;
- unbound = 0;
- list_for_each_entry(obj, objects, st_link) {
- GEM_BUG_ON(!obj->mm.quirked);
-
- if (atomic_read(&obj->bind_count))
- bound++;
- else
- unbound++;
- }
- GEM_BUG_ON(bound + unbound != count);
-
- if (unbound) {
- pr_err("%s: Found %lu objects unbound, expected %u!\n",
- __func__, unbound, 0);
- return -EINVAL;
- }
-
- if (bound != count) {
- pr_err("%s: Found %lu objects bound, expected %lu!\n",
- __func__, bound, count);
- return -EINVAL;
- }
-
if (list_empty(&ggtt->vm.bound_list)) {
pr_err("No objects on the GGTT inactive list!\n");
return -EINVAL;
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index b342bef5e7c9..2e471500a646 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -331,9 +331,6 @@ static void close_object_list(struct list_head *objects,
vma = i915_vma_instance(obj, vm, NULL);
if (!IS_ERR(vma))
ignored = i915_vma_unbind(vma);
- /* Only ppgtt vma may be closed before the object is freed */
- if (!IS_ERR(vma) && !i915_vma_is_ggtt(vma))
- i915_vma_close(vma);
list_del(&obj->st_link);
i915_gem_object_put(obj);
@@ -591,7 +588,7 @@ static int walk_hole(struct i915_address_space *vm,
pr_err("%s bind failed at %llx + %llx [hole %llx- %llx] with err=%d\n",
__func__, addr, vma->size,
hole_start, hole_end, err);
- goto err_close;
+ goto err_put;
}
i915_vma_unpin(vma);
@@ -600,14 +597,14 @@ static int walk_hole(struct i915_address_space *vm,
pr_err("%s incorrect at %llx + %llx\n",
__func__, addr, vma->size);
err = -EINVAL;
- goto err_close;
+ goto err_put;
}
err = i915_vma_unbind(vma);
if (err) {
pr_err("%s unbind failed at %llx + %llx with err=%d\n",
__func__, addr, vma->size, err);
- goto err_close;
+ goto err_put;
}
GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
@@ -616,13 +613,10 @@ static int walk_hole(struct i915_address_space *vm,
"%s timed out at %llx\n",
__func__, addr)) {
err = -EINTR;
- goto err_close;
+ goto err_put;
}
}
-err_close:
- if (!i915_vma_is_ggtt(vma))
- i915_vma_close(vma);
err_put:
i915_gem_object_put(obj);
if (err)
@@ -675,7 +669,7 @@ static int pot_hole(struct i915_address_space *vm,
addr,
hole_start, hole_end,
err);
- goto err;
+ goto err_obj;
}
if (!drm_mm_node_allocated(&vma->node) ||
@@ -685,7 +679,7 @@ static int pot_hole(struct i915_address_space *vm,
i915_vma_unpin(vma);
err = i915_vma_unbind(vma);
err = -EINVAL;
- goto err;
+ goto err_obj;
}
i915_vma_unpin(vma);
@@ -697,13 +691,10 @@ static int pot_hole(struct i915_address_space *vm,
"%s timed out after %d/%d\n",
__func__, pot, fls64(hole_end - 1) - 1)) {
err = -EINTR;
- goto err;
+ goto err_obj;
}
}
-err:
- if (!i915_vma_is_ggtt(vma))
- i915_vma_close(vma);
err_obj:
i915_gem_object_put(obj);
return err;
@@ -778,7 +769,7 @@ static int drunk_hole(struct i915_address_space *vm,
addr, BIT_ULL(size),
hole_start, hole_end,
err);
- goto err;
+ goto err_obj;
}
if (!drm_mm_node_allocated(&vma->node) ||
@@ -788,7 +779,7 @@ static int drunk_hole(struct i915_address_space *vm,
i915_vma_unpin(vma);
err = i915_vma_unbind(vma);
err = -EINVAL;
- goto err;
+ goto err_obj;
}
i915_vma_unpin(vma);
@@ -799,13 +790,10 @@ static int drunk_hole(struct i915_address_space *vm,
"%s timed out after %d/%d\n",
__func__, n, count)) {
err = -EINTR;
- goto err;
+ goto err_obj;
}
}
-err:
- if (!i915_vma_is_ggtt(vma))
- i915_vma_close(vma);
err_obj:
i915_gem_object_put(obj);
kfree(order);
@@ -1229,7 +1217,6 @@ static void track_vma_bind(struct i915_vma *vma)
{
struct drm_i915_gem_object *obj = vma->obj;
- atomic_inc(&obj->bind_count); /* track for eviction later */
__i915_gem_object_pin_pages(obj);
GEM_BUG_ON(vma->pages);
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index 0a953bfc0585..5dd5d81646c4 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -37,6 +37,7 @@ selftest(gem, i915_gem_live_selftests)
selftest(evict, i915_gem_evict_live_selftests)
selftest(hugepages, i915_gem_huge_page_live_selftests)
selftest(gem_contexts, i915_gem_context_live_selftests)
+selftest(gem_execbuf, i915_gem_execbuffer_live_selftests)
selftest(blt, i915_gem_object_blt_live_selftests)
selftest(client, i915_gem_client_blt_live_selftests)
selftest(reset, intel_reset_live_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
index 5b39bab4da1d..6a2be7d0dd95 100644
--- a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
@@ -16,6 +16,7 @@
* Tests are executed in order by igt/drv_selftest
*/
selftest(sanitycheck, i915_mock_sanitycheck) /* keep first (igt selfcheck) */
+selftest(shmem, shmem_utils_mock_selftests)
selftest(fence, i915_sw_fence_mock_selftests)
selftest(scatterlist, scatterlist_mock_selftests)
selftest(syncmap, i915_syncmap_mock_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_perf.c b/drivers/gpu/drm/i915/selftests/i915_perf.c
index d1a1568c47ba..8eb3108f1767 100644
--- a/drivers/gpu/drm/i915/selftests/i915_perf.c
+++ b/drivers/gpu/drm/i915/selftests/i915_perf.c
@@ -14,10 +14,85 @@
#include "igt_flush_test.h"
#include "lib_sw_fence.h"
+#define TEST_OA_CONFIG_UUID "12345678-1234-1234-1234-1234567890ab"
+
+static int
+alloc_empty_config(struct i915_perf *perf)
+{
+ struct i915_oa_config *oa_config;
+
+ oa_config = kzalloc(sizeof(*oa_config), GFP_KERNEL);
+ if (!oa_config)
+ return -ENOMEM;
+
+ oa_config->perf = perf;
+ kref_init(&oa_config->ref);
+
+ strlcpy(oa_config->uuid, TEST_OA_CONFIG_UUID, sizeof(oa_config->uuid));
+
+ mutex_lock(&perf->metrics_lock);
+
+ oa_config->id = idr_alloc(&perf->metrics_idr, oa_config, 2, 0, GFP_KERNEL);
+ if (oa_config->id < 0) {
+ mutex_unlock(&perf->metrics_lock);
+ i915_oa_config_put(oa_config);
+ return -ENOMEM;
+ }
+
+ mutex_unlock(&perf->metrics_lock);
+
+ return 0;
+}
+
+static void
+destroy_empty_config(struct i915_perf *perf)
+{
+ struct i915_oa_config *oa_config = NULL, *tmp;
+ int id;
+
+ mutex_lock(&perf->metrics_lock);
+
+ idr_for_each_entry(&perf->metrics_idr, tmp, id) {
+ if (!strcmp(tmp->uuid, TEST_OA_CONFIG_UUID)) {
+ oa_config = tmp;
+ break;
+ }
+ }
+
+ if (oa_config)
+ idr_remove(&perf->metrics_idr, oa_config->id);
+
+ mutex_unlock(&perf->metrics_lock);
+
+ if (oa_config)
+ i915_oa_config_put(oa_config);
+}
+
+static struct i915_oa_config *
+get_empty_config(struct i915_perf *perf)
+{
+ struct i915_oa_config *oa_config = NULL, *tmp;
+ int id;
+
+ mutex_lock(&perf->metrics_lock);
+
+ idr_for_each_entry(&perf->metrics_idr, tmp, id) {
+ if (!strcmp(tmp->uuid, TEST_OA_CONFIG_UUID)) {
+ oa_config = i915_oa_config_get(tmp);
+ break;
+ }
+ }
+
+ mutex_unlock(&perf->metrics_lock);
+
+ return oa_config;
+}
+
static struct i915_perf_stream *
test_stream(struct i915_perf *perf)
{
struct drm_i915_perf_open_param param = {};
+ struct i915_oa_config *oa_config = get_empty_config(perf);
struct perf_open_properties props = {
.engine = intel_engine_lookup_user(perf->i915,
I915_ENGINE_CLASS_RENDER,
@@ -25,13 +100,19 @@ test_stream(struct i915_perf *perf)
.sample_flags = SAMPLE_OA_REPORT,
.oa_format = IS_GEN(perf->i915, 12) ?
I915_OA_FORMAT_A32u40_A4u32_B8_C8 : I915_OA_FORMAT_C4_B8,
- .metrics_set = 1,
};
struct i915_perf_stream *stream;
+ if (!oa_config)
+ return NULL;
+
+ props.metrics_set = oa_config->id;
+
stream = kzalloc(sizeof(*stream), GFP_KERNEL);
- if (!stream)
+ if (!stream) {
+ i915_oa_config_put(oa_config);
return NULL;
+ }
stream->perf = perf;
@@ -42,6 +123,8 @@ test_stream(struct i915_perf *perf)
}
mutex_unlock(&perf->lock);
+ i915_oa_config_put(oa_config);
+
return stream;
}
@@ -138,8 +221,7 @@ static int live_noa_delay(void *arg)
goto out;
}
- if (rq->engine->emit_init_breadcrumb &&
- i915_request_timeline(rq)->has_initial_breadcrumb) {
+ if (rq->engine->emit_init_breadcrumb) {
err = rq->engine->emit_init_breadcrumb(rq);
if (err) {
i915_request_add(rq);
@@ -180,8 +262,7 @@ static int live_noa_delay(void *arg)
delay = intel_read_status_page(stream->engine, 0x102);
delay -= intel_read_status_page(stream->engine, 0x100);
- delay = div_u64(mul_u32_u32(delay, 1000 * 1000),
- RUNTIME_INFO(i915)->cs_timestamp_frequency_khz);
+ delay = i915_cs_timestamp_ticks_to_ns(i915, delay);
pr_info("GPU delay: %uns, expected %lluns\n",
delay, expected);
@@ -206,6 +287,7 @@ int i915_perf_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_noa_delay),
};
struct i915_perf *perf = &i915->perf;
+ int err;
if (!perf->metrics_kobj || !perf->ops.enable_metric_set)
return 0;
@@ -213,5 +295,13 @@ int i915_perf_live_selftests(struct drm_i915_private *i915)
if (intel_gt_is_wedged(&i915->gt))
return 0;
- return i915_subtests(tests, i915);
+ err = alloc_empty_config(&i915->perf);
+ if (err)
+ return err;
+
+ err = i915_subtests(tests, i915);
+
+ destroy_empty_config(&i915->perf);
+
+ return err;
}
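The live_noa_delay() hunk above also swaps an open-coded conversion for i915_cs_timestamp_ticks_to_ns(). The removed line computed ns = ticks * 1,000,000 / cs_timestamp_frequency_khz; the sketch below simply restates that arithmetic (the 12 MHz frequency in the example is illustrative, not taken from this diff):

#include <stdint.h>
#include <stdio.h>

/* ns = ticks * 1e6 / freq_khz, as in the removed div_u64/mul_u32_u32 line. */
static uint64_t cs_ticks_to_ns(uint32_t ticks, uint32_t freq_khz)
{
	return (uint64_t)ticks * 1000 * 1000 / freq_khz;
}

int main(void)
{
	/* At 12000 kHz, 12000 ticks correspond to 1,000,000 ns (1 ms). */
	printf("%llu ns\n", (unsigned long long)cs_ticks_to_ns(12000, 12000));
	return 0;
}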
diff --git a/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h b/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h
index 3bf7f53e9924..d8da142985eb 100644
--- a/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h
@@ -16,5 +16,6 @@
* Tests are executed in order by igt/i915_selftest
*/
selftest(engine_cs, intel_engine_cs_perf_selftests)
+selftest(request, i915_request_perf_selftests)
selftest(blt, i915_gem_object_blt_perf_selftests)
selftest(region, intel_memory_region_perf_selftests)
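The new i915_request perf selftests registered here are added in the i915_request.c diff that follows; perf_series_engines() there allocates struct perf_series with a flexible array of contexts via struct_size(ps, ce, nengines). A self-contained sketch of that allocation pattern (simplified types, no overflow check - the kernel macro also guards against overflow) is:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct series {
	unsigned int nengines;
	void *ce[];			/* flexible array member */
};

static struct series *series_alloc(unsigned int nengines)
{
	struct series *ps;

	/* struct_size(ps, ce, nengines) boils down to this, plus overflow checks. */
	ps = calloc(1, offsetof(struct series, ce) + nengines * sizeof(void *));
	if (ps)
		ps->nengines = nengines;
	return ps;
}

int main(void)
{
	struct series *ps = series_alloc(4);

	if (!ps)
		return 1;
	printf("room for %u engine contexts\n", ps->nengines);
	free(ps);
	return 0;
}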
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index f89d9c42f1fa..6014e8dfcbb1 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -23,11 +23,13 @@
*/
#include <linux/prime_numbers.h>
+#include <linux/pm_qos.h>
#include "gem/i915_gem_pm.h"
#include "gem/selftests/mock_context.h"
#include "gt/intel_engine_pm.h"
+#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "i915_random.h"
@@ -51,6 +53,11 @@ static unsigned int num_uabi_engines(struct drm_i915_private *i915)
return count;
}
+static struct intel_engine_cs *rcs0(struct drm_i915_private *i915)
+{
+ return intel_engine_lookup_user(i915, I915_ENGINE_CLASS_RENDER, 0);
+}
+
static int igt_add_request(void *arg)
{
struct drm_i915_private *i915 = arg;
@@ -58,7 +65,7 @@ static int igt_add_request(void *arg)
/* Basic preliminary test to create a request and let it loose! */
- request = mock_request(i915->engine[RCS0]->kernel_context, HZ / 10);
+ request = mock_request(rcs0(i915)->kernel_context, HZ / 10);
if (!request)
return -ENOMEM;
@@ -76,7 +83,7 @@ static int igt_wait_request(void *arg)
/* Submit a request, then wait upon it */
- request = mock_request(i915->engine[RCS0]->kernel_context, T);
+ request = mock_request(rcs0(i915)->kernel_context, T);
if (!request)
return -ENOMEM;
@@ -145,7 +152,7 @@ static int igt_fence_wait(void *arg)
/* Submit a request, treat it as a fence and wait upon it */
- request = mock_request(i915->engine[RCS0]->kernel_context, T);
+ request = mock_request(rcs0(i915)->kernel_context, T);
if (!request)
return -ENOMEM;
@@ -420,7 +427,7 @@ static int mock_breadcrumbs_smoketest(void *arg)
{
struct drm_i915_private *i915 = arg;
struct smoketest t = {
- .engine = i915->engine[RCS0],
+ .engine = rcs0(i915),
.ncontexts = 1024,
.max_batch = 1024,
.request_alloc = __mock_request_alloc
@@ -809,10 +816,12 @@ static int recursive_batch_resolve(struct i915_vma *batch)
return PTR_ERR(cmd);
*cmd = MI_BATCH_BUFFER_END;
- intel_gt_chipset_flush(batch->vm->gt);
+ __i915_gem_object_flush_map(batch->obj, 0, sizeof(*cmd));
i915_gem_object_unpin_map(batch->obj);
+ intel_gt_chipset_flush(batch->vm->gt);
+
return 0;
}
@@ -858,13 +867,6 @@ static int live_all_engines(void *arg)
goto out_request;
}
- err = engine->emit_bb_start(request[idx],
- batch->node.start,
- batch->node.size,
- 0);
- GEM_BUG_ON(err);
- request[idx]->batch = batch;
-
i915_vma_lock(batch);
err = i915_request_await_object(request[idx], batch->obj, 0);
if (err == 0)
@@ -872,6 +874,13 @@ static int live_all_engines(void *arg)
i915_vma_unlock(batch);
GEM_BUG_ON(err);
+ err = engine->emit_bb_start(request[idx],
+ batch->node.start,
+ batch->node.size,
+ 0);
+ GEM_BUG_ON(err);
+ request[idx]->batch = batch;
+
i915_request_get(request[idx]);
i915_request_add(request[idx]);
idx++;
@@ -986,13 +995,6 @@ static int live_sequential_engines(void *arg)
}
}
- err = engine->emit_bb_start(request[idx],
- batch->node.start,
- batch->node.size,
- 0);
- GEM_BUG_ON(err);
- request[idx]->batch = batch;
-
i915_vma_lock(batch);
err = i915_request_await_object(request[idx],
batch->obj, false);
@@ -1001,6 +1003,13 @@ static int live_sequential_engines(void *arg)
i915_vma_unlock(batch);
GEM_BUG_ON(err);
+ err = engine->emit_bb_start(request[idx],
+ batch->node.start,
+ batch->node.size,
+ 0);
+ GEM_BUG_ON(err);
+ request[idx]->batch = batch;
+
i915_request_get(request[idx]);
i915_request_add(request[idx]);
@@ -1053,9 +1062,12 @@ out_request:
I915_MAP_WC);
if (!IS_ERR(cmd)) {
*cmd = MI_BATCH_BUFFER_END;
- intel_gt_chipset_flush(engine->gt);
+ __i915_gem_object_flush_map(request[idx]->batch->obj,
+ 0, sizeof(*cmd));
i915_gem_object_unpin_map(request[idx]->batch->obj);
+
+ intel_gt_chipset_flush(engine->gt);
}
i915_vma_put(request[idx]->batch);
@@ -1233,7 +1245,7 @@ static int live_parallel_engines(void *arg)
struct igt_live_test t;
unsigned int idx;
- snprintf(name, sizeof(name), "%pS", fn);
+ snprintf(name, sizeof(name), "%ps", *fn);
err = igt_live_test_begin(&t, i915, __func__, name);
if (err)
break;
@@ -1470,3 +1482,572 @@ int i915_request_live_selftests(struct drm_i915_private *i915)
return i915_subtests(tests, i915);
}
+
+static int switch_to_kernel_sync(struct intel_context *ce, int err)
+{
+ struct i915_request *rq;
+ struct dma_fence *fence;
+
+ rq = intel_engine_create_kernel_request(ce->engine);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ fence = i915_active_fence_get(&ce->timeline->last_request);
+ if (fence) {
+ i915_request_await_dma_fence(rq, fence);
+ dma_fence_put(fence);
+ }
+
+ rq = i915_request_get(rq);
+ i915_request_add(rq);
+ if (i915_request_wait(rq, 0, HZ / 2) < 0 && !err)
+ err = -ETIME;
+ i915_request_put(rq);
+
+ while (!err && !intel_engine_is_idle(ce->engine))
+ intel_engine_flush_submission(ce->engine);
+
+ return err;
+}
+
+struct perf_stats {
+ struct intel_engine_cs *engine;
+ unsigned long count;
+ ktime_t time;
+ ktime_t busy;
+ u64 runtime;
+};
+
+struct perf_series {
+ struct drm_i915_private *i915;
+ unsigned int nengines;
+ struct intel_context *ce[];
+};
+
+static int s_sync0(void *arg)
+{
+ struct perf_series *ps = arg;
+ IGT_TIMEOUT(end_time);
+ unsigned int idx = 0;
+ int err = 0;
+
+ GEM_BUG_ON(!ps->nengines);
+ do {
+ struct i915_request *rq;
+
+ rq = i915_request_create(ps->ce[idx]);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ if (i915_request_wait(rq, 0, HZ / 5) < 0)
+ err = -ETIME;
+ i915_request_put(rq);
+ if (err)
+ break;
+
+ if (++idx == ps->nengines)
+ idx = 0;
+ } while (!__igt_timeout(end_time, NULL));
+
+ return err;
+}
+
+static int s_sync1(void *arg)
+{
+ struct perf_series *ps = arg;
+ struct i915_request *prev = NULL;
+ IGT_TIMEOUT(end_time);
+ unsigned int idx = 0;
+ int err = 0;
+
+ GEM_BUG_ON(!ps->nengines);
+ do {
+ struct i915_request *rq;
+
+ rq = i915_request_create(ps->ce[idx]);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ if (prev && i915_request_wait(prev, 0, HZ / 5) < 0)
+ err = -ETIME;
+ i915_request_put(prev);
+ prev = rq;
+ if (err)
+ break;
+
+ if (++idx == ps->nengines)
+ idx = 0;
+ } while (!__igt_timeout(end_time, NULL));
+ i915_request_put(prev);
+
+ return err;
+}
+
+static int s_many(void *arg)
+{
+ struct perf_series *ps = arg;
+ IGT_TIMEOUT(end_time);
+ unsigned int idx = 0;
+
+ GEM_BUG_ON(!ps->nengines);
+ do {
+ struct i915_request *rq;
+
+ rq = i915_request_create(ps->ce[idx]);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ i915_request_add(rq);
+
+ if (++idx == ps->nengines)
+ idx = 0;
+ } while (!__igt_timeout(end_time, NULL));
+
+ return 0;
+}
+
+static int perf_series_engines(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ static int (* const func[])(void *arg) = {
+ s_sync0,
+ s_sync1,
+ s_many,
+ NULL,
+ };
+ const unsigned int nengines = num_uabi_engines(i915);
+ struct intel_engine_cs *engine;
+ int (* const *fn)(void *arg);
+ struct pm_qos_request qos;
+ struct perf_stats *stats;
+ struct perf_series *ps;
+ unsigned int idx;
+ int err = 0;
+
+ stats = kcalloc(nengines, sizeof(*stats), GFP_KERNEL);
+ if (!stats)
+ return -ENOMEM;
+
+ ps = kzalloc(struct_size(ps, ce, nengines), GFP_KERNEL);
+ if (!ps) {
+ kfree(stats);
+ return -ENOMEM;
+ }
+
+ cpu_latency_qos_add_request(&qos, 0); /* disable cstates */
+
+ ps->i915 = i915;
+ ps->nengines = nengines;
+
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
+ struct intel_context *ce;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ goto out;
+
+ err = intel_context_pin(ce);
+ if (err) {
+ intel_context_put(ce);
+ goto out;
+ }
+
+ ps->ce[idx++] = ce;
+ }
+ GEM_BUG_ON(idx != ps->nengines);
+
+ for (fn = func; *fn && !err; fn++) {
+ char name[KSYM_NAME_LEN];
+ struct igt_live_test t;
+
+ snprintf(name, sizeof(name), "%ps", *fn);
+ err = igt_live_test_begin(&t, i915, __func__, name);
+ if (err)
+ break;
+
+ for (idx = 0; idx < nengines; idx++) {
+ struct perf_stats *p =
+ memset(&stats[idx], 0, sizeof(stats[idx]));
+ struct intel_context *ce = ps->ce[idx];
+
+ p->engine = ps->ce[idx]->engine;
+ intel_engine_pm_get(p->engine);
+
+ if (intel_engine_supports_stats(p->engine))
+ p->busy = intel_engine_get_busy_time(p->engine) + 1;
+ p->runtime = -intel_context_get_total_runtime_ns(ce);
+ p->time = ktime_get();
+ }
+
+ err = (*fn)(ps);
+ if (igt_live_test_end(&t))
+ err = -EIO;
+
+ for (idx = 0; idx < nengines; idx++) {
+ struct perf_stats *p = &stats[idx];
+ struct intel_context *ce = ps->ce[idx];
+ int integer, decimal;
+ u64 busy, dt;
+
+ p->time = ktime_sub(ktime_get(), p->time);
+ if (p->busy) {
+ p->busy = ktime_sub(intel_engine_get_busy_time(p->engine),
+ p->busy - 1);
+ }
+
+ err = switch_to_kernel_sync(ce, err);
+ p->runtime += intel_context_get_total_runtime_ns(ce);
+ intel_engine_pm_put(p->engine);
+
+ busy = 100 * ktime_to_ns(p->busy);
+ dt = ktime_to_ns(p->time);
+ if (dt) {
+ integer = div64_u64(busy, dt);
+ busy -= integer * dt;
+ decimal = div64_u64(100 * busy, dt);
+ } else {
+ integer = 0;
+ decimal = 0;
+ }
+
+ pr_info("%s %5s: { seqno:%d, busy:%d.%02d%%, runtime:%lldms, walltime:%lldms }\n",
+ name, p->engine->name, ce->timeline->seqno,
+ integer, decimal,
+ div_u64(p->runtime, 1000 * 1000),
+ div_u64(ktime_to_ns(p->time), 1000 * 1000));
+ }
+ }
+
+out:
+ for (idx = 0; idx < nengines; idx++) {
+ if (IS_ERR_OR_NULL(ps->ce[idx]))
+ break;
+
+ intel_context_unpin(ps->ce[idx]);
+ intel_context_put(ps->ce[idx]);
+ }
+ kfree(ps);
+
+ cpu_latency_qos_remove_request(&qos);
+ kfree(stats);
+ return err;
+}
+
+static int p_sync0(void *arg)
+{
+ struct perf_stats *p = arg;
+ struct intel_engine_cs *engine = p->engine;
+ struct intel_context *ce;
+ IGT_TIMEOUT(end_time);
+ unsigned long count;
+ bool busy;
+ int err = 0;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ err = intel_context_pin(ce);
+ if (err) {
+ intel_context_put(ce);
+ return err;
+ }
+
+ busy = false;
+ if (intel_engine_supports_stats(engine)) {
+ p->busy = intel_engine_get_busy_time(engine);
+ busy = true;
+ }
+
+ p->time = ktime_get();
+ count = 0;
+ do {
+ struct i915_request *rq;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ err = 0;
+ if (i915_request_wait(rq, 0, HZ / 5) < 0)
+ err = -ETIME;
+ i915_request_put(rq);
+ if (err)
+ break;
+
+ count++;
+ } while (!__igt_timeout(end_time, NULL));
+ p->time = ktime_sub(ktime_get(), p->time);
+
+ if (busy) {
+ p->busy = ktime_sub(intel_engine_get_busy_time(engine),
+ p->busy);
+ }
+
+ err = switch_to_kernel_sync(ce, err);
+ p->runtime = intel_context_get_total_runtime_ns(ce);
+ p->count = count;
+
+ intel_context_unpin(ce);
+ intel_context_put(ce);
+ return err;
+}
+
+static int p_sync1(void *arg)
+{
+ struct perf_stats *p = arg;
+ struct intel_engine_cs *engine = p->engine;
+ struct i915_request *prev = NULL;
+ struct intel_context *ce;
+ IGT_TIMEOUT(end_time);
+ unsigned long count;
+ bool busy;
+ int err = 0;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ err = intel_context_pin(ce);
+ if (err) {
+ intel_context_put(ce);
+ return err;
+ }
+
+ busy = false;
+ if (intel_engine_supports_stats(engine)) {
+ p->busy = intel_engine_get_busy_time(engine);
+ busy = true;
+ }
+
+ p->time = ktime_get();
+ count = 0;
+ do {
+ struct i915_request *rq;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ err = 0;
+ if (prev && i915_request_wait(prev, 0, HZ / 5) < 0)
+ err = -ETIME;
+ i915_request_put(prev);
+ prev = rq;
+ if (err)
+ break;
+
+ count++;
+ } while (!__igt_timeout(end_time, NULL));
+ i915_request_put(prev);
+ p->time = ktime_sub(ktime_get(), p->time);
+
+ if (busy) {
+ p->busy = ktime_sub(intel_engine_get_busy_time(engine),
+ p->busy);
+ }
+
+ err = switch_to_kernel_sync(ce, err);
+ p->runtime = intel_context_get_total_runtime_ns(ce);
+ p->count = count;
+
+ intel_context_unpin(ce);
+ intel_context_put(ce);
+ return err;
+}
+
+static int p_many(void *arg)
+{
+ struct perf_stats *p = arg;
+ struct intel_engine_cs *engine = p->engine;
+ struct intel_context *ce;
+ IGT_TIMEOUT(end_time);
+ unsigned long count;
+ int err = 0;
+ bool busy;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ err = intel_context_pin(ce);
+ if (err) {
+ intel_context_put(ce);
+ return err;
+ }
+
+ busy = false;
+ if (intel_engine_supports_stats(engine)) {
+ p->busy = intel_engine_get_busy_time(engine);
+ busy = true;
+ }
+
+ count = 0;
+ p->time = ktime_get();
+ do {
+ struct i915_request *rq;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
+
+ i915_request_add(rq);
+ count++;
+ } while (!__igt_timeout(end_time, NULL));
+ p->time = ktime_sub(ktime_get(), p->time);
+
+ if (busy) {
+ p->busy = ktime_sub(intel_engine_get_busy_time(engine),
+ p->busy);
+ }
+
+ err = switch_to_kernel_sync(ce, err);
+ p->runtime = intel_context_get_total_runtime_ns(ce);
+ p->count = count;
+
+ intel_context_unpin(ce);
+ intel_context_put(ce);
+ return err;
+}
+
+static int perf_parallel_engines(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ static int (* const func[])(void *arg) = {
+ p_sync0,
+ p_sync1,
+ p_many,
+ NULL,
+ };
+ const unsigned int nengines = num_uabi_engines(i915);
+ struct intel_engine_cs *engine;
+ int (* const *fn)(void *arg);
+ struct pm_qos_request qos;
+ struct {
+ struct perf_stats p;
+ struct task_struct *tsk;
+ } *engines;
+ int err = 0;
+
+ engines = kcalloc(nengines, sizeof(*engines), GFP_KERNEL);
+ if (!engines)
+ return -ENOMEM;
+
+ cpu_latency_qos_add_request(&qos, 0);
+
+ for (fn = func; *fn; fn++) {
+ char name[KSYM_NAME_LEN];
+ struct igt_live_test t;
+ unsigned int idx;
+
+ snprintf(name, sizeof(name), "%ps", *fn);
+ err = igt_live_test_begin(&t, i915, __func__, name);
+ if (err)
+ break;
+
+ atomic_set(&i915->selftest.counter, nengines);
+
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
+ intel_engine_pm_get(engine);
+
+ memset(&engines[idx].p, 0, sizeof(engines[idx].p));
+ engines[idx].p.engine = engine;
+
+ engines[idx].tsk = kthread_run(*fn, &engines[idx].p,
+ "igt:%s", engine->name);
+ if (IS_ERR(engines[idx].tsk)) {
+ err = PTR_ERR(engines[idx].tsk);
+ intel_engine_pm_put(engine);
+ break;
+ }
+ get_task_struct(engines[idx++].tsk);
+ }
+
+ yield(); /* start all threads before we kthread_stop() */
+
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
+ int status;
+
+ if (IS_ERR(engines[idx].tsk))
+ break;
+
+ status = kthread_stop(engines[idx].tsk);
+ if (status && !err)
+ err = status;
+
+ intel_engine_pm_put(engine);
+ put_task_struct(engines[idx++].tsk);
+ }
+
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ if (err)
+ break;
+
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
+ struct perf_stats *p = &engines[idx].p;
+ u64 busy = 100 * ktime_to_ns(p->busy);
+ u64 dt = ktime_to_ns(p->time);
+ int integer, decimal;
+
+ if (dt) {
+ integer = div64_u64(busy, dt);
+ busy -= integer * dt;
+ decimal = div64_u64(100 * busy, dt);
+ } else {
+ integer = 0;
+ decimal = 0;
+ }
+
+ GEM_BUG_ON(engine != p->engine);
+ pr_info("%s %5s: { count:%lu, busy:%d.%02d%%, runtime:%lldms, walltime:%lldms }\n",
+ name, engine->name, p->count, integer, decimal,
+ div_u64(p->runtime, 1000 * 1000),
+ div_u64(ktime_to_ns(p->time), 1000 * 1000));
+ idx++;
+ }
+ }
+
+ cpu_latency_qos_remove_request(&qos);
+ kfree(engines);
+ return err;
+}
+
+int i915_request_perf_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(perf_series_engines),
+ SUBTEST(perf_parallel_engines),
+ };
+
+ if (intel_gt_is_wedged(&i915->gt))
+ return 0;
+
+ return i915_subtests(tests, i915);
+}
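The perf selftests above report engine utilisation with a fixed-point split into whole percent plus two decimal digits (busy = 100 * busy_ns; integer = busy / dt; decimal = 100 * remainder / dt), using div64_u64 only because this is kernel code. A standalone userspace sketch of the same arithmetic; the helper name and the sample values are illustrative, not taken from the patch:

/* Hedged sketch: same integer/decimal busy%% split as the selftests above. */
#include <stdint.h>
#include <stdio.h>

static void busy_percent(uint64_t busy_ns, uint64_t dt_ns,
                         int *integer, int *decimal)
{
        uint64_t busy = 100 * busy_ns;          /* scale to whole percent */

        if (!dt_ns) {
                *integer = 0;
                *decimal = 0;
                return;
        }
        *integer = busy / dt_ns;                /* whole percent */
        busy -= (uint64_t)*integer * dt_ns;     /* remainder below 1%% */
        *decimal = (100 * busy) / dt_ns;        /* two fractional digits */
}

int main(void)
{
        int i, d;

        /* e.g. 750 ms busy over a 2 s window prints "busy:37.50%" */
        busy_percent(750000000ULL, 2000000000ULL, &i, &d);
        printf("busy:%d.%02d%%\n", i, d);
        return 0;
}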
diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c
index d3bf9eefb682..1bc11c09faef 100644
--- a/drivers/gpu/drm/i915/selftests/i915_selftest.c
+++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c
@@ -396,6 +396,35 @@ bool __igt_timeout(unsigned long timeout, const char *fmt, ...)
return true;
}
+void igt_hexdump(const void *buf, size_t len)
+{
+ const size_t rowsize = 8 * sizeof(u32);
+ const void *prev = NULL;
+ bool skip = false;
+ size_t pos;
+
+ for (pos = 0; pos < len; pos += rowsize) {
+ char line[128];
+
+ if (prev && !memcmp(prev, buf + pos, rowsize)) {
+ if (!skip) {
+ pr_info("*\n");
+ skip = true;
+ }
+ continue;
+ }
+
+ WARN_ON_ONCE(hex_dump_to_buffer(buf + pos, len - pos,
+ rowsize, sizeof(u32),
+ line, sizeof(line),
+ false) >= sizeof(line));
+ pr_info("[%04zx] %s\n", pos, line);
+
+ prev = buf + pos;
+ skip = false;
+ }
+}
+
module_param_named(st_random_seed, i915_selftest.random_seed, uint, 0400);
module_param_named(st_timeout, i915_selftest.timeout_ms, uint, 0400);
module_param_named(st_filter, i915_selftest.filter, charp, 0400);
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
index 9ad4ab088466..e35ba5f9e73f 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.c
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -169,8 +169,7 @@ igt_spinner_create_request(struct igt_spinner *spin,
intel_gt_chipset_flush(engine->gt);
- if (engine->emit_init_breadcrumb &&
- i915_request_timeline(rq)->has_initial_breadcrumb) {
+ if (engine->emit_init_breadcrumb) {
err = engine->emit_init_breadcrumb(rq);
if (err)
goto cancel_rq;
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index 2a1d4ba1f9f3..6e80d99048e4 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -594,8 +594,11 @@ create_region_for_mapping(struct intel_memory_region *mr, u64 size, u32 type,
void *addr;
obj = i915_gem_object_create_region(mr, size, 0);
- if (IS_ERR(obj))
+ if (IS_ERR(obj)) {
+ if (PTR_ERR(obj) == -ENOSPC) /* Stolen memory */
+ return ERR_PTR(-ENODEV);
return obj;
+ }
addr = i915_gem_object_pin_map(obj, type);
if (IS_ERR(addr)) {
diff --git a/drivers/gpu/drm/i915/selftests/librapl.c b/drivers/gpu/drm/i915/selftests/librapl.c
new file mode 100644
index 000000000000..58710ac3f979
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/librapl.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <asm/msr.h>
+
+#include "librapl.h"
+
+u64 librapl_energy_uJ(void)
+{
+ unsigned long long power;
+ u32 units;
+
+ if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power))
+ return 0;
+
+ units = (power & 0x1f00) >> 8;
+
+ if (rdmsrl_safe(MSR_PP1_ENERGY_STATUS, &power))
+ return 0;
+
+ return (1000000 * power) >> units; /* convert to uJ */
+}
diff --git a/drivers/gpu/drm/i915/selftests/librapl.h b/drivers/gpu/drm/i915/selftests/librapl.h
new file mode 100644
index 000000000000..887f3e91dd05
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/librapl.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef SELFTEST_LIBRAPL_H
+#define SELFTEST_LIBRAPL_H
+
+#include <linux/types.h>
+
+u64 librapl_energy_uJ(void);
+
+#endif /* SELFTEST_LIBRAPL_H */
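librapl_energy_uJ() above reads the energy-status-unit field (bits 12:8 of MSR_RAPL_POWER_UNIT, where one count is 2^-units joules) and the raw PP1 graphics energy counter, then converts counts to microjoules as (1000000 * counter) >> units. A hedged userspace sketch of just that conversion step; the sample counter value and units = 14 are illustrative assumptions, not values from the patch:

/* Hedged sketch of the RAPL count-to-microjoule conversion used above. */
#include <stdint.h>
#include <stdio.h>

static uint64_t rapl_counts_to_uJ(uint64_t raw, unsigned int units)
{
        /* one count = 2^-units joules, so uJ = raw * 1e6 / 2^units */
        return (1000000ULL * raw) >> units;
}

int main(void)
{
        /* units = 14 (~61 uJ per count) is a common value; assumption only */
        printf("%llu uJ\n",
               (unsigned long long)rapl_counts_to_uJ(16384, 14)); /* 1000000 */
        return 0;
}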
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 754d0eb6beaa..9b105b811f1f 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -25,6 +25,8 @@
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
+#include <drm/drm_managed.h>
+
#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"
#include "gt/mock_engine.h"
@@ -55,6 +57,9 @@ static void mock_device_release(struct drm_device *dev)
{
struct drm_i915_private *i915 = to_i915(dev);
+ if (!i915->do_release)
+ goto out;
+
mock_device_flush(i915);
intel_gt_driver_remove(&i915->gt);
@@ -71,8 +76,9 @@ static void mock_device_release(struct drm_device *dev)
drm_mode_config_cleanup(&i915->drm);
- drm_dev_fini(&i915->drm);
+out:
put_device(&i915->drm.pdev->dev);
+ i915->drm.pdev = NULL;
}
static struct drm_driver mock_driver = {
@@ -114,9 +120,14 @@ struct drm_i915_private *mock_gem_device(void)
struct pci_dev *pdev;
int err;
- pdev = kzalloc(sizeof(*pdev) + sizeof(*i915), GFP_KERNEL);
+ pdev = kzalloc(sizeof(*pdev), GFP_KERNEL);
if (!pdev)
- goto err;
+ return NULL;
+ i915 = kzalloc(sizeof(*i915), GFP_KERNEL);
+ if (!i915) {
+ kfree(pdev);
+ return NULL;
+ }
device_initialize(&pdev->dev);
pdev->class = PCI_BASE_CLASS_DISPLAY << 16;
@@ -129,7 +140,6 @@ struct drm_i915_private *mock_gem_device(void)
pdev->dev.archdata.iommu = (void *)-1;
#endif
- i915 = (struct drm_i915_private *)(pdev + 1);
pci_set_drvdata(pdev, i915);
dev_pm_domain_set(&pdev->dev, &pm_domain);
@@ -141,9 +151,13 @@ struct drm_i915_private *mock_gem_device(void)
err = drm_dev_init(&i915->drm, &mock_driver, &pdev->dev);
if (err) {
pr_err("Failed to initialise mock GEM device: err=%d\n", err);
- goto put_device;
+ put_device(&pdev->dev);
+ kfree(i915);
+
+ return NULL;
}
i915->drm.pdev = pdev;
+ drmm_add_final_kfree(&i915->drm, i915);
intel_runtime_pm_init_early(&i915->runtime_pm);
@@ -178,16 +192,18 @@ struct drm_i915_private *mock_gem_device(void)
mkwrite_device_info(i915)->engine_mask = BIT(0);
- i915->engine[RCS0] = mock_engine(i915, "mock", RCS0);
- if (!i915->engine[RCS0])
+ i915->gt.engine[RCS0] = mock_engine(i915, "mock", RCS0);
+ if (!i915->gt.engine[RCS0])
goto err_unlock;
- if (mock_engine_init(i915->engine[RCS0]))
+ if (mock_engine_init(i915->gt.engine[RCS0]))
goto err_context;
__clear_bit(I915_WEDGED, &i915->gt.reset.flags);
intel_engines_driver_register(i915);
+ i915->do_release = true;
+
return i915;
err_context:
@@ -198,9 +214,7 @@ err_drv:
intel_gt_driver_late_release(&i915->gt);
intel_memory_regions_driver_release(i915);
drm_mode_config_cleanup(&i915->drm);
- drm_dev_fini(&i915->drm);
-put_device:
- put_device(&pdev->dev);
-err:
+ drm_dev_put(&i915->drm);
+
return NULL;
}
diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c
index f22cfbf9353e..ba4ca17fd4d8 100644
--- a/drivers/gpu/drm/imx/dw_hdmi-imx.c
+++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c
@@ -18,6 +18,7 @@
#include <drm/drm_edid.h>
#include <drm/drm_encoder.h>
#include <drm/drm_of.h>
+#include <drm/drm_simple_kms_helper.h>
#include "imx-drm.h"
@@ -143,10 +144,6 @@ static const struct drm_encoder_helper_funcs dw_hdmi_imx_encoder_helper_funcs =
.atomic_check = dw_hdmi_imx_atomic_check,
};
-static const struct drm_encoder_funcs dw_hdmi_imx_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static enum drm_mode_status
imx6q_hdmi_mode_valid(struct drm_connector *con,
const struct drm_display_mode *mode)
@@ -236,8 +233,7 @@ static int dw_hdmi_imx_bind(struct device *dev, struct device *master,
return ret;
drm_encoder_helper_add(encoder, &dw_hdmi_imx_encoder_helper_funcs);
- drm_encoder_init(drm, encoder, &dw_hdmi_imx_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
+ drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
platform_set_drvdata(pdev, hdmi);
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index da87c70e413b..2e38f1a5cf8d 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -42,12 +42,6 @@ void imx_drm_connector_destroy(struct drm_connector *connector)
}
EXPORT_SYMBOL_GPL(imx_drm_connector_destroy);
-void imx_drm_encoder_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
-}
-EXPORT_SYMBOL_GPL(imx_drm_encoder_destroy);
-
static int imx_drm_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state)
{
@@ -139,8 +133,8 @@ int imx_drm_encoder_parse_of(struct drm_device *drm,
encoder->possible_crtcs = crtc_mask;
- /* FIXME: this is the mask of outputs which can clone this output. */
- encoder->possible_clones = ~0;
+ /* FIXME: cloning support not clear, disable it all for now */
+ encoder->possible_clones = 0;
return 0;
}
diff --git a/drivers/gpu/drm/imx/imx-drm.h b/drivers/gpu/drm/imx/imx-drm.h
index ab9c6f706eb3..c3e1a3f14d30 100644
--- a/drivers/gpu/drm/imx/imx-drm.h
+++ b/drivers/gpu/drm/imx/imx-drm.h
@@ -38,7 +38,6 @@ int imx_drm_encoder_parse_of(struct drm_device *drm,
struct drm_encoder *encoder, struct device_node *np);
void imx_drm_connector_destroy(struct drm_connector *connector);
-void imx_drm_encoder_destroy(struct drm_encoder *encoder);
int ipu_planes_assign_pre(struct drm_device *dev,
struct drm_atomic_state *state);
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 4da22a94790c..66ea68e8da87 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -26,6 +26,7 @@
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "imx-drm.h"
@@ -393,10 +394,6 @@ static const struct drm_connector_helper_funcs imx_ldb_connector_helper_funcs =
.best_encoder = imx_ldb_connector_best_encoder,
};
-static const struct drm_encoder_funcs imx_ldb_encoder_funcs = {
- .destroy = imx_drm_encoder_destroy,
-};
-
static const struct drm_encoder_helper_funcs imx_ldb_encoder_helper_funcs = {
.atomic_mode_set = imx_ldb_encoder_atomic_mode_set,
.enable = imx_ldb_encoder_enable,
@@ -441,8 +438,7 @@ static int imx_ldb_register(struct drm_device *drm,
}
drm_encoder_helper_add(encoder, &imx_ldb_encoder_helper_funcs);
- drm_encoder_init(drm, encoder, &imx_ldb_encoder_funcs,
- DRM_MODE_ENCODER_LVDS, NULL);
+ drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_LVDS);
if (imx_ldb_ch->bridge) {
ret = drm_bridge_attach(&imx_ldb_ch->encoder,
diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c
index 5bbfaa2cd0f4..ee63782c77e9 100644
--- a/drivers/gpu/drm/imx/imx-tve.c
+++ b/drivers/gpu/drm/imx/imx-tve.c
@@ -21,6 +21,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "imx-drm.h"
@@ -348,10 +349,6 @@ static const struct drm_connector_helper_funcs imx_tve_connector_helper_funcs =
.mode_valid = imx_tve_connector_mode_valid,
};
-static const struct drm_encoder_funcs imx_tve_encoder_funcs = {
- .destroy = imx_drm_encoder_destroy,
-};
-
static const struct drm_encoder_helper_funcs imx_tve_encoder_helper_funcs = {
.mode_set = imx_tve_encoder_mode_set,
.enable = imx_tve_encoder_enable,
@@ -479,8 +476,7 @@ static int imx_tve_register(struct drm_device *drm, struct imx_tve *tve)
return ret;
drm_encoder_helper_add(&tve->encoder, &imx_tve_encoder_helper_funcs);
- drm_encoder_init(drm, &tve->encoder, &imx_tve_encoder_funcs,
- encoder_type, NULL);
+ drm_simple_encoder_init(drm, &tve->encoder, encoder_type);
drm_connector_helper_add(&tve->connector,
&imx_tve_connector_helper_funcs);
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index 08fafa4bf8c2..ac916c84a631 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -18,6 +18,7 @@
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "imx-drm.h"
@@ -256,10 +257,6 @@ static const struct drm_connector_helper_funcs imx_pd_connector_helper_funcs = {
.best_encoder = imx_pd_connector_best_encoder,
};
-static const struct drm_encoder_funcs imx_pd_encoder_funcs = {
- .destroy = imx_drm_encoder_destroy,
-};
-
static const struct drm_bridge_funcs imx_pd_bridge_funcs = {
.enable = imx_pd_bridge_enable,
.disable = imx_pd_bridge_disable,
@@ -288,8 +285,7 @@ static int imx_pd_register(struct drm_device *drm,
*/
imxpd->connector.dpms = DRM_MODE_DPMS_OFF;
- drm_encoder_init(drm, encoder, &imx_pd_encoder_funcs,
- DRM_MODE_ENCODER_NONE, NULL);
+ drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_NONE);
imxpd->bridge.funcs = &imx_pd_bridge_funcs;
drm_bridge_attach(encoder, &imxpd->bridge, NULL, 0);
diff --git a/drivers/gpu/drm/ingenic/ingenic-drm.c b/drivers/gpu/drm/ingenic/ingenic-drm.c
index 548cc25ea4ab..55b49a31729b 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm.c
+++ b/drivers/gpu/drm/ingenic/ingenic-drm.c
@@ -23,11 +23,13 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_irq.h>
+#include <drm/drm_managed.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_plane.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_vblank.h>
#define JZ_REG_LCD_CFG 0x00
@@ -488,15 +490,6 @@ static irqreturn_t ingenic_drm_irq_handler(int irq, void *arg)
return IRQ_HANDLED;
}
-static void ingenic_drm_release(struct drm_device *drm)
-{
- struct ingenic_drm *priv = drm_device_get_priv(drm);
-
- drm_mode_config_cleanup(drm);
- drm_dev_fini(drm);
- kfree(priv);
-}
-
static int ingenic_drm_enable_vblank(struct drm_crtc *crtc)
{
struct ingenic_drm *priv = drm_crtc_get_priv(crtc);
@@ -540,7 +533,6 @@ static struct drm_driver ingenic_drm_driver_data = {
.gem_prime_mmap = drm_gem_cma_prime_mmap,
.irq_handler = ingenic_drm_irq_handler,
- .release = ingenic_drm_release,
};
static const struct drm_plane_funcs ingenic_drm_primary_plane_funcs = {
@@ -592,10 +584,6 @@ static const struct drm_mode_config_funcs ingenic_drm_mode_config_funcs = {
.atomic_commit = drm_atomic_helper_commit,
};
-static const struct drm_encoder_funcs ingenic_drm_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static void ingenic_drm_free_dma_hwdesc(void *d)
{
struct ingenic_drm *priv = d;
@@ -623,24 +611,21 @@ static int ingenic_drm_probe(struct platform_device *pdev)
return -EINVAL;
}
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ priv = devm_drm_dev_alloc(dev, &ingenic_drm_driver_data,
+ struct ingenic_drm, drm);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
priv->soc_info = soc_info;
priv->dev = dev;
drm = &priv->drm;
- drm->dev_private = priv;
platform_set_drvdata(pdev, priv);
- ret = devm_drm_dev_init(dev, drm, &ingenic_drm_driver_data);
- if (ret) {
- kfree(priv);
+ ret = drmm_mode_config_init(drm);
+ if (ret)
return ret;
- }
- drm_mode_config_init(drm);
drm->mode_config.min_width = 0;
drm->mode_config.min_height = 0;
drm->mode_config.max_width = soc_info->max_width;
@@ -661,10 +646,8 @@ static int ingenic_drm_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "Failed to get platform irq");
+ if (irq < 0)
return irq;
- }
if (soc_info->needs_dev_clk) {
priv->lcd_clk = devm_clk_get(dev, "lcd");
@@ -730,8 +713,8 @@ static int ingenic_drm_probe(struct platform_device *pdev)
drm_encoder_helper_add(&priv->encoder,
&ingenic_drm_encoder_helper_funcs);
- ret = drm_encoder_init(drm, &priv->encoder, &ingenic_drm_encoder_funcs,
- DRM_MODE_ENCODER_DPI, NULL);
+ ret = drm_simple_encoder_init(drm, &priv->encoder,
+ DRM_MODE_ENCODER_DPI);
if (ret) {
dev_err(dev, "Failed to init encoder: %i", ret);
return ret;
@@ -791,9 +774,7 @@ static int ingenic_drm_probe(struct platform_device *pdev)
goto err_devclk_disable;
}
- ret = drm_fbdev_generic_setup(drm, 32);
- if (ret)
- dev_warn(dev, "Unable to start fbdev emulation: %i", ret);
+ drm_fbdev_generic_setup(drm, 32);
return 0;
diff --git a/drivers/gpu/drm/lima/Kconfig b/drivers/gpu/drm/lima/Kconfig
index d589f09d04d9..fa1d4f5df31e 100644
--- a/drivers/gpu/drm/lima/Kconfig
+++ b/drivers/gpu/drm/lima/Kconfig
@@ -10,5 +10,7 @@ config DRM_LIMA
depends on OF
select DRM_SCHED
select DRM_GEM_SHMEM_HELPER
+ select PM_DEVFREQ
+ select DEVFREQ_GOV_SIMPLE_ONDEMAND
help
DRM driver for ARM Mali 400/450 GPUs.
diff --git a/drivers/gpu/drm/lima/Makefile b/drivers/gpu/drm/lima/Makefile
index a85444b0a1d4..ca2097b8e1ad 100644
--- a/drivers/gpu/drm/lima/Makefile
+++ b/drivers/gpu/drm/lima/Makefile
@@ -14,6 +14,8 @@ lima-y := \
lima_sched.o \
lima_ctx.o \
lima_dlbu.o \
- lima_bcast.o
+ lima_bcast.o \
+ lima_trace.o \
+ lima_devfreq.o
obj-$(CONFIG_DRM_LIMA) += lima.o
diff --git a/drivers/gpu/drm/lima/lima_bcast.c b/drivers/gpu/drm/lima/lima_bcast.c
index 288398027bfa..fbc43f243c54 100644
--- a/drivers/gpu/drm/lima/lima_bcast.c
+++ b/drivers/gpu/drm/lima/lima_bcast.c
@@ -26,18 +26,33 @@ void lima_bcast_enable(struct lima_device *dev, int num_pp)
bcast_write(LIMA_BCAST_BROADCAST_MASK, mask);
}
+static int lima_bcast_hw_init(struct lima_ip *ip)
+{
+ bcast_write(LIMA_BCAST_BROADCAST_MASK, ip->data.mask << 16);
+ bcast_write(LIMA_BCAST_INTERRUPT_MASK, ip->data.mask);
+ return 0;
+}
+
+int lima_bcast_resume(struct lima_ip *ip)
+{
+ return lima_bcast_hw_init(ip);
+}
+
+void lima_bcast_suspend(struct lima_ip *ip)
+{
+
+}
+
int lima_bcast_init(struct lima_ip *ip)
{
- int i, mask = 0;
+ int i;
for (i = lima_ip_pp0; i <= lima_ip_pp7; i++) {
if (ip->dev->ip[i].present)
- mask |= 1 << (i - lima_ip_pp0);
+ ip->data.mask |= 1 << (i - lima_ip_pp0);
}
- bcast_write(LIMA_BCAST_BROADCAST_MASK, mask << 16);
- bcast_write(LIMA_BCAST_INTERRUPT_MASK, mask);
- return 0;
+ return lima_bcast_hw_init(ip);
}
void lima_bcast_fini(struct lima_ip *ip)
diff --git a/drivers/gpu/drm/lima/lima_bcast.h b/drivers/gpu/drm/lima/lima_bcast.h
index c47e58563d0a..465ee587bceb 100644
--- a/drivers/gpu/drm/lima/lima_bcast.h
+++ b/drivers/gpu/drm/lima/lima_bcast.h
@@ -6,6 +6,8 @@
struct lima_ip;
+int lima_bcast_resume(struct lima_ip *ip);
+void lima_bcast_suspend(struct lima_ip *ip);
int lima_bcast_init(struct lima_ip *ip);
void lima_bcast_fini(struct lima_ip *ip);
diff --git a/drivers/gpu/drm/lima/lima_ctx.c b/drivers/gpu/drm/lima/lima_ctx.c
index 22fff6caa961..891d5cd5019a 100644
--- a/drivers/gpu/drm/lima/lima_ctx.c
+++ b/drivers/gpu/drm/lima/lima_ctx.c
@@ -27,6 +27,9 @@ int lima_ctx_create(struct lima_device *dev, struct lima_ctx_mgr *mgr, u32 *id)
if (err < 0)
goto err_out0;
+ ctx->pid = task_pid_nr(current);
+ get_task_comm(ctx->pname, current);
+
return 0;
err_out0:
diff --git a/drivers/gpu/drm/lima/lima_ctx.h b/drivers/gpu/drm/lima/lima_ctx.h
index 6154e5c9bfe4..74e2be09090f 100644
--- a/drivers/gpu/drm/lima/lima_ctx.h
+++ b/drivers/gpu/drm/lima/lima_ctx.h
@@ -5,6 +5,7 @@
#define __LIMA_CTX_H__
#include <linux/xarray.h>
+#include <linux/sched.h>
#include "lima_device.h"
@@ -13,6 +14,10 @@ struct lima_ctx {
struct lima_device *dev;
struct lima_sched_context context[lima_pipe_num];
atomic_t guilty;
+
+ /* debug info */
+ char pname[TASK_COMM_LEN];
+ pid_t pid;
};
struct lima_ctx_mgr {
diff --git a/drivers/gpu/drm/lima/lima_devfreq.c b/drivers/gpu/drm/lima/lima_devfreq.c
new file mode 100644
index 000000000000..bbe02817721b
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_devfreq.c
@@ -0,0 +1,257 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2020 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+ *
+ * Based on panfrost_devfreq.c:
+ * Copyright 2019 Collabora ltd.
+ */
+#include <linux/clk.h>
+#include <linux/devfreq.h>
+#include <linux/devfreq_cooling.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/property.h>
+
+#include "lima_device.h"
+#include "lima_devfreq.h"
+
+static void lima_devfreq_update_utilization(struct lima_devfreq *devfreq)
+{
+ ktime_t now, last;
+
+ now = ktime_get();
+ last = devfreq->time_last_update;
+
+ if (devfreq->busy_count > 0)
+ devfreq->busy_time += ktime_sub(now, last);
+ else
+ devfreq->idle_time += ktime_sub(now, last);
+
+ devfreq->time_last_update = now;
+}
+
+static int lima_devfreq_target(struct device *dev, unsigned long *freq,
+ u32 flags)
+{
+ struct dev_pm_opp *opp;
+ int err;
+
+ opp = devfreq_recommended_opp(dev, freq, flags);
+ if (IS_ERR(opp))
+ return PTR_ERR(opp);
+ dev_pm_opp_put(opp);
+
+ err = dev_pm_opp_set_rate(dev, *freq);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static void lima_devfreq_reset(struct lima_devfreq *devfreq)
+{
+ devfreq->busy_time = 0;
+ devfreq->idle_time = 0;
+ devfreq->time_last_update = ktime_get();
+}
+
+static int lima_devfreq_get_dev_status(struct device *dev,
+ struct devfreq_dev_status *status)
+{
+ struct lima_device *ldev = dev_get_drvdata(dev);
+ struct lima_devfreq *devfreq = &ldev->devfreq;
+ unsigned long irqflags;
+
+ status->current_frequency = clk_get_rate(ldev->clk_gpu);
+
+ spin_lock_irqsave(&devfreq->lock, irqflags);
+
+ lima_devfreq_update_utilization(devfreq);
+
+ status->total_time = ktime_to_ns(ktime_add(devfreq->busy_time,
+ devfreq->idle_time));
+ status->busy_time = ktime_to_ns(devfreq->busy_time);
+
+ lima_devfreq_reset(devfreq);
+
+ spin_unlock_irqrestore(&devfreq->lock, irqflags);
+
+ dev_dbg(ldev->dev, "busy %lu total %lu %lu %% freq %lu MHz\n",
+ status->busy_time, status->total_time,
+ status->busy_time / (status->total_time / 100),
+ status->current_frequency / 1000 / 1000);
+
+ return 0;
+}
+
+static struct devfreq_dev_profile lima_devfreq_profile = {
+ .polling_ms = 50, /* ~3 frames */
+ .target = lima_devfreq_target,
+ .get_dev_status = lima_devfreq_get_dev_status,
+};
+
+void lima_devfreq_fini(struct lima_device *ldev)
+{
+ struct lima_devfreq *devfreq = &ldev->devfreq;
+
+ if (devfreq->cooling) {
+ devfreq_cooling_unregister(devfreq->cooling);
+ devfreq->cooling = NULL;
+ }
+
+ if (devfreq->devfreq) {
+ devm_devfreq_remove_device(ldev->dev, devfreq->devfreq);
+ devfreq->devfreq = NULL;
+ }
+
+ if (devfreq->opp_of_table_added) {
+ dev_pm_opp_of_remove_table(ldev->dev);
+ devfreq->opp_of_table_added = false;
+ }
+
+ if (devfreq->regulators_opp_table) {
+ dev_pm_opp_put_regulators(devfreq->regulators_opp_table);
+ devfreq->regulators_opp_table = NULL;
+ }
+
+ if (devfreq->clkname_opp_table) {
+ dev_pm_opp_put_clkname(devfreq->clkname_opp_table);
+ devfreq->clkname_opp_table = NULL;
+ }
+}
+
+int lima_devfreq_init(struct lima_device *ldev)
+{
+ struct thermal_cooling_device *cooling;
+ struct device *dev = ldev->dev;
+ struct opp_table *opp_table;
+ struct devfreq *devfreq;
+ struct lima_devfreq *ldevfreq = &ldev->devfreq;
+ struct dev_pm_opp *opp;
+ unsigned long cur_freq;
+ int ret;
+
+ if (!device_property_present(dev, "operating-points-v2"))
+ /* Optional, continue without devfreq */
+ return 0;
+
+ spin_lock_init(&ldevfreq->lock);
+
+ opp_table = dev_pm_opp_set_clkname(dev, "core");
+ if (IS_ERR(opp_table)) {
+ ret = PTR_ERR(opp_table);
+ goto err_fini;
+ }
+
+ ldevfreq->clkname_opp_table = opp_table;
+
+ opp_table = dev_pm_opp_set_regulators(dev,
+ (const char *[]){ "mali" },
+ 1);
+ if (IS_ERR(opp_table)) {
+ ret = PTR_ERR(opp_table);
+
+ /* Continue if the optional regulator is missing */
+ if (ret != -ENODEV)
+ goto err_fini;
+ } else {
+ ldevfreq->regulators_opp_table = opp_table;
+ }
+
+ ret = dev_pm_opp_of_add_table(dev);
+ if (ret)
+ goto err_fini;
+ ldevfreq->opp_of_table_added = true;
+
+ lima_devfreq_reset(ldevfreq);
+
+ cur_freq = clk_get_rate(ldev->clk_gpu);
+
+ opp = devfreq_recommended_opp(dev, &cur_freq, 0);
+ if (IS_ERR(opp)) {
+ ret = PTR_ERR(opp);
+ goto err_fini;
+ }
+
+ lima_devfreq_profile.initial_freq = cur_freq;
+ dev_pm_opp_put(opp);
+
+ devfreq = devm_devfreq_add_device(dev, &lima_devfreq_profile,
+ DEVFREQ_GOV_SIMPLE_ONDEMAND, NULL);
+ if (IS_ERR(devfreq)) {
+ dev_err(dev, "Couldn't initialize GPU devfreq\n");
+ ret = PTR_ERR(devfreq);
+ goto err_fini;
+ }
+
+ ldevfreq->devfreq = devfreq;
+
+ cooling = of_devfreq_cooling_register(dev->of_node, devfreq);
+ if (IS_ERR(cooling))
+ dev_info(dev, "Failed to register cooling device\n");
+ else
+ ldevfreq->cooling = cooling;
+
+ return 0;
+
+err_fini:
+ lima_devfreq_fini(ldev);
+ return ret;
+}
+
+void lima_devfreq_record_busy(struct lima_devfreq *devfreq)
+{
+ unsigned long irqflags;
+
+ if (!devfreq->devfreq)
+ return;
+
+ spin_lock_irqsave(&devfreq->lock, irqflags);
+
+ lima_devfreq_update_utilization(devfreq);
+
+ devfreq->busy_count++;
+
+ spin_unlock_irqrestore(&devfreq->lock, irqflags);
+}
+
+void lima_devfreq_record_idle(struct lima_devfreq *devfreq)
+{
+ unsigned long irqflags;
+
+ if (!devfreq->devfreq)
+ return;
+
+ spin_lock_irqsave(&devfreq->lock, irqflags);
+
+ lima_devfreq_update_utilization(devfreq);
+
+ WARN_ON(--devfreq->busy_count < 0);
+
+ spin_unlock_irqrestore(&devfreq->lock, irqflags);
+}
+
+int lima_devfreq_resume(struct lima_devfreq *devfreq)
+{
+ unsigned long irqflags;
+
+ if (!devfreq->devfreq)
+ return 0;
+
+ spin_lock_irqsave(&devfreq->lock, irqflags);
+
+ lima_devfreq_reset(devfreq);
+
+ spin_unlock_irqrestore(&devfreq->lock, irqflags);
+
+ return devfreq_resume_device(devfreq->devfreq);
+}
+
+int lima_devfreq_suspend(struct lima_devfreq *devfreq)
+{
+ if (!devfreq->devfreq)
+ return 0;
+
+ return devfreq_suspend_device(devfreq->devfreq);
+}
diff --git a/drivers/gpu/drm/lima/lima_devfreq.h b/drivers/gpu/drm/lima/lima_devfreq.h
new file mode 100644
index 000000000000..5eed2975a375
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_devfreq.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright 2020 Martin Blumenstingl <martin.blumenstingl@googlemail.com> */
+
+#ifndef __LIMA_DEVFREQ_H__
+#define __LIMA_DEVFREQ_H__
+
+#include <linux/spinlock.h>
+#include <linux/ktime.h>
+
+struct devfreq;
+struct opp_table;
+struct thermal_cooling_device;
+
+struct lima_device;
+
+struct lima_devfreq {
+ struct devfreq *devfreq;
+ struct opp_table *clkname_opp_table;
+ struct opp_table *regulators_opp_table;
+ struct thermal_cooling_device *cooling;
+ bool opp_of_table_added;
+
+ ktime_t busy_time;
+ ktime_t idle_time;
+ ktime_t time_last_update;
+ int busy_count;
+ /*
+ * Protect busy_time, idle_time, time_last_update and busy_count
+ * because these can be updated concurrently, for example by the GP
+ * and PP interrupts.
+ */
+ spinlock_t lock;
+};
+
+int lima_devfreq_init(struct lima_device *ldev);
+void lima_devfreq_fini(struct lima_device *ldev);
+
+void lima_devfreq_record_busy(struct lima_devfreq *devfreq);
+void lima_devfreq_record_idle(struct lima_devfreq *devfreq);
+
+int lima_devfreq_resume(struct lima_devfreq *devfreq);
+int lima_devfreq_suspend(struct lima_devfreq *devfreq);
+
+#endif
diff --git a/drivers/gpu/drm/lima/lima_device.c b/drivers/gpu/drm/lima/lima_device.c
index 19829b543024..65fdca366e41 100644
--- a/drivers/gpu/drm/lima/lima_device.c
+++ b/drivers/gpu/drm/lima/lima_device.c
@@ -25,6 +25,8 @@ struct lima_ip_desc {
int (*init)(struct lima_ip *ip);
void (*fini)(struct lima_ip *ip);
+ int (*resume)(struct lima_ip *ip);
+ void (*suspend)(struct lima_ip *ip);
};
#define LIMA_IP_DESC(ipname, mst0, mst1, off0, off1, func, irq) \
@@ -41,6 +43,8 @@ struct lima_ip_desc {
}, \
.init = lima_##func##_init, \
.fini = lima_##func##_fini, \
+ .resume = lima_##func##_resume, \
+ .suspend = lima_##func##_suspend, \
}
static struct lima_ip_desc lima_ip_desc[lima_ip_num] = {
@@ -77,26 +81,10 @@ const char *lima_ip_name(struct lima_ip *ip)
return lima_ip_desc[ip->id].name;
}
-static int lima_clk_init(struct lima_device *dev)
+static int lima_clk_enable(struct lima_device *dev)
{
int err;
- dev->clk_bus = devm_clk_get(dev->dev, "bus");
- if (IS_ERR(dev->clk_bus)) {
- err = PTR_ERR(dev->clk_bus);
- if (err != -EPROBE_DEFER)
- dev_err(dev->dev, "get bus clk failed %d\n", err);
- return err;
- }
-
- dev->clk_gpu = devm_clk_get(dev->dev, "core");
- if (IS_ERR(dev->clk_gpu)) {
- err = PTR_ERR(dev->clk_gpu);
- if (err != -EPROBE_DEFER)
- dev_err(dev->dev, "get core clk failed %d\n", err);
- return err;
- }
-
err = clk_prepare_enable(dev->clk_bus);
if (err)
return err;
@@ -105,15 +93,7 @@ static int lima_clk_init(struct lima_device *dev)
if (err)
goto error_out0;
- dev->reset = devm_reset_control_array_get_optional_shared(dev->dev);
-
- if (IS_ERR(dev->reset)) {
- err = PTR_ERR(dev->reset);
- if (err != -EPROBE_DEFER)
- dev_err(dev->dev, "get reset controller failed %d\n",
- err);
- goto error_out1;
- } else if (dev->reset != NULL) {
+ if (dev->reset) {
err = reset_control_deassert(dev->reset);
if (err) {
dev_err(dev->dev,
@@ -131,14 +111,76 @@ error_out0:
return err;
}
-static void lima_clk_fini(struct lima_device *dev)
+static void lima_clk_disable(struct lima_device *dev)
{
- if (dev->reset != NULL)
+ if (dev->reset)
reset_control_assert(dev->reset);
clk_disable_unprepare(dev->clk_gpu);
clk_disable_unprepare(dev->clk_bus);
}
+static int lima_clk_init(struct lima_device *dev)
+{
+ int err;
+
+ dev->clk_bus = devm_clk_get(dev->dev, "bus");
+ if (IS_ERR(dev->clk_bus)) {
+ err = PTR_ERR(dev->clk_bus);
+ if (err != -EPROBE_DEFER)
+ dev_err(dev->dev, "get bus clk failed %d\n", err);
+ dev->clk_bus = NULL;
+ return err;
+ }
+
+ dev->clk_gpu = devm_clk_get(dev->dev, "core");
+ if (IS_ERR(dev->clk_gpu)) {
+ err = PTR_ERR(dev->clk_gpu);
+ if (err != -EPROBE_DEFER)
+ dev_err(dev->dev, "get core clk failed %d\n", err);
+ dev->clk_gpu = NULL;
+ return err;
+ }
+
+ dev->reset = devm_reset_control_array_get_optional_shared(dev->dev);
+ if (IS_ERR(dev->reset)) {
+ err = PTR_ERR(dev->reset);
+ if (err != -EPROBE_DEFER)
+ dev_err(dev->dev, "get reset controller failed %d\n",
+ err);
+ dev->reset = NULL;
+ return err;
+ }
+
+ return lima_clk_enable(dev);
+}
+
+static void lima_clk_fini(struct lima_device *dev)
+{
+ lima_clk_disable(dev);
+}
+
+static int lima_regulator_enable(struct lima_device *dev)
+{
+ int ret;
+
+ if (!dev->regulator)
+ return 0;
+
+ ret = regulator_enable(dev->regulator);
+ if (ret < 0) {
+ dev_err(dev->dev, "failed to enable regulator: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void lima_regulator_disable(struct lima_device *dev)
+{
+ if (dev->regulator)
+ regulator_disable(dev->regulator);
+}
+
static int lima_regulator_init(struct lima_device *dev)
{
int ret;
@@ -154,25 +196,20 @@ static int lima_regulator_init(struct lima_device *dev)
return ret;
}
- ret = regulator_enable(dev->regulator);
- if (ret < 0) {
- dev_err(dev->dev, "failed to enable regulator: %d\n", ret);
- return ret;
- }
-
- return 0;
+ return lima_regulator_enable(dev);
}
static void lima_regulator_fini(struct lima_device *dev)
{
- if (dev->regulator)
- regulator_disable(dev->regulator);
+ lima_regulator_disable(dev);
}
static int lima_init_ip(struct lima_device *dev, int index)
{
+ struct platform_device *pdev = to_platform_device(dev->dev);
struct lima_ip_desc *desc = lima_ip_desc + index;
struct lima_ip *ip = dev->ip + index;
+ const char *irq_name = desc->irq_name;
int offset = desc->offset[dev->id];
bool must = desc->must_have[dev->id];
int err;
@@ -183,8 +220,9 @@ static int lima_init_ip(struct lima_device *dev, int index)
ip->dev = dev;
ip->id = index;
ip->iomem = dev->iomem + offset;
- if (desc->irq_name) {
- err = platform_get_irq_byname(dev->pdev, desc->irq_name);
+ if (irq_name) {
+ err = must ? platform_get_irq_byname(pdev, irq_name) :
+ platform_get_irq_byname_optional(pdev, irq_name);
if (err < 0)
goto out;
ip->irq = err;
@@ -209,11 +247,34 @@ static void lima_fini_ip(struct lima_device *ldev, int index)
desc->fini(ip);
}
+static int lima_resume_ip(struct lima_device *ldev, int index)
+{
+ struct lima_ip_desc *desc = lima_ip_desc + index;
+ struct lima_ip *ip = ldev->ip + index;
+ int ret = 0;
+
+ if (ip->present)
+ ret = desc->resume(ip);
+
+ return ret;
+}
+
+static void lima_suspend_ip(struct lima_device *ldev, int index)
+{
+ struct lima_ip_desc *desc = lima_ip_desc + index;
+ struct lima_ip *ip = ldev->ip + index;
+
+ if (ip->present)
+ desc->suspend(ip);
+}
+
static int lima_init_gp_pipe(struct lima_device *dev)
{
struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_gp;
int err;
+ pipe->ldev = dev;
+
err = lima_sched_pipe_init(pipe, "gp");
if (err)
return err;
@@ -244,6 +305,8 @@ static int lima_init_pp_pipe(struct lima_device *dev)
struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;
int err, i;
+ pipe->ldev = dev;
+
err = lima_sched_pipe_init(pipe, "pp");
if (err)
return err;
@@ -290,8 +353,8 @@ static void lima_fini_pp_pipe(struct lima_device *dev)
int lima_device_init(struct lima_device *ldev)
{
+ struct platform_device *pdev = to_platform_device(ldev->dev);
int err, i;
- struct resource *res;
dma_set_coherent_mask(ldev->dev, DMA_BIT_MASK(32));
@@ -322,8 +385,7 @@ int lima_device_init(struct lima_device *ldev)
} else
ldev->va_end = LIMA_VA_RESERVE_END;
- res = platform_get_resource(ldev->pdev, IORESOURCE_MEM, 0);
- ldev->iomem = devm_ioremap_resource(ldev->dev, res);
+ ldev->iomem = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ldev->iomem)) {
dev_err(ldev->dev, "fail to ioremap iomem\n");
err = PTR_ERR(ldev->iomem);
@@ -344,6 +406,12 @@ int lima_device_init(struct lima_device *ldev)
if (err)
goto err_out5;
+ ldev->dump.magic = LIMA_DUMP_MAGIC;
+ ldev->dump.version_major = LIMA_DUMP_MAJOR;
+ ldev->dump.version_minor = LIMA_DUMP_MINOR;
+ INIT_LIST_HEAD(&ldev->error_task_list);
+ mutex_init(&ldev->error_task_list_lock);
+
dev_info(ldev->dev, "bus rate = %lu\n", clk_get_rate(ldev->clk_bus));
dev_info(ldev->dev, "mod rate = %lu", clk_get_rate(ldev->clk_gpu));
@@ -370,6 +438,13 @@ err_out0:
void lima_device_fini(struct lima_device *ldev)
{
int i;
+ struct lima_sched_error_task *et, *tmp;
+
+ list_for_each_entry_safe(et, tmp, &ldev->error_task_list, list) {
+ list_del(&et->list);
+ kvfree(et);
+ }
+ mutex_destroy(&ldev->error_task_list_lock);
lima_fini_pp_pipe(ldev);
lima_fini_gp_pipe(ldev);
@@ -387,3 +462,72 @@ void lima_device_fini(struct lima_device *ldev)
lima_clk_fini(ldev);
}
+
+int lima_device_resume(struct device *dev)
+{
+ struct lima_device *ldev = dev_get_drvdata(dev);
+ int i, err;
+
+ err = lima_clk_enable(ldev);
+ if (err) {
+ dev_err(dev, "resume clk fail %d\n", err);
+ return err;
+ }
+
+ err = lima_regulator_enable(ldev);
+ if (err) {
+ dev_err(dev, "resume regulator fail %d\n", err);
+ goto err_out0;
+ }
+
+ for (i = 0; i < lima_ip_num; i++) {
+ err = lima_resume_ip(ldev, i);
+ if (err) {
+ dev_err(dev, "resume ip %d fail\n", i);
+ goto err_out1;
+ }
+ }
+
+ err = lima_devfreq_resume(&ldev->devfreq);
+ if (err) {
+ dev_err(dev, "devfreq resume fail\n");
+ goto err_out1;
+ }
+
+ return 0;
+
+err_out1:
+ while (--i >= 0)
+ lima_suspend_ip(ldev, i);
+ lima_regulator_disable(ldev);
+err_out0:
+ lima_clk_disable(ldev);
+ return err;
+}
+
+int lima_device_suspend(struct device *dev)
+{
+ struct lima_device *ldev = dev_get_drvdata(dev);
+ int i, err;
+
+ /* check any task running */
+ for (i = 0; i < lima_pipe_num; i++) {
+ if (atomic_read(&ldev->pipe[i].base.hw_rq_count))
+ return -EBUSY;
+ }
+
+ err = lima_devfreq_suspend(&ldev->devfreq);
+ if (err) {
+ dev_err(dev, "devfreq suspend fail\n");
+ return err;
+ }
+
+ for (i = lima_ip_num - 1; i >= 0; i--)
+ lima_suspend_ip(ldev, i);
+
+ lima_regulator_disable(ldev);
+
+ lima_clk_disable(ldev);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/lima/lima_device.h b/drivers/gpu/drm/lima/lima_device.h
index 31158d86271c..41b9d7b4bcc7 100644
--- a/drivers/gpu/drm/lima/lima_device.h
+++ b/drivers/gpu/drm/lima/lima_device.h
@@ -6,8 +6,12 @@
#include <drm/drm_device.h>
#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
#include "lima_sched.h"
+#include "lima_dump.h"
+#include "lima_devfreq.h"
enum lima_gpu_id {
lima_gpu_mali400 = 0,
@@ -60,6 +64,8 @@ struct lima_ip {
bool async_reset;
/* l2 cache */
spinlock_t lock;
+ /* pmu/bcast */
+ u32 mask;
} data;
};
@@ -72,7 +78,6 @@ enum lima_pipe_id {
struct lima_device {
struct device *dev;
struct drm_device *ddev;
- struct platform_device *pdev;
enum lima_gpu_id id;
u32 gp_version;
@@ -94,6 +99,13 @@ struct lima_device {
u32 *dlbu_cpu;
dma_addr_t dlbu_dma;
+
+ struct lima_devfreq devfreq;
+
+ /* debug info */
+ struct lima_dump_head dump;
+ struct list_head error_task_list;
+ struct mutex error_task_list_lock;
};
static inline struct lima_device *
@@ -128,4 +140,7 @@ static inline int lima_poll_timeout(struct lima_ip *ip, lima_poll_func_t func,
return 0;
}
+int lima_device_suspend(struct device *dev);
+int lima_device_resume(struct device *dev);
+
#endif
diff --git a/drivers/gpu/drm/lima/lima_dlbu.c b/drivers/gpu/drm/lima/lima_dlbu.c
index 8399ceffb94b..c1d5ea35daa7 100644
--- a/drivers/gpu/drm/lima/lima_dlbu.c
+++ b/drivers/gpu/drm/lima/lima_dlbu.c
@@ -42,7 +42,7 @@ void lima_dlbu_set_reg(struct lima_ip *ip, u32 *reg)
dlbu_write(LIMA_DLBU_START_TILE_POS, reg[3]);
}
-int lima_dlbu_init(struct lima_ip *ip)
+static int lima_dlbu_hw_init(struct lima_ip *ip)
{
struct lima_device *dev = ip->dev;
@@ -52,6 +52,21 @@ int lima_dlbu_init(struct lima_ip *ip)
return 0;
}
+int lima_dlbu_resume(struct lima_ip *ip)
+{
+ return lima_dlbu_hw_init(ip);
+}
+
+void lima_dlbu_suspend(struct lima_ip *ip)
+{
+
+}
+
+int lima_dlbu_init(struct lima_ip *ip)
+{
+ return lima_dlbu_hw_init(ip);
+}
+
void lima_dlbu_fini(struct lima_ip *ip)
{
diff --git a/drivers/gpu/drm/lima/lima_dlbu.h b/drivers/gpu/drm/lima/lima_dlbu.h
index 16f877984466..be71daaaee89 100644
--- a/drivers/gpu/drm/lima/lima_dlbu.h
+++ b/drivers/gpu/drm/lima/lima_dlbu.h
@@ -12,6 +12,8 @@ void lima_dlbu_disable(struct lima_device *dev);
void lima_dlbu_set_reg(struct lima_ip *ip, u32 *reg);
+int lima_dlbu_resume(struct lima_ip *ip);
+void lima_dlbu_suspend(struct lima_ip *ip);
int lima_dlbu_init(struct lima_ip *ip);
void lima_dlbu_fini(struct lima_ip *ip);
diff --git a/drivers/gpu/drm/lima/lima_drv.c b/drivers/gpu/drm/lima/lima_drv.c
index 2daac64d8955..a831565af813 100644
--- a/drivers/gpu/drm/lima/lima_drv.c
+++ b/drivers/gpu/drm/lima/lima_drv.c
@@ -5,17 +5,20 @@
#include <linux/of_platform.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
+#include <linux/pm_runtime.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_drv.h>
#include <drm/drm_prime.h>
#include <drm/lima_drm.h>
+#include "lima_device.h"
#include "lima_drv.h"
#include "lima_gem.h"
#include "lima_vm.h"
int lima_sched_timeout_ms;
uint lima_heap_init_nr_pages = 8;
+uint lima_max_error_tasks;
MODULE_PARM_DESC(sched_timeout_ms, "task run timeout in ms");
module_param_named(sched_timeout_ms, lima_sched_timeout_ms, int, 0444);
@@ -23,6 +26,9 @@ module_param_named(sched_timeout_ms, lima_sched_timeout_ms, int, 0444);
MODULE_PARM_DESC(heap_init_nr_pages, "heap buffer init number of pages");
module_param_named(heap_init_nr_pages, lima_heap_init_nr_pages, uint, 0444);
+MODULE_PARM_DESC(max_error_tasks, "max number of error tasks to save");
+module_param_named(max_error_tasks, lima_max_error_tasks, uint, 0644);
+
static int lima_ioctl_get_param(struct drm_device *dev, void *data, struct drm_file *file)
{
struct drm_lima_get_param *args = data;
@@ -272,6 +278,93 @@ static struct drm_driver lima_drm_driver = {
.gem_prime_mmap = drm_gem_prime_mmap,
};
+struct lima_block_reader {
+ void *dst;
+ size_t base;
+ size_t count;
+ size_t off;
+ ssize_t read;
+};
+
+static bool lima_read_block(struct lima_block_reader *reader,
+ void *src, size_t src_size)
+{
+ size_t max_off = reader->base + src_size;
+
+ if (reader->off < max_off) {
+ size_t size = min_t(size_t, max_off - reader->off,
+ reader->count);
+
+ memcpy(reader->dst, src + (reader->off - reader->base), size);
+
+ reader->dst += size;
+ reader->off += size;
+ reader->read += size;
+ reader->count -= size;
+ }
+
+ reader->base = max_off;
+
+ return !!reader->count;
+}
+
+static ssize_t lima_error_state_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct lima_device *ldev = dev_get_drvdata(dev);
+ struct lima_sched_error_task *et;
+ struct lima_block_reader reader = {
+ .dst = buf,
+ .count = count,
+ .off = off,
+ };
+
+ mutex_lock(&ldev->error_task_list_lock);
+
+ if (lima_read_block(&reader, &ldev->dump, sizeof(ldev->dump))) {
+ list_for_each_entry(et, &ldev->error_task_list, list) {
+ if (!lima_read_block(&reader, et->data, et->size))
+ break;
+ }
+ }
+
+ mutex_unlock(&ldev->error_task_list_lock);
+ return reader.read;
+}
+
+static ssize_t lima_error_state_write(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct lima_device *ldev = dev_get_drvdata(dev);
+ struct lima_sched_error_task *et, *tmp;
+
+ mutex_lock(&ldev->error_task_list_lock);
+
+ list_for_each_entry_safe(et, tmp, &ldev->error_task_list, list) {
+ list_del(&et->list);
+ kvfree(et);
+ }
+
+ ldev->dump.size = 0;
+ ldev->dump.num_tasks = 0;
+
+ mutex_unlock(&ldev->error_task_list_lock);
+
+ return count;
+}
+
+static const struct bin_attribute lima_error_state_attr = {
+ .attr.name = "error",
+ .attr.mode = 0600,
+ .size = 0,
+ .read = lima_error_state_read,
+ .write = lima_error_state_write,
+};
+
static int lima_pdev_probe(struct platform_device *pdev)
{
struct lima_device *ldev;
@@ -288,7 +381,6 @@ static int lima_pdev_probe(struct platform_device *pdev)
goto err_out0;
}
- ldev->pdev = pdev;
ldev->dev = &pdev->dev;
ldev->id = (enum lima_gpu_id)of_device_get_match_data(&pdev->dev);
@@ -306,16 +398,34 @@ static int lima_pdev_probe(struct platform_device *pdev)
if (err)
goto err_out1;
+ err = lima_devfreq_init(ldev);
+ if (err) {
+ dev_err(&pdev->dev, "Fatal error during devfreq init\n");
+ goto err_out2;
+ }
+
+ pm_runtime_set_active(ldev->dev);
+ pm_runtime_mark_last_busy(ldev->dev);
+ pm_runtime_set_autosuspend_delay(ldev->dev, 200);
+ pm_runtime_use_autosuspend(ldev->dev);
+ pm_runtime_enable(ldev->dev);
+
/*
* Register the DRM device with the core and the connectors with
* sysfs.
*/
err = drm_dev_register(ddev, 0);
if (err < 0)
- goto err_out2;
+ goto err_out3;
+
+ if (sysfs_create_bin_file(&ldev->dev->kobj, &lima_error_state_attr))
+ dev_warn(ldev->dev, "fail to create error state sysfs\n");
return 0;
+err_out3:
+ pm_runtime_disable(ldev->dev);
+ lima_devfreq_fini(ldev);
err_out2:
lima_device_fini(ldev);
err_out1:
@@ -330,8 +440,17 @@ static int lima_pdev_remove(struct platform_device *pdev)
struct lima_device *ldev = platform_get_drvdata(pdev);
struct drm_device *ddev = ldev->ddev;
+ sysfs_remove_bin_file(&ldev->dev->kobj, &lima_error_state_attr);
+
drm_dev_unregister(ddev);
+
+ /* stop autosuspend to make sure device is in active state */
+ pm_runtime_set_autosuspend_delay(ldev->dev, -1);
+ pm_runtime_disable(ldev->dev);
+
+ lima_devfreq_fini(ldev);
lima_device_fini(ldev);
+
drm_dev_put(ddev);
lima_sched_slab_fini();
return 0;
@@ -344,26 +463,22 @@ static const struct of_device_id dt_match[] = {
};
MODULE_DEVICE_TABLE(of, dt_match);
+static const struct dev_pm_ops lima_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(lima_device_suspend, lima_device_resume, NULL)
+};
+
static struct platform_driver lima_platform_driver = {
.probe = lima_pdev_probe,
.remove = lima_pdev_remove,
.driver = {
.name = "lima",
+ .pm = &lima_pm_ops,
.of_match_table = dt_match,
},
};
-static int __init lima_init(void)
-{
- return platform_driver_register(&lima_platform_driver);
-}
-module_init(lima_init);
-
-static void __exit lima_exit(void)
-{
- platform_driver_unregister(&lima_platform_driver);
-}
-module_exit(lima_exit);
+module_platform_driver(lima_platform_driver);
MODULE_AUTHOR("Lima Project Developers");
MODULE_DESCRIPTION("Lima DRM Driver");
diff --git a/drivers/gpu/drm/lima/lima_drv.h b/drivers/gpu/drm/lima/lima_drv.h
index f492ecc6a5d9..fdbd4077c768 100644
--- a/drivers/gpu/drm/lima/lima_drv.h
+++ b/drivers/gpu/drm/lima/lima_drv.h
@@ -10,6 +10,7 @@
extern int lima_sched_timeout_ms;
extern uint lima_heap_init_nr_pages;
+extern uint lima_max_error_tasks;
struct lima_vm;
struct lima_bo;
diff --git a/drivers/gpu/drm/lima/lima_dump.h b/drivers/gpu/drm/lima/lima_dump.h
new file mode 100644
index 000000000000..ca243d99c51b
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_dump.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2020 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_DUMP_H__
+#define __LIMA_DUMP_H__
+
+#include <linux/types.h>
+
+/**
+ * dump file format for all the information to start a lima task
+ *
+ * top level format
+ * | magic code "LIMA" | format version | num tasks | data size |
+ * | reserved | reserved | reserved | reserved |
+ * | task 1 ID | task 1 size | num chunks | reserved | task 1 data |
+ * | task 2 ID | task 2 size | num chunks | reserved | task 2 data |
+ * ...
+ *
+ * task data format
+ * | chunk 1 ID | chunk 1 size | reserved | reserved | chunk 1 data |
+ * | chunk 2 ID | chunk 2 size | reserved | reserved | chunk 2 data |
+ * ...
+ *
+ */
+
+#define LIMA_DUMP_MAJOR 1
+#define LIMA_DUMP_MINOR 0
+
+#define LIMA_DUMP_MAGIC 0x414d494c
+
+struct lima_dump_head {
+ __u32 magic;
+ __u16 version_major;
+ __u16 version_minor;
+ __u32 num_tasks;
+ __u32 size;
+ __u32 reserved[4];
+};
+
+#define LIMA_DUMP_TASK_GP 0
+#define LIMA_DUMP_TASK_PP 1
+#define LIMA_DUMP_TASK_NUM 2
+
+struct lima_dump_task {
+ __u32 id;
+ __u32 size;
+ __u32 num_chunks;
+ __u32 reserved;
+};
+
+#define LIMA_DUMP_CHUNK_FRAME 0
+#define LIMA_DUMP_CHUNK_BUFFER 1
+#define LIMA_DUMP_CHUNK_PROCESS_NAME 2
+#define LIMA_DUMP_CHUNK_PROCESS_ID 3
+#define LIMA_DUMP_CHUNK_NUM 4
+
+struct lima_dump_chunk {
+ __u32 id;
+ __u32 size;
+ __u32 reserved[2];
+};
+
+struct lima_dump_chunk_buffer {
+ __u32 id;
+ __u32 size;
+ __u32 va;
+ __u32 reserved;
+};
+
+struct lima_dump_chunk_pid {
+ __u32 id;
+ __u32 size;
+ __u32 pid;
+ __u32 reserved;
+};
+
+#endif
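The comment block in lima_dump.h above describes the blob exported through the new "error" sysfs attribute: a lima_dump_head, then num_tasks task records, each followed by its chunks, with every chunk's payload following its 16-byte header. A hedged userspace parsing sketch of that layout; the file path handling, the fixed buffer size, and the assumption that a chunk's payload length equals its size field with no extra alignment are illustrative, not guaranteed by the patch:

/* Hedged sketch: walk a lima error-state dump following the layout above. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct dump_head  { uint32_t magic; uint16_t major, minor;
                    uint32_t num_tasks, size, reserved[4]; };
struct dump_task  { uint32_t id, size, num_chunks, reserved; };
struct dump_chunk { uint32_t id, size, reserved[2]; };

static void walk_dump(const uint8_t *buf, size_t len)
{
        struct dump_head head;
        size_t off = sizeof(head);
        uint32_t t, c;

        memcpy(&head, buf, sizeof(head));
        if (head.magic != 0x414d494c)           /* "LIMA", little-endian */
                return;

        for (t = 0; t < head.num_tasks && off + sizeof(struct dump_task) <= len; t++) {
                struct dump_task task;

                memcpy(&task, buf + off, sizeof(task));
                off += sizeof(task);
                printf("task %u: %u bytes, %u chunks\n",
                       (unsigned)task.id, (unsigned)task.size,
                       (unsigned)task.num_chunks);

                for (c = 0; c < task.num_chunks &&
                     off + sizeof(struct dump_chunk) <= len; c++) {
                        struct dump_chunk chunk;

                        memcpy(&chunk, buf + off, sizeof(chunk));
                        /* payload follows the chunk header (assumed unpadded) */
                        off += sizeof(chunk) + chunk.size;
                        printf("  chunk %u: %u bytes\n",
                               (unsigned)chunk.id, (unsigned)chunk.size);
                }
        }
}

int main(int argc, char **argv)
{
        static uint8_t buf[1 << 20];
        size_t len;
        FILE *f;

        if (argc < 2)
                return 1;
        f = fopen(argv[1], "rb");       /* path to the device's "error" file */
        if (!f)
                return 1;
        len = fread(buf, 1, sizeof(buf), f);
        fclose(f);
        if (len >= sizeof(struct dump_head))
                walk_dump(buf, len);
        return 0;
}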
diff --git a/drivers/gpu/drm/lima/lima_gp.c b/drivers/gpu/drm/lima/lima_gp.c
index d8841c870d90..8dd501b7a3d0 100644
--- a/drivers/gpu/drm/lima/lima_gp.c
+++ b/drivers/gpu/drm/lima/lima_gp.c
@@ -274,6 +274,23 @@ static void lima_gp_print_version(struct lima_ip *ip)
static struct kmem_cache *lima_gp_task_slab;
static int lima_gp_task_slab_refcnt;
+static int lima_gp_hw_init(struct lima_ip *ip)
+{
+ ip->data.async_reset = false;
+ lima_gp_soft_reset_async(ip);
+ return lima_gp_soft_reset_async_wait(ip);
+}
+
+int lima_gp_resume(struct lima_ip *ip)
+{
+ return lima_gp_hw_init(ip);
+}
+
+void lima_gp_suspend(struct lima_ip *ip)
+{
+
+}
+
int lima_gp_init(struct lima_ip *ip)
{
struct lima_device *dev = ip->dev;
@@ -281,9 +298,7 @@ int lima_gp_init(struct lima_ip *ip)
lima_gp_print_version(ip);
- ip->data.async_reset = false;
- lima_gp_soft_reset_async(ip);
- err = lima_gp_soft_reset_async_wait(ip);
+ err = lima_gp_hw_init(ip);
if (err)
return err;
diff --git a/drivers/gpu/drm/lima/lima_gp.h b/drivers/gpu/drm/lima/lima_gp.h
index 516e5c1babbb..02ec9af78a51 100644
--- a/drivers/gpu/drm/lima/lima_gp.h
+++ b/drivers/gpu/drm/lima/lima_gp.h
@@ -7,6 +7,8 @@
struct lima_ip;
struct lima_device;
+int lima_gp_resume(struct lima_ip *ip);
+void lima_gp_suspend(struct lima_ip *ip);
int lima_gp_init(struct lima_ip *ip);
void lima_gp_fini(struct lima_ip *ip);
diff --git a/drivers/gpu/drm/lima/lima_l2_cache.c b/drivers/gpu/drm/lima/lima_l2_cache.c
index 6873a7af5a5c..c4080a02957b 100644
--- a/drivers/gpu/drm/lima/lima_l2_cache.c
+++ b/drivers/gpu/drm/lima/lima_l2_cache.c
@@ -38,9 +38,35 @@ int lima_l2_cache_flush(struct lima_ip *ip)
return ret;
}
+static int lima_l2_cache_hw_init(struct lima_ip *ip)
+{
+ int err;
+
+ err = lima_l2_cache_flush(ip);
+ if (err)
+ return err;
+
+ l2_cache_write(LIMA_L2_CACHE_ENABLE,
+ LIMA_L2_CACHE_ENABLE_ACCESS |
+ LIMA_L2_CACHE_ENABLE_READ_ALLOCATE);
+ l2_cache_write(LIMA_L2_CACHE_MAX_READS, 0x1c);
+
+ return 0;
+}
+
+int lima_l2_cache_resume(struct lima_ip *ip)
+{
+ return lima_l2_cache_hw_init(ip);
+}
+
+void lima_l2_cache_suspend(struct lima_ip *ip)
+{
+
+}
+
int lima_l2_cache_init(struct lima_ip *ip)
{
- int i, err;
+ int i;
u32 size;
struct lima_device *dev = ip->dev;
@@ -63,15 +89,7 @@ int lima_l2_cache_init(struct lima_ip *ip)
1 << (size & 0xff),
1 << ((size >> 24) & 0xff));
- err = lima_l2_cache_flush(ip);
- if (err)
- return err;
-
- l2_cache_write(LIMA_L2_CACHE_ENABLE,
- LIMA_L2_CACHE_ENABLE_ACCESS|LIMA_L2_CACHE_ENABLE_READ_ALLOCATE);
- l2_cache_write(LIMA_L2_CACHE_MAX_READS, 0x1c);
-
- return 0;
+ return lima_l2_cache_hw_init(ip);
}
void lima_l2_cache_fini(struct lima_ip *ip)
diff --git a/drivers/gpu/drm/lima/lima_l2_cache.h b/drivers/gpu/drm/lima/lima_l2_cache.h
index c63fb676ff14..1aeeefd53fb9 100644
--- a/drivers/gpu/drm/lima/lima_l2_cache.h
+++ b/drivers/gpu/drm/lima/lima_l2_cache.h
@@ -6,6 +6,8 @@
struct lima_ip;
+int lima_l2_cache_resume(struct lima_ip *ip);
+void lima_l2_cache_suspend(struct lima_ip *ip);
int lima_l2_cache_init(struct lima_ip *ip);
void lima_l2_cache_fini(struct lima_ip *ip);
diff --git a/drivers/gpu/drm/lima/lima_mmu.c b/drivers/gpu/drm/lima/lima_mmu.c
index f79d2af427e7..a1ae6c252dc2 100644
--- a/drivers/gpu/drm/lima/lima_mmu.c
+++ b/drivers/gpu/drm/lima/lima_mmu.c
@@ -59,12 +59,44 @@ static irqreturn_t lima_mmu_irq_handler(int irq, void *data)
return IRQ_HANDLED;
}
-int lima_mmu_init(struct lima_ip *ip)
+static int lima_mmu_hw_init(struct lima_ip *ip)
{
struct lima_device *dev = ip->dev;
int err;
u32 v;
+ mmu_write(LIMA_MMU_COMMAND, LIMA_MMU_COMMAND_HARD_RESET);
+ err = lima_mmu_send_command(LIMA_MMU_COMMAND_HARD_RESET,
+ LIMA_MMU_DTE_ADDR, v, v == 0);
+ if (err)
+ return err;
+
+ mmu_write(LIMA_MMU_INT_MASK,
+ LIMA_MMU_INT_PAGE_FAULT | LIMA_MMU_INT_READ_BUS_ERROR);
+ mmu_write(LIMA_MMU_DTE_ADDR, dev->empty_vm->pd.dma);
+ return lima_mmu_send_command(LIMA_MMU_COMMAND_ENABLE_PAGING,
+ LIMA_MMU_STATUS, v,
+ v & LIMA_MMU_STATUS_PAGING_ENABLED);
+}
+
+int lima_mmu_resume(struct lima_ip *ip)
+{
+ if (ip->id == lima_ip_ppmmu_bcast)
+ return 0;
+
+ return lima_mmu_hw_init(ip);
+}
+
+void lima_mmu_suspend(struct lima_ip *ip)
+{
+
+}
+
+int lima_mmu_init(struct lima_ip *ip)
+{
+ struct lima_device *dev = ip->dev;
+ int err;
+
if (ip->id == lima_ip_ppmmu_bcast)
return 0;
@@ -74,12 +106,6 @@ int lima_mmu_init(struct lima_ip *ip)
return -EIO;
}
- mmu_write(LIMA_MMU_COMMAND, LIMA_MMU_COMMAND_HARD_RESET);
- err = lima_mmu_send_command(LIMA_MMU_COMMAND_HARD_RESET,
- LIMA_MMU_DTE_ADDR, v, v == 0);
- if (err)
- return err;
-
err = devm_request_irq(dev->dev, ip->irq, lima_mmu_irq_handler,
IRQF_SHARED, lima_ip_name(ip), ip);
if (err) {
@@ -87,11 +113,7 @@ int lima_mmu_init(struct lima_ip *ip)
return err;
}
- mmu_write(LIMA_MMU_INT_MASK, LIMA_MMU_INT_PAGE_FAULT | LIMA_MMU_INT_READ_BUS_ERROR);
- mmu_write(LIMA_MMU_DTE_ADDR, dev->empty_vm->pd.dma);
- return lima_mmu_send_command(LIMA_MMU_COMMAND_ENABLE_PAGING,
- LIMA_MMU_STATUS, v,
- v & LIMA_MMU_STATUS_PAGING_ENABLED);
+ return lima_mmu_hw_init(ip);
}
void lima_mmu_fini(struct lima_ip *ip)
@@ -113,8 +135,7 @@ void lima_mmu_switch_vm(struct lima_ip *ip, struct lima_vm *vm)
LIMA_MMU_STATUS, v,
v & LIMA_MMU_STATUS_STALL_ACTIVE);
- if (vm)
- mmu_write(LIMA_MMU_DTE_ADDR, vm->pd.dma);
+ mmu_write(LIMA_MMU_DTE_ADDR, vm->pd.dma);
/* flush the TLB */
mmu_write(LIMA_MMU_COMMAND, LIMA_MMU_COMMAND_ZAP_CACHE);
diff --git a/drivers/gpu/drm/lima/lima_mmu.h b/drivers/gpu/drm/lima/lima_mmu.h
index 4f8ccbebcba1..f0c97ac75ea0 100644
--- a/drivers/gpu/drm/lima/lima_mmu.h
+++ b/drivers/gpu/drm/lima/lima_mmu.h
@@ -7,6 +7,8 @@
struct lima_ip;
struct lima_vm;
+int lima_mmu_resume(struct lima_ip *ip);
+void lima_mmu_suspend(struct lima_ip *ip);
int lima_mmu_init(struct lima_ip *ip);
void lima_mmu_fini(struct lima_ip *ip);
diff --git a/drivers/gpu/drm/lima/lima_pmu.c b/drivers/gpu/drm/lima/lima_pmu.c
index 571f6d661581..e397e1146e96 100644
--- a/drivers/gpu/drm/lima/lima_pmu.c
+++ b/drivers/gpu/drm/lima/lima_pmu.c
@@ -21,7 +21,7 @@ static int lima_pmu_wait_cmd(struct lima_ip *ip)
v, v & LIMA_PMU_INT_CMD_MASK,
100, 100000);
if (err) {
- dev_err(dev->dev, "timeout wait pmd cmd\n");
+ dev_err(dev->dev, "timeout wait pmu cmd\n");
return err;
}
@@ -29,7 +29,41 @@ static int lima_pmu_wait_cmd(struct lima_ip *ip)
return 0;
}
-int lima_pmu_init(struct lima_ip *ip)
+static u32 lima_pmu_get_ip_mask(struct lima_ip *ip)
+{
+ struct lima_device *dev = ip->dev;
+ u32 ret = 0;
+ int i;
+
+ ret |= LIMA_PMU_POWER_GP0_MASK;
+
+ if (dev->id == lima_gpu_mali400) {
+ ret |= LIMA_PMU_POWER_L2_MASK;
+ for (i = 0; i < 4; i++) {
+ if (dev->ip[lima_ip_pp0 + i].present)
+ ret |= LIMA_PMU_POWER_PP_MASK(i);
+ }
+ } else {
+ if (dev->ip[lima_ip_pp0].present)
+ ret |= LIMA450_PMU_POWER_PP0_MASK;
+ for (i = lima_ip_pp1; i <= lima_ip_pp3; i++) {
+ if (dev->ip[i].present) {
+ ret |= LIMA450_PMU_POWER_PP13_MASK;
+ break;
+ }
+ }
+ for (i = lima_ip_pp4; i <= lima_ip_pp7; i++) {
+ if (dev->ip[i].present) {
+ ret |= LIMA450_PMU_POWER_PP47_MASK;
+ break;
+ }
+ }
+ }
+
+ return ret;
+}
+
+static int lima_pmu_hw_init(struct lima_ip *ip)
{
int err;
u32 stat;
@@ -54,7 +88,44 @@ int lima_pmu_init(struct lima_ip *ip)
return 0;
}
-void lima_pmu_fini(struct lima_ip *ip)
+static void lima_pmu_hw_fini(struct lima_ip *ip)
{
+ u32 stat;
+
+ if (!ip->data.mask)
+ ip->data.mask = lima_pmu_get_ip_mask(ip);
+ stat = ~pmu_read(LIMA_PMU_STATUS) & ip->data.mask;
+ if (stat) {
+ pmu_write(LIMA_PMU_POWER_DOWN, stat);
+
+ /* Don't wait for interrupt on Mali400 if all domains are
+ * powered off because the HW won't generate an interrupt
+ * in this case.
+ */
+ if (ip->dev->id == lima_gpu_mali400)
+ pmu_write(LIMA_PMU_INT_CLEAR, LIMA_PMU_INT_CMD_MASK);
+ else
+ lima_pmu_wait_cmd(ip);
+ }
+}
+
+int lima_pmu_resume(struct lima_ip *ip)
+{
+ return lima_pmu_hw_init(ip);
+}
+
+void lima_pmu_suspend(struct lima_ip *ip)
+{
+ lima_pmu_hw_fini(ip);
+}
+
+int lima_pmu_init(struct lima_ip *ip)
+{
+ return lima_pmu_hw_init(ip);
+}
+
+void lima_pmu_fini(struct lima_ip *ip)
+{
+ lima_pmu_hw_fini(ip);
}
diff --git a/drivers/gpu/drm/lima/lima_pmu.h b/drivers/gpu/drm/lima/lima_pmu.h
index a2a18775eb07..652dc7af3047 100644
--- a/drivers/gpu/drm/lima/lima_pmu.h
+++ b/drivers/gpu/drm/lima/lima_pmu.h
@@ -6,6 +6,8 @@
struct lima_ip;
+int lima_pmu_resume(struct lima_ip *ip);
+void lima_pmu_suspend(struct lima_ip *ip);
int lima_pmu_init(struct lima_ip *ip);
void lima_pmu_fini(struct lima_ip *ip);
diff --git a/drivers/gpu/drm/lima/lima_pp.c b/drivers/gpu/drm/lima/lima_pp.c
index 8fef224b93c8..33f01383409c 100644
--- a/drivers/gpu/drm/lima/lima_pp.c
+++ b/drivers/gpu/drm/lima/lima_pp.c
@@ -223,6 +223,23 @@ static void lima_pp_print_version(struct lima_ip *ip)
lima_ip_name(ip), name, major, minor);
}
+static int lima_pp_hw_init(struct lima_ip *ip)
+{
+ ip->data.async_reset = false;
+ lima_pp_soft_reset_async(ip);
+ return lima_pp_soft_reset_async_wait(ip);
+}
+
+int lima_pp_resume(struct lima_ip *ip)
+{
+ return lima_pp_hw_init(ip);
+}
+
+void lima_pp_suspend(struct lima_ip *ip)
+{
+
+}
+
int lima_pp_init(struct lima_ip *ip)
{
struct lima_device *dev = ip->dev;
@@ -230,9 +247,7 @@ int lima_pp_init(struct lima_ip *ip)
lima_pp_print_version(ip);
- ip->data.async_reset = false;
- lima_pp_soft_reset_async(ip);
- err = lima_pp_soft_reset_async_wait(ip);
+ err = lima_pp_hw_init(ip);
if (err)
return err;
@@ -254,6 +269,16 @@ void lima_pp_fini(struct lima_ip *ip)
}
+int lima_pp_bcast_resume(struct lima_ip *ip)
+{
+ return 0;
+}
+
+void lima_pp_bcast_suspend(struct lima_ip *ip)
+{
+
+}
+
int lima_pp_bcast_init(struct lima_ip *ip)
{
struct lima_device *dev = ip->dev;
diff --git a/drivers/gpu/drm/lima/lima_pp.h b/drivers/gpu/drm/lima/lima_pp.h
index bf60c77b2633..16ec96de15a9 100644
--- a/drivers/gpu/drm/lima/lima_pp.h
+++ b/drivers/gpu/drm/lima/lima_pp.h
@@ -7,9 +7,13 @@
struct lima_ip;
struct lima_device;
+int lima_pp_resume(struct lima_ip *ip);
+void lima_pp_suspend(struct lima_ip *ip);
int lima_pp_init(struct lima_ip *ip);
void lima_pp_fini(struct lima_ip *ip);
+int lima_pp_bcast_resume(struct lima_ip *ip);
+void lima_pp_bcast_suspend(struct lima_ip *ip);
int lima_pp_bcast_init(struct lima_ip *ip);
void lima_pp_bcast_fini(struct lima_ip *ip);
diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
index 3886999b4533..e6cefda00279 100644
--- a/drivers/gpu/drm/lima/lima_sched.c
+++ b/drivers/gpu/drm/lima/lima_sched.c
@@ -3,14 +3,17 @@
#include <linux/kthread.h>
#include <linux/slab.h>
-#include <linux/xarray.h>
+#include <linux/vmalloc.h>
+#include <linux/pm_runtime.h>
+#include "lima_devfreq.h"
#include "lima_drv.h"
#include "lima_sched.h"
#include "lima_vm.h"
#include "lima_mmu.h"
#include "lima_l2_cache.h"
#include "lima_gem.h"
+#include "lima_trace.h"
struct lima_fence {
struct dma_fence base;
@@ -176,6 +179,7 @@ struct dma_fence *lima_sched_context_queue_task(struct lima_sched_context *conte
{
struct dma_fence *fence = dma_fence_get(&task->base.s_fence->finished);
+ trace_lima_task_submit(task);
drm_sched_entity_push_job(&task->base, &context->base);
return fence;
}
@@ -191,14 +195,36 @@ static struct dma_fence *lima_sched_dependency(struct drm_sched_job *job,
return NULL;
}
+static int lima_pm_busy(struct lima_device *ldev)
+{
+ int ret;
+
+ /* resume GPU if it has been suspended by runtime PM */
+ ret = pm_runtime_get_sync(ldev->dev);
+ if (ret < 0)
+ return ret;
+
+ lima_devfreq_record_busy(&ldev->devfreq);
+ return 0;
+}
+
+static void lima_pm_idle(struct lima_device *ldev)
+{
+ lima_devfreq_record_idle(&ldev->devfreq);
+
+ /* GPU can do auto runtime suspend */
+ pm_runtime_mark_last_busy(ldev->dev);
+ pm_runtime_put_autosuspend(ldev->dev);
+}
+
static struct dma_fence *lima_sched_run_job(struct drm_sched_job *job)
{
struct lima_sched_task *task = to_lima_task(job);
struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
+ struct lima_device *ldev = pipe->ldev;
struct lima_fence *fence;
struct dma_fence *ret;
- struct lima_vm *vm = NULL, *last_vm = NULL;
- int i;
+ int i, err;
/* after GPU reset */
if (job->s_fence->finished.error < 0)
@@ -207,6 +233,13 @@ static struct dma_fence *lima_sched_run_job(struct drm_sched_job *job)
fence = lima_fence_create(pipe);
if (!fence)
return NULL;
+
+ err = lima_pm_busy(ldev);
+ if (err < 0) {
+ dma_fence_put(&fence->base);
+ return NULL;
+ }
+
task->fence = &fence->base;
/* for caller usage of the fence, otherwise irq handler
@@ -234,21 +267,17 @@ static struct dma_fence *lima_sched_run_job(struct drm_sched_job *job)
for (i = 0; i < pipe->num_l2_cache; i++)
lima_l2_cache_flush(pipe->l2_cache[i]);
- if (task->vm != pipe->current_vm) {
- vm = lima_vm_get(task->vm);
- last_vm = pipe->current_vm;
- pipe->current_vm = task->vm;
- }
+ lima_vm_put(pipe->current_vm);
+ pipe->current_vm = lima_vm_get(task->vm);
if (pipe->bcast_mmu)
- lima_mmu_switch_vm(pipe->bcast_mmu, vm);
+ lima_mmu_switch_vm(pipe->bcast_mmu, pipe->current_vm);
else {
for (i = 0; i < pipe->num_mmu; i++)
- lima_mmu_switch_vm(pipe->mmu[i], vm);
+ lima_mmu_switch_vm(pipe->mmu[i], pipe->current_vm);
}
- if (last_vm)
- lima_vm_put(last_vm);
+ trace_lima_task_run(task);
pipe->error = false;
pipe->task_run(pipe, task);
@@ -256,10 +285,139 @@ static struct dma_fence *lima_sched_run_job(struct drm_sched_job *job)
return task->fence;
}
+static void lima_sched_build_error_task_list(struct lima_sched_task *task)
+{
+ struct lima_sched_error_task *et;
+ struct lima_sched_pipe *pipe = to_lima_pipe(task->base.sched);
+ struct lima_ip *ip = pipe->processor[0];
+ int pipe_id = ip->id == lima_ip_gp ? lima_pipe_gp : lima_pipe_pp;
+ struct lima_device *dev = ip->dev;
+ struct lima_sched_context *sched_ctx =
+ container_of(task->base.entity,
+ struct lima_sched_context, base);
+ struct lima_ctx *ctx =
+ container_of(sched_ctx, struct lima_ctx, context[pipe_id]);
+ struct lima_dump_task *dt;
+ struct lima_dump_chunk *chunk;
+ struct lima_dump_chunk_pid *pid_chunk;
+ struct lima_dump_chunk_buffer *buffer_chunk;
+ u32 size, task_size, mem_size;
+ int i;
+
+ mutex_lock(&dev->error_task_list_lock);
+
+ if (dev->dump.num_tasks >= lima_max_error_tasks) {
+ dev_info(dev->dev, "fail to save task state from %s pid %d: "
+ "error task list is full\n", ctx->pname, ctx->pid);
+ goto out;
+ }
+
+ /* frame chunk */
+ size = sizeof(struct lima_dump_chunk) + pipe->frame_size;
+ /* process name chunk */
+ size += sizeof(struct lima_dump_chunk) + sizeof(ctx->pname);
+ /* pid chunk */
+ size += sizeof(struct lima_dump_chunk);
+ /* buffer chunks */
+ for (i = 0; i < task->num_bos; i++) {
+ struct lima_bo *bo = task->bos[i];
+
+ size += sizeof(struct lima_dump_chunk);
+ size += bo->heap_size ? bo->heap_size : lima_bo_size(bo);
+ }
+
+ task_size = size + sizeof(struct lima_dump_task);
+ mem_size = task_size + sizeof(*et);
+ et = kvmalloc(mem_size, GFP_KERNEL);
+ if (!et) {
+ dev_err(dev->dev, "fail to alloc task dump buffer of size %x\n",
+ mem_size);
+ goto out;
+ }
+
+ et->data = et + 1;
+ et->size = task_size;
+
+ dt = et->data;
+ memset(dt, 0, sizeof(*dt));
+ dt->id = pipe_id;
+ dt->size = size;
+
+ chunk = (struct lima_dump_chunk *)(dt + 1);
+ memset(chunk, 0, sizeof(*chunk));
+ chunk->id = LIMA_DUMP_CHUNK_FRAME;
+ chunk->size = pipe->frame_size;
+ memcpy(chunk + 1, task->frame, pipe->frame_size);
+ dt->num_chunks++;
+
+ chunk = (void *)(chunk + 1) + chunk->size;
+ memset(chunk, 0, sizeof(*chunk));
+ chunk->id = LIMA_DUMP_CHUNK_PROCESS_NAME;
+ chunk->size = sizeof(ctx->pname);
+ memcpy(chunk + 1, ctx->pname, sizeof(ctx->pname));
+ dt->num_chunks++;
+
+ pid_chunk = (void *)(chunk + 1) + chunk->size;
+ memset(pid_chunk, 0, sizeof(*pid_chunk));
+ pid_chunk->id = LIMA_DUMP_CHUNK_PROCESS_ID;
+ pid_chunk->pid = ctx->pid;
+ dt->num_chunks++;
+
+ buffer_chunk = (void *)(pid_chunk + 1) + pid_chunk->size;
+ for (i = 0; i < task->num_bos; i++) {
+ struct lima_bo *bo = task->bos[i];
+ void *data;
+
+ memset(buffer_chunk, 0, sizeof(*buffer_chunk));
+ buffer_chunk->id = LIMA_DUMP_CHUNK_BUFFER;
+ buffer_chunk->va = lima_vm_get_va(task->vm, bo);
+
+ if (bo->heap_size) {
+ buffer_chunk->size = bo->heap_size;
+
+ data = vmap(bo->base.pages, bo->heap_size >> PAGE_SHIFT,
+ VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+ if (!data) {
+ kvfree(et);
+ goto out;
+ }
+
+ memcpy(buffer_chunk + 1, data, buffer_chunk->size);
+
+ vunmap(data);
+ } else {
+ buffer_chunk->size = lima_bo_size(bo);
+
+ data = drm_gem_shmem_vmap(&bo->base.base);
+ if (IS_ERR_OR_NULL(data)) {
+ kvfree(et);
+ goto out;
+ }
+
+ memcpy(buffer_chunk + 1, data, buffer_chunk->size);
+
+ drm_gem_shmem_vunmap(&bo->base.base, data);
+ }
+
+ buffer_chunk = (void *)(buffer_chunk + 1) + buffer_chunk->size;
+ dt->num_chunks++;
+ }
+
+ list_add(&et->list, &dev->error_task_list);
+ dev->dump.size += et->size;
+ dev->dump.num_tasks++;
+
+ dev_info(dev->dev, "save error task state success\n");
+
+out:
+ mutex_unlock(&dev->error_task_list_lock);
+}
+
static void lima_sched_timedout_job(struct drm_sched_job *job)
{
struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
struct lima_sched_task *task = to_lima_task(job);
+ struct lima_device *ldev = pipe->ldev;
if (!pipe->error)
DRM_ERROR("lima job timeout\n");
@@ -268,6 +426,8 @@ static void lima_sched_timedout_job(struct drm_sched_job *job)
drm_sched_increase_karma(&task->base);
+ lima_sched_build_error_task_list(task);
+
pipe->task_error(pipe);
if (pipe->bcast_mmu)
@@ -279,12 +439,12 @@ static void lima_sched_timedout_job(struct drm_sched_job *job)
lima_mmu_page_fault_resume(pipe->mmu[i]);
}
- if (pipe->current_vm)
- lima_vm_put(pipe->current_vm);
-
+ lima_vm_put(pipe->current_vm);
pipe->current_vm = NULL;
pipe->current_task = NULL;
+ lima_pm_idle(ldev);
+
drm_sched_resubmit_jobs(&pipe->base);
drm_sched_start(&pipe->base, true);
}
@@ -355,6 +515,7 @@ void lima_sched_pipe_fini(struct lima_sched_pipe *pipe)
void lima_sched_pipe_task_done(struct lima_sched_pipe *pipe)
{
struct lima_sched_task *task = pipe->current_task;
+ struct lima_device *ldev = pipe->ldev;
if (pipe->error) {
if (task && task->recoverable)
@@ -364,5 +525,7 @@ void lima_sched_pipe_task_done(struct lima_sched_pipe *pipe)
} else {
pipe->task_fini(pipe);
dma_fence_signal(task->fence);
+
+ lima_pm_idle(ldev);
}
}
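The lima_pm_busy()/lima_pm_idle() pair added above brackets each job with pm_runtime_get_sync() and pm_runtime_put_autosuspend(), which only lets the GPU actually suspend if autosuspend was configured when runtime PM was enabled. A sketch of the usual probe-time setup such helpers rely on (the delay value is illustrative, not taken from this series):

    /* probe: let the device drop into runtime suspend after a short idle period */
    pm_runtime_set_autosuspend_delay(dev, 200);     /* ms */
    pm_runtime_use_autosuspend(dev);
    pm_runtime_set_active(dev);
    pm_runtime_enable(dev);

    /* remove: undo in reverse order */
    pm_runtime_dont_use_autosuspend(dev);
    pm_runtime_disable(dev);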
diff --git a/drivers/gpu/drm/lima/lima_sched.h b/drivers/gpu/drm/lima/lima_sched.h
index d64393fb50a9..90f03c48ef4a 100644
--- a/drivers/gpu/drm/lima/lima_sched.h
+++ b/drivers/gpu/drm/lima/lima_sched.h
@@ -5,9 +5,18 @@
#define __LIMA_SCHED_H__
#include <drm/gpu_scheduler.h>
+#include <linux/list.h>
+#include <linux/xarray.h>
+struct lima_device;
struct lima_vm;
+struct lima_sched_error_task {
+ struct list_head list;
+ void *data;
+ u32 size;
+};
+
struct lima_sched_task {
struct drm_sched_job base;
@@ -44,6 +53,8 @@ struct lima_sched_pipe {
u32 fence_seqno;
spinlock_t fence_lock;
+ struct lima_device *ldev;
+
struct lima_sched_task *current_task;
struct lima_vm *current_vm;
diff --git a/drivers/gpu/drm/lima/lima_trace.c b/drivers/gpu/drm/lima/lima_trace.c
new file mode 100644
index 000000000000..ea1c7289bebc
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_trace.c
@@ -0,0 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright 2020 Qiang Yu <yuq825@gmail.com> */
+
+#include "lima_sched.h"
+
+#define CREATE_TRACE_POINTS
+#include "lima_trace.h"
diff --git a/drivers/gpu/drm/lima/lima_trace.h b/drivers/gpu/drm/lima/lima_trace.h
new file mode 100644
index 000000000000..3a430e93d384
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_trace.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2020 Qiang Yu <yuq825@gmail.com> */
+
+#if !defined(_LIMA_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _LIMA_TRACE_H
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM lima
+#define TRACE_INCLUDE_FILE lima_trace
+
+DECLARE_EVENT_CLASS(lima_task,
+ TP_PROTO(struct lima_sched_task *task),
+ TP_ARGS(task),
+ TP_STRUCT__entry(
+ __field(uint64_t, task_id)
+ __field(unsigned int, context)
+ __field(unsigned int, seqno)
+ __string(pipe, task->base.sched->name)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = task->base.id;
+ __entry->context = task->base.s_fence->finished.context;
+ __entry->seqno = task->base.s_fence->finished.seqno;
+ __assign_str(pipe, task->base.sched->name)
+ ),
+
+ TP_printk("task=%llu, context=%u seqno=%u pipe=%s",
+ __entry->task_id, __entry->context, __entry->seqno,
+ __get_str(pipe))
+);
+
+DEFINE_EVENT(lima_task, lima_task_submit,
+ TP_PROTO(struct lima_sched_task *task),
+ TP_ARGS(task)
+);
+
+DEFINE_EVENT(lima_task, lima_task_run,
+ TP_PROTO(struct lima_sched_task *task),
+ TP_ARGS(task)
+);
+
+#endif
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/lima
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/lima/lima_vm.h b/drivers/gpu/drm/lima/lima_vm.h
index 22aeec77d84d..3a7c74822d8b 100644
--- a/drivers/gpu/drm/lima/lima_vm.h
+++ b/drivers/gpu/drm/lima/lima_vm.h
@@ -54,7 +54,8 @@ static inline struct lima_vm *lima_vm_get(struct lima_vm *vm)
static inline void lima_vm_put(struct lima_vm *vm)
{
- kref_put(&vm->refcount, lima_vm_release);
+ if (vm)
+ kref_put(&vm->refcount, lima_vm_release);
}
void lima_vm_print(struct lima_vm *vm);
diff --git a/drivers/gpu/drm/mcde/mcde_display.c b/drivers/gpu/drm/mcde/mcde_display.c
index e59907e68854..04e1d38d41f7 100644
--- a/drivers/gpu/drm/mcde/mcde_display.c
+++ b/drivers/gpu/drm/mcde/mcde_display.c
@@ -948,7 +948,7 @@ static void mcde_display_disable(struct drm_simple_display_pipe *pipe)
{
struct drm_crtc *crtc = &pipe->crtc;
struct drm_device *drm = crtc->dev;
- struct mcde *mcde = drm->dev_private;
+ struct mcde *mcde = to_mcde(drm);
struct drm_pending_vblank_event *event;
drm_crtc_vblank_off(crtc);
@@ -1020,7 +1020,7 @@ static void mcde_display_update(struct drm_simple_display_pipe *pipe,
{
struct drm_crtc *crtc = &pipe->crtc;
struct drm_device *drm = crtc->dev;
- struct mcde *mcde = drm->dev_private;
+ struct mcde *mcde = to_mcde(drm);
struct drm_pending_vblank_event *event = crtc->state->event;
struct drm_plane *plane = &pipe->plane;
struct drm_plane_state *pstate = plane->state;
@@ -1078,7 +1078,7 @@ static int mcde_display_enable_vblank(struct drm_simple_display_pipe *pipe)
{
struct drm_crtc *crtc = &pipe->crtc;
struct drm_device *drm = crtc->dev;
- struct mcde *mcde = drm->dev_private;
+ struct mcde *mcde = to_mcde(drm);
u32 val;
/* Enable all VBLANK IRQs */
@@ -1097,7 +1097,7 @@ static void mcde_display_disable_vblank(struct drm_simple_display_pipe *pipe)
{
struct drm_crtc *crtc = &pipe->crtc;
struct drm_device *drm = crtc->dev;
- struct mcde *mcde = drm->dev_private;
+ struct mcde *mcde = to_mcde(drm);
/* Disable all VBLANK IRQs */
writel(0, mcde->regs + MCDE_IMSCPP);
@@ -1117,7 +1117,7 @@ static struct drm_simple_display_pipe_funcs mcde_display_funcs = {
int mcde_display_init(struct drm_device *drm)
{
- struct mcde *mcde = drm->dev_private;
+ struct mcde *mcde = to_mcde(drm);
int ret;
static const u32 formats[] = {
DRM_FORMAT_ARGB8888,
diff --git a/drivers/gpu/drm/mcde/mcde_drm.h b/drivers/gpu/drm/mcde/mcde_drm.h
index 80edd6628979..679c2c4e6d9d 100644
--- a/drivers/gpu/drm/mcde/mcde_drm.h
+++ b/drivers/gpu/drm/mcde/mcde_drm.h
@@ -34,6 +34,8 @@ struct mcde {
struct regulator *vana;
};
+#define to_mcde(dev) container_of(dev, struct mcde, drm)
+
bool mcde_dsi_irq(struct mipi_dsi_device *mdsi);
void mcde_dsi_te_request(struct mipi_dsi_device *mdsi);
extern struct platform_driver mcde_dsi_driver;
diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c
index f28cb7a576ba..84f3e2dbd77b 100644
--- a/drivers/gpu/drm/mcde/mcde_drv.c
+++ b/drivers/gpu/drm/mcde/mcde_drv.c
@@ -72,6 +72,7 @@
#include <drm/drm_gem.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_managed.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_panel.h>
@@ -163,7 +164,7 @@ static irqreturn_t mcde_irq(int irq, void *data)
static int mcde_modeset_init(struct drm_device *drm)
{
struct drm_mode_config *mode_config;
- struct mcde *mcde = drm->dev_private;
+ struct mcde *mcde = to_mcde(drm);
int ret;
if (!mcde->bridge) {
@@ -183,13 +184,13 @@ static int mcde_modeset_init(struct drm_device *drm)
ret = drm_vblank_init(drm, 1);
if (ret) {
dev_err(drm->dev, "failed to init vblank\n");
- goto out_config;
+ return ret;
}
ret = mcde_display_init(drm);
if (ret) {
dev_err(drm->dev, "failed to init display\n");
- goto out_config;
+ return ret;
}
/*
@@ -203,7 +204,7 @@ static int mcde_modeset_init(struct drm_device *drm)
mcde->bridge);
if (ret) {
dev_err(drm->dev, "failed to attach display output bridge\n");
- goto out_config;
+ return ret;
}
drm_mode_config_reset(drm);
@@ -211,19 +212,6 @@ static int mcde_modeset_init(struct drm_device *drm)
drm_fbdev_generic_setup(drm, 32);
return 0;
-
-out_config:
- drm_mode_config_cleanup(drm);
- return ret;
-}
-
-static void mcde_release(struct drm_device *drm)
-{
- struct mcde *mcde = drm->dev_private;
-
- drm_mode_config_cleanup(drm);
- drm_dev_fini(drm);
- kfree(mcde);
}
DEFINE_DRM_GEM_CMA_FOPS(drm_fops);
@@ -231,7 +219,6 @@ DEFINE_DRM_GEM_CMA_FOPS(drm_fops);
static struct drm_driver mcde_drm_driver = {
.driver_features =
DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
- .release = mcde_release,
.lastclose = drm_fb_helper_lastclose,
.ioctls = NULL,
.fops = &drm_fops,
@@ -259,7 +246,9 @@ static int mcde_drm_bind(struct device *dev)
struct drm_device *drm = dev_get_drvdata(dev);
int ret;
- drm_mode_config_init(drm);
+ ret = drmm_mode_config_init(drm);
+ if (ret)
+ return ret;
ret = component_bind_all(drm->dev, drm);
if (ret) {
@@ -318,35 +307,27 @@ static int mcde_probe(struct platform_device *pdev)
int ret;
int i;
- mcde = kzalloc(sizeof(*mcde), GFP_KERNEL);
- if (!mcde)
- return -ENOMEM;
- mcde->dev = dev;
-
- ret = drm_dev_init(&mcde->drm, &mcde_drm_driver, dev);
- if (ret) {
- kfree(mcde);
- return ret;
- }
+ mcde = devm_drm_dev_alloc(dev, &mcde_drm_driver, struct mcde, drm);
+ if (IS_ERR(mcde))
+ return PTR_ERR(mcde);
drm = &mcde->drm;
- drm->dev_private = mcde;
+ mcde->dev = dev;
platform_set_drvdata(pdev, drm);
/* Enable continuous updates: this is what Linux' framebuffer expects */
mcde->oneshot_mode = false;
- drm->dev_private = mcde;
/* First obtain and turn on the main power */
mcde->epod = devm_regulator_get(dev, "epod");
if (IS_ERR(mcde->epod)) {
ret = PTR_ERR(mcde->epod);
dev_err(dev, "can't get EPOD regulator\n");
- goto dev_unref;
+ return ret;
}
ret = regulator_enable(mcde->epod);
if (ret) {
dev_err(dev, "can't enable EPOD regulator\n");
- goto dev_unref;
+ return ret;
}
mcde->vana = devm_regulator_get(dev, "vana");
if (IS_ERR(mcde->vana)) {
@@ -497,8 +478,6 @@ regulator_off:
regulator_disable(mcde->vana);
regulator_epod_off:
regulator_disable(mcde->epod);
-dev_unref:
- drm_dev_put(drm);
return ret;
}
@@ -506,13 +485,12 @@ dev_unref:
static int mcde_remove(struct platform_device *pdev)
{
struct drm_device *drm = platform_get_drvdata(pdev);
- struct mcde *mcde = drm->dev_private;
+ struct mcde *mcde = to_mcde(drm);
component_master_del(&pdev->dev, &mcde_drm_comp_ops);
clk_disable_unprepare(mcde->mcde_clk);
regulator_disable(mcde->vana);
regulator_disable(mcde->epod);
- drm_dev_put(drm);
return 0;
}
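With devm_drm_dev_alloc() the drm_device is embedded in struct mcde and released together with it when the last reference drops, and drmm_mode_config_init() registers its own cleanup action; that is why the hand-rolled release callback, the drm_mode_config_cleanup() calls and the drm_dev_put() on the error paths could all be deleted. The resulting probe/bind skeleton in condensed form (a sketch, error handling trimmed):

    mcde = devm_drm_dev_alloc(dev, &mcde_drm_driver, struct mcde, drm);
    if (IS_ERR(mcde))
            return PTR_ERR(mcde);
    drm = &mcde->drm;

    ret = drmm_mode_config_init(drm);       /* teardown is now DRM-managed */
    if (ret)
            return ret;

    /* ... acquire clocks/regulators with devm_* helpers, then ... */
    ret = drm_dev_register(drm, 0);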
diff --git a/drivers/gpu/drm/mcde/mcde_dsi.c b/drivers/gpu/drm/mcde/mcde_dsi.c
index 7af5ebb0c436..f303369305a3 100644
--- a/drivers/gpu/drm/mcde/mcde_dsi.c
+++ b/drivers/gpu/drm/mcde/mcde_dsi.c
@@ -1020,7 +1020,7 @@ static int mcde_dsi_bind(struct device *dev, struct device *master,
void *data)
{
struct drm_device *drm = data;
- struct mcde *mcde = drm->dev_private;
+ struct mcde *mcde = to_mcde(drm);
struct mcde_dsi *d = dev_get_drvdata(dev);
struct device_node *child;
struct drm_panel *panel = NULL;
@@ -1073,10 +1073,9 @@ static int mcde_dsi_bind(struct device *dev, struct device *master,
panel = NULL;
bridge = of_drm_find_bridge(child);
- if (IS_ERR(bridge)) {
- dev_err(dev, "failed to find bridge (%ld)\n",
- PTR_ERR(bridge));
- return PTR_ERR(bridge);
+ if (!bridge) {
+ dev_err(dev, "failed to find bridge\n");
+ return -EINVAL;
}
}
}
diff --git a/drivers/gpu/drm/mediatek/Kconfig b/drivers/gpu/drm/mediatek/Kconfig
index fa5ffc4fe823..c420f5a3d33b 100644
--- a/drivers/gpu/drm/mediatek/Kconfig
+++ b/drivers/gpu/drm/mediatek/Kconfig
@@ -11,6 +11,7 @@ config DRM_MEDIATEK
select DRM_MIPI_DSI
select DRM_PANEL
select MEMORY
+ select MTK_MMSYS
select MTK_SMI
select VIDEOMODE_HELPERS
help
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_color.c b/drivers/gpu/drm/mediatek/mtk_disp_color.c
index 6fb0d6983a4a..3ae9c810845b 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_color.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_color.c
@@ -119,7 +119,10 @@ static int mtk_disp_color_probe(struct platform_device *pdev)
ret = mtk_ddp_comp_init(dev, dev->of_node, &priv->ddp_comp, comp_id,
&mtk_disp_color_funcs);
if (ret) {
- dev_err(dev, "Failed to initialize component: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to initialize component: %d\n",
+ ret);
+
return ret;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
index 891d80c73e04..28651bc579bc 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -386,7 +386,10 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev)
ret = mtk_ddp_comp_init(dev, dev->of_node, &priv->ddp_comp, comp_id,
&mtk_disp_ovl_funcs);
if (ret) {
- dev_err(dev, "Failed to initialize component: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to initialize component: %d\n",
+ ret);
+
return ret;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
index 0cb848d64206..e04319fedf46 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
@@ -294,7 +294,10 @@ static int mtk_disp_rdma_probe(struct platform_device *pdev)
ret = mtk_ddp_comp_init(dev, dev->of_node, &priv->ddp_comp, comp_id,
&mtk_disp_rdma_funcs);
if (ret) {
- dev_err(dev, "Failed to initialize component: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to initialize component: %d\n",
+ ret);
+
return ret;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index 4f0ce4cd5b8c..d4f0fb7ad312 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -10,7 +10,9 @@
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/of_gpio.h>
#include <linux/of_graph.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/types.h>
@@ -20,6 +22,7 @@
#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_of.h>
+#include <drm/drm_simple_kms_helper.h>
#include "mtk_dpi_regs.h"
#include "mtk_drm_ddp_comp.h"
@@ -74,6 +77,9 @@ struct mtk_dpi {
enum mtk_dpi_out_yc_map yc_map;
enum mtk_dpi_out_bit_num bit_num;
enum mtk_dpi_out_channel_swap channel_swap;
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *pins_gpio;
+ struct pinctrl_state *pins_dpi;
int refcount;
};
@@ -379,6 +385,9 @@ static void mtk_dpi_power_off(struct mtk_dpi *dpi)
if (--dpi->refcount != 0)
return;
+ if (dpi->pinctrl && dpi->pins_gpio)
+ pinctrl_select_state(dpi->pinctrl, dpi->pins_gpio);
+
mtk_dpi_disable(dpi);
clk_disable_unprepare(dpi->pixel_clk);
clk_disable_unprepare(dpi->engine_clk);
@@ -403,6 +412,9 @@ static int mtk_dpi_power_on(struct mtk_dpi *dpi)
goto err_pixel;
}
+ if (dpi->pinctrl && dpi->pins_dpi)
+ pinctrl_select_state(dpi->pinctrl, dpi->pins_dpi);
+
mtk_dpi_enable(dpi);
return 0;
@@ -509,15 +521,6 @@ static int mtk_dpi_set_display_mode(struct mtk_dpi *dpi,
return 0;
}
-static void mtk_dpi_encoder_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
-}
-
-static const struct drm_encoder_funcs mtk_dpi_encoder_funcs = {
- .destroy = mtk_dpi_encoder_destroy,
-};
-
static bool mtk_dpi_encoder_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -596,8 +599,8 @@ static int mtk_dpi_bind(struct device *dev, struct device *master, void *data)
return ret;
}
- ret = drm_encoder_init(drm_dev, &dpi->encoder, &mtk_dpi_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
+ ret = drm_simple_encoder_init(drm_dev, &dpi->encoder,
+ DRM_MODE_ENCODER_TMDS);
if (ret) {
dev_err(dev, "Failed to initialize decoder: %d\n", ret);
goto err_unregister;
@@ -705,6 +708,26 @@ static int mtk_dpi_probe(struct platform_device *pdev)
dpi->dev = dev;
dpi->conf = (struct mtk_dpi_conf *)of_device_get_match_data(dev);
+ dpi->pinctrl = devm_pinctrl_get(&pdev->dev);
+ if (IS_ERR(dpi->pinctrl)) {
+ dpi->pinctrl = NULL;
+ dev_dbg(&pdev->dev, "Cannot find pinctrl!\n");
+ }
+ if (dpi->pinctrl) {
+ dpi->pins_gpio = pinctrl_lookup_state(dpi->pinctrl, "sleep");
+ if (IS_ERR(dpi->pins_gpio)) {
+ dpi->pins_gpio = NULL;
+ dev_dbg(&pdev->dev, "Cannot find pinctrl idle!\n");
+ }
+ if (dpi->pins_gpio)
+ pinctrl_select_state(dpi->pinctrl, dpi->pins_gpio);
+
+ dpi->pins_dpi = pinctrl_lookup_state(dpi->pinctrl, "default");
+ if (IS_ERR(dpi->pins_dpi)) {
+ dpi->pins_dpi = NULL;
+ dev_dbg(&pdev->dev, "Cannot find pinctrl active!\n");
+ }
+ }
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dpi->regs = devm_ioremap_resource(dev, mem);
if (IS_ERR(dpi->regs)) {
@@ -716,21 +739,27 @@ static int mtk_dpi_probe(struct platform_device *pdev)
dpi->engine_clk = devm_clk_get(dev, "engine");
if (IS_ERR(dpi->engine_clk)) {
ret = PTR_ERR(dpi->engine_clk);
- dev_err(dev, "Failed to get engine clock: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get engine clock: %d\n", ret);
+
return ret;
}
dpi->pixel_clk = devm_clk_get(dev, "pixel");
if (IS_ERR(dpi->pixel_clk)) {
ret = PTR_ERR(dpi->pixel_clk);
- dev_err(dev, "Failed to get pixel clock: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get pixel clock: %d\n", ret);
+
return ret;
}
dpi->tvd_clk = devm_clk_get(dev, "pll");
if (IS_ERR(dpi->tvd_clk)) {
ret = PTR_ERR(dpi->tvd_clk);
- dev_err(dev, "Failed to get tvdpll clock: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get tvdpll clock: %d\n", ret);
+
return ret;
}
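The repeated "if (ret != -EPROBE_DEFER) dev_err(...)" blocks added in this and the other mediatek probe paths only exist to keep probe deferral from spamming the log. On kernels that provide dev_err_probe() the same policy collapses into a single call; a sketch, assuming that helper is available:

    dpi->engine_clk = devm_clk_get(dev, "engine");
    if (IS_ERR(dpi->engine_clk))
            return dev_err_probe(dev, PTR_ERR(dpi->engine_clk),
                                 "Failed to get engine clock\n");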
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index fe85e487e477..fe46c4bac64d 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -6,6 +6,7 @@
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
+#include <linux/soc/mediatek/mtk-mmsys.h>
#include <asm/barrier.h>
#include <soc/mediatek/smi.h>
@@ -28,7 +29,7 @@
* @enabled: records whether crtc_enable succeeded
* @planes: array of 4 drm_plane structures, one for each overlay plane
* @pending_planes: whether any plane has pending changes to be applied
- * @config_regs: memory mapped mmsys configuration register space
+ * @mmsys_dev: pointer to the mmsys device for configuration registers
* @mutex: handle to one of the ten disp_mutex streams
* @ddp_comp_nr: number of components in ddp_comp
* @ddp_comp: array of pointers the mtk_ddp_comp structures used by this crtc
@@ -50,7 +51,7 @@ struct mtk_drm_crtc {
u32 cmdq_event;
#endif
- void __iomem *config_regs;
+ struct device *mmsys_dev;
struct mtk_disp_mutex *mutex;
unsigned int ddp_comp_nr;
struct mtk_ddp_comp **ddp_comp;
@@ -300,9 +301,9 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
DRM_DEBUG_DRIVER("mediatek_ddp_ddp_path_setup\n");
for (i = 0; i < mtk_crtc->ddp_comp_nr - 1; i++) {
- mtk_ddp_add_comp_to_path(mtk_crtc->config_regs,
- mtk_crtc->ddp_comp[i]->id,
- mtk_crtc->ddp_comp[i + 1]->id);
+ mtk_mmsys_ddp_connect(mtk_crtc->mmsys_dev,
+ mtk_crtc->ddp_comp[i]->id,
+ mtk_crtc->ddp_comp[i + 1]->id);
mtk_disp_mutex_add_comp(mtk_crtc->mutex,
mtk_crtc->ddp_comp[i]->id);
}
@@ -360,9 +361,9 @@ static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc)
mtk_crtc->ddp_comp[i]->id);
mtk_disp_mutex_disable(mtk_crtc->mutex);
for (i = 0; i < mtk_crtc->ddp_comp_nr - 1; i++) {
- mtk_ddp_remove_comp_from_path(mtk_crtc->config_regs,
- mtk_crtc->ddp_comp[i]->id,
- mtk_crtc->ddp_comp[i + 1]->id);
+ mtk_mmsys_ddp_disconnect(mtk_crtc->mmsys_dev,
+ mtk_crtc->ddp_comp[i]->id,
+ mtk_crtc->ddp_comp[i + 1]->id);
mtk_disp_mutex_remove_comp(mtk_crtc->mutex,
mtk_crtc->ddp_comp[i]->id);
}
@@ -766,7 +767,7 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
if (!mtk_crtc)
return -ENOMEM;
- mtk_crtc->config_regs = priv->config_regs;
+ mtk_crtc->mmsys_dev = priv->mmsys_dev;
mtk_crtc->ddp_comp_nr = path_len;
mtk_crtc->ddp_comp = devm_kmalloc_array(dev, mtk_crtc->ddp_comp_nr,
sizeof(*mtk_crtc->ddp_comp),
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
index 13035c906035..014c1bbe1df2 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
@@ -13,26 +13,6 @@
#include "mtk_drm_ddp.h"
#include "mtk_drm_ddp_comp.h"
-#define DISP_REG_CONFIG_DISP_OVL0_MOUT_EN 0x040
-#define DISP_REG_CONFIG_DISP_OVL1_MOUT_EN 0x044
-#define DISP_REG_CONFIG_DISP_OD_MOUT_EN 0x048
-#define DISP_REG_CONFIG_DISP_GAMMA_MOUT_EN 0x04c
-#define DISP_REG_CONFIG_DISP_UFOE_MOUT_EN 0x050
-#define DISP_REG_CONFIG_DISP_COLOR0_SEL_IN 0x084
-#define DISP_REG_CONFIG_DISP_COLOR1_SEL_IN 0x088
-#define DISP_REG_CONFIG_DSIE_SEL_IN 0x0a4
-#define DISP_REG_CONFIG_DSIO_SEL_IN 0x0a8
-#define DISP_REG_CONFIG_DPI_SEL_IN 0x0ac
-#define DISP_REG_CONFIG_DISP_RDMA2_SOUT 0x0b8
-#define DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN 0x0c4
-#define DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN 0x0c8
-#define DISP_REG_CONFIG_MMSYS_CG_CON0 0x100
-
-#define DISP_REG_CONFIG_DISP_OVL_MOUT_EN 0x030
-#define DISP_REG_CONFIG_OUT_SEL 0x04c
-#define DISP_REG_CONFIG_DSI_SEL 0x050
-#define DISP_REG_CONFIG_DPI_SEL 0x064
-
#define MT2701_DISP_MUTEX0_MOD0 0x2c
#define MT2701_DISP_MUTEX0_SOF0 0x30
@@ -94,48 +74,6 @@
#define MUTEX_SOF_DSI2 5
#define MUTEX_SOF_DSI3 6
-#define OVL0_MOUT_EN_COLOR0 0x1
-#define OD_MOUT_EN_RDMA0 0x1
-#define OD1_MOUT_EN_RDMA1 BIT(16)
-#define UFOE_MOUT_EN_DSI0 0x1
-#define COLOR0_SEL_IN_OVL0 0x1
-#define OVL1_MOUT_EN_COLOR1 0x1
-#define GAMMA_MOUT_EN_RDMA1 0x1
-#define RDMA0_SOUT_DPI0 0x2
-#define RDMA0_SOUT_DPI1 0x3
-#define RDMA0_SOUT_DSI1 0x1
-#define RDMA0_SOUT_DSI2 0x4
-#define RDMA0_SOUT_DSI3 0x5
-#define RDMA1_SOUT_DPI0 0x2
-#define RDMA1_SOUT_DPI1 0x3
-#define RDMA1_SOUT_DSI1 0x1
-#define RDMA1_SOUT_DSI2 0x4
-#define RDMA1_SOUT_DSI3 0x5
-#define RDMA2_SOUT_DPI0 0x2
-#define RDMA2_SOUT_DPI1 0x3
-#define RDMA2_SOUT_DSI1 0x1
-#define RDMA2_SOUT_DSI2 0x4
-#define RDMA2_SOUT_DSI3 0x5
-#define DPI0_SEL_IN_RDMA1 0x1
-#define DPI0_SEL_IN_RDMA2 0x3
-#define DPI1_SEL_IN_RDMA1 (0x1 << 8)
-#define DPI1_SEL_IN_RDMA2 (0x3 << 8)
-#define DSI0_SEL_IN_RDMA1 0x1
-#define DSI0_SEL_IN_RDMA2 0x4
-#define DSI1_SEL_IN_RDMA1 0x1
-#define DSI1_SEL_IN_RDMA2 0x4
-#define DSI2_SEL_IN_RDMA1 (0x1 << 16)
-#define DSI2_SEL_IN_RDMA2 (0x4 << 16)
-#define DSI3_SEL_IN_RDMA1 (0x1 << 16)
-#define DSI3_SEL_IN_RDMA2 (0x4 << 16)
-#define COLOR1_SEL_IN_OVL1 0x1
-
-#define OVL_MOUT_EN_RDMA 0x1
-#define BLS_TO_DSI_RDMA1_TO_DPI1 0x8
-#define BLS_TO_DPI_RDMA1_TO_DSI 0x2
-#define DSI_SEL_IN_BLS 0x0
-#define DPI_SEL_IN_BLS 0x0
-#define DSI_SEL_IN_RDMA 0x1
struct mtk_disp_mutex {
int id;
@@ -246,200 +184,6 @@ static const struct mtk_ddp_data mt8173_ddp_driver_data = {
.mutex_sof_reg = MT2701_DISP_MUTEX0_SOF0,
};
-static unsigned int mtk_ddp_mout_en(enum mtk_ddp_comp_id cur,
- enum mtk_ddp_comp_id next,
- unsigned int *addr)
-{
- unsigned int value;
-
- if (cur == DDP_COMPONENT_OVL0 && next == DDP_COMPONENT_COLOR0) {
- *addr = DISP_REG_CONFIG_DISP_OVL0_MOUT_EN;
- value = OVL0_MOUT_EN_COLOR0;
- } else if (cur == DDP_COMPONENT_OVL0 && next == DDP_COMPONENT_RDMA0) {
- *addr = DISP_REG_CONFIG_DISP_OVL_MOUT_EN;
- value = OVL_MOUT_EN_RDMA;
- } else if (cur == DDP_COMPONENT_OD0 && next == DDP_COMPONENT_RDMA0) {
- *addr = DISP_REG_CONFIG_DISP_OD_MOUT_EN;
- value = OD_MOUT_EN_RDMA0;
- } else if (cur == DDP_COMPONENT_UFOE && next == DDP_COMPONENT_DSI0) {
- *addr = DISP_REG_CONFIG_DISP_UFOE_MOUT_EN;
- value = UFOE_MOUT_EN_DSI0;
- } else if (cur == DDP_COMPONENT_OVL1 && next == DDP_COMPONENT_COLOR1) {
- *addr = DISP_REG_CONFIG_DISP_OVL1_MOUT_EN;
- value = OVL1_MOUT_EN_COLOR1;
- } else if (cur == DDP_COMPONENT_GAMMA && next == DDP_COMPONENT_RDMA1) {
- *addr = DISP_REG_CONFIG_DISP_GAMMA_MOUT_EN;
- value = GAMMA_MOUT_EN_RDMA1;
- } else if (cur == DDP_COMPONENT_OD1 && next == DDP_COMPONENT_RDMA1) {
- *addr = DISP_REG_CONFIG_DISP_OD_MOUT_EN;
- value = OD1_MOUT_EN_RDMA1;
- } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DPI0) {
- *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
- value = RDMA0_SOUT_DPI0;
- } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DPI1) {
- *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
- value = RDMA0_SOUT_DPI1;
- } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI1) {
- *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
- value = RDMA0_SOUT_DSI1;
- } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI2) {
- *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
- value = RDMA0_SOUT_DSI2;
- } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI3) {
- *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
- value = RDMA0_SOUT_DSI3;
- } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI1) {
- *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
- value = RDMA1_SOUT_DSI1;
- } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI2) {
- *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
- value = RDMA1_SOUT_DSI2;
- } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI3) {
- *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
- value = RDMA1_SOUT_DSI3;
- } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI0) {
- *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
- value = RDMA1_SOUT_DPI0;
- } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI1) {
- *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
- value = RDMA1_SOUT_DPI1;
- } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI0) {
- *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
- value = RDMA2_SOUT_DPI0;
- } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI1) {
- *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
- value = RDMA2_SOUT_DPI1;
- } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI1) {
- *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
- value = RDMA2_SOUT_DSI1;
- } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI2) {
- *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
- value = RDMA2_SOUT_DSI2;
- } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI3) {
- *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
- value = RDMA2_SOUT_DSI3;
- } else {
- value = 0;
- }
-
- return value;
-}
-
-static unsigned int mtk_ddp_sel_in(enum mtk_ddp_comp_id cur,
- enum mtk_ddp_comp_id next,
- unsigned int *addr)
-{
- unsigned int value;
-
- if (cur == DDP_COMPONENT_OVL0 && next == DDP_COMPONENT_COLOR0) {
- *addr = DISP_REG_CONFIG_DISP_COLOR0_SEL_IN;
- value = COLOR0_SEL_IN_OVL0;
- } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI0) {
- *addr = DISP_REG_CONFIG_DPI_SEL_IN;
- value = DPI0_SEL_IN_RDMA1;
- } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI1) {
- *addr = DISP_REG_CONFIG_DPI_SEL_IN;
- value = DPI1_SEL_IN_RDMA1;
- } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI0) {
- *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
- value = DSI0_SEL_IN_RDMA1;
- } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI1) {
- *addr = DISP_REG_CONFIG_DSIO_SEL_IN;
- value = DSI1_SEL_IN_RDMA1;
- } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI2) {
- *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
- value = DSI2_SEL_IN_RDMA1;
- } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI3) {
- *addr = DISP_REG_CONFIG_DSIO_SEL_IN;
- value = DSI3_SEL_IN_RDMA1;
- } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI0) {
- *addr = DISP_REG_CONFIG_DPI_SEL_IN;
- value = DPI0_SEL_IN_RDMA2;
- } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI1) {
- *addr = DISP_REG_CONFIG_DPI_SEL_IN;
- value = DPI1_SEL_IN_RDMA2;
- } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI0) {
- *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
- value = DSI0_SEL_IN_RDMA2;
- } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI1) {
- *addr = DISP_REG_CONFIG_DSIO_SEL_IN;
- value = DSI1_SEL_IN_RDMA2;
- } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI2) {
- *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
- value = DSI2_SEL_IN_RDMA2;
- } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI3) {
- *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
- value = DSI3_SEL_IN_RDMA2;
- } else if (cur == DDP_COMPONENT_OVL1 && next == DDP_COMPONENT_COLOR1) {
- *addr = DISP_REG_CONFIG_DISP_COLOR1_SEL_IN;
- value = COLOR1_SEL_IN_OVL1;
- } else if (cur == DDP_COMPONENT_BLS && next == DDP_COMPONENT_DSI0) {
- *addr = DISP_REG_CONFIG_DSI_SEL;
- value = DSI_SEL_IN_BLS;
- } else {
- value = 0;
- }
-
- return value;
-}
-
-static void mtk_ddp_sout_sel(void __iomem *config_regs,
- enum mtk_ddp_comp_id cur,
- enum mtk_ddp_comp_id next)
-{
- if (cur == DDP_COMPONENT_BLS && next == DDP_COMPONENT_DSI0) {
- writel_relaxed(BLS_TO_DSI_RDMA1_TO_DPI1,
- config_regs + DISP_REG_CONFIG_OUT_SEL);
- } else if (cur == DDP_COMPONENT_BLS && next == DDP_COMPONENT_DPI0) {
- writel_relaxed(BLS_TO_DPI_RDMA1_TO_DSI,
- config_regs + DISP_REG_CONFIG_OUT_SEL);
- writel_relaxed(DSI_SEL_IN_RDMA,
- config_regs + DISP_REG_CONFIG_DSI_SEL);
- writel_relaxed(DPI_SEL_IN_BLS,
- config_regs + DISP_REG_CONFIG_DPI_SEL);
- }
-}
-
-void mtk_ddp_add_comp_to_path(void __iomem *config_regs,
- enum mtk_ddp_comp_id cur,
- enum mtk_ddp_comp_id next)
-{
- unsigned int addr, value, reg;
-
- value = mtk_ddp_mout_en(cur, next, &addr);
- if (value) {
- reg = readl_relaxed(config_regs + addr) | value;
- writel_relaxed(reg, config_regs + addr);
- }
-
- mtk_ddp_sout_sel(config_regs, cur, next);
-
- value = mtk_ddp_sel_in(cur, next, &addr);
- if (value) {
- reg = readl_relaxed(config_regs + addr) | value;
- writel_relaxed(reg, config_regs + addr);
- }
-}
-
-void mtk_ddp_remove_comp_from_path(void __iomem *config_regs,
- enum mtk_ddp_comp_id cur,
- enum mtk_ddp_comp_id next)
-{
- unsigned int addr, value, reg;
-
- value = mtk_ddp_mout_en(cur, next, &addr);
- if (value) {
- reg = readl_relaxed(config_regs + addr) & ~value;
- writel_relaxed(reg, config_regs + addr);
- }
-
- value = mtk_ddp_sel_in(cur, next, &addr);
- if (value) {
- reg = readl_relaxed(config_regs + addr) & ~value;
- writel_relaxed(reg, config_regs + addr);
- }
-}
-
struct mtk_disp_mutex *mtk_disp_mutex_get(struct device *dev, unsigned int id)
{
struct mtk_ddp *ddp = dev_get_drvdata(dev);
@@ -628,7 +372,8 @@ static int mtk_ddp_probe(struct platform_device *pdev)
if (!ddp->data->no_clk) {
ddp->clk = devm_clk_get(dev, NULL);
if (IS_ERR(ddp->clk)) {
- dev_err(dev, "Failed to get clock\n");
+ if (PTR_ERR(ddp->clk) != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get clock\n");
return PTR_ERR(ddp->clk);
}
}
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp.h
index 827be424a148..6b691a57be4a 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp.h
@@ -12,13 +12,6 @@ struct regmap;
struct device;
struct mtk_disp_mutex;
-void mtk_ddp_add_comp_to_path(void __iomem *config_regs,
- enum mtk_ddp_comp_id cur,
- enum mtk_ddp_comp_id next);
-void mtk_ddp_remove_comp_from_path(void __iomem *config_regs,
- enum mtk_ddp_comp_id cur,
- enum mtk_ddp_comp_id next);
-
struct mtk_disp_mutex *mtk_disp_mutex_get(struct device *dev, unsigned int id);
int mtk_disp_mutex_prepare(struct mtk_disp_mutex *mutex);
void mtk_disp_mutex_add_comp(struct mtk_disp_mutex *mutex,
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 0563c6813333..6bd369434d9d 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -10,6 +10,7 @@
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
+#include <linux/soc/mediatek/mtk-mmsys.h>
#include <linux/dma-mapping.h>
#include <drm/drm_atomic.h>
@@ -162,7 +163,9 @@ static int mtk_drm_kms_init(struct drm_device *drm)
}
private->mutex_dev = &pdev->dev;
- drm_mode_config_init(drm);
+ ret = drmm_mode_config_init(drm);
+ if (ret)
+ return ret;
drm->mode_config.min_width = 64;
drm->mode_config.min_height = 64;
@@ -179,7 +182,7 @@ static int mtk_drm_kms_init(struct drm_device *drm)
ret = component_bind_all(drm->dev, drm);
if (ret)
- goto err_config_cleanup;
+ return ret;
/*
* We currently support two fixed data streams, each optional,
@@ -255,8 +258,6 @@ err_unset_dma_parms:
dma_dev->dma_parms = NULL;
err_component_unbind:
component_unbind_all(drm->dev, drm);
-err_config_cleanup:
- drm_mode_config_cleanup(drm);
return ret;
}
@@ -272,7 +273,6 @@ static void mtk_drm_kms_deinit(struct drm_device *drm)
private->dma_dev->dma_parms = NULL;
component_unbind_all(drm->dev, drm);
- drm_mode_config_cleanup(drm);
}
static const struct file_operations mtk_drm_fops = {
@@ -348,9 +348,7 @@ static int mtk_drm_bind(struct device *dev)
if (ret < 0)
goto err_deinit;
- ret = drm_fbdev_generic_setup(drm, 32);
- if (ret)
- DRM_ERROR("Failed to initialize fbdev: %d\n", ret);
+ drm_fbdev_generic_setup(drm, 32);
return 0;
@@ -421,11 +419,22 @@ static const struct of_device_id mtk_ddp_comp_dt_ids[] = {
{ }
};
+static const struct of_device_id mtk_drm_of_ids[] = {
+ { .compatible = "mediatek,mt2701-mmsys",
+ .data = &mt2701_mmsys_driver_data},
+ { .compatible = "mediatek,mt2712-mmsys",
+ .data = &mt2712_mmsys_driver_data},
+ { .compatible = "mediatek,mt8173-mmsys",
+ .data = &mt8173_mmsys_driver_data},
+ { }
+};
+
static int mtk_drm_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct device_node *phandle = dev->parent->of_node;
+ const struct of_device_id *of_id;
struct mtk_drm_private *private;
- struct resource *mem;
struct device_node *node;
struct component_match *match = NULL;
int ret;
@@ -436,18 +445,20 @@ static int mtk_drm_probe(struct platform_device *pdev)
return -ENOMEM;
private->data = of_device_get_match_data(dev);
-
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- private->config_regs = devm_ioremap_resource(dev, mem);
- if (IS_ERR(private->config_regs)) {
- ret = PTR_ERR(private->config_regs);
- dev_err(dev, "Failed to ioremap mmsys-config resource: %d\n",
- ret);
- return ret;
+ private->mmsys_dev = dev->parent;
+ if (!private->mmsys_dev) {
+ dev_err(dev, "Failed to get MMSYS device\n");
+ return -ENODEV;
}
+ of_id = of_match_node(mtk_drm_of_ids, phandle);
+ if (!of_id)
+ return -ENODEV;
+
+ private->data = of_id->data;
+
/* Iterate over sibling DISP function blocks */
- for_each_child_of_node(dev->of_node->parent, node) {
+ for_each_child_of_node(phandle->parent, node) {
const struct of_device_id *of_id;
enum mtk_ddp_comp_type comp_type;
int comp_id;
@@ -581,22 +592,11 @@ static int mtk_drm_sys_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(mtk_drm_pm_ops, mtk_drm_sys_suspend,
mtk_drm_sys_resume);
-static const struct of_device_id mtk_drm_of_ids[] = {
- { .compatible = "mediatek,mt2701-mmsys",
- .data = &mt2701_mmsys_driver_data},
- { .compatible = "mediatek,mt2712-mmsys",
- .data = &mt2712_mmsys_driver_data},
- { .compatible = "mediatek,mt8173-mmsys",
- .data = &mt8173_mmsys_driver_data},
- { }
-};
-
static struct platform_driver mtk_drm_platform_driver = {
.probe = mtk_drm_probe,
.remove = mtk_drm_remove,
.driver = {
.name = "mediatek-drm",
- .of_match_table = mtk_drm_of_ids,
.pm = &mtk_drm_pm_ops,
},
};
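Dropping of_match_table means the "mediatek-drm" device is no longer bound by compatible string at all; the probe above expects to have been created as a child of the MMSYS device and reaches the SoC match data through dev->parent->of_node. On the MMSYS side that presumably amounts to something like the following once its own probe succeeds (a sketch of the parent/child split, not code from this series):

    /* in the mmsys driver: spawn the DRM child so dev->parent is the mmsys device */
    struct platform_device *drm_pdev;

    drm_pdev = platform_device_register_data(&pdev->dev, "mediatek-drm",
                                             PLATFORM_DEVID_AUTO, NULL, 0);
    if (IS_ERR(drm_pdev))
            return PTR_ERR(drm_pdev);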
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.h b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
index 17bc99b9f5d4..b5be63e53176 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
@@ -39,7 +39,7 @@ struct mtk_drm_private {
struct device_node *mutex_node;
struct device *mutex_dev;
- void __iomem *config_regs;
+ struct device *mmsys_dev;
struct device_node *comp_node[DDP_COMPONENT_ID_MAX];
struct mtk_ddp_comp *ddp_comp[DDP_COMPONENT_ID_MAX];
const struct mtk_mmsys_driver_data *data;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
index b04a3c2b111e..f8fd8b98c30e 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
@@ -224,6 +224,9 @@ struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev,
expected = sg_dma_address(sg->sgl);
for_each_sg(sg->sgl, s, sg->nents, i) {
+ if (!sg_dma_len(s))
+ break;
+
if (sg_dma_address(s) != expected) {
DRM_ERROR("sg_table is not contiguous");
ret = -EINVAL;
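The new sg_dma_len() test matters because dma_map_sg() may merge entries: after mapping, the table can hold fewer DMA segments than nents, and the unused trailing entries are left with a zero length, so a contiguity check has to stop at the first empty segment instead of comparing its stale address. The same test as a self-contained helper (a sketch, not part of the patch):

    static bool sgt_is_dma_contiguous(struct sg_table *sgt)
    {
            struct scatterlist *s;
            dma_addr_t expected = sg_dma_address(sgt->sgl);
            int i;

            for_each_sg(sgt->sgl, s, sgt->nents, i) {
                    if (!sg_dma_len(s))
                            break;          /* merged-away tail entry */
                    if (sg_dma_address(s) != expected)
                            return false;
                    expected = sg_dma_address(s) + sg_dma_len(s);
            }
            return true;
    }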
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 0ede69830a9d..270bf22c98fe 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -22,6 +22,7 @@
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "mtk_drm_ddp_comp.h"
@@ -787,15 +788,6 @@ static void mtk_output_dsi_disable(struct mtk_dsi *dsi)
dsi->enabled = false;
}
-static void mtk_dsi_encoder_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
-}
-
-static const struct drm_encoder_funcs mtk_dsi_encoder_funcs = {
- .destroy = mtk_dsi_encoder_destroy,
-};
-
static bool mtk_dsi_encoder_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -888,8 +880,8 @@ static int mtk_dsi_create_conn_enc(struct drm_device *drm, struct mtk_dsi *dsi)
{
int ret;
- ret = drm_encoder_init(drm, &dsi->encoder, &mtk_dsi_encoder_funcs,
- DRM_MODE_ENCODER_DSI, NULL);
+ ret = drm_simple_encoder_init(drm, &dsi->encoder,
+ DRM_MODE_ENCODER_DSI);
if (ret) {
DRM_ERROR("Failed to encoder init to drm\n");
return ret;
@@ -1194,14 +1186,18 @@ static int mtk_dsi_probe(struct platform_device *pdev)
dsi->engine_clk = devm_clk_get(dev, "engine");
if (IS_ERR(dsi->engine_clk)) {
ret = PTR_ERR(dsi->engine_clk);
- dev_err(dev, "Failed to get engine clock: %d\n", ret);
+
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get engine clock: %d\n", ret);
goto err_unregister_host;
}
dsi->digital_clk = devm_clk_get(dev, "digital");
if (IS_ERR(dsi->digital_clk)) {
ret = PTR_ERR(dsi->digital_clk);
- dev_err(dev, "Failed to get digital clock: %d\n", ret);
+
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get digital clock: %d\n", ret);
goto err_unregister_host;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index ff43a3d80410..5feb760617cb 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -311,14 +311,10 @@ static void mtk_hdmi_hw_send_info_frame(struct mtk_hdmi *hdmi, u8 *buffer,
u8 checksum;
int ctrl_frame_en = 0;
- frame_type = *buffer;
- buffer += 1;
- frame_ver = *buffer;
- buffer += 1;
- frame_len = *buffer;
- buffer += 1;
- checksum = *buffer;
- buffer += 1;
+ frame_type = *buffer++;
+ frame_ver = *buffer++;
+ frame_len = *buffer++;
+ checksum = *buffer++;
frame_data = buffer;
dev_dbg(hdmi->dev,
@@ -982,7 +978,7 @@ static int mtk_hdmi_setup_avi_infoframe(struct mtk_hdmi *hdmi,
struct drm_display_mode *mode)
{
struct hdmi_avi_infoframe frame;
- u8 buffer[17];
+ u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
ssize_t err;
err = drm_hdmi_avi_infoframe_from_display_mode(&frame,
@@ -1008,7 +1004,7 @@ static int mtk_hdmi_setup_spd_infoframe(struct mtk_hdmi *hdmi,
const char *product)
{
struct hdmi_spd_infoframe frame;
- u8 buffer[29];
+ u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_SPD_INFOFRAME_SIZE];
ssize_t err;
err = hdmi_spd_infoframe_init(&frame, vendor, product);
@@ -1031,7 +1027,7 @@ static int mtk_hdmi_setup_spd_infoframe(struct mtk_hdmi *hdmi,
static int mtk_hdmi_setup_audio_infoframe(struct mtk_hdmi *hdmi)
{
struct hdmi_audio_infoframe frame;
- u8 buffer[14];
+ u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AUDIO_INFOFRAME_SIZE];
ssize_t err;
err = hdmi_audio_infoframe_init(&frame);
@@ -1474,7 +1470,9 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
ret = mtk_hdmi_get_all_clk(hdmi, np);
if (ret) {
- dev_err(dev, "Failed to get clocks: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get clocks: %d\n", ret);
+
return ret;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_mipi_tx.c b/drivers/gpu/drm/mediatek/mtk_mipi_tx.c
index e4d34484ecc8..8cee2591e728 100644
--- a/drivers/gpu/drm/mediatek/mtk_mipi_tx.c
+++ b/drivers/gpu/drm/mediatek/mtk_mipi_tx.c
@@ -88,6 +88,44 @@ static const struct phy_ops mtk_mipi_tx_ops = {
.owner = THIS_MODULE,
};
+static void mtk_mipi_tx_get_calibration_datal(struct mtk_mipi_tx *mipi_tx)
+{
+ struct nvmem_cell *cell;
+ size_t len;
+ u32 *buf;
+
+ cell = nvmem_cell_get(mipi_tx->dev, "calibration-data");
+ if (IS_ERR(cell)) {
+ dev_info(mipi_tx->dev, "can't get nvmem_cell_get, ignore it\n");
+ return;
+ }
+ buf = (u32 *)nvmem_cell_read(cell, &len);
+ nvmem_cell_put(cell);
+
+ if (IS_ERR(buf)) {
+ dev_info(mipi_tx->dev, "can't get data, ignore it\n");
+ return;
+ }
+
+ if (len < 3 * sizeof(u32)) {
+ dev_info(mipi_tx->dev, "invalid calibration data\n");
+ kfree(buf);
+ return;
+ }
+
+ mipi_tx->rt_code[0] = ((buf[0] >> 6 & 0x1f) << 5) |
+ (buf[0] >> 11 & 0x1f);
+ mipi_tx->rt_code[1] = ((buf[1] >> 27 & 0x1f) << 5) |
+ (buf[0] >> 1 & 0x1f);
+ mipi_tx->rt_code[2] = ((buf[1] >> 17 & 0x1f) << 5) |
+ (buf[1] >> 22 & 0x1f);
+ mipi_tx->rt_code[3] = ((buf[1] >> 7 & 0x1f) << 5) |
+ (buf[1] >> 12 & 0x1f);
+ mipi_tx->rt_code[4] = ((buf[2] >> 27 & 0x1f) << 5) |
+ (buf[1] >> 2 & 0x1f);
+ kfree(buf);
+}
+
static int mtk_mipi_tx_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -125,6 +163,20 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev)
return ret;
}
+ ret = of_property_read_u32(dev->of_node, "drive-strength-microamp",
+ &mipi_tx->mipitx_drive);
+ /* If the property can't be read, default mipitx_drive to 4600 uA */
+ if (ret < 0)
+ mipi_tx->mipitx_drive = 4600;
+
+ /* Check that mipitx_drive is within the valid range */
+ if (mipi_tx->mipitx_drive > 6000 || mipi_tx->mipitx_drive < 3000) {
+ dev_warn(dev, "drive-strength-microamp is invalid %d, not in 3000 ~ 6000\n",
+ mipi_tx->mipitx_drive);
+ mipi_tx->mipitx_drive = clamp_val(mipi_tx->mipitx_drive, 3000,
+ 6000);
+ }
+
ref_clk_name = __clk_get_name(ref_clk);
ret = of_property_read_string(dev->of_node, "clock-output-names",
@@ -160,6 +212,8 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev)
mipi_tx->dev = dev;
+ mtk_mipi_tx_get_calibration_datal(mipi_tx);
+
return of_clk_add_provider(dev->of_node, of_clk_src_simple_get,
mipi_tx->pll);
}
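
The calibration words above pack five 10-bit resistor-tuning codes across buf[0..2], and drive-strength-microamp maps onto the LDO reference field in 200 uA steps starting at 3000 uA. A rough userspace sketch of the same arithmetic with fake efuse data (the bit layout is taken from the hunk above; none of this is the driver's API):

#include <stdint.h>
#include <stdio.h>

/* Extract one 5-bit field starting at bit 'shift' of a 32-bit word */
static uint32_t bits5(uint32_t word, unsigned int shift)
{
	return (word >> shift) & 0x1f;
}

int main(void)
{
	uint32_t buf[3] = { 0x12345678, 0x9abcdef0, 0x0fedcba9 }; /* fake efuse data */
	uint32_t rt_code[5];
	uint32_t drive_ua = 4600; /* default from the patch */

	/* Same packing as mtk_mipi_tx_get_calibration_datal() */
	rt_code[0] = bits5(buf[0], 6)  << 5 | bits5(buf[0], 11);
	rt_code[1] = bits5(buf[1], 27) << 5 | bits5(buf[0], 1);
	rt_code[2] = bits5(buf[1], 17) << 5 | bits5(buf[1], 22);
	rt_code[3] = bits5(buf[1], 7)  << 5 | bits5(buf[1], 12);
	rt_code[4] = bits5(buf[2], 27) << 5 | bits5(buf[1], 2);

	for (int i = 0; i < 5; i++)
		printf("rt_code[%d] = 0x%03x\n", i, rt_code[i]);

	/* RG_DSI_HSTX_LDO_REF_SEL value: 200 uA per step above 3000 uA */
	printf("LDO ref sel = %u\n", (drive_ua - 3000) / 200);
	return 0;
}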
diff --git a/drivers/gpu/drm/mediatek/mtk_mipi_tx.h b/drivers/gpu/drm/mediatek/mtk_mipi_tx.h
index 413f35d86219..c76f07c3fdeb 100644
--- a/drivers/gpu/drm/mediatek/mtk_mipi_tx.h
+++ b/drivers/gpu/drm/mediatek/mtk_mipi_tx.h
@@ -12,9 +12,11 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/nvmem-consumer.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
+#include <linux/slab.h>
struct mtk_mipitx_data {
const u32 mppll_preserve;
@@ -27,6 +29,8 @@ struct mtk_mipi_tx {
struct device *dev;
void __iomem *regs;
u32 data_rate;
+ u32 mipitx_drive;
+ u32 rt_code[5];
const struct mtk_mipitx_data *driver_data;
struct clk_hw pll_hw;
struct clk *pll;
diff --git a/drivers/gpu/drm/mediatek/mtk_mt8183_mipi_tx.c b/drivers/gpu/drm/mediatek/mtk_mt8183_mipi_tx.c
index 91f08a351fd0..9f3e55aeebb2 100644
--- a/drivers/gpu/drm/mediatek/mtk_mt8183_mipi_tx.c
+++ b/drivers/gpu/drm/mediatek/mtk_mt8183_mipi_tx.c
@@ -17,6 +17,9 @@
#define RG_DSI_BG_CORE_EN BIT(7)
#define RG_DSI_PAD_TIEL_SEL BIT(8)
+#define MIPITX_VOLTAGE_SEL 0x0010
+#define RG_DSI_HSTX_LDO_REF_SEL (0xf << 6)
+
#define MIPITX_PLL_PWR 0x0028
#define MIPITX_PLL_CON0 0x002c
#define MIPITX_PLL_CON1 0x0030
@@ -25,6 +28,7 @@
#define MIPITX_PLL_CON4 0x003c
#define RG_DSI_PLL_IBIAS (3 << 10)
+#define MIPITX_D2P_RTCODE 0x0100
#define MIPITX_D2_SW_CTL_EN 0x0144
#define MIPITX_D0_SW_CTL_EN 0x0244
#define MIPITX_CK_CKMODE_EN 0x0328
@@ -105,6 +109,24 @@ static const struct clk_ops mtk_mipi_tx_pll_ops = {
.recalc_rate = mtk_mipi_tx_pll_recalc_rate,
};
+static void mtk_mipi_tx_config_calibration_data(struct mtk_mipi_tx *mipi_tx)
+{
+ int i, j;
+
+ for (i = 0; i < 5; i++) {
+ if ((mipi_tx->rt_code[i] & 0x1f) == 0)
+ mipi_tx->rt_code[i] |= 0x10;
+
+ if ((mipi_tx->rt_code[i] >> 5 & 0x1f) == 0)
+ mipi_tx->rt_code[i] |= 0x10 << 5;
+
+ for (j = 0; j < 10; j++)
+ mtk_mipi_tx_update_bits(mipi_tx,
+ MIPITX_D2P_RTCODE * (i + 1) + j * 4,
+ 1, mipi_tx->rt_code[i] >> j & 1);
+ }
+}
+
static void mtk_mipi_tx_power_on_signal(struct phy *phy)
{
struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
@@ -123,6 +145,12 @@ static void mtk_mipi_tx_power_on_signal(struct phy *phy)
mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_D3_SW_CTL_EN, DSI_SW_CTL_EN);
mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_CK_SW_CTL_EN, DSI_SW_CTL_EN);
+ mtk_mipi_tx_update_bits(mipi_tx, MIPITX_VOLTAGE_SEL,
+ RG_DSI_HSTX_LDO_REF_SEL,
+ (mipi_tx->mipitx_drive - 3000) / 200 << 6);
+
+ mtk_mipi_tx_config_calibration_data(mipi_tx);
+
mtk_mipi_tx_set_bits(mipi_tx, MIPITX_CK_CKMODE_EN, DSI_CK_CKMODE_EN);
}
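
Each rt_code value carries two 5-bit halves; a half that comes back as zero is forced to mid-scale 0x10 before the ten bits are programmed one per 32-bit register, with the per-lane blocks spaced MIPITX_D2P_RTCODE apart. A small standalone sketch of that fixup and the offset arithmetic (illustrative only; register name reused from the hunk above):

#include <stdint.h>
#include <stdio.h>

#define MIPITX_D2P_RTCODE 0x0100	/* per-lane base offset, from the hunk above */

int main(void)
{
	uint32_t rt_code[5] = { 0x000, 0x020, 0x3ff, 0x155, 0x0aa };

	for (int i = 0; i < 5; i++) {
		/* Never program an all-zero 5-bit half; fall back to mid-scale */
		if ((rt_code[i] & 0x1f) == 0)
			rt_code[i] |= 0x10;
		if ((rt_code[i] >> 5 & 0x1f) == 0)
			rt_code[i] |= 0x10 << 5;

		/* One bit per 32-bit register, lanes are 0x100 apart */
		for (int j = 0; j < 10; j++)
			printf("lane %d: reg 0x%04x <= %u\n", i,
			       MIPITX_D2P_RTCODE * (i + 1) + j * 4,
			       rt_code[i] >> j & 1);
	}
	return 0;
}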
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 8c2e1b47e81a..4c5aafcec799 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -11,6 +11,7 @@
#include <linux/component.h>
#include <linux/module.h>
#include <linux/of_graph.h>
+#include <linux/sys_soc.h>
#include <linux/platform_device.h>
#include <linux/soc/amlogic/meson-canvas.h>
@@ -183,6 +184,24 @@ static void meson_remove_framebuffers(void)
kfree(ap);
}
+struct meson_drm_soc_attr {
+ struct meson_drm_soc_limits limits;
+ const struct soc_device_attribute *attrs;
+};
+
+static const struct meson_drm_soc_attr meson_drm_soc_attrs[] = {
+ /* S805X/S805Y HDMI PLL won't lock for HDMI PHY freq > 1.65GHz */
+ {
+ .limits = {
+ .max_hdmi_phy_freq = 1650000,
+ },
+ .attrs = (const struct soc_device_attribute []) {
+ { .soc_id = "GXL (S805*)", },
+ { /* sentinel */ },
+ }
+ },
+};
+
static int meson_drv_bind_master(struct device *dev, bool has_components)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -191,7 +210,7 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
struct drm_device *drm;
struct resource *res;
void __iomem *regs;
- int ret;
+ int ret, i;
/* Checks if an output connector is available */
if (!meson_vpu_has_available_connectors(dev)) {
@@ -281,10 +300,20 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
if (ret)
goto free_drm;
+ /* Assign limits per soc revision/package */
+ for (i = 0 ; i < ARRAY_SIZE(meson_drm_soc_attrs) ; ++i) {
+ if (soc_device_match(meson_drm_soc_attrs[i].attrs)) {
+ priv->limits = &meson_drm_soc_attrs[i].limits;
+ break;
+ }
+ }
+
/* Remove early framebuffers (ie. simplefb) */
meson_remove_framebuffers();
- drm_mode_config_init(drm);
+ ret = drmm_mode_config_init(drm);
+ if (ret)
+ goto free_drm;
drm->mode_config.max_width = 3840;
drm->mode_config.max_height = 2160;
drm->mode_config.funcs = &meson_mode_config_funcs;
@@ -379,7 +408,6 @@ static void meson_drv_unbind(struct device *dev)
drm_dev_unregister(drm);
drm_irq_uninstall(drm);
drm_kms_helper_poll_fini(drm);
- drm_mode_config_cleanup(drm);
drm_dev_put(drm);
}
diff --git a/drivers/gpu/drm/meson/meson_drv.h b/drivers/gpu/drm/meson/meson_drv.h
index 04fdf3826643..5b23704a80d6 100644
--- a/drivers/gpu/drm/meson/meson_drv.h
+++ b/drivers/gpu/drm/meson/meson_drv.h
@@ -30,6 +30,10 @@ struct meson_drm_match_data {
struct meson_afbcd_ops *afbcd_ops;
};
+struct meson_drm_soc_limits {
+ unsigned int max_hdmi_phy_freq;
+};
+
struct meson_drm {
struct device *dev;
enum vpu_compatible compat;
@@ -48,6 +52,8 @@ struct meson_drm {
struct drm_plane *primary_plane;
struct drm_plane *overlay_plane;
+ const struct meson_drm_soc_limits *limits;
+
/* Components Data */
struct {
bool osd1_enabled;
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c
index 64cb6ba4bc42..24a12c453095 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c
@@ -695,7 +695,7 @@ dw_hdmi_mode_valid(struct drm_connector *connector,
dev_dbg(connector->dev->dev, "%s: vclk:%d phy=%d venc=%d hdmi=%d\n",
__func__, phy_freq, vclk_freq, venc_freq, hdmi_freq);
- return meson_vclk_vic_supported_freq(phy_freq, vclk_freq);
+ return meson_vclk_vic_supported_freq(priv, phy_freq, vclk_freq);
}
/* Encoder */
diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c
index d5cbc47835bf..35338ed18209 100644
--- a/drivers/gpu/drm/meson/meson_plane.c
+++ b/drivers/gpu/drm/meson/meson_plane.c
@@ -223,7 +223,7 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_16 |
OSD_COLOR_MATRIX_16_RGB565;
break;
- };
+ }
}
switch (fb->format->format) {
diff --git a/drivers/gpu/drm/meson/meson_vclk.c b/drivers/gpu/drm/meson/meson_vclk.c
index fdf26dac9fa8..0eb86943a358 100644
--- a/drivers/gpu/drm/meson/meson_vclk.c
+++ b/drivers/gpu/drm/meson/meson_vclk.c
@@ -725,6 +725,13 @@ meson_vclk_dmt_supported_freq(struct meson_drm *priv, unsigned int freq)
/* In DMT mode, path after PLL is always /10 */
freq *= 10;
+ /* Check against soc revision/package limits */
+ if (priv->limits) {
+ if (priv->limits->max_hdmi_phy_freq &&
+ freq > priv->limits->max_hdmi_phy_freq)
+ return MODE_CLOCK_HIGH;
+ }
+
if (meson_hdmi_pll_find_params(priv, freq, &m, &frac, &od))
return MODE_OK;
@@ -762,7 +769,7 @@ static void meson_hdmi_pll_generic_set(struct meson_drm *priv,
}
enum drm_mode_status
-meson_vclk_vic_supported_freq(unsigned int phy_freq,
+meson_vclk_vic_supported_freq(struct meson_drm *priv, unsigned int phy_freq,
unsigned int vclk_freq)
{
int i;
@@ -770,6 +777,13 @@ meson_vclk_vic_supported_freq(unsigned int phy_freq,
DRM_DEBUG_DRIVER("phy_freq = %d vclk_freq = %d\n",
phy_freq, vclk_freq);
+ /* Check against soc revision/package limits */
+ if (priv->limits) {
+ if (priv->limits->max_hdmi_phy_freq &&
+ phy_freq > priv->limits->max_hdmi_phy_freq)
+ return MODE_CLOCK_HIGH;
+ }
+
for (i = 0 ; params[i].pixel_freq ; ++i) {
DRM_DEBUG_DRIVER("i = %d pixel_freq = %d alt = %d\n",
i, params[i].pixel_freq,
diff --git a/drivers/gpu/drm/meson/meson_vclk.h b/drivers/gpu/drm/meson/meson_vclk.h
index aed0ab2efa71..60617aaf18dd 100644
--- a/drivers/gpu/drm/meson/meson_vclk.h
+++ b/drivers/gpu/drm/meson/meson_vclk.h
@@ -25,7 +25,8 @@ enum {
enum drm_mode_status
meson_vclk_dmt_supported_freq(struct meson_drm *priv, unsigned int freq);
enum drm_mode_status
-meson_vclk_vic_supported_freq(unsigned int phy_freq, unsigned int vclk_freq);
+meson_vclk_vic_supported_freq(struct meson_drm *priv, unsigned int phy_freq,
+ unsigned int vclk_freq);
void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
unsigned int phy_freq, unsigned int vclk_freq,
diff --git a/drivers/gpu/drm/mgag200/mgag200_cursor.c b/drivers/gpu/drm/mgag200/mgag200_cursor.c
index d491edd317ff..aebc9ce43d55 100644
--- a/drivers/gpu/drm/mgag200/mgag200_cursor.c
+++ b/drivers/gpu/drm/mgag200/mgag200_cursor.c
@@ -260,7 +260,7 @@ int mgag200_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
uint32_t handle, uint32_t width, uint32_t height)
{
struct drm_device *dev = crtc->dev;
- struct mga_device *mdev = (struct mga_device *)dev->dev_private;
+ struct mga_device *mdev = to_mga_device(dev);
struct drm_gem_object *obj;
struct drm_gem_vram_object *gbo = NULL;
int ret;
@@ -307,7 +307,7 @@ err_drm_gem_object_put_unlocked:
int mgag200_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
- struct mga_device *mdev = (struct mga_device *)crtc->dev->dev_private;
+ struct mga_device *mdev = to_mga_device(crtc->dev);
/* Our origin is at (64,64) */
x += 64;
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index 7a5bad2f57d7..c2f0e4b40b05 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -77,6 +77,8 @@ static int mga_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto err_mgag200_driver_unload;
+ drm_fbdev_generic_setup(dev, 0);
+
return 0;
err_mgag200_driver_unload:
@@ -118,7 +120,7 @@ int mgag200_driver_dumb_create(struct drm_file *file,
struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
- struct mga_device *mdev = dev->dev_private;
+ struct mga_device *mdev = to_mga_device(dev);
unsigned long pg_align;
if (WARN_ONCE(!dev->vram_mm, "VRAM MM not initialized"))
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 9691252d6233..d9b7e96b214f 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -104,11 +104,6 @@ struct mga_crtc {
bool enabled;
};
-struct mga_mode_info {
- bool mode_config_initialized;
- struct mga_crtc *crtc;
-};
-
struct mga_i2c_chan {
struct i2c_adapter adapter;
struct drm_device *dev;
@@ -160,17 +155,14 @@ struct mga_device {
void __iomem *rmmio;
struct mga_mc mc;
- struct mga_mode_info mode_info;
struct mga_cursor cursor;
size_t vram_fb_available;
bool suspended;
- int num_crtc;
enum mga_type type;
int has_sdram;
- struct drm_display_mode mode;
int bpp_shifts[4];
@@ -179,9 +171,15 @@ struct mga_device {
/* SE model number stored in reg 0x1e24 */
u32 unique_rev_id;
+ struct mga_connector connector;
struct drm_encoder encoder;
};
+static inline struct mga_device *to_mga_device(struct drm_device *dev)
+{
+ return dev->dev_private;
+}
+
static inline enum mga_type
mgag200_type_from_driver_data(kernel_ulong_t driver_data)
{
@@ -196,7 +194,6 @@ mgag200_flags_from_driver_data(kernel_ulong_t driver_data)
/* mgag200_mode.c */
int mgag200_modeset_init(struct mga_device *mdev);
-void mgag200_modeset_fini(struct mga_device *mdev);
/* mgag200_main.c */
int mgag200_driver_load(struct drm_device *dev, unsigned long flags);
diff --git a/drivers/gpu/drm/mgag200/mgag200_i2c.c b/drivers/gpu/drm/mgag200/mgag200_i2c.c
index 9f4635916d32..09731e614e46 100644
--- a/drivers/gpu/drm/mgag200/mgag200_i2c.c
+++ b/drivers/gpu/drm/mgag200/mgag200_i2c.c
@@ -61,34 +61,34 @@ static inline void mga_i2c_set(struct mga_device *mdev, int mask, int state)
static void mga_gpio_setsda(void *data, int state)
{
struct mga_i2c_chan *i2c = data;
- struct mga_device *mdev = i2c->dev->dev_private;
+ struct mga_device *mdev = to_mga_device(i2c->dev);
mga_i2c_set(mdev, i2c->data, state);
}
static void mga_gpio_setscl(void *data, int state)
{
struct mga_i2c_chan *i2c = data;
- struct mga_device *mdev = i2c->dev->dev_private;
+ struct mga_device *mdev = to_mga_device(i2c->dev);
mga_i2c_set(mdev, i2c->clock, state);
}
static int mga_gpio_getsda(void *data)
{
struct mga_i2c_chan *i2c = data;
- struct mga_device *mdev = i2c->dev->dev_private;
+ struct mga_device *mdev = to_mga_device(i2c->dev);
return (mga_i2c_read_gpio(mdev) & i2c->data) ? 1 : 0;
}
static int mga_gpio_getscl(void *data)
{
struct mga_i2c_chan *i2c = data;
- struct mga_device *mdev = i2c->dev->dev_private;
+ struct mga_device *mdev = to_mga_device(i2c->dev);
return (mga_i2c_read_gpio(mdev) & i2c->clock) ? 1 : 0;
}
struct mga_i2c_chan *mgag200_i2c_create(struct drm_device *dev)
{
- struct mga_device *mdev = dev->dev_private;
+ struct mga_device *mdev = to_mga_device(dev);
struct mga_i2c_chan *i2c;
int ret;
int data, clock;
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index e278b6a547bd..86df799fd38c 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -10,15 +10,8 @@
#include <linux/pci.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_gem_framebuffer_helper.h>
-
#include "mgag200_drv.h"
-static const struct drm_mode_config_funcs mga_mode_funcs = {
- .fb_create = drm_gem_fb_create
-};
-
static int mga_probe_vram(struct mga_device *mdev, void __iomem *mem)
{
int offset;
@@ -66,51 +59,54 @@ static int mga_probe_vram(struct mga_device *mdev, void __iomem *mem)
/* Map the framebuffer from the card and configure the core */
static int mga_vram_init(struct mga_device *mdev)
{
+ struct drm_device *dev = mdev->dev;
void __iomem *mem;
/* BAR 0 is VRAM */
- mdev->mc.vram_base = pci_resource_start(mdev->dev->pdev, 0);
- mdev->mc.vram_window = pci_resource_len(mdev->dev->pdev, 0);
+ mdev->mc.vram_base = pci_resource_start(dev->pdev, 0);
+ mdev->mc.vram_window = pci_resource_len(dev->pdev, 0);
- if (!devm_request_mem_region(mdev->dev->dev, mdev->mc.vram_base, mdev->mc.vram_window,
- "mgadrmfb_vram")) {
+ if (!devm_request_mem_region(dev->dev, mdev->mc.vram_base,
+ mdev->mc.vram_window, "mgadrmfb_vram")) {
DRM_ERROR("can't reserve VRAM\n");
return -ENXIO;
}
- mem = pci_iomap(mdev->dev->pdev, 0, 0);
+ mem = pci_iomap(dev->pdev, 0, 0);
if (!mem)
return -ENOMEM;
mdev->mc.vram_size = mga_probe_vram(mdev, mem);
- pci_iounmap(mdev->dev->pdev, mem);
+ pci_iounmap(dev->pdev, mem);
return 0;
}
-static int mgag200_device_init(struct drm_device *dev,
- uint32_t flags)
+int mgag200_driver_load(struct drm_device *dev, unsigned long flags)
{
- struct mga_device *mdev = dev->dev_private;
+ struct mga_device *mdev;
int ret, option;
+ mdev = devm_kzalloc(dev->dev, sizeof(struct mga_device), GFP_KERNEL);
+ if (mdev == NULL)
+ return -ENOMEM;
+ dev->dev_private = (void *)mdev;
+ mdev->dev = dev;
+
mdev->flags = mgag200_flags_from_driver_data(flags);
mdev->type = mgag200_type_from_driver_data(flags);
- /* Hardcode the number of CRTCs to 1 */
- mdev->num_crtc = 1;
-
pci_read_config_dword(dev->pdev, PCI_MGA_OPTION, &option);
mdev->has_sdram = !(option & (1 << 14));
/* BAR 0 is the framebuffer, BAR 1 contains registers */
- mdev->rmmio_base = pci_resource_start(mdev->dev->pdev, 1);
- mdev->rmmio_size = pci_resource_len(mdev->dev->pdev, 1);
+ mdev->rmmio_base = pci_resource_start(dev->pdev, 1);
+ mdev->rmmio_size = pci_resource_len(dev->pdev, 1);
- if (!devm_request_mem_region(mdev->dev->dev, mdev->rmmio_base, mdev->rmmio_size,
- "mgadrmfb_mmio")) {
- DRM_ERROR("can't reserve mmio registers\n");
+ if (!devm_request_mem_region(dev->dev, mdev->rmmio_base,
+ mdev->rmmio_size, "mgadrmfb_mmio")) {
+ drm_err(dev, "can't reserve mmio registers\n");
return -ENOMEM;
}
@@ -121,90 +117,43 @@ static int mgag200_device_init(struct drm_device *dev,
/* stash G200 SE model number for later use */
if (IS_G200_SE(mdev)) {
mdev->unique_rev_id = RREG32(0x1e24);
- DRM_DEBUG("G200 SE unique revision id is 0x%x\n",
- mdev->unique_rev_id);
+ drm_dbg(dev, "G200 SE unique revision id is 0x%x\n",
+ mdev->unique_rev_id);
}
ret = mga_vram_init(mdev);
if (ret)
return ret;
- mdev->bpp_shifts[0] = 0;
- mdev->bpp_shifts[1] = 1;
- mdev->bpp_shifts[2] = 0;
- mdev->bpp_shifts[3] = 2;
- return 0;
-}
-
-/*
- * Functions here will be called by the core once it's bound the driver to
- * a PCI device
- */
-
-
-int mgag200_driver_load(struct drm_device *dev, unsigned long flags)
-{
- struct mga_device *mdev;
- int r;
-
- mdev = devm_kzalloc(dev->dev, sizeof(struct mga_device), GFP_KERNEL);
- if (mdev == NULL)
- return -ENOMEM;
- dev->dev_private = (void *)mdev;
- mdev->dev = dev;
-
- r = mgag200_device_init(dev, flags);
- if (r) {
- dev_err(&dev->pdev->dev, "Fatal error during GPU init: %d\n", r);
- return r;
- }
- r = mgag200_mm_init(mdev);
- if (r)
+ ret = mgag200_mm_init(mdev);
+ if (ret)
goto err_mm;
- drm_mode_config_init(dev);
- dev->mode_config.funcs = (void *)&mga_mode_funcs;
- if (IS_G200_SE(mdev) && mdev->vram_fb_available < (2048*1024))
- dev->mode_config.preferred_depth = 16;
- else
- dev->mode_config.preferred_depth = 32;
- dev->mode_config.prefer_shadow = 1;
-
- r = mgag200_modeset_init(mdev);
- if (r) {
- dev_err(&dev->pdev->dev, "Fatal error during modeset init: %d\n", r);
- goto err_modeset;
+ ret = mgag200_modeset_init(mdev);
+ if (ret) {
+ drm_err(dev, "Fatal error during modeset init: %d\n", ret);
+ goto err_mgag200_mm_fini;
}
- r = mgag200_cursor_init(mdev);
- if (r)
- dev_warn(&dev->pdev->dev,
- "Could not initialize cursors. Not doing hardware cursors.\n");
-
- r = drm_fbdev_generic_setup(mdev->dev, 0);
- if (r)
- goto err_modeset;
+ ret = mgag200_cursor_init(mdev);
+ if (ret)
+ drm_err(dev, "Could not initialize cursors. Not doing hardware cursors.\n");
return 0;
-err_modeset:
- drm_mode_config_cleanup(dev);
- mgag200_cursor_fini(mdev);
+err_mgag200_mm_fini:
mgag200_mm_fini(mdev);
err_mm:
dev->dev_private = NULL;
-
- return r;
+ return ret;
}
void mgag200_driver_unload(struct drm_device *dev)
{
- struct mga_device *mdev = dev->dev_private;
+ struct mga_device *mdev = to_mga_device(dev);
if (mdev == NULL)
return;
- mgag200_modeset_fini(mdev);
- drm_mode_config_cleanup(dev);
mgag200_cursor_fini(mdev);
mgag200_mm_fini(mdev);
dev->dev_private = NULL;
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index d90e83959fca..5f4ac36a9776 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -13,6 +13,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
@@ -28,7 +29,7 @@
static void mga_crtc_load_lut(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct mga_device *mdev = dev->dev_private;
+ struct mga_device *mdev = to_mga_device(dev);
struct drm_framebuffer *fb = crtc->primary->fb;
u16 *r_ptr, *g_ptr, *b_ptr;
int i;
@@ -728,7 +729,7 @@ static int mga_crtc_set_plls(struct mga_device *mdev, long clock)
static void mga_g200wb_prepare(struct drm_crtc *crtc)
{
- struct mga_device *mdev = crtc->dev->dev_private;
+ struct mga_device *mdev = to_mga_device(crtc->dev);
u8 tmp;
int iter_max;
@@ -783,7 +784,7 @@ static void mga_g200wb_prepare(struct drm_crtc *crtc)
static void mga_g200wb_commit(struct drm_crtc *crtc)
{
u8 tmp;
- struct mga_device *mdev = crtc->dev->dev_private;
+ struct mga_device *mdev = to_mga_device(crtc->dev);
/* 1- The first step is to ensure that the vrsten and hrsten are set */
WREG8(MGAREG_CRTCEXT_INDEX, 1);
@@ -833,7 +834,7 @@ static void mga_g200wb_commit(struct drm_crtc *crtc)
*/
static void mga_set_start_address(struct drm_crtc *crtc, unsigned offset)
{
- struct mga_device *mdev = crtc->dev->dev_private;
+ struct mga_device *mdev = to_mga_device(crtc->dev);
u32 addr;
int count;
u8 crtcext0;
@@ -902,7 +903,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
int x, int y, struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
- struct mga_device *mdev = dev->dev_private;
+ struct mga_device *mdev = to_mga_device(dev);
const struct drm_framebuffer *fb = crtc->primary->fb;
int hdisplay, hsyncstart, hsyncend, htotal;
int vdisplay, vsyncstart, vsyncend, vtotal;
@@ -1135,9 +1136,6 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
WREG8(MGA_MISC_OUT, misc);
- if (adjusted_mode)
- memcpy(&mdev->mode, mode, sizeof(struct drm_display_mode));
-
mga_crtc_do_set_base(crtc, old_fb, x, y, 0);
/* reset tagfifo */
@@ -1263,7 +1261,7 @@ static int mga_resume(struct drm_crtc *crtc)
static void mga_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
- struct mga_device *mdev = dev->dev_private;
+ struct mga_device *mdev = to_mga_device(dev);
u8 seq1 = 0, crtcext1 = 0;
switch (mode) {
@@ -1317,7 +1315,7 @@ static void mga_crtc_dpms(struct drm_crtc *crtc, int mode)
static void mga_crtc_prepare(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct mga_device *mdev = dev->dev_private;
+ struct mga_device *mdev = to_mga_device(dev);
u8 tmp;
/* mga_resume(crtc);*/
@@ -1353,7 +1351,7 @@ static void mga_crtc_prepare(struct drm_crtc *crtc)
static void mga_crtc_commit(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct mga_device *mdev = dev->dev_private;
+ struct mga_device *mdev = to_mga_device(dev);
const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
u8 tmp;
@@ -1433,6 +1431,7 @@ static const struct drm_crtc_helper_funcs mga_helper_funcs = {
/* CRTC setup */
static void mga_crtc_init(struct mga_device *mdev)
{
+ struct drm_device *dev = mdev->dev;
struct mga_crtc *mga_crtc;
mga_crtc = kzalloc(sizeof(struct mga_crtc) +
@@ -1442,14 +1441,17 @@ static void mga_crtc_init(struct mga_device *mdev)
if (mga_crtc == NULL)
return;
- drm_crtc_init(mdev->dev, &mga_crtc->base, &mga_crtc_funcs);
+ drm_crtc_init(dev, &mga_crtc->base, &mga_crtc_funcs);
drm_mode_crtc_set_gamma_size(&mga_crtc->base, MGAG200_LUT_SIZE);
- mdev->mode_info.crtc = mga_crtc;
drm_crtc_helper_add(&mga_crtc->base, &mga_helper_funcs);
}
+/*
+ * Connector
+ */
+
static int mga_vga_get_modes(struct drm_connector *connector)
{
struct mga_connector *mga_connector = to_mga_connector(connector);
@@ -1495,7 +1497,7 @@ static enum drm_mode_status mga_vga_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
- struct mga_device *mdev = (struct mga_device*)dev->dev_private;
+ struct mga_device *mdev = to_mga_device(dev);
int bpp = 32;
if (IS_G200_SE(mdev)) {
@@ -1574,7 +1576,6 @@ static void mga_connector_destroy(struct drm_connector *connector)
struct mga_connector *mga_connector = to_mga_connector(connector);
mgag200_i2c_destroy(mga_connector->i2c);
drm_connector_cleanup(connector);
- kfree(connector);
}
static const struct drm_connector_helper_funcs mga_vga_connector_helper_funcs = {
@@ -1588,70 +1589,96 @@ static const struct drm_connector_funcs mga_vga_connector_funcs = {
.destroy = mga_connector_destroy,
};
-static struct drm_connector *mga_vga_init(struct drm_device *dev)
+static int mgag200_vga_connector_init(struct mga_device *mdev)
{
- struct drm_connector *connector;
- struct mga_connector *mga_connector;
-
- mga_connector = kzalloc(sizeof(struct mga_connector), GFP_KERNEL);
- if (!mga_connector)
- return NULL;
-
- connector = &mga_connector->base;
- mga_connector->i2c = mgag200_i2c_create(dev);
- if (!mga_connector->i2c)
- DRM_ERROR("failed to add ddc bus\n");
+ struct drm_device *dev = mdev->dev;
+ struct mga_connector *mconnector = &mdev->connector;
+ struct drm_connector *connector = &mconnector->base;
+ struct mga_i2c_chan *i2c;
+ int ret;
- drm_connector_init_with_ddc(dev, connector,
- &mga_vga_connector_funcs,
- DRM_MODE_CONNECTOR_VGA,
- &mga_connector->i2c->adapter);
+ i2c = mgag200_i2c_create(dev);
+ if (!i2c)
+ drm_warn(dev, "failed to add DDC bus\n");
+ ret = drm_connector_init_with_ddc(dev, connector,
+ &mga_vga_connector_funcs,
+ DRM_MODE_CONNECTOR_VGA,
+ &i2c->adapter);
+ if (ret)
+ goto err_mgag200_i2c_destroy;
drm_connector_helper_add(connector, &mga_vga_connector_helper_funcs);
- drm_connector_register(connector);
+ mconnector->i2c = i2c;
- return connector;
+ return 0;
+
+err_mgag200_i2c_destroy:
+ mgag200_i2c_destroy(i2c);
+ return ret;
}
+static const struct drm_mode_config_funcs mgag200_mode_config_funcs = {
+ .fb_create = drm_gem_fb_create
+};
+
+static unsigned int mgag200_preferred_depth(struct mga_device *mdev)
+{
+ if (IS_G200_SE(mdev) && mdev->vram_fb_available < (2048*1024))
+ return 16;
+ else
+ return 32;
+}
int mgag200_modeset_init(struct mga_device *mdev)
{
+ struct drm_device *dev = mdev->dev;
struct drm_encoder *encoder = &mdev->encoder;
- struct drm_connector *connector;
+ struct drm_connector *connector = &mdev->connector.base;
int ret;
- mdev->mode_info.mode_config_initialized = true;
+ mdev->bpp_shifts[0] = 0;
+ mdev->bpp_shifts[1] = 1;
+ mdev->bpp_shifts[2] = 0;
+ mdev->bpp_shifts[3] = 2;
+
+ ret = drmm_mode_config_init(dev);
+ if (ret) {
+ drm_err(dev, "drmm_mode_config_init() failed, error %d\n",
+ ret);
+ return ret;
+ }
+
+ dev->mode_config.max_width = MGAG200_MAX_FB_WIDTH;
+ dev->mode_config.max_height = MGAG200_MAX_FB_HEIGHT;
- mdev->dev->mode_config.max_width = MGAG200_MAX_FB_WIDTH;
- mdev->dev->mode_config.max_height = MGAG200_MAX_FB_HEIGHT;
+ dev->mode_config.preferred_depth = mgag200_preferred_depth(mdev);
+ dev->mode_config.prefer_shadow = 1;
- mdev->dev->mode_config.fb_base = mdev->mc.vram_base;
+ dev->mode_config.fb_base = mdev->mc.vram_base;
+
+ dev->mode_config.funcs = &mgag200_mode_config_funcs;
mga_crtc_init(mdev);
- ret = drm_simple_encoder_init(mdev->dev, encoder,
- DRM_MODE_ENCODER_DAC);
+ ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_DAC);
if (ret) {
- drm_err(mdev->dev,
+ drm_err(dev,
"drm_simple_encoder_init() failed, error %d\n",
ret);
return ret;
}
encoder->possible_crtcs = 0x1;
- connector = mga_vga_init(mdev->dev);
- if (!connector) {
- DRM_ERROR("mga_vga_init failed\n");
- return -1;
+ ret = mgag200_vga_connector_init(mdev);
+ if (ret) {
+ drm_err(dev,
+ "mgag200_vga_connector_init() failed, error %d\n",
+ ret);
+ return ret;
}
drm_connector_attach_encoder(connector, encoder);
return 0;
}
-
-void mgag200_modeset_fini(struct mga_device *mdev)
-{
-
-}
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 1579cf0d828f..42f8aae28b31 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -65,6 +65,7 @@ msm-y := \
disp/dpu1/dpu_hw_lm.o \
disp/dpu1/dpu_hw_pingpong.o \
disp/dpu1/dpu_hw_sspp.o \
+ disp/dpu1/dpu_hw_dspp.o \
disp/dpu1/dpu_hw_top.o \
disp/dpu1/dpu_hw_util.o \
disp/dpu1/dpu_hw_vbif.o \
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
index 1f83bc18d500..60f6472a3e58 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
@@ -401,6 +401,21 @@ static struct msm_gpu_state *a2xx_gpu_state_get(struct msm_gpu *gpu)
return state;
}
+static struct msm_gem_address_space *
+a2xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
+{
+ struct msm_mmu *mmu = msm_gpummu_new(&pdev->dev, gpu);
+ struct msm_gem_address_space *aspace;
+
+ aspace = msm_gem_address_space_create(mmu, "gpu", SZ_16M,
+ SZ_16M + 0xfff * SZ_64K);
+
+ if (IS_ERR(aspace) && !IS_ERR(mmu))
+ mmu->funcs->destroy(mmu);
+
+ return aspace;
+}
+
/* Register offset defines for A2XX - copy of A3XX */
static const unsigned int a2xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_AXXX_CP_RB_BASE),
@@ -429,6 +444,7 @@ static const struct adreno_gpu_funcs funcs = {
#endif
.gpu_state_get = a2xx_gpu_state_get,
.gpu_state_put = adreno_gpu_state_put,
+ .create_address_space = a2xx_create_address_space,
},
};
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index b67f88872726..0a5ea9f56cb8 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -441,6 +441,7 @@ static const struct adreno_gpu_funcs funcs = {
#endif
.gpu_state_get = a3xx_gpu_state_get,
.gpu_state_put = adreno_gpu_state_put,
+ .create_address_space = adreno_iommu_create_address_space,
},
};
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index 253d8d85daad..b9b26b2bf9c5 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -66,19 +66,22 @@ static void a4xx_enable_hwcg(struct msm_gpu *gpu)
}
}
- for (i = 0; i < 4; i++) {
- gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_MARB_CCU(i),
- 0x00000922);
- }
+ /* No CCU for A405 */
+ if (!adreno_is_a405(adreno_gpu)) {
+ for (i = 0; i < 4; i++) {
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_MARB_CCU(i),
+ 0x00000922);
+ }
- for (i = 0; i < 4; i++) {
- gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_RB_MARB_CCU(i),
- 0x00000000);
- }
+ for (i = 0; i < 4; i++) {
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_RB_MARB_CCU(i),
+ 0x00000000);
+ }
- for (i = 0; i < 4; i++) {
- gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1(i),
- 0x00000001);
+ for (i = 0; i < 4; i++) {
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1(i),
+ 0x00000001);
+ }
}
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_MODE_GPC, 0x02222222);
@@ -137,7 +140,9 @@ static int a4xx_hw_init(struct msm_gpu *gpu)
uint32_t *ptr, len;
int i, ret;
- if (adreno_is_a420(adreno_gpu)) {
+ if (adreno_is_a405(adreno_gpu)) {
+ gpu_write(gpu, REG_A4XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
+ } else if (adreno_is_a420(adreno_gpu)) {
gpu_write(gpu, REG_A4XX_VBIF_ABIT_SORT, 0x0001001F);
gpu_write(gpu, REG_A4XX_VBIF_ABIT_SORT_CONF, 0x000000A4);
gpu_write(gpu, REG_A4XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000001);
@@ -440,6 +445,52 @@ static const unsigned int a4xx_registers[] = {
~0 /* sentinel */
};
+static const unsigned int a405_registers[] = {
+ /* RBBM */
+ 0x0000, 0x0002, 0x0004, 0x0021, 0x0023, 0x0024, 0x0026, 0x0026,
+ 0x0028, 0x002B, 0x002E, 0x0034, 0x0037, 0x0044, 0x0047, 0x0066,
+ 0x0068, 0x0095, 0x009C, 0x0170, 0x0174, 0x01AF,
+ /* CP */
+ 0x0200, 0x0233, 0x0240, 0x0250, 0x04C0, 0x04DD, 0x0500, 0x050B,
+ 0x0578, 0x058F,
+ /* VSC */
+ 0x0C00, 0x0C03, 0x0C08, 0x0C41, 0x0C50, 0x0C51,
+ /* GRAS */
+ 0x0C80, 0x0C81, 0x0C88, 0x0C8F,
+ /* RB */
+ 0x0CC0, 0x0CC0, 0x0CC4, 0x0CD2,
+ /* PC */
+ 0x0D00, 0x0D0C, 0x0D10, 0x0D17, 0x0D20, 0x0D23,
+ /* VFD */
+ 0x0E40, 0x0E4A,
+ /* VPC */
+ 0x0E60, 0x0E61, 0x0E63, 0x0E68,
+ /* UCHE */
+ 0x0E80, 0x0E84, 0x0E88, 0x0E95,
+ /* GRAS CTX 0 */
+ 0x2000, 0x2004, 0x2008, 0x2067, 0x2070, 0x2078, 0x207B, 0x216E,
+ /* PC CTX 0 */
+ 0x21C0, 0x21C6, 0x21D0, 0x21D0, 0x21D9, 0x21D9, 0x21E5, 0x21E7,
+ /* VFD CTX 0 */
+ 0x2200, 0x2204, 0x2208, 0x22A9,
+ /* GRAS CTX 1 */
+ 0x2400, 0x2404, 0x2408, 0x2467, 0x2470, 0x2478, 0x247B, 0x256E,
+ /* PC CTX 1 */
+ 0x25C0, 0x25C6, 0x25D0, 0x25D0, 0x25D9, 0x25D9, 0x25E5, 0x25E7,
+ /* VFD CTX 1 */
+ 0x2600, 0x2604, 0x2608, 0x26A9,
+ /* VBIF version 0x20050000 */
+ 0x3000, 0x3007, 0x302C, 0x302C, 0x3030, 0x3030, 0x3034, 0x3036,
+ 0x3038, 0x3038, 0x303C, 0x303D, 0x3040, 0x3040, 0x3049, 0x3049,
+ 0x3058, 0x3058, 0x305B, 0x3061, 0x3064, 0x3068, 0x306C, 0x306D,
+ 0x3080, 0x3088, 0x308B, 0x308C, 0x3090, 0x3094, 0x3098, 0x3098,
+ 0x309C, 0x309C, 0x30C0, 0x30C0, 0x30C8, 0x30C8, 0x30D0, 0x30D0,
+ 0x30D8, 0x30D8, 0x30E0, 0x30E0, 0x3100, 0x3100, 0x3108, 0x3108,
+ 0x3110, 0x3110, 0x3118, 0x3118, 0x3120, 0x3120, 0x3124, 0x3125,
+ 0x3129, 0x3129, 0x340C, 0x340C, 0x3410, 0x3410,
+ ~0 /* sentinel */
+};
+
static struct msm_gpu_state *a4xx_gpu_state_get(struct msm_gpu *gpu)
{
struct msm_gpu_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
@@ -532,6 +583,7 @@ static const struct adreno_gpu_funcs funcs = {
#endif
.gpu_state_get = a4xx_gpu_state_get,
.gpu_state_put = adreno_gpu_state_put,
+ .create_address_space = adreno_iommu_create_address_space,
},
.get_timestamp = a4xx_get_timestamp,
};
@@ -563,13 +615,14 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
gpu->perfcntrs = NULL;
gpu->num_perfcntrs = 0;
- adreno_gpu->registers = a4xx_registers;
- adreno_gpu->reg_offsets = a4xx_register_offsets;
-
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
if (ret)
goto fail;
+ adreno_gpu->registers = adreno_is_a405(adreno_gpu) ? a405_registers :
+ a4xx_registers;
+ adreno_gpu->reg_offsets = a4xx_register_offsets;
+
/* if needed, allocate gmem: */
if (adreno_is_a4xx(adreno_gpu)) {
ret = adreno_gpu_ocmem_init(dev->dev, adreno_gpu,
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
index 075ecce4b5e0..8cae2ca4af6b 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
@@ -148,27 +148,19 @@ reset_set(void *data, u64 val)
DEFINE_SIMPLE_ATTRIBUTE(reset_fops, NULL, reset_set, "%llx\n");
-int a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor)
+void a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor)
{
struct drm_device *dev;
- int ret;
if (!minor)
- return 0;
+ return;
dev = minor->dev;
- ret = drm_debugfs_create_files(a5xx_debugfs_list,
- ARRAY_SIZE(a5xx_debugfs_list),
- minor->debugfs_root, minor);
-
- if (ret) {
- DRM_DEV_ERROR(dev->dev, "could not install a5xx_debugfs_list\n");
- return ret;
- }
+ drm_debugfs_create_files(a5xx_debugfs_list,
+ ARRAY_SIZE(a5xx_debugfs_list),
+ minor->debugfs_root, minor);
debugfs_create_file("reset", S_IWUGO, minor->debugfs_root, dev,
&reset_fops);
-
- return 0;
}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 724024a2243a..d95970a73fb4 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -1404,6 +1404,10 @@ static unsigned long a5xx_gpu_busy(struct msm_gpu *gpu)
{
u64 busy_cycles, busy_time;
+ /* Only read the gpu busy if the hardware is already active */
+ if (pm_runtime_get_if_in_use(&gpu->pdev->dev) == 0)
+ return 0;
+
busy_cycles = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_RBBM_0_LO,
REG_A5XX_RBBM_PERFCTR_RBBM_0_HI);
@@ -1412,6 +1416,8 @@ static unsigned long a5xx_gpu_busy(struct msm_gpu *gpu)
gpu->devfreq.busy_cycles = busy_cycles;
+ pm_runtime_put(&gpu->pdev->dev);
+
if (WARN_ON(busy_time > ~0LU))
return ~0LU;
@@ -1439,6 +1445,7 @@ static const struct adreno_gpu_funcs funcs = {
.gpu_busy = a5xx_gpu_busy,
.gpu_state_get = a5xx_gpu_state_get,
.gpu_state_put = a5xx_gpu_state_put,
+ .create_address_space = adreno_iommu_create_address_space,
},
.get_timestamp = a5xx_get_timestamp,
};
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
index 833468ce6b6d..54868d4e3958 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
@@ -41,7 +41,7 @@ struct a5xx_gpu {
#define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base)
#ifdef CONFIG_DEBUG_FS
-int a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor);
+void a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor);
#endif
/*
diff --git a/drivers/gpu/drm/msm/adreno/a6xx.xml.h b/drivers/gpu/drm/msm/adreno/a6xx.xml.h
index ed78fee2a262..47840b73cdda 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx.xml.h
@@ -1047,6 +1047,8 @@ enum a6xx_tex_type {
#define REG_A6XX_CP_MISC_CNTL 0x00000840
+#define REG_A6XX_CP_APRIV_CNTL 0x00000844
+
#define REG_A6XX_CP_ROQ_THRESHOLDS_1 0x000008c1
#define REG_A6XX_CP_ROQ_THRESHOLDS_2 0x000008c2
@@ -1764,6 +1766,8 @@ static inline uint32_t A6XX_CP_PROTECT_REG_MASK_LEN(uint32_t val)
#define REG_A6XX_RBBM_VBIF_CLIENT_QOS_CNTL 0x00000010
+#define REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL 0x00000011
+
#define REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL 0x0000001f
#define REG_A6XX_RBBM_INT_CLEAR_CMD 0x00000037
@@ -2418,6 +2422,16 @@ static inline uint32_t A6XX_UCHE_CLIENT_PF_PERFSEL(uint32_t val)
#define REG_A6XX_TPL1_NC_MODE_CNTL 0x0000b604
+#define REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_0 0x0000b608
+
+#define REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_1 0x0000b609
+
+#define REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_2 0x0000b60a
+
+#define REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_3 0x0000b60b
+
+#define REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_4 0x0000b60c
+
#define REG_A6XX_TPL1_PERFCTR_TP_SEL_0 0x0000b610
#define REG_A6XX_TPL1_PERFCTR_TP_SEL_1 0x0000b611
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index c4e71abbdd53..096be97ce9f9 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -2,14 +2,16 @@
/* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */
#include <linux/clk.h>
-#include <linux/dma-mapping.h>
#include <linux/interconnect.h>
#include <linux/pm_domain.h>
#include <linux/pm_opp.h>
#include <soc/qcom/cmd-db.h>
+#include <drm/drm_gem.h>
#include "a6xx_gpu.h"
#include "a6xx_gmu.xml.h"
+#include "msm_gem.h"
+#include "msm_mmu.h"
static void a6xx_gmu_fault(struct a6xx_gmu *gmu)
{
@@ -127,8 +129,6 @@ static void __a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index)
if (ret)
dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret);
- gmu->freq = gmu->gpu_freqs[index];
-
/*
* Eventually we will want to scale the path vote with the frequency but
* for now leave it at max so that the performance is nominal.
@@ -151,8 +151,21 @@ void a6xx_gmu_set_freq(struct msm_gpu *gpu, unsigned long freq)
break;
gmu->current_perf_index = perf_index;
+ gmu->freq = gmu->gpu_freqs[perf_index];
+
+ /*
+ * This can get called from devfreq while the hardware is idle. Don't
+ * bring up the power if it isn't already active
+ */
+ if (pm_runtime_get_if_in_use(gmu->dev) == 0)
+ return;
- __a6xx_gmu_set_freq(gmu, perf_index);
+ if (gmu->legacy)
+ __a6xx_gmu_set_freq(gmu, perf_index);
+ else
+ a6xx_hfi_set_freq(gmu, perf_index);
+
+ pm_runtime_put(gmu->dev);
}
unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu)
@@ -196,6 +209,12 @@ static int a6xx_gmu_start(struct a6xx_gmu *gmu)
u32 val;
gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);
+
+ /* Set the log wptr index
+ * note: downstream saves the value in poweroff and restores it here
+ */
+ gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_RESP, 0);
+
gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 0);
ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, val,
@@ -232,8 +251,13 @@ int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
switch (state) {
case GMU_OOB_GPU_SET:
- request = GMU_OOB_GPU_SET_REQUEST;
- ack = GMU_OOB_GPU_SET_ACK;
+ if (gmu->legacy) {
+ request = GMU_OOB_GPU_SET_REQUEST;
+ ack = GMU_OOB_GPU_SET_ACK;
+ } else {
+ request = GMU_OOB_GPU_SET_REQUEST_NEW;
+ ack = GMU_OOB_GPU_SET_ACK_NEW;
+ }
name = "GPU_SET";
break;
case GMU_OOB_BOOT_SLUMBER:
@@ -272,6 +296,13 @@ int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
/* Clear a pending OOB state in the GMU */
void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
{
+ if (!gmu->legacy) {
+ WARN_ON(state != GMU_OOB_GPU_SET);
+ gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
+ 1 << GMU_OOB_GPU_SET_CLEAR_NEW);
+ return;
+ }
+
switch (state) {
case GMU_OOB_GPU_SET:
gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
@@ -294,6 +325,9 @@ static int a6xx_sptprac_enable(struct a6xx_gmu *gmu)
int ret;
u32 val;
+ if (!gmu->legacy)
+ return 0;
+
gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778000);
ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
@@ -313,6 +347,9 @@ static void a6xx_sptprac_disable(struct a6xx_gmu *gmu)
u32 val;
int ret;
+ if (!gmu->legacy)
+ return;
+
/* Make sure retention is on */
gmu_rmw(gmu, REG_A6XX_GPU_CC_GX_GDSCR, 0, (1 << 11));
@@ -356,6 +393,11 @@ static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
if (gmu->idle_level < GMU_IDLE_STATE_SPTP)
a6xx_sptprac_disable(gmu);
+ if (!gmu->legacy) {
+ ret = a6xx_hfi_send_prep_slumber(gmu);
+ goto out;
+ }
+
/* Tell the GMU to get ready to slumber */
gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 1);
@@ -371,6 +413,7 @@ static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
}
}
+out:
/* Put fence into allow mode */
gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
return ret;
@@ -392,7 +435,7 @@ static int a6xx_rpmh_start(struct a6xx_gmu *gmu)
return ret;
}
- ret = gmu_poll_timeout(gmu, REG_A6XX_RSCC_SEQ_BUSY_DRV0, val,
+ ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_SEQ_BUSY_DRV0, val,
!val, 100, 10000);
if (ret) {
@@ -418,7 +461,7 @@ static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1);
- ret = gmu_poll_timeout(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
+ ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
val, val & (1 << 16), 100, 10000);
if (ret)
DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n");
@@ -441,32 +484,48 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
struct platform_device *pdev = to_platform_device(gmu->dev);
void __iomem *pdcptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc");
void __iomem *seqptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc_seq");
+ uint32_t pdc_address_offset;
if (!pdcptr || !seqptr)
goto err;
+ if (adreno_is_a618(adreno_gpu) || adreno_is_a640(adreno_gpu))
+ pdc_address_offset = 0x30090;
+ else if (adreno_is_a650(adreno_gpu))
+ pdc_address_offset = 0x300a0;
+ else
+ pdc_address_offset = 0x30080;
+
/* Disable SDE clock gating */
- gmu_write(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24));
+ gmu_write_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24));
/* Setup RSC PDC handshake for sleep and wakeup */
- gmu_write(gmu, REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0, 1);
- gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA, 0);
- gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR, 0);
- gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 2, 0);
- gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 2, 0);
- gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 4, 0x80000000);
- gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 4, 0);
- gmu_write(gmu, REG_A6XX_RSCC_OVERRIDE_START_ADDR, 0);
- gmu_write(gmu, REG_A6XX_RSCC_PDC_SEQ_START_ADDR, 0x4520);
- gmu_write(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510);
- gmu_write(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0, 1);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA, 0);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR, 0);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 2, 0);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 2, 0);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 4, 0x80000000);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 4, 0);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_OVERRIDE_START_ADDR, 0);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_SEQ_START_ADDR, 0x4520);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514);
/* Load RSC sequencer uCode for sleep and wakeup */
- gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xa7a506a0);
- gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xa1e6a6e7);
- gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e081e1);
- gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xe9a982e2);
- gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020e8a8);
+ if (adreno_is_a650(adreno_gpu)) {
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xeaaae5a0);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xe1a1ebab);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e0a581);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xecac82e2);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020edad);
+ } else {
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xa7a506a0);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xa1e6a6e7);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e081e1);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xe9a982e2);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020e8a8);
+ }
/* Load PDC sequencer uCode for power up and power down sequence */
pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0, 0xfebea1e1);
@@ -487,10 +546,7 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 4, 0x0);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 8, 0x10108);
- if (adreno_is_a618(adreno_gpu))
- pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, 0x30090);
- else
- pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, 0x30080);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, pdc_address_offset);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 8, 0x0);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK, 7);
@@ -502,17 +558,12 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 4, 0x10108);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 4, 0x30000);
- if (adreno_is_a618(adreno_gpu))
+ if (adreno_is_a618(adreno_gpu) || adreno_is_a650(adreno_gpu))
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x2);
else
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3);
-
-
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 8, 0x10108);
- if (adreno_is_a618(adreno_gpu))
- pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, 0x30090);
- else
- pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, 0x30080);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, pdc_address_offset);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 8, 0x3);
/* Setup GPU PDC */
@@ -542,6 +593,8 @@ static void a6xx_gmu_power_config(struct a6xx_gmu *gmu)
{
/* Disable GMU WB/RB buffer */
gmu_write(gmu, REG_A6XX_GMU_SYS_BUS_CONFIG, 0x1);
+ gmu_write(gmu, REG_A6XX_GMU_ICACHE_CONFIG, 0x1);
+ gmu_write(gmu, REG_A6XX_GMU_DCACHE_CONFIG, 0x1);
gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0x9c40400);
@@ -571,14 +624,95 @@ static void a6xx_gmu_power_config(struct a6xx_gmu *gmu)
A6XX_GMU_RPMH_CTRL_GFX_VOTE_ENABLE);
}
+struct block_header {
+ u32 addr;
+ u32 size;
+ u32 type;
+ u32 value;
+ u32 data[];
+};
+
+/* this should be a general kernel helper */
+static int in_range(u32 addr, u32 start, u32 size)
+{
+ return addr >= start && addr < start + size;
+}
+
+static bool fw_block_mem(struct a6xx_gmu_bo *bo, const struct block_header *blk)
+{
+ if (!in_range(blk->addr, bo->iova, bo->size))
+ return false;
+
+ memcpy(bo->virt + blk->addr - bo->iova, blk->data, blk->size);
+ return true;
+}
+
+static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)
+{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ const struct firmware *fw_image = adreno_gpu->fw[ADRENO_FW_GMU];
+ const struct block_header *blk;
+ u32 reg_offset;
+
+ u32 itcm_base = 0x00000000;
+ u32 dtcm_base = 0x00040000;
+
+ if (adreno_is_a650(adreno_gpu))
+ dtcm_base = 0x10004000;
+
+ if (gmu->legacy) {
+ /* Sanity check the size of the firmware that was loaded */
+ if (fw_image->size > 0x8000) {
+ DRM_DEV_ERROR(gmu->dev,
+ "GMU firmware is bigger than the available region\n");
+ return -EINVAL;
+ }
+
+ gmu_write_bulk(gmu, REG_A6XX_GMU_CM3_ITCM_START,
+ (u32*) fw_image->data, fw_image->size);
+ return 0;
+ }
+
+
+ for (blk = (const struct block_header *) fw_image->data;
+ (const u8*) blk < fw_image->data + fw_image->size;
+ blk = (const struct block_header *) &blk->data[blk->size >> 2]) {
+ if (blk->size == 0)
+ continue;
+
+ if (in_range(blk->addr, itcm_base, SZ_16K)) {
+ reg_offset = (blk->addr - itcm_base) >> 2;
+ gmu_write_bulk(gmu,
+ REG_A6XX_GMU_CM3_ITCM_START + reg_offset,
+ blk->data, blk->size);
+ } else if (in_range(blk->addr, dtcm_base, SZ_16K)) {
+ reg_offset = (blk->addr - dtcm_base) >> 2;
+ gmu_write_bulk(gmu,
+ REG_A6XX_GMU_CM3_DTCM_START + reg_offset,
+ blk->data, blk->size);
+ } else if (!fw_block_mem(&gmu->icache, blk) &&
+ !fw_block_mem(&gmu->dcache, blk) &&
+ !fw_block_mem(&gmu->dummy, blk)) {
+ DRM_DEV_ERROR(gmu->dev,
+ "failed to match fw block (addr=%.8x size=%d data[0]=%.8x)\n",
+ blk->addr, blk->size, blk->data[0]);
+ }
+ }
+
+ return 0;
+}
+
static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
{
static bool rpmh_init;
struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
- int i, ret;
+ int ret;
u32 chipid;
- u32 *image;
+
+ if (adreno_is_a650(adreno_gpu))
+ gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FAL_INTF, 1);
if (state == GMU_WARM_BOOT) {
ret = a6xx_rpmh_start(gmu);
@@ -589,13 +723,6 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
"GMU firmware is not loaded\n"))
return -ENOENT;
- /* Sanity check the size of the firmware that was loaded */
- if (adreno_gpu->fw[ADRENO_FW_GMU]->size > 0x8000) {
- DRM_DEV_ERROR(gmu->dev,
- "GMU firmware is bigger than the available region\n");
- return -EINVAL;
- }
-
/* Turn on register retention */
gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1);
@@ -609,18 +736,16 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
return ret;
}
- image = (u32 *) adreno_gpu->fw[ADRENO_FW_GMU]->data;
-
- for (i = 0; i < adreno_gpu->fw[ADRENO_FW_GMU]->size >> 2; i++)
- gmu_write(gmu, REG_A6XX_GMU_CM3_ITCM_START + i,
- image[i]);
+ ret = a6xx_gmu_fw_load(gmu);
+ if (ret)
+ return ret;
}
gmu_write(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, 0);
gmu_write(gmu, REG_A6XX_GMU_CM3_BOOT_CONFIG, 0x02);
/* Write the iova of the HFI table */
- gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi->iova);
+ gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi.iova);
gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_INFO, 1);
gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_RANGE_0,
@@ -633,6 +758,9 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
gmu_write(gmu, REG_A6XX_GMU_HFI_SFR_ADDR, chipid);
+ gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_MSG,
+ gmu->log.iova | (gmu->log.size / SZ_4K - 1));
+
/* Set up the lowest idle level on the GMU */
a6xx_gmu_power_config(gmu);
@@ -640,9 +768,11 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
if (ret)
return ret;
- ret = a6xx_gmu_gfx_rail_on(gmu);
- if (ret)
- return ret;
+ if (gmu->legacy) {
+ ret = a6xx_gmu_gfx_rail_on(gmu);
+ if (ret)
+ return ret;
+ }
/* Enable SPTP_PC if the CPU is responsible for it */
if (gmu->idle_level < GMU_IDLE_STATE_SPTP) {
@@ -683,13 +813,13 @@ static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu)
u32 val;
/* Make sure there are no outstanding RPMh votes */
- gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS, val,
+ gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS, val,
(val & 1), 100, 10000);
- gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS1_DRV0_STATUS, val,
+ gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS1_DRV0_STATUS, val,
(val & 1), 100, 10000);
- gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS2_DRV0_STATUS, val,
+ gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS2_DRV0_STATUS, val,
(val & 1), 100, 10000);
- gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS, val,
+ gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS, val,
(val & 1), 100, 1000);
}
@@ -744,6 +874,13 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
status = gmu_read(gmu, REG_A6XX_GMU_GENERAL_7) == 1 ?
GMU_WARM_BOOT : GMU_COLD_BOOT;
+ /*
+ * Warm boot path does not work on newer GPUs
+ * Presumably this is because icache/dcache regions must be restored
+ */
+ if (!gmu->legacy)
+ status = GMU_COLD_BOOT;
+
ret = a6xx_gmu_fw_start(gmu, status);
if (ret)
goto out;
@@ -761,7 +898,10 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
enable_irq(gmu->hfi_irq);
/* Set the GPU to the current freq */
- __a6xx_gmu_set_freq(gmu, gmu->current_perf_index);
+ if (gmu->legacy)
+ __a6xx_gmu_set_freq(gmu, gmu->current_perf_index);
+ else
+ a6xx_hfi_set_freq(gmu, gmu->current_perf_index);
/*
* "enable" the GX power domain which won't actually do anything but it
@@ -919,34 +1059,75 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
return 0;
}
-static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo)
+static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu)
{
- if (IS_ERR_OR_NULL(bo))
- return;
-
- dma_free_wc(gmu->dev, bo->size, bo->virt, bo->iova);
- kfree(bo);
+ msm_gem_kernel_put(gmu->hfi.obj, gmu->aspace, false);
+ msm_gem_kernel_put(gmu->debug.obj, gmu->aspace, false);
+ msm_gem_kernel_put(gmu->icache.obj, gmu->aspace, false);
+ msm_gem_kernel_put(gmu->dcache.obj, gmu->aspace, false);
+ msm_gem_kernel_put(gmu->dummy.obj, gmu->aspace, false);
+ msm_gem_kernel_put(gmu->log.obj, gmu->aspace, false);
+
+ gmu->aspace->mmu->funcs->detach(gmu->aspace->mmu);
+ msm_gem_address_space_put(gmu->aspace);
}
-static struct a6xx_gmu_bo *a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu,
- size_t size)
+static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo,
+ size_t size, u64 iova)
{
- struct a6xx_gmu_bo *bo;
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct drm_device *dev = a6xx_gpu->base.base.dev;
+ uint32_t flags = MSM_BO_WC;
+ u64 range_start, range_end;
+ int ret;
- bo = kzalloc(sizeof(*bo), GFP_KERNEL);
- if (!bo)
- return ERR_PTR(-ENOMEM);
+ size = PAGE_ALIGN(size);
+ if (!iova) {
+ /* no fixed address - use GMU's uncached range */
+ range_start = 0x60000000 + PAGE_SIZE; /* skip dummy page */
+ range_end = 0x80000000;
+ } else {
+ /* range for fixed address */
+ range_start = iova;
+ range_end = iova + size;
+ /* use IOMMU_PRIV for icache/dcache */
+ flags |= MSM_BO_MAP_PRIV;
+ }
- bo->size = PAGE_ALIGN(size);
+ bo->obj = msm_gem_new(dev, size, flags);
+ if (IS_ERR(bo->obj))
+ return PTR_ERR(bo->obj);
- bo->virt = dma_alloc_wc(gmu->dev, bo->size, &bo->iova, GFP_KERNEL);
+ ret = msm_gem_get_and_pin_iova_range(bo->obj, gmu->aspace, &bo->iova,
+ range_start >> PAGE_SHIFT, range_end >> PAGE_SHIFT);
+ if (ret) {
+ drm_gem_object_put(bo->obj);
+ return ret;
+ }
+
+ bo->virt = msm_gem_get_vaddr(bo->obj);
+ bo->size = size;
+
+ return 0;
+}
+
+static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
+{
+ struct iommu_domain *domain;
+ struct msm_mmu *mmu;
- if (!bo->virt) {
- kfree(bo);
- return ERR_PTR(-ENOMEM);
+ domain = iommu_domain_alloc(&platform_bus_type);
+ if (!domain)
+ return -ENODEV;
+
+ mmu = msm_iommu_new(gmu->dev, domain);
+ gmu->aspace = msm_gem_address_space_create(mmu, "gmu", 0x0, 0x7fffffff);
+ if (IS_ERR(gmu->aspace)) {
+ iommu_domain_free(domain);
+ return PTR_ERR(gmu->aspace);
}
- return bo;
+ return 0;
}
/* Return the 'arc-level' for the given frequency */
@@ -1011,8 +1192,8 @@ static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
if (j == pri_count) {
DRM_DEV_ERROR(dev,
- "Level %u not found in in the RPMh list\n",
- level);
+ "Level %u not found in the RPMh list\n",
+ level);
DRM_DEV_ERROR(dev, "Available levels:\n");
for (j = 0; j < pri_count; j++)
DRM_DEV_ERROR(dev, " %u\n", pri[j]);
@@ -1190,6 +1371,7 @@ static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev,
void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
{
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ struct platform_device *pdev = to_platform_device(gmu->dev);
if (!gmu->initialized)
return;
@@ -1202,9 +1384,12 @@ void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
}
iounmap(gmu->mmio);
+ if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "rscc"))
+ iounmap(gmu->rscc);
gmu->mmio = NULL;
+ gmu->rscc = NULL;
- a6xx_gmu_memory_free(gmu, gmu->hfi);
+ a6xx_gmu_memory_free(gmu);
free_irq(gmu->gmu_irq, gmu);
free_irq(gmu->hfi_irq, gmu);
@@ -1217,6 +1402,7 @@ void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
{
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
struct platform_device *pdev = of_find_device_by_node(node);
int ret;
@@ -1226,15 +1412,7 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
gmu->dev = &pdev->dev;
- /* Pass force_dma false to require the DT to set the dma region */
- ret = of_dma_configure(gmu->dev, node, false);
- if (ret)
- return ret;
-
- /* Set the mask after the of_dma_configure() */
- ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(31));
- if (ret)
- return ret;
+ of_dma_configure(gmu->dev, node, true);
/* For now, don't do anything fancy until we get our feet under us */
gmu->idle_level = GMU_IDLE_STATE_ACTIVE;
@@ -1246,20 +1424,64 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
if (ret)
goto err_put_device;
+ ret = a6xx_gmu_memory_probe(gmu);
+ if (ret)
+ goto err_put_device;
+
+ /* Allocate memory for the GMU dummy page */
+ ret = a6xx_gmu_memory_alloc(gmu, &gmu->dummy, SZ_4K, 0x60000000);
+ if (ret)
+ goto err_memory;
+
+ if (adreno_is_a650(adreno_gpu)) {
+ ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
+ SZ_16M - SZ_16K, 0x04000);
+ if (ret)
+ goto err_memory;
+ } else if (adreno_is_a640(adreno_gpu)) {
+ ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
+ SZ_256K - SZ_16K, 0x04000);
+ if (ret)
+ goto err_memory;
+
+ ret = a6xx_gmu_memory_alloc(gmu, &gmu->dcache,
+ SZ_256K - SZ_16K, 0x44000);
+ if (ret)
+ goto err_memory;
+ } else {
+ /* HFI v1, has sptprac */
+ gmu->legacy = true;
+
+ /* Allocate memory for the GMU debug region */
+ ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_16K, 0);
+ if (ret)
+ goto err_memory;
+ }
+
/* Allocate memory for the HFI queues */
- gmu->hfi = a6xx_gmu_memory_alloc(gmu, SZ_16K);
- if (IS_ERR(gmu->hfi))
+ ret = a6xx_gmu_memory_alloc(gmu, &gmu->hfi, SZ_16K, 0);
+ if (ret)
goto err_memory;
- /* Allocate memory for the GMU debug region */
- gmu->debug = a6xx_gmu_memory_alloc(gmu, SZ_16K);
- if (IS_ERR(gmu->debug))
+ /* Allocate memory for the GMU log region */
+ ret = a6xx_gmu_memory_alloc(gmu, &gmu->log, SZ_4K, 0);
+ if (ret)
goto err_memory;
/* Map the GMU registers */
gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");
- if (IS_ERR(gmu->mmio))
+ if (IS_ERR(gmu->mmio)) {
+ ret = PTR_ERR(gmu->mmio);
goto err_memory;
+ }
+
+ if (adreno_is_a650(adreno_gpu)) {
+ gmu->rscc = a6xx_gmu_get_mmio(pdev, "rscc");
+ if (IS_ERR(gmu->rscc))
+ goto err_mmio;
+ } else {
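+		/*
+		 * Parts without a dedicated "rscc" region expose the RSCC within
+		 * the GMU register space at byte offset 0x23000 (dword 0x8c00);
+		 * the RSCC register offsets are defined relative to that block.
+		 */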
+ gmu->rscc = gmu->mmio + 0x23000;
+ }
/* Get the HFI and GMU interrupts */
gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq);
@@ -1286,13 +1508,15 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
err_mmio:
iounmap(gmu->mmio);
+ if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "rscc"))
+ iounmap(gmu->rscc);
free_irq(gmu->gmu_irq, gmu);
free_irq(gmu->hfi_irq, gmu);
-err_memory:
- a6xx_gmu_memory_free(gmu, gmu->hfi);
ret = -ENODEV;
+err_memory:
+ a6xx_gmu_memory_free(gmu);
err_put_device:
/* Drop reference taken in of_find_device_by_node */
put_device(gmu->dev);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
index 4af65a36d5ca..47df4745db50 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -10,9 +10,10 @@
#include "a6xx_hfi.h"
struct a6xx_gmu_bo {
+ struct drm_gem_object *obj;
void *virt;
size_t size;
- dma_addr_t iova;
+ u64 iova;
};
/*
@@ -43,7 +44,10 @@ struct a6xx_gmu_bo {
struct a6xx_gmu {
struct device *dev;
+ struct msm_gem_address_space *aspace;
+
void * __iomem mmio;
+ void * __iomem rscc;
int hfi_irq;
int gmu_irq;
@@ -52,8 +56,12 @@ struct a6xx_gmu {
int idle_level;
- struct a6xx_gmu_bo *hfi;
- struct a6xx_gmu_bo *debug;
+ struct a6xx_gmu_bo hfi;
+ struct a6xx_gmu_bo debug;
+ struct a6xx_gmu_bo icache;
+ struct a6xx_gmu_bo dcache;
+ struct a6xx_gmu_bo dummy;
+ struct a6xx_gmu_bo log;
int nr_clocks;
struct clk_bulk_data *clocks;
@@ -76,6 +84,7 @@ struct a6xx_gmu {
bool initialized;
bool hung;
+ bool legacy; /* a618 or a630 */
};
static inline u32 gmu_read(struct a6xx_gmu *gmu, u32 offset)
@@ -88,6 +97,13 @@ static inline void gmu_write(struct a6xx_gmu *gmu, u32 offset, u32 value)
return msm_writel(value, gmu->mmio + (offset << 2));
}
+static inline void
+gmu_write_bulk(struct a6xx_gmu *gmu, u32 offset, const u32 *data, u32 size)
+{
+ memcpy_toio(gmu->mmio + (offset << 2), data, size);
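+	/* ensure the copied data is posted before any following register writes */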
+ wmb();
+}
+
static inline void gmu_rmw(struct a6xx_gmu *gmu, u32 reg, u32 mask, u32 or)
{
u32 val = gmu_read(gmu, reg);
@@ -111,6 +127,15 @@ static inline u64 gmu_read64(struct a6xx_gmu *gmu, u32 lo, u32 hi)
readl_poll_timeout((gmu)->mmio + ((addr) << 2), val, cond, \
interval, timeout)
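+
+/*
+ * The RSCC accessors mirror the gmu_* helpers above but address the RSCC
+ * block, which is either a separate mapping or an offset into the GMU
+ * register space.
+ */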
+static inline void gmu_write_rscc(struct a6xx_gmu *gmu, u32 offset, u32 value)
+{
+ return msm_writel(value, gmu->rscc + (offset << 2));
+}
+
+#define gmu_poll_timeout_rscc(gmu, addr, val, cond, interval, timeout) \
+ readl_poll_timeout((gmu)->rscc + ((addr) << 2), val, cond, \
+ interval, timeout)
+
/*
* These are the available OOB (out of band requests) to the GMU where "out of
* band" means that the CPU talks to the GMU directly and not through HFI.
@@ -156,10 +181,16 @@ enum a6xx_gmu_oob_state {
#define GMU_OOB_GPU_SET_ACK 24
#define GMU_OOB_GPU_SET_CLEAR 24
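+
+/* Non-legacy GMU firmware uses different bits for the GPU_SET OOB request */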
+#define GMU_OOB_GPU_SET_REQUEST_NEW 30
+#define GMU_OOB_GPU_SET_ACK_NEW 31
+#define GMU_OOB_GPU_SET_CLEAR_NEW 31
+
void a6xx_hfi_init(struct a6xx_gmu *gmu);
int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state);
void a6xx_hfi_stop(struct a6xx_gmu *gmu);
+int a6xx_hfi_send_prep_slumber(struct a6xx_gmu *gmu);
+int a6xx_hfi_set_freq(struct a6xx_gmu *gmu, int index);
bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu);
bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h
index 1cc1c135236b..176ae94d9fe6 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h
@@ -101,6 +101,10 @@ static inline uint32_t A6XX_HFI_IRQ_OOB_MASK(uint32_t val)
#define REG_A6XX_GMU_DCVS_RETURN 0x000023ff
+#define REG_A6XX_GMU_ICACHE_CONFIG 0x00004c00
+
+#define REG_A6XX_GMU_DCACHE_CONFIG 0x00004c01
+
#define REG_A6XX_GMU_SYS_BUS_CONFIG 0x00004c0f
#define REG_A6XX_GMU_CM3_SYSRESET 0x00005000
@@ -199,6 +203,12 @@ static inline uint32_t A6XX_GMU_GPU_NAP_CTRL_SID(uint32_t val)
#define REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE 0x000050ec
+#define REG_A6XX_GPU_GMU_CX_GMU_CX_FAL_INTF 0x000050f0
+
+#define REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_MSG 0x00005100
+
+#define REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_RESP 0x00005101
+
#define REG_A6XX_GMU_BOOT_KMD_LM_HANDSHAKE 0x000051f0
#define REG_A6XX_GMU_LLM_GLM_SLEEP_CTRL 0x00005157
@@ -330,8 +340,6 @@ static inline uint32_t A6XX_GMU_GPU_NAP_CTRL_SID(uint32_t val)
#define REG_A6XX_GMU_AO_SPARE_CNTL 0x00009316
-#define REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0 0x00008c04
-
#define REG_A6XX_GMU_RSCC_CONTROL_REQ 0x00009307
#define REG_A6XX_GMU_RSCC_CONTROL_ACK 0x00009308
@@ -344,39 +352,41 @@ static inline uint32_t A6XX_GMU_GPU_NAP_CTRL_SID(uint32_t val)
#define REG_A6XX_GPU_CC_GX_DOMAIN_MISC 0x00009d42
-#define REG_A6XX_RSCC_PDC_SEQ_START_ADDR 0x00008c08
+#define REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0 0x00000004
+
+#define REG_A6XX_RSCC_PDC_SEQ_START_ADDR 0x00000008
-#define REG_A6XX_RSCC_PDC_MATCH_VALUE_LO 0x00008c09
+#define REG_A6XX_RSCC_PDC_MATCH_VALUE_LO 0x00000009
-#define REG_A6XX_RSCC_PDC_MATCH_VALUE_HI 0x00008c0a
+#define REG_A6XX_RSCC_PDC_MATCH_VALUE_HI 0x0000000a
-#define REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0 0x00008c0b
+#define REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0 0x0000000b
-#define REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR 0x00008c0d
+#define REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR 0x0000000d
-#define REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA 0x00008c0e
+#define REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA 0x0000000e
-#define REG_A6XX_RSCC_TIMESTAMP_UNIT0_TIMESTAMP_L_DRV0 0x00008c82
+#define REG_A6XX_RSCC_TIMESTAMP_UNIT0_TIMESTAMP_L_DRV0 0x00000082
-#define REG_A6XX_RSCC_TIMESTAMP_UNIT0_TIMESTAMP_H_DRV0 0x00008c83
+#define REG_A6XX_RSCC_TIMESTAMP_UNIT0_TIMESTAMP_H_DRV0 0x00000083
-#define REG_A6XX_RSCC_TIMESTAMP_UNIT1_EN_DRV0 0x00008c89
+#define REG_A6XX_RSCC_TIMESTAMP_UNIT1_EN_DRV0 0x00000089
-#define REG_A6XX_RSCC_TIMESTAMP_UNIT1_OUTPUT_DRV0 0x00008c8c
+#define REG_A6XX_RSCC_TIMESTAMP_UNIT1_OUTPUT_DRV0 0x0000008c
-#define REG_A6XX_RSCC_OVERRIDE_START_ADDR 0x00008d00
+#define REG_A6XX_RSCC_OVERRIDE_START_ADDR 0x00000100
-#define REG_A6XX_RSCC_SEQ_BUSY_DRV0 0x00008d01
+#define REG_A6XX_RSCC_SEQ_BUSY_DRV0 0x00000101
-#define REG_A6XX_RSCC_SEQ_MEM_0_DRV0 0x00008d80
+#define REG_A6XX_RSCC_SEQ_MEM_0_DRV0 0x00000180
-#define REG_A6XX_RSCC_TCS0_DRV0_STATUS 0x00008f46
+#define REG_A6XX_RSCC_TCS0_DRV0_STATUS 0x00000346
-#define REG_A6XX_RSCC_TCS1_DRV0_STATUS 0x000090ae
+#define REG_A6XX_RSCC_TCS1_DRV0_STATUS 0x000003ee
-#define REG_A6XX_RSCC_TCS2_DRV0_STATUS 0x00009216
+#define REG_A6XX_RSCC_TCS2_DRV0_STATUS 0x00000496
-#define REG_A6XX_RSCC_TCS3_DRV0_STATUS 0x0000937e
+#define REG_A6XX_RSCC_TCS3_DRV0_STATUS 0x0000053e
#endif /* A6XX_GMU_XML */
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 68af24150de5..a1589e040c57 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -414,7 +414,17 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
a6xx_set_hwcg(gpu, true);
/* VBIF/GBIF start*/
- gpu_write(gpu, REG_A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3);
+ if (adreno_is_a640(adreno_gpu) || adreno_is_a650(adreno_gpu)) {
+ gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE0, 0x00071620);
+ gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE1, 0x00071620);
+ gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE2, 0x00071620);
+ gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE3, 0x00071620);
+ gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE3, 0x00071620);
+ gpu_write(gpu, REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x3);
+ } else {
+ gpu_write(gpu, REG_A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3);
+ }
+
if (adreno_is_a630(adreno_gpu))
gpu_write(gpu, REG_A6XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009);
@@ -429,25 +439,35 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE_LO, 0xfffff000);
gpu_write(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE_HI, 0x0001ffff);
- /* Set the GMEM VA range [0x100000:0x100000 + gpu->gmem - 1] */
- gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MIN_LO,
- REG_A6XX_UCHE_GMEM_RANGE_MIN_HI, 0x00100000);
+ if (!adreno_is_a650(adreno_gpu)) {
+ /* Set the GMEM VA range [0x100000:0x100000 + gpu->gmem - 1] */
+ gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MIN_LO,
+ REG_A6XX_UCHE_GMEM_RANGE_MIN_HI, 0x00100000);
- gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MAX_LO,
- REG_A6XX_UCHE_GMEM_RANGE_MAX_HI,
- 0x00100000 + adreno_gpu->gmem - 1);
+ gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MAX_LO,
+ REG_A6XX_UCHE_GMEM_RANGE_MAX_HI,
+ 0x00100000 + adreno_gpu->gmem - 1);
+ }
gpu_write(gpu, REG_A6XX_UCHE_FILTER_CNTL, 0x804);
gpu_write(gpu, REG_A6XX_UCHE_CACHE_WAYS, 0x4);
- gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x010000c0);
+ if (adreno_is_a640(adreno_gpu) || adreno_is_a650(adreno_gpu))
+ gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x02000140);
+ else
+ gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x010000c0);
gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362c);
/* Setting the mem pool size */
gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 128);
/* Setting the primFifo thresholds default values */
- gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, (0x300 << 11));
+ if (adreno_is_a650(adreno_gpu))
+ gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00300000);
+ else if (adreno_is_a640(adreno_gpu))
+ gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00200000);
+ else
+ gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, (0x300 << 11));
/* Set the AHB default slave response to "ERROR" */
gpu_write(gpu, REG_A6XX_CP_AHB_CNTL, 0x1);
@@ -471,6 +491,19 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A6XX_UCHE_CLIENT_PF, 1);
+ /* Set weights for bicubic filtering */
+ if (adreno_is_a650(adreno_gpu)) {
+ gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_0, 0);
+ gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_1,
+ 0x3fe05ff4);
+ gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_2,
+ 0x3fa0ebee);
+ gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_3,
+ 0x3f5193ed);
+ gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_4,
+ 0x3f0243f0);
+ }
+
/* Protect registers from the CP */
gpu_write(gpu, REG_A6XX_CP_PROTECT_CNTL, 0x00000003);
@@ -508,6 +541,11 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
A6XX_PROTECT_RDONLY(0x980, 0x4));
gpu_write(gpu, REG_A6XX_CP_PROTECT(25), A6XX_PROTECT_RW(0xa630, 0x0));
+ if (adreno_is_a650(adreno_gpu)) {
+ gpu_write(gpu, REG_A6XX_CP_APRIV_CNTL,
+ (1 << 6) | (1 << 5) | (1 << 3) | (1 << 2) | (1 << 1));
+ }
+
/* Enable interrupts */
gpu_write(gpu, REG_A6XX_RBBM_INT_0_MASK, A6XX_INT_MASK);
@@ -566,8 +604,10 @@ out:
*/
a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
- /* Take the GMU out of its special boot mode */
- a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_BOOT_SLUMBER);
+ if (a6xx_gpu->gmu.legacy) {
+ /* Take the GMU out of its special boot mode */
+ a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_BOOT_SLUMBER);
+ }
return ret;
}
@@ -810,6 +850,11 @@ static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu)
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
u64 busy_cycles, busy_time;
+
+ /* Only read the gpu busy if the hardware is already active */
+ if (pm_runtime_get_if_in_use(a6xx_gpu->gmu.dev) == 0)
+ return 0;
+
busy_cycles = gmu_read64(&a6xx_gpu->gmu,
REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L,
REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H);
@@ -819,6 +864,8 @@ static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu)
gpu->devfreq.busy_cycles = busy_cycles;
+ pm_runtime_put(a6xx_gpu->gmu.dev);
+
if (WARN_ON(busy_time > ~0LU))
return ~0LU;
@@ -846,6 +893,7 @@ static const struct adreno_gpu_funcs funcs = {
#if defined(CONFIG_DRM_MSM_GPU_STATE)
.gpu_state_get = a6xx_gpu_state_get,
.gpu_state_put = a6xx_gpu_state_put,
+ .create_address_space = adreno_iommu_create_address_space,
#endif
},
.get_timestamp = a6xx_get_timestamp,
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
index e450e0b97211..9921e632f1ca 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
@@ -17,10 +17,14 @@ static const char * const a6xx_hfi_msg_id[] = {
HFI_MSG_ID(HFI_H2F_MSG_BW_TABLE),
HFI_MSG_ID(HFI_H2F_MSG_PERF_TABLE),
HFI_MSG_ID(HFI_H2F_MSG_TEST),
+ HFI_MSG_ID(HFI_H2F_MSG_START),
+ HFI_MSG_ID(HFI_H2F_MSG_CORE_FW_START),
+ HFI_MSG_ID(HFI_H2F_MSG_GX_BW_PERF_VOTE),
+ HFI_MSG_ID(HFI_H2F_MSG_PREPARE_SLUMBER),
};
-static int a6xx_hfi_queue_read(struct a6xx_hfi_queue *queue, u32 *data,
- u32 dwords)
+static int a6xx_hfi_queue_read(struct a6xx_gmu *gmu,
+ struct a6xx_hfi_queue *queue, u32 *data, u32 dwords)
{
struct a6xx_hfi_queue_header *header = queue->header;
u32 i, hdr, index = header->read_index;
@@ -48,6 +52,9 @@ static int a6xx_hfi_queue_read(struct a6xx_hfi_queue *queue, u32 *data,
index = (index + 1) % header->size;
}
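+	/*
+	 * Non-legacy firmware pads each message to a 4-dword boundary (see the
+	 * matching padding in a6xx_hfi_queue_write), so advance the read index
+	 * past that padding as well.
+	 */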
+ if (!gmu->legacy)
+ index = ALIGN(index, 4) % header->size;
+
header->read_index = index;
return HFI_HEADER_SIZE(hdr);
}
@@ -73,6 +80,12 @@ static int a6xx_hfi_queue_write(struct a6xx_gmu *gmu,
index = (index + 1) % header->size;
}
+	/* Cookify any unused data at the end of the write buffer */
+ if (!gmu->legacy) {
+ for (; index % 4; index = (index + 1) % header->size)
+ queue->data[index] = 0xfafafafa;
+ }
+
header->write_index = index;
spin_unlock(&queue->lock);
@@ -106,7 +119,7 @@ static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,
struct a6xx_hfi_msg_response resp;
/* Get the next packet */
- ret = a6xx_hfi_queue_read(queue, (u32 *) &resp,
+ ret = a6xx_hfi_queue_read(gmu, queue, (u32 *) &resp,
sizeof(resp) >> 2);
/* If the queue is empty our response never made it */
@@ -176,8 +189,8 @@ static int a6xx_hfi_send_gmu_init(struct a6xx_gmu *gmu, int boot_state)
{
struct a6xx_hfi_msg_gmu_init_cmd msg = { 0 };
- msg.dbg_buffer_addr = (u32) gmu->debug->iova;
- msg.dbg_buffer_size = (u32) gmu->debug->size;
+ msg.dbg_buffer_addr = (u32) gmu->debug.iova;
+ msg.dbg_buffer_size = (u32) gmu->debug.size;
msg.boot_state = boot_state;
return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_INIT, &msg, sizeof(msg),
@@ -195,6 +208,28 @@ static int a6xx_hfi_get_fw_version(struct a6xx_gmu *gmu, u32 *version)
version, sizeof(*version));
}
+static int a6xx_hfi_send_perf_table_v1(struct a6xx_gmu *gmu)
+{
+ struct a6xx_hfi_msg_perf_table_v1 msg = { 0 };
+ int i;
+
+ msg.num_gpu_levels = gmu->nr_gpu_freqs;
+ msg.num_gmu_levels = gmu->nr_gmu_freqs;
+
+ for (i = 0; i < gmu->nr_gpu_freqs; i++) {
+ msg.gx_votes[i].vote = gmu->gx_arc_votes[i];
+ msg.gx_votes[i].freq = gmu->gpu_freqs[i] / 1000;
+ }
+
+ for (i = 0; i < gmu->nr_gmu_freqs; i++) {
+ msg.cx_votes[i].vote = gmu->cx_arc_votes[i];
+ msg.cx_votes[i].freq = gmu->gmu_freqs[i] / 1000;
+ }
+
+ return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PERF_TABLE, &msg, sizeof(msg),
+ NULL, 0);
+}
+
static int a6xx_hfi_send_perf_table(struct a6xx_gmu *gmu)
{
struct a6xx_hfi_msg_perf_table msg = { 0 };
@@ -205,6 +240,7 @@ static int a6xx_hfi_send_perf_table(struct a6xx_gmu *gmu)
for (i = 0; i < gmu->nr_gpu_freqs; i++) {
msg.gx_votes[i].vote = gmu->gx_arc_votes[i];
+ msg.gx_votes[i].acd = 0xffffffff;
msg.gx_votes[i].freq = gmu->gpu_freqs[i] / 1000;
}
@@ -306,7 +342,45 @@ static int a6xx_hfi_send_test(struct a6xx_gmu *gmu)
NULL, 0);
}
-int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state)
+static int a6xx_hfi_send_start(struct a6xx_gmu *gmu)
+{
+ struct a6xx_hfi_msg_start msg = { 0 };
+
+ return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_START, &msg, sizeof(msg),
+ NULL, 0);
+}
+
+static int a6xx_hfi_send_core_fw_start(struct a6xx_gmu *gmu)
+{
+ struct a6xx_hfi_msg_core_fw_start msg = { 0 };
+
+ return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_CORE_FW_START, &msg,
+ sizeof(msg), NULL, 0);
+}
+
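+/*
+ * With non-legacy firmware, GPU frequency votes and the slumber handshake
+ * are requested through HFI messages rather than the older OOB/register path.
+ */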
+int a6xx_hfi_set_freq(struct a6xx_gmu *gmu, int index)
+{
+ struct a6xx_hfi_gx_bw_perf_vote_cmd msg = { 0 };
+
+ msg.ack_type = 1; /* blocking */
+ msg.freq = index;
+ msg.bw = 0; /* TODO: bus scaling */
+
+ return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_GX_BW_PERF_VOTE, &msg,
+ sizeof(msg), NULL, 0);
+}
+
+int a6xx_hfi_send_prep_slumber(struct a6xx_gmu *gmu)
+{
+ struct a6xx_hfi_prep_slumber_cmd msg = { 0 };
+
+ /* TODO: should freq and bw fields be non-zero ? */
+
+ return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PREPARE_SLUMBER, &msg,
+ sizeof(msg), NULL, 0);
+}
+
+static int a6xx_hfi_start_v1(struct a6xx_gmu *gmu, int boot_state)
{
int ret;
@@ -324,7 +398,7 @@ int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state)
* the GMU firmware
*/
- ret = a6xx_hfi_send_perf_table(gmu);
+ ret = a6xx_hfi_send_perf_table_v1(gmu);
if (ret)
return ret;
@@ -341,6 +415,37 @@ int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state)
return 0;
}
+int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state)
+{
+ int ret;
+
+ if (gmu->legacy)
+ return a6xx_hfi_start_v1(gmu, boot_state);
+
+ ret = a6xx_hfi_send_perf_table(gmu);
+ if (ret)
+ return ret;
+
+ ret = a6xx_hfi_send_bw_table(gmu);
+ if (ret)
+ return ret;
+
+ ret = a6xx_hfi_send_core_fw_start(gmu);
+ if (ret)
+ return ret;
+
+ /*
+ * Downstream driver sends this in its "a6xx_hw_init" equivalent,
+	 * but there seems to be no harm in sending it here
+ */
+ ret = a6xx_hfi_send_start(gmu);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
void a6xx_hfi_stop(struct a6xx_gmu *gmu)
{
int i;
@@ -385,7 +490,7 @@ static void a6xx_hfi_queue_init(struct a6xx_hfi_queue *queue,
void a6xx_hfi_init(struct a6xx_gmu *gmu)
{
- struct a6xx_gmu_bo *hfi = gmu->hfi;
+ struct a6xx_gmu_bo *hfi = &gmu->hfi;
struct a6xx_hfi_queue_table_header *table = hfi->virt;
struct a6xx_hfi_queue_header *headers = hfi->virt + sizeof(*table);
u64 offset;
@@ -415,5 +520,5 @@ void a6xx_hfi_init(struct a6xx_gmu *gmu)
/* GMU response queue */
offset += SZ_4K;
a6xx_hfi_queue_init(&gmu->queues[1], &headers[1], hfi->virt + offset,
- hfi->iova + offset, 4);
+ hfi->iova + offset, gmu->legacy ? 4 : 1);
}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.h b/drivers/gpu/drm/msm/adreno/a6xx_hfi.h
index 60d1319fa44f..2bd670ca42d6 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.h
@@ -51,7 +51,8 @@ struct a6xx_hfi_queue {
/* HFI message types */
#define HFI_MSG_CMD 0
-#define HFI_MSG_ACK 2
+#define HFI_MSG_ACK 1
+#define HFI_MSG_ACK_V1 2
#define HFI_F2H_MSG_ACK 126
@@ -94,7 +95,13 @@ struct perf_level {
u32 freq;
};
-struct a6xx_hfi_msg_perf_table {
+struct perf_gx_level {
+ u32 vote;
+ u32 acd;
+ u32 freq;
+};
+
+struct a6xx_hfi_msg_perf_table_v1 {
u32 header;
u32 num_gpu_levels;
u32 num_gmu_levels;
@@ -103,6 +110,15 @@ struct a6xx_hfi_msg_perf_table {
struct perf_level cx_votes[4];
};
+struct a6xx_hfi_msg_perf_table {
+ u32 header;
+ u32 num_gpu_levels;
+ u32 num_gmu_levels;
+
+ struct perf_gx_level gx_votes[16];
+ struct perf_level cx_votes[4];
+};
+
#define HFI_H2F_MSG_BW_TABLE 3
struct a6xx_hfi_msg_bw_table {
@@ -124,4 +140,34 @@ struct a6xx_hfi_msg_test {
u32 header;
};
+#define HFI_H2F_MSG_START 10
+
+struct a6xx_hfi_msg_start {
+ u32 header;
+};
+
+#define HFI_H2F_MSG_CORE_FW_START 14
+
+struct a6xx_hfi_msg_core_fw_start {
+ u32 header;
+ u32 handle;
+};
+
+#define HFI_H2F_MSG_GX_BW_PERF_VOTE 30
+
+struct a6xx_hfi_gx_bw_perf_vote_cmd {
+ u32 header;
+ u32 ack_type;
+ u32 freq;
+ u32 bw;
+};
+
+#define HFI_H2F_MSG_PREPARE_SLUMBER 33
+
+struct a6xx_hfi_prep_slumber_cmd {
+ u32 header;
+ u32 bw;
+ u32 freq;
+};
+
#endif
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index cb3a6e597d76..7732f03d9e3a 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -93,6 +93,17 @@ static const struct adreno_info gpulist[] = {
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a3xx_gpu_init,
}, {
+ .rev = ADRENO_REV(4, 0, 5, ANY_ID),
+ .revn = 405,
+ .name = "A405",
+ .fw = {
+ [ADRENO_FW_PM4] = "a420_pm4.fw",
+ [ADRENO_FW_PFP] = "a420_pfp.fw",
+ },
+ .gmem = SZ_256K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a4xx_gpu_init,
+ }, {
.rev = ADRENO_REV(4, 2, 0, ANY_ID),
.revn = 420,
.name = "A420",
@@ -189,6 +200,30 @@ static const struct adreno_info gpulist[] = {
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a6xx_gpu_init,
.zapfw = "a630_zap.mdt",
+ }, {
+ .rev = ADRENO_REV(6, 4, 0, ANY_ID),
+ .revn = 640,
+ .name = "A640",
+ .fw = {
+ [ADRENO_FW_SQE] = "a630_sqe.fw",
+ [ADRENO_FW_GMU] = "a640_gmu.bin",
+ },
+ .gmem = SZ_1M,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a6xx_gpu_init,
+ .zapfw = "a640_zap.mdt",
+ }, {
+ .rev = ADRENO_REV(6, 5, 0, ANY_ID),
+ .revn = 650,
+ .name = "A650",
+ .fw = {
+ [ADRENO_FW_SQE] = "a650_sqe.fw",
+ [ADRENO_FW_GMU] = "a650_gmu.bin",
+ },
+ .gmem = SZ_1M + SZ_128K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a6xx_gpu_init,
+ .zapfw = "a650_zap.mdt",
},
};
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 1d5c43c22269..89673c7ed473 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -185,6 +185,23 @@ int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid)
return zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw, pasid);
}
+struct msm_gem_address_space *
+adreno_iommu_create_address_space(struct msm_gpu *gpu,
+ struct platform_device *pdev)
+{
+ struct iommu_domain *iommu = iommu_domain_alloc(&platform_bus_type);
+ struct msm_mmu *mmu = msm_iommu_new(&pdev->dev, iommu);
+ struct msm_gem_address_space *aspace;
+
+ aspace = msm_gem_address_space_create(mmu, "gpu", SZ_16M,
+ 0xfffffff);
+
+ if (IS_ERR(aspace) && !IS_ERR(mmu))
+ mmu->funcs->destroy(mmu);
+
+ return aspace;
+}
+
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -197,7 +214,7 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
*value = adreno_gpu->gmem;
return 0;
case MSM_PARAM_GMEM_BASE:
- *value = 0x100000;
+ *value = !adreno_is_a650(adreno_gpu) ? 0x100000 : 0;
return 0;
case MSM_PARAM_CHIP_ID:
*value = adreno_gpu->rev.patchid |
@@ -459,7 +476,7 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
break;
/* fall-thru */
case MSM_SUBMIT_CMD_BUF:
- OUT_PKT3(ring, adreno_is_a430(adreno_gpu) ?
+ OUT_PKT3(ring, adreno_is_a4xx(adreno_gpu) ?
CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2);
OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
OUT_RING(ring, submit->cmd[i].size);
@@ -988,12 +1005,6 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
adreno_gpu_config.ioname = "kgsl_3d0_reg_memory";
- adreno_gpu_config.va_start = SZ_16M;
- adreno_gpu_config.va_end = 0xffffffff;
- /* maximum range of a2xx mmu */
- if (adreno_is_a2xx(adreno_gpu))
- adreno_gpu_config.va_end = SZ_16M + 0xfff * SZ_64K;
-
adreno_gpu_config.nr_rings = nr_rings;
adreno_get_pwrlevels(&pdev->dev, gpu);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index 9ff4e550e7bd..2f5d2c3acc3a 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -202,6 +202,11 @@ static inline bool adreno_is_a4xx(struct adreno_gpu *gpu)
return (gpu->revn >= 400) && (gpu->revn < 500);
}
+static inline int adreno_is_a405(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 405;
+}
+
static inline int adreno_is_a420(struct adreno_gpu *gpu)
{
return gpu->revn == 420;
@@ -237,6 +242,16 @@ static inline int adreno_is_a630(struct adreno_gpu *gpu)
return gpu->revn == 630;
}
+static inline int adreno_is_a640(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 640;
+}
+
+static inline int adreno_is_a650(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 650;
+}
+
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
const struct firmware *adreno_request_fw(struct adreno_gpu *adreno_gpu,
const char *fwname);
@@ -273,6 +288,14 @@ int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state);
int adreno_gpu_state_put(struct msm_gpu_state *state);
/*
+ * Common helper function to initialize the default address space for arm-smmu
+ * attached targets
+ */
+struct msm_gem_address_space *
+adreno_iommu_create_address_space(struct msm_gpu *gpu,
+ struct platform_device *pdev);
+
+/*
* For a5xx and a6xx targets load the zap shader that is used to pull the GPU
* out of secure mode
*/
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
index 11f2bebe3869..7c230f719ad3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
@@ -36,22 +36,6 @@ static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
return to_dpu_kms(priv->kms);
}
-static bool _dpu_core_video_mode_intf_connected(struct drm_crtc *crtc)
-{
- struct drm_crtc *tmp_crtc;
-
- drm_for_each_crtc(tmp_crtc, crtc->dev) {
- if ((dpu_crtc_get_intf_mode(tmp_crtc) == INTF_MODE_VIDEO) &&
- tmp_crtc->enabled) {
- DPU_DEBUG("video interface connected crtc:%d\n",
- tmp_crtc->base.id);
- return true;
- }
- }
-
- return false;
-}
-
static void _dpu_core_perf_calc_crtc(struct dpu_kms *kms,
struct drm_crtc *crtc,
struct drm_crtc_state *state,
@@ -94,7 +78,6 @@ int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
u32 bw, threshold;
u64 bw_sum_of_intfs = 0;
enum dpu_crtc_client_type curr_client_type;
- bool is_video_mode;
struct dpu_crtc_state *dpu_cstate;
struct drm_crtc *tmp_crtc;
struct dpu_kms *kms;
@@ -144,11 +127,7 @@ int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
bw = DIV_ROUND_UP_ULL(bw_sum_of_intfs, 1000);
DPU_DEBUG("calculated bandwidth=%uk\n", bw);
- is_video_mode = dpu_crtc_get_intf_mode(crtc) == INTF_MODE_VIDEO;
- threshold = (is_video_mode ||
- _dpu_core_video_mode_intf_connected(crtc)) ?
- kms->catalog->perf.max_bw_low :
- kms->catalog->perf.max_bw_high;
+ threshold = kms->catalog->perf.max_bw_high;
DPU_DEBUG("final threshold bw limit = %d\n", threshold);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index 17448505a9b5..e15b42a780e0 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -9,6 +9,7 @@
#include <linux/sort.h>
#include <linux/debugfs.h>
#include <linux/ktime.h>
+#include <linux/bits.h>
#include <drm/drm_crtc.h>
#include <drm/drm_flip_work.h>
@@ -20,6 +21,7 @@
#include "dpu_kms.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_ctl.h"
+#include "dpu_hw_dspp.h"
#include "dpu_crtc.h"
#include "dpu_plane.h"
#include "dpu_encoder.h"
@@ -40,6 +42,9 @@
/* timeout in ms waiting for frame done */
#define DPU_CRTC_FRAME_DONE_TIMEOUT_MS 60
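+
+/*
+ * drm_color_ctm coefficients are sign-magnitude S31.32 fixed point; drop the
+ * sign bit and shift the fraction down to the 18-bit (S3.15-style) value the
+ * PCC block expects.
+ */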
+#define CONVERT_S3_15(val) \
+ (((((u64)val) & ~BIT_ULL(63)) >> 17) & GENMASK_ULL(17, 0))
+
static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
{
struct msm_drm_private *priv = crtc->dev->dev_private;
@@ -88,11 +93,9 @@ static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc)
{
- struct dpu_crtc *dpu_crtc;
struct dpu_crtc_state *crtc_state;
int lm_idx, lm_horiz_position;
- dpu_crtc = to_dpu_crtc(crtc);
crtc_state = to_dpu_crtc_state(crtc->state);
lm_horiz_position = 0;
@@ -422,6 +425,74 @@ static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
drm_mode_debug_printmodeline(adj_mode);
}
+static void _dpu_crtc_get_pcc_coeff(struct drm_crtc_state *state,
+ struct dpu_hw_pcc_cfg *cfg)
+{
+ struct drm_color_ctm *ctm;
+
+ memset(cfg, 0, sizeof(struct dpu_hw_pcc_cfg));
+
+ ctm = (struct drm_color_ctm *)state->ctm->data;
+
+ if (!ctm)
+ return;
+
+ cfg->r.r = CONVERT_S3_15(ctm->matrix[0]);
+ cfg->g.r = CONVERT_S3_15(ctm->matrix[1]);
+ cfg->b.r = CONVERT_S3_15(ctm->matrix[2]);
+
+ cfg->r.g = CONVERT_S3_15(ctm->matrix[3]);
+ cfg->g.g = CONVERT_S3_15(ctm->matrix[4]);
+ cfg->b.g = CONVERT_S3_15(ctm->matrix[5]);
+
+ cfg->r.b = CONVERT_S3_15(ctm->matrix[6]);
+ cfg->g.b = CONVERT_S3_15(ctm->matrix[7]);
+ cfg->b.b = CONVERT_S3_15(ctm->matrix[8]);
+}
+
+static void _dpu_crtc_setup_cp_blocks(struct drm_crtc *crtc)
+{
+ struct drm_crtc_state *state = crtc->state;
+ struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
+ struct dpu_crtc_mixer *mixer = cstate->mixers;
+ struct dpu_hw_pcc_cfg cfg;
+ struct dpu_hw_ctl *ctl;
+ struct dpu_hw_mixer *lm;
+ struct dpu_hw_dspp *dspp;
+ int i;
+
+ if (!state->color_mgmt_changed)
+ return;
+
+ for (i = 0; i < cstate->num_mixers; i++) {
+ ctl = mixer[i].lm_ctl;
+ lm = mixer[i].hw_lm;
+ dspp = mixer[i].hw_dspp;
+
+ if (!dspp || !dspp->ops.setup_pcc)
+ continue;
+
+ if (!state->ctm) {
+ dspp->ops.setup_pcc(dspp, NULL);
+ } else {
+ _dpu_crtc_get_pcc_coeff(state, &cfg);
+ dspp->ops.setup_pcc(dspp, &cfg);
+ }
+
+ mixer[i].flush_mask |= ctl->ops.get_bitmask_dspp(ctl,
+ mixer[i].hw_dspp->idx);
+
+ /* stage config flush mask */
+ ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);
+
+ DPU_DEBUG("lm %d, ctl %d, flush mask 0x%x\n",
+			mixer[i].hw_lm->idx - LM_0,
+ ctl->idx - CTL_0,
+ mixer[i].flush_mask);
+ }
+}
+
static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
@@ -430,7 +501,6 @@ static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_encoder *encoder;
struct drm_device *dev;
unsigned long flags;
- struct dpu_crtc_smmu_state_data *smmu_state;
if (!crtc) {
DPU_ERROR("invalid crtc\n");
@@ -448,7 +518,6 @@ static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
dpu_crtc = to_dpu_crtc(crtc);
cstate = to_dpu_crtc_state(crtc->state);
dev = crtc->dev;
- smmu_state = &dpu_crtc->smmu_state;
_dpu_crtc_setup_lm_bounds(crtc, crtc->state);
@@ -475,6 +544,8 @@ static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
_dpu_crtc_blend_setup(crtc);
+ _dpu_crtc_setup_cp_blocks(crtc);
+
/*
* PP_DONE irq is only used by command mode for now.
* It is better to request pending before FLUSH and START trigger
@@ -491,7 +562,6 @@ static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_device *dev;
struct drm_plane *plane;
struct msm_drm_private *priv;
- struct msm_drm_thread *event_thread;
unsigned long flags;
struct dpu_crtc_state *cstate;
@@ -513,8 +583,6 @@ static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
return;
}
- event_thread = &priv->event_thread[crtc->index];
-
if (dpu_crtc->event) {
DPU_DEBUG("already received dpu_crtc->event\n");
} else {
@@ -567,7 +635,6 @@ static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
static void dpu_crtc_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
- struct dpu_crtc *dpu_crtc;
struct dpu_crtc_state *cstate;
if (!crtc || !state) {
@@ -575,7 +642,6 @@ static void dpu_crtc_destroy_state(struct drm_crtc *crtc,
return;
}
- dpu_crtc = to_dpu_crtc(crtc);
cstate = to_dpu_crtc_state(state);
DPU_DEBUG("crtc%d\n", crtc->base.id);
@@ -662,11 +728,9 @@ static void dpu_crtc_reset(struct drm_crtc *crtc)
/**
* dpu_crtc_duplicate_state - state duplicate hook
* @crtc: Pointer to drm crtc structure
- * @Returns: Pointer to new drm_crtc_state structure
*/
static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
{
- struct dpu_crtc *dpu_crtc;
struct dpu_crtc_state *cstate, *old_cstate;
if (!crtc || !crtc->state) {
@@ -674,7 +738,6 @@ static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
return NULL;
}
- dpu_crtc = to_dpu_crtc(crtc);
old_cstate = to_dpu_crtc_state(crtc->state);
cstate = kmemdup(old_cstate, sizeof(*old_cstate), GFP_KERNEL);
if (!cstate) {
@@ -693,9 +756,7 @@ static void dpu_crtc_disable(struct drm_crtc *crtc,
{
struct dpu_crtc *dpu_crtc;
struct dpu_crtc_state *cstate;
- struct drm_display_mode *mode;
struct drm_encoder *encoder;
- struct msm_drm_private *priv;
unsigned long flags;
bool release_bandwidth = false;
@@ -705,8 +766,6 @@ static void dpu_crtc_disable(struct drm_crtc *crtc,
}
dpu_crtc = to_dpu_crtc(crtc);
cstate = to_dpu_crtc_state(crtc->state);
- mode = &cstate->base.adjusted_mode;
- priv = crtc->dev->dev_private;
DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
@@ -768,14 +827,12 @@ static void dpu_crtc_enable(struct drm_crtc *crtc,
{
struct dpu_crtc *dpu_crtc;
struct drm_encoder *encoder;
- struct msm_drm_private *priv;
bool request_bandwidth;
if (!crtc) {
DPU_ERROR("invalid crtc\n");
return;
}
- priv = crtc->dev->dev_private;
pm_runtime_get_sync(crtc->dev->dev);
@@ -1319,6 +1376,8 @@ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs);
+ drm_crtc_enable_color_mgmt(crtc, 0, true, 0);
+
/* save user friendly CRTC name for later */
snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
index 5174e86124cc..cec3474340e8 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
@@ -73,12 +73,14 @@ struct dpu_crtc_smmu_state_data {
* struct dpu_crtc_mixer: stores the map for each virtual pipeline in the CRTC
* @hw_lm: LM HW Driver context
* @lm_ctl: CTL Path HW driver context
+ * @hw_dspp: DSPP HW driver context
* @mixer_op_mode: mixer blending operation mode
* @flush_mask: mixer flush mask for ctl, mixer and pipe
*/
struct dpu_crtc_mixer {
struct dpu_hw_mixer *hw_lm;
struct dpu_hw_ctl *lm_ctl;
+ struct dpu_hw_dspp *hw_dspp;
u32 mixer_op_mode;
u32 flush_mask;
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index a1b79ee2bd9d..63976dcd2ac8 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -20,6 +20,7 @@
#include "dpu_hw_catalog.h"
#include "dpu_hw_intf.h"
#include "dpu_hw_ctl.h"
+#include "dpu_hw_dspp.h"
#include "dpu_formats.h"
#include "dpu_encoder_phys.h"
#include "dpu_crtc.h"
@@ -536,6 +537,7 @@ static struct msm_display_topology dpu_encoder_get_topology(
* 1 LM, 1 INTF
* 2 LM, 1 INTF (stream merge to support high resolution interfaces)
*
+ * Adding color blocks only to primary interface
*/
if (intf_count == 2)
topology.num_lm = 2;
@@ -544,6 +546,9 @@ static struct msm_display_topology dpu_encoder_get_topology(
else
topology.num_lm = (mode->hdisplay > MAX_HDISPLAY_SPLIT) ? 2 : 1;
+ if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI)
+ topology.num_dspp = topology.num_lm;
+
topology.num_enc = 0;
topology.num_intf = intf_count;
@@ -959,7 +964,8 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
struct dpu_hw_blk *hw_pp[MAX_CHANNELS_PER_ENC];
struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC];
struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC];
- int num_lm, num_ctl, num_pp;
+ struct dpu_hw_blk *hw_dspp[MAX_CHANNELS_PER_ENC] = { NULL };
+ int num_lm, num_ctl, num_pp, num_dspp;
int i, j;
if (!drm_enc) {
@@ -1008,6 +1014,9 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl));
num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
drm_enc->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
+ num_dspp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+ drm_enc->base.id, DPU_HW_BLK_DSPP, hw_dspp,
+ ARRAY_SIZE(hw_dspp));
for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
dpu_enc->hw_pp[i] = i < num_pp ? to_dpu_hw_pingpong(hw_pp[i])
@@ -1020,6 +1029,7 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]);
cstate->mixers[i].lm_ctl = to_dpu_hw_ctl(hw_ctl[ctl_idx]);
+ cstate->mixers[i].hw_dspp = to_dpu_hw_dspp(hw_dspp[i]);
}
cstate->num_mixers = num_lm;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index c567917541e8..29d4fde3172b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -41,6 +41,8 @@
#define PINGPONG_SDM845_SPLIT_MASK \
(PINGPONG_SDM845_MASK | BIT(DPU_PINGPONG_TE2))
+#define DSPP_SC7180_MASK BIT(DPU_DSPP_PCC)
+
#define DEFAULT_PIXEL_RAM_SIZE (50 * 1024)
#define DEFAULT_DPU_LINE_WIDTH 2048
#define DEFAULT_DPU_OUTPUT_LINE_WIDTH 2560
@@ -291,29 +293,30 @@ static const struct dpu_lm_sub_blks sdm845_lm_sblk = {
},
};
-#define LM_BLK(_name, _id, _base, _fmask, _sblk, _pp, _lmpair) \
+#define LM_BLK(_name, _id, _base, _fmask, _sblk, _pp, _lmpair, _dspp) \
{ \
.name = _name, .id = _id, \
.base = _base, .len = 0x320, \
.features = _fmask, \
.sblk = _sblk, \
.pingpong = _pp, \
- .lm_pair_mask = (1 << _lmpair) \
+ .lm_pair_mask = (1 << _lmpair), \
+ .dspp = _dspp \
}
static const struct dpu_lm_cfg sdm845_lm[] = {
LM_BLK("lm_0", LM_0, 0x44000, MIXER_SDM845_MASK,
- &sdm845_lm_sblk, PINGPONG_0, LM_1),
+ &sdm845_lm_sblk, PINGPONG_0, LM_1, 0),
LM_BLK("lm_1", LM_1, 0x45000, MIXER_SDM845_MASK,
- &sdm845_lm_sblk, PINGPONG_1, LM_0),
+ &sdm845_lm_sblk, PINGPONG_1, LM_0, 0),
LM_BLK("lm_2", LM_2, 0x46000, MIXER_SDM845_MASK,
- &sdm845_lm_sblk, PINGPONG_2, LM_5),
+ &sdm845_lm_sblk, PINGPONG_2, LM_5, 0),
LM_BLK("lm_3", LM_3, 0x0, MIXER_SDM845_MASK,
- &sdm845_lm_sblk, PINGPONG_MAX, 0),
+ &sdm845_lm_sblk, PINGPONG_MAX, 0, 0),
LM_BLK("lm_4", LM_4, 0x0, MIXER_SDM845_MASK,
- &sdm845_lm_sblk, PINGPONG_MAX, 0),
+ &sdm845_lm_sblk, PINGPONG_MAX, 0, 0),
LM_BLK("lm_5", LM_5, 0x49000, MIXER_SDM845_MASK,
- &sdm845_lm_sblk, PINGPONG_3, LM_2),
+ &sdm845_lm_sblk, PINGPONG_3, LM_2, 0),
};
/* SC7180 */
@@ -328,11 +331,30 @@ static const struct dpu_lm_sub_blks sc7180_lm_sblk = {
static const struct dpu_lm_cfg sc7180_lm[] = {
LM_BLK("lm_0", LM_0, 0x44000, MIXER_SC7180_MASK,
- &sc7180_lm_sblk, PINGPONG_0, LM_1),
+ &sc7180_lm_sblk, PINGPONG_0, LM_1, DSPP_0),
LM_BLK("lm_1", LM_1, 0x45000, MIXER_SC7180_MASK,
- &sc7180_lm_sblk, PINGPONG_1, LM_0),
+ &sc7180_lm_sblk, PINGPONG_1, LM_0, 0),
+};
+
+/*************************************************************
+ * DSPP sub blocks config
+ *************************************************************/
+static const struct dpu_dspp_sub_blks sc7180_dspp_sblk = {
+ .pcc = {.id = DPU_DSPP_PCC, .base = 0x1700,
+ .len = 0x90, .version = 0x10000},
};
+#define DSPP_BLK(_name, _id, _base) \
+ {\
+ .name = _name, .id = _id, \
+ .base = _base, .len = 0x1800, \
+ .features = DSPP_SC7180_MASK, \
+ .sblk = &sc7180_dspp_sblk \
+ }
+
+static const struct dpu_dspp_cfg sc7180_dspp[] = {
+ DSPP_BLK("dspp_0", DSPP_0, 0x54000),
+};
/*************************************************************
* PINGPONG sub blocks config
*************************************************************/
@@ -515,8 +537,8 @@ static const struct dpu_perf_cfg sdm845_perf_data = {
};
static const struct dpu_perf_cfg sc7180_perf_data = {
- .max_bw_low = 3900000,
- .max_bw_high = 5500000,
+ .max_bw_low = 6800000,
+ .max_bw_high = 6800000,
.min_core_ib = 2400000,
.min_llcc_ib = 800000,
.min_dram_ib = 800000,
@@ -587,6 +609,8 @@ static void sc7180_cfg_init(struct dpu_mdss_cfg *dpu_cfg)
.sspp = sc7180_sspp,
.mixer_count = ARRAY_SIZE(sc7180_lm),
.mixer = sc7180_lm,
+ .dspp_count = ARRAY_SIZE(sc7180_dspp),
+ .dspp = sc7180_dspp,
.pingpong_count = ARRAY_SIZE(sc7180_pp),
.pingpong = sc7180_pp,
.intf_count = ARRAY_SIZE(sc7180_intf),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
index 09df7d87dd43..f7de43838c69 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
@@ -146,6 +146,17 @@ enum {
};
/**
+ * DSPP sub-blocks
+ * @DPU_DSPP_PCC Panel color correction block
+ * @DPU_DSPP_GC Gamma correction block
+ */
+enum {
+ DPU_DSPP_PCC = 0x1,
+ DPU_DSPP_GC,
+ DPU_DSPP_MAX
+};
+
+/**
* PINGPONG sub-blocks
* @DPU_PINGPONG_TE Tear check block
* @DPU_PINGPONG_TE2 Additional tear check block for split pipes
@@ -377,6 +388,16 @@ struct dpu_lm_sub_blks {
struct dpu_pp_blk gc;
};
+/**
+ * struct dpu_dspp_sub_blks: Information of DSPP block
+ * @gc : gamma correction block
+ * @pcc: pixel color correction block
+ */
+struct dpu_dspp_sub_blks {
+ struct dpu_pp_blk gc;
+ struct dpu_pp_blk pcc;
+};
+
struct dpu_pingpong_sub_blks {
struct dpu_pp_blk te;
struct dpu_pp_blk te2;
@@ -471,10 +492,24 @@ struct dpu_lm_cfg {
DPU_HW_BLK_INFO;
const struct dpu_lm_sub_blks *sblk;
u32 pingpong;
+ u32 dspp;
unsigned long lm_pair_mask;
};
/**
+ * struct dpu_dspp_cfg - information of DSPP blocks
+ * @id enum identifying this block
+ * @base register offset of this block
+ * @features bit mask identifying sub-blocks/features
+ * supported by this block
+ * @sblk sub-blocks information
+ */
+struct dpu_dspp_cfg {
+ DPU_HW_BLK_INFO;
+ const struct dpu_dspp_sub_blks *sblk;
+};
+
+/**
* struct dpu_pingpong_cfg - information of PING-PONG blocks
* @id enum identifying this block
* @base register offset of this block
@@ -688,6 +723,9 @@ struct dpu_mdss_cfg {
u32 ad_count;
+ u32 dspp_count;
+ const struct dpu_dspp_cfg *dspp;
+
/* Add additional block data structures here */
struct dpu_perf_cfg perf;
@@ -716,6 +754,7 @@ struct dpu_mdss_hw_cfg_handler {
#define BLK_PINGPONG(s) ((s)->pingpong)
#define BLK_INTF(s) ((s)->intf)
#define BLK_AD(s) ((s)->ad)
+#define BLK_DSPP(s) ((s)->dspp)
/**
* dpu_hw_catalog_init - dpu hardware catalog init API retrieves
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
index 831e5f7a9b7f..613ae8f0cfcd 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
@@ -272,6 +272,31 @@ static int dpu_hw_ctl_active_get_bitmask_intf(struct dpu_hw_ctl *ctx,
return 0;
}
+static uint32_t dpu_hw_ctl_get_bitmask_dspp(struct dpu_hw_ctl *ctx,
+ enum dpu_dspp dspp)
+{
+ uint32_t flushbits = 0;
+
+ switch (dspp) {
+ case DSPP_0:
+ flushbits = BIT(13);
+ break;
+ case DSPP_1:
+ flushbits = BIT(14);
+ break;
+ case DSPP_2:
+ flushbits = BIT(15);
+ break;
+ case DSPP_3:
+ flushbits = BIT(21);
+ break;
+ default:
+ return 0;
+ }
+
+ return flushbits;
+}
+
static u32 dpu_hw_ctl_poll_reset_status(struct dpu_hw_ctl *ctx, u32 timeout_us)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
@@ -548,6 +573,7 @@ static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
ops->setup_blendstage = dpu_hw_ctl_setup_blendstage;
ops->get_bitmask_sspp = dpu_hw_ctl_get_bitmask_sspp;
ops->get_bitmask_mixer = dpu_hw_ctl_get_bitmask_mixer;
+ ops->get_bitmask_dspp = dpu_hw_ctl_get_bitmask_dspp;
};
static struct dpu_hw_blk_ops dpu_hw_ops;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
index 09e1263c72e2..ec579b470a80 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
@@ -139,6 +139,9 @@ struct dpu_hw_ctl_ops {
uint32_t (*get_bitmask_mixer)(struct dpu_hw_ctl *ctx,
enum dpu_lm blk);
+ uint32_t (*get_bitmask_dspp)(struct dpu_hw_ctl *ctx,
+ enum dpu_dspp blk);
+
/**
* Query the value of the intf flush mask
* No effect on hardware
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
new file mode 100644
index 000000000000..a7a24539921f
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_lm.h"
+#include "dpu_hw_dspp.h"
+#include "dpu_kms.h"
+
+
+/* DSPP_PCC */
+#define PCC_EN BIT(0)
+#define PCC_DIS 0
+#define PCC_RED_R_OFF 0x10
+#define PCC_RED_G_OFF 0x1C
+#define PCC_RED_B_OFF 0x28
+#define PCC_GREEN_R_OFF 0x14
+#define PCC_GREEN_G_OFF 0x20
+#define PCC_GREEN_B_OFF 0x2C
+#define PCC_BLUE_R_OFF 0x18
+#define PCC_BLUE_G_OFF 0x24
+#define PCC_BLUE_B_OFF 0x30
+
+static void dpu_setup_dspp_pcc(struct dpu_hw_dspp *ctx,
+ struct dpu_hw_pcc_cfg *cfg)
+{
+	u32 base;
+
+	if (!ctx) {
+		DRM_ERROR("invalid ctx %pK\n", ctx);
+		return;
+	}
+
+	base = ctx->cap->sblk->pcc.base;
+	if (!base) {
+		DRM_ERROR("invalid pcc base 0x%x\n", base);
+		return;
+	}
+
+ if (!cfg) {
+ DRM_DEBUG_DRIVER("disable pcc feature\n");
+ DPU_REG_WRITE(&ctx->hw, base, PCC_DIS);
+ return;
+ }
+
+ DPU_REG_WRITE(&ctx->hw, base + PCC_RED_R_OFF, cfg->r.r);
+ DPU_REG_WRITE(&ctx->hw, base + PCC_RED_G_OFF, cfg->r.g);
+ DPU_REG_WRITE(&ctx->hw, base + PCC_RED_B_OFF, cfg->r.b);
+
+ DPU_REG_WRITE(&ctx->hw, base + PCC_GREEN_R_OFF, cfg->g.r);
+ DPU_REG_WRITE(&ctx->hw, base + PCC_GREEN_G_OFF, cfg->g.g);
+ DPU_REG_WRITE(&ctx->hw, base + PCC_GREEN_B_OFF, cfg->g.b);
+
+ DPU_REG_WRITE(&ctx->hw, base + PCC_BLUE_R_OFF, cfg->b.r);
+ DPU_REG_WRITE(&ctx->hw, base + PCC_BLUE_G_OFF, cfg->b.g);
+ DPU_REG_WRITE(&ctx->hw, base + PCC_BLUE_B_OFF, cfg->b.b);
+
+ DPU_REG_WRITE(&ctx->hw, base, PCC_EN);
+}
+
+static void _setup_dspp_ops(struct dpu_hw_dspp *c,
+ unsigned long features)
+{
+ if (test_bit(DPU_DSPP_PCC, &features) &&
+ IS_SC7180_TARGET(c->hw.hwversion))
+ c->ops.setup_pcc = dpu_setup_dspp_pcc;
+}
+
+static const struct dpu_dspp_cfg *_dspp_offset(enum dpu_dspp dspp,
+ const struct dpu_mdss_cfg *m,
+ void __iomem *addr,
+ struct dpu_hw_blk_reg_map *b)
+{
+ int i;
+
+ if (!m || !addr || !b)
+ return ERR_PTR(-EINVAL);
+
+ for (i = 0; i < m->dspp_count; i++) {
+ if (dspp == m->dspp[i].id) {
+ b->base_off = addr;
+ b->blk_off = m->dspp[i].base;
+ b->length = m->dspp[i].len;
+ b->hwversion = m->hwversion;
+ b->log_mask = DPU_DBG_MASK_DSPP;
+ return &m->dspp[i];
+ }
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static struct dpu_hw_blk_ops dpu_hw_ops;
+
+struct dpu_hw_dspp *dpu_hw_dspp_init(enum dpu_dspp idx,
+ void __iomem *addr,
+ const struct dpu_mdss_cfg *m)
+{
+ struct dpu_hw_dspp *c;
+ const struct dpu_dspp_cfg *cfg;
+
+ if (!addr || !m)
+ return ERR_PTR(-EINVAL);
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _dspp_offset(idx, m, addr, &c->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(c);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Assign ops */
+ c->idx = idx;
+ c->cap = cfg;
+ _setup_dspp_ops(c, c->cap->features);
+
+ dpu_hw_blk_init(&c->base, DPU_HW_BLK_DSPP, idx, &dpu_hw_ops);
+
+ return c;
+}
+
+void dpu_hw_dspp_destroy(struct dpu_hw_dspp *dspp)
+{
+ if (dspp)
+ dpu_hw_blk_destroy(&dspp->base);
+
+ kfree(dspp);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h
new file mode 100644
index 000000000000..7fa189cfcb06
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DPU_HW_DSPP_H
+#define _DPU_HW_DSPP_H
+
+#include "dpu_hw_blk.h"
+
+struct dpu_hw_dspp;
+
+/**
+ * struct dpu_hw_pcc_coeff - PCC coefficient structure for each color
+ * component.
+ * @r: red coefficient.
+ * @g: green coefficient.
+ * @b: blue coefficient.
+ */
+
+struct dpu_hw_pcc_coeff {
+ __u32 r;
+ __u32 g;
+ __u32 b;
+};
+
+/**
+ * struct dpu_hw_pcc_cfg - pcc feature structure
+ * @r: red coefficients.
+ * @g: green coefficients.
+ * @b: blue coefficients.
+ */
+struct dpu_hw_pcc_cfg {
+ struct dpu_hw_pcc_coeff r;
+ struct dpu_hw_pcc_coeff g;
+ struct dpu_hw_pcc_coeff b;
+};
+
+/**
+ * struct dpu_hw_dspp_ops - interface to the dspp hardware driver functions
+ * Caller must call the init function to get the dspp context for each dspp
+ * Assumption is these functions will be called after clocks are enabled
+ */
+struct dpu_hw_dspp_ops {
+ /**
+ * setup_pcc - setup dspp pcc
+ * @ctx: Pointer to dspp context
+ * @cfg: Pointer to configuration
+ */
+ void (*setup_pcc)(struct dpu_hw_dspp *ctx, struct dpu_hw_pcc_cfg *cfg);
+
+};
+
+/**
+ * struct dpu_hw_dspp - dspp description
+ * @base: Hardware block base structure
+ * @hw: Block hardware details
+ * @idx: DSPP index
+ * @cap: Pointer to DSPP configuration (dpu_dspp_cfg)
+ * @ops: Pointer to operations possible for this DSPP
+ */
+struct dpu_hw_dspp {
+ struct dpu_hw_blk base;
+ struct dpu_hw_blk_reg_map hw;
+
+ /* dspp */
+ int idx;
+ const struct dpu_dspp_cfg *cap;
+
+ /* Ops */
+ struct dpu_hw_dspp_ops ops;
+};
+
+/**
+ * to_dpu_hw_dspp - convert base object dpu_hw_blk to dspp container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_dspp *to_dpu_hw_dspp(struct dpu_hw_blk *hw)
+{
+ return container_of(hw, struct dpu_hw_dspp, base);
+}
+
+/**
+ * dpu_hw_dspp_init - initializes the dspp hw driver object.
+ * should be called once before accessing every dspp.
+ * @idx: DSPP index for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * @Return: pointer to structure or ERR_PTR
+ */
+struct dpu_hw_dspp *dpu_hw_dspp_init(enum dpu_dspp idx,
+ void __iomem *addr, const struct dpu_mdss_cfg *m);
+
+/**
+ * dpu_hw_dspp_destroy(): Destroys DSPP driver context
+ * @dspp: Pointer to DSPP driver context
+ */
+void dpu_hw_dspp_destroy(struct dpu_hw_dspp *dspp);
+
+#endif /* _DPU_HW_DSPP_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
index 686882132bf6..402dc5832361 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
@@ -95,6 +95,7 @@ enum dpu_hw_blk_type {
DPU_HW_BLK_PINGPONG,
DPU_HW_BLK_INTF,
DPU_HW_BLK_WB,
+ DPU_HW_BLK_DSPP,
DPU_HW_BLK_MAX,
};
@@ -425,5 +426,6 @@ struct dpu_mdss_color {
#define DPU_DBG_MASK_TOP (1 << 7)
#define DPU_DBG_MASK_VBIF (1 << 8)
#define DPU_DBG_MASK_ROT (1 << 9)
+#define DPU_DBG_MASK_DSPP (1 << 10)
#endif /* _DPU_HW_MDSS_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index ce19f1d39367..b8615d4fe8a3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -772,29 +772,21 @@ static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
{
struct iommu_domain *domain;
struct msm_gem_address_space *aspace;
- int ret;
+ struct msm_mmu *mmu;
domain = iommu_domain_alloc(&platform_bus_type);
if (!domain)
return 0;
- domain->geometry.aperture_start = 0x1000;
- domain->geometry.aperture_end = 0xffffffff;
+ mmu = msm_iommu_new(dpu_kms->dev->dev, domain);
+ aspace = msm_gem_address_space_create(mmu, "dpu1",
+ 0x1000, 0xfffffff);
- aspace = msm_gem_address_space_create(dpu_kms->dev->dev,
- domain, "dpu1");
if (IS_ERR(aspace)) {
- iommu_domain_free(domain);
+ mmu->funcs->destroy(mmu);
return PTR_ERR(aspace);
}
- ret = aspace->mmu->funcs->attach(aspace->mmu);
- if (ret) {
- DPU_ERROR("failed to attach iommu %d\n", ret);
- msm_gem_address_space_put(aspace);
- return ret;
- }
-
dpu_kms->base.aspace = aspace;
return 0;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
index 211f5de99a44..a3b122bfb676 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
@@ -158,6 +158,7 @@ struct dpu_global_state {
uint32_t mixer_to_enc_id[LM_MAX - LM_0];
uint32_t ctl_to_enc_id[CTL_MAX - CTL_0];
uint32_t intf_to_enc_id[INTF_MAX - INTF_0];
+ uint32_t dspp_to_enc_id[DSPP_MAX - DSPP_0];
};
struct dpu_global_state
@@ -170,7 +171,7 @@ struct dpu_global_state
*
* Main debugfs documentation is located at,
*
- * Documentation/filesystems/debugfs.txt
+ * Documentation/filesystems/debugfs.rst
*
* @dpu_debugfs_setup_regset32: Initialize data for dpu_debugfs_create_regset32
* @dpu_debugfs_create_regset32: Create 32-bit register dump file
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
index 9b62451b01ee..9b2b5044e8e0 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -9,6 +9,7 @@
#include "dpu_hw_ctl.h"
#include "dpu_hw_pingpong.h"
#include "dpu_hw_intf.h"
+#include "dpu_hw_dspp.h"
#include "dpu_encoder.h"
#include "dpu_trace.h"
@@ -174,6 +175,23 @@ int dpu_rm_init(struct dpu_rm *rm,
rm->ctl_blks[ctl->id - CTL_0] = &hw->base;
}
+ for (i = 0; i < cat->dspp_count; i++) {
+ struct dpu_hw_dspp *hw;
+ const struct dpu_dspp_cfg *dspp = &cat->dspp[i];
+
+ if (dspp->id < DSPP_0 || dspp->id >= DSPP_MAX) {
+ DPU_ERROR("skip dspp %d with invalid id\n", dspp->id);
+ continue;
+ }
+ hw = dpu_hw_dspp_init(dspp->id, mmio, cat);
+ if (IS_ERR_OR_NULL(hw)) {
+ rc = PTR_ERR(hw);
+ DPU_ERROR("failed dspp object creation: err %d\n", rc);
+ goto fail;
+ }
+ rm->dspp_blks[dspp->id - DSPP_0] = &hw->base;
+ }
+
return 0;
fail:
@@ -222,12 +240,17 @@ static bool _dpu_rm_check_lm_peer(struct dpu_rm *rm, int primary_idx,
* if lm, and all other hardwired blocks connected to the lm (pp) is
* available and appropriate
* @pp_idx: output parameter, index of pingpong block attached to the layer
- * mixer in rm->pongpong_blks[].
+ * mixer in rm->pingpong_blks[].
+ * @dspp_idx: output parameter, index of dspp block attached to the layer
+ * mixer in rm->dspp_blks[].
+ * @reqs: input parameter, rm requirements for HW blocks needed in the
+ * datapath.
* @Return: true if lm matches all requirements, false otherwise
*/
static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm,
struct dpu_global_state *global_state,
- uint32_t enc_id, int lm_idx, int *pp_idx)
+ uint32_t enc_id, int lm_idx, int *pp_idx, int *dspp_idx,
+ struct dpu_rm_requirements *reqs)
{
const struct dpu_lm_cfg *lm_cfg;
int idx;
@@ -251,6 +274,23 @@ static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm,
return false;
}
*pp_idx = idx;
+
+ if (!reqs->topology.num_dspp)
+ return true;
+
+ idx = lm_cfg->dspp - DSPP_0;
+ if (idx < 0 || idx >= ARRAY_SIZE(rm->dspp_blks)) {
+ DPU_ERROR("failed to get dspp on lm %d\n", lm_cfg->dspp);
+ return false;
+ }
+
+ if (reserved_by_other(global_state->dspp_to_enc_id, idx, enc_id)) {
+ DPU_DEBUG("lm %d dspp %d already reserved\n", lm_cfg->id,
+ lm_cfg->dspp);
+ return false;
+ }
+ *dspp_idx = idx;
+
return true;
}
@@ -262,6 +302,7 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
{
int lm_idx[MAX_BLOCKS];
int pp_idx[MAX_BLOCKS];
+ int dspp_idx[MAX_BLOCKS] = {0};
int i, j, lm_count = 0;
if (!reqs->topology.num_lm) {
@@ -279,7 +320,8 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
lm_idx[lm_count] = i;
if (!_dpu_rm_check_lm_and_get_connected_blks(rm, global_state,
- enc_id, i, &pp_idx[lm_count])) {
+ enc_id, i, &pp_idx[lm_count],
+ &dspp_idx[lm_count], reqs)) {
continue;
}
@@ -299,7 +341,8 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
if (!_dpu_rm_check_lm_and_get_connected_blks(rm,
global_state, enc_id, j,
- &pp_idx[lm_count])) {
+ &pp_idx[lm_count], &dspp_idx[lm_count],
+ reqs)) {
continue;
}
@@ -316,6 +359,8 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
for (i = 0; i < lm_count; i++) {
global_state->mixer_to_enc_id[lm_idx[i]] = enc_id;
global_state->pingpong_to_enc_id[pp_idx[i]] = enc_id;
+ global_state->dspp_to_enc_id[dspp_idx[i]] =
+ reqs->topology.num_dspp ? enc_id : 0;
trace_dpu_rm_reserve_lms(lm_idx[i] + LM_0, enc_id,
pp_idx[i] + PINGPONG_0);
@@ -560,6 +605,11 @@ int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
hw_to_enc_id = global_state->intf_to_enc_id;
max_blks = ARRAY_SIZE(rm->intf_blks);
break;
+ case DPU_HW_BLK_DSPP:
+ hw_blks = rm->dspp_blks;
+ hw_to_enc_id = global_state->dspp_to_enc_id;
+ max_blks = ARRAY_SIZE(rm->dspp_blks);
+ break;
default:
DPU_ERROR("blk type %d not managed by rm\n", type);
return 0;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
index 6d2b04f306f0..08726bb1063a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
@@ -19,6 +19,7 @@ struct dpu_global_state;
* @mixer_blks: array of layer mixer hardware resources
* @ctl_blks: array of ctl hardware resources
* @intf_blks: array of intf hardware resources
+ * @dspp_blks: array of dspp hardware resources
* @lm_max_width: cached layer mixer maximum width
* @rm_lock: resource manager mutex
*/
@@ -27,6 +28,7 @@ struct dpu_rm {
struct dpu_hw_blk *mixer_blks[LM_MAX - LM_0];
struct dpu_hw_blk *ctl_blks[CTL_MAX - CTL_0];
struct dpu_hw_blk *intf_blks[INTF_MAX - INTF_0];
+ struct dpu_hw_blk *dspp_blks[DSPP_MAX - DSPP_0];
uint32_t lm_max_width;
};
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
index dda05436f716..08897184b1d9 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
@@ -510,18 +510,20 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
mdelay(16);
if (config->iommu) {
- aspace = msm_gem_address_space_create(&pdev->dev,
- config->iommu, "mdp4");
+ struct msm_mmu *mmu = msm_iommu_new(&pdev->dev,
+ config->iommu);
+
+ aspace = msm_gem_address_space_create(mmu,
+ "mdp4", 0x1000, 0xffffffff);
+
if (IS_ERR(aspace)) {
+ if (!IS_ERR(mmu))
+ mmu->funcs->destroy(mmu);
ret = PTR_ERR(aspace);
goto fail;
}
kms->aspace = aspace;
-
- ret = aspace->mmu->funcs->attach(aspace->mmu);
- if (ret)
- goto fail;
} else {
DRM_DEV_INFO(dev->dev, "no iommu, fallback to phys "
"contig buffers for scanout\n");
@@ -569,10 +571,6 @@ static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev)
/* TODO: Chips that aren't apq8064 have a 200 Mhz max_clk */
config.max_clk = 266667000;
config.iommu = iommu_domain_alloc(&platform_bus_type);
- if (config.iommu) {
- config.iommu->geometry.aperture_start = 0x1000;
- config.iommu->geometry.aperture_end = 0xffffffff;
- }
return &config;
}
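
With the address-space API change, mdp4 no longer programs the aperture into the iommu_domain geometry; the same window is passed straight to msm_gem_address_space_create() together with an explicitly created msm_mmu. A condensed sketch of the resulting pattern, taken from the hunk above (error handling kept, since msm_iommu_new() can now return an ERR_PTR):

        struct msm_mmu *mmu = msm_iommu_new(&pdev->dev, config->iommu);

        aspace = msm_gem_address_space_create(mmu, "mdp4",
                                              0x1000, 0xffffffff);
        if (IS_ERR(aspace)) {
                /* create() copes with an ERR_PTR mmu; only destroy a valid one */
                if (!IS_ERR(mmu))
                        mmu->funcs->destroy(mmu);
                return PTR_ERR(aspace);
        }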
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
index e3c4c250238b..25a13a2a57a9 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
@@ -342,6 +342,81 @@ static const struct mdp5_cfg_hw msm8x16_config = {
.max_clk = 320000000,
};
+static const struct mdp5_cfg_hw msm8x36_config = {
+ .name = "msm8x36",
+ .mdp = {
+ .count = 1,
+ .base = { 0x0 },
+ .caps = MDP_CAP_SMP |
+ 0,
+ },
+ .smp = {
+ .mmb_count = 8,
+ .mmb_size = 10240,
+ .clients = {
+ [SSPP_VIG0] = 1, [SSPP_DMA0] = 4,
+ [SSPP_RGB0] = 7, [SSPP_RGB1] = 8,
+ },
+ },
+ .ctl = {
+ .count = 3,
+ .base = { 0x01000, 0x01200, 0x01400 },
+ .flush_hw_mask = 0x4003ffff,
+ },
+ .pipe_vig = {
+ .count = 1,
+ .base = { 0x04000 },
+ .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
+ MDP_PIPE_CAP_DECIMATION,
+ },
+ .pipe_rgb = {
+ .count = 2,
+ .base = { 0x14000, 0x16000 },
+ .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_DECIMATION,
+ },
+ .pipe_dma = {
+ .count = 1,
+ .base = { 0x24000 },
+ .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP,
+ },
+ .lm = {
+ .count = 2,
+ .base = { 0x44000, 0x47000 },
+ .instances = {
+ { .id = 0, .pp = 0, .dspp = 0,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ { .id = 1, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB, },
+ },
+ .nb_stages = 8,
+ .max_width = 2560,
+ .max_height = 0xFFFF,
+ },
+ .pp = {
+ .count = 1,
+ .base = { 0x70000 },
+ },
+ .ad = {
+ .count = 1,
+ .base = { 0x78000 },
+ },
+ .dspp = {
+ .count = 1,
+ .base = { 0x54000 },
+ },
+ .intf = {
+ .base = { 0x00000, 0x6a800, 0x6b000 },
+ .connect = {
+ [0] = INTF_DISABLED,
+ [1] = INTF_DSI,
+ [2] = INTF_DSI,
+ },
+ },
+ .max_clk = 366670000,
+};
+
static const struct mdp5_cfg_hw msm8x94_config = {
.name = "msm8x94",
.mdp = {
@@ -840,6 +915,7 @@ static const struct mdp5_cfg_handler cfg_handlers_v1[] = {
{ .revision = 2, .config = { .hw = &msm8x74v2_config } },
{ .revision = 3, .config = { .hw = &apq8084_config } },
{ .revision = 6, .config = { .hw = &msm8x16_config } },
+ { .revision = 8, .config = { .hw = &msm8x36_config } },
{ .revision = 9, .config = { .hw = &msm8x94_config } },
{ .revision = 7, .config = { .hw = &msm8x96_config } },
{ .revision = 11, .config = { .hw = &msm8x76_config } },
@@ -941,10 +1017,6 @@ static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev)
static struct mdp5_cfg_platform config = {};
config.iommu = iommu_domain_alloc(&platform_bus_type);
- if (config.iommu) {
- config.iommu->geometry.aperture_start = 0x1000;
- config.iommu->geometry.aperture_end = 0xffffffff;
- }
return &config;
}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
index 998bef1190a3..b5fed67c4651 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -959,7 +959,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
if (!ctl)
return -EINVAL;
- /* don't support LM cursors when we we have source split enabled */
+ /* don't support LM cursors when we have source split enabled */
if (mdp5_cstate->pipeline.r_mixer)
return -EINVAL;
@@ -1030,7 +1030,7 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
return -EINVAL;
}
- /* don't support LM cursors when we we have source split enabled */
+ /* don't support LM cursors when we have source split enabled */
if (mdp5_cstate->pipeline.r_mixer)
return -EINVAL;
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index 47b989834af1..19ec48695ffb 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -259,17 +259,9 @@ static struct drm_info_list mdp5_debugfs_list[] = {
static int mdp5_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
{
- struct drm_device *dev = minor->dev;
- int ret;
-
- ret = drm_debugfs_create_files(mdp5_debugfs_list,
- ARRAY_SIZE(mdp5_debugfs_list),
- minor->debugfs_root, minor);
-
- if (ret) {
- DRM_DEV_ERROR(dev->dev, "could not install mdp5_debugfs_list\n");
- return ret;
- }
+ drm_debugfs_create_files(mdp5_debugfs_list,
+ ARRAY_SIZE(mdp5_debugfs_list),
+ minor->debugfs_root, minor);
return 0;
}
@@ -632,25 +624,25 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
mdelay(16);
if (config->platform.iommu) {
+ struct msm_mmu *mmu;
+
iommu_dev = &pdev->dev;
if (!dev_iommu_fwspec_get(iommu_dev))
iommu_dev = iommu_dev->parent;
- aspace = msm_gem_address_space_create(iommu_dev,
- config->platform.iommu, "mdp5");
+ mmu = msm_iommu_new(iommu_dev, config->platform.iommu);
+
+ aspace = msm_gem_address_space_create(mmu, "mdp5",
+ 0x1000, 0xffffffff);
+
if (IS_ERR(aspace)) {
+ if (!IS_ERR(mmu))
+ mmu->funcs->destroy(mmu);
ret = PTR_ERR(aspace);
goto fail;
}
kms->aspace = aspace;
-
- ret = aspace->mmu->funcs->attach(aspace->mmu);
- if (ret) {
- DRM_DEV_ERROR(&pdev->dev, "failed to attach iommu: %d\n",
- ret);
- goto fail;
- }
} else {
DRM_DEV_INFO(&pdev->dev,
"no iommu, fallback to phys contig buffers for scanout\n");
@@ -943,7 +935,8 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
return 0;
fail:
- mdp5_destroy(pdev);
+ if (mdp5_kms)
+ mdp5_destroy(pdev);
return ret;
}
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
index 1c74381a4fc9..ee2e270f464c 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.c
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -214,31 +214,20 @@ int msm_debugfs_late_init(struct drm_device *dev)
return ret;
}
-int msm_debugfs_init(struct drm_minor *minor)
+void msm_debugfs_init(struct drm_minor *minor)
{
struct drm_device *dev = minor->dev;
struct msm_drm_private *priv = dev->dev_private;
- int ret;
-
- ret = drm_debugfs_create_files(msm_debugfs_list,
- ARRAY_SIZE(msm_debugfs_list),
- minor->debugfs_root, minor);
- if (ret) {
- DRM_DEV_ERROR(dev->dev, "could not install msm_debugfs_list\n");
- return ret;
- }
+ drm_debugfs_create_files(msm_debugfs_list,
+ ARRAY_SIZE(msm_debugfs_list),
+ minor->debugfs_root, minor);
debugfs_create_file("gpu", S_IRUSR, minor->debugfs_root,
dev, &msm_gpu_fops);
- if (priv->kms && priv->kms->funcs->debugfs_init) {
- ret = priv->kms->funcs->debugfs_init(priv->kms, minor);
- if (ret)
- return ret;
- }
-
- return ret;
+ if (priv->kms && priv->kms->funcs->debugfs_init)
+ priv->kms->funcs->debugfs_init(priv->kms, minor);
}
#endif
diff --git a/drivers/gpu/drm/msm/msm_debugfs.h b/drivers/gpu/drm/msm/msm_debugfs.h
index 2b91f8c178ad..ef58f66abbb3 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.h
+++ b/drivers/gpu/drm/msm/msm_debugfs.h
@@ -8,7 +8,7 @@
#define __MSM_DEBUGFS_H__
#ifdef CONFIG_DEBUG_FS
-int msm_debugfs_init(struct drm_minor *minor);
+void msm_debugfs_init(struct drm_minor *minor);
#endif
#endif /* __MSM_DEBUGFS_H__ */
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 29295dee2a2e..f6ce40bf3699 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -37,9 +37,10 @@
* - 1.4.0 - softpin, MSM_RELOC_BO_DUMP, and GEM_INFO support to set/get
* GEM object's debug name
* - 1.5.0 - Add SUBMITQUERY_QUERY ioctl
+ * - 1.6.0 - Syncobj support
*/
#define MSM_VERSION_MAJOR 1
-#define MSM_VERSION_MINOR 5
+#define MSM_VERSION_MINOR 6
#define MSM_VERSION_PATCHLEVEL 0
static const struct drm_mode_config_funcs mode_config_funcs = {
@@ -1002,7 +1003,8 @@ static struct drm_driver msm_driver = {
.driver_features = DRIVER_GEM |
DRIVER_RENDER |
DRIVER_ATOMIC |
- DRIVER_MODESET,
+ DRIVER_MODESET |
+ DRIVER_SYNCOBJ,
.open = msm_open,
.postclose = msm_postclose,
.lastclose = drm_fb_helper_lastclose,
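
Advertising DRIVER_SYNCOBJ enables the core syncobj ioctls for the device, and the bump to 1.6.0 gives userspace a version to gate on. A hypothetical userspace probe, assuming libdrm's drmGetVersion() (not part of this patch):

        drmVersionPtr v = drmGetVersion(fd);
        int has_syncobj_submit = 0;

        if (v) {
                has_syncobj_submit = v->version_major > 1 ||
                        (v->version_major == 1 && v->version_minor >= 6);
                drmFreeVersion(v);
        }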
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 194d900a460e..e2d6a6056418 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -105,6 +105,7 @@ struct msm_display_topology {
u32 num_lm;
u32 num_enc;
u32 num_intf;
+ u32 num_dspp;
};
/**
@@ -236,7 +237,8 @@ int msm_crtc_enable_vblank(struct drm_crtc *crtc);
void msm_crtc_disable_vblank(struct drm_crtc *crtc);
int msm_gem_init_vma(struct msm_gem_address_space *aspace,
- struct msm_gem_vma *vma, int npages);
+ struct msm_gem_vma *vma, int npages,
+ u64 range_start, u64 range_end);
void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
struct msm_gem_vma *vma);
void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
@@ -250,12 +252,8 @@ void msm_gem_close_vma(struct msm_gem_address_space *aspace,
void msm_gem_address_space_put(struct msm_gem_address_space *aspace);
struct msm_gem_address_space *
-msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
- const char *name);
-
-struct msm_gem_address_space *
-msm_gem_address_space_create_a2xx(struct device *dev, struct msm_gpu *gpu,
- const char *name, uint64_t va_start, uint64_t va_end);
+msm_gem_address_space_create(struct msm_mmu *mmu, const char *name,
+ u64 va_start, u64 size);
int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
void msm_unregister_mmu(struct drm_device *dev, struct msm_mmu *mmu);
@@ -276,6 +274,9 @@ vm_fault_t msm_gem_fault(struct vm_fault *vmf);
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
int msm_gem_get_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace, uint64_t *iova);
+int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace, uint64_t *iova,
+ u64 range_start, u64 range_end);
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace, uint64_t *iova);
uint64_t msm_gem_iova(struct drm_gem_object *obj,
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 5a6a79fbc9d6..6277fde13df9 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -389,7 +389,8 @@ put_iova(struct drm_gem_object *obj)
}
static int msm_gem_get_iova_locked(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace, uint64_t *iova)
+ struct msm_gem_address_space *aspace, uint64_t *iova,
+ u64 range_start, u64 range_end)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_gem_vma *vma;
@@ -404,7 +405,8 @@ static int msm_gem_get_iova_locked(struct drm_gem_object *obj,
if (IS_ERR(vma))
return PTR_ERR(vma);
- ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT);
+ ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT,
+ range_start, range_end);
if (ret) {
del_vma(vma);
return ret;
@@ -426,6 +428,9 @@ static int msm_gem_pin_iova(struct drm_gem_object *obj,
if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
prot |= IOMMU_WRITE;
+ if (msm_obj->flags & MSM_BO_MAP_PRIV)
+ prot |= IOMMU_PRIV;
+
WARN_ON(!mutex_is_locked(&msm_obj->lock));
if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
@@ -443,9 +448,13 @@ static int msm_gem_pin_iova(struct drm_gem_object *obj,
msm_obj->sgt, obj->size >> PAGE_SHIFT);
}
-/* get iova and pin it. Should have a matching put */
-int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace, uint64_t *iova)
+/*
+ * get iova and pin it. Should have a matching put
+ * limits iova to specified range (in pages)
+ */
+int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace, uint64_t *iova,
+ u64 range_start, u64 range_end)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
u64 local;
@@ -453,7 +462,8 @@ int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
mutex_lock(&msm_obj->lock);
- ret = msm_gem_get_iova_locked(obj, aspace, &local);
+ ret = msm_gem_get_iova_locked(obj, aspace, &local,
+ range_start, range_end);
if (!ret)
ret = msm_gem_pin_iova(obj, aspace);
@@ -465,6 +475,13 @@ int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
return ret;
}
+/* get iova and pin it. Should have a matching put */
+int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace, uint64_t *iova)
+{
+ return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
+}
+
/*
* Get an iova but don't pin it. Doesn't need a put because iovas are currently
* valid for the life of the object
@@ -476,7 +493,7 @@ int msm_gem_get_iova(struct drm_gem_object *obj,
int ret;
mutex_lock(&msm_obj->lock);
- ret = msm_gem_get_iova_locked(obj, aspace, iova);
+ ret = msm_gem_get_iova_locked(obj, aspace, iova, 0, U64_MAX);
mutex_unlock(&msm_obj->lock);
return ret;
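
msm_gem_get_and_pin_iova() is now just the unrestricted case of the new range variant. A minimal usage sketch, assuming a caller that already holds obj and aspace (the window bounds are placeholders; per the comment above, the range is expressed in pages):

        uint64_t iova;
        int ret;

        /* old behaviour: pin anywhere in the address space */
        ret = msm_gem_get_and_pin_iova(obj, aspace, &iova);

        /* new: constrain the drm_mm search to [range_start, range_end] */
        ret = msm_gem_get_and_pin_iova_range(obj, aspace, &iova,
                                             range_start, range_end);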
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 30584eaf8cc8..972490b14ba5 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -13,6 +13,7 @@
/* Additional internal-use only BO flags: */
#define MSM_BO_STOLEN 0x10000000 /* try to use stolen/splash memory */
+#define MSM_BO_MAP_PRIV 0x20000000 /* use IOMMU_PRIV when mapping */
struct msm_gem_address_space {
const char *name;
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 385d4965a8d0..6630aa817505 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -8,7 +8,9 @@
#include <linux/sync_file.h>
#include <linux/uaccess.h>
+#include <drm/drm_drv.h>
#include <drm/drm_file.h>
+#include <drm/drm_syncobj.h>
#include "msm_drv.h"
#include "msm_gpu.h"
@@ -391,6 +393,186 @@ static void submit_cleanup(struct msm_gem_submit *submit)
}
}
+
+struct msm_submit_post_dep {
+ struct drm_syncobj *syncobj;
+ uint64_t point;
+ struct dma_fence_chain *chain;
+};
+
+static struct drm_syncobj **msm_wait_deps(struct drm_device *dev,
+ struct drm_file *file,
+ uint64_t in_syncobjs_addr,
+ uint32_t nr_in_syncobjs,
+ size_t syncobj_stride,
+ struct msm_ringbuffer *ring)
+{
+ struct drm_syncobj **syncobjs = NULL;
+ struct drm_msm_gem_submit_syncobj syncobj_desc = {0};
+ int ret = 0;
+ uint32_t i, j;
+
+ syncobjs = kcalloc(nr_in_syncobjs, sizeof(*syncobjs),
+ GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
+ if (!syncobjs)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < nr_in_syncobjs; ++i) {
+ uint64_t address = in_syncobjs_addr + i * syncobj_stride;
+ struct dma_fence *fence;
+
+ if (copy_from_user(&syncobj_desc,
+ u64_to_user_ptr(address),
+ min(syncobj_stride, sizeof(syncobj_desc)))) {
+ ret = -EFAULT;
+ break;
+ }
+
+ if (syncobj_desc.point &&
+ !drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE)) {
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ if (syncobj_desc.flags & ~MSM_SUBMIT_SYNCOBJ_FLAGS) {
+ ret = -EINVAL;
+ break;
+ }
+
+ ret = drm_syncobj_find_fence(file, syncobj_desc.handle,
+ syncobj_desc.point, 0, &fence);
+ if (ret)
+ break;
+
+ if (!dma_fence_match_context(fence, ring->fctx->context))
+ ret = dma_fence_wait(fence, true);
+
+ dma_fence_put(fence);
+ if (ret)
+ break;
+
+ if (syncobj_desc.flags & MSM_SUBMIT_SYNCOBJ_RESET) {
+ syncobjs[i] =
+ drm_syncobj_find(file, syncobj_desc.handle);
+ if (!syncobjs[i]) {
+ ret = -EINVAL;
+ break;
+ }
+ }
+ }
+
+ if (ret) {
+ for (j = 0; j <= i; ++j) {
+ if (syncobjs[j])
+ drm_syncobj_put(syncobjs[j]);
+ }
+ kfree(syncobjs);
+ return ERR_PTR(ret);
+ }
+ return syncobjs;
+}
+
+static void msm_reset_syncobjs(struct drm_syncobj **syncobjs,
+ uint32_t nr_syncobjs)
+{
+ uint32_t i;
+
+ for (i = 0; syncobjs && i < nr_syncobjs; ++i) {
+ if (syncobjs[i])
+ drm_syncobj_replace_fence(syncobjs[i], NULL);
+ }
+}
+
+static struct msm_submit_post_dep *msm_parse_post_deps(struct drm_device *dev,
+ struct drm_file *file,
+ uint64_t syncobjs_addr,
+ uint32_t nr_syncobjs,
+ size_t syncobj_stride)
+{
+ struct msm_submit_post_dep *post_deps;
+ struct drm_msm_gem_submit_syncobj syncobj_desc = {0};
+ int ret = 0;
+ uint32_t i, j;
+
+ post_deps = kmalloc_array(nr_syncobjs, sizeof(*post_deps),
+ GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
+ if (!post_deps)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < nr_syncobjs; ++i) {
+ uint64_t address = syncobjs_addr + i * syncobj_stride;
+
+ if (copy_from_user(&syncobj_desc,
+ u64_to_user_ptr(address),
+ min(syncobj_stride, sizeof(syncobj_desc)))) {
+ ret = -EFAULT;
+ break;
+ }
+
+ post_deps[i].point = syncobj_desc.point;
+ post_deps[i].chain = NULL;
+
+ if (syncobj_desc.flags) {
+ ret = -EINVAL;
+ break;
+ }
+
+ if (syncobj_desc.point) {
+ if (!drm_core_check_feature(dev,
+ DRIVER_SYNCOBJ_TIMELINE)) {
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ post_deps[i].chain =
+ kmalloc(sizeof(*post_deps[i].chain),
+ GFP_KERNEL);
+ if (!post_deps[i].chain) {
+ ret = -ENOMEM;
+ break;
+ }
+ }
+
+ post_deps[i].syncobj =
+ drm_syncobj_find(file, syncobj_desc.handle);
+ if (!post_deps[i].syncobj) {
+ ret = -EINVAL;
+ break;
+ }
+ }
+
+ if (ret) {
+ for (j = 0; j <= i; ++j) {
+ kfree(post_deps[j].chain);
+ if (post_deps[j].syncobj)
+ drm_syncobj_put(post_deps[j].syncobj);
+ }
+
+ kfree(post_deps);
+ return ERR_PTR(ret);
+ }
+
+ return post_deps;
+}
+
+static void msm_process_post_deps(struct msm_submit_post_dep *post_deps,
+ uint32_t count, struct dma_fence *fence)
+{
+ uint32_t i;
+
+ for (i = 0; post_deps && i < count; ++i) {
+ if (post_deps[i].chain) {
+ drm_syncobj_add_point(post_deps[i].syncobj,
+ post_deps[i].chain,
+ fence, post_deps[i].point);
+ post_deps[i].chain = NULL;
+ } else {
+ drm_syncobj_replace_fence(post_deps[i].syncobj,
+ fence);
+ }
+ }
+}
+
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_file *file)
{
@@ -403,6 +585,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct sync_file *sync_file = NULL;
struct msm_gpu_submitqueue *queue;
struct msm_ringbuffer *ring;
+ struct msm_submit_post_dep *post_deps = NULL;
+ struct drm_syncobj **syncobjs_to_reset = NULL;
int out_fence_fd = -1;
struct pid *pid = get_pid(task_pid(current));
bool has_ww_ticket = false;
@@ -411,6 +595,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (!gpu)
return -ENXIO;
+ if (args->pad)
+ return -EINVAL;
+
/* for now, we just have 3d pipe.. eventually this would need to
* be more clever to dispatch to appropriate gpu module:
*/
@@ -458,9 +645,29 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
return ret;
}
+ if (args->flags & MSM_SUBMIT_SYNCOBJ_IN) {
+ syncobjs_to_reset = msm_wait_deps(dev, file,
+ args->in_syncobjs,
+ args->nr_in_syncobjs,
+ args->syncobj_stride, ring);
+ if (IS_ERR(syncobjs_to_reset))
+ return PTR_ERR(syncobjs_to_reset);
+ }
+
+ if (args->flags & MSM_SUBMIT_SYNCOBJ_OUT) {
+ post_deps = msm_parse_post_deps(dev, file,
+ args->out_syncobjs,
+ args->nr_out_syncobjs,
+ args->syncobj_stride);
+ if (IS_ERR(post_deps)) {
+ ret = PTR_ERR(post_deps);
+ goto out_post_unlock;
+ }
+ }
+
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
- return ret;
+ goto out_post_unlock;
if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
@@ -587,6 +794,11 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
args->fence_fd = out_fence_fd;
}
+ msm_reset_syncobjs(syncobjs_to_reset, args->nr_in_syncobjs);
+ msm_process_post_deps(post_deps, args->nr_out_syncobjs,
+ submit->fence);
+
+
out:
submit_cleanup(submit);
if (has_ww_ticket)
@@ -597,5 +809,23 @@ out_unlock:
if (ret && (out_fence_fd >= 0))
put_unused_fd(out_fence_fd);
mutex_unlock(&dev->struct_mutex);
+
+out_post_unlock:
+ if (!IS_ERR_OR_NULL(post_deps)) {
+ for (i = 0; i < args->nr_out_syncobjs; ++i) {
+ kfree(post_deps[i].chain);
+ drm_syncobj_put(post_deps[i].syncobj);
+ }
+ kfree(post_deps);
+ }
+
+ if (!IS_ERR_OR_NULL(syncobjs_to_reset)) {
+ for (i = 0; i < args->nr_in_syncobjs; ++i) {
+ if (syncobjs_to_reset[i])
+ drm_syncobj_put(syncobjs_to_reset[i]);
+ }
+ kfree(syncobjs_to_reset);
+ }
+
return ret;
}
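
The ioctl now brackets the submit with the syncobj helpers added above: input syncobjs are waited on (and optionally collected for reset) before any submit state is built, output syncobjs are parsed up front so failures happen before a fence exists, and both lists are resolved once the submit fence is available. A condensed sketch of that ordering (not the full function body):

        if (args->flags & MSM_SUBMIT_SYNCOBJ_IN)
                syncobjs_to_reset = msm_wait_deps(dev, file, args->in_syncobjs,
                                                  args->nr_in_syncobjs,
                                                  args->syncobj_stride, ring);

        if (args->flags & MSM_SUBMIT_SYNCOBJ_OUT)
                post_deps = msm_parse_post_deps(dev, file, args->out_syncobjs,
                                                args->nr_out_syncobjs,
                                                args->syncobj_stride);

        /* ... lock, build and queue the submit, obtaining submit->fence ... */

        msm_reset_syncobjs(syncobjs_to_reset, args->nr_in_syncobjs);
        msm_process_post_deps(post_deps, args->nr_out_syncobjs, submit->fence);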
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index 1af5354bcd46..5f6a11211b64 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -103,7 +103,8 @@ void msm_gem_close_vma(struct msm_gem_address_space *aspace,
/* Initialize a new vma and allocate an iova for it */
int msm_gem_init_vma(struct msm_gem_address_space *aspace,
- struct msm_gem_vma *vma, int npages)
+ struct msm_gem_vma *vma, int npages,
+ u64 range_start, u64 range_end)
{
int ret;
@@ -111,7 +112,8 @@ int msm_gem_init_vma(struct msm_gem_address_space *aspace,
return -EBUSY;
spin_lock(&aspace->lock);
- ret = drm_mm_insert_node(&aspace->mm, &vma->node, npages);
+ ret = drm_mm_insert_node_in_range(&aspace->mm, &vma->node, npages, 0,
+ 0, range_start, range_end, 0);
spin_unlock(&aspace->lock);
if (ret)
@@ -125,37 +127,14 @@ int msm_gem_init_vma(struct msm_gem_address_space *aspace,
return 0;
}
-
struct msm_gem_address_space *
-msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
- const char *name)
+msm_gem_address_space_create(struct msm_mmu *mmu, const char *name,
+ u64 va_start, u64 size)
{
struct msm_gem_address_space *aspace;
- u64 size = domain->geometry.aperture_end -
- domain->geometry.aperture_start;
-
- aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
- if (!aspace)
- return ERR_PTR(-ENOMEM);
-
- spin_lock_init(&aspace->lock);
- aspace->name = name;
- aspace->mmu = msm_iommu_new(dev, domain);
-
- drm_mm_init(&aspace->mm, (domain->geometry.aperture_start >> PAGE_SHIFT),
- size >> PAGE_SHIFT);
- kref_init(&aspace->kref);
-
- return aspace;
-}
-
-struct msm_gem_address_space *
-msm_gem_address_space_create_a2xx(struct device *dev, struct msm_gpu *gpu,
- const char *name, uint64_t va_start, uint64_t va_end)
-{
- struct msm_gem_address_space *aspace;
- u64 size = va_end - va_start;
+ if (IS_ERR(mmu))
+ return ERR_CAST(mmu);
aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
if (!aspace)
@@ -163,10 +142,9 @@ msm_gem_address_space_create_a2xx(struct device *dev, struct msm_gpu *gpu,
spin_lock_init(&aspace->lock);
aspace->name = name;
- aspace->mmu = msm_gpummu_new(dev, gpu);
+ aspace->mmu = mmu;
- drm_mm_init(&aspace->mm, (va_start >> PAGE_SHIFT),
- size >> PAGE_SHIFT);
+ drm_mm_init(&aspace->mm, va_start >> PAGE_SHIFT, size >> PAGE_SHIFT);
kref_init(&aspace->kref);
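
Folding the a2xx-specific constructor into the generic one means the caller now chooses the msm_mmu implementation and msm_gem_address_space_create() only validates it (returning ERR_CAST() for an ERR_PTR mmu). A rough sketch of how a GPU path can pick between the two backends, with the aperture passed explicitly (va_start and size are placeholders; in this series the per-GPU create_address_space hook added below is where the choice actually lives):

        struct msm_mmu *mmu;

        if (adreno_is_a2xx(to_adreno_gpu(gpu)))
                mmu = msm_gpummu_new(&pdev->dev, gpu);
        else
                mmu = msm_iommu_new(&pdev->dev,
                                    iommu_domain_alloc(&platform_bus_type));

        gpu->aspace = msm_gem_address_space_create(mmu, "gpu",
                                                   va_start, size);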
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 615c5cda5389..a22d30622306 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -821,51 +821,6 @@ static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
return 0;
}
-static struct msm_gem_address_space *
-msm_gpu_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev,
- uint64_t va_start, uint64_t va_end)
-{
- struct msm_gem_address_space *aspace;
- int ret;
-
- /*
- * Setup IOMMU.. eventually we will (I think) do this once per context
- * and have separate page tables per context. For now, to keep things
- * simple and to get something working, just use a single address space:
- */
- if (!adreno_is_a2xx(to_adreno_gpu(gpu))) {
- struct iommu_domain *iommu = iommu_domain_alloc(&platform_bus_type);
- if (!iommu)
- return NULL;
-
- iommu->geometry.aperture_start = va_start;
- iommu->geometry.aperture_end = va_end;
-
- DRM_DEV_INFO(gpu->dev->dev, "%s: using IOMMU\n", gpu->name);
-
- aspace = msm_gem_address_space_create(&pdev->dev, iommu, "gpu");
- if (IS_ERR(aspace))
- iommu_domain_free(iommu);
- } else {
- aspace = msm_gem_address_space_create_a2xx(&pdev->dev, gpu, "gpu",
- va_start, va_end);
- }
-
- if (IS_ERR(aspace)) {
- DRM_DEV_ERROR(gpu->dev->dev, "failed to init mmu: %ld\n",
- PTR_ERR(aspace));
- return ERR_CAST(aspace);
- }
-
- ret = aspace->mmu->funcs->attach(aspace->mmu);
- if (ret) {
- msm_gem_address_space_put(aspace);
- return ERR_PTR(ret);
- }
-
- return aspace;
-}
-
int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
const char *name, struct msm_gpu_config *config)
@@ -938,8 +893,8 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
msm_devfreq_init(gpu);
- gpu->aspace = msm_gpu_create_address_space(gpu, pdev,
- config->va_start, config->va_end);
+
+ gpu->aspace = gpu->funcs->create_address_space(gpu, pdev);
if (gpu->aspace == NULL)
DRM_DEV_INFO(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index be5bc2e8425c..429cb40f7931 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -21,8 +21,6 @@ struct msm_gpu_state;
struct msm_gpu_config {
const char *ioname;
- uint64_t va_start;
- uint64_t va_end;
unsigned int nr_rings;
};
@@ -57,13 +55,15 @@ struct msm_gpu_funcs {
void (*show)(struct msm_gpu *gpu, struct msm_gpu_state *state,
struct drm_printer *p);
/* for generation specific debugfs: */
- int (*debugfs_init)(struct msm_gpu *gpu, struct drm_minor *minor);
+ void (*debugfs_init)(struct msm_gpu *gpu, struct drm_minor *minor);
#endif
unsigned long (*gpu_busy)(struct msm_gpu *gpu);
struct msm_gpu_state *(*gpu_state_get)(struct msm_gpu *gpu);
int (*gpu_state_put)(struct msm_gpu_state *state);
unsigned long (*gpu_get_freq)(struct msm_gpu *gpu);
void (*gpu_set_freq)(struct msm_gpu *gpu, unsigned long freq);
+ struct msm_gem_address_space *(*create_address_space)
+ (struct msm_gpu *gpu, struct platform_device *pdev);
};
struct msm_gpu {
diff --git a/drivers/gpu/drm/msm/msm_gpummu.c b/drivers/gpu/drm/msm/msm_gpummu.c
index 34980d8eb7ad..310a31b05faa 100644
--- a/drivers/gpu/drm/msm/msm_gpummu.c
+++ b/drivers/gpu/drm/msm/msm_gpummu.c
@@ -21,17 +21,12 @@ struct msm_gpummu {
#define GPUMMU_PAGE_SIZE SZ_4K
#define TABLE_SIZE (sizeof(uint32_t) * GPUMMU_VA_RANGE / GPUMMU_PAGE_SIZE)
-static int msm_gpummu_attach(struct msm_mmu *mmu)
-{
- return 0;
-}
-
static void msm_gpummu_detach(struct msm_mmu *mmu)
{
}
static int msm_gpummu_map(struct msm_mmu *mmu, uint64_t iova,
- struct sg_table *sgt, unsigned len, int prot)
+ struct sg_table *sgt, size_t len, int prot)
{
struct msm_gpummu *gpummu = to_msm_gpummu(mmu);
unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE;
@@ -59,7 +54,7 @@ static int msm_gpummu_map(struct msm_mmu *mmu, uint64_t iova,
return 0;
}
-static int msm_gpummu_unmap(struct msm_mmu *mmu, uint64_t iova, unsigned len)
+static int msm_gpummu_unmap(struct msm_mmu *mmu, uint64_t iova, size_t len)
{
struct msm_gpummu *gpummu = to_msm_gpummu(mmu);
unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE;
@@ -85,7 +80,6 @@ static void msm_gpummu_destroy(struct msm_mmu *mmu)
}
static const struct msm_mmu_funcs funcs = {
- .attach = msm_gpummu_attach,
.detach = msm_gpummu_detach,
.map = msm_gpummu_map,
.unmap = msm_gpummu_unmap,
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index ad58cfe5998e..3a381a9674c9 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -23,13 +23,6 @@ static int msm_fault_handler(struct iommu_domain *domain, struct device *dev,
return 0;
}
-static int msm_iommu_attach(struct msm_mmu *mmu)
-{
- struct msm_iommu *iommu = to_msm_iommu(mmu);
-
- return iommu_attach_device(iommu->domain, mmu->dev);
-}
-
static void msm_iommu_detach(struct msm_mmu *mmu)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
@@ -38,7 +31,7 @@ static void msm_iommu_detach(struct msm_mmu *mmu)
}
static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
- struct sg_table *sgt, unsigned len, int prot)
+ struct sg_table *sgt, size_t len, int prot)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
size_t ret;
@@ -49,7 +42,7 @@ static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
return (ret == len) ? 0 : -EINVAL;
}
-static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova, unsigned len)
+static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova, size_t len)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
@@ -66,7 +59,6 @@ static void msm_iommu_destroy(struct msm_mmu *mmu)
}
static const struct msm_mmu_funcs funcs = {
- .attach = msm_iommu_attach,
.detach = msm_iommu_detach,
.map = msm_iommu_map,
.unmap = msm_iommu_unmap,
@@ -76,6 +68,10 @@ static const struct msm_mmu_funcs funcs = {
struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
{
struct msm_iommu *iommu;
+ int ret;
+
+ if (!domain)
+ return ERR_PTR(-ENODEV);
iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
if (!iommu)
@@ -85,5 +81,11 @@ struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
msm_mmu_init(&iommu->base, dev, &funcs);
iommu_set_fault_handler(domain, msm_fault_handler, iommu);
+ ret = iommu_attach_device(iommu->domain, dev);
+ if (ret) {
+ kfree(iommu);
+ return ERR_PTR(ret);
+ }
+
return &iommu->base;
}
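
With attach folded into msm_iommu_new(), there is no separate ->attach() step any more: the constructor attaches the device itself and returns an ERR_PTR on failure, so callers only check the returned pointer. A minimal sketch, assuming a valid iommu_domain (or NULL, which now yields -ENODEV):

        struct msm_mmu *mmu = msm_iommu_new(dev, domain);

        if (IS_ERR(mmu))
                return PTR_ERR(mmu);    /* NULL domain or iommu_attach_device() failure */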
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index 67a623f14319..3a534ee59bf6 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -10,11 +10,10 @@
#include <linux/iommu.h>
struct msm_mmu_funcs {
- int (*attach)(struct msm_mmu *mmu);
void (*detach)(struct msm_mmu *mmu);
int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
- unsigned len, int prot);
- int (*unmap)(struct msm_mmu *mmu, uint64_t iova, unsigned len);
+ size_t len, int prot);
+ int (*unmap)(struct msm_mmu *mmu, uint64_t iova, size_t len);
void (*destroy)(struct msm_mmu *mmu);
};
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index 732f65df5c4f..fea30e7aa9e8 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -29,8 +29,6 @@
* or shader programs (if not emitted inline in cmdstream).
*/
-#ifdef CONFIG_DEBUG_FS
-
#include <linux/circ_buf.h>
#include <linux/debugfs.h>
#include <linux/kfifo.h>
@@ -47,6 +45,8 @@ bool rd_full = false;
MODULE_PARM_DESC(rd_full, "If true, $debugfs/.../rd will snapshot all buffer contents");
module_param_named(rd_full, rd_full, bool, 0600);
+#ifdef CONFIG_DEBUG_FS
+
enum rd_sect_type {
RD_NONE,
RD_TEST, /* ascii text */
diff --git a/drivers/gpu/drm/nouveau/Kbuild b/drivers/gpu/drm/nouveau/Kbuild
index 7a62fa04272d..49e57fba4925 100644
--- a/drivers/gpu/drm/nouveau/Kbuild
+++ b/drivers/gpu/drm/nouveau/Kbuild
@@ -1,8 +1,10 @@
+NOUVEAU_PATH ?= $(srctree)
+
# SPDX-License-Identifier: MIT
-ccflags-y += -I $(srctree)/$(src)/include
-ccflags-y += -I $(srctree)/$(src)/include/nvkm
-ccflags-y += -I $(srctree)/$(src)/nvkm
-ccflags-y += -I $(srctree)/$(src)
+ccflags-y += -I $(NOUVEAU_PATH)/$(src)/include
+ccflags-y += -I $(NOUVEAU_PATH)/$(src)/include/nvkm
+ccflags-y += -I $(NOUVEAU_PATH)/$(src)/nvkm
+ccflags-y += -I $(NOUVEAU_PATH)/$(src)
# NVKM - HW resource manager
#- code also used by various userspace tools/tests
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 1f08de4241e0..2de589caf508 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -605,15 +605,16 @@ static int
nv_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
{
struct nv04_display *disp = nv04_display(crtc->dev);
- struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->primary->fb);
+ struct drm_framebuffer *fb = crtc->primary->fb;
+ struct nouveau_bo *nvbo = nouveau_gem_object(fb->obj[0]);
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
int ret;
- ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM, false);
+ ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, false);
if (ret == 0) {
if (disp->image[nv_crtc->index])
nouveau_bo_unpin(disp->image[nv_crtc->index]);
- nouveau_bo_ref(nvfb->nvbo, &disp->image[nv_crtc->index]);
+ nouveau_bo_ref(nvbo, &disp->image[nv_crtc->index]);
}
return ret;
@@ -822,8 +823,8 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index];
+ struct nouveau_bo *nvbo;
struct drm_framebuffer *drm_fb;
- struct nouveau_framebuffer *fb;
int arb_burst, arb_lwm;
NV_DEBUG(drm, "index %d\n", nv_crtc->index);
@@ -839,13 +840,12 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
*/
if (atomic) {
drm_fb = passed_fb;
- fb = nouveau_framebuffer(passed_fb);
} else {
drm_fb = crtc->primary->fb;
- fb = nouveau_framebuffer(crtc->primary->fb);
}
- nv_crtc->fb.offset = fb->nvbo->bo.offset;
+ nvbo = nouveau_gem_object(drm_fb->obj[0]);
+ nv_crtc->fb.offset = nvbo->bo.offset;
if (nv_crtc->lut.depth != drm_fb->format->depth) {
nv_crtc->lut.depth = drm_fb->format->depth;
@@ -1143,8 +1143,9 @@ nv04_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
const int swap_interval = (flags & DRM_MODE_PAGE_FLIP_ASYNC) ? 0 : 1;
struct drm_device *dev = crtc->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_bo *old_bo = nouveau_framebuffer(crtc->primary->fb)->nvbo;
- struct nouveau_bo *new_bo = nouveau_framebuffer(fb)->nvbo;
+ struct drm_framebuffer *old_fb = crtc->primary->fb;
+ struct nouveau_bo *old_bo = nouveau_gem_object(old_fb->obj[0]);
+ struct nouveau_bo *new_bo = nouveau_gem_object(fb->obj[0]);
struct nv04_page_flip_state *s;
struct nouveau_channel *chan;
struct nouveau_cli *cli;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index 44ee82d0c9b6..0f4ebefed1fd 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -30,6 +30,7 @@
#include "nouveau_encoder.h"
#include "nouveau_connector.h"
#include "nouveau_bo.h"
+#include "nouveau_gem.h"
#include <nvif/if0004.h>
@@ -52,13 +53,13 @@ nv04_display_fini(struct drm_device *dev, bool suspend)
/* Un-pin FB and cursors so they'll be evicted to system memory. */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct nouveau_framebuffer *nouveau_fb;
+ struct drm_framebuffer *fb = crtc->primary->fb;
+ struct nouveau_bo *nvbo;
- nouveau_fb = nouveau_framebuffer(crtc->primary->fb);
- if (!nouveau_fb || !nouveau_fb->nvbo)
+ if (!fb || !fb->obj[0])
continue;
-
- nouveau_bo_unpin(nouveau_fb->nvbo);
+ nvbo = nouveau_gem_object(fb->obj[0]);
+ nouveau_bo_unpin(nvbo);
}
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -104,13 +105,13 @@ nv04_display_init(struct drm_device *dev, bool resume, bool runtime)
/* Re-pin FB/cursors. */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct nouveau_framebuffer *nouveau_fb;
+ struct drm_framebuffer *fb = crtc->primary->fb;
+ struct nouveau_bo *nvbo;
- nouveau_fb = nouveau_framebuffer(crtc->primary->fb);
- if (!nouveau_fb || !nouveau_fb->nvbo)
+ if (!fb || !fb->obj[0])
continue;
-
- ret = nouveau_bo_pin(nouveau_fb->nvbo, TTM_PL_FLAG_VRAM, true);
+ nvbo = nouveau_gem_object(fb->obj[0]);
+ ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, true);
if (ret)
NV_ERROR(drm, "Could not pin framebuffer\n");
}
diff --git a/drivers/gpu/drm/nouveau/dispnv04/overlay.c b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
index a3a0a73ae8ab..6248fd1dbc6d 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/overlay.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
@@ -31,6 +31,7 @@
#include "nouveau_bo.h"
#include "nouveau_connector.h"
#include "nouveau_display.h"
+#include "nouveau_gem.h"
#include "nvreg.h"
#include "disp.h"
@@ -120,9 +121,9 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct nvif_object *dev = &drm->client.device.object;
struct nouveau_plane *nv_plane =
container_of(plane, struct nouveau_plane, base);
- struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct nouveau_bo *cur = nv_plane->cur;
+ struct nouveau_bo *nvbo;
bool flip = nv_plane->flip;
int soff = NV_PCRTC0_SIZE * nv_crtc->index;
int soff2 = NV_PCRTC0_SIZE * !nv_crtc->index;
@@ -140,17 +141,18 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
if (ret)
return ret;
- ret = nouveau_bo_pin(nv_fb->nvbo, TTM_PL_FLAG_VRAM, false);
+ nvbo = nouveau_gem_object(fb->obj[0]);
+ ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, false);
if (ret)
return ret;
- nv_plane->cur = nv_fb->nvbo;
+ nv_plane->cur = nvbo;
nvif_mask(dev, NV_PCRTC_ENGINE_CTRL + soff, NV_CRTC_FSEL_OVERLAY, NV_CRTC_FSEL_OVERLAY);
nvif_mask(dev, NV_PCRTC_ENGINE_CTRL + soff2, NV_CRTC_FSEL_OVERLAY, 0);
nvif_wr32(dev, NV_PVIDEO_BASE(flip), 0);
- nvif_wr32(dev, NV_PVIDEO_OFFSET_BUFF(flip), nv_fb->nvbo->bo.offset);
+ nvif_wr32(dev, NV_PVIDEO_OFFSET_BUFF(flip), nvbo->bo.offset);
nvif_wr32(dev, NV_PVIDEO_SIZE_IN(flip), src_h << 16 | src_w);
nvif_wr32(dev, NV_PVIDEO_POINT_IN(flip), src_y << 16 | src_x);
nvif_wr32(dev, NV_PVIDEO_DS_DX(flip), (src_w << 20) / crtc_w);
@@ -172,7 +174,7 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
if (format & NV_PVIDEO_FORMAT_PLANAR) {
nvif_wr32(dev, NV_PVIDEO_UVPLANE_BASE(flip), 0);
nvif_wr32(dev, NV_PVIDEO_UVPLANE_OFFSET_BUFF(flip),
- nv_fb->nvbo->bo.offset + fb->offsets[1]);
+ nvbo->bo.offset + fb->offsets[1]);
}
nvif_wr32(dev, NV_PVIDEO_FORMAT(flip), format | fb->pitches[0]);
nvif_wr32(dev, NV_PVIDEO_STOP, 0);
@@ -368,8 +370,8 @@ nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct nvif_object *dev = &nouveau_drm(plane->dev)->client.device.object;
struct nouveau_plane *nv_plane =
container_of(plane, struct nouveau_plane, base);
- struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
struct nouveau_bo *cur = nv_plane->cur;
+ struct nouveau_bo *nvbo;
uint32_t overlay = 1;
int brightness = (nv_plane->brightness - 512) * 62 / 512;
int ret, i;
@@ -384,11 +386,12 @@ nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
if (ret)
return ret;
- ret = nouveau_bo_pin(nv_fb->nvbo, TTM_PL_FLAG_VRAM, false);
+ nvbo = nouveau_gem_object(fb->obj[0]);
+ ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, false);
if (ret)
return ret;
- nv_plane->cur = nv_fb->nvbo;
+ nv_plane->cur = nvbo;
nvif_wr32(dev, NV_PVIDEO_OE_STATE, 0);
nvif_wr32(dev, NV_PVIDEO_SU_STATE, 0);
@@ -396,7 +399,7 @@ nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
for (i = 0; i < 2; i++) {
nvif_wr32(dev, NV_PVIDEO_BUFF0_START_ADDRESS + 4 * i,
- nv_fb->nvbo->bo.offset);
+ nvbo->bo.offset);
nvif_wr32(dev, NV_PVIDEO_BUFF0_PITCH_LENGTH + 4 * i,
fb->pitches[0]);
nvif_wr32(dev, NV_PVIDEO_BUFF0_OFFSET + 4 * i, 0);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base507c.c b/drivers/gpu/drm/nouveau/dispnv50/base507c.c
index ee782151d332..511258bfbcbc 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base507c.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/base507c.c
@@ -263,7 +263,8 @@ base507c_new_(const struct nv50_wndw_func *func, const u32 *format,
struct nv50_disp_base_channel_dma_v0 args = {
.head = head,
};
- struct nv50_disp *disp = nv50_disp(drm->dev);
+ struct nouveau_display *disp = nouveau_display(drm->dev);
+ struct nv50_disp *disp50 = nv50_disp(drm->dev);
struct nv50_wndw *wndw;
int ret;
@@ -273,9 +274,9 @@ base507c_new_(const struct nv50_wndw_func *func, const u32 *format,
if (*pwndw = wndw, ret)
return ret;
- ret = nv50_dmac_create(&drm->client.device, &disp->disp->object,
+ ret = nv50_dmac_create(&drm->client.device, &disp->disp.object,
&oclass, head, &args, sizeof(args),
- disp->sync->bo.offset, &wndw->wndw);
+ disp50->sync->bo.offset, &wndw->wndw);
if (ret) {
NV_ERROR(drm, "base%04x allocation failed: %d\n", oclass, ret);
return ret;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core.h b/drivers/gpu/drm/nouveau/dispnv50/core.h
index ff94f3f6f264..99157dc94d23 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/core.h
@@ -2,6 +2,7 @@
#define __NV50_KMS_CORE_H__
#include "disp.h"
#include "atom.h"
+#include <nouveau_encoder.h>
struct nv50_core {
const struct nv50_core_func *func;
@@ -15,6 +16,7 @@ void nv50_core_del(struct nv50_core **);
struct nv50_core_func {
void (*init)(struct nv50_core *);
void (*ntfy_init)(struct nouveau_bo *, u32 offset);
+ int (*caps_init)(struct nouveau_drm *, struct nv50_disp *);
int (*ntfy_wait_done)(struct nouveau_bo *, u32 offset,
struct nvif_device *);
void (*update)(struct nv50_core *, u32 *interlock, bool ntfy);
@@ -27,6 +29,9 @@ struct nv50_core_func {
const struct nv50_outp_func {
void (*ctrl)(struct nv50_core *, int or, u32 ctrl,
struct nv50_head_atom *);
+ /* XXX: Only used by SORs and PIORs for now */
+ void (*get_caps)(struct nv50_disp *,
+ struct nouveau_encoder *, int or);
} *dac, *pior, *sor;
};
@@ -35,6 +40,7 @@ int core507d_new_(const struct nv50_core_func *, struct nouveau_drm *, s32,
struct nv50_core **);
void core507d_init(struct nv50_core *);
void core507d_ntfy_init(struct nouveau_bo *, u32);
+int core507d_caps_init(struct nouveau_drm *, struct nv50_disp *);
int core507d_ntfy_wait_done(struct nouveau_bo *, u32, struct nvif_device *);
void core507d_update(struct nv50_core *, u32 *, bool);
@@ -51,6 +57,7 @@ extern const struct nv50_outp_func sor907d;
int core917d_new(struct nouveau_drm *, s32, struct nv50_core **);
int corec37d_new(struct nouveau_drm *, s32, struct nv50_core **);
+int corec37d_caps_init(struct nouveau_drm *, struct nv50_disp *);
int corec37d_ntfy_wait_done(struct nouveau_bo *, u32, struct nvif_device *);
void corec37d_update(struct nv50_core *, u32 *, bool);
void corec37d_wndw_owner(struct nv50_core *);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core507d.c b/drivers/gpu/drm/nouveau/dispnv50/core507d.c
index c5152c39c684..e341f572c269 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core507d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/core507d.c
@@ -62,6 +62,20 @@ core507d_ntfy_init(struct nouveau_bo *bo, u32 offset)
nouveau_bo_wr32(bo, offset / 4, 0x00000000);
}
+int
+core507d_caps_init(struct nouveau_drm *drm, struct nv50_disp *disp)
+{
+ u32 *push = evo_wait(&disp->core->chan, 2);
+
+ if (push) {
+ evo_mthd(push, 0x008c, 1);
+ evo_data(push, 0x0);
+ evo_kick(push, &disp->core->chan);
+ }
+
+ return 0;
+}
+
void
core507d_init(struct nv50_core *core)
{
@@ -77,6 +91,7 @@ static const struct nv50_core_func
core507d = {
.init = core507d_init,
.ntfy_init = core507d_ntfy_init,
+ .caps_init = core507d_caps_init,
.ntfy_wait_done = core507d_ntfy_wait_done,
.update = core507d_update,
.head = &head507d,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core827d.c b/drivers/gpu/drm/nouveau/dispnv50/core827d.c
index 6123a068f836..2e0c1c536afe 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core827d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/core827d.c
@@ -26,6 +26,7 @@ static const struct nv50_core_func
core827d = {
.init = core507d_init,
.ntfy_init = core507d_ntfy_init,
+ .caps_init = core507d_caps_init,
.ntfy_wait_done = core507d_ntfy_wait_done,
.update = core507d_update,
.head = &head827d,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core907d.c b/drivers/gpu/drm/nouveau/dispnv50/core907d.c
index ef822f813435..271629832629 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core907d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/core907d.c
@@ -26,6 +26,7 @@ static const struct nv50_core_func
core907d = {
.init = core507d_init,
.ntfy_init = core507d_ntfy_init,
+ .caps_init = core507d_caps_init,
.ntfy_wait_done = core507d_ntfy_wait_done,
.update = core507d_update,
.head = &head907d,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core917d.c b/drivers/gpu/drm/nouveau/dispnv50/core917d.c
index 392338df5bfd..5cc072d4c30f 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core917d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/core917d.c
@@ -26,6 +26,7 @@ static const struct nv50_core_func
core917d = {
.init = core507d_init,
.ntfy_init = core507d_ntfy_init,
+ .caps_init = core507d_caps_init,
.ntfy_wait_done = core507d_ntfy_wait_done,
.update = core507d_update,
.head = &head917d,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
index c03cb987856b..e0c8811fb8e4 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
@@ -22,6 +22,7 @@
#include "core.h"
#include "head.h"
+#include <nvif/class.h>
#include <nouveau_bo.h>
#include <nvif/timer.h>
@@ -87,6 +88,30 @@ corec37d_ntfy_init(struct nouveau_bo *bo, u32 offset)
nouveau_bo_wr32(bo, offset / 4 + 3, 0x00000000);
}
+int corec37d_caps_init(struct nouveau_drm *drm, struct nv50_disp *disp)
+{
+ int ret;
+
+ ret = nvif_object_init(&disp->disp->object, 0, GV100_DISP_CAPS,
+ NULL, 0, &disp->caps);
+ if (ret) {
+ NV_ERROR(drm,
+ "Failed to init notifier caps region: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = nvif_object_map(&disp->caps, NULL, 0);
+ if (ret) {
+ NV_ERROR(drm,
+ "Failed to map notifier caps region: %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static void
corec37d_init(struct nv50_core *core)
{
@@ -111,6 +136,7 @@ static const struct nv50_core_func
corec37d = {
.init = corec37d_init,
.ntfy_init = corec37d_ntfy_init,
+ .caps_init = corec37d_caps_init,
.ntfy_wait_done = corec37d_ntfy_wait_done,
.update = corec37d_update,
.wndw.owner = corec37d_wndw_owner,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/corec57d.c b/drivers/gpu/drm/nouveau/dispnv50/corec57d.c
index 147adcd60937..10ba9e9e4ae6 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/corec57d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/corec57d.c
@@ -46,6 +46,7 @@ static const struct nv50_core_func
corec57d = {
.init = corec57d_init,
.ntfy_init = corec37d_ntfy_init,
+ .caps_init = corec37d_caps_init,
.ntfy_wait_done = corec37d_ntfy_wait_done,
.update = corec37d_update,
.wndw.owner = corec37d_wndw_owner,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/curs507a.c b/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
index 8c5cf096f69b..658a200ab616 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
@@ -32,7 +32,7 @@
bool
curs507a_space(struct nv50_wndw *wndw)
{
- nvif_msec(&nouveau_drm(wndw->plane.dev)->client.device, 2,
+ nvif_msec(&nouveau_drm(wndw->plane.dev)->client.device, 100,
if (nvif_rd32(&wndw->wimm.base.user, 0x0008) >= 4)
return true;
);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 6be9df1820c5..d472942102f5 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -277,7 +277,7 @@ nv50_outp_release(struct nouveau_encoder *nv_encoder)
}
static int
-nv50_outp_acquire(struct nouveau_encoder *nv_encoder)
+nv50_outp_acquire(struct nouveau_encoder *nv_encoder, bool hda)
{
struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
struct nv50_disp *disp = nv50_disp(drm->dev);
@@ -289,6 +289,7 @@ nv50_outp_acquire(struct nouveau_encoder *nv_encoder)
.base.method = NV50_DISP_MTHD_V1_ACQUIRE,
.base.hasht = nv_encoder->dcb->hasht,
.base.hashm = nv_encoder->dcb->hashm,
+ .info.hda = hda,
};
int ret;
@@ -393,7 +394,7 @@ nv50_dac_enable(struct drm_encoder *encoder)
struct nv50_head_atom *asyh = nv50_head_atom(nv_crtc->base.state);
struct nv50_core *core = nv50_disp(encoder->dev)->core;
- nv50_outp_acquire(nv_encoder);
+ nv50_outp_acquire(nv_encoder, false);
core->func->dac->ctrl(core, nv_encoder->or, 1 << nv_crtc->index, asyh);
asyh->or.depth = 0;
@@ -482,15 +483,16 @@ nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
* audio component binding for ELD notification
*/
static void
-nv50_audio_component_eld_notify(struct drm_audio_component *acomp, int port)
+nv50_audio_component_eld_notify(struct drm_audio_component *acomp, int port,
+ int dev_id)
{
if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify)
acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
- port, -1);
+ port, dev_id);
}
static int
-nv50_audio_component_get_eld(struct device *kdev, int port, int pipe,
+nv50_audio_component_get_eld(struct device *kdev, int port, int dev_id,
bool *enabled, unsigned char *buf, int max_bytes)
{
struct drm_device *drm_dev = dev_get_drvdata(kdev);
@@ -506,9 +508,10 @@ nv50_audio_component_get_eld(struct device *kdev, int port, int pipe,
nv_encoder = nouveau_encoder(encoder);
nv_connector = nouveau_encoder_connector_get(nv_encoder);
nv_crtc = nouveau_crtc(encoder->crtc);
- if (!nv_connector || !nv_crtc || nv_crtc->index != port)
+ if (!nv_connector || !nv_crtc || nv_encoder->or != port ||
+ nv_crtc->index != dev_id)
continue;
- *enabled = drm_detect_monitor_audio(nv_connector->edid);
+ *enabled = nv_encoder->audio;
if (*enabled) {
ret = drm_eld_size(nv_connector->base.eld);
memcpy(buf, nv_connector->base.eld,
@@ -598,9 +601,11 @@ nv50_audio_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
(0x0100 << nv_crtc->index),
};
+ nv_encoder->audio = false;
nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
- nv50_audio_component_eld_notify(drm->audio.component, nv_crtc->index);
+ nv50_audio_component_eld_notify(drm->audio.component, nv_encoder->or,
+ nv_crtc->index);
}
static void
@@ -633,8 +638,10 @@ nv50_audio_enable(struct drm_encoder *encoder, struct drm_display_mode *mode)
nvif_mthd(&disp->disp->object, 0, &args,
sizeof(args.base) + drm_eld_size(args.data));
+ nv_encoder->audio = true;
- nv50_audio_component_eld_notify(drm->audio.component, nv_crtc->index);
+ nv50_audio_component_eld_notify(drm->audio.component, nv_encoder->or,
+ nv_crtc->index);
}
/******************************************************************************
@@ -904,15 +911,9 @@ nv50_msto_atomic_check(struct drm_encoder *encoder,
if (!state->duplicated) {
const int clock = crtc_state->adjusted_mode.clock;
- /*
- * XXX: Since we don't use HDR in userspace quite yet, limit
- * the bpc to 8 to save bandwidth on the topology. In the
- * future, we'll want to properly fix this by dynamically
- * selecting the highest possible bpc that would fit in the
- * topology
- */
- asyh->or.bpc = min(connector->display_info.bpc, 8U);
- asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, asyh->or.bpc * 3, false);
+ asyh->or.bpc = connector->display_info.bpc;
+ asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, asyh->or.bpc * 3,
+ false);
}
slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr, mstc->port,
@@ -968,7 +969,7 @@ nv50_msto_enable(struct drm_encoder *encoder)
DRM_DEBUG_KMS("Failed to allocate VCPI\n");
if (!mstm->links++)
- nv50_outp_acquire(mstm->outp);
+ nv50_outp_acquire(mstm->outp, false /*XXX: MST audio.*/);
if (mstm->outp->link & 1)
proto = 0x8;
@@ -1058,7 +1059,14 @@ static enum drm_mode_status
nv50_mstc_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- return MODE_OK;
+ struct nv50_mstc *mstc = nv50_mstc(connector);
+ struct nouveau_encoder *outp = mstc->mstm->outp;
+
+ /* TODO: calculate the PBN from the dotclock and validate against the
+ * MSTB's max possible PBN
+ */
+
+ return nv50_dp_mode_valid(connector, outp, mode, NULL);
}
static int
@@ -1072,8 +1080,17 @@ nv50_mstc_get_modes(struct drm_connector *connector)
if (mstc->edid)
ret = drm_add_edid_modes(&mstc->connector, mstc->edid);
- if (!mstc->connector.display_info.bpc)
- mstc->connector.display_info.bpc = 8;
+ /*
+ * XXX: Since we don't use HDR in userspace quite yet, limit the bpc
+ * to 8 to save bandwidth on the topology. In the future, we'll want
+ * to properly fix this by dynamically selecting the highest possible
+ * bpc that would fit in the topology
+ */
+ if (connector->display_info.bpc)
+ connector->display_info.bpc =
+ clamp(connector->display_info.bpc, 6U, 8U);
+ else
+ connector->display_info.bpc = 8;
if (mstc->native)
drm_mode_destroy(mstc->connector.dev, mstc->native);
@@ -1123,8 +1140,10 @@ nv50_mstc_detect(struct drm_connector *connector,
return connector_status_disconnected;
ret = pm_runtime_get_sync(connector->dev->dev);
- if (ret < 0 && ret != -EACCES)
+ if (ret < 0 && ret != -EACCES) {
+ pm_runtime_put_autosuspend(connector->dev->dev);
return connector_status_disconnected;
+ }
ret = drm_dp_mst_detect_port(connector, ctx, mstc->port->mgr,
mstc->port);
@@ -1544,12 +1563,18 @@ nv50_sor_enable(struct drm_encoder *encoder)
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_connector *nv_connector;
struct nvbios *bios = &drm->vbios;
+ bool hda = false;
u8 proto = 0xf;
u8 depth = 0x0;
nv_connector = nouveau_encoder_connector_get(nv_encoder);
nv_encoder->crtc = encoder->crtc;
- nv50_outp_acquire(nv_encoder);
+
+ if ((disp->disp->object.oclass == GT214_DISP ||
+ disp->disp->object.oclass >= GF110_DISP) &&
+ drm_detect_monitor_audio(nv_connector->edid))
+ hda = true;
+ nv50_outp_acquire(nv_encoder, hda);
switch (nv_encoder->dcb->type) {
case DCB_OUTPUT_TMDS:
@@ -1659,6 +1684,7 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
struct nouveau_encoder *nv_encoder;
struct drm_encoder *encoder;
+ struct nv50_disp *disp = nv50_disp(connector->dev);
int type, ret;
switch (dcbe->type) {
@@ -1685,10 +1711,12 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
drm_connector_attach_encoder(connector, encoder);
+ disp->core->func->sor->get_caps(disp, nv_encoder, ffs(dcbe->or) - 1);
+
if (dcbe->type == DCB_OUTPUT_DP) {
- struct nv50_disp *disp = nv50_disp(encoder->dev);
struct nvkm_i2c_aux *aux =
nvkm_i2c_aux_find(i2c, dcbe->i2c_index);
+
if (aux) {
if (disp->disp->object.oclass < GF110_DISP) {
/* HW has no support for address-only
@@ -1756,7 +1784,7 @@ nv50_pior_enable(struct drm_encoder *encoder)
u8 owner = 1 << nv_crtc->index;
u8 proto;
- nv50_outp_acquire(nv_encoder);
+ nv50_outp_acquire(nv_encoder, false);
switch (asyh->or.bpc) {
case 10: asyh->or.depth = 0x6; break;
@@ -1801,7 +1829,9 @@ nv50_pior_func = {
static int
nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
{
- struct nouveau_drm *drm = nouveau_drm(connector->dev);
+ struct drm_device *dev = connector->dev;
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nv50_disp *disp = nv50_disp(dev);
struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
struct nvkm_i2c_bus *bus = NULL;
struct nvkm_i2c_aux *aux = NULL;
@@ -1840,6 +1870,9 @@ nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
drm_encoder_helper_add(encoder, &nv50_pior_help);
drm_connector_attach_encoder(connector, encoder);
+
+ disp->core->func->pior->get_caps(disp, nv_encoder, ffs(dcbe->or) - 1);
+
return 0;
}
@@ -2369,7 +2402,8 @@ nv50_display_init(struct drm_device *dev, bool resume, bool runtime)
struct drm_encoder *encoder;
struct drm_plane *plane;
- core->func->init(core);
+ if (resume || runtime)
+ core->func->init(core);
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
@@ -2396,6 +2430,8 @@ nv50_display_destroy(struct drm_device *dev)
nv50_audio_component_fini(nouveau_drm(dev));
+ nvif_object_unmap(&disp->caps);
+ nvif_object_fini(&disp->caps);
nv50_core_del(&disp->core);
nouveau_bo_unmap(disp->sync);
@@ -2456,6 +2492,22 @@ nv50_display_create(struct drm_device *dev)
if (ret)
goto out;
+ disp->core->func->init(disp->core);
+ if (disp->core->func->caps_init) {
+ ret = disp->core->func->caps_init(drm, disp);
+ if (ret)
+ goto out;
+ }
+
+ /* Assign the correct format modifiers */
+ if (disp->disp->object.oclass >= TU102_DISP)
+ nouveau_display(dev)->format_modifiers = wndwc57e_modifiers;
+ else
+ if (disp->disp->object.oclass >= GF110_DISP)
+ nouveau_display(dev)->format_modifiers = disp90xx_modifiers;
+ else
+ nouveau_display(dev)->format_modifiers = disp50xx_modifiers;
+
/* create crtc objects to represent the hw heads */
if (disp->disp->object.oclass >= GV100_DISP)
crtcs = nvif_rd32(&device->object, 0x610060) & 0xff;
@@ -2551,3 +2603,53 @@ out:
nv50_display_destroy(dev);
return ret;
}
+
+/******************************************************************************
+ * Format modifiers
+ *****************************************************************************/
+
+/****************************************************************
+ * Log2(block height) ----------------------------+ *
+ * Page Kind ----------------------------------+ | *
+ * Gob Height/Page Kind Generation ------+ | | *
+ * Sector layout -------+ | | | *
+ * Compression ------+ | | | | */
+const u64 disp50xx_modifiers[] = { /* | | | | | */
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 0),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 1),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 2),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 3),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 4),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 5),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 0),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 1),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 2),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 3),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 4),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 5),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 0),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 1),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 2),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 3),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 4),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 5),
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
+/****************************************************************
+ * Log2(block height) ----------------------------+ *
+ * Page Kind ----------------------------------+ | *
+ * Gob Height/Page Kind Generation ------+ | | *
+ * Sector layout -------+ | | | *
+ * Compression ------+ | | | | */
+const u64 disp90xx_modifiers[] = { /* | | | | | */
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 0),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 1),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 2),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 3),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 4),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 5),
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
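
Each entry above packs five fields, but only two of them (the page kind and the log2 block height) are read back by nouveau's decode path. A minimal sketch of that unpacking, mirroring the nouveau_decode_mod() helper added to nouveau_display.c later in this diff; the stdint types and the function name here are illustrative, not part of the patch:

#include <stdint.h>

/* Sketch only: the two modifier fields nouveau's decode path reads back.
 * Bits 0-3 hold log2(block height in GOBs) and become the tile mode (the
 * driver shifts it left by 4 on Fermi and newer); bits 12-19 hold the
 * page kind. The remaining legend fields are fixed per table above.
 */
static inline void decode_block_linear(uint64_t modifier,
				       uint32_t *tile_mode, uint8_t *kind)
{
	*tile_mode = (uint32_t)(modifier & 0xf);
	*kind = (uint8_t)((modifier >> 12) & 0xff);
}
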
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.h b/drivers/gpu/drm/nouveau/dispnv50/disp.h
index d54fe00ac3a3..696e70a6b98b 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.h
@@ -9,6 +9,7 @@ struct nv50_msto;
struct nv50_disp {
struct nvif_disp *disp;
struct nv50_core *core;
+ struct nvif_object caps;
#define NV50_DISP_SYNC(c, o) ((c) * 0x040 + (o))
#define NV50_DISP_CORE_NTFY NV50_DISP_SYNC(0 , 0x00)
@@ -78,6 +79,10 @@ void nv50_dmac_destroy(struct nv50_dmac *);
u32 *evo_wait(struct nv50_dmac *, int nr);
void evo_kick(u32 *, struct nv50_dmac *);
+extern const u64 disp50xx_modifiers[];
+extern const u64 disp90xx_modifiers[];
+extern const u64 wndwc57e_modifiers[];
+
#define evo_mthd(p, m, s) do { \
const u32 _m = (m), _s = (s); \
if (drm_debug_enabled(DRM_UT_KMS)) \
diff --git a/drivers/gpu/drm/nouveau/dispnv50/headc37d.c b/drivers/gpu/drm/nouveau/dispnv50/headc37d.c
index 00011ce109a6..4a9a32b89f74 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/headc37d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/headc37d.c
@@ -168,14 +168,15 @@ headc37d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
struct nv50_head_mode *m = &asyh->mode;
u32 *push;
- if ((push = evo_wait(core, 12))) {
+ if ((push = evo_wait(core, 13))) {
evo_mthd(push, 0x2064 + (head->base.index * 0x400), 5);
evo_data(push, (m->v.active << 16) | m->h.active );
evo_data(push, (m->v.synce << 16) | m->h.synce );
evo_data(push, (m->v.blanke << 16) | m->h.blanke );
evo_data(push, (m->v.blanks << 16) | m->h.blanks );
evo_data(push, (m->v.blank2e << 16) | m->v.blank2s);
- evo_mthd(push, 0x200c + (head->base.index * 0x400), 1);
+ evo_mthd(push, 0x2008 + (head->base.index * 0x400), 2);
+ evo_data(push, m->interlace);
evo_data(push, m->clock * 1000);
evo_mthd(push, 0x2028 + (head->base.index * 0x400), 1);
evo_data(push, m->clock * 1000);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/headc57d.c b/drivers/gpu/drm/nouveau/dispnv50/headc57d.c
index 938d910a1b1e..859131a8bc3c 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/headc57d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/headc57d.c
@@ -173,14 +173,15 @@ headc57d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
struct nv50_head_mode *m = &asyh->mode;
u32 *push;
- if ((push = evo_wait(core, 12))) {
+ if ((push = evo_wait(core, 13))) {
evo_mthd(push, 0x2064 + (head->base.index * 0x400), 5);
evo_data(push, (m->v.active << 16) | m->h.active );
evo_data(push, (m->v.synce << 16) | m->h.synce );
evo_data(push, (m->v.blanke << 16) | m->h.blanke );
evo_data(push, (m->v.blanks << 16) | m->h.blanks );
evo_data(push, (m->v.blank2e << 16) | m->v.blank2s);
- evo_mthd(push, 0x200c + (head->base.index * 0x400), 1);
+ evo_mthd(push, 0x2008 + (head->base.index * 0x400), 2);
+ evo_data(push, m->interlace);
evo_data(push, m->clock * 1000);
evo_mthd(push, 0x2028 + (head->base.index * 0x400), 1);
evo_data(push, m->clock * 1000);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/pior507d.c b/drivers/gpu/drm/nouveau/dispnv50/pior507d.c
index d2bac6a341dc..45d8ce7d2c28 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/pior507d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/pior507d.c
@@ -38,7 +38,15 @@ pior507d_ctrl(struct nv50_core *core, int or, u32 ctrl,
}
}
+static void
+pior507d_get_caps(struct nv50_disp *disp, struct nouveau_encoder *outp,
+ int or)
+{
+ outp->caps.dp_interlace = true;
+}
+
const struct nv50_outp_func
pior507d = {
.ctrl = pior507d_ctrl,
+ .get_caps = pior507d_get_caps,
};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/sor507d.c b/drivers/gpu/drm/nouveau/dispnv50/sor507d.c
index 5222fe6a9b21..9a59fa7da00d 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/sor507d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/sor507d.c
@@ -38,7 +38,14 @@ sor507d_ctrl(struct nv50_core *core, int or, u32 ctrl,
}
}
+static void
+sor507d_get_caps(struct nv50_disp *core, struct nouveau_encoder *outp, int or)
+{
+ outp->caps.dp_interlace = true;
+}
+
const struct nv50_outp_func
sor507d = {
.ctrl = sor507d_ctrl,
+ .get_caps = sor507d_get_caps,
};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/sor907d.c b/drivers/gpu/drm/nouveau/dispnv50/sor907d.c
index b0314ec11fb3..9577ccf1c809 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/sor907d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/sor907d.c
@@ -21,6 +21,7 @@
*/
#include "core.h"
+#include <nouveau_bo.h>
#include <nvif/class.h>
static void
@@ -35,7 +36,17 @@ sor907d_ctrl(struct nv50_core *core, int or, u32 ctrl,
}
}
+static void
+sor907d_get_caps(struct nv50_disp *disp, struct nouveau_encoder *outp, int or)
+{
+ const int off = or * 2;
+ u32 tmp = nouveau_bo_rd32(disp->sync, 0x000014 + off);
+
+ outp->caps.dp_interlace = !!(tmp & 0x04000000);
+}
+
const struct nv50_outp_func
sor907d = {
.ctrl = sor907d_ctrl,
+ .get_caps = sor907d_get_caps,
};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/sorc37d.c b/drivers/gpu/drm/nouveau/dispnv50/sorc37d.c
index dff059241c5d..c86ca955fdcd 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/sorc37d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/sorc37d.c
@@ -33,7 +33,16 @@ sorc37d_ctrl(struct nv50_core *core, int or, u32 ctrl,
}
}
+static void
+sorc37d_get_caps(struct nv50_disp *disp, struct nouveau_encoder *outp, int or)
+{
+ u32 tmp = nvif_rd32(&disp->caps, 0x000144 + (or * 8));
+
+ outp->caps.dp_interlace = !!(tmp & 0x04000000);
+}
+
const struct nv50_outp_func
sorc37d = {
.ctrl = sorc37d_ctrl,
+ .get_caps = sorc37d_get_caps,
};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index bb737f9281e6..99b9b681736d 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -29,6 +29,7 @@
#include <drm/drm_fourcc.h>
#include "nouveau_bo.h"
+#include "nouveau_gem.h"
static void
nv50_wndw_ctxdma_del(struct nv50_wndw_ctxdma *ctxdma)
@@ -39,12 +40,13 @@ nv50_wndw_ctxdma_del(struct nv50_wndw_ctxdma *ctxdma)
}
static struct nv50_wndw_ctxdma *
-nv50_wndw_ctxdma_new(struct nv50_wndw *wndw, struct nouveau_framebuffer *fb)
+nv50_wndw_ctxdma_new(struct nv50_wndw *wndw, struct drm_framebuffer *fb)
{
- struct nouveau_drm *drm = nouveau_drm(fb->base.dev);
+ struct nouveau_drm *drm = nouveau_drm(fb->dev);
struct nv50_wndw_ctxdma *ctxdma;
- const u8 kind = fb->nvbo->kind;
- const u32 handle = 0xfb000000 | kind;
+ u32 handle;
+ u32 unused;
+ u8 kind;
struct {
struct nv_dma_v0 base;
union {
@@ -56,6 +58,9 @@ nv50_wndw_ctxdma_new(struct nv50_wndw *wndw, struct nouveau_framebuffer *fb)
u32 argc = sizeof(args.base);
int ret;
+ nouveau_framebuffer_get_layout(fb, &unused, &kind);
+ handle = 0xfb000000 | kind;
+
list_for_each_entry(ctxdma, &wndw->ctxdma.list, head) {
if (ctxdma->object.handle == handle)
return ctxdma;
@@ -187,6 +192,8 @@ nv50_wndw_atomic_check_release(struct nv50_wndw *wndw,
wndw->func->release(wndw, asyw, asyh);
asyw->ntfy.handle = 0;
asyw->sema.handle = 0;
+ asyw->xlut.handle = 0;
+ memset(asyw->image.handle, 0x00, sizeof(asyw->image.handle));
}
static int
@@ -234,16 +241,20 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw, bool modeset,
struct nv50_wndw_atom *asyw,
struct nv50_head_atom *asyh)
{
- struct nouveau_framebuffer *fb = nouveau_framebuffer(asyw->state.fb);
+ struct drm_framebuffer *fb = asyw->state.fb;
struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
+ uint8_t kind;
+ uint32_t tile_mode;
int ret;
NV_ATOMIC(drm, "%s acquire\n", wndw->plane.name);
- if (asyw->state.fb != armw->state.fb || !armw->visible || modeset) {
- asyw->image.w = fb->base.width;
- asyw->image.h = fb->base.height;
- asyw->image.kind = fb->nvbo->kind;
+ if (fb != armw->state.fb || !armw->visible || modeset) {
+ nouveau_framebuffer_get_layout(fb, &tile_mode, &kind);
+
+ asyw->image.w = fb->width;
+ asyw->image.h = fb->height;
+ asyw->image.kind = kind;
ret = nv50_wndw_atomic_check_acquire_rgb(asyw);
if (ret) {
@@ -255,16 +266,16 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw, bool modeset,
if (asyw->image.kind) {
asyw->image.layout = 0;
if (drm->client.device.info.chipset >= 0xc0)
- asyw->image.blockh = fb->nvbo->mode >> 4;
+ asyw->image.blockh = tile_mode >> 4;
else
- asyw->image.blockh = fb->nvbo->mode;
- asyw->image.blocks[0] = fb->base.pitches[0] / 64;
+ asyw->image.blockh = tile_mode;
+ asyw->image.blocks[0] = fb->pitches[0] / 64;
asyw->image.pitch[0] = 0;
} else {
asyw->image.layout = 1;
asyw->image.blockh = 0;
asyw->image.blocks[0] = 0;
- asyw->image.pitch[0] = fb->base.pitches[0];
+ asyw->image.pitch[0] = fb->pitches[0];
}
if (!asyh->state.async_flip)
@@ -471,47 +482,51 @@ nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state)
static void
nv50_wndw_cleanup_fb(struct drm_plane *plane, struct drm_plane_state *old_state)
{
- struct nouveau_framebuffer *fb = nouveau_framebuffer(old_state->fb);
struct nouveau_drm *drm = nouveau_drm(plane->dev);
+ struct nouveau_bo *nvbo;
NV_ATOMIC(drm, "%s cleanup: %p\n", plane->name, old_state->fb);
if (!old_state->fb)
return;
- nouveau_bo_unpin(fb->nvbo);
+ nvbo = nouveau_gem_object(old_state->fb->obj[0]);
+ nouveau_bo_unpin(nvbo);
}
static int
nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
{
- struct nouveau_framebuffer *fb = nouveau_framebuffer(state->fb);
+ struct drm_framebuffer *fb = state->fb;
struct nouveau_drm *drm = nouveau_drm(plane->dev);
struct nv50_wndw *wndw = nv50_wndw(plane);
struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
+ struct nouveau_bo *nvbo;
struct nv50_head_atom *asyh;
struct nv50_wndw_ctxdma *ctxdma;
int ret;
- NV_ATOMIC(drm, "%s prepare: %p\n", plane->name, state->fb);
+ NV_ATOMIC(drm, "%s prepare: %p\n", plane->name, fb);
if (!asyw->state.fb)
return 0;
- ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM, true);
+ nvbo = nouveau_gem_object(fb->obj[0]);
+ ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, true);
if (ret)
return ret;
if (wndw->ctxdma.parent) {
ctxdma = nv50_wndw_ctxdma_new(wndw, fb);
if (IS_ERR(ctxdma)) {
- nouveau_bo_unpin(fb->nvbo);
+ nouveau_bo_unpin(nvbo);
return PTR_ERR(ctxdma);
}
- asyw->image.handle[0] = ctxdma->object.handle;
+ if (asyw->visible)
+ asyw->image.handle[0] = ctxdma->object.handle;
}
- asyw->state.fence = dma_resv_get_excl_rcu(fb->nvbo->bo.base.resv);
- asyw->image.offset[0] = fb->nvbo->bo.offset;
+ asyw->state.fence = dma_resv_get_excl_rcu(nvbo->bo.base.resv);
+ asyw->image.offset[0] = nvbo->bo.offset;
if (wndw->func->prepare) {
asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc);
@@ -603,6 +618,29 @@ nv50_wndw_destroy(struct drm_plane *plane)
kfree(wndw);
}
+/* This function assumes the format has already been validated against the plane
+ * and the modifier was validated against the device-wide modifier list at FB
+ * creation time.
+ */
+static bool nv50_plane_format_mod_supported(struct drm_plane *plane,
+ u32 format, u64 modifier)
+{
+ struct nouveau_drm *drm = nouveau_drm(plane->dev);
+ uint8_t i;
+
+ if (drm->client.device.info.chipset < 0xc0) {
+ const struct drm_format_info *info = drm_format_info(format);
+ const uint8_t kind = (modifier >> 12) & 0xff;
+
+ if (!format) return false;
+
+ for (i = 0; i < info->num_planes; i++)
+ if ((info->cpp[i] != 4) && kind != 0x70) return false;
+ }
+
+ return true;
+}
+
const struct drm_plane_funcs
nv50_wndw = {
.update_plane = drm_atomic_helper_update_plane,
@@ -611,6 +649,7 @@ nv50_wndw = {
.reset = nv50_wndw_reset,
.atomic_duplicate_state = nv50_wndw_atomic_duplicate_state,
.atomic_destroy_state = nv50_wndw_atomic_destroy_state,
+ .format_mod_supported = nv50_plane_format_mod_supported,
};
static int
@@ -658,7 +697,8 @@ nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev,
for (nformat = 0; format[nformat]; nformat++);
ret = drm_universal_plane_init(dev, &wndw->plane, heads, &nv50_wndw,
- format, nformat, NULL,
+ format, nformat,
+ nouveau_display(dev)->format_modifiers,
type, "%s-%d", name, index);
if (ret) {
kfree(*pwndw);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
index 35c9c52fab26..1d64741595ba 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
@@ -173,6 +173,23 @@ wndwc57e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, int size)
return true;
}
+/****************************************************************
+ * Log2(block height) ----------------------------+ *
+ * Page Kind ----------------------------------+ | *
+ * Gob Height/Page Kind Generation ------+ | | *
+ * Sector layout -------+ | | | *
+ * Compression ------+ | | | | */
+const u64 wndwc57e_modifiers[] = { /* | | | | | */
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 0),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 1),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 2),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 3),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 4),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 5),
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
static const struct nv50_wndw_func
wndwc57e = {
.acquire = wndwc37e_acquire,
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl5070.h b/drivers/gpu/drm/nouveau/include/nvif/cl5070.h
index 38bf4f38e869..53800fb46582 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl5070.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl5070.h
@@ -46,7 +46,8 @@ struct nv50_disp_acquire_v0 {
__u8 version;
__u8 or;
__u8 link;
- __u8 pad03[5];
+ __u8 hda;
+ __u8 pad04[4];
};
struct nv50_disp_dac_load_v0 {
diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h
index 30659747ffe8..2c79beb41126 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/class.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/class.h
@@ -89,6 +89,8 @@
#define GV100_DISP /* cl5070.h */ 0x0000c370
#define TU102_DISP /* cl5070.h */ 0x0000c570
+#define GV100_DISP_CAPS 0x0000c373
+
#define NV31_MPEG 0x00003174
#define G82_MPEG 0x00008274
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h b/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
index 1218f28c14ba..76288c682e9e 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
@@ -24,6 +24,8 @@ struct nvkm_subdev_func {
};
extern const char *nvkm_subdev_name[NVKM_SUBDEV_NR];
+int nvkm_subdev_new_(const struct nvkm_subdev_func *, struct nvkm_device *,
+ int index, struct nvkm_subdev **);
void nvkm_subdev_ctor(const struct nvkm_subdev_func *, struct nvkm_device *,
int index, struct nvkm_subdev *);
void nvkm_subdev_del(struct nvkm_subdev **);
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index fe3a10255c36..69a84d0197d0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -49,7 +49,6 @@ static struct nouveau_dsm_priv {
bool optimus_flags_detected;
bool optimus_skip_dsm;
acpi_handle dhandle;
- acpi_handle rom_handle;
} nouveau_dsm_priv;
bool nouveau_is_optimus(void) {
@@ -212,37 +211,6 @@ static const struct vga_switcheroo_handler nouveau_dsm_handler = {
.get_client_id = nouveau_dsm_get_client_id,
};
-/*
- * Firmware supporting Windows 8 or later do not use _DSM to put the device into
- * D3cold, they instead rely on disabling power resources on the parent.
- */
-static bool nouveau_pr3_present(struct pci_dev *pdev)
-{
- struct pci_dev *parent_pdev = pci_upstream_bridge(pdev);
- struct acpi_device *parent_adev;
-
- if (!parent_pdev)
- return false;
-
- if (!parent_pdev->bridge_d3) {
- /*
- * Parent PCI bridge is currently not power managed.
- * Since userspace can change these afterwards to be on
- * the safe side we stick with _DSM and prevent usage of
- * _PR3 from the bridge.
- */
- pci_d3cold_disable(pdev);
- return false;
- }
-
- parent_adev = ACPI_COMPANION(&parent_pdev->dev);
- if (!parent_adev)
- return false;
-
- return parent_adev->power.flags.power_resources &&
- acpi_has_method(parent_adev->handle, "_PR3");
-}
-
static void nouveau_dsm_pci_probe(struct pci_dev *pdev, acpi_handle *dhandle_out,
bool *has_mux, bool *has_opt,
bool *has_opt_flags, bool *has_pr3)
@@ -250,6 +218,16 @@ static void nouveau_dsm_pci_probe(struct pci_dev *pdev, acpi_handle *dhandle_out
acpi_handle dhandle;
bool supports_mux;
int optimus_funcs;
+ struct pci_dev *parent_pdev;
+
+ *has_pr3 = false;
+ parent_pdev = pci_upstream_bridge(pdev);
+ if (parent_pdev) {
+ if (parent_pdev->bridge_d3)
+ *has_pr3 = pci_pr3_present(parent_pdev);
+ else
+ pci_d3cold_disable(pdev);
+ }
dhandle = ACPI_HANDLE(&pdev->dev);
if (!dhandle)
@@ -270,7 +248,6 @@ static void nouveau_dsm_pci_probe(struct pci_dev *pdev, acpi_handle *dhandle_out
*has_mux = supports_mux;
*has_opt = !!optimus_funcs;
*has_opt_flags = optimus_funcs & (1 << NOUVEAU_DSM_OPTIMUS_FLAGS);
- *has_pr3 = false;
if (optimus_funcs) {
uint32_t result;
@@ -280,8 +257,6 @@ static void nouveau_dsm_pci_probe(struct pci_dev *pdev, acpi_handle *dhandle_out
(result & OPTIMUS_ENABLED) ? "enabled" : "disabled",
(result & OPTIMUS_DYNAMIC_PWR_CAP) ? "dynamic power, " : "",
(result & OPTIMUS_HDA_CODEC_MASK) ? "hda bios codec supported" : "");
-
- *has_pr3 = nouveau_pr3_present(pdev);
}
}
@@ -385,59 +360,6 @@ void nouveau_unregister_dsm_handler(void) {}
void nouveau_switcheroo_optimus_dsm(void) {}
#endif
-/* retrieve the ROM in 4k blocks */
-static int nouveau_rom_call(acpi_handle rom_handle, uint8_t *bios,
- int offset, int len)
-{
- acpi_status status;
- union acpi_object rom_arg_elements[2], *obj;
- struct acpi_object_list rom_arg;
- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
-
- rom_arg.count = 2;
- rom_arg.pointer = &rom_arg_elements[0];
-
- rom_arg_elements[0].type = ACPI_TYPE_INTEGER;
- rom_arg_elements[0].integer.value = offset;
-
- rom_arg_elements[1].type = ACPI_TYPE_INTEGER;
- rom_arg_elements[1].integer.value = len;
-
- status = acpi_evaluate_object(rom_handle, NULL, &rom_arg, &buffer);
- if (ACPI_FAILURE(status)) {
- pr_info("failed to evaluate ROM got %s\n",
- acpi_format_exception(status));
- return -ENODEV;
- }
- obj = (union acpi_object *)buffer.pointer;
- len = min(len, (int)obj->buffer.length);
- memcpy(bios+offset, obj->buffer.pointer, len);
- kfree(buffer.pointer);
- return len;
-}
-
-bool nouveau_acpi_rom_supported(struct device *dev)
-{
- acpi_status status;
- acpi_handle dhandle, rom_handle;
-
- dhandle = ACPI_HANDLE(dev);
- if (!dhandle)
- return false;
-
- status = acpi_get_handle(dhandle, "_ROM", &rom_handle);
- if (ACPI_FAILURE(status))
- return false;
-
- nouveau_dsm_priv.rom_handle = rom_handle;
- return true;
-}
-
-int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len)
-{
- return nouveau_rom_call(nouveau_dsm_priv.rom_handle, bios, offset, len);
-}
-
void *
nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector)
{
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.h b/drivers/gpu/drm/nouveau/nouveau_acpi.h
index 1e6e8a8c0455..330f9b837066 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.h
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.h
@@ -10,8 +10,6 @@ bool nouveau_is_v1_dsm(void);
void nouveau_register_dsm_handler(void);
void nouveau_unregister_dsm_handler(void);
void nouveau_switcheroo_optimus_dsm(void);
-int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
-bool nouveau_acpi_rom_supported(struct device *);
void *nouveau_acpi_edid(struct drm_device *, struct drm_connector *);
#else
static inline bool nouveau_is_optimus(void) { return false; };
@@ -19,8 +17,6 @@ static inline bool nouveau_is_v1_dsm(void) { return false; };
static inline void nouveau_register_dsm_handler(void) {}
static inline void nouveau_unregister_dsm_handler(void) {}
static inline void nouveau_switcheroo_optimus_dsm(void) {}
-static inline bool nouveau_acpi_rom_supported(struct device *dev) { return false; }
-static inline int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len) { return -EINVAL; }
static inline void *nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector) { return NULL; }
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 9a9a7f5003d3..1b383ae0248f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -38,6 +38,7 @@
#include "nouveau_reg.h"
#include "nouveau_drv.h"
#include "dispnv04/hw.h"
+#include "dispnv50/disp.h"
#include "nouveau_acpi.h"
#include "nouveau_display.h"
@@ -509,7 +510,11 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
nv_connector->detected_encoder = nv_encoder;
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
- connector->interlace_allowed = true;
+ if (nv_encoder->dcb->type == DCB_OUTPUT_DP)
+ connector->interlace_allowed =
+ nv_encoder->caps.dp_interlace;
+ else
+ connector->interlace_allowed = true;
connector->doublescan_allowed = true;
} else
if (nv_encoder->dcb->type == DCB_OUTPUT_LVDS ||
@@ -1029,6 +1034,29 @@ get_tmds_link_bandwidth(struct drm_connector *connector)
return 112000 * duallink_scale;
}
+enum drm_mode_status
+nouveau_conn_mode_clock_valid(const struct drm_display_mode *mode,
+ const unsigned min_clock,
+ const unsigned max_clock,
+ unsigned int *clock_out)
+{
+ unsigned int clock = mode->clock;
+
+ if ((mode->flags & DRM_MODE_FLAG_3D_MASK) ==
+ DRM_MODE_FLAG_3D_FRAME_PACKING)
+ clock *= 2;
+
+ if (clock < min_clock)
+ return MODE_CLOCK_LOW;
+ if (clock > max_clock)
+ return MODE_CLOCK_HIGH;
+
+ if (clock_out)
+ *clock_out = clock;
+
+ return MODE_OK;
+}
+
static enum drm_mode_status
nouveau_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
@@ -1037,7 +1065,6 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
unsigned min_clock = 25000, max_clock = min_clock;
- unsigned clock = mode->clock;
switch (nv_encoder->dcb->type) {
case DCB_OUTPUT_LVDS:
@@ -1060,25 +1087,14 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
case DCB_OUTPUT_TV:
return get_slave_funcs(encoder)->mode_valid(encoder, mode);
case DCB_OUTPUT_DP:
- max_clock = nv_encoder->dp.link_nr;
- max_clock *= nv_encoder->dp.link_bw;
- clock = clock * (connector->display_info.bpc * 3) / 10;
- break;
+ return nv50_dp_mode_valid(connector, nv_encoder, mode, NULL);
default:
BUG();
return MODE_BAD;
}
- if ((mode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING)
- clock *= 2;
-
- if (clock < min_clock)
- return MODE_CLOCK_LOW;
-
- if (clock > max_clock)
- return MODE_CLOCK_HIGH;
-
- return MODE_OK;
+ return nouveau_conn_mode_clock_valid(mode, min_clock, max_clock,
+ NULL);
}
static struct drm_encoder *
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index de84fb4708c7..9e062c7adec8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -195,6 +195,11 @@ int nouveau_conn_atomic_get_property(struct drm_connector *,
const struct drm_connector_state *,
struct drm_property *, u64 *);
struct drm_display_mode *nouveau_conn_native_mode(struct drm_connector *);
+enum drm_mode_status
+nouveau_conn_mode_clock_valid(const struct drm_display_mode *,
+ const unsigned min_clock,
+ const unsigned max_clock,
+ unsigned *clock);
#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
extern int nouveau_backlight_init(struct drm_connector *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
index 15a3d40edf02..63b5c8cf9ae4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -181,8 +181,11 @@ nouveau_debugfs_pstate_set(struct file *file, const char __user *ubuf,
}
ret = pm_runtime_get_sync(drm->dev);
- if (ret < 0 && ret != -EACCES)
+ if (ret < 0 && ret != -EACCES) {
+ pm_runtime_put_autosuspend(drm->dev);
return ret;
+ }
+
ret = nvif_mthd(ctrl, NVIF_CONTROL_PSTATE_USER, &args, sizeof(args));
pm_runtime_put_autosuspend(drm->dev);
if (ret < 0)
@@ -217,7 +220,7 @@ static const struct nouveau_debugfs_files {
{"pstate", &nouveau_pstate_fops},
};
-int
+void
nouveau_drm_debugfs_init(struct drm_minor *minor)
{
struct nouveau_drm *drm = nouveau_drm(minor->dev);
@@ -240,12 +243,10 @@ nouveau_drm_debugfs_init(struct drm_minor *minor)
*/
dentry = debugfs_lookup("vbios.rom", minor->debugfs_root);
if (!dentry)
- return 0;
+ return;
d_inode(dentry)->i_size = drm->vbios.length;
dput(dentry);
-
- return 0;
}
int
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.h b/drivers/gpu/drm/nouveau/nouveau_debugfs.h
index 8909c010e8ea..77f0323b38ba 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.h
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.h
@@ -18,15 +18,13 @@ nouveau_debugfs(struct drm_device *dev)
return nouveau_drm(dev)->debugfs;
}
-extern int nouveau_drm_debugfs_init(struct drm_minor *);
+extern void nouveau_drm_debugfs_init(struct drm_minor *);
extern int nouveau_debugfs_init(struct nouveau_drm *);
extern void nouveau_debugfs_fini(struct nouveau_drm *);
#else
-static inline int
+static inline void
nouveau_drm_debugfs_init(struct drm_minor *minor)
-{
- return 0;
-}
+{}
static inline int
nouveau_debugfs_init(struct nouveau_drm *drm)
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 700817dc4fa0..496c4621cc78 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -31,6 +31,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -179,41 +180,164 @@ nouveau_display_vblank_init(struct drm_device *dev)
return 0;
}
+static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
+ .destroy = drm_gem_fb_destroy,
+ .create_handle = drm_gem_fb_create_handle,
+};
+
static void
-nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
+nouveau_decode_mod(struct nouveau_drm *drm,
+ uint64_t modifier,
+ uint32_t *tile_mode,
+ uint8_t *kind)
+{
+ BUG_ON(!tile_mode || !kind);
+
+ if (modifier == DRM_FORMAT_MOD_LINEAR) {
+ /* tile_mode will not be used in this case */
+ *tile_mode = 0;
+ *kind = 0;
+ } else {
+ /*
+ * Extract the block height and kind from the corresponding
+ * modifier fields. See drm_fourcc.h for details.
+ */
+ *tile_mode = (uint32_t)(modifier & 0xF);
+ *kind = (uint8_t)((modifier >> 12) & 0xFF);
+
+ if (drm->client.device.info.chipset >= 0xc0)
+ *tile_mode <<= 4;
+ }
+}
+
+void
+nouveau_framebuffer_get_layout(struct drm_framebuffer *fb,
+ uint32_t *tile_mode,
+ uint8_t *kind)
{
- struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
+ if (fb->flags & DRM_MODE_FB_MODIFIERS) {
+ struct nouveau_drm *drm = nouveau_drm(fb->dev);
- if (fb->nvbo)
- drm_gem_object_put_unlocked(&fb->nvbo->bo.base);
+ nouveau_decode_mod(drm, fb->modifier, tile_mode, kind);
+ } else {
+ const struct nouveau_bo *nvbo = nouveau_gem_object(fb->obj[0]);
- drm_framebuffer_cleanup(drm_fb);
- kfree(fb);
+ *tile_mode = nvbo->mode;
+ *kind = nvbo->kind;
+ }
}
static int
-nouveau_user_framebuffer_create_handle(struct drm_framebuffer *drm_fb,
- struct drm_file *file_priv,
- unsigned int *handle)
+nouveau_validate_decode_mod(struct nouveau_drm *drm,
+ uint64_t modifier,
+ uint32_t *tile_mode,
+ uint8_t *kind)
{
- struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
+ struct nouveau_display *disp = nouveau_display(drm->dev);
+ int mod;
+
+ if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
+ return -EINVAL;
+ }
- return drm_gem_handle_create(file_priv, &fb->nvbo->bo.base, handle);
+ BUG_ON(!disp->format_modifiers);
+
+ for (mod = 0;
+ (disp->format_modifiers[mod] != DRM_FORMAT_MOD_INVALID) &&
+ (disp->format_modifiers[mod] != modifier);
+ mod++);
+
+ if (disp->format_modifiers[mod] == DRM_FORMAT_MOD_INVALID)
+ return -EINVAL;
+
+ nouveau_decode_mod(drm, modifier, tile_mode, kind);
+
+ return 0;
}
-static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
- .destroy = nouveau_user_framebuffer_destroy,
- .create_handle = nouveau_user_framebuffer_create_handle,
-};
+static inline uint32_t
+nouveau_get_width_in_blocks(uint32_t stride)
+{
+ /* GOBs per block in the x direction is always one, and GOBs are
+ * 64 bytes wide
+ */
+ static const uint32_t log_block_width = 6;
+
+ return (stride + (1 << log_block_width) - 1) >> log_block_width;
+}
+
+static inline uint32_t
+nouveau_get_height_in_blocks(struct nouveau_drm *drm,
+ uint32_t height,
+ uint32_t log_block_height_in_gobs)
+{
+ uint32_t log_gob_height;
+ uint32_t log_block_height;
+
+ BUG_ON(drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA);
+
+ if (drm->client.device.info.family < NV_DEVICE_INFO_V0_FERMI)
+ log_gob_height = 2;
+ else
+ log_gob_height = 3;
+
+ log_block_height = log_block_height_in_gobs + log_gob_height;
+
+ return (height + (1 << log_block_height) - 1) >> log_block_height;
+}
+
+static int
+nouveau_check_bl_size(struct nouveau_drm *drm, struct nouveau_bo *nvbo,
+ uint32_t offset, uint32_t stride, uint32_t h,
+ uint32_t tile_mode)
+{
+ uint32_t gob_size, bw, bh;
+ uint64_t bl_size;
+
+ BUG_ON(drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA);
+
+ if (drm->client.device.info.chipset >= 0xc0) {
+ if (tile_mode & 0xF)
+ return -EINVAL;
+ tile_mode >>= 4;
+ }
+
+ if (tile_mode & 0xFFFFFFF0)
+ return -EINVAL;
+
+ if (drm->client.device.info.family < NV_DEVICE_INFO_V0_FERMI)
+ gob_size = 256;
+ else
+ gob_size = 512;
+
+ bw = nouveau_get_width_in_blocks(stride);
+ bh = nouveau_get_height_in_blocks(drm, h, tile_mode);
+
+ bl_size = bw * bh * (1 << tile_mode) * gob_size;
+
+ DRM_DEBUG_KMS("offset=%u stride=%u h=%u tile_mode=0x%02x bw=%u bh=%u gob_size=%u bl_size=%llu size=%lu\n",
+ offset, stride, h, tile_mode, bw, bh, gob_size, bl_size,
+ nvbo->bo.mem.size);
+
+ if (bl_size + offset > nvbo->bo.mem.size)
+ return -ERANGE;
+
+ return 0;
+}
int
nouveau_framebuffer_new(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd,
- struct nouveau_bo *nvbo,
- struct nouveau_framebuffer **pfb)
+ struct drm_gem_object *gem,
+ struct drm_framebuffer **pfb)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_framebuffer *fb;
+ struct nouveau_bo *nvbo = nouveau_gem_object(gem);
+ struct drm_framebuffer *fb;
+ const struct drm_format_info *info;
+ unsigned int width, height, i;
+ uint32_t tile_mode;
+ uint8_t kind;
int ret;
/* YUV overlays have special requirements pre-NV50 */
@@ -236,13 +360,50 @@ nouveau_framebuffer_new(struct drm_device *dev,
return -EINVAL;
}
+ if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
+ if (nouveau_validate_decode_mod(drm, mode_cmd->modifier[0],
+ &tile_mode, &kind)) {
+ DRM_DEBUG_KMS("Unsupported modifier: 0x%llx\n",
+ mode_cmd->modifier[0]);
+ return -EINVAL;
+ }
+ } else {
+ tile_mode = nvbo->mode;
+ kind = nvbo->kind;
+ }
+
+ info = drm_get_format_info(dev, mode_cmd);
+
+ for (i = 0; i < info->num_planes; i++) {
+ width = drm_format_info_plane_width(info,
+ mode_cmd->width,
+ i);
+ height = drm_format_info_plane_height(info,
+ mode_cmd->height,
+ i);
+
+ if (kind) {
+ ret = nouveau_check_bl_size(drm, nvbo,
+ mode_cmd->offsets[i],
+ mode_cmd->pitches[i],
+ height, tile_mode);
+ if (ret)
+ return ret;
+ } else {
+ uint32_t size = mode_cmd->pitches[i] * height;
+
+ if (size + mode_cmd->offsets[i] > nvbo->bo.mem.size)
+ return -ERANGE;
+ }
+ }
+
if (!(fb = *pfb = kzalloc(sizeof(*fb), GFP_KERNEL)))
return -ENOMEM;
- drm_helper_mode_fill_fb_struct(dev, &fb->base, mode_cmd);
- fb->nvbo = nvbo;
+ drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
+ fb->obj[0] = gem;
- ret = drm_framebuffer_init(dev, &fb->base, &nouveau_framebuffer_funcs);
+ ret = drm_framebuffer_init(dev, fb, &nouveau_framebuffer_funcs);
if (ret)
kfree(fb);
return ret;
@@ -253,19 +414,17 @@ nouveau_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
- struct nouveau_framebuffer *fb;
- struct nouveau_bo *nvbo;
+ struct drm_framebuffer *fb;
struct drm_gem_object *gem;
int ret;
gem = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
if (!gem)
return ERR_PTR(-ENOENT);
- nvbo = nouveau_gem_object(gem);
- ret = nouveau_framebuffer_new(dev, mode_cmd, nvbo, &fb);
+ ret = nouveau_framebuffer_new(dev, mode_cmd, gem, &fb);
if (ret == 0)
- return &fb->base;
+ return fb;
drm_gem_object_put_unlocked(gem);
return ERR_PTR(ret);
@@ -517,6 +676,7 @@ nouveau_display_create(struct drm_device *dev)
dev->mode_config.preferred_depth = 24;
dev->mode_config.prefer_shadow = 1;
+ dev->mode_config.allow_fb_modifiers = true;
if (drm->client.device.info.chipset < 0x11)
dev->mode_config.async_page_flip = false;
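
As a rough worked example of the size check in nouveau_check_bl_size() above (illustrative numbers only, not taken from this patch): a 1920x1080, 4-byte-per-pixel framebuffer on a Fermi-class chip has a 7680-byte stride; assume blocks 4 GOBs tall, i.e. the value left in tile_mode after the shift-down at the top of the function is 2. Then:

	bw      = DIV_ROUND_UP(7680, 64)     = 120       (GOBs are 64 bytes wide)
	bh      = DIV_ROUND_UP(1080, 4 * 8)  = 34        (8 rows per GOB on Fermi+)
	bl_size = 120 * 34 * (1 << 2) * 512  = 8355840   (gob_size is 512 on Fermi+)

so any layout whose offset plus roughly 8 MiB overruns the backing object's size is rejected with -ERANGE before the framebuffer is created.
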
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index de004018ab5c..6e0d900441d6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -8,26 +8,11 @@
#include <drm/drm_framebuffer.h>
-struct nouveau_framebuffer {
- struct drm_framebuffer base;
- struct nouveau_bo *nvbo;
- struct nouveau_vma *vma;
- u32 r_handle;
- u32 r_format;
- u32 r_pitch;
- struct nvif_object h_base[4];
- struct nvif_object h_core;
-};
-
-static inline struct nouveau_framebuffer *
-nouveau_framebuffer(struct drm_framebuffer *fb)
-{
- return container_of(fb, struct nouveau_framebuffer, base);
-}
-
-int nouveau_framebuffer_new(struct drm_device *,
- const struct drm_mode_fb_cmd2 *,
- struct nouveau_bo *, struct nouveau_framebuffer **);
+int
+nouveau_framebuffer_new(struct drm_device *dev,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *gem,
+ struct drm_framebuffer **pfb);
struct nouveau_display {
void *priv;
@@ -47,6 +32,8 @@ struct nouveau_display {
struct drm_property *color_vibrance_property;
struct drm_atomic_state *suspend;
+
+ const u64 *format_modifiers;
};
static inline struct nouveau_display *
@@ -75,6 +62,10 @@ int nouveau_display_dumb_map_offset(struct drm_file *, struct drm_device *,
void nouveau_hdmi_mode_set(struct drm_encoder *, struct drm_display_mode *);
+void
+nouveau_framebuffer_get_layout(struct drm_framebuffer *fb, uint32_t *tile_mode,
+ uint8_t *kind);
+
struct drm_framebuffer *
nouveau_user_framebuffer_create(struct drm_device *, struct drm_file *,
const struct drm_mode_fb_cmd2 *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index ad89e09a0be3..e5c230d9ae24 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -25,12 +25,14 @@
#include "nouveau_dma.h"
#include "nouveau_mem.h"
#include "nouveau_bo.h"
+#include "nouveau_svm.h"
#include <nvif/class.h>
#include <nvif/object.h>
#include <nvif/if000c.h>
#include <nvif/if500b.h>
#include <nvif/if900b.h>
+#include <nvif/if000c.h>
#include <linux/sched/mm.h>
#include <linux/hmm.h>
@@ -54,66 +56,69 @@ enum nouveau_aper {
typedef int (*nouveau_migrate_copy_t)(struct nouveau_drm *drm, u64 npages,
enum nouveau_aper, u64 dst_addr,
enum nouveau_aper, u64 src_addr);
+typedef int (*nouveau_clear_page_t)(struct nouveau_drm *drm, u32 length,
+ enum nouveau_aper, u64 dst_addr);
struct nouveau_dmem_chunk {
struct list_head list;
struct nouveau_bo *bo;
struct nouveau_drm *drm;
- unsigned long pfn_first;
unsigned long callocated;
- unsigned long bitmap[BITS_TO_LONGS(DMEM_CHUNK_NPAGES)];
- spinlock_t lock;
+ struct dev_pagemap pagemap;
};
struct nouveau_dmem_migrate {
nouveau_migrate_copy_t copy_func;
+ nouveau_clear_page_t clear_func;
struct nouveau_channel *chan;
};
struct nouveau_dmem {
struct nouveau_drm *drm;
- struct dev_pagemap pagemap;
struct nouveau_dmem_migrate migrate;
- struct list_head chunk_free;
- struct list_head chunk_full;
- struct list_head chunk_empty;
+ struct list_head chunks;
struct mutex mutex;
+ struct page *free_pages;
+ spinlock_t lock;
};
-static inline struct nouveau_dmem *page_to_dmem(struct page *page)
+static struct nouveau_dmem_chunk *nouveau_page_to_chunk(struct page *page)
{
- return container_of(page->pgmap, struct nouveau_dmem, pagemap);
+ return container_of(page->pgmap, struct nouveau_dmem_chunk, pagemap);
+}
+
+static struct nouveau_drm *page_to_drm(struct page *page)
+{
+ struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
+
+ return chunk->drm;
}
-static unsigned long nouveau_dmem_page_addr(struct page *page)
+unsigned long nouveau_dmem_page_addr(struct page *page)
{
- struct nouveau_dmem_chunk *chunk = page->zone_device_data;
- unsigned long idx = page_to_pfn(page) - chunk->pfn_first;
+ struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
+ unsigned long off = (page_to_pfn(page) << PAGE_SHIFT) -
+ chunk->pagemap.res.start;
- return (idx << PAGE_SHIFT) + chunk->bo->bo.offset;
+ return chunk->bo->bo.offset + off;
}
static void nouveau_dmem_page_free(struct page *page)
{
- struct nouveau_dmem_chunk *chunk = page->zone_device_data;
- unsigned long idx = page_to_pfn(page) - chunk->pfn_first;
+ struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
+ struct nouveau_dmem *dmem = chunk->drm->dmem;
+
+ spin_lock(&dmem->lock);
+ page->zone_device_data = dmem->free_pages;
+ dmem->free_pages = page;
- /*
- * FIXME:
- *
- * This is really a bad example, we need to overhaul nouveau memory
- * management to be more page focus and allow lighter locking scheme
- * to be use in the process.
- */
- spin_lock(&chunk->lock);
- clear_bit(idx, chunk->bitmap);
WARN_ON(!chunk->callocated);
chunk->callocated--;
/*
* FIXME when chunk->callocated reaches 0 we should add the chunk to
* a reclaim list so that it can be freed in case of memory pressure.
*/
- spin_unlock(&chunk->lock);
+ spin_unlock(&dmem->lock);
}
static void nouveau_dmem_fence_done(struct nouveau_fence **fence)
@@ -165,8 +170,8 @@ error_free_page:
static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
{
- struct nouveau_dmem *dmem = page_to_dmem(vmf->page);
- struct nouveau_drm *drm = dmem->drm;
+ struct nouveau_drm *drm = page_to_drm(vmf->page);
+ struct nouveau_dmem *dmem = drm->dmem;
struct nouveau_fence *fence;
unsigned long src = 0, dst = 0;
dma_addr_t dma_addr = 0;
@@ -209,131 +214,105 @@ static const struct dev_pagemap_ops nouveau_dmem_pagemap_ops = {
};
static int
-nouveau_dmem_chunk_alloc(struct nouveau_drm *drm)
+nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage)
{
struct nouveau_dmem_chunk *chunk;
+ struct resource *res;
+ struct page *page;
+ void *ptr;
+ unsigned long i, pfn_first;
int ret;
- if (drm->dmem == NULL)
- return -EINVAL;
-
- mutex_lock(&drm->dmem->mutex);
- chunk = list_first_entry_or_null(&drm->dmem->chunk_empty,
- struct nouveau_dmem_chunk,
- list);
+ chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
if (chunk == NULL) {
- mutex_unlock(&drm->dmem->mutex);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out;
}
- list_del(&chunk->list);
- mutex_unlock(&drm->dmem->mutex);
+ /* Allocate unused physical address space for device private pages. */
+ res = request_free_mem_region(&iomem_resource, DMEM_CHUNK_SIZE,
+ "nouveau_dmem");
+ if (IS_ERR(res)) {
+ ret = PTR_ERR(res);
+ goto out_free;
+ }
+
+ chunk->drm = drm;
+ chunk->pagemap.type = MEMORY_DEVICE_PRIVATE;
+ chunk->pagemap.res = *res;
+ chunk->pagemap.ops = &nouveau_dmem_pagemap_ops;
+ chunk->pagemap.owner = drm->dev;
ret = nouveau_bo_new(&drm->client, DMEM_CHUNK_SIZE, 0,
TTM_PL_FLAG_VRAM, 0, 0, NULL, NULL,
&chunk->bo);
if (ret)
- goto out;
+ goto out_release;
ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
- if (ret) {
- nouveau_bo_ref(NULL, &chunk->bo);
- goto out;
- }
+ if (ret)
+ goto out_bo_free;
- bitmap_zero(chunk->bitmap, DMEM_CHUNK_NPAGES);
- spin_lock_init(&chunk->lock);
+ ptr = memremap_pages(&chunk->pagemap, numa_node_id());
+ if (IS_ERR(ptr)) {
+ ret = PTR_ERR(ptr);
+ goto out_bo_unpin;
+ }
-out:
mutex_lock(&drm->dmem->mutex);
- if (chunk->bo)
- list_add(&chunk->list, &drm->dmem->chunk_empty);
- else
- list_add_tail(&chunk->list, &drm->dmem->chunk_empty);
+ list_add(&chunk->list, &drm->dmem->chunks);
mutex_unlock(&drm->dmem->mutex);
- return ret;
-}
-
-static struct nouveau_dmem_chunk *
-nouveau_dmem_chunk_first_free_locked(struct nouveau_drm *drm)
-{
- struct nouveau_dmem_chunk *chunk;
-
- chunk = list_first_entry_or_null(&drm->dmem->chunk_free,
- struct nouveau_dmem_chunk,
- list);
- if (chunk)
- return chunk;
-
- chunk = list_first_entry_or_null(&drm->dmem->chunk_empty,
- struct nouveau_dmem_chunk,
- list);
- if (chunk->bo)
- return chunk;
-
- return NULL;
-}
-
-static int
-nouveau_dmem_pages_alloc(struct nouveau_drm *drm,
- unsigned long npages,
- unsigned long *pages)
-{
- struct nouveau_dmem_chunk *chunk;
- unsigned long c;
- int ret;
-
- memset(pages, 0xff, npages * sizeof(*pages));
-
- mutex_lock(&drm->dmem->mutex);
- for (c = 0; c < npages;) {
- unsigned long i;
-
- chunk = nouveau_dmem_chunk_first_free_locked(drm);
- if (chunk == NULL) {
- mutex_unlock(&drm->dmem->mutex);
- ret = nouveau_dmem_chunk_alloc(drm);
- if (ret) {
- if (c)
- return 0;
- return ret;
- }
- mutex_lock(&drm->dmem->mutex);
- continue;
- }
-
- spin_lock(&chunk->lock);
- i = find_first_zero_bit(chunk->bitmap, DMEM_CHUNK_NPAGES);
- while (i < DMEM_CHUNK_NPAGES && c < npages) {
- pages[c] = chunk->pfn_first + i;
- set_bit(i, chunk->bitmap);
- chunk->callocated++;
- c++;
-
- i = find_next_zero_bit(chunk->bitmap,
- DMEM_CHUNK_NPAGES, i);
- }
- spin_unlock(&chunk->lock);
+ pfn_first = chunk->pagemap.res.start >> PAGE_SHIFT;
+ page = pfn_to_page(pfn_first);
+ spin_lock(&drm->dmem->lock);
+ for (i = 0; i < DMEM_CHUNK_NPAGES - 1; ++i, ++page) {
+ page->zone_device_data = drm->dmem->free_pages;
+ drm->dmem->free_pages = page;
}
- mutex_unlock(&drm->dmem->mutex);
+ *ppage = page;
+ chunk->callocated++;
+ spin_unlock(&drm->dmem->lock);
+
+ NV_INFO(drm, "DMEM: registered %ldMB of device memory\n",
+ DMEM_CHUNK_SIZE >> 20);
return 0;
+
+out_bo_unpin:
+ nouveau_bo_unpin(chunk->bo);
+out_bo_free:
+ nouveau_bo_ref(NULL, &chunk->bo);
+out_release:
+ release_mem_region(chunk->pagemap.res.start,
+ resource_size(&chunk->pagemap.res));
+out_free:
+ kfree(chunk);
+out:
+ return ret;
}
static struct page *
nouveau_dmem_page_alloc_locked(struct nouveau_drm *drm)
{
- unsigned long pfns[1];
- struct page *page;
+ struct nouveau_dmem_chunk *chunk;
+ struct page *page = NULL;
int ret;
- /* FIXME stop all the miss-match API ... */
- ret = nouveau_dmem_pages_alloc(drm, 1, pfns);
- if (ret)
- return NULL;
+ spin_lock(&drm->dmem->lock);
+ if (drm->dmem->free_pages) {
+ page = drm->dmem->free_pages;
+ drm->dmem->free_pages = page->zone_device_data;
+ chunk = nouveau_page_to_chunk(page);
+ chunk->callocated++;
+ spin_unlock(&drm->dmem->lock);
+ } else {
+ spin_unlock(&drm->dmem->lock);
+ ret = nouveau_dmem_chunk_alloc(drm, &page);
+ if (ret)
+ return NULL;
+ }
- page = pfn_to_page(pfns[0]);
get_page(page);
lock_page(page);
return page;
@@ -356,12 +335,7 @@ nouveau_dmem_resume(struct nouveau_drm *drm)
return;
mutex_lock(&drm->dmem->mutex);
- list_for_each_entry (chunk, &drm->dmem->chunk_free, list) {
- ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
- /* FIXME handle pin failure */
- WARN_ON(ret);
- }
- list_for_each_entry (chunk, &drm->dmem->chunk_full, list) {
+ list_for_each_entry(chunk, &drm->dmem->chunks, list) {
ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
/* FIXME handle pin failure */
WARN_ON(ret);
@@ -378,12 +352,8 @@ nouveau_dmem_suspend(struct nouveau_drm *drm)
return;
mutex_lock(&drm->dmem->mutex);
- list_for_each_entry (chunk, &drm->dmem->chunk_free, list) {
- nouveau_bo_unpin(chunk->bo);
- }
- list_for_each_entry (chunk, &drm->dmem->chunk_full, list) {
+ list_for_each_entry(chunk, &drm->dmem->chunks, list)
nouveau_bo_unpin(chunk->bo);
- }
mutex_unlock(&drm->dmem->mutex);
}
@@ -397,15 +367,13 @@ nouveau_dmem_fini(struct nouveau_drm *drm)
mutex_lock(&drm->dmem->mutex);
- WARN_ON(!list_empty(&drm->dmem->chunk_free));
- WARN_ON(!list_empty(&drm->dmem->chunk_full));
-
- list_for_each_entry_safe (chunk, tmp, &drm->dmem->chunk_empty, list) {
- if (chunk->bo) {
- nouveau_bo_unpin(chunk->bo);
- nouveau_bo_ref(NULL, &chunk->bo);
- }
+ list_for_each_entry_safe(chunk, tmp, &drm->dmem->chunks, list) {
+ nouveau_bo_unpin(chunk->bo);
+ nouveau_bo_ref(NULL, &chunk->bo);
list_del(&chunk->list);
+ memunmap_pages(&chunk->pagemap);
+ release_mem_region(chunk->pagemap.res.start,
+ resource_size(&chunk->pagemap.res));
kfree(chunk);
}
@@ -472,6 +440,52 @@ nvc0b5_migrate_copy(struct nouveau_drm *drm, u64 npages,
}
static int
+nvc0b5_migrate_clear(struct nouveau_drm *drm, u32 length,
+ enum nouveau_aper dst_aper, u64 dst_addr)
+{
+ struct nouveau_channel *chan = drm->dmem->migrate.chan;
+ u32 launch_dma = (1 << 10) /* REMAP_ENABLE_TRUE */ |
+ (1 << 8) /* DST_MEMORY_LAYOUT_PITCH. */ |
+ (1 << 7) /* SRC_MEMORY_LAYOUT_PITCH. */ |
+ (1 << 2) /* FLUSH_ENABLE_TRUE. */ |
+ (2 << 0) /* DATA_TRANSFER_TYPE_NON_PIPELINED. */;
+ u32 remap = (4 << 0) /* DST_X_CONST_A */ |
+ (5 << 4) /* DST_Y_CONST_B */ |
+ (3 << 16) /* COMPONENT_SIZE_FOUR */ |
+ (1 << 24) /* NUM_DST_COMPONENTS_TWO */;
+ int ret;
+
+ ret = RING_SPACE(chan, 12);
+ if (ret)
+ return ret;
+
+ switch (dst_aper) {
+ case NOUVEAU_APER_VRAM:
+ BEGIN_IMC0(chan, NvSubCopy, 0x0264, 0);
+ break;
+ case NOUVEAU_APER_HOST:
+ BEGIN_IMC0(chan, NvSubCopy, 0x0264, 1);
+ break;
+ default:
+ return -EINVAL;
+ }
+ launch_dma |= 0x00002000; /* DST_TYPE_PHYSICAL. */
+
+ BEGIN_NVC0(chan, NvSubCopy, 0x0700, 3);
+ OUT_RING(chan, 0);
+ OUT_RING(chan, 0);
+ OUT_RING(chan, remap);
+ BEGIN_NVC0(chan, NvSubCopy, 0x0408, 2);
+ OUT_RING(chan, upper_32_bits(dst_addr));
+ OUT_RING(chan, lower_32_bits(dst_addr));
+ BEGIN_NVC0(chan, NvSubCopy, 0x0418, 1);
+ OUT_RING(chan, length >> 3);
+ BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
+ OUT_RING(chan, launch_dma);
+ return 0;
+}
+
+static int
nouveau_dmem_migrate_init(struct nouveau_drm *drm)
{
switch (drm->ttm.copy.oclass) {
@@ -480,6 +494,7 @@ nouveau_dmem_migrate_init(struct nouveau_drm *drm)
case VOLTA_DMA_COPY_A:
case TURING_DMA_COPY_A:
drm->dmem->migrate.copy_func = nvc0b5_migrate_copy;
+ drm->dmem->migrate.clear_func = nvc0b5_migrate_clear;
drm->dmem->migrate.chan = drm->ttm.chan;
return 0;
default:
@@ -491,9 +506,6 @@ nouveau_dmem_migrate_init(struct nouveau_drm *drm)
void
nouveau_dmem_init(struct nouveau_drm *drm)
{
- struct device *device = drm->dev->dev;
- struct resource *res;
- unsigned long i, size, pfn_first;
int ret;
/* This only makes sense on PASCAL or newer */
@@ -505,84 +517,53 @@ nouveau_dmem_init(struct nouveau_drm *drm)
drm->dmem->drm = drm;
mutex_init(&drm->dmem->mutex);
- INIT_LIST_HEAD(&drm->dmem->chunk_free);
- INIT_LIST_HEAD(&drm->dmem->chunk_full);
- INIT_LIST_HEAD(&drm->dmem->chunk_empty);
-
- size = ALIGN(drm->client.device.info.ram_user, DMEM_CHUNK_SIZE);
+ INIT_LIST_HEAD(&drm->dmem->chunks);
+ mutex_init(&drm->dmem->mutex);
+ spin_lock_init(&drm->dmem->lock);
/* Initialize migration dma helpers before registering memory */
ret = nouveau_dmem_migrate_init(drm);
- if (ret)
- goto out_free;
-
- /*
- * FIXME we need some kind of policy to decide how much VRAM we
- * want to register with HMM. For now just register everything
- * and latter if we want to do thing like over commit then we
- * could revisit this.
- */
- res = devm_request_free_mem_region(device, &iomem_resource, size);
- if (IS_ERR(res))
- goto out_free;
- drm->dmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
- drm->dmem->pagemap.res = *res;
- drm->dmem->pagemap.ops = &nouveau_dmem_pagemap_ops;
- drm->dmem->pagemap.owner = drm->dev;
- if (IS_ERR(devm_memremap_pages(device, &drm->dmem->pagemap)))
- goto out_free;
-
- pfn_first = res->start >> PAGE_SHIFT;
- for (i = 0; i < (size / DMEM_CHUNK_SIZE); ++i) {
- struct nouveau_dmem_chunk *chunk;
- struct page *page;
- unsigned long j;
-
- chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
- if (chunk == NULL) {
- nouveau_dmem_fini(drm);
- return;
- }
-
- chunk->drm = drm;
- chunk->pfn_first = pfn_first + (i * DMEM_CHUNK_NPAGES);
- list_add_tail(&chunk->list, &drm->dmem->chunk_empty);
-
- page = pfn_to_page(chunk->pfn_first);
- for (j = 0; j < DMEM_CHUNK_NPAGES; ++j, ++page)
- page->zone_device_data = chunk;
+ if (ret) {
+ kfree(drm->dmem);
+ drm->dmem = NULL;
}
-
- NV_INFO(drm, "DMEM: registered %ldMB of device memory\n", size >> 20);
- return;
-out_free:
- kfree(drm->dmem);
- drm->dmem = NULL;
}
static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
- unsigned long src, dma_addr_t *dma_addr)
+ unsigned long src, dma_addr_t *dma_addr, u64 *pfn)
{
struct device *dev = drm->dev->dev;
struct page *dpage, *spage;
+ unsigned long paddr;
spage = migrate_pfn_to_page(src);
- if (!spage || !(src & MIGRATE_PFN_MIGRATE))
+ if (!(src & MIGRATE_PFN_MIGRATE))
goto out;
dpage = nouveau_dmem_page_alloc_locked(drm);
if (!dpage)
- return 0;
-
- *dma_addr = dma_map_page(dev, spage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(dev, *dma_addr))
- goto out_free_page;
+ goto out;
- if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_VRAM,
- nouveau_dmem_page_addr(dpage), NOUVEAU_APER_HOST,
- *dma_addr))
- goto out_dma_unmap;
+ paddr = nouveau_dmem_page_addr(dpage);
+ if (spage) {
+ *dma_addr = dma_map_page(dev, spage, 0, page_size(spage),
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, *dma_addr))
+ goto out_free_page;
+ if (drm->dmem->migrate.copy_func(drm, page_size(spage),
+ NOUVEAU_APER_VRAM, paddr, NOUVEAU_APER_HOST, *dma_addr))
+ goto out_dma_unmap;
+ } else {
+ *dma_addr = DMA_MAPPING_ERROR;
+ if (drm->dmem->migrate.clear_func(drm, page_size(dpage),
+ NOUVEAU_APER_VRAM, paddr))
+ goto out_free_page;
+ }
+ *pfn = NVIF_VMM_PFNMAP_V0_V | NVIF_VMM_PFNMAP_V0_VRAM |
+ ((paddr >> PAGE_SHIFT) << NVIF_VMM_PFNMAP_V0_ADDR_SHIFT);
+ if (src & MIGRATE_PFN_WRITE)
+ *pfn |= NVIF_VMM_PFNMAP_V0_W;
return migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
out_dma_unmap:
@@ -590,19 +571,21 @@ out_dma_unmap:
out_free_page:
nouveau_dmem_page_free_locked(drm, dpage);
out:
+ *pfn = NVIF_VMM_PFNMAP_V0_NONE;
return 0;
}
static void nouveau_dmem_migrate_chunk(struct nouveau_drm *drm,
- struct migrate_vma *args, dma_addr_t *dma_addrs)
+ struct nouveau_svmm *svmm, struct migrate_vma *args,
+ dma_addr_t *dma_addrs, u64 *pfns)
{
struct nouveau_fence *fence;
unsigned long addr = args->start, nr_dma = 0, i;
for (i = 0; addr < args->end; i++) {
args->dst[i] = nouveau_dmem_migrate_copy_one(drm, args->src[i],
- dma_addrs + nr_dma);
- if (args->dst[i])
+ dma_addrs + nr_dma, pfns + i);
+ if (!dma_mapping_error(drm->dev->dev, dma_addrs[nr_dma]))
nr_dma++;
addr += PAGE_SIZE;
}
@@ -610,20 +593,18 @@ static void nouveau_dmem_migrate_chunk(struct nouveau_drm *drm,
nouveau_fence_new(drm->dmem->migrate.chan, false, &fence);
migrate_vma_pages(args);
nouveau_dmem_fence_done(&fence);
+ nouveau_pfns_map(svmm, args->vma->vm_mm, args->start, pfns, i);
while (nr_dma--) {
dma_unmap_page(drm->dev->dev, dma_addrs[nr_dma], PAGE_SIZE,
DMA_BIDIRECTIONAL);
}
- /*
- * FIXME optimization: update GPU page table to point to newly migrated
- * memory.
- */
migrate_vma_finalize(args);
}
int
nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
+ struct nouveau_svmm *svmm,
struct vm_area_struct *vma,
unsigned long start,
unsigned long end)
@@ -635,9 +616,13 @@ nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
.vma = vma,
.start = start,
};
- unsigned long c, i;
+ unsigned long i;
+ u64 *pfns;
int ret = -ENOMEM;
+ if (drm->dmem == NULL)
+ return -ENODEV;
+
args.src = kcalloc(max, sizeof(*args.src), GFP_KERNEL);
if (!args.src)
goto out;
@@ -649,19 +634,25 @@ nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
if (!dma_addrs)
goto out_free_dst;
- for (i = 0; i < npages; i += c) {
- c = min(SG_MAX_SINGLE_ALLOC, npages);
- args.end = start + (c << PAGE_SHIFT);
+ pfns = nouveau_pfns_alloc(max);
+ if (!pfns)
+ goto out_free_dma;
+
+ for (i = 0; i < npages; i += max) {
+ args.end = start + (max << PAGE_SHIFT);
ret = migrate_vma_setup(&args);
if (ret)
- goto out_free_dma;
+ goto out_free_pfns;
if (args.cpages)
- nouveau_dmem_migrate_chunk(drm, &args, dma_addrs);
+ nouveau_dmem_migrate_chunk(drm, svmm, &args, dma_addrs,
+ pfns);
args.start = args.end;
}
ret = 0;
+out_free_pfns:
+ nouveau_pfns_free(pfns);
out_free_dma:
kfree(dma_addrs);
out_free_dst:
@@ -671,28 +662,3 @@ out_free_src:
out:
return ret;
}
-
-void
-nouveau_dmem_convert_pfn(struct nouveau_drm *drm,
- struct hmm_range *range)
-{
- unsigned long i, npages;
-
- npages = (range->end - range->start) >> PAGE_SHIFT;
- for (i = 0; i < npages; ++i) {
- struct page *page;
- uint64_t addr;
-
- page = hmm_device_entry_to_page(range, range->pfns[i]);
- if (page == NULL)
- continue;
-
- if (!is_device_private_page(page))
- continue;
-
- addr = nouveau_dmem_page_addr(page);
- range->pfns[i] &= ((1UL << range->pfn_shift) - 1);
- range->pfns[i] |= (addr >> PAGE_SHIFT) << range->pfn_shift;
- range->pfns[i] |= NVIF_VMM_PFNMAP_V0_VRAM;
- }
-}
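
The chunk rework above replaces the per-chunk bitmap with a simple LIFO of free pages threaded through page->zone_device_data and guarded by dmem->lock. A toy model of that free list, outside the kernel; the names and types are simplified stand-ins, not driver API:

#include <stddef.h>

struct toy_page { void *zone_device_data; };

static struct toy_page *free_pages;            /* plays the role of dmem->free_pages */

static void toy_page_free(struct toy_page *p)  /* cf. nouveau_dmem_page_free() */
{
	p->zone_device_data = free_pages;
	free_pages = p;
}

static struct toy_page *toy_page_alloc(void)   /* cf. nouveau_dmem_page_alloc_locked() */
{
	struct toy_page *p = free_pages;
	if (p)
		free_pages = p->zone_device_data;
	return p;                              /* NULL means: go allocate a new chunk */
}
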
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.h b/drivers/gpu/drm/nouveau/nouveau_dmem.h
index 92394be5d649..64da5d3635c8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.h
@@ -25,6 +25,7 @@
struct drm_device;
struct drm_file;
struct nouveau_drm;
+struct nouveau_svmm;
struct hmm_range;
#if IS_ENABLED(CONFIG_DRM_NOUVEAU_SVM)
@@ -34,12 +35,12 @@ void nouveau_dmem_suspend(struct nouveau_drm *);
void nouveau_dmem_resume(struct nouveau_drm *);
int nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
+ struct nouveau_svmm *svmm,
struct vm_area_struct *vma,
unsigned long start,
unsigned long end);
+unsigned long nouveau_dmem_page_addr(struct page *page);
-void nouveau_dmem_convert_pfn(struct nouveau_drm *drm,
- struct hmm_range *range);
#else /* IS_ENABLED(CONFIG_DRM_NOUVEAU_SVM) */
static inline void nouveau_dmem_init(struct nouveau_drm *drm) {}
static inline void nouveau_dmem_fini(struct nouveau_drm *drm) {}
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index 2674f1587457..8a0f7994e1ae 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -98,3 +98,34 @@ nouveau_dp_detect(struct nouveau_encoder *nv_encoder)
return NOUVEAU_DP_SST;
return ret;
}
+
+/* TODO:
+ * - Use the minimum possible BPC here, once we add support for the max bpc
+ * property.
+ * - Validate the mode against downstream port caps (see
+ * drm_dp_downstream_max_clock())
+ * - Validate against the DP caps advertised by the GPU (we don't check these
+ * yet)
+ */
+enum drm_mode_status
+nv50_dp_mode_valid(struct drm_connector *connector,
+ struct nouveau_encoder *outp,
+ const struct drm_display_mode *mode,
+ unsigned *out_clock)
+{
+ const unsigned min_clock = 25000;
+ unsigned max_clock, clock;
+ enum drm_mode_status ret;
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE && !outp->caps.dp_interlace)
+ return MODE_NO_INTERLACE;
+
+ max_clock = outp->dp.link_nr * outp->dp.link_bw;
+ clock = mode->clock * (connector->display_info.bpc * 3) / 10;
+
+ ret = nouveau_conn_mode_clock_valid(mode, min_clock, max_clock,
+ &clock);
+ if (out_clock)
+ *out_clock = clock;
+ return ret;
+}
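
nv50_dp_mode_valid() above reduces to an arithmetic check: the link can carry link_nr * link_bw, and the mode demands pixel clock times bits per pixel (bpc * 3) scaled by 1/10, the exact expression in the hunk. The sketch below mirrors that comparison; the unit convention for link_bw is whatever nouveau stores internally, so the numbers in main() are illustrative only.

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the feasibility test in nv50_dp_mode_valid(); names are stand-ins. */
static bool dp_mode_fits(unsigned int pixel_khz, unsigned int bpc,
                         unsigned int link_nr, unsigned int link_bw,
                         unsigned int min_clock)
{
        unsigned int demand = pixel_khz * (bpc * 3) / 10;   /* as in the hunk */
        unsigned int capacity = link_nr * link_bw;

        return demand >= min_clock && demand <= capacity;
}

int main(void)
{
        /* Illustrative numbers only: a 148500 kHz mode at 8 bpc on 4 lanes. */
        printf("mode fits: %d\n", dp_mode_fits(148500, 8, 4, 270000, 25000));
        return 0;
}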
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index ca4087f5a15b..ac93d12201dc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -681,8 +681,6 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
{
struct nvkm_device *device;
struct drm_device *drm_dev;
- struct apertures_struct *aper;
- bool boot = false;
int ret;
if (vga_switcheroo_client_probe_defer(pdev))
@@ -699,32 +697,9 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
nvkm_device_del(&device);
/* Remove conflicting drivers (vesafb, efifb etc). */
- aper = alloc_apertures(3);
- if (!aper)
- return -ENOMEM;
-
- aper->ranges[0].base = pci_resource_start(pdev, 1);
- aper->ranges[0].size = pci_resource_len(pdev, 1);
- aper->count = 1;
-
- if (pci_resource_len(pdev, 2)) {
- aper->ranges[aper->count].base = pci_resource_start(pdev, 2);
- aper->ranges[aper->count].size = pci_resource_len(pdev, 2);
- aper->count++;
- }
-
- if (pci_resource_len(pdev, 3)) {
- aper->ranges[aper->count].base = pci_resource_start(pdev, 3);
- aper->ranges[aper->count].size = pci_resource_len(pdev, 3);
- aper->count++;
- }
-
-#ifdef CONFIG_X86
- boot = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
-#endif
- if (nouveau_modeset != 2)
- drm_fb_helper_remove_conflicting_framebuffers(aper, "nouveaufb", boot);
- kfree(aper);
+ ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "nouveaufb");
+ if (ret)
+ return ret;
ret = nvkm_device_pci_new(pdev, nouveau_config, nouveau_debug,
true, true, ~0ULL, &device);
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
index 3517f920bf89..a72c412ac8b1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -52,6 +52,7 @@ struct nouveau_encoder {
* actually programmed on the hw, not the proposed crtc */
struct drm_crtc *crtc;
u32 ctrl;
+ bool audio;
struct drm_display_mode mode;
int last_dpms;
@@ -66,6 +67,10 @@ struct nouveau_encoder {
} dp;
};
+ struct {
+ bool dp_interlace : 1;
+ } caps;
+
void (*enc_save)(struct drm_encoder *encoder);
void (*enc_restore)(struct drm_encoder *encoder);
void (*update)(struct nouveau_encoder *, u8 head,
@@ -100,6 +105,10 @@ enum nouveau_dp_status {
};
int nouveau_dp_detect(struct nouveau_encoder *);
+enum drm_mode_status nv50_dp_mode_valid(struct drm_connector *,
+ struct nouveau_encoder *,
+ const struct drm_display_mode *,
+ unsigned *clock);
struct nouveau_connector *
nouveau_encoder_connector_get(struct nouveau_encoder *encoder);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 24d543a01f43..3d11b84d4cf9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -312,7 +312,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvif_device *device = &drm->client.device;
struct fb_info *info;
- struct nouveau_framebuffer *fb;
+ struct drm_framebuffer *fb;
struct nouveau_channel *chan;
struct nouveau_bo *nvbo;
struct drm_mode_fb_cmd2 mode_cmd;
@@ -335,7 +335,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
goto out;
}
- ret = nouveau_framebuffer_new(dev, &mode_cmd, nvbo, &fb);
+ ret = nouveau_framebuffer_new(dev, &mode_cmd, &nvbo->bo.base, &fb);
if (ret)
goto out_unref;
@@ -353,7 +353,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
chan = nouveau_nofbaccel ? NULL : drm->channel;
if (chan && device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
- ret = nouveau_vma_new(nvbo, chan->vmm, &fb->vma);
+ ret = nouveau_vma_new(nvbo, chan->vmm, &fbcon->vma);
if (ret) {
NV_ERROR(drm, "failed to map fb into chan: %d\n", ret);
chan = NULL;
@@ -367,7 +367,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
}
/* setup helper */
- fbcon->helper.fb = &fb->base;
+ fbcon->helper.fb = fb;
if (!chan)
info->flags = FBINFO_HWACCEL_DISABLED;
@@ -376,12 +376,12 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
FBINFO_HWACCEL_FILLRECT |
FBINFO_HWACCEL_IMAGEBLIT;
info->fbops = &nouveau_fbcon_sw_ops;
- info->fix.smem_start = fb->nvbo->bo.mem.bus.base +
- fb->nvbo->bo.mem.bus.offset;
- info->fix.smem_len = fb->nvbo->bo.mem.num_pages << PAGE_SHIFT;
+ info->fix.smem_start = nvbo->bo.mem.bus.base +
+ nvbo->bo.mem.bus.offset;
+ info->fix.smem_len = nvbo->bo.mem.num_pages << PAGE_SHIFT;
- info->screen_base = nvbo_kmap_obj_iovirtual(fb->nvbo);
- info->screen_size = fb->nvbo->bo.mem.num_pages << PAGE_SHIFT;
+ info->screen_base = nvbo_kmap_obj_iovirtual(nvbo);
+ info->screen_size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
drm_fb_helper_fill_info(info, &fbcon->helper, sizes);
@@ -393,19 +393,19 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
/* To allow resizing without swapping buffers */
NV_INFO(drm, "allocated %dx%d fb: 0x%llx, bo %p\n",
- fb->base.width, fb->base.height, fb->nvbo->bo.offset, nvbo);
+ fb->width, fb->height, nvbo->bo.offset, nvbo);
vga_switcheroo_client_fb_set(dev->pdev, info);
return 0;
out_unlock:
if (chan)
- nouveau_vma_del(&fb->vma);
- nouveau_bo_unmap(fb->nvbo);
+ nouveau_vma_del(&fbcon->vma);
+ nouveau_bo_unmap(nvbo);
out_unpin:
- nouveau_bo_unpin(fb->nvbo);
+ nouveau_bo_unpin(nvbo);
out_unref:
- nouveau_bo_ref(NULL, &fb->nvbo);
+ nouveau_bo_ref(NULL, &nvbo);
out:
return ret;
}
@@ -413,16 +413,18 @@ out:
static int
nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon)
{
- struct nouveau_framebuffer *nouveau_fb = nouveau_framebuffer(fbcon->helper.fb);
+ struct drm_framebuffer *fb = fbcon->helper.fb;
+ struct nouveau_bo *nvbo;
drm_fb_helper_unregister_fbi(&fbcon->helper);
drm_fb_helper_fini(&fbcon->helper);
- if (nouveau_fb && nouveau_fb->nvbo) {
- nouveau_vma_del(&nouveau_fb->vma);
- nouveau_bo_unmap(nouveau_fb->nvbo);
- nouveau_bo_unpin(nouveau_fb->nvbo);
- drm_framebuffer_put(&nouveau_fb->base);
+ if (fb && fb->obj[0]) {
+ nvbo = nouveau_gem_object(fb->obj[0]);
+ nouveau_vma_del(&fbcon->vma);
+ nouveau_bo_unmap(nvbo);
+ nouveau_bo_unpin(nvbo);
+ drm_framebuffer_put(fb);
}
return 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index 73a7eeba3973..1796d8824580 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -31,6 +31,8 @@
#include "nouveau_display.h"
+struct nouveau_vma;
+
struct nouveau_fbdev {
struct drm_fb_helper helper; /* must be first */
unsigned int saved_flags;
@@ -41,6 +43,7 @@ struct nouveau_fbdev {
struct nvif_object gdi;
struct nvif_object blit;
struct nvif_object twod;
+ struct nouveau_vma *vma;
struct mutex hotplug_lock;
bool hotplug_waiting;
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index f5ece1f94973..4c3f131ad31d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -76,8 +76,10 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
return ret;
ret = pm_runtime_get_sync(dev);
- if (ret < 0 && ret != -EACCES)
+ if (ret < 0 && ret != -EACCES) {
+ pm_runtime_put_autosuspend(dev);
goto out;
+ }
ret = nouveau_vma_new(nvbo, vmm, &vma);
pm_runtime_mark_last_busy(dev);
@@ -157,8 +159,8 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
if (!WARN_ON(ret < 0 && ret != -EACCES)) {
nouveau_gem_object_unmap(nvbo, vma);
pm_runtime_mark_last_busy(dev);
- pm_runtime_put_autosuspend(dev);
}
+ pm_runtime_put_autosuspend(dev);
}
}
ttm_bo_unreserve(&nvbo->bo);
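
Both nouveau_gem.c hunks address the same runtime-PM pitfall: pm_runtime_get_sync() increments the device usage count even when it returns an error, so the error path (and the close path) must still drop the reference. A kernel-style sketch of the balanced pattern the hunks restore; only the pm_runtime_* calls are real API, the surrounding function is illustrative.

#include <linux/pm_runtime.h>

/* Illustrative only: shows the get/put balance the hunks restore. */
static int example_runtime_op(struct device *dev)
{
        int ret;

        ret = pm_runtime_get_sync(dev);
        if (ret < 0 && ret != -EACCES) {
                /* The usage count was still incremented, so drop it here. */
                pm_runtime_put_autosuspend(dev);
                return ret;
        }

        /* ... do the work that needs the device powered up ... */

        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);
        return 0;
}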
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c
index 039e23548e08..23cd43a7fd19 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.c
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.c
@@ -95,14 +95,3 @@ struct platform_driver nouveau_platform_driver = {
.probe = nouveau_platform_probe,
.remove = nouveau_platform_remove,
};
-
-#if IS_ENABLED(CONFIG_ARCH_TEGRA_124_SOC) || IS_ENABLED(CONFIG_ARCH_TEGRA_132_SOC)
-MODULE_FIRMWARE("nvidia/gk20a/fecs_data.bin");
-MODULE_FIRMWARE("nvidia/gk20a/fecs_inst.bin");
-MODULE_FIRMWARE("nvidia/gk20a/gpccs_data.bin");
-MODULE_FIRMWARE("nvidia/gk20a/gpccs_inst.bin");
-MODULE_FIRMWARE("nvidia/gk20a/sw_bundle_init.bin");
-MODULE_FIRMWARE("nvidia/gk20a/sw_ctx.bin");
-MODULE_FIRMWARE("nvidia/gk20a/sw_method_init.bin");
-MODULE_FIRMWARE("nvidia/gk20a/sw_nonctx.bin");
-#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index 645fedd77e21..ba9f9359c30e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -70,6 +70,12 @@ struct nouveau_svm {
#define SVM_DBG(s,f,a...) NV_DEBUG((s)->drm, "svm: "f"\n", ##a)
#define SVM_ERR(s,f,a...) NV_WARN((s)->drm, "svm: "f"\n", ##a)
+struct nouveau_pfnmap_args {
+ struct nvif_ioctl_v0 i;
+ struct nvif_ioctl_mthd_v0 m;
+ struct nvif_vmm_pfnmap_v0 p;
+};
+
struct nouveau_ivmm {
struct nouveau_svmm *svmm;
u64 inst;
@@ -169,10 +175,10 @@ nouveau_svmm_bind(struct drm_device *dev, void *data,
*/
mm = get_task_mm(current);
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
if (!cli->svm.svmm) {
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return -EINVAL;
}
@@ -187,7 +193,8 @@ nouveau_svmm_bind(struct drm_device *dev, void *data,
addr = max(addr, vma->vm_start);
next = min(vma->vm_end, end);
/* This is a best effort so we ignore errors */
- nouveau_dmem_migrate_vma(cli->drm, vma, addr, next);
+ nouveau_dmem_migrate_vma(cli->drm, cli->svm.svmm, vma, addr,
+ next);
addr = next;
}
@@ -198,7 +205,7 @@ nouveau_svmm_bind(struct drm_device *dev, void *data,
*/
args->result = 0;
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
mmput(mm);
return 0;
@@ -348,7 +355,7 @@ nouveau_svmm_init(struct drm_device *dev, void *data,
if (ret)
goto out_free;
- down_write(&current->mm->mmap_sem);
+ mmap_write_lock(current->mm);
svmm->notifier.ops = &nouveau_mn_ops;
ret = __mmu_notifier_register(&svmm->notifier, current->mm);
if (ret)
@@ -357,31 +364,18 @@ nouveau_svmm_init(struct drm_device *dev, void *data,
cli->svm.svmm = svmm;
cli->svm.cli = cli;
- up_write(&current->mm->mmap_sem);
+ mmap_write_unlock(current->mm);
mutex_unlock(&cli->mutex);
return 0;
out_mm_unlock:
- up_write(&current->mm->mmap_sem);
+ mmap_write_unlock(current->mm);
out_free:
mutex_unlock(&cli->mutex);
kfree(svmm);
return ret;
}
-static const u64
-nouveau_svm_pfn_flags[HMM_PFN_FLAG_MAX] = {
- [HMM_PFN_VALID ] = NVIF_VMM_PFNMAP_V0_V,
- [HMM_PFN_WRITE ] = NVIF_VMM_PFNMAP_V0_W,
-};
-
-static const u64
-nouveau_svm_pfn_values[HMM_PFN_VALUE_MAX] = {
- [HMM_PFN_ERROR ] = ~NVIF_VMM_PFNMAP_V0_V,
- [HMM_PFN_NONE ] = NVIF_VMM_PFNMAP_V0_NONE,
- [HMM_PFN_SPECIAL] = ~NVIF_VMM_PFNMAP_V0_V,
-};
-
/* Issue fault replay for GPU to retry accesses that faulted previously. */
static void
nouveau_svm_fault_replay(struct nouveau_svm *svm)
@@ -519,9 +513,45 @@ static const struct mmu_interval_notifier_ops nouveau_svm_mni_ops = {
.invalidate = nouveau_svm_range_invalidate,
};
+static void nouveau_hmm_convert_pfn(struct nouveau_drm *drm,
+ struct hmm_range *range, u64 *ioctl_addr)
+{
+ unsigned long i, npages;
+
+ /*
+ * The ioctl_addr prepared here is passed through nvif_object_ioctl()
+ * to an eventual DMA map in something like gp100_vmm_pgt_pfn()
+ *
+ * This is all just encoding the internal hmm representation into a
+ * different nouveau internal representation.
+ */
+ npages = (range->end - range->start) >> PAGE_SHIFT;
+ for (i = 0; i < npages; ++i) {
+ struct page *page;
+
+ if (!(range->hmm_pfns[i] & HMM_PFN_VALID)) {
+ ioctl_addr[i] = 0;
+ continue;
+ }
+
+ page = hmm_pfn_to_page(range->hmm_pfns[i]);
+ if (is_device_private_page(page))
+ ioctl_addr[i] = nouveau_dmem_page_addr(page) |
+ NVIF_VMM_PFNMAP_V0_V |
+ NVIF_VMM_PFNMAP_V0_VRAM;
+ else
+ ioctl_addr[i] = page_to_phys(page) |
+ NVIF_VMM_PFNMAP_V0_V |
+ NVIF_VMM_PFNMAP_V0_HOST;
+ if (range->hmm_pfns[i] & HMM_PFN_WRITE)
+ ioctl_addr[i] |= NVIF_VMM_PFNMAP_V0_W;
+ }
+}
+
static int nouveau_range_fault(struct nouveau_svmm *svmm,
struct nouveau_drm *drm, void *data, u32 size,
- u64 *pfns, struct svm_notifier *notifier)
+ unsigned long hmm_pfns[], u64 *ioctl_addr,
+ struct svm_notifier *notifier)
{
unsigned long timeout =
jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
@@ -530,26 +560,27 @@ static int nouveau_range_fault(struct nouveau_svmm *svmm,
.notifier = &notifier->notifier,
.start = notifier->notifier.interval_tree.start,
.end = notifier->notifier.interval_tree.last + 1,
- .pfns = pfns,
- .flags = nouveau_svm_pfn_flags,
- .values = nouveau_svm_pfn_values,
- .pfn_shift = NVIF_VMM_PFNMAP_V0_ADDR_SHIFT,
+ .pfn_flags_mask = HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE,
+ .hmm_pfns = hmm_pfns,
};
struct mm_struct *mm = notifier->notifier.mm;
- long ret;
+ int ret;
while (true) {
if (time_after(jiffies, timeout))
return -EBUSY;
range.notifier_seq = mmu_interval_read_begin(range.notifier);
- range.default_flags = 0;
- range.pfn_flags_mask = -1UL;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
ret = hmm_range_fault(&range);
- up_read(&mm->mmap_sem);
- if (ret <= 0) {
- if (ret == 0 || ret == -EBUSY)
+ mmap_read_unlock(mm);
+ if (ret) {
+ /*
+ * FIXME: the input PFN_REQ flags are destroyed on
+ * -EBUSY, we need to regenerate them, also for the
+ * other continue below
+ */
+ if (ret == -EBUSY)
continue;
return ret;
}
@@ -563,7 +594,7 @@ static int nouveau_range_fault(struct nouveau_svmm *svmm,
break;
}
- nouveau_dmem_convert_pfn(drm, &range);
+ nouveau_hmm_convert_pfn(drm, &range, ioctl_addr);
svmm->vmm->vmm.object.client->super = true;
ret = nvif_object_ioctl(&svmm->vmm->vmm.object, data, size, NULL);
@@ -590,6 +621,7 @@ nouveau_svm_fault(struct nvif_notify *notify)
} i;
u64 phys[16];
} args;
+ unsigned long hmm_pfns[ARRAY_SIZE(args.phys)];
struct vm_area_struct *vma;
u64 inst, start, limit;
int fi, fn, pi, fill;
@@ -673,18 +705,18 @@ nouveau_svm_fault(struct nvif_notify *notify)
/* Intersect fault window with the CPU VMA, cancelling
* the fault if the address is invalid.
*/
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = find_vma_intersection(mm, start, limit);
if (!vma) {
SVMM_ERR(svmm, "wndw %016llx-%016llx", start, limit);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
mmput(mm);
nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
continue;
}
start = max_t(u64, start, vma->vm_start);
limit = min_t(u64, limit, vma->vm_end);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
SVMM_DBG(svmm, "wndw %016llx-%016llx", start, limit);
if (buffer->fault[fi]->addr != start) {
@@ -705,12 +737,17 @@ nouveau_svm_fault(struct nvif_notify *notify)
* access flags.
*XXX: atomic?
*/
- if (buffer->fault[fn]->access != 0 /* READ. */ &&
- buffer->fault[fn]->access != 3 /* PREFETCH. */) {
- args.phys[pi++] = NVIF_VMM_PFNMAP_V0_V |
- NVIF_VMM_PFNMAP_V0_W;
- } else {
- args.phys[pi++] = NVIF_VMM_PFNMAP_V0_V;
+ switch (buffer->fault[fn]->access) {
+ case 0: /* READ. */
+ hmm_pfns[pi++] = HMM_PFN_REQ_FAULT;
+ break;
+ case 3: /* PREFETCH. */
+ hmm_pfns[pi++] = 0;
+ break;
+ default:
+ hmm_pfns[pi++] = HMM_PFN_REQ_FAULT |
+ HMM_PFN_REQ_WRITE;
+ break;
}
args.i.p.size = pi << PAGE_SHIFT;
@@ -738,7 +775,7 @@ nouveau_svm_fault(struct nvif_notify *notify)
fill = (buffer->fault[fn ]->addr -
buffer->fault[fn - 1]->addr) >> PAGE_SHIFT;
while (--fill)
- args.phys[pi++] = NVIF_VMM_PFNMAP_V0_NONE;
+ hmm_pfns[pi++] = 0;
}
SVMM_DBG(svmm, "wndw %016llx-%016llx covering %d fault(s)",
@@ -754,7 +791,7 @@ nouveau_svm_fault(struct nvif_notify *notify)
ret = nouveau_range_fault(
svmm, svm->drm, &args,
sizeof(args.i) + pi * sizeof(args.phys[0]),
- args.phys, &notifier);
+ hmm_pfns, args.phys, &notifier);
mmu_interval_notifier_remove(&notifier.notifier);
}
mmput(mm);
@@ -784,6 +821,56 @@ nouveau_svm_fault(struct nvif_notify *notify)
return NVIF_NOTIFY_KEEP;
}
+static struct nouveau_pfnmap_args *
+nouveau_pfns_to_args(void *pfns)
+{
+ return container_of(pfns, struct nouveau_pfnmap_args, p.phys);
+}
+
+u64 *
+nouveau_pfns_alloc(unsigned long npages)
+{
+ struct nouveau_pfnmap_args *args;
+
+ args = kzalloc(struct_size(args, p.phys, npages), GFP_KERNEL);
+ if (!args)
+ return NULL;
+
+ args->i.type = NVIF_IOCTL_V0_MTHD;
+ args->m.method = NVIF_VMM_V0_PFNMAP;
+ args->p.page = PAGE_SHIFT;
+
+ return args->p.phys;
+}
+
+void
+nouveau_pfns_free(u64 *pfns)
+{
+ struct nouveau_pfnmap_args *args = nouveau_pfns_to_args(pfns);
+
+ kfree(args);
+}
+
+void
+nouveau_pfns_map(struct nouveau_svmm *svmm, struct mm_struct *mm,
+ unsigned long addr, u64 *pfns, unsigned long npages)
+{
+ struct nouveau_pfnmap_args *args = nouveau_pfns_to_args(pfns);
+ int ret;
+
+ args->p.addr = addr;
+ args->p.size = npages << PAGE_SHIFT;
+
+ mutex_lock(&svmm->mutex);
+
+ svmm->vmm->vmm.object.client->super = true;
+ ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, sizeof(*args) +
+ npages * sizeof(args->p.phys[0]), NULL);
+ svmm->vmm->vmm.object.client->super = false;
+
+ mutex_unlock(&svmm->mutex);
+}
+
static void
nouveau_svm_fault_buffer_fini(struct nouveau_svm *svm, int id)
{
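
The nouveau_pfns_alloc()/nouveau_pfns_free() helpers added above hand callers a bare u64 array (args->p.phys) and later recover the enclosing ioctl structure with container_of(). Below is a self-contained userspace sketch of that flexible-array idiom; every name is a stand-in of mine rather than nouveau's.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct pfn_args {                 /* stand-in for nouveau_pfnmap_args */
        uint32_t method;          /* fixed-size header fields */
        uint32_t page_shift;
        uint64_t phys[];          /* flexible array handed to callers */
};

static uint64_t *pfns_alloc(size_t npages)
{
        struct pfn_args *args;

        args = calloc(1, sizeof(*args) + npages * sizeof(args->phys[0]));
        if (!args)
                return NULL;
        args->page_shift = 12;
        return args->phys;        /* callers only ever see the array */
}

static void pfns_free(uint64_t *pfns)
{
        /* Recover the enclosing header from the array pointer. */
        free(container_of(pfns, struct pfn_args, phys));
}

int main(void)
{
        uint64_t *pfns = pfns_alloc(16);

        if (!pfns)
                return 1;
        pfns[0] = 0x1000;
        printf("first pfn entry: 0x%llx\n", (unsigned long long)pfns[0]);
        pfns_free(pfns);
        return 0;
}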
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.h b/drivers/gpu/drm/nouveau/nouveau_svm.h
index e839d8189461..f0fcd1b72e8b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.h
@@ -18,6 +18,11 @@ void nouveau_svmm_fini(struct nouveau_svmm **);
int nouveau_svmm_join(struct nouveau_svmm *, u64 inst);
void nouveau_svmm_part(struct nouveau_svmm *, u64 inst);
int nouveau_svmm_bind(struct drm_device *, void *, struct drm_file *);
+
+u64 *nouveau_pfns_alloc(unsigned long npages);
+void nouveau_pfns_free(u64 *pfns);
+void nouveau_pfns_map(struct nouveau_svmm *svmm, struct mm_struct *mm,
+ unsigned long addr, u64 *pfns, unsigned long npages);
#else /* IS_ENABLED(CONFIG_DRM_NOUVEAU_SVM) */
static inline void nouveau_svm_init(struct nouveau_drm *drm) {}
static inline void nouveau_svm_fini(struct nouveau_drm *drm) {}
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index facd18564e0d..47428f79ede8 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -149,7 +149,6 @@ int
nv50_fbcon_accel_init(struct fb_info *info)
{
struct nouveau_fbdev *nfbdev = info->par;
- struct nouveau_framebuffer *fb = nouveau_framebuffer(nfbdev->helper.fb);
struct drm_device *dev = nfbdev->helper.dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_channel *chan = drm->channel;
@@ -240,8 +239,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
OUT_RING(chan, info->fix.line_length);
OUT_RING(chan, info->var.xres_virtual);
OUT_RING(chan, info->var.yres_virtual);
- OUT_RING(chan, upper_32_bits(fb->vma->addr));
- OUT_RING(chan, lower_32_bits(fb->vma->addr));
+ OUT_RING(chan, upper_32_bits(nfbdev->vma->addr));
+ OUT_RING(chan, lower_32_bits(nfbdev->vma->addr));
BEGIN_NV04(chan, NvSub2D, 0x0230, 2);
OUT_RING(chan, format);
OUT_RING(chan, 1);
@@ -249,8 +248,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
OUT_RING(chan, info->fix.line_length);
OUT_RING(chan, info->var.xres_virtual);
OUT_RING(chan, info->var.yres_virtual);
- OUT_RING(chan, upper_32_bits(fb->vma->addr));
- OUT_RING(chan, lower_32_bits(fb->vma->addr));
+ OUT_RING(chan, upper_32_bits(nfbdev->vma->addr));
+ OUT_RING(chan, lower_32_bits(nfbdev->vma->addr));
FIRE_RING(chan);
return 0;
diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
index c0deef4fe727..cb56163ed608 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
@@ -150,7 +150,6 @@ nvc0_fbcon_accel_init(struct fb_info *info)
{
struct nouveau_fbdev *nfbdev = info->par;
struct drm_device *dev = nfbdev->helper.dev;
- struct nouveau_framebuffer *fb = nouveau_framebuffer(nfbdev->helper.fb);
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_channel *chan = drm->channel;
int ret, format;
@@ -240,8 +239,8 @@ nvc0_fbcon_accel_init(struct fb_info *info)
OUT_RING (chan, info->fix.line_length);
OUT_RING (chan, info->var.xres_virtual);
OUT_RING (chan, info->var.yres_virtual);
- OUT_RING (chan, upper_32_bits(fb->vma->addr));
- OUT_RING (chan, lower_32_bits(fb->vma->addr));
+ OUT_RING (chan, upper_32_bits(nfbdev->vma->addr));
+ OUT_RING (chan, lower_32_bits(nfbdev->vma->addr));
BEGIN_NVC0(chan, NvSub2D, 0x0230, 10);
OUT_RING (chan, format);
OUT_RING (chan, 1);
@@ -251,8 +250,8 @@ nvc0_fbcon_accel_init(struct fb_info *info)
OUT_RING (chan, info->fix.line_length);
OUT_RING (chan, info->var.xres_virtual);
OUT_RING (chan, info->var.yres_virtual);
- OUT_RING (chan, upper_32_bits(fb->vma->addr));
- OUT_RING (chan, lower_32_bits(fb->vma->addr));
+ OUT_RING (chan, upper_32_bits(nfbdev->vma->addr));
+ OUT_RING (chan, lower_32_bits(nfbdev->vma->addr));
FIRE_RING (chan);
return 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/memory.c b/drivers/gpu/drm/nouveau/nvkm/core/memory.c
index 4cc186262d34..38130ef272d6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/memory.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/memory.c
@@ -140,7 +140,7 @@ nvkm_memory_new(struct nvkm_device *device, enum nvkm_memory_target target,
{
struct nvkm_instmem *imem = device->imem;
struct nvkm_memory *memory;
- int ret = -ENOSYS;
+ int ret;
if (unlikely(target != NVKM_MEM_TARGET_INST || !imem))
return -ENOSYS;
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
index 79a8f9d305c5..49d468b45d3f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
@@ -221,3 +221,14 @@ nvkm_subdev_ctor(const struct nvkm_subdev_func *func,
__mutex_init(&subdev->mutex, name, &nvkm_subdev_lock_class[index]);
subdev->debug = nvkm_dbgopt(device->dbgopt, name);
}
+
+int
+nvkm_subdev_new_(const struct nvkm_subdev_func *func,
+ struct nvkm_device *device, int index,
+ struct nvkm_subdev **psubdev)
+{
+ if (!(*psubdev = kzalloc(sizeof(**psubdev), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_subdev_ctor(func, device, index, *psubdev);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 8ebbe1656008..5b90c2a1bf3d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2924,6 +2924,20 @@ nvkm_device_del(struct nvkm_device **pdevice)
}
}
+static inline bool
+nvkm_device_endianness(struct nvkm_device *device)
+{
+ u32 boot1 = nvkm_rd32(device, 0x000004) & 0x01000001;
+#ifdef __BIG_ENDIAN
+ if (!boot1)
+ return false;
+#else
+ if (boot1)
+ return false;
+#endif
+ return true;
+}
+
int
nvkm_device_ctor(const struct nvkm_device_func *func,
const struct nvkm_device_quirk *quirk,
@@ -2934,8 +2948,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
{
struct nvkm_subdev *subdev;
u64 mmio_base, mmio_size;
- u32 boot0, strap;
- void __iomem *map;
+ u32 boot0, boot1, strap;
int ret = -EEXIST, i;
unsigned chipset;
@@ -2961,26 +2974,30 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
mmio_base = device->func->resource_addr(device, 0);
mmio_size = device->func->resource_size(device, 0);
- /* identify the chipset, and determine classes of subdev/engines */
- if (detect) {
- map = ioremap(mmio_base, 0x102000);
- if (ret = -ENOMEM, map == NULL)
+ if (detect || mmio) {
+ device->pri = ioremap(mmio_base, mmio_size);
+ if (device->pri == NULL) {
+ nvdev_error(device, "unable to map PRI\n");
+ ret = -ENOMEM;
goto done;
+ }
+ }
+ /* identify the chipset, and determine classes of subdev/engines */
+ if (detect) {
/* switch mmio to cpu's native endianness */
-#ifndef __BIG_ENDIAN
- if (ioread32_native(map + 0x000004) != 0x00000000) {
-#else
- if (ioread32_native(map + 0x000004) == 0x00000000) {
-#endif
- iowrite32_native(0x01000001, map + 0x000004);
- ioread32_native(map);
+ if (!nvkm_device_endianness(device)) {
+ nvkm_wr32(device, 0x000004, 0x01000001);
+ nvkm_rd32(device, 0x000000);
+ if (!nvkm_device_endianness(device)) {
+ nvdev_error(device,
+ "GPU not supported on big-endian\n");
+ ret = -ENOSYS;
+ goto done;
+ }
}
- /* read boot0 and strapping information */
- boot0 = ioread32_native(map + 0x000000);
- strap = ioread32_native(map + 0x101000);
- iounmap(map);
+ boot0 = nvkm_rd32(device, 0x000000);
/* chipset can be overridden for devel/testing purposes */
chipset = nvkm_longopt(device->cfgopt, "NvChipset", 0);
@@ -3138,6 +3155,17 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
nvdev_info(device, "NVIDIA %s (%08x)\n",
device->chip->name, boot0);
+ /* vGPU detection */
+ boot1 = nvkm_rd32(device, 0x0000004);
+ if (device->card_type >= TU100 && (boot1 & 0x00030000)) {
+ nvdev_info(device, "vGPUs are not supported\n");
+ ret = -ENODEV;
+ goto done;
+ }
+
+ /* read strapping information */
+ strap = nvkm_rd32(device, 0x101000);
+
/* determine frequency of timing crystal */
if ( device->card_type <= NV_10 || device->chipset < 0x17 ||
(device->chipset >= 0x20 && device->chipset < 0x25))
@@ -3158,15 +3186,6 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
if (!device->name)
device->name = device->chip->name;
- if (mmio) {
- device->pri = ioremap(mmio_base, mmio_size);
- if (!device->pri) {
- nvdev_error(device, "unable to map PRI\n");
- ret = -ENOMEM;
- goto done;
- }
- }
-
mutex_init(&device->mutex);
for (i = 0; i < NVKM_SUBDEV_NR; i++) {
@@ -3254,6 +3273,10 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
ret = 0;
done:
+ if (device->pri && (!mmio || ret)) {
+ iounmap(device->pri);
+ device->pri = NULL;
+ }
mutex_unlock(&nv_devices_mutex);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
index 0d584d0da59c..cf075311cdd2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
@@ -39,6 +39,7 @@ nvkm-y += nvkm/engine/disp/sorgf119.o
nvkm-y += nvkm/engine/disp/sorgk104.o
nvkm-y += nvkm/engine/disp/sorgm107.o
nvkm-y += nvkm/engine/disp/sorgm200.o
+nvkm-y += nvkm/engine/disp/sorgp100.o
nvkm-y += nvkm/engine/disp/sorgv100.o
nvkm-y += nvkm/engine/disp/sortu102.o
@@ -47,6 +48,7 @@ nvkm-y += nvkm/engine/disp/dp.o
nvkm-y += nvkm/engine/disp/hdagt215.o
nvkm-y += nvkm/engine/disp/hdagf119.o
+nvkm-y += nvkm/engine/disp/hdagv100.o
nvkm-y += nvkm/engine/disp/hdmi.o
nvkm-y += nvkm/engine/disp/hdmig84.o
@@ -74,6 +76,8 @@ nvkm-y += nvkm/engine/disp/rootgp102.o
nvkm-y += nvkm/engine/disp/rootgv100.o
nvkm-y += nvkm/engine/disp/roottu102.o
+nvkm-y += nvkm/engine/disp/capsgv100.o
+
nvkm-y += nvkm/engine/disp/channv50.o
nvkm-y += nvkm/engine/disp/changf119.o
nvkm-y += nvkm/engine/disp/changv100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/capsgv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/capsgv100.c
new file mode 100644
index 000000000000..5026e530f4bb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/capsgv100.c
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2020 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#define gv100_disp_caps(p) container_of((p), struct gv100_disp_caps, object)
+#include "rootnv50.h"
+
+struct gv100_disp_caps {
+ struct nvkm_object object;
+ struct nv50_disp *disp;
+};
+
+static int
+gv100_disp_caps_map(struct nvkm_object *object, void *argv, u32 argc,
+ enum nvkm_object_map *type, u64 *addr, u64 *size)
+{
+ struct gv100_disp_caps *caps = gv100_disp_caps(object);
+ struct nvkm_device *device = caps->disp->base.engine.subdev.device;
+ *type = NVKM_OBJECT_MAP_IO;
+ *addr = 0x640000 + device->func->resource_addr(device, 0);
+ *size = 0x1000;
+ return 0;
+}
+
+static const struct nvkm_object_func
+gv100_disp_caps = {
+ .map = gv100_disp_caps_map,
+};
+
+int
+gv100_disp_caps_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+ struct nv50_disp *disp, struct nvkm_object **pobject)
+{
+ struct gv100_disp_caps *caps;
+
+ if (!(caps = kzalloc(sizeof(*caps), GFP_KERNEL)))
+ return -ENOMEM;
+ *pobject = &caps->object;
+
+ nvkm_object_ctor(&gv100_disp_caps, oclass, &caps->object);
+ caps->disp = disp;
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c
index fd6216684f6d..8471de3f3b61 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c
@@ -36,7 +36,7 @@ gp100_disp = {
.super = gf119_disp_super,
.root = &gp100_disp_root_oclass,
.head = { .cnt = gf119_head_cnt, .new = gf119_head_new },
- .sor = { .cnt = gf119_sor_cnt, .new = gm200_sor_new },
+ .sor = { .cnt = gf119_sor_cnt, .new = gp100_sor_new },
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c
index 3468ddec1270..a3779c5046ea 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c
@@ -63,7 +63,7 @@ gp102_disp = {
.super = gf119_disp_super,
.root = &gp102_disp_root_oclass,
.head = { .cnt = gf119_head_cnt, .new = gf119_head_new },
- .sor = { .cnt = gf119_sor_cnt, .new = gm200_sor_new },
+ .sor = { .cnt = gf119_sor_cnt, .new = gp100_sor_new },
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagf119.c
index 0fa0ec0a1de0..19d2d58344e4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagf119.c
@@ -24,10 +24,18 @@
#include "ior.h"
void
-gf119_hda_eld(struct nvkm_ior *ior, u8 *data, u8 size)
+gf119_hda_device_entry(struct nvkm_ior *ior, int head)
{
struct nvkm_device *device = ior->disp->engine.subdev.device;
- const u32 soff = 0x030 * ior->id;
+ const u32 hoff = 0x800 * head;
+ nvkm_mask(device, 0x616548 + hoff, 0x00000070, head << 4);
+}
+
+void
+gf119_hda_eld(struct nvkm_ior *ior, int head, u8 *data, u8 size)
+{
+ struct nvkm_device *device = ior->disp->engine.subdev.device;
+ const u32 soff = 0x030 * ior->id + (head * 0x04);
int i;
for (i = 0; i < size; i++)
@@ -41,14 +49,14 @@ void
gf119_hda_hpd(struct nvkm_ior *ior, int head, bool present)
{
struct nvkm_device *device = ior->disp->engine.subdev.device;
- const u32 hoff = 0x800 * head;
+ const u32 soff = 0x030 * ior->id + (head * 0x04);
u32 data = 0x80000000;
u32 mask = 0x80000001;
if (present) {
- nvkm_mask(device, 0x616548 + hoff, 0x00000070, 0x00000000);
+ ior->func->hda.device_entry(ior, head);
data |= 0x00000001;
} else {
mask |= 0x00000002;
}
- nvkm_mask(device, 0x10ec10 + ior->id * 0x030, mask, data);
+ nvkm_mask(device, 0x10ec10 + soff, mask, data);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c
index 4509d2ba880e..0d1b81fe1093 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c
@@ -24,7 +24,7 @@
#include "ior.h"
void
-gt215_hda_eld(struct nvkm_ior *ior, u8 *data, u8 size)
+gt215_hda_eld(struct nvkm_ior *ior, int head, u8 *data, u8 size)
{
struct nvkm_device *device = ior->disp->engine.subdev.device;
const u32 soff = ior->id * 0x800;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagv100.c
new file mode 100644
index 000000000000..57d374ecfeef
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagv100.c
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2020 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "ior.h"
+
+void
+gv100_hda_device_entry(struct nvkm_ior *ior, int head)
+{
+ struct nvkm_device *device = ior->disp->engine.subdev.device;
+ const u32 hoff = 0x800 * head;
+ nvkm_mask(device, 0x616528 + hoff, 0x00000070, head << 4);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigm200.c
index 9b16a08eb4d9..bf6d41fb0c9f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigm200.c
@@ -27,10 +27,10 @@ void
gm200_hdmi_scdc(struct nvkm_ior *ior, int head, u8 scdc)
{
struct nvkm_device *device = ior->disp->engine.subdev.device;
- const u32 hoff = head * 0x800;
+ const u32 soff = nv50_ior_base(ior);
const u32 ctrl = scdc & 0x3;
- nvkm_mask(device, 0x61c5bc + hoff, 0x00000003, ctrl);
+ nvkm_mask(device, 0x61c5bc + soff, 0x00000003, ctrl);
ior->tmds.high_speed = !!(scdc & 0x2);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
index 009d3a8b7a50..1a200a9ba4e4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
@@ -87,7 +87,8 @@ struct nvkm_ior_func {
struct {
void (*hpd)(struct nvkm_ior *, int head, bool present);
- void (*eld)(struct nvkm_ior *, u8 *data, u8 size);
+ void (*eld)(struct nvkm_ior *, int head, u8 *data, u8 size);
+ void (*device_entry)(struct nvkm_ior *, int head);
} hda;
};
@@ -158,10 +159,13 @@ void gv100_hdmi_ctrl(struct nvkm_ior *, int, bool, u8, u8, u8 *, u8 , u8 *, u8);
void gm200_hdmi_scdc(struct nvkm_ior *, int, u8);
void gt215_hda_hpd(struct nvkm_ior *, int, bool);
-void gt215_hda_eld(struct nvkm_ior *, u8 *, u8);
+void gt215_hda_eld(struct nvkm_ior *, int, u8 *, u8);
void gf119_hda_hpd(struct nvkm_ior *, int, bool);
-void gf119_hda_eld(struct nvkm_ior *, u8 *, u8);
+void gf119_hda_eld(struct nvkm_ior *, int, u8 *, u8);
+void gf119_hda_device_entry(struct nvkm_ior *, int);
+
+void gv100_hda_device_entry(struct nvkm_ior *, int);
#define IOR_MSG(i,l,f,a...) do { \
struct nvkm_ior *_ior = (i); \
@@ -197,6 +201,7 @@ int gf119_sor_new(struct nvkm_disp *, int);
int gk104_sor_new(struct nvkm_disp *, int);
int gm107_sor_new(struct nvkm_disp *, int);
int gm200_sor_new(struct nvkm_disp *, int);
+int gp100_sor_new(struct nvkm_disp *, int);
int gv100_sor_cnt(struct nvkm_disp *, unsigned long *);
int gv100_sor_new(struct nvkm_disp *, int);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
index c62030c96fba..dcf08249374a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
@@ -111,8 +111,44 @@ nvkm_outp_acquire_ior(struct nvkm_outp *outp, u8 user, struct nvkm_ior *ior)
return 0;
}
+static inline int
+nvkm_outp_acquire_hda(struct nvkm_outp *outp, enum nvkm_ior_type type,
+ u8 user, bool hda)
+{
+ struct nvkm_ior *ior;
+
+ /* First preference is to reuse the OR that is currently armed
+ * on HW, if any, in order to prevent unnecessary switching.
+ */
+ list_for_each_entry(ior, &outp->disp->ior, head) {
+ if (!ior->identity && !!ior->func->hda.hpd == hda &&
+ !ior->asy.outp && ior->arm.outp == outp)
+ return nvkm_outp_acquire_ior(outp, user, ior);
+ }
+
+ /* Failing that, a completely unused OR is the next best thing. */
+ list_for_each_entry(ior, &outp->disp->ior, head) {
+ if (!ior->identity && !!ior->func->hda.hpd == hda &&
+ !ior->asy.outp && ior->type == type && !ior->arm.outp &&
+ (ior->func->route.set || ior->id == __ffs(outp->info.or)))
+ return nvkm_outp_acquire_ior(outp, user, ior);
+ }
+
+ /* Last resort is to assign an OR that's already active on HW,
+ * but will be released during the next modeset.
+ */
+ list_for_each_entry(ior, &outp->disp->ior, head) {
+ if (!ior->identity && !!ior->func->hda.hpd == hda &&
+ !ior->asy.outp && ior->type == type &&
+ (ior->func->route.set || ior->id == __ffs(outp->info.or)))
+ return nvkm_outp_acquire_ior(outp, user, ior);
+ }
+
+ return -ENOSPC;
+}
+
int
-nvkm_outp_acquire(struct nvkm_outp *outp, u8 user)
+nvkm_outp_acquire(struct nvkm_outp *outp, u8 user, bool hda)
{
struct nvkm_ior *ior = outp->ior;
enum nvkm_ior_proto proto;
@@ -137,32 +173,25 @@ nvkm_outp_acquire(struct nvkm_outp *outp, u8 user)
return nvkm_outp_acquire_ior(outp, user, ior);
}
- /* First preference is to reuse the OR that is currently armed
- * on HW, if any, in order to prevent unnecessary switching.
+ /* If we don't need HDA, first try to acquire an OR that doesn't
+ * support it to leave free the ones that do.
*/
- list_for_each_entry(ior, &outp->disp->ior, head) {
- if (!ior->identity && !ior->asy.outp && ior->arm.outp == outp)
- return nvkm_outp_acquire_ior(outp, user, ior);
- }
+ if (!hda) {
+ if (!nvkm_outp_acquire_hda(outp, type, user, false))
+ return 0;
- /* Failing that, a completely unused OR is the next best thing. */
- list_for_each_entry(ior, &outp->disp->ior, head) {
- if (!ior->identity &&
- !ior->asy.outp && ior->type == type && !ior->arm.outp &&
- (ior->func->route.set || ior->id == __ffs(outp->info.or)))
- return nvkm_outp_acquire_ior(outp, user, ior);
+ /* Use a HDA-supporting SOR anyway. */
+ return nvkm_outp_acquire_hda(outp, type, user, true);
}
- /* Last resort is to assign an OR that's already active on HW,
- * but will be released during the next modeset.
- */
- list_for_each_entry(ior, &outp->disp->ior, head) {
- if (!ior->identity && !ior->asy.outp && ior->type == type &&
- (ior->func->route.set || ior->id == __ffs(outp->info.or)))
- return nvkm_outp_acquire_ior(outp, user, ior);
- }
+ /* We want HDA, try to acquire an OR that supports it. */
+ if (!nvkm_outp_acquire_hda(outp, type, user, true))
+ return 0;
- return -ENOSPC;
+ /* There weren't any free ORs that support HDA, grab one that
+ * doesn't and at least allow display to work still.
+ */
+ return nvkm_outp_acquire_hda(outp, type, user, false);
}
void
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
index 721b068b87ef..ee028d30cfe7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
@@ -32,7 +32,7 @@ int nvkm_outp_new(struct nvkm_disp *, int index, struct dcb_output *,
void nvkm_outp_del(struct nvkm_outp **);
void nvkm_outp_init(struct nvkm_outp *);
void nvkm_outp_fini(struct nvkm_outp *);
-int nvkm_outp_acquire(struct nvkm_outp *, u8 user);
+int nvkm_outp_acquire(struct nvkm_outp *, u8 user, bool hda);
void nvkm_outp_release(struct nvkm_outp *, u8 user);
void nvkm_outp_route(struct nvkm_disp *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgv100.c
index 9c658d632d37..47efb48d769a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgv100.c
@@ -27,6 +27,7 @@
static const struct nv50_disp_root_func
gv100_disp_root = {
.user = {
+ {{-1,-1,GV100_DISP_CAPS }, gv100_disp_caps_new },
{{0,0,GV100_DISP_CURSOR }, gv100_disp_curs_new },
{{0,0,GV100_DISP_WINDOW_IMM_CHANNEL_DMA}, gv100_disp_wimm_new },
{{0,0,GV100_DISP_CORE_CHANNEL_DMA }, gv100_disp_core_new },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
index 5f758948d6e1..fb5de44e4b8d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
@@ -99,7 +99,7 @@ nv50_disp_root_mthd_(struct nvkm_object *object, u32 mthd, void *data, u32 size)
} *args = data;
int ret = -ENOSYS;
if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
- ret = nvkm_outp_acquire(outp, NVKM_OUTP_USER);
+ ret = nvkm_outp_acquire(outp, NVKM_OUTP_USER, args->v0.hda);
if (ret == 0) {
args->v0.or = outp->ior->id;
args->v0.link = outp->ior->asy.link;
@@ -119,7 +119,7 @@ nv50_disp_root_mthd_(struct nvkm_object *object, u32 mthd, void *data, u32 size)
if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
if (args->v0.data & 0xfff00000)
return -EINVAL;
- ret = nvkm_outp_acquire(outp, NVKM_OUTP_PRIV);
+ ret = nvkm_outp_acquire(outp, NVKM_OUTP_PRIV, false);
if (ret)
return ret;
ret = outp->ior->func->sense(outp->ior, args->v0.data);
@@ -155,7 +155,7 @@ nv50_disp_root_mthd_(struct nvkm_object *object, u32 mthd, void *data, u32 size)
if (outp->info.type == DCB_OUTPUT_DP)
ior->func->dp.audio(ior, hidx, true);
ior->func->hda.hpd(ior, hidx, true);
- ior->func->hda.eld(ior, data, size);
+ ior->func->hda.eld(ior, hidx, data, size);
} else {
if (outp->info.type == DCB_OUTPUT_DP)
ior->func->dp.audio(ior, hidx, false);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
index a1f942793f98..7070f5408d92 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
@@ -24,6 +24,9 @@ int nv50_disp_root_new_(const struct nv50_disp_root_func *, struct nvkm_disp *,
const struct nvkm_oclass *, void *data, u32 size,
struct nvkm_object **);
+int gv100_disp_caps_new(const struct nvkm_oclass *, void *, u32,
+ struct nv50_disp *, struct nvkm_object **);
+
extern const struct nvkm_disp_oclass nv50_disp_root_oclass;
extern const struct nvkm_disp_oclass g84_disp_root_oclass;
extern const struct nvkm_disp_oclass g94_disp_root_oclass;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/roottu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/roottu102.c
index 579a5d02308a..d8719d38b98a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/roottu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/roottu102.c
@@ -27,6 +27,7 @@
static const struct nv50_disp_root_func
tu102_disp_root = {
.user = {
+ {{-1,-1,GV100_DISP_CAPS }, gv100_disp_caps_new },
{{0,0,TU102_DISP_CURSOR }, gv100_disp_curs_new },
{{0,0,TU102_DISP_WINDOW_IMM_CHANNEL_DMA}, gv100_disp_wimm_new },
{{0,0,TU102_DISP_CORE_CHANNEL_DMA }, gv100_disp_core_new },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
index 456a5a143522..3b3643fb1019 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
@@ -177,6 +177,7 @@ gf119_sor = {
.hda = {
.hpd = gf119_hda_hpd,
.eld = gf119_hda_eld,
+ .device_entry = gf119_hda_device_entry,
},
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgk104.c
index b94090edaebf..0c0925680790 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgk104.c
@@ -43,6 +43,7 @@ gk104_sor = {
.hda = {
.hpd = gf119_hda_hpd,
.eld = gf119_hda_eld,
+ .device_entry = gf119_hda_device_entry,
},
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c
index e6965dec09c9..38045c92197f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c
@@ -57,6 +57,7 @@ gm107_sor = {
.hda = {
.hpd = gf119_hda_hpd,
.eld = gf119_hda_eld,
+ .device_entry = gf119_hda_device_entry,
},
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
index 384f82652bec..4dd7f382968e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
@@ -89,7 +89,7 @@ gm200_sor_route_get(struct nvkm_outp *outp, int *link)
}
static const struct nvkm_ior_func
-gm200_sor = {
+gm200_sor_hda = {
.route = {
.get = gm200_sor_route_get,
.set = gm200_sor_route_set,
@@ -115,11 +115,46 @@ gm200_sor = {
.hda = {
.hpd = gf119_hda_hpd,
.eld = gf119_hda_eld,
+ .device_entry = gf119_hda_device_entry,
+ },
+};
+
+static const struct nvkm_ior_func
+gm200_sor = {
+ .route = {
+ .get = gm200_sor_route_get,
+ .set = gm200_sor_route_set,
+ },
+ .state = gf119_sor_state,
+ .power = nv50_sor_power,
+ .clock = gf119_sor_clock,
+ .hdmi = {
+ .ctrl = gk104_hdmi_ctrl,
+ .scdc = gm200_hdmi_scdc,
+ },
+ .dp = {
+ .lanes = { 0, 1, 2, 3 },
+ .links = gf119_sor_dp_links,
+ .power = g94_sor_dp_power,
+ .pattern = gm107_sor_dp_pattern,
+ .drive = gm200_sor_dp_drive,
+ .vcpi = gf119_sor_dp_vcpi,
+ .audio = gf119_sor_dp_audio,
+ .audio_sym = gf119_sor_dp_audio_sym,
+ .watermark = gf119_sor_dp_watermark,
},
};
int
gm200_sor_new(struct nvkm_disp *disp, int id)
{
+ struct nvkm_device *device = disp->engine.subdev.device;
+ u32 hda;
+
+ if (!((hda = nvkm_rd32(device, 0x08a15c)) & 0x40000000))
+ hda = nvkm_rd32(device, 0x101034);
+
+ if (hda & BIT(id))
+ return nvkm_ior_new_(&gm200_sor_hda, disp, SOR, id);
return nvkm_ior_new_(&gm200_sor, disp, SOR, id);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgp100.c
new file mode 100644
index 000000000000..c54f88317a07
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgp100.c
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2020 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "ior.h"
+
+static const struct nvkm_ior_func
+gp100_sor_hda = {
+ .route = {
+ .get = gm200_sor_route_get,
+ .set = gm200_sor_route_set,
+ },
+ .state = gf119_sor_state,
+ .power = nv50_sor_power,
+ .clock = gf119_sor_clock,
+ .hdmi = {
+ .ctrl = gk104_hdmi_ctrl,
+ .scdc = gm200_hdmi_scdc,
+ },
+ .dp = {
+ .lanes = { 0, 1, 2, 3 },
+ .links = gf119_sor_dp_links,
+ .power = g94_sor_dp_power,
+ .pattern = gm107_sor_dp_pattern,
+ .drive = gm200_sor_dp_drive,
+ .vcpi = gf119_sor_dp_vcpi,
+ .audio = gf119_sor_dp_audio,
+ .audio_sym = gf119_sor_dp_audio_sym,
+ .watermark = gf119_sor_dp_watermark,
+ },
+ .hda = {
+ .hpd = gf119_hda_hpd,
+ .eld = gf119_hda_eld,
+ .device_entry = gf119_hda_device_entry,
+ },
+};
+
+static const struct nvkm_ior_func
+gp100_sor = {
+ .route = {
+ .get = gm200_sor_route_get,
+ .set = gm200_sor_route_set,
+ },
+ .state = gf119_sor_state,
+ .power = nv50_sor_power,
+ .clock = gf119_sor_clock,
+ .hdmi = {
+ .ctrl = gk104_hdmi_ctrl,
+ .scdc = gm200_hdmi_scdc,
+ },
+ .dp = {
+ .lanes = { 0, 1, 2, 3 },
+ .links = gf119_sor_dp_links,
+ .power = g94_sor_dp_power,
+ .pattern = gm107_sor_dp_pattern,
+ .drive = gm200_sor_dp_drive,
+ .vcpi = gf119_sor_dp_vcpi,
+ .audio = gf119_sor_dp_audio,
+ .audio_sym = gf119_sor_dp_audio_sym,
+ .watermark = gf119_sor_dp_watermark,
+ },
+};
+
+int
+gp100_sor_new(struct nvkm_disp *disp, int id)
+{
+ struct nvkm_device *device = disp->engine.subdev.device;
+ u32 hda;
+
+ if (!((hda = nvkm_rd32(device, 0x08a15c)) & 0x40000000))
+ hda = nvkm_rd32(device, 0x10ebb0) >> 8;
+
+ if (hda & BIT(id))
+ return nvkm_ior_new_(&gp100_sor_hda, disp, SOR, id);
+ return nvkm_ior_new_(&gp100_sor, disp, SOR, id);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgv100.c
index b0597ff9a714..4441187e8ec9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgv100.c
@@ -78,7 +78,7 @@ gv100_sor_state(struct nvkm_ior *sor, struct nvkm_ior_state *state)
}
static const struct nvkm_ior_func
-gv100_sor = {
+gv100_sor_hda = {
.route = {
.get = gm200_sor_route_get,
.set = gm200_sor_route_set,
@@ -103,12 +103,46 @@ gv100_sor = {
.hda = {
.hpd = gf119_hda_hpd,
.eld = gf119_hda_eld,
+ .device_entry = gv100_hda_device_entry,
+ },
+};
+
+static const struct nvkm_ior_func
+gv100_sor = {
+ .route = {
+ .get = gm200_sor_route_get,
+ .set = gm200_sor_route_set,
+ },
+ .state = gv100_sor_state,
+ .power = nv50_sor_power,
+ .clock = gf119_sor_clock,
+ .hdmi = {
+ .ctrl = gv100_hdmi_ctrl,
+ .scdc = gm200_hdmi_scdc,
+ },
+ .dp = {
+ .lanes = { 0, 1, 2, 3 },
+ .links = gf119_sor_dp_links,
+ .power = g94_sor_dp_power,
+ .pattern = gm107_sor_dp_pattern,
+ .drive = gm200_sor_dp_drive,
+ .audio = gv100_sor_dp_audio,
+ .audio_sym = gv100_sor_dp_audio_sym,
+ .watermark = gv100_sor_dp_watermark,
},
};
int
gv100_sor_new(struct nvkm_disp *disp, int id)
{
+ struct nvkm_device *device = disp->engine.subdev.device;
+ u32 hda;
+
+ if (!((hda = nvkm_rd32(device, 0x08a15c)) & 0x40000000))
+ hda = nvkm_rd32(device, 0x118fb0) >> 8;
+
+ if (hda & BIT(id))
+ return nvkm_ior_new_(&gv100_sor_hda, disp, SOR, id);
return nvkm_ior_new_(&gv100_sor, disp, SOR, id);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu102.c
index 4d5f3791ea7b..59865a934c4b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu102.c
@@ -62,7 +62,7 @@ tu102_sor_dp_links(struct nvkm_ior *sor, struct nvkm_i2c_aux *aux)
}
static const struct nvkm_ior_func
-tu102_sor = {
+tu102_sor_hda = {
.route = {
.get = gm200_sor_route_get,
.set = gm200_sor_route_set,
@@ -88,11 +88,42 @@ tu102_sor = {
.hda = {
.hpd = gf119_hda_hpd,
.eld = gf119_hda_eld,
+ .device_entry = gv100_hda_device_entry,
+ },
+};
+
+static const struct nvkm_ior_func
+tu102_sor = {
+ .route = {
+ .get = gm200_sor_route_get,
+ .set = gm200_sor_route_set,
+ },
+ .state = gv100_sor_state,
+ .power = nv50_sor_power,
+ .clock = gf119_sor_clock,
+ .hdmi = {
+ .ctrl = gv100_hdmi_ctrl,
+ .scdc = gm200_hdmi_scdc,
+ },
+ .dp = {
+ .lanes = { 0, 1, 2, 3 },
+ .links = tu102_sor_dp_links,
+ .power = g94_sor_dp_power,
+ .pattern = gm107_sor_dp_pattern,
+ .drive = gm200_sor_dp_drive,
+ .vcpi = tu102_sor_dp_vcpi,
+ .audio = gv100_sor_dp_audio,
+ .audio_sym = gv100_sor_dp_audio_sym,
+ .watermark = gv100_sor_dp_watermark,
},
};
int
tu102_sor_new(struct nvkm_disp *disp, int id)
{
+ struct nvkm_device *device = disp->engine.subdev.device;
+ u32 hda = nvkm_rd32(device, 0x08a15c);
+ if (hda & BIT(id))
+ return nvkm_ior_new_(&tu102_sor_hda, disp, SOR, id);
return nvkm_ior_new_(&tu102_sor, disp, SOR, id);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
index 4209b24a46d7..e56880f3e3bd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
@@ -319,6 +319,17 @@ gk20a_gr_load_sw(struct gf100_gr *gr, const char *path, int ver)
return 0;
}
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_124_SOC) || IS_ENABLED(CONFIG_ARCH_TEGRA_132_SOC)
+MODULE_FIRMWARE("nvidia/gk20a/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gk20a/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gk20a/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gk20a/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gk20a/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gk20a/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gk20a/sw_method_init.bin");
+MODULE_FIRMWARE("nvidia/gk20a/sw_nonctx.bin");
+#endif
+
static int
gk20a_gr_load(struct gf100_gr *gr, int ver, const struct gf100_gr_fwif *fwif)
{
@@ -341,7 +352,7 @@ gk20a_gr_load(struct gf100_gr *gr, int ver, const struct gf100_gr_fwif *fwif)
static const struct gf100_gr_fwif
gk20a_gr_fwif[] = {
- { -1, gk20a_gr_load, &gk20a_gr },
+ { 0, gk20a_gr_load, &gk20a_gr },
{}
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c
index 8eb2a930a9b5..e4866a02e457 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c
@@ -250,6 +250,11 @@ nvkm_acr_oneinit(struct nvkm_subdev *subdev)
list_add_tail(&lsf->head, &acr->lsf);
}
+ /* Ensure the falcon that'll provide ACR functions is booted first. */
+ lsf = nvkm_acr_falcon(device);
+ if (lsf)
+ list_move(&lsf->head, &acr->lsf);
+
if (!acr->wpr_fw || acr->wpr_comp)
wpr_size = acr->func->wpr_layout(acr);
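
The nvkm_acr_oneinit() hunk above makes the ACR-providing falcon boot first simply by list_move()-ing its entry to the head of acr->lsf before the list is walked. A small userspace illustration of why that works, re-implementing just the two <linux/list.h> primitives involved; the struct and names are mine.

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_del(struct list_head *e)
{
        e->prev->next = e->next;
        e->next->prev = e->prev;
}

/* list_add() inserts right after the head, i.e. at the front of the list. */
static void list_add(struct list_head *e, struct list_head *head)
{
        e->prev = head;
        e->next = head->next;
        head->next->prev = e;
        head->next = e;
}

/* list_move() = delete from wherever it is, re-insert at the front. */
static void list_move(struct list_head *e, struct list_head *head)
{
        list_del(e);
        list_add(e, head);
}

struct falcon { const char *name; struct list_head head; };

int main(void)
{
        struct falcon a = { "gr" }, b = { "sec2" }, c = { "pmu" };
        struct list_head lsf, *p;

        INIT_LIST_HEAD(&lsf);
        /* Add at the front three times, in reverse, so iteration order is a, b, c. */
        list_add(&c.head, &lsf);
        list_add(&b.head, &lsf);
        list_add(&a.head, &lsf);

        list_move(&b.head, &lsf);       /* promote "sec2" so it is walked first */

        for (p = lsf.next; p != &lsf; p = p->next)
                printf("%s\n", container_of(p, struct falcon, head)->name);
        return 0;
}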
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c
index aecce2dac558..667fa016496e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c
@@ -100,25 +100,21 @@ nvkm_acr_hsfw_load_image(struct nvkm_acr *acr, const char *name, int ver,
hsfw->data_size = lhdr->data_size;
hsfw->sig.prod.size = fwhdr->sig_prod_size;
- hsfw->sig.prod.data = kmalloc(hsfw->sig.prod.size, GFP_KERNEL);
+ hsfw->sig.prod.data = kmemdup(fw->data + fwhdr->sig_prod_offset + sig,
+ hsfw->sig.prod.size, GFP_KERNEL);
if (!hsfw->sig.prod.data) {
ret = -ENOMEM;
goto done;
}
- memcpy(hsfw->sig.prod.data, fw->data + fwhdr->sig_prod_offset + sig,
- hsfw->sig.prod.size);
-
hsfw->sig.dbg.size = fwhdr->sig_dbg_size;
- hsfw->sig.dbg.data = kmalloc(hsfw->sig.dbg.size, GFP_KERNEL);
+ hsfw->sig.dbg.data = kmemdup(fw->data + fwhdr->sig_dbg_offset + sig,
+ hsfw->sig.dbg.size, GFP_KERNEL);
if (!hsfw->sig.dbg.data) {
ret = -ENOMEM;
goto done;
}
- memcpy(hsfw->sig.dbg.data, fw->data + fwhdr->sig_dbg_offset + sig,
- hsfw->sig.dbg.size);
-
hsfw->sig.patch_loc = loc;
done:
nvkm_firmware_put(fw);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c
index 06572f8ce914..f9c427559538 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c
@@ -22,22 +22,39 @@
*/
#include "priv.h"
-#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
-int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
-bool nouveau_acpi_rom_supported(struct device *);
-#else
-static inline bool
-nouveau_acpi_rom_supported(struct device *dev)
+static int
+acpi_read_bios(acpi_handle rom_handle, u8 *bios, u32 offset, u32 length)
{
- return false;
-}
+#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
+ acpi_status status;
+ union acpi_object rom_arg_elements[2], *obj;
+ struct acpi_object_list rom_arg;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
-static inline int
-nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len)
-{
+ rom_arg.count = 2;
+ rom_arg.pointer = &rom_arg_elements[0];
+
+ rom_arg_elements[0].type = ACPI_TYPE_INTEGER;
+ rom_arg_elements[0].integer.value = offset;
+
+ rom_arg_elements[1].type = ACPI_TYPE_INTEGER;
+ rom_arg_elements[1].integer.value = length;
+
+ status = acpi_evaluate_object(rom_handle, NULL, &rom_arg, &buffer);
+ if (ACPI_FAILURE(status)) {
+ pr_info("failed to evaluate ROM got %s\n",
+ acpi_format_exception(status));
+ return -ENODEV;
+ }
+ obj = (union acpi_object *)buffer.pointer;
+ length = min(length, obj->buffer.length);
+ memcpy(bios+offset, obj->buffer.pointer, length);
+ kfree(buffer.pointer);
+ return length;
+#else
return -EINVAL;
-}
#endif
+}
/* This version of the shadow function disobeys the ACPI spec and tries
* to fetch in units of more than 4KiB at a time. This is a LOT faster
@@ -51,7 +68,7 @@ acpi_read_fast(void *data, u32 offset, u32 length, struct nvkm_bios *bios)
u32 fetch = limit - start;
if (nvbios_extend(bios, limit) >= 0) {
- int ret = nouveau_acpi_get_bios_chunk(bios->data, start, fetch);
+ int ret = acpi_read_bios(data, bios->data, start, fetch);
if (ret == fetch)
return fetch;
}
@@ -73,9 +90,8 @@ acpi_read_slow(void *data, u32 offset, u32 length, struct nvkm_bios *bios)
if (nvbios_extend(bios, limit) >= 0) {
while (start + fetch < limit) {
- int ret = nouveau_acpi_get_bios_chunk(bios->data,
- start + fetch,
- 0x1000);
+ int ret = acpi_read_bios(data, bios->data,
+ start + fetch, 0x1000);
if (ret != 0x1000)
break;
fetch += 0x1000;
@@ -88,9 +104,22 @@ acpi_read_slow(void *data, u32 offset, u32 length, struct nvkm_bios *bios)
static void *
acpi_init(struct nvkm_bios *bios, const char *name)
{
- if (!nouveau_acpi_rom_supported(bios->subdev.device->dev))
+#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
+ acpi_status status;
+ acpi_handle dhandle, rom_handle;
+
+ dhandle = ACPI_HANDLE(bios->subdev.device->dev);
+ if (!dhandle)
return ERR_PTR(-ENODEV);
- return NULL;
+
+ status = acpi_get_handle(dhandle, "_ROM", &rom_handle);
+ if (ACPI_FAILURE(status))
+ return ERR_PTR(-ENODEV);
+
+ return rom_handle;
+#else
+ return ERR_PTR(-ENODEV);
+#endif
}
const struct nvbios_source
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
index d80dbc8f09b2..2340040942c9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
@@ -114,9 +114,5 @@ int
gf100_ibus_new(struct nvkm_device *device, int index,
struct nvkm_subdev **pibus)
{
- struct nvkm_subdev *ibus;
- if (!(ibus = *pibus = kzalloc(sizeof(*ibus), GFP_KERNEL)))
- return -ENOMEM;
- nvkm_subdev_ctor(&gf100_ibus, device, index, ibus);
- return 0;
+ return nvkm_subdev_new_(&gf100_ibus, device, index, pibus);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf117.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf117.c
index 3905a80da811..1124dadac145 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf117.c
@@ -43,9 +43,5 @@ int
gf117_ibus_new(struct nvkm_device *device, int index,
struct nvkm_subdev **pibus)
{
- struct nvkm_subdev *ibus;
- if (!(ibus = *pibus = kzalloc(sizeof(*ibus), GFP_KERNEL)))
- return -ENOMEM;
- nvkm_subdev_ctor(&gf117_ibus, device, index, ibus);
- return 0;
+ return nvkm_subdev_new_(&gf117_ibus, device, index, pibus);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
index 9025ed1bd2a9..f3915f85838e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
@@ -117,9 +117,5 @@ int
gk104_ibus_new(struct nvkm_device *device, int index,
struct nvkm_subdev **pibus)
{
- struct nvkm_subdev *ibus;
- if (!(ibus = *pibus = kzalloc(sizeof(*ibus), GFP_KERNEL)))
- return -ENOMEM;
- nvkm_subdev_ctor(&gk104_ibus, device, index, ibus);
- return 0;
+ return nvkm_subdev_new_(&gk104_ibus, device, index, pibus);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
index 1a4ab825852c..187d544378b0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
@@ -81,9 +81,5 @@ int
gk20a_ibus_new(struct nvkm_device *device, int index,
struct nvkm_subdev **pibus)
{
- struct nvkm_subdev *ibus;
- if (!(ibus = *pibus = kzalloc(sizeof(*ibus), GFP_KERNEL)))
- return -ENOMEM;
- nvkm_subdev_ctor(&gk20a_ibus, device, index, ibus);
- return 0;
+ return nvkm_subdev_new_(&gk20a_ibus, device, index, pibus);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gm200.c
index c63328152bfa..0f1f0ad6377e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gm200.c
@@ -32,9 +32,5 @@ int
gm200_ibus_new(struct nvkm_device *device, int index,
struct nvkm_subdev **pibus)
{
- struct nvkm_subdev *ibus;
- if (!(ibus = *pibus = kzalloc(sizeof(*ibus), GFP_KERNEL)))
- return -ENOMEM;
- nvkm_subdev_ctor(&gm200_ibus, device, index, ibus);
- return 0;
+ return nvkm_subdev_new_(&gm200_ibus, device, index, pibus);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gp10b.c
index 39db90aa2c80..0347b367cefe 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gp10b.c
@@ -51,9 +51,5 @@ int
gp10b_ibus_new(struct nvkm_device *device, int index,
struct nvkm_subdev **pibus)
{
- struct nvkm_subdev *ibus;
- if (!(ibus = *pibus = kzalloc(sizeof(*ibus), GFP_KERNEL)))
- return -ENOMEM;
- nvkm_subdev_ctor(&gp10b_ibus, device, index, ibus);
- return 0;
+ return nvkm_subdev_new_(&gp10b_ibus, device, index, pibus);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
index 41640e0584ac..199f94e15c5f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
@@ -580,7 +580,7 @@ nvkm_vmm_iter(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
it.pte[it.lvl]++;
}
}
- };
+ }
nvkm_vmm_flush(&it);
return ~0ULL;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
index 5e55ecbd8005..d3f8f916d0db 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
@@ -304,7 +304,7 @@ int tu102_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
FILL(VMM, PT, PTEI, _ptes, MAP, _addr); \
PTEI += _ptes; \
PTEN -= _ptes; \
- }; \
+ } \
nvkm_done((PT)->memory); \
} while(0)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/ic.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/ic.c
index 03b355dabab3..abf3eda683f0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/ic.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/ic.c
@@ -36,8 +36,8 @@ probe_monitoring_device(struct nvkm_i2c_bus *bus,
request_module("%s%s", I2C_MODULE_PREFIX, info->type);
- client = i2c_new_device(&bus->i2c, info);
- if (!client)
+ client = i2c_new_client_device(&bus->i2c, info);
+ if (IS_ERR(client))
return false;
if (!client->dev.driver ||
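The change above follows the move from i2c_new_device(), which returned NULL on failure, to i2c_new_client_device(), which returns an ERR_PTR-encoded error; callers therefore switch from a NULL test to IS_ERR(). A hedged caller-side sketch (everything except the i2c API names is illustrative):

#include <linux/err.h>
#include <linux/i2c.h>

static bool register_monitoring_client(struct i2c_adapter *adap,
				       const struct i2c_board_info *info)
{
	struct i2c_client *client;

	client = i2c_new_client_device(adap, info);
	if (IS_ERR(client))	/* old API: if (!client) */
		return false;

	/* ... use the client; i2c_unregister_device(client) when done ... */
	return true;
}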
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c
index dbb90f2d2ccd..6639ee9b05d3 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc.c
+++ b/drivers/gpu/drm/omapdrm/dss/dispc.c
@@ -3137,33 +3137,12 @@ static void _dispc_mgr_set_lcd_timings(struct dispc_device *dispc,
dispc_write_reg(dispc, DISPC_TIMING_H(channel), timing_h);
dispc_write_reg(dispc, DISPC_TIMING_V(channel), timing_v);
- if (vm->flags & DISPLAY_FLAGS_VSYNC_HIGH)
- vs = false;
- else
- vs = true;
-
- if (vm->flags & DISPLAY_FLAGS_HSYNC_HIGH)
- hs = false;
- else
- hs = true;
-
- if (vm->flags & DISPLAY_FLAGS_DE_HIGH)
- de = false;
- else
- de = true;
-
- if (vm->flags & DISPLAY_FLAGS_PIXDATA_POSEDGE)
- ipc = false;
- else
- ipc = true;
-
- /* always use the 'rf' setting */
- onoff = true;
-
- if (vm->flags & DISPLAY_FLAGS_SYNC_POSEDGE)
- rf = true;
- else
- rf = false;
+ vs = !!(vm->flags & DISPLAY_FLAGS_VSYNC_LOW);
+ hs = !!(vm->flags & DISPLAY_FLAGS_HSYNC_LOW);
+ de = !!(vm->flags & DISPLAY_FLAGS_DE_LOW);
+ ipc = !!(vm->flags & DISPLAY_FLAGS_PIXDATA_NEGEDGE);
+ onoff = true; /* always use the 'rf' setting */
+ rf = !!(vm->flags & DISPLAY_FLAGS_SYNC_POSEDGE);
l = FLD_VAL(onoff, 17, 17) |
FLD_VAL(rf, 16, 16) |
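The rewrite above collapses each if/else pair into a test of the complementary *_LOW / *_NEGEDGE flag, with !! normalising the bit test to 0 or 1. Strictly speaking the old code inverted the signal whenever the *_HIGH flag was absent, while the new code inverts only when the *_LOW flag is present; the two agree under the usual assumption that exactly one polarity flag of each pair is set. A small sketch of the two forms (helper names are illustrative):

#include <linux/types.h>
#include <video/display_timing.h>

/* Old form: invert unless the _HIGH flag is present. */
static bool vsync_inverted_old(u32 flags)
{
	return !(flags & DISPLAY_FLAGS_VSYNC_HIGH);
}

/* New form: invert only when the _LOW flag is present. */
static bool vsync_inverted_new(u32 flags)
{
	return !!(flags & DISPLAY_FLAGS_VSYNC_LOW);
}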
diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c
index 766553bb2f87..9701843ccf09 100644
--- a/drivers/gpu/drm/omapdrm/dss/venc.c
+++ b/drivers/gpu/drm/omapdrm/dss/venc.c
@@ -208,49 +208,6 @@ static const struct venc_config venc_config_ntsc_trm = {
.gen_ctrl = 0x00F90000,
};
-static const struct venc_config venc_config_pal_bdghi = {
- .f_control = 0,
- .vidout_ctrl = 0,
- .sync_ctrl = 0,
- .hfltr_ctrl = 0,
- .x_color = 0,
- .line21 = 0,
- .ln_sel = 21,
- .htrigger_vtrigger = 0,
- .tvdetgp_int_start_stop_x = 0x00140001,
- .tvdetgp_int_start_stop_y = 0x00010001,
- .gen_ctrl = 0x00FB0000,
-
- .llen = 864-1,
- .flens = 625-1,
- .cc_carr_wss_carr = 0x2F7625ED,
- .c_phase = 0xDF,
- .gain_u = 0x111,
- .gain_v = 0x181,
- .gain_y = 0x140,
- .black_level = 0x3e,
- .blank_level = 0x3e,
- .m_control = 0<<2 | 1<<1,
- .bstamp_wss_data = 0x42,
- .s_carr = 0x2a098acb,
- .l21__wc_ctl = 0<<13 | 0x16<<8 | 0<<0,
- .savid__eavid = 0x06A70108,
- .flen__fal = 23<<16 | 624<<0,
- .lal__phase_reset = 2<<17 | 310<<0,
- .hs_int_start_stop_x = 0x00920358,
- .hs_ext_start_stop_x = 0x000F035F,
- .vs_int_start_x = 0x1a7<<16,
- .vs_int_stop_x__vs_int_start_y = 0x000601A7,
- .vs_int_stop_y__vs_ext_start_x = 0x01AF0036,
- .vs_ext_stop_x__vs_ext_start_y = 0x27101af,
- .vs_ext_stop_y = 0x05,
- .avid_start_stop_x = 0x03530082,
- .avid_start_stop_y = 0x0270002E,
- .fid_int_start_x__fid_int_start_y = 0x0005008A,
- .fid_int_offset_y__fid_ext_start_x = 0x002E0138,
- .fid_ext_start_y__fid_ext_offset_y = 0x01380005,
-};
-
enum venc_videomode {
VENC_MODE_UNKNOWN,
VENC_MODE_PAL,
diff --git a/drivers/gpu/drm/omapdrm/omap_debugfs.c b/drivers/gpu/drm/omapdrm/omap_debugfs.c
index 34dfb33145b4..b57fbe8a0ac2 100644
--- a/drivers/gpu/drm/omapdrm/omap_debugfs.c
+++ b/drivers/gpu/drm/omapdrm/omap_debugfs.c
@@ -80,31 +80,16 @@ static struct drm_info_list omap_dmm_debugfs_list[] = {
{"tiler_map", tiler_map_show, 0},
};
-int omap_debugfs_init(struct drm_minor *minor)
+void omap_debugfs_init(struct drm_minor *minor)
{
- struct drm_device *dev = minor->dev;
- int ret;
-
- ret = drm_debugfs_create_files(omap_debugfs_list,
- ARRAY_SIZE(omap_debugfs_list),
- minor->debugfs_root, minor);
-
- if (ret) {
- dev_err(dev->dev, "could not install omap_debugfs_list\n");
- return ret;
- }
+ drm_debugfs_create_files(omap_debugfs_list,
+ ARRAY_SIZE(omap_debugfs_list),
+ minor->debugfs_root, minor);
if (dmm_is_available())
- ret = drm_debugfs_create_files(omap_dmm_debugfs_list,
- ARRAY_SIZE(omap_dmm_debugfs_list),
- minor->debugfs_root, minor);
-
- if (ret) {
- dev_err(dev->dev, "could not install omap_dmm_debugfs_list\n");
- return ret;
- }
-
- return ret;
+ drm_debugfs_create_files(omap_dmm_debugfs_list,
+ ARRAY_SIZE(omap_dmm_debugfs_list),
+ minor->debugfs_root, minor);
}
#endif
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index 7c4b66efcaa7..8a1fac680138 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -82,6 +82,6 @@ struct omap_drm_private {
};
-int omap_debugfs_init(struct drm_minor *minor);
+void omap_debugfs_init(struct drm_minor *minor);
#endif /* __OMAPDRM_DRV_H__ */
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index a1723c1b5fbf..39055c1f0e2f 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -18,6 +18,16 @@ config DRM_PANEL_ARM_VERSATILE
reference designs. The panel is detected using special registers
in the Versatile family syscon registers.
+config DRM_PANEL_ASUS_Z00T_TM5P5_NT35596
+ tristate "ASUS Z00T TM5P5 NT35596 panel"
+ depends on GPIOLIB && OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+	  Say Y here if you want to enable support for the ASUS TM5P5
+	  NT35596 1080x1920 video mode panel as found in some Asus
+	  Zenfone 2 Laser Z00T devices.
+
config DRM_PANEL_BOE_HIMAX8279D
tristate "Boe Himax8279d panel"
depends on OF
@@ -137,6 +147,17 @@ config DRM_PANEL_KINGDISPLAY_KD097D04
24 bit RGB per pixel. It provides a MIPI DSI interface to
the host and has a built-in LED backlight.
+config DRM_PANEL_LEADTEK_LTK050H3146W
+ tristate "Leadtek LTK050H3146W panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for Leadtek LTK050H3146W
+ TFT-LCD modules. The panel has a 720x1280 resolution and uses
+ 24 bit RGB per pixel. It provides a MIPI DSI interface to
+ the host and has a built-in LED backlight.
+
config DRM_PANEL_LEADTEK_LTK500HD1829
tristate "Leadtek LTK500HD1829 panel"
depends on OF
@@ -433,6 +454,14 @@ config DRM_PANEL_TRULY_NT35597_WQXGA
Say Y here if you want to enable support for Truly NT35597 WQXGA Dual DSI
Video Mode panel
+config DRM_PANEL_VISIONOX_RM69299
+ tristate "Visionox RM69299"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ help
+ Say Y here if you want to enable support for Visionox
+ RM69299 DSI Video Mode panel.
+
config DRM_PANEL_XINPENG_XPP055C272
tristate "Xinpeng XPP055C272 panel driver"
depends on OF
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index 96a883cd6630..de74f282c433 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_DRM_PANEL_ARM_VERSATILE) += panel-arm-versatile.o
+obj-$(CONFIG_DRM_PANEL_ASUS_Z00T_TM5P5_NT35596) += panel-asus-z00t-tm5p5-n35596.o
obj-$(CONFIG_DRM_PANEL_BOE_HIMAX8279D) += panel-boe-himax8279d.o
obj-$(CONFIG_DRM_PANEL_BOE_TV101WUM_NL6) += panel-boe-tv101wum-nl6.o
obj-$(CONFIG_DRM_PANEL_LVDS) += panel-lvds.o
@@ -12,6 +13,7 @@ obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9881C) += panel-ilitek-ili9881c.o
obj-$(CONFIG_DRM_PANEL_INNOLUX_P079ZCA) += panel-innolux-p079zca.o
obj-$(CONFIG_DRM_PANEL_JDI_LT070ME05000) += panel-jdi-lt070me05000.o
obj-$(CONFIG_DRM_PANEL_KINGDISPLAY_KD097D04) += panel-kingdisplay-kd097d04.o
+obj-$(CONFIG_DRM_PANEL_LEADTEK_LTK050H3146W) += panel-leadtek-ltk050h3146w.o
obj-$(CONFIG_DRM_PANEL_LEADTEK_LTK500HD1829) += panel-leadtek-ltk500hd1829.o
obj-$(CONFIG_DRM_PANEL_LG_LB035Q02) += panel-lg-lb035q02.o
obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o
@@ -46,4 +48,5 @@ obj-$(CONFIG_DRM_PANEL_TPO_TD028TTEC1) += panel-tpo-td028ttec1.o
obj-$(CONFIG_DRM_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o
obj-$(CONFIG_DRM_PANEL_TPO_TPG110) += panel-tpo-tpg110.o
obj-$(CONFIG_DRM_PANEL_TRULY_NT35597_WQXGA) += panel-truly-nt35597.o
+obj-$(CONFIG_DRM_PANEL_VISIONOX_RM69299) += panel-visionox-rm69299.o
obj-$(CONFIG_DRM_PANEL_XINPENG_XPP055C272) += panel-xinpeng-xpp055c272.o
diff --git a/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c b/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c
new file mode 100644
index 000000000000..39e0f0373f3c
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c
@@ -0,0 +1,367 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+struct tm5p5_nt35596 {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+ struct regulator_bulk_data supplies[2];
+ struct gpio_desc *reset_gpio;
+ bool prepared;
+};
+
+static inline struct tm5p5_nt35596 *to_tm5p5_nt35596(struct drm_panel *panel)
+{
+ return container_of(panel, struct tm5p5_nt35596, panel);
+}
+
+#define dsi_generic_write_seq(dsi, seq...) do { \
+ static const u8 d[] = { seq }; \
+ int ret; \
+ ret = mipi_dsi_generic_write(dsi, d, ARRAY_SIZE(d)); \
+ if (ret < 0) \
+ return ret; \
+ } while (0)
+
+#define dsi_dcs_write_seq(dsi, seq...) do { \
+ static const u8 d[] = { seq }; \
+ int ret; \
+ ret = mipi_dsi_dcs_write_buffer(dsi, d, ARRAY_SIZE(d)); \
+ if (ret < 0) \
+ return ret; \
+ } while (0)
+
+static void tm5p5_nt35596_reset(struct tm5p5_nt35596 *ctx)
+{
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(1000, 2000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ usleep_range(1000, 2000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(15000, 16000);
+}
+
+static int tm5p5_nt35596_on(struct tm5p5_nt35596 *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+
+ dsi_generic_write_seq(dsi, 0xff, 0x05);
+ dsi_generic_write_seq(dsi, 0xfb, 0x01);
+ dsi_generic_write_seq(dsi, 0xc5, 0x31);
+ dsi_generic_write_seq(dsi, 0xff, 0x04);
+ dsi_generic_write_seq(dsi, 0x01, 0x84);
+ dsi_generic_write_seq(dsi, 0x05, 0x25);
+ dsi_generic_write_seq(dsi, 0x06, 0x01);
+ dsi_generic_write_seq(dsi, 0x07, 0x20);
+ dsi_generic_write_seq(dsi, 0x08, 0x06);
+ dsi_generic_write_seq(dsi, 0x09, 0x08);
+ dsi_generic_write_seq(dsi, 0x0a, 0x10);
+ dsi_generic_write_seq(dsi, 0x0b, 0x10);
+ dsi_generic_write_seq(dsi, 0x0c, 0x10);
+ dsi_generic_write_seq(dsi, 0x0d, 0x14);
+ dsi_generic_write_seq(dsi, 0x0e, 0x14);
+ dsi_generic_write_seq(dsi, 0x0f, 0x14);
+ dsi_generic_write_seq(dsi, 0x10, 0x14);
+ dsi_generic_write_seq(dsi, 0x11, 0x14);
+ dsi_generic_write_seq(dsi, 0x12, 0x14);
+ dsi_generic_write_seq(dsi, 0x17, 0xf3);
+ dsi_generic_write_seq(dsi, 0x18, 0xc0);
+ dsi_generic_write_seq(dsi, 0x19, 0xc0);
+ dsi_generic_write_seq(dsi, 0x1a, 0xc0);
+ dsi_generic_write_seq(dsi, 0x1b, 0xb3);
+ dsi_generic_write_seq(dsi, 0x1c, 0xb3);
+ dsi_generic_write_seq(dsi, 0x1d, 0xb3);
+ dsi_generic_write_seq(dsi, 0x1e, 0xb3);
+ dsi_generic_write_seq(dsi, 0x1f, 0xb3);
+ dsi_generic_write_seq(dsi, 0x20, 0xb3);
+ dsi_generic_write_seq(dsi, 0xfb, 0x01);
+ dsi_generic_write_seq(dsi, 0xff, 0x00);
+ dsi_generic_write_seq(dsi, 0xfb, 0x01);
+ dsi_generic_write_seq(dsi, 0x35, 0x01);
+ dsi_generic_write_seq(dsi, 0xd3, 0x06);
+ dsi_generic_write_seq(dsi, 0xd4, 0x04);
+ dsi_generic_write_seq(dsi, 0x5e, 0x0d);
+ dsi_generic_write_seq(dsi, 0x11, 0x00);
+ msleep(100);
+ dsi_generic_write_seq(dsi, 0x29, 0x00);
+ dsi_generic_write_seq(dsi, 0x53, 0x24);
+
+ return 0;
+}
+
+static int tm5p5_nt35596_off(struct tm5p5_nt35596 *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ ret = mipi_dsi_dcs_set_display_off(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set display off: %d\n", ret);
+ return ret;
+ }
+ msleep(60);
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
+ return ret;
+ }
+
+ dsi_dcs_write_seq(dsi, 0x4f, 0x01);
+
+ return 0;
+}
+
+static int tm5p5_nt35596_prepare(struct drm_panel *panel)
+{
+ struct tm5p5_nt35596 *ctx = to_tm5p5_nt35596(panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ if (ctx->prepared)
+ return 0;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enable regulators: %d\n", ret);
+ return ret;
+ }
+
+ tm5p5_nt35596_reset(ctx);
+
+ ret = tm5p5_nt35596_on(ctx);
+ if (ret < 0) {
+ dev_err(dev, "Failed to initialize panel: %d\n", ret);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ regulator_bulk_disable(ARRAY_SIZE(ctx->supplies),
+ ctx->supplies);
+ return ret;
+ }
+
+ ctx->prepared = true;
+ return 0;
+}
+
+static int tm5p5_nt35596_unprepare(struct drm_panel *panel)
+{
+ struct tm5p5_nt35596 *ctx = to_tm5p5_nt35596(panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ if (!ctx->prepared)
+ return 0;
+
+ ret = tm5p5_nt35596_off(ctx);
+ if (ret < 0)
+ dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ regulator_bulk_disable(ARRAY_SIZE(ctx->supplies),
+ ctx->supplies);
+
+ ctx->prepared = false;
+ return 0;
+}
+
+static const struct drm_display_mode tm5p5_nt35596_mode = {
+ .clock = (1080 + 100 + 8 + 16) * (1920 + 4 + 2 + 4) * 60 / 1000,
+ .hdisplay = 1080,
+ .hsync_start = 1080 + 100,
+ .hsync_end = 1080 + 100 + 8,
+ .htotal = 1080 + 100 + 8 + 16,
+ .vdisplay = 1920,
+ .vsync_start = 1920 + 4,
+ .vsync_end = 1920 + 4 + 2,
+ .vtotal = 1920 + 4 + 2 + 4,
+ .vrefresh = 60,
+ .width_mm = 68,
+ .height_mm = 121,
+};
+
+static int tm5p5_nt35596_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &tm5p5_nt35596_mode);
+ if (!mode)
+ return -ENOMEM;
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs tm5p5_nt35596_panel_funcs = {
+ .prepare = tm5p5_nt35596_prepare,
+ .unprepare = tm5p5_nt35596_unprepare,
+ .get_modes = tm5p5_nt35596_get_modes,
+};
+
+static int tm5p5_nt35596_bl_update_status(struct backlight_device *bl)
+{
+ struct mipi_dsi_device *dsi = bl_get_data(bl);
+ u16 brightness = bl->props.brightness;
+ int ret;
+
+ if (bl->props.power != FB_BLANK_UNBLANK ||
+ bl->props.fb_blank != FB_BLANK_UNBLANK ||
+ bl->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK))
+ brightness = 0;
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_set_display_brightness(dsi, brightness);
+ if (ret < 0)
+ return ret;
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ return 0;
+}
+
+static int tm5p5_nt35596_bl_get_brightness(struct backlight_device *bl)
+{
+ struct mipi_dsi_device *dsi = bl_get_data(bl);
+ u16 brightness = bl->props.brightness;
+ int ret;
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_get_display_brightness(dsi, &brightness);
+ if (ret < 0)
+ return ret;
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ return brightness & 0xff;
+}
+
+static const struct backlight_ops tm5p5_nt35596_bl_ops = {
+ .update_status = tm5p5_nt35596_bl_update_status,
+ .get_brightness = tm5p5_nt35596_bl_get_brightness,
+};
+
+static struct backlight_device *
+tm5p5_nt35596_create_backlight(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ const struct backlight_properties props = {
+ .type = BACKLIGHT_RAW,
+ .brightness = 255,
+ .max_brightness = 255,
+ };
+
+ return devm_backlight_device_register(dev, dev_name(dev), dev, dsi,
+ &tm5p5_nt35596_bl_ops, &props);
+}
+
+static int tm5p5_nt35596_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct tm5p5_nt35596 *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->supplies[0].supply = "vdd";
+ ctx->supplies[1].supply = "vddio";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies),
+ ctx->supplies);
+ if (ret < 0) {
+ dev_err(dev, "Failed to get regulators: %d\n", ret);
+ return ret;
+ }
+
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->reset_gpio)) {
+ ret = PTR_ERR(ctx->reset_gpio);
+ dev_err(dev, "Failed to get reset-gpios: %d\n", ret);
+ return ret;
+ }
+
+ ctx->dsi = dsi;
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_VIDEO_HSE | MIPI_DSI_MODE_EOT_PACKET |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM;
+
+ drm_panel_init(&ctx->panel, dev, &tm5p5_nt35596_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ ctx->panel.backlight = tm5p5_nt35596_create_backlight(dsi);
+ if (IS_ERR(ctx->panel.backlight)) {
+ ret = PTR_ERR(ctx->panel.backlight);
+ dev_err(dev, "Failed to create backlight: %d\n", ret);
+ return ret;
+ }
+
+ ret = drm_panel_add(&ctx->panel);
+ if (ret < 0) {
+ dev_err(dev, "Failed to add panel: %d\n", ret);
+ return ret;
+ }
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to attach to DSI host: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int tm5p5_nt35596_remove(struct mipi_dsi_device *dsi)
+{
+ struct tm5p5_nt35596 *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ dev_err(&dsi->dev,
+ "Failed to detach from DSI host: %d\n", ret);
+
+ drm_panel_remove(&ctx->panel);
+
+ return 0;
+}
+
+static const struct of_device_id tm5p5_nt35596_of_match[] = {
+ { .compatible = "asus,z00t-tm5p5-n35596" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, tm5p5_nt35596_of_match);
+
+static struct mipi_dsi_driver tm5p5_nt35596_driver = {
+ .probe = tm5p5_nt35596_probe,
+ .remove = tm5p5_nt35596_remove,
+ .driver = {
+ .name = "panel-tm5p5-nt35596",
+ .of_match_table = tm5p5_nt35596_of_match,
+ },
+};
+module_mipi_dsi_driver(tm5p5_nt35596_driver);
+
+MODULE_AUTHOR("Konrad Dybcio <konradybcio@gmail.com>");
+MODULE_DESCRIPTION("DRM driver for tm5p5 nt35596 1080p video mode dsi panel");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
index 48a164257d18..46fe1805c588 100644
--- a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
+++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
@@ -696,6 +696,34 @@ static const struct panel_desc auo_b101uan08_3_desc = {
.init_cmds = auo_b101uan08_3_init_cmd,
};
+static const struct drm_display_mode boe_tv105wum_nw0_default_mode = {
+ .clock = 159916,
+ .hdisplay = 1200,
+ .hsync_start = 1200 + 80,
+ .hsync_end = 1200 + 80 + 24,
+ .htotal = 1200 + 80 + 24 + 60,
+ .vdisplay = 1920,
+ .vsync_start = 1920 + 20,
+ .vsync_end = 1920 + 20 + 4,
+ .vtotal = 1920 + 20 + 4 + 10,
+ .vrefresh = 60,
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+};
+
+static const struct panel_desc boe_tv105wum_nw0_desc = {
+ .modes = &boe_tv105wum_nw0_default_mode,
+ .bpc = 8,
+ .size = {
+ .width_mm = 141,
+ .height_mm = 226,
+ },
+ .lanes = 4,
+ .format = MIPI_DSI_FMT_RGB888,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_MODE_LPM,
+ .init_cmds = boe_init_cmd,
+};
+
static int boe_panel_get_modes(struct drm_panel *panel,
struct drm_connector *connector)
{
@@ -834,6 +862,9 @@ static const struct of_device_id boe_of_match[] = {
{ .compatible = "auo,b101uan08.3",
.data = &auo_b101uan08_3_desc
},
+ { .compatible = "boe,tv105wum-nw0",
+ .data = &boe_tv105wum_nw0_desc
+ },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, boe_of_match);
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
index 09935520e606..873b1c7059bd 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
@@ -379,7 +379,7 @@ static int ili9322_init(struct drm_panel *panel, struct ili9322 *ili)
"can't set up VCOM amplitude (%d)\n", ret);
return ret;
}
- };
+ }
if (ili->vcom_high != U8_MAX) {
ret = regmap_write(ili->regmap, ILI9322_VCOM_HIGH,
@@ -388,7 +388,7 @@ static int ili9322_init(struct drm_panel *panel, struct ili9322 *ili)
dev_err(ili->dev, "can't set up VCOM high (%d)\n", ret);
return ret;
}
- };
+ }
/* Set up gamma correction */
for (i = 0; i < ARRAY_SIZE(ili->gamma); i++) {
diff --git a/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c b/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
new file mode 100644
index 000000000000..5a7a31c8513e
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
@@ -0,0 +1,691 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Theobroma Systems Design und Consulting GmbH
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/media-bus-format.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/display_timing.h>
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
+struct ltk050h3146w_cmd {
+ char cmd;
+ char data;
+};
+
+struct ltk050h3146w;
+struct ltk050h3146w_desc {
+ const struct drm_display_mode *mode;
+ int (*init)(struct ltk050h3146w *ctx);
+};
+
+struct ltk050h3146w {
+ struct device *dev;
+ struct drm_panel panel;
+ struct gpio_desc *reset_gpio;
+ struct regulator *vci;
+ struct regulator *iovcc;
+ const struct ltk050h3146w_desc *panel_desc;
+ bool prepared;
+};
+
+static const struct ltk050h3146w_cmd page1_cmds[] = {
+ { 0x22, 0x0A }, /* BGR SS GS */
+ { 0x31, 0x00 }, /* column inversion */
+ { 0x53, 0xA2 }, /* VCOM1 */
+ { 0x55, 0xA2 }, /* VCOM2 */
+ { 0x50, 0x81 }, /* VREG1OUT=5V */
+ { 0x51, 0x85 }, /* VREG2OUT=-5V */
+ { 0x62, 0x0D }, /* EQT Time setting */
+/*
+ * The vendor init selected page 1 here _again_.
+ * Is this supposed to be page 2?
+ */
+ { 0xA0, 0x00 },
+ { 0xA1, 0x1A },
+ { 0xA2, 0x28 },
+ { 0xA3, 0x13 },
+ { 0xA4, 0x16 },
+ { 0xA5, 0x29 },
+ { 0xA6, 0x1D },
+ { 0xA7, 0x1E },
+ { 0xA8, 0x84 },
+ { 0xA9, 0x1C },
+ { 0xAA, 0x28 },
+ { 0xAB, 0x75 },
+ { 0xAC, 0x1A },
+ { 0xAD, 0x19 },
+ { 0xAE, 0x4D },
+ { 0xAF, 0x22 },
+ { 0xB0, 0x28 },
+ { 0xB1, 0x54 },
+ { 0xB2, 0x66 },
+ { 0xB3, 0x39 },
+ { 0xC0, 0x00 },
+ { 0xC1, 0x1A },
+ { 0xC2, 0x28 },
+ { 0xC3, 0x13 },
+ { 0xC4, 0x16 },
+ { 0xC5, 0x29 },
+ { 0xC6, 0x1D },
+ { 0xC7, 0x1E },
+ { 0xC8, 0x84 },
+ { 0xC9, 0x1C },
+ { 0xCA, 0x28 },
+ { 0xCB, 0x75 },
+ { 0xCC, 0x1A },
+ { 0xCD, 0x19 },
+ { 0xCE, 0x4D },
+ { 0xCF, 0x22 },
+ { 0xD0, 0x28 },
+ { 0xD1, 0x54 },
+ { 0xD2, 0x66 },
+ { 0xD3, 0x39 },
+};
+
+static const struct ltk050h3146w_cmd page3_cmds[] = {
+ { 0x01, 0x00 },
+ { 0x02, 0x00 },
+ { 0x03, 0x73 },
+ { 0x04, 0x00 },
+ { 0x05, 0x00 },
+ { 0x06, 0x0a },
+ { 0x07, 0x00 },
+ { 0x08, 0x00 },
+ { 0x09, 0x01 },
+ { 0x0a, 0x00 },
+ { 0x0b, 0x00 },
+ { 0x0c, 0x01 },
+ { 0x0d, 0x00 },
+ { 0x0e, 0x00 },
+ { 0x0f, 0x1d },
+ { 0x10, 0x1d },
+ { 0x11, 0x00 },
+ { 0x12, 0x00 },
+ { 0x13, 0x00 },
+ { 0x14, 0x00 },
+ { 0x15, 0x00 },
+ { 0x16, 0x00 },
+ { 0x17, 0x00 },
+ { 0x18, 0x00 },
+ { 0x19, 0x00 },
+ { 0x1a, 0x00 },
+ { 0x1b, 0x00 },
+ { 0x1c, 0x00 },
+ { 0x1d, 0x00 },
+ { 0x1e, 0x40 },
+ { 0x1f, 0x80 },
+ { 0x20, 0x06 },
+ { 0x21, 0x02 },
+ { 0x22, 0x00 },
+ { 0x23, 0x00 },
+ { 0x24, 0x00 },
+ { 0x25, 0x00 },
+ { 0x26, 0x00 },
+ { 0x27, 0x00 },
+ { 0x28, 0x33 },
+ { 0x29, 0x03 },
+ { 0x2a, 0x00 },
+ { 0x2b, 0x00 },
+ { 0x2c, 0x00 },
+ { 0x2d, 0x00 },
+ { 0x2e, 0x00 },
+ { 0x2f, 0x00 },
+ { 0x30, 0x00 },
+ { 0x31, 0x00 },
+ { 0x32, 0x00 },
+ { 0x33, 0x00 },
+ { 0x34, 0x04 },
+ { 0x35, 0x00 },
+ { 0x36, 0x00 },
+ { 0x37, 0x00 },
+ { 0x38, 0x3C },
+ { 0x39, 0x35 },
+ { 0x3A, 0x01 },
+ { 0x3B, 0x40 },
+ { 0x3C, 0x00 },
+ { 0x3D, 0x01 },
+ { 0x3E, 0x00 },
+ { 0x3F, 0x00 },
+ { 0x40, 0x00 },
+ { 0x41, 0x88 },
+ { 0x42, 0x00 },
+ { 0x43, 0x00 },
+ { 0x44, 0x1F },
+ { 0x50, 0x01 },
+ { 0x51, 0x23 },
+ { 0x52, 0x45 },
+ { 0x53, 0x67 },
+ { 0x54, 0x89 },
+ { 0x55, 0xab },
+ { 0x56, 0x01 },
+ { 0x57, 0x23 },
+ { 0x58, 0x45 },
+ { 0x59, 0x67 },
+ { 0x5a, 0x89 },
+ { 0x5b, 0xab },
+ { 0x5c, 0xcd },
+ { 0x5d, 0xef },
+ { 0x5e, 0x11 },
+ { 0x5f, 0x01 },
+ { 0x60, 0x00 },
+ { 0x61, 0x15 },
+ { 0x62, 0x14 },
+ { 0x63, 0x0E },
+ { 0x64, 0x0F },
+ { 0x65, 0x0C },
+ { 0x66, 0x0D },
+ { 0x67, 0x06 },
+ { 0x68, 0x02 },
+ { 0x69, 0x07 },
+ { 0x6a, 0x02 },
+ { 0x6b, 0x02 },
+ { 0x6c, 0x02 },
+ { 0x6d, 0x02 },
+ { 0x6e, 0x02 },
+ { 0x6f, 0x02 },
+ { 0x70, 0x02 },
+ { 0x71, 0x02 },
+ { 0x72, 0x02 },
+ { 0x73, 0x02 },
+ { 0x74, 0x02 },
+ { 0x75, 0x01 },
+ { 0x76, 0x00 },
+ { 0x77, 0x14 },
+ { 0x78, 0x15 },
+ { 0x79, 0x0E },
+ { 0x7a, 0x0F },
+ { 0x7b, 0x0C },
+ { 0x7c, 0x0D },
+ { 0x7d, 0x06 },
+ { 0x7e, 0x02 },
+ { 0x7f, 0x07 },
+ { 0x80, 0x02 },
+ { 0x81, 0x02 },
+ { 0x82, 0x02 },
+ { 0x83, 0x02 },
+ { 0x84, 0x02 },
+ { 0x85, 0x02 },
+ { 0x86, 0x02 },
+ { 0x87, 0x02 },
+ { 0x88, 0x02 },
+ { 0x89, 0x02 },
+ { 0x8A, 0x02 },
+};
+
+static const struct ltk050h3146w_cmd page4_cmds[] = {
+ { 0x70, 0x00 },
+ { 0x71, 0x00 },
+ { 0x82, 0x0F }, /* VGH_MOD clamp level=15v */
+ { 0x84, 0x0F }, /* VGH clamp level 15V */
+ { 0x85, 0x0D }, /* VGL clamp level (-10V) */
+ { 0x32, 0xAC },
+ { 0x8C, 0x80 },
+ { 0x3C, 0xF5 },
+ { 0xB5, 0x07 }, /* GAMMA OP */
+ { 0x31, 0x45 }, /* SOURCE OP */
+ { 0x3A, 0x24 }, /* PS_EN OFF */
+ { 0x88, 0x33 }, /* LVD */
+};
+
+static inline
+struct ltk050h3146w *panel_to_ltk050h3146w(struct drm_panel *panel)
+{
+ return container_of(panel, struct ltk050h3146w, panel);
+}
+
+#define dsi_dcs_write_seq(dsi, cmd, seq...) do { \
+ static const u8 d[] = { seq }; \
+ int ret; \
+ ret = mipi_dsi_dcs_write(dsi, cmd, d, ARRAY_SIZE(d)); \
+ if (ret < 0) \
+ return ret; \
+ } while (0)
+
+static int ltk050h3146w_init_sequence(struct ltk050h3146w *ctx)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ int ret;
+
+ /*
+ * Init sequence was supplied by the panel vendor without much
+ * documentation.
+ */
+ dsi_dcs_write_seq(dsi, 0xdf, 0x93, 0x65, 0xf8);
+ dsi_dcs_write_seq(dsi, 0xb0, 0x01, 0x03, 0x02, 0x00, 0x64, 0x06,
+ 0x01);
+ dsi_dcs_write_seq(dsi, 0xb2, 0x00, 0xb5);
+ dsi_dcs_write_seq(dsi, 0xb3, 0x00, 0xb5);
+ dsi_dcs_write_seq(dsi, 0xb7, 0x00, 0xbf, 0x00, 0x00, 0xbf, 0x00);
+
+ dsi_dcs_write_seq(dsi, 0xb9, 0x00, 0xc4, 0x23, 0x07);
+ dsi_dcs_write_seq(dsi, 0xbb, 0x02, 0x01, 0x24, 0x00, 0x28, 0x0f,
+ 0x28, 0x04, 0xcc, 0xcc, 0xcc);
+ dsi_dcs_write_seq(dsi, 0xbc, 0x0f, 0x04);
+ dsi_dcs_write_seq(dsi, 0xbe, 0x1e, 0xf2);
+ dsi_dcs_write_seq(dsi, 0xc0, 0x26, 0x03);
+ dsi_dcs_write_seq(dsi, 0xc1, 0x00, 0x12);
+ dsi_dcs_write_seq(dsi, 0xc3, 0x04, 0x02, 0x02, 0x76, 0x01, 0x80,
+ 0x80);
+ dsi_dcs_write_seq(dsi, 0xc4, 0x24, 0x80, 0xb4, 0x81, 0x12, 0x0f,
+ 0x16, 0x00, 0x00);
+ dsi_dcs_write_seq(dsi, 0xc8, 0x7f, 0x72, 0x67, 0x5d, 0x5d, 0x50,
+ 0x56, 0x41, 0x59, 0x57, 0x55, 0x70, 0x5b, 0x5f,
+ 0x4f, 0x47, 0x38, 0x23, 0x08, 0x7f, 0x72, 0x67,
+ 0x5d, 0x5d, 0x50, 0x56, 0x41, 0x59, 0x57, 0x55,
+ 0x70, 0x5b, 0x5f, 0x4f, 0x47, 0x38, 0x23, 0x08);
+ dsi_dcs_write_seq(dsi, 0xd0, 0x1e, 0x1f, 0x57, 0x58, 0x48, 0x4a,
+ 0x44, 0x46, 0x40, 0x1f, 0x42, 0x1f, 0x1f, 0x1f,
+ 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
+ dsi_dcs_write_seq(dsi, 0xd1, 0x1e, 0x1f, 0x57, 0x58, 0x49, 0x4b,
+ 0x45, 0x47, 0x41, 0x1f, 0x43, 0x1f, 0x1f, 0x1f,
+ 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
+ dsi_dcs_write_seq(dsi, 0xd2, 0x1f, 0x1e, 0x17, 0x18, 0x07, 0x05,
+ 0x0b, 0x09, 0x03, 0x1f, 0x01, 0x1f, 0x1f, 0x1f,
+ 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
+ dsi_dcs_write_seq(dsi, 0xd3, 0x1f, 0x1e, 0x17, 0x18, 0x06, 0x04,
+ 0x0a, 0x08, 0x02, 0x1f, 0x00, 0x1f, 0x1f, 0x1f,
+ 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
+ dsi_dcs_write_seq(dsi, 0xd4, 0x00, 0x00, 0x00, 0x0c, 0x06, 0x20,
+ 0x01, 0x02, 0x00, 0x60, 0x15, 0xb0, 0x30, 0x03,
+ 0x04, 0x00, 0x60, 0x72, 0x0a, 0x00, 0x60, 0x08);
+ dsi_dcs_write_seq(dsi, 0xd5, 0x00, 0x06, 0x06, 0x00, 0x30, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0xbc, 0x50, 0x00, 0x05,
+ 0x21, 0x00, 0x60);
+ dsi_dcs_write_seq(dsi, 0xdd, 0x2c, 0xa3, 0x00);
+ dsi_dcs_write_seq(dsi, 0xde, 0x02);
+ dsi_dcs_write_seq(dsi, 0xb2, 0x32, 0x1c);
+ dsi_dcs_write_seq(dsi, 0xb7, 0x3b, 0x70, 0x00, 0x04);
+ dsi_dcs_write_seq(dsi, 0xc1, 0x11);
+ dsi_dcs_write_seq(dsi, 0xbb, 0x21, 0x22, 0x23, 0x24, 0x36, 0x37);
+ dsi_dcs_write_seq(dsi, 0xc2, 0x20, 0x38, 0x1e, 0x84);
+ dsi_dcs_write_seq(dsi, 0xde, 0x00);
+
+ ret = mipi_dsi_dcs_set_tear_on(dsi, 1);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev, "failed to set tear on: %d\n",
+ ret);
+ return ret;
+ }
+
+ msleep(60);
+
+ return 0;
+}
+
+static const struct drm_display_mode ltk050h3146w_mode = {
+ .hdisplay = 720,
+ .hsync_start = 720 + 42,
+ .hsync_end = 720 + 42 + 8,
+ .htotal = 720 + 42 + 8 + 42,
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 12,
+ .vsync_end = 1280 + 12 + 4,
+ .vtotal = 1280 + 12 + 4 + 18,
+ .clock = 64018,
+ .width_mm = 62,
+ .height_mm = 110,
+};
+
+static const struct ltk050h3146w_desc ltk050h3146w_data = {
+ .mode = &ltk050h3146w_mode,
+ .init = ltk050h3146w_init_sequence,
+};
+
+static int ltk050h3146w_a2_select_page(struct ltk050h3146w *ctx, int page)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ u8 d[3] = { 0x98, 0x81, page };
+
+ return mipi_dsi_dcs_write(dsi, 0xff, d, ARRAY_SIZE(d));
+}
+
+static int ltk050h3146w_a2_write_page(struct ltk050h3146w *ctx, int page,
+ const struct ltk050h3146w_cmd *cmds,
+ int num)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ int i, ret;
+
+ ret = ltk050h3146w_a2_select_page(ctx, page);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev, "failed to select page %d: %d\n",
+ page, ret);
+ return ret;
+ }
+
+ for (i = 0; i < num; i++) {
+ ret = mipi_dsi_generic_write(dsi, &cmds[i],
+ sizeof(struct ltk050h3146w_cmd));
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev,
+ "failed to write page %d init cmds: %d\n",
+ page, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ltk050h3146w_a2_init_sequence(struct ltk050h3146w *ctx)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ int ret;
+
+ /*
+ * Init sequence was supplied by the panel vendor without much
+ * documentation.
+ */
+ ret = ltk050h3146w_a2_write_page(ctx, 3, page3_cmds,
+ ARRAY_SIZE(page3_cmds));
+ if (ret < 0)
+ return ret;
+
+ ret = ltk050h3146w_a2_write_page(ctx, 4, page4_cmds,
+ ARRAY_SIZE(page4_cmds));
+ if (ret < 0)
+ return ret;
+
+ ret = ltk050h3146w_a2_write_page(ctx, 1, page1_cmds,
+ ARRAY_SIZE(page1_cmds));
+ if (ret < 0)
+ return ret;
+
+ ret = ltk050h3146w_a2_select_page(ctx, 0);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev, "failed to select page 0: %d\n", ret);
+ return ret;
+ }
+
+	/* vendor code called this without a param, where there should be one */
+ ret = mipi_dsi_dcs_set_tear_on(dsi, 0);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev, "failed to set tear on: %d\n",
+ ret);
+ return ret;
+ }
+
+ msleep(60);
+
+ return 0;
+}
+
+static const struct drm_display_mode ltk050h3146w_a2_mode = {
+ .hdisplay = 720,
+ .hsync_start = 720 + 42,
+ .hsync_end = 720 + 42 + 10,
+ .htotal = 720 + 42 + 10 + 60,
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 18,
+ .vsync_end = 1280 + 18 + 4,
+ .vtotal = 1280 + 18 + 4 + 12,
+ .clock = 65595,
+ .width_mm = 62,
+ .height_mm = 110,
+};
+
+static const struct ltk050h3146w_desc ltk050h3146w_a2_data = {
+ .mode = &ltk050h3146w_a2_mode,
+ .init = ltk050h3146w_a2_init_sequence,
+};
+
+static int ltk050h3146w_unprepare(struct drm_panel *panel)
+{
+ struct ltk050h3146w *ctx = panel_to_ltk050h3146w(panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ int ret;
+
+ if (!ctx->prepared)
+ return 0;
+
+ ret = mipi_dsi_dcs_set_display_off(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev, "failed to set display off: %d\n",
+ ret);
+ return ret;
+ }
+
+	ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev, "failed to enter sleep mode: %d\n",
+ ret);
+ return ret;
+ }
+
+ regulator_disable(ctx->iovcc);
+ regulator_disable(ctx->vci);
+
+ ctx->prepared = false;
+
+ return 0;
+}
+
+static int ltk050h3146w_prepare(struct drm_panel *panel)
+{
+ struct ltk050h3146w *ctx = panel_to_ltk050h3146w(panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ int ret;
+
+ if (ctx->prepared)
+ return 0;
+
+ DRM_DEV_DEBUG_DRIVER(ctx->dev, "Resetting the panel\n");
+ ret = regulator_enable(ctx->vci);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev,
+ "Failed to enable vci supply: %d\n", ret);
+ return ret;
+ }
+ ret = regulator_enable(ctx->iovcc);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev,
+ "Failed to enable iovcc supply: %d\n", ret);
+ goto disable_vci;
+ }
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(5000, 6000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ msleep(20);
+
+ ret = ctx->panel_desc->init(ctx);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev, "Panel init sequence failed: %d\n",
+ ret);
+ goto disable_iovcc;
+ }
+
+ ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev, "Failed to exit sleep mode: %d\n", ret);
+ goto disable_iovcc;
+ }
+
+ /* T9: 120ms */
+ msleep(120);
+
+ ret = mipi_dsi_dcs_set_display_on(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev, "Failed to set display on: %d\n", ret);
+ goto disable_iovcc;
+ }
+
+ msleep(50);
+
+ ctx->prepared = true;
+
+ return 0;
+
+disable_iovcc:
+ regulator_disable(ctx->iovcc);
+disable_vci:
+ regulator_disable(ctx->vci);
+ return ret;
+}
+
+static int ltk050h3146w_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct ltk050h3146w *ctx = panel_to_ltk050h3146w(panel);
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, ctx->panel_desc->mode);
+ if (!mode)
+ return -ENOMEM;
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs ltk050h3146w_funcs = {
+ .unprepare = ltk050h3146w_unprepare,
+ .prepare = ltk050h3146w_prepare,
+ .get_modes = ltk050h3146w_get_modes,
+};
+
+static int ltk050h3146w_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct ltk050h3146w *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->panel_desc = of_device_get_match_data(dev);
+ if (!ctx->panel_desc)
+ return -EINVAL;
+
+ ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->reset_gpio)) {
+ DRM_DEV_ERROR(dev, "cannot get reset gpio\n");
+ return PTR_ERR(ctx->reset_gpio);
+ }
+
+ ctx->vci = devm_regulator_get(dev, "vci");
+ if (IS_ERR(ctx->vci)) {
+ ret = PTR_ERR(ctx->vci);
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dev,
+ "Failed to request vci regulator: %d\n",
+ ret);
+ return ret;
+ }
+
+ ctx->iovcc = devm_regulator_get(dev, "iovcc");
+ if (IS_ERR(ctx->iovcc)) {
+ ret = PTR_ERR(ctx->iovcc);
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dev,
+ "Failed to request iovcc regulator: %d\n",
+ ret);
+ return ret;
+ }
+
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ ctx->dev = dev;
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_EOT_PACKET;
+
+ drm_panel_init(&ctx->panel, &dsi->dev, &ltk050h3146w_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ ret = drm_panel_of_backlight(&ctx->panel);
+ if (ret)
+ return ret;
+
+ drm_panel_add(&ctx->panel);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "mipi_dsi_attach failed: %d\n", ret);
+ drm_panel_remove(&ctx->panel);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ltk050h3146w_shutdown(struct mipi_dsi_device *dsi)
+{
+ struct ltk050h3146w *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = drm_panel_unprepare(&ctx->panel);
+ if (ret < 0)
+ DRM_DEV_ERROR(&dsi->dev, "Failed to unprepare panel: %d\n",
+ ret);
+
+ ret = drm_panel_disable(&ctx->panel);
+ if (ret < 0)
+ DRM_DEV_ERROR(&dsi->dev, "Failed to disable panel: %d\n",
+ ret);
+}
+
+static int ltk050h3146w_remove(struct mipi_dsi_device *dsi)
+{
+ struct ltk050h3146w *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ltk050h3146w_shutdown(dsi);
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ DRM_DEV_ERROR(&dsi->dev, "Failed to detach from DSI host: %d\n",
+ ret);
+
+ drm_panel_remove(&ctx->panel);
+
+ return 0;
+}
+
+static const struct of_device_id ltk050h3146w_of_match[] = {
+ {
+ .compatible = "leadtek,ltk050h3146w",
+ .data = &ltk050h3146w_data,
+ },
+ {
+ .compatible = "leadtek,ltk050h3146w-a2",
+ .data = &ltk050h3146w_a2_data,
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ltk050h3146w_of_match);
+
+static struct mipi_dsi_driver ltk050h3146w_driver = {
+ .driver = {
+ .name = "panel-leadtek-ltk050h3146w",
+ .of_match_table = ltk050h3146w_of_match,
+ },
+ .probe = ltk050h3146w_probe,
+ .remove = ltk050h3146w_remove,
+ .shutdown = ltk050h3146w_shutdown,
+};
+module_mipi_dsi_driver(ltk050h3146w_driver);
+
+MODULE_AUTHOR("Heiko Stuebner <heiko.stuebner@theobroma-systems.com>");
+MODULE_DESCRIPTION("DRM driver for Leadtek LTK050H3146W MIPI DSI panel");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c b/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c
index 76ecf2de9c44..113ab9c0396b 100644
--- a/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c
+++ b/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c
@@ -377,7 +377,7 @@ static const struct drm_display_mode default_mode = {
.vsync_end = 1280 + 30 + 4,
.vtotal = 1280 + 30 + 4 + 12,
.vrefresh = 60,
- .clock = 41600,
+ .clock = 69217,
.width_mm = 62,
.height_mm = 110,
};
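The new clock value is consistent with the usual relation clock = htotal * vtotal * vrefresh. With the vertical total visible in this hunk (1280 + 30 + 4 + 12 = 1326 lines) and a 60 Hz refresh:

	69217 kHz / (1326 lines * 60 Hz) ~= 870 pixel clocks per line
	41600 kHz / (1326 lines * 60 Hz) ~= 523 pixel clocks per line

so the corrected value implies an htotal of about 870, whereas the old one implied only about 523. The htotal itself is defined earlier in this file and is not visible in the hunk, so 870 is inferred from the arithmetic rather than quoted from the source.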
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt39016.c b/drivers/gpu/drm/panel/panel-novatek-nt39016.c
index a470810f7dbe..05cae8d62d56 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt39016.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt39016.c
@@ -49,7 +49,8 @@ enum nt39016_regs {
#define NT39016_SYSTEM_STANDBY BIT(1)
struct nt39016_panel_info {
- struct drm_display_mode display_mode;
+ const struct drm_display_mode *display_modes;
+ unsigned int num_modes;
u16 width_mm, height_mm;
u32 bus_format, bus_flags;
};
@@ -212,15 +213,22 @@ static int nt39016_get_modes(struct drm_panel *drm_panel,
struct nt39016 *panel = to_nt39016(drm_panel);
const struct nt39016_panel_info *panel_info = panel->panel_info;
struct drm_display_mode *mode;
+ unsigned int i;
- mode = drm_mode_duplicate(connector->dev, &panel_info->display_mode);
- if (!mode)
- return -ENOMEM;
+ for (i = 0; i < panel_info->num_modes; i++) {
+ mode = drm_mode_duplicate(connector->dev,
+ &panel_info->display_modes[i]);
+ if (!mode)
+ return -ENOMEM;
+
+ drm_mode_set_name(mode);
- drm_mode_set_name(mode);
+ mode->type = DRM_MODE_TYPE_DRIVER;
+ if (panel_info->num_modes == 1)
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
- mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
- drm_mode_probed_add(connector, mode);
+ drm_mode_probed_add(connector, mode);
+ }
connector->display_info.bpc = 8;
connector->display_info.width_mm = panel_info->width_mm;
@@ -230,7 +238,7 @@ static int nt39016_get_modes(struct drm_panel *drm_panel,
&panel_info->bus_format, 1);
connector->display_info.bus_flags = panel_info->bus_flags;
- return 1;
+ return panel_info->num_modes;
}
static const struct drm_panel_funcs nt39016_funcs = {
@@ -316,8 +324,8 @@ static int nt39016_remove(struct spi_device *spi)
return 0;
}
-static const struct nt39016_panel_info kd035g6_info = {
- .display_mode = {
+static const struct drm_display_mode kd035g6_display_modes[] = {
+ { /* 60 Hz */
.clock = 6000,
.hdisplay = 320,
.hsync_start = 320 + 10,
@@ -330,6 +338,24 @@ static const struct nt39016_panel_info kd035g6_info = {
.vrefresh = 60,
.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
},
+ { /* 50 Hz */
+ .clock = 5400,
+ .hdisplay = 320,
+ .hsync_start = 320 + 42,
+ .hsync_end = 320 + 42 + 50,
+ .htotal = 320 + 42 + 50 + 20,
+ .vdisplay = 240,
+ .vsync_start = 240 + 5,
+ .vsync_end = 240 + 5 + 1,
+ .vtotal = 240 + 5 + 1 + 4,
+ .vrefresh = 50,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+ },
+};
+
+static const struct nt39016_panel_info kd035g6_info = {
+ .display_modes = kd035g6_display_modes,
+ .num_modes = ARRAY_SIZE(kd035g6_display_modes),
.width_mm = 71,
.height_mm = 53,
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 3ad828eaefe1..b6ecd1552132 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -23,6 +23,7 @@
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
+#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
@@ -108,6 +109,7 @@ struct panel_simple {
struct i2c_adapter *ddc;
struct gpio_desc *enable_gpio;
+ struct gpio_desc *hpd_gpio;
struct drm_display_mode override_mode;
};
@@ -259,11 +261,37 @@ static int panel_simple_unprepare(struct drm_panel *panel)
return 0;
}
+static int panel_simple_get_hpd_gpio(struct device *dev,
+ struct panel_simple *p, bool from_probe)
+{
+ int err;
+
+ p->hpd_gpio = devm_gpiod_get_optional(dev, "hpd", GPIOD_IN);
+ if (IS_ERR(p->hpd_gpio)) {
+ err = PTR_ERR(p->hpd_gpio);
+
+ /*
+ * If we're called from probe we won't consider '-EPROBE_DEFER'
+ * to be an error--we'll leave the error code in "hpd_gpio".
+ * When we try to use it we'll try again. This allows for
+ * circular dependencies where the component providing the
+ * hpd gpio needs the panel to init before probing.
+ */
+ if (err != -EPROBE_DEFER || !from_probe) {
+ dev_err(dev, "failed to get 'hpd' GPIO: %d\n", err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
static int panel_simple_prepare(struct drm_panel *panel)
{
struct panel_simple *p = to_panel_simple(panel);
unsigned int delay;
int err;
+ int hpd_asserted;
if (p->prepared)
return 0;
@@ -282,6 +310,26 @@ static int panel_simple_prepare(struct drm_panel *panel)
if (delay)
msleep(delay);
+ if (p->hpd_gpio) {
+ if (IS_ERR(p->hpd_gpio)) {
+ err = panel_simple_get_hpd_gpio(panel->dev, p, false);
+ if (err)
+ return err;
+ }
+
+ err = readx_poll_timeout(gpiod_get_value_cansleep, p->hpd_gpio,
+ hpd_asserted, hpd_asserted,
+ 1000, 2000000);
+ if (hpd_asserted < 0)
+ err = hpd_asserted;
+
+ if (err) {
+ dev_err(panel->dev,
+ "error waiting for hpd GPIO: %d\n", err);
+ return err;
+ }
+ }
+
p->prepared = true;
return 0;
@@ -462,6 +510,11 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
panel->desc = desc;
panel->no_hpd = of_property_read_bool(dev->of_node, "no-hpd");
+ if (!panel->no_hpd) {
+ err = panel_simple_get_hpd_gpio(dev, panel, true);
+ if (err)
+ return err;
+ }
panel->supply = devm_regulator_get(dev, "power");
if (IS_ERR(panel->supply))
@@ -836,7 +889,8 @@ static const struct panel_desc auo_g101evn010 = {
.width = 216,
.height = 135,
},
- .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct drm_display_mode auo_g104sn02_mode = {
@@ -862,6 +916,31 @@ static const struct panel_desc auo_g104sn02 = {
},
};
+static const struct drm_display_mode auo_g121ean01_mode = {
+ .clock = 66700,
+ .hdisplay = 1280,
+ .hsync_start = 1280 + 58,
+ .hsync_end = 1280 + 58 + 8,
+ .htotal = 1280 + 58 + 8 + 70,
+ .vdisplay = 800,
+ .vsync_start = 800 + 6,
+ .vsync_end = 800 + 6 + 4,
+ .vtotal = 800 + 6 + 4 + 10,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc auo_g121ean01 = {
+ .modes = &auo_g121ean01_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 261,
+ .height = 163,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct display_timing auo_g133han01_timings = {
.pixelclock = { 134000000, 141200000, 149000000 },
.hactive = { 1920, 1920, 1920 },
@@ -892,6 +971,31 @@ static const struct panel_desc auo_g133han01 = {
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
+static const struct drm_display_mode auo_g156xtn01_mode = {
+ .clock = 76000,
+ .hdisplay = 1366,
+ .hsync_start = 1366 + 33,
+ .hsync_end = 1366 + 33 + 67,
+ .htotal = 1560,
+ .vdisplay = 768,
+ .vsync_start = 768 + 4,
+ .vsync_end = 768 + 4 + 4,
+ .vtotal = 806,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc auo_g156xtn01 = {
+ .modes = &auo_g156xtn01_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 344,
+ .height = 194,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct display_timing auo_g185han01_timings = {
.pixelclock = { 120000000, 144000000, 175000000 },
.hactive = { 1920, 1920, 1920 },
@@ -922,6 +1026,36 @@ static const struct panel_desc auo_g185han01 = {
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
+static const struct display_timing auo_g190ean01_timings = {
+ .pixelclock = { 90000000, 108000000, 135000000 },
+ .hactive = { 1280, 1280, 1280 },
+ .hfront_porch = { 126, 184, 1266 },
+ .hback_porch = { 84, 122, 844 },
+ .hsync_len = { 70, 102, 704 },
+ .vactive = { 1024, 1024, 1024 },
+ .vfront_porch = { 4, 26, 76 },
+ .vback_porch = { 2, 8, 25 },
+ .vsync_len = { 2, 8, 25 },
+};
+
+static const struct panel_desc auo_g190ean01 = {
+ .timings = &auo_g190ean01_timings,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 376,
+ .height = 301,
+ },
+ .delay = {
+ .prepare = 50,
+ .enable = 200,
+ .disable = 110,
+ .unprepare = 1000,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct display_timing auo_p320hvn03_timings = {
.pixelclock = { 106000000, 148500000, 164000000 },
.hactive = { 1920, 1920, 1920 },
@@ -1092,6 +1226,38 @@ static const struct panel_desc boe_nv101wxmn51 = {
},
};
+/* Also used for boe_nv133fhm_n62 */
+static const struct drm_display_mode boe_nv133fhm_n61_modes = {
+ .clock = 147840,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 48,
+ .hsync_end = 1920 + 48 + 32,
+ .htotal = 1920 + 48 + 32 + 200,
+ .vdisplay = 1080,
+ .vsync_start = 1080 + 3,
+ .vsync_end = 1080 + 3 + 6,
+ .vtotal = 1080 + 3 + 6 + 31,
+ .vrefresh = 60,
+};
+
+/* Also used for boe_nv133fhm_n62 */
+static const struct panel_desc boe_nv133fhm_n61 = {
+ .modes = &boe_nv133fhm_n61_modes,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 294,
+ .height = 165,
+ },
+ .delay = {
+ .hpd_absent_delay = 200,
+ .unprepare = 500,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_DATA_MSB_TO_LSB,
+ .connector_type = DRM_MODE_CONNECTOR_eDP,
+};
+
static const struct drm_display_mode boe_nv140fhmn49_modes[] = {
{
.clock = 148500,
@@ -1980,6 +2146,37 @@ static const struct panel_desc innolux_zj070na_01p = {
},
};
+static const struct drm_display_mode ivo_m133nwf4_r0_mode = {
+ .clock = 138778,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 24,
+ .hsync_end = 1920 + 24 + 48,
+ .htotal = 1920 + 24 + 48 + 88,
+ .vdisplay = 1080,
+ .vsync_start = 1080 + 3,
+ .vsync_end = 1080 + 3 + 12,
+ .vtotal = 1080 + 3 + 12 + 17,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
+};
+
+static const struct panel_desc ivo_m133nwf4_r0 = {
+ .modes = &ivo_m133nwf4_r0_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 294,
+ .height = 165,
+ },
+ .delay = {
+ .hpd_absent_delay = 200,
+ .unprepare = 500,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_DATA_MSB_TO_LSB,
+ .connector_type = DRM_MODE_CONNECTOR_eDP,
+};
+
static const struct display_timing koe_tx14d24vm1bpa_timing = {
.pixelclock = { 5580000, 5850000, 6200000 },
.hactive = { 320, 320, 320 },
@@ -2168,6 +2365,7 @@ static const struct panel_desc lg_lp120up1 = {
.width = 267,
.height = 183,
},
+ .connector_type = DRM_MODE_CONNECTOR_eDP,
};
static const struct drm_display_mode lg_lp129qe_mode = {
@@ -3065,6 +3263,32 @@ static const struct panel_desc shelly_sca07010_bfn_lnn = {
.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
};
+static const struct drm_display_mode starry_kr070pe2t_mode = {
+ .clock = 33000,
+ .hdisplay = 800,
+ .hsync_start = 800 + 209,
+ .hsync_end = 800 + 209 + 1,
+ .htotal = 800 + 209 + 1 + 45,
+ .vdisplay = 480,
+ .vsync_start = 480 + 22,
+ .vsync_end = 480 + 22 + 1,
+ .vtotal = 480 + 22 + 1 + 22,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc starry_kr070pe2t = {
+ .modes = &starry_kr070pe2t_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 152,
+ .height = 86,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct drm_display_mode starry_kr122ea0sra_mode = {
.clock = 147000,
.hdisplay = 1920,
@@ -3455,12 +3679,21 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "auo,g104sn02",
.data = &auo_g104sn02,
}, {
+ .compatible = "auo,g121ean01",
+ .data = &auo_g121ean01,
+ }, {
.compatible = "auo,g133han01",
.data = &auo_g133han01,
}, {
+ .compatible = "auo,g156xtn01",
+ .data = &auo_g156xtn01,
+ }, {
.compatible = "auo,g185han01",
.data = &auo_g185han01,
}, {
+ .compatible = "auo,g190ean01",
+ .data = &auo_g190ean01,
+ }, {
.compatible = "auo,p320hvn03",
.data = &auo_p320hvn03,
}, {
@@ -3479,6 +3712,12 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "boe,nv101wxmn51",
.data = &boe_nv101wxmn51,
}, {
+ .compatible = "boe,nv133fhm-n61",
+ .data = &boe_nv133fhm_n61,
+ }, {
+ .compatible = "boe,nv133fhm-n62",
+ .data = &boe_nv133fhm_n61,
+ }, {
.compatible = "boe,nv140fhmn49",
.data = &boe_nv140fhmn49,
}, {
@@ -3587,6 +3826,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "innolux,zj070na-01p",
.data = &innolux_zj070na_01p,
}, {
+ .compatible = "ivo,m133nwf4-r0",
+ .data = &ivo_m133nwf4_r0,
+ }, {
.compatible = "koe,tx14d24vm1bpa",
.data = &koe_tx14d24vm1bpa,
}, {
@@ -3716,6 +3958,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "shelly,sca07010-bfn-lnn",
.data = &shelly_sca07010_bfn_lnn,
}, {
+ .compatible = "starry,kr070pe2t",
+ .data = &starry_kr070pe2t,
+ }, {
.compatible = "starry,kr122ea0sra",
.data = &starry_kr122ea0sra,
}, {
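The panel-simple additions above all follow the same three-step pattern: a fixed drm_display_mode, a panel_desc that points at it, and a compatible string in platform_of_match[] that selects that descriptor. A condensed sketch of the pattern for a hypothetical "acme,example-panel" (all names and timing values below are illustrative, not taken from the patch):

static const struct drm_display_mode acme_example_mode = {
	.clock = 138653,		/* kHz, consistent with the totals below at 60 Hz */
	.hdisplay = 1920,
	.hsync_start = 1920 + 48,
	.hsync_end = 1920 + 48 + 32,
	.htotal = 1920 + 48 + 32 + 80,
	.vdisplay = 1080,
	.vsync_start = 1080 + 3,
	.vsync_end = 1080 + 3 + 5,
	.vtotal = 1080 + 3 + 5 + 23,
	.vrefresh = 60,
};

static const struct panel_desc acme_example = {
	.modes = &acme_example_mode,
	.num_modes = 1,
	.bpc = 8,
	.size = {			/* active area in millimetres */
		.width = 294,
		.height = 165,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
	.connector_type = DRM_MODE_CONNECTOR_eDP,
};

/* ... plus the lookup entry in platform_of_match[]: */
	{
		.compatible = "acme,example-panel",
		.data = &acme_example,
	},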
diff --git a/drivers/gpu/drm/panel/panel-truly-nt35597.c b/drivers/gpu/drm/panel/panel-truly-nt35597.c
index 012ca62bf30e..f0ad6081570f 100644
--- a/drivers/gpu/drm/panel/panel-truly-nt35597.c
+++ b/drivers/gpu/drm/panel/panel-truly-nt35597.c
@@ -490,9 +490,7 @@ static int truly_nt35597_panel_add(struct truly_nt35597 *ctx)
{
struct device *dev = ctx->dev;
int ret, i;
- const struct nt35597_config *config;
- config = ctx->config;
for (i = 0; i < ARRAY_SIZE(ctx->supplies); i++)
ctx->supplies[i].supply = regulator_names[i];
diff --git a/drivers/gpu/drm/panel/panel-visionox-rm69299.c b/drivers/gpu/drm/panel/panel-visionox-rm69299.c
new file mode 100644
index 000000000000..42f299ad3804
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-visionox-rm69299.c
@@ -0,0 +1,303 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
+struct visionox_rm69299 {
+ struct drm_panel panel;
+ struct regulator_bulk_data supplies[2];
+ struct gpio_desc *reset_gpio;
+ struct mipi_dsi_device *dsi;
+ bool prepared;
+ bool enabled;
+};
+
+static inline struct visionox_rm69299 *panel_to_ctx(struct drm_panel *panel)
+{
+ return container_of(panel, struct visionox_rm69299, panel);
+}
+
+static int visionox_rm69299_power_on(struct visionox_rm69299 *ctx)
+{
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ if (ret < 0)
+ return ret;
+
+ /*
+	 * The reset sequence of the Visionox panel requires the panel to be
+	 * held out of reset for 10ms, then held in reset for 10ms, and then
+	 * released again.
+ */
+ gpiod_set_value(ctx->reset_gpio, 1);
+ usleep_range(10000, 20000);
+ gpiod_set_value(ctx->reset_gpio, 0);
+ usleep_range(10000, 20000);
+ gpiod_set_value(ctx->reset_gpio, 1);
+ usleep_range(10000, 20000);
+
+ return 0;
+}
+
+static int visionox_rm69299_power_off(struct visionox_rm69299 *ctx)
+{
+ gpiod_set_value(ctx->reset_gpio, 0);
+
+ return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+}
+
+static int visionox_rm69299_unprepare(struct drm_panel *panel)
+{
+ struct visionox_rm69299 *ctx = panel_to_ctx(panel);
+ int ret;
+
+ ctx->dsi->mode_flags = 0;
+
+ ret = mipi_dsi_dcs_write(ctx->dsi, MIPI_DCS_SET_DISPLAY_OFF, NULL, 0);
+ if (ret < 0)
+ DRM_DEV_ERROR(ctx->panel.dev,
+ "set_display_off cmd failed ret = %d\n", ret);
+
+ /* 120ms delay required here as per DCS spec */
+ msleep(120);
+
+ ret = mipi_dsi_dcs_write(ctx->dsi, MIPI_DCS_ENTER_SLEEP_MODE, NULL, 0);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->panel.dev,
+ "enter_sleep cmd failed ret = %d\n", ret);
+ }
+
+ ret = visionox_rm69299_power_off(ctx);
+
+ ctx->prepared = false;
+ return ret;
+}
+
+static int visionox_rm69299_prepare(struct drm_panel *panel)
+{
+ struct visionox_rm69299 *ctx = panel_to_ctx(panel);
+ int ret;
+
+ if (ctx->prepared)
+ return 0;
+
+ ret = visionox_rm69299_power_on(ctx);
+ if (ret < 0)
+ return ret;
+
+ ctx->dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_write_buffer(ctx->dsi, (u8[]) { 0xfe, 0x00 }, 2);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->panel.dev,
+ "cmd set tx 0 failed, ret = %d\n", ret);
+ goto power_off;
+ }
+
+ ret = mipi_dsi_dcs_write_buffer(ctx->dsi, (u8[]) { 0xc2, 0x08 }, 2);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->panel.dev,
+ "cmd set tx 1 failed, ret = %d\n", ret);
+ goto power_off;
+ }
+
+ ret = mipi_dsi_dcs_write_buffer(ctx->dsi, (u8[]) { 0x35, 0x00 }, 2);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->panel.dev,
+ "cmd set tx 2 failed, ret = %d\n", ret);
+ goto power_off;
+ }
+
+ ret = mipi_dsi_dcs_write_buffer(ctx->dsi, (u8[]) { 0x51, 0xff }, 2);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->panel.dev,
+ "cmd set tx 3 failed, ret = %d\n", ret);
+ goto power_off;
+ }
+
+ ret = mipi_dsi_dcs_write(ctx->dsi, MIPI_DCS_EXIT_SLEEP_MODE, NULL, 0);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->panel.dev,
+ "exit_sleep_mode cmd failed ret = %d\n", ret);
+ goto power_off;
+ }
+
+ /* Per DSI spec wait 120ms after sending exit sleep DCS command */
+ msleep(120);
+
+ ret = mipi_dsi_dcs_write(ctx->dsi, MIPI_DCS_SET_DISPLAY_ON, NULL, 0);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->panel.dev,
+ "set_display_on cmd failed ret = %d\n", ret);
+ goto power_off;
+ }
+
+ /* Per DSI spec wait 120ms after sending set_display_on DCS command */
+ msleep(120);
+
+ ctx->prepared = true;
+
+ return 0;
+
+power_off:
+ return ret;
+}
+
+static const struct drm_display_mode visionox_rm69299_1080x2248_60hz = {
+ .name = "1080x2248",
+ .clock = 158695,
+ .hdisplay = 1080,
+ .hsync_start = 1080 + 26,
+ .hsync_end = 1080 + 26 + 2,
+ .htotal = 1080 + 26 + 2 + 36,
+ .vdisplay = 2248,
+ .vsync_start = 2248 + 56,
+ .vsync_end = 2248 + 56 + 4,
+ .vtotal = 2248 + 56 + 4 + 4,
+ .vrefresh = 60,
+ .flags = 0,
+};
+
+static int visionox_rm69299_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct visionox_rm69299 *ctx = panel_to_ctx(panel);
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_create(connector->dev);
+ if (!mode) {
+ DRM_DEV_ERROR(ctx->panel.dev,
+ "failed to create a new display mode\n");
+ return 0;
+ }
+
+ connector->display_info.width_mm = 74;
+ connector->display_info.height_mm = 131;
+ drm_mode_copy(mode, &visionox_rm69299_1080x2248_60hz);
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs visionox_rm69299_drm_funcs = {
+ .unprepare = visionox_rm69299_unprepare,
+ .prepare = visionox_rm69299_prepare,
+ .get_modes = visionox_rm69299_get_modes,
+};
+
+static int visionox_rm69299_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct visionox_rm69299 *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ ctx->panel.dev = dev;
+ ctx->dsi = dsi;
+
+ ctx->supplies[0].supply = "vdda";
+ ctx->supplies[1].supply = "vdd3p3";
+
+ ret = devm_regulator_bulk_get(ctx->panel.dev, ARRAY_SIZE(ctx->supplies),
+ ctx->supplies);
+ if (ret < 0)
+ return ret;
+
+ ctx->reset_gpio = devm_gpiod_get(ctx->panel.dev,
+ "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->reset_gpio)) {
+ DRM_DEV_ERROR(dev, "cannot get reset gpio %ld\n",
+ PTR_ERR(ctx->reset_gpio));
+ return PTR_ERR(ctx->reset_gpio);
+ }
+
+ drm_panel_init(&ctx->panel, dev, &visionox_rm69299_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ ctx->panel.dev = dev;
+ ctx->panel.funcs = &visionox_rm69299_drm_funcs;
+ drm_panel_add(&ctx->panel);
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_LPM |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS;
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "dsi attach failed ret = %d\n", ret);
+ goto err_dsi_attach;
+ }
+
+ ret = regulator_set_load(ctx->supplies[0].consumer, 32000);
+ if (ret) {
+ DRM_DEV_ERROR(dev,
+ "regulator set load failed for vdda supply ret = %d\n",
+ ret);
+ goto err_set_load;
+ }
+
+ ret = regulator_set_load(ctx->supplies[1].consumer, 13200);
+ if (ret) {
+ DRM_DEV_ERROR(dev,
+ "regulator set load failed for vdd3p3 supply ret = %d\n",
+ ret);
+ goto err_set_load;
+ }
+
+ return 0;
+
+err_set_load:
+ mipi_dsi_detach(dsi);
+err_dsi_attach:
+ drm_panel_remove(&ctx->panel);
+ return ret;
+}
+
+static int visionox_rm69299_remove(struct mipi_dsi_device *dsi)
+{
+ struct visionox_rm69299 *ctx = mipi_dsi_get_drvdata(dsi);
+
+ mipi_dsi_detach(ctx->dsi);
+ mipi_dsi_device_unregister(ctx->dsi);
+
+ drm_panel_remove(&ctx->panel);
+ return 0;
+}
+
+static const struct of_device_id visionox_rm69299_of_match[] = {
+ { .compatible = "visionox,rm69299-1080p-display", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, visionox_rm69299_of_match);
+
+static struct mipi_dsi_driver visionox_rm69299_driver = {
+ .driver = {
+ .name = "panel-visionox-rm69299",
+ .of_match_table = visionox_rm69299_of_match,
+ },
+ .probe = visionox_rm69299_probe,
+ .remove = visionox_rm69299_remove,
+};
+module_mipi_dsi_driver(visionox_rm69299_driver);
+
+MODULE_DESCRIPTION("Visionox RM69299 DSI Panel Driver");
+MODULE_LICENSE("GPL v2");
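The prepare() path of this new driver issues a short run of two-byte DCS writes with identical error handling after each one. A possible refactoring of that repetition, sketched here purely for illustration (the helper and the command table are hypothetical, not part of the driver), keeps the same mipi_dsi_dcs_write_buffer() calls behind a loop:

struct rm69299_cmd {
	u8 cmd;
	u8 data;
};

static const struct rm69299_cmd rm69299_on_cmds[] = {
	{ 0xfe, 0x00 },
	{ 0xc2, 0x08 },
	{ 0x35, 0x00 },
	{ 0x51, 0xff },
};

static int rm69299_send_cmds(struct visionox_rm69299 *ctx,
			     const struct rm69299_cmd *cmds, size_t count)
{
	size_t i;

	for (i = 0; i < count; i++) {
		u8 buf[2] = { cmds[i].cmd, cmds[i].data };
		ssize_t ret;

		ret = mipi_dsi_dcs_write_buffer(ctx->dsi, buf, sizeof(buf));
		if (ret < 0) {
			DRM_DEV_ERROR(ctx->panel.dev,
				      "cmd set tx %zu failed, ret = %zd\n",
				      i, ret);
			return ret;
		}
	}

	return 0;
}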
diff --git a/drivers/gpu/drm/pl111/Makefile b/drivers/gpu/drm/pl111/Makefile
index 0c70f0e91d21..67d430d433e0 100644
--- a/drivers/gpu/drm/pl111/Makefile
+++ b/drivers/gpu/drm/pl111/Makefile
@@ -3,7 +3,6 @@ pl111_drm-y += pl111_display.o \
pl111_versatile.o \
pl111_drv.o
-pl111_drm-$(CONFIG_ARCH_VEXPRESS) += pl111_vexpress.o
pl111_drm-$(CONFIG_ARCH_NOMADIK) += pl111_nomadik.o
pl111_drm-$(CONFIG_DEBUG_FS) += pl111_debugfs.o
diff --git a/drivers/gpu/drm/pl111/pl111_debugfs.c b/drivers/gpu/drm/pl111/pl111_debugfs.c
index 3c8e82016854..26ca8cdf3e60 100644
--- a/drivers/gpu/drm/pl111/pl111_debugfs.c
+++ b/drivers/gpu/drm/pl111/pl111_debugfs.c
@@ -51,10 +51,10 @@ static const struct drm_info_list pl111_debugfs_list[] = {
{"regs", pl111_debugfs_regs, 0},
};
-int
+void
pl111_debugfs_init(struct drm_minor *minor)
{
- return drm_debugfs_create_files(pl111_debugfs_list,
- ARRAY_SIZE(pl111_debugfs_list),
- minor->debugfs_root, minor);
+ drm_debugfs_create_files(pl111_debugfs_list,
+ ARRAY_SIZE(pl111_debugfs_list),
+ minor->debugfs_root, minor);
}
diff --git a/drivers/gpu/drm/pl111/pl111_drm.h b/drivers/gpu/drm/pl111/pl111_drm.h
index 77d2da9a8a7c..ba399bcb792f 100644
--- a/drivers/gpu/drm/pl111/pl111_drm.h
+++ b/drivers/gpu/drm/pl111/pl111_drm.h
@@ -84,6 +84,6 @@ struct pl111_drm_dev_private {
int pl111_display_init(struct drm_device *dev);
irqreturn_t pl111_irq(int irq, void *data);
-int pl111_debugfs_init(struct drm_minor *minor);
+void pl111_debugfs_init(struct drm_minor *minor);
#endif /* _PL111_DRM_H_ */
diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c
index aa8aa8d9e405..da0c39dae874 100644
--- a/drivers/gpu/drm/pl111/pl111_drv.c
+++ b/drivers/gpu/drm/pl111/pl111_drv.c
@@ -90,10 +90,13 @@ static int pl111_modeset_init(struct drm_device *dev)
struct drm_panel *panel = NULL;
struct drm_bridge *bridge = NULL;
bool defer = false;
- int ret = 0;
+ int ret;
int i;
- drm_mode_config_init(dev);
+ ret = drmm_mode_config_init(dev);
+ if (ret)
+ return ret;
+
mode_config = &dev->mode_config;
mode_config->funcs = &mode_config_funcs;
mode_config->min_width = 1;
@@ -154,7 +157,7 @@ static int pl111_modeset_init(struct drm_device *dev)
DRM_MODE_CONNECTOR_Unknown);
if (IS_ERR(bridge)) {
ret = PTR_ERR(bridge);
- goto out_config;
+ goto finish;
}
} else if (bridge) {
dev_info(dev->dev, "Using non-panel bridge\n");
@@ -197,8 +200,6 @@ static int pl111_modeset_init(struct drm_device *dev)
out_bridge:
if (panel)
drm_panel_bridge_remove(bridge);
-out_config:
- drm_mode_config_cleanup(dev);
finish:
return ret;
}
@@ -343,7 +344,6 @@ static int pl111_amba_remove(struct amba_device *amba_dev)
drm_dev_unregister(drm);
if (priv->panel)
drm_panel_bridge_remove(priv->bridge);
- drm_mode_config_cleanup(drm);
drm_dev_put(drm);
of_reserved_mem_device_release(dev);
@@ -444,6 +444,7 @@ static const struct amba_id pl111_id_table[] = {
},
{0, 0},
};
+MODULE_DEVICE_TABLE(amba, pl111_id_table);
static struct amba_driver pl111_amba_driver __maybe_unused = {
.drv = {
diff --git a/drivers/gpu/drm/pl111/pl111_versatile.c b/drivers/gpu/drm/pl111/pl111_versatile.c
index 4f325c410b5d..64f01a4e6767 100644
--- a/drivers/gpu/drm/pl111/pl111_versatile.c
+++ b/drivers/gpu/drm/pl111/pl111_versatile.c
@@ -8,9 +8,9 @@
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/regmap.h>
+#include <linux/vexpress.h>
#include "pl111_versatile.h"
-#include "pl111_vexpress.h"
#include "pl111_drm.h"
static struct regmap *versatile_syscon_map;
@@ -361,13 +361,110 @@ static const struct pl111_variant_data pl111_vexpress = {
.broken_clockdivider = true,
};
+#define VEXPRESS_FPGAMUX_MOTHERBOARD 0x00
+#define VEXPRESS_FPGAMUX_DAUGHTERBOARD_1 0x01
+#define VEXPRESS_FPGAMUX_DAUGHTERBOARD_2 0x02
+
+static int pl111_vexpress_clcd_init(struct device *dev, struct device_node *np,
+ struct pl111_drm_dev_private *priv)
+{
+ struct platform_device *pdev;
+ struct device_node *root;
+ struct device_node *child;
+ struct device_node *ct_clcd = NULL;
+ struct regmap *map;
+ bool has_coretile_clcd = false;
+ bool has_coretile_hdlcd = false;
+ bool mux_motherboard = true;
+ u32 val;
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_VEXPRESS_CONFIG))
+ return -ENODEV;
+
+ /*
+ * Check if we have a CLCD or HDLCD on the core tile by checking if a
+ * CLCD or HDLCD is available in the root of the device tree.
+ */
+ root = of_find_node_by_path("/");
+ if (!root)
+ return -EINVAL;
+
+ for_each_available_child_of_node(root, child) {
+ if (of_device_is_compatible(child, "arm,pl111")) {
+ has_coretile_clcd = true;
+ ct_clcd = child;
+ break;
+ }
+ if (of_device_is_compatible(child, "arm,hdlcd")) {
+ has_coretile_hdlcd = true;
+ of_node_put(child);
+ break;
+ }
+ }
+
+ of_node_put(root);
+
+ /*
+ * If there is a coretile HDLCD and it has a driver,
+ * do not mux the CLCD on the motherboard to the DVI.
+ */
+ if (has_coretile_hdlcd && IS_ENABLED(CONFIG_DRM_HDLCD))
+ mux_motherboard = false;
+
+ /*
+ * On the Vexpress CA9 we let the CLCD on the coretile
+ * take precedence, so also in this case do not mux the
+ * motherboard to the DVI.
+ */
+ if (has_coretile_clcd)
+ mux_motherboard = false;
+
+ if (mux_motherboard) {
+ dev_info(dev, "DVI muxed to motherboard CLCD\n");
+ val = VEXPRESS_FPGAMUX_MOTHERBOARD;
+ } else if (ct_clcd == dev->of_node) {
+ dev_info(dev,
+ "DVI muxed to daughterboard 1 (core tile) CLCD\n");
+ val = VEXPRESS_FPGAMUX_DAUGHTERBOARD_1;
+ } else {
+ dev_info(dev, "core tile graphics present\n");
+ dev_info(dev, "this device will be deactivated\n");
+ return -ENODEV;
+ }
+
+ /* Call into deep Vexpress configuration API */
+ pdev = of_find_device_by_node(np);
+ if (!pdev) {
+ dev_err(dev, "can't find the sysreg device, deferring\n");
+ return -EPROBE_DEFER;
+ }
+
+ map = devm_regmap_init_vexpress_config(&pdev->dev);
+ if (IS_ERR(map)) {
+ platform_device_put(pdev);
+ return PTR_ERR(map);
+ }
+
+ ret = regmap_write(map, 0, val);
+ platform_device_put(pdev);
+ if (ret) {
+ dev_err(dev, "error setting DVI muxmode\n");
+ return -ENODEV;
+ }
+
+ priv->variant = &pl111_vexpress;
+ dev_info(dev, "initializing Versatile Express PL111\n");
+
+ return 0;
+}
+
int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv)
{
const struct of_device_id *clcd_id;
enum versatile_clcd versatile_clcd_type;
struct device_node *np;
struct regmap *map;
- int ret;
np = of_find_matching_node_and_match(NULL, versatile_clcd_of_match,
&clcd_id);
@@ -378,6 +475,15 @@ int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv)
versatile_clcd_type = (enum versatile_clcd)clcd_id->data;
+ /* Versatile Express special handling */
+ if (versatile_clcd_type == VEXPRESS_CLCD_V2M) {
+ int ret = pl111_vexpress_clcd_init(dev, np, priv);
+ of_node_put(np);
+ if (ret)
+ dev_err(dev, "Versatile Express init failed - %d", ret);
+ return ret;
+ }
+
/*
* On the Integrator, check if we should use the IM-PD1 instead,
* if we find it, it will take precedence. This is on the Integrator/AP
@@ -390,37 +496,8 @@ int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv)
versatile_clcd_type = (enum versatile_clcd)clcd_id->data;
}
- /* Versatile Express special handling */
- if (versatile_clcd_type == VEXPRESS_CLCD_V2M) {
- struct platform_device *pdev;
-
- /* Registers a driver for the muxfpga */
- ret = vexpress_muxfpga_init();
- if (ret) {
- dev_err(dev, "unable to initialize muxfpga driver\n");
- of_node_put(np);
- return ret;
- }
-
- /* Call into deep Vexpress configuration API */
- pdev = of_find_device_by_node(np);
- if (!pdev) {
- dev_err(dev, "can't find the sysreg device, deferring\n");
- of_node_put(np);
- return -EPROBE_DEFER;
- }
- map = dev_get_drvdata(&pdev->dev);
- if (!map) {
- dev_err(dev, "sysreg has not yet probed\n");
- platform_device_put(pdev);
- of_node_put(np);
- return -EPROBE_DEFER;
- }
- } else {
- map = syscon_node_to_regmap(np);
- }
+ map = syscon_node_to_regmap(np);
of_node_put(np);
-
if (IS_ERR(map)) {
dev_err(dev, "no Versatile syscon regmap\n");
return PTR_ERR(map);
@@ -466,13 +543,6 @@ int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv)
priv->variant_display_disable = pl111_realview_clcd_disable;
dev_info(dev, "set up callbacks for RealView PL111\n");
break;
- case VEXPRESS_CLCD_V2M:
- priv->variant = &pl111_vexpress;
- dev_info(dev, "initializing Versatile Express PL111\n");
- ret = pl111_vexpress_clcd_init(dev, priv, map);
- if (ret)
- return ret;
- break;
default:
dev_info(dev, "unknown Versatile system controller\n");
break;
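The new pl111_vexpress_clcd_init() above obtains the Vexpress config regmap directly from the sysreg device instead of going through the removed muxfpga platform driver. The reference handling it relies on is worth spelling out: of_find_device_by_node() takes a reference on the platform device, which must be dropped with platform_device_put() on every path. A standalone sketch of that idiom (the foo_* name is hypothetical; the includes match the file above):

static int foo_write_mux(struct device_node *np, u32 val)
{
	struct platform_device *pdev;
	struct regmap *map;
	int ret;

	pdev = of_find_device_by_node(np);	/* takes a reference */
	if (!pdev)
		return -EPROBE_DEFER;

	map = devm_regmap_init_vexpress_config(&pdev->dev);
	if (IS_ERR(map)) {
		ret = PTR_ERR(map);
		goto out_put;
	}

	ret = regmap_write(map, 0, val);

out_put:
	platform_device_put(pdev);		/* drop it on every path */
	return ret;
}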
diff --git a/drivers/gpu/drm/pl111/pl111_vexpress.c b/drivers/gpu/drm/pl111/pl111_vexpress.c
deleted file mode 100644
index 350570fe06b5..000000000000
--- a/drivers/gpu/drm/pl111/pl111_vexpress.c
+++ /dev/null
@@ -1,138 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Versatile Express PL111 handling
- * Copyright (C) 2018 Linus Walleij
- *
- * This module binds to the "arm,vexpress-muxfpga" device on the
- * Versatile Express configuration bus and sets up which CLCD instance
- * gets muxed out on the DVI bridge.
- */
-#include <linux/device.h>
-#include <linux/module.h>
-#include <linux/regmap.h>
-#include <linux/vexpress.h>
-#include <linux/platform_device.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
-#include "pl111_drm.h"
-#include "pl111_vexpress.h"
-
-#define VEXPRESS_FPGAMUX_MOTHERBOARD 0x00
-#define VEXPRESS_FPGAMUX_DAUGHTERBOARD_1 0x01
-#define VEXPRESS_FPGAMUX_DAUGHTERBOARD_2 0x02
-
-int pl111_vexpress_clcd_init(struct device *dev,
- struct pl111_drm_dev_private *priv,
- struct regmap *map)
-{
- struct device_node *root;
- struct device_node *child;
- struct device_node *ct_clcd = NULL;
- bool has_coretile_clcd = false;
- bool has_coretile_hdlcd = false;
- bool mux_motherboard = true;
- u32 val;
- int ret;
-
- /*
- * Check if we have a CLCD or HDLCD on the core tile by checking if a
- * CLCD or HDLCD is available in the root of the device tree.
- */
- root = of_find_node_by_path("/");
- if (!root)
- return -EINVAL;
-
- for_each_available_child_of_node(root, child) {
- if (of_device_is_compatible(child, "arm,pl111")) {
- has_coretile_clcd = true;
- ct_clcd = child;
- break;
- }
- if (of_device_is_compatible(child, "arm,hdlcd")) {
- has_coretile_hdlcd = true;
- of_node_put(child);
- break;
- }
- }
-
- of_node_put(root);
-
- /*
- * If there is a coretile HDLCD and it has a driver,
- * do not mux the CLCD on the motherboard to the DVI.
- */
- if (has_coretile_hdlcd && IS_ENABLED(CONFIG_DRM_HDLCD))
- mux_motherboard = false;
-
- /*
- * On the Vexpress CA9 we let the CLCD on the coretile
- * take precedence, so also in this case do not mux the
- * motherboard to the DVI.
- */
- if (has_coretile_clcd)
- mux_motherboard = false;
-
- if (mux_motherboard) {
- dev_info(dev, "DVI muxed to motherboard CLCD\n");
- val = VEXPRESS_FPGAMUX_MOTHERBOARD;
- } else if (ct_clcd == dev->of_node) {
- dev_info(dev,
- "DVI muxed to daughterboard 1 (core tile) CLCD\n");
- val = VEXPRESS_FPGAMUX_DAUGHTERBOARD_1;
- } else {
- dev_info(dev, "core tile graphics present\n");
- dev_info(dev, "this device will be deactivated\n");
- return -ENODEV;
- }
-
- ret = regmap_write(map, 0, val);
- if (ret) {
- dev_err(dev, "error setting DVI muxmode\n");
- return -ENODEV;
- }
-
- return 0;
-}
-
-/*
- * This sets up the regmap pointer that will then be retrieved by
- * the detection code in pl111_versatile.c and passed in to the
- * pl111_vexpress_clcd_init() function above.
- */
-static int vexpress_muxfpga_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct regmap *map;
-
- map = devm_regmap_init_vexpress_config(&pdev->dev);
- if (IS_ERR(map))
- return PTR_ERR(map);
- dev_set_drvdata(dev, map);
-
- return 0;
-}
-
-static const struct of_device_id vexpress_muxfpga_match[] = {
- { .compatible = "arm,vexpress-muxfpga", },
- {}
-};
-
-static struct platform_driver vexpress_muxfpga_driver = {
- .driver = {
- .name = "vexpress-muxfpga",
- .of_match_table = of_match_ptr(vexpress_muxfpga_match),
- },
- .probe = vexpress_muxfpga_probe,
-};
-
-int vexpress_muxfpga_init(void)
-{
- int ret;
-
- ret = platform_driver_register(&vexpress_muxfpga_driver);
- /* -EBUSY just means this driver is already registered */
- if (ret == -EBUSY)
- ret = 0;
- return ret;
-}
diff --git a/drivers/gpu/drm/pl111/pl111_vexpress.h b/drivers/gpu/drm/pl111/pl111_vexpress.h
deleted file mode 100644
index 5d3681bb4c00..000000000000
--- a/drivers/gpu/drm/pl111/pl111_vexpress.h
+++ /dev/null
@@ -1,29 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-struct device;
-struct pl111_drm_dev_private;
-struct regmap;
-
-#ifdef CONFIG_ARCH_VEXPRESS
-
-int pl111_vexpress_clcd_init(struct device *dev,
- struct pl111_drm_dev_private *priv,
- struct regmap *map);
-
-int vexpress_muxfpga_init(void);
-
-#else
-
-static inline int pl111_vexpress_clcd_init(struct device *dev,
- struct pl111_drm_dev_private *priv,
- struct regmap *map)
-{
- return -ENODEV;
-}
-
-static inline int vexpress_muxfpga_init(void)
-{
- return 0;
-}
-
-#endif
diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
index a4f4175bbdbe..524d35b648d8 100644
--- a/drivers/gpu/drm/qxl/qxl_debugfs.c
+++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
@@ -39,7 +39,7 @@ static int
qxl_debugfs_irq_received(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct qxl_device *qdev = node->minor->dev->dev_private;
+ struct qxl_device *qdev = to_qxl(node->minor->dev);
seq_printf(m, "%d\n", atomic_read(&qdev->irq_received));
seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_display));
@@ -53,7 +53,7 @@ static int
qxl_debugfs_buffers_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct qxl_device *qdev = node->minor->dev->dev_private;
+ struct qxl_device *qdev = to_qxl(node->minor->dev);
struct qxl_bo *bo;
list_for_each_entry(bo, &qdev->gem.objects, list) {
@@ -79,36 +79,29 @@ static struct drm_info_list qxl_debugfs_list[] = {
#define QXL_DEBUGFS_ENTRIES ARRAY_SIZE(qxl_debugfs_list)
#endif
-int
+void
qxl_debugfs_init(struct drm_minor *minor)
{
#if defined(CONFIG_DEBUG_FS)
- int r;
- struct qxl_device *dev =
- (struct qxl_device *) minor->dev->dev_private;
+ struct qxl_device *dev = to_qxl(minor->dev);
drm_debugfs_create_files(qxl_debugfs_list, QXL_DEBUGFS_ENTRIES,
minor->debugfs_root, minor);
- r = qxl_ttm_debugfs_init(dev);
- if (r) {
- DRM_ERROR("Failed to init TTM debugfs\n");
- return r;
- }
+ qxl_ttm_debugfs_init(dev);
#endif
- return 0;
}
-int qxl_debugfs_add_files(struct qxl_device *qdev,
- struct drm_info_list *files,
- unsigned int nfiles)
+void qxl_debugfs_add_files(struct qxl_device *qdev,
+ struct drm_info_list *files,
+ unsigned int nfiles)
{
unsigned int i;
for (i = 0; i < qdev->debugfs_count; i++) {
if (qdev->debugfs[i].files == files) {
/* Already registered */
- return 0;
+ return;
}
}
@@ -116,7 +109,7 @@ int qxl_debugfs_add_files(struct qxl_device *qdev,
if (i > QXL_DEBUGFS_MAX_COMPONENTS) {
DRM_ERROR("Reached maximum number of debugfs components.\n");
DRM_ERROR("Report so we increase QXL_DEBUGFS_MAX_COMPONENTS.\n");
- return -EINVAL;
+ return;
}
qdev->debugfs[qdev->debugfs_count].files = files;
qdev->debugfs[qdev->debugfs_count].num_files = nfiles;
@@ -126,5 +119,4 @@ int qxl_debugfs_add_files(struct qxl_device *qdev,
qdev->ddev.primary->debugfs_root,
qdev->ddev.primary);
#endif
- return 0;
}
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 91f398d51cfa..9d45d5a4278f 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -221,7 +221,7 @@ static int qxl_add_mode(struct drm_connector *connector,
bool preferred)
{
struct drm_device *dev = connector->dev;
- struct qxl_device *qdev = dev->dev_private;
+ struct qxl_device *qdev = to_qxl(dev);
struct drm_display_mode *mode = NULL;
int rc;
@@ -242,7 +242,7 @@ static int qxl_add_mode(struct drm_connector *connector,
static int qxl_add_monitors_config_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct qxl_device *qdev = dev->dev_private;
+ struct qxl_device *qdev = to_qxl(dev);
struct qxl_output *output = drm_connector_to_qxl_output(connector);
int h = output->index;
struct qxl_head *head;
@@ -310,7 +310,7 @@ static void qxl_crtc_update_monitors_config(struct drm_crtc *crtc,
const char *reason)
{
struct drm_device *dev = crtc->dev;
- struct qxl_device *qdev = dev->dev_private;
+ struct qxl_device *qdev = to_qxl(dev);
struct qxl_crtc *qcrtc = to_qxl_crtc(crtc);
struct qxl_head head;
int oldcount, i = qcrtc->index;
@@ -400,7 +400,7 @@ static int qxl_framebuffer_surface_dirty(struct drm_framebuffer *fb,
unsigned int num_clips)
{
/* TODO: vmwgfx where this was cribbed from had locking. Why? */
- struct qxl_device *qdev = fb->dev->dev_private;
+ struct qxl_device *qdev = to_qxl(fb->dev);
struct drm_clip_rect norect;
struct qxl_bo *qobj;
bool is_primary;
@@ -462,7 +462,7 @@ static const struct drm_crtc_helper_funcs qxl_crtc_helper_funcs = {
static int qxl_primary_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
- struct qxl_device *qdev = plane->dev->dev_private;
+ struct qxl_device *qdev = to_qxl(plane->dev);
struct qxl_bo *bo;
if (!state->crtc || !state->fb)
@@ -476,7 +476,7 @@ static int qxl_primary_atomic_check(struct drm_plane *plane,
static int qxl_primary_apply_cursor(struct drm_plane *plane)
{
struct drm_device *dev = plane->dev;
- struct qxl_device *qdev = dev->dev_private;
+ struct qxl_device *qdev = to_qxl(dev);
struct drm_framebuffer *fb = plane->state->fb;
struct qxl_crtc *qcrtc = to_qxl_crtc(plane->state->crtc);
struct qxl_cursor_cmd *cmd;
@@ -523,7 +523,7 @@ out_free_release:
static void qxl_primary_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
- struct qxl_device *qdev = plane->dev->dev_private;
+ struct qxl_device *qdev = to_qxl(plane->dev);
struct qxl_bo *bo = gem_to_qxl_bo(plane->state->fb->obj[0]);
struct qxl_bo *primary;
struct drm_clip_rect norect = {
@@ -554,7 +554,7 @@ static void qxl_primary_atomic_update(struct drm_plane *plane,
static void qxl_primary_atomic_disable(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
- struct qxl_device *qdev = plane->dev->dev_private;
+ struct qxl_device *qdev = to_qxl(plane->dev);
if (old_state->fb) {
struct qxl_bo *bo = gem_to_qxl_bo(old_state->fb->obj[0]);
@@ -570,7 +570,7 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct drm_device *dev = plane->dev;
- struct qxl_device *qdev = dev->dev_private;
+ struct qxl_device *qdev = to_qxl(dev);
struct drm_framebuffer *fb = plane->state->fb;
struct qxl_crtc *qcrtc = to_qxl_crtc(plane->state->crtc);
struct qxl_release *release;
@@ -679,7 +679,7 @@ out_free_release:
static void qxl_cursor_atomic_disable(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
- struct qxl_device *qdev = plane->dev->dev_private;
+ struct qxl_device *qdev = to_qxl(plane->dev);
struct qxl_release *release;
struct qxl_cursor_cmd *cmd;
int ret;
@@ -762,7 +762,7 @@ static void qxl_calc_dumb_shadow(struct qxl_device *qdev,
static int qxl_plane_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
- struct qxl_device *qdev = plane->dev->dev_private;
+ struct qxl_device *qdev = to_qxl(plane->dev);
struct drm_gem_object *obj;
struct qxl_bo *user_bo;
struct qxl_surface surf;
@@ -923,7 +923,7 @@ static int qdev_crtc_init(struct drm_device *dev, int crtc_id)
{
struct qxl_crtc *qxl_crtc;
struct drm_plane *primary, *cursor;
- struct qxl_device *qdev = dev->dev_private;
+ struct qxl_device *qdev = to_qxl(dev);
int r;
qxl_crtc = kzalloc(sizeof(struct qxl_crtc), GFP_KERNEL);
@@ -965,7 +965,7 @@ free_mem:
static int qxl_conn_get_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct qxl_device *qdev = dev->dev_private;
+ struct qxl_device *qdev = to_qxl(dev);
struct qxl_output *output = drm_connector_to_qxl_output(connector);
unsigned int pwidth = 1024;
unsigned int pheight = 768;
@@ -991,7 +991,7 @@ static enum drm_mode_status qxl_conn_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct drm_device *ddev = connector->dev;
- struct qxl_device *qdev = ddev->dev_private;
+ struct qxl_device *qdev = to_qxl(ddev);
if (qxl_check_mode(qdev, mode->hdisplay, mode->vdisplay) != 0)
return MODE_BAD;
@@ -1021,7 +1021,7 @@ static enum drm_connector_status qxl_conn_detect(
struct qxl_output *output =
drm_connector_to_qxl_output(connector);
struct drm_device *ddev = connector->dev;
- struct qxl_device *qdev = ddev->dev_private;
+ struct qxl_device *qdev = to_qxl(ddev);
bool connected = false;
/* The first monitor is always connected */
@@ -1071,7 +1071,7 @@ static int qxl_mode_create_hotplug_mode_update_property(struct qxl_device *qdev)
static int qdev_output_init(struct drm_device *dev, int num_output)
{
- struct qxl_device *qdev = dev->dev_private;
+ struct qxl_device *qdev = to_qxl(dev);
struct qxl_output *qxl_output;
struct drm_connector *connector;
struct drm_encoder *encoder;
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index 4fda3f9b29f4..13872b882775 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -81,13 +81,16 @@ qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return -EINVAL; /* TODO: ENODEV ? */
}
- qdev = kzalloc(sizeof(struct qxl_device), GFP_KERNEL);
- if (!qdev)
+ qdev = devm_drm_dev_alloc(&pdev->dev, &qxl_driver,
+ struct qxl_device, ddev);
+ if (IS_ERR(qdev)) {
+ pr_err("Unable to init drm dev");
return -ENOMEM;
+ }
ret = pci_enable_device(pdev);
if (ret)
- goto free_dev;
+ return ret;
ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "qxl");
if (ret)
@@ -101,7 +104,7 @@ qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
}
- ret = qxl_device_init(qdev, &qxl_driver, pdev);
+ ret = qxl_device_init(qdev, pdev);
if (ret)
goto put_vga;
@@ -128,14 +131,13 @@ put_vga:
vga_put(pdev, VGA_RSRC_LEGACY_IO);
disable_pci:
pci_disable_device(pdev);
-free_dev:
- kfree(qdev);
+
return ret;
}
static void qxl_drm_release(struct drm_device *dev)
{
- struct qxl_device *qdev = dev->dev_private;
+ struct qxl_device *qdev = to_qxl(dev);
/*
* TODO: qxl_device_fini() call should be in qxl_pci_remove(),
@@ -144,8 +146,6 @@ static void qxl_drm_release(struct drm_device *dev)
*/
qxl_modeset_fini(qdev);
qxl_device_fini(qdev);
- dev->dev_private = NULL;
- kfree(qdev);
}
static void
@@ -157,7 +157,6 @@ qxl_pci_remove(struct pci_dev *pdev)
drm_atomic_helper_shutdown(dev);
if (is_vga(pdev))
vga_put(pdev, VGA_RSRC_LEGACY_IO);
- drm_dev_put(dev);
}
DEFINE_DRM_GEM_FOPS(qxl_fops);
@@ -165,7 +164,7 @@ DEFINE_DRM_GEM_FOPS(qxl_fops);
static int qxl_drm_freeze(struct drm_device *dev)
{
struct pci_dev *pdev = dev->pdev;
- struct qxl_device *qdev = dev->dev_private;
+ struct qxl_device *qdev = to_qxl(dev);
int ret;
ret = drm_mode_config_helper_suspend(dev);
@@ -187,7 +186,7 @@ static int qxl_drm_freeze(struct drm_device *dev)
static int qxl_drm_resume(struct drm_device *dev, bool thaw)
{
- struct qxl_device *qdev = dev->dev_private;
+ struct qxl_device *qdev = to_qxl(dev);
qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
if (!thaw) {
@@ -246,7 +245,7 @@ static int qxl_pm_restore(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
- struct qxl_device *qdev = drm_dev->dev_private;
+ struct qxl_device *qdev = to_qxl(drm_dev);
qxl_io_reset(qdev);
return qxl_drm_resume(drm_dev, false);
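The qxl probe path above switches from kzalloc() plus drm_dev_init() to devm_drm_dev_alloc(), which allocates the containing structure around an embedded drm_device and ties its lifetime to the parent struct device; the explicit kfree()/drm_dev_put() teardown in the error and remove paths therefore goes away. A minimal probe sketch of the same pattern, with hypothetical foo_* names:

static int foo_pci_probe(struct pci_dev *pdev,
			 const struct pci_device_id *ent)
{
	struct foo_device *fdev;

	fdev = devm_drm_dev_alloc(&pdev->dev, &foo_driver,
				  struct foo_device, ddev);
	if (IS_ERR(fdev))
		return PTR_ERR(fdev);

	/* ... PCI and hardware init ... */

	return drm_dev_register(&fdev->ddev, 0);
}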
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 27e45a2d6b52..31e35f787df2 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -190,13 +190,8 @@ struct qxl_debugfs {
unsigned int num_files;
};
-int qxl_debugfs_add_files(struct qxl_device *rdev,
- struct drm_info_list *files,
- unsigned int nfiles);
int qxl_debugfs_fence_init(struct qxl_device *rdev);
-struct qxl_device;
-
struct qxl_device {
struct drm_device ddev;
@@ -276,11 +271,12 @@ struct qxl_device {
int monitors_config_height;
};
+#define to_qxl(dev) container_of(dev, struct qxl_device, ddev)
+
extern const struct drm_ioctl_desc qxl_ioctls[];
extern int qxl_max_ioctl;
-int qxl_device_init(struct qxl_device *qdev, struct drm_driver *drv,
- struct pci_dev *pdev);
+int qxl_device_init(struct qxl_device *qdev, struct pci_dev *pdev);
void qxl_device_fini(struct qxl_device *qdev);
int qxl_modeset_init(struct qxl_device *qdev);
@@ -442,8 +438,8 @@ int qxl_garbage_collect(struct qxl_device *qdev);
/* debugfs */
-int qxl_debugfs_init(struct drm_minor *minor);
-int qxl_ttm_debugfs_init(struct qxl_device *qdev);
+void qxl_debugfs_init(struct drm_minor *minor);
+void qxl_ttm_debugfs_init(struct qxl_device *qdev);
/* qxl_prime.c */
int qxl_gem_prime_pin(struct drm_gem_object *obj);
@@ -461,9 +457,9 @@ int qxl_gem_prime_mmap(struct drm_gem_object *obj,
int qxl_irq_init(struct qxl_device *qdev);
irqreturn_t qxl_irq_handler(int irq, void *arg);
-int qxl_debugfs_add_files(struct qxl_device *qdev,
- struct drm_info_list *files,
- unsigned int nfiles);
+void qxl_debugfs_add_files(struct qxl_device *qdev,
+ struct drm_info_list *files,
+ unsigned int nfiles);
int qxl_surface_id_alloc(struct qxl_device *qdev,
struct qxl_bo *surf);
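The to_qxl() macro added above replaces every dev->dev_private dereference with a container_of() conversion from the embedded drm_device back to the driver structure. The general shape of that idiom, shown here with hypothetical foo_* names:

struct foo_device {
	struct drm_device ddev;		/* must be embedded, not a pointer */
	/* driver-private state follows */
};

static inline struct foo_device *to_foo(struct drm_device *dev)
{
	return container_of(dev, struct foo_device, ddev);
}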
diff --git a/drivers/gpu/drm/qxl/qxl_dumb.c b/drivers/gpu/drm/qxl/qxl_dumb.c
index 272d19b677d8..24e903383aa1 100644
--- a/drivers/gpu/drm/qxl/qxl_dumb.c
+++ b/drivers/gpu/drm/qxl/qxl_dumb.c
@@ -32,7 +32,7 @@ int qxl_mode_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
- struct qxl_device *qdev = dev->dev_private;
+ struct qxl_device *qdev = to_qxl(dev);
struct qxl_bo *qobj;
uint32_t handle;
int r;
diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c
index 69f37db1027a..5ff6fa9b799c 100644
--- a/drivers/gpu/drm/qxl/qxl_gem.c
+++ b/drivers/gpu/drm/qxl/qxl_gem.c
@@ -34,7 +34,7 @@ void qxl_gem_object_free(struct drm_gem_object *gobj)
struct qxl_device *qdev;
struct ttm_buffer_object *tbo;
- qdev = (struct qxl_device *)gobj->dev->dev_private;
+ qdev = to_qxl(gobj->dev);
qxl_surface_evict(qdev, qobj, false);
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index 72f3f1bbb40c..13bd1d11c703 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -36,7 +36,7 @@
static int qxl_alloc_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- struct qxl_device *qdev = dev->dev_private;
+ struct qxl_device *qdev = to_qxl(dev);
struct drm_qxl_alloc *qxl_alloc = data;
int ret;
struct qxl_bo *qobj;
@@ -64,7 +64,7 @@ static int qxl_alloc_ioctl(struct drm_device *dev, void *data,
static int qxl_map_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- struct qxl_device *qdev = dev->dev_private;
+ struct qxl_device *qdev = to_qxl(dev);
struct drm_qxl_map *qxl_map = data;
return qxl_mode_dumb_mmap(file_priv, &qdev->ddev, qxl_map->handle,
@@ -276,7 +276,7 @@ out_free_reloc:
static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- struct qxl_device *qdev = dev->dev_private;
+ struct qxl_device *qdev = to_qxl(dev);
struct drm_qxl_execbuffer *execbuffer = data;
struct drm_qxl_command user_cmd;
int cmd_num;
@@ -301,7 +301,7 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
static int qxl_update_area_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
- struct qxl_device *qdev = dev->dev_private;
+ struct qxl_device *qdev = to_qxl(dev);
struct drm_qxl_update_area *update_area = data;
struct qxl_rect area = {.left = update_area->left,
.top = update_area->top,
@@ -351,7 +351,7 @@ out:
static int qxl_getparam_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- struct qxl_device *qdev = dev->dev_private;
+ struct qxl_device *qdev = to_qxl(dev);
struct drm_qxl_getparam *param = data;
switch (param->param) {
@@ -370,7 +370,7 @@ static int qxl_getparam_ioctl(struct drm_device *dev, void *data,
static int qxl_clientcap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- struct qxl_device *qdev = dev->dev_private;
+ struct qxl_device *qdev = to_qxl(dev);
struct drm_qxl_clientcap *param = data;
int byte, idx;
@@ -391,7 +391,7 @@ static int qxl_clientcap_ioctl(struct drm_device *dev, void *data,
static int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
- struct qxl_device *qdev = dev->dev_private;
+ struct qxl_device *qdev = to_qxl(dev);
struct drm_qxl_alloc_surf *param = data;
struct qxl_bo *qobj;
int handle;
diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
index 8435af108632..1ba5a702d763 100644
--- a/drivers/gpu/drm/qxl/qxl_irq.c
+++ b/drivers/gpu/drm/qxl/qxl_irq.c
@@ -32,7 +32,7 @@
irqreturn_t qxl_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
- struct qxl_device *qdev = (struct qxl_device *)dev->dev_private;
+ struct qxl_device *qdev = to_qxl(dev);
uint32_t pending;
pending = xchg(&qdev->ram_header->int_pending, 0);
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
index 70b20ee4741a..a6d873052cd4 100644
--- a/drivers/gpu/drm/qxl/qxl_kms.c
+++ b/drivers/gpu/drm/qxl/qxl_kms.c
@@ -27,6 +27,7 @@
#include <linux/pci.h>
#include <drm/drm_drv.h>
+#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
#include "qxl_drv.h"
@@ -107,20 +108,12 @@ static void qxl_gc_work(struct work_struct *work)
}
int qxl_device_init(struct qxl_device *qdev,
- struct drm_driver *drv,
struct pci_dev *pdev)
{
int r, sb;
- r = drm_dev_init(&qdev->ddev, drv, &pdev->dev);
- if (r) {
- pr_err("Unable to init drm dev");
- goto error;
- }
-
qdev->ddev.pdev = pdev;
pci_set_drvdata(pdev, &qdev->ddev);
- qdev->ddev.dev_private = qdev;
mutex_init(&qdev->gem.mutex);
mutex_init(&qdev->update_area_mutex);
@@ -136,8 +129,7 @@ int qxl_device_init(struct qxl_device *qdev,
qdev->vram_mapping = io_mapping_create_wc(qdev->vram_base, pci_resource_len(pdev, 0));
if (!qdev->vram_mapping) {
pr_err("Unable to create vram_mapping");
- r = -ENOMEM;
- goto error;
+ return -ENOMEM;
}
if (pci_resource_len(pdev, 4) > 0) {
@@ -218,7 +210,7 @@ int qxl_device_init(struct qxl_device *qdev,
&(qdev->ram_header->cursor_ring_hdr),
sizeof(struct qxl_command),
QXL_CURSOR_RING_SIZE,
- qdev->io_base + QXL_IO_NOTIFY_CMD,
+ qdev->io_base + QXL_IO_NOTIFY_CURSOR,
false,
&qdev->cursor_event);
@@ -291,7 +283,6 @@ surface_mapping_free:
io_mapping_free(qdev->surface_mapping);
vram_mapping_free:
io_mapping_free(qdev->vram_mapping);
-error:
return r;
}
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index ab72dc3476e9..edc8a9916872 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -33,7 +33,7 @@ static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo)
struct qxl_device *qdev;
bo = to_qxl_bo(tbo);
- qdev = (struct qxl_device *)bo->tbo.base.dev->dev_private;
+ qdev = to_qxl(bo->tbo.base.dev);
qxl_surface_evict(qdev, bo, false);
WARN_ON_ONCE(bo->map_count > 0);
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 2feca734c7b1..4fae3e393da1 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -243,7 +243,7 @@ static int qxl_release_validate_bo(struct qxl_bo *bo)
return ret;
/* allocate a surface for reserved + validated buffers */
- ret = qxl_bo_check_id(bo->tbo.base.dev->dev_private, bo);
+ ret = qxl_bo_check_id(to_qxl(bo->tbo.base.dev), bo);
if (ret)
return ret;
return 0;
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 62a5e424971b..f09a712b1ed2 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -243,7 +243,7 @@ static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
if (!qxl_ttm_bo_is_qxl_bo(bo))
return;
qbo = to_qxl_bo(bo);
- qdev = qbo->tbo.base.dev->dev_private;
+ qdev = to_qxl(qbo->tbo.base.dev);
if (bo->mem.mem_type == TTM_PL_PRIV && qbo->surface_id)
qxl_surface_evict(qdev, qbo, new_mem ? true : false);
@@ -322,7 +322,7 @@ static int qxl_mm_dump_table(struct seq_file *m, void *data)
}
#endif
-int qxl_ttm_debugfs_init(struct qxl_device *qdev)
+void qxl_ttm_debugfs_init(struct qxl_device *qdev)
{
#if defined(CONFIG_DEBUG_FS)
static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
@@ -343,8 +343,6 @@ int qxl_ttm_debugfs_init(struct qxl_device *qdev)
qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV].priv;
}
- return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
-#else
- return 0;
+ qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
#endif
}
diff --git a/drivers/gpu/drm/r128/ati_pcigart.c b/drivers/gpu/drm/r128/ati_pcigart.c
index 9b4072f97215..3e76ae5a17ee 100644
--- a/drivers/gpu/drm/r128/ati_pcigart.c
+++ b/drivers/gpu/drm/r128/ati_pcigart.c
@@ -32,9 +32,10 @@
*/
#include <linux/export.h>
+#include <linux/pci.h>
#include <drm/drm_device.h>
-#include <drm/drm_pci.h>
+#include <drm/drm_legacy.h>
#include <drm/drm_print.h>
#include "ati_pcigart.h"
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index c693b2ca0329..11c97edde54d 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -3,42 +3,13 @@
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-ccflags-y := -Idrivers/gpu/drm/amd/include
-
hostprogs := mkregtable
-clean-files := rn50_reg_safe.h r100_reg_safe.h r200_reg_safe.h rv515_reg_safe.h r300_reg_safe.h r420_reg_safe.h rs600_reg_safe.h r600_reg_safe.h evergreen_reg_safe.h cayman_reg_safe.h
+targets := rn50_reg_safe.h r100_reg_safe.h r200_reg_safe.h rv515_reg_safe.h r300_reg_safe.h r420_reg_safe.h rs600_reg_safe.h r600_reg_safe.h evergreen_reg_safe.h cayman_reg_safe.h
-quiet_cmd_mkregtable = MKREGTABLE $@
+quiet_cmd_mkregtable = MKREG $@
cmd_mkregtable = $(obj)/mkregtable $< > $@
-$(obj)/rn50_reg_safe.h: $(src)/reg_srcs/rn50 $(obj)/mkregtable
- $(call if_changed,mkregtable)
-
-$(obj)/r100_reg_safe.h: $(src)/reg_srcs/r100 $(obj)/mkregtable
- $(call if_changed,mkregtable)
-
-$(obj)/r200_reg_safe.h: $(src)/reg_srcs/r200 $(obj)/mkregtable
- $(call if_changed,mkregtable)
-
-$(obj)/rv515_reg_safe.h: $(src)/reg_srcs/rv515 $(obj)/mkregtable
- $(call if_changed,mkregtable)
-
-$(obj)/r300_reg_safe.h: $(src)/reg_srcs/r300 $(obj)/mkregtable
- $(call if_changed,mkregtable)
-
-$(obj)/r420_reg_safe.h: $(src)/reg_srcs/r420 $(obj)/mkregtable
- $(call if_changed,mkregtable)
-
-$(obj)/rs600_reg_safe.h: $(src)/reg_srcs/rs600 $(obj)/mkregtable
- $(call if_changed,mkregtable)
-
-$(obj)/r600_reg_safe.h: $(src)/reg_srcs/r600 $(obj)/mkregtable
- $(call if_changed,mkregtable)
-
-$(obj)/evergreen_reg_safe.h: $(src)/reg_srcs/evergreen $(obj)/mkregtable
- $(call if_changed,mkregtable)
-
-$(obj)/cayman_reg_safe.h: $(src)/reg_srcs/cayman $(obj)/mkregtable
+$(obj)/%_reg_safe.h: $(src)/reg_srcs/% $(obj)/mkregtable FORCE
$(call if_changed,mkregtable)
$(obj)/r100.o: $(obj)/r100_reg_safe.h $(obj)/rn50_reg_safe.h
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 2c27627b6659..f15b20da5315 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -1211,8 +1211,7 @@ static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32
SDEBUG("<<\n");
free:
- if (ws)
- kfree(ectx.ws);
+ kfree(ectx.ws);
return ret;
}
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index a9257bed3484..134aa2b01f90 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -65,13 +65,6 @@ static const struct ci_pt_defaults defaults_bonaire_xt =
{ 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
};
-static const struct ci_pt_defaults defaults_bonaire_pro =
-{
- 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062,
- { 0x8C, 0x23F, 0x244, 0xA6, 0x83, 0x85, 0x86, 0x86, 0x83, 0xDB, 0xDB, 0xDA, 0x67, 0x60, 0x5F },
- { 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB }
-};
-
static const struct ci_pt_defaults defaults_saturn_xt =
{
1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
@@ -79,13 +72,6 @@ static const struct ci_pt_defaults defaults_saturn_xt =
{ 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
};
-static const struct ci_pt_defaults defaults_saturn_pro =
-{
- 1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000,
- { 0x96, 0x21D, 0x23B, 0xA1, 0x85, 0x87, 0x83, 0x84, 0x81, 0xE6, 0xE6, 0xE6, 0x71, 0x6A, 0x6A },
- { 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 }
-};
-
static const struct ci_pt_config_reg didt_config_ci[] =
{
{ 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 848ef68d9086..5d2591725189 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -2111,7 +2111,7 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
ucOverdriveThermalController];
info.addr = power_info->info.ucOverdriveControllerAddress >> 1;
strlcpy(info.type, name, sizeof(info.type));
- i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
+ i2c_new_client_device(&rdev->pm.i2c_bus->adapter, &info);
}
}
num_modes = power_info->info.ucNumOfPowerModeEntries;
@@ -2351,7 +2351,7 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r
const char *name = pp_lib_thermal_controller_names[controller->ucType];
info.addr = controller->ucI2cAddress >> 1;
strlcpy(info.type, name, sizeof(info.type));
- i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
+ i2c_new_client_device(&rdev->pm.i2c_bus->adapter, &info);
}
} else {
DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index c3e49c973812..d3c04df7e75d 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -2704,7 +2704,7 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
const char *name = thermal_controller_names[thermal_controller];
info.addr = i2c_addr >> 1;
strlcpy(info.type, name, sizeof(info.type));
- i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
+ i2c_new_client_device(&rdev->pm.i2c_bus->adapter, &info);
}
}
} else {
@@ -2721,7 +2721,7 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
const char *name = "f75375";
info.addr = 0x28;
strlcpy(info.type, name, sizeof(info.type));
- i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
+ i2c_new_client_device(&rdev->pm.i2c_bus->adapter, &info);
DRM_INFO("Possible %s thermal controller at 0x%02x\n",
name, info.addr);
}
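i2c_new_client_device(), used above in place of i2c_new_device(), reports failure with an ERR_PTR-encoded pointer rather than NULL. The call sites in this patch ignore the result, but a caller that wanted to check it would do so roughly like this (illustrative fragment only, not part of the patch):

	struct i2c_client *client;

	client = i2c_new_client_device(&rdev->pm.i2c_bus->adapter, &info);
	if (IS_ERR(client))
		DRM_INFO("failed to register thermal controller client: %ld\n",
			 PTR_ERR(client));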
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 0d0ab8e0ff3b..cc31d187042e 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -196,12 +196,12 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
p->vm_bos = radeon_vm_get_bos(p->rdev, p->ib.vm,
&p->validated);
if (need_mmap_lock)
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
r = radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring);
if (need_mmap_lock)
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
return r;
}
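The radeon changes above replace open-coded down_read()/up_read() on mm->mmap_sem with the mmap_read_lock()/mmap_read_unlock() wrappers. The conversion is mechanical; a minimal sketch of the new form (the foo_* name is hypothetical):

static void foo_validate_userptr(struct mm_struct *mm)
{
	mmap_read_lock(mm);	/* was: down_read(&mm->mmap_sem) */
	/* ... validate user mappings while the lock is held ... */
	mmap_read_unlock(mm);	/* was: up_read(&mm->mmap_sem) */
}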
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 59f8186a2415..bbb0883e8ce6 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -36,6 +36,7 @@
#include <linux/pm_runtime.h>
#include <linux/vga_switcheroo.h>
#include <linux/mmu_notifier.h>
+#include <linux/pci.h>
#include <drm/drm_agpsupport.h>
#include <drm/drm_crtc_helper.h>
@@ -44,7 +45,6 @@
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_ioctl.h>
-#include <drm/drm_pci.h>
#include <drm/drm_pciids.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 068c3e5da173..3c8f570a20ee 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -342,17 +342,17 @@ int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
}
if (args->flags & RADEON_GEM_USERPTR_VALIDATE) {
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
r = radeon_bo_reserve(bo, true);
if (r) {
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
goto release_object;
}
radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
radeon_bo_unreserve(bo);
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
if (r)
goto release_object;
}
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 58176db85952..c5d1dc9618a4 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -158,7 +158,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
}
if (radeon_is_px(dev)) {
- dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
+ dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
pm_runtime_use_autosuspend(dev->dev);
pm_runtime_set_autosuspend_delay(dev->dev, 5000);
pm_runtime_set_active(dev->dev);
@@ -828,7 +828,7 @@ int radeon_enable_vblank_kms(struct drm_crtc *crtc)
unsigned long irqflags;
int r;
- if (pipe < 0 || pipe >= rdev->num_crtc) {
+ if (pipe >= rdev->num_crtc) {
DRM_ERROR("Invalid crtc %d\n", pipe);
return -EINVAL;
}
@@ -854,7 +854,7 @@ void radeon_disable_vblank_kms(struct drm_crtc *crtc)
struct radeon_device *rdev = dev->dev_private;
unsigned long irqflags;
- if (pipe < 0 || pipe >= rdev->num_crtc) {
+ if (pipe >= rdev->num_crtc) {
DRM_ERROR("Invalid crtc %d\n", pipe);
return;
}
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 2cb85dbe728f..a167e1c36d24 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -252,24 +252,6 @@ static const struct si_dte_data dte_data_tahiti =
false
};
-static const struct si_dte_data dte_data_tahiti_le =
-{
- { 0x1E8480, 0x7A1200, 0x2160EC0, 0x3938700, 0 },
- { 0x7D, 0x7D, 0x4E4, 0xB00, 0 },
- 0x5,
- 0xAFC8,
- 0x64,
- 0x32,
- 1,
- 0,
- 0x10,
- { 0x78, 0x7C, 0x82, 0x88, 0x8E, 0x94, 0x9A, 0xA0, 0xA6, 0xAC, 0xB0, 0xB4, 0xB8, 0xBC, 0xC0, 0xC4 },
- { 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700 },
- { 0x2AF8, 0x2AF8, 0x29BB, 0x27F9, 0x2637, 0x2475, 0x22B3, 0x20F1, 0x1F2F, 0x1D6D, 0x1734, 0x1414, 0x10F4, 0xDD4, 0xAB4, 0x794 },
- 85,
- true
-};
-
static const struct si_dte_data dte_data_tahiti_pro =
{
{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index 654e2dd08146..3e67cf70f040 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -530,7 +530,6 @@ static int rcar_du_remove(struct platform_device *pdev)
drm_dev_unregister(ddev);
drm_kms_helper_poll_fini(ddev);
- drm_mode_config_cleanup(ddev);
drm_dev_put(ddev);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
index c07c6a88aff0..b0335da0c161 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
@@ -13,6 +13,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_panel.h>
+#include <drm/drm_simple_kms_helper.h>
#include "rcar_du_drv.h"
#include "rcar_du_encoder.h"
@@ -23,13 +24,6 @@
* Encoder
*/
-static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
-};
-
-static const struct drm_encoder_funcs encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static unsigned int rcar_du_encoder_count_ports(struct device_node *node)
{
struct device_node *ports;
@@ -110,13 +104,11 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
}
}
- ret = drm_encoder_init(rcdu->ddev, encoder, &encoder_funcs,
- DRM_MODE_ENCODER_NONE, NULL);
+ ret = drm_simple_encoder_init(rcdu->ddev, encoder,
+ DRM_MODE_ENCODER_NONE);
if (ret < 0)
goto done;
- drm_encoder_helper_add(encoder, &encoder_helper_funcs);
-
/*
* Attach the bridge to the encoder. The bridge will create the
* connector.
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index fcfd916227d1..482329102f19 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -712,7 +712,9 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
unsigned int i;
int ret;
- drm_mode_config_init(dev);
+ ret = drmm_mode_config_init(dev);
+ if (ret)
+ return ret;
dev->mode_config.min_width = 0;
dev->mode_config.min_height = 0;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
index c6430027169f..a0021fc25b27 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
@@ -785,13 +785,15 @@ int rcar_du_planes_init(struct rcar_du_group *rgrp)
drm_plane_create_alpha_property(&plane->plane);
- if (type == DRM_PLANE_TYPE_PRIMARY)
- continue;
-
- drm_object_attach_property(&plane->plane.base,
- rcdu->props.colorkey,
- RCAR_DU_COLORKEY_NONE);
- drm_plane_create_zpos_property(&plane->plane, 1, 1, 7);
+ if (type == DRM_PLANE_TYPE_PRIMARY) {
+ drm_plane_create_zpos_immutable_property(&plane->plane,
+ 0);
+ } else {
+ drm_object_attach_property(&plane->plane.base,
+ rcdu->props.colorkey,
+ RCAR_DU_COLORKEY_NONE);
+ drm_plane_create_zpos_property(&plane->plane, 1, 1, 7);
+ }
}
return 0;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
index 5e4faf258c31..f1a81c9b184d 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
@@ -392,12 +392,14 @@ int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np,
drm_plane_helper_add(&plane->plane,
&rcar_du_vsp_plane_helper_funcs);
- if (type == DRM_PLANE_TYPE_PRIMARY)
- continue;
-
- drm_plane_create_alpha_property(&plane->plane);
- drm_plane_create_zpos_property(&plane->plane, 1, 1,
- vsp->num_planes - 1);
+ if (type == DRM_PLANE_TYPE_PRIMARY) {
+ drm_plane_create_zpos_immutable_property(&plane->plane,
+ 0);
+ } else {
+ drm_plane_create_alpha_property(&plane->plane);
+ drm_plane_create_zpos_property(&plane->plane, 1, 1,
+ vsp->num_planes - 1);
+ }
}
return 0;
diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
index ce98c08aa8b4..ade2327a10e2 100644
--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
@@ -26,6 +26,7 @@
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "rockchip_drm_drv.h"
#include "rockchip_drm_vop.h"
@@ -258,10 +259,6 @@ static struct drm_encoder_helper_funcs rockchip_dp_encoder_helper_funcs = {
.atomic_check = rockchip_dp_drm_encoder_atomic_check,
};
-static struct drm_encoder_funcs rockchip_dp_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static int rockchip_dp_of_probe(struct rockchip_dp_device *dp)
{
struct device *dev = dp->dev;
@@ -309,8 +306,8 @@ static int rockchip_dp_drm_create_encoder(struct rockchip_dp_device *dp)
dev->of_node);
DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
- ret = drm_encoder_init(drm_dev, encoder, &rockchip_dp_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
+ ret = drm_simple_encoder_init(drm_dev, encoder,
+ DRM_MODE_ENCODER_TMDS);
if (ret) {
DRM_ERROR("failed to initialize encoder with drm\n");
return ret;
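The rockchip and rcar-du conversions above drop per-driver drm_encoder_funcs whose only callback was drm_encoder_cleanup() and call drm_simple_encoder_init() instead, which provides that cleanup-only funcs table itself. The pattern, reduced to a sketch with hypothetical foo_* names:

static int foo_create_encoder(struct drm_device *drm,
			      struct drm_encoder *encoder,
			      struct device_node *np)
{
	encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, np);

	return drm_simple_encoder_init(drm, encoder,
				       DRM_MODE_ENCODER_TMDS);
}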
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
index eed594bd38d3..c634b95b50f7 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
@@ -20,6 +20,7 @@
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "cdn-dp-core.h"
#include "cdn-dp-reg.h"
@@ -689,10 +690,6 @@ static const struct drm_encoder_helper_funcs cdn_dp_encoder_helper_funcs = {
.atomic_check = cdn_dp_encoder_atomic_check,
};
-static const struct drm_encoder_funcs cdn_dp_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static int cdn_dp_parse_dt(struct cdn_dp_device *dp)
{
struct device *dev = dp->dev;
@@ -1030,8 +1027,8 @@ static int cdn_dp_bind(struct device *dev, struct device *master, void *data)
dev->of_node);
DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
- ret = drm_encoder_init(drm_dev, encoder, &cdn_dp_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
+ ret = drm_simple_encoder_init(drm_dev, encoder,
+ DRM_MODE_ENCODER_TMDS);
if (ret) {
DRM_ERROR("failed to initialize encoder with drm\n");
return ret;
@@ -1109,7 +1106,7 @@ static const struct component_ops cdn_dp_component_ops = {
.unbind = cdn_dp_unbind,
};
-int cdn_dp_suspend(struct device *dev)
+static int cdn_dp_suspend(struct device *dev)
{
struct cdn_dp_device *dp = dev_get_drvdata(dev);
int ret = 0;
@@ -1123,7 +1120,7 @@ int cdn_dp_suspend(struct device *dev)
return ret;
}
-int cdn_dp_resume(struct device *dev)
+static int cdn_dp_resume(struct device *dev)
{
struct cdn_dp_device *dp = dev_get_drvdata(dev);
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-reg.c b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
index 7361c07cb4a7..9d2163ef4d6e 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-reg.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
@@ -601,7 +601,7 @@ static int cdn_dp_get_msa_misc(struct video_info *video,
case YCBCR_4_2_0:
val[0] = 5;
break;
- };
+ }
switch (video->color_depth) {
case 6:
@@ -619,7 +619,7 @@ static int cdn_dp_get_msa_misc(struct video_info *video,
case 16:
val[1] = 4;
break;
- };
+ }
msa_misc = 2 * val[0] + 32 * val[1] +
((video->color_fmt == Y_ONLY) ? (1 << 14) : 0);
@@ -700,7 +700,7 @@ int cdn_dp_config_video(struct cdn_dp_device *dp)
case 16:
val = BCS_16;
break;
- };
+ }
val += video->color_fmt << 8;
ret = cdn_dp_reg_write(dp, DP_FRAMER_PXL_REPR, val);
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
index 6e1270e45f97..3feff0c45b3f 100644
--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
@@ -21,6 +21,7 @@
#include <drm/bridge/dw_mipi_dsi.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
+#include <drm/drm_simple_kms_helper.h>
#include "rockchip_drm_drv.h"
#include "rockchip_drm_vop.h"
@@ -789,10 +790,6 @@ dw_mipi_dsi_encoder_helper_funcs = {
.disable = dw_mipi_dsi_encoder_disable,
};
-static const struct drm_encoder_funcs dw_mipi_dsi_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static int rockchip_dsi_drm_create_encoder(struct dw_mipi_dsi_rockchip *dsi,
struct drm_device *drm_dev)
{
@@ -802,8 +799,7 @@ static int rockchip_dsi_drm_create_encoder(struct dw_mipi_dsi_rockchip *dsi,
encoder->possible_crtcs = drm_of_find_possible_crtcs(drm_dev,
dsi->dev->of_node);
- ret = drm_encoder_init(drm_dev, encoder, &dw_mipi_dsi_encoder_funcs,
- DRM_MODE_ENCODER_DSI, NULL);
+ ret = drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_DSI);
if (ret) {
DRM_ERROR("Failed to initialize encoder with drm\n");
return ret;
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
index 7f56d8c3491d..121aa8a63a76 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
@@ -14,6 +14,7 @@
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "rockchip_drm_drv.h"
#include "rockchip_drm_vop.h"
@@ -237,10 +238,6 @@ dw_hdmi_rockchip_mode_valid(struct drm_connector *connector,
return (valid) ? MODE_OK : MODE_BAD;
}
-static const struct drm_encoder_funcs dw_hdmi_rockchip_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static void dw_hdmi_rockchip_encoder_disable(struct drm_encoder *encoder)
{
}
@@ -546,8 +543,7 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
}
drm_encoder_helper_add(encoder, &dw_hdmi_rockchip_encoder_helper_funcs);
- drm_encoder_init(drm, encoder, &dw_hdmi_rockchip_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
+ drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
platform_set_drvdata(pdev, hdmi);
diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c
index e5864e823020..7afdc54eb3ec 100644
--- a/drivers/gpu/drm/rockchip/inno_hdmi.c
+++ b/drivers/gpu/drm/rockchip/inno_hdmi.c
@@ -19,6 +19,7 @@
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "rockchip_drm_drv.h"
#include "rockchip_drm_vop.h"
@@ -532,10 +533,6 @@ static struct drm_encoder_helper_funcs inno_hdmi_encoder_helper_funcs = {
.atomic_check = inno_hdmi_encoder_atomic_check,
};
-static struct drm_encoder_funcs inno_hdmi_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static enum drm_connector_status
inno_hdmi_connector_detect(struct drm_connector *connector, bool force)
{
@@ -617,8 +614,7 @@ static int inno_hdmi_register(struct drm_device *drm, struct inno_hdmi *hdmi)
return -EPROBE_DEFER;
drm_encoder_helper_add(encoder, &inno_hdmi_encoder_helper_funcs);
- drm_encoder_init(drm, encoder, &inno_hdmi_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
+ drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
hdmi->connector.polled = DRM_CONNECTOR_POLL_HPD;
diff --git a/drivers/gpu/drm/rockchip/rk3066_hdmi.c b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
index fe203d38664e..1c546c3a8998 100644
--- a/drivers/gpu/drm/rockchip/rk3066_hdmi.c
+++ b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
@@ -6,6 +6,7 @@
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include <linux/clk.h>
#include <linux/mfd/syscon.h>
@@ -451,10 +452,6 @@ struct drm_encoder_helper_funcs rk3066_hdmi_encoder_helper_funcs = {
.atomic_check = rk3066_hdmi_encoder_atomic_check,
};
-static const struct drm_encoder_funcs rk3066_hdmi_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static enum drm_connector_status
rk3066_hdmi_connector_detect(struct drm_connector *connector, bool force)
{
@@ -557,8 +554,7 @@ rk3066_hdmi_register(struct drm_device *drm, struct rk3066_hdmi *hdmi)
return -EPROBE_DEFER;
drm_encoder_helper_add(encoder, &rk3066_hdmi_encoder_helper_funcs);
- drm_encoder_init(drm, encoder, &rk3066_hdmi_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
+ drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
hdmi->connector.polled = DRM_CONNECTOR_POLL_HPD;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index 20ecb1508a22..0f3eb392fe39 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -135,14 +135,16 @@ static int rockchip_drm_bind(struct device *dev)
if (ret)
goto err_free;
- drm_mode_config_init(drm_dev);
+ ret = drmm_mode_config_init(drm_dev);
+ if (ret)
+ goto err_iommu_cleanup;
rockchip_drm_mode_config_init(drm_dev);
/* Try to bind all sub drivers. */
ret = component_bind_all(dev, drm_dev);
if (ret)
- goto err_mode_config_cleanup;
+ goto err_iommu_cleanup;
ret = drm_vblank_init(drm_dev, drm_dev->mode_config.num_crtc);
if (ret)
@@ -173,12 +175,9 @@ err_kms_helper_poll_fini:
rockchip_drm_fbdev_fini(drm_dev);
err_unbind_all:
component_unbind_all(dev, drm_dev);
-err_mode_config_cleanup:
- drm_mode_config_cleanup(drm_dev);
+err_iommu_cleanup:
rockchip_iommu_cleanup(drm_dev);
err_free:
- drm_dev->dev_private = NULL;
- dev_set_drvdata(dev, NULL);
drm_dev_put(drm_dev);
return ret;
}
@@ -194,11 +193,8 @@ static void rockchip_drm_unbind(struct device *dev)
drm_atomic_helper_shutdown(drm_dev);
component_unbind_all(dev, drm_dev);
- drm_mode_config_cleanup(drm_dev);
rockchip_iommu_cleanup(drm_dev);
- drm_dev->dev_private = NULL;
- dev_set_drvdata(dev, NULL);
drm_dev_put(drm_dev);
}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
index c5b06048124e..e33c2dcd0d4b 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
@@ -30,6 +30,7 @@ struct rockchip_crtc_state {
int output_mode;
int output_bpc;
int output_flags;
+ bool enable_afbc;
};
#define to_rockchip_crtc_state(s) \
container_of(s, struct rockchip_crtc_state, base)
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
index 221e72e71432..9b13c784b347 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
@@ -57,8 +57,49 @@ static const struct drm_mode_config_helper_funcs rockchip_mode_config_helpers =
.atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
};
+static struct drm_framebuffer *
+rockchip_fb_create(struct drm_device *dev, struct drm_file *file,
+ const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct drm_afbc_framebuffer *afbc_fb;
+ const struct drm_format_info *info;
+ int ret;
+
+ info = drm_get_format_info(dev, mode_cmd);
+ if (!info)
+ return ERR_PTR(-ENOMEM);
+
+ afbc_fb = kzalloc(sizeof(*afbc_fb), GFP_KERNEL);
+ if (!afbc_fb)
+ return ERR_PTR(-ENOMEM);
+
+ ret = drm_gem_fb_init_with_funcs(dev, &afbc_fb->base, file, mode_cmd,
+ &rockchip_drm_fb_funcs);
+ if (ret) {
+ kfree(afbc_fb);
+ return ERR_PTR(ret);
+ }
+
+ if (drm_is_afbc(mode_cmd->modifier[0])) {
+ int ret, i;
+
+ ret = drm_gem_fb_afbc_init(dev, mode_cmd, afbc_fb);
+ if (ret) {
+ struct drm_gem_object **obj = afbc_fb->base.obj;
+
+ for (i = 0; i < info->num_planes; ++i)
+ drm_gem_object_put_unlocked(obj[i]);
+
+ kfree(afbc_fb);
+ return ERR_PTR(ret);
+ }
+ }
+
+ return &afbc_fb->base;
+}
+
static const struct drm_mode_config_funcs rockchip_drm_mode_config_funcs = {
- .fb_create = drm_gem_fb_create_with_dirty,
+ .fb_create = rockchip_fb_create,
.output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index cecb2cc781f5..33463b79a37b 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -91,9 +91,22 @@
#define VOP_WIN_TO_INDEX(vop_win) \
((vop_win) - (vop_win)->vop->win)
+#define VOP_AFBC_SET(vop, name, v) \
+ do { \
+ if ((vop)->data->afbc) \
+ vop_reg_set((vop), &(vop)->data->afbc->name, \
+ 0, ~0, v, #name); \
+ } while (0)
+
#define to_vop(x) container_of(x, struct vop, crtc)
#define to_vop_win(x) container_of(x, struct vop_win, base)
+#define AFBC_FMT_RGB565 0x0
+#define AFBC_FMT_U8U8U8U8 0x5
+#define AFBC_FMT_U8U8U8 0x4
+
+#define AFBC_TILE_16x16 BIT(4)
+
/*
* The coefficients of the following matrix are all fixed points.
* The format is S2.10 for the 3x3 part of the matrix, and S9.12 for the offsets.
@@ -274,6 +287,29 @@ static enum vop_data_format vop_convert_format(uint32_t format)
}
}
+static int vop_convert_afbc_format(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ return AFBC_FMT_U8U8U8U8;
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_BGR888:
+ return AFBC_FMT_U8U8U8;
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_BGR565:
+ return AFBC_FMT_RGB565;
+ /* neither of the below should be reachable */
+ default:
+ DRM_WARN_ONCE("unsupported AFBC format[%08x]\n", format);
+ return -EINVAL;
+ }
+
+ return -EINVAL;
+}
+
static uint16_t scl_vop_cal_scale(enum scale_mode mode, uint32_t src,
uint32_t dst, bool is_horizontal,
int vsu_mode, int *vskiplines)
@@ -598,6 +634,17 @@ static int vop_enable(struct drm_crtc *crtc, struct drm_crtc_state *old_state)
vop_win_disable(vop, vop_win);
}
}
+
+ if (vop->data->afbc) {
+ struct rockchip_crtc_state *s;
+ /*
+ * Disable AFBC and forget there was a vop window with AFBC
+ */
+ VOP_AFBC_SET(vop, enable, 0);
+ s = to_rockchip_crtc_state(crtc->state);
+ s->enable_afbc = false;
+ }
+
spin_unlock(&vop->reg_lock);
vop_cfg_done(vop);
@@ -710,6 +757,26 @@ static void vop_plane_destroy(struct drm_plane *plane)
drm_plane_cleanup(plane);
}
+static inline bool rockchip_afbc(u64 modifier)
+{
+ return modifier == ROCKCHIP_AFBC_MOD;
+}
+
+static bool rockchip_mod_supported(struct drm_plane *plane,
+ u32 format, u64 modifier)
+{
+ if (modifier == DRM_FORMAT_MOD_LINEAR)
+ return true;
+
+ if (!rockchip_afbc(modifier)) {
+ DRM_DEBUG_KMS("Unsupported format modifier 0x%llx\n", modifier);
+
+ return false;
+ }
+
+ return vop_convert_afbc_format(format) >= 0;
+}
+
static int vop_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
@@ -758,6 +825,30 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
return -EINVAL;
}
+ if (rockchip_afbc(fb->modifier)) {
+ struct vop *vop = to_vop(crtc);
+
+ if (!vop->data->afbc) {
+ DRM_ERROR("vop does not support AFBC\n");
+ return -EINVAL;
+ }
+
+ ret = vop_convert_afbc_format(fb->format->format);
+ if (ret < 0)
+ return ret;
+
+ if (state->src.x1 || state->src.y1) {
+ DRM_ERROR("AFBC does not support offset display, xpos=%d, ypos=%d, offset=%d\n", state->src.x1, state->src.y1, fb->offsets[0]);
+ return -EINVAL;
+ }
+
+ if (state->rotation && state->rotation != DRM_MODE_ROTATE_0) {
+ DRM_ERROR("No rotation support in AFBC, rotation=%d\n",
+ state->rotation);
+ return -EINVAL;
+ }
+ }
+
return 0;
}
@@ -846,6 +937,16 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
spin_lock(&vop->reg_lock);
+ if (rockchip_afbc(fb->modifier)) {
+ int afbc_format = vop_convert_afbc_format(fb->format->format);
+
+ VOP_AFBC_SET(vop, format, afbc_format | AFBC_TILE_16x16);
+ VOP_AFBC_SET(vop, hreg_block_split, 0);
+ VOP_AFBC_SET(vop, win_sel, VOP_WIN_TO_INDEX(vop_win));
+ VOP_AFBC_SET(vop, hdr_ptr, dma_addr);
+ VOP_AFBC_SET(vop, pic_size, act_info);
+ }
+
VOP_WIN_SET(vop, win, format, format);
VOP_WIN_SET(vop, win, yrgb_vir, DIV_ROUND_UP(fb->pitches[0], 4));
VOP_WIN_SET(vop, win, yrgb_mst, dma_addr);
@@ -1001,6 +1102,7 @@ static const struct drm_plane_funcs vop_plane_funcs = {
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+ .format_mod_supported = rockchip_mod_supported,
};
static int vop_crtc_enable_vblank(struct drm_crtc *crtc)
@@ -1310,6 +1412,10 @@ static int vop_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_crtc_state *crtc_state)
{
struct vop *vop = to_vop(crtc);
+ struct drm_plane *plane;
+ struct drm_plane_state *plane_state;
+ struct rockchip_crtc_state *s;
+ int afbc_planes = 0;
if (vop->lut_regs && crtc_state->color_mgmt_changed &&
crtc_state->gamma_lut) {
@@ -1323,6 +1429,27 @@ static int vop_crtc_atomic_check(struct drm_crtc *crtc,
}
}
+ drm_atomic_crtc_state_for_each_plane(plane, crtc_state) {
+ plane_state =
+ drm_atomic_get_plane_state(crtc_state->state, plane);
+ if (IS_ERR(plane_state)) {
+ DRM_DEBUG_KMS("Cannot get plane state for plane %s\n",
+ plane->name);
+ return PTR_ERR(plane_state);
+ }
+
+ if (drm_is_afbc(plane_state->fb->modifier))
+ ++afbc_planes;
+ }
+
+ if (afbc_planes > 1) {
+ DRM_DEBUG_KMS("Invalid number of AFBC planes; got %d, expected at most 1\n", afbc_planes);
+ return -EINVAL;
+ }
+
+ s = to_rockchip_crtc_state(crtc_state);
+ s->enable_afbc = afbc_planes > 0;
+
return 0;
}
@@ -1333,6 +1460,7 @@ static void vop_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_plane_state *old_plane_state, *new_plane_state;
struct vop *vop = to_vop(crtc);
struct drm_plane *plane;
+ struct rockchip_crtc_state *s;
int i;
if (WARN_ON(!vop->is_enabled))
@@ -1340,6 +1468,9 @@ static void vop_crtc_atomic_flush(struct drm_crtc *crtc,
spin_lock(&vop->reg_lock);
+ /* Enable AFBC if there is some AFBC window, disable otherwise. */
+ s = to_rockchip_crtc_state(crtc->state);
+ VOP_AFBC_SET(vop, enable, s->enable_afbc);
vop_cfg_done(vop);
spin_unlock(&vop->reg_lock);
@@ -1634,7 +1765,8 @@ static int vop_create_crtc(struct vop *vop)
0, &vop_plane_funcs,
win_data->phy->data_formats,
win_data->phy->nformats,
- NULL, win_data->type, NULL);
+ win_data->phy->format_modifiers,
+ win_data->type, NULL);
if (ret) {
DRM_DEV_ERROR(vop->dev, "failed to init plane %d\n",
ret);
@@ -1678,7 +1810,8 @@ static int vop_create_crtc(struct vop *vop)
&vop_plane_funcs,
win_data->phy->data_formats,
win_data->phy->nformats,
- NULL, win_data->type, NULL);
+ win_data->phy->format_modifiers,
+ win_data->type, NULL);
if (ret) {
DRM_DEV_ERROR(vop->dev, "failed to init overlay %d\n",
ret);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
index cc672620d6e0..d03bdb531ef2 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
@@ -17,6 +17,11 @@
#define NUM_YUV2YUV_COEFFICIENTS 12
+#define ROCKCHIP_AFBC_MOD \
+ DRM_FORMAT_MOD_ARM_AFBC( \
+ AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 | AFBC_FORMAT_MOD_SPARSE \
+ )
+
enum vop_data_format {
VOP_FMT_ARGB8888 = 0,
VOP_FMT_RGB888,
@@ -34,6 +39,16 @@ struct vop_reg {
bool relaxed;
};
+struct vop_afbc {
+ struct vop_reg enable;
+ struct vop_reg win_sel;
+ struct vop_reg format;
+ struct vop_reg hreg_block_split;
+ struct vop_reg pic_size;
+ struct vop_reg hdr_ptr;
+ struct vop_reg rstn;
+};
+
struct vop_modeset {
struct vop_reg htotal_pw;
struct vop_reg hact_st_end;
@@ -134,6 +149,7 @@ struct vop_win_phy {
const struct vop_scl_regs *scl;
const uint32_t *data_formats;
uint32_t nformats;
+ const uint64_t *format_modifiers;
struct vop_reg enable;
struct vop_reg gate;
@@ -173,6 +189,7 @@ struct vop_data {
const struct vop_misc *misc;
const struct vop_modeset *modeset;
const struct vop_output *output;
+ const struct vop_afbc *afbc;
const struct vop_win_yuv2yuv_data *win_yuv2yuv;
const struct vop_win_data *win;
unsigned int win_size;
diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c
index 449a62908d21..63f967902c2d 100644
--- a/drivers/gpu/drm/rockchip/rockchip_lvds.c
+++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c
@@ -16,13 +16,14 @@
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/reset.h>
+
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
-
#include <drm/drm_dp_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "rockchip_drm_drv.h"
#include "rockchip_drm_vop.h"
@@ -435,10 +436,6 @@ struct drm_encoder_helper_funcs px30_lvds_encoder_helper_funcs = {
.atomic_check = rockchip_lvds_encoder_atomic_check,
};
-static const struct drm_encoder_funcs rockchip_lvds_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static int rk3288_lvds_probe(struct platform_device *pdev,
struct rockchip_lvds *lvds)
{
@@ -607,8 +604,7 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
encoder->possible_crtcs = drm_of_find_possible_crtcs(drm_dev,
dev->of_node);
- ret = drm_encoder_init(drm_dev, encoder, &rockchip_lvds_encoder_funcs,
- DRM_MODE_ENCODER_LVDS, NULL);
+ ret = drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_LVDS);
if (ret < 0) {
DRM_DEV_ERROR(drm_dev->dev,
"failed to initialize encoder: %d\n", ret);
diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.c b/drivers/gpu/drm/rockchip/rockchip_rgb.c
index 90784781e515..9a771af5d0c9 100644
--- a/drivers/gpu/drm/rockchip/rockchip_rgb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_rgb.c
@@ -14,6 +14,7 @@
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "rockchip_drm_drv.h"
#include "rockchip_drm_vop.h"
@@ -67,10 +68,6 @@ struct drm_encoder_helper_funcs rockchip_rgb_encoder_helper_funcs = {
.atomic_check = rockchip_rgb_encoder_atomic_check,
};
-static const struct drm_encoder_funcs rockchip_rgb_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
struct drm_crtc *crtc,
struct drm_device *drm_dev)
@@ -126,8 +123,7 @@ struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
encoder = &rgb->encoder;
encoder->possible_crtcs = drm_crtc_mask(crtc);
- ret = drm_encoder_init(drm_dev, encoder, &rockchip_rgb_encoder_funcs,
- DRM_MODE_ENCODER_NONE, NULL);
+ ret = drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_NONE);
if (ret < 0) {
DRM_DEV_ERROR(drm_dev->dev,
"failed to initialize encoder: %d\n", ret);
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
index 7a9d979c8d5d..2413deded22c 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
@@ -50,6 +50,17 @@ static const uint32_t formats_win_full[] = {
DRM_FORMAT_NV24,
};
+static const uint64_t format_modifiers_win_full[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID,
+};
+
+static const uint64_t format_modifiers_win_full_afbc[] = {
+ ROCKCHIP_AFBC_MOD,
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID,
+};
+
static const uint32_t formats_win_lite[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
@@ -61,6 +72,11 @@ static const uint32_t formats_win_lite[] = {
DRM_FORMAT_BGR565,
};
+static const uint64_t format_modifiers_win_lite[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID,
+};
+
static const struct vop_scl_regs rk3036_win_scl = {
.scale_yrgb_x = VOP_REG(RK3036_WIN0_SCL_FACTOR_YRGB, 0xffff, 0x0),
.scale_yrgb_y = VOP_REG(RK3036_WIN0_SCL_FACTOR_YRGB, 0xffff, 16),
@@ -72,6 +88,7 @@ static const struct vop_win_phy rk3036_win0_data = {
.scl = &rk3036_win_scl,
.data_formats = formats_win_full,
.nformats = ARRAY_SIZE(formats_win_full),
+ .format_modifiers = format_modifiers_win_full,
.enable = VOP_REG(RK3036_SYS_CTRL, 0x1, 0),
.format = VOP_REG(RK3036_SYS_CTRL, 0x7, 3),
.rb_swap = VOP_REG(RK3036_SYS_CTRL, 0x1, 15),
@@ -87,6 +104,7 @@ static const struct vop_win_phy rk3036_win0_data = {
static const struct vop_win_phy rk3036_win1_data = {
.data_formats = formats_win_lite,
.nformats = ARRAY_SIZE(formats_win_lite),
+ .format_modifiers = format_modifiers_win_lite,
.enable = VOP_REG(RK3036_SYS_CTRL, 0x1, 1),
.format = VOP_REG(RK3036_SYS_CTRL, 0x7, 6),
.rb_swap = VOP_REG(RK3036_SYS_CTRL, 0x1, 19),
@@ -153,6 +171,7 @@ static const struct vop_data rk3036_vop = {
static const struct vop_win_phy rk3126_win1_data = {
.data_formats = formats_win_lite,
.nformats = ARRAY_SIZE(formats_win_lite),
+ .format_modifiers = format_modifiers_win_lite,
.enable = VOP_REG(RK3036_SYS_CTRL, 0x1, 1),
.format = VOP_REG(RK3036_SYS_CTRL, 0x7, 6),
.rb_swap = VOP_REG(RK3036_SYS_CTRL, 0x1, 19),
@@ -234,6 +253,7 @@ static const struct vop_win_phy px30_win0_data = {
.scl = &px30_win_scl,
.data_formats = formats_win_full,
.nformats = ARRAY_SIZE(formats_win_full),
+ .format_modifiers = format_modifiers_win_full,
.enable = VOP_REG(PX30_WIN0_CTRL0, 0x1, 0),
.format = VOP_REG(PX30_WIN0_CTRL0, 0x7, 1),
.rb_swap = VOP_REG(PX30_WIN0_CTRL0, 0x1, 12),
@@ -249,6 +269,7 @@ static const struct vop_win_phy px30_win0_data = {
static const struct vop_win_phy px30_win1_data = {
.data_formats = formats_win_lite,
.nformats = ARRAY_SIZE(formats_win_lite),
+ .format_modifiers = format_modifiers_win_lite,
.enable = VOP_REG(PX30_WIN1_CTRL0, 0x1, 0),
.format = VOP_REG(PX30_WIN1_CTRL0, 0x7, 4),
.rb_swap = VOP_REG(PX30_WIN1_CTRL0, 0x1, 12),
@@ -261,6 +282,7 @@ static const struct vop_win_phy px30_win1_data = {
static const struct vop_win_phy px30_win2_data = {
.data_formats = formats_win_lite,
.nformats = ARRAY_SIZE(formats_win_lite),
+ .format_modifiers = format_modifiers_win_lite,
.gate = VOP_REG(PX30_WIN2_CTRL0, 0x1, 4),
.enable = VOP_REG(PX30_WIN2_CTRL0, 0x1, 0),
.format = VOP_REG(PX30_WIN2_CTRL0, 0x3, 5),
@@ -316,6 +338,7 @@ static const struct vop_win_phy rk3066_win0_data = {
.scl = &rk3066_win_scl,
.data_formats = formats_win_full,
.nformats = ARRAY_SIZE(formats_win_full),
+ .format_modifiers = format_modifiers_win_full,
.enable = VOP_REG(RK3066_SYS_CTRL1, 0x1, 0),
.format = VOP_REG(RK3066_SYS_CTRL0, 0x7, 4),
.rb_swap = VOP_REG(RK3066_SYS_CTRL0, 0x1, 19),
@@ -332,6 +355,7 @@ static const struct vop_win_phy rk3066_win1_data = {
.scl = &rk3066_win_scl,
.data_formats = formats_win_full,
.nformats = ARRAY_SIZE(formats_win_full),
+ .format_modifiers = format_modifiers_win_full,
.enable = VOP_REG(RK3066_SYS_CTRL1, 0x1, 1),
.format = VOP_REG(RK3066_SYS_CTRL0, 0x7, 7),
.rb_swap = VOP_REG(RK3066_SYS_CTRL0, 0x1, 23),
@@ -347,6 +371,7 @@ static const struct vop_win_phy rk3066_win1_data = {
static const struct vop_win_phy rk3066_win2_data = {
.data_formats = formats_win_lite,
.nformats = ARRAY_SIZE(formats_win_lite),
+ .format_modifiers = format_modifiers_win_lite,
.enable = VOP_REG(RK3066_SYS_CTRL1, 0x1, 2),
.format = VOP_REG(RK3066_SYS_CTRL0, 0x7, 10),
.rb_swap = VOP_REG(RK3066_SYS_CTRL0, 0x1, 27),
@@ -426,6 +451,7 @@ static const struct vop_win_phy rk3188_win0_data = {
.scl = &rk3188_win_scl,
.data_formats = formats_win_full,
.nformats = ARRAY_SIZE(formats_win_full),
+ .format_modifiers = format_modifiers_win_full,
.enable = VOP_REG(RK3188_SYS_CTRL, 0x1, 0),
.format = VOP_REG(RK3188_SYS_CTRL, 0x7, 3),
.rb_swap = VOP_REG(RK3188_SYS_CTRL, 0x1, 15),
@@ -440,6 +466,7 @@ static const struct vop_win_phy rk3188_win0_data = {
static const struct vop_win_phy rk3188_win1_data = {
.data_formats = formats_win_lite,
.nformats = ARRAY_SIZE(formats_win_lite),
+ .format_modifiers = format_modifiers_win_lite,
.enable = VOP_REG(RK3188_SYS_CTRL, 0x1, 1),
.format = VOP_REG(RK3188_SYS_CTRL, 0x7, 6),
.rb_swap = VOP_REG(RK3188_SYS_CTRL, 0x1, 19),
@@ -545,6 +572,7 @@ static const struct vop_win_phy rk3288_win01_data = {
.scl = &rk3288_win_full_scl,
.data_formats = formats_win_full,
.nformats = ARRAY_SIZE(formats_win_full),
+ .format_modifiers = format_modifiers_win_full,
.enable = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 0),
.format = VOP_REG(RK3288_WIN0_CTRL0, 0x7, 1),
.rb_swap = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 12),
@@ -563,6 +591,7 @@ static const struct vop_win_phy rk3288_win01_data = {
static const struct vop_win_phy rk3288_win23_data = {
.data_formats = formats_win_lite,
.nformats = ARRAY_SIZE(formats_win_lite),
+ .format_modifiers = format_modifiers_win_lite,
.enable = VOP_REG(RK3288_WIN2_CTRL0, 0x1, 4),
.gate = VOP_REG(RK3288_WIN2_CTRL0, 0x1, 0),
.format = VOP_REG(RK3288_WIN2_CTRL0, 0x7, 1),
@@ -677,6 +706,7 @@ static const struct vop_win_phy rk3368_win01_data = {
.scl = &rk3288_win_full_scl,
.data_formats = formats_win_full,
.nformats = ARRAY_SIZE(formats_win_full),
+ .format_modifiers = format_modifiers_win_full,
.enable = VOP_REG(RK3368_WIN0_CTRL0, 0x1, 0),
.format = VOP_REG(RK3368_WIN0_CTRL0, 0x7, 1),
.rb_swap = VOP_REG(RK3368_WIN0_CTRL0, 0x1, 12),
@@ -697,6 +727,7 @@ static const struct vop_win_phy rk3368_win01_data = {
static const struct vop_win_phy rk3368_win23_data = {
.data_formats = formats_win_lite,
.nformats = ARRAY_SIZE(formats_win_lite),
+ .format_modifiers = format_modifiers_win_lite,
.gate = VOP_REG(RK3368_WIN2_CTRL0, 0x1, 0),
.enable = VOP_REG(RK3368_WIN2_CTRL0, 0x1, 4),
.format = VOP_REG(RK3368_WIN2_CTRL0, 0x3, 5),
@@ -817,6 +848,53 @@ static const struct vop_win_yuv2yuv_data rk3399_vop_big_win_yuv2yuv_data[] = {
.y2r_en = VOP_REG(RK3399_YUV2YUV_WIN, 0x1, 9) },
{ .base = 0xC0, .phy = &rk3399_yuv2yuv_win23_data },
{ .base = 0x120, .phy = &rk3399_yuv2yuv_win23_data },
+
+};
+
+static const struct vop_win_phy rk3399_win01_data = {
+ .scl = &rk3288_win_full_scl,
+ .data_formats = formats_win_full,
+ .nformats = ARRAY_SIZE(formats_win_full),
+ .format_modifiers = format_modifiers_win_full_afbc,
+ .enable = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 0),
+ .format = VOP_REG(RK3288_WIN0_CTRL0, 0x7, 1),
+ .rb_swap = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 12),
+ .y_mir_en = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 22),
+ .act_info = VOP_REG(RK3288_WIN0_ACT_INFO, 0x1fff1fff, 0),
+ .dsp_info = VOP_REG(RK3288_WIN0_DSP_INFO, 0x0fff0fff, 0),
+ .dsp_st = VOP_REG(RK3288_WIN0_DSP_ST, 0x1fff1fff, 0),
+ .yrgb_mst = VOP_REG(RK3288_WIN0_YRGB_MST, 0xffffffff, 0),
+ .uv_mst = VOP_REG(RK3288_WIN0_CBR_MST, 0xffffffff, 0),
+ .yrgb_vir = VOP_REG(RK3288_WIN0_VIR, 0x3fff, 0),
+ .uv_vir = VOP_REG(RK3288_WIN0_VIR, 0x3fff, 16),
+ .src_alpha_ctl = VOP_REG(RK3288_WIN0_SRC_ALPHA_CTRL, 0xff, 0),
+ .dst_alpha_ctl = VOP_REG(RK3288_WIN0_DST_ALPHA_CTRL, 0xff, 0),
+};
+
+/*
+ * The rk3399 vop big window register layout is the same as rk3288's, but we
+ * have a separate rk3399 win data array here so that we can advertise
+ * AFBC on the primary plane.
+ */
+static const struct vop_win_data rk3399_vop_win_data[] = {
+ { .base = 0x00, .phy = &rk3399_win01_data,
+ .type = DRM_PLANE_TYPE_PRIMARY },
+ { .base = 0x40, .phy = &rk3288_win01_data,
+ .type = DRM_PLANE_TYPE_OVERLAY },
+ { .base = 0x00, .phy = &rk3288_win23_data,
+ .type = DRM_PLANE_TYPE_OVERLAY },
+ { .base = 0x50, .phy = &rk3288_win23_data,
+ .type = DRM_PLANE_TYPE_CURSOR },
+};
+
+static const struct vop_afbc rk3399_vop_afbc = {
+ .rstn = VOP_REG(RK3399_AFBCD0_CTRL, 0x1, 3),
+ .enable = VOP_REG(RK3399_AFBCD0_CTRL, 0x1, 0),
+ .win_sel = VOP_REG(RK3399_AFBCD0_CTRL, 0x3, 1),
+ .format = VOP_REG(RK3399_AFBCD0_CTRL, 0x1f, 16),
+ .hreg_block_split = VOP_REG(RK3399_AFBCD0_CTRL, 0x1, 21),
+ .hdr_ptr = VOP_REG(RK3399_AFBCD0_HDR_PTR, 0xffffffff, 0),
+ .pic_size = VOP_REG(RK3399_AFBCD0_PIC_SIZE, 0xffffffff, 0),
};
static const struct vop_data rk3399_vop_big = {
@@ -826,9 +904,10 @@ static const struct vop_data rk3399_vop_big = {
.common = &rk3288_common,
.modeset = &rk3288_modeset,
.output = &rk3399_output,
+ .afbc = &rk3399_vop_afbc,
.misc = &rk3368_misc,
- .win = rk3368_vop_win_data,
- .win_size = ARRAY_SIZE(rk3368_vop_win_data),
+ .win = rk3399_vop_win_data,
+ .win_size = ARRAY_SIZE(rk3399_vop_win_data),
.win_yuv2yuv = rk3399_vop_big_win_yuv2yuv_data,
};
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index 75a752d59ef1..03556dbfcafb 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -17,6 +17,7 @@
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_vblank.h>
#include "shmob_drm_backlight.h"
@@ -558,15 +559,6 @@ static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
.mode_set = shmob_drm_encoder_mode_set,
};
-static void shmob_drm_encoder_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
-}
-
-static const struct drm_encoder_funcs encoder_funcs = {
- .destroy = shmob_drm_encoder_destroy,
-};
-
int shmob_drm_encoder_create(struct shmob_drm_device *sdev)
{
struct drm_encoder *encoder = &sdev->encoder.encoder;
@@ -576,8 +568,8 @@ int shmob_drm_encoder_create(struct shmob_drm_device *sdev)
encoder->possible_crtcs = 1;
- ret = drm_encoder_init(sdev->ddev, encoder, &encoder_funcs,
- DRM_MODE_ENCODER_LVDS, NULL);
+ ret = drm_simple_encoder_init(sdev->ddev, encoder,
+ DRM_MODE_ENCODER_LVDS);
if (ret < 0)
return ret;
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index b8c0930959c7..ae9d6b8d3ca8 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -192,7 +192,6 @@ static int shmob_drm_remove(struct platform_device *pdev)
drm_dev_unregister(ddev);
drm_kms_helper_poll_fini(ddev);
- drm_mode_config_cleanup(ddev);
drm_irq_uninstall(ddev);
drm_dev_put(ddev);
@@ -288,7 +287,6 @@ err_irq_uninstall:
drm_irq_uninstall(ddev);
err_modeset_cleanup:
drm_kms_helper_poll_fini(ddev);
- drm_mode_config_cleanup(ddev);
err_free_drm_dev:
drm_dev_put(ddev);
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_kms.c b/drivers/gpu/drm/shmobile/shmob_drm_kms.c
index c51197b6fd85..7a866d6ce6bb 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_kms.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_kms.c
@@ -126,7 +126,11 @@ static const struct drm_mode_config_funcs shmob_drm_mode_config_funcs = {
int shmob_drm_modeset_init(struct shmob_drm_device *sdev)
{
- drm_mode_config_init(sdev->ddev);
+ int ret;
+
+ ret = drmm_mode_config_init(sdev->ddev);
+ if (ret)
+ return ret;
shmob_drm_crtc_create(sdev);
shmob_drm_encoder_create(sdev);
diff --git a/drivers/gpu/drm/sti/sti_compositor.c b/drivers/gpu/drm/sti/sti_compositor.c
index c7652584255d..319962a2c17b 100644
--- a/drivers/gpu/drm/sti/sti_compositor.c
+++ b/drivers/gpu/drm/sti/sti_compositor.c
@@ -42,8 +42,8 @@ static const struct sti_compositor_data stih407_compositor_data = {
},
};
-int sti_compositor_debugfs_init(struct sti_compositor *compo,
- struct drm_minor *minor)
+void sti_compositor_debugfs_init(struct sti_compositor *compo,
+ struct drm_minor *minor)
{
unsigned int i;
@@ -54,8 +54,6 @@ int sti_compositor_debugfs_init(struct sti_compositor *compo,
for (i = 0; i < STI_MAX_MIXER; i++)
if (compo->mixer[i])
sti_mixer_debugfs_init(compo->mixer[i], minor);
-
- return 0;
}
static int sti_compositor_bind(struct device *dev,
diff --git a/drivers/gpu/drm/sti/sti_compositor.h b/drivers/gpu/drm/sti/sti_compositor.h
index ac4bb3834810..25bb01bdd013 100644
--- a/drivers/gpu/drm/sti/sti_compositor.h
+++ b/drivers/gpu/drm/sti/sti_compositor.h
@@ -79,7 +79,7 @@ struct sti_compositor {
struct notifier_block vtg_vblank_nb[STI_MAX_MIXER];
};
-int sti_compositor_debugfs_init(struct sti_compositor *compo,
- struct drm_minor *minor);
+void sti_compositor_debugfs_init(struct sti_compositor *compo,
+ struct drm_minor *minor);
#endif
diff --git a/drivers/gpu/drm/sti/sti_crtc.c b/drivers/gpu/drm/sti/sti_crtc.c
index 49e6cb8f5836..6f37c104c46f 100644
--- a/drivers/gpu/drm/sti/sti_crtc.c
+++ b/drivers/gpu/drm/sti/sti_crtc.c
@@ -319,7 +319,7 @@ static int sti_crtc_late_register(struct drm_crtc *crtc)
struct sti_compositor *compo = dev_get_drvdata(mixer->dev);
if (drm_crtc_index(crtc) == 0)
- return sti_compositor_debugfs_init(compo, crtc->dev->primary);
+ sti_compositor_debugfs_init(compo, crtc->dev->primary);
return 0;
}
diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c
index ea64c1dcaf63..a98057431023 100644
--- a/drivers/gpu/drm/sti/sti_cursor.c
+++ b/drivers/gpu/drm/sti/sti_cursor.c
@@ -131,17 +131,17 @@ static struct drm_info_list cursor_debugfs_files[] = {
{ "cursor", cursor_dbg_show, 0, NULL },
};
-static int cursor_debugfs_init(struct sti_cursor *cursor,
- struct drm_minor *minor)
+static void cursor_debugfs_init(struct sti_cursor *cursor,
+ struct drm_minor *minor)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(cursor_debugfs_files); i++)
cursor_debugfs_files[i].data = cursor;
- return drm_debugfs_create_files(cursor_debugfs_files,
- ARRAY_SIZE(cursor_debugfs_files),
- minor->debugfs_root, minor);
+ drm_debugfs_create_files(cursor_debugfs_files,
+ ARRAY_SIZE(cursor_debugfs_files),
+ minor->debugfs_root, minor);
}
static void sti_cursor_argb8888_to_clut8(struct sti_cursor *cursor, u32 *src)
@@ -342,7 +342,9 @@ static int sti_cursor_late_register(struct drm_plane *drm_plane)
struct sti_plane *plane = to_sti_plane(drm_plane);
struct sti_cursor *cursor = to_sti_cursor(plane);
- return cursor_debugfs_init(cursor, drm_plane->dev->primary);
+ cursor_debugfs_init(cursor, drm_plane->dev->primary);
+
+ return 0;
}
static const struct drm_plane_funcs sti_cursor_plane_helpers_funcs = {
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index 50870d8cbb76..3f9db3e3f397 100644
--- a/drivers/gpu/drm/sti/sti_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -92,24 +92,16 @@ static struct drm_info_list sti_drm_dbg_list[] = {
{"fps_get", sti_drm_fps_dbg_show, 0},
};
-static int sti_drm_dbg_init(struct drm_minor *minor)
+static void sti_drm_dbg_init(struct drm_minor *minor)
{
- int ret;
-
- ret = drm_debugfs_create_files(sti_drm_dbg_list,
- ARRAY_SIZE(sti_drm_dbg_list),
- minor->debugfs_root, minor);
- if (ret)
- goto err;
+ drm_debugfs_create_files(sti_drm_dbg_list,
+ ARRAY_SIZE(sti_drm_dbg_list),
+ minor->debugfs_root, minor);
debugfs_create_file("fps_show", S_IRUGO | S_IWUSR, minor->debugfs_root,
minor->dev, &sti_drm_fps_fops);
DRM_INFO("%s: debugfs installed\n", DRIVER_NAME);
- return 0;
-err:
- DRM_ERROR("%s: cannot install debugfs\n", DRIVER_NAME);
- return ret;
}
static const struct drm_mode_config_funcs sti_mode_config_funcs = {
diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c
index 3d04bfca21a0..de4af7735c46 100644
--- a/drivers/gpu/drm/sti/sti_dvo.c
+++ b/drivers/gpu/drm/sti/sti_dvo.c
@@ -196,16 +196,16 @@ static struct drm_info_list dvo_debugfs_files[] = {
{ "dvo", dvo_dbg_show, 0, NULL },
};
-static int dvo_debugfs_init(struct sti_dvo *dvo, struct drm_minor *minor)
+static void dvo_debugfs_init(struct sti_dvo *dvo, struct drm_minor *minor)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(dvo_debugfs_files); i++)
dvo_debugfs_files[i].data = dvo;
- return drm_debugfs_create_files(dvo_debugfs_files,
- ARRAY_SIZE(dvo_debugfs_files),
- minor->debugfs_root, minor);
+ drm_debugfs_create_files(dvo_debugfs_files,
+ ARRAY_SIZE(dvo_debugfs_files),
+ minor->debugfs_root, minor);
}
static void sti_dvo_disable(struct drm_bridge *bridge)
@@ -405,10 +405,7 @@ static int sti_dvo_late_register(struct drm_connector *connector)
= to_sti_dvo_connector(connector);
struct sti_dvo *dvo = dvo_connector->dvo;
- if (dvo_debugfs_init(dvo, dvo->drm_dev->primary)) {
- DRM_ERROR("DVO debugfs setup failed\n");
- return -EINVAL;
- }
+ dvo_debugfs_init(dvo, dvo->drm_dev->primary);
return 0;
}
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
index 11595c748844..2d5a2b5b78b8 100644
--- a/drivers/gpu/drm/sti/sti_gdp.c
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -343,9 +343,10 @@ static int gdp_debugfs_init(struct sti_gdp *gdp, struct drm_minor *minor)
for (i = 0; i < nb_files; i++)
gdp_debugfs_files[i].data = gdp;
- return drm_debugfs_create_files(gdp_debugfs_files,
- nb_files,
- minor->debugfs_root, minor);
+ drm_debugfs_create_files(gdp_debugfs_files,
+ nb_files,
+ minor->debugfs_root, minor);
+ return 0;
}
static int sti_gdp_fourcc2format(int fourcc)
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index f3f28d79b0e4..a1ec891eaf3a 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -367,16 +367,16 @@ static struct drm_info_list hda_debugfs_files[] = {
{ "hda", hda_dbg_show, 0, NULL },
};
-static int hda_debugfs_init(struct sti_hda *hda, struct drm_minor *minor)
+static void hda_debugfs_init(struct sti_hda *hda, struct drm_minor *minor)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(hda_debugfs_files); i++)
hda_debugfs_files[i].data = hda;
- return drm_debugfs_create_files(hda_debugfs_files,
- ARRAY_SIZE(hda_debugfs_files),
- minor->debugfs_root, minor);
+ drm_debugfs_create_files(hda_debugfs_files,
+ ARRAY_SIZE(hda_debugfs_files),
+ minor->debugfs_root, minor);
}
/**
@@ -643,10 +643,7 @@ static int sti_hda_late_register(struct drm_connector *connector)
= to_sti_hda_connector(connector);
struct sti_hda *hda = hda_connector->hda;
- if (hda_debugfs_init(hda, hda->drm_dev->primary)) {
- DRM_ERROR("HDA debugfs setup failed\n");
- return -EINVAL;
- }
+ hda_debugfs_init(hda, hda->drm_dev->primary);
return 0;
}
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index 18eaf786ffa4..5b15c4974e6b 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -727,16 +727,16 @@ static struct drm_info_list hdmi_debugfs_files[] = {
{ "hdmi", hdmi_dbg_show, 0, NULL },
};
-static int hdmi_debugfs_init(struct sti_hdmi *hdmi, struct drm_minor *minor)
+static void hdmi_debugfs_init(struct sti_hdmi *hdmi, struct drm_minor *minor)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(hdmi_debugfs_files); i++)
hdmi_debugfs_files[i].data = hdmi;
- return drm_debugfs_create_files(hdmi_debugfs_files,
- ARRAY_SIZE(hdmi_debugfs_files),
- minor->debugfs_root, minor);
+ drm_debugfs_create_files(hdmi_debugfs_files,
+ ARRAY_SIZE(hdmi_debugfs_files),
+ minor->debugfs_root, minor);
}
static void sti_hdmi_disable(struct drm_bridge *bridge)
@@ -1113,10 +1113,7 @@ static int sti_hdmi_late_register(struct drm_connector *connector)
= to_sti_hdmi_connector(connector);
struct sti_hdmi *hdmi = hdmi_connector->hdmi;
- if (hdmi_debugfs_init(hdmi, hdmi->drm_dev->primary)) {
- DRM_ERROR("HDMI debugfs setup failed\n");
- return -EINVAL;
- }
+ hdmi_debugfs_init(hdmi, hdmi->drm_dev->primary);
return 0;
}
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
index 1015abe0ce08..5a4e12194a77 100644
--- a/drivers/gpu/drm/sti/sti_hqvdp.c
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -639,16 +639,16 @@ static struct drm_info_list hqvdp_debugfs_files[] = {
{ "hqvdp", hqvdp_dbg_show, 0, NULL },
};
-static int hqvdp_debugfs_init(struct sti_hqvdp *hqvdp, struct drm_minor *minor)
+static void hqvdp_debugfs_init(struct sti_hqvdp *hqvdp, struct drm_minor *minor)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(hqvdp_debugfs_files); i++)
hqvdp_debugfs_files[i].data = hqvdp;
- return drm_debugfs_create_files(hqvdp_debugfs_files,
- ARRAY_SIZE(hqvdp_debugfs_files),
- minor->debugfs_root, minor);
+ drm_debugfs_create_files(hqvdp_debugfs_files,
+ ARRAY_SIZE(hqvdp_debugfs_files),
+ minor->debugfs_root, minor);
}
/**
@@ -1274,7 +1274,9 @@ static int sti_hqvdp_late_register(struct drm_plane *drm_plane)
struct sti_plane *plane = to_sti_plane(drm_plane);
struct sti_hqvdp *hqvdp = to_sti_hqvdp(plane);
- return hqvdp_debugfs_init(hqvdp, drm_plane->dev->primary);
+ hqvdp_debugfs_init(hqvdp, drm_plane->dev->primary);
+
+ return 0;
}
static const struct drm_plane_funcs sti_hqvdp_plane_helpers_funcs = {
diff --git a/drivers/gpu/drm/sti/sti_mixer.c b/drivers/gpu/drm/sti/sti_mixer.c
index c3a3e1e5fc8a..7e5f14646625 100644
--- a/drivers/gpu/drm/sti/sti_mixer.c
+++ b/drivers/gpu/drm/sti/sti_mixer.c
@@ -178,7 +178,7 @@ static struct drm_info_list mixer1_debugfs_files[] = {
{ "mixer_aux", mixer_dbg_show, 0, NULL },
};
-int sti_mixer_debugfs_init(struct sti_mixer *mixer, struct drm_minor *minor)
+void sti_mixer_debugfs_init(struct sti_mixer *mixer, struct drm_minor *minor)
{
unsigned int i;
struct drm_info_list *mixer_debugfs_files;
@@ -194,15 +194,15 @@ int sti_mixer_debugfs_init(struct sti_mixer *mixer, struct drm_minor *minor)
nb_files = ARRAY_SIZE(mixer1_debugfs_files);
break;
default:
- return -EINVAL;
+ return;
}
for (i = 0; i < nb_files; i++)
mixer_debugfs_files[i].data = mixer;
- return drm_debugfs_create_files(mixer_debugfs_files,
- nb_files,
- minor->debugfs_root, minor);
+ drm_debugfs_create_files(mixer_debugfs_files,
+ nb_files,
+ minor->debugfs_root, minor);
}
void sti_mixer_set_background_status(struct sti_mixer *mixer, bool enable)
diff --git a/drivers/gpu/drm/sti/sti_mixer.h b/drivers/gpu/drm/sti/sti_mixer.h
index d9544246913a..ab06beb7b258 100644
--- a/drivers/gpu/drm/sti/sti_mixer.h
+++ b/drivers/gpu/drm/sti/sti_mixer.h
@@ -58,7 +58,7 @@ int sti_mixer_active_video_area(struct sti_mixer *mixer,
void sti_mixer_set_background_status(struct sti_mixer *mixer, bool enable);
-int sti_mixer_debugfs_init(struct sti_mixer *mixer, struct drm_minor *minor);
+void sti_mixer_debugfs_init(struct sti_mixer *mixer, struct drm_minor *minor);
/* depth in Cross-bar control = z order */
#define GAM_MIXER_NB_DEPTH_LEVEL 6
diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c
index c36a8da373cb..df3817f0fd30 100644
--- a/drivers/gpu/drm/sti/sti_tvout.c
+++ b/drivers/gpu/drm/sti/sti_tvout.c
@@ -570,16 +570,16 @@ static struct drm_info_list tvout_debugfs_files[] = {
{ "tvout", tvout_dbg_show, 0, NULL },
};
-static int tvout_debugfs_init(struct sti_tvout *tvout, struct drm_minor *minor)
+static void tvout_debugfs_init(struct sti_tvout *tvout, struct drm_minor *minor)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(tvout_debugfs_files); i++)
tvout_debugfs_files[i].data = tvout;
- return drm_debugfs_create_files(tvout_debugfs_files,
- ARRAY_SIZE(tvout_debugfs_files),
- minor->debugfs_root, minor);
+ drm_debugfs_create_files(tvout_debugfs_files,
+ ARRAY_SIZE(tvout_debugfs_files),
+ minor->debugfs_root, minor);
}
static void sti_tvout_encoder_dpms(struct drm_encoder *encoder, int mode)
@@ -603,14 +603,11 @@ static void sti_tvout_encoder_destroy(struct drm_encoder *encoder)
static int sti_tvout_late_register(struct drm_encoder *encoder)
{
struct sti_tvout *tvout = to_sti_tvout(encoder);
- int ret;
if (tvout->debugfs_registered)
return 0;
- ret = tvout_debugfs_init(tvout, encoder->dev->primary);
- if (ret)
- return ret;
+ tvout_debugfs_init(tvout, encoder->dev->primary);
tvout->debugfs_registered = true;
return 0;
diff --git a/drivers/gpu/drm/sti/sti_vid.c b/drivers/gpu/drm/sti/sti_vid.c
index 2d4230410464..2d818397918d 100644
--- a/drivers/gpu/drm/sti/sti_vid.c
+++ b/drivers/gpu/drm/sti/sti_vid.c
@@ -124,16 +124,16 @@ static struct drm_info_list vid_debugfs_files[] = {
{ "vid", vid_dbg_show, 0, NULL },
};
-int vid_debugfs_init(struct sti_vid *vid, struct drm_minor *minor)
+void vid_debugfs_init(struct sti_vid *vid, struct drm_minor *minor)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(vid_debugfs_files); i++)
vid_debugfs_files[i].data = vid;
- return drm_debugfs_create_files(vid_debugfs_files,
- ARRAY_SIZE(vid_debugfs_files),
- minor->debugfs_root, minor);
+ drm_debugfs_create_files(vid_debugfs_files,
+ ARRAY_SIZE(vid_debugfs_files),
+ minor->debugfs_root, minor);
}
void sti_vid_commit(struct sti_vid *vid,
diff --git a/drivers/gpu/drm/sti/sti_vid.h b/drivers/gpu/drm/sti/sti_vid.h
index 9dbd78461de1..991849ba50b5 100644
--- a/drivers/gpu/drm/sti/sti_vid.h
+++ b/drivers/gpu/drm/sti/sti_vid.h
@@ -26,6 +26,6 @@ void sti_vid_disable(struct sti_vid *vid);
struct sti_vid *sti_vid_create(struct device *dev, struct drm_device *drm_dev,
int id, void __iomem *baseaddr);
-int vid_debugfs_init(struct sti_vid *vid, struct drm_minor *minor);
+void vid_debugfs_init(struct sti_vid *vid, struct drm_minor *minor);
#endif
diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c
index ea9fcbdc68b3..0f85dd86cafa 100644
--- a/drivers/gpu/drm/stm/drv.c
+++ b/drivers/gpu/drm/stm/drv.c
@@ -88,7 +88,9 @@ static int drv_load(struct drm_device *ddev)
ddev->dev_private = (void *)ldev;
- drm_mode_config_init(ddev);
+ ret = drmm_mode_config_init(ddev);
+ if (ret)
+ return ret;
/*
* set max width and height as default value.
@@ -103,7 +105,7 @@ static int drv_load(struct drm_device *ddev)
ret = ltdc_load(ddev);
if (ret)
- goto err;
+ return ret;
drm_mode_config_reset(ddev);
drm_kms_helper_poll_init(ddev);
@@ -111,9 +113,6 @@ static int drv_load(struct drm_device *ddev)
platform_set_drvdata(pdev, ddev);
return 0;
-err:
- drm_mode_config_cleanup(ddev);
- return ret;
}
static void drv_unload(struct drm_device *ddev)
@@ -122,7 +121,6 @@ static void drv_unload(struct drm_device *ddev)
drm_kms_helper_poll_fini(ddev);
ltdc_unload(ddev);
- drm_mode_config_cleanup(ddev);
}
static __maybe_unused int drv_suspend(struct device *dev)
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index df585fe64f61..f894968d6e45 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -42,8 +42,6 @@
#define MAX_IRQ 4
-#define MAX_ENDPOINTS 2
-
#define HWVER_10200 0x010200
#define HWVER_10300 0x010300
#define HWVER_20101 0x020101
@@ -1201,36 +1199,20 @@ int ltdc_load(struct drm_device *ddev)
struct ltdc_device *ldev = ddev->dev_private;
struct device *dev = ddev->dev;
struct device_node *np = dev->of_node;
- struct drm_bridge *bridge[MAX_ENDPOINTS] = {NULL};
- struct drm_panel *panel[MAX_ENDPOINTS] = {NULL};
+ struct drm_bridge *bridge;
+ struct drm_panel *panel;
struct drm_crtc *crtc;
struct reset_control *rstc;
struct resource *res;
- int irq, ret, i, endpoint_not_ready = -ENODEV;
+ int irq, i, nb_endpoints;
+ int ret = -ENODEV;
DRM_DEBUG_DRIVER("\n");
- /* Get endpoints if any */
- for (i = 0; i < MAX_ENDPOINTS; i++) {
- ret = drm_of_find_panel_or_bridge(np, 0, i, &panel[i],
- &bridge[i]);
-
- /*
- * If at least one endpoint is -EPROBE_DEFER, defer probing,
- * else if at least one endpoint is ready, continue probing.
- */
- if (ret == -EPROBE_DEFER)
- return ret;
- else if (!ret)
- endpoint_not_ready = 0;
- }
-
- if (endpoint_not_ready)
- return endpoint_not_ready;
-
- rstc = devm_reset_control_get_exclusive(dev, NULL);
-
- mutex_init(&ldev->err_lock);
+ /* Get number of endpoints */
+ nb_endpoints = of_graph_get_endpoint_count(np);
+ if (!nb_endpoints)
+ return -ENODEV;
ldev->pixel_clk = devm_clk_get(dev, "lcd");
if (IS_ERR(ldev->pixel_clk)) {
@@ -1244,6 +1226,43 @@ int ltdc_load(struct drm_device *ddev)
return -ENODEV;
}
+ /* Get endpoints if any */
+ for (i = 0; i < nb_endpoints; i++) {
+ ret = drm_of_find_panel_or_bridge(np, 0, i, &panel, &bridge);
+
+ /*
+ * If at least one endpoint is -ENODEV, continue probing,
+ * else if at least one endpoint returned an error
+ * (i.e. -EPROBE_DEFER) then stop probing.
+ */
+ if (ret == -ENODEV)
+ continue;
+ else if (ret)
+ goto err;
+
+ if (panel) {
+ bridge = drm_panel_bridge_add_typed(panel,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(bridge)) {
+ DRM_ERROR("panel-bridge endpoint %d\n", i);
+ ret = PTR_ERR(bridge);
+ goto err;
+ }
+ }
+
+ if (bridge) {
+ ret = ltdc_encoder_init(ddev, bridge);
+ if (ret) {
+ DRM_ERROR("init encoder endpoint %d\n", i);
+ goto err;
+ }
+ }
+ }
+
+ rstc = devm_reset_control_get_exclusive(dev, NULL);
+
+ mutex_init(&ldev->err_lock);
+
if (!IS_ERR(rstc)) {
reset_control_assert(rstc);
usleep_range(10, 20);
@@ -1285,27 +1304,7 @@ int ltdc_load(struct drm_device *ddev)
DRM_ERROR("Failed to register LTDC interrupt\n");
goto err;
}
- }
- /* Add endpoints panels or bridges if any */
- for (i = 0; i < MAX_ENDPOINTS; i++) {
- if (panel[i]) {
- bridge[i] = drm_panel_bridge_add_typed(panel[i],
- DRM_MODE_CONNECTOR_DPI);
- if (IS_ERR(bridge[i])) {
- DRM_ERROR("panel-bridge endpoint %d\n", i);
- ret = PTR_ERR(bridge[i]);
- goto err;
- }
- }
-
- if (bridge[i]) {
- ret = ltdc_encoder_init(ddev, bridge[i]);
- if (ret) {
- DRM_ERROR("init encoder endpoint %d\n", i);
- goto err;
- }
- }
}
crtc = devm_kzalloc(dev, sizeof(*crtc), GFP_KERNEL);
@@ -1340,8 +1339,8 @@ int ltdc_load(struct drm_device *ddev)
return 0;
err:
- for (i = 0; i < MAX_ENDPOINTS; i++)
- drm_panel_bridge_remove(bridge[i]);
+ for (i = 0; i < nb_endpoints; i++)
+ drm_of_panel_bridge_remove(ddev->dev->of_node, 0, i);
clk_disable_unprepare(ldev->pixel_clk);
@@ -1350,11 +1349,14 @@ err:
void ltdc_unload(struct drm_device *ddev)
{
- int i;
+ struct device *dev = ddev->dev;
+ int nb_endpoints, i;
DRM_DEBUG_DRIVER("\n");
- for (i = 0; i < MAX_ENDPOINTS; i++)
+ nb_endpoints = of_graph_get_endpoint_count(dev->of_node);
+
+ for (i = 0; i < nb_endpoints; i++)
drm_of_panel_bridge_remove(ddev->dev->of_node, 0, i);
pm_runtime_disable(ddev->dev);
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi.h b/drivers/gpu/drm/sun4i/sun4i_hdmi.h
index 7ad3f06c127e..00ca35f07ba5 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi.h
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi.h
@@ -148,7 +148,7 @@
#define SUN4I_HDMI_DDC_CMD_IMPLICIT_WRITE 3
#define SUN4I_HDMI_DDC_CLK_REG 0x528
-#define SUN4I_HDMI_DDC_CLK_M(m) (((m) & 0x7) << 3)
+#define SUN4I_HDMI_DDC_CLK_M(m) (((m) & 0xf) << 3)
#define SUN4I_HDMI_DDC_CLK_N(n) ((n) & 0x7)
#define SUN4I_HDMI_DDC_LINE_CTRL_REG 0x540
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c
index 2ff780114106..12430b9d4e93 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c
@@ -33,7 +33,7 @@ static unsigned long sun4i_ddc_calc_divider(unsigned long rate,
unsigned long best_rate = 0;
u8 best_m = 0, best_n = 0, _m, _n;
- for (_m = 0; _m < 8; _m++) {
+ for (_m = 0; _m < 16; _m++) {
for (_n = 0; _n < 8; _n++) {
unsigned long tmp_rate;
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
index 68d4644ac2dc..ce07ddc3e058 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
@@ -22,6 +22,7 @@
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "sun4i_backend.h"
#include "sun4i_crtc.h"
@@ -204,10 +205,6 @@ static const struct drm_encoder_helper_funcs sun4i_hdmi_helper_funcs = {
.mode_valid = sun4i_hdmi_mode_valid,
};
-static const struct drm_encoder_funcs sun4i_hdmi_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static int sun4i_hdmi_get_modes(struct drm_connector *connector)
{
struct sun4i_hdmi *hdmi = drm_connector_to_sun4i_hdmi(connector);
@@ -282,7 +279,7 @@ static const struct drm_connector_funcs sun4i_hdmi_connector_funcs = {
};
#ifdef CONFIG_DRM_SUN4I_HDMI_CEC
-static bool sun4i_hdmi_cec_pin_read(struct cec_adapter *adap)
+static int sun4i_hdmi_cec_pin_read(struct cec_adapter *adap)
{
struct sun4i_hdmi *hdmi = cec_get_drvdata(adap);
@@ -611,11 +608,8 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
drm_encoder_helper_add(&hdmi->encoder,
&sun4i_hdmi_helper_funcs);
- ret = drm_encoder_init(drm,
- &hdmi->encoder,
- &sun4i_hdmi_funcs,
- DRM_MODE_ENCODER_TMDS,
- NULL);
+ ret = drm_simple_encoder_init(drm, &hdmi->encoder,
+ DRM_MODE_ENCODER_TMDS);
if (ret) {
dev_err(dev, "Couldn't initialise the HDMI encoder\n");
goto err_put_ddc_i2c;
diff --git a/drivers/gpu/drm/sun4i/sun4i_lvds.c b/drivers/gpu/drm/sun4i/sun4i_lvds.c
index 26e5c7ceb8ff..ffda3184aa12 100644
--- a/drivers/gpu/drm/sun4i/sun4i_lvds.c
+++ b/drivers/gpu/drm/sun4i/sun4i_lvds.c
@@ -12,6 +12,7 @@
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "sun4i_crtc.h"
#include "sun4i_tcon.h"
@@ -96,10 +97,6 @@ static const struct drm_encoder_helper_funcs sun4i_lvds_enc_helper_funcs = {
.enable = sun4i_lvds_encoder_enable,
};
-static const struct drm_encoder_funcs sun4i_lvds_enc_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
int sun4i_lvds_init(struct drm_device *drm, struct sun4i_tcon *tcon)
{
struct drm_encoder *encoder;
@@ -121,11 +118,8 @@ int sun4i_lvds_init(struct drm_device *drm, struct sun4i_tcon *tcon)
drm_encoder_helper_add(&lvds->encoder,
&sun4i_lvds_enc_helper_funcs);
- ret = drm_encoder_init(drm,
- &lvds->encoder,
- &sun4i_lvds_enc_funcs,
- DRM_MODE_ENCODER_LVDS,
- NULL);
+ ret = drm_simple_encoder_init(drm, &lvds->encoder,
+ DRM_MODE_ENCODER_LVDS);
if (ret) {
dev_err(drm->dev, "Couldn't initialise the lvds encoder\n");
goto err_out;
diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c
index 3b23d5be3cf3..5a7d43939ae6 100644
--- a/drivers/gpu/drm/sun4i/sun4i_rgb.c
+++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c
@@ -14,6 +14,7 @@
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "sun4i_crtc.h"
#include "sun4i_tcon.h"
@@ -188,15 +189,6 @@ static struct drm_encoder_helper_funcs sun4i_rgb_enc_helper_funcs = {
.mode_valid = sun4i_rgb_mode_valid,
};
-static void sun4i_rgb_enc_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
-}
-
-static struct drm_encoder_funcs sun4i_rgb_enc_funcs = {
- .destroy = sun4i_rgb_enc_destroy,
-};
-
int sun4i_rgb_init(struct drm_device *drm, struct sun4i_tcon *tcon)
{
struct drm_encoder *encoder;
@@ -218,11 +210,8 @@ int sun4i_rgb_init(struct drm_device *drm, struct sun4i_tcon *tcon)
drm_encoder_helper_add(&rgb->encoder,
&sun4i_rgb_enc_helper_funcs);
- ret = drm_encoder_init(drm,
- &rgb->encoder,
- &sun4i_rgb_enc_funcs,
- DRM_MODE_ENCODER_NONE,
- NULL);
+ ret = drm_simple_encoder_init(drm, &rgb->encoder,
+ DRM_MODE_ENCODER_NONE);
if (ret) {
dev_err(drm->dev, "Couldn't initialise the rgb encoder\n");
goto err_out;
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index 624437b27cdc..359b56e43b83 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -812,10 +812,8 @@ static int sun4i_tcon_init_irq(struct device *dev,
int irq, ret;
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "Couldn't retrieve the TCON interrupt\n");
+ if (irq < 0)
return irq;
- }
ret = devm_request_irq(dev, irq, sun4i_tcon_handler, 0,
dev_name(dev), tcon);
diff --git a/drivers/gpu/drm/sun4i/sun4i_tv.c b/drivers/gpu/drm/sun4i/sun4i_tv.c
index 39c15282e448..63f4428ac3bf 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tv.c
@@ -19,6 +19,7 @@
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "sun4i_crtc.h"
#include "sun4i_drv.h"
@@ -473,15 +474,6 @@ static struct drm_encoder_helper_funcs sun4i_tv_helper_funcs = {
.mode_set = sun4i_tv_mode_set,
};
-static void sun4i_tv_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
-}
-
-static struct drm_encoder_funcs sun4i_tv_funcs = {
- .destroy = sun4i_tv_destroy,
-};
-
static int sun4i_tv_comp_get_modes(struct drm_connector *connector)
{
int i;
@@ -592,11 +584,8 @@ static int sun4i_tv_bind(struct device *dev, struct device *master,
drm_encoder_helper_add(&tv->encoder,
&sun4i_tv_helper_funcs);
- ret = drm_encoder_init(drm,
- &tv->encoder,
- &sun4i_tv_funcs,
- DRM_MODE_ENCODER_TVDAC,
- NULL);
+ ret = drm_simple_encoder_init(drm, &tv->encoder,
+ DRM_MODE_ENCODER_TVDAC);
if (ret) {
dev_err(dev, "Couldn't initialise the TV encoder\n");
goto err_disable_clk;
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
index 3eb89f1eb0e1..aa67cb037e9d 100644
--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
@@ -24,6 +24,7 @@
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "sun4i_crtc.h"
#include "sun4i_tcon.h"
@@ -846,10 +847,6 @@ static const struct drm_encoder_helper_funcs sun6i_dsi_enc_helper_funcs = {
.enable = sun6i_dsi_encoder_enable,
};
-static const struct drm_encoder_funcs sun6i_dsi_enc_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static u32 sun6i_dsi_dcs_build_pkt_hdr(struct sun6i_dsi *dsi,
const struct mipi_dsi_msg *msg)
{
@@ -1062,11 +1059,8 @@ static int sun6i_dsi_bind(struct device *dev, struct device *master,
drm_encoder_helper_add(&dsi->encoder,
&sun6i_dsi_enc_helper_funcs);
- ret = drm_encoder_init(drm,
- &dsi->encoder,
- &sun6i_dsi_enc_funcs,
- DRM_MODE_ENCODER_DSI,
- NULL);
+ ret = drm_simple_encoder_init(drm, &dsi->encoder,
+ DRM_MODE_ENCODER_DSI);
if (ret) {
dev_err(dsi->dev, "Couldn't initialise the DSI encoder\n");
return ret;
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
index e8a317d5ba19..972682bb8000 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
@@ -10,6 +10,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_of.h>
+#include <drm/drm_simple_kms_helper.h>
#include "sun8i_dw_hdmi.h"
#include "sun8i_tcon_top.h"
@@ -29,10 +30,6 @@ sun8i_dw_hdmi_encoder_helper_funcs = {
.mode_set = sun8i_dw_hdmi_encoder_mode_set,
};
-static const struct drm_encoder_funcs sun8i_dw_hdmi_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static enum drm_mode_status
sun8i_dw_hdmi_mode_valid_a83t(struct drm_connector *connector,
const struct drm_display_mode *mode)
@@ -220,8 +217,7 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
}
drm_encoder_helper_add(encoder, &sun8i_dw_hdmi_encoder_helper_funcs);
- drm_encoder_init(drm, encoder, &sun8i_dw_hdmi_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
+ drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
sun8i_hdmi_phy_init(hdmi->phy);
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c
index 4a64f7ae437a..56cc037fd312 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c
@@ -27,314 +27,225 @@
#include "sun8i_vi_layer.h"
#include "sunxi_engine.h"
+struct de2_fmt_info {
+ u32 drm_fmt;
+ u32 de2_fmt;
+};
+
static const struct de2_fmt_info de2_formats[] = {
{
.drm_fmt = DRM_FORMAT_ARGB8888,
.de2_fmt = SUN8I_MIXER_FBFMT_ARGB8888,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_ABGR8888,
.de2_fmt = SUN8I_MIXER_FBFMT_ABGR8888,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_RGBA8888,
.de2_fmt = SUN8I_MIXER_FBFMT_RGBA8888,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_BGRA8888,
.de2_fmt = SUN8I_MIXER_FBFMT_BGRA8888,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_XRGB8888,
.de2_fmt = SUN8I_MIXER_FBFMT_XRGB8888,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_XBGR8888,
.de2_fmt = SUN8I_MIXER_FBFMT_XBGR8888,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_RGBX8888,
.de2_fmt = SUN8I_MIXER_FBFMT_RGBX8888,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_BGRX8888,
.de2_fmt = SUN8I_MIXER_FBFMT_BGRX8888,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_RGB888,
.de2_fmt = SUN8I_MIXER_FBFMT_RGB888,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_BGR888,
.de2_fmt = SUN8I_MIXER_FBFMT_BGR888,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_RGB565,
.de2_fmt = SUN8I_MIXER_FBFMT_RGB565,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_BGR565,
.de2_fmt = SUN8I_MIXER_FBFMT_BGR565,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_ARGB4444,
.de2_fmt = SUN8I_MIXER_FBFMT_ARGB4444,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
/* for DE2 VI layer which ignores alpha */
.drm_fmt = DRM_FORMAT_XRGB4444,
.de2_fmt = SUN8I_MIXER_FBFMT_ARGB4444,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_ABGR4444,
.de2_fmt = SUN8I_MIXER_FBFMT_ABGR4444,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
/* for DE2 VI layer which ignores alpha */
.drm_fmt = DRM_FORMAT_XBGR4444,
.de2_fmt = SUN8I_MIXER_FBFMT_ABGR4444,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_RGBA4444,
.de2_fmt = SUN8I_MIXER_FBFMT_RGBA4444,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
/* for DE2 VI layer which ignores alpha */
.drm_fmt = DRM_FORMAT_RGBX4444,
.de2_fmt = SUN8I_MIXER_FBFMT_RGBA4444,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_BGRA4444,
.de2_fmt = SUN8I_MIXER_FBFMT_BGRA4444,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
/* for DE2 VI layer which ignores alpha */
.drm_fmt = DRM_FORMAT_BGRX4444,
.de2_fmt = SUN8I_MIXER_FBFMT_BGRA4444,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_ARGB1555,
.de2_fmt = SUN8I_MIXER_FBFMT_ARGB1555,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
/* for DE2 VI layer which ignores alpha */
.drm_fmt = DRM_FORMAT_XRGB1555,
.de2_fmt = SUN8I_MIXER_FBFMT_ARGB1555,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_ABGR1555,
.de2_fmt = SUN8I_MIXER_FBFMT_ABGR1555,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
/* for DE2 VI layer which ignores alpha */
.drm_fmt = DRM_FORMAT_XBGR1555,
.de2_fmt = SUN8I_MIXER_FBFMT_ABGR1555,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_RGBA5551,
.de2_fmt = SUN8I_MIXER_FBFMT_RGBA5551,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
/* for DE2 VI layer which ignores alpha */
.drm_fmt = DRM_FORMAT_RGBX5551,
.de2_fmt = SUN8I_MIXER_FBFMT_RGBA5551,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_BGRA5551,
.de2_fmt = SUN8I_MIXER_FBFMT_BGRA5551,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
/* for DE2 VI layer which ignores alpha */
.drm_fmt = DRM_FORMAT_BGRX5551,
.de2_fmt = SUN8I_MIXER_FBFMT_BGRA5551,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_ARGB2101010,
.de2_fmt = SUN8I_MIXER_FBFMT_ARGB2101010,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_ABGR2101010,
.de2_fmt = SUN8I_MIXER_FBFMT_ABGR2101010,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_RGBA1010102,
.de2_fmt = SUN8I_MIXER_FBFMT_RGBA1010102,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_BGRA1010102,
.de2_fmt = SUN8I_MIXER_FBFMT_BGRA1010102,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_UYVY,
.de2_fmt = SUN8I_MIXER_FBFMT_UYVY,
- .rgb = false,
- .csc = SUN8I_CSC_MODE_YUV2RGB,
},
{
.drm_fmt = DRM_FORMAT_VYUY,
.de2_fmt = SUN8I_MIXER_FBFMT_VYUY,
- .rgb = false,
- .csc = SUN8I_CSC_MODE_YUV2RGB,
},
{
.drm_fmt = DRM_FORMAT_YUYV,
.de2_fmt = SUN8I_MIXER_FBFMT_YUYV,
- .rgb = false,
- .csc = SUN8I_CSC_MODE_YUV2RGB,
},
{
.drm_fmt = DRM_FORMAT_YVYU,
.de2_fmt = SUN8I_MIXER_FBFMT_YVYU,
- .rgb = false,
- .csc = SUN8I_CSC_MODE_YUV2RGB,
},
{
.drm_fmt = DRM_FORMAT_NV16,
.de2_fmt = SUN8I_MIXER_FBFMT_NV16,
- .rgb = false,
- .csc = SUN8I_CSC_MODE_YUV2RGB,
},
{
.drm_fmt = DRM_FORMAT_NV61,
.de2_fmt = SUN8I_MIXER_FBFMT_NV61,
- .rgb = false,
- .csc = SUN8I_CSC_MODE_YUV2RGB,
},
{
.drm_fmt = DRM_FORMAT_NV12,
.de2_fmt = SUN8I_MIXER_FBFMT_NV12,
- .rgb = false,
- .csc = SUN8I_CSC_MODE_YUV2RGB,
},
{
.drm_fmt = DRM_FORMAT_NV21,
.de2_fmt = SUN8I_MIXER_FBFMT_NV21,
- .rgb = false,
- .csc = SUN8I_CSC_MODE_YUV2RGB,
},
{
.drm_fmt = DRM_FORMAT_YUV422,
.de2_fmt = SUN8I_MIXER_FBFMT_YUV422,
- .rgb = false,
- .csc = SUN8I_CSC_MODE_YUV2RGB,
},
{
.drm_fmt = DRM_FORMAT_YUV420,
.de2_fmt = SUN8I_MIXER_FBFMT_YUV420,
- .rgb = false,
- .csc = SUN8I_CSC_MODE_YUV2RGB,
},
{
.drm_fmt = DRM_FORMAT_YUV411,
.de2_fmt = SUN8I_MIXER_FBFMT_YUV411,
- .rgb = false,
- .csc = SUN8I_CSC_MODE_YUV2RGB,
},
{
.drm_fmt = DRM_FORMAT_YVU422,
.de2_fmt = SUN8I_MIXER_FBFMT_YUV422,
- .rgb = false,
- .csc = SUN8I_CSC_MODE_YVU2RGB,
},
{
.drm_fmt = DRM_FORMAT_YVU420,
.de2_fmt = SUN8I_MIXER_FBFMT_YUV420,
- .rgb = false,
- .csc = SUN8I_CSC_MODE_YVU2RGB,
},
{
.drm_fmt = DRM_FORMAT_YVU411,
.de2_fmt = SUN8I_MIXER_FBFMT_YUV411,
- .rgb = false,
- .csc = SUN8I_CSC_MODE_YVU2RGB,
},
{
.drm_fmt = DRM_FORMAT_P010,
.de2_fmt = SUN8I_MIXER_FBFMT_P010_YUV,
- .rgb = false,
- .csc = SUN8I_CSC_MODE_YUV2RGB,
},
{
.drm_fmt = DRM_FORMAT_P210,
.de2_fmt = SUN8I_MIXER_FBFMT_P210_YUV,
- .rgb = false,
- .csc = SUN8I_CSC_MODE_YUV2RGB,
},
};
-const struct de2_fmt_info *sun8i_mixer_format_info(u32 format)
+int sun8i_mixer_drm_format_to_hw(u32 format, u32 *hw_format)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(de2_formats); ++i)
- if (de2_formats[i].drm_fmt == format)
- return &de2_formats[i];
+ if (de2_formats[i].drm_fmt == format) {
+ *hw_format = de2_formats[i].de2_fmt;
+ return 0;
+ }
- return NULL;
+ return -EINVAL;
}
static void sun8i_mixer_commit(struct sunxi_engine *engine)
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.h b/drivers/gpu/drm/sun4i/sun8i_mixer.h
index 345b28b0a80a..7576b523fdbb 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.h
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.h
@@ -10,7 +10,6 @@
#include <linux/regmap.h>
#include <linux/reset.h>
-#include "sun8i_csc.h"
#include "sunxi_engine.h"
#define SUN8I_MIXER_SIZE(w, h) (((h) - 1) << 16 | ((w) - 1))
@@ -144,13 +143,6 @@
#define SUN50I_MIXER_CDC0_EN 0xd0000
#define SUN50I_MIXER_CDC1_EN 0xd8000
-struct de2_fmt_info {
- u32 drm_fmt;
- u32 de2_fmt;
- bool rgb;
- enum sun8i_csc_mode csc;
-};
-
/**
* struct sun8i_mixer_cfg - mixer HW configuration
* @vi_num: number of VI channels
@@ -210,5 +202,5 @@ sun8i_channel_base(struct sun8i_mixer *mixer, int channel)
return DE2_CH_BASE + channel * DE2_CH_SIZE;
}
-const struct de2_fmt_info *sun8i_mixer_format_info(u32 format);
+int sun8i_mixer_drm_format_to_hw(u32 format, u32 *hw_format);
#endif /* _SUN8I_MIXER_H_ */
diff --git a/drivers/gpu/drm/sun4i/sun8i_ui_layer.c b/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
index c87fd842918e..54f937a7d5e7 100644
--- a/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
@@ -19,8 +19,8 @@
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
-#include "sun8i_ui_layer.h"
#include "sun8i_mixer.h"
+#include "sun8i_ui_layer.h"
#include "sun8i_ui_scaler.h"
static void sun8i_ui_layer_enable(struct sun8i_mixer *mixer, int channel,
@@ -174,18 +174,20 @@ static int sun8i_ui_layer_update_formats(struct sun8i_mixer *mixer, int channel,
int overlay, struct drm_plane *plane)
{
struct drm_plane_state *state = plane->state;
- const struct de2_fmt_info *fmt_info;
- u32 val, ch_base;
+ const struct drm_format_info *fmt;
+ u32 val, ch_base, hw_fmt;
+ int ret;
ch_base = sun8i_channel_base(mixer, channel);
- fmt_info = sun8i_mixer_format_info(state->fb->format->format);
- if (!fmt_info || !fmt_info->rgb) {
+ fmt = state->fb->format;
+ ret = sun8i_mixer_drm_format_to_hw(fmt->format, &hw_fmt);
+ if (ret || fmt->is_yuv) {
DRM_DEBUG_DRIVER("Invalid format\n");
return -EINVAL;
}
- val = fmt_info->de2_fmt << SUN8I_MIXER_CHAN_UI_LAYER_ATTR_FBFMT_OFFSET;
+ val = hw_fmt << SUN8I_MIXER_CHAN_UI_LAYER_ATTR_FBFMT_OFFSET;
regmap_update_bits(mixer->engine.regs,
SUN8I_MIXER_CHAN_UI_LAYER_ATTR(ch_base, overlay),
SUN8I_MIXER_CHAN_UI_LAYER_ATTR_FBFMT_MASK, val);
diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
index b8398ca18b0f..22c8c5375d0d 100644
--- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
@@ -12,8 +12,9 @@
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
-#include "sun8i_vi_layer.h"
+#include "sun8i_csc.h"
#include "sun8i_mixer.h"
+#include "sun8i_vi_layer.h"
#include "sun8i_vi_scaler.h"
static void sun8i_vi_layer_enable(struct sun8i_mixer *mixer, int channel,
@@ -210,28 +211,47 @@ static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel,
return 0;
}
+static bool sun8i_vi_layer_get_csc_mode(const struct drm_format_info *format)
+{
+ if (!format->is_yuv)
+ return SUN8I_CSC_MODE_OFF;
+
+ switch (format->format) {
+ case DRM_FORMAT_YVU411:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YVU444:
+ return SUN8I_CSC_MODE_YVU2RGB;
+ default:
+ return SUN8I_CSC_MODE_YUV2RGB;
+ }
+}
+
static int sun8i_vi_layer_update_formats(struct sun8i_mixer *mixer, int channel,
int overlay, struct drm_plane *plane)
{
struct drm_plane_state *state = plane->state;
- const struct de2_fmt_info *fmt_info;
- u32 val, ch_base;
+ u32 val, ch_base, csc_mode, hw_fmt;
+ const struct drm_format_info *fmt;
+ int ret;
ch_base = sun8i_channel_base(mixer, channel);
- fmt_info = sun8i_mixer_format_info(state->fb->format->format);
- if (!fmt_info) {
+ fmt = state->fb->format;
+ ret = sun8i_mixer_drm_format_to_hw(fmt->format, &hw_fmt);
+ if (ret) {
DRM_DEBUG_DRIVER("Invalid format\n");
- return -EINVAL;
+ return ret;
}
- val = fmt_info->de2_fmt << SUN8I_MIXER_CHAN_VI_LAYER_ATTR_FBFMT_OFFSET;
+ val = hw_fmt << SUN8I_MIXER_CHAN_VI_LAYER_ATTR_FBFMT_OFFSET;
regmap_update_bits(mixer->engine.regs,
SUN8I_MIXER_CHAN_VI_LAYER_ATTR(ch_base, overlay),
SUN8I_MIXER_CHAN_VI_LAYER_ATTR_FBFMT_MASK, val);
- if (fmt_info->csc != SUN8I_CSC_MODE_OFF) {
- sun8i_csc_set_ccsc_coefficients(mixer, channel, fmt_info->csc,
+ csc_mode = sun8i_vi_layer_get_csc_mode(fmt);
+ if (csc_mode != SUN8I_CSC_MODE_OFF) {
+ sun8i_csc_set_ccsc_coefficients(mixer, channel, csc_mode,
state->color_encoding,
state->color_range);
sun8i_csc_enable_ccsc(mixer, channel, true);
@@ -239,7 +259,7 @@ static int sun8i_vi_layer_update_formats(struct sun8i_mixer *mixer, int channel,
sun8i_csc_enable_ccsc(mixer, channel, false);
}
- if (fmt_info->rgb)
+ if (!fmt->is_yuv)
val = SUN8I_MIXER_CHAN_VI_LAYER_ATTR_RGB_MODE;
else
val = 0;
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 1a7b08f35776..83f31c6e891c 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -1496,7 +1496,6 @@ static int tegra_dc_late_register(struct drm_crtc *crtc)
struct drm_minor *minor = crtc->dev->primary;
struct dentry *root;
struct tegra_dc *dc = to_tegra_dc(crtc);
- int err;
#ifdef CONFIG_DEBUG_FS
root = crtc->debugfs_entry;
@@ -1512,17 +1511,9 @@ static int tegra_dc_late_register(struct drm_crtc *crtc)
for (i = 0; i < count; i++)
dc->debugfs_files[i].data = dc;
- err = drm_debugfs_create_files(dc->debugfs_files, count, root, minor);
- if (err < 0)
- goto free;
+ drm_debugfs_create_files(dc->debugfs_files, count, root, minor);
return 0;
-
-free:
- kfree(dc->debugfs_files);
- dc->debugfs_files = NULL;
-
- return err;
}
static void tegra_dc_early_unregister(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
index 7dfb50f65067..105fb9cdbb3b 100644
--- a/drivers/gpu/drm/tegra/dpaux.c
+++ b/drivers/gpu/drm/tegra/dpaux.c
@@ -5,12 +5,10 @@
#include <linux/clk.h>
#include <linux/delay.h>
-#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinmux.h>
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 583cd6e0ae27..211906347f3f 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -839,11 +839,11 @@ static struct drm_info_list tegra_debugfs_list[] = {
{ "iova", tegra_debugfs_iova, 0 },
};
-static int tegra_debugfs_init(struct drm_minor *minor)
+static void tegra_debugfs_init(struct drm_minor *minor)
{
- return drm_debugfs_create_files(tegra_debugfs_list,
- ARRAY_SIZE(tegra_debugfs_list),
- minor->debugfs_root, minor);
+ drm_debugfs_create_files(tegra_debugfs_list,
+ ARRAY_SIZE(tegra_debugfs_list),
+ minor->debugfs_root, minor);
}
#endif
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index ed99b67deb29..b25443255be6 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -9,7 +9,7 @@
#include <linux/host1x.h>
#include <linux/iova.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
#include <drm/drm_atomic.h>
#include <drm/drm_edid.h>
@@ -152,8 +152,6 @@ enum drm_connector_status
tegra_output_connector_detect(struct drm_connector *connector, bool force);
void tegra_output_connector_destroy(struct drm_connector *connector);
-void tegra_output_encoder_destroy(struct drm_encoder *encoder);
-
/* from dpaux.c */
struct drm_dp_aux *drm_dp_aux_find_by_of_node(struct device_node *np);
enum drm_connector_status drm_dp_aux_detect(struct drm_dp_aux *aux);
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index 88b9d64c77bf..38beab9ab4f8 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -22,6 +22,7 @@
#include <drm/drm_file.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>
+#include <drm/drm_simple_kms_helper.h>
#include "dc.h"
#include "drm.h"
@@ -234,7 +235,6 @@ static int tegra_dsi_late_register(struct drm_connector *connector)
struct drm_minor *minor = connector->dev->primary;
struct dentry *root = connector->debugfs_entry;
struct tegra_dsi *dsi = to_dsi(output);
- int err;
dsi->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files),
GFP_KERNEL);
@@ -244,17 +244,9 @@ static int tegra_dsi_late_register(struct drm_connector *connector)
for (i = 0; i < count; i++)
dsi->debugfs_files[i].data = dsi;
- err = drm_debugfs_create_files(dsi->debugfs_files, count, root, minor);
- if (err < 0)
- goto free;
+ drm_debugfs_create_files(dsi->debugfs_files, count, root, minor);
return 0;
-
-free:
- kfree(dsi->debugfs_files);
- dsi->debugfs_files = NULL;
-
- return err;
}
static void tegra_dsi_early_unregister(struct drm_connector *connector)
@@ -824,10 +816,6 @@ static const struct drm_connector_helper_funcs tegra_dsi_connector_helper_funcs
.mode_valid = tegra_dsi_connector_mode_valid,
};
-static const struct drm_encoder_funcs tegra_dsi_encoder_funcs = {
- .destroy = tegra_output_encoder_destroy,
-};
-
static void tegra_dsi_unprepare(struct tegra_dsi *dsi)
{
int err;
@@ -1058,9 +1046,8 @@ static int tegra_dsi_init(struct host1x_client *client)
&tegra_dsi_connector_helper_funcs);
dsi->output.connector.dpms = DRM_MODE_DPMS_OFF;
- drm_encoder_init(drm, &dsi->output.encoder,
- &tegra_dsi_encoder_funcs,
- DRM_MODE_ENCODER_DSI, NULL);
+ drm_simple_encoder_init(drm, &dsi->output.encoder,
+ DRM_MODE_ENCODER_DSI);
drm_encoder_helper_add(&dsi->output.encoder,
&tegra_dsi_encoder_helper_funcs);
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index b8a328f53862..2b0666ac681b 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -4,7 +4,7 @@
* Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
*
* Based on the KMS/FB CMA helpers
- * Copyright (C) 2012 Analog Device Inc.
+ * Copyright (C) 2012 Analog Devices Inc.
*/
#include <linux/console.h>
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index 38252c0f068d..d09a24931c87 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -7,7 +7,6 @@
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
-#include <linux/gpio.h>
#include <linux/hdmi.h>
#include <linux/math64.h>
#include <linux/module.h>
@@ -22,6 +21,7 @@
#include <drm/drm_file.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "hda.h"
#include "hdmi.h"
@@ -1064,7 +1064,6 @@ static int tegra_hdmi_late_register(struct drm_connector *connector)
struct drm_minor *minor = connector->dev->primary;
struct dentry *root = connector->debugfs_entry;
struct tegra_hdmi *hdmi = to_hdmi(output);
- int err;
hdmi->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files),
GFP_KERNEL);
@@ -1074,17 +1073,9 @@ static int tegra_hdmi_late_register(struct drm_connector *connector)
for (i = 0; i < count; i++)
hdmi->debugfs_files[i].data = hdmi;
- err = drm_debugfs_create_files(hdmi->debugfs_files, count, root, minor);
- if (err < 0)
- goto free;
+ drm_debugfs_create_files(hdmi->debugfs_files, count, root, minor);
return 0;
-
-free:
- kfree(hdmi->debugfs_files);
- hdmi->debugfs_files = NULL;
-
- return err;
}
static void tegra_hdmi_early_unregister(struct drm_connector *connector)
@@ -1136,10 +1127,6 @@ tegra_hdmi_connector_helper_funcs = {
.mode_valid = tegra_hdmi_connector_mode_valid,
};
-static const struct drm_encoder_funcs tegra_hdmi_encoder_funcs = {
- .destroy = tegra_output_encoder_destroy,
-};
-
static void tegra_hdmi_encoder_disable(struct drm_encoder *encoder)
{
struct tegra_output *output = encoder_to_output(encoder);
@@ -1445,8 +1432,8 @@ static int tegra_hdmi_init(struct host1x_client *client)
&tegra_hdmi_connector_helper_funcs);
hdmi->output.connector.dpms = DRM_MODE_DPMS_OFF;
- drm_encoder_init(drm, &hdmi->output.encoder, &tegra_hdmi_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
+ drm_simple_encoder_init(drm, &hdmi->output.encoder,
+ DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(&hdmi->output.encoder,
&tegra_hdmi_encoder_helper_funcs);
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c
index a264259b97a2..e36e5e7c2f69 100644
--- a/drivers/gpu/drm/tegra/output.c
+++ b/drivers/gpu/drm/tegra/output.c
@@ -6,6 +6,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_panel.h>
+#include <drm/drm_simple_kms_helper.h>
#include "drm.h"
#include "dc.h"
@@ -79,11 +80,6 @@ void tegra_output_connector_destroy(struct drm_connector *connector)
drm_connector_cleanup(connector);
}
-void tegra_output_encoder_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
-}
-
static irqreturn_t hpd_irq(int irq, void *data)
{
struct tegra_output *output = data;
diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c
index 4be4dfd4a68a..0562a7eb793f 100644
--- a/drivers/gpu/drm/tegra/rgb.c
+++ b/drivers/gpu/drm/tegra/rgb.c
@@ -8,6 +8,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_panel.h>
+#include <drm/drm_simple_kms_helper.h>
#include "drm.h"
#include "dc.h"
@@ -110,10 +111,6 @@ static const struct drm_connector_helper_funcs tegra_rgb_connector_helper_funcs
.mode_valid = tegra_rgb_connector_mode_valid,
};
-static const struct drm_encoder_funcs tegra_rgb_encoder_funcs = {
- .destroy = tegra_output_encoder_destroy,
-};
-
static void tegra_rgb_encoder_disable(struct drm_encoder *encoder)
{
struct tegra_output *output = encoder_to_output(encoder);
@@ -281,8 +278,7 @@ int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc)
&tegra_rgb_connector_helper_funcs);
output->connector.dpms = DRM_MODE_DPMS_OFF;
- drm_encoder_init(drm, &output->encoder, &tegra_rgb_encoder_funcs,
- DRM_MODE_ENCODER_LVDS, NULL);
+ drm_simple_encoder_init(drm, &output->encoder, DRM_MODE_ENCODER_LVDS);
drm_encoder_helper_add(&output->encoder,
&tegra_rgb_encoder_helper_funcs);
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index 81226a4953c1..7cbcf9617f5e 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -6,7 +6,6 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/debugfs.h>
-#include <linux/gpio.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_device.h>
@@ -23,6 +22,7 @@
#include <drm/drm_file.h>
#include <drm/drm_panel.h>
#include <drm/drm_scdc_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "dc.h"
#include "dp.h"
@@ -1687,7 +1687,6 @@ static int tegra_sor_late_register(struct drm_connector *connector)
struct drm_minor *minor = connector->dev->primary;
struct dentry *root = connector->debugfs_entry;
struct tegra_sor *sor = to_sor(output);
- int err;
sor->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files),
GFP_KERNEL);
@@ -1697,17 +1696,9 @@ static int tegra_sor_late_register(struct drm_connector *connector)
for (i = 0; i < count; i++)
sor->debugfs_files[i].data = sor;
- err = drm_debugfs_create_files(sor->debugfs_files, count, root, minor);
- if (err < 0)
- goto free;
+ drm_debugfs_create_files(sor->debugfs_files, count, root, minor);
return 0;
-
-free:
- kfree(sor->debugfs_files);
- sor->debugfs_files = NULL;
-
- return err;
}
static void tegra_sor_early_unregister(struct drm_connector *connector)
@@ -1805,10 +1796,6 @@ static const struct drm_connector_helper_funcs tegra_sor_connector_helper_funcs
.mode_valid = tegra_sor_connector_mode_valid,
};
-static const struct drm_encoder_funcs tegra_sor_encoder_funcs = {
- .destroy = tegra_output_encoder_destroy,
-};
-
static int
tegra_sor_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
@@ -3102,8 +3089,7 @@ static int tegra_sor_init(struct host1x_client *client)
&tegra_sor_connector_helper_funcs);
sor->output.connector.dpms = DRM_MODE_DPMS_OFF;
- drm_encoder_init(drm, &sor->output.encoder, &tegra_sor_encoder_funcs,
- encoder, NULL);
+ drm_simple_encoder_init(drm, &sor->output.encoder, encoder);
drm_encoder_helper_add(&sor->output.encoder, helpers);
drm_connector_attach_encoder(&sor->output.connector,
diff --git a/drivers/gpu/drm/tidss/tidss_crtc.c b/drivers/gpu/drm/tidss/tidss_crtc.c
index 3221a707e073..89a226912de8 100644
--- a/drivers/gpu/drm/tidss/tidss_crtc.c
+++ b/drivers/gpu/drm/tidss/tidss_crtc.c
@@ -24,7 +24,7 @@
static void tidss_crtc_finish_page_flip(struct tidss_crtc *tcrtc)
{
struct drm_device *ddev = tcrtc->crtc.dev;
- struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_device *tidss = to_tidss(ddev);
struct drm_pending_vblank_event *event;
unsigned long flags;
bool busy;
@@ -88,7 +88,7 @@ static int tidss_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
struct drm_device *ddev = crtc->dev;
- struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_device *tidss = to_tidss(ddev);
struct dispc_device *dispc = tidss->dispc;
struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
u32 hw_videoport = tcrtc->hw_videoport;
@@ -165,7 +165,7 @@ static void tidss_crtc_atomic_flush(struct drm_crtc *crtc,
{
struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
struct drm_device *ddev = crtc->dev;
- struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_device *tidss = to_tidss(ddev);
unsigned long flags;
dev_dbg(ddev->dev,
@@ -216,7 +216,7 @@ static void tidss_crtc_atomic_enable(struct drm_crtc *crtc,
{
struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
struct drm_device *ddev = crtc->dev;
- struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_device *tidss = to_tidss(ddev);
const struct drm_display_mode *mode = &crtc->state->adjusted_mode;
unsigned long flags;
int r;
@@ -259,7 +259,7 @@ static void tidss_crtc_atomic_disable(struct drm_crtc *crtc,
{
struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
struct drm_device *ddev = crtc->dev;
- struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_device *tidss = to_tidss(ddev);
unsigned long flags;
dev_dbg(ddev->dev, "%s, event %p\n", __func__, crtc->state->event);
@@ -295,7 +295,7 @@ enum drm_mode_status tidss_crtc_mode_valid(struct drm_crtc *crtc,
{
struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
struct drm_device *ddev = crtc->dev;
- struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_device *tidss = to_tidss(ddev);
return dispc_vp_mode_valid(tidss->dispc, tcrtc->hw_videoport, mode);
}
@@ -314,7 +314,7 @@ static const struct drm_crtc_helper_funcs tidss_crtc_helper_funcs = {
static int tidss_crtc_enable_vblank(struct drm_crtc *crtc)
{
struct drm_device *ddev = crtc->dev;
- struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_device *tidss = to_tidss(ddev);
dev_dbg(ddev->dev, "%s\n", __func__);
@@ -328,7 +328,7 @@ static int tidss_crtc_enable_vblank(struct drm_crtc *crtc)
static void tidss_crtc_disable_vblank(struct drm_crtc *crtc)
{
struct drm_device *ddev = crtc->dev;
- struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_device *tidss = to_tidss(ddev);
dev_dbg(ddev->dev, "%s\n", __func__);
diff --git a/drivers/gpu/drm/tidss/tidss_dispc.c b/drivers/gpu/drm/tidss/tidss_dispc.c
index 29f42768e294..629dd06393f6 100644
--- a/drivers/gpu/drm/tidss/tidss_dispc.c
+++ b/drivers/gpu/drm/tidss/tidss_dispc.c
@@ -181,10 +181,6 @@ const struct dispc_features dispc_am65x_feats = {
.vid_name = { "vid", "vidl1" },
.vid_lite = { false, true, },
.vid_order = { 1, 0 },
-
- .errata = {
- .i2000 = true,
- },
};
static const u16 tidss_j721e_common_regs[DISPC_COMMON_REG_TABLE_LEN] = {
@@ -2674,12 +2670,9 @@ int dispc_init(struct tidss_device *tidss)
return -ENOMEM;
num_fourccs = 0;
- for (i = 0; i < ARRAY_SIZE(dispc_color_formats); ++i) {
- if (feat->errata.i2000 &&
- dispc_fourcc_is_yuv(dispc_color_formats[i].fourcc))
- continue;
+ for (i = 0; i < ARRAY_SIZE(dispc_color_formats); ++i)
dispc->fourccs[num_fourccs++] = dispc_color_formats[i].fourcc;
- }
+
dispc->num_fourccs = num_fourccs;
dispc->tidss = tidss;
dispc->dev = dev;
diff --git a/drivers/gpu/drm/tidss/tidss_dispc.h b/drivers/gpu/drm/tidss/tidss_dispc.h
index a4a68249e44b..902e612ff7ac 100644
--- a/drivers/gpu/drm/tidss/tidss_dispc.h
+++ b/drivers/gpu/drm/tidss/tidss_dispc.h
@@ -46,10 +46,6 @@ struct dispc_features_scaling {
u32 xinc_max;
};
-struct dispc_errata {
- bool i2000; /* DSS Does Not Support YUV Pixel Data Formats */
-};
-
enum dispc_vp_bus_type {
DISPC_VP_DPI, /* DPI output */
DISPC_VP_OLDI, /* OLDI (LVDS) output */
@@ -83,8 +79,6 @@ struct dispc_features {
const char *vid_name[TIDSS_MAX_PLANES]; /* Should match dt reg names */
bool vid_lite[TIDSS_MAX_PLANES];
u32 vid_order[TIDSS_MAX_PLANES];
-
- struct dispc_errata errata;
};
extern const struct dispc_features dispc_k2g_feats;
diff --git a/drivers/gpu/drm/tidss/tidss_drv.c b/drivers/gpu/drm/tidss/tidss_drv.c
index d95e4be2c7b9..99edc66ebdef 100644
--- a/drivers/gpu/drm/tidss/tidss_drv.c
+++ b/drivers/gpu/drm/tidss/tidss_drv.c
@@ -17,6 +17,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_irq.h>
+#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
#include "tidss_dispc.h"
@@ -102,15 +103,7 @@ static const struct dev_pm_ops tidss_pm_ops = {
static void tidss_release(struct drm_device *ddev)
{
- struct tidss_device *tidss = ddev->dev_private;
-
drm_kms_helper_poll_fini(ddev);
-
- tidss_modeset_cleanup(tidss);
-
- drm_dev_fini(ddev);
-
- kfree(tidss);
}
DEFINE_DRM_GEM_CMA_FOPS(tidss_fops);
@@ -142,26 +135,18 @@ static int tidss_probe(struct platform_device *pdev)
dev_dbg(dev, "%s\n", __func__);
- /* Can't use devm_* since drm_device's lifetime may exceed dev's */
- tidss = kzalloc(sizeof(*tidss), GFP_KERNEL);
- if (!tidss)
- return -ENOMEM;
+ tidss = devm_drm_dev_alloc(&pdev->dev, &tidss_driver,
+ struct tidss_device, ddev);
+ if (IS_ERR(tidss))
+ return PTR_ERR(tidss);
ddev = &tidss->ddev;
- ret = devm_drm_dev_init(&pdev->dev, ddev, &tidss_driver);
- if (ret) {
- kfree(ddev);
- return ret;
- }
-
tidss->dev = dev;
tidss->feat = of_device_get_match_data(dev);
platform_set_drvdata(pdev, tidss);
- ddev->dev_private = tidss;
-
ret = dispc_init(tidss);
if (ret) {
dev_err(dev, "failed to initialize dispc: %d\n", ret);
diff --git a/drivers/gpu/drm/tidss/tidss_drv.h b/drivers/gpu/drm/tidss/tidss_drv.h
index e2aa6436ad18..3b0a3d87b7c4 100644
--- a/drivers/gpu/drm/tidss/tidss_drv.h
+++ b/drivers/gpu/drm/tidss/tidss_drv.h
@@ -29,10 +29,10 @@ struct tidss_device {
spinlock_t wait_lock; /* protects the irq masks */
dispc_irq_t irq_mask; /* enabled irqs in addition to wait_list */
-
- struct drm_atomic_state *saved_state;
};
+#define to_tidss(__dev) container_of(__dev, struct tidss_device, ddev)
+
int tidss_runtime_get(struct tidss_device *tidss);
void tidss_runtime_put(struct tidss_device *tidss);
diff --git a/drivers/gpu/drm/tidss/tidss_irq.c b/drivers/gpu/drm/tidss/tidss_irq.c
index 612c046738e5..1b80f2d62e0a 100644
--- a/drivers/gpu/drm/tidss/tidss_irq.c
+++ b/drivers/gpu/drm/tidss/tidss_irq.c
@@ -23,7 +23,7 @@ static void tidss_irq_update(struct tidss_device *tidss)
void tidss_irq_enable_vblank(struct drm_crtc *crtc)
{
struct drm_device *ddev = crtc->dev;
- struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_device *tidss = to_tidss(ddev);
struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
u32 hw_videoport = tcrtc->hw_videoport;
unsigned long flags;
@@ -38,7 +38,7 @@ void tidss_irq_enable_vblank(struct drm_crtc *crtc)
void tidss_irq_disable_vblank(struct drm_crtc *crtc)
{
struct drm_device *ddev = crtc->dev;
- struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_device *tidss = to_tidss(ddev);
struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
u32 hw_videoport = tcrtc->hw_videoport;
unsigned long flags;
@@ -53,7 +53,7 @@ void tidss_irq_disable_vblank(struct drm_crtc *crtc)
irqreturn_t tidss_irq_handler(int irq, void *arg)
{
struct drm_device *ddev = (struct drm_device *)arg;
- struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_device *tidss = to_tidss(ddev);
unsigned int id;
dispc_irq_t irqstatus;
@@ -95,7 +95,7 @@ void tidss_irq_resume(struct tidss_device *tidss)
void tidss_irq_preinstall(struct drm_device *ddev)
{
- struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_device *tidss = to_tidss(ddev);
spin_lock_init(&tidss->wait_lock);
@@ -109,7 +109,7 @@ void tidss_irq_preinstall(struct drm_device *ddev)
int tidss_irq_postinstall(struct drm_device *ddev)
{
- struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_device *tidss = to_tidss(ddev);
unsigned long flags;
unsigned int i;
@@ -138,7 +138,7 @@ int tidss_irq_postinstall(struct drm_device *ddev)
void tidss_irq_uninstall(struct drm_device *ddev)
{
- struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_device *tidss = to_tidss(ddev);
tidss_runtime_get(tidss);
dispc_set_irqenable(tidss->dispc, 0);
diff --git a/drivers/gpu/drm/tidss/tidss_kms.c b/drivers/gpu/drm/tidss/tidss_kms.c
index 7d419960b030..4b99e9fa84a5 100644
--- a/drivers/gpu/drm/tidss/tidss_kms.c
+++ b/drivers/gpu/drm/tidss/tidss_kms.c
@@ -25,7 +25,7 @@
static void tidss_atomic_commit_tail(struct drm_atomic_state *old_state)
{
struct drm_device *ddev = old_state->dev;
- struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_device *tidss = to_tidss(ddev);
dev_dbg(ddev->dev, "%s\n", __func__);
@@ -258,7 +258,9 @@ int tidss_modeset_init(struct tidss_device *tidss)
dev_dbg(tidss->dev, "%s\n", __func__);
- drm_mode_config_init(ddev);
+ ret = drmm_mode_config_init(ddev);
+ if (ret)
+ return ret;
ddev->mode_config.min_width = 8;
ddev->mode_config.min_height = 8;
@@ -270,11 +272,11 @@ int tidss_modeset_init(struct tidss_device *tidss)
ret = tidss_dispc_modeset_init(tidss);
if (ret)
- goto err_mode_config_cleanup;
+ return ret;
ret = drm_vblank_init(ddev, tidss->num_crtcs);
if (ret)
- goto err_mode_config_cleanup;
+ return ret;
/* Start with vertical blanking interrupt reporting disabled. */
for (i = 0; i < tidss->num_crtcs; ++i)
@@ -285,15 +287,4 @@ int tidss_modeset_init(struct tidss_device *tidss)
dev_dbg(tidss->dev, "%s done\n", __func__);
return 0;
-
-err_mode_config_cleanup:
- drm_mode_config_cleanup(ddev);
- return ret;
-}
-
-void tidss_modeset_cleanup(struct tidss_device *tidss)
-{
- struct drm_device *ddev = &tidss->ddev;
-
- drm_mode_config_cleanup(ddev);
}
diff --git a/drivers/gpu/drm/tidss/tidss_kms.h b/drivers/gpu/drm/tidss/tidss_kms.h
index dda5625d0128..99aaff099f22 100644
--- a/drivers/gpu/drm/tidss/tidss_kms.h
+++ b/drivers/gpu/drm/tidss/tidss_kms.h
@@ -10,6 +10,5 @@
struct tidss_device;
int tidss_modeset_init(struct tidss_device *tidss);
-void tidss_modeset_cleanup(struct tidss_device *tidss);
#endif
diff --git a/drivers/gpu/drm/tidss/tidss_plane.c b/drivers/gpu/drm/tidss/tidss_plane.c
index 798488948fc5..0a563eabcbb9 100644
--- a/drivers/gpu/drm/tidss/tidss_plane.c
+++ b/drivers/gpu/drm/tidss/tidss_plane.c
@@ -22,7 +22,7 @@ static int tidss_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct drm_device *ddev = plane->dev;
- struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_device *tidss = to_tidss(ddev);
struct tidss_plane *tplane = to_tidss_plane(plane);
const struct drm_format_info *finfo;
struct drm_crtc_state *crtc_state;
@@ -101,7 +101,7 @@ static void tidss_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct drm_device *ddev = plane->dev;
- struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_device *tidss = to_tidss(ddev);
struct tidss_plane *tplane = to_tidss_plane(plane);
struct drm_plane_state *state = plane->state;
u32 hw_videoport;
@@ -133,7 +133,7 @@ static void tidss_plane_atomic_disable(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct drm_device *ddev = plane->dev;
- struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_device *tidss = to_tidss(ddev);
struct tidss_plane *tplane = to_tidss_plane(plane);
dev_dbg(ddev->dev, "%s\n", __func__);
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index 0791a0200cc3..a5e9ee4c7fbf 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -390,10 +390,9 @@ static int tilcdc_init(struct drm_driver *ddrv, struct device *dev)
ret = drm_dev_register(ddev, 0);
if (ret)
goto init_failed;
+ priv->is_registered = true;
drm_fbdev_generic_setup(ddev, bpp);
-
- priv->is_registered = true;
return 0;
init_failed:
@@ -478,26 +477,17 @@ static struct drm_info_list tilcdc_debugfs_list[] = {
{ "mm", tilcdc_mm_show, 0 },
};
-static int tilcdc_debugfs_init(struct drm_minor *minor)
+static void tilcdc_debugfs_init(struct drm_minor *minor)
{
- struct drm_device *dev = minor->dev;
struct tilcdc_module *mod;
- int ret;
- ret = drm_debugfs_create_files(tilcdc_debugfs_list,
- ARRAY_SIZE(tilcdc_debugfs_list),
- minor->debugfs_root, minor);
+ drm_debugfs_create_files(tilcdc_debugfs_list,
+ ARRAY_SIZE(tilcdc_debugfs_list),
+ minor->debugfs_root, minor);
list_for_each_entry(mod, &module_list, list)
if (mod->funcs->debugfs_init)
mod->funcs->debugfs_init(mod, minor);
-
- if (ret) {
- dev_err(dev->dev, "could not install tilcdc_debugfs_list\n");
- return ret;
- }
-
- return ret;
}
#endif
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_external.c b/drivers/gpu/drm/tilcdc/tilcdc_external.c
index 28b7f703236e..b177525588c1 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_external.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_external.c
@@ -10,6 +10,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_of.h>
+#include <drm/drm_simple_kms_helper.h>
#include "tilcdc_drv.h"
#include "tilcdc_external.h"
@@ -83,10 +84,6 @@ int tilcdc_add_component_encoder(struct drm_device *ddev)
return 0;
}
-static const struct drm_encoder_funcs tilcdc_external_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static
int tilcdc_attach_bridge(struct drm_device *ddev, struct drm_bridge *bridge)
{
@@ -131,9 +128,8 @@ int tilcdc_attach_external_device(struct drm_device *ddev)
if (!priv->external_encoder)
return -ENOMEM;
- ret = drm_encoder_init(ddev, priv->external_encoder,
- &tilcdc_external_encoder_funcs,
- DRM_MODE_ENCODER_NONE, NULL);
+ ret = drm_simple_encoder_init(ddev, priv->external_encoder,
+ DRM_MODE_ENCODER_NONE);
if (ret) {
dev_err(ddev->dev, "drm_encoder_init() failed %d\n", ret);
return ret;
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
index 5584e656b857..12823d60c4e8 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
@@ -16,6 +16,7 @@
#include <drm/drm_connector.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "tilcdc_drv.h"
#include "tilcdc_panel.h"
@@ -74,10 +75,6 @@ static void panel_encoder_mode_set(struct drm_encoder *encoder,
/* nothing needed */
}
-static const struct drm_encoder_funcs panel_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static const struct drm_encoder_helper_funcs panel_encoder_helper_funcs = {
.dpms = panel_encoder_dpms,
.prepare = panel_encoder_prepare,
@@ -102,8 +99,7 @@ static struct drm_encoder *panel_encoder_create(struct drm_device *dev,
encoder = &panel_encoder->base;
encoder->possible_crtcs = 1;
- ret = drm_encoder_init(dev, encoder, &panel_encoder_funcs,
- DRM_MODE_ENCODER_LVDS, NULL);
+ ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_LVDS);
if (ret < 0)
goto fail;
diff --git a/drivers/gpu/drm/tiny/Kconfig b/drivers/gpu/drm/tiny/Kconfig
index 4160e74e4751..2b6414f0fa75 100644
--- a/drivers/gpu/drm/tiny/Kconfig
+++ b/drivers/gpu/drm/tiny/Kconfig
@@ -1,5 +1,24 @@
# SPDX-License-Identifier: GPL-2.0-only
+config DRM_CIRRUS_QEMU
+ tristate "Cirrus driver for QEMU emulated device"
+ depends on DRM && PCI && MMU
+ select DRM_KMS_HELPER
+ select DRM_GEM_SHMEM_HELPER
+ help
+ This is a KMS driver for emulated cirrus device in qemu.
+ It is *NOT* intended for real cirrus devices. This requires
+ the modesetting userspace X.org driver.
+
+ Cirrus is obsolete, the hardware was designed in the 90ies
+ and can't keep up with todays needs. More background:
+ https://www.kraxel.org/blog/2014/10/qemu-using-cirrus-considered-harmful/
+
+ Better alternatives are:
+ - stdvga (DRM_BOCHS, qemu -vga std, default in qemu 2.2+)
+ - qxl (DRM_QXL, qemu -vga qxl, works best with spice)
+ - virtio (DRM_VIRTIO_GPU), qemu -vga virtio)
+
config DRM_GM12U320
tristate "GM12U320 driver for USB projectors"
depends on DRM && USB
diff --git a/drivers/gpu/drm/tiny/Makefile b/drivers/gpu/drm/tiny/Makefile
index c96ceee71453..6ae4e9e5a35f 100644
--- a/drivers/gpu/drm/tiny/Makefile
+++ b/drivers/gpu/drm/tiny/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus.o
obj-$(CONFIG_DRM_GM12U320) += gm12u320.o
obj-$(CONFIG_TINYDRM_HX8357D) += hx8357d.o
obj-$(CONFIG_TINYDRM_ILI9225) += ili9225.o
diff --git a/drivers/gpu/drm/cirrus/cirrus.c b/drivers/gpu/drm/tiny/cirrus.c
index d2ff63ce8eaf..744a8e337e41 100644
--- a/drivers/gpu/drm/cirrus/cirrus.c
+++ b/drivers/gpu/drm/tiny/cirrus.c
@@ -35,6 +35,7 @@
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_ioctl.h>
+#include <drm/drm_managed.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
@@ -58,6 +59,8 @@ struct cirrus_device {
void __iomem *mmio;
};
+#define to_cirrus(_dev) container_of(_dev, struct cirrus_device, dev)
+
/* ------------------------------------------------------------------ */
/*
* The meat of this driver. The core passes us a mode and we have to program
@@ -310,7 +313,7 @@ static int cirrus_mode_set(struct cirrus_device *cirrus,
static int cirrus_fb_blit_rect(struct drm_framebuffer *fb,
struct drm_rect *rect)
{
- struct cirrus_device *cirrus = fb->dev->dev_private;
+ struct cirrus_device *cirrus = to_cirrus(fb->dev);
void *vmap;
int idx, ret;
@@ -435,7 +438,7 @@ static void cirrus_pipe_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
- struct cirrus_device *cirrus = pipe->crtc.dev->dev_private;
+ struct cirrus_device *cirrus = to_cirrus(pipe->crtc.dev);
cirrus_mode_set(cirrus, &crtc_state->mode, plane_state->fb);
cirrus_fb_blit_fullscreen(plane_state->fb);
@@ -444,7 +447,7 @@ static void cirrus_pipe_enable(struct drm_simple_display_pipe *pipe,
static void cirrus_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_state)
{
- struct cirrus_device *cirrus = pipe->crtc.dev->dev_private;
+ struct cirrus_device *cirrus = to_cirrus(pipe->crtc.dev);
struct drm_plane_state *state = pipe->plane.state;
struct drm_crtc *crtc = &pipe->crtc;
struct drm_rect rect;
@@ -509,11 +512,15 @@ static const struct drm_mode_config_funcs cirrus_mode_config_funcs = {
.atomic_commit = drm_atomic_helper_commit,
};
-static void cirrus_mode_config_init(struct cirrus_device *cirrus)
+static int cirrus_mode_config_init(struct cirrus_device *cirrus)
{
struct drm_device *dev = &cirrus->dev;
+ int ret;
+
+ ret = drmm_mode_config_init(dev);
+ if (ret)
+ return ret;
- drm_mode_config_init(dev);
dev->mode_config.min_width = 0;
dev->mode_config.min_height = 0;
dev->mode_config.max_width = CIRRUS_MAX_PITCH / 2;
@@ -521,18 +528,12 @@ static void cirrus_mode_config_init(struct cirrus_device *cirrus)
dev->mode_config.preferred_depth = 16;
dev->mode_config.prefer_shadow = 0;
dev->mode_config.funcs = &cirrus_mode_config_funcs;
+
+ return 0;
}
/* ------------------------------------------------------------------ */
-static void cirrus_release(struct drm_device *dev)
-{
- struct cirrus_device *cirrus = dev->dev_private;
-
- drm_mode_config_cleanup(dev);
- kfree(cirrus);
-}
-
DEFINE_DRM_GEM_FOPS(cirrus_fops);
static struct drm_driver cirrus_driver = {
@@ -546,7 +547,6 @@ static struct drm_driver cirrus_driver = {
.fops = &cirrus_fops,
DRM_GEM_SHMEM_DRIVER_OPS,
- .release = cirrus_release,
};
static int cirrus_pci_probe(struct pci_dev *pdev,
@@ -560,7 +560,7 @@ static int cirrus_pci_probe(struct pci_dev *pdev,
if (ret)
return ret;
- ret = pci_enable_device(pdev);
+ ret = pcim_enable_device(pdev);
if (ret)
return ret;
@@ -569,36 +569,34 @@ static int cirrus_pci_probe(struct pci_dev *pdev,
return ret;
ret = -ENOMEM;
- cirrus = kzalloc(sizeof(*cirrus), GFP_KERNEL);
- if (cirrus == NULL)
- goto err_pci_release;
+ cirrus = devm_drm_dev_alloc(&pdev->dev, &cirrus_driver,
+ struct cirrus_device, dev);
+ if (IS_ERR(cirrus))
+ return PTR_ERR(cirrus);
dev = &cirrus->dev;
- ret = drm_dev_init(dev, &cirrus_driver, &pdev->dev);
- if (ret)
- goto err_free_cirrus;
- dev->dev_private = cirrus;
- ret = -ENOMEM;
- cirrus->vram = ioremap(pci_resource_start(pdev, 0),
- pci_resource_len(pdev, 0));
+ cirrus->vram = devm_ioremap(&pdev->dev, pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
if (cirrus->vram == NULL)
- goto err_dev_put;
+ return -ENOMEM;
- cirrus->mmio = ioremap(pci_resource_start(pdev, 1),
- pci_resource_len(pdev, 1));
+ cirrus->mmio = devm_ioremap(&pdev->dev, pci_resource_start(pdev, 1),
+ pci_resource_len(pdev, 1));
if (cirrus->mmio == NULL)
- goto err_unmap_vram;
+ return -ENOMEM;
- cirrus_mode_config_init(cirrus);
+ ret = cirrus_mode_config_init(cirrus);
+ if (ret)
+ return ret;
ret = cirrus_conn_init(cirrus);
if (ret < 0)
- goto err_cleanup;
+ return ret;
ret = cirrus_pipe_init(cirrus);
if (ret < 0)
- goto err_cleanup;
+ return ret;
drm_mode_config_reset(dev);
@@ -606,36 +604,18 @@ static int cirrus_pci_probe(struct pci_dev *pdev,
pci_set_drvdata(pdev, dev);
ret = drm_dev_register(dev, 0);
if (ret)
- goto err_cleanup;
+ return ret;
drm_fbdev_generic_setup(dev, dev->mode_config.preferred_depth);
return 0;
-
-err_cleanup:
- drm_mode_config_cleanup(dev);
- iounmap(cirrus->mmio);
-err_unmap_vram:
- iounmap(cirrus->vram);
-err_dev_put:
- drm_dev_put(dev);
-err_free_cirrus:
- kfree(cirrus);
-err_pci_release:
- pci_release_regions(pdev);
- return ret;
}
static void cirrus_pci_remove(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
- struct cirrus_device *cirrus = dev->dev_private;
drm_dev_unplug(dev);
drm_atomic_helper_shutdown(dev);
- iounmap(cirrus->mmio);
- iounmap(cirrus->vram);
- drm_dev_put(dev);
- pci_release_regions(pdev);
}
static const struct pci_device_id pciidlist[] = {
diff --git a/drivers/gpu/drm/tiny/gm12u320.c b/drivers/gpu/drm/tiny/gm12u320.c
index a48173441ae0..cc397671f689 100644
--- a/drivers/gpu/drm/tiny/gm12u320.c
+++ b/drivers/gpu/drm/tiny/gm12u320.c
@@ -19,6 +19,7 @@
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_ioctl.h>
+#include <drm/drm_managed.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
@@ -87,18 +88,18 @@ struct gm12u320_device {
struct usb_device *udev;
unsigned char *cmd_buf;
unsigned char *data_buf[GM12U320_BLOCK_COUNT];
- bool pipe_enabled;
struct {
- bool run;
- struct workqueue_struct *workq;
- struct work_struct work;
- wait_queue_head_t waitq;
+ struct delayed_work work;
struct mutex lock;
struct drm_framebuffer *fb;
struct drm_rect rect;
+ int frame;
+ int draw_status_timeout;
} fb_update;
};
+#define to_gm12u320(__dev) container_of(__dev, struct gm12u320_device, dev)
+
static const char cmd_data[CMD_SIZE] = {
0x55, 0x53, 0x42, 0x43, 0x00, 0x00, 0x00, 0x00,
0x68, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x10, 0xff,
@@ -159,7 +160,7 @@ static int gm12u320_usb_alloc(struct gm12u320_device *gm12u320)
int i, block_size;
const char *hdr;
- gm12u320->cmd_buf = kmalloc(CMD_SIZE, GFP_KERNEL);
+ gm12u320->cmd_buf = drmm_kmalloc(&gm12u320->dev, CMD_SIZE, GFP_KERNEL);
if (!gm12u320->cmd_buf)
return -ENOMEM;
@@ -172,7 +173,8 @@ static int gm12u320_usb_alloc(struct gm12u320_device *gm12u320)
hdr = data_block_header;
}
- gm12u320->data_buf[i] = kzalloc(block_size, GFP_KERNEL);
+ gm12u320->data_buf[i] = drmm_kzalloc(&gm12u320->dev,
+ block_size, GFP_KERNEL);
if (!gm12u320->data_buf[i])
return -ENOMEM;
@@ -182,26 +184,9 @@ static int gm12u320_usb_alloc(struct gm12u320_device *gm12u320)
data_block_footer, DATA_BLOCK_FOOTER_SIZE);
}
- gm12u320->fb_update.workq = create_singlethread_workqueue(DRIVER_NAME);
- if (!gm12u320->fb_update.workq)
- return -ENOMEM;
-
return 0;
}
-static void gm12u320_usb_free(struct gm12u320_device *gm12u320)
-{
- int i;
-
- if (gm12u320->fb_update.workq)
- destroy_workqueue(gm12u320->fb_update.workq);
-
- for (i = 0; i < GM12U320_BLOCK_COUNT; i++)
- kfree(gm12u320->data_buf[i]);
-
- kfree(gm12u320->cmd_buf);
-}
-
static int gm12u320_misc_request(struct gm12u320_device *gm12u320,
u8 req_a, u8 req_b,
u8 arg_a, u8 arg_b, u8 arg_c, u8 arg_d)
@@ -344,80 +329,77 @@ unlock:
static void gm12u320_fb_update_work(struct work_struct *work)
{
struct gm12u320_device *gm12u320 =
- container_of(work, struct gm12u320_device, fb_update.work);
- int draw_status_timeout = FIRST_FRAME_TIMEOUT;
+ container_of(to_delayed_work(work), struct gm12u320_device,
+ fb_update.work);
int block, block_size, len;
- int frame = 0;
int ret = 0;
- while (gm12u320->fb_update.run) {
- gm12u320_copy_fb_to_blocks(gm12u320);
-
- for (block = 0; block < GM12U320_BLOCK_COUNT; block++) {
- if (block == GM12U320_BLOCK_COUNT - 1)
- block_size = DATA_LAST_BLOCK_SIZE;
- else
- block_size = DATA_BLOCK_SIZE;
-
- /* Send data command to device */
- memcpy(gm12u320->cmd_buf, cmd_data, CMD_SIZE);
- gm12u320->cmd_buf[8] = block_size & 0xff;
- gm12u320->cmd_buf[9] = block_size >> 8;
- gm12u320->cmd_buf[20] = 0xfc - block * 4;
- gm12u320->cmd_buf[21] = block | (frame << 7);
-
- ret = usb_bulk_msg(gm12u320->udev,
- usb_sndbulkpipe(gm12u320->udev, DATA_SND_EPT),
- gm12u320->cmd_buf, CMD_SIZE, &len,
- CMD_TIMEOUT);
- if (ret || len != CMD_SIZE)
- goto err;
-
- /* Send data block to device */
- ret = usb_bulk_msg(gm12u320->udev,
- usb_sndbulkpipe(gm12u320->udev, DATA_SND_EPT),
- gm12u320->data_buf[block], block_size,
- &len, DATA_TIMEOUT);
- if (ret || len != block_size)
- goto err;
-
- /* Read status */
- ret = usb_bulk_msg(gm12u320->udev,
- usb_rcvbulkpipe(gm12u320->udev, DATA_RCV_EPT),
- gm12u320->cmd_buf, READ_STATUS_SIZE, &len,
- CMD_TIMEOUT);
- if (ret || len != READ_STATUS_SIZE)
- goto err;
- }
+ gm12u320_copy_fb_to_blocks(gm12u320);
+
+ for (block = 0; block < GM12U320_BLOCK_COUNT; block++) {
+ if (block == GM12U320_BLOCK_COUNT - 1)
+ block_size = DATA_LAST_BLOCK_SIZE;
+ else
+ block_size = DATA_BLOCK_SIZE;
+
+ /* Send data command to device */
+ memcpy(gm12u320->cmd_buf, cmd_data, CMD_SIZE);
+ gm12u320->cmd_buf[8] = block_size & 0xff;
+ gm12u320->cmd_buf[9] = block_size >> 8;
+ gm12u320->cmd_buf[20] = 0xfc - block * 4;
+ gm12u320->cmd_buf[21] =
+ block | (gm12u320->fb_update.frame << 7);
- /* Send draw command to device */
- memcpy(gm12u320->cmd_buf, cmd_draw, CMD_SIZE);
ret = usb_bulk_msg(gm12u320->udev,
usb_sndbulkpipe(gm12u320->udev, DATA_SND_EPT),
- gm12u320->cmd_buf, CMD_SIZE, &len, CMD_TIMEOUT);
+ gm12u320->cmd_buf, CMD_SIZE, &len,
+ CMD_TIMEOUT);
if (ret || len != CMD_SIZE)
goto err;
+ /* Send data block to device */
+ ret = usb_bulk_msg(gm12u320->udev,
+ usb_sndbulkpipe(gm12u320->udev, DATA_SND_EPT),
+ gm12u320->data_buf[block], block_size,
+ &len, DATA_TIMEOUT);
+ if (ret || len != block_size)
+ goto err;
+
/* Read status */
ret = usb_bulk_msg(gm12u320->udev,
usb_rcvbulkpipe(gm12u320->udev, DATA_RCV_EPT),
gm12u320->cmd_buf, READ_STATUS_SIZE, &len,
- draw_status_timeout);
+ CMD_TIMEOUT);
if (ret || len != READ_STATUS_SIZE)
goto err;
-
- draw_status_timeout = CMD_TIMEOUT;
- frame = !frame;
-
- /*
- * We must draw a frame every 2s otherwise the projector
- * switches back to showing its logo.
- */
- wait_event_timeout(gm12u320->fb_update.waitq,
- !gm12u320->fb_update.run ||
- gm12u320->fb_update.fb != NULL,
- IDLE_TIMEOUT);
}
+
+ /* Send draw command to device */
+ memcpy(gm12u320->cmd_buf, cmd_draw, CMD_SIZE);
+ ret = usb_bulk_msg(gm12u320->udev,
+ usb_sndbulkpipe(gm12u320->udev, DATA_SND_EPT),
+ gm12u320->cmd_buf, CMD_SIZE, &len, CMD_TIMEOUT);
+ if (ret || len != CMD_SIZE)
+ goto err;
+
+ /* Read status */
+ ret = usb_bulk_msg(gm12u320->udev,
+ usb_rcvbulkpipe(gm12u320->udev, DATA_RCV_EPT),
+ gm12u320->cmd_buf, READ_STATUS_SIZE, &len,
+ gm12u320->fb_update.draw_status_timeout);
+ if (ret || len != READ_STATUS_SIZE)
+ goto err;
+
+ gm12u320->fb_update.draw_status_timeout = CMD_TIMEOUT;
+ gm12u320->fb_update.frame = !gm12u320->fb_update.frame;
+
+ /*
+ * We must draw a frame every 2s otherwise the projector
+ * switches back to showing its logo.
+ */
+ queue_delayed_work(system_long_wq, &gm12u320->fb_update.work,
+ IDLE_TIMEOUT);
+
return;
err:
/* Do not log errors caused by module unload or device unplug */
@@ -428,7 +410,7 @@ err:
static void gm12u320_fb_mark_dirty(struct drm_framebuffer *fb,
struct drm_rect *dirty)
{
- struct gm12u320_device *gm12u320 = fb->dev->dev_private;
+ struct gm12u320_device *gm12u320 = to_gm12u320(fb->dev);
struct drm_framebuffer *old_fb = NULL;
bool wakeup = false;
@@ -452,36 +434,24 @@ static void gm12u320_fb_mark_dirty(struct drm_framebuffer *fb,
mutex_unlock(&gm12u320->fb_update.lock);
if (wakeup)
- wake_up(&gm12u320->fb_update.waitq);
+ mod_delayed_work(system_long_wq, &gm12u320->fb_update.work, 0);
if (old_fb)
drm_framebuffer_put(old_fb);
}
-static void gm12u320_start_fb_update(struct gm12u320_device *gm12u320)
-{
- mutex_lock(&gm12u320->fb_update.lock);
- gm12u320->fb_update.run = true;
- mutex_unlock(&gm12u320->fb_update.lock);
-
- queue_work(gm12u320->fb_update.workq, &gm12u320->fb_update.work);
-}
-
static void gm12u320_stop_fb_update(struct gm12u320_device *gm12u320)
{
- mutex_lock(&gm12u320->fb_update.lock);
- gm12u320->fb_update.run = false;
- mutex_unlock(&gm12u320->fb_update.lock);
+ struct drm_framebuffer *old_fb;
- wake_up(&gm12u320->fb_update.waitq);
- cancel_work_sync(&gm12u320->fb_update.work);
+ cancel_delayed_work_sync(&gm12u320->fb_update.work);
mutex_lock(&gm12u320->fb_update.lock);
- if (gm12u320->fb_update.fb) {
- drm_framebuffer_put(gm12u320->fb_update.fb);
- gm12u320->fb_update.fb = NULL;
- }
+ old_fb = gm12u320->fb_update.fb;
+ gm12u320->fb_update.fb = NULL;
mutex_unlock(&gm12u320->fb_update.lock);
+
+ drm_framebuffer_put(old_fb);
}
static int gm12u320_set_ecomode(struct gm12u320_device *gm12u320)
@@ -589,20 +559,18 @@ static void gm12u320_pipe_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
- struct gm12u320_device *gm12u320 = pipe->crtc.dev->dev_private;
struct drm_rect rect = { 0, 0, GM12U320_USER_WIDTH, GM12U320_HEIGHT };
+ struct gm12u320_device *gm12u320 = to_gm12u320(pipe->crtc.dev);
+ gm12u320->fb_update.draw_status_timeout = FIRST_FRAME_TIMEOUT;
gm12u320_fb_mark_dirty(plane_state->fb, &rect);
- gm12u320_start_fb_update(gm12u320);
- gm12u320->pipe_enabled = true;
}
static void gm12u320_pipe_disable(struct drm_simple_display_pipe *pipe)
{
- struct gm12u320_device *gm12u320 = pipe->crtc.dev->dev_private;
+ struct gm12u320_device *gm12u320 = to_gm12u320(pipe->crtc.dev);
gm12u320_stop_fb_update(gm12u320);
- gm12u320->pipe_enabled = false;
}
static void gm12u320_pipe_update(struct drm_simple_display_pipe *pipe,
@@ -630,16 +598,6 @@ static const uint64_t gm12u320_pipe_modifiers[] = {
DRM_FORMAT_MOD_INVALID
};
-static void gm12u320_driver_release(struct drm_device *dev)
-{
- struct gm12u320_device *gm12u320 = dev->dev_private;
-
- gm12u320_usb_free(gm12u320);
- drm_mode_config_cleanup(dev);
- drm_dev_fini(dev);
- kfree(gm12u320);
-}
-
DEFINE_DRM_GEM_FOPS(gm12u320_fops);
static struct drm_driver gm12u320_drm_driver = {
@@ -651,7 +609,6 @@ static struct drm_driver gm12u320_drm_driver = {
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
- .release = gm12u320_driver_release,
.fops = &gm12u320_fops,
DRM_GEM_SHMEM_DRIVER_OPS,
};
@@ -676,24 +633,21 @@ static int gm12u320_usb_probe(struct usb_interface *interface,
if (interface->cur_altsetting->desc.bInterfaceNumber != 0)
return -ENODEV;
- gm12u320 = kzalloc(sizeof(*gm12u320), GFP_KERNEL);
- if (gm12u320 == NULL)
- return -ENOMEM;
+ gm12u320 = devm_drm_dev_alloc(&interface->dev, &gm12u320_drm_driver,
+ struct gm12u320_device, dev);
+ if (IS_ERR(gm12u320))
+ return PTR_ERR(gm12u320);
gm12u320->udev = interface_to_usbdev(interface);
- INIT_WORK(&gm12u320->fb_update.work, gm12u320_fb_update_work);
+ INIT_DELAYED_WORK(&gm12u320->fb_update.work, gm12u320_fb_update_work);
mutex_init(&gm12u320->fb_update.lock);
- init_waitqueue_head(&gm12u320->fb_update.waitq);
dev = &gm12u320->dev;
- ret = drm_dev_init(dev, &gm12u320_drm_driver, &interface->dev);
- if (ret) {
- kfree(gm12u320);
+
+ ret = drmm_mode_config_init(dev);
+ if (ret)
return ret;
- }
- dev->dev_private = gm12u320;
- drm_mode_config_init(dev);
dev->mode_config.min_width = GM12U320_USER_WIDTH;
dev->mode_config.max_width = GM12U320_USER_WIDTH;
dev->mode_config.min_height = GM12U320_HEIGHT;
@@ -702,15 +656,15 @@ static int gm12u320_usb_probe(struct usb_interface *interface,
ret = gm12u320_usb_alloc(gm12u320);
if (ret)
- goto err_put;
+ return ret;
ret = gm12u320_set_ecomode(gm12u320);
if (ret)
- goto err_put;
+ return ret;
ret = gm12u320_conn_init(gm12u320);
if (ret)
- goto err_put;
+ return ret;
ret = drm_simple_display_pipe_init(&gm12u320->dev,
&gm12u320->pipe,
@@ -720,56 +674,44 @@ static int gm12u320_usb_probe(struct usb_interface *interface,
gm12u320_pipe_modifiers,
&gm12u320->conn);
if (ret)
- goto err_put;
+ return ret;
drm_mode_config_reset(dev);
usb_set_intfdata(interface, dev);
ret = drm_dev_register(dev, 0);
if (ret)
- goto err_put;
+ return ret;
drm_fbdev_generic_setup(dev, 0);
return 0;
-
-err_put:
- drm_dev_put(dev);
- return ret;
}
static void gm12u320_usb_disconnect(struct usb_interface *interface)
{
struct drm_device *dev = usb_get_intfdata(interface);
- struct gm12u320_device *gm12u320 = dev->dev_private;
- gm12u320_stop_fb_update(gm12u320);
drm_dev_unplug(dev);
- drm_dev_put(dev);
+ drm_atomic_helper_shutdown(dev);
}
static __maybe_unused int gm12u320_suspend(struct usb_interface *interface,
pm_message_t message)
{
struct drm_device *dev = usb_get_intfdata(interface);
- struct gm12u320_device *gm12u320 = dev->dev_private;
- if (gm12u320->pipe_enabled)
- gm12u320_stop_fb_update(gm12u320);
-
- return 0;
+ return drm_mode_config_helper_suspend(dev);
}
static __maybe_unused int gm12u320_resume(struct usb_interface *interface)
{
struct drm_device *dev = usb_get_intfdata(interface);
- struct gm12u320_device *gm12u320 = dev->dev_private;
+ struct gm12u320_device *gm12u320 = to_gm12u320(dev);
gm12u320_set_ecomode(gm12u320);
- if (gm12u320->pipe_enabled)
- gm12u320_start_fb_update(gm12u320);
- return 0;
+ return drm_mode_config_helper_resume(dev);
}
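
The gm12u320 hunks above drop the driver's private workqueue and waitqueue in favour of a self-rearming delayed work item on system_long_wq. A minimal sketch of that pattern, with hypothetical names not taken from the driver:

#include <linux/workqueue.h>

struct frame_ctx {				/* hypothetical */
	struct delayed_work work;
};

static void frame_work_fn(struct work_struct *work)
{
	struct frame_ctx *ctx = container_of(to_delayed_work(work),
					     struct frame_ctx, work);

	/* ... push one frame to the device ... */

	/* Re-arm so a frame still goes out while nothing is dirty. */
	queue_delayed_work(system_long_wq, &ctx->work, HZ * 2);
}

/* On damage, run the update now instead of waiting out the idle delay. */
static void frame_mark_dirty(struct frame_ctx *ctx)
{
	mod_delayed_work(system_long_wq, &ctx->work, 0);
}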
static const struct usb_device_id id_table[] = {
diff --git a/drivers/gpu/drm/tiny/hx8357d.c b/drivers/gpu/drm/tiny/hx8357d.c
index 9af8ff84974f..b4bc358a3269 100644
--- a/drivers/gpu/drm/tiny/hx8357d.c
+++ b/drivers/gpu/drm/tiny/hx8357d.c
@@ -21,6 +21,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modeset_helper.h>
#include <video/mipi_display.h>
@@ -195,7 +196,6 @@ DEFINE_DRM_GEM_CMA_FOPS(hx8357d_fops);
static struct drm_driver hx8357d_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &hx8357d_fops,
- .release = mipi_dbi_release,
DRM_GEM_CMA_VMAP_DRIVER_OPS,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "hx8357d",
@@ -226,18 +226,12 @@ static int hx8357d_probe(struct spi_device *spi)
u32 rotation = 0;
int ret;
- dbidev = kzalloc(sizeof(*dbidev), GFP_KERNEL);
- if (!dbidev)
- return -ENOMEM;
+ dbidev = devm_drm_dev_alloc(dev, &hx8357d_driver,
+ struct mipi_dbi_dev, drm);
+ if (IS_ERR(dbidev))
+ return PTR_ERR(dbidev);
drm = &dbidev->drm;
- ret = devm_drm_dev_init(dev, drm, &hx8357d_driver);
- if (ret) {
- kfree(dbidev);
- return ret;
- }
-
- drm_mode_config_init(drm);
dc = devm_gpiod_get(dev, "dc", GPIOD_OUT_LOW);
if (IS_ERR(dc)) {
diff --git a/drivers/gpu/drm/tiny/ili9225.c b/drivers/gpu/drm/tiny/ili9225.c
index 802fb8dde1b6..d1a5ab6747d5 100644
--- a/drivers/gpu/drm/tiny/ili9225.c
+++ b/drivers/gpu/drm/tiny/ili9225.c
@@ -24,6 +24,7 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_rect.h>
@@ -345,7 +346,6 @@ DEFINE_DRM_GEM_CMA_FOPS(ili9225_fops);
static struct drm_driver ili9225_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &ili9225_fops,
- .release = mipi_dbi_release,
DRM_GEM_CMA_VMAP_DRIVER_OPS,
.name = "ili9225",
.desc = "Ilitek ILI9225",
@@ -376,19 +376,13 @@ static int ili9225_probe(struct spi_device *spi)
u32 rotation = 0;
int ret;
- dbidev = kzalloc(sizeof(*dbidev), GFP_KERNEL);
- if (!dbidev)
- return -ENOMEM;
+ dbidev = devm_drm_dev_alloc(dev, &ili9225_driver,
+ struct mipi_dbi_dev, drm);
+ if (IS_ERR(dbidev))
+ return PTR_ERR(dbidev);
dbi = &dbidev->dbi;
drm = &dbidev->drm;
- ret = devm_drm_dev_init(dev, drm, &ili9225_driver);
- if (ret) {
- kfree(dbidev);
- return ret;
- }
-
- drm_mode_config_init(drm);
dbi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(dbi->reset)) {
diff --git a/drivers/gpu/drm/tiny/ili9341.c b/drivers/gpu/drm/tiny/ili9341.c
index 33b51dc7faa8..bb819f45a5d3 100644
--- a/drivers/gpu/drm/tiny/ili9341.c
+++ b/drivers/gpu/drm/tiny/ili9341.c
@@ -20,6 +20,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modeset_helper.h>
#include <video/mipi_display.h>
@@ -151,7 +152,6 @@ DEFINE_DRM_GEM_CMA_FOPS(ili9341_fops);
static struct drm_driver ili9341_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &ili9341_fops,
- .release = mipi_dbi_release,
DRM_GEM_CMA_VMAP_DRIVER_OPS,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "ili9341",
@@ -183,19 +183,13 @@ static int ili9341_probe(struct spi_device *spi)
u32 rotation = 0;
int ret;
- dbidev = kzalloc(sizeof(*dbidev), GFP_KERNEL);
- if (!dbidev)
- return -ENOMEM;
+ dbidev = devm_drm_dev_alloc(dev, &ili9341_driver,
+ struct mipi_dbi_dev, drm);
+ if (IS_ERR(dbidev))
+ return PTR_ERR(dbidev);
dbi = &dbidev->dbi;
drm = &dbidev->drm;
- ret = devm_drm_dev_init(dev, drm, &ili9341_driver);
- if (ret) {
- kfree(dbidev);
- return ret;
- }
-
- drm_mode_config_init(drm);
dbi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(dbi->reset)) {
diff --git a/drivers/gpu/drm/tiny/ili9486.c b/drivers/gpu/drm/tiny/ili9486.c
index 532560aebb1e..2702ea557d29 100644
--- a/drivers/gpu/drm/tiny/ili9486.c
+++ b/drivers/gpu/drm/tiny/ili9486.c
@@ -19,6 +19,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modeset_helper.h>
@@ -164,7 +165,6 @@ DEFINE_DRM_GEM_CMA_FOPS(ili9486_fops);
static struct drm_driver ili9486_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &ili9486_fops,
- .release = mipi_dbi_release,
DRM_GEM_CMA_VMAP_DRIVER_OPS,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "ili9486",
@@ -197,19 +197,13 @@ static int ili9486_probe(struct spi_device *spi)
u32 rotation = 0;
int ret;
- dbidev = kzalloc(sizeof(*dbidev), GFP_KERNEL);
- if (!dbidev)
- return -ENOMEM;
+ dbidev = devm_drm_dev_alloc(dev, &ili9486_driver,
+ struct mipi_dbi_dev, drm);
+ if (IS_ERR(dbidev))
+ return PTR_ERR(dbidev);
dbi = &dbidev->dbi;
drm = &dbidev->drm;
- ret = devm_drm_dev_init(dev, drm, &ili9486_driver);
- if (ret) {
- kfree(dbidev);
- return ret;
- }
-
- drm_mode_config_init(drm);
dbi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(dbi->reset)) {
diff --git a/drivers/gpu/drm/tiny/mi0283qt.c b/drivers/gpu/drm/tiny/mi0283qt.c
index e2cfd9a17143..08ac549ab0f7 100644
--- a/drivers/gpu/drm/tiny/mi0283qt.c
+++ b/drivers/gpu/drm/tiny/mi0283qt.c
@@ -18,6 +18,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modeset_helper.h>
#include <video/mipi_display.h>
@@ -155,7 +156,6 @@ DEFINE_DRM_GEM_CMA_FOPS(mi0283qt_fops);
static struct drm_driver mi0283qt_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &mi0283qt_fops,
- .release = mipi_dbi_release,
DRM_GEM_CMA_VMAP_DRIVER_OPS,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "mi0283qt",
@@ -187,19 +187,13 @@ static int mi0283qt_probe(struct spi_device *spi)
u32 rotation = 0;
int ret;
- dbidev = kzalloc(sizeof(*dbidev), GFP_KERNEL);
- if (!dbidev)
- return -ENOMEM;
+ dbidev = devm_drm_dev_alloc(dev, &mi0283qt_driver,
+ struct mipi_dbi_dev, drm);
+ if (IS_ERR(dbidev))
+ return PTR_ERR(dbidev);
dbi = &dbidev->dbi;
drm = &dbidev->drm;
- ret = devm_drm_dev_init(dev, drm, &mi0283qt_driver);
- if (ret) {
- kfree(dbidev);
- return ret;
- }
-
- drm_mode_config_init(drm);
dbi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(dbi->reset)) {
diff --git a/drivers/gpu/drm/tiny/repaper.c b/drivers/gpu/drm/tiny/repaper.c
index f5ebcaf7ee3a..1c0e7169545b 100644
--- a/drivers/gpu/drm/tiny/repaper.c
+++ b/drivers/gpu/drm/tiny/repaper.c
@@ -31,6 +31,7 @@
#include <drm/drm_format_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_managed.h>
#include <drm/drm_modes.h>
#include <drm/drm_rect.h>
#include <drm/drm_probe_helper.h>
@@ -908,17 +909,6 @@ static const struct drm_mode_config_funcs repaper_mode_config_funcs = {
.atomic_commit = drm_atomic_helper_commit,
};
-static void repaper_release(struct drm_device *drm)
-{
- struct repaper_epd *epd = drm_to_epd(drm);
-
- DRM_DEBUG_DRIVER("\n");
-
- drm_mode_config_cleanup(drm);
- drm_dev_fini(drm);
- kfree(epd);
-}
-
static const uint32_t repaper_formats[] = {
DRM_FORMAT_XRGB8888,
};
@@ -956,7 +946,6 @@ DEFINE_DRM_GEM_CMA_FOPS(repaper_fops);
static struct drm_driver repaper_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &repaper_fops,
- .release = repaper_release,
DRM_GEM_CMA_VMAP_DRIVER_OPS,
.name = "repaper",
.desc = "Pervasive Displays RePaper e-ink panels",
@@ -1013,19 +1002,16 @@ static int repaper_probe(struct spi_device *spi)
}
}
- epd = kzalloc(sizeof(*epd), GFP_KERNEL);
- if (!epd)
- return -ENOMEM;
+ epd = devm_drm_dev_alloc(dev, &repaper_driver,
+ struct repaper_epd, drm);
+ if (IS_ERR(epd))
+ return PTR_ERR(epd);
drm = &epd->drm;
- ret = devm_drm_dev_init(dev, drm, &repaper_driver);
- if (ret) {
- kfree(epd);
+ ret = drmm_mode_config_init(drm);
+ if (ret)
return ret;
- }
-
- drm_mode_config_init(drm);
drm->mode_config.funcs = &repaper_mode_config_funcs;
epd->spi = spi;
diff --git a/drivers/gpu/drm/tiny/st7586.c b/drivers/gpu/drm/tiny/st7586.c
index 9ef559dd3191..2a1fae422f7a 100644
--- a/drivers/gpu/drm/tiny/st7586.c
+++ b/drivers/gpu/drm/tiny/st7586.c
@@ -21,6 +21,7 @@
#include <drm/drm_format_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_rect.h>
@@ -284,7 +285,6 @@ DEFINE_DRM_GEM_CMA_FOPS(st7586_fops);
static struct drm_driver st7586_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &st7586_fops,
- .release = mipi_dbi_release,
DRM_GEM_CMA_VMAP_DRIVER_OPS,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "st7586",
@@ -317,19 +317,13 @@ static int st7586_probe(struct spi_device *spi)
size_t bufsize;
int ret;
- dbidev = kzalloc(sizeof(*dbidev), GFP_KERNEL);
- if (!dbidev)
- return -ENOMEM;
+ dbidev = devm_drm_dev_alloc(dev, &st7586_driver,
+ struct mipi_dbi_dev, drm);
+ if (IS_ERR(dbidev))
+ return PTR_ERR(dbidev);
dbi = &dbidev->dbi;
drm = &dbidev->drm;
- ret = devm_drm_dev_init(dev, drm, &st7586_driver);
- if (ret) {
- kfree(dbidev);
- return ret;
- }
-
- drm_mode_config_init(drm);
bufsize = (st7586_mode.vdisplay + 2) / 3 * st7586_mode.hdisplay;
diff --git a/drivers/gpu/drm/tiny/st7735r.c b/drivers/gpu/drm/tiny/st7735r.c
index 3cd9b8d9888d..0af1b15efdf8 100644
--- a/drivers/gpu/drm/tiny/st7735r.c
+++ b/drivers/gpu/drm/tiny/st7735r.c
@@ -21,6 +21,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
#define ST7735R_FRMCTR1 0xb1
@@ -156,7 +157,6 @@ DEFINE_DRM_GEM_CMA_FOPS(st7735r_fops);
static struct drm_driver st7735r_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &st7735r_fops,
- .release = mipi_dbi_release,
DRM_GEM_CMA_VMAP_DRIVER_OPS,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "st7735r",
@@ -195,22 +195,16 @@ static int st7735r_probe(struct spi_device *spi)
if (!cfg)
cfg = (void *)spi_get_device_id(spi)->driver_data;
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ priv = devm_drm_dev_alloc(dev, &st7735r_driver,
+ struct st7735r_priv, dbidev.drm);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
dbidev = &priv->dbidev;
priv->cfg = cfg;
dbi = &dbidev->dbi;
drm = &dbidev->drm;
- ret = devm_drm_dev_init(dev, drm, &st7735r_driver);
- if (ret) {
- kfree(dbidev);
- return ret;
- }
-
- drm_mode_config_init(drm);
dbi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(dbi->reset)) {
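
The tiny-driver conversions above (hx8357d through st7735r), like the gm12u320 one, all follow the same probe shape. A rough sketch of it, with my_device and my_driver as placeholder names, not actual driver symbols:

#include <drm/drm_drv.h>
#include <drm/drm_managed.h>

struct my_device {				/* placeholder */
	struct drm_device drm;
};

static struct drm_driver my_driver;		/* placeholder, normally filled in */

static int my_probe(struct device *dev)
{
	struct my_device *priv;
	struct drm_device *drm;
	int ret;

	priv = devm_drm_dev_alloc(dev, &my_driver, struct my_device, drm);
	if (IS_ERR(priv))
		return PTR_ERR(priv);
	drm = &priv->drm;

	ret = drmm_mode_config_init(drm);
	if (ret)
		return ret;

	/* hardware setup: plain returns, no manual kfree()/cleanup labels */

	return drm_dev_register(drm, 0);
}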
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 9e07c3f75156..f73b81c2576e 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -588,7 +588,8 @@ static void ttm_bo_release(struct kref *kref)
ttm_mem_io_unlock(man);
}
- if (!dma_resv_test_signaled_rcu(bo->base.resv, true)) {
+ if (!dma_resv_test_signaled_rcu(bo->base.resv, true) ||
+ !dma_resv_trylock(bo->base.resv)) {
/* The BO is not idle, resurrect it for delayed destroy */
ttm_bo_flush_all_fences(bo);
bo->deleted = true;
@@ -621,6 +622,7 @@ static void ttm_bo_release(struct kref *kref)
spin_unlock(&ttm_bo_glob.lru_lock);
ttm_bo_cleanup_memtype_use(bo);
+ dma_resv_unlock(bo->base.resv);
BUG_ON(bo->mem.mm_node != NULL);
atomic_dec(&ttm_bo_glob.bo_count);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 52d2b71f1588..f09b096ba4fd 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -257,54 +257,6 @@ static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
return 0;
}
-#ifdef CONFIG_X86
-#define __ttm_kmap_atomic_prot(__page, __prot) kmap_atomic_prot(__page, __prot)
-#define __ttm_kunmap_atomic(__addr) kunmap_atomic(__addr)
-#else
-#define __ttm_kmap_atomic_prot(__page, __prot) vmap(&__page, 1, 0, __prot)
-#define __ttm_kunmap_atomic(__addr) vunmap(__addr)
-#endif
-
-
-/**
- * ttm_kmap_atomic_prot - Efficient kernel map of a single page with
- * specified page protection.
- *
- * @page: The page to map.
- * @prot: The page protection.
- *
- * This function maps a TTM page using the kmap_atomic api if available,
- * otherwise falls back to vmap. The user must make sure that the
- * specified page does not have an aliased mapping with a different caching
- * policy unless the architecture explicitly allows it. Also mapping and
- * unmapping using this api must be correctly nested. Unmapping should
- * occur in the reverse order of mapping.
- */
-void *ttm_kmap_atomic_prot(struct page *page, pgprot_t prot)
-{
- if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
- return kmap_atomic(page);
- else
- return __ttm_kmap_atomic_prot(page, prot);
-}
-EXPORT_SYMBOL(ttm_kmap_atomic_prot);
-
-/**
- * ttm_kunmap_atomic_prot - Unmap a page that was mapped using
- * ttm_kmap_atomic_prot.
- *
- * @addr: The virtual address from the map.
- * @prot: The page protection.
- */
-void ttm_kunmap_atomic_prot(void *addr, pgprot_t prot)
-{
- if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
- kunmap_atomic(addr);
- else
- __ttm_kunmap_atomic(addr);
-}
-EXPORT_SYMBOL(ttm_kunmap_atomic_prot);
-
static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
unsigned long page,
pgprot_t prot)
@@ -316,13 +268,13 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
return -ENOMEM;
src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
- dst = ttm_kmap_atomic_prot(d, prot);
+ dst = kmap_atomic_prot(d, prot);
if (!dst)
return -ENOMEM;
memcpy_fromio(dst, src, PAGE_SIZE);
- ttm_kunmap_atomic_prot(dst, prot);
+ kunmap_atomic(dst);
return 0;
}
@@ -338,13 +290,13 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
return -ENOMEM;
dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
- src = ttm_kmap_atomic_prot(s, prot);
+ src = kmap_atomic_prot(s, prot);
if (!src)
return -ENOMEM;
memcpy_toio(dst, src, PAGE_SIZE);
- ttm_kunmap_atomic_prot(src, prot);
+ kunmap_atomic(src);
return 0;
}
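
With the ttm_kmap_atomic_prot() wrappers gone, the copy helpers above call kmap_atomic_prot() directly; the hunks assume that API is usable here. A hedged sketch of the per-page copy pattern:

#include <linux/highmem.h>
#include <linux/io.h>

/* Map one page with the requested protection just long enough to copy it. */
static void copy_page_to_io(void __iomem *dst, struct page *p, pgprot_t prot)
{
	void *src = kmap_atomic_prot(p, prot);

	memcpy_toio(dst, src, PAGE_SIZE);
	kunmap_atomic(src);
}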
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 0ad30b112982..a43aa7275f12 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -58,7 +58,7 @@ static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
goto out_clear;
/*
- * If possible, avoid waiting for GPU with mmap_sem
+ * If possible, avoid waiting for GPU with mmap_lock
* held. We only do this if the fault allows retry and this
* is the first attempt.
*/
@@ -68,7 +68,7 @@ static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
goto out_unlock;
ttm_bo_get(bo);
- up_read(&vmf->vma->vm_mm->mmap_sem);
+ mmap_read_unlock(vmf->vma->vm_mm);
(void) dma_fence_wait(bo->moving, true);
dma_resv_unlock(bo->base.resv);
ttm_bo_put(bo);
@@ -131,20 +131,20 @@ vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
{
/*
* Work around locking order reversal in fault / nopfn
- * between mmap_sem and bo_reserve: Perform a trylock operation
+ * between mmap_lock and bo_reserve: Perform a trylock operation
* for reserve, and if it fails, retry the fault after waiting
* for the buffer to become unreserved.
*/
if (unlikely(!dma_resv_trylock(bo->base.resv))) {
/*
* If the fault allows retry and this is the first
- * fault attempt, we try to release the mmap_sem
+ * fault attempt, we try to release the mmap_lock
* before waiting
*/
if (fault_flag_allow_retry_first(vmf->flags)) {
if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
ttm_bo_get(bo);
- up_read(&vmf->vma->vm_mm->mmap_sem);
+ mmap_read_unlock(vmf->vma->vm_mm);
if (!dma_resv_lock_interruptible(bo->base.resv,
NULL))
dma_resv_unlock(bo->base.resv);
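
The ttm_bo_vm.c hunks are part of the tree-wide mmap_sem to mmap_lock rename: the comments change spelling and the raw rwsem calls become wrappers. Roughly (sketch only):

#include <linux/mmap_lock.h>

static void mmap_lock_spelling(struct mm_struct *mm)
{
	mmap_read_lock(mm);		/* was: down_read(&mm->mmap_sem) */
	mmap_read_unlock(mm);		/* was: up_read(&mm->mmap_sem) */
}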
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index 0afdfb0d1fe1..cdc1c42e1669 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -59,7 +59,7 @@ static int udl_get_modes(struct drm_connector *connector)
static enum drm_mode_status udl_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct udl_device *udl = connector->dev->dev_private;
+ struct udl_device *udl = to_udl(connector->dev);
if (!udl->sku_pixel_limit)
return 0;
@@ -72,7 +72,7 @@ static enum drm_mode_status udl_mode_valid(struct drm_connector *connector,
static enum drm_connector_status
udl_detect(struct drm_connector *connector, bool force)
{
- struct udl_device *udl = connector->dev->dev_private;
+ struct udl_device *udl = to_udl(connector->dev);
struct udl_drm_connector *udl_connector =
container_of(connector,
struct udl_drm_connector,
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index e6c1cd77d4d4..d1aa50fd6d65 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -10,6 +10,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_file.h>
#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_managed.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_print.h>
@@ -33,17 +34,8 @@ static int udl_usb_resume(struct usb_interface *interface)
DEFINE_DRM_GEM_FOPS(udl_driver_fops);
-static void udl_driver_release(struct drm_device *dev)
-{
- udl_fini(dev);
- udl_modeset_cleanup(dev);
- drm_dev_fini(dev);
- kfree(dev);
-}
-
static struct drm_driver driver = {
.driver_features = DRIVER_ATOMIC | DRIVER_GEM | DRIVER_MODESET,
- .release = udl_driver_release,
/* gem hooks */
.gem_create_object = udl_driver_gem_create_object,
@@ -65,27 +57,19 @@ static struct udl_device *udl_driver_create(struct usb_interface *interface)
struct udl_device *udl;
int r;
- udl = kzalloc(sizeof(*udl), GFP_KERNEL);
- if (!udl)
- return ERR_PTR(-ENOMEM);
-
- r = drm_dev_init(&udl->drm, &driver, &interface->dev);
- if (r) {
- kfree(udl);
- return ERR_PTR(r);
- }
+ udl = devm_drm_dev_alloc(&interface->dev, &driver,
+ struct udl_device, drm);
+ if (IS_ERR(udl))
+ return udl;
udl->udev = udev;
- udl->drm.dev_private = udl;
r = udl_init(udl);
- if (r) {
- drm_dev_fini(&udl->drm);
- kfree(udl);
+ if (r)
return ERR_PTR(r);
- }
usb_set_intfdata(interface, udl);
+
return udl;
}
@@ -101,31 +85,22 @@ static int udl_usb_probe(struct usb_interface *interface,
r = drm_dev_register(&udl->drm, 0);
if (r)
- goto err_free;
+ return r;
DRM_INFO("Initialized udl on minor %d\n", udl->drm.primary->index);
- r = drm_fbdev_generic_setup(&udl->drm, 0);
- if (r)
- goto err_drm_dev_unregister;
+ drm_fbdev_generic_setup(&udl->drm, 0);
return 0;
-
-err_drm_dev_unregister:
- drm_dev_unregister(&udl->drm);
-err_free:
- drm_dev_put(&udl->drm);
- return r;
}
static void udl_usb_disconnect(struct usb_interface *interface)
{
struct drm_device *dev = usb_get_intfdata(interface);
- drm_kms_helper_poll_disable(dev);
+ drm_kms_helper_poll_fini(dev);
udl_drop_usb(dev);
drm_dev_unplug(dev);
- drm_dev_put(dev);
}
/*
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index e67227c44cc4..2642f94a63fc 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -68,7 +68,6 @@ struct udl_device {
/* modeset */
int udl_modeset_init(struct drm_device *dev);
-void udl_modeset_cleanup(struct drm_device *dev);
struct drm_connector *udl_connector_init(struct drm_device *dev);
struct urb *udl_get_urb(struct drm_device *dev);
@@ -77,7 +76,6 @@ int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len);
void udl_urb_completion(struct urb *urb);
int udl_init(struct udl_device *udl);
-void udl_fini(struct drm_device *dev);
int udl_render_hline(struct drm_device *dev, int log_bpp, struct urb **urb_ptr,
const char *front, char **urb_buf_ptr,
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index 538718919916..f5d27f2a5654 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -351,13 +351,3 @@ int udl_drop_usb(struct drm_device *dev)
udl_free_urb_list(dev);
return 0;
}
-
-void udl_fini(struct drm_device *dev)
-{
- struct udl_device *udl = to_udl(dev);
-
- drm_kms_helper_poll_fini(dev);
-
- if (udl->urbs.count)
- udl_free_urb_list(dev);
-}
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index d59ebac70b15..fef43f4e3bac 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -215,7 +215,7 @@ static char *udl_dummy_render(char *wrptr)
static int udl_crtc_write_mode_to_hw(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct udl_device *udl = dev->dev_private;
+ struct udl_device *udl = to_udl(dev);
struct urb *urb;
char *buf;
int retval;
@@ -266,8 +266,8 @@ static int udl_aligned_damage_clip(struct drm_rect *clip, int x, int y,
return 0;
}
-int udl_handle_damage(struct drm_framebuffer *fb, int x, int y,
- int width, int height)
+static int udl_handle_damage(struct drm_framebuffer *fb, int x, int y,
+ int width, int height)
{
struct drm_device *dev = fb->dev;
struct dma_buf_attachment *import_attach = fb->obj[0]->import_attach;
@@ -369,7 +369,7 @@ udl_simple_display_pipe_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc *crtc = &pipe->crtc;
struct drm_device *dev = crtc->dev;
struct drm_framebuffer *fb = plane_state->fb;
- struct udl_device *udl = dev->dev_private;
+ struct udl_device *udl = to_udl(dev);
struct drm_display_mode *mode = &crtc_state->mode;
char *buf;
char *wrptr;
@@ -464,11 +464,13 @@ static const struct drm_mode_config_funcs udl_mode_funcs = {
int udl_modeset_init(struct drm_device *dev)
{
size_t format_count = ARRAY_SIZE(udl_simple_display_pipe_formats);
- struct udl_device *udl = dev->dev_private;
+ struct udl_device *udl = to_udl(dev);
struct drm_connector *connector;
int ret;
- drm_mode_config_init(dev);
+ ret = drmm_mode_config_init(dev);
+ if (ret)
+ return ret;
dev->mode_config.min_width = 640;
dev->mode_config.min_height = 480;
@@ -482,10 +484,8 @@ int udl_modeset_init(struct drm_device *dev)
dev->mode_config.funcs = &udl_mode_funcs;
connector = udl_connector_init(dev);
- if (IS_ERR(connector)) {
- ret = PTR_ERR(connector);
- goto err_drm_mode_config_cleanup;
- }
+ if (IS_ERR(connector))
+ return PTR_ERR(connector);
format_count = ARRAY_SIZE(udl_simple_display_pipe_formats);
@@ -494,18 +494,9 @@ int udl_modeset_init(struct drm_device *dev)
udl_simple_display_pipe_formats,
format_count, NULL, connector);
if (ret)
- goto err_drm_mode_config_cleanup;
+ return ret;
drm_mode_config_reset(dev);
return 0;
-
-err_drm_mode_config_cleanup:
- drm_mode_config_cleanup(dev);
- return ret;
-}
-
-void udl_modeset_cleanup(struct drm_device *dev)
-{
- drm_mode_config_cleanup(dev);
}
diff --git a/drivers/gpu/drm/v3d/v3d_debugfs.c b/drivers/gpu/drm/v3d/v3d_debugfs.c
index 9e953ce64ef7..e76b24bb8828 100644
--- a/drivers/gpu/drm/v3d/v3d_debugfs.c
+++ b/drivers/gpu/drm/v3d/v3d_debugfs.c
@@ -132,7 +132,7 @@ static int v3d_v3d_debugfs_ident(struct seq_file *m, void *unused)
u32 ident0, ident1, ident2, ident3, cores;
int ret, core;
- ret = pm_runtime_get_sync(v3d->dev);
+ ret = pm_runtime_get_sync(v3d->drm.dev);
if (ret < 0)
return ret;
@@ -187,8 +187,8 @@ static int v3d_v3d_debugfs_ident(struct seq_file *m, void *unused)
(misccfg & V3D_MISCCFG_OVRTMUOUT) != 0);
}
- pm_runtime_mark_last_busy(v3d->dev);
- pm_runtime_put_autosuspend(v3d->dev);
+ pm_runtime_mark_last_busy(v3d->drm.dev);
+ pm_runtime_put_autosuspend(v3d->drm.dev);
return 0;
}
@@ -219,7 +219,7 @@ static int v3d_measure_clock(struct seq_file *m, void *unused)
int measure_ms = 1000;
int ret;
- ret = pm_runtime_get_sync(v3d->dev);
+ ret = pm_runtime_get_sync(v3d->drm.dev);
if (ret < 0)
return ret;
@@ -245,8 +245,8 @@ static int v3d_measure_clock(struct seq_file *m, void *unused)
cycles / (measure_ms * 1000),
(cycles / (measure_ms * 100)) % 10);
- pm_runtime_mark_last_busy(v3d->dev);
- pm_runtime_put_autosuspend(v3d->dev);
+ pm_runtime_mark_last_busy(v3d->drm.dev);
+ pm_runtime_put_autosuspend(v3d->drm.dev);
return 0;
}
@@ -258,10 +258,10 @@ static const struct drm_info_list v3d_debugfs_list[] = {
{"bo_stats", v3d_debugfs_bo_stats, 0},
};
-int
+void
v3d_debugfs_init(struct drm_minor *minor)
{
- return drm_debugfs_create_files(v3d_debugfs_list,
- ARRAY_SIZE(v3d_debugfs_list),
- minor->debugfs_root, minor);
+ drm_debugfs_create_files(v3d_debugfs_list,
+ ARRAY_SIZE(v3d_debugfs_list),
+ minor->debugfs_root, minor);
}
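
v3d_debugfs_init() (and the vc4/virtio hooks further down) now return void: debugfs file creation is treated as best-effort, so the hooks just register their entries and return. Hypothetical shape of such a hook, with my_debugfs_list as a placeholder:

static const struct drm_info_list my_debugfs_list[] = {
	/* ... per-driver entries ... */
};

static void my_debugfs_init(struct drm_minor *minor)
{
	drm_debugfs_create_files(my_debugfs_list, ARRAY_SIZE(my_debugfs_list),
				 minor->debugfs_root, minor);
}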
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index eaa8e9682373..82a7dfdd14c2 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -25,6 +25,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_managed.h>
#include <uapi/drm/v3d_drm.h>
#include "v3d_drv.h"
@@ -104,7 +105,7 @@ static int v3d_get_param_ioctl(struct drm_device *dev, void *data,
if (args->value != 0)
return -EINVAL;
- ret = pm_runtime_get_sync(v3d->dev);
+ ret = pm_runtime_get_sync(v3d->drm.dev);
if (ret < 0)
return ret;
if (args->param >= DRM_V3D_PARAM_V3D_CORE0_IDENT0 &&
@@ -113,8 +114,8 @@ static int v3d_get_param_ioctl(struct drm_device *dev, void *data,
} else {
args->value = V3D_READ(offset);
}
- pm_runtime_mark_last_busy(v3d->dev);
- pm_runtime_put_autosuspend(v3d->dev);
+ pm_runtime_mark_last_busy(v3d->drm.dev);
+ pm_runtime_put_autosuspend(v3d->drm.dev);
return 0;
}
@@ -234,9 +235,9 @@ static int
map_regs(struct v3d_dev *v3d, void __iomem **regs, const char *name)
{
struct resource *res =
- platform_get_resource_byname(v3d->pdev, IORESOURCE_MEM, name);
+ platform_get_resource_byname(v3d_to_pdev(v3d), IORESOURCE_MEM, name);
- *regs = devm_ioremap_resource(v3d->dev, res);
+ *regs = devm_ioremap_resource(v3d->drm.dev, res);
return PTR_ERR_OR_ZERO(*regs);
}
@@ -250,20 +251,21 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
u32 ident1;
- v3d = kzalloc(sizeof(*v3d), GFP_KERNEL);
- if (!v3d)
- return -ENOMEM;
- v3d->dev = dev;
- v3d->pdev = pdev;
+ v3d = devm_drm_dev_alloc(dev, &v3d_drm_driver, struct v3d_dev, drm);
+ if (IS_ERR(v3d))
+ return PTR_ERR(v3d);
+
drm = &v3d->drm;
+ platform_set_drvdata(pdev, drm);
+
ret = map_regs(v3d, &v3d->hub_regs, "hub");
if (ret)
- goto dev_free;
+ return ret;
ret = map_regs(v3d, &v3d->core_regs[0], "core0");
if (ret)
- goto dev_free;
+ return ret;
mmu_debug = V3D_READ(V3D_MMU_DEBUG_INFO);
dev->coherent_dma_mask =
@@ -281,45 +283,37 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
ret = PTR_ERR(v3d->reset);
if (ret == -EPROBE_DEFER)
- goto dev_free;
+ return ret;
v3d->reset = NULL;
ret = map_regs(v3d, &v3d->bridge_regs, "bridge");
if (ret) {
dev_err(dev,
"Failed to get reset control or bridge regs\n");
- goto dev_free;
+ return ret;
}
}
if (v3d->ver < 41) {
ret = map_regs(v3d, &v3d->gca_regs, "gca");
if (ret)
- goto dev_free;
+ return ret;
}
v3d->mmu_scratch = dma_alloc_wc(dev, 4096, &v3d->mmu_scratch_paddr,
GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
if (!v3d->mmu_scratch) {
dev_err(dev, "Failed to allocate MMU scratch page\n");
- ret = -ENOMEM;
- goto dev_free;
+ return -ENOMEM;
}
pm_runtime_use_autosuspend(dev);
pm_runtime_set_autosuspend_delay(dev, 50);
pm_runtime_enable(dev);
- ret = drm_dev_init(&v3d->drm, &v3d_drm_driver, dev);
- if (ret)
- goto dma_free;
-
- platform_set_drvdata(pdev, drm);
- drm->dev_private = v3d;
-
ret = v3d_gem_init(drm);
if (ret)
- goto dev_destroy;
+ goto dma_free;
ret = v3d_irq_init(v3d);
if (ret)
@@ -335,12 +329,8 @@ irq_disable:
v3d_irq_disable(v3d);
gem_destroy:
v3d_gem_destroy(drm);
-dev_destroy:
- drm_dev_put(drm);
dma_free:
dma_free_wc(dev, 4096, v3d->mmu_scratch, v3d->mmu_scratch_paddr);
-dev_free:
- kfree(v3d);
return ret;
}
@@ -353,9 +343,8 @@ static int v3d_platform_drm_remove(struct platform_device *pdev)
v3d_gem_destroy(drm);
- drm_dev_put(drm);
-
- dma_free_wc(v3d->dev, 4096, v3d->mmu_scratch, v3d->mmu_scratch_paddr);
+ dma_free_wc(v3d->drm.dev, 4096, v3d->mmu_scratch,
+ v3d->mmu_scratch_paddr);
return 0;
}
diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
index ac2603334587..8a390738d65b 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.h
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -14,7 +14,6 @@
#include "uapi/drm/v3d_drm.h"
struct clk;
-struct device;
struct platform_device;
struct reset_control;
@@ -47,8 +46,6 @@ struct v3d_dev {
int ver;
bool single_irq_line;
- struct device *dev;
- struct platform_device *pdev;
void __iomem *hub_regs;
void __iomem *core_regs[3];
void __iomem *bridge_regs;
@@ -121,7 +118,7 @@ struct v3d_dev {
static inline struct v3d_dev *
to_v3d_dev(struct drm_device *dev)
{
- return (struct v3d_dev *)dev->dev_private;
+ return container_of(dev, struct v3d_dev, drm);
}
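
Dropping dev_private works because the drm_device is embedded in the driver structure, so the upcast in to_v3d_dev() (and the to_gm12u320/to_udl/to_vbox_dev helpers elsewhere in this series) is a plain container_of(). Generic illustration, not driver code:

struct my_dev {				/* hypothetical wrapper */
	struct drm_device drm;
	void __iomem *regs;
};

static inline struct my_dev *to_my_dev(struct drm_device *dev)
{
	return container_of(dev, struct my_dev, drm);
}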
static inline bool
@@ -130,6 +127,8 @@ v3d_has_csd(struct v3d_dev *v3d)
return v3d->ver >= 41;
}
+#define v3d_to_pdev(v3d) to_platform_device((v3d)->drm.dev)
+
/* The per-fd struct, which tracks the MMU mappings. */
struct v3d_file_priv {
struct v3d_dev *v3d;
@@ -316,7 +315,7 @@ struct drm_gem_object *v3d_prime_import_sg_table(struct drm_device *dev,
struct sg_table *sgt);
/* v3d_debugfs.c */
-int v3d_debugfs_init(struct drm_minor *minor);
+void v3d_debugfs_init(struct drm_minor *minor);
/* v3d_fence.c */
extern const struct dma_fence_ops v3d_fence_ops;
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index 549dde83408b..09a7639cf161 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -370,8 +370,8 @@ v3d_job_free(struct kref *ref)
dma_fence_put(job->irq_fence);
dma_fence_put(job->done_fence);
- pm_runtime_mark_last_busy(job->v3d->dev);
- pm_runtime_put_autosuspend(job->v3d->dev);
+ pm_runtime_mark_last_busy(job->v3d->drm.dev);
+ pm_runtime_put_autosuspend(job->v3d->drm.dev);
kfree(job);
}
@@ -439,7 +439,7 @@ v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
job->v3d = v3d;
job->free = free;
- ret = pm_runtime_get_sync(v3d->dev);
+ ret = pm_runtime_get_sync(v3d->drm.dev);
if (ret < 0)
return ret;
@@ -458,7 +458,7 @@ v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
return 0;
fail:
xa_destroy(&job->deps);
- pm_runtime_put_autosuspend(v3d->dev);
+ pm_runtime_put_autosuspend(v3d->drm.dev);
return ret;
}
@@ -886,12 +886,12 @@ v3d_gem_init(struct drm_device *dev)
*/
drm_mm_init(&v3d->mm, 1, pt_size / sizeof(u32) - 1);
- v3d->pt = dma_alloc_wc(v3d->dev, pt_size,
+ v3d->pt = dma_alloc_wc(v3d->drm.dev, pt_size,
&v3d->pt_paddr,
GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
if (!v3d->pt) {
drm_mm_takedown(&v3d->mm);
- dev_err(v3d->dev,
+ dev_err(v3d->drm.dev,
"Failed to allocate page tables. "
"Please ensure you have CMA enabled.\n");
return -ENOMEM;
@@ -903,7 +903,7 @@ v3d_gem_init(struct drm_device *dev)
ret = v3d_sched_init(v3d);
if (ret) {
drm_mm_takedown(&v3d->mm);
- dma_free_coherent(v3d->dev, 4096 * 1024, (void *)v3d->pt,
+ dma_free_coherent(v3d->drm.dev, 4096 * 1024, (void *)v3d->pt,
v3d->pt_paddr);
}
@@ -925,5 +925,6 @@ v3d_gem_destroy(struct drm_device *dev)
drm_mm_takedown(&v3d->mm);
- dma_free_coherent(v3d->dev, 4096 * 1024, (void *)v3d->pt, v3d->pt_paddr);
+ dma_free_coherent(v3d->drm.dev, 4096 * 1024, (void *)v3d->pt,
+ v3d->pt_paddr);
}
diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c
index 662e67279a7b..51b65263c657 100644
--- a/drivers/gpu/drm/v3d/v3d_irq.c
+++ b/drivers/gpu/drm/v3d/v3d_irq.c
@@ -128,7 +128,7 @@ v3d_irq(int irq, void *arg)
* always-allowed mode.
*/
if (intsts & V3D_INT_GMPV)
- dev_err(v3d->dev, "GMP violation\n");
+ dev_err(v3d->drm.dev, "GMP violation\n");
/* V3D 4.2 wires the hub and core IRQs together, so if we &
* didn't see the common one then check hub for MMU IRQs.
@@ -189,7 +189,7 @@ v3d_hub_irq(int irq, void *arg)
client = v3d41_axi_ids[axi_id];
}
- dev_err(v3d->dev, "MMU error from client %s (%d) at 0x%llx%s%s%s\n",
+ dev_err(v3d->drm.dev, "MMU error from client %s (%d) at 0x%llx%s%s%s\n",
client, axi_id, (long long)vio_addr,
((intsts & V3D_HUB_INT_MMU_WRV) ?
", write violation" : ""),
@@ -217,16 +217,17 @@ v3d_irq_init(struct v3d_dev *v3d)
V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS);
V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS);
- irq1 = platform_get_irq(v3d->pdev, 1);
+ irq1 = platform_get_irq(v3d_to_pdev(v3d), 1);
if (irq1 == -EPROBE_DEFER)
return irq1;
if (irq1 > 0) {
- ret = devm_request_irq(v3d->dev, irq1,
+ ret = devm_request_irq(v3d->drm.dev, irq1,
v3d_irq, IRQF_SHARED,
"v3d_core0", v3d);
if (ret)
goto fail;
- ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 0),
+ ret = devm_request_irq(v3d->drm.dev,
+ platform_get_irq(v3d_to_pdev(v3d), 0),
v3d_hub_irq, IRQF_SHARED,
"v3d_hub", v3d);
if (ret)
@@ -234,7 +235,8 @@ v3d_irq_init(struct v3d_dev *v3d)
} else {
v3d->single_irq_line = true;
- ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 0),
+ ret = devm_request_irq(v3d->drm.dev,
+ platform_get_irq(v3d_to_pdev(v3d), 0),
v3d_irq, IRQF_SHARED,
"v3d", v3d);
if (ret)
@@ -246,7 +248,7 @@ v3d_irq_init(struct v3d_dev *v3d)
fail:
if (ret != -EPROBE_DEFER)
- dev_err(v3d->dev, "IRQ setup failed: %d\n", ret);
+ dev_err(v3d->drm.dev, "IRQ setup failed: %d\n", ret);
return ret;
}
diff --git a/drivers/gpu/drm/v3d/v3d_mmu.c b/drivers/gpu/drm/v3d/v3d_mmu.c
index 395e81d97163..3b81ea28c0bb 100644
--- a/drivers/gpu/drm/v3d/v3d_mmu.c
+++ b/drivers/gpu/drm/v3d/v3d_mmu.c
@@ -40,7 +40,7 @@ static int v3d_mmu_flush_all(struct v3d_dev *v3d)
ret = wait_for(!(V3D_READ(V3D_MMU_CTL) &
V3D_MMU_CTL_TLB_CLEARING), 100);
if (ret)
- dev_err(v3d->dev, "TLB clear wait idle pre-wait failed\n");
+ dev_err(v3d->drm.dev, "TLB clear wait idle pre-wait failed\n");
V3D_WRITE(V3D_MMU_CTL, V3D_READ(V3D_MMU_CTL) |
V3D_MMU_CTL_TLB_CLEAR);
@@ -52,14 +52,14 @@ static int v3d_mmu_flush_all(struct v3d_dev *v3d)
ret = wait_for(!(V3D_READ(V3D_MMU_CTL) &
V3D_MMU_CTL_TLB_CLEARING), 100);
if (ret) {
- dev_err(v3d->dev, "TLB clear wait idle failed\n");
+ dev_err(v3d->drm.dev, "TLB clear wait idle failed\n");
return ret;
}
ret = wait_for(!(V3D_READ(V3D_MMUC_CONTROL) &
V3D_MMUC_CONTROL_FLUSHING), 100);
if (ret)
- dev_err(v3d->dev, "MMUC flush wait idle failed\n");
+ dev_err(v3d->drm.dev, "MMUC flush wait idle failed\n");
return ret;
}
@@ -109,7 +109,7 @@ void v3d_mmu_insert_ptes(struct v3d_bo *bo)
shmem_obj->base.size >> V3D_MMU_PAGE_SHIFT);
if (v3d_mmu_flush_all(v3d))
- dev_err(v3d->dev, "MMU flush timeout\n");
+ dev_err(v3d->drm.dev, "MMU flush timeout\n");
}
void v3d_mmu_remove_ptes(struct v3d_bo *bo)
@@ -122,5 +122,5 @@ void v3d_mmu_remove_ptes(struct v3d_bo *bo)
v3d->pt[page] = 0;
if (v3d_mmu_flush_all(v3d))
- dev_err(v3d->dev, "MMU flush timeout\n");
+ dev_err(v3d->drm.dev, "MMU flush timeout\n");
}
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index 8c2df6d95283..0747614a78f0 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -403,7 +403,7 @@ v3d_sched_init(struct v3d_dev *v3d)
msecs_to_jiffies(hang_limit_ms),
"v3d_bin");
if (ret) {
- dev_err(v3d->dev, "Failed to create bin scheduler: %d.", ret);
+ dev_err(v3d->drm.dev, "Failed to create bin scheduler: %d.", ret);
return ret;
}
@@ -413,7 +413,7 @@ v3d_sched_init(struct v3d_dev *v3d)
msecs_to_jiffies(hang_limit_ms),
"v3d_render");
if (ret) {
- dev_err(v3d->dev, "Failed to create render scheduler: %d.",
+ dev_err(v3d->drm.dev, "Failed to create render scheduler: %d.",
ret);
v3d_sched_fini(v3d);
return ret;
@@ -425,7 +425,7 @@ v3d_sched_init(struct v3d_dev *v3d)
msecs_to_jiffies(hang_limit_ms),
"v3d_tfu");
if (ret) {
- dev_err(v3d->dev, "Failed to create TFU scheduler: %d.",
+ dev_err(v3d->drm.dev, "Failed to create TFU scheduler: %d.",
ret);
v3d_sched_fini(v3d);
return ret;
@@ -438,7 +438,7 @@ v3d_sched_init(struct v3d_dev *v3d)
msecs_to_jiffies(hang_limit_ms),
"v3d_csd");
if (ret) {
- dev_err(v3d->dev, "Failed to create CSD scheduler: %d.",
+ dev_err(v3d->drm.dev, "Failed to create CSD scheduler: %d.",
ret);
v3d_sched_fini(v3d);
return ret;
@@ -450,7 +450,7 @@ v3d_sched_init(struct v3d_dev *v3d)
msecs_to_jiffies(hang_limit_ms),
"v3d_cache_clean");
if (ret) {
- dev_err(v3d->dev, "Failed to create CACHE_CLEAN scheduler: %d.",
+ dev_err(v3d->drm.dev, "Failed to create CACHE_CLEAN scheduler: %d.",
ret);
v3d_sched_fini(v3d);
return ret;
diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.c b/drivers/gpu/drm/vboxvideo/vbox_drv.c
index ac8f75db2ecd..cf2e3e6a2388 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_drv.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_drv.c
@@ -17,6 +17,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
+#include <drm/drm_managed.h>
#include "vbox_drv.h"
@@ -45,28 +46,22 @@ static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
return ret;
- vbox = kzalloc(sizeof(*vbox), GFP_KERNEL);
- if (!vbox)
- return -ENOMEM;
-
- ret = drm_dev_init(&vbox->ddev, &driver, &pdev->dev);
- if (ret) {
- kfree(vbox);
- return ret;
- }
+ vbox = devm_drm_dev_alloc(&pdev->dev, &driver,
+ struct vbox_private, ddev);
+ if (IS_ERR(vbox))
+ return PTR_ERR(vbox);
vbox->ddev.pdev = pdev;
- vbox->ddev.dev_private = vbox;
pci_set_drvdata(pdev, vbox);
mutex_init(&vbox->hw_mutex);
- ret = pci_enable_device(pdev);
+ ret = pcim_enable_device(pdev);
if (ret)
- goto err_dev_put;
+ return ret;
ret = vbox_hw_init(vbox);
if (ret)
- goto err_pci_disable;
+ return ret;
ret = vbox_mm_init(vbox);
if (ret)
@@ -80,14 +75,12 @@ static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto err_mode_fini;
- ret = drm_fbdev_generic_setup(&vbox->ddev, 32);
- if (ret)
- goto err_irq_fini;
-
ret = drm_dev_register(&vbox->ddev, 0);
if (ret)
goto err_irq_fini;
+ drm_fbdev_generic_setup(&vbox->ddev, 32);
+
return 0;
err_irq_fini:
@@ -98,10 +91,6 @@ err_mm_fini:
vbox_mm_fini(vbox);
err_hw_fini:
vbox_hw_fini(vbox);
-err_pci_disable:
- pci_disable_device(pdev);
-err_dev_put:
- drm_dev_put(&vbox->ddev);
return ret;
}
@@ -114,7 +103,6 @@ static void vbox_pci_remove(struct pci_dev *pdev)
vbox_mode_fini(vbox);
vbox_mm_fini(vbox);
vbox_hw_fini(vbox);
- drm_dev_put(&vbox->ddev);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.h b/drivers/gpu/drm/vboxvideo/vbox_drv.h
index 87421903816c..ac7c2effc46f 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_drv.h
+++ b/drivers/gpu/drm/vboxvideo/vbox_drv.h
@@ -127,6 +127,7 @@ struct vbox_encoder {
#define to_vbox_crtc(x) container_of(x, struct vbox_crtc, base)
#define to_vbox_connector(x) container_of(x, struct vbox_connector, base)
#define to_vbox_encoder(x) container_of(x, struct vbox_encoder, base)
+#define to_vbox_dev(x) container_of(x, struct vbox_private, ddev)
bool vbox_check_supported(u16 id);
int vbox_hw_init(struct vbox_private *vbox);
diff --git a/drivers/gpu/drm/vboxvideo/vbox_irq.c b/drivers/gpu/drm/vboxvideo/vbox_irq.c
index 16a1e29f5292..631657fa554f 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_irq.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_irq.c
@@ -34,7 +34,7 @@ void vbox_report_hotplug(struct vbox_private *vbox)
irqreturn_t vbox_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *)arg;
- struct vbox_private *vbox = (struct vbox_private *)dev->dev_private;
+ struct vbox_private *vbox = to_vbox_dev(dev);
u32 host_flags = vbox_get_flags(vbox);
if (!(host_flags & HGSMIHOSTFLAGS_IRQ))
diff --git a/drivers/gpu/drm/vboxvideo/vbox_main.c b/drivers/gpu/drm/vboxvideo/vbox_main.c
index 9dcab115a261..d68d9bad7674 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_main.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_main.c
@@ -71,8 +71,6 @@ static void vbox_accel_fini(struct vbox_private *vbox)
for (i = 0; i < vbox->num_crtcs; ++i)
vbva_disable(&vbox->vbva_info[i], vbox->guest_pool, i);
-
- pci_iounmap(vbox->ddev.pdev, vbox->vbva_buffers);
}
/* Do we support the 4.3 plus mode hint reporting interface? */
@@ -123,21 +121,22 @@ int vbox_hw_init(struct vbox_private *vbox)
return -ENOMEM;
/* Create guest-heap mem-pool use 2^4 = 16 byte chunks */
- vbox->guest_pool = gen_pool_create(4, -1);
+ vbox->guest_pool = devm_gen_pool_create(vbox->ddev.dev, 4, -1,
+ "vboxvideo-accel");
if (!vbox->guest_pool)
- goto err_unmap_guest_heap;
+ return -ENOMEM;
ret = gen_pool_add_virt(vbox->guest_pool,
(unsigned long)vbox->guest_heap,
GUEST_HEAP_OFFSET(vbox),
GUEST_HEAP_USABLE_SIZE, -1);
if (ret)
- goto err_destroy_guest_pool;
+ return ret;
ret = hgsmi_test_query_conf(vbox->guest_pool);
if (ret) {
DRM_ERROR("vboxvideo: hgsmi_test_query_conf failed\n");
- goto err_destroy_guest_pool;
+ return ret;
}
/* Reduce available VRAM size to reflect the guest heap. */
@@ -149,33 +148,23 @@ int vbox_hw_init(struct vbox_private *vbox)
if (!have_hgsmi_mode_hints(vbox)) {
ret = -ENOTSUPP;
- goto err_destroy_guest_pool;
+ return ret;
}
vbox->last_mode_hints = devm_kcalloc(vbox->ddev.dev, vbox->num_crtcs,
sizeof(struct vbva_modehint),
GFP_KERNEL);
- if (!vbox->last_mode_hints) {
- ret = -ENOMEM;
- goto err_destroy_guest_pool;
- }
+ if (!vbox->last_mode_hints)
+ return -ENOMEM;
ret = vbox_accel_init(vbox);
if (ret)
- goto err_destroy_guest_pool;
+ return ret;
return 0;
-
-err_destroy_guest_pool:
- gen_pool_destroy(vbox->guest_pool);
-err_unmap_guest_heap:
- pci_iounmap(vbox->ddev.pdev, vbox->guest_heap);
- return ret;
}
void vbox_hw_fini(struct vbox_private *vbox)
{
vbox_accel_fini(vbox);
- gen_pool_destroy(vbox->guest_pool);
- pci_iounmap(vbox->ddev.pdev, vbox->guest_heap);
}
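
vbox_hw_init() above can now fail with plain returns because the resources it acquires are device-managed (pcim_enable_device(), devm_gen_pool_create(), devm_kcalloc()); they are released automatically when the driver unbinds. Sketch, not driver code:

#include <linux/pci.h>
#include <linux/genalloc.h>

static int managed_setup(struct pci_dev *pdev)
{
	struct gen_pool *pool;
	int ret;

	ret = pcim_enable_device(pdev);	/* undone automatically on unbind */
	if (ret)
		return ret;

	pool = devm_gen_pool_create(&pdev->dev, 4, -1, "example-pool");
	if (!pool)
		return -ENOMEM;

	return 0;			/* no error labels needed */
}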
diff --git a/drivers/gpu/drm/vboxvideo/vbox_mode.c b/drivers/gpu/drm/vboxvideo/vbox_mode.c
index 0883a435e62b..d9a5af62af89 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_mode.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_mode.c
@@ -36,7 +36,7 @@ static void vbox_do_modeset(struct drm_crtc *crtc)
u16 flags;
s32 x_offset, y_offset;
- vbox = crtc->dev->dev_private;
+ vbox = to_vbox_dev(crtc->dev);
width = vbox_crtc->width ? vbox_crtc->width : 640;
height = vbox_crtc->height ? vbox_crtc->height : 480;
bpp = fb ? fb->format->cpp[0] * 8 : 32;
@@ -77,7 +77,7 @@ static void vbox_do_modeset(struct drm_crtc *crtc)
static int vbox_set_view(struct drm_crtc *crtc)
{
struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
- struct vbox_private *vbox = crtc->dev->dev_private;
+ struct vbox_private *vbox = to_vbox_dev(crtc->dev);
struct vbva_infoview *p;
/*
@@ -174,7 +174,7 @@ static void vbox_crtc_set_base_and_mode(struct drm_crtc *crtc,
int x, int y)
{
struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(fb->obj[0]);
- struct vbox_private *vbox = crtc->dev->dev_private;
+ struct vbox_private *vbox = to_vbox_dev(crtc->dev);
struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
bool needs_modeset = drm_atomic_crtc_needs_modeset(crtc->state);
@@ -272,7 +272,7 @@ static void vbox_primary_atomic_update(struct drm_plane *plane,
{
struct drm_crtc *crtc = plane->state->crtc;
struct drm_framebuffer *fb = plane->state->fb;
- struct vbox_private *vbox = fb->dev->dev_private;
+ struct vbox_private *vbox = to_vbox_dev(fb->dev);
struct drm_mode_rect *clips;
uint32_t num_clips, i;
@@ -704,7 +704,7 @@ static int vbox_get_modes(struct drm_connector *connector)
int preferred_width, preferred_height;
vbox_connector = to_vbox_connector(connector);
- vbox = connector->dev->dev_private;
+ vbox = to_vbox_dev(connector->dev);
hgsmi_report_flags_location(vbox->guest_pool, GUEST_HEAP_OFFSET(vbox) +
HOST_FLAGS_OFFSET);
diff --git a/drivers/gpu/drm/vboxvideo/vbox_ttm.c b/drivers/gpu/drm/vboxvideo/vbox_ttm.c
index 976423d0c3cc..f5a06675da43 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_ttm.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_ttm.c
@@ -24,25 +24,13 @@ int vbox_mm_init(struct vbox_private *vbox)
return ret;
}
-#ifdef DRM_MTRR_WC
- vbox->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 0),
- pci_resource_len(dev->pdev, 0),
- DRM_MTRR_WC);
-#else
vbox->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
pci_resource_len(dev->pdev, 0));
-#endif
return 0;
}
void vbox_mm_fini(struct vbox_private *vbox)
{
-#ifdef DRM_MTRR_WC
- drm_mtrr_del(vbox->fb_mtrr,
- pci_resource_start(vbox->ddev.pdev, 0),
- pci_resource_len(vbox->ddev.pdev, 0), DRM_MTRR_WC);
-#else
arch_phys_wc_del(vbox->fb_mtrr);
-#endif
drm_vram_helper_release_mm(&vbox->ddev);
}
diff --git a/drivers/gpu/drm/vc4/vc4_debugfs.c b/drivers/gpu/drm/vc4/vc4_debugfs.c
index b61b2d3407b5..4fbbf980a299 100644
--- a/drivers/gpu/drm/vc4/vc4_debugfs.c
+++ b/drivers/gpu/drm/vc4/vc4_debugfs.c
@@ -20,7 +20,7 @@ struct vc4_debugfs_info_entry {
* Called at drm_dev_register() time on each of the minors registered
* by the DRM device, to attach the debugfs files.
*/
-int
+void
vc4_debugfs_init(struct drm_minor *minor)
{
struct vc4_dev *vc4 = to_vc4_dev(minor->dev);
@@ -30,14 +30,9 @@ vc4_debugfs_init(struct drm_minor *minor)
minor->debugfs_root, &vc4->load_tracker_enabled);
list_for_each_entry(entry, &vc4->debugfs_list, link) {
- int ret = drm_debugfs_create_files(&entry->info, 1,
- minor->debugfs_root, minor);
-
- if (ret)
- return ret;
+ drm_debugfs_create_files(&entry->info, 1,
+ minor->debugfs_root, minor);
}
-
- return 0;
}
static int vc4_debugfs_regset32(struct seq_file *m, void *unused)
diff --git a/drivers/gpu/drm/vc4/vc4_dpi.c b/drivers/gpu/drm/vc4/vc4_dpi.c
index 6dfede03396e..a90f2545baee 100644
--- a/drivers/gpu/drm/vc4/vc4_dpi.c
+++ b/drivers/gpu/drm/vc4/vc4_dpi.c
@@ -17,6 +17,7 @@
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/of_graph.h>
@@ -114,10 +115,6 @@ static const struct debugfs_reg32 dpi_regs[] = {
VC4_REG32(DPI_ID),
};
-static const struct drm_encoder_funcs vc4_dpi_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static void vc4_dpi_encoder_disable(struct drm_encoder *encoder)
{
struct vc4_dpi_encoder *vc4_encoder = to_vc4_dpi_encoder(encoder);
@@ -309,8 +306,7 @@ static int vc4_dpi_bind(struct device *dev, struct device *master, void *data)
if (ret)
DRM_ERROR("Failed to turn on core clock: %d\n", ret);
- drm_encoder_init(drm, dpi->encoder, &vc4_dpi_encoder_funcs,
- DRM_MODE_ENCODER_DPI, NULL);
+ drm_simple_encoder_init(drm, dpi->encoder, DRM_MODE_ENCODER_DPI);
drm_encoder_helper_add(dpi->encoder, &vc4_dpi_encoder_helper_funcs);
ret = vc4_dpi_init_bridge(dpi);
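
This vc4 hunk, and the vc4_dsi/vc4_hdmi/vc4_vec/virtio ones that follow, replace per-driver encoder funcs whose only member was .destroy = drm_encoder_cleanup with drm_simple_encoder_init(), which supplies that boilerplate itself. Minimal sketch:

#include <drm/drm_simple_kms_helper.h>

static int register_plain_encoder(struct drm_device *drm,
				  struct drm_encoder *encoder, int type)
{
	/* e.g. type = DRM_MODE_ENCODER_DPI, _DSI, _TMDS, _TVDAC, _VIRTUAL */
	return drm_simple_encoder_init(drm, encoder, type);
}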
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 139d25a8328e..3b1f02efefbe 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -759,7 +759,7 @@ void vc4_crtc_get_margins(struct drm_crtc_state *state,
unsigned int *top, unsigned int *bottom);
/* vc4_debugfs.c */
-int vc4_debugfs_init(struct drm_minor *minor);
+void vc4_debugfs_init(struct drm_minor *minor);
#ifdef CONFIG_DEBUG_FS
void vc4_debugfs_add_file(struct drm_device *drm,
const char *filename,
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index d99b1d526651..eaf276978ee7 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -37,6 +37,7 @@
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "vc4_drv.h"
#include "vc4_regs.h"
@@ -652,15 +653,6 @@ static const struct debugfs_reg32 dsi1_regs[] = {
VC4_REG32(DSI1_ID),
};
-static void vc4_dsi_encoder_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
-}
-
-static const struct drm_encoder_funcs vc4_dsi_encoder_funcs = {
- .destroy = vc4_dsi_encoder_destroy,
-};
-
static void vc4_dsi_latch_ulps(struct vc4_dsi *dsi, bool latch)
{
u32 afec0 = DSI_PORT_READ(PHY_AFEC0);
@@ -1615,8 +1607,7 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
if (dsi->port == 1)
vc4->dsi1 = dsi;
- drm_encoder_init(drm, dsi->encoder, &vc4_dsi_encoder_funcs,
- DRM_MODE_ENCODER_DSI, NULL);
+ drm_simple_encoder_init(drm, dsi->encoder, DRM_MODE_ENCODER_DSI);
drm_encoder_helper_add(dsi->encoder, &vc4_dsi_encoder_helper_funcs);
ret = drm_bridge_attach(dsi->encoder, dsi->bridge, NULL, 0);
@@ -1656,7 +1647,7 @@ static void vc4_dsi_unbind(struct device *dev, struct device *master,
* normally.
*/
list_splice_init(&dsi->bridge_chain, &dsi->encoder->bridge_chain);
- vc4_dsi_encoder_destroy(dsi->encoder);
+ drm_encoder_cleanup(dsi->encoder);
if (dsi->port == 1)
vc4->dsi1 = NULL;
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 340719238753..625bfcf52dc4 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -34,6 +34,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/i2c.h>
@@ -306,15 +307,6 @@ static struct drm_connector *vc4_hdmi_connector_init(struct drm_device *dev,
return connector;
}
-static void vc4_hdmi_encoder_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
-}
-
-static const struct drm_encoder_funcs vc4_hdmi_encoder_funcs = {
- .destroy = vc4_hdmi_encoder_destroy,
-};
-
static int vc4_hdmi_stop_packet(struct drm_encoder *encoder,
enum hdmi_infoframe_type type)
{
@@ -1406,8 +1398,7 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
}
pm_runtime_enable(dev);
- drm_encoder_init(drm, hdmi->encoder, &vc4_hdmi_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
+ drm_simple_encoder_init(drm, hdmi->encoder, DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(hdmi->encoder, &vc4_hdmi_encoder_helper_funcs);
hdmi->connector =
@@ -1465,7 +1456,7 @@ err_destroy_conn:
vc4_hdmi_connector_destroy(hdmi->connector);
#endif
err_destroy_encoder:
- vc4_hdmi_encoder_destroy(hdmi->encoder);
+ drm_encoder_cleanup(hdmi->encoder);
err_unprepare_hsm:
clk_disable_unprepare(hdmi->hsm_clock);
pm_runtime_disable(dev);
@@ -1484,7 +1475,7 @@ static void vc4_hdmi_unbind(struct device *dev, struct device *master,
cec_unregister_adapter(hdmi->cec_adap);
vc4_hdmi_connector_destroy(hdmi->connector);
- vc4_hdmi_encoder_destroy(hdmi->encoder);
+ drm_encoder_cleanup(hdmi->encoder);
clk_disable_unprepare(hdmi->hsm_clock);
pm_runtime_disable(dev);
diff --git a/drivers/gpu/drm/vc4/vc4_vec.c b/drivers/gpu/drm/vc4/vc4_vec.c
index 7402bc768664..bd5b8eb58b18 100644
--- a/drivers/gpu/drm/vc4/vc4_vec.c
+++ b/drivers/gpu/drm/vc4/vc4_vec.c
@@ -17,6 +17,7 @@
#include <drm/drm_edid.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/of_graph.h>
@@ -374,10 +375,6 @@ static struct drm_connector *vc4_vec_connector_init(struct drm_device *dev,
return connector;
}
-static const struct drm_encoder_funcs vc4_vec_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static void vc4_vec_encoder_disable(struct drm_encoder *encoder)
{
struct vc4_vec_encoder *vc4_vec_encoder = to_vc4_vec_encoder(encoder);
@@ -566,8 +563,7 @@ static int vc4_vec_bind(struct device *dev, struct device *master, void *data)
pm_runtime_enable(dev);
- drm_encoder_init(drm, vec->encoder, &vc4_vec_encoder_funcs,
- DRM_MODE_ENCODER_TVDAC, NULL);
+ drm_simple_encoder_init(drm, vec->encoder, DRM_MODE_ENCODER_TVDAC);
drm_encoder_helper_add(vec->encoder, &vc4_vec_encoder_helper_funcs);
vec->connector = vc4_vec_connector_init(drm, vec);
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index 909eba43664a..ec1a8ebb6f1b 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -39,6 +39,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
+#include <drm/drm_managed.h>
#include <drm/drm_prime.h>
#include "vgem_drv.h"
@@ -431,9 +432,6 @@ static void vgem_release(struct drm_device *dev)
struct vgem_device *vgem = container_of(dev, typeof(*vgem), drm);
platform_device_unregister(vgem->platform);
- drm_dev_fini(&vgem->drm);
-
- kfree(vgem);
}
static struct drm_driver vgem_driver = {
@@ -489,16 +487,19 @@ static int __init vgem_init(void)
&vgem_device->platform->dev);
if (ret)
goto out_unregister;
+ drmm_add_final_kfree(&vgem_device->drm, vgem_device);
/* Final step: expose the device/driver to userspace */
- ret = drm_dev_register(&vgem_device->drm, 0);
+ ret = drm_dev_register(&vgem_device->drm, 0);
if (ret)
- goto out_fini;
+ goto out_put;
return 0;
-out_fini:
- drm_dev_fini(&vgem_device->drm);
+out_put:
+ drm_dev_put(&vgem_device->drm);
+ return ret;
+
out_unregister:
platform_device_unregister(vgem_device->platform);
out_free:
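
vgem still embeds its drm_device without devm_drm_dev_alloc(), so it opts into managed freeing explicitly: after drm_dev_init() succeeds, the containing allocation is handed to drmm_add_final_kfree() and the last drm_dev_put() frees it. Hedged sketch with placeholder names:

#include <drm/drm_drv.h>
#include <drm/drm_managed.h>

struct my_obj {				/* placeholder */
	struct drm_device drm;
};

static struct drm_driver my_driver;	/* placeholder */

static int init_embedded(struct my_obj *obj, struct device *parent)
{
	int ret = drm_dev_init(&obj->drm, &my_driver, parent);

	if (ret)
		return ret;
	drmm_add_final_kfree(&obj->drm, obj);	/* freed by the last drm_dev_put() */
	return 0;
}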
diff --git a/drivers/gpu/drm/virtio/virtgpu_debugfs.c b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
index e27120d512b0..3221520f61f0 100644
--- a/drivers/gpu/drm/virtio/virtgpu_debugfs.c
+++ b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
@@ -72,11 +72,10 @@ static struct drm_info_list virtio_gpu_debugfs_list[] = {
#define VIRTIO_GPU_DEBUGFS_ENTRIES ARRAY_SIZE(virtio_gpu_debugfs_list)
-int
+void
virtio_gpu_debugfs_init(struct drm_minor *minor)
{
drm_debugfs_create_files(virtio_gpu_debugfs_list,
VIRTIO_GPU_DEBUGFS_ENTRIES,
minor->debugfs_root, minor);
- return 0;
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index 2b7e6ae65546..cc7fd957a307 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -30,6 +30,7 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "virtgpu_drv.h"
@@ -240,10 +241,6 @@ static const struct drm_connector_funcs virtio_gpu_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static const struct drm_encoder_funcs virtio_gpu_enc_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
{
struct drm_device *dev = vgdev->ddev;
@@ -276,8 +273,7 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
if (vgdev->has_edid)
drm_connector_attach_edid_property(connector);
- drm_encoder_init(dev, encoder, &virtio_gpu_enc_funcs,
- DRM_MODE_ENCODER_VIRTUAL, NULL);
+ drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_VIRTUAL);
drm_encoder_helper_add(encoder, &virtio_gpu_enc_helper_funcs);
encoder->possible_crtcs = 1 << index;
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 7879ff58236f..9ff9f4ac0522 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -218,27 +218,19 @@ struct virtio_gpu_fpriv {
struct mutex context_lock;
};
-/* virtio_ioctl.c */
+/* virtgpu_ioctl.c */
#define DRM_VIRTIO_NUM_IOCTLS 10
extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS];
void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file);
-/* virtio_kms.c */
+/* virtgpu_kms.c */
int virtio_gpu_init(struct drm_device *dev);
void virtio_gpu_deinit(struct drm_device *dev);
void virtio_gpu_release(struct drm_device *dev);
int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file);
void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file *file);
-/* virtio_gem.c */
-void virtio_gpu_gem_free_object(struct drm_gem_object *gem_obj);
-int virtio_gpu_gem_init(struct virtio_gpu_device *vgdev);
-void virtio_gpu_gem_fini(struct virtio_gpu_device *vgdev);
-int virtio_gpu_gem_create(struct drm_file *file,
- struct drm_device *dev,
- struct virtio_gpu_object_params *params,
- struct drm_gem_object **obj_p,
- uint32_t *handle_p);
+/* virtgpu_gem.c */
int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
struct drm_file *file);
void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
@@ -264,7 +256,7 @@ void virtio_gpu_array_put_free_delayed(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object_array *objs);
void virtio_gpu_array_put_free_work(struct work_struct *work);
-/* virtio vg */
+/* virtgpu_vq.c */
int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev);
void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev);
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
@@ -288,10 +280,10 @@ void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
uint32_t scanout_id, uint32_t resource_id,
uint32_t width, uint32_t height,
uint32_t x, uint32_t y);
-int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_object *obj,
- struct virtio_gpu_mem_entry *ents,
- unsigned int nents);
+void virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object *obj,
+ struct virtio_gpu_mem_entry *ents,
+ unsigned int nents);
int virtio_gpu_attach_status_page(struct virtio_gpu_device *vgdev);
int virtio_gpu_detach_status_page(struct virtio_gpu_device *vgdev);
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
@@ -344,17 +336,17 @@ void virtio_gpu_dequeue_fence_func(struct work_struct *work);
void virtio_gpu_notify(struct virtio_gpu_device *vgdev);
-/* virtio_gpu_display.c */
+/* virtgpu_display.c */
void virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev);
void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev);
-/* virtio_gpu_plane.c */
+/* virtgpu_plane.c */
uint32_t virtio_gpu_translate_format(uint32_t drm_fourcc);
struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
enum drm_plane_type type,
int index);
-/* virtio_gpu_fence.c */
+/* virtgpu_fence.c */
struct virtio_gpu_fence *virtio_gpu_fence_alloc(
struct virtio_gpu_device *vgdev);
void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
@@ -363,7 +355,7 @@ void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
void virtio_gpu_fence_event_process(struct virtio_gpu_device *vdev,
u64 last_seq);
-/* virtio_gpu_object */
+/* virtgpu_object.c */
void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo);
struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
size_t size);
@@ -379,7 +371,7 @@ struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
struct drm_device *dev, struct dma_buf_attachment *attach,
struct sg_table *sgt);
-/* virgl debugfs */
-int virtio_gpu_debugfs_init(struct drm_minor *minor);
+/* virtgpu_debugfs.c */
+void virtio_gpu_debugfs_init(struct drm_minor *minor);
#endif
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
index f0d5a8974677..d6cb350ae52a 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -28,11 +28,11 @@
#include "virtgpu_drv.h"
-int virtio_gpu_gem_create(struct drm_file *file,
- struct drm_device *dev,
- struct virtio_gpu_object_params *params,
- struct drm_gem_object **obj_p,
- uint32_t *handle_p)
+static int virtio_gpu_gem_create(struct drm_file *file,
+ struct drm_device *dev,
+ struct virtio_gpu_object_params *params,
+ struct drm_gem_object **obj_p,
+ uint32_t *handle_p)
{
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_object *obj;
@@ -117,7 +117,7 @@ int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
struct virtio_gpu_object_array *objs;
if (!vgdev->has_virgl_3d)
- return 0;
+ goto out_notify;
objs = virtio_gpu_array_alloc(1);
if (!objs)
@@ -126,6 +126,7 @@ int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
virtio_gpu_cmd_context_attach_resource(vgdev, vfpriv->ctx_id,
objs);
+out_notify:
virtio_gpu_notify(vgdev);
return 0;
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 512daff92038..5df722072ba0 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -47,7 +47,6 @@ void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file)
get_task_comm(dbgname, current);
virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id,
strlen(dbgname), dbgname);
- virtio_gpu_notify(vgdev);
vfpriv->context_created = true;
out_unlock:
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index d9039bb7c5e3..6ccbd01cd888 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -235,13 +235,8 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
return ret;
}
- ret = virtio_gpu_object_attach(vgdev, bo, ents, nents);
- if (ret != 0) {
- virtio_gpu_free_object(&shmem_obj->base);
- return ret;
- }
+ virtio_gpu_object_attach(vgdev, bo, ents, nents);
- virtio_gpu_notify(vgdev);
*bo_ptr = bo;
return 0;
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 73854915ec34..9e663a5d9952 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -1087,14 +1087,13 @@ void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
-int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_object *obj,
- struct virtio_gpu_mem_entry *ents,
- unsigned int nents)
+void virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object *obj,
+ struct virtio_gpu_mem_entry *ents,
+ unsigned int nents)
{
virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
ents, nents, NULL);
- return 0;
}
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
index 860de052e820..1e8b2169d834 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.c
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
@@ -21,6 +21,7 @@
#include <drm/drm_file.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_ioctl.h>
+#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -34,7 +35,7 @@
static struct vkms_device *vkms_device;
-bool enable_cursor;
+bool enable_cursor = true;
module_param_named(enable_cursor, enable_cursor, bool, 0444);
MODULE_PARM_DESC(enable_cursor, "Enable/Disable cursor support");
@@ -63,7 +64,6 @@ static void vkms_release(struct drm_device *dev)
platform_device_unregister(vkms->platform);
drm_atomic_helper_shutdown(&vkms->drm);
drm_mode_config_cleanup(&vkms->drm);
- drm_dev_fini(&vkms->drm);
destroy_workqueue(vkms->output.composer_workq);
}
@@ -158,13 +158,14 @@ static int __init vkms_init(void)
&vkms_device->platform->dev);
if (ret)
goto out_unregister;
+ drmm_add_final_kfree(&vkms_device->drm, vkms_device);
ret = dma_coerce_mask_and_coherent(vkms_device->drm.dev,
DMA_BIT_MASK(64));
if (ret) {
DRM_ERROR("Could not initialize DMA support\n");
- goto out_fini;
+ goto out_put;
}
vkms_device->drm.irq_enabled = true;
@@ -172,25 +173,25 @@ static int __init vkms_init(void)
ret = drm_vblank_init(&vkms_device->drm, 1);
if (ret) {
DRM_ERROR("Failed to vblank\n");
- goto out_fini;
+ goto out_put;
}
ret = vkms_modeset_init(vkms_device);
if (ret)
- goto out_fini;
+ goto out_put;
ret = drm_dev_register(&vkms_device->drm, 0);
if (ret)
- goto out_fini;
+ goto out_put;
return 0;
-out_fini:
- drm_dev_fini(&vkms_device->drm);
+out_put:
+ drm_dev_put(&vkms_device->drm);
+ return ret;
out_unregister:
platform_device_unregister(vkms_device->platform);
-
out_free:
kfree(vkms_device);
return ret;
@@ -205,8 +206,6 @@ static void __exit vkms_exit(void)
drm_dev_unregister(&vkms_device->drm);
drm_dev_put(&vkms_device->drm);
-
- kfree(vkms_device);
}
module_init(vkms_init);
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index eda04ffba7b1..f4036bb0b9a8 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -117,11 +117,6 @@ struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev,
enum drm_plane_type type, int index);
/* Gem stuff */
-struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
- struct drm_file *file,
- u32 *handle,
- u64 size);
-
vm_fault_t vkms_gem_fault(struct vm_fault *vmf);
int vkms_dumb_create(struct drm_file *file, struct drm_device *dev,
diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c
index 2e01186fb943..c541fec57566 100644
--- a/drivers/gpu/drm/vkms/vkms_gem.c
+++ b/drivers/gpu/drm/vkms/vkms_gem.c
@@ -97,10 +97,10 @@ vm_fault_t vkms_gem_fault(struct vm_fault *vmf)
return ret;
}
-struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
- struct drm_file *file,
- u32 *handle,
- u64 size)
+static struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
+ struct drm_file *file,
+ u32 *handle,
+ u64 size)
{
struct vkms_gem_object *obj;
int ret;
@@ -113,7 +113,6 @@ struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
return ERR_CAST(obj);
ret = drm_gem_handle_create(file, &obj->gem, handle);
- drm_gem_object_put_unlocked(&obj->gem);
if (ret)
return ERR_PTR(ret);
@@ -142,6 +141,8 @@ int vkms_dumb_create(struct drm_file *file, struct drm_device *dev,
args->size = gem_obj->size;
args->pitch = pitch;
+ drm_gem_object_put_unlocked(gem_obj);
+
DRM_DEBUG_DRIVER("Created object of size %lld\n", size);
return 0;
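The vkms_gem.c hunk above is a reference-counting fix rather than a cleanup: drm_gem_handle_create() takes its own reference for the handle, so the local reference must not be dropped until the caller has finished dereferencing the object. A condensed sketch of the corrected ordering (foo_dumb_create is a placeholder; the real vkms_dumb_create() also computes pitch and size, omitted here):

	static int foo_dumb_create(struct drm_file *file, struct drm_device *dev,
				   struct drm_mode_create_dumb *args)
	{
		struct drm_gem_object *gem_obj;

		gem_obj = vkms_gem_create(dev, file, &args->handle, args->size);
		if (IS_ERR(gem_obj))
			return PTR_ERR(gem_obj);

		/* the handle holds one reference, this local pointer another */
		args->size = gem_obj->size;

		/* drop the local reference only after the last dereference */
		drm_gem_object_put_unlocked(gem_obj);
		return 0;
	}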
diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c
index fb1941a6522c..85afb77e97f0 100644
--- a/drivers/gpu/drm/vkms/vkms_output.c
+++ b/drivers/gpu/drm/vkms/vkms_output.c
@@ -3,6 +3,7 @@
#include "vkms_drv.h"
#include <drm/drm_atomic_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
static void vkms_connector_destroy(struct drm_connector *connector)
{
@@ -17,10 +18,6 @@ static const struct drm_connector_funcs vkms_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static const struct drm_encoder_funcs vkms_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static int vkms_conn_get_modes(struct drm_connector *connector)
{
int count;
@@ -70,8 +67,7 @@ int vkms_output_init(struct vkms_device *vkmsdev, int index)
drm_connector_helper_add(connector, &vkms_conn_helper_funcs);
- ret = drm_encoder_init(dev, encoder, &vkms_encoder_funcs,
- DRM_MODE_ENCODER_VIRTUAL, NULL);
+ ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_VIRTUAL);
if (ret) {
DRM_ERROR("Failed to init encoder\n");
goto err_encoder;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
index bb46ca0c458f..1629427d5734 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
@@ -27,6 +27,7 @@
**************************************************************************/
#include "vmwgfx_drv.h"
+#include <linux/highmem.h>
/*
* Template that implements find_first_diff() for a generic
@@ -374,12 +375,12 @@ static int vmw_bo_cpu_blit_line(struct vmw_bo_blit_line_data *d,
copy_size = min_t(u32, copy_size, PAGE_SIZE - src_page_offset);
if (unmap_src) {
- ttm_kunmap_atomic_prot(d->src_addr, d->src_prot);
+ kunmap_atomic(d->src_addr);
d->src_addr = NULL;
}
if (unmap_dst) {
- ttm_kunmap_atomic_prot(d->dst_addr, d->dst_prot);
+ kunmap_atomic(d->dst_addr);
d->dst_addr = NULL;
}
@@ -388,8 +389,8 @@ static int vmw_bo_cpu_blit_line(struct vmw_bo_blit_line_data *d,
return -EINVAL;
d->dst_addr =
- ttm_kmap_atomic_prot(d->dst_pages[dst_page],
- d->dst_prot);
+ kmap_atomic_prot(d->dst_pages[dst_page],
+ d->dst_prot);
if (!d->dst_addr)
return -ENOMEM;
@@ -401,8 +402,8 @@ static int vmw_bo_cpu_blit_line(struct vmw_bo_blit_line_data *d,
return -EINVAL;
d->src_addr =
- ttm_kmap_atomic_prot(d->src_pages[src_page],
- d->src_prot);
+ kmap_atomic_prot(d->src_pages[src_page],
+ d->src_prot);
if (!d->src_addr)
return -ENOMEM;
@@ -499,9 +500,9 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
}
out:
if (d.src_addr)
- ttm_kunmap_atomic_prot(d.src_addr, d.src_prot);
+ kunmap_atomic(d.src_addr);
if (d.dst_addr)
- ttm_kunmap_atomic_prot(d.dst_addr, d.dst_prot);
+ kunmap_atomic(d.dst_addr);
return ret;
}
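The vmwgfx blit hunks above drop the TTM-private ttm_kmap_atomic_prot()/ttm_kunmap_atomic_prot() wrappers in favour of the core highmem helpers, which is why <linux/highmem.h> is now included. A minimal sketch of the map/copy/unmap pattern; copy_to_page and its parameters are placeholders, while the real code walks source and destination lines a page fragment at a time:

	#include <linux/highmem.h>
	#include <linux/string.h>

	static int copy_to_page(struct page *page, pgprot_t prot,
				unsigned int offset, const void *src, size_t len)
	{
		void *addr;

		addr = kmap_atomic_prot(page, prot);	/* map with the caller's protection */
		if (!addr)
			return -ENOMEM;

		memcpy(addr + offset, src, len);
		kunmap_atomic(addr);			/* plain kunmap_atomic(); no prot argument */
		return 0;
	}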
diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c
index 374142018171..1fd458e877ca 100644
--- a/drivers/gpu/drm/xen/xen_drm_front.c
+++ b/drivers/gpu/drm/xen/xen_drm_front.c
@@ -460,9 +460,6 @@ static void xen_drm_drv_release(struct drm_device *dev)
drm_atomic_helper_shutdown(dev);
drm_mode_config_cleanup(dev);
- drm_dev_fini(dev);
- kfree(dev);
-
if (front_info->cfg.be_alloc)
xenbus_switch_state(front_info->xb_dev,
XenbusStateInitialising);
@@ -561,6 +558,7 @@ fail_register:
fail_modeset:
drm_kms_helper_poll_fini(drm_dev);
drm_mode_config_cleanup(drm_dev);
+ drm_dev_put(drm_dev);
fail:
kfree(drm_info);
return ret;
diff --git a/drivers/gpu/drm/zte/zx_hdmi.c b/drivers/gpu/drm/zte/zx_hdmi.c
index b98a1420dcd3..76a16d997a23 100644
--- a/drivers/gpu/drm/zte/zx_hdmi.c
+++ b/drivers/gpu/drm/zte/zx_hdmi.c
@@ -20,6 +20,7 @@
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_print.h>
+#include <drm/drm_simple_kms_helper.h>
#include <sound/hdmi-codec.h>
@@ -254,10 +255,6 @@ static const struct drm_encoder_helper_funcs zx_hdmi_encoder_helper_funcs = {
.mode_set = zx_hdmi_encoder_mode_set,
};
-static const struct drm_encoder_funcs zx_hdmi_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static int zx_hdmi_connector_get_modes(struct drm_connector *connector)
{
struct zx_hdmi *hdmi = to_zx_hdmi(connector);
@@ -313,8 +310,7 @@ static int zx_hdmi_register(struct drm_device *drm, struct zx_hdmi *hdmi)
encoder->possible_crtcs = VOU_CRTC_MASK;
- drm_encoder_init(drm, encoder, &zx_hdmi_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
+ drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(encoder, &zx_hdmi_encoder_helper_funcs);
hdmi->connector.polled = DRM_CONNECTOR_POLL_HPD;
diff --git a/drivers/gpu/drm/zte/zx_tvenc.c b/drivers/gpu/drm/zte/zx_tvenc.c
index c598b7daf1f1..d8a89ba383bc 100644
--- a/drivers/gpu/drm/zte/zx_tvenc.c
+++ b/drivers/gpu/drm/zte/zx_tvenc.c
@@ -14,6 +14,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "zx_drm_drv.h"
#include "zx_tvenc_regs.h"
@@ -218,10 +219,6 @@ static const struct drm_encoder_helper_funcs zx_tvenc_encoder_helper_funcs = {
.mode_set = zx_tvenc_encoder_mode_set,
};
-static const struct drm_encoder_funcs zx_tvenc_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static int zx_tvenc_connector_get_modes(struct drm_connector *connector)
{
struct zx_tvenc *tvenc = to_zx_tvenc(connector);
@@ -285,8 +282,7 @@ static int zx_tvenc_register(struct drm_device *drm, struct zx_tvenc *tvenc)
*/
encoder->possible_crtcs = BIT(1);
- drm_encoder_init(drm, encoder, &zx_tvenc_encoder_funcs,
- DRM_MODE_ENCODER_TVDAC, NULL);
+ drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TVDAC);
drm_encoder_helper_add(encoder, &zx_tvenc_encoder_helper_funcs);
connector->interlace_allowed = true;
diff --git a/drivers/gpu/drm/zte/zx_vga.c b/drivers/gpu/drm/zte/zx_vga.c
index c4fa3bbaba78..a7ed7f5ca837 100644
--- a/drivers/gpu/drm/zte/zx_vga.c
+++ b/drivers/gpu/drm/zte/zx_vga.c
@@ -14,6 +14,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "zx_drm_drv.h"
#include "zx_vga_regs.h"
@@ -72,10 +73,6 @@ static const struct drm_encoder_helper_funcs zx_vga_encoder_helper_funcs = {
.disable = zx_vga_encoder_disable,
};
-static const struct drm_encoder_funcs zx_vga_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static int zx_vga_connector_get_modes(struct drm_connector *connector)
{
struct zx_vga *vga = to_zx_vga(connector);
@@ -154,8 +151,7 @@ static int zx_vga_register(struct drm_device *drm, struct zx_vga *vga)
encoder->possible_crtcs = VOU_CRTC_MASK;
- ret = drm_encoder_init(drm, encoder, &zx_vga_encoder_funcs,
- DRM_MODE_ENCODER_DAC, NULL);
+ ret = drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_DAC);
if (ret) {
DRM_DEV_ERROR(dev, "failed to init encoder: %d\n", ret);
return ret;
diff --git a/drivers/greybus/Kconfig b/drivers/greybus/Kconfig
index b84fcaf8b105..aeea082f1418 100644
--- a/drivers/greybus/Kconfig
+++ b/drivers/greybus/Kconfig
@@ -3,7 +3,7 @@ menuconfig GREYBUS
tristate "Greybus support"
depends on SYSFS
---help---
- This option enables the Greybus driver core. Greybus is an
+ This option enables the Greybus driver core. Greybus is a
hardware protocol that was designed to provide Unipro with a
sane application layer. It was originally designed for the
ARA project, a module phone system, but has shown up in other
@@ -12,7 +12,7 @@ menuconfig GREYBUS
Say Y here to enable support for these types of drivers.
- To compile this code as a module, chose M here: the module
+ To compile this code as a module, choose M here: the module
will be called greybus.ko
if GREYBUS
@@ -25,7 +25,7 @@ config GREYBUS_ES2
acts as a Greybus "host controller". This device is a bridge
from a USB device to a Unipro network.
- To compile this code as a module, chose M here: the module
+ To compile this code as a module, choose M here: the module
will be called gb-es2.ko
endif # GREYBUS
diff --git a/drivers/greybus/arpc.h b/drivers/greybus/arpc.h
index c8b83c5cfa79..b9ea81b55b29 100644
--- a/drivers/greybus/arpc.h
+++ b/drivers/greybus/arpc.h
@@ -21,7 +21,7 @@ struct arpc_request_message {
__le16 id; /* RPC unique id */
__le16 size; /* Size in bytes of header + payload */
__u8 type; /* RPC type */
- __u8 data[0]; /* ARPC data */
+ __u8 data[]; /* ARPC data */
} __packed;
struct arpc_response_message {
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 34f07371716d..443c5cbbde04 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -42,7 +42,7 @@ config HIDRAW
---help---
Say Y here if you want to support HID devices (from the USB
specification standpoint) that aren't strictly user interface
- devices, like monitor controls and Uninterruptable Power Supplies.
+ devices, like monitor controls and Uninterruptible Power Supplies.
This module supports these devices separately using a separate
event interface on /dev/hidraw.
@@ -149,6 +149,7 @@ config HID_APPLEIR
config HID_ASUS
tristate "Asus"
+ depends on USB_HID
depends on LEDS_CLASS
depends on ASUS_WMI || ASUS_WMI=n
select POWER_SUPPLY
@@ -538,14 +539,14 @@ config HID_LOGITECH
Support for Logitech devices that are not fully compliant with HID standard.
config HID_LOGITECH_DJ
- tristate "Logitech Unifying receivers full support"
+ tristate "Logitech receivers full support"
depends on USB_HID
depends on HIDRAW
depends on HID_LOGITECH
select HID_LOGITECH_HIDPP
---help---
- Say Y if you want support for Logitech Unifying receivers and devices.
- Unifying receivers are capable of pairing up to 6 Logitech compliant
+ Say Y if you want support for Logitech receivers and devices.
+ Logitech receivers are capable of pairing multiple Logitech compliant
devices to the same receiver. Without this driver it will be handled by
generic USB_HID driver and all incoming events will be multiplexed
into a single mouse and a single keyboard device.
@@ -1140,7 +1141,7 @@ config HID_SENSOR_CUSTOM_SENSOR
to decide how to interpret these special sensor ids and process in
the user space. Currently some manufacturers are using these ids for
sensor calibration and debugging other sensors. Manufacturers
- should't use these special custom sensor ids to export any of the
+ shouldn't use these special custom sensor ids to export any of the
standard sensors.
Select this config option for custom/generic sensor support.
diff --git a/drivers/hid/hid-alps.c b/drivers/hid/hid-alps.c
index b2ad319a74b9..6f1fe7248d81 100644
--- a/drivers/hid/hid-alps.c
+++ b/drivers/hid/hid-alps.c
@@ -387,8 +387,7 @@ static int u1_raw_event(struct alps_dev *hdata, u8 *data, int size)
input_report_abs(hdata->input,
ABS_MT_PRESSURE, z);
} else {
- input_mt_report_slot_state(hdata->input,
- MT_TOOL_FINGER, 0);
+ input_mt_report_slot_inactive(hdata->input);
}
}
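The hid-alps change above (and the matching hid-multitouch hunks further down) replaces the open-coded "report this MT slot as empty" idiom with the input_mt_report_slot_inactive() helper. Assuming the helper is the thin wrapper its name suggests, the two forms are equivalent, since the tool type is ignored when a slot is reported inactive:

	/* old, open-coded form */
	input_mt_slot(input, slot);
	input_mt_report_slot_state(input, MT_TOOL_FINGER, false);

	/* new helper, same effect */
	input_mt_slot(input, slot);
	input_mt_report_slot_inactive(input);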
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index d732d1d10caf..359bdfbe3701 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -51,6 +51,12 @@ MODULE_PARM_DESC(swap_opt_cmd, "Swap the Option (\"Alt\") and Command (\"Flag\")
"(For people who want to keep Windows PC keyboard muscle memory. "
"[0] = as-is, Mac layout. 1 = swapped, Windows layout.)");
+static unsigned int swap_fn_leftctrl;
+module_param(swap_fn_leftctrl, uint, 0644);
+MODULE_PARM_DESC(swap_fn_leftctrl, "Swap the Fn and left Control keys. "
+ "(For people who want to keep PC keyboard muscle memory. "
+ "[0] = as-is, Mac layout, 1 = swapped, PC layout)");
+
struct apple_sc {
unsigned long quirks;
unsigned int fn_on;
@@ -162,6 +168,11 @@ static const struct apple_key_translation swapped_option_cmd_keys[] = {
{ }
};
+static const struct apple_key_translation swapped_fn_leftctrl_keys[] = {
+ { KEY_FN, KEY_LEFTCTRL },
+ { }
+};
+
static const struct apple_key_translation *apple_find_translation(
const struct apple_key_translation *table, u16 from)
{
@@ -183,9 +194,11 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
bool do_translate;
u16 code = 0;
- if (usage->code == KEY_FN) {
+ u16 fn_keycode = (swap_fn_leftctrl) ? (KEY_LEFTCTRL) : (KEY_FN);
+
+ if (usage->code == fn_keycode) {
asc->fn_on = !!value;
- input_event(input, usage->type, usage->code, value);
+ input_event(input, usage->type, KEY_FN, value);
return 1;
}
@@ -270,6 +283,14 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
}
}
+ if (swap_fn_leftctrl) {
+ trans = apple_find_translation(swapped_fn_leftctrl_keys, usage->code);
+ if (trans) {
+ input_event(input, usage->type, trans->to, value);
+ return 1;
+ }
+ }
+
return 0;
}
@@ -333,6 +354,11 @@ static void apple_setup_input(struct input_dev *input)
for (trans = apple_iso_keyboard; trans->from; trans++)
set_bit(trans->to, input->keybit);
+
+ if (swap_fn_leftctrl) {
+ for (trans = swapped_fn_leftctrl_keys; trans->from; trans++)
+ set_bit(trans->to, input->keybit);
+ }
}
static int apple_input_mapping(struct hid_device *hdev, struct hid_input *hi,
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
index e6e4c841fb06..c183caf89d49 100644
--- a/drivers/hid/hid-asus.c
+++ b/drivers/hid/hid-asus.c
@@ -40,7 +40,9 @@ MODULE_AUTHOR("Frederik Wenigwieser <frederik.wenigwieser@gmail.com>");
MODULE_DESCRIPTION("Asus HID Keyboard and TouchPad");
#define T100_TPAD_INTF 2
+#define MEDION_E1239T_TPAD_INTF 1
+#define E1239T_TP_TOGGLE_REPORT_ID 0x05
#define T100CHI_MOUSE_REPORT_ID 0x06
#define FEATURE_REPORT_ID 0x0d
#define INPUT_REPORT_ID 0x5d
@@ -77,6 +79,7 @@ MODULE_DESCRIPTION("Asus HID Keyboard and TouchPad");
#define QUIRK_G752_KEYBOARD BIT(8)
#define QUIRK_T101HA_DOCK BIT(9)
#define QUIRK_T90CHI BIT(10)
+#define QUIRK_MEDION_E1239T BIT(11)
#define I2C_KEYBOARD_QUIRKS (QUIRK_FIX_NOTEBOOK_REPORT | \
QUIRK_NO_INIT_REPORTS | \
@@ -102,12 +105,14 @@ struct asus_touchpad_info {
int res_y;
int contact_size;
int max_contacts;
+ int report_size;
};
struct asus_drvdata {
unsigned long quirks;
struct hid_device *hdev;
struct input_dev *input;
+ struct input_dev *tp_kbd_input;
struct asus_kbd_leds *kbd_backlight;
const struct asus_touchpad_info *tp;
bool enable_backlight;
@@ -126,6 +131,7 @@ static const struct asus_touchpad_info asus_i2c_tp = {
.max_y = 1758,
.contact_size = 5,
.max_contacts = 5,
+ .report_size = 28 /* 2 byte header + 5 * 5 + 1 byte footer */,
};
static const struct asus_touchpad_info asus_t100ta_tp = {
@@ -135,6 +141,7 @@ static const struct asus_touchpad_info asus_t100ta_tp = {
.res_y = 27, /* units/mm */
.contact_size = 5,
.max_contacts = 5,
+ .report_size = 28 /* 2 byte header + 5 * 5 + 1 byte footer */,
};
static const struct asus_touchpad_info asus_t100ha_tp = {
@@ -144,6 +151,7 @@ static const struct asus_touchpad_info asus_t100ha_tp = {
.res_y = 29, /* units/mm */
.contact_size = 5,
.max_contacts = 5,
+ .report_size = 28 /* 2 byte header + 5 * 5 + 1 byte footer */,
};
static const struct asus_touchpad_info asus_t200ta_tp = {
@@ -153,6 +161,7 @@ static const struct asus_touchpad_info asus_t200ta_tp = {
.res_y = 28, /* units/mm */
.contact_size = 5,
.max_contacts = 5,
+ .report_size = 28 /* 2 byte header + 5 * 5 + 1 byte footer */,
};
static const struct asus_touchpad_info asus_t100chi_tp = {
@@ -162,6 +171,17 @@ static const struct asus_touchpad_info asus_t100chi_tp = {
.res_y = 29, /* units/mm */
.contact_size = 3,
.max_contacts = 4,
+ .report_size = 15 /* 2 byte header + 3 * 4 + 1 byte footer */,
+};
+
+static const struct asus_touchpad_info medion_e1239t_tp = {
+ .max_x = 2640,
+ .max_y = 1380,
+ .res_x = 29, /* units/mm */
+ .res_y = 28, /* units/mm */
+ .contact_size = 5,
+ .max_contacts = 5,
+ .report_size = 32 /* 2 byte header + 5 * 5 + 5 byte footer */,
};
static void asus_report_contact_down(struct asus_drvdata *drvdat,
@@ -229,7 +249,7 @@ static int asus_report_input(struct asus_drvdata *drvdat, u8 *data, int size)
int i, toolType = MT_TOOL_FINGER;
u8 *contactData = data + 2;
- if (size != 3 + drvdat->tp->contact_size * drvdat->tp->max_contacts)
+ if (size != drvdat->tp->report_size)
return 0;
for (i = 0; i < drvdat->tp->max_contacts; i++) {
@@ -257,6 +277,34 @@ static int asus_report_input(struct asus_drvdata *drvdat, u8 *data, int size)
return 1;
}
+static int asus_e1239t_event(struct asus_drvdata *drvdat, u8 *data, int size)
+{
+ if (size != 3)
+ return 0;
+
+ /* Handle broken mute key which only sends press events */
+ if (!drvdat->tp &&
+ data[0] == 0x02 && data[1] == 0xe2 && data[2] == 0x00) {
+ input_report_key(drvdat->input, KEY_MUTE, 1);
+ input_sync(drvdat->input);
+ input_report_key(drvdat->input, KEY_MUTE, 0);
+ input_sync(drvdat->input);
+ return 1;
+ }
+
+ /* Handle custom touchpad toggle key which only sends press events */
+ if (drvdat->tp_kbd_input &&
+ data[0] == 0x05 && data[1] == 0x02 && data[2] == 0x28) {
+ input_report_key(drvdat->tp_kbd_input, KEY_F21, 1);
+ input_sync(drvdat->tp_kbd_input);
+ input_report_key(drvdat->tp_kbd_input, KEY_F21, 0);
+ input_sync(drvdat->tp_kbd_input);
+ return 1;
+ }
+
+ return 0;
+}
+
static int asus_event(struct hid_device *hdev, struct hid_field *field,
struct hid_usage *usage, __s32 value)
{
@@ -281,6 +329,9 @@ static int asus_raw_event(struct hid_device *hdev,
if (drvdata->tp && data[0] == INPUT_REPORT_ID)
return asus_report_input(drvdata, data, size);
+ if (drvdata->quirks & QUIRK_MEDION_E1239T)
+ return asus_e1239t_event(drvdata, data, size);
+
return 0;
}
@@ -615,6 +666,21 @@ static int asus_input_configured(struct hid_device *hdev, struct hid_input *hi)
hi->report->id != T100CHI_MOUSE_REPORT_ID)
return 0;
+ /* Handle MULTI_INPUT on E1239T mouse/touchpad USB interface */
+ if (drvdata->tp && (drvdata->quirks & QUIRK_MEDION_E1239T)) {
+ switch (hi->report->id) {
+ case E1239T_TP_TOGGLE_REPORT_ID:
+ input_set_capability(input, EV_KEY, KEY_F21);
+ input->name = "Asus Touchpad Keys";
+ drvdata->tp_kbd_input = input;
+ return 0;
+ case INPUT_REPORT_ID:
+ break; /* Touchpad report, handled below */
+ default:
+ return 0; /* Ignore other reports */
+ }
+ }
+
if (drvdata->tp) {
int ret;
@@ -677,24 +743,16 @@ static int asus_input_mapping(struct hid_device *hdev,
* This avoids a bunch of non-functional hid_input devices getting
* created because of the T100CHI using HID_QUIRK_MULTI_INPUT.
*/
- if (drvdata->quirks & (QUIRK_T100CHI | QUIRK_T90CHI)) {
- if (field->application == (HID_UP_GENDESK | 0x0080) ||
- usage->hid == (HID_UP_GENDEVCTRLS | 0x0024) ||
- usage->hid == (HID_UP_GENDEVCTRLS | 0x0025) ||
- usage->hid == (HID_UP_GENDEVCTRLS | 0x0026))
- return -1;
- /*
- * We use the hid_input for the mouse report for the touchpad,
- * keep the left button, to avoid the core removing it.
- */
- if (field->application == HID_GD_MOUSE &&
- usage->hid != (HID_UP_BUTTON | 1))
- return -1;
- }
+ if ((drvdata->quirks & (QUIRK_T100CHI | QUIRK_T90CHI)) &&
+ (field->application == (HID_UP_GENDESK | 0x0080) ||
+ field->application == HID_GD_MOUSE ||
+ usage->hid == (HID_UP_GENDEVCTRLS | 0x0024) ||
+ usage->hid == (HID_UP_GENDEVCTRLS | 0x0025) ||
+ usage->hid == (HID_UP_GENDEVCTRLS | 0x0026)))
+ return -1;
/* ASUS-specific keyboard hotkeys */
if ((usage->hid & HID_USAGE_PAGE) == 0xff310000) {
- set_bit(EV_REP, hi->input->evbit);
switch (usage->hid & HID_USAGE) {
case 0x10: asus_map_key_clear(KEY_BRIGHTNESSDOWN); break;
case 0x20: asus_map_key_clear(KEY_BRIGHTNESSUP); break;
@@ -737,11 +795,11 @@ static int asus_input_mapping(struct hid_device *hdev,
if (drvdata->quirks & QUIRK_USE_KBD_BACKLIGHT)
drvdata->enable_backlight = true;
+ set_bit(EV_REP, hi->input->evbit);
return 1;
}
if ((usage->hid & HID_USAGE_PAGE) == HID_UP_MSVENDOR) {
- set_bit(EV_REP, hi->input->evbit);
switch (usage->hid & HID_USAGE) {
case 0xff01: asus_map_key_clear(BTN_1); break;
case 0xff02: asus_map_key_clear(BTN_2); break;
@@ -764,6 +822,7 @@ static int asus_input_mapping(struct hid_device *hdev,
return 0;
}
+ set_bit(EV_REP, hi->input->evbit);
return 1;
}
@@ -782,6 +841,16 @@ static int asus_input_mapping(struct hid_device *hdev,
}
}
+ /*
+ * The mute button is broken and only sends press events, we
+ * deal with this in our raw_event handler, so do not map it.
+ */
+ if ((drvdata->quirks & QUIRK_MEDION_E1239T) &&
+ usage->hid == (HID_UP_CONSUMER | 0xe2)) {
+ input_set_capability(hi->input, EV_KEY, KEY_MUTE);
+ return -1;
+ }
+
return 0;
}
@@ -849,7 +918,8 @@ static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (drvdata->quirks & QUIRK_IS_MULTITOUCH)
drvdata->tp = &asus_i2c_tp;
- if (drvdata->quirks & QUIRK_T100_KEYBOARD) {
+ if ((drvdata->quirks & QUIRK_T100_KEYBOARD) &&
+ hid_is_using_ll_driver(hdev, &usb_hid_driver)) {
struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
if (intf->altsetting->desc.bInterfaceNumber == T100_TPAD_INTF) {
@@ -877,6 +947,19 @@ static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id)
drvdata->tp = &asus_t100chi_tp;
}
+ if ((drvdata->quirks & QUIRK_MEDION_E1239T) &&
+ hid_is_using_ll_driver(hdev, &usb_hid_driver)) {
+ struct usb_host_interface *alt =
+ to_usb_interface(hdev->dev.parent)->altsetting;
+
+ if (alt->desc.bInterfaceNumber == MEDION_E1239T_TPAD_INTF) {
+ /* For separate input-devs for tp and tp toggle key */
+ hdev->quirks |= HID_QUIRK_MULTI_INPUT;
+ drvdata->quirks |= QUIRK_SKIP_INPUT_MAPPING;
+ drvdata->tp = &medion_e1239t_tp;
+ }
+ }
+
if (drvdata->quirks & QUIRK_NO_INIT_REPORTS)
hdev->quirks |= HID_QUIRK_NO_INIT_REPORTS;
@@ -1056,7 +1139,8 @@ static const struct hid_device_id asus_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_ASUS_MD_5112) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ASUSTEK,
USB_DEVICE_ID_ASUSTEK_T100CHI_KEYBOARD), QUIRK_T100CHI },
-
+ { HID_USB_DEVICE(USB_VENDOR_ID_ITE, USB_DEVICE_ID_ITE_MEDION_E1239T),
+ QUIRK_MEDION_E1239T },
{ }
};
MODULE_DEVICE_TABLE(hid, asus_devices);
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 1c71a1aa76b2..874fc3791f3b 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -76,12 +76,9 @@
#define USB_VENDOR_ID_ALPS_JP 0x044E
#define HID_DEVICE_ID_ALPS_U1_DUAL 0x120B
-#define HID_DEVICE_ID_ALPS_U1_DUAL_PTP 0x121F
-#define HID_DEVICE_ID_ALPS_U1_DUAL_3BTN_PTP 0x1220
#define HID_DEVICE_ID_ALPS_U1 0x1215
#define HID_DEVICE_ID_ALPS_U1_UNICORN_LEGACY 0x121E
#define HID_DEVICE_ID_ALPS_T4_BTNLESS 0x120C
-#define HID_DEVICE_ID_ALPS_1222 0x1222
#define USB_VENDOR_ID_AMI 0x046b
#define USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE 0xff10
@@ -281,9 +278,6 @@
#define USB_VENDOR_ID_CIDC 0x1677
-#define I2C_VENDOR_ID_CIRQUE 0x0488
-#define I2C_PRODUCT_ID_CIRQUE_121F 0x121F
-
#define USB_VENDOR_ID_CJTOUCH 0x24b8
#define USB_DEVICE_ID_CJTOUCH_MULTI_TOUCH_0020 0x0020
#define USB_DEVICE_ID_CJTOUCH_MULTI_TOUCH_0040 0x0040
@@ -640,6 +634,7 @@
#define I2C_DEVICE_ID_ITE_LENOVO_LEGION_Y720 0x837a
#define USB_DEVICE_ID_ITE_LENOVO_YOGA900 0x8396
#define USB_DEVICE_ID_ITE8595 0x8595
+#define USB_DEVICE_ID_ITE_MEDION_E1239T 0xce50
#define USB_VENDOR_ID_JABRA 0x0b0e
#define USB_DEVICE_ID_JABRA_SPEAK_410 0x0412
@@ -730,8 +725,6 @@
#define USB_DEVICE_ID_LENOVO_SCROLLPOINT_OPTICAL 0x6049
#define USB_DEVICE_ID_LENOVO_TPPRODOCK 0x6067
#define USB_DEVICE_ID_LENOVO_X1_COVER 0x6085
-#define USB_DEVICE_ID_LENOVO_X1_TAB 0x60a3
-#define USB_DEVICE_ID_LENOVO_X1_TAB3 0x60b5
#define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D 0x608d
#define USB_VENDOR_ID_LG 0x1fd2
@@ -1157,6 +1150,9 @@
#define USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8882 0x8882
#define USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8883 0x8883
+#define USB_VENDOR_ID_TRUST 0x145f
+#define USB_DEVICE_ID_TRUST_PANORA_TABLET 0x0212
+
#define USB_VENDOR_ID_TURBOX 0x062a
#define USB_DEVICE_ID_TURBOX_KEYBOARD 0x0201
#define USB_DEVICE_ID_ASUS_MD_5110 0x5110
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index ed9b1c1f460d..48dff5d6b605 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * HID driver for Logitech Unifying receivers
+ * HID driver for Logitech receivers
*
* Copyright (c) 2011 Logitech
*/
@@ -701,7 +701,7 @@ static void logi_dj_recv_add_djhid_device(struct dj_receiver_dev *djrcv_dev,
type_str, dj_hiddev->product);
} else {
snprintf(dj_hiddev->name, sizeof(dj_hiddev->name),
- "Logitech Unifying Device. Wireless PID:%04x",
+ "Logitech Wireless Device PID:%04x",
dj_hiddev->product);
}
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index 094f4f1b6555..1e1cf8eae649 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * HIDPP protocol for Logitech Unifying receivers
+ * HIDPP protocol for Logitech receivers
*
* Copyright (c) 2011 Logitech (c)
* Copyright (c) 2012-2013 Google (c)
diff --git a/drivers/hid/hid-mcp2221.c b/drivers/hid/hid-mcp2221.c
index d958475f8c81..e1b93ce32e01 100644
--- a/drivers/hid/hid-mcp2221.c
+++ b/drivers/hid/hid-mcp2221.c
@@ -15,6 +15,7 @@
#include <linux/hid.h>
#include <linux/hidraw.h>
#include <linux/i2c.h>
+#include <linux/gpio/driver.h>
#include "hid-ids.h"
/* Commands codes in a raw output report */
@@ -27,6 +28,8 @@ enum {
MCP2221_I2C_PARAM_OR_STATUS = 0x10,
MCP2221_I2C_SET_SPEED = 0x20,
MCP2221_I2C_CANCEL = 0x10,
+ MCP2221_GPIO_SET = 0x50,
+ MCP2221_GPIO_GET = 0x51,
};
/* Response codes in a raw input report */
@@ -42,6 +45,8 @@ enum {
MCP2221_I2C_WRADDRL_SEND = 0x21,
MCP2221_I2C_ADDR_NACK = 0x25,
MCP2221_I2C_READ_COMPL = 0x55,
+ MCP2221_ALT_F_NOT_GPIOV = 0xEE,
+ MCP2221_ALT_F_NOT_GPIOD = 0xEF,
};
/*
@@ -59,6 +64,9 @@ struct mcp2221 {
int rxbuf_idx;
int status;
u8 cur_i2c_clk_div;
+ struct gpio_chip *gc;
+ u8 gp_idx;
+ u8 gpio_dir;
};
/*
@@ -526,6 +534,110 @@ static const struct i2c_algorithm mcp_i2c_algo = {
.functionality = mcp_i2c_func,
};
+static int mcp_gpio_get(struct gpio_chip *gc,
+ unsigned int offset)
+{
+ int ret;
+ struct mcp2221 *mcp = gpiochip_get_data(gc);
+
+ mcp->txbuf[0] = MCP2221_GPIO_GET;
+
+ mcp->gp_idx = (offset + 1) * 2;
+
+ mutex_lock(&mcp->lock);
+ ret = mcp_send_data_req_status(mcp, mcp->txbuf, 1);
+ mutex_unlock(&mcp->lock);
+
+ return ret;
+}
+
+static void mcp_gpio_set(struct gpio_chip *gc,
+ unsigned int offset, int value)
+{
+ struct mcp2221 *mcp = gpiochip_get_data(gc);
+
+ memset(mcp->txbuf, 0, 18);
+ mcp->txbuf[0] = MCP2221_GPIO_SET;
+
+ mcp->gp_idx = ((offset + 1) * 4) - 1;
+
+ mcp->txbuf[mcp->gp_idx - 1] = 1;
+ mcp->txbuf[mcp->gp_idx] = !!value;
+
+ mutex_lock(&mcp->lock);
+ mcp_send_data_req_status(mcp, mcp->txbuf, 18);
+ mutex_unlock(&mcp->lock);
+}
+
+static int mcp_gpio_dir_set(struct mcp2221 *mcp,
+ unsigned int offset, u8 val)
+{
+ memset(mcp->txbuf, 0, 18);
+ mcp->txbuf[0] = MCP2221_GPIO_SET;
+
+ mcp->gp_idx = (offset + 1) * 5;
+
+ mcp->txbuf[mcp->gp_idx - 1] = 1;
+ mcp->txbuf[mcp->gp_idx] = val;
+
+ return mcp_send_data_req_status(mcp, mcp->txbuf, 18);
+}
+
+static int mcp_gpio_direction_input(struct gpio_chip *gc,
+ unsigned int offset)
+{
+ int ret;
+ struct mcp2221 *mcp = gpiochip_get_data(gc);
+
+ mutex_lock(&mcp->lock);
+ ret = mcp_gpio_dir_set(mcp, offset, 0);
+ mutex_unlock(&mcp->lock);
+
+ return ret;
+}
+
+static int mcp_gpio_direction_output(struct gpio_chip *gc,
+ unsigned int offset, int value)
+{
+ int ret;
+ struct mcp2221 *mcp = gpiochip_get_data(gc);
+
+ mutex_lock(&mcp->lock);
+ ret = mcp_gpio_dir_set(mcp, offset, 1);
+ mutex_unlock(&mcp->lock);
+
+ /* Can't configure as output, bailout early */
+ if (ret)
+ return ret;
+
+ mcp_gpio_set(gc, offset, value);
+
+ return 0;
+}
+
+static int mcp_gpio_get_direction(struct gpio_chip *gc,
+ unsigned int offset)
+{
+ int ret;
+ struct mcp2221 *mcp = gpiochip_get_data(gc);
+
+ mcp->txbuf[0] = MCP2221_GPIO_GET;
+
+ mcp->gp_idx = (offset + 1) * 2;
+
+ mutex_lock(&mcp->lock);
+ ret = mcp_send_data_req_status(mcp, mcp->txbuf, 1);
+ mutex_unlock(&mcp->lock);
+
+ if (ret)
+ return ret;
+
+ if (mcp->gpio_dir)
+ return GPIO_LINE_DIRECTION_IN;
+
+ return GPIO_LINE_DIRECTION_OUT;
+}
+
/* Gives current state of i2c engine inside mcp2221 */
static int mcp_get_i2c_eng_state(struct mcp2221 *mcp,
u8 *data, u8 idx)
@@ -638,6 +750,39 @@ static int mcp2221_raw_event(struct hid_device *hdev,
complete(&mcp->wait_in_report);
break;
+ case MCP2221_GPIO_GET:
+ switch (data[1]) {
+ case MCP2221_SUCCESS:
+ if ((data[mcp->gp_idx] == MCP2221_ALT_F_NOT_GPIOV) ||
+ (data[mcp->gp_idx + 1] == MCP2221_ALT_F_NOT_GPIOD)) {
+ mcp->status = -ENOENT;
+ } else {
+ mcp->status = !!data[mcp->gp_idx];
+ mcp->gpio_dir = !!data[mcp->gp_idx + 1];
+ }
+ break;
+ default:
+ mcp->status = -EAGAIN;
+ }
+ complete(&mcp->wait_in_report);
+ break;
+
+ case MCP2221_GPIO_SET:
+ switch (data[1]) {
+ case MCP2221_SUCCESS:
+ if ((data[mcp->gp_idx] == MCP2221_ALT_F_NOT_GPIOV) ||
+ (data[mcp->gp_idx - 1] == MCP2221_ALT_F_NOT_GPIOV)) {
+ mcp->status = -ENOENT;
+ } else {
+ mcp->status = 0;
+ }
+ break;
+ default:
+ mcp->status = -EAGAIN;
+ }
+ complete(&mcp->wait_in_report);
+ break;
+
default:
mcp->status = -EIO;
complete(&mcp->wait_in_report);
@@ -702,8 +847,32 @@ static int mcp2221_probe(struct hid_device *hdev,
}
i2c_set_adapdata(&mcp->adapter, mcp);
+ /* Setup GPIO chip */
+ mcp->gc = devm_kzalloc(&hdev->dev, sizeof(*mcp->gc), GFP_KERNEL);
+ if (!mcp->gc) {
+ ret = -ENOMEM;
+ goto err_gc;
+ }
+
+ mcp->gc->label = "mcp2221_gpio";
+ mcp->gc->direction_input = mcp_gpio_direction_input;
+ mcp->gc->direction_output = mcp_gpio_direction_output;
+ mcp->gc->get_direction = mcp_gpio_get_direction;
+ mcp->gc->set = mcp_gpio_set;
+ mcp->gc->get = mcp_gpio_get;
+ mcp->gc->ngpio = 4;
+ mcp->gc->base = -1;
+ mcp->gc->can_sleep = 1;
+ mcp->gc->parent = &hdev->dev;
+
+ ret = devm_gpiochip_add_data(&hdev->dev, mcp->gc, mcp);
+ if (ret)
+ goto err_gc;
+
return 0;
+err_gc:
+ i2c_del_adapter(&mcp->adapter);
err_i2c:
hid_hw_close(mcp->hdev);
err_hstop:
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 03c720b47306..3f94b4954225 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -69,6 +69,7 @@ MODULE_LICENSE("GPL");
#define MT_QUIRK_ASUS_CUSTOM_UP BIT(17)
#define MT_QUIRK_WIN8_PTP_BUTTONS BIT(18)
#define MT_QUIRK_SEPARATE_APP_REPORT BIT(19)
+#define MT_QUIRK_FORCE_MULTI_INPUT BIT(20)
#define MT_INPUTMODE_TOUCHSCREEN 0x02
#define MT_INPUTMODE_TOUCHPAD 0x03
@@ -188,7 +189,8 @@ static void mt_post_parse(struct mt_device *td, struct mt_application *app);
/* reserved 0x0011 */
#define MT_CLS_WIN_8 0x0012
#define MT_CLS_EXPORT_ALL_INPUTS 0x0013
-#define MT_CLS_WIN_8_DUAL 0x0014
+/* reserved 0x0014 */
+#define MT_CLS_WIN_8_FORCE_MULTI_INPUT 0x0015
/* vendor specific classes */
#define MT_CLS_3M 0x0101
@@ -272,12 +274,14 @@ static const struct mt_class mt_classes[] = {
.quirks = MT_QUIRK_ALWAYS_VALID |
MT_QUIRK_CONTACT_CNT_ACCURATE,
.export_all_inputs = true },
- { .name = MT_CLS_WIN_8_DUAL,
+ { .name = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
.quirks = MT_QUIRK_ALWAYS_VALID |
MT_QUIRK_IGNORE_DUPLICATES |
MT_QUIRK_HOVERING |
MT_QUIRK_CONTACT_CNT_ACCURATE |
- MT_QUIRK_WIN8_PTP_BUTTONS,
+ MT_QUIRK_STICKY_FINGERS |
+ MT_QUIRK_WIN8_PTP_BUTTONS |
+ MT_QUIRK_FORCE_MULTI_INPUT,
.export_all_inputs = true },
/*
@@ -754,8 +758,7 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
MT_STORE_FIELD(inrange_state);
return 1;
case HID_DG_CONFIDENCE:
- if ((cls->name == MT_CLS_WIN_8 ||
- cls->name == MT_CLS_WIN_8_DUAL) &&
+ if (cls->name == MT_CLS_WIN_8 &&
(field->application == HID_DG_TOUCHPAD ||
field->application == HID_DG_TOUCHSCREEN))
app->quirks |= MT_QUIRK_CONFIDENCE;
@@ -896,7 +899,7 @@ static void mt_release_pending_palms(struct mt_device *td,
clear_bit(slotnum, app->pending_palm_slots);
input_mt_slot(input, slotnum);
- input_mt_report_slot_state(input, MT_TOOL_PALM, false);
+ input_mt_report_slot_inactive(input);
need_sync = true;
}
@@ -1640,9 +1643,7 @@ static void mt_release_contacts(struct hid_device *hid)
if (mt) {
for (i = 0; i < mt->num_slots; i++) {
input_mt_slot(input_dev, i);
- input_mt_report_slot_state(input_dev,
- MT_TOOL_FINGER,
- false);
+ input_mt_report_slot_inactive(input_dev);
}
input_mt_sync_frame(input_dev);
input_sync(input_dev);
@@ -1714,6 +1715,11 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (id->group != HID_GROUP_MULTITOUCH_WIN_8)
hdev->quirks |= HID_QUIRK_MULTI_INPUT;
+ if (mtclass->quirks & MT_QUIRK_FORCE_MULTI_INPUT) {
+ hdev->quirks &= ~HID_QUIRK_INPUT_PER_APP;
+ hdev->quirks |= HID_QUIRK_MULTI_INPUT;
+ }
+
timer_setup(&td->release_timer, mt_expired_timeout, 0);
ret = hid_parse(hdev);
@@ -1786,32 +1792,6 @@ static const struct hid_device_id mt_devices[] = {
MT_USB_DEVICE(USB_VENDOR_ID_3M,
USB_DEVICE_ID_3M3266) },
- /* Alps devices */
- { .driver_data = MT_CLS_WIN_8_DUAL,
- HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
- USB_VENDOR_ID_ALPS_JP,
- HID_DEVICE_ID_ALPS_U1_DUAL_PTP) },
- { .driver_data = MT_CLS_WIN_8_DUAL,
- HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
- USB_VENDOR_ID_ALPS_JP,
- HID_DEVICE_ID_ALPS_U1_DUAL_3BTN_PTP) },
- { .driver_data = MT_CLS_WIN_8_DUAL,
- HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
- USB_VENDOR_ID_ALPS_JP,
- HID_DEVICE_ID_ALPS_1222) },
-
- /* Lenovo X1 TAB Gen 2 */
- { .driver_data = MT_CLS_WIN_8_DUAL,
- HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8,
- USB_VENDOR_ID_LENOVO,
- USB_DEVICE_ID_LENOVO_X1_TAB) },
-
- /* Lenovo X1 TAB Gen 3 */
- { .driver_data = MT_CLS_WIN_8_DUAL,
- HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8,
- USB_VENDOR_ID_LENOVO,
- USB_DEVICE_ID_LENOVO_X1_TAB3) },
-
/* Anton devices */
{ .driver_data = MT_CLS_EXPORT_ALL_INPUTS,
MT_USB_DEVICE(USB_VENDOR_ID_ANTON,
@@ -1846,12 +1826,6 @@ static const struct hid_device_id mt_devices[] = {
MT_USB_DEVICE(USB_VENDOR_ID_CHUNGHWAT,
USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH) },
- /* Cirque devices */
- { .driver_data = MT_CLS_WIN_8_DUAL,
- HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
- I2C_VENDOR_ID_CIRQUE,
- I2C_PRODUCT_ID_CIRQUE_121F) },
-
/* CJTouch panels */
{ .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_CJTOUCH,
@@ -1926,6 +1900,11 @@ static const struct hid_device_id mt_devices[] = {
MT_USB_DEVICE(USB_VENDOR_ID_DWAV,
USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_C002) },
+ /* Elan devices */
+ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
+ HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
+ USB_VENDOR_ID_ELAN, 0x313a) },
+
/* Elitegroup panel */
{ .driver_data = MT_CLS_SERIAL,
MT_USB_DEVICE(USB_VENDOR_ID_ELITEGROUP,
@@ -2056,6 +2035,11 @@ static const struct hid_device_id mt_devices[] = {
MT_USB_DEVICE(USB_VENDOR_ID_STANTUM_STM,
USB_DEVICE_ID_MTP_STM)},
+ /* Synaptics devices */
+ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
+ HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
+ USB_VENDOR_ID_SYNAPTICS, 0xce08) },
+
/* TopSeed panels */
{ .driver_data = MT_CLS_TOPSEED,
MT_USB_DEVICE(USB_VENDOR_ID_TOPSEED2,
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index e4cb543de0cd..ca8b5c261c7c 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -168,6 +168,7 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8882), HID_QUIRK_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8883), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_TRUST, USB_DEVICE_ID_TRUST_PANORA_TABLET), HID_QUIRK_MULTI_INPUT | HID_QUIRK_HIDINPUT_FORCE },
{ HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD), HID_QUIRK_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_KNA5), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWA60), HID_QUIRK_MULTI_INPUT },
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index 4c6ed6ef31f1..2f073f536070 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -867,6 +867,23 @@ static u8 *sony_report_fixup(struct hid_device *hdev, u8 *rdesc,
if (sc->quirks & PS3REMOTE)
return ps3remote_fixup(hdev, rdesc, rsize);
+ /*
+ * Some knock-off USB dongles incorrectly report their button count
+ * as 13 instead of 16 causing three non-functional buttons.
+ */
+ if ((sc->quirks & SIXAXIS_CONTROLLER_USB) && *rsize >= 45 &&
+ /* Report Count (13) */
+ rdesc[23] == 0x95 && rdesc[24] == 0x0D &&
+ /* Usage Maximum (13) */
+ rdesc[37] == 0x29 && rdesc[38] == 0x0D &&
+ /* Report Count (3) */
+ rdesc[43] == 0x95 && rdesc[44] == 0x03) {
+ hid_info(hdev, "Fixing up USB dongle report descriptor\n");
+ rdesc[24] = 0x10;
+ rdesc[38] = 0x10;
+ rdesc[44] = 0x00;
+ }
+
return rdesc;
}
diff --git a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
index a66f08041a1a..ec142bc8c1da 100644
--- a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
+++ b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
@@ -389,6 +389,14 @@ static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = {
},
.driver_data = (void *)&sipodev_desc
},
+ {
+ .ident = "Schneider SCL142ALM",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "SCHNEIDER"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "SCL142ALM"),
+ },
+ .driver_data = (void *)&sipodev_desc
+ },
{ } /* Terminate list */
};
diff --git a/drivers/hid/intel-ish-hid/ishtp-fw-loader.c b/drivers/hid/intel-ish-hid/ishtp-fw-loader.c
index aa2dbed30fc3..6cf59fd26ad7 100644
--- a/drivers/hid/intel-ish-hid/ishtp-fw-loader.c
+++ b/drivers/hid/intel-ish-hid/ishtp-fw-loader.c
@@ -480,6 +480,7 @@ static int ish_query_loader_prop(struct ishtp_cl_data *client_data,
sizeof(ldr_xfer_query_resp));
if (rv < 0) {
client_data->flag_retry = true;
+ *fw_info = (struct shim_fw_info){};
return rv;
}
@@ -489,6 +490,7 @@ static int ish_query_loader_prop(struct ishtp_cl_data *client_data,
"data size %d is not equal to size of loader_xfer_query_response %zu\n",
rv, sizeof(struct loader_xfer_query_response));
client_data->flag_retry = true;
+ *fw_info = (struct shim_fw_info){};
return -EMSGSIZE;
}
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 23f358cb7f49..90070b337c10 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -290,6 +290,34 @@ int vmbus_send_tl_connect_request(const guid_t *shv_guest_servie_id,
EXPORT_SYMBOL_GPL(vmbus_send_tl_connect_request);
/*
+ * Set/change the vCPU (@target_vp) the channel (@child_relid) will interrupt.
+ *
+ * CHANNELMSG_MODIFYCHANNEL messages are asynchronous. Also, Hyper-V does not
+ * ACK such messages. IOW we can't know when the host will stop interrupting
+ * the "old" vCPU and start interrupting the "new" vCPU for the given channel.
+ *
+ * The CHANNELMSG_MODIFYCHANNEL message type is supported since VMBus version
+ * VERSION_WIN10_V4_1.
+ */
+int vmbus_send_modifychannel(u32 child_relid, u32 target_vp)
+{
+ struct vmbus_channel_modifychannel conn_msg;
+ int ret;
+
+ memset(&conn_msg, 0, sizeof(conn_msg));
+ conn_msg.header.msgtype = CHANNELMSG_MODIFYCHANNEL;
+ conn_msg.child_relid = child_relid;
+ conn_msg.target_vp = target_vp;
+
+ ret = vmbus_post_msg(&conn_msg, sizeof(conn_msg), true);
+
+ trace_vmbus_send_modifychannel(&conn_msg, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vmbus_send_modifychannel);
+
+/*
* create_gpadl_header - Creates a gpadl for the specified buffer
*/
static int create_gpadl_header(void *kbuffer, u32 size,
@@ -594,35 +622,31 @@ post_msg_err:
}
EXPORT_SYMBOL_GPL(vmbus_teardown_gpadl);
-static void reset_channel_cb(void *arg)
-{
- struct vmbus_channel *channel = arg;
-
- channel->onchannel_callback = NULL;
-}
-
void vmbus_reset_channel_cb(struct vmbus_channel *channel)
{
+ unsigned long flags;
+
/*
* vmbus_on_event(), running in the per-channel tasklet, can race
* with vmbus_close_internal() in the case of SMP guest, e.g., when
* the former is accessing channel->inbound.ring_buffer, the latter
* could be freeing the ring_buffer pages, so here we must stop it
* first.
+ *
+ * vmbus_chan_sched() might call the netvsc driver callback function
+ * that ends up scheduling NAPI work that accesses the ring buffer.
+ * At this point, we have to ensure that any such work is completed
+ * and that the channel ring buffer is no longer being accessed, cf.
+ * the calls to napi_disable() in netvsc_device_remove().
*/
tasklet_disable(&channel->callback_event);
- channel->sc_creation_callback = NULL;
+ /* See the inline comments in vmbus_chan_sched(). */
+ spin_lock_irqsave(&channel->sched_lock, flags);
+ channel->onchannel_callback = NULL;
+ spin_unlock_irqrestore(&channel->sched_lock, flags);
- /* Stop the callback asap */
- if (channel->target_cpu != get_cpu()) {
- put_cpu();
- smp_call_function_single(channel->target_cpu, reset_channel_cb,
- channel, true);
- } else {
- reset_channel_cb(channel);
- put_cpu();
- }
+ channel->sc_creation_callback = NULL;
/* Re-enable tasklet for use on re-open */
tasklet_enable(&channel->callback_event);
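vmbus_send_modifychannel(), added earlier in this file, is fire-and-forget: the host never ACKs CHANNELMSG_MODIFYCHANNEL, so a caller can only post the request and update its own bookkeeping. A hedged sketch of how a caller might retarget an open channel's interrupt (retarget_channel is a placeholder; the actual sysfs handler that does this lives in vmbus_drv.c and is not part of this diff):

	/* sketch: move an open channel's interrupt to target_cpu */
	static int retarget_channel(struct vmbus_channel *channel, u32 target_cpu)
	{
		int ret;

		ret = vmbus_send_modifychannel(channel->offermsg.child_relid,
					       hv_cpu_number_to_vp_number(target_cpu));
		if (ret)
			return ret;

		/* Hyper-V does not ACK the message; just record the new target */
		channel->target_cpu = target_cpu;
		return 0;
	}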
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 501c43c5851d..417a95e5094d 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -18,14 +18,15 @@
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/delay.h>
+#include <linux/cpu.h>
#include <linux/hyperv.h>
#include <asm/mshyperv.h>
#include "hyperv_vmbus.h"
-static void init_vp_index(struct vmbus_channel *channel, u16 dev_type);
+static void init_vp_index(struct vmbus_channel *channel);
-static const struct vmbus_device vmbus_devs[] = {
+const struct vmbus_device vmbus_devs[] = {
/* IDE */
{ .dev_type = HV_IDE,
HV_IDE_GUID,
@@ -315,11 +316,11 @@ static struct vmbus_channel *alloc_channel(void)
if (!channel)
return NULL;
+ spin_lock_init(&channel->sched_lock);
spin_lock_init(&channel->lock);
init_completion(&channel->rescind_event);
INIT_LIST_HEAD(&channel->sc_list);
- INIT_LIST_HEAD(&channel->percpu_list);
tasklet_init(&channel->callback_event,
vmbus_on_event, (unsigned long)channel);
@@ -340,23 +341,49 @@ static void free_channel(struct vmbus_channel *channel)
kobject_put(&channel->kobj);
}
-static void percpu_channel_enq(void *arg)
+void vmbus_channel_map_relid(struct vmbus_channel *channel)
{
- struct vmbus_channel *channel = arg;
- struct hv_per_cpu_context *hv_cpu
- = this_cpu_ptr(hv_context.cpu_context);
-
- list_add_tail_rcu(&channel->percpu_list, &hv_cpu->chan_list);
+ if (WARN_ON(channel->offermsg.child_relid >= MAX_CHANNEL_RELIDS))
+ return;
+ /*
+ * The mapping of the channel's relid is visible from the CPUs that
+ * execute vmbus_chan_sched() by the time that vmbus_chan_sched() will
+ * execute:
+ *
+ * (a) In the "normal (i.e., not resuming from hibernation)" path,
+ * the full barrier in smp_store_mb() guarantees that the store
+ * is propagated to all CPUs before the add_channel_work work
+ * is queued. In turn, add_channel_work is queued before the
+ * channel's ring buffer is allocated/initialized and the
+ * OPENCHANNEL message for the channel is sent in vmbus_open().
+ * Hyper-V won't start sending the interrupts for the channel
+ * before the OPENCHANNEL message is acked. The memory barrier
+ * in vmbus_chan_sched() -> sync_test_and_clear_bit() ensures
+ * that vmbus_chan_sched() must find the channel's relid in
+ * recv_int_page before retrieving the channel pointer from the
+ * array of channels.
+ *
+ * (b) In the "resuming from hibernation" path, the smp_store_mb()
+ * guarantees that the store is propagated to all CPUs before
+ * the VMBus connection is marked as ready for the resume event
+ * (cf. check_ready_for_resume_event()). The interrupt handler
+ * of the VMBus driver and vmbus_chan_sched() can not run before
+ * vmbus_bus_resume() has completed execution (cf. resume_noirq).
+ */
+ smp_store_mb(
+ vmbus_connection.channels[channel->offermsg.child_relid],
+ channel);
}
-static void percpu_channel_deq(void *arg)
+void vmbus_channel_unmap_relid(struct vmbus_channel *channel)
{
- struct vmbus_channel *channel = arg;
-
- list_del_rcu(&channel->percpu_list);
+ if (WARN_ON(channel->offermsg.child_relid >= MAX_CHANNEL_RELIDS))
+ return;
+ WRITE_ONCE(
+ vmbus_connection.channels[channel->offermsg.child_relid],
+ NULL);
}
-
static void vmbus_release_relid(u32 relid)
{
struct vmbus_channel_relid_released msg;
@@ -373,39 +400,43 @@ static void vmbus_release_relid(u32 relid)
void hv_process_channel_removal(struct vmbus_channel *channel)
{
- struct vmbus_channel *primary_channel;
unsigned long flags;
- BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex));
+ lockdep_assert_held(&vmbus_connection.channel_mutex);
BUG_ON(!channel->rescind);
- if (channel->target_cpu != get_cpu()) {
- put_cpu();
- smp_call_function_single(channel->target_cpu,
- percpu_channel_deq, channel, true);
- } else {
- percpu_channel_deq(channel);
- put_cpu();
- }
+ /*
+ * hv_process_channel_removal() could find INVALID_RELID only for
+ * hv_sock channels. See the inline comments in vmbus_onoffer().
+ */
+ WARN_ON(channel->offermsg.child_relid == INVALID_RELID &&
+ !is_hvsock_channel(channel));
+
+ /*
+ * Upon suspend, an in-use hv_sock channel is removed from the array of
+ * channels and the relid is invalidated. After hibernation, when the
+ * user-space application destroys the channel, it's unnecessary and
+ * unsafe to remove the channel from the array of channels. See also
+ * the inline comments before the call of vmbus_release_relid() below.
+ */
+ if (channel->offermsg.child_relid != INVALID_RELID)
+ vmbus_channel_unmap_relid(channel);
if (channel->primary_channel == NULL) {
list_del(&channel->listentry);
-
- primary_channel = channel;
} else {
- primary_channel = channel->primary_channel;
+ struct vmbus_channel *primary_channel = channel->primary_channel;
spin_lock_irqsave(&primary_channel->lock, flags);
list_del(&channel->sc_list);
spin_unlock_irqrestore(&primary_channel->lock, flags);
}
/*
- * We need to free the bit for init_vp_index() to work in the case
- * of sub-channel, when we reload drivers like hv_netvsc.
+ * If this is a "perf" channel, updates the hv_numa_map[] masks so that
+ * init_vp_index() can (re-)use the CPU.
*/
- if (channel->affinity_policy == HV_LOCALIZED)
- cpumask_clear_cpu(channel->target_cpu,
- &primary_channel->alloced_cpus_in_node);
+ if (hv_is_perf_channel(channel))
+ hv_clear_alloced_cpu(channel->target_cpu);
/*
* Upon suspend, an in-use hv_sock channel is marked as "rescinded" and
@@ -440,23 +471,8 @@ static void vmbus_add_channel_work(struct work_struct *work)
container_of(work, struct vmbus_channel, add_channel_work);
struct vmbus_channel *primary_channel = newchannel->primary_channel;
unsigned long flags;
- u16 dev_type;
int ret;
- dev_type = hv_get_dev_type(newchannel);
-
- init_vp_index(newchannel, dev_type);
-
- if (newchannel->target_cpu != get_cpu()) {
- put_cpu();
- smp_call_function_single(newchannel->target_cpu,
- percpu_channel_enq,
- newchannel, true);
- } else {
- percpu_channel_enq(newchannel);
- put_cpu();
- }
-
/*
* This state is used to indicate a successful open
* so that when we do close the channel normally, we
@@ -488,7 +504,7 @@ static void vmbus_add_channel_work(struct work_struct *work)
if (!newchannel->device_obj)
goto err_deq_chan;
- newchannel->device_obj->device_id = dev_type;
+ newchannel->device_obj->device_id = newchannel->device_id;
/*
* Add the new device to the bus. This will kick off device-driver
* binding which eventually invokes the device driver's AddDevice()
@@ -523,17 +539,10 @@ err_deq_chan:
spin_unlock_irqrestore(&primary_channel->lock, flags);
}
- mutex_unlock(&vmbus_connection.channel_mutex);
+ /* vmbus_process_offer() has mapped the channel. */
+ vmbus_channel_unmap_relid(newchannel);
- if (newchannel->target_cpu != get_cpu()) {
- put_cpu();
- smp_call_function_single(newchannel->target_cpu,
- percpu_channel_deq,
- newchannel, true);
- } else {
- percpu_channel_deq(newchannel);
- put_cpu();
- }
+ mutex_unlock(&vmbus_connection.channel_mutex);
vmbus_release_relid(newchannel->offermsg.child_relid);
@@ -551,8 +560,35 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
unsigned long flags;
bool fnew = true;
+ /*
+ * Synchronize vmbus_process_offer() and CPU hotplugging:
+ *
+ * CPU1 CPU2
+ *
+ * [vmbus_process_offer()] [Hot removal of the CPU]
+ *
+ * CPU_READ_LOCK CPUS_WRITE_LOCK
+ * LOAD cpu_online_mask SEARCH chn_list
+ * STORE target_cpu LOAD target_cpu
+ * INSERT chn_list STORE cpu_online_mask
+ * CPUS_READ_UNLOCK CPUS_WRITE_UNLOCK
+ *
+ * Forbids: CPU1's LOAD from *not* seeing CPU2's STORE &&
+ * CPU2's SEARCH from *not* seeing CPU1's INSERT
+ *
+ * Forbids: CPU2's SEARCH from seeing CPU1's INSERT &&
+ * CPU2's LOAD from *not* seeing CPU1's STORE
+ */
+ cpus_read_lock();
+
+ /*
+ * Serializes the modifications of the chn_list list as well as
+ * the accesses to next_numa_node_id in init_vp_index().
+ */
mutex_lock(&vmbus_connection.channel_mutex);
+ init_vp_index(newchannel);
+
/* Remember the channels that should be cleaned up upon suspend. */
if (is_hvsock_channel(newchannel) || is_sub_channel(newchannel))
atomic_inc(&vmbus_connection.nr_chan_close_on_suspend);
@@ -599,7 +635,10 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
spin_unlock_irqrestore(&channel->lock, flags);
}
+ vmbus_channel_map_relid(newchannel);
+
mutex_unlock(&vmbus_connection.channel_mutex);
+ cpus_read_unlock();
/*
* vmbus_process_offer() mustn't call channel->sc_creation_callback()
@@ -632,73 +671,61 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
* We use this state to statically distribute the channel interrupt load.
*/
static int next_numa_node_id;
-/*
- * init_vp_index() accesses global variables like next_numa_node_id, and
- * it can run concurrently for primary channels and sub-channels: see
- * vmbus_process_offer(), so we need the lock to protect the global
- * variables.
- */
-static DEFINE_SPINLOCK(bind_channel_to_cpu_lock);
/*
* Starting with Win8, we can statically distribute the incoming
* channel interrupt load by binding a channel to VCPU.
- * We distribute the interrupt loads to one or more NUMA nodes based on
- * the channel's affinity_policy.
*
* For pre-win8 hosts or non-performance critical channels we assign the
- * first CPU in the first NUMA node.
+ * VMBUS_CONNECT_CPU.
+ *
+ * Starting with win8, performance critical channels will be distributed
+ * evenly among all the available NUMA nodes. Once the node is assigned,
+ * we will assign the CPU based on a simple round robin scheme.
*/
-static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
+static void init_vp_index(struct vmbus_channel *channel)
{
- u32 cur_cpu;
- bool perf_chn = vmbus_devs[dev_type].perf_device;
- struct vmbus_channel *primary = channel->primary_channel;
- int next_node;
+ bool perf_chn = hv_is_perf_channel(channel);
cpumask_var_t available_mask;
struct cpumask *alloced_mask;
+ u32 target_cpu;
+ int numa_node;
if ((vmbus_proto_version == VERSION_WS2008) ||
(vmbus_proto_version == VERSION_WIN7) || (!perf_chn) ||
!alloc_cpumask_var(&available_mask, GFP_KERNEL)) {
/*
* Prior to win8, all channel interrupts are
- * delivered on cpu 0.
+ * delivered on VMBUS_CONNECT_CPU.
* Also if the channel is not a performance critical
- * channel, bind it to cpu 0.
- * In case alloc_cpumask_var() fails, bind it to cpu 0.
+ * channel, bind it to VMBUS_CONNECT_CPU.
+ * In case alloc_cpumask_var() fails, bind it to
+ * VMBUS_CONNECT_CPU.
*/
- channel->numa_node = 0;
- channel->target_cpu = 0;
- channel->target_vp = hv_cpu_number_to_vp_number(0);
+ channel->numa_node = cpu_to_node(VMBUS_CONNECT_CPU);
+ channel->target_cpu = VMBUS_CONNECT_CPU;
+ channel->target_vp =
+ hv_cpu_number_to_vp_number(VMBUS_CONNECT_CPU);
+ if (perf_chn)
+ hv_set_alloced_cpu(VMBUS_CONNECT_CPU);
return;
}
- spin_lock(&bind_channel_to_cpu_lock);
-
- /*
- * Based on the channel affinity policy, we will assign the NUMA
- * nodes.
- */
-
- if ((channel->affinity_policy == HV_BALANCED) || (!primary)) {
- while (true) {
- next_node = next_numa_node_id++;
- if (next_node == nr_node_ids) {
- next_node = next_numa_node_id = 0;
- continue;
- }
- if (cpumask_empty(cpumask_of_node(next_node)))
- continue;
- break;
+ while (true) {
+ numa_node = next_numa_node_id++;
+ if (numa_node == nr_node_ids) {
+ next_numa_node_id = 0;
+ continue;
}
- channel->numa_node = next_node;
- primary = channel;
+ if (cpumask_empty(cpumask_of_node(numa_node)))
+ continue;
+ break;
}
- alloced_mask = &hv_context.hv_numa_map[primary->numa_node];
+ channel->numa_node = numa_node;
+ alloced_mask = &hv_context.hv_numa_map[numa_node];
if (cpumask_weight(alloced_mask) ==
- cpumask_weight(cpumask_of_node(primary->numa_node))) {
+ cpumask_weight(cpumask_of_node(numa_node))) {
/*
* We have cycled through all the CPUs in the node;
* reset the alloced map.
@@ -706,59 +733,13 @@ static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
cpumask_clear(alloced_mask);
}
- cpumask_xor(available_mask, alloced_mask,
- cpumask_of_node(primary->numa_node));
-
- cur_cpu = -1;
-
- if (primary->affinity_policy == HV_LOCALIZED) {
- /*
- * Normally Hyper-V host doesn't create more subchannels
- * than there are VCPUs on the node but it is possible when not
- * all present VCPUs on the node are initialized by guest.
- * Clear the alloced_cpus_in_node to start over.
- */
- if (cpumask_equal(&primary->alloced_cpus_in_node,
- cpumask_of_node(primary->numa_node)))
- cpumask_clear(&primary->alloced_cpus_in_node);
- }
-
- while (true) {
- cur_cpu = cpumask_next(cur_cpu, available_mask);
- if (cur_cpu >= nr_cpu_ids) {
- cur_cpu = -1;
- cpumask_copy(available_mask,
- cpumask_of_node(primary->numa_node));
- continue;
- }
-
- if (primary->affinity_policy == HV_LOCALIZED) {
- /*
- * NOTE: in the case of sub-channel, we clear the
- * sub-channel related bit(s) in
- * primary->alloced_cpus_in_node in
- * hv_process_channel_removal(), so when we
- * reload drivers like hv_netvsc in SMP guest, here
- * we're able to re-allocate
- * bit from primary->alloced_cpus_in_node.
- */
- if (!cpumask_test_cpu(cur_cpu,
- &primary->alloced_cpus_in_node)) {
- cpumask_set_cpu(cur_cpu,
- &primary->alloced_cpus_in_node);
- cpumask_set_cpu(cur_cpu, alloced_mask);
- break;
- }
- } else {
- cpumask_set_cpu(cur_cpu, alloced_mask);
- break;
- }
- }
+ cpumask_xor(available_mask, alloced_mask, cpumask_of_node(numa_node));
- channel->target_cpu = cur_cpu;
- channel->target_vp = hv_cpu_number_to_vp_number(cur_cpu);
+ target_cpu = cpumask_first(available_mask);
+ cpumask_set_cpu(target_cpu, alloced_mask);
- spin_unlock(&bind_channel_to_cpu_lock);
+ channel->target_cpu = target_cpu;
+ channel->target_vp = hv_cpu_number_to_vp_number(target_cpu);
free_cpumask_var(available_mask);
}
@@ -890,6 +871,7 @@ static void vmbus_setup_channel_state(struct vmbus_channel *channel,
sizeof(struct vmbus_channel_offer_channel));
channel->monitor_grp = (u8)offer->monitorid / 32;
channel->monitor_bit = (u8)offer->monitorid % 32;
+ channel->device_id = hv_get_dev_type(channel);
}
/*
@@ -940,8 +922,6 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
oldchannel = find_primary_channel_by_offer(offer);
if (oldchannel != NULL) {
- atomic_dec(&vmbus_connection.offer_in_progress);
-
/*
* We're resuming from hibernation: all the sub-channel and
* hv_sock channels we had before the hibernation should have
@@ -949,36 +929,65 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
* primary channel that we had before the hibernation.
*/
+ /*
+ * { Initially: channel relid = INVALID_RELID,
+ * channels[valid_relid] = NULL }
+ *
+ * CPU1 CPU2
+ *
+ * [vmbus_onoffer()] [vmbus_device_release()]
+ *
+ * LOCK channel_mutex LOCK channel_mutex
+ * STORE channel relid = valid_relid LOAD r1 = channel relid
+ * MAP_RELID channel if (r1 != INVALID_RELID)
+ * UNLOCK channel_mutex UNMAP_RELID channel
+ * UNLOCK channel_mutex
+ *
+ * Forbids: r1 == valid_relid &&
+ * channels[valid_relid] == channel
+ *
+ * Note. r1 can be INVALID_RELID only for an hv_sock channel.
+ * None of the hv_sock channels which were present before the
+ * suspend are re-offered upon the resume. See the WARN_ON()
+ * in hv_process_channel_removal().
+ */
+ mutex_lock(&vmbus_connection.channel_mutex);
+
+ atomic_dec(&vmbus_connection.offer_in_progress);
+
WARN_ON(oldchannel->offermsg.child_relid != INVALID_RELID);
/* Fix up the relid. */
oldchannel->offermsg.child_relid = offer->child_relid;
offer_sz = sizeof(*offer);
- if (memcmp(offer, &oldchannel->offermsg, offer_sz) == 0) {
- check_ready_for_resume_event();
- return;
+ if (memcmp(offer, &oldchannel->offermsg, offer_sz) != 0) {
+ /*
+ * This is not an error, since the host can also change
+ * the other field(s) of the offer, e.g. on WS RS5
+ * (Build 17763), the offer->connection_id of the
+ * Mellanox VF vmbus device can change when the host
+ * reoffers the device upon resume.
+ */
+ pr_debug("vmbus offer changed: relid=%d\n",
+ offer->child_relid);
+
+ print_hex_dump_debug("Old vmbus offer: ",
+ DUMP_PREFIX_OFFSET, 16, 4,
+ &oldchannel->offermsg, offer_sz,
+ false);
+ print_hex_dump_debug("New vmbus offer: ",
+ DUMP_PREFIX_OFFSET, 16, 4,
+ offer, offer_sz, false);
+
+ /* Fix up the old channel. */
+ vmbus_setup_channel_state(oldchannel, offer);
}
- /*
- * This is not an error, since the host can also change the
- * other field(s) of the offer, e.g. on WS RS5 (Build 17763),
- * the offer->connection_id of the Mellanox VF vmbus device
- * can change when the host reoffers the device upon resume.
- */
- pr_debug("vmbus offer changed: relid=%d\n",
- offer->child_relid);
-
- print_hex_dump_debug("Old vmbus offer: ", DUMP_PREFIX_OFFSET,
- 16, 4, &oldchannel->offermsg, offer_sz,
- false);
- print_hex_dump_debug("New vmbus offer: ", DUMP_PREFIX_OFFSET,
- 16, 4, offer, offer_sz, false);
-
- /* Fix up the old channel. */
- vmbus_setup_channel_state(oldchannel, offer);
-
+ /* Add the channel back to the array of channels. */
+ vmbus_channel_map_relid(oldchannel);
check_ready_for_resume_event();
+ mutex_unlock(&vmbus_connection.channel_mutex);
return;
}
@@ -1028,11 +1037,22 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
* offer comes in first and then the rescind.
* Since we process these events in work elements,
* and with preemption, we may end up processing
- * the events out of order. Given that we handle these
- * work elements on the same CPU, this is possible only
- * in the case of preemption. In any case wait here
- * until the offer processing has moved beyond the
- * point where the channel is discoverable.
+ * the events out of order. We rely on the synchronization
+ * provided by offer_in_progress and by channel_mutex for
+ * ordering these events:
+ *
+ * { Initially: offer_in_progress = 1 }
+ *
+ * CPU1 CPU2
+ *
+ * [vmbus_onoffer()] [vmbus_onoffer_rescind()]
+ *
+ * LOCK channel_mutex WAIT_ON offer_in_progress == 0
+ * DECREMENT offer_in_progress LOCK channel_mutex
+ * STORE channels[] LOAD channels[]
+ * UNLOCK channel_mutex UNLOCK channel_mutex
+ *
+ * Forbids: CPU2's LOAD from *not* seeing CPU1's STORE
*/
while (atomic_read(&vmbus_connection.offer_in_progress) != 0) {
@@ -1332,30 +1352,36 @@ static void vmbus_onversion_response(
/* Channel message dispatch table */
const struct vmbus_channel_message_table_entry
channel_message_table[CHANNELMSG_COUNT] = {
- { CHANNELMSG_INVALID, 0, NULL },
- { CHANNELMSG_OFFERCHANNEL, 0, vmbus_onoffer },
- { CHANNELMSG_RESCIND_CHANNELOFFER, 0, vmbus_onoffer_rescind },
- { CHANNELMSG_REQUESTOFFERS, 0, NULL },
- { CHANNELMSG_ALLOFFERS_DELIVERED, 1, vmbus_onoffers_delivered },
- { CHANNELMSG_OPENCHANNEL, 0, NULL },
- { CHANNELMSG_OPENCHANNEL_RESULT, 1, vmbus_onopen_result },
- { CHANNELMSG_CLOSECHANNEL, 0, NULL },
- { CHANNELMSG_GPADL_HEADER, 0, NULL },
- { CHANNELMSG_GPADL_BODY, 0, NULL },
- { CHANNELMSG_GPADL_CREATED, 1, vmbus_ongpadl_created },
- { CHANNELMSG_GPADL_TEARDOWN, 0, NULL },
- { CHANNELMSG_GPADL_TORNDOWN, 1, vmbus_ongpadl_torndown },
- { CHANNELMSG_RELID_RELEASED, 0, NULL },
- { CHANNELMSG_INITIATE_CONTACT, 0, NULL },
- { CHANNELMSG_VERSION_RESPONSE, 1, vmbus_onversion_response },
- { CHANNELMSG_UNLOAD, 0, NULL },
- { CHANNELMSG_UNLOAD_RESPONSE, 1, vmbus_unload_response },
- { CHANNELMSG_18, 0, NULL },
- { CHANNELMSG_19, 0, NULL },
- { CHANNELMSG_20, 0, NULL },
- { CHANNELMSG_TL_CONNECT_REQUEST, 0, NULL },
- { CHANNELMSG_22, 0, NULL },
- { CHANNELMSG_TL_CONNECT_RESULT, 0, NULL },
+ { CHANNELMSG_INVALID, 0, NULL, 0},
+ { CHANNELMSG_OFFERCHANNEL, 0, vmbus_onoffer,
+ sizeof(struct vmbus_channel_offer_channel)},
+ { CHANNELMSG_RESCIND_CHANNELOFFER, 0, vmbus_onoffer_rescind,
+ sizeof(struct vmbus_channel_rescind_offer) },
+ { CHANNELMSG_REQUESTOFFERS, 0, NULL, 0},
+ { CHANNELMSG_ALLOFFERS_DELIVERED, 1, vmbus_onoffers_delivered, 0},
+ { CHANNELMSG_OPENCHANNEL, 0, NULL, 0},
+ { CHANNELMSG_OPENCHANNEL_RESULT, 1, vmbus_onopen_result,
+ sizeof(struct vmbus_channel_open_result)},
+ { CHANNELMSG_CLOSECHANNEL, 0, NULL, 0},
+ { CHANNELMSG_GPADL_HEADER, 0, NULL, 0},
+ { CHANNELMSG_GPADL_BODY, 0, NULL, 0},
+ { CHANNELMSG_GPADL_CREATED, 1, vmbus_ongpadl_created,
+ sizeof(struct vmbus_channel_gpadl_created)},
+ { CHANNELMSG_GPADL_TEARDOWN, 0, NULL, 0},
+ { CHANNELMSG_GPADL_TORNDOWN, 1, vmbus_ongpadl_torndown,
+ sizeof(struct vmbus_channel_gpadl_torndown) },
+ { CHANNELMSG_RELID_RELEASED, 0, NULL, 0},
+ { CHANNELMSG_INITIATE_CONTACT, 0, NULL, 0},
+ { CHANNELMSG_VERSION_RESPONSE, 1, vmbus_onversion_response,
+ sizeof(struct vmbus_channel_version_response)},
+ { CHANNELMSG_UNLOAD, 0, NULL, 0},
+ { CHANNELMSG_UNLOAD_RESPONSE, 1, vmbus_unload_response, 0},
+ { CHANNELMSG_18, 0, NULL, 0},
+ { CHANNELMSG_19, 0, NULL, 0},
+ { CHANNELMSG_20, 0, NULL, 0},
+ { CHANNELMSG_TL_CONNECT_REQUEST, 0, NULL, 0},
+ { CHANNELMSG_MODIFYCHANNEL, 0, NULL, 0},
+ { CHANNELMSG_TL_CONNECT_RESULT, 0, NULL, 0},
};
/*
@@ -1363,13 +1389,8 @@ channel_message_table[CHANNELMSG_COUNT] = {
*
* This is invoked in the vmbus worker thread context.
*/
-void vmbus_onmessage(void *context)
+void vmbus_onmessage(struct vmbus_channel_message_header *hdr)
{
- struct hv_message *msg = context;
- struct vmbus_channel_message_header *hdr;
-
- hdr = (struct vmbus_channel_message_header *)msg->u.payload;
-
trace_vmbus_on_message(hdr);
/*
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index 74e77de89b4f..11170d9a2e1a 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -69,7 +69,6 @@ MODULE_PARM_DESC(max_version,
int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, u32 version)
{
int ret = 0;
- unsigned int cur_cpu;
struct vmbus_channel_initiate_contact *msg;
unsigned long flags;
@@ -102,24 +101,7 @@ int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, u32 version)
msg->monitor_page1 = virt_to_phys(vmbus_connection.monitor_pages[0]);
msg->monitor_page2 = virt_to_phys(vmbus_connection.monitor_pages[1]);
- /*
- * We want all channel messages to be delivered on CPU 0.
- * This has been the behavior pre-win8. This is not
- * perf issue and having all channel messages delivered on CPU 0
- * would be ok.
- * For post win8 hosts, we support receiving channel messagges on
- * all the CPUs. This is needed for kexec to work correctly where
- * the CPU attempting to connect may not be CPU 0.
- */
- if (version >= VERSION_WIN8_1) {
- cur_cpu = get_cpu();
- msg->target_vcpu = hv_cpu_number_to_vp_number(cur_cpu);
- vmbus_connection.connect_cpu = cur_cpu;
- put_cpu();
- } else {
- msg->target_vcpu = 0;
- vmbus_connection.connect_cpu = 0;
- }
+ msg->target_vcpu = hv_cpu_number_to_vp_number(VMBUS_CONNECT_CPU);
/*
* Add to list before we send the request since we may
@@ -266,6 +248,14 @@ int vmbus_connect(void)
pr_info("Vmbus version:%d.%d\n",
version >> 16, version & 0xFFFF);
+ vmbus_connection.channels = kcalloc(MAX_CHANNEL_RELIDS,
+ sizeof(struct vmbus_channel *),
+ GFP_KERNEL);
+ if (vmbus_connection.channels == NULL) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
kfree(msginfo);
return 0;
@@ -313,33 +303,9 @@ void vmbus_disconnect(void)
*/
struct vmbus_channel *relid2channel(u32 relid)
{
- struct vmbus_channel *channel;
- struct vmbus_channel *found_channel = NULL;
- struct list_head *cur, *tmp;
- struct vmbus_channel *cur_sc;
-
- BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex));
-
- list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
- if (channel->offermsg.child_relid == relid) {
- found_channel = channel;
- break;
- } else if (!list_empty(&channel->sc_list)) {
- /*
- * Deal with sub-channels.
- */
- list_for_each_safe(cur, tmp, &channel->sc_list) {
- cur_sc = list_entry(cur, struct vmbus_channel,
- sc_list);
- if (cur_sc->offermsg.child_relid == relid) {
- found_channel = cur_sc;
- break;
- }
- }
- }
- }
-
- return found_channel;
+ if (WARN_ON(relid >= MAX_CHANNEL_RELIDS))
+ return NULL;
+ return READ_ONCE(vmbus_connection.channels[relid]);
}
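For reference, a minimal sketch (not part of this patch) of how a reader of the new channel array is expected to behave; the function name below is hypothetical, while relid2channel(), the rescind flag and the callback_event tasklet are the existing kernel objects used by vmbus_chan_sched() later in this series:

/* Hypothetical reader, for illustration only. */
static void example_handle_relid(u32 relid)
{
	struct vmbus_channel *channel;

	rcu_read_lock();			/* pairs with kfree_rcu() on channel release */
	channel = relid2channel(relid);		/* O(1) READ_ONCE() array lookup */
	if (channel && !channel->rescind)
		tasklet_schedule(&channel->callback_event);
	rcu_read_unlock();
}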
/*
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 533c8b82b344..857290dcfd95 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -117,8 +117,6 @@ int hv_synic_alloc(void)
pr_err("Unable to allocate post msg page\n");
goto err;
}
-
- INIT_LIST_HEAD(&hv_cpu->chan_list);
}
return 0;
@@ -246,10 +244,18 @@ int hv_synic_cleanup(unsigned int cpu)
unsigned long flags;
/*
+ * Hyper-V does not provide a way to change the connect CPU once
+ * it is set; we must prevent the connect CPU from going offline.
+ */
+ if (cpu == VMBUS_CONNECT_CPU)
+ return -EBUSY;
+
+ /*
* Search for channels which are bound to the CPU we're about to
- * cleanup. In case we find one and vmbus is still connected we need to
- * fail, this will effectively prevent CPU offlining. There is no way
- * we can re-bind channels to different CPUs for now.
+ * clean up. In case we find one and vmbus is still connected, we
+ * fail; this will effectively prevent CPU offlining.
+ *
+ * TODO: Re-bind the channels to different CPUs.
*/
mutex_lock(&vmbus_connection.channel_mutex);
list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c
index bb9ba3f7c794..5040d7e0cd9e 100644
--- a/drivers/hv/hv_fcopy.c
+++ b/drivers/hv/hv_fcopy.c
@@ -71,7 +71,7 @@ static void fcopy_poll_wrapper(void *channel)
{
/* Transaction is finished, reset the state here to avoid races. */
fcopy_transaction.state = HVUTIL_READY;
- hv_fcopy_onchannelcallback(channel);
+ tasklet_schedule(&((struct vmbus_channel *)channel)->callback_event);
}
static void fcopy_timeout_func(struct work_struct *dummy)
diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c
index 1c75b38f0d6d..783779e4cc1a 100644
--- a/drivers/hv/hv_snapshot.c
+++ b/drivers/hv/hv_snapshot.c
@@ -80,7 +80,7 @@ static void vss_poll_wrapper(void *channel)
{
/* Transaction is finished, reset the state here to avoid races. */
vss_transaction.state = HVUTIL_READY;
- hv_vss_onchannelcallback(channel);
+ tasklet_schedule(&((struct vmbus_channel *)channel)->callback_event);
}
/*
diff --git a/drivers/hv/hv_trace.h b/drivers/hv/hv_trace.h
index f9d14db980cb..6063bb21bb13 100644
--- a/drivers/hv/hv_trace.h
+++ b/drivers/hv/hv_trace.h
@@ -44,10 +44,8 @@ TRACE_EVENT(vmbus_onoffer,
__entry->monitorid = offer->monitorid;
__entry->is_ddc_int = offer->is_dedicated_interrupt;
__entry->connection_id = offer->connection_id;
- memcpy(__entry->if_type,
- &offer->offer.if_type.b, 16);
- memcpy(__entry->if_instance,
- &offer->offer.if_instance.b, 16);
+ export_guid(__entry->if_type, &offer->offer.if_type);
+ export_guid(__entry->if_instance, &offer->offer.if_instance);
__entry->chn_flags = offer->offer.chn_flags;
__entry->mmio_mb = offer->offer.mmio_megabytes;
__entry->sub_idx = offer->offer.sub_channel_index;
@@ -296,6 +294,25 @@ TRACE_EVENT(vmbus_send_tl_connect_request,
)
);
+TRACE_EVENT(vmbus_send_modifychannel,
+ TP_PROTO(const struct vmbus_channel_modifychannel *msg,
+ int ret),
+ TP_ARGS(msg, ret),
+ TP_STRUCT__entry(
+ __field(u32, child_relid)
+ __field(u32, target_vp)
+ __field(int, ret)
+ ),
+ TP_fast_assign(
+ __entry->child_relid = msg->child_relid;
+ __entry->target_vp = msg->target_vp;
+ __entry->ret = ret;
+ ),
+ TP_printk("binding child_relid 0x%x to target_vp 0x%x, ret %d",
+ __entry->child_relid, __entry->target_vp, __entry->ret
+ )
+ );
+
DECLARE_EVENT_CLASS(vmbus_channel,
TP_PROTO(const struct vmbus_channel *channel),
TP_ARGS(channel),
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 70b30e223a57..40e2b9f91163 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -132,12 +132,6 @@ struct hv_per_cpu_context {
* basis.
*/
struct tasklet_struct msg_dpc;
-
- /*
- * To optimize the mapping of relid to channel, maintain
- * per-cpu list of the channels based on their CPU affinity.
- */
- struct list_head chan_list;
};
struct hv_context {
@@ -202,6 +196,8 @@ int hv_ringbuffer_read(struct vmbus_channel *channel,
/* TODO: Need to make this configurable */
#define MAX_NUM_CHANNELS_SUPPORTED 256
+#define MAX_CHANNEL_RELIDS \
+ max(MAX_NUM_CHANNELS_SUPPORTED, HV_EVENT_FLAGS_COUNT)
enum vmbus_connect_state {
DISCONNECTED,
@@ -212,12 +208,13 @@ enum vmbus_connect_state {
#define MAX_SIZE_CHANNEL_MESSAGE HV_MESSAGE_PAYLOAD_BYTE_COUNT
-struct vmbus_connection {
- /*
- * CPU on which the initial host contact was made.
- */
- int connect_cpu;
+/*
+ * The CPU that Hyper-V will interrupt for VMBUS messages, such as
+ * CHANNELMSG_OFFERCHANNEL and CHANNELMSG_RESCIND_CHANNELOFFER.
+ */
+#define VMBUS_CONNECT_CPU 0
+struct vmbus_connection {
u32 msg_conn_id;
atomic_t offer_in_progress;
@@ -250,6 +247,9 @@ struct vmbus_connection {
struct list_head chn_list;
struct mutex channel_mutex;
+ /* Array of channels */
+ struct vmbus_channel **channels;
+
/*
* An offer message is handled first on the work_queue, and then
* is further handled on handle_primary_chan_wq or
@@ -317,6 +317,7 @@ struct vmbus_channel_message_table_entry {
enum vmbus_channel_message_type message_type;
enum vmbus_message_handler_type handler_type;
void (*message_handler)(struct vmbus_channel_message_header *msg);
+ u32 min_payload_len;
};
extern const struct vmbus_channel_message_table_entry
@@ -336,6 +337,9 @@ int vmbus_add_channel_kobj(struct hv_device *device_obj,
void vmbus_remove_channel_attr_group(struct vmbus_channel *channel);
+void vmbus_channel_map_relid(struct vmbus_channel *channel);
+void vmbus_channel_unmap_relid(struct vmbus_channel *channel);
+
struct vmbus_channel *relid2channel(u32 relid);
void vmbus_free_channels(void);
@@ -374,12 +378,7 @@ static inline void hv_poll_channel(struct vmbus_channel *channel,
{
if (!channel)
return;
-
- if (in_interrupt() && (channel->target_cpu == smp_processor_id())) {
- cb(channel);
- return;
- }
- smp_call_function_single(channel->target_cpu, cb, channel, true);
+ cb(channel);
}
enum hvutil_device_state {
@@ -396,6 +395,54 @@ enum delay {
MESSAGE_DELAY = 1,
};
+extern const struct vmbus_device vmbus_devs[];
+
+static inline bool hv_is_perf_channel(struct vmbus_channel *channel)
+{
+ return vmbus_devs[channel->device_id].perf_device;
+}
+
+static inline bool hv_is_alloced_cpu(unsigned int cpu)
+{
+ struct vmbus_channel *channel, *sc;
+
+ lockdep_assert_held(&vmbus_connection.channel_mutex);
+ /*
+ * List additions/deletions as well as updates of the target CPUs are
+ * protected by channel_mutex.
+ */
+ list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
+ if (!hv_is_perf_channel(channel))
+ continue;
+ if (channel->target_cpu == cpu)
+ return true;
+ list_for_each_entry(sc, &channel->sc_list, sc_list) {
+ if (sc->target_cpu == cpu)
+ return true;
+ }
+ }
+ return false;
+}
+
+static inline void hv_set_alloced_cpu(unsigned int cpu)
+{
+ cpumask_set_cpu(cpu, &hv_context.hv_numa_map[cpu_to_node(cpu)]);
+}
+
+static inline void hv_clear_alloced_cpu(unsigned int cpu)
+{
+ if (hv_is_alloced_cpu(cpu))
+ return;
+ cpumask_clear_cpu(cpu, &hv_context.hv_numa_map[cpu_to_node(cpu)]);
+}
+
+static inline void hv_update_alloced_cpus(unsigned int old_cpu,
+ unsigned int new_cpu)
+{
+ hv_set_alloced_cpu(new_cpu);
+ hv_clear_alloced_cpu(old_cpu);
+}
+
#ifdef CONFIG_HYPERV_TESTING
int hv_debug_add_dev_dir(struct hv_device *dev);
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index e06c6b9555cf..9147ee9d5f7d 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -117,14 +117,6 @@ static int vmbus_exists(void)
return 0;
}
-#define VMBUS_ALIAS_LEN ((sizeof((struct hv_vmbus_device_id *)0)->guid) * 2)
-static void print_alias_name(struct hv_device *hv_dev, char *alias_name)
-{
- int i;
- for (i = 0; i < VMBUS_ALIAS_LEN; i += 2)
- sprintf(&alias_name[i], "%02x", hv_dev->dev_type.b[i/2]);
-}
-
static u8 channel_monitor_group(const struct vmbus_channel *channel)
{
return (u8)channel->offermsg.monitorid / 32;
@@ -201,7 +193,7 @@ static ssize_t class_id_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
return sprintf(buf, "{%pUl}\n",
- hv_dev->channel->offermsg.offer.if_type.b);
+ &hv_dev->channel->offermsg.offer.if_type);
}
static DEVICE_ATTR_RO(class_id);
@@ -213,7 +205,7 @@ static ssize_t device_id_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
return sprintf(buf, "{%pUl}\n",
- hv_dev->channel->offermsg.offer.if_instance.b);
+ &hv_dev->channel->offermsg.offer.if_instance);
}
static DEVICE_ATTR_RO(device_id);
@@ -221,10 +213,8 @@ static ssize_t modalias_show(struct device *dev,
struct device_attribute *dev_attr, char *buf)
{
struct hv_device *hv_dev = device_to_hv_device(dev);
- char alias_name[VMBUS_ALIAS_LEN + 1];
- print_alias_name(hv_dev, alias_name);
- return sprintf(buf, "vmbus:%s\n", alias_name);
+ return sprintf(buf, "vmbus:%*phN\n", UUID_SIZE, &hv_dev->dev_type);
}
static DEVICE_ATTR_RO(modalias);
@@ -693,12 +683,9 @@ __ATTRIBUTE_GROUPS(vmbus_dev);
static int vmbus_uevent(struct device *device, struct kobj_uevent_env *env)
{
struct hv_device *dev = device_to_hv_device(device);
- int ret;
- char alias_name[VMBUS_ALIAS_LEN + 1];
+ const char *format = "MODALIAS=vmbus:%*phN";
- print_alias_name(dev, alias_name);
- ret = add_uevent_var(env, "MODALIAS=vmbus:%s", alias_name);
- return ret;
+ return add_uevent_var(env, format, UUID_SIZE, &dev->dev_type);
}
static const struct hv_vmbus_device_id *
@@ -1033,7 +1020,10 @@ static struct bus_type hv_bus = {
struct onmessage_work_context {
struct work_struct work;
- struct hv_message msg;
+ struct {
+ struct hv_message_header header;
+ u8 payload[];
+ } msg;
};
static void vmbus_onmessage_work(struct work_struct *work)
@@ -1046,7 +1036,8 @@ static void vmbus_onmessage_work(struct work_struct *work)
ctx = container_of(work, struct onmessage_work_context,
work);
- vmbus_onmessage(&ctx->msg);
+ vmbus_onmessage((struct vmbus_channel_message_header *)
+ &ctx->msg.payload);
kfree(ctx);
}
@@ -1061,6 +1052,13 @@ void vmbus_on_msg_dpc(unsigned long data)
struct onmessage_work_context *ctx;
u32 message_type = msg->header.message_type;
+ /*
+ * 'enum vmbus_channel_message_type' is supposed to always be 'u32' as
+ * it is being used in 'struct vmbus_channel_message_header' definition
+ * which is supposed to match hypervisor ABI.
+ */
+ BUILD_BUG_ON(sizeof(enum vmbus_channel_message_type) != sizeof(u32));
+
if (message_type == HVMSG_NONE)
/* no msg */
return;
@@ -1074,41 +1072,88 @@ void vmbus_on_msg_dpc(unsigned long data)
goto msg_handled;
}
+ if (msg->header.payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT) {
+ WARN_ONCE(1, "payload size is too large (%d)\n",
+ msg->header.payload_size);
+ goto msg_handled;
+ }
+
entry = &channel_message_table[hdr->msgtype];
if (!entry->message_handler)
goto msg_handled;
+ if (msg->header.payload_size < entry->min_payload_len) {
+ WARN_ONCE(1, "message too short: msgtype=%d len=%d\n",
+ hdr->msgtype, msg->header.payload_size);
+ goto msg_handled;
+ }
+
if (entry->handler_type == VMHT_BLOCKING) {
- ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);
+ ctx = kmalloc(sizeof(*ctx) + msg->header.payload_size,
+ GFP_ATOMIC);
if (ctx == NULL)
return;
INIT_WORK(&ctx->work, vmbus_onmessage_work);
- memcpy(&ctx->msg, msg, sizeof(*msg));
+ memcpy(&ctx->msg, msg, sizeof(msg->header) +
+ msg->header.payload_size);
/*
* The host can generate a rescind message while we
* may still be handling the original offer. We deal with
- * this condition by ensuring the processing is done on the
- * same CPU.
+ * this condition by relying on the synchronization provided
+ * by offer_in_progress and by channel_mutex. See also the
+ * inline comments in vmbus_onoffer_rescind().
*/
switch (hdr->msgtype) {
case CHANNELMSG_RESCIND_CHANNELOFFER:
/*
* If we are handling the rescind message;
* schedule the work on the global work queue.
+ *
+ * The OFFER message and the RESCIND message should
+ * not be handled by the same serialized work queue,
+ * because the OFFER handler may call vmbus_open(),
+ * which tries to open the channel by sending an
+ * OPEN_CHANNEL message to the host and waits for
+ * the host's response; however, if the host has
+ * rescinded the channel before it receives the
+ * OPEN_CHANNEL message, the host just silently
+ * ignores the OPEN_CHANNEL message; as a result,
+ * the guest's OFFER handler hangs forever, if we
+ * handle the RESCIND message in the same serialized
+ * work queue: the RESCIND handler cannot start to
+ * run before the OFFER handler finishes.
*/
- schedule_work_on(vmbus_connection.connect_cpu,
- &ctx->work);
+ schedule_work(&ctx->work);
break;
case CHANNELMSG_OFFERCHANNEL:
+ /*
+ * The host sends the offer message of a given channel
+ * before sending the rescind message of the same
+ * channel. These messages are sent to the guest's
+ * connect CPU; the guest then starts processing them
+ * in the tasklet handler on this CPU:
+ *
+ * VMBUS_CONNECT_CPU
+ *
+ * [vmbus_on_msg_dpc()]
+ * atomic_inc() // CHANNELMSG_OFFERCHANNEL
+ * queue_work()
+ * ...
+ * [vmbus_on_msg_dpc()]
+ * schedule_work() // CHANNELMSG_RESCIND_CHANNELOFFER
+ *
+ * We rely on the memory-ordering properties of the
+ * queue_work() and schedule_work() primitives, which
+ * guarantee that the atomic increment will be visible
+ * to the CPUs which will execute the offer & rescind
+ * works by the time these works start execution.
+ */
atomic_inc(&vmbus_connection.offer_in_progress);
- queue_work_on(vmbus_connection.connect_cpu,
- vmbus_connection.work_queue,
- &ctx->work);
- break;
+ fallthrough;
default:
queue_work(vmbus_connection.work_queue, &ctx->work);
@@ -1133,10 +1178,11 @@ static void vmbus_force_channel_rescinded(struct vmbus_channel *channel)
WARN_ON(!is_hvsock_channel(channel));
/*
- * sizeof(*ctx) is small and the allocation should really not fail,
+ * Allocation size is small and the allocation should really not fail,
* otherwise the state of the hv_sock connections ends up in limbo.
*/
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL | __GFP_NOFAIL);
+ ctx = kzalloc(sizeof(*ctx) + sizeof(*rescind),
+ GFP_KERNEL | __GFP_NOFAIL);
/*
* So far, these are not really used by Linux. Just set them to the
@@ -1146,31 +1192,17 @@ static void vmbus_force_channel_rescinded(struct vmbus_channel *channel)
ctx->msg.header.payload_size = sizeof(*rescind);
/* These values are actually used by Linux. */
- rescind = (struct vmbus_channel_rescind_offer *)ctx->msg.u.payload;
+ rescind = (struct vmbus_channel_rescind_offer *)ctx->msg.payload;
rescind->header.msgtype = CHANNELMSG_RESCIND_CHANNELOFFER;
rescind->child_relid = channel->offermsg.child_relid;
INIT_WORK(&ctx->work, vmbus_onmessage_work);
- queue_work_on(vmbus_connection.connect_cpu,
- vmbus_connection.work_queue,
- &ctx->work);
+ queue_work(vmbus_connection.work_queue, &ctx->work);
}
#endif /* CONFIG_PM_SLEEP */
/*
- * Direct callback for channels using other deferred processing
- */
-static void vmbus_channel_isr(struct vmbus_channel *channel)
-{
- void (*callback_fn)(void *);
-
- callback_fn = READ_ONCE(channel->onchannel_callback);
- if (likely(callback_fn != NULL))
- (*callback_fn)(channel->channel_callback_context);
-}
-
-/*
* Schedule all channels with events pending
*/
static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu)
@@ -1200,6 +1232,7 @@ static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu)
return;
for_each_set_bit(relid, recv_int_page, maxbits) {
+ void (*callback_fn)(void *context);
struct vmbus_channel *channel;
if (!sync_test_and_clear_bit(relid, recv_int_page))
@@ -1209,33 +1242,54 @@ static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu)
if (relid == 0)
continue;
+ /*
+ * Pairs with the kfree_rcu() in vmbus_chan_release().
+ * Guarantees that the channel data structure doesn't
+ * get freed while the channel pointer below is being
+ * dereferenced.
+ */
rcu_read_lock();
/* Find channel based on relid */
- list_for_each_entry_rcu(channel, &hv_cpu->chan_list, percpu_list) {
- if (channel->offermsg.child_relid != relid)
- continue;
+ channel = relid2channel(relid);
+ if (channel == NULL)
+ goto sched_unlock_rcu;
- if (channel->rescind)
- continue;
+ if (channel->rescind)
+ goto sched_unlock_rcu;
- trace_vmbus_chan_sched(channel);
+ /*
+ * Make sure that the ring buffer data structure doesn't get
+ * freed while we dereference the ring buffer pointer. Test
+ * for the channel's onchannel_callback being NULL within a
+ * sched_lock critical section. See also the inline comments
+ * in vmbus_reset_channel_cb().
+ */
+ spin_lock(&channel->sched_lock);
- ++channel->interrupts;
+ callback_fn = channel->onchannel_callback;
+ if (unlikely(callback_fn == NULL))
+ goto sched_unlock;
- switch (channel->callback_mode) {
- case HV_CALL_ISR:
- vmbus_channel_isr(channel);
- break;
+ trace_vmbus_chan_sched(channel);
- case HV_CALL_BATCHED:
- hv_begin_read(&channel->inbound);
- /* fallthrough */
- case HV_CALL_DIRECT:
- tasklet_schedule(&channel->callback_event);
- }
+ ++channel->interrupts;
+
+ switch (channel->callback_mode) {
+ case HV_CALL_ISR:
+ (*callback_fn)(channel->channel_callback_context);
+ break;
+
+ case HV_CALL_BATCHED:
+ hv_begin_read(&channel->inbound);
+ fallthrough;
+ case HV_CALL_DIRECT:
+ tasklet_schedule(&channel->callback_event);
}
+sched_unlock:
+ spin_unlock(&channel->sched_lock);
+sched_unlock_rcu:
rcu_read_unlock();
}
}
@@ -1364,7 +1418,6 @@ static int vmbus_bus_init(void)
{
int ret;
- /* Hypervisor initialization...setup hypercall page..etc */
ret = hv_init();
if (ret != 0) {
pr_err("Unable to initialize the hypervisor - 0x%x\n", ret);
@@ -1553,8 +1606,24 @@ static ssize_t vmbus_chan_attr_show(struct kobject *kobj,
return attribute->show(chan, buf);
}
+static ssize_t vmbus_chan_attr_store(struct kobject *kobj,
+ struct attribute *attr, const char *buf,
+ size_t count)
+{
+ const struct vmbus_chan_attribute *attribute
+ = container_of(attr, struct vmbus_chan_attribute, attr);
+ struct vmbus_channel *chan
+ = container_of(kobj, struct vmbus_channel, kobj);
+
+ if (!attribute->store)
+ return -EIO;
+
+ return attribute->store(chan, buf, count);
+}
+
static const struct sysfs_ops vmbus_chan_sysfs_ops = {
.show = vmbus_chan_attr_show,
+ .store = vmbus_chan_attr_store,
};
static ssize_t out_mask_show(struct vmbus_channel *channel, char *buf)
@@ -1625,11 +1694,110 @@ static ssize_t write_avail_show(struct vmbus_channel *channel, char *buf)
}
static VMBUS_CHAN_ATTR_RO(write_avail);
-static ssize_t show_target_cpu(struct vmbus_channel *channel, char *buf)
+static ssize_t target_cpu_show(struct vmbus_channel *channel, char *buf)
{
return sprintf(buf, "%u\n", channel->target_cpu);
}
-static VMBUS_CHAN_ATTR(cpu, S_IRUGO, show_target_cpu, NULL);
+static ssize_t target_cpu_store(struct vmbus_channel *channel,
+ const char *buf, size_t count)
+{
+ u32 target_cpu, origin_cpu;
+ ssize_t ret = count;
+
+ if (vmbus_proto_version < VERSION_WIN10_V4_1)
+ return -EIO;
+
+ if (sscanf(buf, "%uu", &target_cpu) != 1)
+ return -EIO;
+
+ /* Validate target_cpu for the cpumask_test_cpu() operation below. */
+ if (target_cpu >= nr_cpumask_bits)
+ return -EINVAL;
+
+ /* No CPUs should come up or down during this. */
+ cpus_read_lock();
+
+ if (!cpumask_test_cpu(target_cpu, cpu_online_mask)) {
+ cpus_read_unlock();
+ return -EINVAL;
+ }
+
+ /*
+ * Synchronizes target_cpu_store() and channel closure:
+ *
+ * { Initially: state = CHANNEL_OPENED }
+ *
+ * CPU1 CPU2
+ *
+ * [target_cpu_store()] [vmbus_disconnect_ring()]
+ *
+ * LOCK channel_mutex LOCK channel_mutex
+ * LOAD r1 = state LOAD r2 = state
+ * IF (r1 == CHANNEL_OPENED) IF (r2 == CHANNEL_OPENED)
+ * SEND MODIFYCHANNEL STORE state = CHANNEL_OPEN
+ * [...] SEND CLOSECHANNEL
+ * UNLOCK channel_mutex UNLOCK channel_mutex
+ *
+ * Forbids: r1 == r2 == CHANNEL_OPENED (i.e., CPU1's LOCK precedes
+ * CPU2's LOCK) && CPU2's SEND precedes CPU1's SEND
+ *
+ * Note. The host processes the channel messages "sequentially", in
+ * the order in which they are received on a per-partition basis.
+ */
+ mutex_lock(&vmbus_connection.channel_mutex);
+
+ /*
+ * Hyper-V will ignore MODIFYCHANNEL messages for "non-open" channels;
+ * avoid sending the message and fail here for such channels.
+ */
+ if (channel->state != CHANNEL_OPENED_STATE) {
+ ret = -EIO;
+ goto cpu_store_unlock;
+ }
+
+ origin_cpu = channel->target_cpu;
+ if (target_cpu == origin_cpu)
+ goto cpu_store_unlock;
+
+ if (vmbus_send_modifychannel(channel->offermsg.child_relid,
+ hv_cpu_number_to_vp_number(target_cpu))) {
+ ret = -EIO;
+ goto cpu_store_unlock;
+ }
+
+ /*
+ * Warning. At this point, there is *no* guarantee that the host will
+ * have successfully processed the vmbus_send_modifychannel() request.
+ * See the header comment of vmbus_send_modifychannel() for more info.
+ *
+ * Lags in the processing of the above vmbus_send_modifychannel() can
+ * result in missed interrupts if the "old" target CPU is taken offline
+ * before Hyper-V starts sending interrupts to the "new" target CPU.
+ * But apart from this offlining scenario, the code tolerates such
+ * lags. It will function correctly even if a channel interrupt comes
+ * in on a CPU that is different from the channel target_cpu value.
+ */
+
+ channel->target_cpu = target_cpu;
+ channel->target_vp = hv_cpu_number_to_vp_number(target_cpu);
+ channel->numa_node = cpu_to_node(target_cpu);
+
+ /* See init_vp_index(). */
+ if (hv_is_perf_channel(channel))
+ hv_update_alloced_cpus(origin_cpu, target_cpu);
+
+ /* Currently set only for storvsc channels. */
+ if (channel->change_target_cpu_callback) {
+ (*channel->change_target_cpu_callback)(channel,
+ origin_cpu, target_cpu);
+ }
+
+cpu_store_unlock:
+ mutex_unlock(&vmbus_connection.channel_mutex);
+ cpus_read_unlock();
+ return ret;
+}
+static VMBUS_CHAN_ATTR(cpu, 0644, target_cpu_show, target_cpu_store);
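A minimal user-space sketch of exercising the new writable attribute; the channel directory path is an assumption (channels are typically exposed under /sys/bus/vmbus/devices/<device>/channels/<relid>/), and set_channel_cpu() is a hypothetical helper, not part of this patch:

/* Hypothetical user-space helper, for illustration only. */
#include <stdio.h>

static int set_channel_cpu(const char *chan_dir, unsigned int cpu)
{
	char path[256];
	FILE *f;
	int ret;

	snprintf(path, sizeof(path), "%s/cpu", chan_dir);
	f = fopen(path, "w");
	if (!f)
		return -1;
	/* target_cpu_store() expects a decimal CPU number. */
	ret = fprintf(f, "%u\n", cpu) < 0 ? -1 : 0;
	if (fclose(f) != 0)
		ret = -1;
	return ret;
}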
static ssize_t channel_pending_show(struct vmbus_channel *channel,
char *buf)
@@ -1830,7 +1998,7 @@ int vmbus_device_register(struct hv_device *child_device_obj)
int ret;
dev_set_name(&child_device_obj->device, "%pUl",
- child_device_obj->channel->offermsg.offer.if_instance.b);
+ &child_device_obj->channel->offermsg.offer.if_instance);
child_device_obj->device.bus = &hv_bus;
child_device_obj->device.parent = &hv_acpi_dev->dev;
@@ -2221,9 +2389,12 @@ static int vmbus_bus_suspend(struct device *dev)
list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
/*
- * Invalidate the field. Upon resume, vmbus_onoffer() will fix
- * up the field, and the other fields (if necessary).
+ * Remove the channel from the array of channels and invalidate
+ * the channel's relid. Upon resume, vmbus_onoffer() will fix
+ * up the relid (and other fields, if necessary) and add the
+ * channel back to the array.
*/
+ vmbus_channel_unmap_relid(channel);
channel->offermsg.child_relid = INVALID_RELID;
if (is_hvsock_channel(channel)) {
@@ -2470,6 +2641,7 @@ static void __exit vmbus_exit(void)
hv_debug_rm_all_dir();
vmbus_free_channels();
+ kfree(vmbus_connection.channels);
if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
kmsg_dump_unregister(&hv_kmsg_dumper);
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 4c62f900bf7e..288ae9f63588 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -324,6 +324,16 @@ config SENSORS_FAM15H_POWER
This driver can also be built as a module. If so, the module
will be called fam15h_power.
+config SENSORS_AMD_ENERGY
+ tristate "AMD RAPL MSR based Energy driver"
+ depends on X86
+ help
+ If you say yes here you get support for core and package energy
+ sensors, based on RAPL MSR for AMD family 17h and above CPUs.
+
+ This driver can also be built as a module. If so, the module
+ will be called amd_energy.
+
config SENSORS_APPLESMC
tristate "Apple SMC (Motion sensor, light sensor, keyboard backlight)"
depends on INPUT && X86
@@ -404,6 +414,31 @@ config SENSORS_ATXP1
This driver can also be built as a module. If so, the module
will be called atxp1.
+config SENSORS_BT1_PVT
+ tristate "Baikal-T1 Process, Voltage, Temperature sensor driver"
+ depends on MIPS_BAIKAL_T1 || COMPILE_TEST
+ help
+ If you say yes here you get support for Baikal-T1 PVT sensor
+ embedded into the SoC.
+
+ This driver can also be built as a module. If so, the module will be
+ called bt1-pvt.
+
+config SENSORS_BT1_PVT_ALARMS
+ bool "Enable Baikal-T1 PVT sensor alarms"
+ depends on SENSORS_BT1_PVT
+ help
+ Baikal-T1 PVT IP-block provides threshold registers for each
+ supported sensor, but the corresponding interrupts can only be
+ generated by the thresholds comparator in synchronization with a
+ data conversion, and only one sensor can be converted at a time.
+ This makes it impossible to implement the hwmon alarms without
+ periodically switching between the PVT sensors. By default the
+ data conversion is performed on demand from user space. If this
+ config is enabled, the data conversion is performed periodically
+ and the results are saved in the internal driver cache.
+
config SENSORS_DRIVETEMP
tristate "Hard disk drives with temperature sensors"
depends on SCSI && ATA
@@ -523,6 +558,15 @@ config SENSORS_F75375S
This driver can also be built as a module. If so, the module
will be called f75375s.
+config SENSORS_GSC
+ tristate "Gateworks System Controller ADC"
+ depends on MFD_GATEWORKS_GSC
+ help
+ Support for the Gateworks System Controller A/D converters.
+
+ To compile this driver as a module, choose M here:
+ the module will be called gsc-hwmon.
+
config SENSORS_MC13783_ADC
tristate "Freescale MC13783/MC13892 ADC"
depends on MFD_MC13XXX
@@ -1198,10 +1242,11 @@ config SENSORS_LM90
help
If you say yes here you get support for National Semiconductor LM90,
LM86, LM89 and LM99, Analog Devices ADM1032, ADT7461, and ADT7461A,
- Maxim MAX6646, MAX6647, MAX6648, MAX6649, MAX6657, MAX6658, MAX6659,
- MAX6680, MAX6681, MAX6692, MAX6695, MAX6696, ON Semiconductor NCT1008,
- Winbond/Nuvoton W83L771W/G/AWG/ASG, Philips SA56004, GMT G781, and
- Texas Instruments TMP451 sensor chips.
+ Maxim MAX6646, MAX6647, MAX6648, MAX6649, MAX6654, MAX6657, MAX6658,
+ MAX6659, MAX6680, MAX6681, MAX6692, MAX6695, MAX6696,
+ ON Semiconductor NCT1008, Winbond/Nuvoton W83L771W/G/AWG/ASG,
+ Philips SA56004, GMT G781, and Texas Instruments TMP451
+ sensor chips.
This driver can also be built as a module. If so, the module
will be called lm90.
@@ -1340,10 +1385,12 @@ config SENSORS_NCT7802
config SENSORS_NCT7904
tristate "Nuvoton NCT7904"
- depends on I2C
+ depends on I2C && WATCHDOG
+ select WATCHDOG_CORE
help
If you say yes here you get support for the Nuvoton NCT7904
- hardware monitoring chip, including manual fan speed control.
+ hardware monitoring chip, including manual fan speed control
+ and support for the integrated watchdog.
This driver can also be built as a module. If so, the module
will be called nct7904.
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index b0b9c8e57176..3e32c21f5efe 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -45,6 +45,7 @@ obj-$(CONFIG_SENSORS_ADT7411) += adt7411.o
obj-$(CONFIG_SENSORS_ADT7462) += adt7462.o
obj-$(CONFIG_SENSORS_ADT7470) += adt7470.o
obj-$(CONFIG_SENSORS_ADT7475) += adt7475.o
+obj-$(CONFIG_SENSORS_AMD_ENERGY) += amd_energy.o
obj-$(CONFIG_SENSORS_APPLESMC) += applesmc.o
obj-$(CONFIG_SENSORS_ARM_SCMI) += scmi-hwmon.o
obj-$(CONFIG_SENSORS_ARM_SCPI) += scpi-hwmon.o
@@ -53,6 +54,7 @@ obj-$(CONFIG_SENSORS_ASC7621) += asc7621.o
obj-$(CONFIG_SENSORS_ASPEED) += aspeed-pwm-tacho.o
obj-$(CONFIG_SENSORS_ATXP1) += atxp1.o
obj-$(CONFIG_SENSORS_AXI_FAN_CONTROL) += axi-fan-control.o
+obj-$(CONFIG_SENSORS_BT1_PVT) += bt1-pvt.o
obj-$(CONFIG_SENSORS_CORETEMP) += coretemp.o
obj-$(CONFIG_SENSORS_DA9052_ADC)+= da9052-hwmon.o
obj-$(CONFIG_SENSORS_DA9055)+= da9055-hwmon.o
@@ -74,6 +76,7 @@ obj-$(CONFIG_SENSORS_G760A) += g760a.o
obj-$(CONFIG_SENSORS_G762) += g762.o
obj-$(CONFIG_SENSORS_GL518SM) += gl518sm.o
obj-$(CONFIG_SENSORS_GL520SM) += gl520sm.o
+obj-$(CONFIG_SENSORS_GSC) += gsc-hwmon.o
obj-$(CONFIG_SENSORS_GPIO_FAN) += gpio-fan.o
obj-$(CONFIG_SENSORS_HIH6130) += hih6130.o
obj-$(CONFIG_SENSORS_ULTRA45) += ultra45_env.o
diff --git a/drivers/hwmon/adt7411.c b/drivers/hwmon/adt7411.c
index c7010b91bc13..5a839cc2ed1c 100644
--- a/drivers/hwmon/adt7411.c
+++ b/drivers/hwmon/adt7411.c
@@ -716,7 +716,6 @@ static struct i2c_driver adt7411_driver = {
module_i2c_driver(adt7411_driver);
-MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de> and "
- "Wolfram Sang <w.sang@pengutronix.de>");
+MODULE_AUTHOR("Sascha Hauer, Wolfram Sang <kernel@pengutronix.de>");
MODULE_DESCRIPTION("ADT7411 driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/hwmon/amd_energy.c b/drivers/hwmon/amd_energy.c
new file mode 100644
index 000000000000..e95b7426106e
--- /dev/null
+++ b/drivers/hwmon/amd_energy.c
@@ -0,0 +1,408 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/*
+ * Copyright (C) 2020 Advanced Micro Devices, Inc.
+ */
+#include <asm/cpu_device_id.h>
+
+#include <linux/bits.h>
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/hwmon.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/processor.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/topology.h>
+#include <linux/types.h>
+
+#define DRVNAME "amd_energy"
+
+#define ENERGY_PWR_UNIT_MSR 0xC0010299
+#define ENERGY_CORE_MSR 0xC001029A
+#define ENERGY_PKG_MSR 0xC001029B
+
+#define AMD_ENERGY_UNIT_MASK 0x01F00
+#define AMD_ENERGY_MASK 0xFFFFFFFF
+
+struct sensor_accumulator {
+ u64 energy_ctr;
+ u64 prev_value;
+ char label[10];
+};
+
+struct amd_energy_data {
+ struct hwmon_channel_info energy_info;
+ const struct hwmon_channel_info *info[2];
+ struct hwmon_chip_info chip;
+ struct task_struct *wrap_accumulate;
+ /* Lock around the accumulator */
+ struct mutex lock;
+ /* An accumulator for each core and socket */
+ struct sensor_accumulator *accums;
+ /* Energy Status Units */
+ u64 energy_units;
+ int nr_cpus;
+ int nr_socks;
+ int core_id;
+};
+
+static int amd_energy_read_labels(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel,
+ const char **str)
+{
+ struct amd_energy_data *data = dev_get_drvdata(dev);
+
+ *str = data->accums[channel].label;
+ return 0;
+}
+
+static void get_energy_units(struct amd_energy_data *data)
+{
+ u64 rapl_units;
+
+ rdmsrl_safe(ENERGY_PWR_UNIT_MSR, &rapl_units);
+ data->energy_units = (rapl_units & AMD_ENERGY_UNIT_MASK) >> 8;
+}
+
+static void accumulate_socket_delta(struct amd_energy_data *data,
+ int sock, int cpu)
+{
+ struct sensor_accumulator *s_accum;
+ u64 input;
+
+ mutex_lock(&data->lock);
+ rdmsrl_safe_on_cpu(cpu, ENERGY_PKG_MSR, &input);
+ input &= AMD_ENERGY_MASK;
+
+ s_accum = &data->accums[data->nr_cpus + sock];
+ if (input >= s_accum->prev_value)
+ s_accum->energy_ctr +=
+ input - s_accum->prev_value;
+ else
+ s_accum->energy_ctr += UINT_MAX -
+ s_accum->prev_value + input;
+
+ s_accum->prev_value = input;
+ mutex_unlock(&data->lock);
+}
+
+static void accumulate_core_delta(struct amd_energy_data *data)
+{
+ struct sensor_accumulator *c_accum;
+ u64 input;
+ int cpu;
+
+ mutex_lock(&data->lock);
+ if (data->core_id >= data->nr_cpus)
+ data->core_id = 0;
+
+ cpu = data->core_id;
+
+ if (!cpu_online(cpu))
+ goto out;
+
+ rdmsrl_safe_on_cpu(cpu, ENERGY_CORE_MSR, &input);
+ input &= AMD_ENERGY_MASK;
+
+ c_accum = &data->accums[cpu];
+
+ if (input >= c_accum->prev_value)
+ c_accum->energy_ctr +=
+ input - c_accum->prev_value;
+ else
+ c_accum->energy_ctr += UINT_MAX -
+ c_accum->prev_value + input;
+
+ c_accum->prev_value = input;
+
+out:
+ data->core_id++;
+ mutex_unlock(&data->lock);
+}
+
+static void read_accumulate(struct amd_energy_data *data)
+{
+ int sock;
+
+ for (sock = 0; sock < data->nr_socks; sock++) {
+ int cpu;
+
+ cpu = cpumask_first_and(cpu_online_mask,
+ cpumask_of_node(sock));
+
+ accumulate_socket_delta(data, sock, cpu);
+ }
+
+ accumulate_core_delta(data);
+}
+
+static void amd_add_delta(struct amd_energy_data *data, int ch,
+ int cpu, long *val, bool is_core)
+{
+ struct sensor_accumulator *s_accum, *c_accum;
+ u64 input;
+
+ mutex_lock(&data->lock);
+ if (!is_core) {
+ rdmsrl_safe_on_cpu(cpu, ENERGY_PKG_MSR, &input);
+ input &= AMD_ENERGY_MASK;
+
+ s_accum = &data->accums[ch];
+ if (input >= s_accum->prev_value)
+ input += s_accum->energy_ctr -
+ s_accum->prev_value;
+ else
+ input += UINT_MAX - s_accum->prev_value +
+ s_accum->energy_ctr;
+ } else {
+ rdmsrl_safe_on_cpu(cpu, ENERGY_CORE_MSR, &input);
+ input &= AMD_ENERGY_MASK;
+
+ c_accum = &data->accums[ch];
+ if (input >= c_accum->prev_value)
+ input += c_accum->energy_ctr -
+ c_accum->prev_value;
+ else
+ input += UINT_MAX - c_accum->prev_value +
+ c_accum->energy_ctr;
+ }
+
+ /* Energy consumed = (1/(2^ESU) * RAW * 1000000UL) μJoules */
+ *val = div64_ul(input * 1000000UL, BIT(data->energy_units));
+
+ mutex_unlock(&data->lock);
+}
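As a worked example of the conversion above (the numbers are illustrative, not taken from hardware): with the common ESU value of 16, each raw count is 1/2^16 J ≈ 15.26 µJ, so a raw delta of 65536 counts reads back as exactly 1 J:

/* Illustrative values only. */
u64 units = 16;			/* ESU: 1/(2^16) J per count */
u64 raw   = 65536;		/* accumulated raw counts */
u64 uj    = div64_ul(raw * 1000000UL, BIT(units));	/* = 1000000 uJ = 1 J */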
+
+static int amd_energy_read(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct amd_energy_data *data = dev_get_drvdata(dev);
+ int cpu;
+
+ if (channel >= data->nr_cpus) {
+ cpu = cpumask_first_and(cpu_online_mask,
+ cpumask_of_node
+ (channel - data->nr_cpus));
+ amd_add_delta(data, channel, cpu, val, false);
+ } else {
+ cpu = channel;
+ if (!cpu_online(cpu))
+ return -ENODEV;
+
+ amd_add_delta(data, channel, cpu, val, true);
+ }
+
+ return 0;
+}
+
+static umode_t amd_energy_is_visible(const void *_data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ return 0444;
+}
+
+static int energy_accumulator(void *p)
+{
+ struct amd_energy_data *data = (struct amd_energy_data *)p;
+
+ while (!kthread_should_stop()) {
+ /*
+ * Ignore conditions such as the CPU being
+ * offline or an rdmsr failure.
+ */
+ read_accumulate(data);
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (kthread_should_stop())
+ break;
+
+ /*
+ * On a 240W system, with default resolution the
+ * Socket Energy status register may wrap around in
+ * 2^32*15.3 e-6/240 = 273.8041 secs (~4.5 mins)
+ *
+ * let us accumulate for every 100secs
+ */
+ schedule_timeout(msecs_to_jiffies(100000));
+ }
+ return 0;
+}
+
+static const struct hwmon_ops amd_energy_ops = {
+ .is_visible = amd_energy_is_visible,
+ .read = amd_energy_read,
+ .read_string = amd_energy_read_labels,
+};
+
+static int amd_create_sensor(struct device *dev,
+ struct amd_energy_data *data,
+ u8 type, u32 config)
+{
+ struct hwmon_channel_info *info = &data->energy_info;
+ struct sensor_accumulator *accums;
+ int i, num_siblings, cpus, sockets;
+ u32 *s_config;
+
+ /* Identify the number of siblings per core */
+ num_siblings = ((cpuid_ebx(0x8000001e) >> 8) & 0xff) + 1;
+
+ sockets = num_possible_nodes();
+
+ /*
+ * Energy counter register is accessed at core level.
+ * Hence, filterout the siblings.
+ */
+ cpus = num_present_cpus() / num_siblings;
+
+ s_config = devm_kcalloc(dev, cpus + sockets,
+ sizeof(u32), GFP_KERNEL);
+ if (!s_config)
+ return -ENOMEM;
+
+ accums = devm_kcalloc(dev, cpus + sockets,
+ sizeof(struct sensor_accumulator),
+ GFP_KERNEL);
+ if (!accums)
+ return -ENOMEM;
+
+ info->type = type;
+ info->config = s_config;
+
+ data->nr_cpus = cpus;
+ data->nr_socks = sockets;
+ data->accums = accums;
+
+ for (i = 0; i < cpus + sockets; i++) {
+ s_config[i] = config;
+ if (i < cpus)
+ scnprintf(accums[i].label, 10,
+ "Ecore%03u", i);
+ else
+ scnprintf(accums[i].label, 10,
+ "Esocket%u", (i - cpus));
+ }
+
+ return 0;
+}
+
+static int amd_energy_probe(struct platform_device *pdev)
+{
+ struct device *hwmon_dev;
+ struct amd_energy_data *data;
+ struct device *dev = &pdev->dev;
+
+ data = devm_kzalloc(dev,
+ sizeof(struct amd_energy_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->chip.ops = &amd_energy_ops;
+ data->chip.info = data->info;
+
+ dev_set_drvdata(dev, data);
+ /* Populate per-core energy reporting */
+ data->info[0] = &data->energy_info;
+ amd_create_sensor(dev, data, hwmon_energy,
+ HWMON_E_INPUT | HWMON_E_LABEL);
+
+ mutex_init(&data->lock);
+ get_energy_units(data);
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, DRVNAME,
+ data,
+ &data->chip,
+ NULL);
+ if (IS_ERR(hwmon_dev))
+ return PTR_ERR(hwmon_dev);
+
+ data->wrap_accumulate = kthread_run(energy_accumulator, data,
+ "%s", dev_name(hwmon_dev));
+ if (IS_ERR(data->wrap_accumulate))
+ return PTR_ERR(data->wrap_accumulate);
+
+ return PTR_ERR_OR_ZERO(data->wrap_accumulate);
+}
+
+static int amd_energy_remove(struct platform_device *pdev)
+{
+ struct amd_energy_data *data = dev_get_drvdata(&pdev->dev);
+
+ if (data && data->wrap_accumulate)
+ kthread_stop(data->wrap_accumulate);
+
+ return 0;
+}
+
+static const struct platform_device_id amd_energy_ids[] = {
+ { .name = DRVNAME, },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, amd_energy_ids);
+
+static struct platform_driver amd_energy_driver = {
+ .probe = amd_energy_probe,
+ .remove = amd_energy_remove,
+ .id_table = amd_energy_ids,
+ .driver = {
+ .name = DRVNAME,
+ },
+};
+
+static struct platform_device *amd_energy_platdev;
+
+static const struct x86_cpu_id cpu_ids[] __initconst = {
+ X86_MATCH_VENDOR_FAM(AMD, 0x17, NULL),
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, cpu_ids);
+
+static int __init amd_energy_init(void)
+{
+ int ret;
+
+ if (!x86_match_cpu(cpu_ids))
+ return -ENODEV;
+
+ ret = platform_driver_register(&amd_energy_driver);
+ if (ret)
+ return ret;
+
+ amd_energy_platdev = platform_device_alloc(DRVNAME, 0);
+ if (!amd_energy_platdev) {
+ platform_driver_unregister(&amd_energy_driver);
+ return -ENOMEM;
+ }
+
+ ret = platform_device_add(amd_energy_platdev);
+ if (ret) {
+ platform_device_put(amd_energy_platdev);
+ platform_driver_unregister(&amd_energy_driver);
+ return ret;
+ }
+
+ return ret;
+}
+
+static void __exit amd_energy_exit(void)
+{
+ platform_device_unregister(amd_energy_platdev);
+ platform_driver_unregister(&amd_energy_driver);
+}
+
+module_init(amd_energy_init);
+module_exit(amd_energy_exit);
+
+MODULE_DESCRIPTION("Driver for AMD Energy reporting from RAPL MSR via HWMON interface");
+MODULE_AUTHOR("Naveen Krishna Chatradhi <nchatrad@amd.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index ec93b8d673f5..316618409315 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -156,14 +156,19 @@ static struct workqueue_struct *applesmc_led_wq;
*/
static int wait_read(void)
{
+ unsigned long end = jiffies + (APPLESMC_MAX_WAIT * HZ) / USEC_PER_SEC;
u8 status;
int us;
+
for (us = APPLESMC_MIN_WAIT; us < APPLESMC_MAX_WAIT; us <<= 1) {
- udelay(us);
+ usleep_range(us, us * 16);
status = inb(APPLESMC_CMD_PORT);
/* read: wait for smc to settle */
if (status & 0x01)
return 0;
+ /* timeout: give up */
+ if (time_after(jiffies, end))
+ break;
}
pr_warn("wait_read() fail: 0x%02x\n", status);
@@ -178,10 +183,11 @@ static int send_byte(u8 cmd, u16 port)
{
u8 status;
int us;
+ unsigned long end = jiffies + (APPLESMC_MAX_WAIT * HZ) / USEC_PER_SEC;
outb(cmd, port);
for (us = APPLESMC_MIN_WAIT; us < APPLESMC_MAX_WAIT; us <<= 1) {
- udelay(us);
+ usleep_range(us, us * 16);
status = inb(APPLESMC_CMD_PORT);
/* write: wait for smc to settle */
if (status & 0x02)
@@ -190,7 +196,7 @@ static int send_byte(u8 cmd, u16 port)
if (status & 0x04)
return 0;
/* timeout: give up */
- if (us << 1 == APPLESMC_MAX_WAIT)
+ if (time_after(jiffies, end))
break;
/* busy: long wait and resend */
udelay(APPLESMC_RETRY_WAIT);
diff --git a/drivers/hwmon/bt1-pvt.c b/drivers/hwmon/bt1-pvt.c
new file mode 100644
index 000000000000..1a9772fb1f73
--- /dev/null
+++ b/drivers/hwmon/bt1-pvt.c
@@ -0,0 +1,1146 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+ *
+ * Authors:
+ * Maxim Kaurkin <maxim.kaurkin@baikalelectronics.ru>
+ * Serge Semin <Sergey.Semin@baikalelectronics.ru>
+ *
+ * Baikal-T1 Process, Voltage, Temperature sensor driver
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/hwmon.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/ktime.h>
+#include <linux/limits.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/seqlock.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+
+#include "bt1-pvt.h"
+
+/*
+ * For the sake of code simplification we created the sensors info table
+ * with the sensor names, activation modes, threshold register base addresses
+ * and the threshold bit fields.
+ */
+static const struct pvt_sensor_info pvt_info[] = {
+ PVT_SENSOR_INFO(0, "CPU Core Temperature", hwmon_temp, TEMP, TTHRES),
+ PVT_SENSOR_INFO(0, "CPU Core Voltage", hwmon_in, VOLT, VTHRES),
+ PVT_SENSOR_INFO(1, "CPU Core Low-Vt", hwmon_in, LVT, LTHRES),
+ PVT_SENSOR_INFO(2, "CPU Core High-Vt", hwmon_in, HVT, HTHRES),
+ PVT_SENSOR_INFO(3, "CPU Core Standard-Vt", hwmon_in, SVT, STHRES),
+};
+
+/*
+ * The original translation formulae of the temperature (in degrees Celsius)
+ * to PVT data and vice-versa are as follows:
+ * N = 1.8322e-8*(T^4) + 2.343e-5*(T^3) + 8.7018e-3*(T^2) + 3.9269*(T^1) +
+ * 1.7204e2,
+ * T = -1.6743e-11*(N^4) + 8.1542e-8*(N^3) + -1.8201e-4*(N^2) +
+ * 3.1020e-1*(N^1) - 4.838e1,
+ * where T = [-48.380, 147.438]C and N = [0, 1023].
+ * They must be altered accordingly to be suitable for integer arithmetic.
+ * The technique is called 'factor redistribution', which just makes sure the
+ * multiplications and divisions are performed so that the result of each
+ * operation stays within the integer limits. In addition we need to translate
+ * the formulae to accept millidegrees of Celsius. Here is what they look like
+ * after the alterations:
+ * N = (18322e-20*(T^4) + 2343e-13*(T^3) + 87018e-9*(T^2) + 39269e-3*T +
+ * 17204e2) / 1e4,
+ * T = -16743e-12*(D^4) + 81542e-9*(D^3) - 182010e-6*(D^2) + 310200e-3*D -
+ * 48380,
+ * where T = [-48380, 147438] mC and N = [0, 1023].
+ */
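+/*
+ * Illustrative round-trip check (not part of the original comment):
+ * T = 25000 mC gives N ~= 276 by the first formula, and feeding N = 276
+ * back through the second one returns roughly 24987 mC, i.e. ~25 degrees C.
+ */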
+static const struct pvt_poly poly_temp_to_N = {
+ .total_divider = 10000,
+ .terms = {
+ {4, 18322, 10000, 10000},
+ {3, 2343, 10000, 10},
+ {2, 87018, 10000, 10},
+ {1, 39269, 1000, 1},
+ {0, 1720400, 1, 1}
+ }
+};
+
+static const struct pvt_poly poly_N_to_temp = {
+ .total_divider = 1,
+ .terms = {
+ {4, -16743, 1000, 1},
+ {3, 81542, 1000, 1},
+ {2, -182010, 1000, 1},
+ {1, 310200, 1000, 1},
+ {0, -48380, 1, 1}
+ }
+};
+
+/*
+ * Similar alterations are performed for the voltage conversion equations.
+ * The original formulae are:
+ * N = 1.8658e3*V - 1.1572e3,
+ * V = (N + 1.1572e3) / 1.8658e3,
+ * where V = [0.620, 1.168] V and N = [0, 1023].
+ * After the optimization they look as follows:
+ * N = (18658e-3*V - 11572) / 10,
+ * V = N * 10^5 / 18658 + 11572 * 10^4 / 18658.
+ */
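+/*
+ * Illustrative check: V = 1000 mV gives N = (18658 - 11572) / 10 ~= 708, and
+ * N = 708 maps back to (708 * 100000 + 115720000) / 18658 / 10 ~= 1000 mV.
+ */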
+static const struct pvt_poly poly_volt_to_N = {
+ .total_divider = 10,
+ .terms = {
+ {1, 18658, 1000, 1},
+ {0, -11572, 1, 1}
+ }
+};
+
+static const struct pvt_poly poly_N_to_volt = {
+ .total_divider = 10,
+ .terms = {
+ {1, 100000, 18658, 1},
+ {0, 115720000, 1, 18658}
+ }
+};
+
+/*
+ * Here is the polynomial calculation function, which performs the
+ * redistributed terms calculations. It's pretty straightforward. We walk
+ * over each degree term up to the free one, and perform the redistributed
+ * multiplication of the term coefficient, its divider (as for the rational
+ * fraction representation), data power and the rational fraction divider
+ * leftover. Then all of this is collected in a total sum variable, whose
+ * value is normalized by the total divider before being returned.
+ */
+static long pvt_calc_poly(const struct pvt_poly *poly, long data)
+{
+ const struct pvt_poly_term *term = poly->terms;
+ long tmp, ret = 0;
+ int deg;
+
+ do {
+ tmp = term->coef;
+ for (deg = 0; deg < term->deg; ++deg)
+ tmp = mult_frac(tmp, data, term->divider);
+ ret += tmp / term->divider_leftover;
+ } while ((term++)->deg);
+
+ return ret / poly->total_divider;
+}
+
+static inline u32 pvt_update(void __iomem *reg, u32 mask, u32 data)
+{
+ u32 old;
+
+ old = readl_relaxed(reg);
+ writel((old & ~mask) | (data & mask), reg);
+
+ return old & mask;
+}
+
+/*
+ * Baikal-T1 PVT mode can be updated only when the controller is disabled.
+ * So first we disable it, then set the new mode together with the controller
+ * getting back enabled. The same applies to the temperature trim and
+ * measurements timeout. Where necessary, the interface mutex is supposed
+ * to be held at the time the operations are performed.
+ */
+static inline void pvt_set_mode(struct pvt_hwmon *pvt, u32 mode)
+{
+ u32 old;
+
+ mode = FIELD_PREP(PVT_CTRL_MODE_MASK, mode);
+
+ old = pvt_update(pvt->regs + PVT_CTRL, PVT_CTRL_EN, 0);
+ pvt_update(pvt->regs + PVT_CTRL, PVT_CTRL_MODE_MASK | PVT_CTRL_EN,
+ mode | old);
+}
+
+static inline u32 pvt_calc_trim(long temp)
+{
+ temp = clamp_val(temp, 0, PVT_TRIM_TEMP);
+
+ return DIV_ROUND_UP(temp, PVT_TRIM_STEP);
+}
+
+static inline void pvt_set_trim(struct pvt_hwmon *pvt, u32 trim)
+{
+ u32 old;
+
+ trim = FIELD_PREP(PVT_CTRL_TRIM_MASK, trim);
+
+ old = pvt_update(pvt->regs + PVT_CTRL, PVT_CTRL_EN, 0);
+ pvt_update(pvt->regs + PVT_CTRL, PVT_CTRL_TRIM_MASK | PVT_CTRL_EN,
+ trim | old);
+}
+
+static inline void pvt_set_tout(struct pvt_hwmon *pvt, u32 tout)
+{
+ u32 old;
+
+ old = pvt_update(pvt->regs + PVT_CTRL, PVT_CTRL_EN, 0);
+ writel(tout, pvt->regs + PVT_TTIMEOUT);
+ pvt_update(pvt->regs + PVT_CTRL, PVT_CTRL_EN, old);
+}
+
+/*
+ * This driver can optionally provide the hwmon alarms for each sensor the PVT
+ * controller supports. The alarms functionality is made compile-time
+ * configurable due to the hardware interface implementation peculiarity
+ * described further in this comment. So if alarms are unnecessary in your
+ * system design, it's recommended to keep them disabled to prevent the PVT
+ * IRQs being periodically raised to get the data cache/alarms status up to
+ * date.
+ *
+ * Baikal-T1 PVT embedded controller is based on the Analog Bits PVT sensor,
+ * but is equipped with a dedicated control wrapper. It exposes the PVT
+ * sub-block registers space via the APB3 bus. In addition the wrapper provides
+ * a common interrupt vector of the sensors conversion completion events and
+ * threshold value alarms. Alas, the wrapper interface hasn't been fully
+ * thought through: only one sensor can be activated at a time, and its
+ * thresholds comparator is enabled right after the data conversion is
+ * completed. Due to this, if alarms need to be implemented for all available
+ * sensors, we can't just set the thresholds and enable the interrupts. We
+ * need to enable the sensors one after another and let the controller detect
+ * the alarms by itself at each conversion. This also makes it pointless to
+ * handle the alarm interrupts, since they effectively happen synchronously
+ * with the data conversion completion. The best driver design would be to
+ * have only the completion interrupts enabled and to keep the converted
+ * value in the driver data cache. This solution is implemented if hwmon
+ * alarms are enabled in this driver. If the alarms are disabled, the
+ * conversion is performed on demand at the time a sensor's input file is
+ * read.
+ */
+
+#if defined(CONFIG_SENSORS_BT1_PVT_ALARMS)
+
+#define pvt_hard_isr NULL
+
+static irqreturn_t pvt_soft_isr(int irq, void *data)
+{
+ const struct pvt_sensor_info *info;
+ struct pvt_hwmon *pvt = data;
+ struct pvt_cache *cache;
+ u32 val, thres_sts, old;
+
+ /*
+ * DVALID bit will be cleared by reading the data. We need to save the
+ * status before the next conversion happens. Threshold events will be
+ * handled a bit later.
+ */
+ thres_sts = readl(pvt->regs + PVT_RAW_INTR_STAT);
+
+ /*
+	 * Then let's recharge the PVT interface with the next sampling mode.
+ * Lock the interface mutex to serialize trim, timeouts and alarm
+ * thresholds settings.
+ */
+ cache = &pvt->cache[pvt->sensor];
+ info = &pvt_info[pvt->sensor];
+ pvt->sensor = (pvt->sensor == PVT_SENSOR_LAST) ?
+ PVT_SENSOR_FIRST : (pvt->sensor + 1);
+
+ /*
+ * For some reason we have to mask the interrupt before changing the
+ * mode, otherwise sometimes the temperature mode doesn't get
+ * activated even though the actual mode in the ctrl register
+ * corresponds to one. Then we read the data. By doing so we also
+ * recharge the data conversion. After this the mode corresponding
+ * to the next sensor in the row is set. Finally we enable the
+ * interrupts back.
+ */
+ mutex_lock(&pvt->iface_mtx);
+
+ old = pvt_update(pvt->regs + PVT_INTR_MASK, PVT_INTR_DVALID,
+ PVT_INTR_DVALID);
+
+ val = readl(pvt->regs + PVT_DATA);
+
+ pvt_set_mode(pvt, pvt_info[pvt->sensor].mode);
+
+ pvt_update(pvt->regs + PVT_INTR_MASK, PVT_INTR_DVALID, old);
+
+ mutex_unlock(&pvt->iface_mtx);
+
+ /*
+ * We can now update the data cache with data just retrieved from the
+	 * sensor. Lock the write-seqlock to make sure the reader sees
+	 * coherent data.
+ */
+ write_seqlock(&cache->data_seqlock);
+
+ cache->data = FIELD_GET(PVT_DATA_DATA_MASK, val);
+
+ write_sequnlock(&cache->data_seqlock);
+
+ /*
+ * While PVT core is doing the next mode data conversion, we'll check
+ * whether the alarms were triggered for the current sensor. Note that
+	 * according to the documentation only one threshold IRQ status can be
+	 * set at a time, which is why an if-else statement is used.
+ */
+ if ((thres_sts & info->thres_sts_lo) ^ cache->thres_sts_lo) {
+ WRITE_ONCE(cache->thres_sts_lo, thres_sts & info->thres_sts_lo);
+ hwmon_notify_event(pvt->hwmon, info->type, info->attr_min_alarm,
+ info->channel);
+ } else if ((thres_sts & info->thres_sts_hi) ^ cache->thres_sts_hi) {
+ WRITE_ONCE(cache->thres_sts_hi, thres_sts & info->thres_sts_hi);
+ hwmon_notify_event(pvt->hwmon, info->type, info->attr_max_alarm,
+ info->channel);
+ }
+
+ return IRQ_HANDLED;
+}
+
+inline umode_t pvt_limit_is_visible(enum pvt_sensor_type type)
+{
+ return 0644;
+}
+
+inline umode_t pvt_alarm_is_visible(enum pvt_sensor_type type)
+{
+ return 0444;
+}
+
+static int pvt_read_data(struct pvt_hwmon *pvt, enum pvt_sensor_type type,
+ long *val)
+{
+ struct pvt_cache *cache = &pvt->cache[type];
+ unsigned int seq;
+ u32 data;
+
+ do {
+ seq = read_seqbegin(&cache->data_seqlock);
+ data = cache->data;
+ } while (read_seqretry(&cache->data_seqlock, seq));
+
+ if (type == PVT_TEMP)
+ *val = pvt_calc_poly(&poly_N_to_temp, data);
+ else
+ *val = pvt_calc_poly(&poly_N_to_volt, data);
+
+ return 0;
+}
+
+static int pvt_read_limit(struct pvt_hwmon *pvt, enum pvt_sensor_type type,
+ bool is_low, long *val)
+{
+ u32 data;
+
+	/* No need for serialization, since it is just read from MMIO. */
+ data = readl(pvt->regs + pvt_info[type].thres_base);
+
+ if (is_low)
+ data = FIELD_GET(PVT_THRES_LO_MASK, data);
+ else
+ data = FIELD_GET(PVT_THRES_HI_MASK, data);
+
+ if (type == PVT_TEMP)
+ *val = pvt_calc_poly(&poly_N_to_temp, data);
+ else
+ *val = pvt_calc_poly(&poly_N_to_volt, data);
+
+ return 0;
+}
+
+static int pvt_write_limit(struct pvt_hwmon *pvt, enum pvt_sensor_type type,
+ bool is_low, long val)
+{
+ u32 data, limit, mask;
+ int ret;
+
+ if (type == PVT_TEMP) {
+ val = clamp(val, PVT_TEMP_MIN, PVT_TEMP_MAX);
+ data = pvt_calc_poly(&poly_temp_to_N, val);
+ } else {
+ val = clamp(val, PVT_VOLT_MIN, PVT_VOLT_MAX);
+ data = pvt_calc_poly(&poly_volt_to_N, val);
+ }
+
+ /* Serialize limit update, since a part of the register is changed. */
+ ret = mutex_lock_interruptible(&pvt->iface_mtx);
+ if (ret)
+ return ret;
+
+ /* Make sure the upper and lower ranges don't intersect. */
+ limit = readl(pvt->regs + pvt_info[type].thres_base);
+ if (is_low) {
+ limit = FIELD_GET(PVT_THRES_HI_MASK, limit);
+ data = clamp_val(data, PVT_DATA_MIN, limit);
+ data = FIELD_PREP(PVT_THRES_LO_MASK, data);
+ mask = PVT_THRES_LO_MASK;
+ } else {
+ limit = FIELD_GET(PVT_THRES_LO_MASK, limit);
+ data = clamp_val(data, limit, PVT_DATA_MAX);
+ data = FIELD_PREP(PVT_THRES_HI_MASK, data);
+ mask = PVT_THRES_HI_MASK;
+ }
+
+ pvt_update(pvt->regs + pvt_info[type].thres_base, mask, data);
+
+ mutex_unlock(&pvt->iface_mtx);
+
+ return 0;
+}
+
+static int pvt_read_alarm(struct pvt_hwmon *pvt, enum pvt_sensor_type type,
+ bool is_low, long *val)
+{
+ if (is_low)
+ *val = !!READ_ONCE(pvt->cache[type].thres_sts_lo);
+ else
+ *val = !!READ_ONCE(pvt->cache[type].thres_sts_hi);
+
+ return 0;
+}
+
+static const struct hwmon_channel_info *pvt_channel_info[] = {
+ HWMON_CHANNEL_INFO(chip,
+ HWMON_C_REGISTER_TZ | HWMON_C_UPDATE_INTERVAL),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_TYPE | HWMON_T_LABEL |
+ HWMON_T_MIN | HWMON_T_MIN_ALARM |
+ HWMON_T_MAX | HWMON_T_MAX_ALARM |
+ HWMON_T_OFFSET),
+ HWMON_CHANNEL_INFO(in,
+ HWMON_I_INPUT | HWMON_I_LABEL |
+ HWMON_I_MIN | HWMON_I_MIN_ALARM |
+ HWMON_I_MAX | HWMON_I_MAX_ALARM,
+ HWMON_I_INPUT | HWMON_I_LABEL |
+ HWMON_I_MIN | HWMON_I_MIN_ALARM |
+ HWMON_I_MAX | HWMON_I_MAX_ALARM,
+ HWMON_I_INPUT | HWMON_I_LABEL |
+ HWMON_I_MIN | HWMON_I_MIN_ALARM |
+ HWMON_I_MAX | HWMON_I_MAX_ALARM,
+ HWMON_I_INPUT | HWMON_I_LABEL |
+ HWMON_I_MIN | HWMON_I_MIN_ALARM |
+ HWMON_I_MAX | HWMON_I_MAX_ALARM),
+ NULL
+};
+
+#else /* !CONFIG_SENSORS_BT1_PVT_ALARMS */
+
+static irqreturn_t pvt_hard_isr(int irq, void *data)
+{
+ struct pvt_hwmon *pvt = data;
+ struct pvt_cache *cache;
+ u32 val;
+
+ /*
+	 * Mask the DVALID interrupt so that a repeated conversion won't
+	 * happen after exiting from the handler.
+ */
+ pvt_update(pvt->regs + PVT_INTR_MASK, PVT_INTR_DVALID,
+ PVT_INTR_DVALID);
+
+ /*
+ * Nothing special for alarm-less driver. Just read the data, update
+ * the cache and notify a waiter of this event.
+ */
+ val = readl(pvt->regs + PVT_DATA);
+ if (!(val & PVT_DATA_VALID)) {
+ dev_err(pvt->dev, "Got IRQ when data isn't valid\n");
+ return IRQ_HANDLED;
+ }
+
+ cache = &pvt->cache[pvt->sensor];
+
+ WRITE_ONCE(cache->data, FIELD_GET(PVT_DATA_DATA_MASK, val));
+
+ complete(&cache->conversion);
+
+ return IRQ_HANDLED;
+}
+
+#define pvt_soft_isr NULL
+
+inline umode_t pvt_limit_is_visible(enum pvt_sensor_type type)
+{
+ return 0;
+}
+
+inline umode_t pvt_alarm_is_visible(enum pvt_sensor_type type)
+{
+ return 0;
+}
+
+static int pvt_read_data(struct pvt_hwmon *pvt, enum pvt_sensor_type type,
+ long *val)
+{
+ struct pvt_cache *cache = &pvt->cache[type];
+ u32 data;
+ int ret;
+
+ /*
+	 * data read procedure is as follows: set the requested PVT sensor
+ * data read procedure is following: set the requested PVT sensor
+ * mode, enable IRQ and conversion, wait until conversion is finished,
+ * then disable conversion and IRQ, and read the cached data.
+ */
+ ret = mutex_lock_interruptible(&pvt->iface_mtx);
+ if (ret)
+ return ret;
+
+ pvt->sensor = type;
+ pvt_set_mode(pvt, pvt_info[type].mode);
+
+ /*
+ * Unmask the DVALID interrupt and enable the sensors conversions.
+ * Do the reverse procedure when conversion is done.
+ */
+ pvt_update(pvt->regs + PVT_INTR_MASK, PVT_INTR_DVALID, 0);
+ pvt_update(pvt->regs + PVT_CTRL, PVT_CTRL_EN, PVT_CTRL_EN);
+
+ wait_for_completion(&cache->conversion);
+
+ pvt_update(pvt->regs + PVT_CTRL, PVT_CTRL_EN, 0);
+ pvt_update(pvt->regs + PVT_INTR_MASK, PVT_INTR_DVALID,
+ PVT_INTR_DVALID);
+
+ data = READ_ONCE(cache->data);
+
+ mutex_unlock(&pvt->iface_mtx);
+
+ if (type == PVT_TEMP)
+ *val = pvt_calc_poly(&poly_N_to_temp, data);
+ else
+ *val = pvt_calc_poly(&poly_N_to_volt, data);
+
+ return 0;
+}
+
+static int pvt_read_limit(struct pvt_hwmon *pvt, enum pvt_sensor_type type,
+ bool is_low, long *val)
+{
+ return -EOPNOTSUPP;
+}
+
+static int pvt_write_limit(struct pvt_hwmon *pvt, enum pvt_sensor_type type,
+ bool is_low, long val)
+{
+ return -EOPNOTSUPP;
+}
+
+static int pvt_read_alarm(struct pvt_hwmon *pvt, enum pvt_sensor_type type,
+ bool is_low, long *val)
+{
+ return -EOPNOTSUPP;
+}
+
+static const struct hwmon_channel_info *pvt_channel_info[] = {
+ HWMON_CHANNEL_INFO(chip,
+ HWMON_C_REGISTER_TZ | HWMON_C_UPDATE_INTERVAL),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_TYPE | HWMON_T_LABEL |
+ HWMON_T_OFFSET),
+ HWMON_CHANNEL_INFO(in,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL),
+ NULL
+};
+
+#endif /* !CONFIG_SENSORS_BT1_PVT_ALARMS */
+
+static inline bool pvt_hwmon_channel_is_valid(enum hwmon_sensor_types type,
+ int ch)
+{
+ switch (type) {
+ case hwmon_temp:
+ if (ch < 0 || ch >= PVT_TEMP_CHS)
+ return false;
+ break;
+ case hwmon_in:
+ if (ch < 0 || ch >= PVT_VOLT_CHS)
+ return false;
+ break;
+ default:
+ break;
+ }
+
+	/* The rest of the types are independent of the channel number. */
+ return true;
+}
+
+static umode_t pvt_hwmon_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int ch)
+{
+ if (!pvt_hwmon_channel_is_valid(type, ch))
+ return 0;
+
+ switch (type) {
+ case hwmon_chip:
+ switch (attr) {
+ case hwmon_chip_update_interval:
+ return 0644;
+ }
+ break;
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_type:
+ case hwmon_temp_label:
+ return 0444;
+ case hwmon_temp_min:
+ case hwmon_temp_max:
+ return pvt_limit_is_visible(ch);
+ case hwmon_temp_min_alarm:
+ case hwmon_temp_max_alarm:
+ return pvt_alarm_is_visible(ch);
+ case hwmon_temp_offset:
+ return 0644;
+ }
+ break;
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_input:
+ case hwmon_in_label:
+ return 0444;
+ case hwmon_in_min:
+ case hwmon_in_max:
+ return pvt_limit_is_visible(PVT_VOLT + ch);
+ case hwmon_in_min_alarm:
+ case hwmon_in_max_alarm:
+ return pvt_alarm_is_visible(PVT_VOLT + ch);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int pvt_read_trim(struct pvt_hwmon *pvt, long *val)
+{
+ u32 data;
+
+ data = readl(pvt->regs + PVT_CTRL);
+ *val = FIELD_GET(PVT_CTRL_TRIM_MASK, data) * PVT_TRIM_STEP;
+
+ return 0;
+}
+
+static int pvt_write_trim(struct pvt_hwmon *pvt, long val)
+{
+ u32 trim;
+ int ret;
+
+ /*
+ * Serialize trim update, since a part of the register is changed and
+ * the controller is supposed to be disabled during this operation.
+ */
+ ret = mutex_lock_interruptible(&pvt->iface_mtx);
+ if (ret)
+ return ret;
+
+ trim = pvt_calc_trim(val);
+ pvt_set_trim(pvt, trim);
+
+ mutex_unlock(&pvt->iface_mtx);
+
+ return 0;
+}
+
+static int pvt_read_timeout(struct pvt_hwmon *pvt, long *val)
+{
+ unsigned long rate;
+ ktime_t kt;
+ u32 data;
+
+ rate = clk_get_rate(pvt->clks[PVT_CLOCK_REF].clk);
+ if (!rate)
+ return -ENODEV;
+
+ /*
+ * Don't bother with mutex here, since we just read data from MMIO.
+	 * We also have to scale the ticks timeout up to compensate for the
+	 * ms-ns-data translations.
+ */
+ data = readl(pvt->regs + PVT_TTIMEOUT) + 1;
+
+ /*
+ * Calculate ref-clock based delay (Ttotal) between two consecutive
+ * data samples of the same sensor. So we first must calculate the
+	 * delay introduced by the internal ref-clock timer (Tref * Fclk).
+	 * Then add the constant timeout caused by each conversion latency
+	 * (Tmin). The basic formula for each conversion is:
+	 *   Ttotal = Tref * Fclk + Tmin
+	 * Note if alarms are enabled the sensors are polled one after
+	 * another, so in order to have the delay applicable to each
+	 * sensor the requested value must be equally redistributed.
+ */
+#if defined(CONFIG_SENSORS_BT1_PVT_ALARMS)
+ kt = ktime_set(PVT_SENSORS_NUM * (u64)data, 0);
+ kt = ktime_divns(kt, rate);
+ kt = ktime_add_ns(kt, PVT_SENSORS_NUM * PVT_TOUT_MIN);
+#else
+ kt = ktime_set(data, 0);
+ kt = ktime_divns(kt, rate);
+ kt = ktime_add_ns(kt, PVT_TOUT_MIN);
+#endif
+
+ /* Return the result in msec as hwmon sysfs interface requires. */
+ *val = ktime_to_ms(kt);
+
+ return 0;
+}
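+/*
+ * Illustrative figures (assuming a 1 MHz reference clock): PVT_TTIMEOUT = 999
+ * gives data = 1000, i.e. a 1 ms ref-clock delay per conversion plus the
+ * constant ~333 us conversion latency (PVT_TOUT_MIN); with alarms enabled
+ * both terms are multiplied by the number of sensors.
+ */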
+
+static int pvt_write_timeout(struct pvt_hwmon *pvt, long val)
+{
+ unsigned long rate;
+ ktime_t kt;
+ u32 data;
+ int ret;
+
+ rate = clk_get_rate(pvt->clks[PVT_CLOCK_REF].clk);
+ if (!rate)
+ return -ENODEV;
+
+ /*
+ * If alarms are enabled, the requested timeout must be divided
+ * between all available sensors to have the requested delay
+ * applicable to each individual sensor.
+ */
+ kt = ms_to_ktime(val);
+#if defined(CONFIG_SENSORS_BT1_PVT_ALARMS)
+ kt = ktime_divns(kt, PVT_SENSORS_NUM);
+#endif
+
+ /*
+ * Subtract a constant lag, which always persists due to the limited
+ * PVT sampling rate. Make sure the timeout is not negative.
+ */
+ kt = ktime_sub_ns(kt, PVT_TOUT_MIN);
+ if (ktime_to_ns(kt) < 0)
+ kt = ktime_set(0, 0);
+
+ /*
+ * Finally recalculate the timeout in terms of the reference clock
+ * period.
+ */
+ data = ktime_divns(kt * rate, NSEC_PER_SEC);
+
+ /*
+ * Update the measurements delay, but lock the interface first, since
+ * we have to disable PVT in order to have the new delay actually
+ * updated.
+ */
+ ret = mutex_lock_interruptible(&pvt->iface_mtx);
+ if (ret)
+ return ret;
+
+ pvt_set_tout(pvt, data);
+
+ mutex_unlock(&pvt->iface_mtx);
+
+ return 0;
+}
+
+static int pvt_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int ch, long *val)
+{
+ struct pvt_hwmon *pvt = dev_get_drvdata(dev);
+
+ if (!pvt_hwmon_channel_is_valid(type, ch))
+ return -EINVAL;
+
+ switch (type) {
+ case hwmon_chip:
+ switch (attr) {
+ case hwmon_chip_update_interval:
+ return pvt_read_timeout(pvt, val);
+ }
+ break;
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_input:
+ return pvt_read_data(pvt, ch, val);
+ case hwmon_temp_type:
+ *val = 1;
+ return 0;
+ case hwmon_temp_min:
+ return pvt_read_limit(pvt, ch, true, val);
+ case hwmon_temp_max:
+ return pvt_read_limit(pvt, ch, false, val);
+ case hwmon_temp_min_alarm:
+ return pvt_read_alarm(pvt, ch, true, val);
+ case hwmon_temp_max_alarm:
+ return pvt_read_alarm(pvt, ch, false, val);
+ case hwmon_temp_offset:
+ return pvt_read_trim(pvt, val);
+ }
+ break;
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_input:
+ return pvt_read_data(pvt, PVT_VOLT + ch, val);
+ case hwmon_in_min:
+ return pvt_read_limit(pvt, PVT_VOLT + ch, true, val);
+ case hwmon_in_max:
+ return pvt_read_limit(pvt, PVT_VOLT + ch, false, val);
+ case hwmon_in_min_alarm:
+ return pvt_read_alarm(pvt, PVT_VOLT + ch, true, val);
+ case hwmon_in_max_alarm:
+ return pvt_read_alarm(pvt, PVT_VOLT + ch, false, val);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int pvt_hwmon_read_string(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int ch, const char **str)
+{
+ if (!pvt_hwmon_channel_is_valid(type, ch))
+ return -EINVAL;
+
+ switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_label:
+ *str = pvt_info[ch].label;
+ return 0;
+ }
+ break;
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_label:
+ *str = pvt_info[PVT_VOLT + ch].label;
+ return 0;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int pvt_hwmon_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int ch, long val)
+{
+ struct pvt_hwmon *pvt = dev_get_drvdata(dev);
+
+ if (!pvt_hwmon_channel_is_valid(type, ch))
+ return -EINVAL;
+
+ switch (type) {
+ case hwmon_chip:
+ switch (attr) {
+ case hwmon_chip_update_interval:
+ return pvt_write_timeout(pvt, val);
+ }
+ break;
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_min:
+ return pvt_write_limit(pvt, ch, true, val);
+ case hwmon_temp_max:
+ return pvt_write_limit(pvt, ch, false, val);
+ case hwmon_temp_offset:
+ return pvt_write_trim(pvt, val);
+ }
+ break;
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_min:
+ return pvt_write_limit(pvt, PVT_VOLT + ch, true, val);
+ case hwmon_in_max:
+ return pvt_write_limit(pvt, PVT_VOLT + ch, false, val);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static const struct hwmon_ops pvt_hwmon_ops = {
+ .is_visible = pvt_hwmon_is_visible,
+ .read = pvt_hwmon_read,
+ .read_string = pvt_hwmon_read_string,
+ .write = pvt_hwmon_write
+};
+
+static const struct hwmon_chip_info pvt_hwmon_info = {
+ .ops = &pvt_hwmon_ops,
+ .info = pvt_channel_info
+};
+
+static void pvt_clear_data(void *data)
+{
+ struct pvt_hwmon *pvt = data;
+#if !defined(CONFIG_SENSORS_BT1_PVT_ALARMS)
+ int idx;
+
+ for (idx = 0; idx < PVT_SENSORS_NUM; ++idx)
+ complete_all(&pvt->cache[idx].conversion);
+#endif
+
+ mutex_destroy(&pvt->iface_mtx);
+}
+
+static struct pvt_hwmon *pvt_create_data(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct pvt_hwmon *pvt;
+ int ret, idx;
+
+ pvt = devm_kzalloc(dev, sizeof(*pvt), GFP_KERNEL);
+ if (!pvt)
+ return ERR_PTR(-ENOMEM);
+
+ ret = devm_add_action(dev, pvt_clear_data, pvt);
+ if (ret) {
+ dev_err(dev, "Can't add PVT data clear action\n");
+ return ERR_PTR(ret);
+ }
+
+ pvt->dev = dev;
+ pvt->sensor = PVT_SENSOR_FIRST;
+ mutex_init(&pvt->iface_mtx);
+
+#if defined(CONFIG_SENSORS_BT1_PVT_ALARMS)
+ for (idx = 0; idx < PVT_SENSORS_NUM; ++idx)
+ seqlock_init(&pvt->cache[idx].data_seqlock);
+#else
+ for (idx = 0; idx < PVT_SENSORS_NUM; ++idx)
+ init_completion(&pvt->cache[idx].conversion);
+#endif
+
+ return pvt;
+}
+
+static int pvt_request_regs(struct pvt_hwmon *pvt)
+{
+ struct platform_device *pdev = to_platform_device(pvt->dev);
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(pvt->dev, "Couldn't find PVT memresource\n");
+ return -EINVAL;
+ }
+
+ pvt->regs = devm_ioremap_resource(pvt->dev, res);
+ if (IS_ERR(pvt->regs)) {
+ dev_err(pvt->dev, "Couldn't map PVT registers\n");
+ return PTR_ERR(pvt->regs);
+ }
+
+ return 0;
+}
+
+static void pvt_disable_clks(void *data)
+{
+ struct pvt_hwmon *pvt = data;
+
+ clk_bulk_disable_unprepare(PVT_CLOCK_NUM, pvt->clks);
+}
+
+static int pvt_request_clks(struct pvt_hwmon *pvt)
+{
+ int ret;
+
+ pvt->clks[PVT_CLOCK_APB].id = "pclk";
+ pvt->clks[PVT_CLOCK_REF].id = "ref";
+
+ ret = devm_clk_bulk_get(pvt->dev, PVT_CLOCK_NUM, pvt->clks);
+ if (ret) {
+ dev_err(pvt->dev, "Couldn't get PVT clocks descriptors\n");
+ return ret;
+ }
+
+ ret = clk_bulk_prepare_enable(PVT_CLOCK_NUM, pvt->clks);
+ if (ret) {
+ dev_err(pvt->dev, "Couldn't enable the PVT clocks\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(pvt->dev, pvt_disable_clks, pvt);
+ if (ret) {
+ dev_err(pvt->dev, "Can't add PVT clocks disable action\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void pvt_init_iface(struct pvt_hwmon *pvt)
+{
+ u32 trim, temp;
+
+ /*
+	 * Make sure all interrupts and the controller are disabled so as not
+	 * to accidentally have the ISR executed before the driver data is
+	 * fully initialized. Clear the IRQ status as well.
+ */
+ pvt_update(pvt->regs + PVT_INTR_MASK, PVT_INTR_ALL, PVT_INTR_ALL);
+ pvt_update(pvt->regs + PVT_CTRL, PVT_CTRL_EN, 0);
+ readl(pvt->regs + PVT_CLR_INTR);
+ readl(pvt->regs + PVT_DATA);
+
+ /* Setup default sensor mode, timeout and temperature trim. */
+ pvt_set_mode(pvt, pvt_info[pvt->sensor].mode);
+ pvt_set_tout(pvt, PVT_TOUT_DEF);
+
+ trim = PVT_TRIM_DEF;
+ if (!of_property_read_u32(pvt->dev->of_node,
+ "baikal,pvt-temp-offset-millicelsius", &temp))
+ trim = pvt_calc_trim(temp);
+
+ pvt_set_trim(pvt, trim);
+}
+
+static int pvt_request_irq(struct pvt_hwmon *pvt)
+{
+ struct platform_device *pdev = to_platform_device(pvt->dev);
+ int ret;
+
+ pvt->irq = platform_get_irq(pdev, 0);
+ if (pvt->irq < 0)
+ return pvt->irq;
+
+ ret = devm_request_threaded_irq(pvt->dev, pvt->irq,
+ pvt_hard_isr, pvt_soft_isr,
+#if defined(CONFIG_SENSORS_BT1_PVT_ALARMS)
+ IRQF_SHARED | IRQF_TRIGGER_HIGH |
+ IRQF_ONESHOT,
+#else
+ IRQF_SHARED | IRQF_TRIGGER_HIGH,
+#endif
+ "pvt", pvt);
+ if (ret) {
+ dev_err(pvt->dev, "Couldn't request PVT IRQ\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int pvt_create_hwmon(struct pvt_hwmon *pvt)
+{
+ pvt->hwmon = devm_hwmon_device_register_with_info(pvt->dev, "pvt", pvt,
+ &pvt_hwmon_info, NULL);
+ if (IS_ERR(pvt->hwmon)) {
+ dev_err(pvt->dev, "Couldn't create hwmon device\n");
+ return PTR_ERR(pvt->hwmon);
+ }
+
+ return 0;
+}
+
+#if defined(CONFIG_SENSORS_BT1_PVT_ALARMS)
+
+static void pvt_disable_iface(void *data)
+{
+ struct pvt_hwmon *pvt = data;
+
+ mutex_lock(&pvt->iface_mtx);
+ pvt_update(pvt->regs + PVT_CTRL, PVT_CTRL_EN, 0);
+ pvt_update(pvt->regs + PVT_INTR_MASK, PVT_INTR_DVALID,
+ PVT_INTR_DVALID);
+ mutex_unlock(&pvt->iface_mtx);
+}
+
+static int pvt_enable_iface(struct pvt_hwmon *pvt)
+{
+ int ret;
+
+ ret = devm_add_action(pvt->dev, pvt_disable_iface, pvt);
+ if (ret) {
+ dev_err(pvt->dev, "Can't add PVT disable interface action\n");
+ return ret;
+ }
+
+ /*
+ * Enable sensors data conversion and IRQ. We need to lock the
+ * interface mutex since hwmon has just been created and the
+ * corresponding sysfs files are accessible from user-space,
+ * which theoretically may cause races.
+ */
+ mutex_lock(&pvt->iface_mtx);
+ pvt_update(pvt->regs + PVT_INTR_MASK, PVT_INTR_DVALID, 0);
+ pvt_update(pvt->regs + PVT_CTRL, PVT_CTRL_EN, PVT_CTRL_EN);
+ mutex_unlock(&pvt->iface_mtx);
+
+ return 0;
+}
+
+#else /* !CONFIG_SENSORS_BT1_PVT_ALARMS */
+
+static int pvt_enable_iface(struct pvt_hwmon *pvt)
+{
+ return 0;
+}
+
+#endif /* !CONFIG_SENSORS_BT1_PVT_ALARMS */
+
+static int pvt_probe(struct platform_device *pdev)
+{
+ struct pvt_hwmon *pvt;
+ int ret;
+
+ pvt = pvt_create_data(pdev);
+ if (IS_ERR(pvt))
+ return PTR_ERR(pvt);
+
+ ret = pvt_request_regs(pvt);
+ if (ret)
+ return ret;
+
+ ret = pvt_request_clks(pvt);
+ if (ret)
+ return ret;
+
+ pvt_init_iface(pvt);
+
+ ret = pvt_request_irq(pvt);
+ if (ret)
+ return ret;
+
+ ret = pvt_create_hwmon(pvt);
+ if (ret)
+ return ret;
+
+ ret = pvt_enable_iface(pvt);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct of_device_id pvt_of_match[] = {
+ { .compatible = "baikal,bt1-pvt" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, pvt_of_match);
+
+static struct platform_driver pvt_driver = {
+ .probe = pvt_probe,
+ .driver = {
+ .name = "bt1-pvt",
+ .of_match_table = pvt_of_match
+ }
+};
+module_platform_driver(pvt_driver);
+
+MODULE_AUTHOR("Maxim Kaurkin <maxim.kaurkin@baikalelectronics.ru>");
+MODULE_DESCRIPTION("Baikal-T1 PVT driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hwmon/bt1-pvt.h b/drivers/hwmon/bt1-pvt.h
new file mode 100644
index 000000000000..5eac73e94885
--- /dev/null
+++ b/drivers/hwmon/bt1-pvt.h
@@ -0,0 +1,244 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+ *
+ * Baikal-T1 Process, Voltage, Temperature sensor driver
+ */
+#ifndef __HWMON_BT1_PVT_H__
+#define __HWMON_BT1_PVT_H__
+
+#include <linux/completion.h>
+#include <linux/hwmon.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/seqlock.h>
+
+/* Baikal-T1 PVT registers and their bitfields */
+#define PVT_CTRL 0x00
+#define PVT_CTRL_EN BIT(0)
+#define PVT_CTRL_MODE_FLD 1
+#define PVT_CTRL_MODE_MASK GENMASK(3, PVT_CTRL_MODE_FLD)
+#define PVT_CTRL_MODE_TEMP 0x0
+#define PVT_CTRL_MODE_VOLT 0x1
+#define PVT_CTRL_MODE_LVT 0x2
+#define PVT_CTRL_MODE_HVT 0x4
+#define PVT_CTRL_MODE_SVT 0x6
+#define PVT_CTRL_TRIM_FLD 4
+#define PVT_CTRL_TRIM_MASK GENMASK(8, PVT_CTRL_TRIM_FLD)
+#define PVT_DATA 0x04
+#define PVT_DATA_VALID BIT(10)
+#define PVT_DATA_DATA_FLD 0
+#define PVT_DATA_DATA_MASK GENMASK(9, PVT_DATA_DATA_FLD)
+#define PVT_TTHRES 0x08
+#define PVT_VTHRES 0x0C
+#define PVT_LTHRES 0x10
+#define PVT_HTHRES 0x14
+#define PVT_STHRES 0x18
+#define PVT_THRES_LO_FLD 0
+#define PVT_THRES_LO_MASK GENMASK(9, PVT_THRES_LO_FLD)
+#define PVT_THRES_HI_FLD 10
+#define PVT_THRES_HI_MASK GENMASK(19, PVT_THRES_HI_FLD)
+#define PVT_TTIMEOUT 0x1C
+#define PVT_INTR_STAT 0x20
+#define PVT_INTR_MASK 0x24
+#define PVT_RAW_INTR_STAT 0x28
+#define PVT_INTR_DVALID BIT(0)
+#define PVT_INTR_TTHRES_LO BIT(1)
+#define PVT_INTR_TTHRES_HI BIT(2)
+#define PVT_INTR_VTHRES_LO BIT(3)
+#define PVT_INTR_VTHRES_HI BIT(4)
+#define PVT_INTR_LTHRES_LO BIT(5)
+#define PVT_INTR_LTHRES_HI BIT(6)
+#define PVT_INTR_HTHRES_LO BIT(7)
+#define PVT_INTR_HTHRES_HI BIT(8)
+#define PVT_INTR_STHRES_LO BIT(9)
+#define PVT_INTR_STHRES_HI BIT(10)
+#define PVT_INTR_ALL GENMASK(10, 0)
+#define PVT_CLR_INTR 0x2C
+
+/*
+ * PVT sensors-related limits and default values
+ * @PVT_TEMP_MIN: Minimal temperature in millidegrees of Celsius.
+ * @PVT_TEMP_MAX: Maximal temperature in millidegrees of Celsius.
+ * @PVT_TEMP_CHS: Number of temperature hwmon channels.
+ * @PVT_VOLT_MIN: Minimal voltage in mV.
+ * @PVT_VOLT_MAX: Maximal voltage in mV.
+ * @PVT_VOLT_CHS: Number of voltage hwmon channels.
+ * @PVT_DATA_MIN: Minimal PVT raw data value.
+ * @PVT_DATA_MAX: Maximal PVT raw data value.
+ * @PVT_TRIM_MIN: Minimal temperature sensor trim value.
+ * @PVT_TRIM_MAX: Maximal temperature sensor trim value.
+ * @PVT_TRIM_DEF: Default temperature sensor trim value (set a proper value
+ * when one is determined for Baikal-T1 SoC).
+ * @PVT_TRIM_TEMP: Maximum temperature encoded by the trim factor.
+ * @PVT_TRIM_STEP: Temperature stride corresponding to the trim value.
+ * @PVT_TOUT_MIN: Minimal timeout between samples in nanoseconds.
+ * @PVT_TOUT_DEF: Default data measurements timeout. If alarms are
+ *		  activated, the PVT IRQ is enabled to be raised after each
+ *		  conversion in order to have the thresholds checked and the
+ *		  converted value cached. Too frequent conversions may cause
+ *		  system CPU overload. Let's set a 50ms delay between
+ *		  them by default to prevent this.
+ */
+#define PVT_TEMP_MIN -48380L
+#define PVT_TEMP_MAX 147438L
+#define PVT_TEMP_CHS 1
+#define PVT_VOLT_MIN 620L
+#define PVT_VOLT_MAX 1168L
+#define PVT_VOLT_CHS 4
+#define PVT_DATA_MIN 0
+#define PVT_DATA_MAX (PVT_DATA_DATA_MASK >> PVT_DATA_DATA_FLD)
+#define PVT_TRIM_MIN 0
+#define PVT_TRIM_MAX (PVT_CTRL_TRIM_MASK >> PVT_CTRL_TRIM_FLD)
+#define PVT_TRIM_TEMP 7130
+#define PVT_TRIM_STEP (PVT_TRIM_TEMP / PVT_TRIM_MAX)
+#define PVT_TRIM_DEF 0
+#define PVT_TOUT_MIN (NSEC_PER_SEC / 3000)
+#if defined(CONFIG_SENSORS_BT1_PVT_ALARMS)
+# define PVT_TOUT_DEF 60000
+#else
+# define PVT_TOUT_DEF 0
+#endif
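+/*
+ * Illustrative note: PVT_TRIM_MAX works out to 31, so PVT_TRIM_STEP is
+ * 7130 / 31 = 230 millidegrees per trim step.
+ */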
+
+/*
+ * enum pvt_sensor_type - Baikal-T1 PVT sensor types (correspond to each PVT
+ * sampling mode)
+ * @PVT_SENSOR*: helpers to traverse the sensors in loops.
+ * @PVT_TEMP: PVT Temperature sensor.
+ * @PVT_VOLT: PVT Voltage sensor.
+ * @PVT_LVT: PVT Low-Voltage threshold sensor.
+ * @PVT_HVT: PVT High-Voltage threshold sensor.
+ * @PVT_SVT: PVT Standard-Voltage threshold sensor.
+ */
+enum pvt_sensor_type {
+ PVT_SENSOR_FIRST,
+ PVT_TEMP = PVT_SENSOR_FIRST,
+ PVT_VOLT,
+ PVT_LVT,
+ PVT_HVT,
+ PVT_SVT,
+ PVT_SENSOR_LAST = PVT_SVT,
+ PVT_SENSORS_NUM
+};
+
+/*
+ * enum pvt_clock_type - Baikal-T1 PVT clocks.
+ * @PVT_CLOCK_APB: APB clock.
+ * @PVT_CLOCK_REF: PVT reference clock.
+ */
+enum pvt_clock_type {
+ PVT_CLOCK_APB,
+ PVT_CLOCK_REF,
+ PVT_CLOCK_NUM
+};
+
+/*
+ * struct pvt_sensor_info - Baikal-T1 PVT sensor informational structure
+ * @channel: Sensor channel ID.
+ * @label: hwmon sensor label.
+ * @mode: PVT mode corresponding to the channel.
+ * @thres_base: base address of the sensor's upper/lower thresholds register.
+ * @thres_sts_lo: low threshold status bitfield.
+ * @thres_sts_hi: high threshold status bitfield.
+ * @type: Sensor type.
+ * @attr_min_alarm: Min alarm attribute ID.
+ * @attr_max_alarm: Max alarm attribute ID.
+ */
+struct pvt_sensor_info {
+ int channel;
+ const char *label;
+ u32 mode;
+ unsigned long thres_base;
+ u32 thres_sts_lo;
+ u32 thres_sts_hi;
+ enum hwmon_sensor_types type;
+ u32 attr_min_alarm;
+ u32 attr_max_alarm;
+};
+
+#define PVT_SENSOR_INFO(_ch, _label, _type, _mode, _thres) \
+ { \
+ .channel = _ch, \
+ .label = _label, \
+ .mode = PVT_CTRL_MODE_ ##_mode, \
+ .thres_base = PVT_ ##_thres, \
+ .thres_sts_lo = PVT_INTR_ ##_thres## _LO, \
+ .thres_sts_hi = PVT_INTR_ ##_thres## _HI, \
+ .type = _type, \
+ .attr_min_alarm = _type## _min, \
+ .attr_max_alarm = _type## _max, \
+ }
+
+/*
+ * struct pvt_cache - PVT sensors data cache
+ * @data: data cache in raw format.
+ * @thres_sts_lo: low threshold status saved on the previous data conversion.
+ * @thres_sts_hi: high threshold status saved on the previous data conversion.
+ * @data_seqlock: cached data seq-lock.
+ * @conversion: data conversion completion.
+ */
+struct pvt_cache {
+ u32 data;
+#if defined(CONFIG_SENSORS_BT1_PVT_ALARMS)
+ seqlock_t data_seqlock;
+ u32 thres_sts_lo;
+ u32 thres_sts_hi;
+#else
+ struct completion conversion;
+#endif
+};
+
+/*
+ * struct pvt_hwmon - Baikal-T1 PVT private data
+ * @dev: device structure of the PVT platform device.
+ * @hwmon: hwmon device structure.
+ * @regs: pointer to the Baikal-T1 PVT registers region.
+ * @irq: PVT events IRQ number.
+ * @clks: Array of the PVT clock descriptors (APB/ref clocks).
+ * @iface_mtx: Generic interface mutex (used to lock the alarm registers
+ *	       when the alarms are enabled, or the data conversion interface
+ *	       if alarms are disabled).
+ * @sensor: current PVT sensor the data conversion is being performed for.
+ * @cache: data cache descriptor.
+ */
+struct pvt_hwmon {
+ struct device *dev;
+ struct device *hwmon;
+
+ void __iomem *regs;
+ int irq;
+
+ struct clk_bulk_data clks[PVT_CLOCK_NUM];
+
+ struct mutex iface_mtx;
+ enum pvt_sensor_type sensor;
+ struct pvt_cache cache[PVT_SENSORS_NUM];
+};
+
+/*
+ * struct pvt_poly_term - a term descriptor of the PVT data translation
+ * polynomial
+ * @deg: degree of the term.
+ * @coef: multiplication factor of the term.
+ * @divider: distributed divider per each degree.
+ * @divider_leftover: divider leftover, which couldn't be redistributed.
+ */
+struct pvt_poly_term {
+ unsigned int deg;
+ long coef;
+ long divider;
+ long divider_leftover;
+};
+
+/*
+ * struct pvt_poly - PVT data translation polynomial descriptor
+ * @total_divider: total data divider.
+ * @terms: polynomial terms up to a free one.
+ */
+struct pvt_poly {
+ long total_divider;
+ struct pvt_poly_term terms[];
+};
+
+#endif /* __HWMON_BT1_PVT_H__ */
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index ab719d372b0d..16be012a95ed 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -1073,13 +1073,6 @@ static const struct dmi_system_id i8k_dmi_table[] __initconst = {
},
},
{
- .ident = "Dell XPS421",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "XPS L421X"),
- },
- },
- {
.ident = "Dell Studio",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
@@ -1088,14 +1081,6 @@ static const struct dmi_system_id i8k_dmi_table[] __initconst = {
.driver_data = (void *)&i8k_config_data[DELL_STUDIO],
},
{
- .ident = "Dell XPS 13",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "XPS13"),
- },
- .driver_data = (void *)&i8k_config_data[DELL_XPS],
- },
- {
.ident = "Dell XPS M140",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
@@ -1104,17 +1089,10 @@ static const struct dmi_system_id i8k_dmi_table[] __initconst = {
.driver_data = (void *)&i8k_config_data[DELL_XPS],
},
{
- .ident = "Dell XPS 15 9560",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "XPS 15 9560"),
- },
- },
- {
- .ident = "Dell XPS 15 9570",
+ .ident = "Dell XPS",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "XPS 15 9570"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "XPS"),
},
},
{ }
diff --git a/drivers/hwmon/gsc-hwmon.c b/drivers/hwmon/gsc-hwmon.c
new file mode 100644
index 000000000000..2137bc65829d
--- /dev/null
+++ b/drivers/hwmon/gsc-hwmon.c
@@ -0,0 +1,390 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for Gateworks System Controller Hardware Monitor module
+ *
+ * Copyright (C) 2020 Gateworks Corporation
+ */
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/mfd/gsc.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include <linux/platform_data/gsc_hwmon.h>
+
+#define GSC_HWMON_MAX_TEMP_CH 16
+#define GSC_HWMON_MAX_IN_CH 16
+
+#define GSC_HWMON_RESOLUTION 12
+#define GSC_HWMON_VREF 2500
+
+struct gsc_hwmon_data {
+ struct gsc_dev *gsc;
+ struct gsc_hwmon_platform_data *pdata;
+ struct regmap *regmap;
+ const struct gsc_hwmon_channel *temp_ch[GSC_HWMON_MAX_TEMP_CH];
+ const struct gsc_hwmon_channel *in_ch[GSC_HWMON_MAX_IN_CH];
+ u32 temp_config[GSC_HWMON_MAX_TEMP_CH + 1];
+ u32 in_config[GSC_HWMON_MAX_IN_CH + 1];
+ struct hwmon_channel_info temp_info;
+ struct hwmon_channel_info in_info;
+ const struct hwmon_channel_info *info[3];
+ struct hwmon_chip_info chip;
+};
+
+static struct regmap_bus gsc_hwmon_regmap_bus = {
+ .reg_read = gsc_read,
+ .reg_write = gsc_write,
+};
+
+static const struct regmap_config gsc_hwmon_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .cache_type = REGCACHE_NONE,
+};
+
+static ssize_t pwm_auto_point_temp_show(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ struct gsc_hwmon_data *hwmon = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ u8 reg = hwmon->pdata->fan_base + (2 * attr->index);
+ u8 regs[2];
+ int ret;
+
+ ret = regmap_bulk_read(hwmon->regmap, reg, regs, 2);
+ if (ret)
+ return ret;
+
+ ret = regs[0] | regs[1] << 8;
+ return sprintf(buf, "%d\n", ret * 10);
+}
+
+static ssize_t pwm_auto_point_temp_store(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ struct gsc_hwmon_data *hwmon = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ u8 reg = hwmon->pdata->fan_base + (2 * attr->index);
+ u8 regs[2];
+ long temp;
+ int err;
+
+ if (kstrtol(buf, 10, &temp))
+ return -EINVAL;
+
+ temp = clamp_val(temp, 0, 10000);
+ temp = DIV_ROUND_CLOSEST(temp, 10);
+
+ regs[0] = temp & 0xff;
+ regs[1] = (temp >> 8) & 0xff;
+ err = regmap_bulk_write(hwmon->regmap, reg, regs, 2);
+ if (err)
+ return err;
+
+ return count;
+}
+
+static ssize_t pwm_auto_point_pwm_show(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+
+ return sprintf(buf, "%d\n", 255 * (50 + (attr->index * 10)) / 100);
+}
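+/*
+ * Illustrative mapping of the fixed points above: the PWM duty rises
+ * linearly from 50% to 100%, e.g. index 0 yields 255 * 50 / 100 = 127 and
+ * index 5 yields 255 * 100 / 100 = 255.
+ */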
+
+static SENSOR_DEVICE_ATTR_RO(pwm1_auto_point1_pwm, pwm_auto_point_pwm, 0);
+static SENSOR_DEVICE_ATTR_RW(pwm1_auto_point1_temp, pwm_auto_point_temp, 0);
+
+static SENSOR_DEVICE_ATTR_RO(pwm1_auto_point2_pwm, pwm_auto_point_pwm, 1);
+static SENSOR_DEVICE_ATTR_RW(pwm1_auto_point2_temp, pwm_auto_point_temp, 1);
+
+static SENSOR_DEVICE_ATTR_RO(pwm1_auto_point3_pwm, pwm_auto_point_pwm, 2);
+static SENSOR_DEVICE_ATTR_RW(pwm1_auto_point3_temp, pwm_auto_point_temp, 2);
+
+static SENSOR_DEVICE_ATTR_RO(pwm1_auto_point4_pwm, pwm_auto_point_pwm, 3);
+static SENSOR_DEVICE_ATTR_RW(pwm1_auto_point4_temp, pwm_auto_point_temp, 3);
+
+static SENSOR_DEVICE_ATTR_RO(pwm1_auto_point5_pwm, pwm_auto_point_pwm, 4);
+static SENSOR_DEVICE_ATTR_RW(pwm1_auto_point5_temp, pwm_auto_point_temp, 4);
+
+static SENSOR_DEVICE_ATTR_RO(pwm1_auto_point6_pwm, pwm_auto_point_pwm, 5);
+static SENSOR_DEVICE_ATTR_RW(pwm1_auto_point6_temp, pwm_auto_point_temp, 5);
+
+static struct attribute *gsc_hwmon_attributes[] = {
+ &sensor_dev_attr_pwm1_auto_point1_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point1_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point2_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point2_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point3_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point3_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point4_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point4_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point5_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point5_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point6_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point6_temp.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group gsc_hwmon_group = {
+ .attrs = gsc_hwmon_attributes,
+};
+__ATTRIBUTE_GROUPS(gsc_hwmon);
+
+static int
+gsc_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ int channel, long *val)
+{
+ struct gsc_hwmon_data *hwmon = dev_get_drvdata(dev);
+ const struct gsc_hwmon_channel *ch;
+ int sz, ret;
+ long tmp;
+ u8 buf[3];
+
+ switch (type) {
+ case hwmon_in:
+ ch = hwmon->in_ch[channel];
+ break;
+ case hwmon_temp:
+ ch = hwmon->temp_ch[channel];
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ sz = (ch->mode == mode_voltage) ? 3 : 2;
+ ret = regmap_bulk_read(hwmon->regmap, ch->reg, buf, sz);
+ if (ret)
+ return ret;
+
+ tmp = 0;
+ while (sz-- > 0)
+ tmp |= (buf[sz] << (8 * sz));
+
+ switch (ch->mode) {
+ case mode_temperature:
+ if (tmp > 0x8000)
+ tmp -= 0xffff;
+ break;
+ case mode_voltage_raw:
+ tmp = clamp_val(tmp, 0, BIT(GSC_HWMON_RESOLUTION));
+ /* scale based on ref voltage and ADC resolution */
+ tmp *= GSC_HWMON_VREF;
+ tmp >>= GSC_HWMON_RESOLUTION;
+ /* scale based on optional voltage divider */
+ if (ch->vdiv[0] && ch->vdiv[1]) {
+ tmp *= (ch->vdiv[0] + ch->vdiv[1]);
+ tmp /= ch->vdiv[1];
+ }
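+		/*
+		 * Illustrative example: a raw count of 2048 on the 12-bit
+		 * ADC with the 2500 mV reference scales to 1250 mV, and a
+		 * 10k/10k divider (when present) doubles that to 2500 mV
+		 * before the offset below is applied.
+		 */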
+		/* adjust by offset (mV) */
+ tmp += ch->mvoffset;
+ break;
+ case mode_voltage:
+ /* no adjustment needed */
+ break;
+ }
+
+ *val = tmp;
+
+ return 0;
+}
+
+static int
+gsc_hwmon_read_string(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, const char **buf)
+{
+ struct gsc_hwmon_data *hwmon = dev_get_drvdata(dev);
+
+ switch (type) {
+ case hwmon_in:
+ *buf = hwmon->in_ch[channel]->name;
+ break;
+ case hwmon_temp:
+ *buf = hwmon->temp_ch[channel]->name;
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static umode_t
+gsc_hwmon_is_visible(const void *_data, enum hwmon_sensor_types type, u32 attr,
+ int ch)
+{
+ return 0444;
+}
+
+static const struct hwmon_ops gsc_hwmon_ops = {
+ .is_visible = gsc_hwmon_is_visible,
+ .read = gsc_hwmon_read,
+ .read_string = gsc_hwmon_read_string,
+};
+
+static struct gsc_hwmon_platform_data *
+gsc_hwmon_get_devtree_pdata(struct device *dev)
+{
+ struct gsc_hwmon_platform_data *pdata;
+ struct gsc_hwmon_channel *ch;
+ struct fwnode_handle *child;
+ struct device_node *fan;
+ int nchannels;
+
+ nchannels = device_get_child_node_count(dev);
+ if (nchannels == 0)
+ return ERR_PTR(-ENODEV);
+
+ pdata = devm_kzalloc(dev,
+ sizeof(*pdata) + nchannels * sizeof(*ch),
+ GFP_KERNEL);
+ if (!pdata)
+ return ERR_PTR(-ENOMEM);
+ ch = (struct gsc_hwmon_channel *)(pdata + 1);
+ pdata->channels = ch;
+ pdata->nchannels = nchannels;
+
+ /* fan controller base address */
+ fan = of_find_compatible_node(dev->parent->of_node, NULL, "gw,gsc-fan");
+ if (fan && of_property_read_u32(fan, "reg", &pdata->fan_base)) {
+ dev_err(dev, "fan node without base\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* allocate structures for channels and count instances of each type */
+ device_for_each_child_node(dev, child) {
+ if (fwnode_property_read_string(child, "label", &ch->name)) {
+ dev_err(dev, "channel without label\n");
+ fwnode_handle_put(child);
+ return ERR_PTR(-EINVAL);
+ }
+ if (fwnode_property_read_u32(child, "reg", &ch->reg)) {
+ dev_err(dev, "channel without reg\n");
+ fwnode_handle_put(child);
+ return ERR_PTR(-EINVAL);
+ }
+ if (fwnode_property_read_u32(child, "gw,mode", &ch->mode)) {
+ dev_err(dev, "channel without mode\n");
+ fwnode_handle_put(child);
+ return ERR_PTR(-EINVAL);
+ }
+ if (ch->mode > mode_max) {
+ dev_err(dev, "invalid channel mode\n");
+ fwnode_handle_put(child);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!fwnode_property_read_u32(child,
+ "gw,voltage-offset-microvolt",
+ &ch->mvoffset))
+ ch->mvoffset /= 1000;
+ fwnode_property_read_u32_array(child,
+ "gw,voltage-divider-ohms",
+ ch->vdiv, ARRAY_SIZE(ch->vdiv));
+ ch++;
+ }
+
+ return pdata;
+}
+
+static int gsc_hwmon_probe(struct platform_device *pdev)
+{
+ struct gsc_dev *gsc = dev_get_drvdata(pdev->dev.parent);
+ struct device *dev = &pdev->dev;
+ struct device *hwmon_dev;
+ struct gsc_hwmon_platform_data *pdata = dev_get_platdata(dev);
+ struct gsc_hwmon_data *hwmon;
+ const struct attribute_group **groups;
+ int i, i_in, i_temp;
+
+ if (!pdata) {
+ pdata = gsc_hwmon_get_devtree_pdata(dev);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
+ }
+
+ hwmon = devm_kzalloc(dev, sizeof(*hwmon), GFP_KERNEL);
+ if (!hwmon)
+ return -ENOMEM;
+ hwmon->gsc = gsc;
+ hwmon->pdata = pdata;
+
+ hwmon->regmap = devm_regmap_init(dev, &gsc_hwmon_regmap_bus,
+ gsc->i2c_hwmon,
+ &gsc_hwmon_regmap_config);
+ if (IS_ERR(hwmon->regmap))
+ return PTR_ERR(hwmon->regmap);
+
+ for (i = 0, i_in = 0, i_temp = 0; i < hwmon->pdata->nchannels; i++) {
+ const struct gsc_hwmon_channel *ch = &pdata->channels[i];
+
+ switch (ch->mode) {
+ case mode_temperature:
+ if (i_temp == GSC_HWMON_MAX_TEMP_CH) {
+ dev_err(gsc->dev, "too many temp channels\n");
+ return -EINVAL;
+ }
+ hwmon->temp_ch[i_temp] = ch;
+ hwmon->temp_config[i_temp] = HWMON_T_INPUT |
+ HWMON_T_LABEL;
+ i_temp++;
+ break;
+ case mode_voltage:
+ case mode_voltage_raw:
+ if (i_in == GSC_HWMON_MAX_IN_CH) {
+ dev_err(gsc->dev, "too many input channels\n");
+ return -EINVAL;
+ }
+ hwmon->in_ch[i_in] = ch;
+ hwmon->in_config[i_in] =
+ HWMON_I_INPUT | HWMON_I_LABEL;
+ i_in++;
+ break;
+ default:
+ dev_err(gsc->dev, "invalid mode: %d\n", ch->mode);
+ return -EINVAL;
+ }
+ }
+
+ /* setup config structures */
+ hwmon->chip.ops = &gsc_hwmon_ops;
+ hwmon->chip.info = hwmon->info;
+ hwmon->info[0] = &hwmon->temp_info;
+ hwmon->info[1] = &hwmon->in_info;
+ hwmon->temp_info.type = hwmon_temp;
+ hwmon->temp_info.config = hwmon->temp_config;
+ hwmon->in_info.type = hwmon_in;
+ hwmon->in_info.config = hwmon->in_config;
+
+ groups = pdata->fan_base ? gsc_hwmon_groups : NULL;
+ hwmon_dev = devm_hwmon_device_register_with_info(dev,
+ KBUILD_MODNAME, hwmon,
+ &hwmon->chip, groups);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static const struct of_device_id gsc_hwmon_of_match[] = {
+ { .compatible = "gw,gsc-adc", },
+ {}
+};
+
+static struct platform_driver gsc_hwmon_driver = {
+ .driver = {
+ .name = "gsc-hwmon",
+ .of_match_table = gsc_hwmon_of_match,
+ },
+ .probe = gsc_hwmon_probe,
+};
+
+module_platform_driver(gsc_hwmon_driver);
+
+MODULE_AUTHOR("Tim Harvey <tharvey@gateworks.com>");
+MODULE_DESCRIPTION("GSC hardware monitor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index 6a30fb453f7a..3f596a5328da 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -15,6 +15,7 @@
#include <linux/gfp.h>
#include <linux/hwmon.h>
#include <linux/idr.h>
+#include <linux/list.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
@@ -31,7 +32,7 @@ struct hwmon_device {
const char *name;
struct device dev;
const struct hwmon_chip_info *chip;
-
+ struct list_head tzdata;
struct attribute_group group;
const struct attribute_group **groups;
};
@@ -55,12 +56,12 @@ struct hwmon_device_attribute {
/*
* Thermal zone information
- * In addition to the reference to the hwmon device,
- * also provides the sensor index.
*/
struct hwmon_thermal_data {
+ struct list_head node; /* hwmon tzdata list entry */
struct device *dev; /* Reference to hwmon device */
int index; /* sensor index */
+ struct thermal_zone_device *tzd;/* thermal zone device */
};
static ssize_t
@@ -156,10 +157,17 @@ static const struct thermal_zone_of_device_ops hwmon_thermal_ops = {
.get_temp = hwmon_thermal_get_temp,
};
+static void hwmon_thermal_remove_sensor(void *data)
+{
+ list_del(data);
+}
+
static int hwmon_thermal_add_sensor(struct device *dev, int index)
{
+ struct hwmon_device *hwdev = to_hwmon_device(dev);
struct hwmon_thermal_data *tdata;
struct thermal_zone_device *tzd;
+ int err;
tdata = devm_kzalloc(dev, sizeof(*tdata), GFP_KERNEL);
if (!tdata)
@@ -177,13 +185,68 @@ static int hwmon_thermal_add_sensor(struct device *dev, int index)
if (IS_ERR(tzd) && (PTR_ERR(tzd) != -ENODEV))
return PTR_ERR(tzd);
+ err = devm_add_action(dev, hwmon_thermal_remove_sensor, &tdata->node);
+ if (err)
+ return err;
+
+ tdata->tzd = tzd;
+ list_add(&tdata->node, &hwdev->tzdata);
+
return 0;
}
+
+static int hwmon_thermal_register_sensors(struct device *dev)
+{
+ struct hwmon_device *hwdev = to_hwmon_device(dev);
+ const struct hwmon_chip_info *chip = hwdev->chip;
+ const struct hwmon_channel_info **info = chip->info;
+ void *drvdata = dev_get_drvdata(dev);
+ int i;
+
+ for (i = 1; info[i]; i++) {
+ int j;
+
+ if (info[i]->type != hwmon_temp)
+ continue;
+
+ for (j = 0; info[i]->config[j]; j++) {
+ int err;
+
+ if (!(info[i]->config[j] & HWMON_T_INPUT) ||
+ !chip->ops->is_visible(drvdata, hwmon_temp,
+ hwmon_temp_input, j))
+ continue;
+
+ err = hwmon_thermal_add_sensor(dev, j);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static void hwmon_thermal_notify(struct device *dev, int index)
+{
+ struct hwmon_device *hwdev = to_hwmon_device(dev);
+ struct hwmon_thermal_data *tzdata;
+
+ list_for_each_entry(tzdata, &hwdev->tzdata, node) {
+ if (tzdata->index == index) {
+ thermal_zone_device_update(tzdata->tzd,
+ THERMAL_EVENT_UNSPECIFIED);
+ }
+ }
+}
+
#else
-static int hwmon_thermal_add_sensor(struct device *dev, int index)
+static int hwmon_thermal_register_sensors(struct device *dev)
{
return 0;
}
+
+static void hwmon_thermal_notify(struct device *dev, int index) { }
+
#endif /* IS_REACHABLE(CONFIG_THERMAL) && ... */
static int hwmon_attr_base(enum hwmon_sensor_types type)
@@ -511,6 +574,35 @@ static const int __templates_size[] = {
[hwmon_intrusion] = ARRAY_SIZE(hwmon_intrusion_attr_templates),
};
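+/*
+ * For example (illustrative): hwmon_notify_event(hwmon_dev, hwmon_temp,
+ * hwmon_temp_max_alarm, 0) generates a sysfs notification and a uevent for
+ * the "temp1_max_alarm" attribute and, since the type is hwmon_temp, also
+ * triggers an update of any bound thermal zone for that channel.
+ */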
+int hwmon_notify_event(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ char sattr[MAX_SYSFS_ATTR_NAME_LENGTH];
+ const char * const *templates;
+ const char *template;
+ int base;
+
+ if (type >= ARRAY_SIZE(__templates))
+ return -EINVAL;
+ if (attr >= __templates_size[type])
+ return -EINVAL;
+
+ templates = __templates[type];
+ template = templates[attr];
+
+ base = hwmon_attr_base(type);
+
+ scnprintf(sattr, MAX_SYSFS_ATTR_NAME_LENGTH, template, base + channel);
+ sysfs_notify(&dev->kobj, NULL, sattr);
+ kobject_uevent(&dev->kobj, KOBJ_CHANGE);
+
+ if (type == hwmon_temp)
+ hwmon_thermal_notify(dev, channel);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hwmon_notify_event);
+
static int hwmon_num_channel_attrs(const struct hwmon_channel_info *info)
{
int i, n;
@@ -596,7 +688,7 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
{
struct hwmon_device *hwdev;
struct device *hdev;
- int i, j, err, id;
+ int i, err, id;
/* Complain about invalid characters in hwmon name attribute */
if (name && (!strlen(name) || strpbrk(name, "-* \t\n")))
@@ -661,33 +753,19 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
if (err)
goto free_hwmon;
+ INIT_LIST_HEAD(&hwdev->tzdata);
+
if (dev && dev->of_node && chip && chip->ops->read &&
chip->info[0]->type == hwmon_chip &&
(chip->info[0]->config[0] & HWMON_C_REGISTER_TZ)) {
- const struct hwmon_channel_info **info = chip->info;
-
- for (i = 1; info[i]; i++) {
- if (info[i]->type != hwmon_temp)
- continue;
-
- for (j = 0; info[i]->config[j]; j++) {
- if (!chip->ops->is_visible(drvdata, hwmon_temp,
- hwmon_temp_input, j))
- continue;
- if (info[i]->config[j] & HWMON_T_INPUT) {
- err = hwmon_thermal_add_sensor(hdev, j);
- if (err) {
- device_unregister(hdev);
- /*
- * Don't worry about hwdev;
- * hwmon_dev_release(), called
- * from device_unregister(),
- * will free it.
- */
- goto ida_remove;
- }
- }
- }
+ err = hwmon_thermal_register_sensors(hdev);
+ if (err) {
+ device_unregister(hdev);
+ /*
+ * Don't worry about hwdev; hwmon_dev_release(), called
+ * from device_unregister(), will free it.
+ */
+ goto ida_remove;
}
}
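
The hwmon_notify_event() helper exported above gives drivers a single call that notifies sysfs pollers, emits a KOBJ_CHANGE uevent, and, for temperature channels, forwards the event to any thermal zone tracked on the new tzdata list. A minimal sketch of how a driver's alert interrupt might use it, assuming the prototype is made available from <linux/hwmon.h> and that dev_id carries the device returned by [devm_]hwmon_device_register_with_info(); the handler name and channel number are illustrative only:

#include <linux/hwmon.h>
#include <linux/interrupt.h>

/*
 * Hypothetical threaded IRQ handler for a chip's ALERT output.
 * "dev_id" is assumed to be the hwmon device registered with
 * [devm_]hwmon_device_register_with_info().
 */
static irqreturn_t example_alert_irq(int irq, void *dev_id)
{
	struct device *hwdev = dev_id;

	/*
	 * Poke temp1_max_alarm: sysfs_notify() plus a uevent for user
	 * space, and a thermal_zone_device_update() if channel 0 is
	 * bound to a thermal zone.
	 */
	hwmon_notify_event(hwdev, hwmon_temp, hwmon_temp_max_alarm, 0);

	return IRQ_HANDLED;
}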
diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c
index e9e78c0b7212..55d474ec7c35 100644
--- a/drivers/hwmon/ina2xx.c
+++ b/drivers/hwmon/ina2xx.c
@@ -74,6 +74,17 @@
#define INA226_READ_AVG(reg) (((reg) & INA226_AVG_RD_MASK) >> 9)
#define INA226_SHIFT_AVG(val) ((val) << 9)
+/* bit number of alert functions in Mask/Enable Register */
+#define INA226_SHUNT_OVER_VOLTAGE_BIT 15
+#define INA226_SHUNT_UNDER_VOLTAGE_BIT 14
+#define INA226_BUS_OVER_VOLTAGE_BIT 13
+#define INA226_BUS_UNDER_VOLTAGE_BIT 12
+#define INA226_POWER_OVER_LIMIT_BIT 11
+
+/* bit mask for alert config bits of Mask/Enable Register */
+#define INA226_ALERT_CONFIG_MASK 0xFC00
+#define INA226_ALERT_FUNCTION_FLAG BIT(4)
+
/* common attrs, ina226 attrs and NULL */
#define INA2XX_MAX_ATTRIBUTE_GROUPS 3
@@ -303,6 +314,145 @@ static ssize_t ina2xx_value_show(struct device *dev,
ina2xx_get_value(data, attr->index, regval));
}
+static int ina226_reg_to_alert(struct ina2xx_data *data, u8 bit, u16 regval)
+{
+ int reg;
+
+ switch (bit) {
+ case INA226_SHUNT_OVER_VOLTAGE_BIT:
+ case INA226_SHUNT_UNDER_VOLTAGE_BIT:
+ reg = INA2XX_SHUNT_VOLTAGE;
+ break;
+ case INA226_BUS_OVER_VOLTAGE_BIT:
+ case INA226_BUS_UNDER_VOLTAGE_BIT:
+ reg = INA2XX_BUS_VOLTAGE;
+ break;
+ case INA226_POWER_OVER_LIMIT_BIT:
+ reg = INA2XX_POWER;
+ break;
+ default:
+ /* programmer goofed */
+ WARN_ON_ONCE(1);
+ return 0;
+ }
+
+ return ina2xx_get_value(data, reg, regval);
+}
+
+/*
+ * Turns alert limit values into register values.
+ * Opposite of the formula in ina2xx_get_value().
+ */
+static s16 ina226_alert_to_reg(struct ina2xx_data *data, u8 bit, int val)
+{
+ switch (bit) {
+ case INA226_SHUNT_OVER_VOLTAGE_BIT:
+ case INA226_SHUNT_UNDER_VOLTAGE_BIT:
+ val *= data->config->shunt_div;
+ return clamp_val(val, SHRT_MIN, SHRT_MAX);
+ case INA226_BUS_OVER_VOLTAGE_BIT:
+ case INA226_BUS_UNDER_VOLTAGE_BIT:
+ val = (val * 1000) << data->config->bus_voltage_shift;
+ val = DIV_ROUND_CLOSEST(val, data->config->bus_voltage_lsb);
+ return clamp_val(val, 0, SHRT_MAX);
+ case INA226_POWER_OVER_LIMIT_BIT:
+ val = DIV_ROUND_CLOSEST(val, data->power_lsb_uW);
+ return clamp_val(val, 0, USHRT_MAX);
+ default:
+ /* programmer goofed */
+ WARN_ON_ONCE(1);
+ return 0;
+ }
+}
+
+static ssize_t ina226_alert_show(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct ina2xx_data *data = dev_get_drvdata(dev);
+ int regval;
+ int val = 0;
+ int ret;
+
+ mutex_lock(&data->config_lock);
+ ret = regmap_read(data->regmap, INA226_MASK_ENABLE, &regval);
+ if (ret)
+ goto abort;
+
+ if (regval & BIT(attr->index)) {
+ ret = regmap_read(data->regmap, INA226_ALERT_LIMIT, &regval);
+ if (ret)
+ goto abort;
+ val = ina226_reg_to_alert(data, attr->index, regval);
+ }
+
+ ret = snprintf(buf, PAGE_SIZE, "%d\n", val);
+abort:
+ mutex_unlock(&data->config_lock);
+ return ret;
+}
+
+static ssize_t ina226_alert_store(struct device *dev,
+ struct device_attribute *da,
+ const char *buf, size_t count)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct ina2xx_data *data = dev_get_drvdata(dev);
+ unsigned long val;
+ int ret;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Clear all alerts first to avoid accidentally triggering the ALERT
+ * pin due to the register write sequence. Then, only enable the alert
+ * if the value is non-zero.
+ */
+ mutex_lock(&data->config_lock);
+ ret = regmap_update_bits(data->regmap, INA226_MASK_ENABLE,
+ INA226_ALERT_CONFIG_MASK, 0);
+ if (ret < 0)
+ goto abort;
+
+ ret = regmap_write(data->regmap, INA226_ALERT_LIMIT,
+ ina226_alert_to_reg(data, attr->index, val));
+ if (ret < 0)
+ goto abort;
+
+ if (val != 0) {
+ ret = regmap_update_bits(data->regmap, INA226_MASK_ENABLE,
+ INA226_ALERT_CONFIG_MASK,
+ BIT(attr->index));
+ if (ret < 0)
+ goto abort;
+ }
+
+ ret = count;
+abort:
+ mutex_unlock(&data->config_lock);
+ return ret;
+}
+
+static ssize_t ina226_alarm_show(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct ina2xx_data *data = dev_get_drvdata(dev);
+ int regval;
+ int alarm = 0;
+ int ret;
+
+ ret = regmap_read(data->regmap, INA226_MASK_ENABLE, &regval);
+ if (ret)
+ return ret;
+
+ alarm = (regval & BIT(attr->index)) &&
+ (regval & INA226_ALERT_FUNCTION_FLAG);
+ return snprintf(buf, PAGE_SIZE, "%d\n", alarm);
+}
+
/*
* In order to keep calibration register value fixed, the product
* of current_lsb and shunt_resistor should also be fixed and equal
@@ -392,15 +542,38 @@ static ssize_t ina226_interval_show(struct device *dev,
/* shunt voltage */
static SENSOR_DEVICE_ATTR_RO(in0_input, ina2xx_value, INA2XX_SHUNT_VOLTAGE);
+/* shunt voltage over/under voltage alert setting and alarm */
+static SENSOR_DEVICE_ATTR_RW(in0_crit, ina226_alert,
+ INA226_SHUNT_OVER_VOLTAGE_BIT);
+static SENSOR_DEVICE_ATTR_RW(in0_lcrit, ina226_alert,
+ INA226_SHUNT_UNDER_VOLTAGE_BIT);
+static SENSOR_DEVICE_ATTR_RO(in0_crit_alarm, ina226_alarm,
+ INA226_SHUNT_OVER_VOLTAGE_BIT);
+static SENSOR_DEVICE_ATTR_RO(in0_lcrit_alarm, ina226_alarm,
+ INA226_SHUNT_UNDER_VOLTAGE_BIT);
/* bus voltage */
static SENSOR_DEVICE_ATTR_RO(in1_input, ina2xx_value, INA2XX_BUS_VOLTAGE);
+/* bus voltage over/under voltage alert setting and alarm */
+static SENSOR_DEVICE_ATTR_RW(in1_crit, ina226_alert,
+ INA226_BUS_OVER_VOLTAGE_BIT);
+static SENSOR_DEVICE_ATTR_RW(in1_lcrit, ina226_alert,
+ INA226_BUS_UNDER_VOLTAGE_BIT);
+static SENSOR_DEVICE_ATTR_RO(in1_crit_alarm, ina226_alarm,
+ INA226_BUS_OVER_VOLTAGE_BIT);
+static SENSOR_DEVICE_ATTR_RO(in1_lcrit_alarm, ina226_alarm,
+ INA226_BUS_UNDER_VOLTAGE_BIT);
/* calculated current */
static SENSOR_DEVICE_ATTR_RO(curr1_input, ina2xx_value, INA2XX_CURRENT);
/* calculated power */
static SENSOR_DEVICE_ATTR_RO(power1_input, ina2xx_value, INA2XX_POWER);
+/* over-limit power alert setting and alarm */
+static SENSOR_DEVICE_ATTR_RW(power1_crit, ina226_alert,
+ INA226_POWER_OVER_LIMIT_BIT);
+static SENSOR_DEVICE_ATTR_RO(power1_crit_alarm, ina226_alarm,
+ INA226_POWER_OVER_LIMIT_BIT);
/* shunt resistance */
static SENSOR_DEVICE_ATTR_RW(shunt_resistor, ina2xx_shunt, INA2XX_CALIBRATION);
@@ -423,6 +596,16 @@ static const struct attribute_group ina2xx_group = {
};
static struct attribute *ina226_attrs[] = {
+ &sensor_dev_attr_in0_crit.dev_attr.attr,
+ &sensor_dev_attr_in0_lcrit.dev_attr.attr,
+ &sensor_dev_attr_in0_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_in0_lcrit_alarm.dev_attr.attr,
+ &sensor_dev_attr_in1_crit.dev_attr.attr,
+ &sensor_dev_attr_in1_lcrit.dev_attr.attr,
+ &sensor_dev_attr_in1_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_in1_lcrit_alarm.dev_attr.attr,
+ &sensor_dev_attr_power1_crit.dev_attr.attr,
+ &sensor_dev_attr_power1_crit_alarm.dev_attr.attr,
&sensor_dev_attr_update_interval.dev_attr.attr,
NULL,
};
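
With the attributes added above, the INA226 alert limits and alarm flags appear as regular hwmon sysfs files, in millivolts for in0_*/in1_* and microwatts for power1_*. A small user-space sketch, assuming the device happens to be hwmon0 (the hwmon index depends on probe order):

#include <stdio.h>

/* Hypothetical sysfs path; the hwmon index varies per system. */
#define INA226_SYSFS "/sys/class/hwmon/hwmon0"

int main(void)
{
	FILE *f;
	int alarm;

	/* Arm the bus over-voltage alert at 13 V (in1_crit is in mV). */
	f = fopen(INA226_SYSFS "/in1_crit", "w");
	if (!f)
		return 1;
	fprintf(f, "13000\n");
	fclose(f);

	/* Read back the corresponding alarm flag (0 or 1). */
	f = fopen(INA226_SYSFS "/in1_crit_alarm", "r");
	if (!f)
		return 1;
	if (fscanf(f, "%d", &alarm) == 1)
		printf("bus over-voltage alarm: %d\n", alarm);
	fclose(f);

	return 0;
}

Writing 0 to a limit file disables that alert, matching the store path above, which only sets the Mask/Enable bit for non-zero values.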
diff --git a/drivers/hwmon/lm70.c b/drivers/hwmon/lm70.c
index 4122e59f0bb4..ae2b84263a44 100644
--- a/drivers/hwmon/lm70.c
+++ b/drivers/hwmon/lm70.c
@@ -25,7 +25,7 @@
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/of_device.h>
-
+#include <linux/acpi.h>
#define DRVNAME "lm70"
@@ -148,18 +148,50 @@ static const struct of_device_id lm70_of_ids[] = {
MODULE_DEVICE_TABLE(of, lm70_of_ids);
#endif
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id lm70_acpi_ids[] = {
+ {
+ .id = "LM000070",
+ .driver_data = LM70_CHIP_LM70,
+ },
+ {
+ .id = "TMP00121",
+ .driver_data = LM70_CHIP_TMP121,
+ },
+ {
+ .id = "LM000071",
+ .driver_data = LM70_CHIP_LM71,
+ },
+ {
+ .id = "LM000074",
+ .driver_data = LM70_CHIP_LM74,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, lm70_acpi_ids);
+#endif
+
static int lm70_probe(struct spi_device *spi)
{
- const struct of_device_id *match;
+ const struct of_device_id *of_match;
struct device *hwmon_dev;
struct lm70 *p_lm70;
int chip;
- match = of_match_device(lm70_of_ids, &spi->dev);
- if (match)
- chip = (int)(uintptr_t)match->data;
- else
- chip = spi_get_device_id(spi)->driver_data;
+ of_match = of_match_device(lm70_of_ids, &spi->dev);
+ if (of_match)
+ chip = (int)(uintptr_t)of_match->data;
+ else {
+#ifdef CONFIG_ACPI
+ const struct acpi_device_id *acpi_match;
+
+ acpi_match = acpi_match_device(lm70_acpi_ids, &spi->dev);
+ if (acpi_match)
+ chip = (int)(uintptr_t)acpi_match->driver_data;
+ else
+#endif
+ chip = spi_get_device_id(spi)->driver_data;
+ }
/* signaling is SPI_MODE_0 */
if (spi->mode & (SPI_CPOL | SPI_CPHA))
@@ -195,6 +227,7 @@ static struct spi_driver lm70_driver = {
.driver = {
.name = "lm70",
.of_match_table = of_match_ptr(lm70_of_ids),
+ .acpi_match_table = ACPI_PTR(lm70_acpi_ids),
},
.id_table = lm70_ids,
.probe = lm70_probe,
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index 5e6392294c03..ba0be48aeadd 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -797,8 +797,10 @@ static int lm75_detect(struct i2c_client *new_client,
/* First check for LM75A */
if (i2c_smbus_read_byte_data(new_client, 7) == LM75A_ID) {
- /* LM75A returns 0xff on unused registers so
- just to be sure we check for that too. */
+ /*
+ * LM75A returns 0xff on unused registers so
+ * just to be sure we check for that too.
+ */
if (i2c_smbus_read_byte_data(new_client, 4) != 0xff
|| i2c_smbus_read_byte_data(new_client, 5) != 0xff
|| i2c_smbus_read_byte_data(new_client, 6) != 0xff)
@@ -849,6 +851,7 @@ static int lm75_suspend(struct device *dev)
{
int status;
struct i2c_client *client = to_i2c_client(dev);
+
status = i2c_smbus_read_byte_data(client, LM75_REG_CONF);
if (status < 0) {
dev_dbg(&client->dev, "Can't read config? %d\n", status);
@@ -863,6 +866,7 @@ static int lm75_resume(struct device *dev)
{
int status;
struct i2c_client *client = to_i2c_client(dev);
+
status = i2c_smbus_read_byte_data(client, LM75_REG_CONF);
if (status < 0) {
dev_dbg(&client->dev, "Can't read config? %d\n", status);
diff --git a/drivers/hwmon/lm75.h b/drivers/hwmon/lm75.h
index b614e6328566..a398171162a8 100644
--- a/drivers/hwmon/lm75.h
+++ b/drivers/hwmon/lm75.h
@@ -1,17 +1,15 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- lm75.h - Part of lm_sensors, Linux kernel modules for hardware
- monitoring
- Copyright (c) 2003 Mark M. Hoffman <mhoffman@lightlink.com>
-
-*/
+ * lm75.h - Part of lm_sensors, Linux kernel modules for hardware monitoring
+ * Copyright (c) 2003 Mark M. Hoffman <mhoffman@lightlink.com>
+ */
/*
- This file contains common code for encoding/decoding LM75 type
- temperature readings, which are emulated by many of the chips
- we support. As the user is unlikely to load more than one driver
- which contains this code, we don't worry about the wasted space.
-*/
+ * This file contains common code for encoding/decoding LM75 type
+ * temperature readings, which are emulated by many of the chips
+ * we support. As the user is unlikely to load more than one driver
+ * which contains this code, we don't worry about the wasted space.
+ */
#include <linux/kernel.h>
@@ -20,18 +18,23 @@
#define LM75_TEMP_MAX 125000
#define LM75_SHUTDOWN 0x01
-/* TEMP: 0.001C/bit (-55C to +125C)
- REG: (0.5C/bit, two's complement) << 7 */
+/*
+ * TEMP: 0.001C/bit (-55C to +125C)
+ * REG: (0.5C/bit, two's complement) << 7
+ */
static inline u16 LM75_TEMP_TO_REG(long temp)
{
int ntemp = clamp_val(temp, LM75_TEMP_MIN, LM75_TEMP_MAX);
+
ntemp += (ntemp < 0 ? -250 : 250);
return (u16)((ntemp / 500) << 7);
}
static inline int LM75_TEMP_FROM_REG(u16 reg)
{
- /* use integer division instead of equivalent right shift to
- guarantee arithmetic shift and preserve the sign */
+ /*
+ * use integer division instead of equivalent right shift to
+ * guarantee arithmetic shift and preserve the sign
+ */
return ((s16)reg / 128) * 500;
}
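
As a concrete round trip through the helpers above: a requested limit of 25250 millidegrees C rounds to the nearest 0.5 degree step, encodes to 0x1980, and decodes back to 25500 millidegrees C. A stand-alone sketch with user-space stand-ins for the kernel helpers so the arithmetic can be checked in isolation (the function names are local copies, not the kernel symbols):

#include <assert.h>
#include <stdint.h>

#define LM75_TEMP_MIN	(-55000)
#define LM75_TEMP_MAX	125000

/* Local copy of LM75_TEMP_TO_REG(): clamp, round to 0.5 degC, shift. */
static uint16_t lm75_temp_to_reg(long temp)
{
	long ntemp = temp;

	if (ntemp < LM75_TEMP_MIN)
		ntemp = LM75_TEMP_MIN;
	if (ntemp > LM75_TEMP_MAX)
		ntemp = LM75_TEMP_MAX;
	ntemp += (ntemp < 0 ? -250 : 250);
	return (uint16_t)((ntemp / 500) << 7);
}

/* Local copy of LM75_TEMP_FROM_REG(): signed divide preserves the sign. */
static int lm75_temp_from_reg(uint16_t reg)
{
	return ((int16_t)reg / 128) * 500;
}

int main(void)
{
	/* 25250 mdegC -> 25500 / 500 = 51 -> 51 << 7 = 0x1980 */
	assert(lm75_temp_to_reg(25250) == 0x1980);
	assert(lm75_temp_from_reg(0x1980) == 25500);
	/* -25.5 degC is encoded as 0xe680 in two's complement */
	assert(lm75_temp_from_reg(0xe680) == -25500);
	return 0;
}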
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 9b3c9f390ef8..7bdc664af55b 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -35,6 +35,14 @@
* explicitly as max6659, or if its address is not 0x4c.
* These chips lack the remote temperature offset feature.
*
+ * This driver also supports the MAX6654 chip made by Maxim. This chip can
+ * be at 9 different addresses, similar to MAX6680/MAX6681. The MAX6654 is
+ * otherwise similar to MAX6657/MAX6658/MAX6659. Extended range is available
+ * by setting the configuration register accordingly, and is done during
+ * initialization. Extended precision is only available at conversion rates
+ * of 1 Hz and slower. Note that extended precision is not enabled by
+ * default, as this driver initializes all chips to 2 Hz by design.
+ *
* This driver also supports the MAX6646, MAX6647, MAX6648, MAX6649 and
* MAX6692 chips made by Maxim. These are again similar to the LM86,
* but they use unsigned temperature values and can report temperatures
@@ -94,8 +102,8 @@
* have address 0x4d.
* MAX6647 has address 0x4e.
* MAX6659 can have address 0x4c, 0x4d or 0x4e.
- * MAX6680 and MAX6681 can have address 0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b,
- * 0x4c, 0x4d or 0x4e.
+ * MAX6654, MAX6680, and MAX6681 can have address 0x18, 0x19, 0x1a, 0x29,
+ * 0x2a, 0x2b, 0x4c, 0x4d or 0x4e.
* SA56004 can have address 0x48 through 0x4F.
*/
@@ -104,7 +112,7 @@ static const unsigned short normal_i2c[] = {
0x4d, 0x4e, 0x4f, I2C_CLIENT_END };
enum chips { lm90, adm1032, lm99, lm86, max6657, max6659, adt7461, max6680,
- max6646, w83l771, max6696, sa56004, g781, tmp451 };
+ max6646, w83l771, max6696, sa56004, g781, tmp451, max6654 };
/*
* The LM90 registers
@@ -145,7 +153,7 @@ enum chips { lm90, adm1032, lm99, lm86, max6657, max6659, adt7461, max6680,
#define LM90_REG_R_TCRIT_HYST 0x21
#define LM90_REG_W_TCRIT_HYST 0x21
-/* MAX6646/6647/6649/6657/6658/6659/6695/6696 registers */
+/* MAX6646/6647/6649/6654/6657/6658/6659/6695/6696 registers */
#define MAX6657_REG_R_LOCAL_TEMPL 0x11
#define MAX6696_REG_R_STATUS2 0x12
@@ -209,6 +217,7 @@ static const struct i2c_device_id lm90_id[] = {
{ "max6646", max6646 },
{ "max6647", max6646 },
{ "max6649", max6646 },
+ { "max6654", max6654 },
{ "max6657", max6657 },
{ "max6658", max6657 },
{ "max6659", max6659 },
@@ -270,6 +279,10 @@ static const struct of_device_id __maybe_unused lm90_of_match[] = {
.data = (void *)max6646
},
{
+ .compatible = "dallas,max6654",
+ .data = (void *)max6654
+ },
+ {
.compatible = "dallas,max6657",
.data = (void *)max6657
},
@@ -367,6 +380,11 @@ static const struct lm90_params lm90_params[] = {
.max_convrate = 6,
.reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
},
+ [max6654] = {
+ .alert_alarms = 0x7c,
+ .max_convrate = 7,
+ .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
+ },
[max6657] = {
.flags = LM90_PAUSE_FOR_CONFIG,
.alert_alarms = 0x7c,
@@ -1557,6 +1575,16 @@ static int lm90_detect(struct i2c_client *client,
&& (config1 & 0x3f) == 0x00
&& convrate <= 0x07) {
name = "max6646";
+ } else
+ /*
+ * The chip_id of the MAX6654 holds the revision of the chip.
+ * The lowest 3 bits of the config1 register are unused and
+ * should return zero when read.
+ */
+ if (chip_id == 0x08
+ && (config1 & 0x07) == 0x00
+ && convrate <= 0x07) {
+ name = "max6654";
}
} else
if (address == 0x4C
@@ -1661,6 +1689,15 @@ static int lm90_init_client(struct i2c_client *client, struct lm90_data *data)
config |= 0x18;
/*
+ * Put MAX6654 into extended range (0x20, extend minimum range from
+ * 0 degrees to -64 degrees). Note that extended resolution is not
+ * possible on the MAX6654 unless conversion rate is set to 1 Hz or
+ * slower, which is intentionally not done by default.
+ */
+ if (data->kind == max6654)
+ config |= 0x20;
+
+ /*
* Select external channel 0 for max6695/96
*/
if (data->kind == max6696)
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index 7efa6bfef060..e7e1ddc1d631 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -2047,7 +2047,7 @@ store_temp_beep(struct device *dev, struct device_attribute *attr,
static umode_t nct6775_in_is_visible(struct kobject *kobj,
struct attribute *attr, int index)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct nct6775_data *data = dev_get_drvdata(dev);
int in = index / 5; /* voltage index */
@@ -2253,7 +2253,7 @@ store_fan_pulses(struct device *dev, struct device_attribute *attr,
static umode_t nct6775_fan_is_visible(struct kobject *kobj,
struct attribute *attr, int index)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct nct6775_data *data = dev_get_drvdata(dev);
int fan = index / 6; /* fan index */
int nr = index % 6; /* attribute index */
@@ -2440,7 +2440,7 @@ store_temp_type(struct device *dev, struct device_attribute *attr,
static umode_t nct6775_temp_is_visible(struct kobject *kobj,
struct attribute *attr, int index)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct nct6775_data *data = dev_get_drvdata(dev);
int temp = index / 10; /* temp index */
int nr = index % 10; /* attribute index */
@@ -3257,7 +3257,7 @@ store_auto_temp(struct device *dev, struct device_attribute *attr,
static umode_t nct6775_pwm_is_visible(struct kobject *kobj,
struct attribute *attr, int index)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct nct6775_data *data = dev_get_drvdata(dev);
int pwm = index / 36; /* pwm index */
int nr = index % 36; /* attribute index */
@@ -3459,7 +3459,7 @@ static SENSOR_DEVICE_ATTR(beep_enable, S_IWUSR | S_IRUGO, show_beep,
static umode_t nct6775_other_is_visible(struct kobject *kobj,
struct attribute *attr, int index)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct nct6775_data *data = dev_get_drvdata(dev);
if (index == 0 && !data->have_vid)
diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c
index 2e97e56c72c7..570df8eb5272 100644
--- a/drivers/hwmon/nct7802.c
+++ b/drivers/hwmon/nct7802.c
@@ -679,7 +679,7 @@ static struct attribute *nct7802_temp_attrs[] = {
static umode_t nct7802_temp_is_visible(struct kobject *kobj,
struct attribute *attr, int index)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct nct7802_data *data = dev_get_drvdata(dev);
unsigned int reg;
int err;
@@ -778,7 +778,7 @@ static struct attribute *nct7802_in_attrs[] = {
static umode_t nct7802_in_is_visible(struct kobject *kobj,
struct attribute *attr, int index)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct nct7802_data *data = dev_get_drvdata(dev);
unsigned int reg;
int err;
@@ -853,7 +853,7 @@ static struct attribute *nct7802_fan_attrs[] = {
static umode_t nct7802_fan_is_visible(struct kobject *kobj,
struct attribute *attr, int index)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct nct7802_data *data = dev_get_drvdata(dev);
int fan = index / 4; /* 4 attributes per fan */
unsigned int reg;
diff --git a/drivers/hwmon/nct7904.c b/drivers/hwmon/nct7904.c
index a7eb10d2a053..b0425694f702 100644
--- a/drivers/hwmon/nct7904.c
+++ b/drivers/hwmon/nct7904.c
@@ -8,6 +8,9 @@
* Copyright (c) 2019 Advantech
* Author: Amy.Shih <amy.shih@advantech.com.tw>
*
+ * Copyright (c) 2020 Advantech
+ * Author: Yuechao Zhao <yuechao.zhao@advantech.com.cn>
+ *
* Supports the following chips:
*
* Chip #vin #fan #pwm #temp #dts chip ID
@@ -20,6 +23,7 @@
#include <linux/i2c.h>
#include <linux/mutex.h>
#include <linux/hwmon.h>
+#include <linux/watchdog.h>
#define VENDOR_ID_REG 0x7A /* Any bank */
#define NUVOTON_ID 0x50
@@ -88,18 +92,42 @@
#define FANCTL1_FMR_REG 0x00 /* Bank 3; 1 reg per channel */
#define FANCTL1_OUT_REG 0x10 /* Bank 3; 1 reg per channel */
+#define WDT_LOCK_REG 0xE0 /* W/O Lock Watchdog Register */
+#define WDT_EN_REG 0xE1 /* R/O Watchdog Enable Register */
+#define WDT_STS_REG 0xE2 /* R/O Watchdog Status Register */
+#define WDT_TIMER_REG 0xE3 /* R/W Watchdog Timer Register */
+#define WDT_SOFT_EN 0x55 /* Enable soft watchdog timer */
+#define WDT_SOFT_DIS 0xAA /* Disable soft watchdog timer */
+
#define VOLT_MONITOR_MODE 0x0
#define THERMAL_DIODE_MODE 0x1
#define THERMISTOR_MODE 0x3
#define ENABLE_TSI BIT(1)
+#define WATCHDOG_TIMEOUT 1 /* 1 minute default timeout */
+
+/* The timeout range is 1-255 minutes */
+#define MIN_TIMEOUT (1 * 60)
+#define MAX_TIMEOUT (255 * 60)
+
+static int timeout;
+module_param(timeout, int, 0);
+MODULE_PARM_DESC(timeout, "Watchdog timeout in minutes. 1 <= timeout <= 255, default="
+ __MODULE_STRING(WATCHDOG_TIMEOUT) ".");
+
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
static const unsigned short normal_i2c[] = {
0x2d, 0x2e, I2C_CLIENT_END
};
struct nct7904_data {
struct i2c_client *client;
+ struct watchdog_device wdt;
struct mutex bank_lock;
int bank_sel;
u32 fanin_mask;
@@ -892,6 +920,95 @@ static const struct hwmon_chip_info nct7904_chip_info = {
.info = nct7904_info,
};
+/*
+ * Watchdog Function
+ */
+static int nct7904_wdt_start(struct watchdog_device *wdt)
+{
+ struct nct7904_data *data = watchdog_get_drvdata(wdt);
+
+ /* Enable soft watchdog timer */
+ return nct7904_write_reg(data, BANK_0, WDT_LOCK_REG, WDT_SOFT_EN);
+}
+
+static int nct7904_wdt_stop(struct watchdog_device *wdt)
+{
+ struct nct7904_data *data = watchdog_get_drvdata(wdt);
+
+ return nct7904_write_reg(data, BANK_0, WDT_LOCK_REG, WDT_SOFT_DIS);
+}
+
+static int nct7904_wdt_set_timeout(struct watchdog_device *wdt,
+ unsigned int timeout)
+{
+ struct nct7904_data *data = watchdog_get_drvdata(wdt);
+ /*
+ * The NCT7904 is very special in watchdog function.
+ * Its minimum unit is minutes. And wdt->timeout needs
+ * to match the actual timeout selected. So, this needs
+ * to be: wdt->timeout = timeout / 60 * 60.
+ * For example, if the user configures a timeout of
+ * 119 seconds, the actual timeout will be 60 seconds.
+ * So, wdt->timeout must then be set to 60 seconds.
+ */
+ wdt->timeout = timeout / 60 * 60;
+
+ return nct7904_write_reg(data, BANK_0, WDT_TIMER_REG,
+ wdt->timeout / 60);
+}
+
+static int nct7904_wdt_ping(struct watchdog_device *wdt)
+{
+ /*
+ * Note:
+ * The NCT7904 does not support refreshing WDT_TIMER_REG while the
+ * watchdog is active, so disable the watchdog, feed it, then
+ * re-enable it.
+ */
+ struct nct7904_data *data = watchdog_get_drvdata(wdt);
+ int ret;
+
+ /* Disable soft watchdog timer */
+ ret = nct7904_write_reg(data, BANK_0, WDT_LOCK_REG, WDT_SOFT_DIS);
+ if (ret < 0)
+ return ret;
+
+ /* feed watchdog */
+ ret = nct7904_write_reg(data, BANK_0, WDT_TIMER_REG, wdt->timeout / 60);
+ if (ret < 0)
+ return ret;
+
+ /* Enable soft watchdog timer */
+ return nct7904_write_reg(data, BANK_0, WDT_LOCK_REG, WDT_SOFT_EN);
+}
+
+static unsigned int nct7904_wdt_get_timeleft(struct watchdog_device *wdt)
+{
+ struct nct7904_data *data = watchdog_get_drvdata(wdt);
+ int ret;
+
+ ret = nct7904_read_reg(data, BANK_0, WDT_TIMER_REG);
+ if (ret < 0)
+ return 0;
+
+ return ret * 60;
+}
+
+static const struct watchdog_info nct7904_wdt_info = {
+ .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING |
+ WDIOF_MAGICCLOSE,
+ .identity = "nct7904 watchdog",
+};
+
+static const struct watchdog_ops nct7904_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = nct7904_wdt_start,
+ .stop = nct7904_wdt_stop,
+ .ping = nct7904_wdt_ping,
+ .set_timeout = nct7904_wdt_set_timeout,
+ .get_timeleft = nct7904_wdt_get_timeleft,
+};
+
static int nct7904_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -1022,7 +1139,26 @@ static int nct7904_probe(struct i2c_client *client,
hwmon_dev =
devm_hwmon_device_register_with_info(dev, client->name, data,
&nct7904_chip_info, NULL);
- return PTR_ERR_OR_ZERO(hwmon_dev);
+ ret = PTR_ERR_OR_ZERO(hwmon_dev);
+ if (ret)
+ return ret;
+
+ /* Watchdog initialization */
+ data->wdt.ops = &nct7904_wdt_ops;
+ data->wdt.info = &nct7904_wdt_info;
+
+ data->wdt.timeout = WATCHDOG_TIMEOUT * 60; /* Set default timeout */
+ data->wdt.min_timeout = MIN_TIMEOUT;
+ data->wdt.max_timeout = MAX_TIMEOUT;
+ data->wdt.parent = &client->dev;
+
+ watchdog_init_timeout(&data->wdt, timeout * 60, &client->dev);
+ watchdog_set_nowayout(&data->wdt, nowayout);
+ watchdog_set_drvdata(&data->wdt, data);
+
+ watchdog_stop_on_unregister(&data->wdt);
+
+ return devm_watchdog_register_device(dev, &data->wdt);
}
static const struct i2c_device_id nct7904_id[] = {
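
Because the NCT7904 watchdog counts whole minutes, the set_timeout path above rounds the requested number of seconds down before programming WDT_TIMER_REG. A tiny stand-alone sketch of that rounding, mirroring the driver's arithmetic (plain user-space C, names local to the example):

#include <assert.h>

/* Mirrors nct7904_wdt_set_timeout(): effective timeout in seconds. */
static unsigned int nct7904_effective_timeout(unsigned int timeout)
{
	return timeout / 60 * 60;
}

int main(void)
{
	/* 119 s requested -> 60 s effective -> 1 written to WDT_TIMER_REG */
	assert(nct7904_effective_timeout(119) == 60);
	assert(nct7904_effective_timeout(119) / 60 == 1);
	/* 600 s requested -> 600 s effective -> register value 10 */
	assert(nct7904_effective_timeout(600) / 60 == 10);
	return 0;
}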
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index de12a565006d..a337195b1c39 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -146,6 +146,15 @@ config SENSORS_MAX16064
This driver can also be built as a module. If so, the module will
be called max16064.
+config SENSORS_MAX16601
+ tristate "Maxim MAX16601"
+ help
+ If you say yes here you get hardware monitoring support for Maxim
+ MAX16601.
+
+ This driver can also be built as a module. If so, the module will
+ be called max16601.
+
config SENSORS_MAX20730
tristate "Maxim MAX20730, MAX20734, MAX20743"
help
diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile
index 5feb45806123..c4b15db996ad 100644
--- a/drivers/hwmon/pmbus/Makefile
+++ b/drivers/hwmon/pmbus/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_SENSORS_LM25066) += lm25066.o
obj-$(CONFIG_SENSORS_LTC2978) += ltc2978.o
obj-$(CONFIG_SENSORS_LTC3815) += ltc3815.o
obj-$(CONFIG_SENSORS_MAX16064) += max16064.o
+obj-$(CONFIG_SENSORS_MAX16601) += max16601.o
obj-$(CONFIG_SENSORS_MAX20730) += max20730.o
obj-$(CONFIG_SENSORS_MAX20751) += max20751.o
obj-$(CONFIG_SENSORS_MAX31785) += max31785.o
diff --git a/drivers/hwmon/pmbus/max16601.c b/drivers/hwmon/pmbus/max16601.c
new file mode 100644
index 000000000000..51cdfaf9023c
--- /dev/null
+++ b/drivers/hwmon/pmbus/max16601.c
@@ -0,0 +1,314 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hardware monitoring driver for Maxim MAX16601
+ *
+ * Implementation notes:
+ *
+ * This chip supports two rails, VCORE and VSA. Telemetry information for the
+ * two rails is reported in two subsequent I2C addresses. The driver
+ * instantiates a dummy I2C client at the second I2C address to report
+ * information for the VSA rail in a single instance of the driver.
+ * Telemetry for the VSA rail is reported to the PMBus core in PMBus page 2.
+ *
+ * The chip reports input current using two separate methods. The input current
+ * reported with the standard READ_IIN command is derived from the output
+ * current. The first method is reported to the PMBus core with PMBus page 0,
+ * the second method is reported with PMBus page 1.
+ *
+ * The chip supports reading per-phase temperatures and per-phase input/output
+ * currents for VCORE. Telemetry is reported in vendor specific registers.
+ * The driver translates the vendor specific register values to PMBus standard
+ * register values and reports per-phase information in PMBus page 0.
+ *
+ * Copyright 2019, 2020 Google LLC.
+ */
+
+#include <linux/bits.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include "pmbus.h"
+
+#define REG_SETPT_DVID 0xd1
+#define DAC_10MV_MODE BIT(4)
+#define REG_IOUT_AVG_PK 0xee
+#define REG_IIN_SENSOR 0xf1
+#define REG_TOTAL_INPUT_POWER 0xf2
+#define REG_PHASE_ID 0xf3
+#define CORE_RAIL_INDICATOR BIT(7)
+#define REG_PHASE_REPORTING 0xf4
+
+struct max16601_data {
+ struct pmbus_driver_info info;
+ struct i2c_client *vsa;
+ int iout_avg_pkg;
+};
+
+#define to_max16601_data(x) container_of(x, struct max16601_data, info)
+
+static int max16601_read_byte(struct i2c_client *client, int page, int reg)
+{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ struct max16601_data *data = to_max16601_data(info);
+
+ if (page > 0) {
+ if (page == 2) /* VSA */
+ return i2c_smbus_read_byte_data(data->vsa, reg);
+ return -EOPNOTSUPP;
+ }
+ return -ENODATA;
+}
+
+static int max16601_read_word(struct i2c_client *client, int page, int phase,
+ int reg)
+{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ struct max16601_data *data = to_max16601_data(info);
+ u8 buf[I2C_SMBUS_BLOCK_MAX + 1];
+ int ret;
+
+ switch (page) {
+ case 0: /* VCORE */
+ if (phase == 0xff)
+ return -ENODATA;
+ switch (reg) {
+ case PMBUS_READ_IIN:
+ case PMBUS_READ_IOUT:
+ case PMBUS_READ_TEMPERATURE_1:
+ ret = i2c_smbus_write_byte_data(client, REG_PHASE_ID,
+ phase);
+ if (ret)
+ return ret;
+ ret = i2c_smbus_read_block_data(client,
+ REG_PHASE_REPORTING,
+ buf);
+ if (ret < 0)
+ return ret;
+ if (ret < 6)
+ return -EIO;
+ switch (reg) {
+ case PMBUS_READ_TEMPERATURE_1:
+ return buf[1] << 8 | buf[0];
+ case PMBUS_READ_IOUT:
+ return buf[3] << 8 | buf[2];
+ case PMBUS_READ_IIN:
+ return buf[5] << 8 | buf[4];
+ default:
+ break;
+ }
+ }
+ return -EOPNOTSUPP;
+ case 1: /* VCORE, read IIN/PIN from sensor element */
+ switch (reg) {
+ case PMBUS_READ_IIN:
+ return i2c_smbus_read_word_data(client, REG_IIN_SENSOR);
+ case PMBUS_READ_PIN:
+ return i2c_smbus_read_word_data(client,
+ REG_TOTAL_INPUT_POWER);
+ default:
+ break;
+ }
+ return -EOPNOTSUPP;
+ case 2: /* VSA */
+ switch (reg) {
+ case PMBUS_VIRT_READ_IOUT_MAX:
+ ret = i2c_smbus_read_word_data(data->vsa,
+ REG_IOUT_AVG_PK);
+ if (ret < 0)
+ return ret;
+ if (sign_extend32(ret, 10) >
+ sign_extend32(data->iout_avg_pkg, 10))
+ data->iout_avg_pkg = ret;
+ return data->iout_avg_pkg;
+ case PMBUS_VIRT_RESET_IOUT_HISTORY:
+ return 0;
+ case PMBUS_IOUT_OC_FAULT_LIMIT:
+ case PMBUS_IOUT_OC_WARN_LIMIT:
+ case PMBUS_OT_FAULT_LIMIT:
+ case PMBUS_OT_WARN_LIMIT:
+ case PMBUS_READ_IIN:
+ case PMBUS_READ_IOUT:
+ case PMBUS_READ_TEMPERATURE_1:
+ case PMBUS_STATUS_WORD:
+ return i2c_smbus_read_word_data(data->vsa, reg);
+ default:
+ return -EOPNOTSUPP;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int max16601_write_byte(struct i2c_client *client, int page, u8 reg)
+{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ struct max16601_data *data = to_max16601_data(info);
+
+ if (page == 2) {
+ if (reg == PMBUS_CLEAR_FAULTS)
+ return i2c_smbus_write_byte(data->vsa, reg);
+ return -EOPNOTSUPP;
+ }
+ return -ENODATA;
+}
+
+static int max16601_write_word(struct i2c_client *client, int page, int reg,
+ u16 value)
+{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ struct max16601_data *data = to_max16601_data(info);
+
+ switch (page) {
+ case 0: /* VCORE */
+ return -ENODATA;
+ case 1: /* VCORE IIN/PIN from sensor element */
+ default:
+ return -EOPNOTSUPP;
+ case 2: /* VSA */
+ switch (reg) {
+ case PMBUS_VIRT_RESET_IOUT_HISTORY:
+ data->iout_avg_pkg = 0xfc00;
+ return 0;
+ case PMBUS_IOUT_OC_FAULT_LIMIT:
+ case PMBUS_IOUT_OC_WARN_LIMIT:
+ case PMBUS_OT_FAULT_LIMIT:
+ case PMBUS_OT_WARN_LIMIT:
+ return i2c_smbus_write_word_data(data->vsa, reg, value);
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+}
+
+static int max16601_identify(struct i2c_client *client,
+ struct pmbus_driver_info *info)
+{
+ int reg;
+
+ reg = i2c_smbus_read_byte_data(client, REG_SETPT_DVID);
+ if (reg < 0)
+ return reg;
+ if (reg & DAC_10MV_MODE)
+ info->vrm_version[0] = vr13;
+ else
+ info->vrm_version[0] = vr12;
+
+ return 0;
+}
+
+static struct pmbus_driver_info max16601_info = {
+ .pages = 3,
+ .format[PSC_VOLTAGE_IN] = linear,
+ .format[PSC_VOLTAGE_OUT] = vid,
+ .format[PSC_CURRENT_IN] = linear,
+ .format[PSC_CURRENT_OUT] = linear,
+ .format[PSC_TEMPERATURE] = linear,
+ .format[PSC_POWER] = linear,
+ .func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_IIN | PMBUS_HAVE_PIN |
+ PMBUS_HAVE_STATUS_INPUT |
+ PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
+ PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP |
+ PMBUS_HAVE_POUT | PMBUS_PAGE_VIRTUAL | PMBUS_PHASE_VIRTUAL,
+ .func[1] = PMBUS_HAVE_IIN | PMBUS_HAVE_PIN | PMBUS_PAGE_VIRTUAL,
+ .func[2] = PMBUS_HAVE_IIN | PMBUS_HAVE_STATUS_INPUT |
+ PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP | PMBUS_PAGE_VIRTUAL,
+ .phases[0] = 8,
+ .pfunc[0] = PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT | PMBUS_HAVE_TEMP,
+ .pfunc[1] = PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT,
+ .pfunc[2] = PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT | PMBUS_HAVE_TEMP,
+ .pfunc[3] = PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT,
+ .pfunc[4] = PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT | PMBUS_HAVE_TEMP,
+ .pfunc[5] = PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT,
+ .pfunc[6] = PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT | PMBUS_HAVE_TEMP,
+ .pfunc[7] = PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT,
+ .identify = max16601_identify,
+ .read_byte_data = max16601_read_byte,
+ .read_word_data = max16601_read_word,
+ .write_byte = max16601_write_byte,
+ .write_word_data = max16601_write_word,
+};
+
+static void max16601_remove(void *_data)
+{
+ struct max16601_data *data = _data;
+
+ i2c_unregister_device(data->vsa);
+}
+
+static int max16601_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ u8 buf[I2C_SMBUS_BLOCK_MAX + 1];
+ struct max16601_data *data;
+ int ret;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_READ_BYTE_DATA |
+ I2C_FUNC_SMBUS_READ_BLOCK_DATA))
+ return -ENODEV;
+
+ ret = i2c_smbus_read_block_data(client, PMBUS_IC_DEVICE_ID, buf);
+ if (ret < 0)
+ return -ENODEV;
+
+ /* PMBUS_IC_DEVICE_ID is expected to return "MAX16601y.xx" */
+ if (ret < 11 || strncmp(buf, "MAX16601", 8)) {
+ buf[ret] = '\0';
+ dev_err(dev, "Unsupported chip '%s'\n", buf);
+ return -ENODEV;
+ }
+
+ ret = i2c_smbus_read_byte_data(client, REG_PHASE_ID);
+ if (ret < 0)
+ return ret;
+ if (!(ret & CORE_RAIL_INDICATOR)) {
+ dev_err(dev,
+ "Driver must be instantiated on CORE rail I2C address\n");
+ return -ENODEV;
+ }
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->iout_avg_pkg = 0xfc00;
+ data->vsa = i2c_new_dummy_device(client->adapter, client->addr + 1);
+ if (IS_ERR(data->vsa)) {
+ dev_err(dev, "Failed to register VSA client\n");
+ return PTR_ERR(data->vsa);
+ }
+ ret = devm_add_action_or_reset(dev, max16601_remove, data);
+ if (ret)
+ return ret;
+
+ data->info = max16601_info;
+
+ return pmbus_do_probe(client, id, &data->info);
+}
+
+static const struct i2c_device_id max16601_id[] = {
+ {"max16601", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, max16601_id);
+
+static struct i2c_driver max16601_driver = {
+ .driver = {
+ .name = "max16601",
+ },
+ .probe = max16601_probe,
+ .remove = pmbus_do_remove,
+ .id_table = max16601_id,
+};
+
+module_i2c_driver(max16601_driver);
+
+MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
+MODULE_DESCRIPTION("PMBus driver for Maxim MAX16601");
+MODULE_LICENSE("GPL v2");
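
The max16601 driver above carries no OF or ACPI match table, so the device is normally instantiated from board code or via the i2c sysfs new_device interface using the "max16601" id it registers. A minimal kernel-side sketch under assumed values (the adapter pointer and the 7-bit address 0x73 are examples only; the driver itself creates the dummy VSA client at addr + 1):

#include <linux/i2c.h>

/*
 * Hypothetical helper in board/platform code: attach a MAX16601 at its
 * CORE-rail address on the given adapter. 0x73 is an example address.
 */
static struct i2c_client *example_attach_max16601(struct i2c_adapter *adap)
{
	struct i2c_board_info info = {
		I2C_BOARD_INFO("max16601", 0x73),
	};

	return i2c_new_client_device(adap, &info);
}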
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index 8d321bf7d15b..a420877ba533 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -109,8 +109,8 @@ struct pmbus_data {
bool has_status_word; /* device uses STATUS_WORD register */
int (*read_status)(struct i2c_client *client, int page);
- u8 currpage;
- u8 currphase; /* current phase, 0xff for all */
+ s16 currpage; /* current page, -1 for unknown/unset */
+ s16 currphase; /* current phase, 0xff for all, -1 for unknown/unset */
};
struct pmbus_debugfs_entry {
@@ -2529,8 +2529,8 @@ int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id,
if (pdata)
data->flags = pdata->flags;
data->info = info;
- data->currpage = 0xff;
- data->currphase = 0xfe;
+ data->currpage = -1;
+ data->currphase = -1;
ret = pmbus_init_common(client, data, info);
if (ret < 0)
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig
index 83e841be1081..02dbb5ca3bcf 100644
--- a/drivers/hwtracing/coresight/Kconfig
+++ b/drivers/hwtracing/coresight/Kconfig
@@ -107,7 +107,7 @@ config CORESIGHT_CPU_DEBUG
can quickly get to know program counter (PC), secure state,
exception level, etc. Before use debugging functionality, platform
needs to ensure the clock domain and power domain are enabled
- properly, please refer Documentation/trace/coresight-cpu-debug.rst
+ properly, please refer Documentation/trace/coresight/coresight-cpu-debug.rst
for detailed description and the example for usage.
config CORESIGHT_CTI
diff --git a/drivers/hwtracing/coresight/Makefile b/drivers/hwtracing/coresight/Makefile
index 0e3e72f0f510..19497d1d92bf 100644
--- a/drivers/hwtracing/coresight/Makefile
+++ b/drivers/hwtracing/coresight/Makefile
@@ -2,7 +2,8 @@
#
# Makefile for CoreSight drivers.
#
-obj-$(CONFIG_CORESIGHT) += coresight.o coresight-etm-perf.o coresight-platform.o
+obj-$(CONFIG_CORESIGHT) += coresight.o coresight-etm-perf.o \
+ coresight-platform.o coresight-sysfs.o
obj-$(CONFIG_CORESIGHT_LINK_AND_SINK_TMC) += coresight-tmc.o \
coresight-tmc-etf.o \
coresight-tmc-etr.o
diff --git a/drivers/hwtracing/coresight/coresight-cti-platform.c b/drivers/hwtracing/coresight/coresight-cti-platform.c
index 2fdaeec80ee5..98f830c6ed50 100644
--- a/drivers/hwtracing/coresight/coresight-cti-platform.c
+++ b/drivers/hwtracing/coresight/coresight-cti-platform.c
@@ -2,11 +2,17 @@
/*
* Copyright (c) 2019, The Linaro Limited. All rights reserved.
*/
+#include <linux/coresight.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/property.h>
+#include <linux/slab.h>
#include <dt-bindings/arm/coresight-cti-dt.h>
-#include <linux/of.h>
#include "coresight-cti.h"
+#include "coresight-priv.h"
/* Number of CTI signals in the v8 architecturally defined connection */
#define NR_V8PE_IN_SIGS 2
@@ -429,8 +435,7 @@ static int cti_plat_create_impdef_connections(struct device *dev,
}
/* get the hardware configuration & connection data. */
-int cti_plat_get_hw_data(struct device *dev,
- struct cti_drvdata *drvdata)
+static int cti_plat_get_hw_data(struct device *dev, struct cti_drvdata *drvdata)
{
int rc = 0;
struct cti_device *cti_dev = &drvdata->ctidev;
diff --git a/drivers/hwtracing/coresight/coresight-cti-sysfs.c b/drivers/hwtracing/coresight/coresight-cti-sysfs.c
index 1f8fb7c15e80..392757f3a019 100644
--- a/drivers/hwtracing/coresight/coresight-cti-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-cti-sysfs.c
@@ -4,7 +4,13 @@
* Author: Mike Leach <mike.leach@linaro.org>
*/
+#include <linux/atomic.h>
#include <linux/coresight.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/sysfs.h>
#include "coresight-cti.h"
@@ -1036,8 +1042,8 @@ static int cti_create_con_sysfs_attr(struct device *dev,
enum cti_conn_attr_type attr_type,
int attr_idx)
{
- struct dev_ext_attribute *eattr = 0;
- char *name = 0;
+ struct dev_ext_attribute *eattr;
+ char *name;
eattr = devm_kzalloc(dev, sizeof(struct dev_ext_attribute),
GFP_KERNEL);
@@ -1139,7 +1145,7 @@ static int cti_create_con_attr_set(struct device *dev, int con_idx,
}
/* create the array of group pointers for the CTI sysfs groups */
-int cti_create_cons_groups(struct device *dev, struct cti_device *ctidev)
+static int cti_create_cons_groups(struct device *dev, struct cti_device *ctidev)
{
int nr_groups;
@@ -1156,8 +1162,8 @@ int cti_create_cons_groups(struct device *dev, struct cti_device *ctidev)
int cti_create_cons_sysfs(struct device *dev, struct cti_drvdata *drvdata)
{
struct cti_device *ctidev = &drvdata->ctidev;
- int err = 0, con_idx = 0, i;
- struct cti_trig_con *tc = NULL;
+ int err, con_idx = 0, i;
+ struct cti_trig_con *tc;
err = cti_create_cons_groups(dev, ctidev);
if (err)
diff --git a/drivers/hwtracing/coresight/coresight-cti.c b/drivers/hwtracing/coresight/coresight-cti.c
index aa6e0249bd70..40387d58c8e7 100644
--- a/drivers/hwtracing/coresight/coresight-cti.c
+++ b/drivers/hwtracing/coresight/coresight-cti.c
@@ -4,7 +4,22 @@
* Author: Mike Leach <mike.leach@linaro.org>
*/
+#include <linux/amba/bus.h>
+#include <linux/atomic.h>
+#include <linux/bits.h>
+#include <linux/coresight.h>
+#include <linux/cpu_pm.h>
+#include <linux/cpuhotplug.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/pm_runtime.h>
#include <linux/property.h>
+#include <linux/spinlock.h>
+
+#include "coresight-priv.h"
#include "coresight-cti.h"
/**
@@ -19,7 +34,7 @@
*/
/* net of CTI devices connected via CTM */
-LIST_HEAD(ect_net);
+static LIST_HEAD(ect_net);
/* protect the list */
static DEFINE_MUTEX(ect_mutex);
@@ -27,6 +42,12 @@ static DEFINE_MUTEX(ect_mutex);
#define csdev_to_cti_drvdata(csdev) \
dev_get_drvdata(csdev->dev.parent)
+/* power management handling */
+static int nr_cti_cpu;
+
+/* quick lookup list for CPU bound CTIs when power handling */
+static struct cti_drvdata *cti_cpu_drvdata[NR_CPUS];
+
/*
* CTI naming. CTI bound to cores will have the name cti_cpu<N> where
* N is the CPU ID. System CTIs will have the name cti_sys<I> where I
@@ -116,6 +137,35 @@ cti_err_not_enabled:
return rc;
}
+/* re-enable CTI on CPU when using CPU hotplug */
+static void cti_cpuhp_enable_hw(struct cti_drvdata *drvdata)
+{
+ struct cti_config *config = &drvdata->config;
+ struct device *dev = &drvdata->csdev->dev;
+
+ pm_runtime_get_sync(dev->parent);
+ spin_lock(&drvdata->spinlock);
+ config->hw_powered = true;
+
+ /* no need to do anything if no enable request */
+ if (!atomic_read(&drvdata->config.enable_req_count))
+ goto cti_hp_not_enabled;
+
+ /* try to claim the device */
+ if (coresight_claim_device(drvdata->base))
+ goto cti_hp_not_enabled;
+
+ cti_write_all_hw_regs(drvdata);
+ config->hw_enabled = true;
+ spin_unlock(&drvdata->spinlock);
+ return;
+
+ /* did not re-enable due to no claim / no request */
+cti_hp_not_enabled:
+ spin_unlock(&drvdata->spinlock);
+ pm_runtime_put(dev->parent);
+}
+
/* disable hardware */
static int cti_disable_hw(struct cti_drvdata *drvdata)
{
@@ -442,6 +492,34 @@ int cti_channel_setop(struct device *dev, enum cti_chan_set_op op,
return err;
}
+static bool cti_add_sysfs_link(struct cti_drvdata *drvdata,
+ struct cti_trig_con *tc)
+{
+ struct coresight_sysfs_link link_info;
+ int link_err = 0;
+
+ link_info.orig = drvdata->csdev;
+ link_info.orig_name = tc->con_dev_name;
+ link_info.target = tc->con_dev;
+ link_info.target_name = dev_name(&drvdata->csdev->dev);
+
+ link_err = coresight_add_sysfs_link(&link_info);
+ if (link_err)
+ dev_warn(&drvdata->csdev->dev,
+ "Failed to set CTI sysfs link %s<=>%s\n",
+ link_info.orig_name, link_info.target_name);
+ return !link_err;
+}
+
+static void cti_remove_sysfs_link(struct cti_trig_con *tc)
+{
+ struct coresight_sysfs_link link_info;
+
+ link_info.orig_name = tc->con_dev_name;
+ link_info.target = tc->con_dev;
+ coresight_remove_sysfs_link(&link_info);
+}
+
/*
* Look for a matching connection device name in the list of connections.
* If found then swap in the csdev name, set trig con association pointer
@@ -452,6 +530,8 @@ cti_match_fixup_csdev(struct cti_device *ctidev, const char *node_name,
struct coresight_device *csdev)
{
struct cti_trig_con *tc;
+ struct cti_drvdata *drvdata = container_of(ctidev, struct cti_drvdata,
+ ctidev);
list_for_each_entry(tc, &ctidev->trig_cons, node) {
if (tc->con_dev_name) {
@@ -459,7 +539,12 @@ cti_match_fixup_csdev(struct cti_device *ctidev, const char *node_name,
/* match: so swap in csdev name & dev */
tc->con_dev_name = dev_name(&csdev->dev);
tc->con_dev = csdev;
- return true;
+ /* try to set sysfs link */
+ if (cti_add_sysfs_link(drvdata, tc))
+ return true;
+ /* link failed - remove CTI reference */
+ tc->con_dev = NULL;
+ break;
}
}
}
@@ -522,6 +607,7 @@ void cti_remove_assoc_from_csdev(struct coresight_device *csdev)
ctidev = &ctidrv->ctidev;
list_for_each_entry(tc, &ctidev->trig_cons, node) {
if (tc->con_dev == csdev->ect_dev) {
+ cti_remove_sysfs_link(tc);
tc->con_dev = NULL;
break;
}
@@ -543,10 +629,16 @@ static void cti_update_conn_xrefs(struct cti_drvdata *drvdata)
struct cti_device *ctidev = &drvdata->ctidev;
list_for_each_entry(tc, &ctidev->trig_cons, node) {
- if (tc->con_dev)
- /* set tc->con_dev->ect_dev */
- coresight_set_assoc_ectdev_mutex(tc->con_dev,
+ if (tc->con_dev) {
+ /* if we can set the sysfs link */
+ if (cti_add_sysfs_link(drvdata, tc))
+ /* set the CTI/csdev association */
+ coresight_set_assoc_ectdev_mutex(tc->con_dev,
drvdata->csdev);
+ else
+ /* otherwise remove reference from CTI */
+ tc->con_dev = NULL;
+ }
}
}
@@ -559,7 +651,113 @@ static void cti_remove_conn_xrefs(struct cti_drvdata *drvdata)
if (tc->con_dev) {
coresight_set_assoc_ectdev_mutex(tc->con_dev,
NULL);
+ cti_remove_sysfs_link(tc);
+ tc->con_dev = NULL;
+ }
+ }
+}
+
+/** cti PM callbacks **/
+static int cti_cpu_pm_notify(struct notifier_block *nb, unsigned long cmd,
+ void *v)
+{
+ struct cti_drvdata *drvdata;
+ unsigned int cpu = smp_processor_id();
+ int notify_res = NOTIFY_OK;
+
+ if (!cti_cpu_drvdata[cpu])
+ return NOTIFY_OK;
+
+ drvdata = cti_cpu_drvdata[cpu];
+
+ if (WARN_ON_ONCE(drvdata->ctidev.cpu != cpu))
+ return NOTIFY_BAD;
+
+ spin_lock(&drvdata->spinlock);
+
+ switch (cmd) {
+ case CPU_PM_ENTER:
+ /* CTI regs all static - we have a copy & nothing to save */
+ drvdata->config.hw_powered = false;
+ if (drvdata->config.hw_enabled)
+ coresight_disclaim_device(drvdata->base);
+ break;
+
+ case CPU_PM_ENTER_FAILED:
+ drvdata->config.hw_powered = true;
+ if (drvdata->config.hw_enabled) {
+ if (coresight_claim_device(drvdata->base))
+ drvdata->config.hw_enabled = false;
+ }
+ break;
+
+ case CPU_PM_EXIT:
+ /* write hardware registers to re-enable. */
+ drvdata->config.hw_powered = true;
+ drvdata->config.hw_enabled = false;
+
+ /* check enable reference count to enable HW */
+ if (atomic_read(&drvdata->config.enable_req_count)) {
+ /* check we can claim the device as we re-power */
+ if (coresight_claim_device(drvdata->base))
+ goto cti_notify_exit;
+
+ drvdata->config.hw_enabled = true;
+ cti_write_all_hw_regs(drvdata);
+ }
+ break;
+
+ default:
+ notify_res = NOTIFY_DONE;
+ break;
+ }
+
+cti_notify_exit:
+ spin_unlock(&drvdata->spinlock);
+ return notify_res;
+}
+
+static struct notifier_block cti_cpu_pm_nb = {
+ .notifier_call = cti_cpu_pm_notify,
+};
+
+/* CPU HP handlers */
+static int cti_starting_cpu(unsigned int cpu)
+{
+ struct cti_drvdata *drvdata = cti_cpu_drvdata[cpu];
+
+ if (!drvdata)
+ return 0;
+
+ cti_cpuhp_enable_hw(drvdata);
+ return 0;
+}
+
+static int cti_dying_cpu(unsigned int cpu)
+{
+ struct cti_drvdata *drvdata = cti_cpu_drvdata[cpu];
+
+ if (!drvdata)
+ return 0;
+
+ spin_lock(&drvdata->spinlock);
+ drvdata->config.hw_powered = false;
+ coresight_disclaim_device(drvdata->base);
+ spin_unlock(&drvdata->spinlock);
+ return 0;
+}
+
+/* release PM registrations */
+static void cti_pm_release(struct cti_drvdata *drvdata)
+{
+ if (drvdata->ctidev.cpu >= 0) {
+ if (--nr_cti_cpu == 0) {
+ cpu_pm_unregister_notifier(&cti_cpu_pm_nb);
+
+ cpuhp_remove_state_nocalls(
+ CPUHP_AP_ARM_CORESIGHT_CTI_STARTING);
}
+ cti_cpu_drvdata[drvdata->ctidev.cpu] = NULL;
}
}
@@ -578,12 +776,12 @@ int cti_disable(struct coresight_device *csdev)
return cti_disable_hw(drvdata);
}
-const struct coresight_ops_ect cti_ops_ect = {
+static const struct coresight_ops_ect cti_ops_ect = {
.enable = cti_enable,
.disable = cti_disable,
};
-const struct coresight_ops cti_ops = {
+static const struct coresight_ops cti_ops = {
.ect_ops = &cti_ops_ect,
};
@@ -598,6 +796,7 @@ static void cti_device_release(struct device *dev)
mutex_lock(&ect_mutex);
cti_remove_conn_xrefs(drvdata);
+ cti_pm_release(drvdata);
/* remove from the list */
list_for_each_entry_safe(ect_item, ect_tmp, &ect_net, node) {
@@ -673,6 +872,24 @@ static int cti_probe(struct amba_device *adev, const struct amba_id *id)
goto err_out;
}
+ /* setup CPU power management handling for CPU bound CTI devices. */
+ if (drvdata->ctidev.cpu >= 0) {
+ cti_cpu_drvdata[drvdata->ctidev.cpu] = drvdata;
+ if (!nr_cti_cpu++) {
+ cpus_read_lock();
+ ret = cpuhp_setup_state_nocalls_cpuslocked(
+ CPUHP_AP_ARM_CORESIGHT_CTI_STARTING,
+ "arm/coresight_cti:starting",
+ cti_starting_cpu, cti_dying_cpu);
+
+ if (!ret)
+ ret = cpu_pm_register_notifier(&cti_cpu_pm_nb);
+ cpus_read_unlock();
+ if (ret)
+ goto err_out;
+ }
+ }
+
/* create dynamic attributes for connections */
ret = cti_create_cons_sysfs(dev, drvdata);
if (ret) {
@@ -711,6 +928,7 @@ static int cti_probe(struct amba_device *adev, const struct amba_id *id)
return 0;
err_out:
+ cti_pm_release(drvdata);
return ret;
}
diff --git a/drivers/hwtracing/coresight/coresight-cti.h b/drivers/hwtracing/coresight/coresight-cti.h
index 004df3ab9dd0..acf7b545e6b9 100644
--- a/drivers/hwtracing/coresight/coresight-cti.h
+++ b/drivers/hwtracing/coresight/coresight-cti.h
@@ -7,8 +7,14 @@
#ifndef _CORESIGHT_CORESIGHT_CTI_H
#define _CORESIGHT_CORESIGHT_CTI_H
-#include <asm/local.h>
+#include <linux/coresight.h>
+#include <linux/device.h>
+#include <linux/fwnode.h>
+#include <linux/list.h>
#include <linux/spinlock.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+
#include "coresight-priv.h"
/*
diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
index 3810290e6d07..03e3f2590191 100644
--- a/drivers/hwtracing/coresight/coresight-etb10.c
+++ b/drivers/hwtracing/coresight/coresight-etb10.c
@@ -717,7 +717,7 @@ static const struct attribute_group coresight_etb_mgmt_group = {
.name = "mgmt",
};
-const struct attribute_group *coresight_etb_groups[] = {
+static const struct attribute_group *coresight_etb_groups[] = {
&coresight_etb_group,
&coresight_etb_mgmt_group,
NULL,
diff --git a/drivers/hwtracing/coresight/coresight-etm3x.c b/drivers/hwtracing/coresight/coresight-etm3x.c
index e2cb6873c3f2..bf22dcfd3327 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x.c
@@ -504,7 +504,7 @@ static int etm_enable_perf(struct coresight_device *csdev,
static int etm_enable_sysfs(struct coresight_device *csdev)
{
struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- struct etm_enable_arg arg = { 0 };
+ struct etm_enable_arg arg = { };
int ret;
spin_lock(&drvdata->spinlock);
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
index ce41482431f9..b673e738bc9a 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
@@ -205,7 +205,7 @@ static ssize_t reset_store(struct device *dev,
* started state. ARM recommends start-stop logic is set before
* each trace run.
*/
- config->vinst_ctrl |= BIT(0);
+ config->vinst_ctrl = BIT(0);
if (drvdata->nr_addr_cmp == true) {
config->mode |= ETM_MODE_VIEWINST_STARTSTOP;
/* SSSTATUS, bit[9] */
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index a90d757f7043..747afc875f91 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -412,7 +412,7 @@ out:
static int etm4_enable_sysfs(struct coresight_device *csdev)
{
struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- struct etm4_enable_arg arg = { 0 };
+ struct etm4_enable_arg arg = { };
int ret;
spin_lock(&drvdata->spinlock);
@@ -791,7 +791,7 @@ static void etm4_set_default_config(struct etmv4_config *config)
config->ts_ctrl = 0x0;
/* TRCVICTLR::EVENT = 0x01, select the always on logic */
- config->vinst_ctrl |= BIT(0);
+ config->vinst_ctrl = BIT(0);
}
static u64 etm4_get_ns_access_type(struct etmv4_config *config)
@@ -894,17 +894,8 @@ static void etm4_set_start_stop_filter(struct etmv4_config *config,
static void etm4_set_default_filter(struct etmv4_config *config)
{
- u64 start, stop;
-
- /*
- * Configure address range comparator '0' to encompass all
- * possible addresses.
- */
- start = 0x0;
- stop = ~0x0;
-
- etm4_set_comparator_filter(config, start, stop,
- ETM_DEFAULT_ADDR_COMP);
+ /* Trace everything 'default' filter achieved by no filtering */
+ config->viiectlr = 0x0;
/*
* TRCVICTLR::SSSTATUS == 1, the start-stop logic is
@@ -925,11 +916,9 @@ static void etm4_set_default(struct etmv4_config *config)
/*
* Make default initialisation trace everything
*
- * Select the "always true" resource selector on the
- * "Enablign Event" line and configure address range comparator
- * '0' to trace all the possible address range. From there
- * configure the "include/exclude" engine to include address
- * range comparator '0'.
+ * This is done with a minimal default config sufficient to enable
+ * full instruction trace - the "trace everything" default filter is
+ * achieved by applying no filtering at all.
*/
etm4_set_default_config(config);
etm4_set_default_filter(config);
@@ -1527,6 +1516,7 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
return 0;
err_arch_supported:
+ etmdrvdata[drvdata->cpu] = NULL;
if (--etm4_count == 0) {
etm4_cpu_pm_unregister();
@@ -1552,10 +1542,13 @@ static const struct amba_id etm4_ids[] = {
CS_AMBA_ID(0x000bb95a), /* Cortex-A72 */
CS_AMBA_ID(0x000bb959), /* Cortex-A73 */
CS_AMBA_UCI_ID(0x000bb9da, uci_id_etm4),/* Cortex-A35 */
+ CS_AMBA_UCI_ID(0x000bbd0c, uci_id_etm4),/* Neoverse N1 */
CS_AMBA_UCI_ID(0x000f0205, uci_id_etm4),/* Qualcomm Kryo */
CS_AMBA_UCI_ID(0x000f0211, uci_id_etm4),/* Qualcomm Kryo */
- CS_AMBA_ID(0x000bb802), /* Qualcomm Kryo 385 Cortex-A55 */
- CS_AMBA_ID(0x000bb803), /* Qualcomm Kryo 385 Cortex-A75 */
+ CS_AMBA_UCI_ID(0x000bb802, uci_id_etm4),/* Qualcomm Kryo 385 Cortex-A55 */
+ CS_AMBA_UCI_ID(0x000bb803, uci_id_etm4),/* Qualcomm Kryo 385 Cortex-A75 */
+ CS_AMBA_UCI_ID(0x000bb805, uci_id_etm4),/* Qualcomm Kryo 4XX Cortex-A55 */
+ CS_AMBA_UCI_ID(0x000bb804, uci_id_etm4),/* Qualcomm Kryo 4XX Cortex-A76 */
CS_AMBA_UCI_ID(0x000cc0af, uci_id_etm4),/* Marvell ThunderX2 */
{},
};
diff --git a/drivers/hwtracing/coresight/coresight-platform.c b/drivers/hwtracing/coresight/coresight-platform.c
index 43418a2126ff..e4912abda3aa 100644
--- a/drivers/hwtracing/coresight/coresight-platform.c
+++ b/drivers/hwtracing/coresight/coresight-platform.c
@@ -87,6 +87,7 @@ static void of_coresight_get_ports_legacy(const struct device_node *node,
int *nr_inport, int *nr_outport)
{
struct device_node *ep = NULL;
+ struct of_endpoint endpoint;
int in = 0, out = 0;
do {
@@ -94,10 +95,16 @@ static void of_coresight_get_ports_legacy(const struct device_node *node,
if (!ep)
break;
- if (of_coresight_legacy_ep_is_input(ep))
- in++;
- else
- out++;
+ if (of_graph_parse_endpoint(ep, &endpoint))
+ continue;
+
+ if (of_coresight_legacy_ep_is_input(ep)) {
+ in = (endpoint.port + 1 > in) ?
+ endpoint.port + 1 : in;
+ } else {
+ out = (endpoint.port + 1) > out ?
+ endpoint.port + 1 : out;
+ }
} while (ep);
@@ -137,9 +144,16 @@ of_coresight_count_ports(struct device_node *port_parent)
{
int i = 0;
struct device_node *ep = NULL;
+ struct of_endpoint endpoint;
+
+ while ((ep = of_graph_get_next_endpoint(port_parent, ep))) {
+ /* Defer error handling to parsing */
+ if (of_graph_parse_endpoint(ep, &endpoint))
+ continue;
+ if (endpoint.port + 1 > i)
+ i = endpoint.port + 1;
+ }
- while ((ep = of_graph_get_next_endpoint(port_parent, ep)))
- i++;
return i;
}
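
The two hunks above stop counting graph endpoints one-by-one and instead derive the port count from the highest endpoint port number plus one, so sparse or duplicated port numbers still size the connection array correctly. A stand-alone sketch of the same max-plus-one reduction over hypothetical endpoint port numbers:

#include <assert.h>
#include <stddef.h>

/* Hypothetical endpoint port numbers: port 1 appears twice, while
 * port 0 and port 2 are not described at all.
 */
static const int example_ports[] = { 1, 1, 3 };

static int count_ports(const int *ports, size_t n)
{
	int nr = 0;
	size_t i;

	for (i = 0; i < n; i++)
		if (ports[i] + 1 > nr)
			nr = ports[i] + 1;
	return nr;
}

int main(void)
{
	/* 3 endpoints, but the connection array must cover ports 0..3 */
	assert(count_ports(example_ports, 3) == 4);
	return 0;
}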
@@ -191,14 +205,12 @@ static int of_coresight_get_cpu(struct device *dev)
* Parses the local port, remote device name and the remote port.
*
* Returns :
- * 1 - If the parsing is successful and a connection record
- * was created for an output connection.
* 0 - If the parsing completed without any fatal errors.
* -Errno - Fatal error, abort the scanning.
*/
static int of_coresight_parse_endpoint(struct device *dev,
struct device_node *ep,
- struct coresight_connection *conn)
+ struct coresight_platform_data *pdata)
{
int ret = 0;
struct of_endpoint endpoint, rendpoint;
@@ -206,6 +218,7 @@ static int of_coresight_parse_endpoint(struct device *dev,
struct device_node *rep = NULL;
struct device *rdev = NULL;
struct fwnode_handle *rdev_fwnode;
+ struct coresight_connection *conn;
do {
/* Parse the local port details */
@@ -232,6 +245,13 @@ static int of_coresight_parse_endpoint(struct device *dev,
break;
}
+ conn = &pdata->conns[endpoint.port];
+ if (conn->child_fwnode) {
+ dev_warn(dev, "Duplicate output port %d\n",
+ endpoint.port);
+ ret = -EINVAL;
+ break;
+ }
conn->outport = endpoint.port;
/*
* Hold the refcount to the target device. This could be
@@ -244,7 +264,6 @@ static int of_coresight_parse_endpoint(struct device *dev,
conn->child_fwnode = fwnode_handle_get(rdev_fwnode);
conn->child_port = rendpoint.port;
/* Connection record updated */
- ret = 1;
} while (0);
of_node_put(rparent);
@@ -258,7 +277,6 @@ static int of_get_coresight_platform_data(struct device *dev,
struct coresight_platform_data *pdata)
{
int ret = 0;
- struct coresight_connection *conn;
struct device_node *ep = NULL;
const struct device_node *parent = NULL;
bool legacy_binding = false;
@@ -287,8 +305,6 @@ static int of_get_coresight_platform_data(struct device *dev,
dev_warn_once(dev, "Uses obsolete Coresight DT bindings\n");
}
- conn = pdata->conns;
-
/* Iterate through each output port to discover topology */
while ((ep = of_graph_get_next_endpoint(parent, ep))) {
/*
@@ -300,15 +316,9 @@ static int of_get_coresight_platform_data(struct device *dev,
if (legacy_binding && of_coresight_legacy_ep_is_input(ep))
continue;
- ret = of_coresight_parse_endpoint(dev, ep, conn);
- switch (ret) {
- case 1:
- conn++; /* Fall through */
- case 0:
- break;
- default:
+ ret = of_coresight_parse_endpoint(dev, ep, pdata);
+ if (ret)
return ret;
- }
}
return 0;
@@ -501,7 +511,7 @@ static inline bool acpi_validate_dsd_graph(const union acpi_object *graph)
}
/* acpi_get_dsd_graph - Find the _DSD Graph property for the given device. */
-const union acpi_object *
+static const union acpi_object *
acpi_get_dsd_graph(struct acpi_device *adev)
{
int i;
@@ -564,7 +574,7 @@ acpi_validate_coresight_graph(const union acpi_object *cs_graph)
* Returns the pointer to the CoreSight Graph Package when found. Otherwise
* returns NULL.
*/
-const union acpi_object *
+static const union acpi_object *
acpi_get_coresight_graph(struct acpi_device *adev)
{
const union acpi_object *graph_list, *graph;
@@ -647,6 +657,16 @@ static int acpi_coresight_parse_link(struct acpi_device *adev,
* coresight_remove_match().
*/
conn->child_fwnode = fwnode_handle_get(&r_adev->fwnode);
+ } else if (dir == ACPI_CORESIGHT_LINK_SLAVE) {
+ /*
+ * We are only interested in the port number
+ * for the input ports at this component.
+ * Store the port number in child_port.
+ */
+ conn->child_port = fields[0].integer.value;
+ } else {
+ /* Invalid direction */
+ return -EINVAL;
}
return dir;
@@ -692,10 +712,20 @@ static int acpi_coresight_parse_graph(struct acpi_device *adev,
return dir;
if (dir == ACPI_CORESIGHT_LINK_MASTER) {
- pdata->nr_outport++;
+ if (ptr->outport > pdata->nr_outport)
+ pdata->nr_outport = ptr->outport;
ptr++;
} else {
- pdata->nr_inport++;
+ WARN_ON(pdata->nr_inport == ptr->child_port);
+ /*
+ * We do not track input port connections for a device.
+ * However, we need the highest input port number described,
+ * so record it now and reuse this connection record for a
+ * later output connection. Hence, do not advance the ptr
+ * for input connections.
+ */
+ if (ptr->child_port > pdata->nr_inport)
+ pdata->nr_inport = ptr->child_port;
}
}
@@ -704,8 +734,13 @@ static int acpi_coresight_parse_graph(struct acpi_device *adev,
return rc;
/* Copy the connection information to the final location */
- for (i = 0; i < pdata->nr_outport; i++)
- pdata->conns[i] = conns[i];
+ for (i = 0; conns + i < ptr; i++) {
+ int port = conns[i].outport;
+
+ /* Duplicate output port */
+ WARN_ON(pdata->conns[port].child_fwnode);
+ pdata->conns[port] = conns[i];
+ }
devm_kfree(&adev->dev, conns);
return 0;
@@ -822,7 +857,7 @@ coresight_get_platform_data(struct device *dev)
error:
if (!IS_ERR_OR_NULL(pdata))
/* Cleanup the connection information */
- coresight_release_platform_data(pdata);
+ coresight_release_platform_data(NULL, pdata);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(coresight_get_platform_data);
diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
index 890f9a5c97c6..36c943ae94d5 100644
--- a/drivers/hwtracing/coresight/coresight-priv.h
+++ b/drivers/hwtracing/coresight/coresight-priv.h
@@ -153,6 +153,15 @@ struct coresight_device *coresight_get_sink_by_id(u32 id);
struct list_head *coresight_build_path(struct coresight_device *csdev,
struct coresight_device *sink);
void coresight_release_path(struct list_head *path);
+int coresight_add_sysfs_link(struct coresight_sysfs_link *info);
+void coresight_remove_sysfs_link(struct coresight_sysfs_link *info);
+int coresight_create_conns_sysfs_group(struct coresight_device *csdev);
+void coresight_remove_conns_sysfs_group(struct coresight_device *csdev);
+int coresight_make_links(struct coresight_device *orig,
+ struct coresight_connection *conn,
+ struct coresight_device *target);
+void coresight_remove_links(struct coresight_device *orig,
+ struct coresight_connection *conn);
#ifdef CONFIG_CORESIGHT_SOURCE_ETM3X
extern int etm_readl_cp14(u32 off, unsigned int *val);
@@ -206,12 +215,16 @@ cti_remove_assoc_from_csdev(struct coresight_device *csdev) {}
/* extract the data value from a UCI structure given amba_id pointer. */
static inline void *coresight_get_uci_data(const struct amba_id *id)
{
- if (id->data)
- return ((struct amba_cs_uci_id *)(id->data))->data;
- return 0;
+ struct amba_cs_uci_id *uci_id = id->data;
+
+ if (!uci_id)
+ return NULL;
+
+ return uci_id->data;
}
-void coresight_release_platform_data(struct coresight_platform_data *pdata);
+void coresight_release_platform_data(struct coresight_device *csdev,
+ struct coresight_platform_data *pdata);
struct coresight_device *
coresight_find_csdev_by_fwnode(struct fwnode_handle *r_fwnode);
void coresight_set_assoc_ectdev_mutex(struct coresight_device *csdev,
diff --git a/drivers/hwtracing/coresight/coresight-sysfs.c b/drivers/hwtracing/coresight/coresight-sysfs.c
new file mode 100644
index 000000000000..82afeaf2ccc4
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-sysfs.c
@@ -0,0 +1,204 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019, Linaro Limited, All rights reserved.
+ * Author: Mike Leach <mike.leach@linaro.org>
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+
+#include "coresight-priv.h"
+
+/*
+ * Connections group - links attribute.
+ * Count of created links between coresight components in the group.
+ */
+static ssize_t nr_links_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct coresight_device *csdev = to_coresight_device(dev);
+
+ return sprintf(buf, "%d\n", csdev->nr_links);
+}
+static DEVICE_ATTR_RO(nr_links);
+
+static struct attribute *coresight_conns_attrs[] = {
+ &dev_attr_nr_links.attr,
+ NULL,
+};
+
+static struct attribute_group coresight_conns_group = {
+ .attrs = coresight_conns_attrs,
+ .name = "connections",
+};
+
+/*
+ * Create connections group for CoreSight devices.
+ * This group will then be used to collate the sysfs links between
+ * devices.
+ */
+int coresight_create_conns_sysfs_group(struct coresight_device *csdev)
+{
+ int ret = 0;
+
+ if (!csdev)
+ return -EINVAL;
+
+ ret = sysfs_create_group(&csdev->dev.kobj, &coresight_conns_group);
+ if (ret)
+ return ret;
+
+ csdev->has_conns_grp = true;
+ return ret;
+}
+
+void coresight_remove_conns_sysfs_group(struct coresight_device *csdev)
+{
+ if (!csdev)
+ return;
+
+ if (csdev->has_conns_grp) {
+ sysfs_remove_group(&csdev->dev.kobj, &coresight_conns_group);
+ csdev->has_conns_grp = false;
+ }
+}
+
+int coresight_add_sysfs_link(struct coresight_sysfs_link *info)
+{
+ int ret = 0;
+
+ if (!info)
+ return -EINVAL;
+ if (!info->orig || !info->target ||
+ !info->orig_name || !info->target_name)
+ return -EINVAL;
+ if (!info->orig->has_conns_grp || !info->target->has_conns_grp)
+ return -EINVAL;
+
+ /* first link orig->target */
+ ret = sysfs_add_link_to_group(&info->orig->dev.kobj,
+ coresight_conns_group.name,
+ &info->target->dev.kobj,
+ info->orig_name);
+ if (ret)
+ return ret;
+
+ /* second link target->orig */
+ ret = sysfs_add_link_to_group(&info->target->dev.kobj,
+ coresight_conns_group.name,
+ &info->orig->dev.kobj,
+ info->target_name);
+
+ /* error in second link - remove first - otherwise inc counts */
+ if (ret) {
+ sysfs_remove_link_from_group(&info->orig->dev.kobj,
+ coresight_conns_group.name,
+ info->orig_name);
+ } else {
+ info->orig->nr_links++;
+ info->target->nr_links++;
+ }
+
+ return ret;
+}
+
+void coresight_remove_sysfs_link(struct coresight_sysfs_link *info)
+{
+ if (!info)
+ return;
+ if (!info->orig || !info->target ||
+ !info->orig_name || !info->target_name)
+ return;
+
+ sysfs_remove_link_from_group(&info->orig->dev.kobj,
+ coresight_conns_group.name,
+ info->orig_name);
+
+ sysfs_remove_link_from_group(&info->target->dev.kobj,
+ coresight_conns_group.name,
+ info->target_name);
+
+ info->orig->nr_links--;
+ info->target->nr_links--;
+}
+
+/*
+ * coresight_make_links: Make a link for a connection from a @orig
+ * device to @target, represented by @conn.
+ *
+ * e.g. devOrig[output_X] -> devTarget[input_Y] is represented
+ * as two symbolic links:
+ *
+ * /sys/.../devOrig/out:X -> /sys/.../devTarget/
+ * /sys/.../devTarget/in:Y -> /sys/.../devOrig/
+ *
+ * The link names are allocated on the device where they appear, i.e.
+ * the "out" link on the master and the "in" link on the slave device.
+ * The link info is stored in the connection record so that the names
+ * do not have to be reconstructed at removal time.
+ */
+int coresight_make_links(struct coresight_device *orig,
+ struct coresight_connection *conn,
+ struct coresight_device *target)
+{
+ int ret = -ENOMEM;
+ char *outs = NULL, *ins = NULL;
+ struct coresight_sysfs_link *link = NULL;
+
+ do {
+ outs = devm_kasprintf(&orig->dev, GFP_KERNEL,
+ "out:%d", conn->outport);
+ if (!outs)
+ break;
+ ins = devm_kasprintf(&target->dev, GFP_KERNEL,
+ "in:%d", conn->child_port);
+ if (!ins)
+ break;
+ link = devm_kzalloc(&orig->dev,
+ sizeof(struct coresight_sysfs_link),
+ GFP_KERNEL);
+ if (!link)
+ break;
+
+ link->orig = orig;
+ link->target = target;
+ link->orig_name = outs;
+ link->target_name = ins;
+
+ ret = coresight_add_sysfs_link(link);
+ if (ret)
+ break;
+
+ conn->link = link;
+
+ /*
+ * Install the device connection. This also indicates that
+ * the links are operational on both ends.
+ */
+ conn->child_dev = target;
+ return 0;
+ } while (0);
+
+ return ret;
+}
+
+/*
+ * coresight_remove_links: Remove the sysfs links for a given connection @conn,
+ * from @orig device to @target device. See coresight_make_links() for more
+ * details.
+ */
+void coresight_remove_links(struct coresight_device *orig,
+ struct coresight_connection *conn)
+{
+ if (!orig || !conn->link)
+ return;
+
+ coresight_remove_sysfs_link(conn->link);
+
+ devm_kfree(&conn->child_dev->dev, conn->link->target_name);
+ devm_kfree(&orig->dev, conn->link->orig_name);
+ devm_kfree(&orig->dev, conn->link);
+ conn->link = NULL;
+ conn->child_dev = NULL;
+}
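
The new coresight-sysfs.c file above creates a pair of symbolic links per connection. A minimal userspace sketch of following the "out:X" link back to the target device (the device name and sysfs path below are purely illustrative):

#include <limits.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char target[PATH_MAX];
	/* hypothetical path: <source device>/connections/out:<port> */
	const char *link = "/sys/bus/coresight/devices/etm0/connections/out:0";
	ssize_t len = readlink(link, target, sizeof(target) - 1);

	if (len < 0) {
		perror("readlink");
		return 1;
	}
	target[len] = '\0';
	printf("%s -> %s\n", link, target);
	return 0;
}
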
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
index d0cc3985b72a..36cce2bfb744 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
@@ -596,13 +596,6 @@ int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
goto out;
}
- /* There is no point in reading a TMC in HW FIFO mode */
- mode = readl_relaxed(drvdata->base + TMC_MODE);
- if (mode != TMC_MODE_CIRCULAR_BUFFER) {
- ret = -EINVAL;
- goto out;
- }
-
/* Don't interfere if operated from Perf */
if (drvdata->mode == CS_MODE_PERF) {
ret = -EINVAL;
@@ -616,8 +609,15 @@ int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
}
/* Disable the TMC if need be */
- if (drvdata->mode == CS_MODE_SYSFS)
+ if (drvdata->mode == CS_MODE_SYSFS) {
+ /* There is no point in reading a TMC in HW FIFO mode */
+ mode = readl_relaxed(drvdata->base + TMC_MODE);
+ if (mode != TMC_MODE_CIRCULAR_BUFFER) {
+ ret = -EINVAL;
+ goto out;
+ }
__tmc_etb_disable_hw(drvdata);
+ }
drvdata->reading = true;
out:
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index 1cf82fa58289..39fba1d16e6e 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -361,7 +361,7 @@ static const struct attribute_group coresight_tmc_mgmt_group = {
.name = "mgmt",
};
-const struct attribute_group *coresight_tmc_groups[] = {
+static const struct attribute_group *coresight_tmc_groups[] = {
&coresight_tmc_group,
&coresight_tmc_mgmt_group,
NULL,
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index c71553c09f8e..f3efbb3b2b4d 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -1031,7 +1031,7 @@ static void coresight_device_release(struct device *dev)
static int coresight_orphan_match(struct device *dev, void *data)
{
- int i;
+ int i, ret = 0;
bool still_orphan = false;
struct coresight_device *csdev, *i_csdev;
struct coresight_connection *conn;
@@ -1053,49 +1053,62 @@ static int coresight_orphan_match(struct device *dev, void *data)
for (i = 0; i < i_csdev->pdata->nr_outport; i++) {
conn = &i_csdev->pdata->conns[i];
+ /* Skip the port if FW doesn't describe it */
+ if (!conn->child_fwnode)
+ continue;
/* We have found at least one orphan connection */
if (conn->child_dev == NULL) {
/* Does it match this newly added device? */
- if (conn->child_fwnode == csdev->dev.fwnode)
- conn->child_dev = csdev;
- else
+ if (conn->child_fwnode == csdev->dev.fwnode) {
+ ret = coresight_make_links(i_csdev,
+ conn, csdev);
+ if (ret)
+ return ret;
+ } else {
/* This component still has an orphan */
still_orphan = true;
+ }
}
}
i_csdev->orphan = still_orphan;
/*
- * Returning '0' ensures that all known component on the
- * bus will be checked.
+ * Returning '0', when no error was encountered, ensures that
+ * all known components on the bus will be checked.
*/
return 0;
}
-static void coresight_fixup_orphan_conns(struct coresight_device *csdev)
+static int coresight_fixup_orphan_conns(struct coresight_device *csdev)
{
- /*
- * No need to check for a return value as orphan connection(s)
- * are hooked-up with each newly added component.
- */
- bus_for_each_dev(&coresight_bustype, NULL,
+ return bus_for_each_dev(&coresight_bustype, NULL,
csdev, coresight_orphan_match);
}
-static void coresight_fixup_device_conns(struct coresight_device *csdev)
+static int coresight_fixup_device_conns(struct coresight_device *csdev)
{
- int i;
+ int i, ret = 0;
for (i = 0; i < csdev->pdata->nr_outport; i++) {
struct coresight_connection *conn = &csdev->pdata->conns[i];
+ if (!conn->child_fwnode)
+ continue;
conn->child_dev =
coresight_find_csdev_by_fwnode(conn->child_fwnode);
- if (!conn->child_dev)
+ if (conn->child_dev) {
+ ret = coresight_make_links(csdev, conn,
+ conn->child_dev);
+ if (ret)
+ break;
+ } else {
csdev->orphan = true;
+ }
}
+
+ return 0;
}
static int coresight_remove_match(struct device *dev, void *data)
@@ -1118,12 +1131,12 @@ static int coresight_remove_match(struct device *dev, void *data)
for (i = 0; i < iterator->pdata->nr_outport; i++) {
conn = &iterator->pdata->conns[i];
- if (conn->child_dev == NULL)
+ if (conn->child_dev == NULL || conn->child_fwnode == NULL)
continue;
if (csdev->dev.fwnode == conn->child_fwnode) {
iterator->orphan = true;
- conn->child_dev = NULL;
+ coresight_remove_links(iterator, conn);
/*
* Drop the reference to the handle for the remote
* device acquired in parsing the connections from
@@ -1213,16 +1226,27 @@ postcore_initcall(coresight_init);
* coresight_release_platform_data: Release references to the devices connected
* to the output port of this device.
*/
-void coresight_release_platform_data(struct coresight_platform_data *pdata)
+void coresight_release_platform_data(struct coresight_device *csdev,
+ struct coresight_platform_data *pdata)
{
int i;
+ struct coresight_connection *conns = pdata->conns;
for (i = 0; i < pdata->nr_outport; i++) {
- if (pdata->conns[i].child_fwnode) {
- fwnode_handle_put(pdata->conns[i].child_fwnode);
+ /* If we have made the links, remove them now */
+ if (csdev && conns[i].child_dev)
+ coresight_remove_links(csdev, &conns[i]);
+ /*
+ * Drop the refcount and clear the handle as this device
+ * is going away
+ */
+ if (conns[i].child_fwnode) {
+ fwnode_handle_put(conns[i].child_fwnode);
pdata->conns[i].child_fwnode = NULL;
}
}
+ if (csdev)
+ coresight_remove_conns_sysfs_group(csdev);
}
struct coresight_device *coresight_register(struct coresight_desc *desc)
@@ -1304,11 +1328,19 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
mutex_lock(&coresight_mutex);
- coresight_fixup_device_conns(csdev);
- coresight_fixup_orphan_conns(csdev);
- cti_add_assoc_to_csdev(csdev);
+ ret = coresight_create_conns_sysfs_group(csdev);
+ if (!ret)
+ ret = coresight_fixup_device_conns(csdev);
+ if (!ret)
+ ret = coresight_fixup_orphan_conns(csdev);
+ if (!ret)
+ cti_add_assoc_to_csdev(csdev);
mutex_unlock(&coresight_mutex);
+ if (ret) {
+ coresight_unregister(csdev);
+ return ERR_PTR(ret);
+ }
return csdev;
@@ -1316,7 +1348,7 @@ err_free_csdev:
kfree(csdev);
err_out:
/* Cleanup the connection information */
- coresight_release_platform_data(desc->pdata);
+ coresight_release_platform_data(NULL, desc->pdata);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(coresight_register);
@@ -1326,7 +1358,7 @@ void coresight_unregister(struct coresight_device *csdev)
etm_perf_del_symlink_sink(csdev);
/* Remove references of that device in the topology */
coresight_remove_conns(csdev);
- coresight_release_platform_data(csdev->pdata);
+ coresight_release_platform_data(csdev, csdev->pdata);
device_unregister(&csdev->dev);
}
EXPORT_SYMBOL_GPL(coresight_unregister);
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 5536673060cc..c429d664f655 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -357,12 +357,12 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
if (dev->flags & ACCESS_NO_IRQ_SUSPEND) {
dev_pm_set_driver_flags(&pdev->dev,
DPM_FLAG_SMART_PREPARE |
- DPM_FLAG_LEAVE_SUSPENDED);
+ DPM_FLAG_MAY_SKIP_RESUME);
} else {
dev_pm_set_driver_flags(&pdev->dev,
DPM_FLAG_SMART_PREPARE |
DPM_FLAG_SMART_SUSPEND |
- DPM_FLAG_LEAVE_SUSPENDED);
+ DPM_FLAG_MAY_SKIP_RESUME);
}
/* The code below assumes runtime PM to be disabled. */
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index a9c03f5c3482..4f333889489c 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -1439,9 +1439,9 @@ static int i801_add_mux(struct i801_priv *priv)
return -ENOMEM;
lookup->dev_id = "i2c-mux-gpio";
for (i = 0; i < mux_config->n_gpios; i++) {
- lookup->table[i].chip_label = mux_config->gpio_chip;
- lookup->table[i].chip_hwnum = mux_config->gpios[i];
- lookup->table[i].con_id = "mux";
+ lookup->table[i] = (struct gpiod_lookup)
+ GPIO_LOOKUP(mux_config->gpio_chip,
+ mux_config->gpios[i], "mux", 0);
}
gpiod_add_lookup_table(lookup);
priv->lookup = lookup;
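
The i801 hunk above swaps per-field assignment of the lookup entries for the GPIO_LOOKUP() initializer. For comparison, a sketch of how the same helper is normally used in a static table (the chip label and pin numbers are made up); i801 cannot do this because the values come from mux_config at run time:

#include <linux/gpio/machine.h>

/* Hypothetical static table built with the same GPIO_LOOKUP() helper. */
static struct gpiod_lookup_table example_mux_gpios = {
	.dev_id = "i2c-mux-gpio",
	.table = {
		GPIO_LOOKUP("example-gpiochip", 2, "mux", 0),
		GPIO_LOOKUP("example-gpiochip", 3, "mux", 0),
		{ },
	},
};
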
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 5a5638e1daa1..57986984a90b 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -435,8 +435,7 @@ static int i2c_s3c_irq_nextbyte(struct s3c24xx_i2c *i2c, unsigned long iicstat)
* fall through to the write state, as we will need to
* send a byte as well
*/
- /* Fall through */
-
+ fallthrough;
case STATE_WRITE:
/*
* we are writing data to the device... check for the
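
The s3c2410 hunk above replaces the old "/* Fall through */" comment with the fallthrough pseudo-keyword. A minimal sketch of the annotation (the function and cases are hypothetical), which keeps -Wimplicit-fallthrough quiet while documenting the intent:

#include <linux/compiler.h>

static int example_steps(int state)
{
	int done = 0;

	switch (state) {
	case 0:
		done++;
		fallthrough;	/* state 0 intentionally also does state 1's work */
	case 1:
		done++;
		break;
	default:
		break;
	}
	return done;
}
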
diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
index d79cd6d54b3a..97f2e29265da 100644
--- a/drivers/i3c/master.c
+++ b/drivers/i3c/master.c
@@ -1008,7 +1008,6 @@ static int i3c_master_getmrl_locked(struct i3c_master_controller *master,
struct i3c_device_info *info)
{
struct i3c_ccc_cmd_dest dest;
- unsigned int expected_len;
struct i3c_ccc_mrl *mrl;
struct i3c_ccc_cmd cmd;
int ret;
@@ -1024,22 +1023,23 @@ static int i3c_master_getmrl_locked(struct i3c_master_controller *master,
if (!(info->bcr & I3C_BCR_IBI_PAYLOAD))
dest.payload.len -= 1;
- expected_len = dest.payload.len;
i3c_ccc_cmd_init(&cmd, true, I3C_CCC_GETMRL, &dest, 1);
ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
if (ret)
goto out;
- if (dest.payload.len != expected_len) {
+ switch (dest.payload.len) {
+ case 3:
+ info->max_ibi_len = mrl->ibi_len;
+ fallthrough;
+ case 2:
+ info->max_read_len = be16_to_cpu(mrl->read_len);
+ break;
+ default:
ret = -EIO;
goto out;
}
- info->max_read_len = be16_to_cpu(mrl->read_len);
-
- if (info->bcr & I3C_BCR_IBI_PAYLOAD)
- info->max_ibi_len = mrl->ibi_len;
-
out:
i3c_ccc_cmd_dest_cleanup(&dest);
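
The GETMRL hunk above sizes the reply by what the target actually returned: a 2-byte reply carries only the big-endian maximum read length, a 3-byte reply also carries the maximum IBI payload length. An illustrative standalone parser for the same layout (not the kernel structures; the sample bytes are made up):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static int parse_mrl(const uint8_t *buf, size_t len,
		     unsigned int *max_read, unsigned int *max_ibi)
{
	switch (len) {
	case 3:
		*max_ibi = buf[2];
		/* fall through */
	case 2:
		*max_read = (buf[0] << 8) | buf[1];	/* big-endian read length */
		return 0;
	default:
		return -1;	/* unexpected reply length */
	}
}

int main(void)
{
	const uint8_t reply[] = { 0x01, 0x00, 0x08 };	/* 256 bytes, 8-byte IBI */
	unsigned int max_read = 0, max_ibi = 0;

	if (!parse_mrl(reply, sizeof(reply), &max_read, &max_ibi))
		printf("max_read=%u max_ibi=%u\n", max_read, max_ibi);
	return 0;
}
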
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index dcf8b51b47fd..7f17f8303988 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -1034,8 +1034,8 @@ static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity,
return 0;
}
-static int cdrom_read_tocentry(ide_drive_t *drive, int trackno, int msf_flag,
- int format, char *buf, int buflen)
+static int ide_cdrom_read_tocentry(ide_drive_t *drive, int trackno,
+ int msf_flag, int format, char *buf, int buflen)
{
unsigned char cmd[BLK_MAX_CDB];
@@ -1104,7 +1104,7 @@ int ide_cd_read_toc(ide_drive_t *drive)
sectors_per_frame << SECTOR_SHIFT);
/* first read just the header, so we know how long the TOC is */
- stat = cdrom_read_tocentry(drive, 0, 1, 0, (char *) &toc->hdr,
+ stat = ide_cdrom_read_tocentry(drive, 0, 1, 0, (char *) &toc->hdr,
sizeof(struct atapi_toc_header));
if (stat)
return stat;
@@ -1121,7 +1121,7 @@ int ide_cd_read_toc(ide_drive_t *drive)
ntracks = MAX_TRACKS;
/* now read the whole schmeer */
- stat = cdrom_read_tocentry(drive, toc->hdr.first_track, 1, 0,
+ stat = ide_cdrom_read_tocentry(drive, toc->hdr.first_track, 1, 0,
(char *)&toc->hdr,
sizeof(struct atapi_toc_header) +
(ntracks + 1) *
@@ -1141,7 +1141,7 @@ int ide_cd_read_toc(ide_drive_t *drive)
* Heiko Eißfeldt.
*/
ntracks = 0;
- stat = cdrom_read_tocentry(drive, CDROM_LEADOUT, 1, 0,
+ stat = ide_cdrom_read_tocentry(drive, CDROM_LEADOUT, 1, 0,
(char *)&toc->hdr,
sizeof(struct atapi_toc_header) +
(ntracks + 1) *
@@ -1181,7 +1181,7 @@ int ide_cd_read_toc(ide_drive_t *drive)
if (toc->hdr.first_track != CDROM_LEADOUT) {
/* read the multisession information */
- stat = cdrom_read_tocentry(drive, 0, 0, 1, (char *)&ms_tmp,
+ stat = ide_cdrom_read_tocentry(drive, 0, 0, 1, (char *)&ms_tmp,
sizeof(ms_tmp));
if (stat)
return stat;
@@ -1195,7 +1195,7 @@ int ide_cd_read_toc(ide_drive_t *drive)
if (drive->atapi_flags & IDE_AFLAG_TOCADDR_AS_BCD) {
/* re-read multisession information using MSF format */
- stat = cdrom_read_tocentry(drive, 0, 1, 1, (char *)&ms_tmp,
+ stat = ide_cdrom_read_tocentry(drive, 0, 1, 1, (char *)&ms_tmp,
sizeof(ms_tmp));
if (stat)
return stat;
@@ -1305,8 +1305,7 @@ static int ide_cdrom_register(ide_drive_t *drive, int nslots)
if (drive->atapi_flags & IDE_AFLAG_NO_SPEED_SELECT)
devinfo->mask |= CDC_SELECT_SPEED;
- devinfo->disk = info->disk;
- return register_cdrom(devinfo);
+ return register_cdrom(info->disk, devinfo);
}
static int ide_cdrom_probe_capabilities(ide_drive_t *drive)
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index b137f27a34d5..c31f1d2b3b07 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -233,10 +233,13 @@ static ide_startstop_t do_special(ide_drive_t *drive)
void ide_map_sg(ide_drive_t *drive, struct ide_cmd *cmd)
{
ide_hwif_t *hwif = drive->hwif;
- struct scatterlist *sg = hwif->sg_table;
+ struct scatterlist *sg = hwif->sg_table, *last_sg = NULL;
struct request *rq = cmd->rq;
- cmd->sg_nents = blk_rq_map_sg(drive->queue, rq, sg);
+ cmd->sg_nents = __blk_rq_map_sg(drive->queue, rq, sg, &last_sg);
+ if (blk_rq_bytes(rq) && (blk_rq_bytes(rq) & rq->q->dma_pad_mask))
+ last_sg->length +=
+ (rq->q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;
}
EXPORT_SYMBOL_GPL(ide_map_sg);
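
The ide_map_sg() hunk above pads the last scatterlist entry so the transfer length meets the queue's DMA alignment. A standalone illustration of the arithmetic (the mask and length values are examples): dma_pad_mask is alignment minus one, and (mask & ~len) + 1 is exactly the number of bytes needed to round len up to the next boundary.

#include <stdio.h>

int main(void)
{
	unsigned int dma_pad_mask = 3;	/* example: 4-byte DMA alignment */
	unsigned int len = 10;		/* example transfer length */

	if (len & dma_pad_mask)
		len += (dma_pad_mask & ~len) + 1;

	printf("padded length = %u\n", len);	/* prints 12 */
	return 0;
}
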
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
index 5d91a6dda894..1080637ca40e 100644
--- a/drivers/iio/accel/Kconfig
+++ b/drivers/iio/accel/Kconfig
@@ -89,13 +89,13 @@ config ADXL372_I2C
module will be called adxl372_i2c.
config BMA180
- tristate "Bosch BMA180/BMA25x 3-Axis Accelerometer Driver"
- depends on I2C
+ tristate "Bosch BMA023/BMA1x0/BMA25x 3-Axis Accelerometer Driver"
+ depends on I2C && INPUT_BMA150=n
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
- Say Y here if you want to build a driver for the Bosch BMA180 or
- BMA25x triaxial acceleration sensor.
+ Say Y here if you want to build a driver for the Bosch BMA023, BMA150,
+ BMA180, SMB380, or BMA25x triaxial acceleration sensor.
To compile this driver as a module, choose M here: the
module will be called bma180.
@@ -238,7 +238,7 @@ config IIO_ST_ACCEL_3AXIS
Say yes here to build support for STMicroelectronics accelerometers:
LSM303DLH, LSM303DLHC, LIS3DH, LSM330D, LSM330DL, LSM330DLC,
LIS331DLH, LSM303DL, LSM303DLM, LSM330, LIS2DH12, H3LIS331DL,
- LNG2DM, LIS3DE, LIS2DE12
+ LNG2DM, LIS3DE, LIS2DE12, LIS2HH12
This driver can also be built as a module. If so, these modules
will be created:
diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
index fcd91d5f05fd..265722e4b13f 100644
--- a/drivers/iio/accel/bma180.c
+++ b/drivers/iio/accel/bma180.c
@@ -7,6 +7,7 @@
* Support for BMA250 (c) Peter Meerwald <pmeerw@pmeerw.net>
*
* SPI is not supported by driver
+ * BMA023/BMA150/SMB380: 7-bit I2C slave address 0x38
* BMA180: 7-bit I2C slave address 0x40 or 0x41
* BMA250: 7-bit I2C slave address 0x18 or 0x19
* BMA254: 7-bit I2C slave address 0x18 or 0x19
@@ -33,6 +34,8 @@
#define BMA180_IRQ_NAME "bma180_event"
enum chip_ids {
+ BMA023,
+ BMA150,
BMA180,
BMA250,
BMA254,
@@ -48,7 +51,7 @@ struct bma180_part_info {
unsigned int num_scales;
const int *bw_table;
unsigned int num_bw;
- int center_temp;
+ int temp_offset;
u8 int_reset_reg, int_reset_mask;
u8 sleep_reg, sleep_mask;
@@ -57,13 +60,25 @@ struct bma180_part_info {
u8 power_reg, power_mask, lowpower_val;
u8 int_enable_reg, int_enable_mask;
u8 int_map_reg, int_enable_dataready_int1_mask;
- u8 softreset_reg;
+ u8 softreset_reg, softreset_val;
int (*chip_config)(struct bma180_data *data);
void (*chip_disable)(struct bma180_data *data);
};
/* Register set */
+#define BMA023_CTRL_REG0 0x0a
+#define BMA023_CTRL_REG1 0x0b
+#define BMA023_CTRL_REG2 0x14
+#define BMA023_CTRL_REG3 0x15
+
+#define BMA023_RANGE_MASK GENMASK(4, 3) /* Range of accel values */
+#define BMA023_BW_MASK GENMASK(2, 0) /* Accel bandwidth */
+#define BMA023_SLEEP BIT(0)
+#define BMA023_INT_RESET_MASK BIT(6)
+#define BMA023_NEW_DATA_INT BIT(5) /* Interrupt when new accel data is ready */
+#define BMA023_RESET_VAL BIT(1)
+
#define BMA180_CHIP_ID 0x00 /* Need to distinguish BMA180 from other */
#define BMA180_ACC_X_LSB 0x02 /* First of 6 registers of accel data */
#define BMA180_TEMP 0x08
@@ -94,6 +109,7 @@ struct bma180_part_info {
/* We have to write this value in reset register to do soft reset */
#define BMA180_RESET_VAL 0xb6
+#define BMA023_ID_REG_VAL 0x02
#define BMA180_ID_REG_VAL 0x03
#define BMA250_ID_REG_VAL 0x03
#define BMA254_ID_REG_VAL 0xfa /* 250 decimal */
@@ -156,6 +172,9 @@ enum bma180_chan {
TEMP
};
+static int bma023_bw_table[] = { 25, 50, 100, 190, 375, 750, 1500 }; /* Hz */
+static int bma023_scale_table[] = { 2452, 4903, 9709, };
+
static int bma180_bw_table[] = { 10, 20, 40, 75, 150, 300 }; /* Hz */
static int bma180_scale_table[] = { 1275, 1863, 2452, 3727, 4903, 9709, 19417 };
@@ -319,7 +338,8 @@ static int bma180_set_pmode(struct bma180_data *data, bool mode)
static int bma180_soft_reset(struct bma180_data *data)
{
int ret = i2c_smbus_write_byte_data(data->client,
- data->part_info->softreset_reg, BMA180_RESET_VAL);
+ data->part_info->softreset_reg,
+ data->part_info->softreset_val);
if (ret)
dev_err(&data->client->dev, "failed to reset the chip\n");
@@ -349,11 +369,28 @@ static int bma180_chip_init(struct bma180_data *data)
*/
msleep(20);
- ret = bma180_set_new_data_intr_state(data, false);
+ return bma180_set_new_data_intr_state(data, false);
+}
+
+static int bma023_chip_config(struct bma180_data *data)
+{
+ int ret = bma180_chip_init(data);
+
if (ret)
- return ret;
+ goto err;
+
+ ret = bma180_set_bw(data, 50); /* 50 Hz */
+ if (ret)
+ goto err;
+ ret = bma180_set_scale(data, 2452); /* 2 G */
+ if (ret)
+ goto err;
- return bma180_set_pmode(data, false);
+ return 0;
+
+err:
+ dev_err(&data->client->dev, "failed to config the chip\n");
+ return ret;
}
static int bma180_chip_config(struct bma180_data *data)
@@ -362,6 +399,9 @@ static int bma180_chip_config(struct bma180_data *data)
if (ret)
goto err;
+ ret = bma180_set_pmode(data, false);
+ if (ret)
+ goto err;
ret = bma180_set_bits(data, BMA180_CTRL_REG0, BMA180_DIS_WAKE_UP, 1);
if (ret)
goto err;
@@ -391,6 +431,9 @@ static int bma25x_chip_config(struct bma180_data *data)
if (ret)
goto err;
+ ret = bma180_set_pmode(data, false);
+ if (ret)
+ goto err;
ret = bma180_set_bw(data, 16); /* 16 Hz */
if (ret)
goto err;
@@ -413,6 +456,17 @@ err:
return ret;
}
+static void bma023_chip_disable(struct bma180_data *data)
+{
+ if (bma180_set_sleep_state(data, true))
+ goto err;
+
+ return;
+
+err:
+ dev_err(&data->client->dev, "failed to disable the chip\n");
+}
+
static void bma180_chip_disable(struct bma180_data *data)
{
if (bma180_set_new_data_intr_state(data, false))
@@ -512,8 +566,12 @@ static int bma180_read_raw(struct iio_dev *indio_dev,
iio_device_release_direct_mode(indio_dev);
if (ret < 0)
return ret;
- *val = sign_extend32(ret >> chan->scan_type.shift,
- chan->scan_type.realbits - 1);
+ if (chan->scan_type.sign == 's') {
+ *val = sign_extend32(ret >> chan->scan_type.shift,
+ chan->scan_type.realbits - 1);
+ } else {
+ *val = ret;
+ }
return IIO_VAL_INT;
case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
*val = data->bw;
@@ -531,7 +589,7 @@ static int bma180_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
case IIO_CHAN_INFO_OFFSET:
- *val = data->part_info->center_temp;
+ *val = data->part_info->temp_offset;
return IIO_VAL_INT;
default:
return -EINVAL;
@@ -609,6 +667,11 @@ static const struct iio_enum bma180_power_mode_enum = {
.set = bma180_set_power_mode,
};
+static const struct iio_chan_spec_ext_info bma023_ext_info[] = {
+ IIO_MOUNT_MATRIX(IIO_SHARED_BY_DIR, bma180_accel_get_mount_matrix),
+ { }
+};
+
static const struct iio_chan_spec_ext_info bma180_ext_info[] = {
IIO_ENUM("power_mode", true, &bma180_power_mode_enum),
IIO_ENUM_AVAILABLE("power_mode", &bma180_power_mode_enum),
@@ -616,6 +679,35 @@ static const struct iio_chan_spec_ext_info bma180_ext_info[] = {
{ }
};
+#define BMA023_ACC_CHANNEL(_axis, _bits) { \
+ .type = IIO_ACCEL, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##_axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY), \
+ .scan_index = AXIS_##_axis, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = _bits, \
+ .storagebits = 16, \
+ .shift = 16 - _bits, \
+ }, \
+ .ext_info = bma023_ext_info, \
+}
+
+#define BMA150_TEMP_CHANNEL { \
+ .type = IIO_TEMP, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_OFFSET), \
+ .scan_index = TEMP, \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 8, \
+ .storagebits = 16, \
+ }, \
+}
+
#define BMA180_ACC_CHANNEL(_axis, _bits) { \
.type = IIO_ACCEL, \
.modified = 1, \
@@ -645,6 +737,21 @@ static const struct iio_chan_spec_ext_info bma180_ext_info[] = {
}, \
}
+static const struct iio_chan_spec bma023_channels[] = {
+ BMA023_ACC_CHANNEL(X, 10),
+ BMA023_ACC_CHANNEL(Y, 10),
+ BMA023_ACC_CHANNEL(Z, 10),
+ IIO_CHAN_SOFT_TIMESTAMP(4),
+};
+
+static const struct iio_chan_spec bma150_channels[] = {
+ BMA023_ACC_CHANNEL(X, 10),
+ BMA023_ACC_CHANNEL(Y, 10),
+ BMA023_ACC_CHANNEL(Z, 10),
+ BMA150_TEMP_CHANNEL,
+ IIO_CHAN_SOFT_TIMESTAMP(4),
+};
+
static const struct iio_chan_spec bma180_channels[] = {
BMA180_ACC_CHANNEL(X, 14),
BMA180_ACC_CHANNEL(Y, 14),
@@ -670,6 +777,63 @@ static const struct iio_chan_spec bma254_channels[] = {
};
static const struct bma180_part_info bma180_part_info[] = {
+ [BMA023] = {
+ .chip_id = BMA023_ID_REG_VAL,
+ .channels = bma023_channels,
+ .num_channels = ARRAY_SIZE(bma023_channels),
+ .scale_table = bma023_scale_table,
+ .num_scales = ARRAY_SIZE(bma023_scale_table),
+ .bw_table = bma023_bw_table,
+ .num_bw = ARRAY_SIZE(bma023_bw_table),
+ /* No temperature channel */
+ .temp_offset = 0,
+ .int_reset_reg = BMA023_CTRL_REG0,
+ .int_reset_mask = BMA023_INT_RESET_MASK,
+ .sleep_reg = BMA023_CTRL_REG0,
+ .sleep_mask = BMA023_SLEEP,
+ .bw_reg = BMA023_CTRL_REG2,
+ .bw_mask = BMA023_BW_MASK,
+ .scale_reg = BMA023_CTRL_REG2,
+ .scale_mask = BMA023_RANGE_MASK,
+ /* No power mode on bma023 */
+ .power_reg = 0,
+ .power_mask = 0,
+ .lowpower_val = 0,
+ .int_enable_reg = BMA023_CTRL_REG3,
+ .int_enable_mask = BMA023_NEW_DATA_INT,
+ .softreset_reg = BMA023_CTRL_REG0,
+ .softreset_val = BMA023_RESET_VAL,
+ .chip_config = bma023_chip_config,
+ .chip_disable = bma023_chip_disable,
+ },
+ [BMA150] = {
+ .chip_id = BMA023_ID_REG_VAL,
+ .channels = bma150_channels,
+ .num_channels = ARRAY_SIZE(bma150_channels),
+ .scale_table = bma023_scale_table,
+ .num_scales = ARRAY_SIZE(bma023_scale_table),
+ .bw_table = bma023_bw_table,
+ .num_bw = ARRAY_SIZE(bma023_bw_table),
+ .temp_offset = -60, /* 0 LSB @ -30 degree C */
+ .int_reset_reg = BMA023_CTRL_REG0,
+ .int_reset_mask = BMA023_INT_RESET_MASK,
+ .sleep_reg = BMA023_CTRL_REG0,
+ .sleep_mask = BMA023_SLEEP,
+ .bw_reg = BMA023_CTRL_REG2,
+ .bw_mask = BMA023_BW_MASK,
+ .scale_reg = BMA023_CTRL_REG2,
+ .scale_mask = BMA023_RANGE_MASK,
+ /* No power mode on bma150 */
+ .power_reg = 0,
+ .power_mask = 0,
+ .lowpower_val = 0,
+ .int_enable_reg = BMA023_CTRL_REG3,
+ .int_enable_mask = BMA023_NEW_DATA_INT,
+ .softreset_reg = BMA023_CTRL_REG0,
+ .softreset_val = BMA023_RESET_VAL,
+ .chip_config = bma023_chip_config,
+ .chip_disable = bma023_chip_disable,
+ },
[BMA180] = {
.chip_id = BMA180_ID_REG_VAL,
.channels = bma180_channels,
@@ -678,7 +842,7 @@ static const struct bma180_part_info bma180_part_info[] = {
.num_scales = ARRAY_SIZE(bma180_scale_table),
.bw_table = bma180_bw_table,
.num_bw = ARRAY_SIZE(bma180_bw_table),
- .center_temp = 48, /* 0 LSB @ 24 degree C */
+ .temp_offset = 48, /* 0 LSB @ 24 degree C */
.int_reset_reg = BMA180_CTRL_REG0,
.int_reset_mask = BMA180_RESET_INT,
.sleep_reg = BMA180_CTRL_REG0,
@@ -693,6 +857,7 @@ static const struct bma180_part_info bma180_part_info[] = {
.int_enable_reg = BMA180_CTRL_REG3,
.int_enable_mask = BMA180_NEW_DATA_INT,
.softreset_reg = BMA180_RESET,
+ .softreset_val = BMA180_RESET_VAL,
.chip_config = bma180_chip_config,
.chip_disable = bma180_chip_disable,
},
@@ -704,7 +869,7 @@ static const struct bma180_part_info bma180_part_info[] = {
.num_scales = ARRAY_SIZE(bma25x_scale_table),
.bw_table = bma25x_bw_table,
.num_bw = ARRAY_SIZE(bma25x_bw_table),
- .center_temp = 48, /* 0 LSB @ 24 degree C */
+ .temp_offset = 48, /* 0 LSB @ 24 degree C */
.int_reset_reg = BMA250_INT_RESET_REG,
.int_reset_mask = BMA250_INT_RESET_MASK,
.sleep_reg = BMA250_POWER_REG,
@@ -721,6 +886,7 @@ static const struct bma180_part_info bma180_part_info[] = {
.int_map_reg = BMA250_INT_MAP_REG,
.int_enable_dataready_int1_mask = BMA250_INT1_DATA_MASK,
.softreset_reg = BMA250_RESET_REG,
+ .softreset_val = BMA180_RESET_VAL,
.chip_config = bma25x_chip_config,
.chip_disable = bma25x_chip_disable,
},
@@ -732,7 +898,7 @@ static const struct bma180_part_info bma180_part_info[] = {
.num_scales = ARRAY_SIZE(bma25x_scale_table),
.bw_table = bma25x_bw_table,
.num_bw = ARRAY_SIZE(bma25x_bw_table),
- .center_temp = 46, /* 0 LSB @ 23 degree C */
+ .temp_offset = 46, /* 0 LSB @ 23 degree C */
.int_reset_reg = BMA254_INT_RESET_REG,
.int_reset_mask = BMA254_INT_RESET_MASK,
.sleep_reg = BMA254_POWER_REG,
@@ -749,6 +915,7 @@ static const struct bma180_part_info bma180_part_info[] = {
.int_map_reg = BMA254_INT_MAP_REG,
.int_enable_dataready_int1_mask = BMA254_INT1_DATA_MASK,
.softreset_reg = BMA254_RESET_REG,
+ .softreset_val = BMA180_RESET_VAL,
.chip_config = bma25x_chip_config,
.chip_disable = bma25x_chip_disable,
},
@@ -990,9 +1157,12 @@ static SIMPLE_DEV_PM_OPS(bma180_pm_ops, bma180_suspend, bma180_resume);
#endif
static const struct i2c_device_id bma180_ids[] = {
+ { "bma023", BMA023 },
+ { "bma150", BMA150 },
{ "bma180", BMA180 },
{ "bma250", BMA250 },
{ "bma254", BMA254 },
+ { "smb380", BMA150 },
{ }
};
@@ -1000,6 +1170,14 @@ MODULE_DEVICE_TABLE(i2c, bma180_ids);
static const struct of_device_id bma180_of_match[] = {
{
+ .compatible = "bosch,bma023",
+ .data = (void *)BMA023
+ },
+ {
+ .compatible = "bosch,bma150",
+ .data = (void *)BMA150
+ },
+ {
.compatible = "bosch,bma180",
.data = (void *)BMA180
},
@@ -1011,6 +1189,10 @@ static const struct of_device_id bma180_of_match[] = {
.compatible = "bosch,bma254",
.data = (void *)BMA254
},
+ {
+ .compatible = "bosch,smb380",
+ .data = (void *)BMA150
+ },
{ }
};
MODULE_DEVICE_TABLE(of, bma180_of_match);
@@ -1030,5 +1212,5 @@ module_i2c_driver(bma180_driver);
MODULE_AUTHOR("Kravchenko Oleksandr <x0199363@ti.com>");
MODULE_AUTHOR("Texas Instruments, Inc.");
-MODULE_DESCRIPTION("Bosch BMA180/BMA25x triaxial acceleration sensor");
+MODULE_DESCRIPTION("Bosch BMA023/BMA1x0/BMA25x triaxial acceleration sensor");
MODULE_LICENSE("GPL");
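
Among the bma180.c changes above, bma180_read_raw() now sign-extends only channels declared signed, leaving the unsigned BMA150 temperature reading untouched. A standalone sketch of the same arithmetic for a 10-bit sample left-justified in a 16-bit register (the register values are examples):

#include <stdint.h>
#include <stdio.h>

/* Same idiom as the kernel's sign_extend32(value, sign_bit). */
static int32_t sign_extend(uint32_t value, int sign_bit)
{
	uint32_t mask = 1u << sign_bit;

	return (int32_t)((value ^ mask) - mask);
}

int main(void)
{
	uint16_t reg = 0xffc0;				/* -1 in 10 bits, left-justified */
	int32_t accel = sign_extend(reg >> 6, 9);	/* signed channel: shift, then extend */
	uint32_t temp = 0x30;				/* unsigned 8-bit channel: use as-is */

	printf("accel=%d temp=%u\n", accel, temp);	/* accel=-1 temp=48 */
	return 0;
}
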
diff --git a/drivers/iio/accel/dmard06.c b/drivers/iio/accel/dmard06.c
index 2bf210fa4ba6..ef89bded7390 100644
--- a/drivers/iio/accel/dmard06.c
+++ b/drivers/iio/accel/dmard06.c
@@ -6,6 +6,7 @@
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/i2c.h>
#include <linux/iio/iio.h>
@@ -226,7 +227,7 @@ static struct i2c_driver dmard06_driver = {
.id_table = dmard06_id,
.driver = {
.name = DMARD06_DRV_NAME,
- .of_match_table = of_match_ptr(dmard06_of_match),
+ .of_match_table = dmard06_of_match,
.pm = DMARD06_PM_OPS,
},
};
diff --git a/drivers/iio/accel/hid-sensor-accel-3d.c b/drivers/iio/accel/hid-sensor-accel-3d.c
index 0d9e2def2b25..0ec0533448bc 100644
--- a/drivers/iio/accel/hid-sensor-accel-3d.c
+++ b/drivers/iio/accel/hid-sensor-accel-3d.c
@@ -14,8 +14,6 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
-#include <linux/iio/trigger_consumer.h>
-#include <linux/iio/triggered_buffer.h>
#include "../common/hid-sensors/hid-sensor-trigger.h"
enum accel_3d_channel {
@@ -391,18 +389,13 @@ static int hid_accel_3d_probe(struct platform_device *pdev)
indio_dev->name = name;
indio_dev->modes = INDIO_DIRECT_MODE;
- ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
- NULL, NULL);
- if (ret) {
- dev_err(&pdev->dev, "failed to initialize trigger buffer\n");
- goto error_free_dev_mem;
- }
atomic_set(&accel_state->common_attributes.data_ready, 0);
+
ret = hid_sensor_setup_trigger(indio_dev, name,
&accel_state->common_attributes);
if (ret < 0) {
dev_err(&pdev->dev, "trigger setup failed\n");
- goto error_unreg_buffer_funcs;
+ goto error_free_dev_mem;
}
ret = iio_device_register(indio_dev);
@@ -426,9 +419,7 @@ static int hid_accel_3d_probe(struct platform_device *pdev)
error_iio_unreg:
iio_device_unregister(indio_dev);
error_remove_trigger:
- hid_sensor_remove_trigger(&accel_state->common_attributes);
-error_unreg_buffer_funcs:
- iio_triggered_buffer_cleanup(indio_dev);
+ hid_sensor_remove_trigger(indio_dev, &accel_state->common_attributes);
error_free_dev_mem:
kfree(indio_dev->channels);
return ret;
@@ -443,8 +434,7 @@ static int hid_accel_3d_remove(struct platform_device *pdev)
sensor_hub_remove_callback(hsdev, hsdev->usage);
iio_device_unregister(indio_dev);
- hid_sensor_remove_trigger(&accel_state->common_attributes);
- iio_triggered_buffer_cleanup(indio_dev);
+ hid_sensor_remove_trigger(indio_dev, &accel_state->common_attributes);
kfree(indio_dev->channels);
return 0;
diff --git a/drivers/iio/accel/kxsd9-i2c.c b/drivers/iio/accel/kxsd9-i2c.c
index 38411e1c155b..b580d605f848 100644
--- a/drivers/iio/accel/kxsd9-i2c.c
+++ b/drivers/iio/accel/kxsd9-i2c.c
@@ -2,6 +2,7 @@
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/delay.h>
@@ -21,8 +22,8 @@ static int kxsd9_i2c_probe(struct i2c_client *i2c,
regmap = devm_regmap_init_i2c(i2c, &config);
if (IS_ERR(regmap)) {
- dev_err(&i2c->dev, "Failed to register i2c regmap %d\n",
- (int)PTR_ERR(regmap));
+ dev_err(&i2c->dev, "Failed to register i2c regmap: %pe\n",
+ regmap);
return PTR_ERR(regmap);
}
@@ -36,15 +37,11 @@ static int kxsd9_i2c_remove(struct i2c_client *client)
return kxsd9_common_remove(&client->dev);
}
-#ifdef CONFIG_OF
static const struct of_device_id kxsd9_of_match[] = {
{ .compatible = "kionix,kxsd9", },
{ },
};
MODULE_DEVICE_TABLE(of, kxsd9_of_match);
-#else
-#define kxsd9_of_match NULL
-#endif
static const struct i2c_device_id kxsd9_i2c_id[] = {
{"kxsd9", 0},
@@ -55,7 +52,7 @@ MODULE_DEVICE_TABLE(i2c, kxsd9_i2c_id);
static struct i2c_driver kxsd9_i2c_driver = {
.driver = {
.name = "kxsd9",
- .of_match_table = of_match_ptr(kxsd9_of_match),
+ .of_match_table = kxsd9_of_match,
.pm = &kxsd9_dev_pm_ops,
},
.probe = kxsd9_i2c_probe,
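
The kxsd9 hunk above switches to the %pe printk specifier, which prints an ERR_PTR() value by its symbolic errno name instead of casting it to an int. A minimal sketch of the pattern (the device and operation are hypothetical):

#include <linux/device.h>
#include <linux/err.h>

static void example_report(struct device *dev, const void *result)
{
	/* With CONFIG_SYMBOLIC_ERRNAME this prints e.g. "-EPROBE_DEFER". */
	if (IS_ERR(result))
		dev_err(dev, "operation failed: %pe\n", result);
}
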
diff --git a/drivers/iio/accel/mxc4005.c b/drivers/iio/accel/mxc4005.c
index 3d5bea651923..9d07642c0de1 100644
--- a/drivers/iio/accel/mxc4005.c
+++ b/drivers/iio/accel/mxc4005.c
@@ -135,7 +135,7 @@ static int mxc4005_read_xyz(struct mxc4005_data *data)
int ret;
ret = regmap_bulk_read(data->regmap, MXC4005_REG_XOUT_UPPER,
- (u8 *) data->buffer, sizeof(data->buffer));
+ data->buffer, sizeof(data->buffer));
if (ret < 0) {
dev_err(data->dev, "failed to read axes\n");
return ret;
@@ -150,7 +150,7 @@ static int mxc4005_read_axis(struct mxc4005_data *data,
__be16 reg;
int ret;
- ret = regmap_bulk_read(data->regmap, addr, (u8 *) &reg, sizeof(reg));
+ ret = regmap_bulk_read(data->regmap, addr, &reg, sizeof(reg));
if (ret < 0) {
dev_err(data->dev, "failed to read reg %02x\n", addr);
return ret;
diff --git a/drivers/iio/accel/st_accel.h b/drivers/iio/accel/st_accel.h
index 5b13e293cade..5d356288e001 100644
--- a/drivers/iio/accel/st_accel.h
+++ b/drivers/iio/accel/st_accel.h
@@ -35,6 +35,7 @@ enum st_accel_type {
LIS2DW12,
LIS3DHH,
LIS2DE12,
+ LIS2HH12,
ST_ACCEL_MAX,
};
@@ -59,6 +60,7 @@ enum st_accel_type {
#define LIS3DHH_ACCEL_DEV_NAME "lis3dhh"
#define LIS3DE_ACCEL_DEV_NAME "lis3de"
#define LIS2DE12_ACCEL_DEV_NAME "lis2de12"
+#define LIS2HH12_ACCEL_DEV_NAME "lis2hh12"
/**
* struct st_sensors_platform_data - default accel platform data
diff --git a/drivers/iio/accel/st_accel_buffer.c b/drivers/iio/accel/st_accel_buffer.c
index 9f2b40474b8e..b5c814ef1637 100644
--- a/drivers/iio/accel/st_accel_buffer.c
+++ b/drivers/iio/accel/st_accel_buffer.c
@@ -37,8 +37,7 @@ static int st_accel_buffer_postenable(struct iio_dev *indio_dev)
if (err < 0)
return err;
- err = st_sensors_set_axis_enable(indio_dev,
- (u8)indio_dev->active_scan_mask[0]);
+ err = st_sensors_set_axis_enable(indio_dev, indio_dev->active_scan_mask[0]);
if (err < 0)
goto st_accel_buffer_predisable;
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index 7320275c7e56..43c50167d220 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -904,6 +904,83 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.multi_read_bit = true,
.bootime = 2,
},
+ {
+ .wai = 0x41,
+ .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
+ .sensors_supported = {
+ [0] = LIS2HH12_ACCEL_DEV_NAME,
+ },
+ .ch = (struct iio_chan_spec *)st_accel_16bit_channels,
+ .odr = {
+ .addr = 0x20,
+ .mask = 0x70,
+ .odr_avl = {
+ { .hz = 10, .value = 0x01, },
+ { .hz = 50, .value = 0x02, },
+ { .hz = 100, .value = 0x03, },
+ { .hz = 200, .value = 0x04, },
+ { .hz = 400, .value = 0x05, },
+ { .hz = 800, .value = 0x06, },
+ },
+ },
+ .pw = {
+ .addr = 0x20,
+ .mask = 0x70,
+ .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
+ },
+ .enable_axis = {
+ .addr = ST_SENSORS_DEFAULT_AXIS_ADDR,
+ .mask = ST_SENSORS_DEFAULT_AXIS_MASK,
+ },
+ .fs = {
+ .addr = 0x23,
+ .mask = 0x30,
+ .fs_avl = {
+ [0] = {
+ .num = ST_ACCEL_FS_AVL_2G,
+ .value = 0x00,
+ .gain = IIO_G_TO_M_S_2(61),
+ },
+ [1] = {
+ .num = ST_ACCEL_FS_AVL_4G,
+ .value = 0x02,
+ .gain = IIO_G_TO_M_S_2(122),
+ },
+ [2] = {
+ .num = ST_ACCEL_FS_AVL_8G,
+ .value = 0x03,
+ .gain = IIO_G_TO_M_S_2(244),
+ },
+ },
+ },
+ .bdu = {
+ .addr = 0x20,
+ .mask = 0x08,
+ },
+ .drdy_irq = {
+ .int1 = {
+ .addr = 0x22,
+ .mask = 0x01,
+ },
+ .int2 = {
+ .addr = 0x25,
+ .mask = 0x01,
+ },
+ .addr_ihl = 0x24,
+ .mask_ihl = 0x02,
+ .stat_drdy = {
+ .addr = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .mask = 0x07,
+ },
+ },
+ .sim = {
+ .addr = 0x23,
+ .value = BIT(0),
+ },
+ .multi_read_bit = true,
+ .bootime = 2,
+ },
+
};
static int st_accel_read_raw(struct iio_dev *indio_dev,
@@ -1170,8 +1247,7 @@ EXPORT_SYMBOL(st_accel_get_settings);
int st_accel_common_probe(struct iio_dev *indio_dev)
{
struct st_sensor_data *adata = iio_priv(indio_dev);
- struct st_sensors_platform_data *pdata =
- (struct st_sensors_platform_data *)adata->dev->platform_data;
+ struct st_sensors_platform_data *pdata = dev_get_platdata(adata->dev);
struct iio_chan_spec *channels;
size_t channels_size;
int err;
@@ -1204,8 +1280,7 @@ int st_accel_common_probe(struct iio_dev *indio_dev)
"failed to apply ACPI orientation data: %d\n", err);
indio_dev->channels = channels;
- adata->current_fullscale = (struct st_sensor_fullscale_avl *)
- &adata->sensor_settings->fs.fs_avl[0];
+ adata->current_fullscale = &adata->sensor_settings->fs.fs_avl[0];
adata->odr = adata->sensor_settings->odr.odr_avl[0].hz;
if (!pdata)
diff --git a/drivers/iio/accel/st_accel_i2c.c b/drivers/iio/accel/st_accel_i2c.c
index 6b283be26ebc..360e16f2cadb 100644
--- a/drivers/iio/accel/st_accel_i2c.c
+++ b/drivers/iio/accel/st_accel_i2c.c
@@ -104,6 +104,10 @@ static const struct of_device_id st_accel_of_match[] = {
.compatible = "st,lis2de12",
.data = LIS2DE12_ACCEL_DEV_NAME,
},
+ {
+ .compatible = "st,lis2hh12",
+ .data = LIS2HH12_ACCEL_DEV_NAME,
+ },
{},
};
MODULE_DEVICE_TABLE(of, st_accel_of_match);
@@ -138,6 +142,7 @@ static const struct i2c_device_id st_accel_id_table[] = {
{ LIS2DW12_ACCEL_DEV_NAME },
{ LIS3DE_ACCEL_DEV_NAME },
{ LIS2DE12_ACCEL_DEV_NAME },
+ { LIS2HH12_ACCEL_DEV_NAME },
{},
};
MODULE_DEVICE_TABLE(i2c, st_accel_id_table);
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 12bb8b7ca1ff..ff3569635ce0 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -246,6 +246,41 @@ config AD799X
To compile this driver as a module, choose M here: the module will be
called ad799x.
+config AD9467
+ tristate "Analog Devices AD9467 High Speed ADC driver"
+ depends on SPI
+ select ADI_AXI_ADC
+ help
+ Say yes here to build support for Analog Devices:
+ * AD9467 16-Bit, 200 MSPS/250 MSPS Analog-to-Digital Converter
+
+ The driver requires the assistance of the AXI ADC IP core to operate,
+ since SPI is used for configuration only, while data has to be
+ streamed into memory via DMA.
+
+ To compile this driver as a module, choose M here: the module will be
+ called ad9467.
+
+config ADI_AXI_ADC
+ tristate "Analog Devices Generic AXI ADC IP core driver"
+ select IIO_BUFFER
+ select IIO_BUFFER_HW_CONSUMER
+ select IIO_BUFFER_DMAENGINE
+ help
+ Say yes here to build support for Analog Devices Generic
+ AXI ADC IP core. The IP core is used for interfacing with
+ analog-to-digital (ADC) converters that require either a high-speed
+ serial interface (JESD204B/C) or a source synchronous parallel
+ interface (LVDS/CMOS).
+ Typically (for such devices) SPI will be used for configuration only,
+ while this IP core handles the streaming of data into memory via DMA.
+
+ Link: https://wiki.analog.com/resources/fpga/docs/axi_adc_ip
+ If unsure, say N (but it's safe to say "Y").
+
+ To compile this driver as a module, choose M here: the
+ module will be called adi-axi-adc.
+
config ASPEED_ADC
tristate "Aspeed ADC"
depends on ARCH_ASPEED || COMPILE_TEST
@@ -595,6 +630,16 @@ config MAX1118
To compile this driver as a module, choose M here: the module will be
called max1118.
+config MAX1241
+ tristate "Maxim max1241 ADC driver"
+ depends on SPI_MASTER
+ help
+ Say yes here to build support for Maxim max1241 12-bit, single-channel
+ ADC.
+
+ To compile this driver as a module, choose M here: the module will be
+ called max1241.
+
config MAX1363
tristate "Maxim max1363 ADC driver"
depends on I2C
@@ -692,6 +737,16 @@ config MESON_SARADC
To compile this driver as a module, choose M here: the
module will be called meson_saradc.
+config MP2629_ADC
+ tristate "Monolithic MP2629 ADC driver"
+ depends on MFD_MP2629
+ help
+ Say yes here to have support for the ADC on the MP2629 battery
+ charger IC, accessed over I2C.
+
+ This driver provides ADC conversion of system, input power supply
+ and battery voltage & current information.
+
config NAU7802
tristate "Nuvoton NAU7802 ADC driver"
depends on I2C
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index 637807861112..90f94ada7b30 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -26,6 +26,8 @@ obj-$(CONFIG_AD7793) += ad7793.o
obj-$(CONFIG_AD7887) += ad7887.o
obj-$(CONFIG_AD7949) += ad7949.o
obj-$(CONFIG_AD799X) += ad799x.o
+obj-$(CONFIG_AD9467) += ad9467.o
+obj-$(CONFIG_ADI_AXI_ADC) += adi-axi-adc.o
obj-$(CONFIG_ASPEED_ADC) += aspeed_adc.o
obj-$(CONFIG_AT91_ADC) += at91_adc.o
obj-$(CONFIG_AT91_SAMA5D2_ADC) += at91-sama5d2_adc.o
@@ -57,6 +59,7 @@ obj-$(CONFIG_LTC2497) += ltc2497.o ltc2497-core.o
obj-$(CONFIG_MAX1027) += max1027.o
obj-$(CONFIG_MAX11100) += max11100.o
obj-$(CONFIG_MAX1118) += max1118.o
+obj-$(CONFIG_MAX1241) += max1241.o
obj-$(CONFIG_MAX1363) += max1363.o
obj-$(CONFIG_MAX9611) += max9611.o
obj-$(CONFIG_MCP320X) += mcp320x.o
@@ -65,6 +68,7 @@ obj-$(CONFIG_MCP3911) += mcp3911.o
obj-$(CONFIG_MEDIATEK_MT6577_AUXADC) += mt6577_auxadc.o
obj-$(CONFIG_MEN_Z188_ADC) += men_z188_adc.o
obj-$(CONFIG_MESON_SARADC) += meson_saradc.o
+obj-$(CONFIG_MP2629_ADC) += mp2629_adc.o
obj-$(CONFIG_MXS_LRADC_ADC) += mxs-lradc-adc.o
obj-$(CONFIG_NAU7802) += nau7802.o
obj-$(CONFIG_NPCM_ADC) += npcm_adc.o
diff --git a/drivers/iio/adc/ad7476.c b/drivers/iio/adc/ad7476.c
index 76747488044b..4e816d714ad2 100644
--- a/drivers/iio/adc/ad7476.c
+++ b/drivers/iio/adc/ad7476.c
@@ -12,9 +12,11 @@
#include <linux/sysfs.h>
#include <linux/spi/spi.h>
#include <linux/regulator/consumer.h>
+#include <linux/gpio/consumer.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/bitops.h>
+#include <linux/delay.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -27,6 +29,8 @@ struct ad7476_state;
struct ad7476_chip_info {
unsigned int int_vref_uv;
struct iio_chan_spec channel[2];
+ /* channels used when convst gpio is defined */
+ struct iio_chan_spec convst_channel[2];
void (*reset)(struct ad7476_state *);
};
@@ -34,6 +38,7 @@ struct ad7476_state {
struct spi_device *spi;
const struct ad7476_chip_info *chip_info;
struct regulator *reg;
+ struct gpio_desc *convst_gpio;
struct spi_transfer xfer;
struct spi_message msg;
/*
@@ -64,6 +69,17 @@ enum ad7476_supported_device_ids {
ID_ADS7868,
};
+static void ad7091_convst(struct ad7476_state *st)
+{
+ if (!st->convst_gpio)
+ return;
+
+ gpiod_set_value(st->convst_gpio, 0);
+ udelay(1); /* CONVST pulse width: 10 ns min */
+ gpiod_set_value(st->convst_gpio, 1);
+ udelay(1); /* Conversion time: 650 ns max */
+}
+
static irqreturn_t ad7476_trigger_handler(int irq, void *p)
{
struct iio_poll_func *pf = p;
@@ -71,6 +87,8 @@ static irqreturn_t ad7476_trigger_handler(int irq, void *p)
struct ad7476_state *st = iio_priv(indio_dev);
int b_sent;
+ ad7091_convst(st);
+
b_sent = spi_sync(st->spi, &st->msg);
if (b_sent < 0)
goto done;
@@ -93,6 +111,8 @@ static int ad7476_scan_direct(struct ad7476_state *st)
{
int ret;
+ ad7091_convst(st);
+
ret = spi_sync(st->spi, &st->msg);
if (ret)
return ret;
@@ -160,6 +180,8 @@ static int ad7476_read_raw(struct iio_dev *indio_dev,
#define AD7940_CHAN(bits) _AD7476_CHAN((bits), 15 - (bits), \
BIT(IIO_CHAN_INFO_RAW))
#define AD7091R_CHAN(bits) _AD7476_CHAN((bits), 16 - (bits), 0)
+#define AD7091R_CONVST_CHAN(bits) _AD7476_CHAN((bits), 16 - (bits), \
+ BIT(IIO_CHAN_INFO_RAW))
#define ADS786X_CHAN(bits) _AD7476_CHAN((bits), 12 - (bits), \
BIT(IIO_CHAN_INFO_RAW))
@@ -167,6 +189,8 @@ static const struct ad7476_chip_info ad7476_chip_info_tbl[] = {
[ID_AD7091R] = {
.channel[0] = AD7091R_CHAN(12),
.channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+ .convst_channel[0] = AD7091R_CONVST_CHAN(12),
+ .convst_channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
.reset = ad7091_reset,
},
[ID_AD7276] = {
@@ -232,6 +256,13 @@ static const struct iio_info ad7476_info = {
.read_raw = &ad7476_read_raw,
};
+static void ad7476_reg_disable(void *data)
+{
+ struct ad7476_state *st = data;
+
+ regulator_disable(st->reg);
+}
+
static int ad7476_probe(struct spi_device *spi)
{
struct ad7476_state *st;
@@ -254,6 +285,17 @@ static int ad7476_probe(struct spi_device *spi)
if (ret)
return ret;
+ ret = devm_add_action_or_reset(&spi->dev, ad7476_reg_disable,
+ st);
+ if (ret)
+ return ret;
+
+ st->convst_gpio = devm_gpiod_get_optional(&spi->dev,
+ "adi,conversion-start",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(st->convst_gpio))
+ return PTR_ERR(st->convst_gpio);
+
spi_set_drvdata(spi, indio_dev);
st->spi = spi;
@@ -266,6 +308,9 @@ static int ad7476_probe(struct spi_device *spi)
indio_dev->channels = st->chip_info->channel;
indio_dev->num_channels = 2;
indio_dev->info = &ad7476_info;
+
+ if (st->convst_gpio)
+ indio_dev->channels = st->chip_info->convst_channel;
/* Setup default message */
st->xfer.rx_buf = &st->data;
@@ -295,19 +340,8 @@ error_disable_reg:
return ret;
}
-static int ad7476_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct ad7476_state *st = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
- iio_triggered_buffer_cleanup(indio_dev);
- regulator_disable(st->reg);
-
- return 0;
-}
-
static const struct spi_device_id ad7476_id[] = {
+ {"ad7091", ID_AD7091R},
{"ad7091r", ID_AD7091R},
{"ad7273", ID_AD7277},
{"ad7274", ID_AD7276},
@@ -343,7 +377,6 @@ static struct spi_driver ad7476_driver = {
.name = "ad7476",
},
.probe = ad7476_probe,
- .remove = ad7476_remove,
.id_table = ad7476_id,
};
module_spi_driver(ad7476_driver);
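
The ad7476 changes above drop the remove() callback by registering the regulator disable as a managed action. A minimal sketch of that pattern (the names are hypothetical); devm_add_action_or_reset() runs the action immediately if registration fails and otherwise on probe failure or device unbind:

#include <linux/device.h>
#include <linux/regulator/consumer.h>

static void example_reg_disable(void *data)
{
	regulator_disable(data);
}

static int example_enable_managed(struct device *dev, struct regulator *reg)
{
	int ret = regulator_enable(reg);

	if (ret)
		return ret;

	/* Regulator is disabled automatically when the device goes away. */
	return devm_add_action_or_reset(dev, example_reg_disable, reg);
}
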
diff --git a/drivers/iio/adc/ad7780.c b/drivers/iio/adc/ad7780.c
index 291c1a898129..f47606ebbbbe 100644
--- a/drivers/iio/adc/ad7780.c
+++ b/drivers/iio/adc/ad7780.c
@@ -206,10 +206,29 @@ static const struct ad_sigma_delta_info ad7780_sigma_delta_info = {
.irq_flags = IRQF_TRIGGER_LOW,
};
-#define AD7780_CHANNEL(bits, wordsize) \
- AD_SD_CHANNEL(1, 0, 0, bits, 32, (wordsize) - (bits))
-#define AD7170_CHANNEL(bits, wordsize) \
- AD_SD_CHANNEL_NO_SAMP_FREQ(1, 0, 0, bits, 32, (wordsize) - (bits))
+#define _AD7780_CHANNEL(_bits, _wordsize, _mask_all) \
+{ \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = 0, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_OFFSET), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_all = _mask_all, \
+ .scan_index = 1, \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = (_bits), \
+ .storagebits = 32, \
+ .shift = (_wordsize) - (_bits), \
+ .endianness = IIO_BE, \
+ }, \
+}
+
+#define AD7780_CHANNEL(_bits, _wordsize) \
+ _AD7780_CHANNEL(_bits, _wordsize, BIT(IIO_CHAN_INFO_SAMP_FREQ))
+#define AD7170_CHANNEL(_bits, _wordsize) \
+ _AD7780_CHANNEL(_bits, _wordsize, 0)
static const struct ad7780_chip_info ad7780_chip_info_tbl[] = {
[ID_AD7170] = {
diff --git a/drivers/iio/adc/ad7791.c b/drivers/iio/adc/ad7791.c
index abb239392631..48432b6f6002 100644
--- a/drivers/iio/adc/ad7791.c
+++ b/drivers/iio/adc/ad7791.c
@@ -64,25 +64,73 @@
#define AD7791_MODE_SEL_MASK (0x3 << 6)
#define AD7791_MODE_SEL(x) ((x) << 6)
+#define __AD7991_CHANNEL(_si, _channel1, _channel2, _address, _bits, \
+ _storagebits, _shift, _extend_name, _type, _mask_all) \
+ { \
+ .type = (_type), \
+ .differential = (_channel2 == -1 ? 0 : 1), \
+ .indexed = 1, \
+ .channel = (_channel1), \
+ .channel2 = (_channel2), \
+ .address = (_address), \
+ .extend_name = (_extend_name), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_OFFSET), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_all = _mask_all, \
+ .scan_index = (_si), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = (_bits), \
+ .storagebits = (_storagebits), \
+ .shift = (_shift), \
+ .endianness = IIO_BE, \
+ }, \
+ }
+
+#define AD7991_SHORTED_CHANNEL(_si, _channel, _address, _bits, \
+ _storagebits, _shift) \
+ __AD7991_CHANNEL(_si, _channel, _channel, _address, _bits, \
+ _storagebits, _shift, "shorted", IIO_VOLTAGE, \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ))
+
+#define AD7991_CHANNEL(_si, _channel, _address, _bits, \
+ _storagebits, _shift) \
+ __AD7991_CHANNEL(_si, _channel, -1, _address, _bits, \
+ _storagebits, _shift, NULL, IIO_VOLTAGE, \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ))
+
+#define AD7991_DIFF_CHANNEL(_si, _channel1, _channel2, _address, _bits, \
+ _storagebits, _shift) \
+ __AD7991_CHANNEL(_si, _channel1, _channel2, _address, _bits, \
+ _storagebits, _shift, NULL, IIO_VOLTAGE, \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ))
+
+#define AD7991_SUPPLY_CHANNEL(_si, _channel, _address, _bits, _storagebits, \
+ _shift) \
+ __AD7991_CHANNEL(_si, _channel, -1, _address, _bits, \
+ _storagebits, _shift, "supply", IIO_VOLTAGE, \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ))
+
#define DECLARE_AD7787_CHANNELS(name, bits, storagebits) \
const struct iio_chan_spec name[] = { \
- AD_SD_DIFF_CHANNEL(0, 0, 0, AD7791_CH_AIN1P_AIN1N, \
+ AD7991_DIFF_CHANNEL(0, 0, 0, AD7791_CH_AIN1P_AIN1N, \
(bits), (storagebits), 0), \
- AD_SD_CHANNEL(1, 1, AD7791_CH_AIN2, (bits), (storagebits), 0), \
- AD_SD_SHORTED_CHANNEL(2, 0, AD7791_CH_AIN1N_AIN1N, \
+ AD7991_CHANNEL(1, 1, AD7791_CH_AIN2, (bits), (storagebits), 0), \
+ AD7991_SHORTED_CHANNEL(2, 0, AD7791_CH_AIN1N_AIN1N, \
(bits), (storagebits), 0), \
- AD_SD_SUPPLY_CHANNEL(3, 2, AD7791_CH_AVDD_MONITOR, \
+ AD7991_SUPPLY_CHANNEL(3, 2, AD7791_CH_AVDD_MONITOR, \
(bits), (storagebits), 0), \
IIO_CHAN_SOFT_TIMESTAMP(4), \
}
#define DECLARE_AD7791_CHANNELS(name, bits, storagebits) \
const struct iio_chan_spec name[] = { \
- AD_SD_DIFF_CHANNEL(0, 0, 0, AD7791_CH_AIN1P_AIN1N, \
+ AD7991_DIFF_CHANNEL(0, 0, 0, AD7791_CH_AIN1P_AIN1N, \
(bits), (storagebits), 0), \
- AD_SD_SHORTED_CHANNEL(1, 0, AD7791_CH_AIN1N_AIN1N, \
+ AD7991_SHORTED_CHANNEL(1, 0, AD7791_CH_AIN1N_AIN1N, \
(bits), (storagebits), 0), \
- AD_SD_SUPPLY_CHANNEL(2, 1, AD7791_CH_AVDD_MONITOR, \
+ AD7991_SUPPLY_CHANNEL(2, 1, AD7791_CH_AVDD_MONITOR, \
(bits), (storagebits), 0), \
IIO_CHAN_SOFT_TIMESTAMP(3), \
}
@@ -444,5 +492,5 @@ static struct spi_driver ad7791_driver = {
module_spi_driver(ad7791_driver);
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
-MODULE_DESCRIPTION("Analog Device AD7787/AD7788/AD7789/AD7790/AD7791 ADC driver");
+MODULE_DESCRIPTION("Analog Devices AD7787/AD7788/AD7789/AD7790/AD7791 ADC driver");
MODULE_LICENSE("GPL v2");
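
As a cross-check of the open-coded macros above, this is what AD7991_DIFF_CHANNEL(0, 0, 0, AD7791_CH_AIN1P_AIN1N, 24, 32, 0) expands to; the 24-bit instantiation is chosen purely for illustration and follows directly from __AD7991_CHANNEL as defined in the hunk.

{
	.type = IIO_VOLTAGE,
	.differential = 1,		/* _channel2 != -1 */
	.indexed = 1,
	.channel = 0,
	.channel2 = 0,
	.address = AD7791_CH_AIN1P_AIN1N,
	.extend_name = NULL,
	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
		BIT(IIO_CHAN_INFO_OFFSET),
	.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
	.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
	.scan_index = 0,
	.scan_type = {
		.sign = 'u',
		.realbits = 24,
		.storagebits = 32,
		.shift = 0,
		.endianness = IIO_BE,
	},
},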
diff --git a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c
index e5691e330323..808485f42415 100644
--- a/drivers/iio/adc/ad7793.c
+++ b/drivers/iio/adc/ad7793.c
@@ -354,29 +354,28 @@ static IIO_CONST_ATTR_SAMP_FREQ_AVAIL(
static IIO_CONST_ATTR_NAMED(sampling_frequency_available_ad7797,
sampling_frequency_available, "123 62 50 33 17 16 12 10 8 6 4");
-static ssize_t ad7793_show_scale_available(struct device *dev,
- struct device_attribute *attr, char *buf)
+static int ad7793_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ad7793_state *st = iio_priv(indio_dev);
- int i, len = 0;
- for (i = 0; i < ARRAY_SIZE(st->scale_avail); i++)
- len += sprintf(buf + len, "%d.%09u ", st->scale_avail[i][0],
- st->scale_avail[i][1]);
-
- len += sprintf(buf + len, "\n");
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ *vals = (int *)st->scale_avail;
+ *type = IIO_VAL_INT_PLUS_NANO;
+ /* Values are stored in a 2D matrix */
+ *length = ARRAY_SIZE(st->scale_avail) * 2;
- return len;
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
}
-static IIO_DEVICE_ATTR_NAMED(in_m_in_scale_available,
- in_voltage-voltage_scale_available, S_IRUGO,
- ad7793_show_scale_available, NULL, 0);
-
static struct attribute *ad7793_attributes[] = {
&iio_const_attr_sampling_frequency_available.dev_attr.attr,
- &iio_dev_attr_in_m_in_scale_available.dev_attr.attr,
NULL
};
@@ -534,6 +533,7 @@ static const struct iio_info ad7793_info = {
.read_raw = &ad7793_read_raw,
.write_raw = &ad7793_write_raw,
.write_raw_get_fmt = &ad7793_write_raw_get_fmt,
+ .read_avail = ad7793_read_avail,
.attrs = &ad7793_attribute_group,
.validate_trigger = ad_sd_validate_trigger,
};
@@ -546,47 +546,113 @@ static const struct iio_info ad7797_info = {
.validate_trigger = ad_sd_validate_trigger,
};
+#define __AD7793_CHANNEL(_si, _channel1, _channel2, _address, _bits, \
+ _storagebits, _shift, _extend_name, _type, _mask_type_av, _mask_all) \
+ { \
+ .type = (_type), \
+ .differential = (_channel2 == -1 ? 0 : 1), \
+ .indexed = 1, \
+ .channel = (_channel1), \
+ .channel2 = (_channel2), \
+ .address = (_address), \
+ .extend_name = (_extend_name), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_OFFSET), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_type_available = (_mask_type_av), \
+ .info_mask_shared_by_all = _mask_all, \
+ .scan_index = (_si), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = (_bits), \
+ .storagebits = (_storagebits), \
+ .shift = (_shift), \
+ .endianness = IIO_BE, \
+ }, \
+ }
+
+#define AD7793_DIFF_CHANNEL(_si, _channel1, _channel2, _address, _bits, \
+ _storagebits, _shift) \
+ __AD7793_CHANNEL(_si, _channel1, _channel2, _address, _bits, \
+ _storagebits, _shift, NULL, IIO_VOLTAGE, \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ))
+
+#define AD7793_SHORTED_CHANNEL(_si, _channel, _address, _bits, \
+ _storagebits, _shift) \
+ __AD7793_CHANNEL(_si, _channel, _channel, _address, _bits, \
+ _storagebits, _shift, "shorted", IIO_VOLTAGE, \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ))
+
+#define AD7793_TEMP_CHANNEL(_si, _address, _bits, _storagebits, _shift) \
+ __AD7793_CHANNEL(_si, 0, -1, _address, _bits, \
+ _storagebits, _shift, NULL, IIO_TEMP, \
+ 0, \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ))
+
+#define AD7793_SUPPLY_CHANNEL(_si, _channel, _address, _bits, _storagebits, \
+ _shift) \
+ __AD7793_CHANNEL(_si, _channel, -1, _address, _bits, \
+ _storagebits, _shift, "supply", IIO_VOLTAGE, \
+ 0, \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ))
+
+#define AD7797_DIFF_CHANNEL(_si, _channel1, _channel2, _address, _bits, \
+ _storagebits, _shift) \
+ __AD7793_CHANNEL(_si, _channel1, _channel2, _address, _bits, \
+ _storagebits, _shift, NULL, IIO_VOLTAGE, \
+ 0, \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ))
+
+#define AD7797_SHORTED_CHANNEL(_si, _channel, _address, _bits, \
+ _storagebits, _shift) \
+ __AD7793_CHANNEL(_si, _channel, _channel, _address, _bits, \
+ _storagebits, _shift, "shorted", IIO_VOLTAGE, \
+ 0, \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ))
+
#define DECLARE_AD7793_CHANNELS(_name, _b, _sb, _s) \
const struct iio_chan_spec _name##_channels[] = { \
- AD_SD_DIFF_CHANNEL(0, 0, 0, AD7793_CH_AIN1P_AIN1M, (_b), (_sb), (_s)), \
- AD_SD_DIFF_CHANNEL(1, 1, 1, AD7793_CH_AIN2P_AIN2M, (_b), (_sb), (_s)), \
- AD_SD_DIFF_CHANNEL(2, 2, 2, AD7793_CH_AIN3P_AIN3M, (_b), (_sb), (_s)), \
- AD_SD_SHORTED_CHANNEL(3, 0, AD7793_CH_AIN1M_AIN1M, (_b), (_sb), (_s)), \
- AD_SD_TEMP_CHANNEL(4, AD7793_CH_TEMP, (_b), (_sb), (_s)), \
- AD_SD_SUPPLY_CHANNEL(5, 3, AD7793_CH_AVDD_MONITOR, (_b), (_sb), (_s)), \
+ AD7793_DIFF_CHANNEL(0, 0, 0, AD7793_CH_AIN1P_AIN1M, (_b), (_sb), (_s)), \
+ AD7793_DIFF_CHANNEL(1, 1, 1, AD7793_CH_AIN2P_AIN2M, (_b), (_sb), (_s)), \
+ AD7793_DIFF_CHANNEL(2, 2, 2, AD7793_CH_AIN3P_AIN3M, (_b), (_sb), (_s)), \
+ AD7793_SHORTED_CHANNEL(3, 0, AD7793_CH_AIN1M_AIN1M, (_b), (_sb), (_s)), \
+ AD7793_TEMP_CHANNEL(4, AD7793_CH_TEMP, (_b), (_sb), (_s)), \
+ AD7793_SUPPLY_CHANNEL(5, 3, AD7793_CH_AVDD_MONITOR, (_b), (_sb), (_s)), \
IIO_CHAN_SOFT_TIMESTAMP(6), \
}
#define DECLARE_AD7795_CHANNELS(_name, _b, _sb) \
const struct iio_chan_spec _name##_channels[] = { \
- AD_SD_DIFF_CHANNEL(0, 0, 0, AD7793_CH_AIN1P_AIN1M, (_b), (_sb), 0), \
- AD_SD_DIFF_CHANNEL(1, 1, 1, AD7793_CH_AIN2P_AIN2M, (_b), (_sb), 0), \
- AD_SD_DIFF_CHANNEL(2, 2, 2, AD7793_CH_AIN3P_AIN3M, (_b), (_sb), 0), \
- AD_SD_DIFF_CHANNEL(3, 3, 3, AD7795_CH_AIN4P_AIN4M, (_b), (_sb), 0), \
- AD_SD_DIFF_CHANNEL(4, 4, 4, AD7795_CH_AIN5P_AIN5M, (_b), (_sb), 0), \
- AD_SD_DIFF_CHANNEL(5, 5, 5, AD7795_CH_AIN6P_AIN6M, (_b), (_sb), 0), \
- AD_SD_SHORTED_CHANNEL(6, 0, AD7795_CH_AIN1M_AIN1M, (_b), (_sb), 0), \
- AD_SD_TEMP_CHANNEL(7, AD7793_CH_TEMP, (_b), (_sb), 0), \
- AD_SD_SUPPLY_CHANNEL(8, 3, AD7793_CH_AVDD_MONITOR, (_b), (_sb), 0), \
+ AD7793_DIFF_CHANNEL(0, 0, 0, AD7793_CH_AIN1P_AIN1M, (_b), (_sb), 0), \
+ AD7793_DIFF_CHANNEL(1, 1, 1, AD7793_CH_AIN2P_AIN2M, (_b), (_sb), 0), \
+ AD7793_DIFF_CHANNEL(2, 2, 2, AD7793_CH_AIN3P_AIN3M, (_b), (_sb), 0), \
+ AD7793_DIFF_CHANNEL(3, 3, 3, AD7795_CH_AIN4P_AIN4M, (_b), (_sb), 0), \
+ AD7793_DIFF_CHANNEL(4, 4, 4, AD7795_CH_AIN5P_AIN5M, (_b), (_sb), 0), \
+ AD7793_DIFF_CHANNEL(5, 5, 5, AD7795_CH_AIN6P_AIN6M, (_b), (_sb), 0), \
+ AD7793_SHORTED_CHANNEL(6, 0, AD7795_CH_AIN1M_AIN1M, (_b), (_sb), 0), \
+ AD7793_TEMP_CHANNEL(7, AD7793_CH_TEMP, (_b), (_sb), 0), \
+ AD7793_SUPPLY_CHANNEL(8, 3, AD7793_CH_AVDD_MONITOR, (_b), (_sb), 0), \
IIO_CHAN_SOFT_TIMESTAMP(9), \
}
#define DECLARE_AD7797_CHANNELS(_name, _b, _sb) \
const struct iio_chan_spec _name##_channels[] = { \
- AD_SD_DIFF_CHANNEL(0, 0, 0, AD7793_CH_AIN1P_AIN1M, (_b), (_sb), 0), \
- AD_SD_SHORTED_CHANNEL(1, 0, AD7793_CH_AIN1M_AIN1M, (_b), (_sb), 0), \
- AD_SD_TEMP_CHANNEL(2, AD7793_CH_TEMP, (_b), (_sb), 0), \
- AD_SD_SUPPLY_CHANNEL(3, 3, AD7793_CH_AVDD_MONITOR, (_b), (_sb), 0), \
+ AD7797_DIFF_CHANNEL(0, 0, 0, AD7793_CH_AIN1P_AIN1M, (_b), (_sb), 0), \
+ AD7797_SHORTED_CHANNEL(1, 0, AD7793_CH_AIN1M_AIN1M, (_b), (_sb), 0), \
+ AD7793_TEMP_CHANNEL(2, AD7793_CH_TEMP, (_b), (_sb), 0), \
+ AD7793_SUPPLY_CHANNEL(3, 3, AD7793_CH_AVDD_MONITOR, (_b), (_sb), 0), \
IIO_CHAN_SOFT_TIMESTAMP(4), \
}
#define DECLARE_AD7799_CHANNELS(_name, _b, _sb) \
const struct iio_chan_spec _name##_channels[] = { \
- AD_SD_DIFF_CHANNEL(0, 0, 0, AD7793_CH_AIN1P_AIN1M, (_b), (_sb), 0), \
- AD_SD_DIFF_CHANNEL(1, 1, 1, AD7793_CH_AIN2P_AIN2M, (_b), (_sb), 0), \
- AD_SD_DIFF_CHANNEL(2, 2, 2, AD7793_CH_AIN3P_AIN3M, (_b), (_sb), 0), \
- AD_SD_SHORTED_CHANNEL(3, 0, AD7793_CH_AIN1M_AIN1M, (_b), (_sb), 0), \
- AD_SD_SUPPLY_CHANNEL(4, 3, AD7793_CH_AVDD_MONITOR, (_b), (_sb), 0), \
+ AD7793_DIFF_CHANNEL(0, 0, 0, AD7793_CH_AIN1P_AIN1M, (_b), (_sb), 0), \
+ AD7793_DIFF_CHANNEL(1, 1, 1, AD7793_CH_AIN2P_AIN2M, (_b), (_sb), 0), \
+ AD7793_DIFF_CHANNEL(2, 2, 2, AD7793_CH_AIN3P_AIN3M, (_b), (_sb), 0), \
+ AD7793_SHORTED_CHANNEL(3, 0, AD7793_CH_AIN1M_AIN1M, (_b), (_sb), 0), \
+ AD7793_SUPPLY_CHANNEL(4, 3, AD7793_CH_AVDD_MONITOR, (_b), (_sb), 0), \
IIO_CHAN_SOFT_TIMESTAMP(5), \
}
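
For reference, replacing the private in_voltage-voltage_scale_available attribute with .read_avail means the IIO core now generates the *_scale_available file from the flattened 2D table. A minimal sketch of that contract, with made-up values:

/* two {int, nano} pairs, flattened to length = ARRAY_SIZE(tbl) * 2 */
static const int example_scale_avail[2][2] = {
	{ 0, 579834 },	/* shown as "0.000579834" (illustrative) */
	{ 0, 289917 },	/* shown as "0.000289917" (illustrative) */
};

/*
 * In read_avail():
 *	*vals   = (int *)example_scale_avail;
 *	*type   = IIO_VAL_INT_PLUS_NANO;
 *	*length = ARRAY_SIZE(example_scale_avail) * 2;
 *	return IIO_AVAIL_LIST;
 */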
diff --git a/drivers/iio/adc/ad9467.c b/drivers/iio/adc/ad9467.c
new file mode 100644
index 000000000000..1e8fd83b9bc2
--- /dev/null
+++ b/drivers/iio/adc/ad9467.c
@@ -0,0 +1,422 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Analog Devices AD9467 SPI ADC driver
+ *
+ * Copyright 2012-2020 Analog Devices Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of_device.h>
+
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+#include <linux/clk.h>
+
+#include <linux/iio/adc/adi-axi-adc.h>
+
+/*
+ * ADI High-Speed ADC common spi interface registers
+ * See Application-Note AN-877:
+ * https://www.analog.com/media/en/technical-documentation/application-notes/AN-877.pdf
+ */
+
+#define AN877_ADC_REG_CHIP_PORT_CONF 0x00
+#define AN877_ADC_REG_CHIP_ID 0x01
+#define AN877_ADC_REG_CHIP_GRADE 0x02
+#define AN877_ADC_REG_CHAN_INDEX 0x05
+#define AN877_ADC_REG_TRANSFER 0xFF
+#define AN877_ADC_REG_MODES 0x08
+#define AN877_ADC_REG_TEST_IO 0x0D
+#define AN877_ADC_REG_ADC_INPUT 0x0F
+#define AN877_ADC_REG_OFFSET 0x10
+#define AN877_ADC_REG_OUTPUT_MODE 0x14
+#define AN877_ADC_REG_OUTPUT_ADJUST 0x15
+#define AN877_ADC_REG_OUTPUT_PHASE 0x16
+#define AN877_ADC_REG_OUTPUT_DELAY 0x17
+#define AN877_ADC_REG_VREF 0x18
+#define AN877_ADC_REG_ANALOG_INPUT 0x2C
+
+/* AN877_ADC_REG_TEST_IO */
+#define AN877_ADC_TESTMODE_OFF 0x0
+#define AN877_ADC_TESTMODE_MIDSCALE_SHORT 0x1
+#define AN877_ADC_TESTMODE_POS_FULLSCALE 0x2
+#define AN877_ADC_TESTMODE_NEG_FULLSCALE 0x3
+#define AN877_ADC_TESTMODE_ALT_CHECKERBOARD 0x4
+#define AN877_ADC_TESTMODE_PN23_SEQ 0x5
+#define AN877_ADC_TESTMODE_PN9_SEQ 0x6
+#define AN877_ADC_TESTMODE_ONE_ZERO_TOGGLE 0x7
+#define AN877_ADC_TESTMODE_USER 0x8
+#define AN877_ADC_TESTMODE_BIT_TOGGLE 0x9
+#define AN877_ADC_TESTMODE_SYNC 0xA
+#define AN877_ADC_TESTMODE_ONE_BIT_HIGH 0xB
+#define AN877_ADC_TESTMODE_MIXED_BIT_FREQUENCY 0xC
+#define AN877_ADC_TESTMODE_RAMP 0xF
+
+/* AN877_ADC_REG_TRANSFER */
+#define AN877_ADC_TRANSFER_SYNC 0x1
+
+/* AN877_ADC_REG_OUTPUT_MODE */
+#define AN877_ADC_OUTPUT_MODE_OFFSET_BINARY 0x0
+#define AN877_ADC_OUTPUT_MODE_TWOS_COMPLEMENT 0x1
+#define AN877_ADC_OUTPUT_MODE_GRAY_CODE 0x2
+
+/* AN877_ADC_REG_OUTPUT_PHASE */
+#define AN877_ADC_OUTPUT_EVEN_ODD_MODE_EN 0x20
+#define AN877_ADC_INVERT_DCO_CLK 0x80
+
+/* AN877_ADC_REG_OUTPUT_DELAY */
+#define AN877_ADC_DCO_DELAY_ENABLE 0x80
+
+/*
+ * Analog Devices AD9467 16-Bit, 200/250 MSPS ADC
+ */
+
+#define CHIPID_AD9467 0x50
+#define AD9467_DEF_OUTPUT_MODE 0x08
+#define AD9467_REG_VREF_MASK 0x0F
+
+enum {
+ ID_AD9467,
+};
+
+struct ad9467_state {
+ struct spi_device *spi;
+ struct clk *clk;
+ unsigned int output_mode;
+
+ struct gpio_desc *pwrdown_gpio;
+ struct gpio_desc *reset_gpio;
+};
+
+static int ad9467_spi_read(struct spi_device *spi, unsigned int reg)
+{
+ unsigned char tbuf[2], rbuf[1];
+ int ret;
+
+ tbuf[0] = 0x80 | (reg >> 8);
+ tbuf[1] = reg & 0xFF;
+
+ ret = spi_write_then_read(spi,
+ tbuf, ARRAY_SIZE(tbuf),
+ rbuf, ARRAY_SIZE(rbuf));
+
+ if (ret < 0)
+ return ret;
+
+ return rbuf[0];
+}
+
+static int ad9467_spi_write(struct spi_device *spi, unsigned int reg,
+ unsigned int val)
+{
+ unsigned char buf[3];
+
+ buf[0] = reg >> 8;
+ buf[1] = reg & 0xFF;
+ buf[2] = val;
+
+ return spi_write(spi, buf, ARRAY_SIZE(buf));
+}
+
+static int ad9467_reg_access(struct adi_axi_adc_conv *conv, unsigned int reg,
+ unsigned int writeval, unsigned int *readval)
+{
+ struct ad9467_state *st = adi_axi_adc_conv_priv(conv);
+ struct spi_device *spi = st->spi;
+ int ret;
+
+ if (readval == NULL) {
+ ret = ad9467_spi_write(spi, reg, writeval);
+ ad9467_spi_write(spi, AN877_ADC_REG_TRANSFER,
+ AN877_ADC_TRANSFER_SYNC);
+ return ret;
+ }
+
+ ret = ad9467_spi_read(spi, reg);
+ if (ret < 0)
+ return ret;
+ *readval = ret;
+
+ return 0;
+}
+
+static const unsigned int ad9467_scale_table[][2] = {
+ {2000, 0}, {2100, 6}, {2200, 7},
+ {2300, 8}, {2400, 9}, {2500, 10},
+};
+
+static void __ad9467_get_scale(struct adi_axi_adc_conv *conv, int index,
+ unsigned int *val, unsigned int *val2)
+{
+ const struct adi_axi_adc_chip_info *info = conv->chip_info;
+ const struct iio_chan_spec *chan = &info->channels[0];
+ unsigned int tmp;
+
+ tmp = (info->scale_table[index][0] * 1000000ULL) >>
+ chan->scan_type.realbits;
+ *val = tmp / 1000000;
+ *val2 = tmp % 1000000;
+}
+
+#define AD9467_CHAN(_chan, _si, _bits, _sign) \
+{ \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = _chan, \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .scan_index = _si, \
+ .scan_type = { \
+ .sign = _sign, \
+ .realbits = _bits, \
+ .storagebits = 16, \
+ }, \
+}
+
+static const struct iio_chan_spec ad9467_channels[] = {
+ AD9467_CHAN(0, 0, 16, 'S'),
+};
+
+static const struct adi_axi_adc_chip_info ad9467_chip_tbl[] = {
+ [ID_AD9467] = {
+ .id = CHIPID_AD9467,
+ .max_rate = 250000000UL,
+ .scale_table = ad9467_scale_table,
+ .num_scales = ARRAY_SIZE(ad9467_scale_table),
+ .channels = ad9467_channels,
+ .num_channels = ARRAY_SIZE(ad9467_channels),
+ },
+};
+
+static int ad9467_get_scale(struct adi_axi_adc_conv *conv, int *val, int *val2)
+{
+ const struct adi_axi_adc_chip_info *info = conv->chip_info;
+ struct ad9467_state *st = adi_axi_adc_conv_priv(conv);
+ unsigned int i, vref_val, vref_mask;
+
+ vref_val = ad9467_spi_read(st->spi, AN877_ADC_REG_VREF);
+
+ switch (info->id) {
+ case CHIPID_AD9467:
+ vref_mask = AD9467_REG_VREF_MASK;
+ break;
+ default:
+ vref_mask = 0xFFFF;
+ break;
+ }
+
+ vref_val &= vref_mask;
+
+ for (i = 0; i < info->num_scales; i++) {
+ if (vref_val == info->scale_table[i][1])
+ break;
+ }
+
+ if (i == info->num_scales)
+ return -ERANGE;
+
+ __ad9467_get_scale(conv, i, val, val2);
+
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+
+static int ad9467_set_scale(struct adi_axi_adc_conv *conv, int val, int val2)
+{
+ const struct adi_axi_adc_chip_info *info = conv->chip_info;
+ struct ad9467_state *st = adi_axi_adc_conv_priv(conv);
+ unsigned int scale_val[2];
+ unsigned int i;
+
+ if (val != 0)
+ return -EINVAL;
+
+ for (i = 0; i < info->num_scales; i++) {
+ __ad9467_get_scale(conv, i, &scale_val[0], &scale_val[1]);
+ if (scale_val[0] != val || scale_val[1] != val2)
+ continue;
+
+ ad9467_spi_write(st->spi, AN877_ADC_REG_VREF,
+ info->scale_table[i][1]);
+ ad9467_spi_write(st->spi, AN877_ADC_REG_TRANSFER,
+ AN877_ADC_TRANSFER_SYNC);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int ad9467_read_raw(struct adi_axi_adc_conv *conv,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long m)
+{
+ struct ad9467_state *st = adi_axi_adc_conv_priv(conv);
+
+ switch (m) {
+ case IIO_CHAN_INFO_SCALE:
+ return ad9467_get_scale(conv, val, val2);
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *val = clk_get_rate(st->clk);
+
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ad9467_write_raw(struct adi_axi_adc_conv *conv,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ const struct adi_axi_adc_chip_info *info = conv->chip_info;
+ struct ad9467_state *st = adi_axi_adc_conv_priv(conv);
+ long r_clk;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ return ad9467_set_scale(conv, val, val2);
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ r_clk = clk_round_rate(st->clk, val);
+ if (r_clk < 0 || r_clk > info->max_rate) {
+ dev_warn(&st->spi->dev,
+ "Error setting ADC sample rate %ld", r_clk);
+ return -EINVAL;
+ }
+
+ return clk_set_rate(st->clk, r_clk);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ad9467_outputmode_set(struct spi_device *spi, unsigned int mode)
+{
+ int ret;
+
+ ret = ad9467_spi_write(spi, AN877_ADC_REG_OUTPUT_MODE, mode);
+ if (ret < 0)
+ return ret;
+
+ return ad9467_spi_write(spi, AN877_ADC_REG_TRANSFER,
+ AN877_ADC_TRANSFER_SYNC);
+}
+
+static int ad9467_preenable_setup(struct adi_axi_adc_conv *conv)
+{
+ struct ad9467_state *st = adi_axi_adc_conv_priv(conv);
+
+ return ad9467_outputmode_set(st->spi, st->output_mode);
+}
+
+static int ad9467_setup(struct ad9467_state *st, unsigned int chip_id)
+{
+ switch (chip_id) {
+ case CHIPID_AD9467:
+ st->output_mode = AD9467_DEF_OUTPUT_MODE |
+ AN877_ADC_OUTPUT_MODE_TWOS_COMPLEMENT;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static void ad9467_clk_disable(void *data)
+{
+ struct ad9467_state *st = data;
+
+ clk_disable_unprepare(st->clk);
+}
+
+static int ad9467_probe(struct spi_device *spi)
+{
+ const struct adi_axi_adc_chip_info *info;
+ struct adi_axi_adc_conv *conv;
+ struct ad9467_state *st;
+ unsigned int id;
+ int ret;
+
+ info = of_device_get_match_data(&spi->dev);
+ if (!info)
+ return -ENODEV;
+
+ conv = devm_adi_axi_adc_conv_register(&spi->dev, sizeof(*st));
+ if (IS_ERR(conv))
+ return PTR_ERR(conv);
+
+ st = adi_axi_adc_conv_priv(conv);
+ st->spi = spi;
+
+ st->clk = devm_clk_get(&spi->dev, "adc-clk");
+ if (IS_ERR(st->clk))
+ return PTR_ERR(st->clk);
+
+ ret = clk_prepare_enable(st->clk);
+ if (ret < 0)
+ return ret;
+
+ ret = devm_add_action_or_reset(&spi->dev, ad9467_clk_disable, st);
+ if (ret)
+ return ret;
+
+ st->pwrdown_gpio = devm_gpiod_get_optional(&spi->dev, "powerdown",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(st->pwrdown_gpio))
+ return PTR_ERR(st->pwrdown_gpio);
+
+ st->reset_gpio = devm_gpiod_get_optional(&spi->dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(st->reset_gpio))
+ return PTR_ERR(st->reset_gpio);
+
+ if (st->reset_gpio) {
+ udelay(1);
+ ret = gpiod_direction_output(st->reset_gpio, 1);
+ if (ret)
+ return ret;
+ mdelay(10);
+ }
+
+ spi_set_drvdata(spi, st);
+
+ conv->chip_info = info;
+
+ id = ad9467_spi_read(spi, AN877_ADC_REG_CHIP_ID);
+ if (id != conv->chip_info->id) {
+ dev_err(&spi->dev, "Unrecognized CHIP_ID 0x%X\n", id);
+ return -ENODEV;
+ }
+
+ conv->reg_access = ad9467_reg_access;
+ conv->write_raw = ad9467_write_raw;
+ conv->read_raw = ad9467_read_raw;
+ conv->preenable_setup = ad9467_preenable_setup;
+
+ return ad9467_setup(st, id);
+}
+
+static const struct of_device_id ad9467_of_match[] = {
+ { .compatible = "adi,ad9467", .data = &ad9467_chip_tbl[ID_AD9467], },
+ {}
+};
+MODULE_DEVICE_TABLE(of, ad9467_of_match);
+
+static struct spi_driver ad9467_driver = {
+ .driver = {
+ .name = "ad9467",
+ .of_match_table = ad9467_of_match,
+ },
+ .probe = ad9467_probe,
+};
+module_spi_driver(ad9467_driver);
+
+MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
+MODULE_DESCRIPTION("Analog Devices AD9467 ADC driver");
+MODULE_LICENSE("GPL v2");
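
A worked instance of the arithmetic in __ad9467_get_scale(), assuming the {2000, 0} table entry denotes a 2.0 V span (millivolts) and the 16-bit channel defined above:

/*
 *	tmp   = (2000 * 1000000ULL) >> 16 = 30517
 *	*val  = 30517 / 1000000 = 0
 *	*val2 = 30517 % 1000000 = 30517
 *
 * so ad9467_get_scale() returns IIO_VAL_INT_PLUS_MICRO and the reported
 * scale is 0.030517, i.e. roughly 30.5 uV-equivalent per code.
 */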
diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c
index 8115b6de1d6c..dd3d54b3bc8b 100644
--- a/drivers/iio/adc/ad_sigma_delta.c
+++ b/drivers/iio/adc/ad_sigma_delta.c
@@ -70,9 +70,7 @@ int ad_sd_write_reg(struct ad_sigma_delta *sigma_delta, unsigned int reg,
switch (size) {
case 3:
- data[1] = val >> 16;
- data[2] = val >> 8;
- data[3] = val;
+ put_unaligned_be24(val, &data[1]);
break;
case 2:
put_unaligned_be16(val, &data[1]);
@@ -157,9 +155,7 @@ int ad_sd_read_reg(struct ad_sigma_delta *sigma_delta,
*val = get_unaligned_be32(sigma_delta->data);
break;
case 3:
- *val = (sigma_delta->data[0] << 16) |
- (sigma_delta->data[1] << 8) |
- sigma_delta->data[2];
+ *val = get_unaligned_be24(&sigma_delta->data[0]);
break;
case 2:
*val = get_unaligned_be16(sigma_delta->data);
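
The byte-order helpers used in this hunk come from <asm/unaligned.h>; a small sketch of their equivalence to the removed open-coded shifts:

#include <asm/unaligned.h>

static void example_write_be24(u8 *buf, unsigned int val)
{
	/* same as: buf[0] = val >> 16; buf[1] = val >> 8; buf[2] = val; */
	put_unaligned_be24(val, buf);
}

static unsigned int example_read_be24(const u8 *buf)
{
	/* same as: (buf[0] << 16) | (buf[1] << 8) | buf[2] */
	return get_unaligned_be24(buf);
}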
diff --git a/drivers/iio/adc/adi-axi-adc.c b/drivers/iio/adc/adi-axi-adc.c
new file mode 100644
index 000000000000..c24c8da99eb4
--- /dev/null
+++ b/drivers/iio/adc/adi-axi-adc.c
@@ -0,0 +1,482 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Analog Devices Generic AXI ADC IP core
+ * Link: https://wiki.analog.com/resources/fpga/docs/axi_adc_ip
+ *
+ * Copyright 2012-2020 Analog Devices Inc.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/buffer-dmaengine.h>
+
+#include <linux/fpga/adi-axi-common.h>
+#include <linux/iio/adc/adi-axi-adc.h>
+
+/**
+ * Register definitions:
+ * https://wiki.analog.com/resources/fpga/docs/axi_adc_ip#register_map
+ */
+
+/* ADC controls */
+
+#define ADI_AXI_REG_RSTN 0x0040
+#define ADI_AXI_REG_RSTN_CE_N BIT(2)
+#define ADI_AXI_REG_RSTN_MMCM_RSTN BIT(1)
+#define ADI_AXI_REG_RSTN_RSTN BIT(0)
+
+/* ADC Channel controls */
+
+#define ADI_AXI_REG_CHAN_CTRL(c) (0x0400 + (c) * 0x40)
+#define ADI_AXI_REG_CHAN_CTRL_LB_OWR BIT(11)
+#define ADI_AXI_REG_CHAN_CTRL_PN_SEL_OWR BIT(10)
+#define ADI_AXI_REG_CHAN_CTRL_IQCOR_EN BIT(9)
+#define ADI_AXI_REG_CHAN_CTRL_DCFILT_EN BIT(8)
+#define ADI_AXI_REG_CHAN_CTRL_FMT_SIGNEXT BIT(6)
+#define ADI_AXI_REG_CHAN_CTRL_FMT_TYPE BIT(5)
+#define ADI_AXI_REG_CHAN_CTRL_FMT_EN BIT(4)
+#define ADI_AXI_REG_CHAN_CTRL_PN_TYPE_OWR BIT(1)
+#define ADI_AXI_REG_CHAN_CTRL_ENABLE BIT(0)
+
+#define ADI_AXI_REG_CHAN_CTRL_DEFAULTS \
+ (ADI_AXI_REG_CHAN_CTRL_FMT_SIGNEXT | \
+ ADI_AXI_REG_CHAN_CTRL_FMT_EN | \
+ ADI_AXI_REG_CHAN_CTRL_ENABLE)
+
+struct adi_axi_adc_core_info {
+ unsigned int version;
+};
+
+struct adi_axi_adc_state {
+ struct mutex lock;
+
+ struct adi_axi_adc_client *client;
+ void __iomem *regs;
+};
+
+struct adi_axi_adc_client {
+ struct list_head entry;
+ struct adi_axi_adc_conv conv;
+ struct adi_axi_adc_state *state;
+ struct device *dev;
+ const struct adi_axi_adc_core_info *info;
+};
+
+static LIST_HEAD(registered_clients);
+static DEFINE_MUTEX(registered_clients_lock);
+
+static struct adi_axi_adc_client *conv_to_client(struct adi_axi_adc_conv *conv)
+{
+ return container_of(conv, struct adi_axi_adc_client, conv);
+}
+
+void *adi_axi_adc_conv_priv(struct adi_axi_adc_conv *conv)
+{
+ struct adi_axi_adc_client *cl = conv_to_client(conv);
+
+ return (char *)cl + ALIGN(sizeof(struct adi_axi_adc_client), IIO_ALIGN);
+}
+EXPORT_SYMBOL_GPL(adi_axi_adc_conv_priv);
+
+static void adi_axi_adc_write(struct adi_axi_adc_state *st,
+ unsigned int reg,
+ unsigned int val)
+{
+ iowrite32(val, st->regs + reg);
+}
+
+static unsigned int adi_axi_adc_read(struct adi_axi_adc_state *st,
+ unsigned int reg)
+{
+ return ioread32(st->regs + reg);
+}
+
+static int adi_axi_adc_config_dma_buffer(struct device *dev,
+ struct iio_dev *indio_dev)
+{
+ struct iio_buffer *buffer;
+ const char *dma_name;
+
+ if (!device_property_present(dev, "dmas"))
+ return 0;
+
+ if (device_property_read_string(dev, "dma-names", &dma_name))
+ dma_name = "rx";
+
+ buffer = devm_iio_dmaengine_buffer_alloc(indio_dev->dev.parent,
+ dma_name);
+ if (IS_ERR(buffer))
+ return PTR_ERR(buffer);
+
+ indio_dev->modes |= INDIO_BUFFER_HARDWARE;
+ iio_device_attach_buffer(indio_dev, buffer);
+
+ return 0;
+}
+
+static int adi_axi_adc_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct adi_axi_adc_state *st = iio_priv(indio_dev);
+ struct adi_axi_adc_conv *conv = &st->client->conv;
+
+ if (!conv->read_raw)
+ return -EOPNOTSUPP;
+
+ return conv->read_raw(conv, chan, val, val2, mask);
+}
+
+static int adi_axi_adc_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct adi_axi_adc_state *st = iio_priv(indio_dev);
+ struct adi_axi_adc_conv *conv = &st->client->conv;
+
+ if (!conv->write_raw)
+ return -EOPNOTSUPP;
+
+ return conv->write_raw(conv, chan, val, val2, mask);
+}
+
+static int adi_axi_adc_update_scan_mode(struct iio_dev *indio_dev,
+ const unsigned long *scan_mask)
+{
+ struct adi_axi_adc_state *st = iio_priv(indio_dev);
+ struct adi_axi_adc_conv *conv = &st->client->conv;
+ unsigned int i, ctrl;
+
+ for (i = 0; i < conv->chip_info->num_channels; i++) {
+ ctrl = adi_axi_adc_read(st, ADI_AXI_REG_CHAN_CTRL(i));
+
+ if (test_bit(i, scan_mask))
+ ctrl |= ADI_AXI_REG_CHAN_CTRL_ENABLE;
+ else
+ ctrl &= ~ADI_AXI_REG_CHAN_CTRL_ENABLE;
+
+ adi_axi_adc_write(st, ADI_AXI_REG_CHAN_CTRL(i), ctrl);
+ }
+
+ return 0;
+}
+
+static struct adi_axi_adc_conv *adi_axi_adc_conv_register(struct device *dev,
+ size_t sizeof_priv)
+{
+ struct adi_axi_adc_client *cl;
+ size_t alloc_size;
+
+ alloc_size = ALIGN(sizeof(struct adi_axi_adc_client), IIO_ALIGN);
+ if (sizeof_priv)
+ alloc_size += ALIGN(sizeof_priv, IIO_ALIGN);
+
+ cl = kzalloc(alloc_size, GFP_KERNEL);
+ if (!cl)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_lock(&registered_clients_lock);
+
+ cl->dev = get_device(dev);
+
+ list_add_tail(&cl->entry, &registered_clients);
+
+ mutex_unlock(&registered_clients_lock);
+
+ return &cl->conv;
+}
+
+static void adi_axi_adc_conv_unregister(struct adi_axi_adc_conv *conv)
+{
+ struct adi_axi_adc_client *cl = conv_to_client(conv);
+
+ mutex_lock(&registered_clients_lock);
+
+ list_del(&cl->entry);
+ put_device(cl->dev);
+
+ mutex_unlock(&registered_clients_lock);
+
+ kfree(cl);
+}
+
+static void devm_adi_axi_adc_conv_release(struct device *dev, void *res)
+{
+ adi_axi_adc_conv_unregister(*(struct adi_axi_adc_conv **)res);
+}
+
+struct adi_axi_adc_conv *devm_adi_axi_adc_conv_register(struct device *dev,
+ size_t sizeof_priv)
+{
+ struct adi_axi_adc_conv **ptr, *conv;
+
+ ptr = devres_alloc(devm_adi_axi_adc_conv_release, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ conv = adi_axi_adc_conv_register(dev, sizeof_priv);
+ if (IS_ERR(conv)) {
+ devres_free(ptr);
+ return ERR_CAST(conv);
+ }
+
+ *ptr = conv;
+ devres_add(dev, ptr);
+
+ return conv;
+}
+EXPORT_SYMBOL_GPL(devm_adi_axi_adc_conv_register);
+
+static ssize_t in_voltage_scale_available_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct adi_axi_adc_state *st = iio_priv(indio_dev);
+ struct adi_axi_adc_conv *conv = &st->client->conv;
+ size_t len = 0;
+ int i;
+
+ for (i = 0; i < conv->chip_info->num_scales; i++) {
+ const unsigned int *s = conv->chip_info->scale_table[i];
+
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "%u.%06u ", s[0], s[1]);
+ }
+ buf[len - 1] = '\n';
+
+ return len;
+}
+
+static IIO_DEVICE_ATTR_RO(in_voltage_scale_available, 0);
+
+enum {
+ ADI_AXI_ATTR_SCALE_AVAIL,
+};
+
+#define ADI_AXI_ATTR(_en_, _file_) \
+ [ADI_AXI_ATTR_##_en_] = &iio_dev_attr_##_file_.dev_attr.attr
+
+static struct attribute *adi_axi_adc_attributes[] = {
+ ADI_AXI_ATTR(SCALE_AVAIL, in_voltage_scale_available),
+ NULL
+};
+
+static umode_t axi_adc_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct adi_axi_adc_state *st = iio_priv(indio_dev);
+ struct adi_axi_adc_conv *conv = &st->client->conv;
+
+ switch (n) {
+ case ADI_AXI_ATTR_SCALE_AVAIL:
+ if (!conv->chip_info->num_scales)
+ return 0;
+ return attr->mode;
+ default:
+ return attr->mode;
+ }
+}
+
+static const struct attribute_group adi_axi_adc_attribute_group = {
+ .attrs = adi_axi_adc_attributes,
+ .is_visible = axi_adc_attr_is_visible,
+};
+
+static const struct iio_info adi_axi_adc_info = {
+ .read_raw = &adi_axi_adc_read_raw,
+ .write_raw = &adi_axi_adc_write_raw,
+ .attrs = &adi_axi_adc_attribute_group,
+ .update_scan_mode = &adi_axi_adc_update_scan_mode,
+};
+
+static const struct adi_axi_adc_core_info adi_axi_adc_10_0_a_info = {
+ .version = ADI_AXI_PCORE_VER(10, 0, 'a'),
+};
+
+static struct adi_axi_adc_client *adi_axi_adc_attach_client(struct device *dev)
+{
+ const struct adi_axi_adc_core_info *info;
+ struct adi_axi_adc_client *cl;
+ struct device_node *cln;
+
+ info = of_device_get_match_data(dev);
+ if (!info)
+ return ERR_PTR(-ENODEV);
+
+ cln = of_parse_phandle(dev->of_node, "adi,adc-dev", 0);
+ if (!cln) {
+ dev_err(dev, "No 'adi,adc-dev' node defined\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ mutex_lock(&registered_clients_lock);
+
+ list_for_each_entry(cl, &registered_clients, entry) {
+ if (!cl->dev)
+ continue;
+
+ if (cl->dev->of_node != cln)
+ continue;
+
+ if (!try_module_get(dev->driver->owner)) {
+ mutex_unlock(&registered_clients_lock);
+ return ERR_PTR(-ENODEV);
+ }
+
+ get_device(dev);
+ cl->info = info;
+ mutex_unlock(&registered_clients_lock);
+ return cl;
+ }
+
+ mutex_unlock(&registered_clients_lock);
+
+ return ERR_PTR(-EPROBE_DEFER);
+}
+
+static int adi_axi_adc_setup_channels(struct device *dev,
+ struct adi_axi_adc_state *st)
+{
+ struct adi_axi_adc_conv *conv = &st->client->conv;
+ int i, ret;
+
+ if (conv->preenable_setup) {
+ ret = conv->preenable_setup(conv);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < conv->chip_info->num_channels; i++) {
+ adi_axi_adc_write(st, ADI_AXI_REG_CHAN_CTRL(i),
+ ADI_AXI_REG_CHAN_CTRL_DEFAULTS);
+ }
+
+ return 0;
+}
+
+static void axi_adc_reset(struct adi_axi_adc_state *st)
+{
+ adi_axi_adc_write(st, ADI_AXI_REG_RSTN, 0);
+ mdelay(10);
+ adi_axi_adc_write(st, ADI_AXI_REG_RSTN, ADI_AXI_REG_RSTN_MMCM_RSTN);
+ mdelay(10);
+ adi_axi_adc_write(st, ADI_AXI_REG_RSTN,
+ ADI_AXI_REG_RSTN_RSTN | ADI_AXI_REG_RSTN_MMCM_RSTN);
+}
+
+static void adi_axi_adc_cleanup(void *data)
+{
+ struct adi_axi_adc_client *cl = data;
+
+ put_device(cl->dev);
+ module_put(cl->dev->driver->owner);
+}
+
+static int adi_axi_adc_probe(struct platform_device *pdev)
+{
+ struct adi_axi_adc_conv *conv;
+ struct iio_dev *indio_dev;
+ struct adi_axi_adc_client *cl;
+ struct adi_axi_adc_state *st;
+ unsigned int ver;
+ int ret;
+
+ cl = adi_axi_adc_attach_client(&pdev->dev);
+ if (IS_ERR(cl))
+ return PTR_ERR(cl);
+
+ ret = devm_add_action_or_reset(&pdev->dev, adi_axi_adc_cleanup, cl);
+ if (ret)
+ return ret;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*st));
+ if (indio_dev == NULL)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+ st->client = cl;
+ cl->state = st;
+ mutex_init(&st->lock);
+
+ st->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(st->regs))
+ return PTR_ERR(st->regs);
+
+ conv = &st->client->conv;
+
+ axi_adc_reset(st);
+
+ ver = adi_axi_adc_read(st, ADI_AXI_REG_VERSION);
+
+ if (cl->info->version > ver) {
+ dev_err(&pdev->dev,
+ "IP core version is too old. Expected %d.%.2d.%c, Reported %d.%.2d.%c\n",
+ ADI_AXI_PCORE_VER_MAJOR(cl->info->version),
+ ADI_AXI_PCORE_VER_MINOR(cl->info->version),
+ ADI_AXI_PCORE_VER_PATCH(cl->info->version),
+ ADI_AXI_PCORE_VER_MAJOR(ver),
+ ADI_AXI_PCORE_VER_MINOR(ver),
+ ADI_AXI_PCORE_VER_PATCH(ver));
+ return -ENODEV;
+ }
+
+ indio_dev->info = &adi_axi_adc_info;
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->name = "adi-axi-adc";
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->num_channels = conv->chip_info->num_channels;
+ indio_dev->channels = conv->chip_info->channels;
+
+ ret = adi_axi_adc_config_dma_buffer(&pdev->dev, indio_dev);
+ if (ret)
+ return ret;
+
+ ret = adi_axi_adc_setup_channels(&pdev->dev, st);
+ if (ret)
+ return ret;
+
+ ret = devm_iio_device_register(&pdev->dev, indio_dev);
+ if (ret)
+ return ret;
+
+ dev_info(&pdev->dev, "AXI ADC IP core (%d.%.2d.%c) probed\n",
+ ADI_AXI_PCORE_VER_MAJOR(ver),
+ ADI_AXI_PCORE_VER_MINOR(ver),
+ ADI_AXI_PCORE_VER_PATCH(ver));
+
+ return 0;
+}
+
+/* Match table for of_platform binding */
+static const struct of_device_id adi_axi_adc_of_match[] = {
+ { .compatible = "adi,axi-adc-10.0.a", .data = &adi_axi_adc_10_0_a_info },
+ { /* end of list */ }
+};
+MODULE_DEVICE_TABLE(of, adi_axi_adc_of_match);
+
+static struct platform_driver adi_axi_adc_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = adi_axi_adc_of_match,
+ },
+ .probe = adi_axi_adc_probe,
+};
+module_platform_driver(adi_axi_adc_driver);
+
+MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
+MODULE_DESCRIPTION("Analog Devices Generic AXI ADC IP core driver");
+MODULE_LICENSE("GPL v2");
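
For orientation, adi_axi_adc_conv_priv() relies on the single allocation made in adi_axi_adc_conv_register(); the converter driver's private state (struct ad9467_state in this series) starts at the first IIO_ALIGN boundary after the client structure. A sketch of the layout implied by the code above:

/*
 *	+-------------------------------+  <- kzalloc(alloc_size)
 *	| struct adi_axi_adc_client     |
 *	|   .conv (handed to converter) |
 *	+-------------------------------+  <- ALIGN(sizeof(client), IIO_ALIGN)
 *	| sizeof_priv bytes             |  <- adi_axi_adc_conv_priv(conv)
 *	+-------------------------------+
 */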
diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
index 9d96f7d08b95..9abbbdcc7420 100644
--- a/drivers/iio/adc/at91-sama5d2_adc.c
+++ b/drivers/iio/adc/at91-sama5d2_adc.c
@@ -8,6 +8,7 @@
#include <linux/bitops.h>
#include <linux/clk.h>
+#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
@@ -100,6 +101,8 @@
#define AT91_SAMA5D2_IER_YRDY BIT(21)
/* Interrupt Enable Register - TS pressure measurement ready */
#define AT91_SAMA5D2_IER_PRDY BIT(22)
+/* Interrupt Enable Register - Data ready */
+#define AT91_SAMA5D2_IER_DRDY BIT(24)
/* Interrupt Enable Register - general overrun error */
#define AT91_SAMA5D2_IER_GOVRE BIT(25)
/* Interrupt Enable Register - Pen detect */
@@ -486,6 +489,21 @@ static inline int at91_adc_of_xlate(struct iio_dev *indio_dev,
return at91_adc_chan_xlate(indio_dev, iiospec->args[0]);
}
+static unsigned int at91_adc_active_scan_mask_to_reg(struct iio_dev *indio_dev)
+{
+ u32 mask = 0;
+ u8 bit;
+
+ for_each_set_bit(bit, indio_dev->active_scan_mask,
+ indio_dev->num_channels) {
+ struct iio_chan_spec const *chan =
+ at91_adc_chan_get(indio_dev, bit);
+ mask |= BIT(chan->channel);
+ }
+
+ return mask & GENMASK(11, 0);
+}
+
static void at91_adc_config_emr(struct at91_adc_state *st)
{
/* configure the extended mode register */
@@ -710,7 +728,6 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
struct iio_dev *indio = iio_trigger_get_drvdata(trig);
struct at91_adc_state *st = iio_priv(indio);
u32 status = at91_adc_readl(st, AT91_SAMA5D2_TRGR);
- u8 bit;
/* clear TRGMOD */
status &= ~AT91_SAMA5D2_TRGR_TRGMOD_MASK;
@@ -721,50 +738,6 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
/* set/unset hw trigger */
at91_adc_writel(st, AT91_SAMA5D2_TRGR, status);
- for_each_set_bit(bit, indio->active_scan_mask, indio->num_channels) {
- struct iio_chan_spec const *chan = at91_adc_chan_get(indio, bit);
- u32 cor;
-
- if (!chan)
- continue;
- /* these channel types cannot be handled by this trigger */
- if (chan->type == IIO_POSITIONRELATIVE ||
- chan->type == IIO_PRESSURE)
- continue;
-
- if (state) {
- cor = at91_adc_readl(st, AT91_SAMA5D2_COR);
-
- if (chan->differential)
- cor |= (BIT(chan->channel) |
- BIT(chan->channel2)) <<
- AT91_SAMA5D2_COR_DIFF_OFFSET;
- else
- cor &= ~(BIT(chan->channel) <<
- AT91_SAMA5D2_COR_DIFF_OFFSET);
-
- at91_adc_writel(st, AT91_SAMA5D2_COR, cor);
- }
-
- if (state) {
- at91_adc_writel(st, AT91_SAMA5D2_CHER,
- BIT(chan->channel));
- /* enable irq only if not using DMA */
- if (!st->dma_st.dma_chan) {
- at91_adc_writel(st, AT91_SAMA5D2_IER,
- BIT(chan->channel));
- }
- } else {
- /* disable irq only if not using DMA */
- if (!st->dma_st.dma_chan) {
- at91_adc_writel(st, AT91_SAMA5D2_IDR,
- BIT(chan->channel));
- }
- at91_adc_writel(st, AT91_SAMA5D2_CHDR,
- BIT(chan->channel));
- }
- }
-
return 0;
}
@@ -781,6 +754,7 @@ static int at91_adc_reenable_trigger(struct iio_trigger *trig)
/* Needed to ACK the DRDY interruption */
at91_adc_readl(st, AT91_SAMA5D2_LCDR);
+
return 0;
}
@@ -888,18 +862,37 @@ static int at91_adc_dma_start(struct iio_dev *indio_dev)
return 0;
}
-static int at91_adc_buffer_postenable(struct iio_dev *indio_dev)
+static bool at91_adc_buffer_check_use_irq(struct iio_dev *indio,
+ struct at91_adc_state *st)
+{
+ /* if using DMA, we do not use our own IRQ (we use DMA-controller) */
+ if (st->dma_st.dma_chan)
+ return false;
+ /* if the trigger is not ours, then it has its own IRQ */
+ if (iio_trigger_validate_own_device(indio->trig, indio))
+ return false;
+ return true;
+}
+
+static bool at91_adc_current_chan_is_touch(struct iio_dev *indio_dev)
+{
+ struct at91_adc_state *st = iio_priv(indio_dev);
+
+ return !!bitmap_subset(indio_dev->active_scan_mask,
+ &st->touch_st.channels_bitmask,
+ AT91_SAMA5D2_MAX_CHAN_IDX + 1);
+}
+
+static int at91_adc_buffer_preenable(struct iio_dev *indio_dev)
{
int ret;
+ u8 bit;
struct at91_adc_state *st = iio_priv(indio_dev);
/* check if we are enabling triggered buffer or the touchscreen */
- if (bitmap_subset(indio_dev->active_scan_mask,
- &st->touch_st.channels_bitmask,
- AT91_SAMA5D2_MAX_CHAN_IDX + 1)) {
- /* touchscreen enabling */
+ if (at91_adc_current_chan_is_touch(indio_dev))
return at91_adc_configure_touch(st, true);
- }
+
/* if we are not in triggered mode, we cannot enable the buffer. */
if (!(indio_dev->currentmode & INDIO_ALL_TRIGGERED_MODES))
return -EINVAL;
@@ -911,41 +904,65 @@ static int at91_adc_buffer_postenable(struct iio_dev *indio_dev)
return ret;
}
+ for_each_set_bit(bit, indio_dev->active_scan_mask,
+ indio_dev->num_channels) {
+ struct iio_chan_spec const *chan =
+ at91_adc_chan_get(indio_dev, bit);
+ u32 cor;
+
+ if (!chan)
+ continue;
+ /* these channel types cannot be handled by this trigger */
+ if (chan->type == IIO_POSITIONRELATIVE ||
+ chan->type == IIO_PRESSURE)
+ continue;
+
+ cor = at91_adc_readl(st, AT91_SAMA5D2_COR);
+
+ if (chan->differential)
+ cor |= (BIT(chan->channel) | BIT(chan->channel2)) <<
+ AT91_SAMA5D2_COR_DIFF_OFFSET;
+ else
+ cor &= ~(BIT(chan->channel) <<
+ AT91_SAMA5D2_COR_DIFF_OFFSET);
+
+ at91_adc_writel(st, AT91_SAMA5D2_COR, cor);
+
+ at91_adc_writel(st, AT91_SAMA5D2_CHER, BIT(chan->channel));
+ }
+
+ if (at91_adc_buffer_check_use_irq(indio_dev, st))
+ at91_adc_writel(st, AT91_SAMA5D2_IER, AT91_SAMA5D2_IER_DRDY);
+
+ return 0;
+}
+
+static int at91_adc_buffer_postenable(struct iio_dev *indio_dev)
+{
+ if (at91_adc_current_chan_is_touch(indio_dev))
+ return 0;
+
return iio_triggered_buffer_postenable(indio_dev);
}
-static int at91_adc_buffer_predisable(struct iio_dev *indio_dev)
+static int at91_adc_buffer_postdisable(struct iio_dev *indio_dev)
{
struct at91_adc_state *st = iio_priv(indio_dev);
- int ret;
u8 bit;
/* check if we are disabling triggered buffer or the touchscreen */
- if (bitmap_subset(indio_dev->active_scan_mask,
- &st->touch_st.channels_bitmask,
- AT91_SAMA5D2_MAX_CHAN_IDX + 1)) {
- /* touchscreen disable */
+ if (at91_adc_current_chan_is_touch(indio_dev))
return at91_adc_configure_touch(st, false);
- }
+
/* if we are not in triggered mode, nothing to do here */
if (!(indio_dev->currentmode & INDIO_ALL_TRIGGERED_MODES))
return -EINVAL;
- /* continue with the triggered buffer */
- ret = iio_triggered_buffer_predisable(indio_dev);
- if (ret < 0)
- dev_err(&indio_dev->dev, "buffer predisable failed\n");
-
- if (!st->dma_st.dma_chan)
- return ret;
-
- /* if we are using DMA we must clear registers and end DMA */
- dmaengine_terminate_sync(st->dma_st.dma_chan);
-
/*
- * For each enabled channel we must read the last converted value
+ * For each enabled channel we must disable it in hardware.
+ * In the case of DMA, we must read the last converted value
* to clear EOC status and not get a possible interrupt later.
- * This value is being read by DMA from LCDR anyway
+ * This value is being read by DMA from LCDR anyway, so it's not lost.
*/
for_each_set_bit(bit, indio_dev->active_scan_mask,
indio_dev->num_channels) {
@@ -958,16 +975,37 @@ static int at91_adc_buffer_predisable(struct iio_dev *indio_dev)
if (chan->type == IIO_POSITIONRELATIVE ||
chan->type == IIO_PRESSURE)
continue;
+
+ at91_adc_writel(st, AT91_SAMA5D2_CHDR, BIT(chan->channel));
+
if (st->dma_st.dma_chan)
at91_adc_readl(st, chan->address);
}
+ if (at91_adc_buffer_check_use_irq(indio_dev, st))
+ at91_adc_writel(st, AT91_SAMA5D2_IDR, AT91_SAMA5D2_IER_DRDY);
+
/* read overflow register to clear possible overflow status */
at91_adc_readl(st, AT91_SAMA5D2_OVER);
- return ret;
+
+ /* if we are using DMA we must clear registers and end DMA */
+ if (st->dma_st.dma_chan)
+ dmaengine_terminate_sync(st->dma_st.dma_chan);
+
+ return 0;
+}
+
+static int at91_adc_buffer_predisable(struct iio_dev *indio_dev)
+{
+ if (at91_adc_current_chan_is_touch(indio_dev))
+ return 0;
+
+ return iio_triggered_buffer_predisable(indio_dev);
}
static const struct iio_buffer_setup_ops at91_buffer_setup_ops = {
+ .preenable = &at91_adc_buffer_preenable,
+ .postdisable = &at91_adc_buffer_postdisable,
.postenable = &at91_adc_buffer_postenable,
.predisable = &at91_adc_buffer_predisable,
};
@@ -1015,6 +1053,22 @@ static void at91_adc_trigger_handler_nodma(struct iio_dev *indio_dev,
int i = 0;
int val;
u8 bit;
+ u32 mask = at91_adc_active_scan_mask_to_reg(indio_dev);
+ unsigned int timeout = 50;
+
+ /*
+ * Check if the conversion is ready. If not, wait a little bit, and
+ * in case of timeout give up without reporting this sample.
+ */
+ while ((at91_adc_readl(st, AT91_SAMA5D2_ISR) & mask) != mask &&
+ timeout) {
+ usleep_range(50, 100);
+ timeout--;
+ }
+
+ /* Cannot read data, not ready. Continue without reporting data */
+ if (!timeout)
+ return;
for_each_set_bit(bit, indio_dev->active_scan_mask,
indio_dev->num_channels) {
@@ -1102,6 +1156,13 @@ static irqreturn_t at91_adc_trigger_handler(int irq, void *p)
struct iio_dev *indio_dev = pf->indio_dev;
struct at91_adc_state *st = iio_priv(indio_dev);
+ /*
+ * If it's not our trigger, start a conversion now, as we are
+ * actually polling the trigger now.
+ */
+ if (iio_trigger_validate_own_device(indio_dev->trig, indio_dev))
+ at91_adc_writel(st, AT91_SAMA5D2_CR, AT91_SAMA5D2_CR_START);
+
if (st->dma_st.dma_chan)
at91_adc_trigger_handler_dma(indio_dev);
else
@@ -1114,20 +1175,9 @@ static irqreturn_t at91_adc_trigger_handler(int irq, void *p)
static int at91_adc_buffer_init(struct iio_dev *indio)
{
- struct at91_adc_state *st = iio_priv(indio);
-
- if (st->selected_trig->hw_trig) {
- return devm_iio_triggered_buffer_setup(&indio->dev, indio,
- &iio_pollfunc_store_time,
- &at91_adc_trigger_handler, &at91_buffer_setup_ops);
- }
- /*
- * we need to prepare the buffer ops in case we will get
- * another buffer attached (like a callback buffer for the touchscreen)
- */
- indio->setup_ops = &at91_buffer_setup_ops;
-
- return 0;
+ return devm_iio_triggered_buffer_setup(&indio->dev, indio,
+ &iio_pollfunc_store_time,
+ &at91_adc_trigger_handler, &at91_buffer_setup_ops);
}
static unsigned at91_adc_startup_time(unsigned startup_time_min,
@@ -1281,7 +1331,8 @@ static irqreturn_t at91_adc_interrupt(int irq, void *private)
status = at91_adc_readl(st, AT91_SAMA5D2_XPOSR);
status = at91_adc_readl(st, AT91_SAMA5D2_YPOSR);
status = at91_adc_readl(st, AT91_SAMA5D2_PRESSR);
- } else if (iio_buffer_enabled(indio) && !st->dma_st.dma_chan) {
+ } else if (iio_buffer_enabled(indio) &&
+ (status & AT91_SAMA5D2_IER_DRDY)) {
/* triggered buffer without DMA */
disable_irq_nosync(irq);
iio_trigger_poll(indio->trig);
@@ -1901,14 +1952,10 @@ static __maybe_unused int at91_adc_resume(struct device *dev)
return 0;
/* check if we are enabling triggered buffer or the touchscreen */
- if (bitmap_subset(indio_dev->active_scan_mask,
- &st->touch_st.channels_bitmask,
- AT91_SAMA5D2_MAX_CHAN_IDX + 1)) {
- /* touchscreen enabling */
+ if (at91_adc_current_chan_is_touch(indio_dev))
return at91_adc_configure_touch(st, true);
- } else {
+ else
return at91_adc_configure_trigger(st->trig, true);
- }
/* not needed but more explicit */
return 0;
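
An illustrative walk-through of the new no-DMA data-ready flow: at91_adc_active_scan_mask_to_reg() turns the active scan mask into a hardware channel mask, which the trigger handler then polls against the interrupt status register before reading the samples.

/*
 * Example (channel numbers are illustrative): scan bits 0 and 2
 * selecting hardware channels 1 and 4 give
 *
 *	mask = BIT(1) | BIT(4) = 0x12
 *
 * and at91_adc_trigger_handler_nodma() waits until
 * (AT91_SAMA5D2_ISR & 0x12) == 0x12, retrying up to 50 times with a
 * 50-100 us sleep, before assembling the scan.
 */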
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
index abe99856c823..0368b6dc6d60 100644
--- a/drivers/iio/adc/at91_adc.c
+++ b/drivers/iio/adc/at91_adc.c
@@ -1152,7 +1152,6 @@ static int at91_adc_probe(struct platform_device *pdev)
int ret;
struct iio_dev *idev;
struct at91_adc_state *st;
- struct resource *res;
u32 reg;
idev = devm_iio_device_alloc(&pdev->dev, sizeof(struct at91_adc_state));
@@ -1182,9 +1181,7 @@ static int at91_adc_probe(struct platform_device *pdev)
if (st->irq < 0)
return -ENODEV;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- st->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ st->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(st->reg_base))
return PTR_ERR(st->reg_base);
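
The helper introduced here is the generic shorthand for the removed two-step pattern; a sketch of the equivalence (function names are illustrative):

static void __iomem *example_old_way(struct platform_device *pdev)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	return devm_ioremap_resource(&pdev->dev, res);
}

static void __iomem *example_new_way(struct platform_device *pdev)
{
	return devm_platform_ioremap_resource(pdev, 0);
}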
diff --git a/drivers/iio/adc/exynos_adc.c b/drivers/iio/adc/exynos_adc.c
index 22131a677445..6bda4f4d89fe 100644
--- a/drivers/iio/adc/exynos_adc.c
+++ b/drivers/iio/adc/exynos_adc.c
@@ -449,9 +449,6 @@ static void exynos_adc_exynos7_init_hw(struct exynos_adc *info)
{
u32 con1, con2;
- if (info->data->needs_adc_phy)
- regmap_write(info->pmu_map, info->data->phy_offset, 1);
-
con1 = ADC_V2_CON1_SOFT_RESET;
writel(con1, ADC_V2_CON1(info->regs));
@@ -531,8 +528,19 @@ static int exynos_read_raw(struct iio_dev *indio_dev,
unsigned long timeout;
int ret;
- if (mask != IIO_CHAN_INFO_RAW)
+ if (mask == IIO_CHAN_INFO_SCALE) {
+ ret = regulator_get_voltage(info->vdd);
+ if (ret < 0)
+ return ret;
+
+ /* Regulator voltage is in uV, but we need mV */
+ *val = ret / 1000;
+ *val2 = info->data->mask;
+
+ return IIO_VAL_FRACTIONAL;
+ } else if (mask != IIO_CHAN_INFO_RAW) {
return -EINVAL;
+ }
mutex_lock(&indio_dev->mlock);
reinit_completion(&info->completion);
@@ -683,6 +691,7 @@ static const struct iio_info exynos_adc_iio_info = {
.channel = _index, \
.address = _index, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SCALE), \
.datasheet_name = _id, \
}
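
A worked example of the new SCALE path, assuming a 1.8 V VDD regulator and a 12-bit variant where info->data->mask is 0xfff (both values illustrative):

/*
 *	regulator_get_voltage() -> 1800000 uV
 *	*val  = 1800000 / 1000  = 1800
 *	*val2 = 0xfff           = 4095
 *
 * IIO_VAL_FRACTIONAL reports val / val2 ~= 0.4396, i.e. about
 * 0.44 mV per code.
 */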
diff --git a/drivers/iio/adc/fsl-imx25-gcq.c b/drivers/iio/adc/fsl-imx25-gcq.c
index fa71489195c6..b0a4dc88ba9b 100644
--- a/drivers/iio/adc/fsl-imx25-gcq.c
+++ b/drivers/iio/adc/fsl-imx25-gcq.c
@@ -294,7 +294,6 @@ static int mx25_gcq_probe(struct platform_device *pdev)
struct mx25_gcq_priv *priv;
struct mx25_tsadc *tsadc = dev_get_drvdata(pdev->dev.parent);
struct device *dev = &pdev->dev;
- struct resource *res;
void __iomem *mem;
int ret;
int i;
@@ -305,8 +304,7 @@ static int mx25_gcq_probe(struct platform_device *pdev)
priv = iio_priv(indio_dev);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mem = devm_ioremap_resource(dev, res);
+ mem = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mem))
return PTR_ERR(mem);
diff --git a/drivers/iio/adc/intel_mrfld_adc.c b/drivers/iio/adc/intel_mrfld_adc.c
index c35a1beb817c..a6d2e1f27e76 100644
--- a/drivers/iio/adc/intel_mrfld_adc.c
+++ b/drivers/iio/adc/intel_mrfld_adc.c
@@ -75,7 +75,7 @@ static int mrfld_adc_single_conv(struct iio_dev *indio_dev,
struct regmap *regmap = adc->regmap;
unsigned int req;
long timeout;
- u8 buf[2];
+ __be16 value;
int ret;
reinit_completion(&adc->completion);
@@ -105,11 +105,11 @@ static int mrfld_adc_single_conv(struct iio_dev *indio_dev,
goto done;
}
- ret = regmap_bulk_read(regmap, chan->address, buf, 2);
+ ret = regmap_bulk_read(regmap, chan->address, &value, sizeof(value));
if (ret)
goto done;
- *result = get_unaligned_be16(buf);
+ *result = be16_to_cpu(value);
ret = IIO_VAL_INT;
done:
diff --git a/drivers/iio/adc/max1241.c b/drivers/iio/adc/max1241.c
new file mode 100644
index 000000000000..541939c7abca
--- /dev/null
+++ b/drivers/iio/adc/max1241.c
@@ -0,0 +1,227 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * MAX1241 low-power, 12-bit serial ADC
+ *
+ * Datasheet: https://datasheets.maximintegrated.com/en/ds/MAX1240-MAX1241.pdf
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/iio/iio.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+
+#define MAX1241_VAL_MASK GENMASK(11, 0)
+#define MAX1241_SHUTDOWN_DELAY_USEC 4
+
+enum max1241_id {
+ max1241,
+};
+
+struct max1241 {
+ struct spi_device *spi;
+ struct mutex lock;
+ struct regulator *vdd;
+ struct regulator *vref;
+ struct gpio_desc *shutdown;
+
+ __be16 data ____cacheline_aligned;
+};
+
+static const struct iio_chan_spec max1241_channels[] = {
+ {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 0,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ },
+};
+
+static int max1241_read(struct max1241 *adc)
+{
+ struct spi_transfer xfers[] = {
+ /*
+ * Begin conversion by bringing /CS low for at least
+ * tconv us.
+ */
+ {
+ .len = 0,
+ .delay.value = 8,
+ .delay.unit = SPI_DELAY_UNIT_USECS,
+ },
+ /*
+ * Then read two bytes of data in our RX buffer.
+ */
+ {
+ .rx_buf = &adc->data,
+ .len = 2,
+ },
+ };
+
+ return spi_sync_transfer(adc->spi, xfers, ARRAY_SIZE(xfers));
+}
+
+static int max1241_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ int ret, vref_uV;
+ struct max1241 *adc = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&adc->lock);
+
+ if (adc->shutdown) {
+ gpiod_set_value(adc->shutdown, 0);
+ udelay(MAX1241_SHUTDOWN_DELAY_USEC);
+ ret = max1241_read(adc);
+ gpiod_set_value(adc->shutdown, 1);
+ } else
+ ret = max1241_read(adc);
+
+ if (ret) {
+ mutex_unlock(&adc->lock);
+ return ret;
+ }
+
+ *val = (be16_to_cpu(adc->data) >> 3) & MAX1241_VAL_MASK;
+
+ mutex_unlock(&adc->lock);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ vref_uV = regulator_get_voltage(adc->vref);
+
+ if (vref_uV < 0)
+ return vref_uV;
+
+ *val = vref_uV / 1000;
+ *val2 = 12;
+
+ return IIO_VAL_FRACTIONAL_LOG2;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info max1241_info = {
+ .read_raw = max1241_read_raw,
+};
+
+static void max1241_disable_vdd_action(void *data)
+{
+ struct max1241 *adc = data;
+ struct device *dev = &adc->spi->dev;
+ int err;
+
+ err = regulator_disable(adc->vdd);
+ if (err)
+ dev_err(dev, "could not disable vdd regulator.\n");
+}
+
+static void max1241_disable_vref_action(void *data)
+{
+ struct max1241 *adc = data;
+ struct device *dev = &adc->spi->dev;
+ int err;
+
+ err = regulator_disable(adc->vref);
+ if (err)
+ dev_err(dev, "could not disable vref regulator.\n");
+}
+
+static int max1241_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct iio_dev *indio_dev;
+ struct max1241 *adc;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*adc));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ adc = iio_priv(indio_dev);
+ adc->spi = spi;
+ mutex_init(&adc->lock);
+
+ spi_set_drvdata(spi, indio_dev);
+
+ adc->vdd = devm_regulator_get(dev, "vdd");
+ if (IS_ERR(adc->vdd)) {
+ dev_err(dev, "failed to get vdd regulator\n");
+ return PTR_ERR(adc->vdd);
+ }
+
+ ret = regulator_enable(adc->vdd);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, max1241_disable_vdd_action, adc);
+ if (ret) {
+ dev_err(dev, "could not set up vdd regulator cleanup action\n");
+ return ret;
+ }
+
+ adc->vref = devm_regulator_get(dev, "vref");
+ if (IS_ERR(adc->vref)) {
+ dev_err(dev, "failed to get vref regulator\n");
+ return PTR_ERR(adc->vref);
+ }
+
+ ret = regulator_enable(adc->vref);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, max1241_disable_vref_action, adc);
+ if (ret) {
+ dev_err(dev, "could not set up vref regulator cleanup action\n");
+ return ret;
+ }
+
+ adc->shutdown = devm_gpiod_get_optional(dev, "shutdown",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(adc->shutdown))
+ return PTR_ERR(adc->shutdown);
+
+ if (adc->shutdown)
+ dev_dbg(dev, "shutdown pin passed, low-power mode enabled");
+ else
+ dev_dbg(dev, "no shutdown pin passed, low-power mode disabled");
+
+ indio_dev->name = spi_get_device_id(spi)->name;
+ indio_dev->dev.parent = dev;
+ indio_dev->info = &max1241_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = max1241_channels;
+ indio_dev->num_channels = ARRAY_SIZE(max1241_channels);
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static const struct spi_device_id max1241_id[] = {
+ { "max1241", max1241 },
+ {}
+};
+
+static const struct of_device_id max1241_dt_ids[] = {
+ { .compatible = "maxim,max1241" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, max1241_dt_ids);
+
+static struct spi_driver max1241_spi_driver = {
+ .driver = {
+ .name = "max1241",
+ .of_match_table = max1241_dt_ids,
+ },
+ .probe = max1241_probe,
+ .id_table = max1241_id,
+};
+module_spi_driver(max1241_spi_driver);
+
+MODULE_AUTHOR("Alexandru Lazar <alazar@startmail.com>");
+MODULE_DESCRIPTION("MAX1241 ADC driver");
+MODULE_LICENSE("GPL v2");
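
A short worked example of the raw and scale conversions in max1241_read_raw(); the reference voltage is illustrative:

/*
 * Raw: the 12-bit code occupies bits 14..3 of the big-endian word, so
 *
 *	*val = (be16_to_cpu(data) >> 3) & GENMASK(11, 0);
 *
 * Scale, assuming a 2.5 V reference:
 *
 *	*val  = 2500000 / 1000 = 2500
 *	*val2 = 12
 *
 * IIO_VAL_FRACTIONAL_LOG2 reports 2500 / 2^12 ~= 0.6104 mV per code.
 */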
diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c
index 5c2cc61b666e..9d92017c79b2 100644
--- a/drivers/iio/adc/max1363.c
+++ b/drivers/iio/adc/max1363.c
@@ -150,6 +150,7 @@ struct max1363_chip_info {
* @current_mode: the scan mode of this chip
* @requestedmask: a valid requested set of channels
* @reg: supply regulator
* @lock: lock to ensure state is consistent
* @monitor_on: whether monitor mode is enabled
* @monitor_speed: parameter corresponding to device monitor speed setting
* @mask_high: bitmask for enabled high thresholds
@@ -169,6 +170,7 @@ struct max1363_state {
const struct max1363_mode *current_mode;
u32 requestedmask;
struct regulator *reg;
+ struct mutex lock;
/* Using monitor modes and buffer at the same time is
currently not supported */
@@ -364,7 +366,11 @@ static int max1363_read_single_chan(struct iio_dev *indio_dev,
struct max1363_state *st = iio_priv(indio_dev);
struct i2c_client *client = st->client;
- mutex_lock(&indio_dev->mlock);
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+ mutex_lock(&st->lock);
+
/*
* If monitor mode is enabled, the method for reading a single
* channel will have to be rather different and has not yet
@@ -372,7 +378,7 @@ static int max1363_read_single_chan(struct iio_dev *indio_dev,
*
* Also, cannot read directly if buffered capture enabled.
*/
- if (st->monitor_on || iio_buffer_enabled(indio_dev)) {
+ if (st->monitor_on) {
ret = -EBUSY;
goto error_ret;
}
@@ -404,8 +410,10 @@ static int max1363_read_single_chan(struct iio_dev *indio_dev,
data = rxbuf[0];
}
*val = data;
+
error_ret:
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
+ iio_device_release_direct_mode(indio_dev);
return ret;
}
@@ -705,9 +713,9 @@ static ssize_t max1363_monitor_store_freq(struct device *dev,
if (!found)
return -EINVAL;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
st->monitor_speed = i;
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return 0;
}
@@ -810,12 +818,12 @@ static int max1363_read_event_config(struct iio_dev *indio_dev,
int val;
int number = chan->channel;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
if (dir == IIO_EV_DIR_FALLING)
val = (1 << number) & st->mask_low;
else
val = (1 << number) & st->mask_high;
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return val;
}
@@ -962,7 +970,11 @@ static int max1363_write_event_config(struct iio_dev *indio_dev,
u16 unifiedmask;
int number = chan->channel;
- mutex_lock(&indio_dev->mlock);
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+ mutex_lock(&st->lock);
+
unifiedmask = st->mask_low | st->mask_high;
if (dir == IIO_EV_DIR_FALLING) {
@@ -989,7 +1001,8 @@ static int max1363_write_event_config(struct iio_dev *indio_dev,
max1363_monitor_mode_update(st, !!(st->mask_high | st->mask_low));
error_ret:
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
+ iio_device_release_direct_mode(indio_dev);
return ret;
}
@@ -1587,6 +1600,7 @@ static int max1363_probe(struct i2c_client *client,
st = iio_priv(indio_dev);
+ mutex_init(&st->lock);
st->reg = devm_regulator_get(&client->dev, "vcc");
if (IS_ERR(st->reg)) {
ret = PTR_ERR(st->reg);
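For reference, a minimal sketch (not part of the patch) of the direct-mode claim pattern the max1363 conversion above switches to; the names my_state and my_hw_read are hypothetical placeholders:

static int my_read_single(struct iio_dev *indio_dev, int *val)
{
	struct my_state *st = iio_priv(indio_dev);
	int ret;

	/* Fails with -EBUSY while buffered capture is active */
	ret = iio_device_claim_direct_mode(indio_dev);
	if (ret)
		return ret;

	/* Driver state is protected by a private lock, not indio_dev->mlock */
	mutex_lock(&st->lock);
	ret = my_hw_read(st, val);
	mutex_unlock(&st->lock);

	iio_device_release_direct_mode(indio_dev);
	return ret;
}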
diff --git a/drivers/iio/adc/mcp3422.c b/drivers/iio/adc/mcp3422.c
index ea24d7c58b12..d86c0b5d80a3 100644
--- a/drivers/iio/adc/mcp3422.c
+++ b/drivers/iio/adc/mcp3422.c
@@ -19,6 +19,7 @@
#include <linux/delay.h>
#include <linux/sysfs.h>
#include <linux/of.h>
+#include <asm/unaligned.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -117,11 +118,11 @@ static int mcp3422_read(struct mcp3422 *adc, int *value, u8 *config)
if (sample_rate == MCP3422_SRATE_3) {
ret = i2c_master_recv(adc->i2c, buf, 4);
- temp = buf[0] << 16 | buf[1] << 8 | buf[2];
+ temp = get_unaligned_be24(&buf[0]);
*config = buf[3];
} else {
ret = i2c_master_recv(adc->i2c, buf, 3);
- temp = buf[0] << 8 | buf[1];
+ temp = get_unaligned_be16(&buf[0]);
*config = buf[2];
}
diff --git a/drivers/iio/adc/mp2629_adc.c b/drivers/iio/adc/mp2629_adc.c
new file mode 100644
index 000000000000..331a9a728217
--- /dev/null
+++ b/drivers/iio/adc/mp2629_adc.c
@@ -0,0 +1,208 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MP2629 ADC driver
+ *
+ * Copyright 2020 Monolithic Power Systems, Inc
+ *
+ * Author: Saravanan Sekar <sravanhome@gmail.com>
+ */
+
+#include <linux/iio/driver.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/machine.h>
+#include <linux/mfd/mp2629.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define MP2629_REG_ADC_CTRL 0x03
+#define MP2629_REG_BATT_VOLT 0x0e
+#define MP2629_REG_SYSTEM_VOLT 0x0f
+#define MP2629_REG_INPUT_VOLT 0x11
+#define MP2629_REG_BATT_CURRENT 0x12
+#define MP2629_REG_INPUT_CURRENT 0x13
+
+#define MP2629_ADC_START BIT(7)
+#define MP2629_ADC_CONTINUOUS BIT(6)
+
+#define MP2629_MAP(_mp, _mpc) IIO_MAP(#_mp, "mp2629_charger", "mp2629-"_mpc)
+
+#define MP2629_ADC_CHAN(_ch, _type) { \
+ .type = _type, \
+ .indexed = 1, \
+ .datasheet_name = #_ch, \
+ .channel = MP2629_ ## _ch, \
+ .address = MP2629_REG_ ## _ch, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+}
+
+struct mp2629_adc {
+ struct regmap *regmap;
+ struct device *dev;
+};
+
+static struct iio_chan_spec mp2629_channels[] = {
+ MP2629_ADC_CHAN(BATT_VOLT, IIO_VOLTAGE),
+ MP2629_ADC_CHAN(SYSTEM_VOLT, IIO_VOLTAGE),
+ MP2629_ADC_CHAN(INPUT_VOLT, IIO_VOLTAGE),
+ MP2629_ADC_CHAN(BATT_CURRENT, IIO_CURRENT),
+ MP2629_ADC_CHAN(INPUT_CURRENT, IIO_CURRENT)
+};
+
+static struct iio_map mp2629_adc_maps[] = {
+ MP2629_MAP(BATT_VOLT, "batt-volt"),
+ MP2629_MAP(SYSTEM_VOLT, "system-volt"),
+ MP2629_MAP(INPUT_VOLT, "input-volt"),
+ MP2629_MAP(BATT_CURRENT, "batt-current"),
+ MP2629_MAP(INPUT_CURRENT, "input-current")
+};
+
+static int mp2629_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct mp2629_adc *info = iio_priv(indio_dev);
+ unsigned int rval;
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = regmap_read(info->regmap, chan->address, &rval);
+ if (ret)
+ return ret;
+
+ if (chan->channel == MP2629_INPUT_VOLT)
+ rval &= GENMASK(6, 0);
+ *val = rval;
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->channel) {
+ case MP2629_BATT_VOLT:
+ case MP2629_SYSTEM_VOLT:
+ *val = 20;
+ return IIO_VAL_INT;
+
+ case MP2629_INPUT_VOLT:
+ *val = 60;
+ return IIO_VAL_INT;
+
+ case MP2629_BATT_CURRENT:
+ *val = 175;
+ *val2 = 10;
+ return IIO_VAL_FRACTIONAL;
+
+ case MP2629_INPUT_CURRENT:
+ *val = 133;
+ *val2 = 10;
+ return IIO_VAL_FRACTIONAL;
+
+ default:
+ return -EINVAL;
+ }
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info mp2629_adc_info = {
+ .read_raw = &mp2629_read_raw,
+};
+
+static int mp2629_adc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mp2629_data *ddata = dev_get_drvdata(dev->parent);
+ struct mp2629_adc *info;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*info));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ info = iio_priv(indio_dev);
+ info->regmap = ddata->regmap;
+ info->dev = dev;
+ platform_set_drvdata(pdev, indio_dev);
+
+ ret = regmap_update_bits(info->regmap, MP2629_REG_ADC_CTRL,
+ MP2629_ADC_START | MP2629_ADC_CONTINUOUS,
+ MP2629_ADC_START | MP2629_ADC_CONTINUOUS);
+ if (ret) {
+ dev_err(dev, "adc enable fail: %d\n", ret);
+ return ret;
+ }
+
+ ret = iio_map_array_register(indio_dev, mp2629_adc_maps);
+ if (ret) {
+ dev_err(dev, "IIO maps register fail: %d\n", ret);
+ goto fail_disable;
+ }
+
+ indio_dev->name = "mp2629-adc";
+ indio_dev->dev.parent = dev;
+ indio_dev->channels = mp2629_channels;
+ indio_dev->num_channels = ARRAY_SIZE(mp2629_channels);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &mp2629_adc_info;
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(dev, "IIO device register fail: %d\n", ret);
+ goto fail_map_unregister;
+ }
+
+ return 0;
+
+fail_map_unregister:
+ iio_map_array_unregister(indio_dev);
+
+fail_disable:
+ regmap_update_bits(info->regmap, MP2629_REG_ADC_CTRL,
+ MP2629_ADC_CONTINUOUS, 0);
+ regmap_update_bits(info->regmap, MP2629_REG_ADC_CTRL,
+ MP2629_ADC_START, 0);
+
+ return ret;
+}
+
+static int mp2629_adc_remove(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct mp2629_adc *info = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+
+ iio_map_array_unregister(indio_dev);
+
+ regmap_update_bits(info->regmap, MP2629_REG_ADC_CTRL,
+ MP2629_ADC_CONTINUOUS, 0);
+ regmap_update_bits(info->regmap, MP2629_REG_ADC_CTRL,
+ MP2629_ADC_START, 0);
+
+ return 0;
+}
+
+static const struct of_device_id mp2629_adc_of_match[] = {
+ { .compatible = "mps,mp2629_adc" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mp2629_adc_of_match);
+
+static struct platform_driver mp2629_adc_driver = {
+ .driver = {
+ .name = "mp2629_adc",
+ .of_match_table = mp2629_adc_of_match,
+ },
+ .probe = mp2629_adc_probe,
+ .remove = mp2629_adc_remove,
+};
+module_platform_driver(mp2629_adc_driver);
+
+MODULE_AUTHOR("Saravanan Sekar <sravanhome@gmail.com>");
+MODULE_DESCRIPTION("MP2629 ADC driver");
+MODULE_LICENSE("GPL");
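A short illustration (not from the driver) of how a consumer applies the IIO_VAL_FRACTIONAL scale above: the reported scale is val/val2, so the battery-current channel exposes 175/10 = 17.5 per LSB, and a hypothetical raw reading converts as:

	int raw = 42;					/* hypothetical _raw value */
	int scale_num = 175, scale_den = 10;		/* from IIO_CHAN_INFO_SCALE above */
	int value = raw * scale_num / scale_den;	/* 42 * 17.5 = 735 */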
diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
index 2df88d2b880a..0e2068ec068b 100644
--- a/drivers/iio/adc/stm32-adc-core.c
+++ b/drivers/iio/adc/stm32-adc-core.c
@@ -65,12 +65,14 @@ struct stm32_adc_priv;
* @clk_sel: clock selection routine
* @max_clk_rate_hz: maximum analog clock rate (Hz, from datasheet)
* @has_syscfg: SYSCFG capability flags
+ * @num_irqs: number of interrupt lines
*/
struct stm32_adc_priv_cfg {
const struct stm32_adc_common_regs *regs;
int (*clk_sel)(struct platform_device *, struct stm32_adc_priv *);
u32 max_clk_rate_hz;
unsigned int has_syscfg;
+ unsigned int num_irqs;
};
/**
@@ -375,21 +377,15 @@ static int stm32_adc_irq_probe(struct platform_device *pdev,
struct device_node *np = pdev->dev.of_node;
unsigned int i;
- for (i = 0; i < STM32_ADC_MAX_ADCS; i++) {
+ /*
+ * Interrupt(s) must be provided, depending on the compatible:
+ * - stm32f4/h7 shares a common interrupt line.
+ * - stm32mp1 has one interrupt line per ADC.
+ */
+ for (i = 0; i < priv->cfg->num_irqs; i++) {
priv->irq[i] = platform_get_irq(pdev, i);
- if (priv->irq[i] < 0) {
- /*
- * At least one interrupt must be provided, make others
- * optional:
- * - stm32f4/h7 shares a common interrupt.
- * - stm32mp1, has one line per ADC (either for ADC1,
- * ADC2 or both).
- */
- if (i && priv->irq[i] == -ENXIO)
- continue;
-
+ if (priv->irq[i] < 0)
return priv->irq[i];
- }
}
priv->domain = irq_domain_add_simple(np, STM32_ADC_MAX_ADCS, 0,
@@ -400,9 +396,7 @@ static int stm32_adc_irq_probe(struct platform_device *pdev,
return -ENOMEM;
}
- for (i = 0; i < STM32_ADC_MAX_ADCS; i++) {
- if (priv->irq[i] < 0)
- continue;
+ for (i = 0; i < priv->cfg->num_irqs; i++) {
irq_set_chained_handler(priv->irq[i], stm32_adc_irq_handler);
irq_set_handler_data(priv->irq[i], priv);
}
@@ -420,11 +414,8 @@ static void stm32_adc_irq_remove(struct platform_device *pdev,
irq_dispose_mapping(irq_find_mapping(priv->domain, hwirq));
irq_domain_remove(priv->domain);
- for (i = 0; i < STM32_ADC_MAX_ADCS; i++) {
- if (priv->irq[i] < 0)
- continue;
+ for (i = 0; i < priv->cfg->num_irqs; i++)
irq_set_chained_handler(priv->irq[i], NULL);
- }
}
static int stm32_adc_core_switches_supply_en(struct stm32_adc_priv *priv,
@@ -817,6 +808,7 @@ static const struct stm32_adc_priv_cfg stm32f4_adc_priv_cfg = {
.regs = &stm32f4_adc_common_regs,
.clk_sel = stm32f4_adc_clk_sel,
.max_clk_rate_hz = 36000000,
+ .num_irqs = 1,
};
static const struct stm32_adc_priv_cfg stm32h7_adc_priv_cfg = {
@@ -824,6 +816,7 @@ static const struct stm32_adc_priv_cfg stm32h7_adc_priv_cfg = {
.clk_sel = stm32h7_adc_clk_sel,
.max_clk_rate_hz = 36000000,
.has_syscfg = HAS_VBOOSTER,
+ .num_irqs = 1,
};
static const struct stm32_adc_priv_cfg stm32mp1_adc_priv_cfg = {
@@ -831,6 +824,7 @@ static const struct stm32_adc_priv_cfg stm32mp1_adc_priv_cfg = {
.clk_sel = stm32h7_adc_clk_sel,
.max_clk_rate_hz = 40000000,
.has_syscfg = HAS_VBOOSTER | HAS_ANASWVDD,
+ .num_irqs = 2,
};
static const struct of_device_id stm32_adc_of_match[] = {
diff --git a/drivers/iio/adc/sun4i-gpadc-iio.c b/drivers/iio/adc/sun4i-gpadc-iio.c
index 176e1cb4abb1..0f2c1738a90d 100644
--- a/drivers/iio/adc/sun4i-gpadc-iio.c
+++ b/drivers/iio/adc/sun4i-gpadc-iio.c
@@ -496,7 +496,6 @@ static int sun4i_gpadc_probe_dt(struct platform_device *pdev,
struct iio_dev *indio_dev)
{
struct sun4i_gpadc_iio *info = iio_priv(indio_dev);
- struct resource *mem;
void __iomem *base;
int ret;
@@ -508,8 +507,7 @@ static int sun4i_gpadc_probe_dt(struct platform_device *pdev,
indio_dev->num_channels = ARRAY_SIZE(sun8i_a33_gpadc_channels);
indio_dev->channels = sun8i_a33_gpadc_channels;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, mem);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/iio/adc/ti-ads124s08.c b/drivers/iio/adc/ti-ads124s08.c
index 552c2be8d87a..f1ee3b1e2827 100644
--- a/drivers/iio/adc/ti-ads124s08.c
+++ b/drivers/iio/adc/ti-ads124s08.c
@@ -22,6 +22,8 @@
#include <linux/iio/triggered_buffer.h>
#include <linux/iio/sysfs.h>
+#include <asm/unaligned.h>
+
/* Commands */
#define ADS124S08_CMD_NOP 0x00
#define ADS124S08_CMD_WAKEUP 0x02
@@ -188,7 +190,6 @@ static int ads124s_read(struct iio_dev *indio_dev, unsigned int chan)
{
struct ads124s_private *priv = iio_priv(indio_dev);
int ret;
- u32 tmp;
struct spi_transfer t[] = {
{
.tx_buf = &priv->data[0],
@@ -208,9 +209,7 @@ static int ads124s_read(struct iio_dev *indio_dev, unsigned int chan)
if (ret < 0)
return ret;
- tmp = priv->data[2] << 16 | priv->data[3] << 8 | priv->data[4];
-
- return tmp;
+ return get_unaligned_be24(&priv->data[2]);
}
static int ads124s_read_raw(struct iio_dev *indio_dev,
diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
index 6fd06e4eff73..d7fecab9252e 100644
--- a/drivers/iio/adc/xilinx-xadc-core.c
+++ b/drivers/iio/adc/xilinx-xadc-core.c
@@ -3,7 +3,7 @@
* Xilinx XADC driver
*
* Copyright 2013-2014 Analog Devices Inc.
- * Author: Lars-Peter Clauen <lars@metafoo.de>
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
*
* Documentation for the parts can be found at:
* - XADC hardmacro: Xilinx UG480
@@ -663,7 +663,7 @@ static int xadc_trigger_set_state(struct iio_trigger *trigger, bool state)
mutex_lock(&xadc->mutex);
if (state) {
- /* Only one of the two triggers can be active at the a time. */
+ /* Only one of the two triggers can be active at a time. */
if (xadc->trigger != NULL) {
ret = -EBUSY;
goto err_out;
diff --git a/drivers/iio/adc/xilinx-xadc-events.c b/drivers/iio/adc/xilinx-xadc-events.c
index dbfd5da290a4..2357f585720a 100644
--- a/drivers/iio/adc/xilinx-xadc-events.c
+++ b/drivers/iio/adc/xilinx-xadc-events.c
@@ -3,7 +3,7 @@
* Xilinx XADC driver
*
* Copyright 2013 Analog Devices Inc.
- * Author: Lars-Peter Clauen <lars@metafoo.de>
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
*/
#include <linux/iio/events.h>
diff --git a/drivers/iio/adc/xilinx-xadc.h b/drivers/iio/adc/xilinx-xadc.h
index 4017f18b0a4f..25abed9c0285 100644
--- a/drivers/iio/adc/xilinx-xadc.h
+++ b/drivers/iio/adc/xilinx-xadc.h
@@ -3,7 +3,7 @@
* Xilinx XADC driver
*
* Copyright 2013 Analog Devices Inc.
- * Author: Lars-Peter Clauen <lars@metafoo.de>
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
*/
#ifndef __IIO_XILINX_XADC__
diff --git a/drivers/iio/buffer/industrialio-buffer-dma.c b/drivers/iio/buffer/industrialio-buffer-dma.c
index a74bd9c0587c..d348af8b9705 100644
--- a/drivers/iio/buffer/industrialio-buffer-dma.c
+++ b/drivers/iio/buffer/industrialio-buffer-dma.c
@@ -12,7 +12,6 @@
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/poll.h>
-#include <linux/iio/buffer.h>
#include <linux/iio/buffer_impl.h>
#include <linux/iio/buffer-dma.h>
#include <linux/dma-mapping.h>
diff --git a/drivers/iio/buffer/industrialio-buffer-dmaengine.c b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
index b129693af0fd..6dedf12b69a4 100644
--- a/drivers/iio/buffer/industrialio-buffer-dmaengine.c
+++ b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
@@ -134,7 +134,7 @@ static ssize_t iio_dmaengine_buffer_get_length_align(struct device *dev,
struct dmaengine_buffer *dmaengine_buffer =
iio_buffer_to_dmaengine_buffer(indio_dev->buffer);
- return sprintf(buf, "%u\n", dmaengine_buffer->align);
+ return sprintf(buf, "%zu\n", dmaengine_buffer->align);
}
static IIO_DEVICE_ATTR(length_align_bytes, 0444,
@@ -229,6 +229,45 @@ void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
}
EXPORT_SYMBOL_GPL(iio_dmaengine_buffer_free);
+static void __devm_iio_dmaengine_buffer_free(struct device *dev, void *res)
+{
+ iio_dmaengine_buffer_free(*(struct iio_buffer **)res);
+}
+
+/**
+ * devm_iio_dmaengine_buffer_alloc() - Resource-managed iio_dmaengine_buffer_alloc()
+ * @dev: Parent device for the buffer
+ * @channel: DMA channel name, typically "rx".
+ *
+ * This allocates a new IIO buffer which internally uses the DMAengine framework
+ * to perform its transfers. The parent device will be used to request the DMA
+ * channel.
+ *
+ * The buffer will be automatically de-allocated once the device gets destroyed.
+ */
+struct iio_buffer *devm_iio_dmaengine_buffer_alloc(struct device *dev,
+ const char *channel)
+{
+ struct iio_buffer **bufferp, *buffer;
+
+ bufferp = devres_alloc(__devm_iio_dmaengine_buffer_free,
+ sizeof(*bufferp), GFP_KERNEL);
+ if (!bufferp)
+ return ERR_PTR(-ENOMEM);
+
+ buffer = iio_dmaengine_buffer_alloc(dev, channel);
+ if (IS_ERR(buffer)) {
+ devres_free(bufferp);
+ return buffer;
+ }
+
+ *bufferp = buffer;
+ devres_add(dev, bufferp);
+
+ return buffer;
+}
+EXPORT_SYMBOL_GPL(devm_iio_dmaengine_buffer_alloc);
+
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("DMA buffer for the IIO framework");
MODULE_LICENSE("GPL");
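A minimal usage sketch for the new helper, assuming a caller in some driver's probe path; the device pointer and the "rx" channel name are assumptions, not part of this patch:

static int example_attach_dma_buffer(struct iio_dev *indio_dev, struct device *dev)
{
	struct iio_buffer *buffer;

	/* Buffer lifetime is now tied to the device via devres */
	buffer = devm_iio_dmaengine_buffer_alloc(dev, "rx");
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	iio_device_attach_buffer(indio_dev, buffer);
	indio_dev->modes |= INDIO_BUFFER_HARDWARE;

	return 0;
}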
diff --git a/drivers/iio/buffer/industrialio-hw-consumer.c b/drivers/iio/buffer/industrialio-hw-consumer.c
index 95165697d8ae..f2d27788f666 100644
--- a/drivers/iio/buffer/industrialio-hw-consumer.c
+++ b/drivers/iio/buffer/industrialio-hw-consumer.c
@@ -142,17 +142,6 @@ static void devm_iio_hw_consumer_release(struct device *dev, void *res)
iio_hw_consumer_free(*(struct iio_hw_consumer **)res);
}
-static int devm_iio_hw_consumer_match(struct device *dev, void *res, void *data)
-{
- struct iio_hw_consumer **r = res;
-
- if (!r || !*r) {
- WARN_ON(!r || !*r);
- return 0;
- }
- return *r == data;
-}
-
/**
* devm_iio_hw_consumer_alloc - Resource-managed iio_hw_consumer_alloc()
* @dev: Pointer to consumer device.
@@ -160,9 +149,6 @@ static int devm_iio_hw_consumer_match(struct device *dev, void *res, void *data)
* Managed iio_hw_consumer_alloc. iio_hw_consumer allocated with this function
* is automatically freed on driver detach.
*
- * If an iio_hw_consumer allocated with this function needs to be freed
- * separately, devm_iio_hw_consumer_free() must be used.
- *
* returns pointer to allocated iio_hw_consumer on success, NULL on failure.
*/
struct iio_hw_consumer *devm_iio_hw_consumer_alloc(struct device *dev)
@@ -187,23 +173,6 @@ struct iio_hw_consumer *devm_iio_hw_consumer_alloc(struct device *dev)
EXPORT_SYMBOL_GPL(devm_iio_hw_consumer_alloc);
/**
- * devm_iio_hw_consumer_free - Resource-managed iio_hw_consumer_free()
- * @dev: Pointer to consumer device.
- * @hwc: iio_hw_consumer to free.
- *
- * Free iio_hw_consumer allocated with devm_iio_hw_consumer_alloc().
- */
-void devm_iio_hw_consumer_free(struct device *dev, struct iio_hw_consumer *hwc)
-{
- int rc;
-
- rc = devres_release(dev, devm_iio_hw_consumer_release,
- devm_iio_hw_consumer_match, hwc);
- WARN_ON(rc);
-}
-EXPORT_SYMBOL_GPL(devm_iio_hw_consumer_free);
-
-/**
* iio_hw_consumer_enable() - Enable IIO hardware consumer
* @hwc: iio_hw_consumer to enable.
*
diff --git a/drivers/iio/buffer/industrialio-triggered-buffer.c b/drivers/iio/buffer/industrialio-triggered-buffer.c
index cb322b2f09cd..e8046c1ecd6b 100644
--- a/drivers/iio/buffer/industrialio-triggered-buffer.c
+++ b/drivers/iio/buffer/industrialio-triggered-buffer.c
@@ -126,17 +126,6 @@ int devm_iio_triggered_buffer_setup(struct device *dev,
}
EXPORT_SYMBOL_GPL(devm_iio_triggered_buffer_setup);
-void devm_iio_triggered_buffer_cleanup(struct device *dev,
- struct iio_dev *indio_dev)
-{
- int rc;
-
- rc = devres_release(dev, devm_iio_triggered_buffer_clean,
- devm_iio_device_match, indio_dev);
- WARN_ON(rc);
-}
-EXPORT_SYMBOL_GPL(devm_iio_triggered_buffer_cleanup);
-
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("IIO helper functions for setting up triggered buffers");
MODULE_LICENSE("GPL");
diff --git a/drivers/iio/buffer/kfifo_buf.c b/drivers/iio/buffer/kfifo_buf.c
index 3150f8ab984b..1359abed3b31 100644
--- a/drivers/iio/buffer/kfifo_buf.c
+++ b/drivers/iio/buffer/kfifo_buf.c
@@ -179,16 +179,6 @@ static void devm_iio_kfifo_release(struct device *dev, void *res)
iio_kfifo_free(*(struct iio_buffer **)res);
}
-static int devm_iio_kfifo_match(struct device *dev, void *res, void *data)
-{
- struct iio_buffer **r = res;
-
- if (WARN_ON(!r || !*r))
- return 0;
-
- return *r == data;
-}
-
/**
* devm_iio_fifo_allocate - Resource-managed iio_kfifo_allocate()
* @dev: Device to allocate kfifo buffer for
@@ -216,16 +206,4 @@ struct iio_buffer *devm_iio_kfifo_allocate(struct device *dev)
}
EXPORT_SYMBOL(devm_iio_kfifo_allocate);
-/**
- * devm_iio_fifo_free - Resource-managed iio_kfifo_free()
- * @dev: Device the buffer belongs to
- * @r: The buffer associated with the device
- */
-void devm_iio_kfifo_free(struct device *dev, struct iio_buffer *r)
-{
- WARN_ON(devres_release(dev, devm_iio_kfifo_release,
- devm_iio_kfifo_match, r));
-}
-EXPORT_SYMBOL(devm_iio_kfifo_free);
-
MODULE_LICENSE("GPL");
diff --git a/drivers/iio/chemical/Kconfig b/drivers/iio/chemical/Kconfig
index a7e65a59bf42..7f21afd73b1c 100644
--- a/drivers/iio/chemical/Kconfig
+++ b/drivers/iio/chemical/Kconfig
@@ -22,6 +22,17 @@ config ATLAS_PH_SENSOR
To compile this driver as module, choose M here: the
module will be called atlas-ph-sensor.
+config ATLAS_EZO_SENSOR
+ tristate "Atlas Scientific EZO sensors"
+ depends on I2C
+ help
+ Say Y here to build I2C interface support for the following
+ Atlas Scientific EZO sensors:
+ * CO2 EZO Sensor
+
+ To compile this driver as module, choose M here: the
+ module will be called atlas-ezo-sensor.
+
config BME680
tristate "Bosch Sensortec BME680 sensor driver"
depends on (I2C || SPI)
diff --git a/drivers/iio/chemical/Makefile b/drivers/iio/chemical/Makefile
index 33d3a595dda9..aba4167db745 100644
--- a/drivers/iio/chemical/Makefile
+++ b/drivers/iio/chemical/Makefile
@@ -5,6 +5,7 @@
# When adding new entries keep the list in alphabetical order
obj-$(CONFIG_ATLAS_PH_SENSOR) += atlas-sensor.o
+obj-$(CONFIG_ATLAS_EZO_SENSOR) += atlas-ezo-sensor.o
obj-$(CONFIG_BME680) += bme680_core.o
obj-$(CONFIG_BME680_I2C) += bme680_i2c.o
obj-$(CONFIG_BME680_SPI) += bme680_spi.o
diff --git a/drivers/iio/chemical/atlas-ezo-sensor.c b/drivers/iio/chemical/atlas-ezo-sensor.c
new file mode 100644
index 000000000000..f5a6d8ec6d4d
--- /dev/null
+++ b/drivers/iio/chemical/atlas-ezo-sensor.c
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * atlas-ezo-sensor.c - Support for Atlas Scientific EZO sensors
+ *
+ * Copyright (C) 2020 Konsulko Group
+ * Author: Matt Ranostay <matt.ranostay@konsulko.com>
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/of_device.h>
+#include <linux/iio/iio.h>
+
+#define ATLAS_EZO_DRV_NAME "atlas-ezo-sensor"
+#define ATLAS_CO2_INT_TIME_IN_MS 950
+
+enum {
+ ATLAS_CO2_EZO,
+};
+
+struct atlas_ezo_device {
+ const struct iio_chan_spec *channels;
+ int num_channels;
+ int delay;
+};
+
+struct atlas_ezo_data {
+ struct i2c_client *client;
+ struct atlas_ezo_device *chip;
+
+ /* lock to avoid multiple concurrent read calls */
+ struct mutex lock;
+
+ u8 buffer[8];
+};
+
+static const struct iio_chan_spec atlas_co2_ezo_channels[] = {
+ {
+ .type = IIO_CONCENTRATION,
+ .modified = 1,
+ .channel2 = IIO_MOD_CO2,
+ .info_mask_separate =
+ BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 32,
+ .storagebits = 32,
+ .endianness = IIO_CPU,
+ },
+ },
+};
+
+static struct atlas_ezo_device atlas_ezo_devices[] = {
+ [ATLAS_CO2_EZO] = {
+ .channels = atlas_co2_ezo_channels,
+ .num_channels = 1,
+ .delay = ATLAS_CO2_INT_TIME_IN_MS,
+ },
+};
+
+static int atlas_ezo_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct atlas_ezo_data *data = iio_priv(indio_dev);
+ struct i2c_client *client = data->client;
+ int ret = 0;
+
+ if (chan->type != IIO_CONCENTRATION)
+ return -EINVAL;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW: {
+ long tmp;
+
+ mutex_lock(&data->lock);
+
+ tmp = i2c_smbus_write_byte(client, 'R');
+
+ if (tmp < 0) {
+ mutex_unlock(&data->lock);
+ return tmp;
+ }
+
+ msleep(data->chip->delay);
+
+ tmp = i2c_master_recv(client, data->buffer, sizeof(data->buffer));
+
+ if (tmp < 0 || data->buffer[0] != 1) {
+ mutex_unlock(&data->lock);
+ return -EBUSY;
+ }
+
+ ret = kstrtol(data->buffer + 1, 10, &tmp);
+
+ *val = tmp;
+
+ mutex_unlock(&data->lock);
+
+ return ret ? ret : IIO_VAL_INT;
+ }
+ case IIO_CHAN_INFO_SCALE:
+ *val = 0;
+ *val2 = 100; /* 0.0001 */
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+
+ return ret;
+}
+
+static const struct iio_info atlas_info = {
+ .read_raw = atlas_ezo_read_raw,
+};
+
+static const struct i2c_device_id atlas_ezo_id[] = {
+ { "atlas-co2-ezo", ATLAS_CO2_EZO },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, atlas_ezo_id);
+
+static const struct of_device_id atlas_ezo_dt_ids[] = {
+ { .compatible = "atlas,co2-ezo", .data = (void *)ATLAS_CO2_EZO, },
+ {}
+};
+MODULE_DEVICE_TABLE(of, atlas_ezo_dt_ids);
+
+static int atlas_ezo_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct atlas_ezo_data *data;
+ struct atlas_ezo_device *chip;
+ const struct of_device_id *of_id;
+ struct iio_dev *indio_dev;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ of_id = of_match_device(atlas_ezo_dt_ids, &client->dev);
+ if (!of_id)
+ chip = &atlas_ezo_devices[id->driver_data];
+ else
+ chip = &atlas_ezo_devices[(unsigned long)of_id->data];
+
+ indio_dev->info = &atlas_info;
+ indio_dev->name = ATLAS_EZO_DRV_NAME;
+ indio_dev->channels = chip->channels;
+ indio_dev->num_channels = chip->num_channels;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->dev.parent = &client->dev;
+
+ data = iio_priv(indio_dev);
+ data->client = client;
+ data->chip = chip;
+ mutex_init(&data->lock);
+
+ return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+static struct i2c_driver atlas_ezo_driver = {
+ .driver = {
+ .name = ATLAS_EZO_DRV_NAME,
+ .of_match_table = atlas_ezo_dt_ids,
+ },
+ .probe = atlas_ezo_probe,
+ .id_table = atlas_ezo_id,
+};
+module_i2c_driver(atlas_ezo_driver);
+
+MODULE_AUTHOR("Matt Ranostay <matt.ranostay@konsulko.com>");
+MODULE_DESCRIPTION("Atlas Scientific EZO sensors");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/chemical/atlas-sensor.c b/drivers/iio/chemical/atlas-sensor.c
index 7b199ce16ecf..78a27e36bf32 100644
--- a/drivers/iio/chemical/atlas-sensor.c
+++ b/drivers/iio/chemical/atlas-sensor.c
@@ -53,6 +53,8 @@
#define ATLAS_REG_DO_CALIB_STATUS_PRESSURE BIT(0)
#define ATLAS_REG_DO_CALIB_STATUS_DO BIT(1)
+#define ATLAS_REG_RTD_DATA 0x0e
+
#define ATLAS_REG_PH_TEMP_DATA 0x0e
#define ATLAS_REG_PH_DATA 0x16
@@ -72,12 +74,14 @@
#define ATLAS_EC_INT_TIME_IN_MS 650
#define ATLAS_ORP_INT_TIME_IN_MS 450
#define ATLAS_DO_INT_TIME_IN_MS 450
+#define ATLAS_RTD_INT_TIME_IN_MS 450
enum {
ATLAS_PH_SM,
ATLAS_EC_SM,
ATLAS_ORP_SM,
ATLAS_DO_SM,
+ ATLAS_RTD_SM,
};
struct atlas_data {
@@ -218,6 +222,22 @@ static const struct iio_chan_spec atlas_do_channels[] = {
},
};
+static const struct iio_chan_spec atlas_rtd_channels[] = {
+ {
+ .type = IIO_TEMP,
+ .address = ATLAS_REG_RTD_DATA,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 32,
+ .storagebits = 32,
+ .endianness = IIO_BE,
+ },
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(1),
+};
+
static int atlas_check_ph_calibration(struct atlas_data *data)
{
struct device *dev = &data->client->dev;
@@ -362,6 +382,12 @@ static struct atlas_device atlas_devices[] = {
.calibration = &atlas_check_do_calibration,
.delay = ATLAS_DO_INT_TIME_IN_MS,
},
+ [ATLAS_RTD_SM] = {
+ .channels = atlas_rtd_channels,
+ .num_channels = 2,
+ .data_reg = ATLAS_REG_RTD_DATA,
+ .delay = ATLAS_RTD_INT_TIME_IN_MS,
+ },
};
static int atlas_set_powermode(struct atlas_data *data, int on)
@@ -438,8 +464,7 @@ static irqreturn_t atlas_trigger_handler(int irq, void *private)
int ret;
ret = regmap_bulk_read(data->regmap, data->chip->data_reg,
- (u8 *) &data->buffer,
- sizeof(__be32) * channels);
+ &data->buffer, sizeof(__be32) * channels);
if (!ret)
iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
@@ -475,7 +500,7 @@ static int atlas_read_measurement(struct atlas_data *data, int reg, __be32 *val)
if (suspended)
msleep(data->chip->delay);
- ret = regmap_bulk_read(data->regmap, reg, (u8 *) val, sizeof(*val));
+ ret = regmap_bulk_read(data->regmap, reg, val, sizeof(*val));
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
@@ -490,6 +515,7 @@ static int atlas_read_raw(struct iio_dev *indio_dev,
struct atlas_data *data = iio_priv(indio_dev);
switch (mask) {
+ case IIO_CHAN_INFO_PROCESSED:
case IIO_CHAN_INFO_RAW: {
int ret;
__be32 reg;
@@ -497,7 +523,7 @@ static int atlas_read_raw(struct iio_dev *indio_dev,
switch (chan->type) {
case IIO_TEMP:
ret = regmap_bulk_read(data->regmap, chan->address,
- (u8 *) &reg, sizeof(reg));
+ &reg, sizeof(reg));
break;
case IIO_PH:
case IIO_CONCENTRATION:
@@ -578,6 +604,7 @@ static const struct i2c_device_id atlas_id[] = {
{ "atlas-ec-sm", ATLAS_EC_SM},
{ "atlas-orp-sm", ATLAS_ORP_SM},
{ "atlas-do-sm", ATLAS_DO_SM},
+ { "atlas-rtd-sm", ATLAS_RTD_SM},
{}
};
MODULE_DEVICE_TABLE(i2c, atlas_id);
@@ -587,6 +614,7 @@ static const struct of_device_id atlas_dt_ids[] = {
{ .compatible = "atlas,ec-sm", .data = (void *)ATLAS_EC_SM, },
{ .compatible = "atlas,orp-sm", .data = (void *)ATLAS_ORP_SM, },
{ .compatible = "atlas,do-sm", .data = (void *)ATLAS_DO_SM, },
+ { .compatible = "atlas,rtd-sm", .data = (void *)ATLAS_RTD_SM, },
{ }
};
MODULE_DEVICE_TABLE(of, atlas_dt_ids);
diff --git a/drivers/iio/chemical/bme680_core.c b/drivers/iio/chemical/bme680_core.c
index ccde4c65ff93..13773e01699b 100644
--- a/drivers/iio/chemical/bme680_core.c
+++ b/drivers/iio/chemical/bme680_core.c
@@ -114,14 +114,16 @@ static int bme680_read_calib(struct bme680_data *data,
__le16 buf;
/* Temperature related coefficients */
- ret = regmap_bulk_read(data->regmap, BME680_T1_LSB_REG, (u8 *) &buf, 2);
+ ret = regmap_bulk_read(data->regmap, BME680_T1_LSB_REG,
+ &buf, sizeof(buf));
if (ret < 0) {
dev_err(dev, "failed to read BME680_T1_LSB_REG\n");
return ret;
}
calib->par_t1 = le16_to_cpu(buf);
- ret = regmap_bulk_read(data->regmap, BME680_T2_LSB_REG, (u8 *) &buf, 2);
+ ret = regmap_bulk_read(data->regmap, BME680_T2_LSB_REG,
+ &buf, sizeof(buf));
if (ret < 0) {
dev_err(dev, "failed to read BME680_T2_LSB_REG\n");
return ret;
@@ -136,14 +138,16 @@ static int bme680_read_calib(struct bme680_data *data,
calib->par_t3 = tmp;
/* Pressure related coefficients */
- ret = regmap_bulk_read(data->regmap, BME680_P1_LSB_REG, (u8 *) &buf, 2);
+ ret = regmap_bulk_read(data->regmap, BME680_P1_LSB_REG,
+ &buf, sizeof(buf));
if (ret < 0) {
dev_err(dev, "failed to read BME680_P1_LSB_REG\n");
return ret;
}
calib->par_p1 = le16_to_cpu(buf);
- ret = regmap_bulk_read(data->regmap, BME680_P2_LSB_REG, (u8 *) &buf, 2);
+ ret = regmap_bulk_read(data->regmap, BME680_P2_LSB_REG,
+ &buf, sizeof(buf));
if (ret < 0) {
dev_err(dev, "failed to read BME680_P2_LSB_REG\n");
return ret;
@@ -157,14 +161,16 @@ static int bme680_read_calib(struct bme680_data *data,
}
calib->par_p3 = tmp;
- ret = regmap_bulk_read(data->regmap, BME680_P4_LSB_REG, (u8 *) &buf, 2);
+ ret = regmap_bulk_read(data->regmap, BME680_P4_LSB_REG,
+ &buf, sizeof(buf));
if (ret < 0) {
dev_err(dev, "failed to read BME680_P4_LSB_REG\n");
return ret;
}
calib->par_p4 = le16_to_cpu(buf);
- ret = regmap_bulk_read(data->regmap, BME680_P5_LSB_REG, (u8 *) &buf, 2);
+ ret = regmap_bulk_read(data->regmap, BME680_P5_LSB_REG,
+ &buf, sizeof(buf));
if (ret < 0) {
dev_err(dev, "failed to read BME680_P5_LSB_REG\n");
return ret;
@@ -185,14 +191,16 @@ static int bme680_read_calib(struct bme680_data *data,
}
calib->par_p7 = tmp;
- ret = regmap_bulk_read(data->regmap, BME680_P8_LSB_REG, (u8 *) &buf, 2);
+ ret = regmap_bulk_read(data->regmap, BME680_P8_LSB_REG,
+ &buf, sizeof(buf));
if (ret < 0) {
dev_err(dev, "failed to read BME680_P8_LSB_REG\n");
return ret;
}
calib->par_p8 = le16_to_cpu(buf);
- ret = regmap_bulk_read(data->regmap, BME680_P9_LSB_REG, (u8 *) &buf, 2);
+ ret = regmap_bulk_read(data->regmap, BME680_P9_LSB_REG,
+ &buf, sizeof(buf));
if (ret < 0) {
dev_err(dev, "failed to read BME680_P9_LSB_REG\n");
return ret;
@@ -276,8 +284,8 @@ static int bme680_read_calib(struct bme680_data *data,
}
calib->par_gh1 = tmp;
- ret = regmap_bulk_read(data->regmap, BME680_GH2_LSB_REG, (u8 *) &buf,
- 2);
+ ret = regmap_bulk_read(data->regmap, BME680_GH2_LSB_REG,
+ &buf, sizeof(buf));
if (ret < 0) {
dev_err(dev, "failed to read BME680_GH2_LSB_REG\n");
return ret;
@@ -615,7 +623,7 @@ static int bme680_read_temp(struct bme680_data *data, int *val)
return ret;
ret = regmap_bulk_read(data->regmap, BME680_REG_TEMP_MSB,
- (u8 *) &tmp, 3);
+ &tmp, 3);
if (ret < 0) {
dev_err(dev, "failed to read temperature\n");
return ret;
@@ -656,7 +664,7 @@ static int bme680_read_press(struct bme680_data *data,
return ret;
ret = regmap_bulk_read(data->regmap, BME680_REG_PRESS_MSB,
- (u8 *) &tmp, 3);
+ &tmp, 3);
if (ret < 0) {
dev_err(dev, "failed to read pressure\n");
return ret;
@@ -689,7 +697,7 @@ static int bme680_read_humid(struct bme680_data *data,
return ret;
ret = regmap_bulk_read(data->regmap, BM6880_REG_HUMIDITY_MSB,
- (u8 *) &tmp, 2);
+ &tmp, sizeof(tmp));
if (ret < 0) {
dev_err(dev, "failed to read humidity\n");
return ret;
@@ -754,7 +762,7 @@ static int bme680_read_gas(struct bme680_data *data,
}
ret = regmap_bulk_read(data->regmap, BME680_REG_GAS_MSB,
- (u8 *) &tmp, 2);
+ &tmp, sizeof(tmp));
if (ret < 0) {
dev_err(dev, "failed to read gas resistance\n");
return ret;
diff --git a/drivers/iio/chemical/ccs811.c b/drivers/iio/chemical/ccs811.c
index 2ebdfc35bcda..3ecd633f9ed3 100644
--- a/drivers/iio/chemical/ccs811.c
+++ b/drivers/iio/chemical/ccs811.c
@@ -16,6 +16,7 @@
*/
#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
@@ -36,6 +37,7 @@
#define CCS811_ERR 0xE0
/* Used to transition from boot to application mode */
#define CCS811_APP_START 0xF4
+#define CCS811_SW_RESET 0xFF
/* Status register flags */
#define CCS811_STATUS_ERROR BIT(0)
@@ -74,6 +76,7 @@ struct ccs811_data {
struct mutex lock; /* Protect readings */
struct ccs811_reading buffer;
struct iio_trigger *drdy_trig;
+ struct gpio_desc *wakeup_gpio;
bool drdy_trig_on;
};
@@ -166,10 +169,25 @@ static int ccs811_setup(struct i2c_client *client)
CCS811_MODE_IAQ_1SEC);
}
+static void ccs811_set_wakeup(struct ccs811_data *data, bool enable)
+{
+ if (!data->wakeup_gpio)
+ return;
+
+ gpiod_set_value(data->wakeup_gpio, enable);
+
+ if (enable)
+ usleep_range(50, 60);
+ else
+ usleep_range(20, 30);
+}
+
static int ccs811_get_measurement(struct ccs811_data *data)
{
int ret, tries = 11;
+ ccs811_set_wakeup(data, true);
+
/* Maximum waiting time: 1s, as measurements are made every second */
while (tries-- > 0) {
ret = i2c_smbus_read_byte_data(data->client, CCS811_STATUS);
@@ -183,9 +201,12 @@ static int ccs811_get_measurement(struct ccs811_data *data)
if (!(ret & CCS811_STATUS_DATA_READY))
return -EIO;
- return i2c_smbus_read_i2c_block_data(data->client,
+ ret = i2c_smbus_read_i2c_block_data(data->client,
CCS811_ALG_RESULT_DATA, 8,
(char *)&data->buffer);
+ ccs811_set_wakeup(data, false);
+
+ return ret;
}
static int ccs811_read_raw(struct iio_dev *indio_dev,
@@ -336,6 +357,45 @@ static irqreturn_t ccs811_data_rdy_trigger_poll(int irq, void *private)
return IRQ_HANDLED;
}
+static int ccs811_reset(struct i2c_client *client)
+{
+ struct gpio_desc *reset_gpio;
+ int ret;
+
+ reset_gpio = devm_gpiod_get_optional(&client->dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(reset_gpio))
+ return PTR_ERR(reset_gpio);
+
+ /* Try to reset using the nRESET pin if available, else do a SW reset */
+ if (reset_gpio) {
+ gpiod_set_value(reset_gpio, 1);
+ usleep_range(20, 30);
+ gpiod_set_value(reset_gpio, 0);
+ } else {
+ /*
+ * As per the datasheet, this sequence of values needs to be
+ * written to the SW_RESET register for triggering the soft
+ * reset in the device and placing it in boot mode.
+ */
+ static const u8 reset_seq[] = {
+ 0x11, 0xE5, 0x72, 0x8A,
+ };
+
+ ret = i2c_smbus_write_i2c_block_data(client, CCS811_SW_RESET,
+ sizeof(reset_seq), reset_seq);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed to reset sensor\n");
+ return ret;
+ }
+ }
+
+ /* tSTART delay required after reset */
+ usleep_range(1000, 2000);
+
+ return 0;
+}
+
static int ccs811_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -348,36 +408,59 @@ static int ccs811_probe(struct i2c_client *client,
| I2C_FUNC_SMBUS_READ_I2C_BLOCK))
return -EOPNOTSUPP;
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+ data->client = client;
+
+ data->wakeup_gpio = devm_gpiod_get_optional(&client->dev, "wakeup",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(data->wakeup_gpio))
+ return PTR_ERR(data->wakeup_gpio);
+
+ ccs811_set_wakeup(data, true);
+
+ ret = ccs811_reset(client);
+ if (ret) {
+ ccs811_set_wakeup(data, false);
+ return ret;
+ }
+
/* Check hardware id (should be 0x81 for this family of devices) */
ret = i2c_smbus_read_byte_data(client, CCS811_HW_ID);
- if (ret < 0)
+ if (ret < 0) {
+ ccs811_set_wakeup(data, false);
return ret;
+ }
if (ret != CCS811_HW_ID_VALUE) {
dev_err(&client->dev, "hardware id doesn't match CCS81x\n");
+ ccs811_set_wakeup(data, false);
return -ENODEV;
}
ret = i2c_smbus_read_byte_data(client, CCS811_HW_VERSION);
- if (ret < 0)
+ if (ret < 0) {
+ ccs811_set_wakeup(data, false);
return ret;
+ }
if ((ret & CCS811_HW_VERSION_MASK) != CCS811_HW_VERSION_VALUE) {
dev_err(&client->dev, "no CCS811 sensor\n");
+ ccs811_set_wakeup(data, false);
return -ENODEV;
}
- indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
- if (!indio_dev)
- return -ENOMEM;
-
ret = ccs811_setup(client);
- if (ret < 0)
+ if (ret < 0) {
+ ccs811_set_wakeup(data, false);
return ret;
+ }
- data = iio_priv(indio_dev);
- i2c_set_clientdata(client, indio_dev);
- data->client = client;
+ ccs811_set_wakeup(data, false);
mutex_init(&data->lock);
@@ -466,9 +549,16 @@ static const struct i2c_device_id ccs811_id[] = {
};
MODULE_DEVICE_TABLE(i2c, ccs811_id);
+static const struct of_device_id ccs811_dt_ids[] = {
+ { .compatible = "ams,ccs811" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ccs811_dt_ids);
+
static struct i2c_driver ccs811_driver = {
.driver = {
.name = "ccs811",
+ .of_match_table = ccs811_dt_ids,
},
.probe = ccs811_probe,
.remove = ccs811_remove,
diff --git a/drivers/iio/chemical/pms7003.c b/drivers/iio/chemical/pms7003.c
index 23c9ab252470..07bb90d72434 100644
--- a/drivers/iio/chemical/pms7003.c
+++ b/drivers/iio/chemical/pms7003.c
@@ -73,6 +73,11 @@ struct pms7003_state {
struct pms7003_frame frame;
struct completion frame_ready;
struct mutex lock; /* must be held whenever state gets touched */
+ /* Used to construct scan to push to the IIO buffer */
+ struct {
+ u16 data[3]; /* PM1, PM2P5, PM10 */
+ s64 ts;
+ } scan;
};
static int pms7003_do_cmd(struct pms7003_state *state, enum pms7003_cmd cmd)
@@ -104,7 +109,6 @@ static irqreturn_t pms7003_trigger_handler(int irq, void *p)
struct iio_dev *indio_dev = pf->indio_dev;
struct pms7003_state *state = iio_priv(indio_dev);
struct pms7003_frame *frame = &state->frame;
- u16 data[3 + 1 + 4]; /* PM1, PM2P5, PM10, padding, timestamp */
int ret;
mutex_lock(&state->lock);
@@ -114,12 +118,15 @@ static irqreturn_t pms7003_trigger_handler(int irq, void *p)
goto err;
}
- data[PM1] = pms7003_get_pm(frame->data + PMS7003_PM1_OFFSET);
- data[PM2P5] = pms7003_get_pm(frame->data + PMS7003_PM2P5_OFFSET);
- data[PM10] = pms7003_get_pm(frame->data + PMS7003_PM10_OFFSET);
+ state->scan.data[PM1] =
+ pms7003_get_pm(frame->data + PMS7003_PM1_OFFSET);
+ state->scan.data[PM2P5] =
+ pms7003_get_pm(frame->data + PMS7003_PM2P5_OFFSET);
+ state->scan.data[PM10] =
+ pms7003_get_pm(frame->data + PMS7003_PM10_OFFSET);
mutex_unlock(&state->lock);
- iio_push_to_buffers_with_timestamp(indio_dev, data,
+ iio_push_to_buffers_with_timestamp(indio_dev, &state->scan,
iio_get_time_ns(indio_dev));
err:
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/chemical/sps30.c b/drivers/iio/chemical/sps30.c
index acb9f8ecbb3d..a88c1fb875a0 100644
--- a/drivers/iio/chemical/sps30.c
+++ b/drivers/iio/chemical/sps30.c
@@ -230,15 +230,18 @@ static irqreturn_t sps30_trigger_handler(int irq, void *p)
struct iio_dev *indio_dev = pf->indio_dev;
struct sps30_state *state = iio_priv(indio_dev);
int ret;
- s32 data[4 + 2]; /* PM1, PM2P5, PM4, PM10, timestamp */
+ struct {
+ s32 data[4]; /* PM1, PM2P5, PM4, PM10 */
+ s64 ts;
+ } scan;
mutex_lock(&state->lock);
- ret = sps30_do_meas(state, data, 4);
+ ret = sps30_do_meas(state, scan.data, ARRAY_SIZE(scan.data));
mutex_unlock(&state->lock);
if (ret)
goto err;
- iio_push_to_buffers_with_timestamp(indio_dev, data,
+ iio_push_to_buffers_with_timestamp(indio_dev, &scan,
iio_get_time_ns(indio_dev));
err:
iio_trigger_notify_done(indio_dev->trig);
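Both the pms7003 and sps30 changes above follow the same pattern: iio_push_to_buffers_with_timestamp() stores the timestamp at an 8-byte aligned offset after the channel data, so the scan buffer must reserve and align that slot. A generic sketch of the shape (names are illustrative, not from either driver):

	struct example_scan {
		u16 data[3];		/* channel samples */
		s64 ts __aligned(8);	/* filled in by the IIO core */
	} scan;

	iio_push_to_buffers_with_timestamp(indio_dev, &scan, iio_get_time_ns(indio_dev));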
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
index 906d87780419..ff375790b7e8 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
@@ -13,6 +13,8 @@
#include <linux/hid-sensor-hub.h>
#include <linux/iio/iio.h>
#include <linux/iio/trigger.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
#include <linux/iio/buffer.h>
#include <linux/iio/sysfs.h>
#include "hid-sensor-trigger.h"
@@ -222,7 +224,8 @@ static int hid_sensor_data_rdy_trigger_set_state(struct iio_trigger *trig,
return hid_sensor_power_state(iio_trigger_get_drvdata(trig), state);
}
-void hid_sensor_remove_trigger(struct hid_sensor_common *attrb)
+void hid_sensor_remove_trigger(struct iio_dev *indio_dev,
+ struct hid_sensor_common *attrb)
{
if (atomic_read(&attrb->runtime_pm_enable))
pm_runtime_disable(&attrb->pdev->dev);
@@ -233,6 +236,7 @@ void hid_sensor_remove_trigger(struct hid_sensor_common *attrb)
cancel_work_sync(&attrb->work);
iio_trigger_unregister(attrb->trigger);
iio_trigger_free(attrb->trigger);
+ iio_triggered_buffer_cleanup(indio_dev);
}
EXPORT_SYMBOL(hid_sensor_remove_trigger);
@@ -246,11 +250,18 @@ int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name,
int ret;
struct iio_trigger *trig;
+ ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+ NULL, NULL);
+ if (ret) {
+ dev_err(&indio_dev->dev, "Triggered Buffer Setup Failed\n");
+ return ret;
+ }
+
trig = iio_trigger_alloc("%s-dev%d", name, indio_dev->id);
if (trig == NULL) {
dev_err(&indio_dev->dev, "Trigger Allocate Failed\n");
ret = -ENOMEM;
- goto error_ret;
+ goto error_triggered_buffer_cleanup;
}
trig->dev.parent = indio_dev->dev.parent;
@@ -284,7 +295,8 @@ error_unreg_trigger:
iio_trigger_unregister(trig);
error_free_trig:
iio_trigger_free(trig);
-error_ret:
+error_triggered_buffer_cleanup:
+ iio_triggered_buffer_cleanup(indio_dev);
return ret;
}
EXPORT_SYMBOL(hid_sensor_setup_trigger);
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.h b/drivers/iio/common/hid-sensors/hid-sensor-trigger.h
index f47b940ff170..bb45cc89e551 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.h
+++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.h
@@ -13,7 +13,8 @@ extern const struct dev_pm_ops hid_sensor_pm_ops;
int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name,
struct hid_sensor_common *attrb);
-void hid_sensor_remove_trigger(struct hid_sensor_common *attrb);
+void hid_sensor_remove_trigger(struct iio_dev *indio_dev,
+ struct hid_sensor_common *attrb);
int hid_sensor_power_state(struct hid_sensor_common *st, bool state);
#endif
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
index 13bdfbbf5f71..7a69c1be7393 100644
--- a/drivers/iio/common/st_sensors/st_sensors_core.c
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
@@ -20,11 +20,6 @@
#include "st_sensors_core.h"
-static inline u32 st_sensors_get_unaligned_le24(const u8 *p)
-{
- return (s32)((p[0] | p[1] << 8 | p[2] << 16) << 8) >> 8;
-}
-
int st_sensors_write_data_with_mask(struct iio_dev *indio_dev,
u8 reg_addr, u8 mask, u8 data)
{
@@ -150,8 +145,7 @@ static int st_sensors_set_fullscale(struct iio_dev *indio_dev, unsigned int fs)
if (err < 0)
goto st_accel_set_fullscale_error;
- sdata->current_fullscale = (struct st_sensor_fullscale_avl *)
- &sdata->sensor_settings->fs.fs_avl[i];
+ sdata->current_fullscale = &sdata->sensor_settings->fs.fs_avl[i];
return err;
st_accel_set_fullscale_error:
@@ -278,8 +272,7 @@ static int st_sensors_set_drdy_int_pin(struct iio_dev *indio_dev,
!sdata->sensor_settings->drdy_irq.int2.addr) {
if (pdata->drdy_int_pin)
dev_info(&indio_dev->dev,
- "DRDY on pin INT%d specified, but sensor "
- "does not support interrupts\n",
+ "DRDY on pin INT%d specified, but sensor does not support interrupts\n",
pdata->drdy_int_pin);
return 0;
}
@@ -545,7 +538,7 @@ static int st_sensors_read_axis_data(struct iio_dev *indio_dev,
else if (byte_for_channel == 2)
*data = (s16)get_unaligned_le16(outdata);
else if (byte_for_channel == 3)
- *data = (s32)st_sensors_get_unaligned_le24(outdata);
+ *data = (s32)sign_extend32(get_unaligned_le24(outdata), 23);
st_sensors_free_memory:
kfree(outdata);
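A small check (not from the driver) that the generic helpers used above compose to the same value as the removed open-coded shift trick:

	u8 out[3] = { 0x01, 0x00, 0x80 };	/* 0x800001 as little-endian 24-bit */
	s32 old_way = (s32)((out[0] | out[1] << 8 | out[2] << 16) << 8) >> 8;
	s32 new_way = sign_extend32(get_unaligned_le24(out), 23);
	/* both evaluate to -0x7fffff */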
diff --git a/drivers/iio/common/st_sensors/st_sensors_i2c.c b/drivers/iio/common/st_sensors/st_sensors_i2c.c
index 286830fb5d35..b400560bac93 100644
--- a/drivers/iio/common/st_sensors/st_sensors_i2c.c
+++ b/drivers/iio/common/st_sensors/st_sensors_i2c.c
@@ -49,8 +49,8 @@ int st_sensors_i2c_configure(struct iio_dev *indio_dev,
sdata->regmap = devm_regmap_init_i2c(client, config);
if (IS_ERR(sdata->regmap)) {
- dev_err(&client->dev, "Failed to register i2c regmap (%d)\n",
- (int)PTR_ERR(sdata->regmap));
+ dev_err(&client->dev, "Failed to register i2c regmap (%ld)\n",
+ PTR_ERR(sdata->regmap));
return PTR_ERR(sdata->regmap);
}
diff --git a/drivers/iio/common/st_sensors/st_sensors_spi.c b/drivers/iio/common/st_sensors/st_sensors_spi.c
index 1275fb0eda31..ee70515bb89f 100644
--- a/drivers/iio/common/st_sensors/st_sensors_spi.c
+++ b/drivers/iio/common/st_sensors/st_sensors_spi.c
@@ -44,7 +44,7 @@ static bool st_sensors_is_spi_3_wire(struct spi_device *spi)
if (device_property_read_bool(dev, "spi-3wire"))
return true;
- pdata = (struct st_sensors_platform_data *)dev->platform_data;
+ pdata = dev_get_platdata(dev);
if (pdata && pdata->spi_3wire)
return true;
@@ -101,8 +101,8 @@ int st_sensors_spi_configure(struct iio_dev *indio_dev,
sdata->regmap = devm_regmap_init_spi(spi, config);
if (IS_ERR(sdata->regmap)) {
- dev_err(&spi->dev, "Failed to register spi regmap (%d)\n",
- (int)PTR_ERR(sdata->regmap));
+ dev_err(&spi->dev, "Failed to register spi regmap (%ld)\n",
+ PTR_ERR(sdata->regmap));
return PTR_ERR(sdata->regmap);
}
diff --git a/drivers/iio/common/st_sensors/st_sensors_trigger.c b/drivers/iio/common/st_sensors/st_sensors_trigger.c
index e817537cdfb5..0507283bd4c1 100644
--- a/drivers/iio/common/st_sensors/st_sensors_trigger.c
+++ b/drivers/iio/common/st_sensors/st_sensors_trigger.c
@@ -44,8 +44,7 @@ static int st_sensors_new_samples_available(struct iio_dev *indio_dev,
sdata->sensor_settings->drdy_irq.stat_drdy.addr,
&status);
if (ret < 0) {
- dev_err(sdata->dev,
- "error checking samples available\n");
+ dev_err(sdata->dev, "error checking samples available\n");
return ret;
}
@@ -148,9 +147,7 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
case IRQF_TRIGGER_LOW:
if (!sdata->sensor_settings->drdy_irq.addr_ihl) {
dev_err(&indio_dev->dev,
- "falling/low specified for IRQ "
- "but hardware supports only rising/high: "
- "will request rising/high\n");
+ "falling/low specified for IRQ but hardware supports only rising/high: will request rising/high\n");
if (irq_trig == IRQF_TRIGGER_FALLING)
irq_trig = IRQF_TRIGGER_RISING;
if (irq_trig == IRQF_TRIGGER_LOW)
@@ -163,8 +160,7 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
if (err < 0)
goto iio_trigger_free;
dev_info(&indio_dev->dev,
- "interrupts on the falling edge or "
- "active low level\n");
+ "interrupts on the falling edge or active low level\n");
}
break;
case IRQF_TRIGGER_RISING:
@@ -178,8 +174,7 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
default:
/* This is the most preferred mode, if possible */
dev_err(&indio_dev->dev,
- "unsupported IRQ trigger specified (%lx), enforce "
- "rising edge\n", irq_trig);
+ "unsupported IRQ trigger specified (%lx), enforce rising edge\n", irq_trig);
irq_trig = IRQF_TRIGGER_RISING;
}
diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig
index 93744011b63f..3728f6325501 100644
--- a/drivers/iio/dac/Kconfig
+++ b/drivers/iio/dac/Kconfig
@@ -279,12 +279,12 @@ config LTC1660
module will be called ltc1660.
config LTC2632
- tristate "Linear Technology LTC2632-12/10/8 and LTC2636-12/10/8 DAC spi driver"
+ tristate "Linear Technology LTC2632-12/10/8 and similar DAC spi driver"
depends on SPI
help
Say yes here to build support for Linear Technology
- LTC2632-12, LTC2632-10, LTC2632-8, LTC2636-12, LTC2636-10 and
- LTC2636-8 converters (DAC).
+ LTC2632, LTC2634 and LTC2636 12/10/8 bit DACs with
+ low (0-2.5V) and high (0-4.096V) output voltage ranges.
To compile this driver as a module, choose M here: the
module will be called ltc2632.
diff --git a/drivers/iio/dac/ad5360.c b/drivers/iio/dac/ad5360.c
index 2ac428b957e3..3e0c9e84e8da 100644
--- a/drivers/iio/dac/ad5360.c
+++ b/drivers/iio/dac/ad5360.c
@@ -67,6 +67,7 @@ struct ad5360_chip_info {
* @chip_info: chip model specific constants, available modes etc
* @vref_reg: vref supply regulators
* @ctrl: control register cache
+ * @lock: lock to protect the data buffer during SPI ops
* @data: spi transfer buffers
*/
@@ -75,6 +76,7 @@ struct ad5360_state {
const struct ad5360_chip_info *chip_info;
struct regulator_bulk_data vref_reg[3];
unsigned int ctrl;
+ struct mutex lock;
/*
* DMA (thus cache coherency maintenance) requires the
@@ -205,10 +207,11 @@ static int ad5360_write(struct iio_dev *indio_dev, unsigned int cmd,
unsigned int addr, unsigned int val, unsigned int shift)
{
int ret;
+ struct ad5360_state *st = iio_priv(indio_dev);
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
ret = ad5360_write_unlocked(indio_dev, cmd, addr, val, shift);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
}
@@ -229,7 +232,7 @@ static int ad5360_read(struct iio_dev *indio_dev, unsigned int type,
},
};
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
st->data[0].d32 = cpu_to_be32(AD5360_CMD(AD5360_CMD_SPECIAL_FUNCTION) |
AD5360_ADDR(AD5360_REG_SF_READBACK) |
@@ -240,7 +243,7 @@ static int ad5360_read(struct iio_dev *indio_dev, unsigned int type,
if (ret >= 0)
ret = be32_to_cpu(st->data[1].d32) & 0xffff;
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
}
@@ -261,7 +264,7 @@ static int ad5360_update_ctrl(struct iio_dev *indio_dev, unsigned int set,
struct ad5360_state *st = iio_priv(indio_dev);
unsigned int ret;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
st->ctrl |= set;
st->ctrl &= ~clr;
@@ -269,7 +272,7 @@ static int ad5360_update_ctrl(struct iio_dev *indio_dev, unsigned int set,
ret = ad5360_write_unlocked(indio_dev, AD5360_CMD_SPECIAL_FUNCTION,
AD5360_REG_SF_CTRL, st->ctrl, 0);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
}
@@ -479,6 +482,8 @@ static int ad5360_probe(struct spi_device *spi)
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->num_channels = st->chip_info->num_channels;
+ mutex_init(&st->lock);
+
ret = ad5360_alloc_channels(indio_dev);
if (ret) {
dev_err(&spi->dev, "Failed to allocate channel spec: %d\n", ret);
diff --git a/drivers/iio/dac/ad5380.c b/drivers/iio/dac/ad5380.c
index 2ebe08326048..b37e5675f716 100644
--- a/drivers/iio/dac/ad5380.c
+++ b/drivers/iio/dac/ad5380.c
@@ -51,6 +51,7 @@ struct ad5380_chip_info {
* @vref_reg: vref supply regulator
* @vref: actual reference voltage used in uA
* @pwr_down: whether the chip is currently in power down mode
+ * @lock: lock to protect the data buffer during regmap ops
*/
struct ad5380_state {
@@ -59,6 +60,7 @@ struct ad5380_state {
struct regulator *vref_reg;
int vref;
bool pwr_down;
+ struct mutex lock;
};
enum ad5380_type {
@@ -98,7 +100,7 @@ static ssize_t ad5380_write_dac_powerdown(struct iio_dev *indio_dev,
if (ret)
return ret;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
if (pwr_down)
ret = regmap_write(st->regmap, AD5380_REG_SF_PWR_DOWN, 0);
@@ -107,7 +109,7 @@ static ssize_t ad5380_write_dac_powerdown(struct iio_dev *indio_dev,
st->pwr_down = pwr_down;
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret ? ret : len;
}
@@ -390,6 +392,8 @@ static int ad5380_probe(struct device *dev, struct regmap *regmap,
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->num_channels = st->chip_info->num_channels;
+ mutex_init(&st->lock);
+
ret = ad5380_alloc_channels(indio_dev);
if (ret) {
dev_err(dev, "Failed to allocate channel spec: %d\n", ret);
diff --git a/drivers/iio/dac/ad5421.c b/drivers/iio/dac/ad5421.c
index 63063e85cd0a..fec27764cea8 100644
--- a/drivers/iio/dac/ad5421.c
+++ b/drivers/iio/dac/ad5421.c
@@ -62,12 +62,14 @@
* @current_range: current range which the device is configured for
* @data: spi transfer buffers
* @fault_mask: software masking of events
+ * @lock: lock to protect the data buffer during SPI ops
*/
struct ad5421_state {
struct spi_device *spi;
unsigned int ctrl;
enum ad5421_current_range current_range;
unsigned int fault_mask;
+ struct mutex lock;
/*
* DMA (thus cache coherency maintenance) requires the
@@ -142,11 +144,12 @@ static int ad5421_write_unlocked(struct iio_dev *indio_dev,
static int ad5421_write(struct iio_dev *indio_dev, unsigned int reg,
unsigned int val)
{
+ struct ad5421_state *st = iio_priv(indio_dev);
int ret;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
ret = ad5421_write_unlocked(indio_dev, reg, val);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
}
@@ -166,7 +169,7 @@ static int ad5421_read(struct iio_dev *indio_dev, unsigned int reg)
},
};
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
st->data[0].d32 = cpu_to_be32((1 << 23) | (reg << 16));
@@ -174,7 +177,7 @@ static int ad5421_read(struct iio_dev *indio_dev, unsigned int reg)
if (ret >= 0)
ret = be32_to_cpu(st->data[1].d32) & 0xffff;
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
}
@@ -185,14 +188,14 @@ static int ad5421_update_ctrl(struct iio_dev *indio_dev, unsigned int set,
struct ad5421_state *st = iio_priv(indio_dev);
unsigned int ret;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
st->ctrl &= ~clr;
st->ctrl |= set;
ret = ad5421_write_unlocked(indio_dev, AD5421_REG_CTRL, st->ctrl);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
}
@@ -400,12 +403,12 @@ static int ad5421_write_event_config(struct iio_dev *indio_dev,
return -EINVAL;
}
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
if (state)
st->fault_mask |= mask;
else
st->fault_mask &= ~mask;
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return 0;
}
@@ -491,6 +494,8 @@ static int ad5421_probe(struct spi_device *spi)
indio_dev->channels = ad5421_channels;
indio_dev->num_channels = ARRAY_SIZE(ad5421_channels);
+ mutex_init(&st->lock);
+
st->ctrl = AD5421_CTRL_WATCHDOG_DISABLE |
AD5421_CTRL_AUTO_FAULT_READBACK;
diff --git a/drivers/iio/dac/ad5446.c b/drivers/iio/dac/ad5446.c
index 61c670f7fc5f..8f8afc8999bc 100644
--- a/drivers/iio/dac/ad5446.c
+++ b/drivers/iio/dac/ad5446.c
@@ -21,6 +21,8 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
+#include <asm/unaligned.h>
+
#define MODE_PWRDWN_1k 0x1
#define MODE_PWRDWN_100k 0x2
#define MODE_PWRDWN_TRISTATE 0x3
@@ -31,6 +33,7 @@
* @chip_info: chip model specific constants, available modes etc
* @reg: supply regulator
* @vref_mv: actual reference voltage used
+ * @lock: lock to protect the data buffer during write ops
*/
struct ad5446_state {
@@ -41,6 +44,7 @@ struct ad5446_state {
unsigned cached_val;
unsigned pwr_down_mode;
unsigned pwr_down;
+ struct mutex lock;
};
/**
@@ -110,7 +114,7 @@ static ssize_t ad5446_write_dac_powerdown(struct iio_dev *indio_dev,
if (ret)
return ret;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
st->pwr_down = powerdown;
if (st->pwr_down) {
@@ -121,7 +125,7 @@ static ssize_t ad5446_write_dac_powerdown(struct iio_dev *indio_dev,
}
ret = st->chip_info->write(st, val);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret ? ret : len;
}
@@ -195,11 +199,11 @@ static int ad5446_write_raw(struct iio_dev *indio_dev,
return -EINVAL;
val <<= chan->scan_type.shift;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
st->cached_val = val;
if (!st->pwr_down)
ret = st->chip_info->write(st, val);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
break;
default:
ret = -EINVAL;
@@ -254,6 +258,8 @@ static int ad5446_probe(struct device *dev, const char *name,
indio_dev->channels = &st->chip_info->channel;
indio_dev->num_channels = 1;
+ mutex_init(&st->lock);
+
st->pwr_down_mode = MODE_PWRDWN_1k;
if (st->chip_info->int_vref_mv)
@@ -302,9 +308,7 @@ static int ad5660_write(struct ad5446_state *st, unsigned val)
struct spi_device *spi = to_spi_device(st->dev);
uint8_t data[3];
- data[0] = (val >> 16) & 0xFF;
- data[1] = (val >> 8) & 0xFF;
- data[2] = val & 0xFF;
+ put_unaligned_be24(val, &data[0]);
return spi_write(spi, data, sizeof(data));
}
diff --git a/drivers/iio/dac/ad5449.c b/drivers/iio/dac/ad5449.c
index fed3ebaccac4..d739b10e5236 100644
--- a/drivers/iio/dac/ad5449.c
+++ b/drivers/iio/dac/ad5449.c
@@ -56,11 +56,13 @@ struct ad5449_chip_info {
* @has_sdo: whether the SDO line is connected
* @dac_cache: Cache for the DAC values
* @data: spi transfer buffers
+ * @lock: lock to protect the data buffer during SPI ops
*/
struct ad5449 {
struct spi_device *spi;
const struct ad5449_chip_info *chip_info;
struct regulator_bulk_data vref_reg[AD5449_MAX_VREFS];
+ struct mutex lock;
bool has_sdo;
uint16_t dac_cache[AD5449_MAX_CHANNELS];
@@ -87,10 +89,10 @@ static int ad5449_write(struct iio_dev *indio_dev, unsigned int addr,
struct ad5449 *st = iio_priv(indio_dev);
int ret;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
st->data[0] = cpu_to_be16((addr << 12) | val);
ret = spi_write(st->spi, st->data, 2);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
}
@@ -112,7 +114,7 @@ static int ad5449_read(struct iio_dev *indio_dev, unsigned int addr,
},
};
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
st->data[0] = cpu_to_be16(addr << 12);
st->data[1] = cpu_to_be16(AD5449_CMD_NOOP);
@@ -123,7 +125,7 @@ static int ad5449_read(struct iio_dev *indio_dev, unsigned int addr,
*val = be16_to_cpu(st->data[1]);
out_unlock:
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
}
@@ -302,6 +304,8 @@ static int ad5449_spi_probe(struct spi_device *spi)
indio_dev->channels = st->chip_info->channels;
indio_dev->num_channels = st->chip_info->num_channels;
+ mutex_init(&st->lock);
+
if (st->chip_info->has_ctrl) {
unsigned int ctrl = 0x00;
if (pdata) {
diff --git a/drivers/iio/dac/ad5592r-base.c b/drivers/iio/dac/ad5592r-base.c
index e2110113e884..410e90e5f75f 100644
--- a/drivers/iio/dac/ad5592r-base.c
+++ b/drivers/iio/dac/ad5592r-base.c
@@ -156,7 +156,6 @@ static void ad5592r_gpio_cleanup(struct ad5592r_state *st)
static int ad5592r_reset(struct ad5592r_state *st)
{
struct gpio_desc *gpio;
- struct iio_dev *iio_dev = iio_priv_to_dev(st);
gpio = devm_gpiod_get_optional(st->dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(gpio))
@@ -166,10 +165,10 @@ static int ad5592r_reset(struct ad5592r_state *st)
udelay(1);
gpiod_set_value(gpio, 1);
} else {
- mutex_lock(&iio_dev->mlock);
+ mutex_lock(&st->lock);
/* Writing this magic value resets the device */
st->ops->reg_write(st, AD5592R_REG_RESET, 0xdac);
- mutex_unlock(&iio_dev->mlock);
+ mutex_unlock(&st->lock);
}
udelay(250);
@@ -197,7 +196,6 @@ static int ad5592r_set_channel_modes(struct ad5592r_state *st)
const struct ad5592r_rw_ops *ops = st->ops;
int ret;
unsigned i;
- struct iio_dev *iio_dev = iio_priv_to_dev(st);
u8 pulldown = 0, tristate = 0, dac = 0, adc = 0;
u16 read_back;
@@ -247,7 +245,7 @@ static int ad5592r_set_channel_modes(struct ad5592r_state *st)
}
}
- mutex_lock(&iio_dev->mlock);
+ mutex_lock(&st->lock);
/* Pull down unused pins to GND */
ret = ops->reg_write(st, AD5592R_REG_PULLDOWN, pulldown);
@@ -285,7 +283,7 @@ static int ad5592r_set_channel_modes(struct ad5592r_state *st)
ret = -EIO;
err_unlock:
- mutex_unlock(&iio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
}
@@ -314,11 +312,11 @@ static int ad5592r_write_raw(struct iio_dev *iio_dev,
if (!chan->output)
return -EINVAL;
- mutex_lock(&iio_dev->mlock);
+ mutex_lock(&st->lock);
ret = st->ops->write_dac(st, chan->channel, val);
if (!ret)
st->cached_dac[chan->channel] = val;
- mutex_unlock(&iio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
case IIO_CHAN_INFO_SCALE:
if (chan->type == IIO_VOLTAGE) {
@@ -333,12 +331,12 @@ static int ad5592r_write_raw(struct iio_dev *iio_dev,
else
return -EINVAL;
- mutex_lock(&iio_dev->mlock);
+ mutex_lock(&st->lock);
ret = st->ops->reg_read(st, AD5592R_REG_CTRL,
&st->cached_gp_ctrl);
if (ret < 0) {
- mutex_unlock(&iio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
}
@@ -360,7 +358,7 @@ static int ad5592r_write_raw(struct iio_dev *iio_dev,
ret = st->ops->reg_write(st, AD5592R_REG_CTRL,
st->cached_gp_ctrl);
- mutex_unlock(&iio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
}
@@ -382,7 +380,7 @@ static int ad5592r_read_raw(struct iio_dev *iio_dev,
switch (m) {
case IIO_CHAN_INFO_RAW:
- mutex_lock(&iio_dev->mlock);
+ mutex_lock(&st->lock);
if (!chan->output) {
ret = st->ops->read_adc(st, chan->channel, &read_val);
@@ -419,7 +417,7 @@ static int ad5592r_read_raw(struct iio_dev *iio_dev,
} else {
int mult;
- mutex_lock(&iio_dev->mlock);
+ mutex_lock(&st->lock);
if (chan->output)
mult = !!(st->cached_gp_ctrl &
@@ -437,7 +435,7 @@ static int ad5592r_read_raw(struct iio_dev *iio_dev,
case IIO_CHAN_INFO_OFFSET:
ret = ad5592r_get_vref(st);
- mutex_lock(&iio_dev->mlock);
+ mutex_lock(&st->lock);
if (st->cached_gp_ctrl & AD5592R_REG_CTRL_ADC_RANGE)
*val = (-34365 * 25) / ret;
@@ -450,7 +448,7 @@ static int ad5592r_read_raw(struct iio_dev *iio_dev,
}
unlock:
- mutex_unlock(&iio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
}
@@ -625,6 +623,8 @@ int ad5592r_probe(struct device *dev, const char *name,
iio_dev->info = &ad5592r_info;
iio_dev->modes = INDIO_DIRECT_MODE;
+ mutex_init(&st->lock);
+
ad5592r_init_scales(st, ad5592r_get_vref(st));
ret = ad5592r_reset(st);
diff --git a/drivers/iio/dac/ad5592r-base.h b/drivers/iio/dac/ad5592r-base.h
index 4774e4cd9c11..23dac2f1ff8a 100644
--- a/drivers/iio/dac/ad5592r-base.h
+++ b/drivers/iio/dac/ad5592r-base.h
@@ -52,6 +52,7 @@ struct ad5592r_state {
struct regulator *reg;
struct gpio_chip gpiochip;
struct mutex gpio_lock; /* Protect cached gpio_out, gpio_val, etc. */
+ struct mutex lock;
unsigned int num_channels;
const struct ad5592r_rw_ops *ops;
int scale_avail[2][2];
diff --git a/drivers/iio/dac/ad5592r.c b/drivers/iio/dac/ad5592r.c
index 34ba059a77da..49308ad13c4b 100644
--- a/drivers/iio/dac/ad5592r.c
+++ b/drivers/iio/dac/ad5592r.c
@@ -98,7 +98,7 @@ static int ad5592r_reg_read(struct ad5592r_state *st, u8 reg, u16 *value)
return 0;
}
-static int ad5593r_gpio_read(struct ad5592r_state *st, u8 *value)
+static int ad5592r_gpio_read(struct ad5592r_state *st, u8 *value)
{
int ret;
@@ -121,7 +121,7 @@ static const struct ad5592r_rw_ops ad5592r_rw_ops = {
.read_adc = ad5592r_read_adc,
.reg_write = ad5592r_reg_write,
.reg_read = ad5592r_reg_read,
- .gpio_read = ad5593r_gpio_read,
+ .gpio_read = ad5592r_gpio_read,
};
static int ad5592r_spi_probe(struct spi_device *spi)
diff --git a/drivers/iio/dac/ad5593r.c b/drivers/iio/dac/ad5593r.c
index 44ea3b8117d0..1fbe9c019c7f 100644
--- a/drivers/iio/dac/ad5593r.c
+++ b/drivers/iio/dac/ad5593r.c
@@ -134,5 +134,5 @@ static struct i2c_driver ad5593r_driver = {
module_i2c_driver(ad5593r_driver);
MODULE_AUTHOR("Paul Cercueil <paul.cercueil@analog.com>");
-MODULE_DESCRIPTION("Analog Devices AD5592R multi-channel converters");
+MODULE_DESCRIPTION("Analog Devices AD5593R multi-channel converters");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/dac/ad5624r_spi.c b/drivers/iio/dac/ad5624r_spi.c
index e6c022e1dc1c..2015a5df840c 100644
--- a/drivers/iio/dac/ad5624r_spi.c
+++ b/drivers/iio/dac/ad5624r_spi.c
@@ -18,6 +18,8 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
+#include <asm/unaligned.h>
+
#include "ad5624r.h"
static int ad5624r_spi_write(struct spi_device *spi,
@@ -35,11 +37,9 @@ static int ad5624r_spi_write(struct spi_device *spi,
* for the AD5664R, AD5644R, and AD5624R, respectively.
*/
data = (0 << 22) | (cmd << 19) | (addr << 16) | (val << shift);
- msg[0] = data >> 16;
- msg[1] = data >> 8;
- msg[2] = data;
+ put_unaligned_be24(data, &msg[0]);
- return spi_write(spi, msg, 3);
+ return spi_write(spi, msg, sizeof(msg));
}
static int ad5624r_read_raw(struct iio_dev *indio_dev,
diff --git a/drivers/iio/dac/ad5686.c b/drivers/iio/dac/ad5686.c
index e06b29c565b9..8dd67da0a7da 100644
--- a/drivers/iio/dac/ad5686.c
+++ b/drivers/iio/dac/ad5686.c
@@ -127,9 +127,9 @@ static int ad5686_read_raw(struct iio_dev *indio_dev,
switch (m) {
case IIO_CHAN_INFO_RAW:
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
ret = st->read(st, chan->address);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
if (ret < 0)
return ret;
*val = (ret >> chan->scan_type.shift) &
@@ -157,12 +157,12 @@ static int ad5686_write_raw(struct iio_dev *indio_dev,
if (val > (1 << chan->scan_type.realbits) || val < 0)
return -EINVAL;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
ret = st->write(st,
AD5686_CMD_WRITE_INPUT_N_UPDATE_N,
chan->address,
val << chan->scan_type.shift);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
break;
default:
ret = -EINVAL;
@@ -468,6 +468,8 @@ int ad5686_probe(struct device *dev,
indio_dev->channels = st->chip_info->channels;
indio_dev->num_channels = st->chip_info->num_channels;
+ mutex_init(&st->lock);
+
switch (st->chip_info->regmap_type) {
case AD5310_REGMAP:
cmd = AD5686_CMD_CONTROL_REG;
diff --git a/drivers/iio/dac/ad5686.h b/drivers/iio/dac/ad5686.h
index 70a779939ddb..52009b5eef88 100644
--- a/drivers/iio/dac/ad5686.h
+++ b/drivers/iio/dac/ad5686.h
@@ -117,6 +117,7 @@ struct ad5686_chip_info {
* @pwr_down_mask: power down mask
* @pwr_down_mode: current power down mode
* @use_internal_vref: set to true if the internal reference voltage is used
+ * @lock: lock to protect the data buffer during regmap ops
* @data: spi transfer buffers
*/
@@ -130,6 +131,7 @@ struct ad5686_state {
ad5686_write_func write;
ad5686_read_func read;
bool use_internal_vref;
+ struct mutex lock;
/*
* DMA (thus cache coherency maintenance) requires the
diff --git a/drivers/iio/dac/ad5755.c b/drivers/iio/dac/ad5755.c
index 388ddd14bfd0..7723bd313fc6 100644
--- a/drivers/iio/dac/ad5755.c
+++ b/drivers/iio/dac/ad5755.c
@@ -82,6 +82,7 @@ struct ad5755_chip_info {
 * @pwr_down: bitmask which contains whether a channel is powered down or not
* @ctrl: software shadow of the channel ctrl registers
* @channels: iio channel spec for the device
+ * @lock: lock to protect the data buffer during SPI ops
* @data: spi transfer buffers
*/
struct ad5755_state {
@@ -90,6 +91,7 @@ struct ad5755_state {
unsigned int pwr_down;
unsigned int ctrl[AD5755_NUM_CHANNELS];
struct iio_chan_spec channels[AD5755_NUM_CHANNELS];
+ struct mutex lock;
/*
* DMA (thus cache coherency maintenance) requires the
@@ -174,11 +176,12 @@ static int ad5755_write_ctrl_unlocked(struct iio_dev *indio_dev,
static int ad5755_write(struct iio_dev *indio_dev, unsigned int reg,
unsigned int val)
{
+ struct ad5755_state *st = iio_priv(indio_dev);
int ret;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
ret = ad5755_write_unlocked(indio_dev, reg, val);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
}
@@ -186,11 +189,12 @@ static int ad5755_write(struct iio_dev *indio_dev, unsigned int reg,
static int ad5755_write_ctrl(struct iio_dev *indio_dev, unsigned int channel,
unsigned int reg, unsigned int val)
{
+ struct ad5755_state *st = iio_priv(indio_dev);
int ret;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
ret = ad5755_write_ctrl_unlocked(indio_dev, channel, reg, val);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
}
@@ -211,7 +215,7 @@ static int ad5755_read(struct iio_dev *indio_dev, unsigned int addr)
},
};
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
st->data[0].d32 = cpu_to_be32(AD5755_READ_FLAG | (addr << 16));
st->data[1].d32 = cpu_to_be32(AD5755_NOOP);
@@ -220,7 +224,7 @@ static int ad5755_read(struct iio_dev *indio_dev, unsigned int addr)
if (ret >= 0)
ret = be32_to_cpu(st->data[1].d32) & 0xffff;
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
}
@@ -246,7 +250,7 @@ static int ad5755_set_channel_pwr_down(struct iio_dev *indio_dev,
struct ad5755_state *st = iio_priv(indio_dev);
unsigned int mask = BIT(channel);
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
if ((bool)(st->pwr_down & mask) == pwr_down)
goto out_unlock;
@@ -266,7 +270,7 @@ static int ad5755_set_channel_pwr_down(struct iio_dev *indio_dev,
}
out_unlock:
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return 0;
}
@@ -746,6 +750,8 @@ static int ad5755_probe(struct spi_device *spi)
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->num_channels = AD5755_NUM_CHANNELS;
+ mutex_init(&st->lock);
+
if (spi->dev.of_node)
pdata = ad5755_parse_dt(&spi->dev);
else
diff --git a/drivers/iio/dac/ad5761.c b/drivers/iio/dac/ad5761.c
index 4fb42b743f0f..67c4fa75c6f1 100644
--- a/drivers/iio/dac/ad5761.c
+++ b/drivers/iio/dac/ad5761.c
@@ -3,7 +3,7 @@
* AD5721, AD5721R, AD5761, AD5761R, Voltage Output Digital to Analog Converter
*
* Copyright 2016 Qtechnology A/S
- * 2016 Ricardo Ribalda <ricardo.ribalda@gmail.com>
+ * 2016 Ricardo Ribalda <ribalda@kernel.org>
*/
#include <linux/kernel.h>
#include <linux/module.h>
@@ -57,11 +57,13 @@ enum ad5761_supported_device_ids {
* @use_intref: true when the internal voltage reference is used
* @vref: actual voltage reference in mVolts
* @range: output range mode used
+ * @lock: lock to protect the data buffer during SPI ops
* @data: cache aligned spi buffer
*/
struct ad5761_state {
struct spi_device *spi;
struct regulator *vref_reg;
+ struct mutex lock;
bool use_intref;
int vref;
@@ -124,9 +126,9 @@ static int ad5761_spi_write(struct iio_dev *indio_dev, u8 addr, u16 val)
struct ad5761_state *st = iio_priv(indio_dev);
int ret;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
ret = _ad5761_spi_write(st, addr, val);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
}
@@ -163,9 +165,9 @@ static int ad5761_spi_read(struct iio_dev *indio_dev, u8 addr, u16 *val)
struct ad5761_state *st = iio_priv(indio_dev);
int ret;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
ret = _ad5761_spi_read(st, addr, val);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
}
@@ -368,6 +370,8 @@ static int ad5761_probe(struct spi_device *spi)
if (pdata)
voltage_range = pdata->voltage_range;
+ mutex_init(&st->lock);
+
ret = ad5761_spi_set_range(st, voltage_range);
if (ret)
goto disable_regulator_err;
@@ -423,6 +427,6 @@ static struct spi_driver ad5761_driver = {
};
module_spi_driver(ad5761_driver);
-MODULE_AUTHOR("Ricardo Ribalda <ricardo.ribalda@gmail.com>");
+MODULE_AUTHOR("Ricardo Ribalda <ribalda@kernel.org>");
MODULE_DESCRIPTION("Analog Devices AD5721, AD5721R, AD5761, AD5761R driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/dac/ad5764.c b/drivers/iio/dac/ad5764.c
index f7ab211604a1..5b0f0fe354f6 100644
--- a/drivers/iio/dac/ad5764.c
+++ b/drivers/iio/dac/ad5764.c
@@ -46,6 +46,7 @@ struct ad5764_chip_info {
* @spi: spi_device
* @chip_info: chip info
* @vref_reg: vref supply regulators
+ * @lock: lock to protect the data buffer during SPI ops
* @data: spi transfer buffers
*/
@@ -53,6 +54,7 @@ struct ad5764_state {
struct spi_device *spi;
const struct ad5764_chip_info *chip_info;
struct regulator_bulk_data vref_reg[2];
+ struct mutex lock;
/*
* DMA (thus cache coherency maintenance) requires the
@@ -126,11 +128,11 @@ static int ad5764_write(struct iio_dev *indio_dev, unsigned int reg,
struct ad5764_state *st = iio_priv(indio_dev);
int ret;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
st->data[0].d32 = cpu_to_be32((reg << 16) | val);
ret = spi_write(st->spi, &st->data[0].d8[1], 3);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
}
@@ -151,7 +153,7 @@ static int ad5764_read(struct iio_dev *indio_dev, unsigned int reg,
},
};
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
st->data[0].d32 = cpu_to_be32((1 << 23) | (reg << 16));
@@ -159,7 +161,7 @@ static int ad5764_read(struct iio_dev *indio_dev, unsigned int reg,
if (ret >= 0)
*val = be32_to_cpu(st->data[1].d32) & 0xffff;
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
}
@@ -295,6 +297,8 @@ static int ad5764_probe(struct spi_device *spi)
indio_dev->num_channels = AD5764_NUM_CHANNELS;
indio_dev->channels = st->chip_info->channels;
+ mutex_init(&st->lock);
+
if (st->chip_info->int_vref == 0) {
st->vref_reg[0].supply = "vrefAB";
st->vref_reg[1].supply = "vrefCD";
diff --git a/drivers/iio/dac/ltc2632.c b/drivers/iio/dac/ltc2632.c
index 7adc91056aa1..f891311f05cf 100644
--- a/drivers/iio/dac/ltc2632.c
+++ b/drivers/iio/dac/ltc2632.c
@@ -12,6 +12,8 @@
#include <linux/iio/iio.h>
#include <linux/regulator/consumer.h>
+#include <asm/unaligned.h>
+
#define LTC2632_CMD_WRITE_INPUT_N 0x0
#define LTC2632_CMD_UPDATE_DAC_N 0x1
#define LTC2632_CMD_WRITE_INPUT_N_UPDATE_ALL 0x2
@@ -24,6 +26,7 @@
/**
* struct ltc2632_chip_info - chip specific information
* @channels: channel spec for the DAC
+ * @num_channels: DAC channel count of the chip
* @vref_mv: internal reference voltage
*/
struct ltc2632_chip_info {
@@ -53,6 +56,12 @@ enum ltc2632_supported_device_ids {
ID_LTC2632H12,
ID_LTC2632H10,
ID_LTC2632H8,
+ ID_LTC2634L12,
+ ID_LTC2634L10,
+ ID_LTC2634L8,
+ ID_LTC2634H12,
+ ID_LTC2634H10,
+ ID_LTC2634H8,
ID_LTC2636L12,
ID_LTC2636L10,
ID_LTC2636L8,
@@ -75,9 +84,7 @@ static int ltc2632_spi_write(struct spi_device *spi,
* 10-, 8-bit input code followed by 4, 6, or 8 don't care bits.
*/
data = (cmd << 20) | (addr << 16) | (val << shift);
- msg[0] = data >> 16;
- msg[1] = data >> 8;
- msg[2] = data;
+ put_unaligned_be24(data, &msg[0]);
return spi_write(spi, msg, sizeof(msg));
}
@@ -235,6 +242,36 @@ static const struct ltc2632_chip_info ltc2632_chip_info_tbl[] = {
.num_channels = 2,
.vref_mv = 4096,
},
+ [ID_LTC2634L12] = {
+ .channels = ltc2632x12_channels,
+ .num_channels = 4,
+ .vref_mv = 2500,
+ },
+ [ID_LTC2634L10] = {
+ .channels = ltc2632x10_channels,
+ .num_channels = 4,
+ .vref_mv = 2500,
+ },
+ [ID_LTC2634L8] = {
+ .channels = ltc2632x8_channels,
+ .num_channels = 4,
+ .vref_mv = 2500,
+ },
+ [ID_LTC2634H12] = {
+ .channels = ltc2632x12_channels,
+ .num_channels = 4,
+ .vref_mv = 4096,
+ },
+ [ID_LTC2634H10] = {
+ .channels = ltc2632x10_channels,
+ .num_channels = 4,
+ .vref_mv = 4096,
+ },
+ [ID_LTC2634H8] = {
+ .channels = ltc2632x8_channels,
+ .num_channels = 4,
+ .vref_mv = 4096,
+ },
[ID_LTC2636L12] = {
.channels = ltc2632x12_channels,
.num_channels = 8,
@@ -356,6 +393,12 @@ static const struct spi_device_id ltc2632_id[] = {
{ "ltc2632-h12", (kernel_ulong_t)&ltc2632_chip_info_tbl[ID_LTC2632H12] },
{ "ltc2632-h10", (kernel_ulong_t)&ltc2632_chip_info_tbl[ID_LTC2632H10] },
{ "ltc2632-h8", (kernel_ulong_t)&ltc2632_chip_info_tbl[ID_LTC2632H8] },
+ { "ltc2634-l12", (kernel_ulong_t)&ltc2632_chip_info_tbl[ID_LTC2634L12] },
+ { "ltc2634-l10", (kernel_ulong_t)&ltc2632_chip_info_tbl[ID_LTC2634L10] },
+ { "ltc2634-l8", (kernel_ulong_t)&ltc2632_chip_info_tbl[ID_LTC2634L8] },
+ { "ltc2634-h12", (kernel_ulong_t)&ltc2632_chip_info_tbl[ID_LTC2634H12] },
+ { "ltc2634-h10", (kernel_ulong_t)&ltc2632_chip_info_tbl[ID_LTC2634H10] },
+ { "ltc2634-h8", (kernel_ulong_t)&ltc2632_chip_info_tbl[ID_LTC2634H8] },
{ "ltc2636-l12", (kernel_ulong_t)&ltc2632_chip_info_tbl[ID_LTC2636L12] },
{ "ltc2636-l10", (kernel_ulong_t)&ltc2632_chip_info_tbl[ID_LTC2636L10] },
{ "ltc2636-l8", (kernel_ulong_t)&ltc2632_chip_info_tbl[ID_LTC2636L8] },
@@ -386,6 +429,24 @@ static const struct of_device_id ltc2632_of_match[] = {
.compatible = "lltc,ltc2632-h8",
.data = &ltc2632_chip_info_tbl[ID_LTC2632H8]
}, {
+ .compatible = "lltc,ltc2634-l12",
+ .data = &ltc2632_chip_info_tbl[ID_LTC2634L12]
+ }, {
+ .compatible = "lltc,ltc2634-l10",
+ .data = &ltc2632_chip_info_tbl[ID_LTC2634L10]
+ }, {
+ .compatible = "lltc,ltc2634-l8",
+ .data = &ltc2632_chip_info_tbl[ID_LTC2634L8]
+ }, {
+ .compatible = "lltc,ltc2634-h12",
+ .data = &ltc2632_chip_info_tbl[ID_LTC2634H12]
+ }, {
+ .compatible = "lltc,ltc2634-h10",
+ .data = &ltc2632_chip_info_tbl[ID_LTC2634H10]
+ }, {
+ .compatible = "lltc,ltc2634-h8",
+ .data = &ltc2632_chip_info_tbl[ID_LTC2634H8]
+ }, {
.compatible = "lltc,ltc2636-l12",
.data = &ltc2632_chip_info_tbl[ID_LTC2636L12]
}, {
diff --git a/drivers/iio/dac/ti-dac7612.c b/drivers/iio/dac/ti-dac7612.c
index c46805144dd4..de0c6573cd97 100644
--- a/drivers/iio/dac/ti-dac7612.c
+++ b/drivers/iio/dac/ti-dac7612.c
@@ -3,7 +3,7 @@
* DAC7612 Dual, 12-Bit Serial input Digital-to-Analog Converter
*
* Copyright 2019 Qtechnology A/S
- * 2019 Ricardo Ribalda <ricardo@ribalda.com>
+ * 2019 Ricardo Ribalda <ribalda@kernel.org>
*
* Licensed under the GPL-2.
*/
@@ -179,6 +179,6 @@ static struct spi_driver dac7612_driver = {
};
module_spi_driver(dac7612_driver);
-MODULE_AUTHOR("Ricardo Ribalda <ricardo@ribalda.com>");
+MODULE_AUTHOR("Ricardo Ribalda <ribalda@kernel.org>");
MODULE_DESCRIPTION("Texas Instruments DAC7612 DAC driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/dac/vf610_dac.c b/drivers/iio/dac/vf610_dac.c
index 7f1e9317c3f3..9417a4a3e22a 100644
--- a/drivers/iio/dac/vf610_dac.c
+++ b/drivers/iio/dac/vf610_dac.c
@@ -36,6 +36,7 @@ struct vf610_dac {
struct device *dev;
enum vf610_conversion_mode_sel conv_mode;
void __iomem *regs;
+ struct mutex lock;
};
static void vf610_dac_init(struct vf610_dac *info)
@@ -64,7 +65,7 @@ static int vf610_set_conversion_mode(struct iio_dev *indio_dev,
struct vf610_dac *info = iio_priv(indio_dev);
int val;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&info->lock);
info->conv_mode = mode;
val = readl(info->regs + VF610_DACx_STATCTRL);
if (mode)
@@ -72,7 +73,7 @@ static int vf610_set_conversion_mode(struct iio_dev *indio_dev,
else
val &= ~VF610_DAC_LPEN;
writel(val, info->regs + VF610_DACx_STATCTRL);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&info->lock);
return 0;
}
@@ -147,9 +148,9 @@ static int vf610_write_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&info->lock);
writel(VF610_DAC_DAT0(val), info->regs);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&info->lock);
return 0;
default:
@@ -205,6 +206,8 @@ static int vf610_dac_probe(struct platform_device *pdev)
indio_dev->channels = vf610_dac_iio_channels;
indio_dev->num_channels = ARRAY_SIZE(vf610_dac_iio_channels);
+ mutex_init(&info->lock);
+
ret = clk_prepare_enable(info->clk);
if (ret) {
dev_err(&pdev->dev,
diff --git a/drivers/iio/dummy/iio_dummy_evgen.c b/drivers/iio/dummy/iio_dummy_evgen.c
index a6edf30567aa..ee85d596e528 100644
--- a/drivers/iio/dummy/iio_dummy_evgen.c
+++ b/drivers/iio/dummy/iio_dummy_evgen.c
@@ -37,8 +37,7 @@ struct iio_dummy_eventgen {
struct iio_dummy_regs regs[IIO_EVENTGEN_NO];
struct mutex lock;
bool inuse[IIO_EVENTGEN_NO];
- struct irq_sim irq_sim;
- int base;
+ struct irq_domain *irq_sim_domain;
};
/* We can only ever have one instance of this 'device' */
@@ -52,13 +51,14 @@ static int iio_dummy_evgen_create(void)
if (!iio_evgen)
return -ENOMEM;
- ret = irq_sim_init(&iio_evgen->irq_sim, IIO_EVENTGEN_NO);
- if (ret < 0) {
+ iio_evgen->irq_sim_domain = irq_domain_create_sim(NULL,
+ IIO_EVENTGEN_NO);
+ if (IS_ERR(iio_evgen->irq_sim_domain)) {
+ ret = PTR_ERR(iio_evgen->irq_sim_domain);
kfree(iio_evgen);
return ret;
}
- iio_evgen->base = irq_sim_irqnum(&iio_evgen->irq_sim, 0);
mutex_init(&iio_evgen->lock);
return 0;
@@ -80,7 +80,7 @@ int iio_dummy_evgen_get_irq(void)
mutex_lock(&iio_evgen->lock);
for (i = 0; i < IIO_EVENTGEN_NO; i++) {
if (!iio_evgen->inuse[i]) {
- ret = irq_sim_irqnum(&iio_evgen->irq_sim, i);
+ ret = irq_create_mapping(iio_evgen->irq_sim_domain, i);
iio_evgen->inuse[i] = true;
break;
}
@@ -101,21 +101,27 @@ EXPORT_SYMBOL_GPL(iio_dummy_evgen_get_irq);
*/
void iio_dummy_evgen_release_irq(int irq)
{
+ struct irq_data *irqd = irq_get_irq_data(irq);
+
mutex_lock(&iio_evgen->lock);
- iio_evgen->inuse[irq - iio_evgen->base] = false;
+ iio_evgen->inuse[irqd_to_hwirq(irqd)] = false;
+ irq_dispose_mapping(irq);
mutex_unlock(&iio_evgen->lock);
}
EXPORT_SYMBOL_GPL(iio_dummy_evgen_release_irq);
struct iio_dummy_regs *iio_dummy_evgen_get_regs(int irq)
{
- return &iio_evgen->regs[irq - iio_evgen->base];
+ struct irq_data *irqd = irq_get_irq_data(irq);
+
+ return &iio_evgen->regs[irqd_to_hwirq(irqd)];
+
}
EXPORT_SYMBOL_GPL(iio_dummy_evgen_get_regs);
static void iio_dummy_evgen_free(void)
{
- irq_sim_fini(&iio_evgen->irq_sim);
+ irq_domain_remove_sim(iio_evgen->irq_sim_domain);
kfree(iio_evgen);
}
@@ -131,7 +137,7 @@ static ssize_t iio_evgen_poke(struct device *dev,
{
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
unsigned long event;
- int ret;
+ int ret, irq;
ret = kstrtoul(buf, 10, &event);
if (ret)
@@ -140,7 +146,10 @@ static ssize_t iio_evgen_poke(struct device *dev,
iio_evgen->regs[this_attr->address].reg_id = this_attr->address;
iio_evgen->regs[this_attr->address].reg_data = event;
- irq_sim_fire(&iio_evgen->irq_sim, this_attr->address);
+ irq = irq_find_mapping(iio_evgen->irq_sim_domain, this_attr->address);
+ ret = irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, true);
+ if (ret)
+ return ret;
return len;
}
diff --git a/drivers/iio/gyro/Kconfig b/drivers/iio/gyro/Kconfig
index 7eaf77707b0b..6daeddf37f60 100644
--- a/drivers/iio/gyro/Kconfig
+++ b/drivers/iio/gyro/Kconfig
@@ -61,7 +61,7 @@ config BMG160
help
Say yes here to build support for BOSCH BMG160 Tri-axis Gyro Sensor
driver connected via I2C or SPI. This driver also supports BMI055
- gyroscope.
+ and BMI088 gyroscope.
This driver can also be built as a module. If so, the module
will be called bmg160_i2c or bmg160_spi.
diff --git a/drivers/iio/gyro/adis16130.c b/drivers/iio/gyro/adis16130.c
index 79e63c8a2ea8..2a9ec08ec561 100644
--- a/drivers/iio/gyro/adis16130.c
+++ b/drivers/iio/gyro/adis16130.c
@@ -12,6 +12,8 @@
#include <linux/iio/iio.h>
+#include <asm/unaligned.h>
+
#define ADIS16130_CON 0x0
#define ADIS16130_CON_RD (1 << 6)
#define ADIS16130_IOP 0x1
@@ -59,7 +61,7 @@ static int adis16130_spi_read(struct iio_dev *indio_dev, u8 reg_addr, u32 *val)
ret = spi_sync_transfer(st->us, &xfer, 1);
if (ret == 0)
- *val = (st->buf[1] << 16) | (st->buf[2] << 8) | st->buf[3];
+ *val = get_unaligned_be24(&st->buf[1]);
mutex_unlock(&st->buf_lock);
return ret;
diff --git a/drivers/iio/gyro/adis16136.c b/drivers/iio/gyro/adis16136.c
index a4c967a5fc5c..afdc57af475d 100644
--- a/drivers/iio/gyro/adis16136.c
+++ b/drivers/iio/gyro/adis16136.c
@@ -148,16 +148,14 @@ DEFINE_DEBUGFS_ATTRIBUTE(adis16136_flash_count_fops,
static int adis16136_debugfs_init(struct iio_dev *indio_dev)
{
struct adis16136 *adis16136 = iio_priv(indio_dev);
+ struct dentry *d = iio_get_debugfs_dentry(indio_dev);
debugfs_create_file_unsafe("serial_number", 0400,
- indio_dev->debugfs_dentry, adis16136,
- &adis16136_serial_fops);
+ d, adis16136, &adis16136_serial_fops);
debugfs_create_file_unsafe("product_id", 0400,
- indio_dev->debugfs_dentry,
- adis16136, &adis16136_product_id_fops);
+ d, adis16136, &adis16136_product_id_fops);
debugfs_create_file_unsafe("flash_count", 0400,
- indio_dev->debugfs_dentry,
- adis16136, &adis16136_flash_count_fops);
+ d, adis16136, &adis16136_flash_count_fops);
return 0;
}
diff --git a/drivers/iio/gyro/bmg160_i2c.c b/drivers/iio/gyro/bmg160_i2c.c
index 4fc9c6a3321f..b3fa46bd02cb 100644
--- a/drivers/iio/gyro/bmg160_i2c.c
+++ b/drivers/iio/gyro/bmg160_i2c.c
@@ -21,8 +21,8 @@ static int bmg160_i2c_probe(struct i2c_client *client,
regmap = devm_regmap_init_i2c(client, &bmg160_regmap_i2c_conf);
if (IS_ERR(regmap)) {
- dev_err(&client->dev, "Failed to register i2c regmap %d\n",
- (int)PTR_ERR(regmap));
+ dev_err(&client->dev, "Failed to register i2c regmap: %pe\n",
+ regmap);
return PTR_ERR(regmap);
}
@@ -42,6 +42,7 @@ static int bmg160_i2c_remove(struct i2c_client *client)
static const struct acpi_device_id bmg160_acpi_match[] = {
{"BMG0160", 0},
{"BMI055B", 0},
+ {"BMI088B", 0},
{},
};
@@ -50,6 +51,7 @@ MODULE_DEVICE_TABLE(acpi, bmg160_acpi_match);
static const struct i2c_device_id bmg160_i2c_id[] = {
{"bmg160", 0},
{"bmi055_gyro", 0},
+ {"bmi088_gyro", 0},
{}
};
diff --git a/drivers/iio/gyro/bmg160_spi.c b/drivers/iio/gyro/bmg160_spi.c
index 182a59c42507..745962e1e423 100644
--- a/drivers/iio/gyro/bmg160_spi.c
+++ b/drivers/iio/gyro/bmg160_spi.c
@@ -19,8 +19,8 @@ static int bmg160_spi_probe(struct spi_device *spi)
regmap = devm_regmap_init_spi(spi, &bmg160_regmap_spi_conf);
if (IS_ERR(regmap)) {
- dev_err(&spi->dev, "Failed to register spi regmap %d\n",
- (int)PTR_ERR(regmap));
+ dev_err(&spi->dev, "Failed to register spi regmap: %pe\n",
+ regmap);
return PTR_ERR(regmap);
}
@@ -37,6 +37,7 @@ static int bmg160_spi_remove(struct spi_device *spi)
static const struct spi_device_id bmg160_spi_id[] = {
{"bmg160", 0},
{"bmi055_gyro", 0},
+ {"bmi088_gyro", 0},
{}
};
diff --git a/drivers/iio/gyro/hid-sensor-gyro-3d.c b/drivers/iio/gyro/hid-sensor-gyro-3d.c
index 08cacbbf31e6..7f382aae1dfd 100644
--- a/drivers/iio/gyro/hid-sensor-gyro-3d.c
+++ b/drivers/iio/gyro/hid-sensor-gyro-3d.c
@@ -14,8 +14,6 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
-#include <linux/iio/trigger_consumer.h>
-#include <linux/iio/triggered_buffer.h>
#include "../common/hid-sensors/hid-sensor-trigger.h"
enum gyro_3d_channel {
@@ -326,18 +324,13 @@ static int hid_gyro_3d_probe(struct platform_device *pdev)
indio_dev->name = name;
indio_dev->modes = INDIO_DIRECT_MODE;
- ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
- NULL, NULL);
- if (ret) {
- dev_err(&pdev->dev, "failed to initialize trigger buffer\n");
- goto error_free_dev_mem;
- }
atomic_set(&gyro_state->common_attributes.data_ready, 0);
+
ret = hid_sensor_setup_trigger(indio_dev, name,
&gyro_state->common_attributes);
if (ret < 0) {
dev_err(&pdev->dev, "trigger setup failed\n");
- goto error_unreg_buffer_funcs;
+ goto error_free_dev_mem;
}
ret = iio_device_register(indio_dev);
@@ -361,9 +354,7 @@ static int hid_gyro_3d_probe(struct platform_device *pdev)
error_iio_unreg:
iio_device_unregister(indio_dev);
error_remove_trigger:
- hid_sensor_remove_trigger(&gyro_state->common_attributes);
-error_unreg_buffer_funcs:
- iio_triggered_buffer_cleanup(indio_dev);
+ hid_sensor_remove_trigger(indio_dev, &gyro_state->common_attributes);
error_free_dev_mem:
kfree(indio_dev->channels);
return ret;
@@ -378,8 +369,7 @@ static int hid_gyro_3d_remove(struct platform_device *pdev)
sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_GYRO_3D);
iio_device_unregister(indio_dev);
- hid_sensor_remove_trigger(&gyro_state->common_attributes);
- iio_triggered_buffer_cleanup(indio_dev);
+ hid_sensor_remove_trigger(indio_dev, &gyro_state->common_attributes);
kfree(indio_dev->channels);
return 0;
diff --git a/drivers/iio/gyro/mpu3050-i2c.c b/drivers/iio/gyro/mpu3050-i2c.c
index afa8018b9238..ef5bcbc4b45b 100644
--- a/drivers/iio/gyro/mpu3050-i2c.c
+++ b/drivers/iio/gyro/mpu3050-i2c.c
@@ -51,8 +51,8 @@ static int mpu3050_i2c_probe(struct i2c_client *client,
regmap = devm_regmap_init_i2c(client, &mpu3050_i2c_regmap_config);
if (IS_ERR(regmap)) {
- dev_err(&client->dev, "Failed to register i2c regmap %d\n",
- (int)PTR_ERR(regmap));
+ dev_err(&client->dev, "Failed to register i2c regmap: %pe\n",
+ regmap);
return PTR_ERR(regmap);
}
diff --git a/drivers/iio/gyro/st_gyro_buffer.c b/drivers/iio/gyro/st_gyro_buffer.c
index 7465ad62391c..9c92ff7a82be 100644
--- a/drivers/iio/gyro/st_gyro_buffer.c
+++ b/drivers/iio/gyro/st_gyro_buffer.c
@@ -37,8 +37,7 @@ static int st_gyro_buffer_postenable(struct iio_dev *indio_dev)
if (err < 0)
return err;
- err = st_sensors_set_axis_enable(indio_dev,
- (u8)indio_dev->active_scan_mask[0]);
+ err = st_sensors_set_axis_enable(indio_dev, indio_dev->active_scan_mask[0]);
if (err < 0)
goto st_gyro_buffer_predisable;
diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c
index 26c50b24bc08..c8aa051995d3 100644
--- a/drivers/iio/gyro/st_gyro_core.c
+++ b/drivers/iio/gyro/st_gyro_core.c
@@ -460,6 +460,7 @@ EXPORT_SYMBOL(st_gyro_get_settings);
int st_gyro_common_probe(struct iio_dev *indio_dev)
{
struct st_sensor_data *gdata = iio_priv(indio_dev);
+ struct st_sensors_platform_data *pdata;
int err;
indio_dev->modes = INDIO_DIRECT_MODE;
@@ -477,12 +478,12 @@ int st_gyro_common_probe(struct iio_dev *indio_dev)
indio_dev->channels = gdata->sensor_settings->ch;
indio_dev->num_channels = ST_SENSORS_NUMBER_ALL_CHANNELS;
- gdata->current_fullscale = (struct st_sensor_fullscale_avl *)
- &gdata->sensor_settings->fs.fs_avl[0];
+ gdata->current_fullscale = &gdata->sensor_settings->fs.fs_avl[0];
gdata->odr = gdata->sensor_settings->odr.odr_avl[0].hz;
- err = st_sensors_init_sensor(indio_dev,
- (struct st_sensors_platform_data *)&gyro_pdata);
+ pdata = (struct st_sensors_platform_data *)&gyro_pdata;
+
+ err = st_sensors_init_sensor(indio_dev, pdata);
if (err < 0)
goto st_gyro_power_off;
diff --git a/drivers/iio/health/afe4403.c b/drivers/iio/health/afe4403.c
index dc22dc363a99..e9f87e42ff4f 100644
--- a/drivers/iio/health/afe4403.c
+++ b/drivers/iio/health/afe4403.c
@@ -23,6 +23,8 @@
#include <linux/iio/triggered_buffer.h>
#include <linux/iio/trigger_consumer.h>
+#include <asm/unaligned.h>
+
#include "afe440x.h"
#define AFE4403_DRIVER_NAME "afe4403"
@@ -220,13 +222,11 @@ static int afe4403_read(struct afe4403_data *afe, unsigned int reg, u32 *val)
if (ret)
return ret;
- ret = spi_write_then_read(afe->spi, &reg, 1, rx, 3);
+ ret = spi_write_then_read(afe->spi, &reg, 1, rx, sizeof(rx));
if (ret)
return ret;
- *val = (rx[0] << 16) |
- (rx[1] << 8) |
- (rx[2]);
+ *val = get_unaligned_be24(&rx[0]);
/* Disable reading from the device */
tx[3] = AFE440X_CONTROL0_WRITE;
@@ -322,13 +322,11 @@ static irqreturn_t afe4403_trigger_handler(int irq, void *private)
indio_dev->masklength) {
ret = spi_write_then_read(afe->spi,
&afe4403_channel_values[bit], 1,
- rx, 3);
+ rx, sizeof(rx));
if (ret)
goto err;
- buffer[i++] = (rx[0] << 16) |
- (rx[1] << 8) |
- (rx[2]);
+ buffer[i++] = get_unaligned_be24(&rx[0]);
}
/* Disable reading from the device */
diff --git a/drivers/iio/health/max30100.c b/drivers/iio/health/max30100.c
index 84010501762d..546fc37ad75d 100644
--- a/drivers/iio/health/max30100.c
+++ b/drivers/iio/health/max30100.c
@@ -16,7 +16,7 @@
#include <linux/irq.h>
#include <linux/i2c.h>
#include <linux/mutex.h>
-#include <linux/of.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
@@ -267,11 +267,10 @@ static int max30100_get_current_idx(unsigned int val, int *reg)
static int max30100_led_init(struct max30100_data *data)
{
struct device *dev = &data->client->dev;
- struct device_node *np = dev->of_node;
unsigned int val[2];
int reg, ret;
- ret = of_property_read_u32_array(np, "maxim,led-current-microamp",
+ ret = device_property_read_u32_array(dev, "maxim,led-current-microamp",
(unsigned int *) &val, 2);
if (ret) {
/* Default to 24 mA RED LED, 50 mA IR LED */
@@ -502,7 +501,7 @@ MODULE_DEVICE_TABLE(of, max30100_dt_ids);
static struct i2c_driver max30100_driver = {
.driver = {
.name = MAX30100_DRV_NAME,
- .of_match_table = of_match_ptr(max30100_dt_ids),
+ .of_match_table = max30100_dt_ids,
},
.probe = max30100_probe,
.remove = max30100_remove,
diff --git a/drivers/iio/humidity/hid-sensor-humidity.c b/drivers/iio/humidity/hid-sensor-humidity.c
index c99b54b0568d..d2318c4aab0f 100644
--- a/drivers/iio/humidity/hid-sensor-humidity.c
+++ b/drivers/iio/humidity/hid-sensor-humidity.c
@@ -7,8 +7,6 @@
#include <linux/hid-sensor-hub.h>
#include <linux/iio/buffer.h>
#include <linux/iio/iio.h>
-#include <linux/iio/triggered_buffer.h>
-#include <linux/iio/trigger_consumer.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -233,12 +231,8 @@ static int hid_humidity_probe(struct platform_device *pdev)
indio_dev->name = name;
indio_dev->modes = INDIO_DIRECT_MODE;
- ret = devm_iio_triggered_buffer_setup(&pdev->dev, indio_dev,
- &iio_pollfunc_store_time, NULL, NULL);
- if (ret)
- return ret;
-
atomic_set(&humid_st->common_attributes.data_ready, 0);
+
ret = hid_sensor_setup_trigger(indio_dev, name,
&humid_st->common_attributes);
if (ret)
@@ -261,7 +255,7 @@ static int hid_humidity_probe(struct platform_device *pdev)
error_remove_callback:
sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_HUMIDITY);
error_remove_trigger:
- hid_sensor_remove_trigger(&humid_st->common_attributes);
+ hid_sensor_remove_trigger(indio_dev, &humid_st->common_attributes);
return ret;
}
@@ -274,7 +268,7 @@ static int hid_humidity_remove(struct platform_device *pdev)
iio_device_unregister(indio_dev);
sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_HUMIDITY);
- hid_sensor_remove_trigger(&humid_st->common_attributes);
+ hid_sensor_remove_trigger(indio_dev, &humid_st->common_attributes);
return 0;
}
diff --git a/drivers/iio/humidity/hts221_buffer.c b/drivers/iio/humidity/hts221_buffer.c
index 81d50a861c22..9fb3f33614d4 100644
--- a/drivers/iio/humidity/hts221_buffer.c
+++ b/drivers/iio/humidity/hts221_buffer.c
@@ -74,10 +74,9 @@ static irqreturn_t hts221_trigger_handler_thread(int irq, void *private)
int hts221_allocate_trigger(struct hts221_hw *hw)
{
+ struct st_sensors_platform_data *pdata = dev_get_platdata(hw->dev);
struct iio_dev *iio_dev = iio_priv_to_dev(hw);
bool irq_active_low = false, open_drain = false;
- struct device_node *np = hw->dev->of_node;
- struct st_sensors_platform_data *pdata;
unsigned long irq_type;
int err;
@@ -106,8 +105,7 @@ int hts221_allocate_trigger(struct hts221_hw *hw)
if (err < 0)
return err;
- pdata = (struct st_sensors_platform_data *)hw->dev->platform_data;
- if ((np && of_property_read_bool(np, "drive-open-drain")) ||
+ if (device_property_read_bool(hw->dev, "drive-open-drain") ||
(pdata && pdata->open_drain)) {
irq_type |= IRQF_SHARED;
open_drain = true;
diff --git a/drivers/iio/humidity/hts221_i2c.c b/drivers/iio/humidity/hts221_i2c.c
index 4272b7030c44..cab39c4756f8 100644
--- a/drivers/iio/humidity/hts221_i2c.c
+++ b/drivers/iio/humidity/hts221_i2c.c
@@ -32,8 +32,8 @@ static int hts221_i2c_probe(struct i2c_client *client,
regmap = devm_regmap_init_i2c(client, &hts221_i2c_regmap_config);
if (IS_ERR(regmap)) {
- dev_err(&client->dev, "Failed to register i2c regmap %d\n",
- (int)PTR_ERR(regmap));
+ dev_err(&client->dev, "Failed to register i2c regmap %ld\n",
+ PTR_ERR(regmap));
return PTR_ERR(regmap);
}
@@ -63,7 +63,7 @@ static struct i2c_driver hts221_driver = {
.driver = {
.name = "hts221_i2c",
.pm = &hts221_pm_ops,
- .of_match_table = of_match_ptr(hts221_i2c_of_match),
+ .of_match_table = hts221_i2c_of_match,
.acpi_match_table = ACPI_PTR(hts221_acpi_match),
},
.probe = hts221_i2c_probe,
diff --git a/drivers/iio/humidity/hts221_spi.c b/drivers/iio/humidity/hts221_spi.c
index 055dba8897d2..729e86e433b1 100644
--- a/drivers/iio/humidity/hts221_spi.c
+++ b/drivers/iio/humidity/hts221_spi.c
@@ -31,8 +31,8 @@ static int hts221_spi_probe(struct spi_device *spi)
regmap = devm_regmap_init_spi(spi, &hts221_spi_regmap_config);
if (IS_ERR(regmap)) {
- dev_err(&spi->dev, "Failed to register spi regmap %d\n",
- (int)PTR_ERR(regmap));
+ dev_err(&spi->dev, "Failed to register spi regmap %ld\n",
+ PTR_ERR(regmap));
return PTR_ERR(regmap);
}
@@ -56,7 +56,7 @@ static struct spi_driver hts221_driver = {
.driver = {
.name = "hts221_spi",
.pm = &hts221_pm_ops,
- .of_match_table = of_match_ptr(hts221_spi_of_match),
+ .of_match_table = hts221_spi_of_match,
},
.probe = hts221_spi_probe,
.id_table = hts221_spi_id_table,
diff --git a/drivers/iio/imu/Kconfig b/drivers/iio/imu/Kconfig
index 60bb1029e759..fc4123d518bc 100644
--- a/drivers/iio/imu/Kconfig
+++ b/drivers/iio/imu/Kconfig
@@ -29,6 +29,19 @@ config ADIS16460
To compile this driver as a module, choose M here: the module will be
called adis16460.
+config ADIS16475
+ tristate "Analog Devices ADIS16475 and similar IMU driver"
+ depends on SPI
+ select IIO_ADIS_LIB
+ select IIO_ADIS_LIB_BUFFER if IIO_BUFFER
+ help
+ Say yes here to build support for Analog Devices ADIS16470, ADIS16475,
+ ADIS16477, ADIS16465, ADIS16467, ADIS16500, ADIS16505, ADIS16507 inertial
+ sensors.
+
+ To compile this driver as a module, choose M here: the module will be
+ called adis16475.
+
config ADIS16480
tristate "Analog Devices ADIS16480 and similar IMU driver"
depends on SPI
diff --git a/drivers/iio/imu/Makefile b/drivers/iio/imu/Makefile
index 5237fd4bc384..88b2c4555230 100644
--- a/drivers/iio/imu/Makefile
+++ b/drivers/iio/imu/Makefile
@@ -6,6 +6,7 @@
# When adding new entries keep the list in alphabetical order
obj-$(CONFIG_ADIS16400) += adis16400.o
obj-$(CONFIG_ADIS16460) += adis16460.o
+obj-$(CONFIG_ADIS16475) += adis16475.o
obj-$(CONFIG_ADIS16480) += adis16480.o
adis_lib-y += adis.o
diff --git a/drivers/iio/imu/adis.c b/drivers/iio/imu/adis.c
index a8afd01de4f3..c539dfa3b8d3 100644
--- a/drivers/iio/imu/adis.c
+++ b/drivers/iio/imu/adis.c
@@ -223,6 +223,31 @@ int __adis_read_reg(struct adis *adis, unsigned int reg,
return ret;
}
EXPORT_SYMBOL_GPL(__adis_read_reg);
+/**
+ * __adis_update_bits_base() - ADIS Update bits function - Unlocked version
+ * @adis: The adis device
+ * @reg: The address of the lower of the two registers
+ * @mask: Bitmask to change
+ * @val: Value to be written
+ * @size: Size of the register to update
+ *
+ * Updates the desired bits of @reg in accordance with @mask and @val.
+ */
+int __adis_update_bits_base(struct adis *adis, unsigned int reg, const u32 mask,
+ const u32 val, u8 size)
+{
+ int ret;
+ u32 __val;
+
+ ret = __adis_read_reg(adis, reg, &__val, size);
+ if (ret)
+ return ret;
+
+ __val = (__val & ~mask) | (val & mask);
+
+ return __adis_write_reg(adis, reg, __val, size);
+}
+EXPORT_SYMBOL_GPL(__adis_update_bits_base);
#ifdef CONFIG_DEBUG_FS
@@ -419,7 +444,7 @@ int __adis_initial_startup(struct adis *adis)
if (prod_id != adis->data->prod_id)
dev_warn(&adis->spi->dev,
- "Device ID(%u) and product ID(%u) do not match.",
+ "Device ID(%u) and product ID(%u) do not match.\n",
adis->data->prod_id, prod_id);
return 0;
diff --git a/drivers/iio/imu/adis16400.c b/drivers/iio/imu/adis16400.c
index 05e70c1c4835..229f2ff98469 100644
--- a/drivers/iio/imu/adis16400.c
+++ b/drivers/iio/imu/adis16400.c
@@ -258,7 +258,7 @@ static int adis16400_show_product_id(void *arg, u64 *val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(adis16400_product_id_fops,
+DEFINE_DEBUGFS_ATTRIBUTE(adis16400_product_id_fops,
adis16400_show_product_id, NULL, "%lld\n");
static int adis16400_show_flash_count(void *arg, u64 *val)
@@ -275,23 +275,22 @@ static int adis16400_show_flash_count(void *arg, u64 *val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(adis16400_flash_count_fops,
+DEFINE_DEBUGFS_ATTRIBUTE(adis16400_flash_count_fops,
adis16400_show_flash_count, NULL, "%lld\n");
static int adis16400_debugfs_init(struct iio_dev *indio_dev)
{
struct adis16400_state *st = iio_priv(indio_dev);
+ struct dentry *d = iio_get_debugfs_dentry(indio_dev);
if (st->variant->flags & ADIS16400_HAS_SERIAL_NUMBER)
- debugfs_create_file("serial_number", 0400,
- indio_dev->debugfs_dentry, st,
- &adis16400_serial_number_fops);
+ debugfs_create_file_unsafe("serial_number", 0400,
+ d, st, &adis16400_serial_number_fops);
if (st->variant->flags & ADIS16400_HAS_PROD_ID)
- debugfs_create_file("product_id", 0400,
- indio_dev->debugfs_dentry, st,
- &adis16400_product_id_fops);
- debugfs_create_file("flash_count", 0400, indio_dev->debugfs_dentry,
- st, &adis16400_flash_count_fops);
+ debugfs_create_file_unsafe("product_id", 0400,
+ d, st, &adis16400_product_id_fops);
+ debugfs_create_file_unsafe("flash_count", 0400,
+ d, st, &adis16400_flash_count_fops);
return 0;
}
@@ -1194,7 +1193,7 @@ static int adis16400_probe(struct spi_device *spi)
indio_dev->available_scan_masks = st->avail_scan_mask;
st->adis.burst = &adis16400_burst;
if (st->variant->flags & ADIS16400_BURST_DIAG_STAT)
- st->adis.burst->extra_len = sizeof(u16);
+ st->adis.burst_extra_len = sizeof(u16);
}
adis16400_data = &st->variant->adis_data;
diff --git a/drivers/iio/imu/adis16460.c b/drivers/iio/imu/adis16460.c
index 0027683d0256..ad20c488a3ba 100644
--- a/drivers/iio/imu/adis16460.c
+++ b/drivers/iio/imu/adis16460.c
@@ -87,8 +87,8 @@ static int adis16460_show_serial_number(void *arg, u64 *val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(adis16460_serial_number_fops,
- adis16460_show_serial_number, NULL, "0x%.4llx\n");
+DEFINE_DEBUGFS_ATTRIBUTE(adis16460_serial_number_fops,
+ adis16460_show_serial_number, NULL, "0x%.4llx\n");
static int adis16460_show_product_id(void *arg, u64 *val)
{
@@ -105,8 +105,8 @@ static int adis16460_show_product_id(void *arg, u64 *val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(adis16460_product_id_fops,
- adis16460_show_product_id, NULL, "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(adis16460_product_id_fops,
+ adis16460_show_product_id, NULL, "%llu\n");
static int adis16460_show_flash_count(void *arg, u64 *val)
{
@@ -123,19 +123,20 @@ static int adis16460_show_flash_count(void *arg, u64 *val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(adis16460_flash_count_fops,
- adis16460_show_flash_count, NULL, "%lld\n");
+DEFINE_DEBUGFS_ATTRIBUTE(adis16460_flash_count_fops,
+ adis16460_show_flash_count, NULL, "%lld\n");
static int adis16460_debugfs_init(struct iio_dev *indio_dev)
{
struct adis16460 *adis16460 = iio_priv(indio_dev);
-
- debugfs_create_file("serial_number", 0400, indio_dev->debugfs_dentry,
- adis16460, &adis16460_serial_number_fops);
- debugfs_create_file("product_id", 0400, indio_dev->debugfs_dentry,
- adis16460, &adis16460_product_id_fops);
- debugfs_create_file("flash_count", 0400, indio_dev->debugfs_dentry,
- adis16460, &adis16460_flash_count_fops);
+ struct dentry *d = iio_get_debugfs_dentry(indio_dev);
+
+ debugfs_create_file_unsafe("serial_number", 0400,
+ d, adis16460, &adis16460_serial_number_fops);
+ debugfs_create_file_unsafe("product_id", 0400,
+ d, adis16460, &adis16460_product_id_fops);
+ debugfs_create_file_unsafe("flash_count", 0400,
+ d, adis16460, &adis16460_flash_count_fops);
return 0;
}
diff --git a/drivers/iio/imu/adis16475.c b/drivers/iio/imu/adis16475.c
new file mode 100644
index 000000000000..c6dac4fc67a1
--- /dev/null
+++ b/drivers/iio/imu/adis16475.c
@@ -0,0 +1,1338 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ADIS16475 IMU driver
+ *
+ * Copyright 2019 Analog Devices Inc.
+ */
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/imu/adis.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/property.h>
+#include <linux/spi/spi.h>
+
+#define ADIS16475_REG_DIAG_STAT 0x02
+#define ADIS16475_REG_X_GYRO_L 0x04
+#define ADIS16475_REG_Y_GYRO_L 0x08
+#define ADIS16475_REG_Z_GYRO_L 0x0C
+#define ADIS16475_REG_X_ACCEL_L 0x10
+#define ADIS16475_REG_Y_ACCEL_L 0x14
+#define ADIS16475_REG_Z_ACCEL_L 0x18
+#define ADIS16475_REG_TEMP_OUT 0x1c
+#define ADIS16475_REG_X_GYRO_BIAS_L 0x40
+#define ADIS16475_REG_Y_GYRO_BIAS_L 0x44
+#define ADIS16475_REG_Z_GYRO_BIAS_L 0x48
+#define ADIS16475_REG_X_ACCEL_BIAS_L 0x4c
+#define ADIS16475_REG_Y_ACCEL_BIAS_L 0x50
+#define ADIS16475_REG_Z_ACCEL_BIAS_L 0x54
+#define ADIS16475_REG_FILT_CTRL 0x5c
+#define ADIS16475_FILT_CTRL_MASK GENMASK(2, 0)
+#define ADIS16475_FILT_CTRL(x) FIELD_PREP(ADIS16475_FILT_CTRL_MASK, x)
+#define ADIS16475_REG_MSG_CTRL 0x60
+#define ADIS16475_MSG_CTRL_DR_POL_MASK BIT(0)
+#define ADIS16475_MSG_CTRL_DR_POL(x) \
+ FIELD_PREP(ADIS16475_MSG_CTRL_DR_POL_MASK, x)
+#define ADIS16475_SYNC_MODE_MASK GENMASK(4, 2)
+#define ADIS16475_SYNC_MODE(x) FIELD_PREP(ADIS16475_SYNC_MODE_MASK, x)
+#define ADIS16475_REG_UP_SCALE 0x62
+#define ADIS16475_REG_DEC_RATE 0x64
+#define ADIS16475_REG_GLOB_CMD 0x68
+#define ADIS16475_REG_FIRM_REV 0x6c
+#define ADIS16475_REG_FIRM_DM 0x6e
+#define ADIS16475_REG_FIRM_Y 0x70
+#define ADIS16475_REG_PROD_ID 0x72
+#define ADIS16475_REG_SERIAL_NUM 0x74
+#define ADIS16475_REG_FLASH_CNT 0x7c
+#define ADIS16500_BURST32_MASK BIT(9)
+#define ADIS16500_BURST32(x) FIELD_PREP(ADIS16500_BURST32_MASK, x)
+/* number of data elements in burst mode */
+#define ADIS16475_BURST32_MAX_DATA 32
+#define ADIS16475_BURST_MAX_DATA 20
+#define ADIS16475_MAX_SCAN_DATA 20
+/* spi max speed in burst mode */
+#define ADIS16475_BURST_MAX_SPEED 1000000
+#define ADIS16475_LSB_DEC_MASK BIT(0)
+#define ADIS16475_LSB_FIR_MASK BIT(1)
+
+enum {
+ ADIS16475_SYNC_DIRECT = 1,
+ ADIS16475_SYNC_SCALED,
+ ADIS16475_SYNC_OUTPUT,
+ ADIS16475_SYNC_PULSE = 5,
+};
+
+struct adis16475_sync {
+ u16 sync_mode;
+ u16 min_rate;
+ u16 max_rate;
+};
+
+struct adis16475_chip_info {
+ const struct iio_chan_spec *channels;
+ const struct adis16475_sync *sync;
+ const struct adis_data adis_data;
+ const char *name;
+ u32 num_channels;
+ u32 gyro_max_val;
+ u32 gyro_max_scale;
+ u32 accel_max_val;
+ u32 accel_max_scale;
+ u32 temp_scale;
+ u32 int_clk;
+ u16 max_dec;
+ u8 num_sync;
+ bool has_burst32;
+};
+
+struct adis16475 {
+ const struct adis16475_chip_info *info;
+ struct adis adis;
+ u32 clk_freq;
+ bool burst32;
+ unsigned long lsb_flag;
+ /* Alignment needed for the timestamp */
+ __be16 data[ADIS16475_MAX_SCAN_DATA] __aligned(8);
+};
+
+enum {
+ ADIS16475_SCAN_GYRO_X,
+ ADIS16475_SCAN_GYRO_Y,
+ ADIS16475_SCAN_GYRO_Z,
+ ADIS16475_SCAN_ACCEL_X,
+ ADIS16475_SCAN_ACCEL_Y,
+ ADIS16475_SCAN_ACCEL_Z,
+ ADIS16475_SCAN_TEMP,
+ ADIS16475_SCAN_DIAG_S_FLAGS,
+ ADIS16475_SCAN_CRC_FAILURE,
+};
+
+#ifdef CONFIG_DEBUG_FS
+static ssize_t adis16475_show_firmware_revision(struct file *file,
+ char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct adis16475 *st = file->private_data;
+ char buf[7];
+ size_t len;
+ u16 rev;
+ int ret;
+
+ ret = adis_read_reg_16(&st->adis, ADIS16475_REG_FIRM_REV, &rev);
+ if (ret)
+ return ret;
+
+ len = scnprintf(buf, sizeof(buf), "%x.%x\n", rev >> 8, rev & 0xff);
+
+ return simple_read_from_buffer(userbuf, count, ppos, buf, len);
+}
+
+static const struct file_operations adis16475_firmware_revision_fops = {
+ .open = simple_open,
+ .read = adis16475_show_firmware_revision,
+ .llseek = default_llseek,
+ .owner = THIS_MODULE,
+};
+
+static ssize_t adis16475_show_firmware_date(struct file *file,
+ char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct adis16475 *st = file->private_data;
+ u16 md, year;
+ char buf[12];
+ size_t len;
+ int ret;
+
+ ret = adis_read_reg_16(&st->adis, ADIS16475_REG_FIRM_Y, &year);
+ if (ret)
+ return ret;
+
+ ret = adis_read_reg_16(&st->adis, ADIS16475_REG_FIRM_DM, &md);
+ if (ret)
+ return ret;
+
+ len = snprintf(buf, sizeof(buf), "%.2x-%.2x-%.4x\n", md >> 8, md & 0xff,
+ year);
+
+ return simple_read_from_buffer(userbuf, count, ppos, buf, len);
+}
+
+static const struct file_operations adis16475_firmware_date_fops = {
+ .open = simple_open,
+ .read = adis16475_show_firmware_date,
+ .llseek = default_llseek,
+ .owner = THIS_MODULE,
+};
+
+static int adis16475_show_serial_number(void *arg, u64 *val)
+{
+ struct adis16475 *st = arg;
+ u16 serial;
+ int ret;
+
+ ret = adis_read_reg_16(&st->adis, ADIS16475_REG_SERIAL_NUM, &serial);
+ if (ret)
+ return ret;
+
+ *val = serial;
+
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(adis16475_serial_number_fops,
+ adis16475_show_serial_number, NULL, "0x%.4llx\n");
+
+static int adis16475_show_product_id(void *arg, u64 *val)
+{
+ struct adis16475 *st = arg;
+ u16 prod_id;
+ int ret;
+
+ ret = adis_read_reg_16(&st->adis, ADIS16475_REG_PROD_ID, &prod_id);
+ if (ret)
+ return ret;
+
+ *val = prod_id;
+
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(adis16475_product_id_fops,
+ adis16475_show_product_id, NULL, "%llu\n");
+
+static int adis16475_show_flash_count(void *arg, u64 *val)
+{
+ struct adis16475 *st = arg;
+ u32 flash_count;
+ int ret;
+
+ ret = adis_read_reg_32(&st->adis, ADIS16475_REG_FLASH_CNT,
+ &flash_count);
+ if (ret)
+ return ret;
+
+ *val = flash_count;
+
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(adis16475_flash_count_fops,
+ adis16475_show_flash_count, NULL, "%lld\n");
+
+static void adis16475_debugfs_init(struct iio_dev *indio_dev)
+{
+ struct adis16475 *st = iio_priv(indio_dev);
+ struct dentry *d = iio_get_debugfs_dentry(indio_dev);
+
+ debugfs_create_file_unsafe("serial_number", 0400,
+ d, st, &adis16475_serial_number_fops);
+ debugfs_create_file_unsafe("product_id", 0400,
+ d, st, &adis16475_product_id_fops);
+ debugfs_create_file_unsafe("flash_count", 0400,
+ d, st, &adis16475_flash_count_fops);
+ debugfs_create_file("firmware_revision", 0400,
+ d, st, &adis16475_firmware_revision_fops);
+ debugfs_create_file("firmware_date", 0400, d,
+ st, &adis16475_firmware_date_fops);
+}
+#else
+static void adis16475_debugfs_init(struct iio_dev *indio_dev)
+{
+}
+#endif
+
+static int adis16475_get_freq(struct adis16475 *st, u32 *freq)
+{
+ int ret;
+ u16 dec;
+
+ ret = adis_read_reg_16(&st->adis, ADIS16475_REG_DEC_RATE, &dec);
+ if (ret)
+ return -EINVAL;
+
+ *freq = DIV_ROUND_CLOSEST(st->clk_freq, dec + 1);
+
+ return 0;
+}
+
+static int adis16475_set_freq(struct adis16475 *st, const u32 freq)
+{
+ u16 dec;
+ int ret;
+
+ if (!freq)
+ return -EINVAL;
+
+ dec = DIV_ROUND_CLOSEST(st->clk_freq, freq);
+
+ if (dec)
+ dec--;
+
+ if (dec > st->info->max_dec)
+ dec = st->info->max_dec;
+
+ ret = adis_write_reg_16(&st->adis, ADIS16475_REG_DEC_RATE, dec);
+ if (ret)
+ return ret;
+
+ /*
+ * If decimation is used, then gyro and accel data will have meaningful
+ * bits on the LSB registers. This info is used on the trigger handler.
+ */
+ assign_bit(ADIS16475_LSB_DEC_MASK, &st->lsb_flag, dec);
+
+ return 0;
+}
+
+/* The values are approximated. */
+static const u32 adis16475_3db_freqs[] = {
+ [0] = 720, /* Filter disabled, full BW (~720Hz) */
+ [1] = 360,
+ [2] = 164,
+ [3] = 80,
+ [4] = 40,
+ [5] = 20,
+ [6] = 10,
+};
+
+static int adis16475_get_filter(struct adis16475 *st, u32 *filter)
+{
+ u16 filter_sz;
+ int ret;
+ const int mask = ADIS16475_FILT_CTRL_MASK;
+
+ ret = adis_read_reg_16(&st->adis, ADIS16475_REG_FILT_CTRL, &filter_sz);
+ if (ret)
+ return ret;
+
+ *filter = adis16475_3db_freqs[filter_sz & mask];
+
+ return 0;
+}
+
+static int adis16475_set_filter(struct adis16475 *st, const u32 filter)
+{
+ int i = ARRAY_SIZE(adis16475_3db_freqs);
+ int ret;
+
+ while (--i) {
+ if (adis16475_3db_freqs[i] >= filter)
+ break;
+ }
+
+ ret = adis_write_reg_16(&st->adis, ADIS16475_REG_FILT_CTRL,
+ ADIS16475_FILT_CTRL(i));
+ if (ret)
+ return ret;
+
+ /*
+ * If FIR is used, then gyro and accel data will have meaningful
+ * bits on the LSB registers. This info is used on the trigger handler.
+ */
+ assign_bit(ADIS16475_LSB_FIR_MASK, &st->lsb_flag, i);
+
+ return 0;
+}
+
+static const u32 adis16475_calib_regs[] = {
+ [ADIS16475_SCAN_GYRO_X] = ADIS16475_REG_X_GYRO_BIAS_L,
+ [ADIS16475_SCAN_GYRO_Y] = ADIS16475_REG_Y_GYRO_BIAS_L,
+ [ADIS16475_SCAN_GYRO_Z] = ADIS16475_REG_Z_GYRO_BIAS_L,
+ [ADIS16475_SCAN_ACCEL_X] = ADIS16475_REG_X_ACCEL_BIAS_L,
+ [ADIS16475_SCAN_ACCEL_Y] = ADIS16475_REG_Y_ACCEL_BIAS_L,
+ [ADIS16475_SCAN_ACCEL_Z] = ADIS16475_REG_Z_ACCEL_BIAS_L,
+};
+
+static int adis16475_read_raw(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ int *val, int *val2, long info)
+{
+ struct adis16475 *st = iio_priv(indio_dev);
+ int ret;
+ u32 tmp;
+
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ return adis_single_conversion(indio_dev, chan, 0, val);
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_ANGL_VEL:
+ *val = st->info->gyro_max_val;
+ *val2 = st->info->gyro_max_scale;
+ return IIO_VAL_FRACTIONAL;
+ case IIO_ACCEL:
+ *val = st->info->accel_max_val;
+ *val2 = st->info->accel_max_scale;
+ return IIO_VAL_FRACTIONAL;
+ case IIO_TEMP:
+ *val = st->info->temp_scale;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_CALIBBIAS:
+ ret = adis_read_reg_32(&st->adis,
+ adis16475_calib_regs[chan->scan_index],
+ val);
+ if (ret)
+ return ret;
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ ret = adis16475_get_filter(st, val);
+ if (ret)
+ return ret;
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ ret = adis16475_get_freq(st, &tmp);
+ if (ret)
+ return ret;
+
+ *val = tmp / 1000;
+ *val2 = (tmp % 1000) * 1000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int adis16475_write_raw(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ int val, int val2, long info)
+{
+ struct adis16475 *st = iio_priv(indio_dev);
+ u32 tmp;
+
+ switch (info) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ tmp = val * 1000 + val2 / 1000;
+ return adis16475_set_freq(st, tmp);
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ return adis16475_set_filter(st, val);
+ case IIO_CHAN_INFO_CALIBBIAS:
+ return adis_write_reg_32(&st->adis,
+ adis16475_calib_regs[chan->scan_index],
+ val);
+ default:
+ return -EINVAL;
+ }
+}
+
+#define ADIS16475_MOD_CHAN(_type, _mod, _address, _si, _r_bits, _s_bits) \
+ { \
+ .type = (_type), \
+ .modified = 1, \
+ .channel2 = (_mod), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_CALIBBIAS), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ) | \
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY), \
+ .address = (_address), \
+ .scan_index = (_si), \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = (_r_bits), \
+ .storagebits = (_s_bits), \
+ .endianness = IIO_BE, \
+ }, \
+ }
+
+#define ADIS16475_GYRO_CHANNEL(_mod) \
+ ADIS16475_MOD_CHAN(IIO_ANGL_VEL, IIO_MOD_ ## _mod, \
+ ADIS16475_REG_ ## _mod ## _GYRO_L, \
+ ADIS16475_SCAN_GYRO_ ## _mod, 32, 32)
+
+#define ADIS16475_ACCEL_CHANNEL(_mod) \
+ ADIS16475_MOD_CHAN(IIO_ACCEL, IIO_MOD_ ## _mod, \
+ ADIS16475_REG_ ## _mod ## _ACCEL_L, \
+ ADIS16475_SCAN_ACCEL_ ## _mod, 32, 32)
+
+#define ADIS16475_TEMP_CHANNEL() { \
+ .type = IIO_TEMP, \
+ .indexed = 1, \
+ .channel = 0, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ) | \
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY), \
+ .address = ADIS16475_REG_TEMP_OUT, \
+ .scan_index = ADIS16475_SCAN_TEMP, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_BE, \
+ }, \
+ }
+
+static const struct iio_chan_spec adis16475_channels[] = {
+ ADIS16475_GYRO_CHANNEL(X),
+ ADIS16475_GYRO_CHANNEL(Y),
+ ADIS16475_GYRO_CHANNEL(Z),
+ ADIS16475_ACCEL_CHANNEL(X),
+ ADIS16475_ACCEL_CHANNEL(Y),
+ ADIS16475_ACCEL_CHANNEL(Z),
+ ADIS16475_TEMP_CHANNEL(),
+ IIO_CHAN_SOFT_TIMESTAMP(7)
+};
+
+enum adis16475_variant {
+ ADIS16470,
+ ADIS16475_1,
+ ADIS16475_2,
+ ADIS16475_3,
+ ADIS16477_1,
+ ADIS16477_2,
+ ADIS16477_3,
+ ADIS16465_1,
+ ADIS16465_2,
+ ADIS16465_3,
+ ADIS16467_1,
+ ADIS16467_2,
+ ADIS16467_3,
+ ADIS16500,
+ ADIS16505_1,
+ ADIS16505_2,
+ ADIS16505_3,
+ ADIS16507_1,
+ ADIS16507_2,
+ ADIS16507_3,
+};
+
+enum {
+ ADIS16475_DIAG_STAT_DATA_PATH = 1,
+ ADIS16475_DIAG_STAT_FLASH_MEM,
+ ADIS16475_DIAG_STAT_SPI,
+ ADIS16475_DIAG_STAT_STANDBY,
+ ADIS16475_DIAG_STAT_SENSOR,
+ ADIS16475_DIAG_STAT_MEMORY,
+ ADIS16475_DIAG_STAT_CLK,
+};
+
+static const char * const adis16475_status_error_msgs[] = {
+ [ADIS16475_DIAG_STAT_DATA_PATH] = "Data Path Overrun",
+ [ADIS16475_DIAG_STAT_FLASH_MEM] = "Flash memory update failure",
+ [ADIS16475_DIAG_STAT_SPI] = "SPI communication error",
+ [ADIS16475_DIAG_STAT_STANDBY] = "Standby mode",
+ [ADIS16475_DIAG_STAT_SENSOR] = "Sensor failure",
+ [ADIS16475_DIAG_STAT_MEMORY] = "Memory failure",
+ [ADIS16475_DIAG_STAT_CLK] = "Clock error",
+};
+
+static int adis16475_enable_irq(struct adis *adis, bool enable)
+{
+ /*
+ * There is no way to gate the data-ready signal internally inside the
+ * ADIS16475. We can only control its polarity...
+ */
+ if (enable)
+ enable_irq(adis->spi->irq);
+ else
+ disable_irq(adis->spi->irq);
+
+ return 0;
+}
+
+#define ADIS16475_DATA(_prod_id, _timeouts) \
+{ \
+ .msc_ctrl_reg = ADIS16475_REG_MSG_CTRL, \
+ .glob_cmd_reg = ADIS16475_REG_GLOB_CMD, \
+ .diag_stat_reg = ADIS16475_REG_DIAG_STAT, \
+ .prod_id_reg = ADIS16475_REG_PROD_ID, \
+ .prod_id = (_prod_id), \
+ .self_test_mask = BIT(2), \
+ .self_test_reg = ADIS16475_REG_GLOB_CMD, \
+ .cs_change_delay = 16, \
+ .read_delay = 5, \
+ .write_delay = 5, \
+ .status_error_msgs = adis16475_status_error_msgs, \
+ .status_error_mask = BIT(ADIS16475_DIAG_STAT_DATA_PATH) | \
+ BIT(ADIS16475_DIAG_STAT_FLASH_MEM) | \
+ BIT(ADIS16475_DIAG_STAT_SPI) | \
+ BIT(ADIS16475_DIAG_STAT_STANDBY) | \
+ BIT(ADIS16475_DIAG_STAT_SENSOR) | \
+ BIT(ADIS16475_DIAG_STAT_MEMORY) | \
+ BIT(ADIS16475_DIAG_STAT_CLK), \
+ .enable_irq = adis16475_enable_irq, \
+ .timeouts = (_timeouts), \
+}
+
+static const struct adis16475_sync adis16475_sync_mode[] = {
+ { ADIS16475_SYNC_OUTPUT },
+ { ADIS16475_SYNC_DIRECT, 1900, 2100 },
+ { ADIS16475_SYNC_SCALED, 1, 128 },
+ { ADIS16475_SYNC_PULSE, 1000, 2100 },
+};
+
+static const struct adis_timeout adis16475_timeouts = {
+ .reset_ms = 200,
+ .sw_reset_ms = 200,
+ .self_test_ms = 20,
+};
+
+static const struct adis_timeout adis1650x_timeouts = {
+ .reset_ms = 260,
+ .sw_reset_ms = 260,
+ .self_test_ms = 30,
+};
+
+static const struct adis16475_chip_info adis16475_chip_info[] = {
+ [ADIS16470] = {
+ .name = "adis16470",
+ .num_channels = ARRAY_SIZE(adis16475_channels),
+ .channels = adis16475_channels,
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(10 << 16),
+ .accel_max_val = 1,
+ .accel_max_scale = IIO_M_S_2_TO_G(800 << 16),
+ .temp_scale = 100,
+ .int_clk = 2000,
+ .max_dec = 1999,
+ .sync = adis16475_sync_mode,
+ .num_sync = ARRAY_SIZE(adis16475_sync_mode),
+ .adis_data = ADIS16475_DATA(16470, &adis16475_timeouts),
+ },
+ [ADIS16475_1] = {
+ .name = "adis16475-1",
+ .num_channels = ARRAY_SIZE(adis16475_channels),
+ .channels = adis16475_channels,
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(160 << 16),
+ .accel_max_val = 1,
+ .accel_max_scale = IIO_M_S_2_TO_G(4000 << 16),
+ .temp_scale = 100,
+ .int_clk = 2000,
+ .max_dec = 1999,
+ .sync = adis16475_sync_mode,
+ .num_sync = ARRAY_SIZE(adis16475_sync_mode),
+ .adis_data = ADIS16475_DATA(16475, &adis16475_timeouts),
+ },
+ [ADIS16475_2] = {
+ .name = "adis16475-2",
+ .num_channels = ARRAY_SIZE(adis16475_channels),
+ .channels = adis16475_channels,
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(40 << 16),
+ .accel_max_val = 1,
+ .accel_max_scale = IIO_M_S_2_TO_G(4000 << 16),
+ .temp_scale = 100,
+ .int_clk = 2000,
+ .max_dec = 1999,
+ .sync = adis16475_sync_mode,
+ .num_sync = ARRAY_SIZE(adis16475_sync_mode),
+ .adis_data = ADIS16475_DATA(16475, &adis16475_timeouts),
+ },
+ [ADIS16475_3] = {
+ .name = "adis16475-3",
+ .num_channels = ARRAY_SIZE(adis16475_channels),
+ .channels = adis16475_channels,
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(10 << 16),
+ .accel_max_val = 1,
+ .accel_max_scale = IIO_M_S_2_TO_G(4000 << 16),
+ .temp_scale = 100,
+ .int_clk = 2000,
+ .max_dec = 1999,
+ .sync = adis16475_sync_mode,
+ .num_sync = ARRAY_SIZE(adis16475_sync_mode),
+ .adis_data = ADIS16475_DATA(16475, &adis16475_timeouts),
+ },
+ [ADIS16477_1] = {
+ .name = "adis16477-1",
+ .num_channels = ARRAY_SIZE(adis16475_channels),
+ .channels = adis16475_channels,
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(160 << 16),
+ .accel_max_val = 1,
+ .accel_max_scale = IIO_M_S_2_TO_G(800 << 16),
+ .temp_scale = 100,
+ .int_clk = 2000,
+ .max_dec = 1999,
+ .sync = adis16475_sync_mode,
+ .num_sync = ARRAY_SIZE(adis16475_sync_mode),
+ .adis_data = ADIS16475_DATA(16477, &adis16475_timeouts),
+ },
+ [ADIS16477_2] = {
+ .name = "adis16477-2",
+ .num_channels = ARRAY_SIZE(adis16475_channels),
+ .channels = adis16475_channels,
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(40 << 16),
+ .accel_max_val = 1,
+ .accel_max_scale = IIO_M_S_2_TO_G(800 << 16),
+ .temp_scale = 100,
+ .int_clk = 2000,
+ .max_dec = 1999,
+ .sync = adis16475_sync_mode,
+ .num_sync = ARRAY_SIZE(adis16475_sync_mode),
+ .adis_data = ADIS16475_DATA(16477, &adis16475_timeouts),
+ },
+ [ADIS16477_3] = {
+ .name = "adis16477-3",
+ .num_channels = ARRAY_SIZE(adis16475_channels),
+ .channels = adis16475_channels,
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(10 << 16),
+ .accel_max_val = 1,
+ .accel_max_scale = IIO_M_S_2_TO_G(800 << 16),
+ .temp_scale = 100,
+ .int_clk = 2000,
+ .max_dec = 1999,
+ .sync = adis16475_sync_mode,
+ .num_sync = ARRAY_SIZE(adis16475_sync_mode),
+ .adis_data = ADIS16475_DATA(16477, &adis16475_timeouts),
+ },
+ [ADIS16465_1] = {
+ .name = "adis16465-1",
+ .num_channels = ARRAY_SIZE(adis16475_channels),
+ .channels = adis16475_channels,
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(160 << 16),
+ .accel_max_val = 1,
+ .accel_max_scale = IIO_M_S_2_TO_G(4000 << 16),
+ .temp_scale = 100,
+ .int_clk = 2000,
+ .max_dec = 1999,
+ .sync = adis16475_sync_mode,
+ .num_sync = ARRAY_SIZE(adis16475_sync_mode),
+ .adis_data = ADIS16475_DATA(16465, &adis16475_timeouts),
+ },
+ [ADIS16465_2] = {
+ .name = "adis16465-2",
+ .num_channels = ARRAY_SIZE(adis16475_channels),
+ .channels = adis16475_channels,
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(40 << 16),
+ .accel_max_val = 1,
+ .accel_max_scale = IIO_M_S_2_TO_G(4000 << 16),
+ .temp_scale = 100,
+ .int_clk = 2000,
+ .max_dec = 1999,
+ .sync = adis16475_sync_mode,
+ .num_sync = ARRAY_SIZE(adis16475_sync_mode),
+ .adis_data = ADIS16475_DATA(16465, &adis16475_timeouts),
+ },
+ [ADIS16465_3] = {
+ .name = "adis16465-3",
+ .num_channels = ARRAY_SIZE(adis16475_channels),
+ .channels = adis16475_channels,
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(10 << 16),
+ .accel_max_val = 1,
+ .accel_max_scale = IIO_M_S_2_TO_G(4000 << 16),
+ .temp_scale = 100,
+ .int_clk = 2000,
+ .max_dec = 1999,
+ .sync = adis16475_sync_mode,
+ .num_sync = ARRAY_SIZE(adis16475_sync_mode),
+ .adis_data = ADIS16475_DATA(16465, &adis16475_timeouts),
+ },
+ [ADIS16467_1] = {
+ .name = "adis16467-1",
+ .num_channels = ARRAY_SIZE(adis16475_channels),
+ .channels = adis16475_channels,
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(160 << 16),
+ .accel_max_val = 1,
+ .accel_max_scale = IIO_M_S_2_TO_G(800 << 16),
+ .temp_scale = 100,
+ .int_clk = 2000,
+ .max_dec = 1999,
+ .sync = adis16475_sync_mode,
+ .num_sync = ARRAY_SIZE(adis16475_sync_mode),
+ .adis_data = ADIS16475_DATA(16467, &adis16475_timeouts),
+ },
+ [ADIS16467_2] = {
+ .name = "adis16467-2",
+ .num_channels = ARRAY_SIZE(adis16475_channels),
+ .channels = adis16475_channels,
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(40 << 16),
+ .accel_max_val = 1,
+ .accel_max_scale = IIO_M_S_2_TO_G(800 << 16),
+ .temp_scale = 100,
+ .int_clk = 2000,
+ .max_dec = 1999,
+ .sync = adis16475_sync_mode,
+ .num_sync = ARRAY_SIZE(adis16475_sync_mode),
+ .adis_data = ADIS16475_DATA(16467, &adis16475_timeouts),
+ },
+ [ADIS16467_3] = {
+ .name = "adis16467-3",
+ .num_channels = ARRAY_SIZE(adis16475_channels),
+ .channels = adis16475_channels,
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(10 << 16),
+ .accel_max_val = 1,
+ .accel_max_scale = IIO_M_S_2_TO_G(800 << 16),
+ .temp_scale = 100,
+ .int_clk = 2000,
+ .max_dec = 1999,
+ .sync = adis16475_sync_mode,
+ .num_sync = ARRAY_SIZE(adis16475_sync_mode),
+ .adis_data = ADIS16475_DATA(16467, &adis16475_timeouts),
+ },
+ [ADIS16500] = {
+ .name = "adis16500",
+ .num_channels = ARRAY_SIZE(adis16475_channels),
+ .channels = adis16475_channels,
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(10 << 16),
+ .accel_max_val = 392,
+ .accel_max_scale = 32000 << 16,
+ .temp_scale = 100,
+ .int_clk = 2000,
+ .max_dec = 1999,
+ .sync = adis16475_sync_mode,
+ /* pulse sync not supported */
+ .num_sync = ARRAY_SIZE(adis16475_sync_mode) - 1,
+ .has_burst32 = true,
+ .adis_data = ADIS16475_DATA(16500, &adis1650x_timeouts),
+ },
+ [ADIS16505_1] = {
+ .name = "adis16505-1",
+ .num_channels = ARRAY_SIZE(adis16475_channels),
+ .channels = adis16475_channels,
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(160 << 16),
+ .accel_max_val = 78,
+ .accel_max_scale = 32000 << 16,
+ .temp_scale = 100,
+ .int_clk = 2000,
+ .max_dec = 1999,
+ .sync = adis16475_sync_mode,
+ /* pulse sync not supported */
+ .num_sync = ARRAY_SIZE(adis16475_sync_mode) - 1,
+ .has_burst32 = true,
+ .adis_data = ADIS16475_DATA(16505, &adis1650x_timeouts),
+ },
+ [ADIS16505_2] = {
+ .name = "adis16505-2",
+ .num_channels = ARRAY_SIZE(adis16475_channels),
+ .channels = adis16475_channels,
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(40 << 16),
+ .accel_max_val = 78,
+ .accel_max_scale = 32000 << 16,
+ .temp_scale = 100,
+ .int_clk = 2000,
+ .max_dec = 1999,
+ .sync = adis16475_sync_mode,
+ /* pulse sync not supported */
+ .num_sync = ARRAY_SIZE(adis16475_sync_mode) - 1,
+ .has_burst32 = true,
+ .adis_data = ADIS16475_DATA(16505, &adis1650x_timeouts),
+ },
+ [ADIS16505_3] = {
+ .name = "adis16505-3",
+ .num_channels = ARRAY_SIZE(adis16475_channels),
+ .channels = adis16475_channels,
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(10 << 16),
+ .accel_max_val = 78,
+ .accel_max_scale = 32000 << 16,
+ .temp_scale = 100,
+ .int_clk = 2000,
+ .max_dec = 1999,
+ .sync = adis16475_sync_mode,
+ /* pulse sync not supported */
+ .num_sync = ARRAY_SIZE(adis16475_sync_mode) - 1,
+ .has_burst32 = true,
+ .adis_data = ADIS16475_DATA(16505, &adis1650x_timeouts),
+ },
+ [ADIS16507_1] = {
+ .name = "adis16507-1",
+ .num_channels = ARRAY_SIZE(adis16475_channels),
+ .channels = adis16475_channels,
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(160 << 16),
+ .accel_max_val = 392,
+ .accel_max_scale = 32000 << 16,
+ .temp_scale = 100,
+ .int_clk = 2000,
+ .max_dec = 1999,
+ .sync = adis16475_sync_mode,
+ /* pulse sync not supported */
+ .num_sync = ARRAY_SIZE(adis16475_sync_mode) - 1,
+ .has_burst32 = true,
+ .adis_data = ADIS16475_DATA(16507, &adis1650x_timeouts),
+ },
+ [ADIS16507_2] = {
+ .name = "adis16507-2",
+ .num_channels = ARRAY_SIZE(adis16475_channels),
+ .channels = adis16475_channels,
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(40 << 16),
+ .accel_max_val = 392,
+ .accel_max_scale = 32000 << 16,
+ .temp_scale = 100,
+ .int_clk = 2000,
+ .max_dec = 1999,
+ .sync = adis16475_sync_mode,
+ /* pulse sync not supported */
+ .num_sync = ARRAY_SIZE(adis16475_sync_mode) - 1,
+ .has_burst32 = true,
+ .adis_data = ADIS16475_DATA(16507, &adis1650x_timeouts),
+ },
+ [ADIS16507_3] = {
+ .name = "adis16507-3",
+ .num_channels = ARRAY_SIZE(adis16475_channels),
+ .channels = adis16475_channels,
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(10 << 16),
+ .accel_max_val = 392,
+ .accel_max_scale = 32000 << 16,
+ .temp_scale = 100,
+ .int_clk = 2000,
+ .max_dec = 1999,
+ .sync = adis16475_sync_mode,
+ /* pulse sync not supported */
+ .num_sync = ARRAY_SIZE(adis16475_sync_mode) - 1,
+ .has_burst32 = true,
+ .adis_data = ADIS16475_DATA(16507, &adis1650x_timeouts),
+ },
+};
+
+static const struct iio_info adis16475_info = {
+ .read_raw = &adis16475_read_raw,
+ .write_raw = &adis16475_write_raw,
+ .update_scan_mode = adis_update_scan_mode,
+ .debugfs_reg_access = adis_debugfs_reg_access,
+};
+
+static struct adis_burst adis16475_burst = {
+ .en = true,
+ .reg_cmd = ADIS16475_REG_GLOB_CMD,
+ /*
+ * adis_update_scan_mode_burst() sets the burst length with respect to
+ * the number of channels and allocates 16 bits for each. However,
+ * adis1647x devices also need space for DIAG_STAT, DATA_CNTR or
+ * TIME_STAMP (depending on the clock mode, but these bytes are
+ * don't-cares for us) and the CRC.
+ */
+ .extra_len = 3 * sizeof(u16),
+ .burst_max_len = ADIS16475_BURST32_MAX_DATA,
+};
+
+static bool adis16475_validate_crc(const u8 *buffer, u16 crc,
+ const bool burst32)
+{
+ int i;
+ /* extra 6 elements for low gyro and accel */
+ const u16 sz = burst32 ? ADIS16475_BURST32_MAX_DATA :
+ ADIS16475_BURST_MAX_DATA;
+
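+ /*
+ * The expected checksum is the byte-wise sum of the burst payload
+ * (excluding the checksum word itself), so subtracting every payload
+ * byte from the received value should leave zero.
+ */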
+ for (i = 0; i < sz - 2; i++)
+ crc -= buffer[i];
+
+ return crc == 0;
+}
+
+static void adis16475_burst32_check(struct adis16475 *st)
+{
+ int ret;
+ struct adis *adis = &st->adis;
+
+ if (!st->info->has_burst32)
+ return;
+
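+ /*
+ * Switch to 32-bit bursts whenever the LSB words carry valid data
+ * (i.e. a FIR filter is enabled) so that they are captured as part of
+ * the burst frame instead of being read manually.
+ */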
+ if (st->lsb_flag && !st->burst32) {
+ const u16 en = ADIS16500_BURST32(1);
+
+ ret = __adis_update_bits(&st->adis, ADIS16475_REG_MSG_CTRL,
+ ADIS16500_BURST32_MASK, en);
+ if (ret)
+ return;
+
+ st->burst32 = true;
+
+ /*
+ * In 32-bit mode we need 2 extra bytes for each gyro
+ * and accel channel.
+ */
+ adis->burst_extra_len = 6 * sizeof(u16);
+ adis->xfer[1].len += 6 * sizeof(u16);
+ dev_dbg(&adis->spi->dev, "Enable burst32 mode, xfer:%d",
+ adis->xfer[1].len);
+
+ } else if (!st->lsb_flag && st->burst32) {
+ const u16 en = ADIS16500_BURST32(0);
+
+ ret = __adis_update_bits(&st->adis, ADIS16475_REG_MSG_CTRL,
+ ADIS16500_BURST32_MASK, en);
+ if (ret)
+ return;
+
+ st->burst32 = false;
+
+ /* Remove the extra bits */
+ adis->burst_extra_len = 0;
+ adis->xfer[1].len -= 6 * sizeof(u16);
+ dev_dbg(&adis->spi->dev, "Disable burst32 mode, xfer:%d\n",
+ adis->xfer[1].len);
+ }
+}
+
+static irqreturn_t adis16475_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct adis16475 *st = iio_priv(indio_dev);
+ struct adis *adis = &st->adis;
+ int ret, bit, i = 0;
+ __be16 *buffer;
+ u16 crc;
+ bool valid;
+ /* offset until the first element after gyro and accel */
+ const u8 offset = st->burst32 ? 13 : 7;
+ const u32 cached_spi_speed_hz = adis->spi->max_speed_hz;
+
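+ /* burst transfers are limited to a lower SPI clock than register reads */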
+ adis->spi->max_speed_hz = ADIS16475_BURST_MAX_SPEED;
+
+ ret = spi_sync(adis->spi, &adis->msg);
+ if (ret)
+ return ret;
+
+ adis->spi->max_speed_hz = cached_spi_speed_hz;
+ buffer = adis->buffer;
+
+ crc = be16_to_cpu(buffer[offset + 2]);
+ valid = adis16475_validate_crc(adis->buffer, crc, st->burst32);
+ if (!valid) {
+ dev_err(&adis->spi->dev, "Invalid crc\n");
+ goto check_burst32;
+ }
+
+ for_each_set_bit(bit, indio_dev->active_scan_mask,
+ indio_dev->masklength) {
+ /*
+ * When burst mode is used, the system flags word is the first data
+ * channel in the sequence, but the scan index is 7.
+ */
+ switch (bit) {
+ case ADIS16475_SCAN_TEMP:
+ st->data[i++] = buffer[offset];
+ break;
+ case ADIS16475_SCAN_GYRO_X ... ADIS16475_SCAN_ACCEL_Z:
+ /*
+ * The first 2 bytes of the received data are the
+ * DIAG_STAT reg, hence the +1 offset here...
+ */
+ if (st->burst32) {
+ /* upper 16 */
+ st->data[i++] = buffer[bit * 2 + 2];
+ /* lower 16 */
+ st->data[i++] = buffer[bit * 2 + 1];
+ } else {
+ st->data[i++] = buffer[bit + 1];
+ /*
+ * Don't bother doing the manual read if the
+ * device supports burst32. burst32 will be
+ * enabled in the next call to
+ * adis16475_burst32_check()...
+ */
+ if (st->lsb_flag && !st->info->has_burst32) {
+ u16 val = 0;
+ const u32 reg = ADIS16475_REG_X_GYRO_L +
+ bit * 4;
+
+ adis_read_reg_16(adis, reg, &val);
+ st->data[i++] = cpu_to_be16(val);
+ } else {
+ /* lower not used */
+ st->data[i++] = 0;
+ }
+ }
+ break;
+ }
+ }
+
+ iio_push_to_buffers_with_timestamp(indio_dev, st->data, pf->timestamp);
+check_burst32:
+ /*
+ * We only check the burst mode at the end of the current capture since
+ * it takes a full data ready cycle for the device to update the burst
+ * array.
+ */
+ adis16475_burst32_check(st);
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static void adis16475_disable_clk(void *data)
+{
+ clk_disable_unprepare((struct clk *)data);
+}
+
+static int adis16475_config_sync_mode(struct adis16475 *st)
+{
+ int ret;
+ struct device *dev = &st->adis.spi->dev;
+ const struct adis16475_sync *sync;
+ u32 sync_mode;
+
+ /* default to internal clk */
+ st->clk_freq = st->info->int_clk * 1000;
+
+ ret = device_property_read_u32(dev, "adi,sync-mode", &sync_mode);
+ if (ret)
+ return 0;
+
+ if (sync_mode >= st->info->num_sync) {
+ dev_err(dev, "Invalid sync mode: %u for %s\n", sync_mode,
+ st->info->name);
+ return -EINVAL;
+ }
+
+ sync = &st->info->sync[sync_mode];
+
+ /* All the other modes require external input signal */
+ if (sync->sync_mode != ADIS16475_SYNC_OUTPUT) {
+ struct clk *clk = devm_clk_get(dev, NULL);
+
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, adis16475_disable_clk, clk);
+ if (ret)
+ return ret;
+
+ st->clk_freq = clk_get_rate(clk);
+ if (st->clk_freq < sync->min_rate ||
+ st->clk_freq > sync->max_rate) {
+ dev_err(dev,
+ "Clk rate:%u not in a valid range:[%u %u]\n",
+ st->clk_freq, sync->min_rate, sync->max_rate);
+ return -EINVAL;
+ }
+
+ if (sync->sync_mode == ADIS16475_SYNC_SCALED) {
+ u16 up_scale;
+ u32 scaled_out_freq = 0;
+ /*
+ * If we are in scaled mode, we must have an up_scale.
+ * In scaled mode the allowable input clock range is
+ * 1 Hz to 128 Hz, and the allowable output range is
+ * 1900 to 2100 Hz. Hence, a scale must be given to
+ * get the allowable output.
+ */
+ ret = device_property_read_u32(dev,
+ "adi,scaled-output-hz",
+ &scaled_out_freq);
+ if (ret) {
+ dev_err(dev, "adi,scaled-output-hz must be given when in scaled sync mode");
+ return -EINVAL;
+ } else if (scaled_out_freq < 1900 ||
+ scaled_out_freq > 2100) {
+ dev_err(dev, "Invalid value: %u for adi,scaled-output-hz",
+ scaled_out_freq);
+ return -EINVAL;
+ }
+
+ up_scale = DIV_ROUND_CLOSEST(scaled_out_freq,
+ st->clk_freq);
+
+ ret = __adis_write_reg_16(&st->adis,
+ ADIS16475_REG_UP_SCALE,
+ up_scale);
+ if (ret)
+ return ret;
+
+ st->clk_freq = scaled_out_freq;
+ }
+
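+ /* keep clk_freq in milli-Hz, matching the internal clock scaling above */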
+ st->clk_freq *= 1000;
+ }
+ /*
+ * Keep in mind that the mask for the clk modes in adis1650*
+ * chips is different (1100 instead of 11100). However, we
+ * are not configuring BIT(4) in these chips and the default
+ * value is 0, so we are fine doing the operations below.
+ * I'm keeping this for simplicity and avoiding extra variables
+ * in chip_info.
+ */
+ ret = __adis_update_bits(&st->adis, ADIS16475_REG_MSG_CTRL,
+ ADIS16475_SYNC_MODE_MASK, sync->sync_mode);
+ if (ret)
+ return ret;
+
+ usleep_range(250, 260);
+
+ return 0;
+}
+
+static int adis16475_config_irq_pin(struct adis16475 *st)
+{
+ int ret;
+ struct irq_data *desc;
+ u32 irq_type;
+ u16 val = 0;
+ u8 polarity;
+ struct spi_device *spi = st->adis.spi;
+
+ desc = irq_get_irq_data(spi->irq);
+ if (!desc) {
+ dev_err(&spi->dev, "Could not find IRQ %d\n", spi->irq);
+ return -EINVAL;
+ }
+ /*
+ * It is possible to configure the data ready polarity. Furthermore, we
+ * need to update the adis struct if we want data ready as active low.
+ */
+ irq_type = irqd_get_trigger_type(desc);
+ if (irq_type == IRQ_TYPE_EDGE_RISING) {
+ polarity = 1;
+ st->adis.irq_flag = IRQF_TRIGGER_RISING;
+ } else if (irq_type == IRQ_TYPE_EDGE_FALLING) {
+ polarity = 0;
+ st->adis.irq_flag = IRQF_TRIGGER_FALLING;
+ } else {
+ dev_err(&spi->dev, "Invalid interrupt type 0x%x specified\n",
+ irq_type);
+ return -EINVAL;
+ }
+
+ val = ADIS16475_MSG_CTRL_DR_POL(polarity);
+ ret = __adis_update_bits(&st->adis, ADIS16475_REG_MSG_CTRL,
+ ADIS16475_MSG_CTRL_DR_POL_MASK, val);
+ if (ret)
+ return ret;
+ /*
+ * Writes to the MSC_CTRL register need some time to take effect. The
+ * delay should not be bigger than 200us, so 250us should be more than
+ * enough!
+ */
+ usleep_range(250, 260);
+
+ return 0;
+}
+
+static const struct of_device_id adis16475_of_match[] = {
+ { .compatible = "adi,adis16470",
+ .data = &adis16475_chip_info[ADIS16470] },
+ { .compatible = "adi,adis16475-1",
+ .data = &adis16475_chip_info[ADIS16475_1] },
+ { .compatible = "adi,adis16475-2",
+ .data = &adis16475_chip_info[ADIS16475_2] },
+ { .compatible = "adi,adis16475-3",
+ .data = &adis16475_chip_info[ADIS16475_3] },
+ { .compatible = "adi,adis16477-1",
+ .data = &adis16475_chip_info[ADIS16477_1] },
+ { .compatible = "adi,adis16477-2",
+ .data = &adis16475_chip_info[ADIS16477_2] },
+ { .compatible = "adi,adis16477-3",
+ .data = &adis16475_chip_info[ADIS16477_3] },
+ { .compatible = "adi,adis16465-1",
+ .data = &adis16475_chip_info[ADIS16465_1] },
+ { .compatible = "adi,adis16465-2",
+ .data = &adis16475_chip_info[ADIS16465_2] },
+ { .compatible = "adi,adis16465-3",
+ .data = &adis16475_chip_info[ADIS16465_3] },
+ { .compatible = "adi,adis16467-1",
+ .data = &adis16475_chip_info[ADIS16467_1] },
+ { .compatible = "adi,adis16467-2",
+ .data = &adis16475_chip_info[ADIS16467_2] },
+ { .compatible = "adi,adis16467-3",
+ .data = &adis16475_chip_info[ADIS16467_3] },
+ { .compatible = "adi,adis16500",
+ .data = &adis16475_chip_info[ADIS16500] },
+ { .compatible = "adi,adis16505-1",
+ .data = &adis16475_chip_info[ADIS16505_1] },
+ { .compatible = "adi,adis16505-2",
+ .data = &adis16475_chip_info[ADIS16505_2] },
+ { .compatible = "adi,adis16505-3",
+ .data = &adis16475_chip_info[ADIS16505_3] },
+ { .compatible = "adi,adis16507-1",
+ .data = &adis16475_chip_info[ADIS16507_1] },
+ { .compatible = "adi,adis16507-2",
+ .data = &adis16475_chip_info[ADIS16507_2] },
+ { .compatible = "adi,adis16507-3",
+ .data = &adis16475_chip_info[ADIS16507_3] },
+ { },
+};
+MODULE_DEVICE_TABLE(of, adis16475_of_match);
+
+static int adis16475_probe(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev;
+ struct adis16475 *st;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+ spi_set_drvdata(spi, indio_dev);
+ st->adis.burst = &adis16475_burst;
+
+ st->info = device_get_match_data(&spi->dev);
+ if (!st->info)
+ return -EINVAL;
+
+ ret = adis_init(&st->adis, indio_dev, spi, &st->info->adis_data);
+ if (ret)
+ return ret;
+
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->name = st->info->name;
+ indio_dev->channels = st->info->channels;
+ indio_dev->num_channels = st->info->num_channels;
+ indio_dev->info = &adis16475_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = __adis_initial_startup(&st->adis);
+ if (ret)
+ return ret;
+
+ ret = adis16475_config_irq_pin(st);
+ if (ret)
+ return ret;
+
+ ret = adis16475_config_sync_mode(st);
+ if (ret)
+ return ret;
+
+ ret = devm_adis_setup_buffer_and_trigger(&st->adis, indio_dev,
+ adis16475_trigger_handler);
+ if (ret)
+ return ret;
+
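+ /* keep the data-ready IRQ disabled until buffered capture is enabled */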
+ adis16475_enable_irq(&st->adis, false);
+
+ ret = devm_iio_device_register(&spi->dev, indio_dev);
+ if (ret)
+ return ret;
+
+ adis16475_debugfs_init(indio_dev);
+
+ return 0;
+}
+
+static struct spi_driver adis16475_driver = {
+ .driver = {
+ .name = "adis16475",
+ .of_match_table = adis16475_of_match,
+ },
+ .probe = adis16475_probe,
+};
+module_spi_driver(adis16475_driver);
+
+MODULE_AUTHOR("Nuno Sa <nuno.sa@analog.com>");
+MODULE_DESCRIPTION("Analog Devices ADIS16475 IMU driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
index cfae0e4476e7..6a471eee110e 100644
--- a/drivers/iio/imu/adis16480.c
+++ b/drivers/iio/imu/adis16480.c
@@ -284,22 +284,18 @@ DEFINE_DEBUGFS_ATTRIBUTE(adis16480_flash_count_fops,
static int adis16480_debugfs_init(struct iio_dev *indio_dev)
{
struct adis16480 *adis16480 = iio_priv(indio_dev);
+ struct dentry *d = iio_get_debugfs_dentry(indio_dev);
debugfs_create_file_unsafe("firmware_revision", 0400,
- indio_dev->debugfs_dentry, adis16480,
- &adis16480_firmware_revision_fops);
+ d, adis16480, &adis16480_firmware_revision_fops);
debugfs_create_file_unsafe("firmware_date", 0400,
- indio_dev->debugfs_dentry, adis16480,
- &adis16480_firmware_date_fops);
+ d, adis16480, &adis16480_firmware_date_fops);
debugfs_create_file_unsafe("serial_number", 0400,
- indio_dev->debugfs_dentry, adis16480,
- &adis16480_serial_number_fops);
+ d, adis16480, &adis16480_serial_number_fops);
debugfs_create_file_unsafe("product_id", 0400,
- indio_dev->debugfs_dentry, adis16480,
- &adis16480_product_id_fops);
+ d, adis16480, &adis16480_product_id_fops);
debugfs_create_file_unsafe("flash_count", 0400,
- indio_dev->debugfs_dentry, adis16480,
- &adis16480_flash_count_fops);
+ d, adis16480, &adis16480_flash_count_fops);
return 0;
}
diff --git a/drivers/iio/imu/adis_buffer.c b/drivers/iio/imu/adis_buffer.c
index 04e5e2a0fd6b..5b4225ee09b9 100644
--- a/drivers/iio/imu/adis_buffer.c
+++ b/drivers/iio/imu/adis_buffer.c
@@ -23,25 +23,30 @@ static int adis_update_scan_mode_burst(struct iio_dev *indio_dev,
const unsigned long *scan_mask)
{
struct adis *adis = iio_device_get_drvdata(indio_dev);
- unsigned int burst_length;
+ unsigned int burst_length, burst_max_length;
u8 *tx;
/* All but the timestamp channel */
burst_length = (indio_dev->num_channels - 1) * sizeof(u16);
- burst_length += adis->burst->extra_len;
+ burst_length += adis->burst->extra_len + adis->burst_extra_len;
+
+ if (adis->burst->burst_max_len)
+ burst_max_length = adis->burst->burst_max_len;
+ else
+ burst_max_length = burst_length;
adis->xfer = kcalloc(2, sizeof(*adis->xfer), GFP_KERNEL);
if (!adis->xfer)
return -ENOMEM;
- adis->buffer = kzalloc(burst_length + sizeof(u16), GFP_KERNEL);
+ adis->buffer = kzalloc(burst_max_length + sizeof(u16), GFP_KERNEL);
if (!adis->buffer) {
kfree(adis->xfer);
adis->xfer = NULL;
return -ENOMEM;
}
- tx = adis->buffer + burst_length;
+ tx = adis->buffer + burst_max_length;
tx[0] = ADIS_READ_REG(adis->burst->reg_cmd);
tx[1] = 0;
@@ -156,6 +161,14 @@ static irqreturn_t adis_trigger_handler(int irq, void *p)
return IRQ_HANDLED;
}
+static void adis_buffer_cleanup(void *arg)
+{
+ struct adis *adis = arg;
+
+ kfree(adis->buffer);
+ kfree(adis->xfer);
+}
+
/**
* adis_setup_buffer_and_trigger() - Sets up buffer and trigger for the adis device
* @adis: The adis device.
@@ -199,6 +212,43 @@ error_buffer_cleanup:
EXPORT_SYMBOL_GPL(adis_setup_buffer_and_trigger);
/**
+ * devm_adis_setup_buffer_and_trigger() - Sets up buffer and trigger for
+ * the managed adis device
+ * @adis: The adis device
+ * @indio_dev: The IIO device
+ * @trigger_handler: Optional trigger handler, may be NULL.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ *
+ * This function performs exactly the same task as adis_setup_buffer_and_trigger().
+ */
+int
+devm_adis_setup_buffer_and_trigger(struct adis *adis, struct iio_dev *indio_dev,
+ irq_handler_t trigger_handler)
+{
+ int ret;
+
+ if (!trigger_handler)
+ trigger_handler = adis_trigger_handler;
+
+ ret = devm_iio_triggered_buffer_setup(&adis->spi->dev, indio_dev,
+ &iio_pollfunc_store_time,
+ trigger_handler, NULL);
+ if (ret)
+ return ret;
+
+ if (adis->spi->irq) {
+ ret = devm_adis_probe_trigger(adis, indio_dev);
+ if (ret)
+ return ret;
+ }
+
+ return devm_add_action_or_reset(&adis->spi->dev, adis_buffer_cleanup,
+ adis);
+}
+EXPORT_SYMBOL_GPL(devm_adis_setup_buffer_and_trigger);
+
+/**
* adis_cleanup_buffer_and_trigger() - Free buffer and trigger resources
* @adis: The adis device.
* @indio_dev: The IIO device.
diff --git a/drivers/iio/imu/adis_trigger.c b/drivers/iio/imu/adis_trigger.c
index 8b9cd02c0f9f..8afe71947c00 100644
--- a/drivers/iio/imu/adis_trigger.c
+++ b/drivers/iio/imu/adis_trigger.c
@@ -27,6 +27,34 @@ static const struct iio_trigger_ops adis_trigger_ops = {
.set_trigger_state = &adis_data_rdy_trigger_set_state,
};
+static void adis_trigger_setup(struct adis *adis)
+{
+ adis->trig->dev.parent = &adis->spi->dev;
+ adis->trig->ops = &adis_trigger_ops;
+ iio_trigger_set_drvdata(adis->trig, adis);
+}
+
+static int adis_validate_irq_flag(struct adis *adis)
+{
+ /*
+ * Typically these devices have data ready either on the rising edge or
+ * on the falling edge of the data ready pin. This check enforces that
+ * one of those is set in the drivers... It defaults to
+ * IRQF_TRIGGER_RISING for backward compatibility with devices that
+ * don't support changing the pin polarity.
+ */
+ if (!adis->irq_flag) {
+ adis->irq_flag = IRQF_TRIGGER_RISING;
+ return 0;
+ } else if (adis->irq_flag != IRQF_TRIGGER_RISING &&
+ adis->irq_flag != IRQF_TRIGGER_FALLING) {
+ dev_err(&adis->spi->dev, "Invalid IRQ mask: %08lx\n",
+ adis->irq_flag);
+ return -EINVAL;
+ }
+
+ return 0;
+}
/**
* adis_probe_trigger() - Sets up trigger for a adis device
* @adis: The adis device
@@ -45,13 +73,15 @@ int adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev)
if (adis->trig == NULL)
return -ENOMEM;
- adis->trig->dev.parent = &adis->spi->dev;
- adis->trig->ops = &adis_trigger_ops;
- iio_trigger_set_drvdata(adis->trig, adis);
+ adis_trigger_setup(adis);
+
+ ret = adis_validate_irq_flag(adis);
+ if (ret)
+ return ret;
ret = request_irq(adis->spi->irq,
&iio_trigger_generic_data_rdy_poll,
- IRQF_TRIGGER_RISING,
+ adis->irq_flag,
indio_dev->name,
adis->trig);
if (ret)
@@ -74,6 +104,40 @@ error_free_trig:
EXPORT_SYMBOL_GPL(adis_probe_trigger);
/**
+ * devm_adis_probe_trigger() - Sets up trigger for a managed adis device
+ * @adis: The adis device
+ * @indio_dev: The IIO device
+ *
+ * Returns 0 on success or a negative error code
+ */
+int devm_adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev)
+{
+ int ret;
+
+ adis->trig = devm_iio_trigger_alloc(&adis->spi->dev, "%s-dev%d",
+ indio_dev->name, indio_dev->id);
+ if (!adis->trig)
+ return -ENOMEM;
+
+ adis_trigger_setup(adis);
+
+ ret = adis_validate_irq_flag(adis);
+ if (ret)
+ return ret;
+
+ ret = devm_request_irq(&adis->spi->dev, adis->spi->irq,
+ &iio_trigger_generic_data_rdy_poll,
+ adis->irq_flag,
+ indio_dev->name,
+ adis->trig);
+ if (ret)
+ return ret;
+
+ return devm_iio_trigger_register(&adis->spi->dev, adis->trig);
+}
+EXPORT_SYMBOL_GPL(devm_adis_probe_trigger);
+
+/**
* adis_remove_trigger() - Remove trigger for a adis devices
* @adis: The adis device
*
diff --git a/drivers/iio/imu/bmi160/bmi160_i2c.c b/drivers/iio/imu/bmi160/bmi160_i2c.c
index e36f5e82d400..26398614eddf 100644
--- a/drivers/iio/imu/bmi160/bmi160_i2c.c
+++ b/drivers/iio/imu/bmi160/bmi160_i2c.c
@@ -24,8 +24,8 @@ static int bmi160_i2c_probe(struct i2c_client *client,
regmap = devm_regmap_init_i2c(client, &bmi160_regmap_config);
if (IS_ERR(regmap)) {
- dev_err(&client->dev, "Failed to register i2c regmap %d\n",
- (int)PTR_ERR(regmap));
+ dev_err(&client->dev, "Failed to register i2c regmap: %pe\n",
+ regmap);
return PTR_ERR(regmap);
}
diff --git a/drivers/iio/imu/bmi160/bmi160_spi.c b/drivers/iio/imu/bmi160/bmi160_spi.c
index c19e3df35559..61389b41c6d9 100644
--- a/drivers/iio/imu/bmi160/bmi160_spi.c
+++ b/drivers/iio/imu/bmi160/bmi160_spi.c
@@ -20,8 +20,8 @@ static int bmi160_spi_probe(struct spi_device *spi)
regmap = devm_regmap_init_spi(spi, &bmi160_regmap_config);
if (IS_ERR(regmap)) {
- dev_err(&spi->dev, "Failed to register spi regmap %d\n",
- (int)PTR_ERR(regmap));
+ dev_err(&spi->dev, "Failed to register spi regmap: %pe\n",
+ regmap);
return PTR_ERR(regmap);
}
return bmi160_core_probe(&spi->dev, regmap, id->name, true);
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c
index 2f8560ba4572..c27d06035c8b 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c
@@ -135,6 +135,7 @@ int inv_mpu_acpi_create_mux_client(struct i2c_client *client)
st->mux_client = NULL;
if (ACPI_HANDLE(&client->dev)) {
struct i2c_board_info info;
+ struct i2c_client *mux_client;
struct acpi_device *adev;
int ret = -1;
@@ -172,9 +173,10 @@ int inv_mpu_acpi_create_mux_client(struct i2c_client *client)
} else
return 0; /* no secondary addr, which is OK */
}
- st->mux_client = i2c_new_device(st->muxc->adapter[0], &info);
- if (!st->mux_client)
- return -ENODEV;
+ mux_client = i2c_new_client_device(st->muxc->adapter[0], &info);
+ if (IS_ERR(mux_client))
+ return PTR_ERR(mux_client);
+ st->mux_client = mux_client;
}
return 0;
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index 0b8d2f7a0165..4d604fe842e5 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -526,7 +526,7 @@ static int inv_mpu6050_sensor_set(struct inv_mpu6050_state *st, int reg,
__be16 d = cpu_to_be16(val);
ind = (axis - IIO_MOD_X) * 2;
- result = regmap_bulk_write(st->map, reg + ind, (u8 *)&d, 2);
+ result = regmap_bulk_write(st->map, reg + ind, &d, sizeof(d));
if (result)
return -EINVAL;
@@ -540,7 +540,7 @@ static int inv_mpu6050_sensor_show(struct inv_mpu6050_state *st, int reg,
__be16 d;
ind = (axis - IIO_MOD_X) * 2;
- result = regmap_bulk_read(st->map, reg + ind, (u8 *)&d, 2);
+ result = regmap_bulk_read(st->map, reg + ind, &d, sizeof(d));
if (result)
return -EINVAL;
*val = (short)be16_to_cpup(&d);
@@ -1248,12 +1248,31 @@ static const struct attribute_group inv_attribute_group = {
.attrs = inv_attributes
};
+static int inv_mpu6050_reg_access(struct iio_dev *indio_dev,
+ unsigned int reg,
+ unsigned int writeval,
+ unsigned int *readval)
+{
+ struct inv_mpu6050_state *st = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&st->lock);
+ if (readval)
+ ret = regmap_read(st->map, reg, readval);
+ else
+ ret = regmap_write(st->map, reg, writeval);
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
static const struct iio_info mpu_info = {
.read_raw = &inv_mpu6050_read_raw,
.write_raw = &inv_mpu6050_write_raw,
.write_raw_get_fmt = &inv_write_raw_get_fmt,
.attrs = &inv_attribute_group,
.validate_trigger = inv_mpu6050_validate_trigger,
+ .debugfs_reg_access = &inv_mpu6050_reg_access,
};
/**
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
index 6993d3b87bb0..28cfae1e61cf 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
@@ -122,8 +122,8 @@ static int inv_mpu_probe(struct i2c_client *client,
regmap = devm_regmap_init_i2c(client, &inv_mpu_regmap_config);
if (IS_ERR(regmap)) {
- dev_err(&client->dev, "Failed to register i2c regmap %d\n",
- (int)PTR_ERR(regmap));
+ dev_err(&client->dev, "Failed to register i2c regmap: %pe\n",
+ regmap);
return PTR_ERR(regmap);
}
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
index 673b198e6368..6f968ce687e1 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
@@ -53,8 +53,8 @@ static int inv_mpu_probe(struct spi_device *spi)
regmap = devm_regmap_init_spi(spi, &inv_mpu_regmap_config);
if (IS_ERR(regmap)) {
- dev_err(&spi->dev, "Failed to register spi regmap %d\n",
- (int)PTR_ERR(regmap));
+ dev_err(&spi->dev, "Failed to register spi regmap: %pe\n",
+ regmap);
return PTR_ERR(regmap);
}
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
index 41cb20cb3809..b56df409ed0f 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
@@ -111,7 +111,7 @@ struct st_lsm6dsx_odr {
u8 val;
};
-#define ST_LSM6DSX_ODR_LIST_SIZE 6
+#define ST_LSM6DSX_ODR_LIST_SIZE 8
struct st_lsm6dsx_odr_table_entry {
struct st_lsm6dsx_reg reg;
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
index 4426524b59f2..0b776cb91928 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
@@ -27,7 +27,8 @@
* - FIFO size: 4KB
*
* - LSM6DSO/LSM6DSOX/ASM330LHH/LSM6DSR/ISM330DHCX:
- * - Accelerometer/Gyroscope supported ODR [Hz]: 13, 26, 52, 104, 208, 416
+ * - Accelerometer/Gyroscope supported ODR [Hz]: 13, 26, 52, 104, 208, 416,
+ * 833
* - Accelerometer supported full-scale [g]: +-2/+-4/+-8/+-16
* - Gyroscope supported full-scale [dps]: +-125/+-245/+-500/+-1000/+-2000
* - FIFO size: 3KB
@@ -791,7 +792,8 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.odr_avl[3] = { 104000, 0x04 },
.odr_avl[4] = { 208000, 0x05 },
.odr_avl[5] = { 416000, 0x06 },
- .odr_len = 6,
+ .odr_avl[6] = { 833000, 0x07 },
+ .odr_len = 7,
},
[ST_LSM6DSX_ID_GYRO] = {
.reg = {
@@ -804,7 +806,8 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.odr_avl[3] = { 104000, 0x04 },
.odr_avl[4] = { 208000, 0x05 },
.odr_avl[5] = { 416000, 0x06 },
- .odr_len = 6,
+ .odr_avl[6] = { 833000, 0x07 },
+ .odr_len = 7,
},
},
.fs_table = {
@@ -994,7 +997,8 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.odr_avl[3] = { 104000, 0x04 },
.odr_avl[4] = { 208000, 0x05 },
.odr_avl[5] = { 416000, 0x06 },
- .odr_len = 6,
+ .odr_avl[6] = { 833000, 0x07 },
+ .odr_len = 7,
},
[ST_LSM6DSX_ID_GYRO] = {
.reg = {
@@ -1007,7 +1011,8 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.odr_avl[3] = { 104000, 0x04 },
.odr_avl[4] = { 208000, 0x05 },
.odr_avl[5] = { 416000, 0x06 },
- .odr_len = 6,
+ .odr_avl[6] = { 833000, 0x07 },
+ .odr_len = 7,
},
},
.fs_table = {
@@ -1171,7 +1176,8 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.odr_avl[3] = { 104000, 0x04 },
.odr_avl[4] = { 208000, 0x05 },
.odr_avl[5] = { 416000, 0x06 },
- .odr_len = 6,
+ .odr_avl[6] = { 833000, 0x07 },
+ .odr_len = 7,
},
[ST_LSM6DSX_ID_GYRO] = {
.reg = {
@@ -1184,7 +1190,8 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.odr_avl[3] = { 104000, 0x04 },
.odr_avl[4] = { 208000, 0x05 },
.odr_avl[5] = { 416000, 0x06 },
- .odr_len = 6,
+ .odr_avl[6] = { 833000, 0x07 },
+ .odr_len = 7,
},
},
.fs_table = {
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
index 1cf98195f84d..c1f83fe0d8da 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
@@ -88,6 +88,69 @@ static const struct st_lsm6dsx_ext_dev_settings st_lsm6dsx_ext_dev_table[] = {
.len = 6,
},
},
+ /* LIS3MDL */
+ {
+ .i2c_addr = { 0x1e },
+ .wai = {
+ .addr = 0x0f,
+ .val = 0x3d,
+ },
+ .id = ST_LSM6DSX_ID_MAGN,
+ .odr_table = {
+ .reg = {
+ .addr = 0x20,
+ .mask = GENMASK(4, 2),
+ },
+ .odr_avl[0] = { 1000, 0x0 },
+ .odr_avl[1] = { 2000, 0x1 },
+ .odr_avl[2] = { 3000, 0x2 },
+ .odr_avl[3] = { 5000, 0x3 },
+ .odr_avl[4] = { 10000, 0x4 },
+ .odr_avl[5] = { 20000, 0x5 },
+ .odr_avl[6] = { 40000, 0x6 },
+ .odr_avl[7] = { 80000, 0x7 },
+ .odr_len = 8,
+ },
+ .fs_table = {
+ .reg = {
+ .addr = 0x21,
+ .mask = GENMASK(6, 5),
+ },
+ .fs_avl[0] = {
+ .gain = 146,
+ .val = 0x00,
+ }, /* 4000 uG/LSB */
+ .fs_avl[1] = {
+ .gain = 292,
+ .val = 0x01,
+ }, /* 8000 uG/LSB */
+ .fs_avl[2] = {
+ .gain = 438,
+ .val = 0x02,
+ }, /* 12000 uG/LSB */
+ .fs_avl[3] = {
+ .gain = 584,
+ .val = 0x03,
+ }, /* 16000 uG/LSB */
+ .fs_len = 4,
+ },
+ .pwr_table = {
+ .reg = {
+ .addr = 0x22,
+ .mask = GENMASK(1, 0),
+ },
+ .off_val = 0x2,
+ .on_val = 0x0,
+ },
+ .bdu = {
+ .addr = 0x24,
+ .mask = BIT(6),
+ },
+ .out = {
+ .addr = 0x28,
+ .len = 6,
+ },
+ },
};
static void st_lsm6dsx_shub_wait_complete(struct st_lsm6dsx_hw *hw)
@@ -519,6 +582,36 @@ st_lsm6dsx_shub_read_raw(struct iio_dev *iio_dev,
}
static int
+st_lsm6dsx_shub_set_full_scale(struct st_lsm6dsx_sensor *sensor,
+ u32 gain)
+{
+ const struct st_lsm6dsx_fs_table_entry *fs_table;
+ int i, err;
+
+ fs_table = &sensor->ext_info.settings->fs_table;
+ if (!fs_table->reg.addr)
+ return -ENOTSUPP;
+
+ for (i = 0; i < fs_table->fs_len; i++) {
+ if (fs_table->fs_avl[i].gain == gain)
+ break;
+ }
+
+ if (i == fs_table->fs_len)
+ return -EINVAL;
+
+ err = st_lsm6dsx_shub_write_with_mask(sensor, fs_table->reg.addr,
+ fs_table->reg.mask,
+ fs_table->fs_avl[i].val);
+ if (err < 0)
+ return err;
+
+ sensor->gain = gain;
+
+ return 0;
+}
+
+static int
st_lsm6dsx_shub_write_raw(struct iio_dev *iio_dev,
struct iio_chan_spec const *chan,
int val, int val2, long mask)
@@ -554,6 +647,9 @@ st_lsm6dsx_shub_write_raw(struct iio_dev *iio_dev,
}
break;
}
+ case IIO_CHAN_INFO_SCALE:
+ err = st_lsm6dsx_shub_set_full_scale(sensor, val2);
+ break;
default:
err = -EINVAL;
break;
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index 4ada5592aa2b..9fa238c0a7d4 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -189,10 +189,12 @@ __poll_t iio_buffer_poll(struct file *filp,
*/
void iio_buffer_wakeup_poll(struct iio_dev *indio_dev)
{
- if (!indio_dev->buffer)
+ struct iio_buffer *buffer = indio_dev->buffer;
+
+ if (!buffer)
return;
- wake_up(&indio_dev->buffer->pollq);
+ wake_up(&buffer->pollq);
}
void iio_buffer_init(struct iio_buffer *buffer)
@@ -262,10 +264,11 @@ static ssize_t iio_scan_el_show(struct device *dev,
{
int ret;
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct iio_buffer *buffer = indio_dev->buffer;
/* Ensure ret is 0 or 1. */
ret = !!test_bit(to_iio_dev_attr(attr)->address,
- indio_dev->buffer->scan_mask);
+ buffer->scan_mask);
return sprintf(buf, "%d\n", ret);
}
@@ -316,8 +319,7 @@ static int iio_scan_mask_set(struct iio_dev *indio_dev,
const unsigned long *mask;
unsigned long *trialmask;
- trialmask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
- sizeof(*trialmask), GFP_KERNEL);
+ trialmask = bitmap_zalloc(indio_dev->masklength, GFP_KERNEL);
if (trialmask == NULL)
return -ENOMEM;
if (!indio_dev->masklength) {
@@ -382,7 +384,7 @@ static ssize_t iio_scan_el_store(struct device *dev,
if (ret < 0)
return ret;
mutex_lock(&indio_dev->mlock);
- if (iio_buffer_is_active(indio_dev->buffer)) {
+ if (iio_buffer_is_active(buffer)) {
ret = -EBUSY;
goto error_ret;
}
@@ -411,7 +413,9 @@ static ssize_t iio_scan_el_ts_show(struct device *dev,
char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
+ struct iio_buffer *buffer = indio_dev->buffer;
+
+ return sprintf(buf, "%d\n", buffer->scan_timestamp);
}
static ssize_t iio_scan_el_ts_store(struct device *dev,
@@ -421,6 +425,7 @@ static ssize_t iio_scan_el_ts_store(struct device *dev,
{
int ret;
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct iio_buffer *buffer = indio_dev->buffer;
bool state;
ret = strtobool(buf, &state);
@@ -428,11 +433,11 @@ static ssize_t iio_scan_el_ts_store(struct device *dev,
return ret;
mutex_lock(&indio_dev->mlock);
- if (iio_buffer_is_active(indio_dev->buffer)) {
+ if (iio_buffer_is_active(buffer)) {
ret = -EBUSY;
goto error_ret;
}
- indio_dev->buffer->scan_timestamp = state;
+ buffer->scan_timestamp = state;
error_ret:
mutex_unlock(&indio_dev->mlock);
@@ -440,10 +445,10 @@ error_ret:
}
static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
+ struct iio_buffer *buffer,
const struct iio_chan_spec *chan)
{
int ret, attrcount = 0;
- struct iio_buffer *buffer = indio_dev->buffer;
ret = __iio_add_chan_devattr("index",
chan,
@@ -519,7 +524,7 @@ static ssize_t iio_buffer_write_length(struct device *dev,
return len;
mutex_lock(&indio_dev->mlock);
- if (iio_buffer_is_active(indio_dev->buffer)) {
+ if (iio_buffer_is_active(buffer)) {
ret = -EBUSY;
} else {
buffer->access->set_length(buffer, val);
@@ -540,7 +545,9 @@ static ssize_t iio_buffer_show_enable(struct device *dev,
char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- return sprintf(buf, "%d\n", iio_buffer_is_active(indio_dev->buffer));
+ struct iio_buffer *buffer = indio_dev->buffer;
+
+ return sprintf(buf, "%d\n", iio_buffer_is_active(buffer));
}
static unsigned int iio_storage_bytes_for_si(struct iio_dev *indio_dev,
@@ -687,6 +694,13 @@ static int iio_verify_update(struct iio_dev *indio_dev,
bool scan_timestamp;
unsigned int modes;
+ if (insert_buffer &&
+ bitmap_empty(insert_buffer->scan_mask, indio_dev->masklength)) {
+ dev_dbg(&indio_dev->dev,
+ "At least one scan element must be enabled first\n");
+ return -EINVAL;
+ }
+
memset(config, 0, sizeof(*config));
config->watermark = ~0;
@@ -913,6 +927,7 @@ static int iio_enable_buffers(struct iio_dev *indio_dev,
indio_dev->active_scan_mask = config->scan_mask;
indio_dev->scan_timestamp = config->scan_timestamp;
indio_dev->scan_bytes = config->scan_bytes;
+ indio_dev->currentmode = config->mode;
iio_update_demux(indio_dev);
@@ -948,8 +963,6 @@ static int iio_enable_buffers(struct iio_dev *indio_dev,
goto err_disable_buffers;
}
- indio_dev->currentmode = config->mode;
-
if (indio_dev->setup_ops->postenable) {
ret = indio_dev->setup_ops->postenable(indio_dev);
if (ret) {
@@ -966,10 +979,10 @@ err_disable_buffers:
buffer_list)
iio_buffer_disable(buffer, indio_dev);
err_run_postdisable:
- indio_dev->currentmode = INDIO_DIRECT_MODE;
if (indio_dev->setup_ops->postdisable)
indio_dev->setup_ops->postdisable(indio_dev);
err_undo_config:
+ indio_dev->currentmode = INDIO_DIRECT_MODE;
indio_dev->active_scan_mask = NULL;
return ret;
@@ -1004,8 +1017,6 @@ static int iio_disable_buffers(struct iio_dev *indio_dev)
ret = ret2;
}
- indio_dev->currentmode = INDIO_DIRECT_MODE;
-
if (indio_dev->setup_ops->postdisable) {
ret2 = indio_dev->setup_ops->postdisable(indio_dev);
if (ret2 && !ret)
@@ -1014,6 +1025,7 @@ static int iio_disable_buffers(struct iio_dev *indio_dev)
iio_free_scan_mask(indio_dev, indio_dev->active_scan_mask);
indio_dev->active_scan_mask = NULL;
+ indio_dev->currentmode = INDIO_DIRECT_MODE;
return ret;
}
@@ -1123,6 +1135,7 @@ static ssize_t iio_buffer_store_enable(struct device *dev,
int ret;
bool requested_state;
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct iio_buffer *buffer = indio_dev->buffer;
bool inlist;
ret = strtobool(buf, &requested_state);
@@ -1132,17 +1145,15 @@ static ssize_t iio_buffer_store_enable(struct device *dev,
mutex_lock(&indio_dev->mlock);
/* Find out if it is in the list */
- inlist = iio_buffer_is_active(indio_dev->buffer);
+ inlist = iio_buffer_is_active(buffer);
/* Already in desired state */
if (inlist == requested_state)
goto done;
if (requested_state)
- ret = __iio_update_buffers(indio_dev,
- indio_dev->buffer, NULL);
+ ret = __iio_update_buffers(indio_dev, buffer, NULL);
else
- ret = __iio_update_buffers(indio_dev,
- NULL, indio_dev->buffer);
+ ret = __iio_update_buffers(indio_dev, NULL, buffer);
done:
mutex_unlock(&indio_dev->mlock);
@@ -1184,7 +1195,7 @@ static ssize_t iio_buffer_store_watermark(struct device *dev,
goto out;
}
- if (iio_buffer_is_active(indio_dev->buffer)) {
+ if (iio_buffer_is_active(buffer)) {
ret = -EBUSY;
goto out;
}
@@ -1201,11 +1212,9 @@ static ssize_t iio_dma_show_data_available(struct device *dev,
char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- size_t bytes;
-
- bytes = iio_buffer_data_available(indio_dev->buffer);
+ struct iio_buffer *buffer = indio_dev->buffer;
- return sprintf(buf, "%zu\n", bytes);
+ return sprintf(buf, "%zu\n", iio_buffer_data_available(buffer));
}
static DEVICE_ATTR(length, S_IRUGO | S_IWUSR, iio_buffer_read_length,
@@ -1233,7 +1242,7 @@ int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
struct iio_dev_attr *p;
struct attribute **attr;
struct iio_buffer *buffer = indio_dev->buffer;
- int ret, i, attrn, attrcount, attrcount_orig = 0;
+ int ret, i, attrn, attrcount;
const struct iio_chan_spec *channels;
channels = indio_dev->channels;
@@ -1277,12 +1286,7 @@ int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
indio_dev->groups[indio_dev->groupcounter++] = &buffer->buffer_group;
- if (buffer->scan_el_attrs != NULL) {
- attr = buffer->scan_el_attrs->attrs;
- while (*attr++ != NULL)
- attrcount_orig++;
- }
- attrcount = attrcount_orig;
+ attrcount = 0;
INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
channels = indio_dev->channels;
if (channels) {
@@ -1291,7 +1295,7 @@ int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
if (channels[i].scan_index < 0)
continue;
- ret = iio_buffer_add_channel_sysfs(indio_dev,
+ ret = iio_buffer_add_channel_sysfs(indio_dev, buffer,
&channels[i]);
if (ret < 0)
goto error_cleanup_dynamic;
@@ -1319,10 +1323,7 @@ int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
ret = -ENOMEM;
goto error_free_scan_mask;
}
- if (buffer->scan_el_attrs)
- memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs,
- sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
- attrn = attrcount_orig;
+ attrn = 0;
list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
@@ -1334,20 +1335,22 @@ error_free_scan_mask:
bitmap_free(buffer->scan_mask);
error_cleanup_dynamic:
iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);
- kfree(indio_dev->buffer->buffer_group.attrs);
+ kfree(buffer->buffer_group.attrs);
return ret;
}
void iio_buffer_free_sysfs_and_mask(struct iio_dev *indio_dev)
{
- if (!indio_dev->buffer)
+ struct iio_buffer *buffer = indio_dev->buffer;
+
+ if (!buffer)
return;
- bitmap_free(indio_dev->buffer->scan_mask);
- kfree(indio_dev->buffer->buffer_group.attrs);
- kfree(indio_dev->buffer->scan_el_group.attrs);
- iio_free_chan_devattr_list(&indio_dev->buffer->scan_el_dev_attr_list);
+ bitmap_free(buffer->scan_mask);
+ kfree(buffer->buffer_group.attrs);
+ kfree(buffer->scan_el_group.attrs);
+ iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);
}
/**
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 24f7bbff4938..1527f01a44f1 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -572,46 +572,46 @@ static ssize_t __iio_format_value(char *buf, size_t len, unsigned int type,
switch (type) {
case IIO_VAL_INT:
- return snprintf(buf, len, "%d", vals[0]);
+ return scnprintf(buf, len, "%d", vals[0]);
case IIO_VAL_INT_PLUS_MICRO_DB:
scale_db = true;
/* fall through */
case IIO_VAL_INT_PLUS_MICRO:
if (vals[1] < 0)
- return snprintf(buf, len, "-%d.%06u%s", abs(vals[0]),
+ return scnprintf(buf, len, "-%d.%06u%s", abs(vals[0]),
-vals[1], scale_db ? " dB" : "");
else
- return snprintf(buf, len, "%d.%06u%s", vals[0], vals[1],
+ return scnprintf(buf, len, "%d.%06u%s", vals[0], vals[1],
scale_db ? " dB" : "");
case IIO_VAL_INT_PLUS_NANO:
if (vals[1] < 0)
- return snprintf(buf, len, "-%d.%09u", abs(vals[0]),
+ return scnprintf(buf, len, "-%d.%09u", abs(vals[0]),
-vals[1]);
else
- return snprintf(buf, len, "%d.%09u", vals[0], vals[1]);
+ return scnprintf(buf, len, "%d.%09u", vals[0], vals[1]);
case IIO_VAL_FRACTIONAL:
tmp = div_s64((s64)vals[0] * 1000000000LL, vals[1]);
tmp1 = vals[1];
tmp0 = (int)div_s64_rem(tmp, 1000000000, &tmp1);
- return snprintf(buf, len, "%d.%09u", tmp0, abs(tmp1));
+ return scnprintf(buf, len, "%d.%09u", tmp0, abs(tmp1));
case IIO_VAL_FRACTIONAL_LOG2:
tmp = shift_right((s64)vals[0] * 1000000000LL, vals[1]);
tmp0 = (int)div_s64_rem(tmp, 1000000000LL, &tmp1);
- return snprintf(buf, len, "%d.%09u", tmp0, abs(tmp1));
+ return scnprintf(buf, len, "%d.%09u", tmp0, abs(tmp1));
case IIO_VAL_INT_MULTIPLE:
{
int i;
int l = 0;
for (i = 0; i < size; ++i) {
- l += snprintf(&buf[l], len - l, "%d ", vals[i]);
+ l += scnprintf(&buf[l], len - l, "%d ", vals[i]);
if (l >= len)
break;
}
return l;
}
case IIO_VAL_CHAR:
- return snprintf(buf, len, "%c", (char)vals[0]);
+ return scnprintf(buf, len, "%c", (char)vals[0]);
default:
return 0;
}
@@ -682,10 +682,10 @@ static ssize_t iio_format_avail_list(char *buf, const int *vals,
if (len >= PAGE_SIZE)
return -EFBIG;
if (i < length - 1)
- len += snprintf(buf + len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
" ");
else
- len += snprintf(buf + len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
"\n");
if (len >= PAGE_SIZE)
return -EFBIG;
@@ -698,10 +698,10 @@ static ssize_t iio_format_avail_list(char *buf, const int *vals,
if (len >= PAGE_SIZE)
return -EFBIG;
if (i < length / 2 - 1)
- len += snprintf(buf + len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
" ");
else
- len += snprintf(buf + len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
"\n");
if (len >= PAGE_SIZE)
return -EFBIG;
@@ -725,10 +725,10 @@ static ssize_t iio_format_avail_range(char *buf, const int *vals, int type)
if (len >= PAGE_SIZE)
return -EFBIG;
if (i < 2)
- len += snprintf(buf + len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
" ");
else
- len += snprintf(buf + len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
"]\n");
if (len >= PAGE_SIZE)
return -EFBIG;
@@ -741,10 +741,10 @@ static ssize_t iio_format_avail_range(char *buf, const int *vals, int type)
if (len >= PAGE_SIZE)
return -EFBIG;
if (i < 2)
- len += snprintf(buf + len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
" ");
else
- len += snprintf(buf + len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
"]\n");
if (len >= PAGE_SIZE)
return -EFBIG;
@@ -1507,27 +1507,27 @@ struct iio_dev *iio_device_alloc(int sizeof_priv)
alloc_size += IIO_ALIGN - 1;
dev = kzalloc(alloc_size, GFP_KERNEL);
+ if (!dev)
+ return NULL;
- if (dev) {
- dev->dev.groups = dev->groups;
- dev->dev.type = &iio_device_type;
- dev->dev.bus = &iio_bus_type;
- device_initialize(&dev->dev);
- dev_set_drvdata(&dev->dev, (void *)dev);
- mutex_init(&dev->mlock);
- mutex_init(&dev->info_exist_lock);
- INIT_LIST_HEAD(&dev->channel_attr_list);
-
- dev->id = ida_simple_get(&iio_ida, 0, 0, GFP_KERNEL);
- if (dev->id < 0) {
- /* cannot use a dev_err as the name isn't available */
- pr_err("failed to get device id\n");
- kfree(dev);
- return NULL;
- }
- dev_set_name(&dev->dev, "iio:device%d", dev->id);
- INIT_LIST_HEAD(&dev->buffer_list);
+ dev->dev.groups = dev->groups;
+ dev->dev.type = &iio_device_type;
+ dev->dev.bus = &iio_bus_type;
+ device_initialize(&dev->dev);
+ dev_set_drvdata(&dev->dev, (void *)dev);
+ mutex_init(&dev->mlock);
+ mutex_init(&dev->info_exist_lock);
+ INIT_LIST_HEAD(&dev->channel_attr_list);
+
+ dev->id = ida_simple_get(&iio_ida, 0, 0, GFP_KERNEL);
+ if (dev->id < 0) {
+ /* cannot use a dev_err as the name isn't available */
+ pr_err("failed to get device id\n");
+ kfree(dev);
+ return NULL;
}
+ dev_set_name(&dev->dev, "iio:device%d", dev->id);
+ INIT_LIST_HEAD(&dev->buffer_list);
return dev;
}
@@ -1549,17 +1549,6 @@ static void devm_iio_device_release(struct device *dev, void *res)
iio_device_free(*(struct iio_dev **)res);
}
-int devm_iio_device_match(struct device *dev, void *res, void *data)
-{
- struct iio_dev **r = res;
- if (!r || !*r) {
- WARN_ON(!r || !*r);
- return 0;
- }
- return *r == data;
-}
-EXPORT_SYMBOL_GPL(devm_iio_device_match);
-
/**
* devm_iio_device_alloc - Resource-managed iio_device_alloc()
* @dev: Device to allocate iio_dev for
@@ -1568,9 +1557,6 @@ EXPORT_SYMBOL_GPL(devm_iio_device_match);
* Managed iio_device_alloc. iio_dev allocated with this function is
* automatically freed on driver detach.
*
- * If an iio_dev allocated with this function needs to be freed separately,
- * devm_iio_device_free() must be used.
- *
* RETURNS:
* Pointer to allocated iio_dev on success, NULL on failure.
*/
@@ -1596,23 +1582,6 @@ struct iio_dev *devm_iio_device_alloc(struct device *dev, int sizeof_priv)
EXPORT_SYMBOL_GPL(devm_iio_device_alloc);
/**
- * devm_iio_device_free - Resource-managed iio_device_free()
- * @dev: Device this iio_dev belongs to
- * @iio_dev: the iio_dev associated with the device
- *
- * Free iio_dev allocated with devm_iio_device_alloc().
- */
-void devm_iio_device_free(struct device *dev, struct iio_dev *iio_dev)
-{
- int rc;
-
- rc = devres_release(dev, devm_iio_device_release,
- devm_iio_device_match, iio_dev);
- WARN_ON(rc);
-}
-EXPORT_SYMBOL_GPL(devm_iio_device_free);
-
-/**
* iio_chrdev_open() - chrdev file open for buffer access and ioctls
* @inode: Inode structure for identifying the device in the file system
* @filp: File structure for iio device used to keep and later access
@@ -1714,6 +1683,9 @@ int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod)
{
int ret;
+ if (!indio_dev->info)
+ return -EINVAL;
+
indio_dev->driver_module = this_mod;
/* If the calling driver did not initialize of_node, do it here */
if (!indio_dev->dev.of_node && indio_dev->dev.parent)
@@ -1726,9 +1698,6 @@ int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod)
if (ret < 0)
return ret;
- if (!indio_dev->info)
- return -EINVAL;
-
/* configure elements for the chrdev */
indio_dev->dev.devt = MKDEV(MAJOR(iio_devt), indio_dev->id);
@@ -1834,23 +1803,6 @@ int __devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev,
EXPORT_SYMBOL_GPL(__devm_iio_device_register);
/**
- * devm_iio_device_unregister - Resource-managed iio_device_unregister()
- * @dev: Device this iio_dev belongs to
- * @indio_dev: the iio_dev associated with the device
- *
- * Unregister iio_dev registered with devm_iio_device_register().
- */
-void devm_iio_device_unregister(struct device *dev, struct iio_dev *indio_dev)
-{
- int rc;
-
- rc = devres_release(dev, devm_iio_device_unreg,
- devm_iio_device_match, indio_dev);
- WARN_ON(rc);
-}
-EXPORT_SYMBOL_GPL(devm_iio_device_unregister);
-
-/**
* iio_device_claim_direct_mode - Keep device in direct mode
* @indio_dev: the iio_dev associated with the device
*
diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
index 3908a9a90035..53d1931f6be8 100644
--- a/drivers/iio/industrialio-trigger.c
+++ b/drivers/iio/industrialio-trigger.c
@@ -585,18 +585,6 @@ static void devm_iio_trigger_release(struct device *dev, void *res)
iio_trigger_free(*(struct iio_trigger **)res);
}
-static int devm_iio_trigger_match(struct device *dev, void *res, void *data)
-{
- struct iio_trigger **r = res;
-
- if (!r || !*r) {
- WARN_ON(!r || !*r);
- return 0;
- }
-
- return *r == data;
-}
-
/**
* devm_iio_trigger_alloc - Resource-managed iio_trigger_alloc()
* @dev: Device to allocate iio_trigger for
@@ -608,9 +596,6 @@ static int devm_iio_trigger_match(struct device *dev, void *res, void *data)
* Managed iio_trigger_alloc. iio_trigger allocated with this function is
* automatically freed on driver detach.
*
- * If an iio_trigger allocated with this function needs to be freed separately,
- * devm_iio_trigger_free() must be used.
- *
* RETURNS:
* Pointer to allocated iio_trigger on success, NULL on failure.
*/
@@ -640,23 +625,6 @@ struct iio_trigger *devm_iio_trigger_alloc(struct device *dev,
}
EXPORT_SYMBOL_GPL(devm_iio_trigger_alloc);
-/**
- * devm_iio_trigger_free - Resource-managed iio_trigger_free()
- * @dev: Device this iio_dev belongs to
- * @iio_trig: the iio_trigger associated with the device
- *
- * Free iio_trigger allocated with devm_iio_trigger_alloc().
- */
-void devm_iio_trigger_free(struct device *dev, struct iio_trigger *iio_trig)
-{
- int rc;
-
- rc = devres_release(dev, devm_iio_trigger_release,
- devm_iio_trigger_match, iio_trig);
- WARN_ON(rc);
-}
-EXPORT_SYMBOL_GPL(devm_iio_trigger_free);
-
static void devm_iio_trigger_unreg(struct device *dev, void *res)
{
iio_trigger_unregister(*(struct iio_trigger **)res);
@@ -673,9 +641,6 @@ static void devm_iio_trigger_unreg(struct device *dev, void *res)
* calls iio_trigger_register() internally. Refer to that function for more
* information.
*
- * If an iio_trigger registered with this function needs to be unregistered
- * separately, devm_iio_trigger_unregister() must be used.
- *
* RETURNS:
* 0 on success, negative error number on failure.
*/
@@ -701,24 +666,6 @@ int __devm_iio_trigger_register(struct device *dev,
}
EXPORT_SYMBOL_GPL(__devm_iio_trigger_register);
-/**
- * devm_iio_trigger_unregister - Resource-managed iio_trigger_unregister()
- * @dev: device this iio_trigger belongs to
- * @trig_info: the trigger associated with the device
- *
- * Unregister trigger registered with devm_iio_trigger_register().
- */
-void devm_iio_trigger_unregister(struct device *dev,
- struct iio_trigger *trig_info)
-{
- int rc;
-
- rc = devres_release(dev, devm_iio_trigger_unreg, devm_iio_trigger_match,
- trig_info);
- WARN_ON(rc);
-}
-EXPORT_SYMBOL_GPL(devm_iio_trigger_unregister);
-
bool iio_trigger_using_own(struct iio_dev *indio_dev)
{
return indio_dev->trig->attached_own_device;
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index 5a8351c9a426..ede99e0d5371 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -360,18 +360,6 @@ static void devm_iio_channel_free(struct device *dev, void *res)
iio_channel_release(channel);
}
-static int devm_iio_channel_match(struct device *dev, void *res, void *data)
-{
- struct iio_channel **r = res;
-
- if (!r || !*r) {
- WARN_ON(!r || !*r);
- return 0;
- }
-
- return *r == data;
-}
-
struct iio_channel *devm_iio_channel_get(struct device *dev,
const char *channel_name)
{
@@ -394,13 +382,6 @@ struct iio_channel *devm_iio_channel_get(struct device *dev,
}
EXPORT_SYMBOL_GPL(devm_iio_channel_get);
-void devm_iio_channel_release(struct device *dev, struct iio_channel *channel)
-{
- WARN_ON(devres_release(dev, devm_iio_channel_free,
- devm_iio_channel_match, channel));
-}
-EXPORT_SYMBOL_GPL(devm_iio_channel_release);
-
struct iio_channel *iio_channel_get_all(struct device *dev)
{
const char *name;
@@ -514,14 +495,6 @@ struct iio_channel *devm_iio_channel_get_all(struct device *dev)
}
EXPORT_SYMBOL_GPL(devm_iio_channel_get_all);
-void devm_iio_channel_release_all(struct device *dev,
- struct iio_channel *channels)
-{
- WARN_ON(devres_release(dev, devm_iio_channel_free_all,
- devm_iio_channel_match, channels));
-}
-EXPORT_SYMBOL_GPL(devm_iio_channel_release_all);
-
static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
enum iio_chan_info_enum info)
{
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index b27719cefcf9..182bd18c4bb2 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -516,6 +516,8 @@ config US5182D
config VCNL4000
tristate "VCNL4000/4010/4020/4200 combined ALS and proximity sensor"
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
depends on I2C
help
Say Y here if you want to build a driver for the Vishay VCNL4000,
diff --git a/drivers/iio/light/bh1780.c b/drivers/iio/light/bh1780.c
index a8361006dcd9..03f2d8d123c4 100644
--- a/drivers/iio/light/bh1780.c
+++ b/drivers/iio/light/bh1780.c
@@ -13,7 +13,7 @@
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/module.h>
-#include <linux/of.h>
+#include <linux/mod_devicetable.h>
#include <linux/pm_runtime.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -273,13 +273,11 @@ static const struct i2c_device_id bh1780_id[] = {
MODULE_DEVICE_TABLE(i2c, bh1780_id);
-#ifdef CONFIG_OF
static const struct of_device_id of_bh1780_match[] = {
{ .compatible = "rohm,bh1780gli", },
{},
};
MODULE_DEVICE_TABLE(of, of_bh1780_match);
-#endif
static struct i2c_driver bh1780_driver = {
.probe = bh1780_probe,
@@ -288,7 +286,7 @@ static struct i2c_driver bh1780_driver = {
.driver = {
.name = "bh1780",
.pm = &bh1780_dev_pm_ops,
- .of_match_table = of_match_ptr(of_bh1780_match),
+ .of_match_table = of_bh1780_match,
},
};
diff --git a/drivers/iio/light/cm32181.c b/drivers/iio/light/cm32181.c
index 5f4fb5674fa0..160eb3f99795 100644
--- a/drivers/iio/light/cm32181.c
+++ b/drivers/iio/light/cm32181.c
@@ -4,11 +4,13 @@
* Author: Kevin Tsai <ktsai@capellamicro.com>
*/
+#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/mutex.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/interrupt.h>
#include <linux/regulator/consumer.h>
#include <linux/iio/iio.h>
@@ -18,17 +20,24 @@
/* Registers Address */
#define CM32181_REG_ADDR_CMD 0x00
+#define CM32181_REG_ADDR_WH 0x01
+#define CM32181_REG_ADDR_WL 0x02
+#define CM32181_REG_ADDR_TEST 0x03
#define CM32181_REG_ADDR_ALS 0x04
#define CM32181_REG_ADDR_STATUS 0x06
#define CM32181_REG_ADDR_ID 0x07
/* Number of Configurable Registers */
-#define CM32181_CONF_REG_NUM 0x01
+#define CM32181_CONF_REG_NUM 4
/* CMD register */
-#define CM32181_CMD_ALS_ENABLE 0x00
-#define CM32181_CMD_ALS_DISABLE 0x01
-#define CM32181_CMD_ALS_INT_EN 0x02
+#define CM32181_CMD_ALS_DISABLE BIT(0)
+#define CM32181_CMD_ALS_INT_EN BIT(1)
+#define CM32181_CMD_ALS_THRES_WINDOW BIT(2)
+
+#define CM32181_CMD_ALS_PERS_SHIFT 4
+#define CM32181_CMD_ALS_PERS_MASK (0x03 << CM32181_CMD_ALS_PERS_SHIFT)
+#define CM32181_CMD_ALS_PERS_DEFAULT (0x01 << CM32181_CMD_ALS_PERS_SHIFT)
#define CM32181_CMD_ALS_IT_SHIFT 6
#define CM32181_CMD_ALS_IT_MASK (0x0F << CM32181_CMD_ALS_IT_SHIFT)
@@ -38,27 +47,133 @@
#define CM32181_CMD_ALS_SM_MASK (0x03 << CM32181_CMD_ALS_SM_SHIFT)
#define CM32181_CMD_ALS_SM_DEFAULT (0x01 << CM32181_CMD_ALS_SM_SHIFT)
-#define CM32181_MLUX_PER_BIT 5 /* ALS_SM=01 IT=800ms */
-#define CM32181_MLUX_PER_BIT_BASE_IT 800000 /* Based on IT=800ms */
-#define CM32181_CALIBSCALE_DEFAULT 1000
-#define CM32181_CALIBSCALE_RESOLUTION 1000
-#define MLUX_PER_LUX 1000
+#define CM32181_LUX_PER_BIT 500 /* ALS_SM=01 IT=800ms */
+#define CM32181_LUX_PER_BIT_RESOLUTION 100000
+#define CM32181_LUX_PER_BIT_BASE_IT 800000 /* Based on IT=800ms */
+#define CM32181_CALIBSCALE_DEFAULT 100000
+#define CM32181_CALIBSCALE_RESOLUTION 100000
-static const u8 cm32181_reg[CM32181_CONF_REG_NUM] = {
- CM32181_REG_ADDR_CMD,
-};
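+/* Fixed SMBus Alert Response Address (ARA) defined by the SMBus specification */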
+#define SMBUS_ALERT_RESPONSE_ADDRESS 0x0c
+
+/* CPM0 Index 0: device-id (3218 or 32181), 1: Unknown, 2: init_regs_bitmap */
+#define CPM0_REGS_BITMAP 2
+#define CPM0_HEADER_SIZE 3
-static const int als_it_bits[] = {12, 8, 0, 1, 2, 3};
-static const int als_it_value[] = {25000, 50000, 100000, 200000, 400000,
- 800000};
+/* CPM1 Index 0: lux_per_bit, 1: calibscale, 2: resolution (100000) */
+#define CPM1_LUX_PER_BIT 0
+#define CPM1_CALIBSCALE 1
+#define CPM1_SIZE 3
+
+/* CM3218 Family */
+static const int cm3218_als_it_bits[] = { 0, 1, 2, 3 };
+static const int cm3218_als_it_values[] = { 100000, 200000, 400000, 800000 };
+
+/* CM32181 Family */
+static const int cm32181_als_it_bits[] = { 12, 8, 0, 1, 2, 3 };
+static const int cm32181_als_it_values[] = {
+ 25000, 50000, 100000, 200000, 400000, 800000
+};
struct cm32181_chip {
struct i2c_client *client;
+ struct device *dev;
struct mutex lock;
u16 conf_regs[CM32181_CONF_REG_NUM];
+ unsigned long init_regs_bitmap;
int calibscale;
+ int lux_per_bit;
+ int lux_per_bit_base_it;
+ int num_als_it;
+ const int *als_it_bits;
+ const int *als_it_values;
};
+static int cm32181_read_als_it(struct cm32181_chip *cm32181, int *val2);
+
+#ifdef CONFIG_ACPI
+/**
+ * cm32181_acpi_get_cpm() - Get CPM object from ACPI
+ * @dev:	pointer to the struct device.
+ * @obj_name:	name of the ACPI object to evaluate.
+ * @values:	array in which the returned elements are stored.
+ * @count:	maximum number of elements to copy.
+ *
+ * Convert an ACPI CPM table to an array of integers.
+ *
+ * Return: -ENODEV on failure, otherwise the number of elements copied.
+ */
+static int cm32181_acpi_get_cpm(struct device *dev, char *obj_name,
+ u64 *values, int count)
+{
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *cpm, *elem;
+ acpi_handle handle;
+ acpi_status status;
+ int i;
+
+ handle = ACPI_HANDLE(dev);
+ if (!handle)
+ return -ENODEV;
+
+ status = acpi_evaluate_object(handle, obj_name, NULL, &buffer);
+ if (ACPI_FAILURE(status)) {
+ dev_err(dev, "object %s not found\n", obj_name);
+ return -ENODEV;
+ }
+
+ cpm = buffer.pointer;
+ if (cpm->package.count > count)
+ dev_warn(dev, "%s table contains %u values, only using first %d values\n",
+ obj_name, cpm->package.count, count);
+
+ count = min_t(int, cpm->package.count, count);
+ for (i = 0; i < count; i++) {
+ elem = &(cpm->package.elements[i]);
+ values[i] = elem->integer.value;
+ }
+
+ kfree(buffer.pointer);
+
+ return count;
+}
+
+static void cm32181_acpi_parse_cpm_tables(struct cm32181_chip *cm32181)
+{
+ u64 vals[CPM0_HEADER_SIZE + CM32181_CONF_REG_NUM];
+ struct device *dev = cm32181->dev;
+ int i, count;
+
+ count = cm32181_acpi_get_cpm(dev, "CPM0", vals, ARRAY_SIZE(vals));
+ if (count <= CPM0_HEADER_SIZE)
+ return;
+
+ count -= CPM0_HEADER_SIZE;
+
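+	/* Values after the CPM0 header map 1:1 onto the configuration registers */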
+ cm32181->init_regs_bitmap = vals[CPM0_REGS_BITMAP];
+ cm32181->init_regs_bitmap &= GENMASK(count - 1, 0);
+ for_each_set_bit(i, &cm32181->init_regs_bitmap, count)
+ cm32181->conf_regs[i] = vals[CPM0_HEADER_SIZE + i];
+
+ count = cm32181_acpi_get_cpm(dev, "CPM1", vals, ARRAY_SIZE(vals));
+ if (count != CPM1_SIZE)
+ return;
+
+ cm32181->lux_per_bit = vals[CPM1_LUX_PER_BIT];
+
+ /* Check for uncalibrated devices */
+ if (vals[CPM1_CALIBSCALE] == CM32181_CALIBSCALE_DEFAULT)
+ return;
+
+ cm32181->calibscale = vals[CPM1_CALIBSCALE];
+ /* CPM1 lux_per_bit is for the current it value */
+ cm32181_read_als_it(cm32181, &cm32181->lux_per_bit_base_it);
+}
+#else
+static void cm32181_acpi_parse_cpm_tables(struct cm32181_chip *cm32181)
+{
+}
+#endif /* CONFIG_ACPI */
+
/**
* cm32181_reg_init() - Initialize CM32181 registers
* @cm32181: pointer of struct cm32181.
@@ -78,18 +193,37 @@ static int cm32181_reg_init(struct cm32181_chip *cm32181)
return ret;
/* check device ID */
- if ((ret & 0xFF) != 0x81)
+ switch (ret & 0xFF) {
+ case 0x18: /* CM3218 */
+ cm32181->num_als_it = ARRAY_SIZE(cm3218_als_it_bits);
+ cm32181->als_it_bits = cm3218_als_it_bits;
+ cm32181->als_it_values = cm3218_als_it_values;
+ break;
+ case 0x81: /* CM32181 */
+ case 0x82: /* CM32182, fully compat. with CM32181 */
+ cm32181->num_als_it = ARRAY_SIZE(cm32181_als_it_bits);
+ cm32181->als_it_bits = cm32181_als_it_bits;
+ cm32181->als_it_values = cm32181_als_it_values;
+ break;
+ default:
return -ENODEV;
+ }
/* Default Values */
- cm32181->conf_regs[CM32181_REG_ADDR_CMD] = CM32181_CMD_ALS_ENABLE |
+ cm32181->conf_regs[CM32181_REG_ADDR_CMD] =
CM32181_CMD_ALS_IT_DEFAULT | CM32181_CMD_ALS_SM_DEFAULT;
+ cm32181->init_regs_bitmap = BIT(CM32181_REG_ADDR_CMD);
cm32181->calibscale = CM32181_CALIBSCALE_DEFAULT;
+ cm32181->lux_per_bit = CM32181_LUX_PER_BIT;
+ cm32181->lux_per_bit_base_it = CM32181_LUX_PER_BIT_BASE_IT;
+
+ if (ACPI_HANDLE(cm32181->dev))
+ cm32181_acpi_parse_cpm_tables(cm32181);
/* Initialize registers*/
- for (i = 0; i < CM32181_CONF_REG_NUM; i++) {
- ret = i2c_smbus_write_word_data(client, cm32181_reg[i],
- cm32181->conf_regs[i]);
+ for_each_set_bit(i, &cm32181->init_regs_bitmap, CM32181_CONF_REG_NUM) {
+ ret = i2c_smbus_write_word_data(client, i,
+ cm32181->conf_regs[i]);
if (ret < 0)
return ret;
}
@@ -102,7 +236,7 @@ static int cm32181_reg_init(struct cm32181_chip *cm32181)
* @cm32181: pointer of struct cm32181
* @val2: pointer of int to load the als_it value.
*
- * Report the current integartion time by millisecond.
+ * Report the current integration time in milliseconds.
*
* Return: IIO_VAL_INT_PLUS_MICRO for success, otherwise -EINVAL.
*/
@@ -114,9 +248,9 @@ static int cm32181_read_als_it(struct cm32181_chip *cm32181, int *val2)
als_it = cm32181->conf_regs[CM32181_REG_ADDR_CMD];
als_it &= CM32181_CMD_ALS_IT_MASK;
als_it >>= CM32181_CMD_ALS_IT_SHIFT;
- for (i = 0; i < ARRAY_SIZE(als_it_bits); i++) {
- if (als_it == als_it_bits[i]) {
- *val2 = als_it_value[i];
+ for (i = 0; i < cm32181->num_als_it; i++) {
+ if (als_it == cm32181->als_it_bits[i]) {
+ *val2 = cm32181->als_it_values[i];
return IIO_VAL_INT_PLUS_MICRO;
}
}
@@ -139,14 +273,14 @@ static int cm32181_write_als_it(struct cm32181_chip *cm32181, int val)
u16 als_it;
int ret, i, n;
- n = ARRAY_SIZE(als_it_value);
+ n = cm32181->num_als_it;
for (i = 0; i < n; i++)
- if (val <= als_it_value[i])
+ if (val <= cm32181->als_it_values[i])
break;
if (i >= n)
i = n - 1;
- als_it = als_it_bits[i];
+ als_it = cm32181->als_it_bits[i];
als_it <<= CM32181_CMD_ALS_IT_SHIFT;
mutex_lock(&cm32181->lock);
@@ -175,15 +309,15 @@ static int cm32181_get_lux(struct cm32181_chip *cm32181)
struct i2c_client *client = cm32181->client;
int ret;
int als_it;
- unsigned long lux;
+ u64 lux;
ret = cm32181_read_als_it(cm32181, &als_it);
if (ret < 0)
return -EINVAL;
- lux = CM32181_MLUX_PER_BIT;
- lux *= CM32181_MLUX_PER_BIT_BASE_IT;
- lux /= als_it;
+ lux = cm32181->lux_per_bit;
+ lux *= cm32181->lux_per_bit_base_it;
+ lux = div_u64(lux, als_it);
ret = i2c_smbus_read_word_data(client, CM32181_REG_ADDR_ALS);
if (ret < 0)
@@ -191,8 +325,8 @@ static int cm32181_get_lux(struct cm32181_chip *cm32181)
lux *= ret;
lux *= cm32181->calibscale;
- lux /= CM32181_CALIBSCALE_RESOLUTION;
- lux /= MLUX_PER_LUX;
+ lux = div_u64(lux, CM32181_CALIBSCALE_RESOLUTION);
+ lux = div_u64(lux, CM32181_LUX_PER_BIT_RESOLUTION);
if (lux > 0xFFFF)
lux = 0xFFFF;
@@ -258,11 +392,12 @@ static int cm32181_write_raw(struct iio_dev *indio_dev,
static ssize_t cm32181_get_it_available(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct cm32181_chip *cm32181 = iio_priv(dev_to_iio_dev(dev));
int i, n, len;
- n = ARRAY_SIZE(als_it_value);
+ n = cm32181->num_als_it;
for (i = 0, len = 0; i < n; i++)
- len += sprintf(buf + len, "0.%06u ", als_it_value[i]);
+ len += sprintf(buf + len, "0.%06u ", cm32181->als_it_values[i]);
return len + sprintf(buf + len, "\n");
}
@@ -294,70 +429,86 @@ static const struct iio_info cm32181_info = {
.attrs = &cm32181_attribute_group,
};
-static int cm32181_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int cm32181_probe(struct i2c_client *client)
{
+ struct device *dev = &client->dev;
struct cm32181_chip *cm32181;
struct iio_dev *indio_dev;
int ret;
- indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*cm32181));
- if (!indio_dev) {
- dev_err(&client->dev, "devm_iio_device_alloc failed\n");
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*cm32181));
+ if (!indio_dev)
return -ENOMEM;
+
+ /*
+ * Some ACPI systems list 2 I2C resources for the CM3218 sensor, the
+ * SMBus Alert Response Address (ARA, 0x0c) and the actual I2C address.
+	 * Detect this and take the following steps to deal with it:
+	 * 1. When an SMBus Alert capable sensor has an Alert asserted, it will
+ * not respond on its actual I2C address. Read a byte from the ARA
+ * to clear any pending Alerts.
+ * 2. Create a "dummy" client for the actual I2C address and
+ * use that client to communicate with the sensor.
+ */
+ if (ACPI_HANDLE(dev) && client->addr == SMBUS_ALERT_RESPONSE_ADDRESS) {
+ struct i2c_board_info board_info = { .type = "dummy" };
+
+ i2c_smbus_read_byte(client);
+
+ client = i2c_acpi_new_device(dev, 1, &board_info);
+ if (IS_ERR(client))
+ return PTR_ERR(client);
}
cm32181 = iio_priv(indio_dev);
- i2c_set_clientdata(client, indio_dev);
cm32181->client = client;
+ cm32181->dev = dev;
mutex_init(&cm32181->lock);
- indio_dev->dev.parent = &client->dev;
+ indio_dev->dev.parent = dev;
indio_dev->channels = cm32181_channels;
indio_dev->num_channels = ARRAY_SIZE(cm32181_channels);
indio_dev->info = &cm32181_info;
- indio_dev->name = id->name;
+ indio_dev->name = dev_name(dev);
indio_dev->modes = INDIO_DIRECT_MODE;
ret = cm32181_reg_init(cm32181);
if (ret) {
- dev_err(&client->dev,
- "%s: register init failed\n",
- __func__);
+ dev_err(dev, "%s: register init failed\n", __func__);
return ret;
}
- ret = devm_iio_device_register(&client->dev, indio_dev);
+ ret = devm_iio_device_register(dev, indio_dev);
if (ret) {
- dev_err(&client->dev,
- "%s: regist device failed\n",
- __func__);
+		dev_err(dev, "%s: device registration failed\n", __func__);
return ret;
}
return 0;
}
-static const struct i2c_device_id cm32181_id[] = {
- { "cm32181", 0 },
- { }
-};
-
-MODULE_DEVICE_TABLE(i2c, cm32181_id);
-
static const struct of_device_id cm32181_of_match[] = {
+ { .compatible = "capella,cm3218" },
{ .compatible = "capella,cm32181" },
{ }
};
MODULE_DEVICE_TABLE(of, cm32181_of_match);
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id cm32181_acpi_match[] = {
+ { "CPLM3218", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, cm32181_acpi_match);
+#endif
+
static struct i2c_driver cm32181_driver = {
.driver = {
.name = "cm32181",
- .of_match_table = of_match_ptr(cm32181_of_match),
+ .acpi_match_table = ACPI_PTR(cm32181_acpi_match),
+ .of_match_table = cm32181_of_match,
},
- .id_table = cm32181_id,
- .probe = cm32181_probe,
+ .probe_new = cm32181_probe,
};
module_i2c_driver(cm32181_driver);
diff --git a/drivers/iio/light/cm3232.c b/drivers/iio/light/cm3232.c
index cd3cfb7d02bd..867200825686 100644
--- a/drivers/iio/light/cm3232.c
+++ b/drivers/iio/light/cm3232.c
@@ -10,6 +10,7 @@
#include <linux/i2c.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/init.h>
@@ -418,7 +419,7 @@ MODULE_DEVICE_TABLE(of, cm3232_of_match);
static struct i2c_driver cm3232_driver = {
.driver = {
.name = "cm3232",
- .of_match_table = of_match_ptr(cm3232_of_match),
+ .of_match_table = cm3232_of_match,
#ifdef CONFIG_PM_SLEEP
.pm = &cm3232_pm_ops,
#endif
diff --git a/drivers/iio/light/gp2ap002.c b/drivers/iio/light/gp2ap002.c
index b7ef16b28280..7a2679bdc987 100644
--- a/drivers/iio/light/gp2ap002.c
+++ b/drivers/iio/light/gp2ap002.c
@@ -158,6 +158,9 @@ static irqreturn_t gp2ap002_prox_irq(int irq, void *d)
int val;
int ret;
+ if (!gp2ap002->enabled)
+ goto err_retrig;
+
ret = regmap_read(gp2ap002->map, GP2AP002_PROX, &val);
if (ret) {
dev_err(gp2ap002->dev, "error reading proximity\n");
@@ -247,6 +250,8 @@ static int gp2ap002_read_raw(struct iio_dev *indio_dev,
struct gp2ap002 *gp2ap002 = iio_priv(indio_dev);
int ret;
+ pm_runtime_get_sync(gp2ap002->dev);
+
switch (mask) {
case IIO_CHAN_INFO_RAW:
switch (chan->type) {
@@ -255,13 +260,21 @@ static int gp2ap002_read_raw(struct iio_dev *indio_dev,
if (ret < 0)
return ret;
*val = ret;
- return IIO_VAL_INT;
+ ret = IIO_VAL_INT;
+ goto out;
default:
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
default:
- return -EINVAL;
+ ret = -EINVAL;
}
+
+out:
+ pm_runtime_mark_last_busy(gp2ap002->dev);
+ pm_runtime_put_autosuspend(gp2ap002->dev);
+
+ return ret;
}
static int gp2ap002_init(struct gp2ap002 *gp2ap002)
diff --git a/drivers/iio/light/gp2ap020a00f.c b/drivers/iio/light/gp2ap020a00f.c
index 7fbbce0d4bc7..070d4cd0cf54 100644
--- a/drivers/iio/light/gp2ap020a00f.c
+++ b/drivers/iio/light/gp2ap020a00f.c
@@ -38,8 +38,8 @@
#include <linux/irq.h>
#include <linux/irq_work.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
-#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
@@ -1617,18 +1617,16 @@ static const struct i2c_device_id gp2ap020a00f_id[] = {
MODULE_DEVICE_TABLE(i2c, gp2ap020a00f_id);
-#ifdef CONFIG_OF
static const struct of_device_id gp2ap020a00f_of_match[] = {
{ .compatible = "sharp,gp2ap020a00f" },
{ }
};
MODULE_DEVICE_TABLE(of, gp2ap020a00f_of_match);
-#endif
static struct i2c_driver gp2ap020a00f_driver = {
.driver = {
.name = GP2A_I2C_NAME,
- .of_match_table = of_match_ptr(gp2ap020a00f_of_match),
+ .of_match_table = gp2ap020a00f_of_match,
},
.probe = gp2ap020a00f_probe,
.remove = gp2ap020a00f_remove,
diff --git a/drivers/iio/light/hid-sensor-als.c b/drivers/iio/light/hid-sensor-als.c
index b6cd299517d1..81fa2a422797 100644
--- a/drivers/iio/light/hid-sensor-als.c
+++ b/drivers/iio/light/hid-sensor-als.c
@@ -14,8 +14,6 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
-#include <linux/iio/trigger_consumer.h>
-#include <linux/iio/triggered_buffer.h>
#include "../common/hid-sensors/hid-sensor-trigger.h"
enum {
@@ -308,18 +306,13 @@ static int hid_als_probe(struct platform_device *pdev)
indio_dev->name = name;
indio_dev->modes = INDIO_DIRECT_MODE;
- ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
- NULL, NULL);
- if (ret) {
- dev_err(&pdev->dev, "failed to initialize trigger buffer\n");
- goto error_free_dev_mem;
- }
atomic_set(&als_state->common_attributes.data_ready, 0);
+
ret = hid_sensor_setup_trigger(indio_dev, name,
&als_state->common_attributes);
if (ret < 0) {
dev_err(&pdev->dev, "trigger setup failed\n");
- goto error_unreg_buffer_funcs;
+ goto error_free_dev_mem;
}
ret = iio_device_register(indio_dev);
@@ -343,9 +336,7 @@ static int hid_als_probe(struct platform_device *pdev)
error_iio_unreg:
iio_device_unregister(indio_dev);
error_remove_trigger:
- hid_sensor_remove_trigger(&als_state->common_attributes);
-error_unreg_buffer_funcs:
- iio_triggered_buffer_cleanup(indio_dev);
+ hid_sensor_remove_trigger(indio_dev, &als_state->common_attributes);
error_free_dev_mem:
kfree(indio_dev->channels);
return ret;
@@ -360,8 +351,7 @@ static int hid_als_remove(struct platform_device *pdev)
sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_ALS);
iio_device_unregister(indio_dev);
- hid_sensor_remove_trigger(&als_state->common_attributes);
- iio_triggered_buffer_cleanup(indio_dev);
+ hid_sensor_remove_trigger(indio_dev, &als_state->common_attributes);
kfree(indio_dev->channels);
return 0;
diff --git a/drivers/iio/light/hid-sensor-prox.c b/drivers/iio/light/hid-sensor-prox.c
index 7e1030af9ba3..e9c04df07344 100644
--- a/drivers/iio/light/hid-sensor-prox.c
+++ b/drivers/iio/light/hid-sensor-prox.c
@@ -14,8 +14,6 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
-#include <linux/iio/trigger_consumer.h>
-#include <linux/iio/triggered_buffer.h>
#include "../common/hid-sensors/hid-sensor-trigger.h"
#define CHANNEL_SCAN_INDEX_PRESENCE 0
@@ -286,18 +284,13 @@ static int hid_prox_probe(struct platform_device *pdev)
indio_dev->name = name;
indio_dev->modes = INDIO_DIRECT_MODE;
- ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
- NULL, NULL);
- if (ret) {
- dev_err(&pdev->dev, "failed to initialize trigger buffer\n");
- goto error_free_dev_mem;
- }
atomic_set(&prox_state->common_attributes.data_ready, 0);
+
ret = hid_sensor_setup_trigger(indio_dev, name,
&prox_state->common_attributes);
if (ret) {
dev_err(&pdev->dev, "trigger setup failed\n");
- goto error_unreg_buffer_funcs;
+ goto error_free_dev_mem;
}
ret = iio_device_register(indio_dev);
@@ -321,9 +314,7 @@ static int hid_prox_probe(struct platform_device *pdev)
error_iio_unreg:
iio_device_unregister(indio_dev);
error_remove_trigger:
- hid_sensor_remove_trigger(&prox_state->common_attributes);
-error_unreg_buffer_funcs:
- iio_triggered_buffer_cleanup(indio_dev);
+ hid_sensor_remove_trigger(indio_dev, &prox_state->common_attributes);
error_free_dev_mem:
kfree(indio_dev->channels);
return ret;
@@ -338,8 +329,7 @@ static int hid_prox_remove(struct platform_device *pdev)
sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_PROX);
iio_device_unregister(indio_dev);
- hid_sensor_remove_trigger(&prox_state->common_attributes);
- iio_triggered_buffer_cleanup(indio_dev);
+ hid_sensor_remove_trigger(indio_dev, &prox_state->common_attributes);
kfree(indio_dev->channels);
return 0;
diff --git a/drivers/iio/light/isl29125.c b/drivers/iio/light/isl29125.c
index e37894f0ae0b..95611f5eff01 100644
--- a/drivers/iio/light/isl29125.c
+++ b/drivers/iio/light/isl29125.c
@@ -213,13 +213,24 @@ static const struct iio_info isl29125_info = {
.attrs = &isl29125_attribute_group,
};
-static int isl29125_buffer_preenable(struct iio_dev *indio_dev)
+static int isl29125_buffer_postenable(struct iio_dev *indio_dev)
{
struct isl29125_data *data = iio_priv(indio_dev);
+ int err;
+
+ err = iio_triggered_buffer_postenable(indio_dev);
+ if (err)
+ return err;
data->conf1 |= ISL29125_MODE_RGB;
- return i2c_smbus_write_byte_data(data->client, ISL29125_CONF1,
+ err = i2c_smbus_write_byte_data(data->client, ISL29125_CONF1,
data->conf1);
+ if (err) {
+ iio_triggered_buffer_predisable(indio_dev);
+ return err;
+ }
+
+ return 0;
}
static int isl29125_buffer_predisable(struct iio_dev *indio_dev)
@@ -227,19 +238,18 @@ static int isl29125_buffer_predisable(struct iio_dev *indio_dev)
struct isl29125_data *data = iio_priv(indio_dev);
int ret;
- ret = iio_triggered_buffer_predisable(indio_dev);
- if (ret < 0)
- return ret;
-
data->conf1 &= ~ISL29125_MODE_MASK;
data->conf1 |= ISL29125_MODE_PD;
- return i2c_smbus_write_byte_data(data->client, ISL29125_CONF1,
+ ret = i2c_smbus_write_byte_data(data->client, ISL29125_CONF1,
data->conf1);
+
+ iio_triggered_buffer_predisable(indio_dev);
+
+ return ret;
}
static const struct iio_buffer_setup_ops isl29125_buffer_setup_ops = {
- .preenable = isl29125_buffer_preenable,
- .postenable = &iio_triggered_buffer_postenable,
+ .postenable = isl29125_buffer_postenable,
.predisable = isl29125_buffer_predisable,
};
diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c
index 71f99d2a22c1..5a3fcb127cd2 100644
--- a/drivers/iio/light/ltr501.c
+++ b/drivers/iio/light/ltr501.c
@@ -101,12 +101,12 @@ struct ltr501_gain {
int uscale;
};
-static struct ltr501_gain ltr501_als_gain_tbl[] = {
+static const struct ltr501_gain ltr501_als_gain_tbl[] = {
{1, 0},
{0, 5000},
};
-static struct ltr501_gain ltr559_als_gain_tbl[] = {
+static const struct ltr501_gain ltr559_als_gain_tbl[] = {
{1, 0},
{0, 500000},
{0, 250000},
@@ -117,14 +117,14 @@ static struct ltr501_gain ltr559_als_gain_tbl[] = {
{0, 10000},
};
-static struct ltr501_gain ltr501_ps_gain_tbl[] = {
+static const struct ltr501_gain ltr501_ps_gain_tbl[] = {
{1, 0},
{0, 250000},
{0, 125000},
{0, 62500},
};
-static struct ltr501_gain ltr559_ps_gain_tbl[] = {
+static const struct ltr501_gain ltr559_ps_gain_tbl[] = {
{0, 62500}, /* x16 gain */
{0, 31250}, /* x32 gain */
{0, 15625}, /* bits X1 are for x64 gain */
@@ -133,9 +133,9 @@ static struct ltr501_gain ltr559_ps_gain_tbl[] = {
struct ltr501_chip_info {
u8 partid;
- struct ltr501_gain *als_gain;
+ const struct ltr501_gain *als_gain;
int als_gain_tbl_size;
- struct ltr501_gain *ps_gain;
+ const struct ltr501_gain *ps_gain;
int ps_gain_tbl_size;
u8 als_mode_active;
u8 als_gain_mask;
@@ -192,7 +192,7 @@ static int ltr501_match_samp_freq(const struct ltr501_samp_table *tab,
return -EINVAL;
}
-static int ltr501_als_read_samp_freq(struct ltr501_data *data,
+static int ltr501_als_read_samp_freq(const struct ltr501_data *data,
int *val, int *val2)
{
int ret, i;
@@ -210,7 +210,7 @@ static int ltr501_als_read_samp_freq(struct ltr501_data *data,
return IIO_VAL_INT_PLUS_MICRO;
}
-static int ltr501_ps_read_samp_freq(struct ltr501_data *data,
+static int ltr501_ps_read_samp_freq(const struct ltr501_data *data,
int *val, int *val2)
{
int ret, i;
@@ -266,7 +266,7 @@ static int ltr501_ps_write_samp_freq(struct ltr501_data *data,
return ret;
}
-static int ltr501_als_read_samp_period(struct ltr501_data *data, int *val)
+static int ltr501_als_read_samp_period(const struct ltr501_data *data, int *val)
{
int ret, i;
@@ -282,7 +282,7 @@ static int ltr501_als_read_samp_period(struct ltr501_data *data, int *val)
return IIO_VAL_INT;
}
-static int ltr501_ps_read_samp_period(struct ltr501_data *data, int *val)
+static int ltr501_ps_read_samp_period(const struct ltr501_data *data, int *val)
{
int ret, i;
@@ -321,7 +321,7 @@ static unsigned long ltr501_calculate_lux(u16 vis_data, u16 ir_data)
return lux / 1000;
}
-static int ltr501_drdy(struct ltr501_data *data, u8 drdy_mask)
+static int ltr501_drdy(const struct ltr501_data *data, u8 drdy_mask)
{
int tries = 100;
int ret, status;
@@ -373,7 +373,8 @@ static int ltr501_set_it_time(struct ltr501_data *data, int it)
}
/* read int time in micro seconds */
-static int ltr501_read_it_time(struct ltr501_data *data, int *val, int *val2)
+static int ltr501_read_it_time(const struct ltr501_data *data,
+ int *val, int *val2)
{
int ret, index;
@@ -391,7 +392,7 @@ static int ltr501_read_it_time(struct ltr501_data *data, int *val, int *val2)
return IIO_VAL_INT_PLUS_MICRO;
}
-static int ltr501_read_als(struct ltr501_data *data, __le16 buf[2])
+static int ltr501_read_als(const struct ltr501_data *data, __le16 buf[2])
{
int ret;
@@ -403,7 +404,7 @@ static int ltr501_read_als(struct ltr501_data *data, __le16 buf[2])
buf, 2 * sizeof(__le16));
}
-static int ltr501_read_ps(struct ltr501_data *data)
+static int ltr501_read_ps(const struct ltr501_data *data)
{
int ret, status;
@@ -419,7 +420,7 @@ static int ltr501_read_ps(struct ltr501_data *data)
return status;
}
-static int ltr501_read_intr_prst(struct ltr501_data *data,
+static int ltr501_read_intr_prst(const struct ltr501_data *data,
enum iio_chan_type type,
int *val2)
{
@@ -716,7 +717,7 @@ static int ltr501_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
-static int ltr501_get_gain_index(struct ltr501_gain *gain, int size,
+static int ltr501_get_gain_index(const struct ltr501_gain *gain, int size,
int val, int val2)
{
int i;
@@ -848,14 +849,14 @@ static int ltr501_write_raw(struct iio_dev *indio_dev,
return ret;
}
-static int ltr501_read_thresh(struct iio_dev *indio_dev,
+static int ltr501_read_thresh(const struct iio_dev *indio_dev,
const struct iio_chan_spec *chan,
enum iio_event_type type,
enum iio_event_direction dir,
enum iio_event_info info,
int *val, int *val2)
{
- struct ltr501_data *data = iio_priv(indio_dev);
+ const struct ltr501_data *data = iio_priv(indio_dev);
int ret, thresh_data;
switch (chan->type) {
@@ -1263,7 +1264,7 @@ static irqreturn_t ltr501_trigger_handler(int irq, void *p)
if (mask & LTR501_STATUS_ALS_RDY) {
ret = regmap_bulk_read(data->regmap, LTR501_ALS_DATA1,
- (u8 *)als_buf, sizeof(als_buf));
+ als_buf, sizeof(als_buf));
if (ret < 0)
return ret;
if (test_bit(0, indio_dev->active_scan_mask))
@@ -1359,7 +1360,7 @@ static bool ltr501_is_volatile_reg(struct device *dev, unsigned int reg)
}
}
-static struct regmap_config ltr501_regmap_config = {
+static const struct regmap_config ltr501_regmap_config = {
.name = LTR501_REGMAP_NAME,
.reg_bits = 8,
.val_bits = 8,
diff --git a/drivers/iio/light/opt3001.c b/drivers/iio/light/opt3001.c
index 92004a2563ea..82abfa57b59c 100644
--- a/drivers/iio/light/opt3001.c
+++ b/drivers/iio/light/opt3001.c
@@ -16,6 +16,7 @@
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/types.h>
@@ -844,7 +845,7 @@ static struct i2c_driver opt3001_driver = {
.driver = {
.name = "opt3001",
- .of_match_table = of_match_ptr(opt3001_of_match),
+ .of_match_table = opt3001_of_match,
},
};
diff --git a/drivers/iio/light/si1133.c b/drivers/iio/light/si1133.c
index 9174ab928880..c1adab2a50fd 100644
--- a/drivers/iio/light/si1133.c
+++ b/drivers/iio/light/si1133.c
@@ -17,6 +17,8 @@
#include <linux/util_macros.h>
+#include <asm/unaligned.h>
+
#define SI1133_REG_PART_ID 0x00
#define SI1133_REG_REV_ID 0x01
#define SI1133_REG_MFR_ID 0x02
@@ -104,8 +106,6 @@
#define SI1133_LUX_BUFFER_SIZE 9
#define SI1133_MEASURE_BUFFER_SIZE 3
-#define SI1133_SIGN_BIT_INDEX 23
-
static const int si1133_scale_available[] = {
1, 2, 4, 8, 16, 32, 64, 128};
@@ -633,8 +633,7 @@ static int si1133_measure(struct si1133_data *data,
if (err)
return err;
- *val = sign_extend32((buffer[0] << 16) | (buffer[1] << 8) | buffer[2],
- SI1133_SIGN_BIT_INDEX);
+ *val = sign_extend32(get_unaligned_be24(&buffer[0]), 23);
return err;
}
@@ -723,16 +722,11 @@ static int si1133_get_lux(struct si1133_data *data, int *val)
if (err)
return err;
- high_vis =
- sign_extend32((buffer[0] << 16) | (buffer[1] << 8) | buffer[2],
- SI1133_SIGN_BIT_INDEX);
+ high_vis = sign_extend32(get_unaligned_be24(&buffer[0]), 23);
- low_vis =
- sign_extend32((buffer[3] << 16) | (buffer[4] << 8) | buffer[5],
- SI1133_SIGN_BIT_INDEX);
+ low_vis = sign_extend32(get_unaligned_be24(&buffer[3]), 23);
- ir = sign_extend32((buffer[6] << 16) | (buffer[7] << 8) | buffer[8],
- SI1133_SIGN_BIT_INDEX);
+ ir = sign_extend32(get_unaligned_be24(&buffer[6]), 23);
if (high_vis > SI1133_ADC_THRESHOLD || ir > SI1133_ADC_THRESHOLD)
lux = si1133_calc_polynomial(high_vis, ir,
diff --git a/drivers/iio/light/st_uvis25_i2c.c b/drivers/iio/light/st_uvis25_i2c.c
index 4889bbeb0c73..98cd49eefe45 100644
--- a/drivers/iio/light/st_uvis25_i2c.c
+++ b/drivers/iio/light/st_uvis25_i2c.c
@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/regmap.h>
@@ -31,8 +32,8 @@ static int st_uvis25_i2c_probe(struct i2c_client *client,
regmap = devm_regmap_init_i2c(client, &st_uvis25_i2c_regmap_config);
if (IS_ERR(regmap)) {
- dev_err(&client->dev, "Failed to register i2c regmap %d\n",
- (int)PTR_ERR(regmap));
+ dev_err(&client->dev, "Failed to register i2c regmap %ld\n",
+ PTR_ERR(regmap));
return PTR_ERR(regmap);
}
@@ -55,7 +56,7 @@ static struct i2c_driver st_uvis25_driver = {
.driver = {
.name = "st_uvis25_i2c",
.pm = &st_uvis25_pm_ops,
- .of_match_table = of_match_ptr(st_uvis25_i2c_of_match),
+ .of_match_table = st_uvis25_i2c_of_match,
},
.probe = st_uvis25_i2c_probe,
.id_table = st_uvis25_i2c_id_table,
diff --git a/drivers/iio/light/st_uvis25_spi.c b/drivers/iio/light/st_uvis25_spi.c
index a9ceae4f58b3..af9d94d12787 100644
--- a/drivers/iio/light/st_uvis25_spi.c
+++ b/drivers/iio/light/st_uvis25_spi.c
@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/regmap.h>
@@ -31,8 +32,8 @@ static int st_uvis25_spi_probe(struct spi_device *spi)
regmap = devm_regmap_init_spi(spi, &st_uvis25_spi_regmap_config);
if (IS_ERR(regmap)) {
- dev_err(&spi->dev, "Failed to register spi regmap %d\n",
- (int)PTR_ERR(regmap));
+ dev_err(&spi->dev, "Failed to register spi regmap %ld\n",
+ PTR_ERR(regmap));
return PTR_ERR(regmap);
}
@@ -55,7 +56,7 @@ static struct spi_driver st_uvis25_driver = {
.driver = {
.name = "st_uvis25_spi",
.pm = &st_uvis25_pm_ops,
- .of_match_table = of_match_ptr(st_uvis25_spi_of_match),
+ .of_match_table = st_uvis25_spi_of_match,
},
.probe = st_uvis25_spi_probe,
.id_table = st_uvis25_spi_id_table,
diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c
index d8c40a83097d..27a5c28aac7f 100644
--- a/drivers/iio/light/tsl2563.c
+++ b/drivers/iio/light/tsl2563.c
@@ -69,7 +69,7 @@
#define TSL2563_TIMING_GAIN16 0x10
#define TSL2563_TIMING_GAIN1 0x00
-#define TSL2563_INT_DISBLED 0x00
+#define TSL2563_INT_DISABLED 0x00
#define TSL2563_INT_LEVEL 0x10
#define TSL2563_INT_PERSIST(n) ((n) & 0x0F)
diff --git a/drivers/iio/light/tsl2772.c b/drivers/iio/light/tsl2772.c
index be37fcbd4654..9fbde9b71b63 100644
--- a/drivers/iio/light/tsl2772.c
+++ b/drivers/iio/light/tsl2772.c
@@ -932,7 +932,7 @@ static ssize_t in_illuminance0_target_input_show(struct device *dev,
{
struct tsl2772_chip *chip = iio_priv(dev_to_iio_dev(dev));
- return snprintf(buf, PAGE_SIZE, "%d\n", chip->settings.als_cal_target);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", chip->settings.als_cal_target);
}
static ssize_t in_illuminance0_target_input_store(struct device *dev,
@@ -986,7 +986,7 @@ static ssize_t in_illuminance0_lux_table_show(struct device *dev,
int offset = 0;
while (i < TSL2772_MAX_LUX_TABLE_SIZE) {
- offset += snprintf(buf + offset, PAGE_SIZE, "%u,%u,",
+ offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%u,%u,",
chip->tsl2772_device_lux[i].ch0,
chip->tsl2772_device_lux[i].ch1);
if (chip->tsl2772_device_lux[i].ch0 == 0) {
@@ -1000,7 +1000,7 @@ static ssize_t in_illuminance0_lux_table_show(struct device *dev,
i++;
}
- offset += snprintf(buf + offset, PAGE_SIZE, "\n");
+ offset += scnprintf(buf + offset, PAGE_SIZE - offset, "\n");
return offset;
}
diff --git a/drivers/iio/light/vcnl4000.c b/drivers/iio/light/vcnl4000.c
index ec803c1e81df..2a4b3d331055 100644
--- a/drivers/iio/light/vcnl4000.c
+++ b/drivers/iio/light/vcnl4000.c
@@ -5,6 +5,7 @@
*
* Copyright 2012 Peter Meerwald <pmeerw@pmeerw.net>
* Copyright 2019 Pursim SPC
+ * Copyright 2020 Mathieu Othacehe <m.othacehe@gmail.com>
*
* IIO driver for:
* VCNL4000/10/20 (7-bit I2C slave address 0x13)
@@ -13,9 +14,7 @@
*
* TODO:
* allow to adjust IR current
- * proximity threshold and event handling
- * periodic ALS/proximity measurement (VCNL4010/20)
- * interrupts (VCNL4010/20/40, VCNL4200)
+ * interrupts (VCNL4040, VCNL4200)
*/
#include <linux/module.h>
@@ -23,9 +22,15 @@
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
+#include <linux/interrupt.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/events.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
#define VCNL4000_DRV_NAME "vcnl4000"
#define VCNL4000_PROD_ID 0x01
@@ -35,14 +40,22 @@
#define VCNL4000_COMMAND 0x80 /* Command register */
#define VCNL4000_PROD_REV 0x81 /* Product ID and Revision ID */
+#define VCNL4010_PROX_RATE 0x82 /* Proximity rate */
#define VCNL4000_LED_CURRENT 0x83 /* IR LED current for proximity mode */
#define VCNL4000_AL_PARAM 0x84 /* Ambient light parameter register */
+#define VCNL4010_ALS_PARAM 0x84 /* ALS rate */
#define VCNL4000_AL_RESULT_HI 0x85 /* Ambient light result register, MSB */
#define VCNL4000_AL_RESULT_LO 0x86 /* Ambient light result register, LSB */
#define VCNL4000_PS_RESULT_HI 0x87 /* Proximity result register, MSB */
#define VCNL4000_PS_RESULT_LO 0x88 /* Proximity result register, LSB */
#define VCNL4000_PS_MEAS_FREQ 0x89 /* Proximity test signal frequency */
+#define VCNL4010_INT_CTRL 0x89 /* Interrupt control */
#define VCNL4000_PS_MOD_ADJ 0x8a /* Proximity modulator timing adjustment */
+#define VCNL4010_LOW_THR_HI 0x8a /* Low threshold, MSB */
+#define VCNL4010_LOW_THR_LO 0x8b /* Low threshold, LSB */
+#define VCNL4010_HIGH_THR_HI 0x8c /* High threshold, MSB */
+#define VCNL4010_HIGH_THR_LO 0x8d /* High threshold, LSB */
+#define VCNL4010_ISR 0x8e /* Interrupt status */
#define VCNL4200_AL_CONF 0x00 /* Ambient light configuration */
#define VCNL4200_PS_CONF1 0x03 /* Proximity configuration */
@@ -57,6 +70,36 @@
#define VCNL4000_PS_RDY BIT(5) /* proximity data ready? */
#define VCNL4000_AL_OD BIT(4) /* start on-demand ALS measurement */
#define VCNL4000_PS_OD BIT(3) /* start on-demand proximity measurement */
+#define VCNL4000_ALS_EN BIT(2) /* start ALS measurement */
+#define VCNL4000_PROX_EN BIT(1) /* start proximity measurement */
+#define VCNL4000_SELF_TIMED_EN BIT(0) /* start self-timed measurement */
+
+/* Bit masks for interrupt registers. */
+#define VCNL4010_INT_THR_SEL BIT(0) /* Select threshold interrupt source */
+#define VCNL4010_INT_THR_EN BIT(1) /* Threshold interrupt type */
+#define VCNL4010_INT_ALS_EN BIT(2) /* Enable on ALS data ready */
+#define VCNL4010_INT_PROX_EN BIT(3) /* Enable on proximity data ready */
+
+#define VCNL4010_INT_THR_HIGH 0 /* High threshold exceeded */
+#define VCNL4010_INT_THR_LOW 1 /* Low threshold exceeded */
+#define VCNL4010_INT_ALS 2 /* ALS data ready */
+#define VCNL4010_INT_PROXIMITY 3 /* Proximity data ready */
+
+#define VCNL4010_INT_THR \
+ (BIT(VCNL4010_INT_THR_LOW) | BIT(VCNL4010_INT_THR_HIGH))
+#define VCNL4010_INT_DRDY \
+ (BIT(VCNL4010_INT_PROXIMITY) | BIT(VCNL4010_INT_ALS))
+
+static const int vcnl4010_prox_sampling_frequency[][2] = {
+ {1, 950000},
+ {3, 906250},
+ {7, 812500},
+ {16, 625000},
+ {31, 250000},
+ {62, 500000},
+ {125, 0},
+ {250, 0},
+};
#define VCNL4000_SLEEP_DELAY_MS 2000 /* before we enter pm_runtime_suspend */
@@ -83,10 +126,15 @@ struct vcnl4000_data {
struct mutex vcnl4000_lock;
struct vcnl4200_channel vcnl4200_al;
struct vcnl4200_channel vcnl4200_ps;
+ uint32_t near_level;
};
struct vcnl4000_chip_spec {
const char *prod;
+ struct iio_chan_spec const *channels;
+ const int num_channels;
+ const struct iio_info *info;
+ bool irq_support;
int (*init)(struct vcnl4000_data *data);
int (*measure_light)(struct vcnl4000_data *data, int *val);
int (*measure_proximity)(struct vcnl4000_data *data, int *val);
@@ -215,11 +263,31 @@ static int vcnl4200_init(struct vcnl4000_data *data)
return 0;
};
+static int vcnl4000_read_data(struct vcnl4000_data *data, u8 data_reg, int *val)
+{
+ s32 ret;
+
+ ret = i2c_smbus_read_word_swapped(data->client, data_reg);
+ if (ret < 0)
+ return ret;
+
+ *val = ret;
+ return 0;
+}
+
+static int vcnl4000_write_data(struct vcnl4000_data *data, u8 data_reg, int val)
+{
+ if (val > U16_MAX)
+ return -ERANGE;
+
+ return i2c_smbus_write_word_swapped(data->client, data_reg, val);
+}
+
+
static int vcnl4000_measure(struct vcnl4000_data *data, u8 req_mask,
u8 rdy_mask, u8 data_reg, int *val)
{
int tries = 20;
- __be16 buf;
int ret;
mutex_lock(&data->vcnl4000_lock);
@@ -246,13 +314,11 @@ static int vcnl4000_measure(struct vcnl4000_data *data, u8 req_mask,
goto fail;
}
- ret = i2c_smbus_read_i2c_block_data(data->client,
- data_reg, sizeof(buf), (u8 *) &buf);
+ ret = vcnl4000_read_data(data, data_reg, val);
if (ret < 0)
goto fail;
mutex_unlock(&data->vcnl4000_lock);
- *val = be16_to_cpu(buf);
return 0;
@@ -312,47 +378,34 @@ static int vcnl4200_measure_proximity(struct vcnl4000_data *data, int *val)
return vcnl4200_measure(data, &data->vcnl4200_ps, val);
}
-static const struct vcnl4000_chip_spec vcnl4000_chip_spec_cfg[] = {
- [VCNL4000] = {
- .prod = "VCNL4000",
- .init = vcnl4000_init,
- .measure_light = vcnl4000_measure_light,
- .measure_proximity = vcnl4000_measure_proximity,
- .set_power_state = vcnl4000_set_power_state,
- },
- [VCNL4010] = {
- .prod = "VCNL4010/4020",
- .init = vcnl4000_init,
- .measure_light = vcnl4000_measure_light,
- .measure_proximity = vcnl4000_measure_proximity,
- .set_power_state = vcnl4000_set_power_state,
- },
- [VCNL4040] = {
- .prod = "VCNL4040",
- .init = vcnl4200_init,
- .measure_light = vcnl4200_measure_light,
- .measure_proximity = vcnl4200_measure_proximity,
- .set_power_state = vcnl4200_set_power_state,
- },
- [VCNL4200] = {
- .prod = "VCNL4200",
- .init = vcnl4200_init,
- .measure_light = vcnl4200_measure_light,
- .measure_proximity = vcnl4200_measure_proximity,
- .set_power_state = vcnl4200_set_power_state,
- },
-};
+static int vcnl4010_read_proxy_samp_freq(struct vcnl4000_data *data, int *val,
+ int *val2)
+{
+ int ret;
-static const struct iio_chan_spec vcnl4000_channels[] = {
- {
- .type = IIO_LIGHT,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_SCALE),
- }, {
- .type = IIO_PROXIMITY,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- }
-};
+ ret = i2c_smbus_read_byte_data(data->client, VCNL4010_PROX_RATE);
+ if (ret < 0)
+ return ret;
+
+ if (ret >= ARRAY_SIZE(vcnl4010_prox_sampling_frequency))
+ return -EINVAL;
+
+ *val = vcnl4010_prox_sampling_frequency[ret][0];
+ *val2 = vcnl4010_prox_sampling_frequency[ret][1];
+
+ return 0;
+}
+
+static bool vcnl4010_is_in_periodic_mode(struct vcnl4000_data *data)
+{
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(data->client, VCNL4000_COMMAND);
+ if (ret < 0)
+ return false;
+
+ return !!(ret & VCNL4000_SELF_TIMED_EN);
+}
static int vcnl4000_set_pm_runtime_state(struct vcnl4000_data *data, bool on)
{
@@ -412,10 +465,571 @@ static int vcnl4000_read_raw(struct iio_dev *indio_dev,
}
}
+static int vcnl4010_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ int ret;
+ struct vcnl4000_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ case IIO_CHAN_INFO_SCALE:
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+
+ /* Protect against event capture. */
+ if (vcnl4010_is_in_periodic_mode(data)) {
+ ret = -EBUSY;
+ } else {
+ ret = vcnl4000_read_raw(indio_dev, chan, val, val2,
+ mask);
+ }
+
+ iio_device_release_direct_mode(indio_dev);
+ return ret;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ ret = vcnl4010_read_proxy_samp_freq(data, val, val2);
+ if (ret < 0)
+ return ret;
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int vcnl4010_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *vals = (int *)vcnl4010_prox_sampling_frequency;
+ *type = IIO_VAL_INT_PLUS_MICRO;
+ *length = 2 * ARRAY_SIZE(vcnl4010_prox_sampling_frequency);
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int vcnl4010_write_proxy_samp_freq(struct vcnl4000_data *data, int val,
+ int val2)
+{
+ unsigned int i;
+ int index = -1;
+
+ for (i = 0; i < ARRAY_SIZE(vcnl4010_prox_sampling_frequency); i++) {
+ if (val == vcnl4010_prox_sampling_frequency[i][0] &&
+ val2 == vcnl4010_prox_sampling_frequency[i][1]) {
+ index = i;
+ break;
+ }
+ }
+
+ if (index < 0)
+ return -EINVAL;
+
+ return i2c_smbus_write_byte_data(data->client, VCNL4010_PROX_RATE,
+ index);
+}
+
+static int vcnl4010_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ int ret;
+ struct vcnl4000_data *data = iio_priv(indio_dev);
+
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+
+ /* Protect against event capture. */
+ if (vcnl4010_is_in_periodic_mode(data)) {
+ ret = -EBUSY;
+ goto end;
+ }
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ ret = vcnl4010_write_proxy_samp_freq(data, val, val2);
+ goto end;
+ default:
+ ret = -EINVAL;
+ goto end;
+ }
+ default:
+ ret = -EINVAL;
+ goto end;
+ }
+
+end:
+ iio_device_release_direct_mode(indio_dev);
+ return ret;
+}
+
+static int vcnl4010_read_event(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int *val, int *val2)
+{
+ int ret;
+ struct vcnl4000_data *data = iio_priv(indio_dev);
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ ret = vcnl4000_read_data(data, VCNL4010_HIGH_THR_HI,
+ val);
+ if (ret < 0)
+ return ret;
+ return IIO_VAL_INT;
+ case IIO_EV_DIR_FALLING:
+ ret = vcnl4000_read_data(data, VCNL4010_LOW_THR_HI,
+ val);
+ if (ret < 0)
+ return ret;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int vcnl4010_write_event(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int val, int val2)
+{
+ int ret;
+ struct vcnl4000_data *data = iio_priv(indio_dev);
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ ret = vcnl4000_write_data(data, VCNL4010_HIGH_THR_HI,
+ val);
+ if (ret < 0)
+ return ret;
+ return IIO_VAL_INT;
+ case IIO_EV_DIR_FALLING:
+ ret = vcnl4000_write_data(data, VCNL4010_LOW_THR_HI,
+ val);
+ if (ret < 0)
+ return ret;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static bool vcnl4010_is_thr_enabled(struct vcnl4000_data *data)
+{
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(data->client, VCNL4010_INT_CTRL);
+ if (ret < 0)
+ return false;
+
+ return !!(ret & VCNL4010_INT_THR_EN);
+}
+
+static int vcnl4010_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct vcnl4000_data *data = iio_priv(indio_dev);
+
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ return vcnl4010_is_thr_enabled(data);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int vcnl4010_config_threshold(struct iio_dev *indio_dev, bool state)
+{
+ struct vcnl4000_data *data = iio_priv(indio_dev);
+ int ret;
+ int icr;
+ int command;
+
+ if (state) {
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+
+ /* Enable periodic measurement of proximity data. */
+ command = VCNL4000_SELF_TIMED_EN | VCNL4000_PROX_EN;
+
+ /*
+		 * Enable threshold interrupts; they apply to proximity data
+		 * by default.
+ */
+ icr = VCNL4010_INT_THR_EN;
+ } else {
+ if (!vcnl4010_is_thr_enabled(data))
+ return 0;
+
+ command = 0;
+ icr = 0;
+ }
+
+ ret = i2c_smbus_write_byte_data(data->client, VCNL4000_COMMAND,
+ command);
+ if (ret < 0)
+ goto end;
+
+ ret = i2c_smbus_write_byte_data(data->client, VCNL4010_INT_CTRL, icr);
+
+end:
+ if (state)
+ iio_device_release_direct_mode(indio_dev);
+
+ return ret;
+}
+
+static int vcnl4010_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ int state)
+{
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ return vcnl4010_config_threshold(indio_dev, state);
+ default:
+ return -EINVAL;
+ }
+}
+
+static ssize_t vcnl4000_read_near_level(struct iio_dev *indio_dev,
+ uintptr_t priv,
+ const struct iio_chan_spec *chan,
+ char *buf)
+{
+ struct vcnl4000_data *data = iio_priv(indio_dev);
+
+ return sprintf(buf, "%u\n", data->near_level);
+}
+
+static const struct iio_chan_spec_ext_info vcnl4000_ext_info[] = {
+ {
+ .name = "nearlevel",
+ .shared = IIO_SEPARATE,
+ .read = vcnl4000_read_near_level,
+ },
+ { /* sentinel */ }
+};
+
+static const struct iio_event_spec vcnl4000_event_spec[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ }, {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ }, {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE),
+ }
+};
+
+static const struct iio_chan_spec vcnl4000_channels[] = {
+ {
+ .type = IIO_LIGHT,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ }, {
+ .type = IIO_PROXIMITY,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .ext_info = vcnl4000_ext_info,
+ }
+};
+
+static const struct iio_chan_spec vcnl4010_channels[] = {
+ {
+ .type = IIO_LIGHT,
+ .scan_index = -1,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ }, {
+ .type = IIO_PROXIMITY,
+ .scan_index = 0,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .info_mask_separate_available = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .event_spec = vcnl4000_event_spec,
+ .num_event_specs = ARRAY_SIZE(vcnl4000_event_spec),
+ .ext_info = vcnl4000_ext_info,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_CPU,
+ },
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(1),
+};
+
static const struct iio_info vcnl4000_info = {
.read_raw = vcnl4000_read_raw,
};
+static const struct iio_info vcnl4010_info = {
+ .read_raw = vcnl4010_read_raw,
+ .read_avail = vcnl4010_read_avail,
+ .write_raw = vcnl4010_write_raw,
+ .read_event_value = vcnl4010_read_event,
+ .write_event_value = vcnl4010_write_event,
+ .read_event_config = vcnl4010_read_event_config,
+ .write_event_config = vcnl4010_write_event_config,
+};
+
+static const struct vcnl4000_chip_spec vcnl4000_chip_spec_cfg[] = {
+ [VCNL4000] = {
+ .prod = "VCNL4000",
+ .init = vcnl4000_init,
+ .measure_light = vcnl4000_measure_light,
+ .measure_proximity = vcnl4000_measure_proximity,
+ .set_power_state = vcnl4000_set_power_state,
+ .channels = vcnl4000_channels,
+ .num_channels = ARRAY_SIZE(vcnl4000_channels),
+ .info = &vcnl4000_info,
+ .irq_support = false,
+ },
+ [VCNL4010] = {
+ .prod = "VCNL4010/4020",
+ .init = vcnl4000_init,
+ .measure_light = vcnl4000_measure_light,
+ .measure_proximity = vcnl4000_measure_proximity,
+ .set_power_state = vcnl4000_set_power_state,
+ .channels = vcnl4010_channels,
+ .num_channels = ARRAY_SIZE(vcnl4010_channels),
+ .info = &vcnl4010_info,
+ .irq_support = true,
+ },
+ [VCNL4040] = {
+ .prod = "VCNL4040",
+ .init = vcnl4200_init,
+ .measure_light = vcnl4200_measure_light,
+ .measure_proximity = vcnl4200_measure_proximity,
+ .set_power_state = vcnl4200_set_power_state,
+ .channels = vcnl4000_channels,
+ .num_channels = ARRAY_SIZE(vcnl4000_channels),
+ .info = &vcnl4000_info,
+ .irq_support = false,
+ },
+ [VCNL4200] = {
+ .prod = "VCNL4200",
+ .init = vcnl4200_init,
+ .measure_light = vcnl4200_measure_light,
+ .measure_proximity = vcnl4200_measure_proximity,
+ .set_power_state = vcnl4200_set_power_state,
+ .channels = vcnl4000_channels,
+ .num_channels = ARRAY_SIZE(vcnl4000_channels),
+ .info = &vcnl4000_info,
+ .irq_support = false,
+ },
+};
+
+static irqreturn_t vcnl4010_irq_thread(int irq, void *p)
+{
+ struct iio_dev *indio_dev = p;
+ struct vcnl4000_data *data = iio_priv(indio_dev);
+ unsigned long isr;
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(data->client, VCNL4010_ISR);
+ if (ret < 0)
+ goto end;
+
+ isr = ret;
+
+ if (isr & VCNL4010_INT_THR) {
+ if (test_bit(VCNL4010_INT_THR_LOW, &isr)) {
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(
+ IIO_PROXIMITY,
+ 1,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_FALLING),
+ iio_get_time_ns(indio_dev));
+ }
+
+ if (test_bit(VCNL4010_INT_THR_HIGH, &isr)) {
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(
+ IIO_PROXIMITY,
+ 1,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_RISING),
+ iio_get_time_ns(indio_dev));
+ }
+
+ i2c_smbus_write_byte_data(data->client, VCNL4010_ISR,
+ isr & VCNL4010_INT_THR);
+ }
+
+ if (isr & VCNL4010_INT_DRDY && iio_buffer_enabled(indio_dev))
+ iio_trigger_poll_chained(indio_dev->trig);
+
+end:
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t vcnl4010_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct vcnl4000_data *data = iio_priv(indio_dev);
+ const unsigned long *active_scan_mask = indio_dev->active_scan_mask;
+ u16 buffer[8] = {0}; /* 1x16-bit + ts */
+ bool data_read = false;
+ unsigned long isr;
+ int val = 0;
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(data->client, VCNL4010_ISR);
+ if (ret < 0)
+ goto end;
+
+ isr = ret;
+
+ if (test_bit(0, active_scan_mask)) {
+ if (test_bit(VCNL4010_INT_PROXIMITY, &isr)) {
+ ret = vcnl4000_read_data(data,
+ VCNL4000_PS_RESULT_HI,
+ &val);
+ if (ret < 0)
+ goto end;
+
+ buffer[0] = val;
+ data_read = true;
+ }
+ }
+
+ ret = i2c_smbus_write_byte_data(data->client, VCNL4010_ISR,
+ isr & VCNL4010_INT_DRDY);
+ if (ret < 0)
+ goto end;
+
+ if (!data_read)
+ goto end;
+
+ iio_push_to_buffers_with_timestamp(indio_dev, buffer,
+ iio_get_time_ns(indio_dev));
+
+end:
+ iio_trigger_notify_done(indio_dev->trig);
+ return IRQ_HANDLED;
+}
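The 16-byte "u16 buffer[8]" above is sized for the scan layout that iio_push_to_buffers_with_timestamp() expects: the s64 timestamp is stored 8-byte aligned at the end of the scan, so with the single 16-bit proximity channel the sample occupies bytes 0-1 and the timestamp bytes 8-15. A sketch of the equivalent layout (the struct is illustrative, not part of the driver):

/* Editorial sketch of the scan layout filled by the trigger handler above. */
struct vcnl4010_scan_sketch {
	u16 prox;	/* bytes 0..1: proximity sample */
	u16 pad[3];	/* bytes 2..7: padding so the timestamp is 8-byte aligned */
	s64 timestamp;	/* bytes 8..15: written by iio_push_to_buffers_with_timestamp() */
};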
+
+static int vcnl4010_buffer_postenable(struct iio_dev *indio_dev)
+{
+ struct vcnl4000_data *data = iio_priv(indio_dev);
+ int ret;
+ int cmd;
+
+ ret = iio_triggered_buffer_postenable(indio_dev);
+ if (ret)
+ return ret;
+
+ /* Do not enable the buffer if we are already capturing events. */
+ if (vcnl4010_is_in_periodic_mode(data)) {
+ ret = -EBUSY;
+ goto end;
+ }
+
+ ret = i2c_smbus_write_byte_data(data->client, VCNL4010_INT_CTRL,
+ VCNL4010_INT_PROX_EN);
+ if (ret < 0)
+ goto end;
+
+ cmd = VCNL4000_SELF_TIMED_EN | VCNL4000_PROX_EN;
+ ret = i2c_smbus_write_byte_data(data->client, VCNL4000_COMMAND, cmd);
+ if (ret < 0)
+ goto end;
+
+ return 0;
+end:
+ iio_triggered_buffer_predisable(indio_dev);
+
+ return ret;
+}
+
+static int vcnl4010_buffer_predisable(struct iio_dev *indio_dev)
+{
+ struct vcnl4000_data *data = iio_priv(indio_dev);
+ int ret, ret_disable;
+
+ ret = i2c_smbus_write_byte_data(data->client, VCNL4010_INT_CTRL, 0);
+ if (ret < 0)
+ goto end;
+
+ ret = i2c_smbus_write_byte_data(data->client, VCNL4000_COMMAND, 0);
+
+end:
+ ret_disable = iio_triggered_buffer_predisable(indio_dev);
+ if (ret == 0)
+ ret = ret_disable;
+
+ return ret;
+}
+
+static const struct iio_buffer_setup_ops vcnl4010_buffer_ops = {
+ .postenable = &vcnl4010_buffer_postenable,
+ .predisable = &vcnl4010_buffer_predisable,
+};
+
+static const struct iio_trigger_ops vcnl4010_trigger_ops = {
+ .validate_device = iio_trigger_validate_own_device,
+};
+
+static int vcnl4010_probe_trigger(struct iio_dev *indio_dev)
+{
+ struct vcnl4000_data *data = iio_priv(indio_dev);
+ struct i2c_client *client = data->client;
+ struct iio_trigger *trigger;
+
+ trigger = devm_iio_trigger_alloc(&client->dev, "%s-dev%d",
+ indio_dev->name, indio_dev->id);
+ if (!trigger)
+ return -ENOMEM;
+
+ trigger->dev.parent = &client->dev;
+ trigger->ops = &vcnl4010_trigger_ops;
+ iio_trigger_set_drvdata(trigger, indio_dev);
+
+ return devm_iio_trigger_register(&client->dev, trigger);
+}
+
static int vcnl4000_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -440,13 +1054,44 @@ static int vcnl4000_probe(struct i2c_client *client,
dev_dbg(&client->dev, "%s Ambient light/proximity sensor, Rev: %02x\n",
data->chip_spec->prod, data->rev);
+ if (device_property_read_u32(&client->dev, "proximity-near-level",
+ &data->near_level))
+ data->near_level = 0;
+
indio_dev->dev.parent = &client->dev;
- indio_dev->info = &vcnl4000_info;
- indio_dev->channels = vcnl4000_channels;
- indio_dev->num_channels = ARRAY_SIZE(vcnl4000_channels);
+ indio_dev->info = data->chip_spec->info;
+ indio_dev->channels = data->chip_spec->channels;
+ indio_dev->num_channels = data->chip_spec->num_channels;
indio_dev->name = VCNL4000_DRV_NAME;
indio_dev->modes = INDIO_DIRECT_MODE;
+ if (client->irq && data->chip_spec->irq_support) {
+ ret = devm_iio_triggered_buffer_setup(&client->dev, indio_dev,
+ NULL,
+ vcnl4010_trigger_handler,
+ &vcnl4010_buffer_ops);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "unable to setup iio triggered buffer\n");
+ return ret;
+ }
+
+ ret = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, vcnl4010_irq_thread,
+ IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT,
+ "vcnl4010_irq",
+ indio_dev);
+ if (ret < 0) {
+ dev_err(&client->dev, "irq request failed\n");
+ return ret;
+ }
+
+ ret = vcnl4010_probe_trigger(indio_dev);
+ if (ret < 0)
+ return ret;
+ }
+
ret = pm_runtime_set_active(&client->dev);
if (ret < 0)
goto fail_poweroff;
@@ -540,5 +1185,6 @@ static struct i2c_driver vcnl4000_driver = {
module_i2c_driver(vcnl4000_driver);
MODULE_AUTHOR("Peter Meerwald <pmeerw@pmeerw.net>");
+MODULE_AUTHOR("Mathieu Othacehe <m.othacehe@gmail.com>");
MODULE_DESCRIPTION("Vishay VCNL4000 proximity/ambient light sensor driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/iio/light/vl6180.c b/drivers/iio/light/vl6180.c
index d9533a76b8f6..ed7b02765b97 100644
--- a/drivers/iio/light/vl6180.c
+++ b/drivers/iio/light/vl6180.c
@@ -16,6 +16,7 @@
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/i2c.h>
#include <linux/mutex.h>
#include <linux/err.h>
@@ -537,7 +538,7 @@ MODULE_DEVICE_TABLE(i2c, vl6180_id);
static struct i2c_driver vl6180_driver = {
.driver = {
.name = VL6180_DRV_NAME,
- .of_match_table = of_match_ptr(vl6180_of_match),
+ .of_match_table = vl6180_of_match,
},
.probe = vl6180_probe,
.id_table = vl6180_id,
diff --git a/drivers/iio/light/zopt2201.c b/drivers/iio/light/zopt2201.c
index 5f54f39e7a4c..80ae530720cd 100644
--- a/drivers/iio/light/zopt2201.c
+++ b/drivers/iio/light/zopt2201.c
@@ -19,6 +19,8 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
+#include <asm/unaligned.h>
+
#define ZOPT2201_DRV_NAME "zopt2201"
/* Registers */
@@ -219,7 +221,7 @@ static int zopt2201_read(struct zopt2201_data *data, u8 reg)
goto fail;
mutex_unlock(&data->lock);
- return (buf[2] << 16) | (buf[1] << 8) | buf[0];
+ return get_unaligned_le24(&buf[0]);
fail:
mutex_unlock(&data->lock);
diff --git a/drivers/iio/magnetometer/ak8974.c b/drivers/iio/magnetometer/ak8974.c
index d32996702110..810fdfd37c88 100644
--- a/drivers/iio/magnetometer/ak8974.c
+++ b/drivers/iio/magnetometer/ak8974.c
@@ -49,6 +49,7 @@
#define AK8974_WHOAMI_VALUE_AMI306 0x46
#define AK8974_WHOAMI_VALUE_AMI305 0x47
#define AK8974_WHOAMI_VALUE_AK8974 0x48
+#define AK8974_WHOAMI_VALUE_HSCDTD008A 0x49
#define AK8974_DATA_X 0x10
#define AK8974_DATA_Y 0x12
@@ -140,6 +141,12 @@
#define AK8974_INT_CTRL_PULSE BIT(1) /* 0 = latched; 1 = pulse (50 usec) */
#define AK8974_INT_CTRL_RESDEF (AK8974_INT_CTRL_XYZEN | AK8974_INT_CTRL_POL)
+/* HSCDTD008A-specific control register */
+#define HSCDTD008A_CTRL4 0x1E
+#define HSCDTD008A_CTRL4_MMD BIT(7) /* must be set to 1 */
+#define HSCDTD008A_CTRL4_RANGE BIT(4) /* 0 = 14-bit output; 1 = 15-bit output */
+#define HSCDTD008A_CTRL4_RESDEF (HSCDTD008A_CTRL4_MMD | HSCDTD008A_CTRL4_RANGE)
+
/* The AMI305 has elaborate FW version and serial number registers */
#define AMI305_VER 0xE8
#define AMI305_SN 0xEA
@@ -241,10 +248,17 @@ static int ak8974_reset(struct ak8974 *ak8974)
ret = regmap_write(ak8974->map, AK8974_CTRL3, AK8974_CTRL3_RESDEF);
if (ret)
return ret;
- ret = regmap_write(ak8974->map, AK8974_INT_CTRL,
- AK8974_INT_CTRL_RESDEF);
- if (ret)
- return ret;
+ if (ak8974->variant != AK8974_WHOAMI_VALUE_HSCDTD008A) {
+ ret = regmap_write(ak8974->map, AK8974_INT_CTRL,
+ AK8974_INT_CTRL_RESDEF);
+ if (ret)
+ return ret;
+ } else {
+ ret = regmap_write(ak8974->map, HSCDTD008A_CTRL4,
+ HSCDTD008A_CTRL4_RESDEF);
+ if (ret)
+ return ret;
+ }
/* After reset, power off is default state */
return ak8974_set_power(ak8974, AK8974_PWR_OFF);
@@ -267,6 +281,8 @@ static int ak8974_configure(struct ak8974 *ak8974)
if (ret)
return ret;
}
+ if (ak8974->variant == AK8974_WHOAMI_VALUE_HSCDTD008A)
+ return 0;
ret = regmap_write(ak8974->map, AK8974_INT_CTRL, AK8974_INT_CTRL_POL);
if (ret)
return ret;
@@ -495,6 +511,10 @@ static int ak8974_detect(struct ak8974 *ak8974)
name = "ak8974";
dev_info(&ak8974->i2c->dev, "detected AK8974\n");
break;
+ case AK8974_WHOAMI_VALUE_HSCDTD008A:
+ name = "hscdtd008a";
+ dev_info(&ak8974->i2c->dev, "detected hscdtd008a\n");
+ break;
default:
dev_err(&ak8974->i2c->dev, "unsupported device (%02x) ",
whoami);
@@ -534,47 +554,103 @@ static int ak8974_detect(struct ak8974 *ak8974)
return 0;
}
+static int ak8974_measure_channel(struct ak8974 *ak8974, unsigned long address,
+ int *val)
+{
+ __le16 hw_values[3];
+ int ret;
+
+ pm_runtime_get_sync(&ak8974->i2c->dev);
+ mutex_lock(&ak8974->lock);
+
+ /*
+ * We read all axes and discard all but one; for optimized
+ * reading, use the triggered buffer.
+ */
+ ret = ak8974_trigmeas(ak8974);
+ if (ret)
+ goto out_unlock;
+ ret = ak8974_getresult(ak8974, hw_values);
+ if (ret)
+ goto out_unlock;
+ /*
+ * This explicit cast to (s16) is necessary as the measurement
+ * is done in 2's complement with positive and negative values.
+ * The following assignment to *val will then convert the signed
+ * s16 value to a signed int value.
+ */
+ *val = (s16)le16_to_cpu(hw_values[address]);
+out_unlock:
+ mutex_unlock(&ak8974->lock);
+ pm_runtime_mark_last_busy(&ak8974->i2c->dev);
+ pm_runtime_put_autosuspend(&ak8974->i2c->dev);
+
+ return ret;
+}
+
static int ak8974_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2,
long mask)
{
struct ak8974 *ak8974 = iio_priv(indio_dev);
- __le16 hw_values[3];
- int ret = -EINVAL;
-
- pm_runtime_get_sync(&ak8974->i2c->dev);
- mutex_lock(&ak8974->lock);
+ int ret;
switch (mask) {
case IIO_CHAN_INFO_RAW:
if (chan->address > 2) {
dev_err(&ak8974->i2c->dev, "faulty channel address\n");
- ret = -EIO;
- goto out_unlock;
+ return -EIO;
}
- ret = ak8974_trigmeas(ak8974);
- if (ret)
- goto out_unlock;
- ret = ak8974_getresult(ak8974, hw_values);
+ ret = ak8974_measure_channel(ak8974, chan->address, val);
if (ret)
- goto out_unlock;
-
- /*
- * We read all axes and discard all but one, for optimized
- * reading, use the triggered buffer.
- */
- *val = (s16)le16_to_cpu(hw_values[chan->address]);
-
- ret = IIO_VAL_INT;
+ return ret;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ switch (ak8974->variant) {
+ case AK8974_WHOAMI_VALUE_AMI306:
+ case AK8974_WHOAMI_VALUE_AMI305:
+ /*
+ * The datasheet for AMI305 and AMI306, page 6
+ * specifies the range of the sensor to be
+ * +/- 12 Gauss.
+ */
+ *val = 12;
+ /*
+ * 12 bits are used, +/- 2^11
+ * [ -2048 .. 2047 ] (manual page 20)
+ * [ 0xf800 .. 0x07ff ]
+ */
+ *val2 = 11;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ case AK8974_WHOAMI_VALUE_HSCDTD008A:
+ /*
+ * The datasheet for HSCDTD008A, page 3 specifies the
+ * range of the sensor as +/- 2.4 mT per axis, which
+ * corresponds to +/- 2400 uT = +/- 24 Gauss.
+ */
+ *val = 24;
+ /*
+ * 15 bits are used (set up in CTRL4), +/- 2^14
+ * [ -16384 .. 16383 ] (manual page 24)
+ * [ 0xc000 .. 0x3fff ]
+ */
+ *val2 = 14;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ default:
+ /* GUESSING +/- 12 Gauss */
+ *val = 12;
+ /* GUESSING 12 bits ADC +/- 2^11 */
+ *val2 = 11;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ }
+ break;
+ default:
+ /* Unknown request */
+ break;
}
- out_unlock:
- mutex_unlock(&ak8974->lock);
- pm_runtime_mark_last_busy(&ak8974->i2c->dev);
- pm_runtime_put_autosuspend(&ak8974->i2c->dev);
-
- return ret;
+ return -EINVAL;
}
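With IIO_VAL_FRACTIONAL_LOG2 the scale reported to user space is val / 2^val2, so the numbers above work out to roughly 12 / 2048 = 0.00586 Gauss per LSB for the AMI305/AMI306 (and the AK8974 guess) and 24 / 16384 = 0.00146 Gauss per LSB for the HSCDTD008A. A small illustrative helper, not part of the driver, showing the same arithmetic:

/* Editorial sketch: convert an IIO_VAL_FRACTIONAL_LOG2 pair to micro-Gauss per LSB. */
static inline int ak8974_scale_to_ugauss(int val, int val2)
{
	return (val * 1000000) >> val2;	/* (12, 11) -> 5859, (24, 14) -> 1464 */
}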
static void ak8974_fill_buffer(struct iio_dev *indio_dev)
@@ -631,27 +707,44 @@ static const struct iio_chan_spec_ext_info ak8974_ext_info[] = {
{ },
};
-#define AK8974_AXIS_CHANNEL(axis, index) \
+#define AK8974_AXIS_CHANNEL(axis, index, bits) \
{ \
.type = IIO_MAGN, \
.modified = 1, \
.channel2 = IIO_MOD_##axis, \
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
.ext_info = ak8974_ext_info, \
.address = index, \
.scan_index = index, \
.scan_type = { \
.sign = 's', \
- .realbits = 16, \
+ .realbits = bits, \
.storagebits = 16, \
.endianness = IIO_LE \
}, \
}
-static const struct iio_chan_spec ak8974_channels[] = {
- AK8974_AXIS_CHANNEL(X, 0),
- AK8974_AXIS_CHANNEL(Y, 1),
- AK8974_AXIS_CHANNEL(Z, 2),
+/*
+ * We have no datasheet for the AK8974 but we guess that its
+ * ADC is 12 bits. The AMI305 and AMI306 certainly have a 12-bit
+ * ADC.
+ */
+static const struct iio_chan_spec ak8974_12_bits_channels[] = {
+ AK8974_AXIS_CHANNEL(X, 0, 12),
+ AK8974_AXIS_CHANNEL(Y, 1, 12),
+ AK8974_AXIS_CHANNEL(Z, 2, 12),
+ IIO_CHAN_SOFT_TIMESTAMP(3),
+};
+
+/*
+ * The HSCDTD008A has 15 bits of resolution the way we set it up
+ * in CTRL4.
+ */
+static const struct iio_chan_spec ak8974_15_bits_channels[] = {
+ AK8974_AXIS_CHANNEL(X, 0, 15),
+ AK8974_AXIS_CHANNEL(Y, 1, 15),
+ AK8974_AXIS_CHANNEL(Z, 2, 15),
IIO_CHAN_SOFT_TIMESTAMP(3),
};
@@ -674,18 +767,18 @@ static bool ak8974_writeable_reg(struct device *dev, unsigned int reg)
case AK8974_INT_CTRL:
case AK8974_INT_THRES:
case AK8974_INT_THRES + 1:
+ return true;
case AK8974_PRESET:
case AK8974_PRESET + 1:
- return true;
+ return ak8974->variant != AK8974_WHOAMI_VALUE_HSCDTD008A;
case AK8974_OFFSET_X:
case AK8974_OFFSET_X + 1:
case AK8974_OFFSET_Y:
case AK8974_OFFSET_Y + 1:
case AK8974_OFFSET_Z:
case AK8974_OFFSET_Z + 1:
- if (ak8974->variant == AK8974_WHOAMI_VALUE_AK8974)
- return true;
- return false;
+ return ak8974->variant == AK8974_WHOAMI_VALUE_AK8974 ||
+ ak8974->variant == AK8974_WHOAMI_VALUE_HSCDTD008A;
case AMI305_OFFSET_X:
case AMI305_OFFSET_X + 1:
case AMI305_OFFSET_Y:
@@ -746,7 +839,12 @@ static int ak8974_probe(struct i2c_client *i2c,
ARRAY_SIZE(ak8974->regs),
ak8974->regs);
if (ret < 0) {
- dev_err(&i2c->dev, "cannot get regulators\n");
+ if (ret != -EPROBE_DEFER)
+ dev_err(&i2c->dev, "cannot get regulators: %d\n", ret);
+ else
+ dev_dbg(&i2c->dev,
+ "regulators unavailable, deferring probe\n");
+
return ret;
}
@@ -795,8 +893,21 @@ static int ak8974_probe(struct i2c_client *i2c,
pm_runtime_put(&i2c->dev);
indio_dev->dev.parent = &i2c->dev;
- indio_dev->channels = ak8974_channels;
- indio_dev->num_channels = ARRAY_SIZE(ak8974_channels);
+ switch (ak8974->variant) {
+ case AK8974_WHOAMI_VALUE_AMI306:
+ case AK8974_WHOAMI_VALUE_AMI305:
+ indio_dev->channels = ak8974_12_bits_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ak8974_12_bits_channels);
+ break;
+ case AK8974_WHOAMI_VALUE_HSCDTD008A:
+ indio_dev->channels = ak8974_15_bits_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ak8974_15_bits_channels);
+ break;
+ default:
+ indio_dev->channels = ak8974_12_bits_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ak8974_12_bits_channels);
+ break;
+ }
indio_dev->info = &ak8974_info;
indio_dev->available_scan_masks = ak8974_scan_masks;
indio_dev->modes = INDIO_DIRECT_MODE;
@@ -926,12 +1037,14 @@ static const struct i2c_device_id ak8974_id[] = {
{"ami305", 0 },
{"ami306", 0 },
{"ak8974", 0 },
+ {"hscdtd008a", 0 },
{}
};
MODULE_DEVICE_TABLE(i2c, ak8974_id);
static const struct of_device_id ak8974_of_match[] = {
{ .compatible = "asahi-kasei,ak8974", },
+ { .compatible = "alps,hscdtd008a", },
{}
};
MODULE_DEVICE_TABLE(of, ak8974_of_match);
diff --git a/drivers/iio/magnetometer/bmc150_magn_spi.c b/drivers/iio/magnetometer/bmc150_magn_spi.c
index ed9be0490d77..c6ed3ea8460a 100644
--- a/drivers/iio/magnetometer/bmc150_magn_spi.c
+++ b/drivers/iio/magnetometer/bmc150_magn_spi.c
@@ -22,8 +22,8 @@ static int bmc150_magn_spi_probe(struct spi_device *spi)
regmap = devm_regmap_init_spi(spi, &bmc150_magn_regmap_config);
if (IS_ERR(regmap)) {
- dev_err(&spi->dev, "Failed to register spi regmap %d\n",
- (int)PTR_ERR(regmap));
+ dev_err(&spi->dev, "Failed to register spi regmap: %pe\n",
+ regmap);
return PTR_ERR(regmap);
}
return bmc150_magn_probe(&spi->dev, regmap, spi->irq, id->name);
diff --git a/drivers/iio/magnetometer/hid-sensor-magn-3d.c b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
index 25e60b233e08..0c09daf87794 100644
--- a/drivers/iio/magnetometer/hid-sensor-magn-3d.c
+++ b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
@@ -14,8 +14,6 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
-#include <linux/iio/trigger_consumer.h>
-#include <linux/iio/triggered_buffer.h>
#include "../common/hid-sensors/hid-sensor-trigger.h"
enum magn_3d_channel {
@@ -519,18 +517,13 @@ static int hid_magn_3d_probe(struct platform_device *pdev)
indio_dev->name = name;
indio_dev->modes = INDIO_DIRECT_MODE;
- ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
- NULL, NULL);
- if (ret) {
- dev_err(&pdev->dev, "failed to initialize trigger buffer\n");
- return ret;
- }
atomic_set(&magn_state->magn_flux_attributes.data_ready, 0);
+
ret = hid_sensor_setup_trigger(indio_dev, name,
&magn_state->magn_flux_attributes);
if (ret < 0) {
dev_err(&pdev->dev, "trigger setup failed\n");
- goto error_unreg_buffer_funcs;
+ return ret;
}
ret = iio_device_register(indio_dev);
@@ -554,9 +547,7 @@ static int hid_magn_3d_probe(struct platform_device *pdev)
error_iio_unreg:
iio_device_unregister(indio_dev);
error_remove_trigger:
- hid_sensor_remove_trigger(&magn_state->magn_flux_attributes);
-error_unreg_buffer_funcs:
- iio_triggered_buffer_cleanup(indio_dev);
+ hid_sensor_remove_trigger(indio_dev, &magn_state->magn_flux_attributes);
return ret;
}
@@ -569,8 +560,7 @@ static int hid_magn_3d_remove(struct platform_device *pdev)
sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_COMPASS_3D);
iio_device_unregister(indio_dev);
- hid_sensor_remove_trigger(&magn_state->magn_flux_attributes);
- iio_triggered_buffer_cleanup(indio_dev);
+ hid_sensor_remove_trigger(indio_dev, &magn_state->magn_flux_attributes);
return 0;
}
diff --git a/drivers/iio/magnetometer/mmc35240.c b/drivers/iio/magnetometer/mmc35240.c
index 425cdd07b4e5..1787d656d009 100644
--- a/drivers/iio/magnetometer/mmc35240.c
+++ b/drivers/iio/magnetometer/mmc35240.c
@@ -239,7 +239,7 @@ static int mmc35240_init(struct mmc35240_data *data)
return ret;
ret = regmap_bulk_read(data->regmap, MMC35240_OTP_START_ADDR,
- (u8 *)otp_data, sizeof(otp_data));
+ otp_data, sizeof(otp_data));
if (ret < 0)
return ret;
@@ -295,7 +295,7 @@ static int mmc35240_read_measurement(struct mmc35240_data *data, __le16 buf[3])
if (ret < 0)
return ret;
- return regmap_bulk_read(data->regmap, MMC35240_REG_XOUT_L, (u8 *)buf,
+ return regmap_bulk_read(data->regmap, MMC35240_REG_XOUT_L, buf,
3 * sizeof(__le16));
}
diff --git a/drivers/iio/magnetometer/rm3100-core.c b/drivers/iio/magnetometer/rm3100-core.c
index 7c20918d8108..43a2e420c9c4 100644
--- a/drivers/iio/magnetometer/rm3100-core.c
+++ b/drivers/iio/magnetometer/rm3100-core.c
@@ -22,6 +22,8 @@
#include <linux/iio/triggered_buffer.h>
#include <linux/iio/trigger_consumer.h>
+#include <asm/unaligned.h>
+
#include "rm3100.h"
/* Cycle Count Registers. */
@@ -223,8 +225,7 @@ static int rm3100_read_mag(struct rm3100_data *data, int idx, int *val)
goto unlock_return;
mutex_unlock(&data->lock);
- *val = sign_extend32((buffer[0] << 16) | (buffer[1] << 8) | buffer[2],
- 23);
+ *val = sign_extend32(get_unaligned_be24(&buffer[0]), 23);
return IIO_VAL_INT;
diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c
index e68184a93a6d..79de721e6015 100644
--- a/drivers/iio/magnetometer/st_magn_core.c
+++ b/drivers/iio/magnetometer/st_magn_core.c
@@ -506,8 +506,7 @@ int st_magn_common_probe(struct iio_dev *indio_dev)
indio_dev->channels = mdata->sensor_settings->ch;
indio_dev->num_channels = ST_SENSORS_NUMBER_ALL_CHANNELS;
- mdata->current_fullscale = (struct st_sensor_fullscale_avl *)
- &mdata->sensor_settings->fs.fs_avl[0];
+ mdata->current_fullscale = &mdata->sensor_settings->fs.fs_avl[0];
mdata->odr = mdata->sensor_settings->odr.odr_avl[0].hz;
err = st_sensors_init_sensor(indio_dev, NULL);
diff --git a/drivers/iio/orientation/hid-sensor-incl-3d.c b/drivers/iio/orientation/hid-sensor-incl-3d.c
index 00af68764cda..6aac8bea233a 100644
--- a/drivers/iio/orientation/hid-sensor-incl-3d.c
+++ b/drivers/iio/orientation/hid-sensor-incl-3d.c
@@ -15,8 +15,6 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
-#include <linux/iio/trigger_consumer.h>
-#include <linux/iio/triggered_buffer.h>
#include "../common/hid-sensors/hid-sensor-trigger.h"
enum incl_3d_channel {
@@ -346,18 +344,13 @@ static int hid_incl_3d_probe(struct platform_device *pdev)
indio_dev->name = name;
indio_dev->modes = INDIO_DIRECT_MODE;
- ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
- NULL, NULL);
- if (ret) {
- dev_err(&pdev->dev, "failed to initialize trigger buffer\n");
- goto error_free_dev_mem;
- }
atomic_set(&incl_state->common_attributes.data_ready, 0);
+
ret = hid_sensor_setup_trigger(indio_dev, name,
&incl_state->common_attributes);
if (ret) {
dev_err(&pdev->dev, "trigger setup failed\n");
- goto error_unreg_buffer_funcs;
+ goto error_free_dev_mem;
}
ret = iio_device_register(indio_dev);
@@ -382,9 +375,7 @@ static int hid_incl_3d_probe(struct platform_device *pdev)
error_iio_unreg:
iio_device_unregister(indio_dev);
error_remove_trigger:
- hid_sensor_remove_trigger(&incl_state->common_attributes);
-error_unreg_buffer_funcs:
- iio_triggered_buffer_cleanup(indio_dev);
+ hid_sensor_remove_trigger(indio_dev, &incl_state->common_attributes);
error_free_dev_mem:
kfree(indio_dev->channels);
return ret;
@@ -399,8 +390,7 @@ static int hid_incl_3d_remove(struct platform_device *pdev)
sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_INCLINOMETER_3D);
iio_device_unregister(indio_dev);
- hid_sensor_remove_trigger(&incl_state->common_attributes);
- iio_triggered_buffer_cleanup(indio_dev);
+ hid_sensor_remove_trigger(indio_dev, &incl_state->common_attributes);
kfree(indio_dev->channels);
return 0;
diff --git a/drivers/iio/orientation/hid-sensor-rotation.c b/drivers/iio/orientation/hid-sensor-rotation.c
index 64ae7d04a200..b99f41240e3e 100644
--- a/drivers/iio/orientation/hid-sensor-rotation.c
+++ b/drivers/iio/orientation/hid-sensor-rotation.c
@@ -14,8 +14,6 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
-#include <linux/iio/trigger_consumer.h>
-#include <linux/iio/triggered_buffer.h>
#include "../common/hid-sensors/hid-sensor-trigger.h"
struct dev_rot_state {
@@ -288,18 +286,13 @@ static int hid_dev_rot_probe(struct platform_device *pdev)
indio_dev->name = name;
indio_dev->modes = INDIO_DIRECT_MODE;
- ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
- NULL, NULL);
- if (ret) {
- dev_err(&pdev->dev, "failed to initialize trigger buffer\n");
- return ret;
- }
atomic_set(&rot_state->common_attributes.data_ready, 0);
+
ret = hid_sensor_setup_trigger(indio_dev, name,
&rot_state->common_attributes);
if (ret) {
dev_err(&pdev->dev, "trigger setup failed\n");
- goto error_unreg_buffer_funcs;
+ return ret;
}
ret = iio_device_register(indio_dev);
@@ -323,9 +316,7 @@ static int hid_dev_rot_probe(struct platform_device *pdev)
error_iio_unreg:
iio_device_unregister(indio_dev);
error_remove_trigger:
- hid_sensor_remove_trigger(&rot_state->common_attributes);
-error_unreg_buffer_funcs:
- iio_triggered_buffer_cleanup(indio_dev);
+ hid_sensor_remove_trigger(indio_dev, &rot_state->common_attributes);
return ret;
}
@@ -338,8 +329,7 @@ static int hid_dev_rot_remove(struct platform_device *pdev)
sensor_hub_remove_callback(hsdev, hsdev->usage);
iio_device_unregister(indio_dev);
- hid_sensor_remove_trigger(&rot_state->common_attributes);
- iio_triggered_buffer_cleanup(indio_dev);
+ hid_sensor_remove_trigger(indio_dev, &rot_state->common_attributes);
return 0;
}
diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c
index 29c209cc1108..126a56d31b6e 100644
--- a/drivers/iio/pressure/bmp280-core.c
+++ b/drivers/iio/pressure/bmp280-core.c
@@ -271,6 +271,8 @@ static u32 bmp280_compensate_humidity(struct bmp280_data *data,
+ (s32)2097152) * calib->H2 + 8192) >> 14);
var -= ((((var >> 15) * (var >> 15)) >> 7) * (s32)calib->H1) >> 4;
+ var = clamp_val(var, 0, 419430400);
+
return var >> 12;
};
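The new clamp bound appears to match the vendor reference compensation code: 419430400 is 100 << 22, so after the final right shift by 12 the compensated humidity is capped at 102400, i.e. 100 %RH in the Q22.10 fixed-point format this helper returns. A hedged sketch of that bound (the macro names are illustrative, not part of the driver):

/* Editorial sketch of the clamp bound, assuming Q22.10 output (value = %RH * 1024). */
#define BMP280_HUMID_CLAMP_RAW	(100 << 22)			/* 419430400 */
#define BMP280_HUMID_CLAMP_OUT	(BMP280_HUMID_CLAMP_RAW >> 12)	/* 102400 == 100 * 1024 */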
@@ -337,8 +339,7 @@ static int bmp280_read_temp(struct bmp280_data *data,
__be32 tmp = 0;
s32 adc_temp, comp_temp;
- ret = regmap_bulk_read(data->regmap, BMP280_REG_TEMP_MSB,
- (u8 *) &tmp, 3);
+ ret = regmap_bulk_read(data->regmap, BMP280_REG_TEMP_MSB, &tmp, 3);
if (ret < 0) {
dev_err(data->dev, "failed to read temperature\n");
return ret;
@@ -377,8 +378,7 @@ static int bmp280_read_press(struct bmp280_data *data,
if (ret < 0)
return ret;
- ret = regmap_bulk_read(data->regmap, BMP280_REG_PRESS_MSB,
- (u8 *) &tmp, 3);
+ ret = regmap_bulk_read(data->regmap, BMP280_REG_PRESS_MSB, &tmp, 3);
if (ret < 0) {
dev_err(data->dev, "failed to read pressure\n");
return ret;
@@ -400,8 +400,8 @@ static int bmp280_read_press(struct bmp280_data *data,
static int bmp280_read_humid(struct bmp280_data *data, int *val, int *val2)
{
+ __be16 tmp;
int ret;
- __be16 tmp = 0;
s32 adc_humidity;
u32 comp_humidity;
@@ -410,8 +410,7 @@ static int bmp280_read_humid(struct bmp280_data *data, int *val, int *val2)
if (ret < 0)
return ret;
- ret = regmap_bulk_read(data->regmap, BMP280_REG_HUMIDITY_MSB,
- (u8 *) &tmp, 2);
+ ret = regmap_bulk_read(data->regmap, BMP280_REG_HUMIDITY_MSB, &tmp, 2);
if (ret < 0) {
dev_err(data->dev, "failed to read humidity\n");
return ret;
@@ -575,57 +574,38 @@ static int bmp280_write_raw(struct iio_dev *indio_dev,
return ret;
}
-static ssize_t bmp280_show_avail(char *buf, const int *vals, const int n)
+static int bmp280_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
{
- size_t len = 0;
- int i;
-
- for (i = 0; i < n; i++)
- len += scnprintf(buf + len, PAGE_SIZE - len, "%d ", vals[i]);
-
- buf[len - 1] = '\n';
-
- return len;
-}
-
-static ssize_t bmp280_show_temp_oversampling_avail(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct bmp280_data *data = iio_priv(dev_to_iio_dev(dev));
-
- return bmp280_show_avail(buf, data->chip_info->oversampling_temp_avail,
- data->chip_info->num_oversampling_temp_avail);
-}
-
-static ssize_t bmp280_show_press_oversampling_avail(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct bmp280_data *data = iio_priv(dev_to_iio_dev(dev));
+ struct bmp280_data *data = iio_priv(indio_dev);
- return bmp280_show_avail(buf, data->chip_info->oversampling_press_avail,
- data->chip_info->num_oversampling_press_avail);
+ switch (mask) {
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ switch (chan->type) {
+ case IIO_PRESSURE:
+ *vals = data->chip_info->oversampling_press_avail;
+ *length = data->chip_info->num_oversampling_press_avail;
+ break;
+ case IIO_TEMP:
+ *vals = data->chip_info->oversampling_temp_avail;
+ *length = data->chip_info->num_oversampling_temp_avail;
+ break;
+ default:
+ return -EINVAL;
+ }
+ *type = IIO_VAL_INT;
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
}
-static IIO_DEVICE_ATTR(in_temp_oversampling_ratio_available,
- S_IRUGO, bmp280_show_temp_oversampling_avail, NULL, 0);
-
-static IIO_DEVICE_ATTR(in_pressure_oversampling_ratio_available,
- S_IRUGO, bmp280_show_press_oversampling_avail, NULL, 0);
-
-static struct attribute *bmp280_attributes[] = {
- &iio_dev_attr_in_temp_oversampling_ratio_available.dev_attr.attr,
- &iio_dev_attr_in_pressure_oversampling_ratio_available.dev_attr.attr,
- NULL,
-};
-
-static const struct attribute_group bmp280_attrs_group = {
- .attrs = bmp280_attributes,
-};
-
static const struct iio_info bmp280_info = {
.read_raw = &bmp280_read_raw,
+ .read_avail = &bmp280_read_avail,
.write_raw = &bmp280_write_raw,
- .attrs = &bmp280_attrs_group,
};
static int bmp280_chip_config(struct bmp280_data *data)
@@ -713,7 +693,7 @@ static int bmp180_measure(struct bmp280_data *data, u8 ctrl_meas)
unsigned int ctrl;
if (data->use_eoc)
- init_completion(&data->done);
+ reinit_completion(&data->done);
ret = regmap_write(data->regmap, BMP280_REG_CTRL_MEAS, ctrl_meas);
if (ret)
@@ -752,14 +732,14 @@ static int bmp180_measure(struct bmp280_data *data, u8 ctrl_meas)
static int bmp180_read_adc_temp(struct bmp280_data *data, int *val)
{
+ __be16 tmp;
int ret;
- __be16 tmp = 0;
ret = bmp180_measure(data, BMP180_MEAS_TEMP);
if (ret)
return ret;
- ret = regmap_bulk_read(data->regmap, BMP180_REG_OUT_MSB, (u8 *)&tmp, 2);
+ ret = regmap_bulk_read(data->regmap, BMP180_REG_OUT_MSB, &tmp, 2);
if (ret)
return ret;
@@ -856,7 +836,7 @@ static int bmp180_read_adc_press(struct bmp280_data *data, int *val)
if (ret)
return ret;
- ret = regmap_bulk_read(data->regmap, BMP180_REG_OUT_MSB, (u8 *)&tmp, 3);
+ ret = regmap_bulk_read(data->regmap, BMP180_REG_OUT_MSB, &tmp, 3);
if (ret)
return ret;
@@ -965,10 +945,12 @@ static int bmp085_fetch_eoc_irq(struct device *dev,
irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq));
if (irq_trig != IRQF_TRIGGER_RISING) {
- dev_err(dev, "non-rising trigger given for EOC interrupt, "
- "trying to enforce it\n");
+ dev_err(dev, "non-rising trigger given for EOC interrupt, trying to enforce it\n");
irq_trig = IRQF_TRIGGER_RISING;
}
+
+ init_completion(&data->done);
+
ret = devm_request_threaded_irq(dev,
irq,
bmp085_eoc_irq,
@@ -1082,9 +1064,9 @@ int bmp280_common_probe(struct device *dev,
usleep_range(data->start_up_time, data->start_up_time + 100);
/* Bring chip out of reset if there is an assigned GPIO line */
- gpiod = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
/* Deassert the signal */
- if (!IS_ERR(gpiod)) {
+ if (gpiod) {
dev_info(dev, "release reset\n");
gpiod_set_value(gpiod, 0);
}
diff --git a/drivers/iio/pressure/hid-sensor-press.c b/drivers/iio/pressure/hid-sensor-press.c
index 953235052155..5e6663f757ae 100644
--- a/drivers/iio/pressure/hid-sensor-press.c
+++ b/drivers/iio/pressure/hid-sensor-press.c
@@ -14,8 +14,6 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
-#include <linux/iio/trigger_consumer.h>
-#include <linux/iio/triggered_buffer.h>
#include "../common/hid-sensors/hid-sensor-trigger.h"
#define CHANNEL_SCAN_INDEX_PRESSURE 0
@@ -290,18 +288,13 @@ static int hid_press_probe(struct platform_device *pdev)
indio_dev->name = name;
indio_dev->modes = INDIO_DIRECT_MODE;
- ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
- NULL, NULL);
- if (ret) {
- dev_err(&pdev->dev, "failed to initialize trigger buffer\n");
- goto error_free_dev_mem;
- }
atomic_set(&press_state->common_attributes.data_ready, 0);
+
ret = hid_sensor_setup_trigger(indio_dev, name,
&press_state->common_attributes);
if (ret) {
dev_err(&pdev->dev, "trigger setup failed\n");
- goto error_unreg_buffer_funcs;
+ goto error_free_dev_mem;
}
ret = iio_device_register(indio_dev);
@@ -325,9 +318,7 @@ static int hid_press_probe(struct platform_device *pdev)
error_iio_unreg:
iio_device_unregister(indio_dev);
error_remove_trigger:
- hid_sensor_remove_trigger(&press_state->common_attributes);
-error_unreg_buffer_funcs:
- iio_triggered_buffer_cleanup(indio_dev);
+ hid_sensor_remove_trigger(indio_dev, &press_state->common_attributes);
error_free_dev_mem:
kfree(indio_dev->channels);
return ret;
@@ -342,8 +333,7 @@ static int hid_press_remove(struct platform_device *pdev)
sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_PRESSURE);
iio_device_unregister(indio_dev);
- hid_sensor_remove_trigger(&press_state->common_attributes);
- iio_triggered_buffer_cleanup(indio_dev);
+ hid_sensor_remove_trigger(indio_dev, &press_state->common_attributes);
kfree(indio_dev->channels);
return 0;
diff --git a/drivers/iio/pressure/hp206c.c b/drivers/iio/pressure/hp206c.c
index 3ac3632e7242..1f931f5b7a65 100644
--- a/drivers/iio/pressure/hp206c.c
+++ b/drivers/iio/pressure/hp206c.c
@@ -18,6 +18,8 @@
#include <linux/util_macros.h>
#include <linux/acpi.h>
+#include <asm/unaligned.h>
+
/* I2C commands: */
#define HP206C_CMD_SOFT_RST 0x06
@@ -93,12 +95,12 @@ static int hp206c_read_20bit(struct i2c_client *client, u8 cmd)
int ret;
u8 values[3];
- ret = i2c_smbus_read_i2c_block_data(client, cmd, 3, values);
+ ret = i2c_smbus_read_i2c_block_data(client, cmd, sizeof(values), values);
if (ret < 0)
return ret;
- if (ret != 3)
+ if (ret != sizeof(values))
return -EIO;
- return ((values[0] & 0xF) << 16) | (values[1] << 8) | (values[2]);
+ return get_unaligned_be24(&values[0]) & GENMASK(19, 0);
}
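get_unaligned_be24() plus GENMASK(19, 0) reproduces the old open-coded extraction of the 20-bit result: for an illustrative reply of { 0x0A, 0x12, 0x34 } the big-endian 24-bit value is 0x0A1234, and masking to 20 bits leaves 0xA1234, exactly what ((values[0] & 0xF) << 16) | (values[1] << 8) | values[2] produced. A hedged check, not part of the driver:

/* Editorial example of the 20-bit unpacking above (sample bytes are made up). */
static inline u32 hp206c_unpack_example(void)
{
	const u8 values[3] = { 0x0A, 0x12, 0x34 };

	return get_unaligned_be24(&values[0]) & GENMASK(19, 0);	/* 0xA1234 */
}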
/* Spin for max 160ms until DEV_RDY is 1, or return error. */
diff --git a/drivers/iio/pressure/ms5611_i2c.c b/drivers/iio/pressure/ms5611_i2c.c
index 8089c59adce5..072c106dd66d 100644
--- a/drivers/iio/pressure/ms5611_i2c.c
+++ b/drivers/iio/pressure/ms5611_i2c.c
@@ -16,6 +16,8 @@
#include <linux/module.h>
#include <linux/of_device.h>
+#include <asm/unaligned.h>
+
#include "ms5611.h"
static int ms5611_i2c_reset(struct device *dev)
@@ -50,7 +52,7 @@ static int ms5611_i2c_read_adc(struct ms5611_state *st, s32 *val)
if (ret < 0)
return ret;
- *val = (buf[0] << 16) | (buf[1] << 8) | buf[2];
+ *val = get_unaligned_be24(&buf[0]);
return 0;
}
diff --git a/drivers/iio/pressure/ms5611_spi.c b/drivers/iio/pressure/ms5611_spi.c
index b463eaa799ab..4799aa57135e 100644
--- a/drivers/iio/pressure/ms5611_spi.c
+++ b/drivers/iio/pressure/ms5611_spi.c
@@ -11,6 +11,8 @@
#include <linux/spi/spi.h>
#include <linux/of_device.h>
+#include <asm/unaligned.h>
+
#include "ms5611.h"
static int ms5611_spi_reset(struct device *dev)
@@ -45,7 +47,7 @@ static int ms5611_spi_read_adc(struct device *dev, s32 *val)
if (ret < 0)
return ret;
- *val = (buf[0] << 16) | (buf[1] << 8) | buf[2];
+ *val = get_unaligned_be24(&buf[0]);
return 0;
}
diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
index bd972cec4830..789a2928504a 100644
--- a/drivers/iio/pressure/st_pressure_core.c
+++ b/drivers/iio/pressure/st_pressure_core.c
@@ -683,8 +683,7 @@ EXPORT_SYMBOL(st_press_get_settings);
int st_press_common_probe(struct iio_dev *indio_dev)
{
struct st_sensor_data *press_data = iio_priv(indio_dev);
- struct st_sensors_platform_data *pdata =
- (struct st_sensors_platform_data *)press_data->dev->platform_data;
+ struct st_sensors_platform_data *pdata = dev_get_platdata(press_data->dev);
int err;
indio_dev->modes = INDIO_DIRECT_MODE;
@@ -708,9 +707,7 @@ int st_press_common_probe(struct iio_dev *indio_dev)
indio_dev->channels = press_data->sensor_settings->ch;
indio_dev->num_channels = press_data->sensor_settings->num_ch;
- press_data->current_fullscale =
- (struct st_sensor_fullscale_avl *)
- &press_data->sensor_settings->fs.fs_avl[0];
+ press_data->current_fullscale = &press_data->sensor_settings->fs.fs_avl[0];
press_data->odr = press_data->sensor_settings->odr.odr_avl[0].hz;
diff --git a/drivers/iio/pressure/zpa2326.c b/drivers/iio/pressure/zpa2326.c
index 99dfe33ee402..37fe851f89af 100644
--- a/drivers/iio/pressure/zpa2326.c
+++ b/drivers/iio/pressure/zpa2326.c
@@ -64,6 +64,7 @@
#include <linux/iio/trigger.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
+#include <asm/unaligned.h>
#include "zpa2326.h"
/* 200 ms should be enough for the longest conversion time in one-shot mode. */
@@ -1005,22 +1006,20 @@ static int zpa2326_fetch_raw_sample(const struct iio_dev *indio_dev,
struct regmap *regs = ((struct zpa2326_private *)
iio_priv(indio_dev))->regmap;
int err;
+ u8 v[3];
switch (type) {
case IIO_PRESSURE:
zpa2326_dbg(indio_dev, "fetching raw pressure sample");
- err = regmap_bulk_read(regs, ZPA2326_PRESS_OUT_XL_REG, value,
- 3);
+ err = regmap_bulk_read(regs, ZPA2326_PRESS_OUT_XL_REG, v, sizeof(v));
if (err) {
zpa2326_warn(indio_dev, "failed to fetch pressure (%d)",
err);
return err;
}
- /* Pressure is a 24 bits wide little-endian unsigned int. */
- *value = (((u8 *)value)[2] << 16) | (((u8 *)value)[1] << 8) |
- ((u8 *)value)[0];
+ *value = get_unaligned_le24(&v[0]);
return IIO_VAL_INT;
diff --git a/drivers/iio/proximity/Kconfig b/drivers/iio/proximity/Kconfig
index 37606d400805..12672a0e89ed 100644
--- a/drivers/iio/proximity/Kconfig
+++ b/drivers/iio/proximity/Kconfig
@@ -101,6 +101,19 @@ config SRF04
To compile this driver as a module, choose M here: the
module will be called srf04.
+config SX9310
+ tristate "SX9310/SX9311 Semtech proximity sensor"
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ select REGMAP_I2C
+ depends on I2C
+ help
+ Say Y here to build a driver for Semtech's SX9310/SX9311 capacitive
+ proximity/button sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called sx9310.
+
config SX9500
tristate "SX9500 Semtech proximity sensor"
select IIO_BUFFER
@@ -127,6 +140,17 @@ config SRF08
To compile this driver as a module, choose M here: the
module will be called srf08.
+config VCNL3020
+ tristate "VCNL3020 proximity sensor"
+ select REGMAP_I2C
+ depends on I2C
+ help
+ Say Y here if you want to build a driver for the Vishay VCNL3020
+ proximity sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called vcnl3020.
+
config VL53L0X_I2C
tristate "STMicroelectronics VL53L0X ToF ranger sensor (I2C)"
depends on I2C
diff --git a/drivers/iio/proximity/Makefile b/drivers/iio/proximity/Makefile
index c591b019304e..9c1aca1a8b79 100644
--- a/drivers/iio/proximity/Makefile
+++ b/drivers/iio/proximity/Makefile
@@ -12,6 +12,8 @@ obj-$(CONFIG_PING) += ping.o
obj-$(CONFIG_RFD77402) += rfd77402.o
obj-$(CONFIG_SRF04) += srf04.o
obj-$(CONFIG_SRF08) += srf08.o
+obj-$(CONFIG_SX9310) += sx9310.o
obj-$(CONFIG_SX9500) += sx9500.o
+obj-$(CONFIG_VCNL3020) += vcnl3020.o
obj-$(CONFIG_VL53L0X_I2C) += vl53l0x-i2c.o
diff --git a/drivers/iio/proximity/ping.c b/drivers/iio/proximity/ping.c
index 12b893c5b0ee..2e99eeb27f2e 100644
--- a/drivers/iio/proximity/ping.c
+++ b/drivers/iio/proximity/ping.c
@@ -89,14 +89,14 @@ static irqreturn_t ping_handle_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int ping_read(struct ping_data *data)
+static int ping_read(struct iio_dev *indio_dev)
{
+ struct ping_data *data = iio_priv(indio_dev);
int ret;
ktime_t ktime_dt;
s64 dt_ns;
u32 time_ns, distance_mm;
struct platform_device *pdev = to_platform_device(data->dev);
- struct iio_dev *indio_dev = iio_priv_to_dev(data);
/*
* just one read-echo-cycle can take place at a time
@@ -228,7 +228,6 @@ static int ping_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *channel, int *val,
int *val2, long info)
{
- struct ping_data *data = iio_priv(indio_dev);
int ret;
if (channel->type != IIO_DISTANCE)
@@ -236,7 +235,7 @@ static int ping_read_raw(struct iio_dev *indio_dev,
switch (info) {
case IIO_CHAN_INFO_RAW:
- ret = ping_read(data);
+ ret = ping_read(indio_dev);
if (ret < 0)
return ret;
*val = ret;
diff --git a/drivers/iio/proximity/sx9310.c b/drivers/iio/proximity/sx9310.c
new file mode 100644
index 000000000000..d161f3061e35
--- /dev/null
+++ b/drivers/iio/proximity/sx9310.c
@@ -0,0 +1,1069 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2018 Google LLC.
+ *
+ * Driver for Semtech's SX9310/SX9311 capacitive proximity/button solution.
+ * Based on SX9500 driver and Semtech driver using the input framework
+ * <https://my.syncplicity.com/share/teouwsim8niiaud/
+ * linux-driver-SX9310_NoSmartHSensing>.
+ * Reworked April 2019 by Evan Green <evgreen@chromium.org>
+ * and January 2020 by Daniel Campello <campello@chromium.org>
+ */
+
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pm.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include <linux/iio/buffer.h>
+#include <linux/iio/events.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
+
+/* Register definitions. */
+#define SX9310_REG_IRQ_SRC 0x00
+#define SX9310_REG_STAT0 0x01
+#define SX9310_REG_STAT1 0x02
+#define SX9310_REG_IRQ_MSK 0x03
+#define SX9310_CONVDONE_IRQ BIT(3)
+#define SX9310_FAR_IRQ BIT(5)
+#define SX9310_CLOSE_IRQ BIT(6)
+#define SX9310_EVENT_IRQ (SX9310_FAR_IRQ | \
+ SX9310_CLOSE_IRQ)
+#define SX9310_REG_IRQ_FUNC 0x04
+
+#define SX9310_REG_PROX_CTRL0 0x10
+#define SX9310_REG_PROX_CTRL0_PROXSTAT2 0x10
+#define SX9310_REG_PROX_CTRL0_EN_MASK 0x0F
+#define SX9310_REG_PROX_CTRL1 0x11
+#define SX9310_REG_PROX_CTRL2 0x12
+#define SX9310_REG_PROX_CTRL2_COMBMODE_ALL 0x80
+#define SX9310_REG_PROX_CTRL2_SHIELDEN_DYNAMIC 0x04
+#define SX9310_REG_PROX_CTRL3 0x13
+#define SX9310_REG_PROX_CTRL3_GAIN0_X8 0x0c
+#define SX9310_REG_PROX_CTRL3_GAIN12_X4 0x02
+#define SX9310_REG_PROX_CTRL4 0x14
+#define SX9310_REG_PROX_CTRL4_RESOLUTION_FINEST 0x07
+#define SX9310_REG_PROX_CTRL5 0x15
+#define SX9310_REG_PROX_CTRL5_RANGE_SMALL 0xc0
+#define SX9310_REG_PROX_CTRL5_STARTUPSENS_CS1 0x04
+#define SX9310_REG_PROX_CTRL5_RAWFILT_1P25 0x02
+#define SX9310_REG_PROX_CTRL6 0x16
+#define SX9310_REG_PROX_CTRL6_COMP_COMMON 0x20
+#define SX9310_REG_PROX_CTRL7 0x17
+#define SX9310_REG_PROX_CTRL7_AVGNEGFILT_2 0x08
+#define SX9310_REG_PROX_CTRL7_AVGPOSFILT_512 0x05
+#define SX9310_REG_PROX_CTRL8 0x18
+#define SX9310_REG_PROX_CTRL9 0x19
+#define SX9310_REG_PROX_CTRL8_9_PTHRESH12_28 0x40
+#define SX9310_REG_PROX_CTRL8_9_PTHRESH_96 0x88
+#define SX9310_REG_PROX_CTRL8_9_BODYTHRESH_900 0x03
+#define SX9310_REG_PROX_CTRL8_9_BODYTHRESH_1500 0x05
+#define SX9310_REG_PROX_CTRL10 0x1a
+#define SX9310_REG_PROX_CTRL10_HYST_6PCT 0x10
+#define SX9310_REG_PROX_CTRL10_CLOSE_DEBOUNCE_8 0x12
+#define SX9310_REG_PROX_CTRL10_FAR_DEBOUNCE_8 0x03
+#define SX9310_REG_PROX_CTRL11 0x1b
+#define SX9310_REG_PROX_CTRL12 0x1c
+#define SX9310_REG_PROX_CTRL13 0x1d
+#define SX9310_REG_PROX_CTRL14 0x1e
+#define SX9310_REG_PROX_CTRL15 0x1f
+#define SX9310_REG_PROX_CTRL16 0x20
+#define SX9310_REG_PROX_CTRL17 0x21
+#define SX9310_REG_PROX_CTRL18 0x22
+#define SX9310_REG_PROX_CTRL19 0x23
+#define SX9310_REG_SAR_CTRL0 0x2a
+#define SX9310_REG_SAR_CTRL0_SARDEB_4_SAMPLES 0x40
+#define SX9310_REG_SAR_CTRL0_SARHYST_8 0x10
+#define SX9310_REG_SAR_CTRL1 0x2b
+/* Each increment of the slope register is 0.0078125. */
+#define SX9310_REG_SAR_CTRL1_SLOPE(_hnslope) (_hnslope / 78125)
+#define SX9310_REG_SAR_CTRL2 0x2c
+#define SX9310_REG_SAR_CTRL2_SAROFFSET_DEFAULT 0x3c
+
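The SLOPE() helper above divides by 78125 because one register step is 0.0078125, i.e. 78125e-7; the _hnslope argument therefore appears to be expected in units of 1e-7 (the "hn" presumably standing for hundred-nano). An illustrative check with a made-up argument value:

/* Editorial check of the SLOPE() encoding; 781250 * 1e-7 = 0.078125 = 10 steps. */
static_assert(SX9310_REG_SAR_CTRL1_SLOPE(781250) == 10);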
+#define SX9310_REG_SENSOR_SEL 0x30
+
+#define SX9310_REG_USE_MSB 0x31
+#define SX9310_REG_USE_LSB 0x32
+
+#define SX9310_REG_AVG_MSB 0x33
+#define SX9310_REG_AVG_LSB 0x34
+
+#define SX9310_REG_DIFF_MSB 0x35
+#define SX9310_REG_DIFF_LSB 0x36
+
+#define SX9310_REG_OFFSET_MSB 0x37
+#define SX9310_REG_OFFSET_LSB 0x38
+
+#define SX9310_REG_SAR_MSB 0x39
+#define SX9310_REG_SAR_LSB 0x3a
+
+#define SX9310_REG_I2CADDR 0x40
+#define SX9310_REG_PAUSE 0x41
+#define SX9310_REG_WHOAMI 0x42
+#define SX9310_WHOAMI_VALUE 0x01
+#define SX9311_WHOAMI_VALUE 0x02
+
+#define SX9310_REG_RESET 0x7f
+#define SX9310_SOFT_RESET 0xde
+
+#define SX9310_SCAN_PERIOD_MASK GENMASK(7, 4)
+#define SX9310_SCAN_PERIOD_SHIFT 4
+
+#define SX9310_COMPSTAT_MASK GENMASK(3, 0)
+
+/* 4 hardware channels, as defined in STAT0: COMB, CS2, CS1 and CS0. */
+#define SX9310_NUM_CHANNELS 4
+#define SX9310_CHAN_ENABLED_MASK GENMASK(3, 0)
+
+struct sx9310_data {
+ /* Serialize access to registers and channel configuration */
+ struct mutex mutex;
+ struct i2c_client *client;
+ struct iio_trigger *trig;
+ struct regmap *regmap;
+ /*
+ * Last reading of the proximity status for each channel.
+ * We only send an event to user space when this changes.
+ */
+ bool prox_stat[SX9310_NUM_CHANNELS];
+ bool trigger_enabled;
+ __be16 buffer[SX9310_NUM_CHANNELS +
+ 4]; /* 64-bit data + 64-bit timestamp */
+ /* Remember enabled channels and sample rate during suspend. */
+ unsigned int suspend_ctrl0;
+ struct completion completion;
+ unsigned int chan_read, chan_event;
+ int channel_users[SX9310_NUM_CHANNELS];
+ int whoami;
+};
+
+static const struct iio_event_spec sx9310_events[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE),
+ },
+};
+
+#define SX9310_NAMED_CHANNEL(idx, name) \
+ { \
+ .type = IIO_PROXIMITY, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .indexed = 1, \
+ .channel = idx, \
+ .extend_name = name, \
+ .address = SX9310_REG_DIFF_MSB, \
+ .event_spec = sx9310_events, \
+ .num_event_specs = ARRAY_SIZE(sx9310_events), \
+ .scan_index = idx, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 12, \
+ .storagebits = 16, \
+ .endianness = IIO_BE, \
+ }, \
+ }
+#define SX9310_CHANNEL(idx) SX9310_NAMED_CHANNEL(idx, NULL)
+
+static const struct iio_chan_spec sx9310_channels[] = {
+ SX9310_CHANNEL(0), /* CS0 */
+ SX9310_CHANNEL(1), /* CS1 */
+ SX9310_CHANNEL(2), /* CS2 */
+ SX9310_NAMED_CHANNEL(3, "comb"), /* COMB */
+
+ IIO_CHAN_SOFT_TIMESTAMP(4),
+};
+
+/*
+ * Each entry contains the integer part (val) and the fractional part
+ * (val2), in microhertz. It conforms to the IIO output IIO_VAL_INT_PLUS_MICRO.
+ */
+static const struct {
+ int val;
+ int val2;
+} sx9310_samp_freq_table[] = {
+ { 500, 0 }, /* 0000: Min (no idle time) */
+ { 66, 666666 }, /* 0001: 15 ms */
+ { 33, 333333 }, /* 0010: 30 ms (Typ.) */
+ { 22, 222222 }, /* 0011: 45 ms */
+ { 16, 666666 }, /* 0100: 60 ms */
+ { 11, 111111 }, /* 0101: 90 ms */
+ { 8, 333333 }, /* 0110: 120 ms */
+ { 5, 0 }, /* 0111: 200 ms */
+ { 2, 500000 }, /* 1000: 400 ms */
+ { 1, 666666 }, /* 1001: 600 ms */
+ { 1, 250000 }, /* 1010: 800 ms */
+ { 1, 0 }, /* 1011: 1 s */
+ { 0, 500000 }, /* 1100: 2 s */
+ { 0, 333333 }, /* 1101: 3 s */
+ { 0, 250000 }, /* 1110: 4 s */
+ { 0, 200000 }, /* 1111: 5 s */
+};
+static const unsigned int sx9310_scan_period_table[] = {
+ 2, 15, 30, 45, 60, 90, 120, 200,
+ 400, 600, 800, 1000, 2000, 3000, 4000, 5000,
+};
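The two tables describe the same 4-bit SCANPERIOD field from opposite directions: each sampling frequency entry is simply 1 / scan period, e.g. the 30 ms period (code 0b0010) corresponds to { 33, 333333 }, i.e. 33.333333 Hz, and the 2 ms minimum corresponds to 500 Hz. An illustrative conversion helper, not part of the driver:

/* Editorial sketch: period in ms -> sampling frequency in mHz (30 -> 33333). */
static inline unsigned int sx9310_period_ms_to_mhz(unsigned int period_ms)
{
	return 1000000 / period_ms;
}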
+
+static ssize_t sx9310_show_samp_freq_avail(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ size_t len = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sx9310_samp_freq_table); i++)
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%d.%d ",
+ sx9310_samp_freq_table[i].val,
+ sx9310_samp_freq_table[i].val2);
+ buf[len - 1] = '\n';
+ return len;
+}
+static IIO_DEV_ATTR_SAMP_FREQ_AVAIL(sx9310_show_samp_freq_avail);
+
+static const struct regmap_range sx9310_writable_reg_ranges[] = {
+ regmap_reg_range(SX9310_REG_IRQ_MSK, SX9310_REG_IRQ_FUNC),
+ regmap_reg_range(SX9310_REG_PROX_CTRL0, SX9310_REG_PROX_CTRL19),
+ regmap_reg_range(SX9310_REG_SAR_CTRL0, SX9310_REG_SAR_CTRL2),
+ regmap_reg_range(SX9310_REG_SENSOR_SEL, SX9310_REG_SENSOR_SEL),
+ regmap_reg_range(SX9310_REG_OFFSET_MSB, SX9310_REG_OFFSET_LSB),
+ regmap_reg_range(SX9310_REG_PAUSE, SX9310_REG_PAUSE),
+ regmap_reg_range(SX9310_REG_RESET, SX9310_REG_RESET),
+};
+
+static const struct regmap_access_table sx9310_writeable_regs = {
+ .yes_ranges = sx9310_writable_reg_ranges,
+ .n_yes_ranges = ARRAY_SIZE(sx9310_writable_reg_ranges),
+};
+
+static const struct regmap_range sx9310_readable_reg_ranges[] = {
+ regmap_reg_range(SX9310_REG_IRQ_SRC, SX9310_REG_IRQ_FUNC),
+ regmap_reg_range(SX9310_REG_PROX_CTRL0, SX9310_REG_PROX_CTRL19),
+ regmap_reg_range(SX9310_REG_SAR_CTRL0, SX9310_REG_SAR_CTRL2),
+ regmap_reg_range(SX9310_REG_SENSOR_SEL, SX9310_REG_SAR_LSB),
+ regmap_reg_range(SX9310_REG_I2CADDR, SX9310_REG_WHOAMI),
+ regmap_reg_range(SX9310_REG_RESET, SX9310_REG_RESET),
+};
+
+static const struct regmap_access_table sx9310_readable_regs = {
+ .yes_ranges = sx9310_readable_reg_ranges,
+ .n_yes_ranges = ARRAY_SIZE(sx9310_readable_reg_ranges),
+};
+
+static const struct regmap_range sx9310_volatile_reg_ranges[] = {
+ regmap_reg_range(SX9310_REG_IRQ_SRC, SX9310_REG_STAT1),
+ regmap_reg_range(SX9310_REG_USE_MSB, SX9310_REG_DIFF_LSB),
+ regmap_reg_range(SX9310_REG_SAR_MSB, SX9310_REG_SAR_LSB),
+ regmap_reg_range(SX9310_REG_RESET, SX9310_REG_RESET),
+};
+
+static const struct regmap_access_table sx9310_volatile_regs = {
+ .yes_ranges = sx9310_volatile_reg_ranges,
+ .n_yes_ranges = ARRAY_SIZE(sx9310_volatile_reg_ranges),
+};
+
+static const struct regmap_config sx9310_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = SX9310_REG_RESET,
+ .cache_type = REGCACHE_RBTREE,
+
+ .wr_table = &sx9310_writeable_regs,
+ .rd_table = &sx9310_readable_regs,
+ .volatile_table = &sx9310_volatile_regs,
+};
+
+static int sx9310_update_chan_en(struct sx9310_data *data,
+ unsigned int chan_read,
+ unsigned int chan_event)
+{
+ int ret;
+
+ if ((data->chan_read | data->chan_event) != (chan_read | chan_event)) {
+ ret = regmap_update_bits(data->regmap, SX9310_REG_PROX_CTRL0,
+ SX9310_CHAN_ENABLED_MASK,
+ chan_read | chan_event);
+ if (ret)
+ return ret;
+ }
+ data->chan_read = chan_read;
+ data->chan_event = chan_event;
+ return 0;
+}
+
+static int sx9310_get_read_channel(struct sx9310_data *data, int channel)
+{
+ return sx9310_update_chan_en(data, data->chan_read | BIT(channel),
+ data->chan_event);
+}
+
+static int sx9310_put_read_channel(struct sx9310_data *data, int channel)
+{
+ return sx9310_update_chan_en(data, data->chan_read & ~BIT(channel),
+ data->chan_event);
+}
+
+static int sx9310_get_event_channel(struct sx9310_data *data, int channel)
+{
+ return sx9310_update_chan_en(data, data->chan_read,
+ data->chan_event | BIT(channel));
+}
+
+static int sx9310_put_event_channel(struct sx9310_data *data, int channel)
+{
+ return sx9310_update_chan_en(data, data->chan_read,
+ data->chan_event & ~BIT(channel));
+}
+
+static int sx9310_enable_irq(struct sx9310_data *data, unsigned int irq)
+{
+ return regmap_update_bits(data->regmap, SX9310_REG_IRQ_MSK, irq, irq);
+}
+
+static int sx9310_disable_irq(struct sx9310_data *data, unsigned int irq)
+{
+ return regmap_update_bits(data->regmap, SX9310_REG_IRQ_MSK, irq, 0);
+}
+
+static int sx9310_read_prox_data(struct sx9310_data *data,
+ const struct iio_chan_spec *chan, __be16 *val)
+{
+ int ret;
+
+ ret = regmap_write(data->regmap, SX9310_REG_SENSOR_SEL, chan->channel);
+ if (ret < 0)
+ return ret;
+
+ return regmap_bulk_read(data->regmap, chan->address, val, 2);
+}
+
+/*
+ * If we have no interrupt support, we have to wait for a scan period
+ * after enabling a channel to get a result.
+ */
+static int sx9310_wait_for_sample(struct sx9310_data *data)
+{
+ int ret;
+ unsigned int val;
+
+ ret = regmap_read(data->regmap, SX9310_REG_PROX_CTRL0, &val);
+ if (ret < 0)
+ return ret;
+
+ val = (val & SX9310_SCAN_PERIOD_MASK) >> SX9310_SCAN_PERIOD_SHIFT;
+
+ msleep(sx9310_scan_period_table[val]);
+
+ return 0;
+}
+
+static int sx9310_read_proximity(struct sx9310_data *data,
+ const struct iio_chan_spec *chan, int *val)
+{
+ int ret = 0;
+ __be16 rawval;
+
+ mutex_lock(&data->mutex);
+
+ ret = sx9310_get_read_channel(data, chan->channel);
+ if (ret < 0)
+ goto out;
+
+ ret = sx9310_enable_irq(data, SX9310_CONVDONE_IRQ);
+ if (ret < 0)
+ goto out_put_channel;
+
+ mutex_unlock(&data->mutex);
+
+ if (data->client->irq > 0) {
+ ret = wait_for_completion_interruptible(&data->completion);
+ reinit_completion(&data->completion);
+ } else {
+ ret = sx9310_wait_for_sample(data);
+ }
+
+ mutex_lock(&data->mutex);
+
+ if (ret < 0)
+ goto out_disable_irq;
+
+ ret = sx9310_read_prox_data(data, chan, &rawval);
+ if (ret < 0)
+ goto out_disable_irq;
+
+ *val = sign_extend32(be16_to_cpu(rawval),
+ (chan->address == SX9310_REG_DIFF_MSB ? 11 : 15));
+
+ ret = sx9310_disable_irq(data, SX9310_CONVDONE_IRQ);
+ if (ret < 0)
+ goto out_put_channel;
+
+ ret = sx9310_put_read_channel(data, chan->channel);
+ if (ret < 0)
+ goto out;
+
+ mutex_unlock(&data->mutex);
+
+ return IIO_VAL_INT;
+
+out_disable_irq:
+ sx9310_disable_irq(data, SX9310_CONVDONE_IRQ);
+out_put_channel:
+ sx9310_put_read_channel(data, chan->channel);
+out:
+ mutex_unlock(&data->mutex);
+
+ return ret;
+}
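The bit index passed to sign_extend32() above depends on which register backs the channel: SX9310_REG_DIFF_MSB holds a 12-bit two's-complement value (bit 11 is the sign bit), while the other result registers are treated as signed 16-bit quantities. A couple of illustrative values:

/*
 * Editorial examples (raw values are made up):
 *   sign_extend32(0x0800, 11) == -2048, sign_extend32(0x07ff, 11) == 2047
 *   sign_extend32(0x8000, 15) == -32768 for the 16-bit registers
 */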
+
+static int sx9310_read_samp_freq(struct sx9310_data *data, int *val, int *val2)
+{
+ unsigned int regval;
+ int ret = regmap_read(data->regmap, SX9310_REG_PROX_CTRL0, &regval);
+
+ if (ret < 0)
+ return ret;
+
+ regval = (regval & SX9310_SCAN_PERIOD_MASK) >> SX9310_SCAN_PERIOD_SHIFT;
+ *val = sx9310_samp_freq_table[regval].val;
+ *val2 = sx9310_samp_freq_table[regval].val2;
+
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+
+static int sx9310_read_raw(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, int *val,
+ int *val2, long mask)
+{
+ struct sx9310_data *data = iio_priv(indio_dev);
+ int ret;
+
+ if (chan->type != IIO_PROXIMITY)
+ return -EINVAL;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+
+ ret = sx9310_read_proximity(data, chan, val);
+ iio_device_release_direct_mode(indio_dev);
+ return ret;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return sx9310_read_samp_freq(data, val, val2);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int sx9310_set_samp_freq(struct sx9310_data *data, int val, int val2)
+{
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(sx9310_samp_freq_table); i++)
+ if (val == sx9310_samp_freq_table[i].val &&
+ val2 == sx9310_samp_freq_table[i].val2)
+ break;
+
+ if (i == ARRAY_SIZE(sx9310_samp_freq_table))
+ return -EINVAL;
+
+ mutex_lock(&data->mutex);
+
+ ret = regmap_update_bits(data->regmap, SX9310_REG_PROX_CTRL0,
+ SX9310_SCAN_PERIOD_MASK,
+ i << SX9310_SCAN_PERIOD_SHIFT);
+
+ mutex_unlock(&data->mutex);
+
+ return ret;
+}
+
+static int sx9310_write_raw(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, int val, int val2,
+ long mask)
+{
+ struct sx9310_data *data = iio_priv(indio_dev);
+
+ if (chan->type != IIO_PROXIMITY)
+ return -EINVAL;
+
+ if (mask != IIO_CHAN_INFO_SAMP_FREQ)
+ return -EINVAL;
+
+ return sx9310_set_samp_freq(data, val, val2);
+}
+
+static irqreturn_t sx9310_irq_handler(int irq, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ struct sx9310_data *data = iio_priv(indio_dev);
+
+ if (data->trigger_enabled)
+ iio_trigger_poll(data->trig);
+
+ /*
+ * Even if no event is enabled, we need to wake the thread to
+ * clear the interrupt state by reading SX9310_REG_IRQ_SRC. It
+ * is not possible to do that here because regmap_read takes a
+ * mutex.
+ */
+ return IRQ_WAKE_THREAD;
+}
+
+static void sx9310_push_events(struct iio_dev *indio_dev)
+{
+ int ret;
+ unsigned int val, chan;
+ struct sx9310_data *data = iio_priv(indio_dev);
+ s64 timestamp = iio_get_time_ns(indio_dev);
+
+ /* Read proximity state on all channels */
+ ret = regmap_read(data->regmap, SX9310_REG_STAT0, &val);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "i2c transfer error in irq\n");
+ return;
+ }
+
+ for (chan = 0; chan < SX9310_NUM_CHANNELS; chan++) {
+ int dir;
+ u64 ev;
+ bool new_prox = val & BIT(chan);
+
+ if (!(data->chan_event & BIT(chan)))
+ continue;
+ if (new_prox == data->prox_stat[chan])
+ /* No change on this channel. */
+ continue;
+
+ dir = new_prox ? IIO_EV_DIR_FALLING : IIO_EV_DIR_RISING;
+ ev = IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, chan,
+ IIO_EV_TYPE_THRESH, dir);
+
+ iio_push_event(indio_dev, ev, timestamp);
+ data->prox_stat[chan] = new_prox;
+ }
+}
+
+static irqreturn_t sx9310_irq_thread_handler(int irq, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ struct sx9310_data *data = iio_priv(indio_dev);
+ int ret;
+ unsigned int val;
+
+ mutex_lock(&data->mutex);
+
+ ret = regmap_read(data->regmap, SX9310_REG_IRQ_SRC, &val);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "i2c transfer error in irq\n");
+ goto out;
+ }
+
+ if (val & SX9310_EVENT_IRQ)
+ sx9310_push_events(indio_dev);
+
+ if (val & SX9310_CONVDONE_IRQ)
+ complete(&data->completion);
+
+out:
+ mutex_unlock(&data->mutex);
+
+ return IRQ_HANDLED;
+}
+
+static int sx9310_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct sx9310_data *data = iio_priv(indio_dev);
+
+ return !!(data->chan_event & BIT(chan->channel));
+}
+
+static int sx9310_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir, int state)
+{
+ struct sx9310_data *data = iio_priv(indio_dev);
+ int ret;
+
+ /* If the state hasn't changed, there's nothing to do. */
+ if (!!(data->chan_event & BIT(chan->channel)) == state)
+ return 0;
+
+ mutex_lock(&data->mutex);
+ if (state) {
+ ret = sx9310_get_event_channel(data, chan->channel);
+ if (ret < 0)
+ goto out_unlock;
+ if (!(data->chan_event & ~BIT(chan->channel))) {
+ ret = sx9310_enable_irq(data, SX9310_EVENT_IRQ);
+ if (ret < 0)
+ sx9310_put_event_channel(data, chan->channel);
+ }
+ } else {
+ ret = sx9310_put_event_channel(data, chan->channel);
+ if (ret < 0)
+ goto out_unlock;
+ if (!data->chan_event) {
+ ret = sx9310_disable_irq(data, SX9310_EVENT_IRQ);
+ if (ret < 0)
+ sx9310_get_event_channel(data, chan->channel);
+ }
+ }
+
+out_unlock:
+ mutex_unlock(&data->mutex);
+ return ret;
+}
+
+static struct attribute *sx9310_attributes[] = {
+ &iio_dev_attr_sampling_frequency_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group sx9310_attribute_group = {
+ .attrs = sx9310_attributes,
+};
+
+static const struct iio_info sx9310_info = {
+ .attrs = &sx9310_attribute_group,
+ .read_raw = sx9310_read_raw,
+ .write_raw = sx9310_write_raw,
+ .read_event_config = sx9310_read_event_config,
+ .write_event_config = sx9310_write_event_config,
+};
+
+static int sx9310_set_trigger_state(struct iio_trigger *trig, bool state)
+{
+ struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+ struct sx9310_data *data = iio_priv(indio_dev);
+ int ret = 0;
+
+ mutex_lock(&data->mutex);
+
+ if (state)
+ ret = sx9310_enable_irq(data, SX9310_CONVDONE_IRQ);
+ else if (!data->chan_read)
+ ret = sx9310_disable_irq(data, SX9310_CONVDONE_IRQ);
+ if (ret < 0)
+ goto out;
+
+ data->trigger_enabled = state;
+
+out:
+ mutex_unlock(&data->mutex);
+
+ return ret;
+}
+
+static const struct iio_trigger_ops sx9310_trigger_ops = {
+ .set_trigger_state = sx9310_set_trigger_state,
+};
+
+static irqreturn_t sx9310_trigger_handler(int irq, void *private)
+{
+ struct iio_poll_func *pf = private;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct sx9310_data *data = iio_priv(indio_dev);
+ __be16 val;
+ int bit, ret, i = 0;
+
+ mutex_lock(&data->mutex);
+
+ for_each_set_bit(bit, indio_dev->active_scan_mask,
+ indio_dev->masklength) {
+ ret = sx9310_read_prox_data(data, &indio_dev->channels[bit],
+ &val);
+ if (ret < 0)
+ goto out;
+
+ data->buffer[i++] = val;
+ }
+
+ iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
+ pf->timestamp);
+
+out:
+ mutex_unlock(&data->mutex);
+
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static int sx9310_buffer_preenable(struct iio_dev *indio_dev)
+{
+ struct sx9310_data *data = iio_priv(indio_dev);
+ unsigned int channels = 0;
+ int bit, ret;
+
+ mutex_lock(&data->mutex);
+ for_each_set_bit(bit, indio_dev->active_scan_mask,
+ indio_dev->masklength)
+ channels |= BIT(indio_dev->channels[bit].channel);
+
+ ret = sx9310_update_chan_en(data, channels, data->chan_event);
+ mutex_unlock(&data->mutex);
+ return ret;
+}
+
+static int sx9310_buffer_postdisable(struct iio_dev *indio_dev)
+{
+ struct sx9310_data *data = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&data->mutex);
+ ret = sx9310_update_chan_en(data, 0, data->chan_event);
+ mutex_unlock(&data->mutex);
+ return ret;
+}
+
+static const struct iio_buffer_setup_ops sx9310_buffer_setup_ops = {
+ .preenable = sx9310_buffer_preenable,
+ .postenable = iio_triggered_buffer_postenable,
+ .predisable = iio_triggered_buffer_predisable,
+ .postdisable = sx9310_buffer_postdisable,
+};
+
+struct sx9310_reg_default {
+ u8 reg;
+ u8 def;
+};
+
+#define SX_INIT(_reg, _def) \
+ { \
+ .reg = SX9310_REG_##_reg, \
+ .def = _def, \
+ }
+
+static const struct sx9310_reg_default sx9310_default_regs[] = {
+ SX_INIT(IRQ_MSK, 0x00),
+ SX_INIT(IRQ_FUNC, 0x00),
+ /*
+ * The lower 4 bits should not be set as they enable sensor measurements.
+ * Turning the detection on before the configuration values are set to
+ * good values can cause the device to return erroneous readings.
+ */
+ SX_INIT(PROX_CTRL0, SX9310_REG_PROX_CTRL0_PROXSTAT2),
+ SX_INIT(PROX_CTRL1, 0x00),
+ SX_INIT(PROX_CTRL2, SX9310_REG_PROX_CTRL2_COMBMODE_ALL |
+ SX9310_REG_PROX_CTRL2_SHIELDEN_DYNAMIC),
+ SX_INIT(PROX_CTRL3, SX9310_REG_PROX_CTRL3_GAIN0_X8 |
+ SX9310_REG_PROX_CTRL3_GAIN12_X4),
+ SX_INIT(PROX_CTRL4, SX9310_REG_PROX_CTRL4_RESOLUTION_FINEST),
+ SX_INIT(PROX_CTRL5, SX9310_REG_PROX_CTRL5_RANGE_SMALL |
+ SX9310_REG_PROX_CTRL5_STARTUPSENS_CS1 |
+ SX9310_REG_PROX_CTRL5_RAWFILT_1P25),
+ SX_INIT(PROX_CTRL6, SX9310_REG_PROX_CTRL6_COMP_COMMON),
+ SX_INIT(PROX_CTRL7, SX9310_REG_PROX_CTRL7_AVGNEGFILT_2 |
+ SX9310_REG_PROX_CTRL7_AVGPOSFILT_512),
+ SX_INIT(PROX_CTRL8, SX9310_REG_PROX_CTRL8_9_PTHRESH_96 |
+ SX9310_REG_PROX_CTRL8_9_BODYTHRESH_1500),
+ SX_INIT(PROX_CTRL9, SX9310_REG_PROX_CTRL8_9_PTHRESH12_28 |
+ SX9310_REG_PROX_CTRL8_9_BODYTHRESH_900),
+ SX_INIT(PROX_CTRL10, SX9310_REG_PROX_CTRL10_HYST_6PCT |
+ SX9310_REG_PROX_CTRL10_CLOSE_DEBOUNCE_8 |
+ SX9310_REG_PROX_CTRL10_FAR_DEBOUNCE_8),
+ SX_INIT(PROX_CTRL11, 0x00),
+ SX_INIT(PROX_CTRL12, 0x00),
+ SX_INIT(PROX_CTRL13, 0x00),
+ SX_INIT(PROX_CTRL14, 0x00),
+ SX_INIT(PROX_CTRL15, 0x00),
+ SX_INIT(PROX_CTRL16, 0x00),
+ SX_INIT(PROX_CTRL17, 0x00),
+ SX_INIT(PROX_CTRL18, 0x00),
+ SX_INIT(PROX_CTRL19, 0x00),
+ SX_INIT(SAR_CTRL0, SX9310_REG_SAR_CTRL0_SARDEB_4_SAMPLES |
+ SX9310_REG_SAR_CTRL0_SARHYST_8),
+ SX_INIT(SAR_CTRL1, SX9310_REG_SAR_CTRL1_SLOPE(10781250)),
+ SX_INIT(SAR_CTRL2, SX9310_REG_SAR_CTRL2_SAROFFSET_DEFAULT),
+};
+
+/* Activate all channels and perform an initial compensation. */
+static int sx9310_init_compensation(struct iio_dev *indio_dev)
+{
+ struct sx9310_data *data = iio_priv(indio_dev);
+ int i, ret;
+ unsigned int val;
+ unsigned int ctrl0;
+
+ ret = regmap_read(data->regmap, SX9310_REG_PROX_CTRL0, &ctrl0);
+ if (ret < 0)
+ return ret;
+
+ /* run the compensation phase on all channels */
+ ret = regmap_write(data->regmap, SX9310_REG_PROX_CTRL0,
+ ctrl0 | SX9310_REG_PROX_CTRL0_EN_MASK);
+ if (ret < 0)
+ return ret;
+
+ for (i = 100; i >= 0; i--) {
+ msleep(20);
+ ret = regmap_read(data->regmap, SX9310_REG_STAT1, &val);
+ if (ret < 0)
+ goto out;
+ if (!(val & SX9310_COMPSTAT_MASK))
+ break;
+ }
+
+ if (i < 0) {
+ dev_err(&data->client->dev,
+ "initial compensation timed out: 0x%02x", val);
+ ret = -ETIMEDOUT;
+ }
+
+out:
+ regmap_write(data->regmap, SX9310_REG_PROX_CTRL0, ctrl0);
+ return ret;
+}
+
+static int sx9310_init_device(struct iio_dev *indio_dev)
+{
+ struct sx9310_data *data = iio_priv(indio_dev);
+ const struct sx9310_reg_default *initval;
+ int ret;
+ unsigned int i, val;
+
+ ret = regmap_write(data->regmap, SX9310_REG_RESET, SX9310_SOFT_RESET);
+ if (ret < 0)
+ return ret;
+
+ usleep_range(1000, 2000); /* power-up time is ~1ms. */
+
+ /* Clear reset interrupt state by reading SX9310_REG_IRQ_SRC. */
+ ret = regmap_read(data->regmap, SX9310_REG_IRQ_SRC, &val);
+ if (ret < 0)
+ return ret;
+
+ /* Program some sane defaults. */
+ for (i = 0; i < ARRAY_SIZE(sx9310_default_regs); i++) {
+ initval = &sx9310_default_regs[i];
+ ret = regmap_write(data->regmap, initval->reg, initval->def);
+ if (ret < 0)
+ return ret;
+ }
+
+ return sx9310_init_compensation(indio_dev);
+}
+
+static int sx9310_set_indio_dev_name(struct device *dev,
+ struct iio_dev *indio_dev,
+ const struct i2c_device_id *id, int whoami)
+{
+ const struct acpi_device_id *acpi_id;
+
+ /* id will be NULL when enumerated via ACPI */
+ if (id) {
+ if (id->driver_data != whoami)
+ dev_err(dev, "WHOAMI does not match i2c_device_id: %s",
+ id->name);
+ } else if (ACPI_HANDLE(dev)) {
+ acpi_id = acpi_match_device(dev->driver->acpi_match_table, dev);
+ if (!acpi_id)
+ return -ENODEV;
+ if (acpi_id->driver_data != whoami)
+ dev_err(dev, "WHOAMI does not match acpi_device_id: %s",
+ acpi_id->id);
+ } else
+ return -ENODEV;
+
+ switch (whoami) {
+ case SX9310_WHOAMI_VALUE:
+ indio_dev->name = "sx9310";
+ break;
+ case SX9311_WHOAMI_VALUE:
+ indio_dev->name = "sx9311";
+ break;
+ default:
+ dev_err(dev, "unexpected WHOAMI response: %u", whoami);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int sx9310_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret;
+ struct iio_dev *indio_dev;
+ struct sx9310_data *data;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (indio_dev == NULL)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ data->client = client;
+ mutex_init(&data->mutex);
+ init_completion(&data->completion);
+
+ data->regmap = devm_regmap_init_i2c(client, &sx9310_regmap_config);
+ if (IS_ERR(data->regmap))
+ return PTR_ERR(data->regmap);
+
+ ret = regmap_read(data->regmap, SX9310_REG_WHOAMI, &data->whoami);
+ if (ret < 0) {
+ dev_err(&client->dev, "error in reading WHOAMI register: %d",
+ ret);
+ return ret;
+ }
+
+ ret = sx9310_set_indio_dev_name(&client->dev, indio_dev, id,
+ data->whoami);
+ if (ret < 0)
+ return ret;
+
+ ACPI_COMPANION_SET(&indio_dev->dev, ACPI_COMPANION(&client->dev));
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->channels = sx9310_channels;
+ indio_dev->num_channels = ARRAY_SIZE(sx9310_channels);
+ indio_dev->info = &sx9310_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ i2c_set_clientdata(client, indio_dev);
+
+ ret = sx9310_init_device(indio_dev);
+ if (ret < 0)
+ return ret;
+
+ if (client->irq) {
+ ret = devm_request_threaded_irq(&client->dev, client->irq,
+ sx9310_irq_handler,
+ sx9310_irq_thread_handler,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ "sx9310_event", indio_dev);
+ if (ret < 0)
+ return ret;
+
+ data->trig =
+ devm_iio_trigger_alloc(&client->dev, "%s-dev%d",
+ indio_dev->name, indio_dev->id);
+ if (!data->trig)
+ return -ENOMEM;
+
+ data->trig->dev.parent = &client->dev;
+ data->trig->ops = &sx9310_trigger_ops;
+ iio_trigger_set_drvdata(data->trig, indio_dev);
+
+ ret = devm_iio_trigger_register(&client->dev, data->trig);
+ if (ret)
+ return ret;
+ }
+
+ ret = devm_iio_triggered_buffer_setup(&client->dev, indio_dev,
+ iio_pollfunc_store_time,
+ sx9310_trigger_handler,
+ &sx9310_buffer_setup_ops);
+ if (ret < 0)
+ return ret;
+
+ return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+static int __maybe_unused sx9310_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct sx9310_data *data = iio_priv(indio_dev);
+ u8 ctrl0;
+ int ret;
+
+ disable_irq_nosync(data->client->irq);
+
+ mutex_lock(&data->mutex);
+ ret = regmap_read(data->regmap, SX9310_REG_PROX_CTRL0,
+ &data->suspend_ctrl0);
+
+ if (ret)
+ goto out;
+
+ ctrl0 = data->suspend_ctrl0 & ~SX9310_REG_PROX_CTRL0_EN_MASK;
+ ret = regmap_write(data->regmap, SX9310_REG_PROX_CTRL0, ctrl0);
+ if (ret)
+ goto out;
+
+ ret = regmap_write(data->regmap, SX9310_REG_PAUSE, 0);
+
+out:
+ mutex_unlock(&data->mutex);
+ return ret;
+}
+
+static int __maybe_unused sx9310_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct sx9310_data *data = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&data->mutex);
+ ret = regmap_write(data->regmap, SX9310_REG_PAUSE, 1);
+ if (ret)
+ goto out;
+
+ ret = regmap_write(data->regmap, SX9310_REG_PROX_CTRL0,
+ data->suspend_ctrl0);
+
+out:
+ mutex_unlock(&data->mutex);
+
+ enable_irq(data->client->irq);
+
+ return ret;
+}
+
+static const struct dev_pm_ops sx9310_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(sx9310_suspend, sx9310_resume)
+};
+
+static const struct acpi_device_id sx9310_acpi_match[] = {
+ { "STH9310", SX9310_WHOAMI_VALUE },
+ { "STH9311", SX9311_WHOAMI_VALUE },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, sx9310_acpi_match);
+
+static const struct of_device_id sx9310_of_match[] = {
+ { .compatible = "semtech,sx9310" },
+ { .compatible = "semtech,sx9311" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sx9310_of_match);
+
+static const struct i2c_device_id sx9310_id[] = {
+ { "sx9310", SX9310_WHOAMI_VALUE },
+ { "sx9311", SX9311_WHOAMI_VALUE },
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, sx9310_id);
+
+static struct i2c_driver sx9310_driver = {
+ .driver = {
+ .name = "sx9310",
+ .acpi_match_table = ACPI_PTR(sx9310_acpi_match),
+ .of_match_table = of_match_ptr(sx9310_of_match),
+ .pm = &sx9310_pm_ops,
+ },
+ .probe = sx9310_probe,
+ .id_table = sx9310_id,
+};
+module_i2c_driver(sx9310_driver);
+
+MODULE_AUTHOR("Gwendal Grignou <gwendal@chromium.org>");
+MODULE_AUTHOR("Daniel Campello <campello@chromium.org>");
+MODULE_DESCRIPTION("Driver for Semtech SX9310/SX9311 proximity sensor");
+MODULE_LICENSE("GPL v2");
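
The compensation wait in sx9310_init_compensation() above is a bounded-retry poll: enable all channels, keep re-reading the status register until the busy bits clear or the retry budget runs out, and restore the original CTRL0 value either way. A minimal userspace sketch of that polling pattern follows; read_status() and COMPSTAT_MASK are stand-ins for the driver's regmap_read() of SX9310_REG_STAT1, not real kernel API.

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

#define COMPSTAT_MASK 0x0f              /* busy bits, analogous to SX9310_COMPSTAT_MASK */

/* Stand-in for reading SX9310_REG_STAT1: stays busy for the first few polls. */
static int read_status(unsigned int *val)
{
	static int busy_polls = 3;

	*val = busy_polls > 0 ? COMPSTAT_MASK : 0;
	if (busy_polls > 0)
		busy_polls--;
	return 0;                       /* 0 == success, mirroring regmap_read() */
}

static int wait_for_compensation(void)
{
	unsigned int val;
	int i, ret;

	for (i = 100; i >= 0; i--) {
		usleep(20 * 1000);      /* ~20 ms between polls, like msleep(20) */
		ret = read_status(&val);
		if (ret < 0)
			return ret;
		if (!(val & COMPSTAT_MASK))
			return 0;       /* compensation finished */
	}
	return -ETIMEDOUT;              /* retry budget exhausted */
}

int main(void)
{
	printf("compensation %s\n", wait_for_compensation() ? "timed out" : "done");
	return 0;
}
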
diff --git a/drivers/iio/proximity/vcnl3020.c b/drivers/iio/proximity/vcnl3020.c
new file mode 100644
index 000000000000..9ff1a164c2e6
--- /dev/null
+++ b/drivers/iio/proximity/vcnl3020.c
@@ -0,0 +1,258 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Support for Vishay VCNL3020 proximity sensor on i2c bus.
+ * Based on Vishay VCNL4000 driver code.
+ *
+ * TODO: interrupts.
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/regmap.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+#define VCNL3020_PROD_ID 0x21
+
+#define VCNL_COMMAND 0x80 /* Command register */
+#define VCNL_PROD_REV 0x81 /* Product ID and Revision ID */
+#define VCNL_PROXIMITY_RATE 0x82 /* Rate of Proximity Measurement */
+#define VCNL_LED_CURRENT 0x83 /* IR LED current for proximity mode */
+#define VCNL_PS_RESULT_HI 0x87 /* Proximity result register, MSB */
+#define VCNL_PS_RESULT_LO 0x88 /* Proximity result register, LSB */
+#define VCNL_PS_ICR 0x89 /* Interrupt Control Register */
+#define VCNL_PS_LO_THR_HI 0x8a /* High byte of low threshold value */
+#define VCNL_PS_LO_THR_LO 0x8b /* Low byte of low threshold value */
+#define VCNL_PS_HI_THR_HI 0x8c /* High byte of high threshold value */
+#define VCNL_PS_HI_THR_LO 0x8d /* Low byte of high threshold value */
+#define VCNL_ISR 0x8e /* Interrupt Status Register */
+#define VCNL_PS_MOD_ADJ 0x8f /* Proximity Modulator Timing Adjustment */
+
+/* Bit masks for COMMAND register */
+#define VCNL_PS_RDY BIT(5) /* proximity data ready? */
+#define VCNL_PS_OD BIT(3) /* start on-demand proximity
+ * measurement
+ */
+
+#define VCNL_ON_DEMAND_TIMEOUT_US 100000
+#define VCNL_POLL_US 20000
+
+/**
+ * struct vcnl3020_data - vcnl3020 specific data.
+ * @regmap: device register map.
+ * @dev: vcnl3020 device.
+ * @rev: revision id.
+ * @lock: lock for protecting access to device hardware registers.
+ */
+struct vcnl3020_data {
+ struct regmap *regmap;
+ struct device *dev;
+ u8 rev;
+ struct mutex lock;
+};
+
+/**
+ * struct vcnl3020_property - vcnl3020 property.
+ * @name: property name.
+ * @reg: i2c register offset.
+ * @conversion_func: conversion function.
+ */
+struct vcnl3020_property {
+ const char *name;
+ u32 reg;
+ u32 (*conversion_func)(u32 *val);
+};
+
+static u32 microamp_to_reg(u32 *val)
+{
+ /*
+ * An example of conversion from uA to reg val:
+ * 200000 uA == 200 mA == 20
+ */
+ return *val /= 10000;
+};
+
+static struct vcnl3020_property vcnl3020_led_current_property = {
+ .name = "vishay,led-current-microamp",
+ .reg = VCNL_LED_CURRENT,
+ .conversion_func = microamp_to_reg,
+};
+
+static int vcnl3020_get_and_apply_property(struct vcnl3020_data *data,
+ struct vcnl3020_property prop)
+{
+ int rc;
+ u32 val;
+
+ rc = device_property_read_u32(data->dev, prop.name, &val);
+ if (rc)
+ return 0;
+
+ if (prop.conversion_func)
+ prop.conversion_func(&val);
+
+ rc = regmap_write(data->regmap, prop.reg, val);
+ if (rc) {
+ dev_err(data->dev, "Error (%d) setting property (%s)\n",
+ rc, prop.name);
+ }
+
+ return rc;
+}
+
+static int vcnl3020_init(struct vcnl3020_data *data)
+{
+ int rc;
+ unsigned int reg;
+
+ rc = regmap_read(data->regmap, VCNL_PROD_REV, &reg);
+ if (rc) {
+ dev_err(data->dev,
+ "Error (%d) reading product revision\n", rc);
+ return rc;
+ }
+
+ if (reg != VCNL3020_PROD_ID) {
+ dev_err(data->dev,
+ "Product id (%x) did not match vcnl3020 (%x)\n", reg,
+ VCNL3020_PROD_ID);
+ return -ENODEV;
+ }
+
+ data->rev = reg;
+ mutex_init(&data->lock);
+
+ return vcnl3020_get_and_apply_property(data,
+ vcnl3020_led_current_property);
+};
+
+static int vcnl3020_measure_proximity(struct vcnl3020_data *data, int *val)
+{
+ int rc;
+ unsigned int reg;
+ __be16 res;
+
+ mutex_lock(&data->lock);
+
+ rc = regmap_write(data->regmap, VCNL_COMMAND, VCNL_PS_OD);
+ if (rc)
+ goto err_unlock;
+
+ /* wait for data to become ready */
+ rc = regmap_read_poll_timeout(data->regmap, VCNL_COMMAND, reg,
+ reg & VCNL_PS_RDY, VCNL_POLL_US,
+ VCNL_ON_DEMAND_TIMEOUT_US);
+ if (rc) {
+ dev_err(data->dev,
+ "Error (%d) reading vcnl3020 command register\n", rc);
+ goto err_unlock;
+ }
+
+ /* high & low result bytes read */
+ rc = regmap_bulk_read(data->regmap, VCNL_PS_RESULT_HI, &res,
+ sizeof(res));
+ if (rc)
+ goto err_unlock;
+
+ *val = be16_to_cpu(res);
+
+err_unlock:
+ mutex_unlock(&data->lock);
+
+ return rc;
+}
+
+static const struct iio_chan_spec vcnl3020_channels[] = {
+ {
+ .type = IIO_PROXIMITY,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ },
+};
+
+static int vcnl3020_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ int rc;
+ struct vcnl3020_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ rc = vcnl3020_measure_proximity(data, val);
+ if (rc)
+ return rc;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info vcnl3020_info = {
+ .read_raw = vcnl3020_read_raw,
+};
+
+static const struct regmap_config vcnl3020_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = VCNL_PS_MOD_ADJ,
+};
+
+static int vcnl3020_probe(struct i2c_client *client)
+{
+ struct vcnl3020_data *data;
+ struct iio_dev *indio_dev;
+ struct regmap *regmap;
+ int rc;
+
+ regmap = devm_regmap_init_i2c(client, &vcnl3020_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(&client->dev, "regmap_init failed\n");
+ return PTR_ERR(regmap);
+ }
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+ data->regmap = regmap;
+ data->dev = &client->dev;
+
+ rc = vcnl3020_init(data);
+ if (rc)
+ return rc;
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->info = &vcnl3020_info;
+ indio_dev->channels = vcnl3020_channels;
+ indio_dev->num_channels = ARRAY_SIZE(vcnl3020_channels);
+ indio_dev->name = "vcnl3020";
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+static const struct of_device_id vcnl3020_of_match[] = {
+ {
+ .compatible = "vishay,vcnl3020",
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, vcnl3020_of_match);
+
+static struct i2c_driver vcnl3020_driver = {
+ .driver = {
+ .name = "vcnl3020",
+ .of_match_table = vcnl3020_of_match,
+ },
+ .probe_new = vcnl3020_probe,
+};
+module_i2c_driver(vcnl3020_driver);
+
+MODULE_AUTHOR("Ivan Mikhaylov <i.mikhaylov@yadro.com>");
+MODULE_DESCRIPTION("Vishay VCNL3020 proximity sensor driver");
+MODULE_LICENSE("GPL");
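
vcnl3020_measure_proximity() above is an on-demand conversion: set the start bit in the command register, poll that register until the data-ready bit appears (regmap_read_poll_timeout() in the driver), then read the 16-bit big-endian result. The sketch below simulates that sequence in plain C; reg_read()/reg_write() and the fake register array are stand-ins for the regmap accessors and the real hardware, and the register offsets simply reuse the values defined above.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define CMD_REG    0x80                 /* VCNL_COMMAND */
#define RESULT_HI  0x87                 /* VCNL_PS_RESULT_HI */
#define PS_RDY     (1u << 5)
#define PS_OD      (1u << 3)
#define POLL_LIMIT 5                    /* stands in for the 100 ms poll timeout */

static uint8_t regs[0x90];              /* fake register file */

static int reg_write(uint8_t reg, uint8_t val)
{
	regs[reg] = val;
	if (reg == CMD_REG && (val & PS_OD)) {
		/* fake hardware: produce a result and raise the ready bit */
		regs[RESULT_HI] = 0x01;
		regs[RESULT_HI + 1] = 0x42;
		regs[CMD_REG] |= PS_RDY;
	}
	return 0;
}

static int reg_read(uint8_t reg, uint8_t *val)
{
	*val = regs[reg];
	return 0;
}

static int measure_proximity(uint16_t *out)
{
	uint8_t cmd = 0, hi, lo;
	int i, rc;

	rc = reg_write(CMD_REG, PS_OD);         /* start one measurement */
	if (rc)
		return rc;

	for (i = 0; i < POLL_LIMIT; i++) {      /* wait for data to become ready */
		rc = reg_read(CMD_REG, &cmd);
		if (rc)
			return rc;
		if (cmd & PS_RDY)
			break;
	}
	if (!(cmd & PS_RDY))
		return -ETIMEDOUT;

	reg_read(RESULT_HI, &hi);               /* result is big-endian: MSB first */
	reg_read(RESULT_HI + 1, &lo);
	*out = ((uint16_t)hi << 8) | lo;
	return 0;
}

int main(void)
{
	uint16_t val;

	if (!measure_proximity(&val))
		printf("proximity: %u\n", val);
	return 0;
}
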
diff --git a/drivers/iio/temperature/hid-sensor-temperature.c b/drivers/iio/temperature/hid-sensor-temperature.c
index eda55b9c1e9b..8d1f434f109d 100644
--- a/drivers/iio/temperature/hid-sensor-temperature.c
+++ b/drivers/iio/temperature/hid-sensor-temperature.c
@@ -7,8 +7,6 @@
#include <linux/hid-sensor-hub.h>
#include <linux/iio/buffer.h>
#include <linux/iio/iio.h>
-#include <linux/iio/triggered_buffer.h>
-#include <linux/iio/trigger_consumer.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -230,12 +228,8 @@ static int hid_temperature_probe(struct platform_device *pdev)
indio_dev->name = name;
indio_dev->modes = INDIO_DIRECT_MODE;
- ret = devm_iio_triggered_buffer_setup(&pdev->dev, indio_dev,
- &iio_pollfunc_store_time, NULL, NULL);
- if (ret)
- return ret;
-
atomic_set(&temp_st->common_attributes.data_ready, 0);
+
ret = hid_sensor_setup_trigger(indio_dev, name,
&temp_st->common_attributes);
if (ret)
@@ -258,7 +252,7 @@ static int hid_temperature_probe(struct platform_device *pdev)
error_remove_callback:
sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_TEMPERATURE);
error_remove_trigger:
- hid_sensor_remove_trigger(&temp_st->common_attributes);
+ hid_sensor_remove_trigger(indio_dev, &temp_st->common_attributes);
return ret;
}
@@ -270,7 +264,7 @@ static int hid_temperature_remove(struct platform_device *pdev)
struct temperature_state *temp_st = iio_priv(indio_dev);
sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_TEMPERATURE);
- hid_sensor_remove_trigger(&temp_st->common_attributes);
+ hid_sensor_remove_trigger(indio_dev, &temp_st->common_attributes);
return 0;
}
diff --git a/drivers/iio/temperature/ltc2983.c b/drivers/iio/temperature/ltc2983.c
index d39c0d6b77f1..8976e8d59826 100644
--- a/drivers/iio/temperature/ltc2983.c
+++ b/drivers/iio/temperature/ltc2983.c
@@ -390,8 +390,8 @@ static struct ltc2983_custom_sensor *__ltc2983_custom_sensor_new(
* For custom steinhart, the full u32 is taken. For all the others
* the MSB is discarded.
*/
- const u8 n_size = (is_steinhart == true) ? 4 : 3;
- const u8 e_size = (is_steinhart == true) ? sizeof(u32) : sizeof(u64);
+ const u8 n_size = is_steinhart ? 4 : 3;
+ const u8 e_size = is_steinhart ? sizeof(u32) : sizeof(u64);
n_entries = of_property_count_elems_of_size(np, propname, e_size);
/* n_entries must be an even number */
diff --git a/drivers/iio/temperature/max31856.c b/drivers/iio/temperature/max31856.c
index b4cb21ab2e85..b4c49a5d3685 100644
--- a/drivers/iio/temperature/max31856.c
+++ b/drivers/iio/temperature/max31856.c
@@ -14,6 +14,7 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/util_macros.h>
+#include <asm/unaligned.h>
#include <dt-bindings/iio/temperature/thermocouple.h>
/*
* The MSB of the register value determines whether the following byte will
@@ -168,7 +169,7 @@ static int max31856_thermocouple_read(struct max31856_data *data,
if (ret)
return ret;
/* Skip last 5 dead bits of LTCBL */
- *val = (reg_val[0] << 16 | reg_val[1] << 8 | reg_val[2]) >> 5;
+ *val = get_unaligned_be24(&reg_val[0]) >> 5;
/* Check 7th bit of LTCBH reg. value for sign*/
if (reg_val[0] & 0x80)
*val -= 0x80000;
@@ -185,7 +186,7 @@ static int max31856_thermocouple_read(struct max31856_data *data,
/* Get Cold Junction Temp. offset register value */
offset_cjto = reg_val[0];
/* Get CJTH and CJTL value and skip last 2 dead bits of CJTL */
- *val = (reg_val[1] << 8 | reg_val[2]) >> 2;
+ *val = get_unaligned_be16(&reg_val[1]) >> 2;
/* As per datasheet add offset into CJTH and CJTL */
*val += offset_cjto;
/* Check 7th bit of CJTH reg. value for sign */
diff --git a/drivers/iio/trigger/iio-trig-hrtimer.c b/drivers/iio/trigger/iio-trig-hrtimer.c
index a5e670726717..f59bf8d58586 100644
--- a/drivers/iio/trigger/iio-trig-hrtimer.c
+++ b/drivers/iio/trigger/iio-trig-hrtimer.c
@@ -4,7 +4,7 @@
*
* Copyright (C) Intuitive Aerial AB
* Written by Marten Svanfeldt, marten@intuitiveaerial.com
- * Copyright (C) 2012, Analog Device Inc.
+ * Copyright (C) 2012, Analog Devices Inc.
* Author: Lars-Peter Clausen <lars@metafoo.de>
* Copyright (C) 2015, Intel Corporation
*/
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index ade86388434f..477418b37786 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -107,6 +107,7 @@ source "drivers/infiniband/ulp/srpt/Kconfig"
source "drivers/infiniband/ulp/iser/Kconfig"
source "drivers/infiniband/ulp/isert/Kconfig"
+source "drivers/infiniband/ulp/rtrs/Kconfig"
source "drivers/infiniband/ulp/opa_vnic/Kconfig"
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index d1b14887960e..24cb71a16a28 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -8,11 +8,11 @@ obj-$(CONFIG_INFINIBAND_USER_MAD) += ib_umad.o
obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o $(user_access-y)
ib_core-y := packer.o ud_header.o verbs.o cq.o rw.o sysfs.o \
- device.o fmr_pool.o cache.o netlink.o \
+ device.o cache.o netlink.o \
roce_gid_mgmt.o mr_pool.o addr.o sa_query.o \
multicast.o mad.o smi.o agent.o mad_rmpp.o \
nldev.o restrack.o counters.o ib_core_uverbs.o \
- trace.o
+ trace.o lag.o
ib_core-$(CONFIG_SECURITY_INFINIBAND) += security.o
ib_core-$(CONFIG_CGROUP_RDMA) += cgroup.o
@@ -36,6 +36,9 @@ ib_uverbs-y := uverbs_main.o uverbs_cmd.o uverbs_marshall.o \
uverbs_std_types_flow_action.o uverbs_std_types_dm.o \
uverbs_std_types_mr.o uverbs_std_types_counters.o \
uverbs_uapi.o uverbs_std_types_device.o \
- uverbs_std_types_async_fd.o
+ uverbs_std_types_async_fd.o \
+ uverbs_std_types_srq.o \
+ uverbs_std_types_wq.o \
+ uverbs_std_types_qp.o
ib_uverbs-$(CONFIG_INFINIBAND_USER_MEM) += umem.o
ib_uverbs-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += umem_odp.o
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 1753a9801b70..3a98439bba83 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -371,6 +371,8 @@ static int fetch_ha(const struct dst_entry *dst, struct rdma_dev_addr *dev_addr,
(const void *)&dst_in6->sin6_addr;
sa_family_t family = dst_in->sa_family;
+ might_sleep();
+
/* If we have a gateway in IB mode then it must be an IB network */
if (has_gateway(dst, family) && dev_addr->network == RDMA_NETWORK_IB)
return ib_nl_fetch_ha(dev_addr, daddr, seq, family);
@@ -727,6 +729,8 @@ int roce_resolve_route_from_path(struct sa_path_rec *rec,
struct rdma_dev_addr dev_addr = {};
int ret;
+ might_sleep();
+
if (rec->roce.route_resolved)
return 0;
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 17f14e0eafe4..9ce787e37e22 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -66,6 +66,8 @@ static const char * const ibcm_rej_reason_strs[] = {
[IB_CM_REJ_INVALID_CLASS_VERSION] = "invalid class version",
[IB_CM_REJ_INVALID_FLOW_LABEL] = "invalid flow label",
[IB_CM_REJ_INVALID_ALT_FLOW_LABEL] = "invalid alt flow label",
+ [IB_CM_REJ_VENDOR_OPTION_NOT_SUPPORTED] =
+ "vendor option is not supported",
};
const char *__attribute_const__ ibcm_reject_msg(int reason)
@@ -81,8 +83,11 @@ const char *__attribute_const__ ibcm_reject_msg(int reason)
EXPORT_SYMBOL(ibcm_reject_msg);
struct cm_id_private;
-static void cm_add_one(struct ib_device *device);
+struct cm_work;
+static int cm_add_one(struct ib_device *device);
static void cm_remove_one(struct ib_device *device, void *client_data);
+static void cm_process_work(struct cm_id_private *cm_id_priv,
+ struct cm_work *work);
static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv,
struct ib_cm_sidr_rep_param *param);
static int cm_send_dreq_locked(struct cm_id_private *cm_id_priv,
@@ -287,6 +292,8 @@ struct cm_id_private {
struct list_head work_list;
atomic_t work_count;
+
+ struct rdma_ucm_ece ece;
};
static void cm_work_handler(struct work_struct *work);
@@ -474,24 +481,19 @@ static int cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
grh, &av->ah_attr);
}
-static int add_cm_id_to_port_list(struct cm_id_private *cm_id_priv,
- struct cm_av *av,
- struct cm_port *port)
+static void add_cm_id_to_port_list(struct cm_id_private *cm_id_priv,
+ struct cm_av *av, struct cm_port *port)
{
unsigned long flags;
- int ret = 0;
spin_lock_irqsave(&cm.lock, flags);
-
if (&cm_id_priv->av == av)
list_add_tail(&cm_id_priv->prim_list, &port->cm_priv_prim_list);
else if (&cm_id_priv->alt_av == av)
list_add_tail(&cm_id_priv->altr_list, &port->cm_priv_altr_list);
else
- ret = -EINVAL;
-
+ WARN_ON(true);
spin_unlock_irqrestore(&cm.lock, flags);
- return ret;
}
static struct cm_port *
@@ -572,12 +574,7 @@ static int cm_init_av_by_path(struct sa_path_rec *path,
return ret;
av->timeout = path->packet_life_time + 1;
-
- ret = add_cm_id_to_port_list(cm_id_priv, av, port);
- if (ret) {
- rdma_destroy_ah_attr(&new_ah_attr);
- return ret;
- }
+ add_cm_id_to_port_list(cm_id_priv, av, port);
rdma_move_ah_attr(&av->ah_attr, &new_ah_attr);
return 0;
}
@@ -587,11 +584,6 @@ static u32 cm_local_id(__be32 local_id)
return (__force u32) (local_id ^ cm.random_id_operand);
}
-static void cm_free_id(__be32 local_id)
-{
- xa_erase_irq(&cm.local_id_table, cm_local_id(local_id));
-}
-
static struct cm_id_private *cm_acquire_id(__be32 local_id, __be32 remote_id)
{
struct cm_id_private *cm_id_priv;
@@ -698,9 +690,10 @@ static struct cm_id_private * cm_find_listen(struct ib_device *device,
cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
if ((cm_id_priv->id.service_mask & service_id) ==
cm_id_priv->id.service_id &&
- (cm_id_priv->id.device == device))
+ (cm_id_priv->id.device == device)) {
+ refcount_inc(&cm_id_priv->refcount);
return cm_id_priv;
-
+ }
if (device < cm_id_priv->id.device)
node = node->rb_left;
else if (device > cm_id_priv->id.device)
@@ -745,12 +738,14 @@ static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
return NULL;
}
-static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid,
- __be32 remote_id)
+static struct cm_id_private *cm_find_remote_id(__be64 remote_ca_guid,
+ __be32 remote_id)
{
struct rb_node *node = cm.remote_id_table.rb_node;
struct cm_timewait_info *timewait_info;
+ struct cm_id_private *res = NULL;
+ spin_lock_irq(&cm.lock);
while (node) {
timewait_info = rb_entry(node, struct cm_timewait_info,
remote_id_node);
@@ -762,10 +757,14 @@ static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid,
node = node->rb_left;
else if (be64_gt(remote_ca_guid, timewait_info->remote_ca_guid))
node = node->rb_right;
- else
- return timewait_info;
+ else {
+ res = cm_acquire_id(timewait_info->work.local_id,
+ timewait_info->work.remote_id);
+ break;
+ }
}
- return NULL;
+ spin_unlock_irq(&cm.lock);
+ return res;
}
static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
@@ -917,6 +916,35 @@ static void cm_free_work(struct cm_work *work)
kfree(work);
}
+static void cm_queue_work_unlock(struct cm_id_private *cm_id_priv,
+ struct cm_work *work)
+{
+ bool immediate;
+
+ /*
+ * To deliver the event to the user callback we have to drop the
+ * spinlock; however, we need to ensure that the user callback is single
+ * threaded and receives events in temporal order. If there are
+ * already events being processed then thread new events onto a list;
+ * the thread currently processing will pick them up.
+ */
+ immediate = atomic_inc_and_test(&cm_id_priv->work_count);
+ if (!immediate) {
+ list_add_tail(&work->list, &cm_id_priv->work_list);
+ /*
+ * This routine always consumes incoming reference. Once queued
+ * to the work_list then a reference is held by the thread
+ * currently running cm_process_work() and this reference is not
+ * needed.
+ */
+ cm_deref_id(cm_id_priv);
+ }
+ spin_unlock_irq(&cm_id_priv->lock);
+
+ if (immediate)
+ cm_process_work(cm_id_priv, work);
+}
+
static inline int cm_convert_to_ms(int iba_time)
{
/* approximate conversion to ms from 4.096us x 2^iba_time */
@@ -942,8 +970,10 @@ static u8 cm_ack_timeout(u8 ca_ack_delay, u8 packet_life_time)
return min(31, ack_timeout);
}
-static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
+static void cm_remove_remote(struct cm_id_private *cm_id_priv)
{
+ struct cm_timewait_info *timewait_info = cm_id_priv->timewait_info;
+
if (timewait_info->inserted_remote_id) {
rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
timewait_info->inserted_remote_id = 0;
@@ -982,7 +1012,7 @@ static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
return;
spin_lock_irqsave(&cm.lock, flags);
- cm_cleanup_timewait(cm_id_priv->timewait_info);
+ cm_remove_remote(cm_id_priv);
list_add_tail(&cm_id_priv->timewait_info->list, &cm.timewait_list);
spin_unlock_irqrestore(&cm.lock, flags);
@@ -1001,6 +1031,11 @@ static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
msecs_to_jiffies(wait_time));
spin_unlock_irqrestore(&cm.lock, flags);
+ /*
+ * The timewait_info is converted into a work and gets freed during
+ * cm_free_work() in cm_timewait_handler().
+ */
+ BUILD_BUG_ON(offsetof(struct cm_timewait_info, work) != 0);
cm_id_priv->timewait_info = NULL;
}
@@ -1013,7 +1048,7 @@ static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
cm_id_priv->id.state = IB_CM_IDLE;
if (cm_id_priv->timewait_info) {
spin_lock_irqsave(&cm.lock, flags);
- cm_cleanup_timewait(cm_id_priv->timewait_info);
+ cm_remove_remote(cm_id_priv);
spin_unlock_irqrestore(&cm.lock, flags);
kfree(cm_id_priv->timewait_info);
cm_id_priv->timewait_info = NULL;
@@ -1076,7 +1111,9 @@ retest:
case IB_CM_REP_SENT:
case IB_CM_MRA_REP_RCVD:
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
- /* Fall through */
+ cm_send_rej_locked(cm_id_priv, IB_CM_REJ_CONSUMER_DEFINED, NULL,
+ 0, NULL, 0);
+ goto retest;
case IB_CM_MRA_REQ_SENT:
case IB_CM_REP_RCVD:
case IB_CM_MRA_REP_SENT:
@@ -1101,7 +1138,7 @@ retest:
case IB_CM_TIMEWAIT:
/*
* The cm_acquire_id in cm_timewait_handler will stop working
- * once we do cm_free_id() below, so just move to idle here for
+ * once we do xa_erase below, so just move to idle here for
* consistency.
*/
cm_id->state = IB_CM_IDLE;
@@ -1114,7 +1151,7 @@ retest:
spin_lock(&cm.lock);
/* Required for cleanup paths related cm_req_handler() */
if (cm_id_priv->timewait_info) {
- cm_cleanup_timewait(cm_id_priv->timewait_info);
+ cm_remove_remote(cm_id_priv);
kfree(cm_id_priv->timewait_info);
cm_id_priv->timewait_info = NULL;
}
@@ -1131,7 +1168,7 @@ retest:
spin_unlock(&cm.lock);
spin_unlock_irq(&cm_id_priv->lock);
- cm_free_id(cm_id->local_id);
+ xa_erase_irq(&cm.local_id_table, cm_local_id(cm_id->local_id));
cm_deref_id(cm_id_priv);
wait_for_completion(&cm_id_priv->comp);
while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
@@ -1287,6 +1324,13 @@ static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
hdr->tid = tid;
}
+static void cm_format_mad_ece_hdr(struct ib_mad_hdr *hdr, __be16 attr_id,
+ __be64 tid, u32 attr_mod)
+{
+ cm_format_mad_hdr(hdr, attr_id, tid);
+ hdr->attr_mod = cpu_to_be32(attr_mod);
+}
+
static void cm_format_req(struct cm_req_msg *req_msg,
struct cm_id_private *cm_id_priv,
struct ib_cm_req_param *param)
@@ -1299,8 +1343,8 @@ static void cm_format_req(struct cm_req_msg *req_msg,
pri_ext = opa_is_extended_lid(pri_path->opa.dlid,
pri_path->opa.slid);
- cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
- cm_form_tid(cm_id_priv));
+ cm_format_mad_ece_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
+ cm_form_tid(cm_id_priv), param->ece.attr_mod);
IBA_SET(CM_REQ_LOCAL_COMM_ID, req_msg,
be32_to_cpu(cm_id_priv->id.local_id));
@@ -1423,6 +1467,7 @@ static void cm_format_req(struct cm_req_msg *req_msg,
cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
alt_path->packet_life_time));
}
+ IBA_SET(CM_REQ_VENDOR_ID, req_msg, param->ece.vendor_id);
if (param->private_data && param->private_data_len)
IBA_SET_MEM(CM_REQ_PRIVATE_DATA, req_msg, param->private_data,
@@ -1779,6 +1824,9 @@ static void cm_format_req_event(struct cm_work *work,
param->rnr_retry_count = IBA_GET(CM_REQ_RNR_RETRY_COUNT, req_msg);
param->srq = IBA_GET(CM_REQ_SRQ, req_msg);
param->ppath_sgid_attr = cm_id_priv->av.ah_attr.grh.sgid_attr;
+ param->ece.vendor_id = IBA_GET(CM_REQ_VENDOR_ID, req_msg);
+ param->ece.attr_mod = be32_to_cpu(req_msg->hdr.attr_mod);
+
work->cm_event.private_data =
IBA_GET_MEM_PTR(CM_REQ_PRIVATE_DATA, req_msg);
}
@@ -1927,7 +1975,6 @@ static struct cm_id_private * cm_match_req(struct cm_work *work,
struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
struct cm_timewait_info *timewait_info;
struct cm_req_msg *req_msg;
- struct ib_cm_id *cm_id;
req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
@@ -1948,7 +1995,7 @@ static struct cm_id_private * cm_match_req(struct cm_work *work,
/* Check for stale connections. */
timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
if (timewait_info) {
- cm_cleanup_timewait(cm_id_priv->timewait_info);
+ cm_remove_remote(cm_id_priv);
cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
timewait_info->work.remote_id);
@@ -1957,8 +2004,7 @@ static struct cm_id_private * cm_match_req(struct cm_work *work,
IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
NULL, 0);
if (cur_cm_id_priv) {
- cm_id = &cur_cm_id_priv->id;
- ib_send_cm_dreq(cm_id, NULL, 0);
+ ib_send_cm_dreq(&cur_cm_id_priv->id, NULL, 0);
cm_deref_id(cur_cm_id_priv);
}
return NULL;
@@ -1969,14 +2015,13 @@ static struct cm_id_private * cm_match_req(struct cm_work *work,
cm_id_priv->id.device,
cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg)));
if (!listen_cm_id_priv) {
- cm_cleanup_timewait(cm_id_priv->timewait_info);
+ cm_remove_remote(cm_id_priv);
spin_unlock_irq(&cm.lock);
cm_issue_rej(work->port, work->mad_recv_wc,
IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
NULL, 0);
return NULL;
}
- refcount_inc(&listen_cm_id_priv->refcount);
spin_unlock_irq(&cm.lock);
return listen_cm_id_priv;
}
@@ -2153,9 +2198,7 @@ static int cm_req_handler(struct cm_work *work)
/* Refcount belongs to the event, pairs with cm_process_work() */
refcount_inc(&cm_id_priv->refcount);
- atomic_inc(&cm_id_priv->work_count);
- spin_unlock_irq(&cm_id_priv->lock);
- cm_process_work(cm_id_priv, work);
+ cm_queue_work_unlock(cm_id_priv, work);
/*
* Since this ID was just created and was not made visible to other MAD
* handlers until the cm_finalize_id() above we know that the
@@ -2176,7 +2219,8 @@ static void cm_format_rep(struct cm_rep_msg *rep_msg,
struct cm_id_private *cm_id_priv,
struct ib_cm_rep_param *param)
{
- cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
+ cm_format_mad_ece_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid,
+ param->ece.attr_mod);
IBA_SET(CM_REP_LOCAL_COMM_ID, rep_msg,
be32_to_cpu(cm_id_priv->id.local_id));
IBA_SET(CM_REP_REMOTE_COMM_ID, rep_msg,
@@ -2203,6 +2247,10 @@ static void cm_format_rep(struct cm_rep_msg *rep_msg,
IBA_SET(CM_REP_LOCAL_EE_CONTEXT_NUMBER, rep_msg, param->qp_num);
}
+ IBA_SET(CM_REP_VENDOR_ID_L, rep_msg, param->ece.vendor_id);
+ IBA_SET(CM_REP_VENDOR_ID_M, rep_msg, param->ece.vendor_id >> 8);
+ IBA_SET(CM_REP_VENDOR_ID_H, rep_msg, param->ece.vendor_id >> 16);
+
if (param->private_data && param->private_data_len)
IBA_SET_MEM(CM_REP_PRIVATE_DATA, rep_msg, param->private_data,
param->private_data_len);
@@ -2350,6 +2398,11 @@ static void cm_format_rep_event(struct cm_work *work, enum ib_qp_type qp_type)
param->flow_control = IBA_GET(CM_REP_END_TO_END_FLOW_CONTROL, rep_msg);
param->rnr_retry_count = IBA_GET(CM_REP_RNR_RETRY_COUNT, rep_msg);
param->srq = IBA_GET(CM_REP_SRQ, rep_msg);
+ param->ece.vendor_id = IBA_GET(CM_REP_VENDOR_ID_H, rep_msg) << 16;
+ param->ece.vendor_id |= IBA_GET(CM_REP_VENDOR_ID_M, rep_msg) << 8;
+ param->ece.vendor_id |= IBA_GET(CM_REP_VENDOR_ID_L, rep_msg);
+ param->ece.attr_mod = be32_to_cpu(rep_msg->hdr.attr_mod);
+
work->cm_event.private_data =
IBA_GET_MEM_PTR(CM_REP_PRIVATE_DATA, rep_msg);
}
@@ -2404,7 +2457,6 @@ static int cm_rep_handler(struct cm_work *work)
struct cm_rep_msg *rep_msg;
int ret;
struct cm_id_private *cur_cm_id_priv;
- struct ib_cm_id *cm_id;
struct cm_timewait_info *timewait_info;
rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
@@ -2454,9 +2506,7 @@ static int cm_rep_handler(struct cm_work *work)
/* Check for a stale connection. */
timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
if (timewait_info) {
- rb_erase(&cm_id_priv->timewait_info->remote_id_node,
- &cm.remote_id_table);
- cm_id_priv->timewait_info->inserted_remote_id = 0;
+ cm_remove_remote(cm_id_priv);
cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
timewait_info->work.remote_id);
@@ -2472,8 +2522,7 @@ static int cm_rep_handler(struct cm_work *work)
IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));
if (cur_cm_id_priv) {
- cm_id = &cur_cm_id_priv->id;
- ib_send_cm_dreq(cm_id, NULL, 0);
+ ib_send_cm_dreq(&cur_cm_id_priv->id, NULL, 0);
cm_deref_id(cur_cm_id_priv);
}
@@ -2501,15 +2550,7 @@ static int cm_rep_handler(struct cm_work *work)
cm_id_priv->alt_av.timeout - 1);
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
- ret = atomic_inc_and_test(&cm_id_priv->work_count);
- if (!ret)
- list_add_tail(&work->list, &cm_id_priv->work_list);
- spin_unlock_irq(&cm_id_priv->lock);
-
- if (ret)
- cm_process_work(cm_id_priv, work);
- else
- cm_deref_id(cm_id_priv);
+ cm_queue_work_unlock(cm_id_priv, work);
return 0;
error:
@@ -2520,7 +2561,6 @@ error:
static int cm_establish_handler(struct cm_work *work)
{
struct cm_id_private *cm_id_priv;
- int ret;
/* See comment in cm_establish about lookup. */
cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
@@ -2534,15 +2574,7 @@ static int cm_establish_handler(struct cm_work *work)
}
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
- ret = atomic_inc_and_test(&cm_id_priv->work_count);
- if (!ret)
- list_add_tail(&work->list, &cm_id_priv->work_list);
- spin_unlock_irq(&cm_id_priv->lock);
-
- if (ret)
- cm_process_work(cm_id_priv, work);
- else
- cm_deref_id(cm_id_priv);
+ cm_queue_work_unlock(cm_id_priv, work);
return 0;
out:
cm_deref_id(cm_id_priv);
@@ -2553,7 +2585,6 @@ static int cm_rtu_handler(struct cm_work *work)
{
struct cm_id_private *cm_id_priv;
struct cm_rtu_msg *rtu_msg;
- int ret;
rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
cm_id_priv = cm_acquire_id(
@@ -2576,15 +2607,7 @@ static int cm_rtu_handler(struct cm_work *work)
cm_id_priv->id.state = IB_CM_ESTABLISHED;
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
- ret = atomic_inc_and_test(&cm_id_priv->work_count);
- if (!ret)
- list_add_tail(&work->list, &cm_id_priv->work_list);
- spin_unlock_irq(&cm_id_priv->lock);
-
- if (ret)
- cm_process_work(cm_id_priv, work);
- else
- cm_deref_id(cm_id_priv);
+ cm_queue_work_unlock(cm_id_priv, work);
return 0;
out:
cm_deref_id(cm_id_priv);
@@ -2777,7 +2800,6 @@ static int cm_dreq_handler(struct cm_work *work)
struct cm_id_private *cm_id_priv;
struct cm_dreq_msg *dreq_msg;
struct ib_mad_send_buf *msg = NULL;
- int ret;
dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
cm_id_priv = cm_acquire_id(
@@ -2842,15 +2864,7 @@ static int cm_dreq_handler(struct cm_work *work)
}
cm_id_priv->id.state = IB_CM_DREQ_RCVD;
cm_id_priv->tid = dreq_msg->hdr.tid;
- ret = atomic_inc_and_test(&cm_id_priv->work_count);
- if (!ret)
- list_add_tail(&work->list, &cm_id_priv->work_list);
- spin_unlock_irq(&cm_id_priv->lock);
-
- if (ret)
- cm_process_work(cm_id_priv, work);
- else
- cm_deref_id(cm_id_priv);
+ cm_queue_work_unlock(cm_id_priv, work);
return 0;
unlock: spin_unlock_irq(&cm_id_priv->lock);
@@ -2862,7 +2876,6 @@ static int cm_drep_handler(struct cm_work *work)
{
struct cm_id_private *cm_id_priv;
struct cm_drep_msg *drep_msg;
- int ret;
drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
cm_id_priv = cm_acquire_id(
@@ -2883,15 +2896,7 @@ static int cm_drep_handler(struct cm_work *work)
cm_enter_timewait(cm_id_priv);
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
- ret = atomic_inc_and_test(&cm_id_priv->work_count);
- if (!ret)
- list_add_tail(&work->list, &cm_id_priv->work_list);
- spin_unlock_irq(&cm_id_priv->lock);
-
- if (ret)
- cm_process_work(cm_id_priv, work);
- else
- cm_deref_id(cm_id_priv);
+ cm_queue_work_unlock(cm_id_priv, work);
return 0;
out:
cm_deref_id(cm_id_priv);
@@ -2987,24 +2992,15 @@ static void cm_format_rej_event(struct cm_work *work)
static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
{
- struct cm_timewait_info *timewait_info;
struct cm_id_private *cm_id_priv;
__be32 remote_id;
remote_id = cpu_to_be32(IBA_GET(CM_REJ_LOCAL_COMM_ID, rej_msg));
if (IBA_GET(CM_REJ_REASON, rej_msg) == IB_CM_REJ_TIMEOUT) {
- spin_lock_irq(&cm.lock);
- timewait_info = cm_find_remote_id(
+ cm_id_priv = cm_find_remote_id(
*((__be64 *)IBA_GET_MEM_PTR(CM_REJ_ARI, rej_msg)),
remote_id);
- if (!timewait_info) {
- spin_unlock_irq(&cm.lock);
- return NULL;
- }
- cm_id_priv =
- cm_acquire_id(timewait_info->work.local_id, remote_id);
- spin_unlock_irq(&cm.lock);
} else if (IBA_GET(CM_REJ_MESSAGE_REJECTED, rej_msg) ==
CM_MSG_RESPONSE_REQ)
cm_id_priv = cm_acquire_id(
@@ -3022,7 +3018,6 @@ static int cm_rej_handler(struct cm_work *work)
{
struct cm_id_private *cm_id_priv;
struct cm_rej_msg *rej_msg;
- int ret;
rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
cm_id_priv = cm_acquire_rejected_id(rej_msg);
@@ -3068,19 +3063,10 @@ static int cm_rej_handler(struct cm_work *work)
__func__, be32_to_cpu(cm_id_priv->id.local_id),
cm_id_priv->id.state);
spin_unlock_irq(&cm_id_priv->lock);
- ret = -EINVAL;
goto out;
}
- ret = atomic_inc_and_test(&cm_id_priv->work_count);
- if (!ret)
- list_add_tail(&work->list, &cm_id_priv->work_list);
- spin_unlock_irq(&cm_id_priv->lock);
-
- if (ret)
- cm_process_work(cm_id_priv, work);
- else
- cm_deref_id(cm_id_priv);
+ cm_queue_work_unlock(cm_id_priv, work);
return 0;
out:
cm_deref_id(cm_id_priv);
@@ -3190,7 +3176,7 @@ static int cm_mra_handler(struct cm_work *work)
{
struct cm_id_private *cm_id_priv;
struct cm_mra_msg *mra_msg;
- int timeout, ret;
+ int timeout;
mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
cm_id_priv = cm_acquire_mraed_id(mra_msg);
@@ -3250,15 +3236,7 @@ static int cm_mra_handler(struct cm_work *work)
cm_id_priv->msg->context[1] = (void *) (unsigned long)
cm_id_priv->id.state;
- ret = atomic_inc_and_test(&cm_id_priv->work_count);
- if (!ret)
- list_add_tail(&work->list, &cm_id_priv->work_list);
- spin_unlock_irq(&cm_id_priv->lock);
-
- if (ret)
- cm_process_work(cm_id_priv, work);
- else
- cm_deref_id(cm_id_priv);
+ cm_queue_work_unlock(cm_id_priv, work);
return 0;
out:
spin_unlock_irq(&cm_id_priv->lock);
@@ -3393,15 +3371,7 @@ static int cm_lap_handler(struct cm_work *work)
cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
cm_id_priv->tid = lap_msg->hdr.tid;
- ret = atomic_inc_and_test(&cm_id_priv->work_count);
- if (!ret)
- list_add_tail(&work->list, &cm_id_priv->work_list);
- spin_unlock_irq(&cm_id_priv->lock);
-
- if (ret)
- cm_process_work(cm_id_priv, work);
- else
- cm_deref_id(cm_id_priv);
+ cm_queue_work_unlock(cm_id_priv, work);
return 0;
unlock: spin_unlock_irq(&cm_id_priv->lock);
@@ -3413,7 +3383,6 @@ static int cm_apr_handler(struct cm_work *work)
{
struct cm_id_private *cm_id_priv;
struct cm_apr_msg *apr_msg;
- int ret;
/* Currently Alternate path messages are not supported for
* RoCE link layer.
@@ -3448,16 +3417,7 @@ static int cm_apr_handler(struct cm_work *work)
cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
cm_id_priv->msg = NULL;
-
- ret = atomic_inc_and_test(&cm_id_priv->work_count);
- if (!ret)
- list_add_tail(&work->list, &cm_id_priv->work_list);
- spin_unlock_irq(&cm_id_priv->lock);
-
- if (ret)
- cm_process_work(cm_id_priv, work);
- else
- cm_deref_id(cm_id_priv);
+ cm_queue_work_unlock(cm_id_priv, work);
return 0;
out:
cm_deref_id(cm_id_priv);
@@ -3468,7 +3428,6 @@ static int cm_timewait_handler(struct cm_work *work)
{
struct cm_timewait_info *timewait_info;
struct cm_id_private *cm_id_priv;
- int ret;
timewait_info = container_of(work, struct cm_timewait_info, work);
spin_lock_irq(&cm.lock);
@@ -3487,15 +3446,7 @@ static int cm_timewait_handler(struct cm_work *work)
goto out;
}
cm_id_priv->id.state = IB_CM_IDLE;
- ret = atomic_inc_and_test(&cm_id_priv->work_count);
- if (!ret)
- list_add_tail(&work->list, &cm_id_priv->work_list);
- spin_unlock_irq(&cm_id_priv->lock);
-
- if (ret)
- cm_process_work(cm_id_priv, work);
- else
- cm_deref_id(cm_id_priv);
+ cm_queue_work_unlock(cm_id_priv, work);
return 0;
out:
cm_deref_id(cm_id_priv);
@@ -3642,7 +3593,6 @@ static int cm_sidr_req_handler(struct cm_work *work)
.status = IB_SIDR_UNSUPPORTED });
goto out; /* No match. */
}
- refcount_inc(&listen_cm_id_priv->refcount);
spin_unlock_irq(&cm.lock);
cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
@@ -3674,8 +3624,8 @@ static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
struct cm_id_private *cm_id_priv,
struct ib_cm_sidr_rep_param *param)
{
- cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
- cm_id_priv->tid);
+ cm_format_mad_ece_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
+ cm_id_priv->tid, param->ece.attr_mod);
IBA_SET(CM_SIDR_REP_REQUESTID, sidr_rep_msg,
be32_to_cpu(cm_id_priv->id.remote_id));
IBA_SET(CM_SIDR_REP_STATUS, sidr_rep_msg, param->status);
@@ -3683,6 +3633,10 @@ static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
IBA_SET(CM_SIDR_REP_SERVICEID, sidr_rep_msg,
be64_to_cpu(cm_id_priv->id.service_id));
IBA_SET(CM_SIDR_REP_Q_KEY, sidr_rep_msg, param->qkey);
+ IBA_SET(CM_SIDR_REP_VENDOR_ID_L, sidr_rep_msg,
+ param->ece.vendor_id & 0xFF);
+ IBA_SET(CM_SIDR_REP_VENDOR_ID_H, sidr_rep_msg,
+ (param->ece.vendor_id >> 8) & 0xFF);
if (param->info && param->info_length)
IBA_SET_MEM(CM_SIDR_REP_ADDITIONAL_INFORMATION, sidr_rep_msg,
@@ -4384,7 +4338,7 @@ static void cm_remove_port_fs(struct cm_port *port)
}
-static void cm_add_one(struct ib_device *ib_device)
+static int cm_add_one(struct ib_device *ib_device)
{
struct cm_device *cm_dev;
struct cm_port *port;
@@ -4403,7 +4357,7 @@ static void cm_add_one(struct ib_device *ib_device)
cm_dev = kzalloc(struct_size(cm_dev, port, ib_device->phys_port_cnt),
GFP_KERNEL);
if (!cm_dev)
- return;
+ return -ENOMEM;
cm_dev->ib_device = ib_device;
cm_dev->ack_delay = ib_device->attrs.local_ca_ack_delay;
@@ -4415,8 +4369,10 @@ static void cm_add_one(struct ib_device *ib_device)
continue;
port = kzalloc(sizeof *port, GFP_KERNEL);
- if (!port)
+ if (!port) {
+ ret = -ENOMEM;
goto error1;
+ }
cm_dev->port[i-1] = port;
port->cm_dev = cm_dev;
@@ -4437,8 +4393,10 @@ static void cm_add_one(struct ib_device *ib_device)
cm_recv_handler,
port,
0);
- if (IS_ERR(port->mad_agent))
+ if (IS_ERR(port->mad_agent)) {
+ ret = PTR_ERR(port->mad_agent);
goto error2;
+ }
ret = ib_modify_port(ib_device, i, 0, &port_modify);
if (ret)
@@ -4447,15 +4405,17 @@ static void cm_add_one(struct ib_device *ib_device)
count++;
}
- if (!count)
+ if (!count) {
+ ret = -EOPNOTSUPP;
goto free;
+ }
ib_set_client_data(ib_device, &cm_client, cm_dev);
write_lock_irqsave(&cm.device_lock, flags);
list_add_tail(&cm_dev->list, &cm.device_list);
write_unlock_irqrestore(&cm.device_lock, flags);
- return;
+ return 0;
error3:
ib_unregister_mad_agent(port->mad_agent);
@@ -4477,6 +4437,7 @@ error1:
}
free:
kfree(cm_dev);
+ return ret;
}
static void cm_remove_one(struct ib_device *ib_device, void *client_data)
@@ -4491,9 +4452,6 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
unsigned long flags;
int i;
- if (!cm_dev)
- return;
-
write_lock_irqsave(&cm.device_lock, flags);
list_del(&cm_dev->list);
write_unlock_irqrestore(&cm.device_lock, flags);
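
The cm_queue_work_unlock() helper added above centralizes a pattern the per-handler code used to repeat: a work counter starts at -1, the thread whose increment brings it to zero delivers events itself, and any thread that increments past zero only appends to the work list for the delivering thread to drain. Below is a hedged, self-contained sketch of that ordering scheme in plain C, with <stdatomic.h> and a pthread mutex standing in for the kernel's atomic_t and cm_id_priv->lock; it illustrates the idea rather than the kernel implementation.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct work { struct work *next; int id; };

static atomic_int work_count = -1;      /* -1 == idle, like cm_id_priv->work_count */
static struct work *head, *tail;        /* pending list, protected by list_lock */
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void process(struct work *w)
{
	for (;;) {
		printf("deliver work %d\n", w->id);     /* delivery is single threaded */
		free(w);
		if (atomic_fetch_sub(&work_count, 1) == 0)
			break;                  /* counter back to -1: nothing queued */
		pthread_mutex_lock(&list_lock); /* someone queued more while we ran */
		w = head;
		head = w->next;
		if (!head)
			tail = NULL;
		pthread_mutex_unlock(&list_lock);
	}
}

static void queue_work(struct work *w)
{
	bool immediate;

	pthread_mutex_lock(&list_lock);
	/* like atomic_inc_and_test(): true only for the thread that reaches 0 */
	immediate = atomic_fetch_add(&work_count, 1) == -1;
	if (!immediate) {
		w->next = NULL;                 /* leave it for the delivering thread */
		if (tail)
			tail->next = w;
		else
			head = w;
		tail = w;
	}
	pthread_mutex_unlock(&list_lock);

	if (immediate)
		process(w);                     /* we own delivery until the list drains */
}

static void *producer(void *arg)
{
	struct work *w = malloc(sizeof(*w));

	w->id = (int)(long)arg;
	queue_work(w);
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, producer, (void *)1L);
	pthread_create(&t2, NULL, producer, (void *)2L);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	return 0;
}
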
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 26e6f7df247b..3d7cc9f0f3d4 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -91,7 +91,13 @@ const char *__attribute_const__ rdma_reject_msg(struct rdma_cm_id *id,
}
EXPORT_SYMBOL(rdma_reject_msg);
-bool rdma_is_consumer_reject(struct rdma_cm_id *id, int reason)
+/**
+ * rdma_is_consumer_reject - return true if the consumer rejected the connect
+ * request.
+ * @id: Communication identifier that received the REJECT event.
+ * @reason: Value returned in the REJECT event status field.
+ */
+static bool rdma_is_consumer_reject(struct rdma_cm_id *id, int reason)
{
if (rdma_ib_or_roce(id->device, id->port_num))
return reason == IB_CM_REJ_CONSUMER_DEFINED;
@@ -102,7 +108,6 @@ bool rdma_is_consumer_reject(struct rdma_cm_id *id, int reason)
WARN_ON_ONCE(1);
return false;
}
-EXPORT_SYMBOL(rdma_is_consumer_reject);
const void *rdma_consumer_reject_data(struct rdma_cm_id *id,
struct rdma_cm_event *ev, u8 *data_len)
@@ -148,7 +153,7 @@ struct rdma_cm_id *rdma_res_to_id(struct rdma_restrack_entry *res)
}
EXPORT_SYMBOL(rdma_res_to_id);
-static void cma_add_one(struct ib_device *device);
+static int cma_add_one(struct ib_device *device);
static void cma_remove_one(struct ib_device *device, void *client_data);
static struct ib_client cma_client = {
@@ -479,6 +484,7 @@ static void _cma_attach_to_dev(struct rdma_id_private *id_priv,
rdma_restrack_kadd(&id_priv->res);
else
rdma_restrack_uadd(&id_priv->res);
+ trace_cm_id_attach(id_priv, cma_dev->device);
}
static void cma_attach_to_dev(struct rdma_id_private *id_priv,
@@ -883,7 +889,6 @@ struct rdma_cm_id *__rdma_create_id(struct net *net,
id_priv->id.route.addr.dev_addr.net = get_net(net);
id_priv->seq_num &= 0x00ffffff;
- trace_cm_id_create(id_priv);
return &id_priv->id;
}
EXPORT_SYMBOL(__rdma_create_id);
@@ -1906,6 +1911,9 @@ static void cma_set_rep_event_data(struct rdma_cm_event *event,
event->param.conn.rnr_retry_count = rep_data->rnr_retry_count;
event->param.conn.srq = rep_data->srq;
event->param.conn.qp_num = rep_data->remote_qpn;
+
+ event->ece.vendor_id = rep_data->ece.vendor_id;
+ event->ece.attr_mod = rep_data->ece.attr_mod;
}
static int cma_cm_event_handler(struct rdma_id_private *id_priv,
@@ -2124,6 +2132,9 @@ static void cma_set_req_event_data(struct rdma_cm_event *event,
event->param.conn.rnr_retry_count = req_data->rnr_retry_count;
event->param.conn.srq = req_data->srq;
event->param.conn.qp_num = req_data->remote_qpn;
+
+ event->ece.vendor_id = req_data->ece.vendor_id;
+ event->ece.attr_mod = req_data->ece.attr_mod;
}
static int cma_ib_check_req_qp_type(const struct rdma_cm_id *id,
@@ -2904,6 +2915,24 @@ static int iboe_tos_to_sl(struct net_device *ndev, int tos)
return 0;
}
+static __be32 cma_get_roce_udp_flow_label(struct rdma_id_private *id_priv)
+{
+ struct sockaddr_in6 *addr6;
+ u16 dport, sport;
+ u32 hash, fl;
+
+ addr6 = (struct sockaddr_in6 *)cma_src_addr(id_priv);
+ fl = be32_to_cpu(addr6->sin6_flowinfo) & IB_GRH_FLOWLABEL_MASK;
+ if ((cma_family(id_priv) != AF_INET6) || !fl) {
+ dport = be16_to_cpu(cma_port(cma_dst_addr(id_priv)));
+ sport = be16_to_cpu(cma_port(cma_src_addr(id_priv)));
+ hash = (u32)sport * 31 + dport;
+ fl = hash & IB_GRH_FLOWLABEL_MASK;
+ }
+
+ return cpu_to_be32(fl);
+}
+
static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
{
struct rdma_route *route = &id_priv->id.route;
@@ -2970,6 +2999,11 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
goto err2;
}
+ if (rdma_protocol_roce_udp_encap(id_priv->id.device,
+ id_priv->id.port_num))
+ route->path_rec->flow_label =
+ cma_get_roce_udp_flow_label(id_priv);
+
cma_init_resolve_route_work(work, id_priv);
queue_work(cma_wq, &work->work);
@@ -3919,6 +3953,8 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
req.max_cm_retries = CMA_MAX_CM_RETRIES;
req.srq = id_priv->srq ? 1 : 0;
+ req.ece.vendor_id = id_priv->ece.vendor_id;
+ req.ece.attr_mod = id_priv->ece.attr_mod;
trace_cm_send_req(id_priv);
ret = ib_send_cm_req(id_priv->cm_id.ib, &req);
@@ -4008,6 +4044,27 @@ err:
}
EXPORT_SYMBOL(rdma_connect);
+/**
+ * rdma_connect_ece - Initiate an active connection request with ECE data.
+ * @id: Connection identifier to connect.
+ * @conn_param: Connection information used for connected QPs.
+ * @ece: ECE parameters
+ *
+ * See rdma_connect() explanation.
+ */
+int rdma_connect_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param,
+ struct rdma_ucm_ece *ece)
+{
+ struct rdma_id_private *id_priv =
+ container_of(id, struct rdma_id_private, id);
+
+ id_priv->ece.vendor_id = ece->vendor_id;
+ id_priv->ece.attr_mod = ece->attr_mod;
+
+ return rdma_connect(id, conn_param);
+}
+EXPORT_SYMBOL(rdma_connect_ece);
+
static int cma_accept_ib(struct rdma_id_private *id_priv,
struct rdma_conn_param *conn_param)
{
@@ -4033,6 +4090,8 @@ static int cma_accept_ib(struct rdma_id_private *id_priv,
rep.flow_control = conn_param->flow_control;
rep.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
rep.srq = id_priv->srq ? 1 : 0;
+ rep.ece.vendor_id = id_priv->ece.vendor_id;
+ rep.ece.attr_mod = id_priv->ece.attr_mod;
trace_cm_send_rep(id_priv);
ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
@@ -4080,7 +4139,11 @@ static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
return ret;
rep.qp_num = id_priv->qp_num;
rep.qkey = id_priv->qkey;
+
+ rep.ece.vendor_id = id_priv->ece.vendor_id;
+ rep.ece.attr_mod = id_priv->ece.attr_mod;
}
+
rep.private_data = private_data;
rep.private_data_len = private_data_len;
@@ -4133,11 +4196,24 @@ int __rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param,
return 0;
reject:
cma_modify_qp_err(id_priv);
- rdma_reject(id, NULL, 0);
+ rdma_reject(id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED);
return ret;
}
EXPORT_SYMBOL(__rdma_accept);
+int __rdma_accept_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param,
+ const char *caller, struct rdma_ucm_ece *ece)
+{
+ struct rdma_id_private *id_priv =
+ container_of(id, struct rdma_id_private, id);
+
+ id_priv->ece.vendor_id = ece->vendor_id;
+ id_priv->ece.attr_mod = ece->attr_mod;
+
+ return __rdma_accept(id, conn_param, caller);
+}
+EXPORT_SYMBOL(__rdma_accept_ece);
+
int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
{
struct rdma_id_private *id_priv;
@@ -4160,7 +4236,7 @@ int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
EXPORT_SYMBOL(rdma_notify);
int rdma_reject(struct rdma_cm_id *id, const void *private_data,
- u8 private_data_len)
+ u8 private_data_len, u8 reason)
{
struct rdma_id_private *id_priv;
int ret;
@@ -4175,9 +4251,8 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data,
private_data, private_data_len);
} else {
trace_cm_send_rej(id_priv);
- ret = ib_send_cm_rej(id_priv->cm_id.ib,
- IB_CM_REJ_CONSUMER_DEFINED, NULL,
- 0, private_data, private_data_len);
+ ret = ib_send_cm_rej(id_priv->cm_id.ib, reason, NULL, 0,
+ private_data, private_data_len);
}
} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
ret = iw_cm_reject(id_priv->cm_id.iw,
@@ -4633,29 +4708,34 @@ static struct notifier_block cma_nb = {
.notifier_call = cma_netdev_callback
};
-static void cma_add_one(struct ib_device *device)
+static int cma_add_one(struct ib_device *device)
{
struct cma_device *cma_dev;
struct rdma_id_private *id_priv;
unsigned int i;
unsigned long supported_gids = 0;
+ int ret;
cma_dev = kmalloc(sizeof *cma_dev, GFP_KERNEL);
if (!cma_dev)
- return;
+ return -ENOMEM;
cma_dev->device = device;
cma_dev->default_gid_type = kcalloc(device->phys_port_cnt,
sizeof(*cma_dev->default_gid_type),
GFP_KERNEL);
- if (!cma_dev->default_gid_type)
+ if (!cma_dev->default_gid_type) {
+ ret = -ENOMEM;
goto free_cma_dev;
+ }
cma_dev->default_roce_tos = kcalloc(device->phys_port_cnt,
sizeof(*cma_dev->default_roce_tos),
GFP_KERNEL);
- if (!cma_dev->default_roce_tos)
+ if (!cma_dev->default_roce_tos) {
+ ret = -ENOMEM;
goto free_gid_type;
+ }
rdma_for_each_port (device, i) {
supported_gids = roce_gid_type_mask_support(device, i);
@@ -4681,15 +4761,14 @@ static void cma_add_one(struct ib_device *device)
mutex_unlock(&lock);
trace_cm_add_one(device);
- return;
+ return 0;
free_gid_type:
kfree(cma_dev->default_gid_type);
free_cma_dev:
kfree(cma_dev);
-
- return;
+ return ret;
}
static int cma_remove_id_dev(struct rdma_id_private *id_priv)
@@ -4751,9 +4830,6 @@ static void cma_remove_one(struct ib_device *device, void *client_data)
trace_cm_remove_one(device);
- if (!cma_dev)
- return;
-
mutex_lock(&lock);
list_del(&cma_dev->list);
mutex_unlock(&lock);
diff --git a/drivers/infiniband/core/cma_configfs.c b/drivers/infiniband/core/cma_configfs.c
index c672a4978bfd..3c1e2ca564fe 100644
--- a/drivers/infiniband/core/cma_configfs.c
+++ b/drivers/infiniband/core/cma_configfs.c
@@ -322,8 +322,21 @@ fail:
return ERR_PTR(err);
}
+static void drop_cma_dev(struct config_group *cgroup, struct config_item *item)
+{
+ struct config_group *group =
+ container_of(item, struct config_group, cg_item);
+ struct cma_dev_group *cma_dev_group =
+ container_of(group, struct cma_dev_group, device_group);
+
+ configfs_remove_default_groups(&cma_dev_group->ports_group);
+ configfs_remove_default_groups(&cma_dev_group->device_group);
+ config_item_put(item);
+}
+
static struct configfs_group_operations cma_subsys_group_ops = {
.make_group = make_cma_dev,
+ .drop_item = drop_cma_dev,
};
static const struct config_item_type cma_subsys_type = {
diff --git a/drivers/infiniband/core/cma_priv.h b/drivers/infiniband/core/cma_priv.h
index 5edcf44a9307..caece96ebcf5 100644
--- a/drivers/infiniband/core/cma_priv.h
+++ b/drivers/infiniband/core/cma_priv.h
@@ -95,6 +95,7 @@ struct rdma_id_private {
* Internal to RDMA/core, don't use in the drivers
*/
struct rdma_restrack_entry res;
+ struct rdma_ucm_ece ece;
};
#if IS_ENABLED(CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS)
diff --git a/drivers/infiniband/core/cma_trace.h b/drivers/infiniband/core/cma_trace.h
index 81e36bf13159..e6e20c36c538 100644
--- a/drivers/infiniband/core/cma_trace.h
+++ b/drivers/infiniband/core/cma_trace.h
@@ -103,23 +103,33 @@ DEFINE_CMA_FSM_EVENT(sent_drep);
DEFINE_CMA_FSM_EVENT(sent_dreq);
DEFINE_CMA_FSM_EVENT(id_destroy);
-TRACE_EVENT(cm_id_create,
+TRACE_EVENT(cm_id_attach,
TP_PROTO(
- const struct rdma_id_private *id_priv
+ const struct rdma_id_private *id_priv,
+ const struct ib_device *device
),
- TP_ARGS(id_priv),
+ TP_ARGS(id_priv, device),
TP_STRUCT__entry(
__field(u32, cm_id)
+ __array(unsigned char, srcaddr, sizeof(struct sockaddr_in6))
+ __array(unsigned char, dstaddr, sizeof(struct sockaddr_in6))
+ __string(devname, device->name)
),
TP_fast_assign(
__entry->cm_id = id_priv->res.id;
+ memcpy(__entry->srcaddr, &id_priv->id.route.addr.src_addr,
+ sizeof(struct sockaddr_in6));
+ memcpy(__entry->dstaddr, &id_priv->id.route.addr.dst_addr,
+ sizeof(struct sockaddr_in6));
+ __assign_str(devname, device->name);
),
- TP_printk("cm.id=%u",
- __entry->cm_id
+ TP_printk("cm.id=%u src=%pISpc dst=%pISpc device=%s",
+ __entry->cm_id, __entry->srcaddr, __entry->dstaddr,
+ __get_str(devname)
)
);
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index cf42acca4a3a..a1e6a67b2c4a 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -414,4 +414,7 @@ void rdma_umap_priv_init(struct rdma_umap_priv *priv,
struct vm_area_struct *vma,
struct rdma_user_mmap_entry *entry);
+void ib_cq_pool_init(struct ib_device *dev);
+void ib_cq_pool_destroy(struct ib_device *dev);
+
#endif /* _CORE_PRIV_H */
diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c
index 4f25b2400694..655795bfa0ee 100644
--- a/drivers/infiniband/core/cq.c
+++ b/drivers/infiniband/core/cq.c
@@ -7,7 +7,11 @@
#include <linux/slab.h>
#include <rdma/ib_verbs.h>
+#include "core_priv.h"
+
#include <trace/events/rdma_core.h>
+/* Max size for shared CQ, may require tuning */
+#define IB_MAX_SHARED_CQ_SZ 4096U
/* # of WCs to poll for with a single call to ib_poll_cq */
#define IB_POLL_BATCH 16
@@ -218,6 +222,7 @@ struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
cq->cq_context = private;
cq->poll_ctx = poll_ctx;
atomic_set(&cq->usecnt, 0);
+ cq->comp_vector = comp_vector;
cq->wc = kmalloc_array(IB_POLL_BATCH, sizeof(*cq->wc), GFP_KERNEL);
if (!cq->wc)
@@ -309,6 +314,8 @@ void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata)
{
if (WARN_ON_ONCE(atomic_read(&cq->usecnt)))
return;
+ if (WARN_ON_ONCE(cq->cqe_used))
+ return;
switch (cq->poll_ctx) {
case IB_POLL_DIRECT:
@@ -334,3 +341,169 @@ void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata)
kfree(cq);
}
EXPORT_SYMBOL(ib_free_cq_user);
+
+void ib_cq_pool_init(struct ib_device *dev)
+{
+ unsigned int i;
+
+ spin_lock_init(&dev->cq_pools_lock);
+ for (i = 0; i < ARRAY_SIZE(dev->cq_pools); i++)
+ INIT_LIST_HEAD(&dev->cq_pools[i]);
+}
+
+void ib_cq_pool_destroy(struct ib_device *dev)
+{
+ struct ib_cq *cq, *n;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(dev->cq_pools); i++) {
+ list_for_each_entry_safe(cq, n, &dev->cq_pools[i],
+ pool_entry) {
+ WARN_ON(cq->cqe_used);
+ cq->shared = false;
+ ib_free_cq(cq);
+ }
+ }
+}
+
+static int ib_alloc_cqs(struct ib_device *dev, unsigned int nr_cqes,
+ enum ib_poll_context poll_ctx)
+{
+ LIST_HEAD(tmp_list);
+ unsigned int nr_cqs, i;
+ struct ib_cq *cq;
+ int ret;
+
+ if (poll_ctx > IB_POLL_LAST_POOL_TYPE) {
+ WARN_ON_ONCE(poll_ctx > IB_POLL_LAST_POOL_TYPE);
+ return -EINVAL;
+ }
+
+ /*
+ * Allocate at least as many CQEs as requested, and otherwise
+ * a reasonable batch size so that we can share CQs between
+ * multiple users instead of allocating a larger number of CQs.
+ */
+ nr_cqes = min_t(unsigned int, dev->attrs.max_cqe,
+ max(nr_cqes, IB_MAX_SHARED_CQ_SZ));
+ nr_cqs = min_t(unsigned int, dev->num_comp_vectors, num_online_cpus());
+ for (i = 0; i < nr_cqs; i++) {
+ cq = ib_alloc_cq(dev, NULL, nr_cqes, i, poll_ctx);
+ if (IS_ERR(cq)) {
+ ret = PTR_ERR(cq);
+ goto out_free_cqs;
+ }
+ cq->shared = true;
+ list_add_tail(&cq->pool_entry, &tmp_list);
+ }
+
+ spin_lock_irq(&dev->cq_pools_lock);
+ list_splice(&tmp_list, &dev->cq_pools[poll_ctx]);
+ spin_unlock_irq(&dev->cq_pools_lock);
+
+ return 0;
+
+out_free_cqs:
+ list_for_each_entry(cq, &tmp_list, pool_entry) {
+ cq->shared = false;
+ ib_free_cq(cq);
+ }
+ return ret;
+}
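As a worked example of the sizing above (numbers illustrative): with nr_cqes = 128, IB_MAX_SHARED_CQ_SZ = 4096 and dev->attrs.max_cqe = 65536, each pooled CQ is created with min(65536, max(128, 4096)) = 4096 entries, and the number of CQs allocated is min(dev->num_comp_vectors, num_online_cpus()).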
+
+/**
+ * ib_cq_pool_get() - Find the least used completion queue that matches
+ * a given cpu hint (or least used for wild card affinity) and fits
+ * nr_cqe.
+ * @dev: rdma device
+ * @nr_cqe: number of needed cqe entries
+ * @comp_vector_hint: completion vector hint; pass -1 to let the driver
+ * assign a comp vector based on an internal round-robin counter
+ * @poll_ctx: cq polling context
+ *
+ * Finds a cq that satisfies @comp_vector_hint and @nr_cqe requirements and
+ * claims entries in it for us. If no suitable cq is available, allocates
+ * a new cq with the requirements and adds it to the device pool.
+ * IB_POLL_DIRECT cannot be used for shared cqs so it is not a valid value
+ * for @poll_ctx.
+ */
+struct ib_cq *ib_cq_pool_get(struct ib_device *dev, unsigned int nr_cqe,
+ int comp_vector_hint,
+ enum ib_poll_context poll_ctx)
+{
+ static unsigned int default_comp_vector;
+ unsigned int vector, num_comp_vectors;
+ struct ib_cq *cq, *found = NULL;
+ int ret;
+
+ if (poll_ctx > IB_POLL_LAST_POOL_TYPE) {
+ WARN_ON_ONCE(poll_ctx > IB_POLL_LAST_POOL_TYPE);
+ return ERR_PTR(-EINVAL);
+ }
+
+ num_comp_vectors =
+ min_t(unsigned int, dev->num_comp_vectors, num_online_cpus());
+ /* Project the affinity to the device completion vector range */
+ if (comp_vector_hint < 0) {
+ comp_vector_hint =
+ (READ_ONCE(default_comp_vector) + 1) % num_comp_vectors;
+ WRITE_ONCE(default_comp_vector, comp_vector_hint);
+ }
+ vector = comp_vector_hint % num_comp_vectors;
+
+ /*
+ * Find the least used CQ with correct affinity and
+ * enough free CQ entries
+ */
+ while (!found) {
+ spin_lock_irq(&dev->cq_pools_lock);
+ list_for_each_entry(cq, &dev->cq_pools[poll_ctx],
+ pool_entry) {
+ /*
+ * Check to see if we have found a CQ with the
+ * correct completion vector
+ */
+ if (vector != cq->comp_vector)
+ continue;
+ if (cq->cqe_used + nr_cqe > cq->cqe)
+ continue;
+ found = cq;
+ break;
+ }
+
+ if (found) {
+ found->cqe_used += nr_cqe;
+ spin_unlock_irq(&dev->cq_pools_lock);
+
+ return found;
+ }
+ spin_unlock_irq(&dev->cq_pools_lock);
+
+ /*
+ * Didn't find a match or ran out of CQs in the device
+ * pool; allocate a new batch of CQs.
+ */
+ ret = ib_alloc_cqs(dev, nr_cqe, poll_ctx);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ return found;
+}
+EXPORT_SYMBOL(ib_cq_pool_get);
+
+/**
+ * ib_cq_pool_put - Return a CQ taken from a shared pool.
+ * @cq: The CQ to return.
+ * @nr_cqe: The max number of cqes that the user had requested.
+ */
+void ib_cq_pool_put(struct ib_cq *cq, unsigned int nr_cqe)
+{
+ if (WARN_ON_ONCE(nr_cqe > cq->cqe_used))
+ return;
+
+ spin_lock_irq(&cq->device->cq_pools_lock);
+ cq->cqe_used -= nr_cqe;
+ spin_unlock_irq(&cq->device->cq_pools_lock);
+}
+EXPORT_SYMBOL(ib_cq_pool_put);
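A minimal sketch of how a kernel ULP might pair these two calls, assuming it needs nr_cqe completion entries and has no CPU affinity preference (a hint of -1 lets the core round-robin the completion vector); IB_POLL_SOFTIRQ is just one of the valid pool contexts:

	#include <rdma/ib_verbs.h>

	static int example_use_shared_cq(struct ib_device *dev, unsigned int nr_cqe)
	{
		struct ib_cq *cq;

		cq = ib_cq_pool_get(dev, nr_cqe, -1, IB_POLL_SOFTIRQ);
		if (IS_ERR(cq))
			return PTR_ERR(cq);

		/* ... use cq as the send/recv CQ of a QP and run I/O ... */

		/* Return the claimed entries; the CQ itself stays in the pool. */
		ib_cq_pool_put(cq, nr_cqe);
		return 0;
	}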
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index d0b3d35ad3e4..905a2beaf885 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -677,8 +677,20 @@ static int add_client_context(struct ib_device *device,
if (ret)
goto out;
downgrade_write(&device->client_data_rwsem);
- if (client->add)
- client->add(device);
+ if (client->add) {
+ if (client->add(device)) {
+ /*
+ * If a client fails to add then the error code is
+ * ignored, but we won't call any more ops on this
+ * client.
+ */
+ xa_erase(&device->client_data, client->client_id);
+ up_read(&device->client_data_rwsem);
+ ib_device_put(device);
+ ib_client_put(client);
+ return 0;
+ }
+ }
/* Readers shall not see a client until add has been completed */
xa_set_mark(&device->client_data, client->client_id,
@@ -1381,6 +1393,7 @@ int ib_register_device(struct ib_device *device, const char *name)
goto dev_cleanup;
}
+ ib_cq_pool_init(device);
ret = enable_device_and_get(device);
dev_set_uevent_suppress(&device->dev, false);
/* Mark for userspace that device is ready */
@@ -1435,6 +1448,7 @@ static void __ib_unregister_device(struct ib_device *ib_dev)
goto out;
disable_device(ib_dev);
+ ib_cq_pool_destroy(ib_dev);
/* Expedite removing unregistered pointers from the hash table */
free_netdevs(ib_dev);
@@ -2557,7 +2571,6 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, add_gid);
SET_DEVICE_OP(dev_ops, advise_mr);
SET_DEVICE_OP(dev_ops, alloc_dm);
- SET_DEVICE_OP(dev_ops, alloc_fmr);
SET_DEVICE_OP(dev_ops, alloc_hw_stats);
SET_DEVICE_OP(dev_ops, alloc_mr);
SET_DEVICE_OP(dev_ops, alloc_mr_integrity);
@@ -2584,7 +2597,6 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, create_wq);
SET_DEVICE_OP(dev_ops, dealloc_dm);
SET_DEVICE_OP(dev_ops, dealloc_driver);
- SET_DEVICE_OP(dev_ops, dealloc_fmr);
SET_DEVICE_OP(dev_ops, dealloc_mw);
SET_DEVICE_OP(dev_ops, dealloc_pd);
SET_DEVICE_OP(dev_ops, dealloc_ucontext);
@@ -2628,7 +2640,6 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, iw_rem_ref);
SET_DEVICE_OP(dev_ops, map_mr_sg);
SET_DEVICE_OP(dev_ops, map_mr_sg_pi);
- SET_DEVICE_OP(dev_ops, map_phys_fmr);
SET_DEVICE_OP(dev_ops, mmap);
SET_DEVICE_OP(dev_ops, mmap_free);
SET_DEVICE_OP(dev_ops, modify_ah);
@@ -2662,7 +2673,6 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, resize_cq);
SET_DEVICE_OP(dev_ops, set_vf_guid);
SET_DEVICE_OP(dev_ops, set_vf_link_state);
- SET_DEVICE_OP(dev_ops, unmap_fmr);
SET_OBJ_SIZE(dev_ops, ib_ah);
SET_OBJ_SIZE(dev_ops, ib_cq);
diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
deleted file mode 100644
index e08aec427027..000000000000
--- a/drivers/infiniband/core/fmr_pool.c
+++ /dev/null
@@ -1,494 +0,0 @@
-/*
- * Copyright (c) 2004 Topspin Communications. All rights reserved.
- * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/errno.h>
-#include <linux/spinlock.h>
-#include <linux/export.h>
-#include <linux/slab.h>
-#include <linux/jhash.h>
-#include <linux/kthread.h>
-
-#include <rdma/ib_fmr_pool.h>
-
-#include "core_priv.h"
-
-#define PFX "fmr_pool: "
-
-enum {
- IB_FMR_MAX_REMAPS = 32,
-
- IB_FMR_HASH_BITS = 8,
- IB_FMR_HASH_SIZE = 1 << IB_FMR_HASH_BITS,
- IB_FMR_HASH_MASK = IB_FMR_HASH_SIZE - 1
-};
-
-/*
- * If an FMR is not in use, then the list member will point to either
- * its pool's free_list (if the FMR can be mapped again; that is,
- * remap_count < pool->max_remaps) or its pool's dirty_list (if the
- * FMR needs to be unmapped before being remapped). In either of
- * these cases it is a bug if the ref_count is not 0. In other words,
- * if ref_count is > 0, then the list member must not be linked into
- * either free_list or dirty_list.
- *
- * The cache_node member is used to link the FMR into a cache bucket
- * (if caching is enabled). This is independent of the reference
- * count of the FMR. When a valid FMR is released, its ref_count is
- * decremented, and if ref_count reaches 0, the FMR is placed in
- * either free_list or dirty_list as appropriate. However, it is not
- * removed from the cache and may be "revived" if a call to
- * ib_fmr_register_physical() occurs before the FMR is remapped. In
- * this case we just increment the ref_count and remove the FMR from
- * free_list/dirty_list.
- *
- * Before we remap an FMR from free_list, we remove it from the cache
- * (to prevent another user from obtaining a stale FMR). When an FMR
- * is released, we add it to the tail of the free list, so that our
- * cache eviction policy is "least recently used."
- *
- * All manipulation of ref_count, list and cache_node is protected by
- * pool_lock to maintain consistency.
- */
-
-struct ib_fmr_pool {
- spinlock_t pool_lock;
-
- int pool_size;
- int max_pages;
- int max_remaps;
- int dirty_watermark;
- int dirty_len;
- struct list_head free_list;
- struct list_head dirty_list;
- struct hlist_head *cache_bucket;
-
- void (*flush_function)(struct ib_fmr_pool *pool,
- void * arg);
- void *flush_arg;
-
- struct kthread_worker *worker;
- struct kthread_work work;
-
- atomic_t req_ser;
- atomic_t flush_ser;
-
- wait_queue_head_t force_wait;
-};
-
-static inline u32 ib_fmr_hash(u64 first_page)
-{
- return jhash_2words((u32) first_page, (u32) (first_page >> 32), 0) &
- (IB_FMR_HASH_SIZE - 1);
-}
-
-/* Caller must hold pool_lock */
-static inline struct ib_pool_fmr *ib_fmr_cache_lookup(struct ib_fmr_pool *pool,
- u64 *page_list,
- int page_list_len,
- u64 io_virtual_address)
-{
- struct hlist_head *bucket;
- struct ib_pool_fmr *fmr;
-
- if (!pool->cache_bucket)
- return NULL;
-
- bucket = pool->cache_bucket + ib_fmr_hash(*page_list);
-
- hlist_for_each_entry(fmr, bucket, cache_node)
- if (io_virtual_address == fmr->io_virtual_address &&
- page_list_len == fmr->page_list_len &&
- !memcmp(page_list, fmr->page_list,
- page_list_len * sizeof *page_list))
- return fmr;
-
- return NULL;
-}
-
-static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
-{
- int ret;
- struct ib_pool_fmr *fmr;
- LIST_HEAD(unmap_list);
- LIST_HEAD(fmr_list);
-
- spin_lock_irq(&pool->pool_lock);
-
- list_for_each_entry(fmr, &pool->dirty_list, list) {
- hlist_del_init(&fmr->cache_node);
- fmr->remap_count = 0;
- list_add_tail(&fmr->fmr->list, &fmr_list);
- }
-
- list_splice_init(&pool->dirty_list, &unmap_list);
- pool->dirty_len = 0;
-
- spin_unlock_irq(&pool->pool_lock);
-
- if (list_empty(&unmap_list)) {
- return;
- }
-
- ret = ib_unmap_fmr(&fmr_list);
- if (ret)
- pr_warn(PFX "ib_unmap_fmr returned %d\n", ret);
-
- spin_lock_irq(&pool->pool_lock);
- list_splice(&unmap_list, &pool->free_list);
- spin_unlock_irq(&pool->pool_lock);
-}
-
-static void ib_fmr_cleanup_func(struct kthread_work *work)
-{
- struct ib_fmr_pool *pool = container_of(work, struct ib_fmr_pool, work);
-
- ib_fmr_batch_release(pool);
- atomic_inc(&pool->flush_ser);
- wake_up_interruptible(&pool->force_wait);
-
- if (pool->flush_function)
- pool->flush_function(pool, pool->flush_arg);
-
- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0)
- kthread_queue_work(pool->worker, &pool->work);
-}
-
-/**
- * ib_create_fmr_pool - Create an FMR pool
- * @pd:Protection domain for FMRs
- * @params:FMR pool parameters
- *
- * Create a pool of FMRs. Return value is pointer to new pool or
- * error code if creation failed.
- */
-struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
- struct ib_fmr_pool_param *params)
-{
- struct ib_device *device;
- struct ib_fmr_pool *pool;
- int i;
- int ret;
- int max_remaps;
-
- if (!params)
- return ERR_PTR(-EINVAL);
-
- device = pd->device;
- if (!device->ops.alloc_fmr || !device->ops.dealloc_fmr ||
- !device->ops.map_phys_fmr || !device->ops.unmap_fmr) {
- dev_info(&device->dev, "Device does not support FMRs\n");
- return ERR_PTR(-ENOSYS);
- }
-
- if (!device->attrs.max_map_per_fmr)
- max_remaps = IB_FMR_MAX_REMAPS;
- else
- max_remaps = device->attrs.max_map_per_fmr;
-
- pool = kmalloc(sizeof *pool, GFP_KERNEL);
- if (!pool)
- return ERR_PTR(-ENOMEM);
-
- pool->cache_bucket = NULL;
- pool->flush_function = params->flush_function;
- pool->flush_arg = params->flush_arg;
-
- INIT_LIST_HEAD(&pool->free_list);
- INIT_LIST_HEAD(&pool->dirty_list);
-
- if (params->cache) {
- pool->cache_bucket =
- kmalloc_array(IB_FMR_HASH_SIZE,
- sizeof(*pool->cache_bucket),
- GFP_KERNEL);
- if (!pool->cache_bucket) {
- ret = -ENOMEM;
- goto out_free_pool;
- }
-
- for (i = 0; i < IB_FMR_HASH_SIZE; ++i)
- INIT_HLIST_HEAD(pool->cache_bucket + i);
- }
-
- pool->pool_size = 0;
- pool->max_pages = params->max_pages_per_fmr;
- pool->max_remaps = max_remaps;
- pool->dirty_watermark = params->dirty_watermark;
- pool->dirty_len = 0;
- spin_lock_init(&pool->pool_lock);
- atomic_set(&pool->req_ser, 0);
- atomic_set(&pool->flush_ser, 0);
- init_waitqueue_head(&pool->force_wait);
-
- pool->worker =
- kthread_create_worker(0, "ib_fmr(%s)", dev_name(&device->dev));
- if (IS_ERR(pool->worker)) {
- pr_warn(PFX "couldn't start cleanup kthread worker\n");
- ret = PTR_ERR(pool->worker);
- goto out_free_pool;
- }
- kthread_init_work(&pool->work, ib_fmr_cleanup_func);
-
- {
- struct ib_pool_fmr *fmr;
- struct ib_fmr_attr fmr_attr = {
- .max_pages = params->max_pages_per_fmr,
- .max_maps = pool->max_remaps,
- .page_shift = params->page_shift
- };
- int bytes_per_fmr = sizeof *fmr;
-
- if (pool->cache_bucket)
- bytes_per_fmr += params->max_pages_per_fmr * sizeof (u64);
-
- for (i = 0; i < params->pool_size; ++i) {
- fmr = kmalloc(bytes_per_fmr, GFP_KERNEL);
- if (!fmr)
- goto out_fail;
-
- fmr->pool = pool;
- fmr->remap_count = 0;
- fmr->ref_count = 0;
- INIT_HLIST_NODE(&fmr->cache_node);
-
- fmr->fmr = ib_alloc_fmr(pd, params->access, &fmr_attr);
- if (IS_ERR(fmr->fmr)) {
- pr_warn(PFX "fmr_create failed for FMR %d\n",
- i);
- kfree(fmr);
- goto out_fail;
- }
-
- list_add_tail(&fmr->list, &pool->free_list);
- ++pool->pool_size;
- }
- }
-
- return pool;
-
- out_free_pool:
- kfree(pool->cache_bucket);
- kfree(pool);
-
- return ERR_PTR(ret);
-
- out_fail:
- ib_destroy_fmr_pool(pool);
-
- return ERR_PTR(-ENOMEM);
-}
-EXPORT_SYMBOL(ib_create_fmr_pool);
-
-/**
- * ib_destroy_fmr_pool - Free FMR pool
- * @pool:FMR pool to free
- *
- * Destroy an FMR pool and free all associated resources.
- */
-void ib_destroy_fmr_pool(struct ib_fmr_pool *pool)
-{
- struct ib_pool_fmr *fmr;
- struct ib_pool_fmr *tmp;
- LIST_HEAD(fmr_list);
- int i;
-
- kthread_destroy_worker(pool->worker);
- ib_fmr_batch_release(pool);
-
- i = 0;
- list_for_each_entry_safe(fmr, tmp, &pool->free_list, list) {
- if (fmr->remap_count) {
- INIT_LIST_HEAD(&fmr_list);
- list_add_tail(&fmr->fmr->list, &fmr_list);
- ib_unmap_fmr(&fmr_list);
- }
- ib_dealloc_fmr(fmr->fmr);
- list_del(&fmr->list);
- kfree(fmr);
- ++i;
- }
-
- if (i < pool->pool_size)
- pr_warn(PFX "pool still has %d regions registered\n",
- pool->pool_size - i);
-
- kfree(pool->cache_bucket);
- kfree(pool);
-}
-EXPORT_SYMBOL(ib_destroy_fmr_pool);
-
-/**
- * ib_flush_fmr_pool - Invalidate all unmapped FMRs
- * @pool:FMR pool to flush
- *
- * Ensure that all unmapped FMRs are fully invalidated.
- */
-int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
-{
- int serial;
- struct ib_pool_fmr *fmr, *next;
-
- /*
- * The free_list holds FMRs that may have been used
- * but have not been remapped enough times to be dirty.
- * Put them on the dirty list now so that the cleanup
- * thread will reap them too.
- */
- spin_lock_irq(&pool->pool_lock);
- list_for_each_entry_safe(fmr, next, &pool->free_list, list) {
- if (fmr->remap_count > 0)
- list_move(&fmr->list, &pool->dirty_list);
- }
- spin_unlock_irq(&pool->pool_lock);
-
- serial = atomic_inc_return(&pool->req_ser);
- kthread_queue_work(pool->worker, &pool->work);
-
- if (wait_event_interruptible(pool->force_wait,
- atomic_read(&pool->flush_ser) - serial >= 0))
- return -EINTR;
-
- return 0;
-}
-EXPORT_SYMBOL(ib_flush_fmr_pool);
-
-/**
- * ib_fmr_pool_map_phys - Map an FMR from an FMR pool.
- * @pool_handle: FMR pool to allocate FMR from
- * @page_list: List of pages to map
- * @list_len: Number of pages in @page_list
- * @io_virtual_address: I/O virtual address for new FMR
- */
-struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle,
- u64 *page_list,
- int list_len,
- u64 io_virtual_address)
-{
- struct ib_fmr_pool *pool = pool_handle;
- struct ib_pool_fmr *fmr;
- unsigned long flags;
- int result;
-
- if (list_len < 1 || list_len > pool->max_pages)
- return ERR_PTR(-EINVAL);
-
- spin_lock_irqsave(&pool->pool_lock, flags);
- fmr = ib_fmr_cache_lookup(pool,
- page_list,
- list_len,
- io_virtual_address);
- if (fmr) {
- /* found in cache */
- ++fmr->ref_count;
- if (fmr->ref_count == 1) {
- list_del(&fmr->list);
- }
-
- spin_unlock_irqrestore(&pool->pool_lock, flags);
-
- return fmr;
- }
-
- if (list_empty(&pool->free_list)) {
- spin_unlock_irqrestore(&pool->pool_lock, flags);
- return ERR_PTR(-EAGAIN);
- }
-
- fmr = list_entry(pool->free_list.next, struct ib_pool_fmr, list);
- list_del(&fmr->list);
- hlist_del_init(&fmr->cache_node);
- spin_unlock_irqrestore(&pool->pool_lock, flags);
-
- result = ib_map_phys_fmr(fmr->fmr, page_list, list_len,
- io_virtual_address);
-
- if (result) {
- spin_lock_irqsave(&pool->pool_lock, flags);
- list_add(&fmr->list, &pool->free_list);
- spin_unlock_irqrestore(&pool->pool_lock, flags);
-
- pr_warn(PFX "fmr_map returns %d\n", result);
-
- return ERR_PTR(result);
- }
-
- ++fmr->remap_count;
- fmr->ref_count = 1;
-
- if (pool->cache_bucket) {
- fmr->io_virtual_address = io_virtual_address;
- fmr->page_list_len = list_len;
- memcpy(fmr->page_list, page_list, list_len * sizeof(*page_list));
-
- spin_lock_irqsave(&pool->pool_lock, flags);
- hlist_add_head(&fmr->cache_node,
- pool->cache_bucket + ib_fmr_hash(fmr->page_list[0]));
- spin_unlock_irqrestore(&pool->pool_lock, flags);
- }
-
- return fmr;
-}
-EXPORT_SYMBOL(ib_fmr_pool_map_phys);
-
-/**
- * ib_fmr_pool_unmap - Unmap FMR
- * @fmr:FMR to unmap
- *
- * Unmap an FMR. The FMR mapping may remain valid until the FMR is
- * reused (or until ib_flush_fmr_pool() is called).
- */
-void ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
-{
- struct ib_fmr_pool *pool;
- unsigned long flags;
-
- pool = fmr->pool;
-
- spin_lock_irqsave(&pool->pool_lock, flags);
-
- --fmr->ref_count;
- if (!fmr->ref_count) {
- if (fmr->remap_count < pool->max_remaps) {
- list_add_tail(&fmr->list, &pool->free_list);
- } else {
- list_add_tail(&fmr->list, &pool->dirty_list);
- if (++pool->dirty_len >= pool->dirty_watermark) {
- atomic_inc(&pool->req_ser);
- kthread_queue_work(pool->worker, &pool->work);
- }
- }
- }
-
- spin_unlock_irqrestore(&pool->pool_lock, flags);
-}
-EXPORT_SYMBOL(ib_fmr_pool_unmap);
diff --git a/drivers/infiniband/core/lag.c b/drivers/infiniband/core/lag.c
new file mode 100644
index 000000000000..7063e41eaf26
--- /dev/null
+++ b/drivers/infiniband/core/lag.c
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2020 Mellanox Technologies. All rights reserved.
+ */
+
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_cache.h>
+#include <rdma/lag.h>
+
+static struct sk_buff *rdma_build_skb(struct ib_device *device,
+ struct net_device *netdev,
+ struct rdma_ah_attr *ah_attr,
+ gfp_t flags)
+{
+ struct ipv6hdr *ip6h;
+ struct sk_buff *skb;
+ struct ethhdr *eth;
+ struct iphdr *iph;
+ struct udphdr *uh;
+ u8 smac[ETH_ALEN];
+ bool is_ipv4;
+ int hdr_len;
+
+ is_ipv4 = ipv6_addr_v4mapped((struct in6_addr *)ah_attr->grh.dgid.raw);
+ hdr_len = ETH_HLEN + sizeof(struct udphdr) + LL_RESERVED_SPACE(netdev);
+ hdr_len += is_ipv4 ? sizeof(struct iphdr) : sizeof(struct ipv6hdr);
+
+ skb = alloc_skb(hdr_len, flags);
+ if (!skb)
+ return NULL;
+
+ skb->dev = netdev;
+ skb_reserve(skb, hdr_len);
+ skb_push(skb, sizeof(struct udphdr));
+ skb_reset_transport_header(skb);
+ uh = udp_hdr(skb);
+ uh->source =
+ htons(rdma_flow_label_to_udp_sport(ah_attr->grh.flow_label));
+ uh->dest = htons(ROCE_V2_UDP_DPORT);
+ uh->len = htons(sizeof(struct udphdr));
+
+ if (is_ipv4) {
+ skb_push(skb, sizeof(struct iphdr));
+ skb_reset_network_header(skb);
+ iph = ip_hdr(skb);
+ iph->frag_off = 0;
+ iph->version = 4;
+ iph->protocol = IPPROTO_UDP;
+ iph->ihl = 0x5;
+ iph->tot_len = htons(sizeof(struct udphdr) + sizeof(struct iphdr));
+ memcpy(&iph->saddr, ah_attr->grh.sgid_attr->gid.raw + 12,
+ sizeof(struct in_addr));
+ memcpy(&iph->daddr, ah_attr->grh.dgid.raw + 12,
+ sizeof(struct in_addr));
+ } else {
+ skb_push(skb, sizeof(struct ipv6hdr));
+ skb_reset_network_header(skb);
+ ip6h = ipv6_hdr(skb);
+ ip6h->version = 6;
+ ip6h->nexthdr = IPPROTO_UDP;
+ memcpy(&ip6h->flow_lbl, &ah_attr->grh.flow_label,
+ sizeof(*ip6h->flow_lbl));
+ memcpy(&ip6h->saddr, ah_attr->grh.sgid_attr->gid.raw,
+ sizeof(struct in6_addr));
+ memcpy(&ip6h->daddr, ah_attr->grh.dgid.raw,
+ sizeof(struct in6_addr));
+ }
+
+ skb_push(skb, sizeof(struct ethhdr));
+ skb_reset_mac_header(skb);
+ eth = eth_hdr(skb);
+ skb->protocol = eth->h_proto = htons(is_ipv4 ? ETH_P_IP : ETH_P_IPV6);
+ rdma_read_gid_l2_fields(ah_attr->grh.sgid_attr, NULL, smac);
+ memcpy(eth->h_source, smac, ETH_ALEN);
+ memcpy(eth->h_dest, ah_attr->roce.dmac, ETH_ALEN);
+
+ return skb;
+}
+
+static struct net_device *rdma_get_xmit_slave_udp(struct ib_device *device,
+ struct net_device *master,
+ struct rdma_ah_attr *ah_attr,
+ gfp_t flags)
+{
+ struct net_device *slave;
+ struct sk_buff *skb;
+
+ skb = rdma_build_skb(device, master, ah_attr, flags);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ rcu_read_lock();
+ slave = netdev_get_xmit_slave(master, skb,
+ !!(device->lag_flags &
+ RDMA_LAG_FLAGS_HASH_ALL_SLAVES));
+ if (slave)
+ dev_hold(slave);
+ rcu_read_unlock();
+ kfree_skb(skb);
+ return slave;
+}
+
+void rdma_lag_put_ah_roce_slave(struct net_device *xmit_slave)
+{
+ if (xmit_slave)
+ dev_put(xmit_slave);
+}
+
+struct net_device *rdma_lag_get_ah_roce_slave(struct ib_device *device,
+ struct rdma_ah_attr *ah_attr,
+ gfp_t flags)
+{
+ struct net_device *slave = NULL;
+ struct net_device *master;
+
+ if (!(ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE &&
+ ah_attr->grh.sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP &&
+ ah_attr->grh.flow_label))
+ return NULL;
+
+ rcu_read_lock();
+ master = rdma_read_gid_attr_ndev_rcu(ah_attr->grh.sgid_attr);
+ if (IS_ERR(master)) {
+ rcu_read_unlock();
+ return master;
+ }
+ dev_hold(master);
+ rcu_read_unlock();
+
+ if (!netif_is_bond_master(master))
+ goto put;
+
+ slave = rdma_get_xmit_slave_udp(device, master, ah_attr, flags);
+put:
+ dev_put(master);
+ return slave;
+}
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index c54db13fa9b0..186e0d652e8b 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -85,7 +85,6 @@ MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests
module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");
-/* Client ID 0 is used for snoop-only clients */
static DEFINE_XARRAY_ALLOC1(ib_mad_clients);
static u32 ib_mad_client_next;
static struct list_head ib_mad_port_list;
@@ -483,141 +482,12 @@ error1:
}
EXPORT_SYMBOL(ib_register_mad_agent);
-static inline int is_snooping_sends(int mad_snoop_flags)
-{
- return (mad_snoop_flags &
- (/*IB_MAD_SNOOP_POSTED_SENDS |
- IB_MAD_SNOOP_RMPP_SENDS |*/
- IB_MAD_SNOOP_SEND_COMPLETIONS /*|
- IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
-}
-
-static inline int is_snooping_recvs(int mad_snoop_flags)
-{
- return (mad_snoop_flags &
- (IB_MAD_SNOOP_RECVS /*|
- IB_MAD_SNOOP_RMPP_RECVS*/));
-}
-
-static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
- struct ib_mad_snoop_private *mad_snoop_priv)
-{
- struct ib_mad_snoop_private **new_snoop_table;
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&qp_info->snoop_lock, flags);
- /* Check for empty slot in array. */
- for (i = 0; i < qp_info->snoop_table_size; i++)
- if (!qp_info->snoop_table[i])
- break;
-
- if (i == qp_info->snoop_table_size) {
- /* Grow table. */
- new_snoop_table = krealloc(qp_info->snoop_table,
- sizeof mad_snoop_priv *
- (qp_info->snoop_table_size + 1),
- GFP_ATOMIC);
- if (!new_snoop_table) {
- i = -ENOMEM;
- goto out;
- }
-
- qp_info->snoop_table = new_snoop_table;
- qp_info->snoop_table_size++;
- }
- qp_info->snoop_table[i] = mad_snoop_priv;
- atomic_inc(&qp_info->snoop_count);
-out:
- spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
- return i;
-}
-
-struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
- u8 port_num,
- enum ib_qp_type qp_type,
- int mad_snoop_flags,
- ib_mad_snoop_handler snoop_handler,
- ib_mad_recv_handler recv_handler,
- void *context)
-{
- struct ib_mad_port_private *port_priv;
- struct ib_mad_agent *ret;
- struct ib_mad_snoop_private *mad_snoop_priv;
- int qpn;
- int err;
-
- /* Validate parameters */
- if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
- (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
- ret = ERR_PTR(-EINVAL);
- goto error1;
- }
- qpn = get_spl_qp_index(qp_type);
- if (qpn == -1) {
- ret = ERR_PTR(-EINVAL);
- goto error1;
- }
- port_priv = ib_get_mad_port(device, port_num);
- if (!port_priv) {
- ret = ERR_PTR(-ENODEV);
- goto error1;
- }
- /* Allocate structures */
- mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
- if (!mad_snoop_priv) {
- ret = ERR_PTR(-ENOMEM);
- goto error1;
- }
-
- /* Now, fill in the various structures */
- mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
- mad_snoop_priv->agent.device = device;
- mad_snoop_priv->agent.recv_handler = recv_handler;
- mad_snoop_priv->agent.snoop_handler = snoop_handler;
- mad_snoop_priv->agent.context = context;
- mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
- mad_snoop_priv->agent.port_num = port_num;
- mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
- init_completion(&mad_snoop_priv->comp);
-
- err = ib_mad_agent_security_setup(&mad_snoop_priv->agent, qp_type);
- if (err) {
- ret = ERR_PTR(err);
- goto error2;
- }
-
- mad_snoop_priv->snoop_index = register_snoop_agent(
- &port_priv->qp_info[qpn],
- mad_snoop_priv);
- if (mad_snoop_priv->snoop_index < 0) {
- ret = ERR_PTR(mad_snoop_priv->snoop_index);
- goto error3;
- }
-
- atomic_set(&mad_snoop_priv->refcount, 1);
- return &mad_snoop_priv->agent;
-error3:
- ib_mad_agent_security_cleanup(&mad_snoop_priv->agent);
-error2:
- kfree(mad_snoop_priv);
-error1:
- return ret;
-}
-EXPORT_SYMBOL(ib_register_mad_snoop);
-
static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
if (atomic_dec_and_test(&mad_agent_priv->refcount))
complete(&mad_agent_priv->comp);
}
-static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
-{
- if (atomic_dec_and_test(&mad_snoop_priv->refcount))
- complete(&mad_snoop_priv->comp);
-}
-
static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
struct ib_mad_port_private *port_priv;
@@ -650,25 +520,6 @@ static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
kfree_rcu(mad_agent_priv, rcu);
}
-static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
-{
- struct ib_mad_qp_info *qp_info;
- unsigned long flags;
-
- qp_info = mad_snoop_priv->qp_info;
- spin_lock_irqsave(&qp_info->snoop_lock, flags);
- qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
- atomic_dec(&qp_info->snoop_count);
- spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
-
- deref_snoop_agent(mad_snoop_priv);
- wait_for_completion(&mad_snoop_priv->comp);
-
- ib_mad_agent_security_cleanup(&mad_snoop_priv->agent);
-
- kfree(mad_snoop_priv);
-}
-
/*
* ib_unregister_mad_agent - Unregisters a client from using MAD services
*
@@ -677,20 +528,11 @@ static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
void ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
{
struct ib_mad_agent_private *mad_agent_priv;
- struct ib_mad_snoop_private *mad_snoop_priv;
-
- /* If the TID is zero, the agent can only snoop. */
- if (mad_agent->hi_tid) {
- mad_agent_priv = container_of(mad_agent,
- struct ib_mad_agent_private,
- agent);
- unregister_mad_agent(mad_agent_priv);
- } else {
- mad_snoop_priv = container_of(mad_agent,
- struct ib_mad_snoop_private,
- agent);
- unregister_mad_snoop(mad_snoop_priv);
- }
+
+ mad_agent_priv = container_of(mad_agent,
+ struct ib_mad_agent_private,
+ agent);
+ unregister_mad_agent(mad_agent_priv);
}
EXPORT_SYMBOL(ib_unregister_mad_agent);
@@ -706,57 +548,6 @@ static void dequeue_mad(struct ib_mad_list_head *mad_list)
spin_unlock_irqrestore(&mad_queue->lock, flags);
}
-static void snoop_send(struct ib_mad_qp_info *qp_info,
- struct ib_mad_send_buf *send_buf,
- struct ib_mad_send_wc *mad_send_wc,
- int mad_snoop_flags)
-{
- struct ib_mad_snoop_private *mad_snoop_priv;
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&qp_info->snoop_lock, flags);
- for (i = 0; i < qp_info->snoop_table_size; i++) {
- mad_snoop_priv = qp_info->snoop_table[i];
- if (!mad_snoop_priv ||
- !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
- continue;
-
- atomic_inc(&mad_snoop_priv->refcount);
- spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
- mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
- send_buf, mad_send_wc);
- deref_snoop_agent(mad_snoop_priv);
- spin_lock_irqsave(&qp_info->snoop_lock, flags);
- }
- spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
-}
-
-static void snoop_recv(struct ib_mad_qp_info *qp_info,
- struct ib_mad_recv_wc *mad_recv_wc,
- int mad_snoop_flags)
-{
- struct ib_mad_snoop_private *mad_snoop_priv;
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&qp_info->snoop_lock, flags);
- for (i = 0; i < qp_info->snoop_table_size; i++) {
- mad_snoop_priv = qp_info->snoop_table[i];
- if (!mad_snoop_priv ||
- !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
- continue;
-
- atomic_inc(&mad_snoop_priv->refcount);
- spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
- mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent, NULL,
- mad_recv_wc);
- deref_snoop_agent(mad_snoop_priv);
- spin_lock_irqsave(&qp_info->snoop_lock, flags);
- }
- spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
-}
-
static void build_smp_wc(struct ib_qp *qp, struct ib_cqe *cqe, u16 slid,
u16 pkey_index, u8 port_num, struct ib_wc *wc)
{
@@ -2289,9 +2080,6 @@ static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc)
recv->header.recv_wc.recv_buf.mad = (struct ib_mad *)recv->mad;
recv->header.recv_wc.recv_buf.grh = &recv->grh;
- if (atomic_read(&qp_info->snoop_count))
- snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);
-
/* Validate MAD */
if (!validate_mad((const struct ib_mad_hdr *)recv->mad, qp_info, opa))
goto out;
@@ -2538,9 +2326,6 @@ retry:
mad_send_wc.send_buf = &mad_send_wr->send_buf;
mad_send_wc.status = wc->status;
mad_send_wc.vendor_err = wc->vendor_err;
- if (atomic_read(&qp_info->snoop_count))
- snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
- IB_MAD_SNOOP_SEND_COMPLETIONS);
ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
if (queued_send_wr) {
@@ -2782,10 +2567,6 @@ static void local_completions(struct work_struct *work)
local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
local->mad_priv->header.recv_wc.recv_buf.mad =
(struct ib_mad *)local->mad_priv->mad;
- if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
- snoop_recv(recv_mad_agent->qp_info,
- &local->mad_priv->header.recv_wc,
- IB_MAD_SNOOP_RECVS);
recv_mad_agent->agent.recv_handler(
&recv_mad_agent->agent,
&local->mad_send_wr->send_buf,
@@ -2800,10 +2581,6 @@ local_send_completion:
mad_send_wc.status = IB_WC_SUCCESS;
mad_send_wc.vendor_err = 0;
mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
- if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
- snoop_send(mad_agent_priv->qp_info,
- &local->mad_send_wr->send_buf,
- &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
&mad_send_wc);
@@ -3119,10 +2896,6 @@ static void init_mad_qp(struct ib_mad_port_private *port_priv,
init_mad_queue(qp_info, &qp_info->send_queue);
init_mad_queue(qp_info, &qp_info->recv_queue);
INIT_LIST_HEAD(&qp_info->overflow_list);
- spin_lock_init(&qp_info->snoop_lock);
- qp_info->snoop_table = NULL;
- qp_info->snoop_table_size = 0;
- atomic_set(&qp_info->snoop_count, 0);
}
static int create_mad_qp(struct ib_mad_qp_info *qp_info,
@@ -3166,7 +2939,6 @@ static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
return;
ib_destroy_qp(qp_info->qp);
- kfree(qp_info->snoop_table);
}
/*
@@ -3304,9 +3076,11 @@ static int ib_mad_port_close(struct ib_device *device, int port_num)
return 0;
}
-static void ib_mad_init_device(struct ib_device *device)
+static int ib_mad_init_device(struct ib_device *device)
{
int start, i;
+ unsigned int count = 0;
+ int ret;
start = rdma_start_port(device);
@@ -3314,17 +3088,23 @@ static void ib_mad_init_device(struct ib_device *device)
if (!rdma_cap_ib_mad(device, i))
continue;
- if (ib_mad_port_open(device, i)) {
+ ret = ib_mad_port_open(device, i);
+ if (ret) {
dev_err(&device->dev, "Couldn't open port %d\n", i);
goto error;
}
- if (ib_agent_port_open(device, i)) {
+ ret = ib_agent_port_open(device, i);
+ if (ret) {
dev_err(&device->dev,
"Couldn't open port %d for agents\n", i);
goto error_agent;
}
+ count++;
}
- return;
+ if (!count)
+ return -EOPNOTSUPP;
+
+ return 0;
error_agent:
if (ib_mad_port_close(device, i))
@@ -3341,6 +3121,7 @@ error:
if (ib_mad_port_close(device, i))
dev_err(&device->dev, "Couldn't close port %d\n", i);
}
+ return ret;
}
static void ib_mad_remove_device(struct ib_device *device, void *client_data)
diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
index 9c2d8b7f1af9..740f03ecc05d 100644
--- a/drivers/infiniband/core/multicast.c
+++ b/drivers/infiniband/core/multicast.c
@@ -42,7 +42,7 @@
#include <rdma/ib_cache.h>
#include "sa.h"
-static void mcast_add_one(struct ib_device *device);
+static int mcast_add_one(struct ib_device *device);
static void mcast_remove_one(struct ib_device *device, void *client_data);
static struct ib_client mcast_client = {
@@ -815,7 +815,7 @@ static void mcast_event_handler(struct ib_event_handler *handler,
}
}
-static void mcast_add_one(struct ib_device *device)
+static int mcast_add_one(struct ib_device *device)
{
struct mcast_device *dev;
struct mcast_port *port;
@@ -825,7 +825,7 @@ static void mcast_add_one(struct ib_device *device)
dev = kmalloc(struct_size(dev, port, device->phys_port_cnt),
GFP_KERNEL);
if (!dev)
- return;
+ return -ENOMEM;
dev->start_port = rdma_start_port(device);
dev->end_port = rdma_end_port(device);
@@ -845,7 +845,7 @@ static void mcast_add_one(struct ib_device *device)
if (!count) {
kfree(dev);
- return;
+ return -EOPNOTSUPP;
}
dev->device = device;
@@ -853,6 +853,7 @@ static void mcast_add_one(struct ib_device *device)
INIT_IB_EVENT_HANDLER(&dev->event_handler, device, mcast_event_handler);
ib_register_event_handler(&dev->event_handler);
+ return 0;
}
static void mcast_remove_one(struct ib_device *device, void *client_data)
@@ -861,9 +862,6 @@ static void mcast_remove_one(struct ib_device *device, void *client_data)
struct mcast_port *port;
int i;
- if (!dev)
- return;
-
ib_unregister_event_handler(&dev->event_handler);
flush_workqueue(mcast_wq);
diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c
index e0a5e897e4b1..38de4942c682 100644
--- a/drivers/infiniband/core/rdma_core.c
+++ b/drivers/infiniband/core/rdma_core.c
@@ -130,6 +130,17 @@ static int uverbs_destroy_uobject(struct ib_uobject *uobj,
lockdep_assert_held(&ufile->hw_destroy_rwsem);
assert_uverbs_usecnt(uobj, UVERBS_LOOKUP_WRITE);
+ if (reason == RDMA_REMOVE_ABORT_HWOBJ) {
+ reason = RDMA_REMOVE_ABORT;
+ ret = uobj->uapi_object->type_class->destroy_hw(uobj, reason,
+ attrs);
+ /*
+ * Drivers are not permitted to ignore RDMA_REMOVE_ABORT, see
+ * ib_is_destroy_retryable, cleanup_retryable == false here.
+ */
+ WARN_ON(ret);
+ }
+
if (reason == RDMA_REMOVE_ABORT) {
WARN_ON(!list_empty(&uobj->list));
WARN_ON(!uobj->context);
@@ -653,11 +664,15 @@ void rdma_alloc_commit_uobject(struct ib_uobject *uobj,
* object and anything else connected to uobj before calling this.
*/
void rdma_alloc_abort_uobject(struct ib_uobject *uobj,
- struct uverbs_attr_bundle *attrs)
+ struct uverbs_attr_bundle *attrs,
+ bool hw_obj_valid)
{
struct ib_uverbs_file *ufile = uobj->ufile;
- uverbs_destroy_uobject(uobj, RDMA_REMOVE_ABORT, attrs);
+ uverbs_destroy_uobject(uobj,
+ hw_obj_valid ? RDMA_REMOVE_ABORT_HWOBJ :
+ RDMA_REMOVE_ABORT,
+ attrs);
/* Matches the down_read in rdma_alloc_begin_uobject */
up_read(&ufile->hw_destroy_rwsem);
@@ -927,8 +942,8 @@ uverbs_get_uobject_from_file(u16 object_id, enum uverbs_obj_access access,
}
void uverbs_finalize_object(struct ib_uobject *uobj,
- enum uverbs_obj_access access, bool commit,
- struct uverbs_attr_bundle *attrs)
+ enum uverbs_obj_access access, bool hw_obj_valid,
+ bool commit, struct uverbs_attr_bundle *attrs)
{
/*
* refcounts should be handled at the object level and not at the
@@ -951,7 +966,7 @@ void uverbs_finalize_object(struct ib_uobject *uobj,
if (commit)
rdma_alloc_commit_uobject(uobj, attrs);
else
- rdma_alloc_abort_uobject(uobj, attrs);
+ rdma_alloc_abort_uobject(uobj, attrs, hw_obj_valid);
break;
default:
WARN_ON(true);
diff --git a/drivers/infiniband/core/rdma_core.h b/drivers/infiniband/core/rdma_core.h
index 33978e0f1262..33706dad6c0f 100644
--- a/drivers/infiniband/core/rdma_core.h
+++ b/drivers/infiniband/core/rdma_core.h
@@ -64,8 +64,8 @@ uverbs_get_uobject_from_file(u16 object_id, enum uverbs_obj_access access,
s64 id, struct uverbs_attr_bundle *attrs);
void uverbs_finalize_object(struct ib_uobject *uobj,
- enum uverbs_obj_access access, bool commit,
- struct uverbs_attr_bundle *attrs);
+ enum uverbs_obj_access access, bool hw_obj_valid,
+ bool commit, struct uverbs_attr_bundle *attrs);
int uverbs_output_written(const struct uverbs_attr_bundle *bundle, size_t idx);
@@ -159,6 +159,9 @@ extern const struct uapi_definition uverbs_def_obj_dm[];
extern const struct uapi_definition uverbs_def_obj_flow_action[];
extern const struct uapi_definition uverbs_def_obj_intf[];
extern const struct uapi_definition uverbs_def_obj_mr[];
+extern const struct uapi_definition uverbs_def_obj_qp[];
+extern const struct uapi_definition uverbs_def_obj_srq[];
+extern const struct uapi_definition uverbs_def_obj_wq[];
extern const struct uapi_definition uverbs_def_write_intf[];
static inline const struct uverbs_api_write_method *
diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c
index 557efbf29197..614cff89fc71 100644
--- a/drivers/infiniband/core/rw.c
+++ b/drivers/infiniband/core/rw.c
@@ -129,7 +129,7 @@ static int rdma_rw_init_mr_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
qp->integrity_en);
int i, j, ret = 0, count = 0;
- ctx->nr_ops = (sg_cnt + pages_per_mr - 1) / pages_per_mr;
+ ctx->nr_ops = DIV_ROUND_UP(sg_cnt, pages_per_mr);
ctx->reg = kcalloc(ctx->nr_ops, sizeof(*ctx->reg), GFP_KERNEL);
if (!ctx->reg) {
ret = -ENOMEM;
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 74e0058fcf9e..a2ed09a3c714 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -174,7 +174,7 @@ static const struct nla_policy ib_nl_policy[LS_NLA_TYPE_MAX] = {
};
-static void ib_sa_add_one(struct ib_device *device);
+static int ib_sa_add_one(struct ib_device *device);
static void ib_sa_remove_one(struct ib_device *device, void *client_data);
static struct ib_client sa_client = {
@@ -190,7 +190,7 @@ static u32 tid;
#define PATH_REC_FIELD(field) \
.struct_offset_bytes = offsetof(struct sa_path_rec, field), \
- .struct_size_bytes = sizeof((struct sa_path_rec *)0)->field, \
+ .struct_size_bytes = sizeof_field(struct sa_path_rec, field), \
.field_name = "sa_path_rec:" #field
static const struct ib_field path_rec_table[] = {
@@ -292,7 +292,7 @@ static const struct ib_field path_rec_table[] = {
.struct_offset_bytes = \
offsetof(struct sa_path_rec, field), \
.struct_size_bytes = \
- sizeof((struct sa_path_rec *)0)->field, \
+ sizeof_field(struct sa_path_rec, field), \
.field_name = "sa_path_rec:" #field
static const struct ib_field opa_path_rec_table[] = {
@@ -420,7 +420,7 @@ static const struct ib_field opa_path_rec_table[] = {
#define MCMEMBER_REC_FIELD(field) \
.struct_offset_bytes = offsetof(struct ib_sa_mcmember_rec, field), \
- .struct_size_bytes = sizeof ((struct ib_sa_mcmember_rec *) 0)->field, \
+ .struct_size_bytes = sizeof_field(struct ib_sa_mcmember_rec, field), \
.field_name = "sa_mcmember_rec:" #field
static const struct ib_field mcmember_rec_table[] = {
@@ -504,7 +504,7 @@ static const struct ib_field mcmember_rec_table[] = {
#define SERVICE_REC_FIELD(field) \
.struct_offset_bytes = offsetof(struct ib_sa_service_rec, field), \
- .struct_size_bytes = sizeof ((struct ib_sa_service_rec *) 0)->field, \
+ .struct_size_bytes = sizeof_field(struct ib_sa_service_rec, field), \
.field_name = "sa_service_rec:" #field
static const struct ib_field service_rec_table[] = {
@@ -552,7 +552,7 @@ static const struct ib_field service_rec_table[] = {
#define CLASSPORTINFO_REC_FIELD(field) \
.struct_offset_bytes = offsetof(struct ib_class_port_info, field), \
- .struct_size_bytes = sizeof((struct ib_class_port_info *)0)->field, \
+ .struct_size_bytes = sizeof_field(struct ib_class_port_info, field), \
.field_name = "ib_class_port_info:" #field
static const struct ib_field ib_classport_info_rec_table[] = {
@@ -630,7 +630,7 @@ static const struct ib_field ib_classport_info_rec_table[] = {
.struct_offset_bytes =\
offsetof(struct opa_class_port_info, field), \
.struct_size_bytes = \
- sizeof((struct opa_class_port_info *)0)->field, \
+ sizeof_field(struct opa_class_port_info, field), \
.field_name = "opa_class_port_info:" #field
static const struct ib_field opa_classport_info_rec_table[] = {
@@ -710,7 +710,7 @@ static const struct ib_field opa_classport_info_rec_table[] = {
#define GUIDINFO_REC_FIELD(field) \
.struct_offset_bytes = offsetof(struct ib_sa_guidinfo_rec, field), \
- .struct_size_bytes = sizeof((struct ib_sa_guidinfo_rec *) 0)->field, \
+ .struct_size_bytes = sizeof_field(struct ib_sa_guidinfo_rec, field), \
.field_name = "sa_guidinfo_rec:" #field
static const struct ib_field guidinfo_rec_table[] = {
@@ -1412,17 +1412,13 @@ void ib_sa_pack_path(struct sa_path_rec *rec, void *attribute)
EXPORT_SYMBOL(ib_sa_pack_path);
static bool ib_sa_opa_pathrecord_support(struct ib_sa_client *client,
- struct ib_device *device,
+ struct ib_sa_device *sa_dev,
u8 port_num)
{
- struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
struct ib_sa_port *port;
unsigned long flags;
bool ret = false;
- if (!sa_dev)
- return ret;
-
port = &sa_dev->port[port_num - sa_dev->start_port];
spin_lock_irqsave(&port->classport_lock, flags);
if (!port->classport_info.valid)
@@ -1450,8 +1446,8 @@ enum opa_pr_supported {
* query is possible.
*/
static int opa_pr_query_possible(struct ib_sa_client *client,
- struct ib_device *device,
- u8 port_num,
+ struct ib_sa_device *sa_dev,
+ struct ib_device *device, u8 port_num,
struct sa_path_rec *rec)
{
struct ib_port_attr port_attr;
@@ -1459,7 +1455,7 @@ static int opa_pr_query_possible(struct ib_sa_client *client,
if (ib_query_port(device, port_num, &port_attr))
return PR_NOT_SUPPORTED;
- if (ib_sa_opa_pathrecord_support(client, device, port_num))
+ if (ib_sa_opa_pathrecord_support(client, sa_dev, port_num))
return PR_OPA_SUPPORTED;
if (port_attr.lid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
@@ -1574,7 +1570,8 @@ int ib_sa_path_rec_get(struct ib_sa_client *client,
query->sa_query.port = port;
if (rec->rec_type == SA_PATH_REC_TYPE_OPA) {
- status = opa_pr_query_possible(client, device, port_num, rec);
+ status = opa_pr_query_possible(client, sa_dev, device, port_num,
+ rec);
if (status == PR_NOT_SUPPORTED) {
ret = -EINVAL;
goto err1;
@@ -2325,18 +2322,19 @@ static void ib_sa_event(struct ib_event_handler *handler,
}
}
-static void ib_sa_add_one(struct ib_device *device)
+static int ib_sa_add_one(struct ib_device *device)
{
struct ib_sa_device *sa_dev;
int s, e, i;
int count = 0;
+ int ret;
s = rdma_start_port(device);
e = rdma_end_port(device);
sa_dev = kzalloc(struct_size(sa_dev, port, e - s + 1), GFP_KERNEL);
if (!sa_dev)
- return;
+ return -ENOMEM;
sa_dev->start_port = s;
sa_dev->end_port = e;
@@ -2356,8 +2354,10 @@ static void ib_sa_add_one(struct ib_device *device)
ib_register_mad_agent(device, i + s, IB_QPT_GSI,
NULL, 0, send_handler,
recv_handler, sa_dev, 0);
- if (IS_ERR(sa_dev->port[i].agent))
+ if (IS_ERR(sa_dev->port[i].agent)) {
+ ret = PTR_ERR(sa_dev->port[i].agent);
goto err;
+ }
INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah);
INIT_DELAYED_WORK(&sa_dev->port[i].ib_cpi_work,
@@ -2366,8 +2366,10 @@ static void ib_sa_add_one(struct ib_device *device)
count++;
}
- if (!count)
+ if (!count) {
+ ret = -EOPNOTSUPP;
goto free;
+ }
ib_set_client_data(device, &sa_client, sa_dev);
@@ -2386,7 +2388,7 @@ static void ib_sa_add_one(struct ib_device *device)
update_sm_ah(&sa_dev->port[i].update_task);
}
- return;
+ return 0;
err:
while (--i >= 0) {
@@ -2395,7 +2397,7 @@ err:
}
free:
kfree(sa_dev);
- return;
+ return ret;
}
static void ib_sa_remove_one(struct ib_device *device, void *client_data)
@@ -2403,9 +2405,6 @@ static void ib_sa_remove_one(struct ib_device *device, void *client_data)
struct ib_sa_device *sa_dev = client_data;
int i;
- if (!sa_dev)
- return;
-
ib_unregister_event_handler(&sa_dev->event_handler);
flush_workqueue(ib_wq);
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 087682e6969e..defe9cd4c5ee 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -1058,8 +1058,7 @@ static int add_port(struct ib_core_device *coredev, int port_num)
coredev->ports_kobj,
"%d", port_num);
if (ret) {
- kfree(p);
- return ret;
+ goto err_put;
}
p->gid_attr_group = kzalloc(sizeof(*p->gid_attr_group), GFP_KERNEL);
@@ -1072,8 +1071,7 @@ static int add_port(struct ib_core_device *coredev, int port_num)
ret = kobject_init_and_add(&p->gid_attr_group->kobj, &gid_attr_type,
&p->kobj, "gid_attrs");
if (ret) {
- kfree(p->gid_attr_group);
- goto err_put;
+ goto err_put_gid_attrs;
}
if (device->ops.process_mad && is_full_dev) {
@@ -1404,8 +1402,10 @@ int ib_port_register_module_stat(struct ib_device *device, u8 port_num,
ret = kobject_init_and_add(kobj, ktype, &port->kobj, "%s",
name);
- if (ret)
+ if (ret) {
+ kobject_put(kobj);
return ret;
+ }
}
return 0;
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 16b6cf57fa85..5b87eee8ccc8 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -52,6 +52,7 @@
#include <rdma/rdma_cm_ib.h>
#include <rdma/ib_addr.h>
#include <rdma/ib.h>
+#include <rdma/ib_cm.h>
#include <rdma/rdma_netlink.h>
#include "core_priv.h"
@@ -360,6 +361,9 @@ static int ucma_event_handler(struct rdma_cm_id *cm_id,
ucma_copy_conn_event(&uevent->resp.param.conn,
&event->param.conn);
+ uevent->resp.ece.vendor_id = event->ece.vendor_id;
+ uevent->resp.ece.attr_mod = event->ece.attr_mod;
+
if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
if (!ctx->backlog) {
ret = -ENOMEM;
@@ -404,7 +408,8 @@ static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
* Old 32 bit user space does not send the 4 byte padding in the
* reserved field. We don't care, allow it to keep working.
*/
- if (out_len < sizeof(uevent->resp) - sizeof(uevent->resp.reserved))
+ if (out_len < sizeof(uevent->resp) - sizeof(uevent->resp.reserved) -
+ sizeof(uevent->resp.ece))
return -ENOSPC;
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
@@ -845,7 +850,7 @@ static ssize_t ucma_query_route(struct ucma_file *file,
struct sockaddr *addr;
int ret = 0;
- if (out_len < sizeof(resp))
+ if (out_len < offsetof(struct rdma_ucm_query_route_resp, ibdev_index))
return -ENOSPC;
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
@@ -869,6 +874,7 @@ static ssize_t ucma_query_route(struct ucma_file *file,
goto out;
resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
+ resp.ibdev_index = ctx->cm_id->device->index;
resp.port_num = ctx->cm_id->port_num;
if (rdma_cap_ib_sa(ctx->cm_id->device, ctx->cm_id->port_num))
@@ -880,8 +886,8 @@ static ssize_t ucma_query_route(struct ucma_file *file,
out:
mutex_unlock(&ctx->mutex);
- if (copy_to_user(u64_to_user_ptr(cmd.response),
- &resp, sizeof(resp)))
+ if (copy_to_user(u64_to_user_ptr(cmd.response), &resp,
+ min_t(size_t, out_len, sizeof(resp))))
ret = -EFAULT;
ucma_put_ctx(ctx);
@@ -895,6 +901,7 @@ static void ucma_query_device_addr(struct rdma_cm_id *cm_id,
return;
resp->node_guid = (__force __u64) cm_id->device->node_guid;
+ resp->ibdev_index = cm_id->device->index;
resp->port_num = cm_id->port_num;
resp->pkey = (__force __u16) cpu_to_be16(
ib_addr_get_pkey(&cm_id->route.addr.dev_addr));
@@ -907,7 +914,7 @@ static ssize_t ucma_query_addr(struct ucma_context *ctx,
struct sockaddr *addr;
int ret = 0;
- if (out_len < sizeof(resp))
+ if (out_len < offsetof(struct rdma_ucm_query_addr_resp, ibdev_index))
return -ENOSPC;
memset(&resp, 0, sizeof resp);
@@ -922,7 +929,7 @@ static ssize_t ucma_query_addr(struct ucma_context *ctx,
ucma_query_device_addr(ctx->cm_id, &resp);
- if (copy_to_user(response, &resp, sizeof(resp)))
+ if (copy_to_user(response, &resp, min_t(size_t, out_len, sizeof(resp))))
ret = -EFAULT;
return ret;
@@ -974,7 +981,7 @@ static ssize_t ucma_query_gid(struct ucma_context *ctx,
struct sockaddr_ib *addr;
int ret = 0;
- if (out_len < sizeof(resp))
+ if (out_len < offsetof(struct rdma_ucm_query_addr_resp, ibdev_index))
return -ENOSPC;
memset(&resp, 0, sizeof resp);
@@ -1007,7 +1014,7 @@ static ssize_t ucma_query_gid(struct ucma_context *ctx,
&ctx->cm_id->route.addr.dst_addr);
}
- if (copy_to_user(response, &resp, sizeof(resp)))
+ if (copy_to_user(response, &resp, min_t(size_t, out_len, sizeof(resp))))
ret = -EFAULT;
return ret;
@@ -1070,12 +1077,15 @@ static void ucma_copy_conn_param(struct rdma_cm_id *id,
static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
int in_len, int out_len)
{
- struct rdma_ucm_connect cmd;
struct rdma_conn_param conn_param;
+ struct rdma_ucm_ece ece = {};
+ struct rdma_ucm_connect cmd;
struct ucma_context *ctx;
+ size_t in_size;
int ret;
- if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+ in_size = min_t(size_t, in_len, sizeof(cmd));
+ if (copy_from_user(&cmd, inbuf, in_size))
return -EFAULT;
if (!cmd.conn_param.valid)
@@ -1086,8 +1096,13 @@ static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
return PTR_ERR(ctx);
ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
+ if (offsetofend(typeof(cmd), ece) <= in_size) {
+ ece.vendor_id = cmd.ece.vendor_id;
+ ece.attr_mod = cmd.ece.attr_mod;
+ }
+
mutex_lock(&ctx->mutex);
- ret = rdma_connect(ctx->cm_id, &conn_param);
+ ret = rdma_connect_ece(ctx->cm_id, &conn_param, &ece);
mutex_unlock(&ctx->mutex);
ucma_put_ctx(ctx);
return ret;
@@ -1121,28 +1136,36 @@ static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
{
struct rdma_ucm_accept cmd;
struct rdma_conn_param conn_param;
+ struct rdma_ucm_ece ece = {};
struct ucma_context *ctx;
+ size_t in_size;
int ret;
- if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+ in_size = min_t(size_t, in_len, sizeof(cmd));
+ if (copy_from_user(&cmd, inbuf, in_size))
return -EFAULT;
ctx = ucma_get_ctx_dev(file, cmd.id);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
+ if (offsetofend(typeof(cmd), ece) <= in_size) {
+ ece.vendor_id = cmd.ece.vendor_id;
+ ece.attr_mod = cmd.ece.attr_mod;
+ }
+
if (cmd.conn_param.valid) {
ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
mutex_lock(&file->mut);
mutex_lock(&ctx->mutex);
- ret = __rdma_accept(ctx->cm_id, &conn_param, NULL);
+ ret = __rdma_accept_ece(ctx->cm_id, &conn_param, NULL, &ece);
mutex_unlock(&ctx->mutex);
if (!ret)
ctx->uid = cmd.uid;
mutex_unlock(&file->mut);
} else {
mutex_lock(&ctx->mutex);
- ret = __rdma_accept(ctx->cm_id, NULL, NULL);
+ ret = __rdma_accept_ece(ctx->cm_id, NULL, NULL, &ece);
mutex_unlock(&ctx->mutex);
}
ucma_put_ctx(ctx);
@@ -1159,12 +1182,24 @@ static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT;
+ if (!cmd.reason)
+ cmd.reason = IB_CM_REJ_CONSUMER_DEFINED;
+
+ switch (cmd.reason) {
+ case IB_CM_REJ_CONSUMER_DEFINED:
+ case IB_CM_REJ_VENDOR_OPTION_NOT_SUPPORTED:
+ break;
+ default:
+ return -EINVAL;
+ }
+
ctx = ucma_get_ctx_dev(file, cmd.id);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
mutex_lock(&ctx->mutex);
- ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len);
+ ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len,
+ cmd.reason);
mutex_unlock(&ctx->mutex);
ucma_put_ctx(ctx);
return ret;
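The ucma changes above follow one compatibility pattern in both directions: requests are read with copy_from_user() clamped to min(in_len, sizeof(cmd)) and the new trailing ece field is consulted only when offsetofend() shows the caller actually sent it, while responses are length-checked against offsetof() of the first new member (ibdev_index) and copied back clamped to out_len. A self-contained userspace sketch of the read side, assuming a made-up cmd_v2 layout:

	#include <stddef.h>
	#include <stdint.h>
	#include <string.h>

	#define offsetofend(TYPE, MEMBER) \
		(offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

	struct cmd_v2 {
		uint32_t id;
		uint32_t flags;
		struct { uint32_t vendor_id; uint32_t attr_mod; } ece;	/* appended field */
	};

	/* Older callers pass a buffer that stops before .ece; only trust that
	 * member when the caller's length covers all of it. memcpy() stands in
	 * for copy_from_user(). Returns 1 when ece is valid. */
	static int parse_cmd(const void *inbuf, size_t in_len, struct cmd_v2 *out)
	{
		size_t in_size = in_len < sizeof(*out) ? in_len : sizeof(*out);

		memset(out, 0, sizeof(*out));
		memcpy(out, inbuf, in_size);
		return offsetofend(struct cmd_v2, ece) <= in_size;
	}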
diff --git a/drivers/infiniband/core/ud_header.c b/drivers/infiniband/core/ud_header.c
index 29a45d2f8898..d65d541b9a25 100644
--- a/drivers/infiniband/core/ud_header.c
+++ b/drivers/infiniband/core/ud_header.c
@@ -41,7 +41,7 @@
#define STRUCT_FIELD(header, field) \
.struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field), \
- .struct_size_bytes = sizeof ((struct ib_unpacked_ ## header *) 0)->field, \
+ .struct_size_bytes = sizeof_field(struct ib_unpacked_ ## header, field), \
.field_name = #header ":" #field
static const struct ib_field lrh_table[] = {
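The STRUCT_FIELD change is purely a spelling one: sizeof_field() names the long-standing null-pointer sizeof idiom instead of open-coding it. A tiny sketch of what the macro expands to, using a made-up struct:

	#include <stddef.h>

	#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))

	struct example_hdr { unsigned char dlid[2]; };

	/* sizeof never evaluates its operand, so the null pointer is never touched */
	_Static_assert(sizeof_field(struct example_hdr, dlid) == 2, "field size");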
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 3b1e627d9a8d..ccd28405451c 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -429,7 +429,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 user_virt,
ALIGN(bcnt, PAGE_SIZE) / PAGE_SIZE,
PAGE_SIZE / sizeof(struct page *));
- down_read(&owning_mm->mmap_sem);
+ mmap_read_lock(owning_mm);
/*
* Note: this might result in redundant page getting. We can
* Note: this might result in redundant page getting. We can
* avoid this by checking dma_list to be 0 before calling
@@ -440,7 +440,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 user_virt,
npages = get_user_pages_remote(owning_process, owning_mm,
user_virt, gup_num_pages,
flags, local_page_list, NULL, NULL);
- up_read(&owning_mm->mmap_sem);
+ mmap_read_unlock(owning_mm);
if (npages < 0) {
if (npages != -EAGAIN)
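The mmap_sem conversions here (and the comment fix further down in uverbs_main.c) are mechanical: mmap_read_lock()/mmap_read_unlock() are thin wrappers around the read side of the mm's rw_semaphore, added so callers stop open-coding the lock. A hedged sketch of roughly what the wrappers do; the member name mmap_lock is assumed from the rename the comment hunk refers to:

	#include <linux/mm_types.h>
	#include <linux/rwsem.h>

	/* Sketch only; the real helpers live in <linux/mmap_lock.h>. */
	static inline void mmap_read_lock_sketch(struct mm_struct *mm)
	{
		down_read(&mm->mmap_lock);
	}

	static inline void mmap_read_unlock_sketch(struct mm_struct *mm)
	{
		up_read(&mm->mmap_lock);
	}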
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index da229eab5903..b0d0b522cc76 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -142,7 +142,7 @@ static dev_t dynamic_issm_dev;
static DEFINE_IDA(umad_ida);
-static void ib_umad_add_one(struct ib_device *device);
+static int ib_umad_add_one(struct ib_device *device);
static void ib_umad_remove_one(struct ib_device *device, void *client_data);
static void ib_umad_dev_free(struct kref *kref)
@@ -1352,37 +1352,41 @@ static void ib_umad_kill_port(struct ib_umad_port *port)
put_device(&port->dev);
}
-static void ib_umad_add_one(struct ib_device *device)
+static int ib_umad_add_one(struct ib_device *device)
{
struct ib_umad_device *umad_dev;
int s, e, i;
int count = 0;
+ int ret;
s = rdma_start_port(device);
e = rdma_end_port(device);
umad_dev = kzalloc(struct_size(umad_dev, ports, e - s + 1), GFP_KERNEL);
if (!umad_dev)
- return;
+ return -ENOMEM;
kref_init(&umad_dev->kref);
for (i = s; i <= e; ++i) {
if (!rdma_cap_ib_mad(device, i))
continue;
- if (ib_umad_init_port(device, i, umad_dev,
- &umad_dev->ports[i - s]))
+ ret = ib_umad_init_port(device, i, umad_dev,
+ &umad_dev->ports[i - s]);
+ if (ret)
goto err;
count++;
}
- if (!count)
+ if (!count) {
+ ret = -EOPNOTSUPP;
goto free;
+ }
ib_set_client_data(device, &umad_client, umad_dev);
- return;
+ return 0;
err:
while (--i >= s) {
@@ -1394,6 +1398,7 @@ err:
free:
/* balances kref_init */
ib_umad_dev_put(umad_dev);
+ return ret;
}
static void ib_umad_remove_one(struct ib_device *device, void *client_data)
@@ -1401,9 +1406,6 @@ static void ib_umad_remove_one(struct ib_device *device, void *client_data)
struct ib_umad_device *umad_dev = client_data;
unsigned int i;
- if (!umad_dev)
- return;
-
rdma_for_each_port (device, i) {
if (rdma_cap_ib_mad(device, i))
ib_umad_kill_port(
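ib_umad's add_one now reports failure instead of silently registering nothing, which is what allows remove_one to drop its NULL check on client_data: remove is only reached for devices whose add succeeded. A stripped-down sketch of the new client contract, with a hypothetical example_dev and "example" client:

	#include <linux/slab.h>
	#include <rdma/ib_verbs.h>

	struct example_dev {
		int dummy;
	};

	static struct ib_client example_client;

	static int example_add_one(struct ib_device *device)
	{
		struct example_dev *ed = kzalloc(sizeof(*ed), GFP_KERNEL);

		if (!ed)
			return -ENOMEM;	/* core will not call remove for this device */
		ib_set_client_data(device, &example_client, ed);
		return 0;
	}

	static void example_remove_one(struct ib_device *device, void *client_data)
	{
		kfree(client_data);	/* never NULL once add succeeded */
	}

	static struct ib_client example_client = {
		.name	= "example",
		.add	= example_add_one,
		.remove	= example_remove_one,
	};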
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index 3d189c7ee59e..53a10479958b 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -142,7 +142,7 @@ struct ib_uverbs_file {
* ucontext_lock held
*/
struct ib_ucontext *ucontext;
- struct ib_uverbs_async_event_file *async_file;
+ struct ib_uverbs_async_event_file *default_async_file;
struct list_head list;
/*
@@ -180,6 +180,7 @@ struct ib_uverbs_mcast_entry {
struct ib_uevent_object {
struct ib_uobject uobject;
+ struct ib_uverbs_async_event_file *event_file;
/* List member for ib_uverbs_async_event_file list */
struct list_head event_list;
u32 events_reported;
@@ -296,6 +297,24 @@ static inline u32 make_port_cap_flags(const struct ib_port_attr *attr)
return res;
}
+static inline struct ib_uverbs_async_event_file *
+ib_uverbs_get_async_event(struct uverbs_attr_bundle *attrs,
+ u16 id)
+{
+ struct ib_uobject *async_ev_file_uobj;
+ struct ib_uverbs_async_event_file *async_ev_file;
+
+ async_ev_file_uobj = uverbs_attr_get_uobject(attrs, id);
+ if (IS_ERR(async_ev_file_uobj))
+ async_ev_file = READ_ONCE(attrs->ufile->default_async_file);
+ else
+ async_ev_file = container_of(async_ev_file_uobj,
+ struct ib_uverbs_async_event_file,
+ uobj);
+ if (async_ev_file)
+ uverbs_uobject_get(&async_ev_file->uobj);
+ return async_ev_file;
+}
void copy_port_attr_to_resp(struct ib_port_attr *attr,
struct ib_uverbs_query_port_resp *resp,
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 060b4ebbd2ba..b48b3f6e632d 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -311,7 +311,7 @@ static int ib_uverbs_get_context(struct uverbs_attr_bundle *attrs)
return 0;
err_uobj:
- rdma_alloc_abort_uobject(uobj, attrs);
+ rdma_alloc_abort_uobject(uobj, attrs, false);
err_ucontext:
kfree(attrs->context);
attrs->context = NULL;
@@ -356,8 +356,6 @@ static void copy_query_dev_fields(struct ib_ucontext *ucontext,
resp->max_mcast_qp_attach = attr->max_mcast_qp_attach;
resp->max_total_mcast_qp_attach = attr->max_total_mcast_qp_attach;
resp->max_ah = attr->max_ah;
- resp->max_fmr = attr->max_fmr;
- resp->max_map_per_fmr = attr->max_map_per_fmr;
resp->max_srq = attr->max_srq;
resp->max_srq_wr = attr->max_srq_wr;
resp->max_srq_sge = attr->max_srq_sge;
@@ -1051,6 +1049,10 @@ static struct ib_ucq_object *create_cq(struct uverbs_attr_bundle *attrs,
goto err_free;
obj->uevent.uobject.object = cq;
+ obj->uevent.event_file = READ_ONCE(attrs->ufile->default_async_file);
+ if (obj->uevent.event_file)
+ uverbs_uobject_get(&obj->uevent.event_file->uobj);
+
memset(&resp, 0, sizeof resp);
resp.base.cq_handle = obj->uevent.uobject.id;
resp.base.cqe = cq->cqe;
@@ -1067,6 +1069,8 @@ static struct ib_ucq_object *create_cq(struct uverbs_attr_bundle *attrs,
return obj;
err_cb:
+ if (obj->uevent.event_file)
+ uverbs_uobject_put(&obj->uevent.event_file->uobj);
ib_destroy_cq_user(cq, uverbs_get_cleared_udata(attrs));
cq = NULL;
err_free:
@@ -1460,6 +1464,9 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
}
obj->uevent.uobject.object = qp;
+ obj->uevent.event_file = READ_ONCE(attrs->ufile->default_async_file);
+ if (obj->uevent.event_file)
+ uverbs_uobject_get(&obj->uevent.event_file->uobj);
memset(&resp, 0, sizeof resp);
resp.base.qpn = qp->qp_num;
@@ -1473,7 +1480,7 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
ret = uverbs_response(attrs, &resp, sizeof(resp));
if (ret)
- goto err_cb;
+ goto err_uevent;
if (xrcd) {
obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
@@ -1498,6 +1505,9 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
rdma_alloc_commit_uobject(&obj->uevent.uobject, attrs);
return 0;
+err_uevent:
+ if (obj->uevent.event_file)
+ uverbs_uobject_put(&obj->uevent.event_file->uobj);
err_cb:
ib_destroy_qp_user(qp, uverbs_get_cleared_udata(attrs));
@@ -2954,11 +2964,11 @@ static int ib_uverbs_ex_create_wq(struct uverbs_attr_bundle *attrs)
wq_init_attr.cq = cq;
wq_init_attr.max_sge = cmd.max_sge;
wq_init_attr.max_wr = cmd.max_wr;
- wq_init_attr.wq_context = attrs->ufile;
wq_init_attr.wq_type = cmd.wq_type;
wq_init_attr.event_handler = ib_uverbs_wq_event_handler;
wq_init_attr.create_flags = cmd.create_flags;
INIT_LIST_HEAD(&obj->uevent.event_list);
+ obj->uevent.uobject.user_handle = cmd.user_handle;
wq = pd->device->ops.create_wq(pd, &wq_init_attr, &attrs->driver_udata);
if (IS_ERR(wq)) {
@@ -2972,12 +2982,12 @@ static int ib_uverbs_ex_create_wq(struct uverbs_attr_bundle *attrs)
wq->cq = cq;
wq->pd = pd;
wq->device = pd->device;
- wq->wq_context = wq_init_attr.wq_context;
atomic_set(&wq->usecnt, 0);
atomic_inc(&pd->usecnt);
atomic_inc(&cq->usecnt);
- wq->uobject = obj;
- obj->uevent.uobject.object = wq;
+ obj->uevent.event_file = READ_ONCE(attrs->ufile->default_async_file);
+ if (obj->uevent.event_file)
+ uverbs_uobject_get(&obj->uevent.event_file->uobj);
memset(&resp, 0, sizeof(resp));
resp.wq_handle = obj->uevent.uobject.id;
@@ -2996,6 +3006,8 @@ static int ib_uverbs_ex_create_wq(struct uverbs_attr_bundle *attrs)
return 0;
err_copy:
+ if (obj->uevent.event_file)
+ uverbs_uobject_put(&obj->uevent.event_file->uobj);
ib_destroy_wq(wq, uverbs_get_cleared_udata(attrs));
err_put_cq:
rdma_lookup_put_uobject(&cq->uobject->uevent.uobject,
@@ -3441,46 +3453,25 @@ static int __uverbs_create_xsrq(struct uverbs_attr_bundle *attrs,
}
attr.event_handler = ib_uverbs_srq_event_handler;
- attr.srq_context = attrs->ufile;
attr.srq_type = cmd->srq_type;
attr.attr.max_wr = cmd->max_wr;
attr.attr.max_sge = cmd->max_sge;
attr.attr.srq_limit = cmd->srq_limit;
INIT_LIST_HEAD(&obj->uevent.event_list);
+ obj->uevent.uobject.user_handle = cmd->user_handle;
- srq = rdma_zalloc_drv_obj(ib_dev, ib_srq);
- if (!srq) {
- ret = -ENOMEM;
- goto err_put;
- }
-
- srq->device = pd->device;
- srq->pd = pd;
- srq->srq_type = cmd->srq_type;
- srq->uobject = obj;
- srq->event_handler = attr.event_handler;
- srq->srq_context = attr.srq_context;
-
- ret = pd->device->ops.create_srq(srq, &attr, udata);
- if (ret)
- goto err_free;
-
- if (ib_srq_has_cq(cmd->srq_type)) {
- srq->ext.cq = attr.ext.cq;
- atomic_inc(&attr.ext.cq->usecnt);
- }
-
- if (cmd->srq_type == IB_SRQT_XRC) {
- srq->ext.xrc.xrcd = attr.ext.xrc.xrcd;
- atomic_inc(&attr.ext.xrc.xrcd->usecnt);
+ srq = ib_create_srq_user(pd, &attr, obj, udata);
+ if (IS_ERR(srq)) {
+ ret = PTR_ERR(srq);
+ goto err_put_pd;
}
- atomic_inc(&pd->usecnt);
- atomic_set(&srq->usecnt, 0);
-
obj->uevent.uobject.object = srq;
obj->uevent.uobject.user_handle = cmd->user_handle;
+ obj->uevent.event_file = READ_ONCE(attrs->ufile->default_async_file);
+ if (obj->uevent.event_file)
+ uverbs_uobject_get(&obj->uevent.event_file->uobj);
memset(&resp, 0, sizeof resp);
resp.srq_handle = obj->uevent.uobject.id;
@@ -3505,14 +3496,11 @@ static int __uverbs_create_xsrq(struct uverbs_attr_bundle *attrs,
return 0;
err_copy:
+ if (obj->uevent.event_file)
+ uverbs_uobject_put(&obj->uevent.event_file->uobj);
ib_destroy_srq_user(srq, uverbs_get_cleared_udata(attrs));
- /* It was released in ib_destroy_srq_user */
- srq = NULL;
-err_free:
- kfree(srq);
-err_put:
+err_put_pd:
uobj_put_obj_read(pd);
-
err_put_cq:
if (ib_srq_has_cq(cmd->srq_type))
rdma_lookup_put_uobject(&attr.ext.cq->uobject->uevent.uobject,
@@ -3751,7 +3739,7 @@ static int ib_uverbs_ex_modify_cq(struct uverbs_attr_bundle *attrs)
#define UAPI_DEF_WRITE_IO(req, resp) \
.write.has_resp = 1 + \
BUILD_BUG_ON_ZERO(offsetof(req, response) != 0) + \
- BUILD_BUG_ON_ZERO(sizeof(((req *)0)->response) != \
+ BUILD_BUG_ON_ZERO(sizeof_field(req, response) != \
sizeof(u64)), \
.write.req_size = sizeof(req), .write.resp_size = sizeof(resp)
diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c
index 538affbc517e..2d882c02387c 100644
--- a/drivers/infiniband/core/uverbs_ioctl.c
+++ b/drivers/infiniband/core/uverbs_ioctl.c
@@ -58,6 +58,7 @@ struct bundle_priv {
DECLARE_BITMAP(uobj_finalize, UVERBS_API_ATTR_BKEY_LEN);
DECLARE_BITMAP(spec_finalize, UVERBS_API_ATTR_BKEY_LEN);
+ DECLARE_BITMAP(uobj_hw_obj_valid, UVERBS_API_ATTR_BKEY_LEN);
/*
* Must be last. bundle ends in a flex array which overlaps
@@ -136,7 +137,7 @@ EXPORT_SYMBOL(_uverbs_alloc);
static bool uverbs_is_attr_cleared(const struct ib_uverbs_attr *uattr,
u16 len)
{
- if (uattr->len > sizeof(((struct ib_uverbs_attr *)0)->data))
+ if (uattr->len > sizeof_field(struct ib_uverbs_attr, data))
return ib_is_buffer_cleared(u64_to_user_ptr(uattr->data) + len,
uattr->len - len);
@@ -230,7 +231,8 @@ static void uverbs_free_idrs_array(const struct uverbs_api_attr *attr_uapi,
for (i = 0; i != attr->len; i++)
uverbs_finalize_object(attr->uobjects[i],
- spec->u2.objs_arr.access, commit, attrs);
+ spec->u2.objs_arr.access, false, commit,
+ attrs);
}
static int uverbs_process_attr(struct bundle_priv *pbundle,
@@ -502,7 +504,9 @@ static void bundle_destroy(struct bundle_priv *pbundle, bool commit)
uverbs_finalize_object(
attr->obj_attr.uobject,
- attr->obj_attr.attr_elm->spec.u.obj.access, commit,
+ attr->obj_attr.attr_elm->spec.u.obj.access,
+ test_bit(i, pbundle->uobj_hw_obj_valid),
+ commit,
&pbundle->bundle);
}
@@ -590,6 +594,8 @@ static int ib_uverbs_cmd_verbs(struct ib_uverbs_file *ufile,
sizeof(pbundle->bundle.attr_present));
memset(pbundle->uobj_finalize, 0, sizeof(pbundle->uobj_finalize));
memset(pbundle->spec_finalize, 0, sizeof(pbundle->spec_finalize));
+ memset(pbundle->uobj_hw_obj_valid, 0,
+ sizeof(pbundle->uobj_hw_obj_valid));
ret = ib_uverbs_run_method(pbundle, hdr->num_attrs);
bundle_destroy(pbundle, ret == 0);
@@ -784,3 +790,15 @@ int uverbs_copy_to_struct_or_zero(const struct uverbs_attr_bundle *bundle,
}
return uverbs_copy_to(bundle, idx, from, size);
}
+
+/* Once called an abort will call through to the type's destroy_hw() */
+void uverbs_finalize_uobj_create(const struct uverbs_attr_bundle *bundle,
+ u16 idx)
+{
+ struct bundle_priv *pbundle =
+ container_of(bundle, struct bundle_priv, bundle);
+
+ __set_bit(uapi_bkey_attr(uapi_key_attr(idx)),
+ pbundle->uobj_hw_obj_valid);
+}
+EXPORT_SYMBOL(uverbs_finalize_uobj_create);
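uverbs_finalize_uobj_create() is what lets the rewritten create handlers (CQ, MR, and the new QP/SRQ/WQ files below) return straight out of a failed response copy: once the bit is set, aborting the uobject calls the type's destroy_hw(), so no hand-rolled ib_destroy_*() unwinding is needed. A hedged sketch of the resulting handler shape; the EXAMPLE_* attribute ids, example_obj and create_hw_object() are all made up:

	#include <linux/err.h>
	#include <rdma/uverbs_ioctl.h>

	enum { EXAMPLE_CREATE_HANDLE, EXAMPLE_CREATE_RESP };

	struct example_obj { u32 id; };			/* hypothetical HW object */
	struct example_obj *create_hw_object(struct uverbs_attr_bundle *attrs);

	static int example_create_handler(struct uverbs_attr_bundle *attrs)
	{
		struct ib_uobject *uobj =
			uverbs_attr_get_uobject(attrs, EXAMPLE_CREATE_HANDLE);
		struct example_obj *obj;
		u32 id;

		obj = create_hw_object(attrs);
		if (IS_ERR(obj))
			return PTR_ERR(obj);

		uobj->object = obj;
		/* from here on, abort paths invoke the type's destroy_hw() for us */
		uverbs_finalize_uobj_create(attrs, EXAMPLE_CREATE_HANDLE);

		id = obj->id;
		return uverbs_copy_to(attrs, EXAMPLE_CREATE_RESP, &id, sizeof(id));
	}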
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 1bab8de14757..69e4755cc04b 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -75,7 +75,7 @@ static dev_t dynamic_uverbs_dev;
static struct class *uverbs_class;
static DEFINE_IDA(uverbs_ida);
-static void ib_uverbs_add_one(struct ib_device *device);
+static int ib_uverbs_add_one(struct ib_device *device);
static void ib_uverbs_remove_one(struct ib_device *device, void *client_data);
/*
@@ -146,8 +146,7 @@ void ib_uverbs_release_ucq(struct ib_uverbs_completion_event_file *ev_file,
void ib_uverbs_release_uevent(struct ib_uevent_object *uobj)
{
- struct ib_uverbs_async_event_file *async_file =
- READ_ONCE(uobj->uobject.ufile->async_file);
+ struct ib_uverbs_async_event_file *async_file = uobj->event_file;
struct ib_uverbs_event *evt, *tmp;
if (!async_file)
@@ -159,6 +158,7 @@ void ib_uverbs_release_uevent(struct ib_uevent_object *uobj)
kfree(evt);
}
spin_unlock_irq(&async_file->ev_queue.lock);
+ uverbs_uobject_put(&async_file->uobj);
}
void ib_uverbs_detach_umcast(struct ib_qp *qp,
@@ -197,8 +197,8 @@ void ib_uverbs_release_file(struct kref *ref)
if (atomic_dec_and_test(&file->device->refcount))
ib_uverbs_comp_dev(file->device);
- if (file->async_file)
- uverbs_uobject_put(&file->async_file->uobj);
+ if (file->default_async_file)
+ uverbs_uobject_put(&file->default_async_file->uobj);
put_device(&file->device->dev);
if (file->disassociate_page)
@@ -296,6 +296,8 @@ static __poll_t ib_uverbs_event_poll(struct ib_uverbs_event_queue *ev_queue,
spin_lock_irq(&ev_queue->lock);
if (!list_empty(&ev_queue->event_list))
pollflags = EPOLLIN | EPOLLRDNORM;
+ else if (ev_queue->is_closed)
+ pollflags = EPOLLERR;
spin_unlock_irq(&ev_queue->lock);
return pollflags;
@@ -425,7 +427,7 @@ void ib_uverbs_async_handler(struct ib_uverbs_async_event_file *async_file,
static void uverbs_uobj_event(struct ib_uevent_object *eobj,
struct ib_event *event)
{
- ib_uverbs_async_handler(READ_ONCE(eobj->uobject.ufile->async_file),
+ ib_uverbs_async_handler(eobj->event_file,
eobj->uobject.user_handle, event->event,
&eobj->event_list, &eobj->events_reported);
}
@@ -482,10 +484,10 @@ void ib_uverbs_init_async_event_file(
/* The first async_event_file becomes the default one for the file. */
mutex_lock(&uverbs_file->ucontext_lock);
- if (!uverbs_file->async_file) {
+ if (!uverbs_file->default_async_file) {
/* Pairs with the put in ib_uverbs_release_file */
uverbs_uobject_get(&async_file->uobj);
- smp_store_release(&uverbs_file->async_file, async_file);
+ smp_store_release(&uverbs_file->default_async_file, async_file);
}
mutex_unlock(&uverbs_file->ucontext_lock);
@@ -833,12 +835,12 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
return;
/*
- * The umap_lock is nested under mmap_sem since it used within
+ * The umap_lock is nested under mmap_lock since it used within
* the vma_ops callbacks, so we have to clean the list one mm
* at a time to get the lock ordering right. Typically there
* will only be one mm, so no big deal.
*/
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
if (!mmget_still_valid(mm))
goto skip_mm;
mutex_lock(&ufile->umap_lock);
@@ -860,7 +862,7 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
}
mutex_unlock(&ufile->umap_lock);
skip_mm:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
mmput(mm);
}
}
@@ -1092,7 +1094,7 @@ static int ib_uverbs_create_uapi(struct ib_device *device,
return 0;
}
-static void ib_uverbs_add_one(struct ib_device *device)
+static int ib_uverbs_add_one(struct ib_device *device)
{
int devnum;
dev_t base;
@@ -1100,16 +1102,16 @@ static void ib_uverbs_add_one(struct ib_device *device)
int ret;
if (!device->ops.alloc_ucontext)
- return;
+ return -EOPNOTSUPP;
uverbs_dev = kzalloc(sizeof(*uverbs_dev), GFP_KERNEL);
if (!uverbs_dev)
- return;
+ return -ENOMEM;
ret = init_srcu_struct(&uverbs_dev->disassociate_srcu);
if (ret) {
kfree(uverbs_dev);
- return;
+ return -ENOMEM;
}
device_initialize(&uverbs_dev->dev);
@@ -1129,15 +1131,18 @@ static void ib_uverbs_add_one(struct ib_device *device)
devnum = ida_alloc_max(&uverbs_ida, IB_UVERBS_MAX_DEVICES - 1,
GFP_KERNEL);
- if (devnum < 0)
+ if (devnum < 0) {
+ ret = -ENOMEM;
goto err;
+ }
uverbs_dev->devnum = devnum;
if (devnum >= IB_UVERBS_NUM_FIXED_MINOR)
base = dynamic_uverbs_dev + devnum - IB_UVERBS_NUM_FIXED_MINOR;
else
base = IB_UVERBS_BASE_DEV + devnum;
- if (ib_uverbs_create_uapi(device, uverbs_dev))
+ ret = ib_uverbs_create_uapi(device, uverbs_dev);
+ if (ret)
goto err_uapi;
uverbs_dev->dev.devt = base;
@@ -1152,7 +1157,7 @@ static void ib_uverbs_add_one(struct ib_device *device)
goto err_uapi;
ib_set_client_data(device, &uverbs_client, uverbs_dev);
- return;
+ return 0;
err_uapi:
ida_free(&uverbs_ida, devnum);
@@ -1161,7 +1166,7 @@ err:
ib_uverbs_comp_dev(uverbs_dev);
wait_for_completion(&uverbs_dev->comp);
put_device(&uverbs_dev->dev);
- return;
+ return ret;
}
static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev,
@@ -1201,9 +1206,6 @@ static void ib_uverbs_remove_one(struct ib_device *device, void *client_data)
struct ib_uverbs_device *uverbs_dev = client_data;
int wait_clients = 1;
- if (!uverbs_dev)
- return;
-
cdev_device_del(&uverbs_dev->cdev, &uverbs_dev->dev);
ida_free(&uverbs_ida, uverbs_dev->devnum);
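One user-visible detail in the uverbs_main changes: a closed event queue (for example after device disassociation) now reports EPOLLERR from poll instead of simply never becoming readable. A userspace sketch of how a reader of one of these event fds would observe that:

	#include <poll.h>
	#include <stdio.h>

	static int wait_for_event(int event_fd)
	{
		struct pollfd pfd = { .fd = event_fd, .events = POLLIN };

		if (poll(&pfd, 1, -1) < 0)
			return -1;
		if (pfd.revents & POLLERR) {
			fprintf(stderr, "event queue closed\n");
			return -1;
		}
		return (pfd.revents & POLLIN) ? 0 : -1;
	}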
diff --git a/drivers/infiniband/core/uverbs_std_types.c b/drivers/infiniband/core/uverbs_std_types.c
index 3abfc63225cb..08c39cfb1bd9 100644
--- a/drivers/infiniband/core/uverbs_std_types.c
+++ b/drivers/infiniband/core/uverbs_std_types.c
@@ -75,40 +75,6 @@ static int uverbs_free_mw(struct ib_uobject *uobject,
return uverbs_dealloc_mw((struct ib_mw *)uobject->object);
}
-static int uverbs_free_qp(struct ib_uobject *uobject,
- enum rdma_remove_reason why,
- struct uverbs_attr_bundle *attrs)
-{
- struct ib_qp *qp = uobject->object;
- struct ib_uqp_object *uqp =
- container_of(uobject, struct ib_uqp_object, uevent.uobject);
- int ret;
-
- /*
- * If this is a user triggered destroy then do not allow destruction
- * until the user cleans up all the mcast bindings. Unlike in other
- * places we forcibly clean up the mcast attachments for !DESTROY
- * because the mcast attaches are not ubojects and will not be
- * destroyed by anything else during cleanup processing.
- */
- if (why == RDMA_REMOVE_DESTROY) {
- if (!list_empty(&uqp->mcast_list))
- return -EBUSY;
- } else if (qp == qp->real_qp) {
- ib_uverbs_detach_umcast(qp, uqp);
- }
-
- ret = ib_destroy_qp_user(qp, &attrs->driver_udata);
- if (ib_is_destroy_retryable(ret, why, uobject))
- return ret;
-
- if (uqp->uxrcd)
- atomic_dec(&uqp->uxrcd->refcnt);
-
- ib_uverbs_release_uevent(&uqp->uevent);
- return ret;
-}
-
static int uverbs_free_rwq_ind_tbl(struct ib_uobject *uobject,
enum rdma_remove_reason why,
struct uverbs_attr_bundle *attrs)
@@ -125,48 +91,6 @@ static int uverbs_free_rwq_ind_tbl(struct ib_uobject *uobject,
return ret;
}
-static int uverbs_free_wq(struct ib_uobject *uobject,
- enum rdma_remove_reason why,
- struct uverbs_attr_bundle *attrs)
-{
- struct ib_wq *wq = uobject->object;
- struct ib_uwq_object *uwq =
- container_of(uobject, struct ib_uwq_object, uevent.uobject);
- int ret;
-
- ret = ib_destroy_wq(wq, &attrs->driver_udata);
- if (ib_is_destroy_retryable(ret, why, uobject))
- return ret;
-
- ib_uverbs_release_uevent(&uwq->uevent);
- return ret;
-}
-
-static int uverbs_free_srq(struct ib_uobject *uobject,
- enum rdma_remove_reason why,
- struct uverbs_attr_bundle *attrs)
-{
- struct ib_srq *srq = uobject->object;
- struct ib_uevent_object *uevent =
- container_of(uobject, struct ib_uevent_object, uobject);
- enum ib_srq_type srq_type = srq->srq_type;
- int ret;
-
- ret = ib_destroy_srq_user(srq, &attrs->driver_udata);
- if (ib_is_destroy_retryable(ret, why, uobject))
- return ret;
-
- if (srq_type == IB_SRQT_XRC) {
- struct ib_usrq_object *us =
- container_of(uevent, struct ib_usrq_object, uevent);
-
- atomic_dec(&us->uxrcd->refcnt);
- }
-
- ib_uverbs_release_uevent(uevent);
- return ret;
-}
-
static int uverbs_free_xrcd(struct ib_uobject *uobject,
enum rdma_remove_reason why,
struct uverbs_attr_bundle *attrs)
@@ -252,10 +176,6 @@ DECLARE_UVERBS_NAMED_OBJECT(
"[infinibandevent]",
O_RDONLY));
-DECLARE_UVERBS_NAMED_OBJECT(
- UVERBS_OBJECT_QP,
- UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uqp_object), uverbs_free_qp));
-
DECLARE_UVERBS_NAMED_METHOD_DESTROY(
UVERBS_METHOD_MW_DESTROY,
UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_MW_HANDLE,
@@ -267,11 +187,6 @@ DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_MW,
UVERBS_TYPE_ALLOC_IDR(uverbs_free_mw),
&UVERBS_METHOD(UVERBS_METHOD_MW_DESTROY));
-DECLARE_UVERBS_NAMED_OBJECT(
- UVERBS_OBJECT_SRQ,
- UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_usrq_object),
- uverbs_free_srq));
-
DECLARE_UVERBS_NAMED_METHOD_DESTROY(
UVERBS_METHOD_AH_DESTROY,
UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_AH_HANDLE,
@@ -296,10 +211,6 @@ DECLARE_UVERBS_NAMED_OBJECT(
uverbs_free_flow),
&UVERBS_METHOD(UVERBS_METHOD_FLOW_DESTROY));
-DECLARE_UVERBS_NAMED_OBJECT(
- UVERBS_OBJECT_WQ,
- UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uwq_object), uverbs_free_wq));
-
DECLARE_UVERBS_NAMED_METHOD_DESTROY(
UVERBS_METHOD_RWQ_IND_TBL_DESTROY,
UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_RWQ_IND_TBL_HANDLE,
@@ -340,18 +251,12 @@ const struct uapi_definition uverbs_def_obj_intf[] = {
UAPI_DEF_OBJ_NEEDS_FN(dealloc_pd)),
UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_COMP_CHANNEL,
UAPI_DEF_OBJ_NEEDS_FN(dealloc_pd)),
- UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_QP,
- UAPI_DEF_OBJ_NEEDS_FN(destroy_qp)),
UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_AH,
UAPI_DEF_OBJ_NEEDS_FN(destroy_ah)),
UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_MW,
UAPI_DEF_OBJ_NEEDS_FN(dealloc_mw)),
- UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_SRQ,
- UAPI_DEF_OBJ_NEEDS_FN(destroy_srq)),
UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_FLOW,
UAPI_DEF_OBJ_NEEDS_FN(destroy_flow)),
- UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_WQ,
- UAPI_DEF_OBJ_NEEDS_FN(destroy_wq)),
UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
UVERBS_OBJECT_RWQ_IND_TBL,
UAPI_DEF_OBJ_NEEDS_FN(destroy_rwq_ind_table)),
diff --git a/drivers/infiniband/core/uverbs_std_types_cq.c b/drivers/infiniband/core/uverbs_std_types_cq.c
index da4110a0eea2..5dce2c7cc323 100644
--- a/drivers/infiniband/core/uverbs_std_types_cq.c
+++ b/drivers/infiniband/core/uverbs_std_types_cq.c
@@ -100,6 +100,9 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)(
uverbs_uobject_get(ev_file_uobj);
}
+ obj->uevent.event_file = ib_uverbs_get_async_event(
+ attrs, UVERBS_ATTR_CREATE_CQ_EVENT_FD);
+
if (attr.comp_vector >= attrs->ufile->device->num_comp_vectors) {
ret = -EINVAL;
goto err_event_file;
@@ -129,19 +132,17 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)(
obj->uevent.uobject.object = cq;
obj->uevent.uobject.user_handle = user_handle;
rdma_restrack_uadd(&cq->res);
+ uverbs_finalize_uobj_create(attrs, UVERBS_ATTR_CREATE_CQ_HANDLE);
ret = uverbs_copy_to(attrs, UVERBS_ATTR_CREATE_CQ_RESP_CQE, &cq->cqe,
sizeof(cq->cqe));
- if (ret)
- goto err_cq;
+ return ret;
- return 0;
-err_cq:
- ib_destroy_cq_user(cq, uverbs_get_cleared_udata(attrs));
- cq = NULL;
err_free:
kfree(cq);
err_event_file:
+ if (obj->uevent.event_file)
+ uverbs_uobject_put(&obj->uevent.event_file->uobj);
if (ev_file)
uverbs_uobject_put(ev_file_uobj);
return ret;
@@ -171,6 +172,10 @@ DECLARE_UVERBS_NAMED_METHOD(
UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_CREATE_CQ_RESP_CQE,
UVERBS_ATTR_TYPE(u32),
UA_MANDATORY),
+ UVERBS_ATTR_FD(UVERBS_ATTR_CREATE_CQ_EVENT_FD,
+ UVERBS_OBJECT_ASYNC_EVENT,
+ UVERBS_ACCESS_READ,
+ UA_OPTIONAL),
UVERBS_ATTR_UHW());
static int UVERBS_HANDLER(UVERBS_METHOD_CQ_DESTROY)(
diff --git a/drivers/infiniband/core/uverbs_std_types_mr.c b/drivers/infiniband/core/uverbs_std_types_mr.c
index c1286a52dc84..a2722ef8496e 100644
--- a/drivers/infiniband/core/uverbs_std_types_mr.c
+++ b/drivers/infiniband/core/uverbs_std_types_mr.c
@@ -136,21 +136,15 @@ static int UVERBS_HANDLER(UVERBS_METHOD_DM_MR_REG)(
uobj->object = mr;
+ uverbs_finalize_uobj_create(attrs, UVERBS_ATTR_REG_DM_MR_HANDLE);
+
ret = uverbs_copy_to(attrs, UVERBS_ATTR_REG_DM_MR_RESP_LKEY, &mr->lkey,
sizeof(mr->lkey));
if (ret)
- goto err_dereg;
+ return ret;
ret = uverbs_copy_to(attrs, UVERBS_ATTR_REG_DM_MR_RESP_RKEY,
&mr->rkey, sizeof(mr->rkey));
- if (ret)
- goto err_dereg;
-
- return 0;
-
-err_dereg:
- ib_dereg_mr_user(mr, uverbs_get_cleared_udata(attrs));
-
return ret;
}
diff --git a/drivers/infiniband/core/uverbs_std_types_qp.c b/drivers/infiniband/core/uverbs_std_types_qp.c
new file mode 100644
index 000000000000..3bf8dcdfe7eb
--- /dev/null
+++ b/drivers/infiniband/core/uverbs_std_types_qp.c
@@ -0,0 +1,401 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2020, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include <rdma/uverbs_std_types.h>
+#include "rdma_core.h"
+#include "uverbs.h"
+#include "core_priv.h"
+
+static int uverbs_free_qp(struct ib_uobject *uobject,
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_qp *qp = uobject->object;
+ struct ib_uqp_object *uqp =
+ container_of(uobject, struct ib_uqp_object, uevent.uobject);
+ int ret;
+
+ /*
+ * If this is a user triggered destroy then do not allow destruction
+ * until the user cleans up all the mcast bindings. Unlike in other
+ * places we forcibly clean up the mcast attachments for !DESTROY
+ * because the mcast attaches are not uobjects and will not be
+ * destroyed by anything else during cleanup processing.
+ */
+ if (why == RDMA_REMOVE_DESTROY) {
+ if (!list_empty(&uqp->mcast_list))
+ return -EBUSY;
+ } else if (qp == qp->real_qp) {
+ ib_uverbs_detach_umcast(qp, uqp);
+ }
+
+ ret = ib_destroy_qp_user(qp, &attrs->driver_udata);
+ if (ib_is_destroy_retryable(ret, why, uobject))
+ return ret;
+
+ if (uqp->uxrcd)
+ atomic_dec(&uqp->uxrcd->refcnt);
+
+ ib_uverbs_release_uevent(&uqp->uevent);
+ return ret;
+}
+
+static int check_creation_flags(enum ib_qp_type qp_type,
+ u32 create_flags)
+{
+ create_flags &= ~IB_UVERBS_QP_CREATE_SQ_SIG_ALL;
+
+ if (!create_flags || qp_type == IB_QPT_DRIVER)
+ return 0;
+
+ if (qp_type != IB_QPT_RAW_PACKET && qp_type != IB_QPT_UD)
+ return -EINVAL;
+
+ if ((create_flags & IB_UVERBS_QP_CREATE_SCATTER_FCS ||
+ create_flags & IB_UVERBS_QP_CREATE_CVLAN_STRIPPING) &&
+ qp_type != IB_QPT_RAW_PACKET)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void set_caps(struct ib_qp_init_attr *attr,
+ struct ib_uverbs_qp_cap *cap, bool req)
+{
+ if (req) {
+ attr->cap.max_send_wr = cap->max_send_wr;
+ attr->cap.max_recv_wr = cap->max_recv_wr;
+ attr->cap.max_send_sge = cap->max_send_sge;
+ attr->cap.max_recv_sge = cap->max_recv_sge;
+ attr->cap.max_inline_data = cap->max_inline_data;
+ } else {
+ cap->max_send_wr = attr->cap.max_send_wr;
+ cap->max_recv_wr = attr->cap.max_recv_wr;
+ cap->max_send_sge = attr->cap.max_send_sge;
+ cap->max_recv_sge = attr->cap.max_recv_sge;
+ cap->max_inline_data = attr->cap.max_inline_data;
+ }
+}
+
+static int UVERBS_HANDLER(UVERBS_METHOD_QP_CREATE)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uqp_object *obj = container_of(
+ uverbs_attr_get_uobject(attrs, UVERBS_ATTR_CREATE_QP_HANDLE),
+ typeof(*obj), uevent.uobject);
+ struct ib_qp_init_attr attr = {};
+ struct ib_uverbs_qp_cap cap = {};
+ struct ib_rwq_ind_table *rwq_ind_tbl = NULL;
+ struct ib_qp *qp;
+ struct ib_pd *pd = NULL;
+ struct ib_srq *srq = NULL;
+ struct ib_cq *recv_cq = NULL;
+ struct ib_cq *send_cq = NULL;
+ struct ib_xrcd *xrcd = NULL;
+ struct ib_uobject *xrcd_uobj = NULL;
+ struct ib_device *device;
+ u64 user_handle;
+ int ret;
+
+ ret = uverbs_copy_from_or_zero(&cap, attrs,
+ UVERBS_ATTR_CREATE_QP_CAP);
+ if (!ret)
+ ret = uverbs_copy_from(&user_handle, attrs,
+ UVERBS_ATTR_CREATE_QP_USER_HANDLE);
+ if (!ret)
+ ret = uverbs_get_const(&attr.qp_type, attrs,
+ UVERBS_ATTR_CREATE_QP_TYPE);
+ if (ret)
+ return ret;
+
+ switch (attr.qp_type) {
+ case IB_QPT_XRC_TGT:
+ if (uverbs_attr_is_valid(attrs,
+ UVERBS_ATTR_CREATE_QP_RECV_CQ_HANDLE) ||
+ uverbs_attr_is_valid(attrs,
+ UVERBS_ATTR_CREATE_QP_SEND_CQ_HANDLE) ||
+ uverbs_attr_is_valid(attrs,
+ UVERBS_ATTR_CREATE_QP_PD_HANDLE) ||
+ uverbs_attr_is_valid(attrs,
+ UVERBS_ATTR_CREATE_QP_IND_TABLE_HANDLE))
+ return -EINVAL;
+
+ xrcd_uobj = uverbs_attr_get_uobject(attrs,
+ UVERBS_ATTR_CREATE_QP_XRCD_HANDLE);
+ if (IS_ERR(xrcd_uobj))
+ return PTR_ERR(xrcd_uobj);
+
+ xrcd = (struct ib_xrcd *)xrcd_uobj->object;
+ if (!xrcd)
+ return -EINVAL;
+ device = xrcd->device;
+ break;
+ case IB_UVERBS_QPT_RAW_PACKET:
+ if (!capable(CAP_NET_RAW))
+ return -EPERM;
+ fallthrough;
+ case IB_UVERBS_QPT_RC:
+ case IB_UVERBS_QPT_UC:
+ case IB_UVERBS_QPT_UD:
+ case IB_UVERBS_QPT_XRC_INI:
+ case IB_UVERBS_QPT_DRIVER:
+ if (uverbs_attr_is_valid(attrs,
+ UVERBS_ATTR_CREATE_QP_XRCD_HANDLE) ||
+ (uverbs_attr_is_valid(attrs,
+ UVERBS_ATTR_CREATE_QP_SRQ_HANDLE) &&
+ attr.qp_type == IB_QPT_XRC_INI))
+ return -EINVAL;
+
+ pd = uverbs_attr_get_obj(attrs,
+ UVERBS_ATTR_CREATE_QP_PD_HANDLE);
+ if (IS_ERR(pd))
+ return PTR_ERR(pd);
+
+ rwq_ind_tbl = uverbs_attr_get_obj(attrs,
+ UVERBS_ATTR_CREATE_QP_IND_TABLE_HANDLE);
+ if (!IS_ERR(rwq_ind_tbl)) {
+ if (cap.max_recv_wr || cap.max_recv_sge ||
+ uverbs_attr_is_valid(attrs,
+ UVERBS_ATTR_CREATE_QP_RECV_CQ_HANDLE) ||
+ uverbs_attr_is_valid(attrs,
+ UVERBS_ATTR_CREATE_QP_SRQ_HANDLE))
+ return -EINVAL;
+
+ /* send_cq is optional */
+ if (cap.max_send_wr) {
+ send_cq = uverbs_attr_get_obj(attrs,
+ UVERBS_ATTR_CREATE_QP_SEND_CQ_HANDLE);
+ if (IS_ERR(send_cq))
+ return PTR_ERR(send_cq);
+ }
+ attr.rwq_ind_tbl = rwq_ind_tbl;
+ } else {
+ send_cq = uverbs_attr_get_obj(attrs,
+ UVERBS_ATTR_CREATE_QP_SEND_CQ_HANDLE);
+ if (IS_ERR(send_cq))
+ return PTR_ERR(send_cq);
+
+ if (attr.qp_type != IB_QPT_XRC_INI) {
+ recv_cq = uverbs_attr_get_obj(attrs,
+ UVERBS_ATTR_CREATE_QP_RECV_CQ_HANDLE);
+ if (IS_ERR(recv_cq))
+ return PTR_ERR(recv_cq);
+ }
+ }
+
+ device = pd->device;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = uverbs_get_flags32(&attr.create_flags, attrs,
+ UVERBS_ATTR_CREATE_QP_FLAGS,
+ IB_UVERBS_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
+ IB_UVERBS_QP_CREATE_SCATTER_FCS |
+ IB_UVERBS_QP_CREATE_CVLAN_STRIPPING |
+ IB_UVERBS_QP_CREATE_PCI_WRITE_END_PADDING |
+ IB_UVERBS_QP_CREATE_SQ_SIG_ALL);
+ if (ret)
+ return ret;
+
+ ret = check_creation_flags(attr.qp_type, attr.create_flags);
+ if (ret)
+ return ret;
+
+ if (uverbs_attr_is_valid(attrs,
+ UVERBS_ATTR_CREATE_QP_SOURCE_QPN)) {
+ ret = uverbs_copy_from(&attr.source_qpn, attrs,
+ UVERBS_ATTR_CREATE_QP_SOURCE_QPN);
+ if (ret)
+ return ret;
+ attr.create_flags |= IB_QP_CREATE_SOURCE_QPN;
+ }
+
+ srq = uverbs_attr_get_obj(attrs,
+ UVERBS_ATTR_CREATE_QP_SRQ_HANDLE);
+ if (!IS_ERR(srq)) {
+ if ((srq->srq_type == IB_SRQT_XRC &&
+ attr.qp_type != IB_QPT_XRC_TGT) ||
+ (srq->srq_type != IB_SRQT_XRC &&
+ attr.qp_type == IB_QPT_XRC_TGT))
+ return -EINVAL;
+ attr.srq = srq;
+ }
+
+ obj->uevent.event_file = ib_uverbs_get_async_event(attrs,
+ UVERBS_ATTR_CREATE_QP_EVENT_FD);
+ INIT_LIST_HEAD(&obj->uevent.event_list);
+ INIT_LIST_HEAD(&obj->mcast_list);
+ obj->uevent.uobject.user_handle = user_handle;
+ attr.event_handler = ib_uverbs_qp_event_handler;
+ attr.send_cq = send_cq;
+ attr.recv_cq = recv_cq;
+ attr.xrcd = xrcd;
+ if (attr.create_flags & IB_UVERBS_QP_CREATE_SQ_SIG_ALL) {
+ /* This create flag is a uverbs-only bit and must be masked off
+ * before calling drivers. It exists so that ioctl users do not
+ * need an extra attribute just for SQ_SIG_ALL.
+ */
+ attr.create_flags &= ~IB_UVERBS_QP_CREATE_SQ_SIG_ALL;
+ attr.sq_sig_type = IB_SIGNAL_ALL_WR;
+ } else {
+ attr.sq_sig_type = IB_SIGNAL_REQ_WR;
+ }
+
+ set_caps(&attr, &cap, true);
+ mutex_init(&obj->mcast_lock);
+
+ if (attr.qp_type == IB_QPT_XRC_TGT)
+ qp = ib_create_qp(pd, &attr);
+ else
+ qp = _ib_create_qp(device, pd, &attr, &attrs->driver_udata,
+ obj);
+
+ if (IS_ERR(qp)) {
+ ret = PTR_ERR(qp);
+ goto err_put;
+ }
+
+ if (attr.qp_type != IB_QPT_XRC_TGT) {
+ atomic_inc(&pd->usecnt);
+ if (attr.send_cq)
+ atomic_inc(&attr.send_cq->usecnt);
+ if (attr.recv_cq)
+ atomic_inc(&attr.recv_cq->usecnt);
+ if (attr.srq)
+ atomic_inc(&attr.srq->usecnt);
+ if (attr.rwq_ind_tbl)
+ atomic_inc(&attr.rwq_ind_tbl->usecnt);
+ } else {
+ obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
+ uobject);
+ atomic_inc(&obj->uxrcd->refcnt);
+ /* It is done in _ib_create_qp for other QP types */
+ qp->uobject = obj;
+ }
+
+ obj->uevent.uobject.object = qp;
+ uverbs_finalize_uobj_create(attrs, UVERBS_ATTR_CREATE_QP_HANDLE);
+
+ if (attr.qp_type != IB_QPT_XRC_TGT) {
+ ret = ib_create_qp_security(qp, device);
+ if (ret)
+ return ret;
+ }
+
+ set_caps(&attr, &cap, false);
+ ret = uverbs_copy_to_struct_or_zero(attrs,
+ UVERBS_ATTR_CREATE_QP_RESP_CAP, &cap,
+ sizeof(cap));
+ if (ret)
+ return ret;
+
+ ret = uverbs_copy_to(attrs, UVERBS_ATTR_CREATE_QP_RESP_QP_NUM,
+ &qp->qp_num,
+ sizeof(qp->qp_num));
+
+ return ret;
+err_put:
+ if (obj->uevent.event_file)
+ uverbs_uobject_put(&obj->uevent.event_file->uobj);
+ return ret;
+};
+
+DECLARE_UVERBS_NAMED_METHOD(
+ UVERBS_METHOD_QP_CREATE,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_QP_HANDLE,
+ UVERBS_OBJECT_QP,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_QP_XRCD_HANDLE,
+ UVERBS_OBJECT_XRCD,
+ UVERBS_ACCESS_READ,
+ UA_OPTIONAL),
+ UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_QP_PD_HANDLE,
+ UVERBS_OBJECT_PD,
+ UVERBS_ACCESS_READ,
+ UA_OPTIONAL),
+ UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_QP_SRQ_HANDLE,
+ UVERBS_OBJECT_SRQ,
+ UVERBS_ACCESS_READ,
+ UA_OPTIONAL),
+ UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_QP_SEND_CQ_HANDLE,
+ UVERBS_OBJECT_CQ,
+ UVERBS_ACCESS_READ,
+ UA_OPTIONAL),
+ UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_QP_RECV_CQ_HANDLE,
+ UVERBS_OBJECT_CQ,
+ UVERBS_ACCESS_READ,
+ UA_OPTIONAL),
+ UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_QP_IND_TABLE_HANDLE,
+ UVERBS_OBJECT_RWQ_IND_TBL,
+ UVERBS_ACCESS_READ,
+ UA_OPTIONAL),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_QP_USER_HANDLE,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_QP_CAP,
+ UVERBS_ATTR_STRUCT(struct ib_uverbs_qp_cap,
+ max_inline_data),
+ UA_MANDATORY),
+ UVERBS_ATTR_CONST_IN(UVERBS_ATTR_CREATE_QP_TYPE,
+ enum ib_uverbs_qp_type,
+ UA_MANDATORY),
+ UVERBS_ATTR_FLAGS_IN(UVERBS_ATTR_CREATE_QP_FLAGS,
+ enum ib_uverbs_qp_create_flags,
+ UA_OPTIONAL),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_QP_SOURCE_QPN,
+ UVERBS_ATTR_TYPE(u32),
+ UA_OPTIONAL),
+ UVERBS_ATTR_FD(UVERBS_ATTR_CREATE_QP_EVENT_FD,
+ UVERBS_OBJECT_ASYNC_EVENT,
+ UVERBS_ACCESS_READ,
+ UA_OPTIONAL),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_CREATE_QP_RESP_CAP,
+ UVERBS_ATTR_STRUCT(struct ib_uverbs_qp_cap,
+ max_inline_data),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_CREATE_QP_RESP_QP_NUM,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_UHW());
+
+static int UVERBS_HANDLER(UVERBS_METHOD_QP_DESTROY)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *uobj =
+ uverbs_attr_get_uobject(attrs, UVERBS_ATTR_DESTROY_QP_HANDLE);
+ struct ib_uqp_object *obj =
+ container_of(uobj, struct ib_uqp_object, uevent.uobject);
+ struct ib_uverbs_destroy_qp_resp resp = {
+ .events_reported = obj->uevent.events_reported
+ };
+
+ return uverbs_copy_to(attrs, UVERBS_ATTR_DESTROY_QP_RESP, &resp,
+ sizeof(resp));
+}
+
+DECLARE_UVERBS_NAMED_METHOD(
+ UVERBS_METHOD_QP_DESTROY,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_QP_HANDLE,
+ UVERBS_OBJECT_QP,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_DESTROY_QP_RESP,
+ UVERBS_ATTR_TYPE(struct ib_uverbs_destroy_qp_resp),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_OBJECT(
+ UVERBS_OBJECT_QP,
+ UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uqp_object), uverbs_free_qp),
+ &UVERBS_METHOD(UVERBS_METHOD_QP_CREATE),
+ &UVERBS_METHOD(UVERBS_METHOD_QP_DESTROY));
+
+const struct uapi_definition uverbs_def_obj_qp[] = {
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_QP,
+ UAPI_DEF_OBJ_NEEDS_FN(destroy_qp)),
+ {}
+};
diff --git a/drivers/infiniband/core/uverbs_std_types_srq.c b/drivers/infiniband/core/uverbs_std_types_srq.c
new file mode 100644
index 000000000000..c0ecbba26bf4
--- /dev/null
+++ b/drivers/infiniband/core/uverbs_std_types_srq.c
@@ -0,0 +1,234 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2020, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include <rdma/uverbs_std_types.h>
+#include "rdma_core.h"
+#include "uverbs.h"
+
+static int uverbs_free_srq(struct ib_uobject *uobject,
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_srq *srq = uobject->object;
+ struct ib_uevent_object *uevent =
+ container_of(uobject, struct ib_uevent_object, uobject);
+ enum ib_srq_type srq_type = srq->srq_type;
+ int ret;
+
+ ret = ib_destroy_srq_user(srq, &attrs->driver_udata);
+ if (ib_is_destroy_retryable(ret, why, uobject))
+ return ret;
+
+ if (srq_type == IB_SRQT_XRC) {
+ struct ib_usrq_object *us =
+ container_of(uobject, struct ib_usrq_object,
+ uevent.uobject);
+
+ atomic_dec(&us->uxrcd->refcnt);
+ }
+
+ ib_uverbs_release_uevent(uevent);
+ return ret;
+}
+
+static int UVERBS_HANDLER(UVERBS_METHOD_SRQ_CREATE)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_usrq_object *obj = container_of(
+ uverbs_attr_get_uobject(attrs, UVERBS_ATTR_CREATE_SRQ_HANDLE),
+ typeof(*obj), uevent.uobject);
+ struct ib_pd *pd =
+ uverbs_attr_get_obj(attrs, UVERBS_ATTR_CREATE_SRQ_PD_HANDLE);
+ struct ib_srq_init_attr attr = {};
+ struct ib_uobject *xrcd_uobj;
+ struct ib_srq *srq;
+ u64 user_handle;
+ int ret;
+
+ ret = uverbs_copy_from(&attr.attr.max_sge, attrs,
+ UVERBS_ATTR_CREATE_SRQ_MAX_SGE);
+ if (!ret)
+ ret = uverbs_copy_from(&attr.attr.max_wr, attrs,
+ UVERBS_ATTR_CREATE_SRQ_MAX_WR);
+ if (!ret)
+ ret = uverbs_copy_from(&attr.attr.srq_limit, attrs,
+ UVERBS_ATTR_CREATE_SRQ_LIMIT);
+ if (!ret)
+ ret = uverbs_copy_from(&user_handle, attrs,
+ UVERBS_ATTR_CREATE_SRQ_USER_HANDLE);
+ if (!ret)
+ ret = uverbs_get_const(&attr.srq_type, attrs,
+ UVERBS_ATTR_CREATE_SRQ_TYPE);
+ if (ret)
+ return ret;
+
+ if (ib_srq_has_cq(attr.srq_type)) {
+ attr.ext.cq = uverbs_attr_get_obj(attrs,
+ UVERBS_ATTR_CREATE_SRQ_CQ_HANDLE);
+ if (IS_ERR(attr.ext.cq))
+ return PTR_ERR(attr.ext.cq);
+ }
+
+ switch (attr.srq_type) {
+ case IB_UVERBS_SRQT_XRC:
+ xrcd_uobj = uverbs_attr_get_uobject(attrs,
+ UVERBS_ATTR_CREATE_SRQ_XRCD_HANDLE);
+ if (IS_ERR(xrcd_uobj))
+ return PTR_ERR(xrcd_uobj);
+
+ attr.ext.xrc.xrcd = (struct ib_xrcd *)xrcd_uobj->object;
+ if (!attr.ext.xrc.xrcd)
+ return -EINVAL;
+ obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
+ uobject);
+ atomic_inc(&obj->uxrcd->refcnt);
+ break;
+ case IB_UVERBS_SRQT_TM:
+ ret = uverbs_copy_from(&attr.ext.tag_matching.max_num_tags,
+ attrs,
+ UVERBS_ATTR_CREATE_SRQ_MAX_NUM_TAGS);
+ if (ret)
+ return ret;
+ break;
+ case IB_UVERBS_SRQT_BASIC:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ obj->uevent.event_file = ib_uverbs_get_async_event(attrs,
+ UVERBS_ATTR_CREATE_SRQ_EVENT_FD);
+ INIT_LIST_HEAD(&obj->uevent.event_list);
+ attr.event_handler = ib_uverbs_srq_event_handler;
+ obj->uevent.uobject.user_handle = user_handle;
+
+ srq = ib_create_srq_user(pd, &attr, obj, &attrs->driver_udata);
+ if (IS_ERR(srq)) {
+ ret = PTR_ERR(srq);
+ goto err;
+ }
+
+ obj->uevent.uobject.object = srq;
+ uverbs_finalize_uobj_create(attrs, UVERBS_ATTR_CREATE_SRQ_HANDLE);
+
+ ret = uverbs_copy_to(attrs, UVERBS_ATTR_CREATE_SRQ_RESP_MAX_WR,
+ &attr.attr.max_wr,
+ sizeof(attr.attr.max_wr));
+ if (ret)
+ return ret;
+
+ ret = uverbs_copy_to(attrs, UVERBS_ATTR_CREATE_SRQ_RESP_MAX_SGE,
+ &attr.attr.max_sge,
+ sizeof(attr.attr.max_sge));
+ if (ret)
+ return ret;
+
+ if (attr.srq_type == IB_SRQT_XRC) {
+ ret = uverbs_copy_to(attrs,
+ UVERBS_ATTR_CREATE_SRQ_RESP_SRQ_NUM,
+ &srq->ext.xrc.srq_num,
+ sizeof(srq->ext.xrc.srq_num));
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+err:
+ if (obj->uevent.event_file)
+ uverbs_uobject_put(&obj->uevent.event_file->uobj);
+ if (attr.srq_type == IB_SRQT_XRC)
+ atomic_dec(&obj->uxrcd->refcnt);
+ return ret;
+};
+
+DECLARE_UVERBS_NAMED_METHOD(
+ UVERBS_METHOD_SRQ_CREATE,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_SRQ_HANDLE,
+ UVERBS_OBJECT_SRQ,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_SRQ_PD_HANDLE,
+ UVERBS_OBJECT_PD,
+ UVERBS_ACCESS_READ,
+ UA_MANDATORY),
+ UVERBS_ATTR_CONST_IN(UVERBS_ATTR_CREATE_SRQ_TYPE,
+ enum ib_uverbs_srq_type,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_SRQ_USER_HANDLE,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_SRQ_MAX_WR,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_SRQ_MAX_SGE,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_SRQ_LIMIT,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_SRQ_XRCD_HANDLE,
+ UVERBS_OBJECT_XRCD,
+ UVERBS_ACCESS_READ,
+ UA_OPTIONAL),
+ UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_SRQ_CQ_HANDLE,
+ UVERBS_OBJECT_CQ,
+ UVERBS_ACCESS_READ,
+ UA_OPTIONAL),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_SRQ_MAX_NUM_TAGS,
+ UVERBS_ATTR_TYPE(u32),
+ UA_OPTIONAL),
+ UVERBS_ATTR_FD(UVERBS_ATTR_CREATE_SRQ_EVENT_FD,
+ UVERBS_OBJECT_ASYNC_EVENT,
+ UVERBS_ACCESS_READ,
+ UA_OPTIONAL),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_CREATE_SRQ_RESP_MAX_WR,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_CREATE_SRQ_RESP_MAX_SGE,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_CREATE_SRQ_RESP_SRQ_NUM,
+ UVERBS_ATTR_TYPE(u32),
+ UA_OPTIONAL),
+ UVERBS_ATTR_UHW());
+
+static int UVERBS_HANDLER(UVERBS_METHOD_SRQ_DESTROY)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *uobj =
+ uverbs_attr_get_uobject(attrs, UVERBS_ATTR_DESTROY_SRQ_HANDLE);
+ struct ib_usrq_object *obj =
+ container_of(uobj, struct ib_usrq_object, uevent.uobject);
+ struct ib_uverbs_destroy_srq_resp resp = {
+ .events_reported = obj->uevent.events_reported
+ };
+
+ return uverbs_copy_to(attrs, UVERBS_ATTR_DESTROY_SRQ_RESP, &resp,
+ sizeof(resp));
+}
+
+DECLARE_UVERBS_NAMED_METHOD(
+ UVERBS_METHOD_SRQ_DESTROY,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_SRQ_HANDLE,
+ UVERBS_OBJECT_SRQ,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_DESTROY_SRQ_RESP,
+ UVERBS_ATTR_TYPE(struct ib_uverbs_destroy_srq_resp),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_OBJECT(
+ UVERBS_OBJECT_SRQ,
+ UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_usrq_object),
+ uverbs_free_srq),
+ &UVERBS_METHOD(UVERBS_METHOD_SRQ_CREATE),
+ &UVERBS_METHOD(UVERBS_METHOD_SRQ_DESTROY)
+);
+
+const struct uapi_definition uverbs_def_obj_srq[] = {
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_SRQ,
+ UAPI_DEF_OBJ_NEEDS_FN(destroy_srq)),
+ {}
+};
diff --git a/drivers/infiniband/core/uverbs_std_types_wq.c b/drivers/infiniband/core/uverbs_std_types_wq.c
new file mode 100644
index 000000000000..cad842ede077
--- /dev/null
+++ b/drivers/infiniband/core/uverbs_std_types_wq.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2020, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include <rdma/uverbs_std_types.h>
+#include "rdma_core.h"
+#include "uverbs.h"
+
+static int uverbs_free_wq(struct ib_uobject *uobject,
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_wq *wq = uobject->object;
+ struct ib_uwq_object *uwq =
+ container_of(uobject, struct ib_uwq_object, uevent.uobject);
+ int ret;
+
+ ret = ib_destroy_wq(wq, &attrs->driver_udata);
+ if (ib_is_destroy_retryable(ret, why, uobject))
+ return ret;
+
+ ib_uverbs_release_uevent(&uwq->uevent);
+ return ret;
+}
+
+static int UVERBS_HANDLER(UVERBS_METHOD_WQ_CREATE)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uwq_object *obj = container_of(
+ uverbs_attr_get_uobject(attrs, UVERBS_ATTR_CREATE_WQ_HANDLE),
+ typeof(*obj), uevent.uobject);
+ struct ib_pd *pd =
+ uverbs_attr_get_obj(attrs, UVERBS_ATTR_CREATE_WQ_PD_HANDLE);
+ struct ib_cq *cq =
+ uverbs_attr_get_obj(attrs, UVERBS_ATTR_CREATE_WQ_CQ_HANDLE);
+ struct ib_wq_init_attr wq_init_attr = {};
+ struct ib_wq *wq;
+ u64 user_handle;
+ int ret;
+
+ ret = uverbs_get_flags32(&wq_init_attr.create_flags, attrs,
+ UVERBS_ATTR_CREATE_WQ_FLAGS,
+ IB_UVERBS_WQ_FLAGS_CVLAN_STRIPPING |
+ IB_UVERBS_WQ_FLAGS_SCATTER_FCS |
+ IB_UVERBS_WQ_FLAGS_DELAY_DROP |
+ IB_UVERBS_WQ_FLAGS_PCI_WRITE_END_PADDING);
+ if (!ret)
+ ret = uverbs_copy_from(&wq_init_attr.max_sge, attrs,
+ UVERBS_ATTR_CREATE_WQ_MAX_SGE);
+ if (!ret)
+ ret = uverbs_copy_from(&wq_init_attr.max_wr, attrs,
+ UVERBS_ATTR_CREATE_WQ_MAX_WR);
+ if (!ret)
+ ret = uverbs_copy_from(&user_handle, attrs,
+ UVERBS_ATTR_CREATE_WQ_USER_HANDLE);
+ if (!ret)
+ ret = uverbs_get_const(&wq_init_attr.wq_type, attrs,
+ UVERBS_ATTR_CREATE_WQ_TYPE);
+ if (ret)
+ return ret;
+
+ if (wq_init_attr.wq_type != IB_WQT_RQ)
+ return -EINVAL;
+
+ obj->uevent.event_file = ib_uverbs_get_async_event(attrs,
+ UVERBS_ATTR_CREATE_WQ_EVENT_FD);
+ obj->uevent.uobject.user_handle = user_handle;
+ INIT_LIST_HEAD(&obj->uevent.event_list);
+ wq_init_attr.event_handler = ib_uverbs_wq_event_handler;
+ wq_init_attr.wq_context = attrs->ufile;
+ wq_init_attr.cq = cq;
+
+ wq = pd->device->ops.create_wq(pd, &wq_init_attr, &attrs->driver_udata);
+ if (IS_ERR(wq)) {
+ ret = PTR_ERR(wq);
+ goto err;
+ }
+
+ obj->uevent.uobject.object = wq;
+ wq->wq_type = wq_init_attr.wq_type;
+ wq->cq = cq;
+ wq->pd = pd;
+ wq->device = pd->device;
+ wq->wq_context = wq_init_attr.wq_context;
+ atomic_set(&wq->usecnt, 0);
+ atomic_inc(&pd->usecnt);
+ atomic_inc(&cq->usecnt);
+ wq->uobject = obj;
+ uverbs_finalize_uobj_create(attrs, UVERBS_ATTR_CREATE_WQ_HANDLE);
+
+ ret = uverbs_copy_to(attrs, UVERBS_ATTR_CREATE_WQ_RESP_MAX_WR,
+ &wq_init_attr.max_wr,
+ sizeof(wq_init_attr.max_wr));
+ if (ret)
+ return ret;
+
+ ret = uverbs_copy_to(attrs, UVERBS_ATTR_CREATE_WQ_RESP_MAX_SGE,
+ &wq_init_attr.max_sge,
+ sizeof(wq_init_attr.max_sge));
+ if (ret)
+ return ret;
+
+ ret = uverbs_copy_to(attrs, UVERBS_ATTR_CREATE_WQ_RESP_WQ_NUM,
+ &wq->wq_num,
+ sizeof(wq->wq_num));
+ return ret;
+
+err:
+ if (obj->uevent.event_file)
+ uverbs_uobject_put(&obj->uevent.event_file->uobj);
+ return ret;
+};
+
+DECLARE_UVERBS_NAMED_METHOD(
+ UVERBS_METHOD_WQ_CREATE,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_WQ_HANDLE,
+ UVERBS_OBJECT_WQ,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_WQ_PD_HANDLE,
+ UVERBS_OBJECT_PD,
+ UVERBS_ACCESS_READ,
+ UA_MANDATORY),
+ UVERBS_ATTR_CONST_IN(UVERBS_ATTR_CREATE_WQ_TYPE,
+ enum ib_wq_type,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_WQ_USER_HANDLE,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_WQ_MAX_WR,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_WQ_MAX_SGE,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_FLAGS_IN(UVERBS_ATTR_CREATE_WQ_FLAGS,
+ enum ib_uverbs_wq_flags,
+ UA_MANDATORY),
+ UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_WQ_CQ_HANDLE,
+ UVERBS_OBJECT_CQ,
+ UVERBS_ACCESS_READ,
+ UA_OPTIONAL),
+ UVERBS_ATTR_FD(UVERBS_ATTR_CREATE_WQ_EVENT_FD,
+ UVERBS_OBJECT_ASYNC_EVENT,
+ UVERBS_ACCESS_READ,
+ UA_OPTIONAL),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_CREATE_WQ_RESP_MAX_WR,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_CREATE_WQ_RESP_MAX_SGE,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_CREATE_WQ_RESP_WQ_NUM,
+ UVERBS_ATTR_TYPE(u32),
+ UA_OPTIONAL),
+ UVERBS_ATTR_UHW());
+
+static int UVERBS_HANDLER(UVERBS_METHOD_WQ_DESTROY)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *uobj =
+ uverbs_attr_get_uobject(attrs, UVERBS_ATTR_DESTROY_WQ_HANDLE);
+ struct ib_uwq_object *obj =
+ container_of(uobj, struct ib_uwq_object, uevent.uobject);
+
+ return uverbs_copy_to(attrs, UVERBS_ATTR_DESTROY_WQ_RESP,
+ &obj->uevent.events_reported,
+ sizeof(obj->uevent.events_reported));
+}
+
+DECLARE_UVERBS_NAMED_METHOD(
+ UVERBS_METHOD_WQ_DESTROY,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_WQ_HANDLE,
+ UVERBS_OBJECT_WQ,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_DESTROY_WQ_RESP,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY));
+
+
+DECLARE_UVERBS_NAMED_OBJECT(
+ UVERBS_OBJECT_WQ,
+ UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uwq_object), uverbs_free_wq),
+ &UVERBS_METHOD(UVERBS_METHOD_WQ_CREATE),
+ &UVERBS_METHOD(UVERBS_METHOD_WQ_DESTROY)
+);
+
+const struct uapi_definition uverbs_def_obj_wq[] = {
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_WQ,
+ UAPI_DEF_OBJ_NEEDS_FN(destroy_wq)),
+ {}
+};
diff --git a/drivers/infiniband/core/uverbs_uapi.c b/drivers/infiniband/core/uverbs_uapi.c
index 3f121ac31e0a..5addc8fae3f3 100644
--- a/drivers/infiniband/core/uverbs_uapi.c
+++ b/drivers/infiniband/core/uverbs_uapi.c
@@ -634,6 +634,9 @@ static const struct uapi_definition uverbs_core_api[] = {
UAPI_DEF_CHAIN(uverbs_def_obj_flow_action),
UAPI_DEF_CHAIN(uverbs_def_obj_intf),
UAPI_DEF_CHAIN(uverbs_def_obj_mr),
+ UAPI_DEF_CHAIN(uverbs_def_obj_qp),
+ UAPI_DEF_CHAIN(uverbs_def_obj_srq),
+ UAPI_DEF_CHAIN(uverbs_def_obj_wq),
UAPI_DEF_CHAIN(uverbs_def_write_intf),
{},
};
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 56a71337112c..53d6505c0c7b 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -50,6 +50,7 @@
#include <rdma/ib_cache.h>
#include <rdma/ib_addr.h>
#include <rdma/rw.h>
+#include <rdma/lag.h>
#include "core_priv.h"
#include <trace/events/rdma_core.h>
@@ -500,8 +501,10 @@ rdma_update_sgid_attr(struct rdma_ah_attr *ah_attr,
static struct ib_ah *_rdma_create_ah(struct ib_pd *pd,
struct rdma_ah_attr *ah_attr,
u32 flags,
- struct ib_udata *udata)
+ struct ib_udata *udata,
+ struct net_device *xmit_slave)
{
+ struct rdma_ah_init_attr init_attr = {};
struct ib_device *device = pd->device;
struct ib_ah *ah;
int ret;
@@ -521,8 +524,11 @@ static struct ib_ah *_rdma_create_ah(struct ib_pd *pd,
ah->pd = pd;
ah->type = ah_attr->type;
ah->sgid_attr = rdma_update_sgid_attr(ah_attr, NULL);
+ init_attr.ah_attr = ah_attr;
+ init_attr.flags = flags;
+ init_attr.xmit_slave = xmit_slave;
- ret = device->ops.create_ah(ah, ah_attr, flags, udata);
+ ret = device->ops.create_ah(ah, &init_attr, udata);
if (ret) {
kfree(ah);
return ERR_PTR(ret);
@@ -547,15 +553,22 @@ struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
u32 flags)
{
const struct ib_gid_attr *old_sgid_attr;
+ struct net_device *slave;
struct ib_ah *ah;
int ret;
ret = rdma_fill_sgid_attr(pd->device, ah_attr, &old_sgid_attr);
if (ret)
return ERR_PTR(ret);
-
- ah = _rdma_create_ah(pd, ah_attr, flags, NULL);
-
+ slave = rdma_lag_get_ah_roce_slave(pd->device, ah_attr,
+ (flags & RDMA_CREATE_AH_SLEEPABLE) ?
+ GFP_KERNEL : GFP_ATOMIC);
+ if (IS_ERR(slave)) {
+ rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
+ return (void *)slave;
+ }
+ ah = _rdma_create_ah(pd, ah_attr, flags, NULL, slave);
+ rdma_lag_put_ah_roce_slave(slave);
rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
return ah;
}
@@ -594,7 +607,8 @@ struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
}
}
- ah = _rdma_create_ah(pd, ah_attr, RDMA_CREATE_AH_SLEEPABLE, udata);
+ ah = _rdma_create_ah(pd, ah_attr, RDMA_CREATE_AH_SLEEPABLE,
+ udata, NULL);
out:
rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
@@ -967,15 +981,29 @@ EXPORT_SYMBOL(rdma_destroy_ah_user);
/* Shared receive queues */
-struct ib_srq *ib_create_srq(struct ib_pd *pd,
- struct ib_srq_init_attr *srq_init_attr)
+/**
+ * ib_create_srq_user - Creates a SRQ associated with the specified protection
+ * domain.
+ * @pd: The protection domain associated with the SRQ.
+ * @srq_init_attr: A list of initial attributes required to create the
+ * SRQ. If SRQ creation succeeds, then the attributes are updated to
+ * the actual capabilities of the created SRQ.
+ * @uobject: uobject pointer if this is not a kernel SRQ
+ * @udata: udata pointer if this is not a kernel SRQ
+ *
+ * srq_attr->max_wr and srq_attr->max_sge are read to determine the
+ * requested size of the SRQ, and set to the actual values allocated
+ * on return. If ib_create_srq() succeeds, then max_wr and max_sge
+ * will always be at least as large as the requested values.
+ */
+struct ib_srq *ib_create_srq_user(struct ib_pd *pd,
+ struct ib_srq_init_attr *srq_init_attr,
+ struct ib_usrq_object *uobject,
+ struct ib_udata *udata)
{
struct ib_srq *srq;
int ret;
- if (!pd->device->ops.create_srq)
- return ERR_PTR(-EOPNOTSUPP);
-
srq = rdma_zalloc_drv_obj(pd->device, ib_srq);
if (!srq)
return ERR_PTR(-ENOMEM);
@@ -985,6 +1013,7 @@ struct ib_srq *ib_create_srq(struct ib_pd *pd,
srq->event_handler = srq_init_attr->event_handler;
srq->srq_context = srq_init_attr->srq_context;
srq->srq_type = srq_init_attr->srq_type;
+ srq->uobject = uobject;
if (ib_srq_has_cq(srq->srq_type)) {
srq->ext.cq = srq_init_attr->ext.cq;
@@ -996,7 +1025,7 @@ struct ib_srq *ib_create_srq(struct ib_pd *pd,
}
atomic_inc(&pd->usecnt);
- ret = pd->device->ops.create_srq(srq, srq_init_attr, NULL);
+ ret = pd->device->ops.create_srq(srq, srq_init_attr, udata);
if (ret) {
atomic_dec(&srq->pd->usecnt);
if (srq->srq_type == IB_SRQT_XRC)
@@ -1009,7 +1038,7 @@ struct ib_srq *ib_create_srq(struct ib_pd *pd,
return srq;
}
-EXPORT_SYMBOL(ib_create_srq);
+EXPORT_SYMBOL(ib_create_srq_user);
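A minimal kernel-consumer sketch of the contract described in the kernel-doc above. It assumes the ib_create_srq() convenience wrapper (declared in include/rdma/ib_verbs.h, not shown in this hunk) forwards to ib_create_srq_user() with NULL uobject and udata; teardown via ib_destroy_srq() and event handling are omitted.

/* Illustrative sketch only; pd must be a valid kernel protection domain. */
static struct ib_srq *example_create_basic_srq(struct ib_pd *pd)
{
	struct ib_srq_init_attr init_attr = {
		.srq_type = IB_SRQT_BASIC,
		.attr = {
			.max_wr  = 256,	/* requested depth, may be rounded up */
			.max_sge = 1,	/* requested SGEs per receive WR */
		},
	};
	struct ib_srq *srq;

	srq = ib_create_srq(pd, &init_attr);	/* kernel SRQ: no uobject/udata */
	if (IS_ERR(srq))
		return srq;

	/*
	 * On success, init_attr.attr.max_wr/max_sge now hold the values the
	 * device actually allocated, at least as large as those requested.
	 */
	return srq;
}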
int ib_modify_srq(struct ib_srq *srq,
struct ib_srq_attr *srq_attr,
@@ -1633,11 +1662,35 @@ static int _ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
const struct ib_gid_attr *old_sgid_attr_alt_av;
int ret;
+ attr->xmit_slave = NULL;
if (attr_mask & IB_QP_AV) {
ret = rdma_fill_sgid_attr(qp->device, &attr->ah_attr,
&old_sgid_attr_av);
if (ret)
return ret;
+
+ if (attr->ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE &&
+ is_qp_type_connected(qp)) {
+ struct net_device *slave;
+
+ /*
+ * If the user provided the qp_attr then we have to
+ * resolve it. Kernel users have to provide already
+ * resolved rdma_ah_attr's.
+ */
+ if (udata) {
+ ret = ib_resolve_eth_dmac(qp->device,
+ &attr->ah_attr);
+ if (ret)
+ goto out_av;
+ }
+ slave = rdma_lag_get_ah_roce_slave(qp->device,
+ &attr->ah_attr,
+ GFP_KERNEL);
+ if (IS_ERR(slave))
+ goto out_av;
+ attr->xmit_slave = slave;
+ }
}
if (attr_mask & IB_QP_ALT_PATH) {
/*
@@ -1664,18 +1717,6 @@ static int _ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
}
}
- /*
- * If the user provided the qp_attr then we have to resolve it. Kernel
- * users have to provide already resolved rdma_ah_attr's
- */
- if (udata && (attr_mask & IB_QP_AV) &&
- attr->ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE &&
- is_qp_type_connected(qp)) {
- ret = ib_resolve_eth_dmac(qp->device, &attr->ah_attr);
- if (ret)
- goto out;
- }
-
if (rdma_ib_or_roce(qp->device, port)) {
if (attr_mask & IB_QP_RQ_PSN && attr->rq_psn & ~0xffffff) {
dev_warn(&qp->device->dev,
@@ -1717,8 +1758,10 @@ out:
if (attr_mask & IB_QP_ALT_PATH)
rdma_unfill_sgid_attr(&attr->alt_ah_attr, old_sgid_attr_alt_av);
out_av:
- if (attr_mask & IB_QP_AV)
+ if (attr_mask & IB_QP_AV) {
+ rdma_lag_put_ah_roce_slave(attr->xmit_slave);
rdma_unfill_sgid_attr(&attr->ah_attr, old_sgid_attr_av);
+ }
return ret;
}
@@ -1962,6 +2005,9 @@ EXPORT_SYMBOL(__ib_create_cq);
int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
+ if (cq->shared)
+ return -EOPNOTSUPP;
+
return cq->device->ops.modify_cq ?
cq->device->ops.modify_cq(cq, cq_count,
cq_period) : -EOPNOTSUPP;
@@ -1970,6 +2016,9 @@ EXPORT_SYMBOL(rdma_set_cq_moderation);
int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata)
{
+ if (WARN_ON_ONCE(cq->shared))
+ return -EOPNOTSUPP;
+
if (atomic_read(&cq->usecnt))
return -EBUSY;
@@ -1982,6 +2031,9 @@ EXPORT_SYMBOL(ib_destroy_cq_user);
int ib_resize_cq(struct ib_cq *cq, int cqe)
{
+ if (cq->shared)
+ return -EOPNOTSUPP;
+
return cq->device->ops.resize_cq ?
cq->device->ops.resize_cq(cq, cqe, NULL) : -EOPNOTSUPP;
}
@@ -2160,54 +2212,6 @@ out:
}
EXPORT_SYMBOL(ib_alloc_mr_integrity);
-/* "Fast" memory regions */
-
-struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
- int mr_access_flags,
- struct ib_fmr_attr *fmr_attr)
-{
- struct ib_fmr *fmr;
-
- if (!pd->device->ops.alloc_fmr)
- return ERR_PTR(-EOPNOTSUPP);
-
- fmr = pd->device->ops.alloc_fmr(pd, mr_access_flags, fmr_attr);
- if (!IS_ERR(fmr)) {
- fmr->device = pd->device;
- fmr->pd = pd;
- atomic_inc(&pd->usecnt);
- }
-
- return fmr;
-}
-EXPORT_SYMBOL(ib_alloc_fmr);
-
-int ib_unmap_fmr(struct list_head *fmr_list)
-{
- struct ib_fmr *fmr;
-
- if (list_empty(fmr_list))
- return 0;
-
- fmr = list_entry(fmr_list->next, struct ib_fmr, list);
- return fmr->device->ops.unmap_fmr(fmr_list);
-}
-EXPORT_SYMBOL(ib_unmap_fmr);
-
-int ib_dealloc_fmr(struct ib_fmr *fmr)
-{
- struct ib_pd *pd;
- int ret;
-
- pd = fmr->pd;
- ret = fmr->device->ops.dealloc_fmr(fmr);
- if (!ret)
- atomic_dec(&pd->usecnt);
-
- return ret;
-}
-EXPORT_SYMBOL(ib_dealloc_fmr);
-
/* Multicast groups */
static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid)
@@ -2574,6 +2578,7 @@ EXPORT_SYMBOL(ib_map_mr_sg_pi);
* @page_size: page vector desired page size
*
* Constraints:
+ *
* - The first sg element is allowed to have an offset.
* - Each sg element must either be aligned to page_size or virtually
* contiguous to the previous element. In case an sg element has a
@@ -2607,10 +2612,12 @@ EXPORT_SYMBOL(ib_map_mr_sg);
* @mr: memory region
* @sgl: dma mapped scatterlist
* @sg_nents: number of entries in sg
- * @sg_offset_p: IN: start offset in bytes into sg
- * OUT: offset in bytes for element n of the sg of the first
+ * @sg_offset_p: ==== =======================================================
+ * IN start offset in bytes into sg
+ * OUT offset in bytes for element n of the sg of the first
* byte that has not been processed where n is the return
* value of this function.
+ * ==== =======================================================
* @set_page: driver page assignment function pointer
*
* Core service helper for drivers to convert the largest
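A short, hedged illustration of how a ULP typically drives ib_map_mr_sg() under the constraints documented in the hunk above; the scatterlist is assumed to be DMA-mapped already, the MR allocated with ib_alloc_mr(), and posting of the fast-registration work request is omitted.

/* Illustrative sketch only; not part of the patch. */
static int example_map_mr(struct ib_mr *mr, struct scatterlist *sgl,
			  int sg_nents)
{
	int n;

	n = ib_map_mr_sg(mr, sgl, sg_nents, NULL, PAGE_SIZE);
	if (n < 0)
		return n;		/* mapping failed outright */
	if (n < sg_nents)
		return -EINVAL;		/* an element violated the constraints above */

	/* mr->iova and mr->length now describe the mapped range. */
	return 0;
}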
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 95f6d493d1b9..8b6ad5cddfce 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -177,9 +177,6 @@ int bnxt_re_query_device(struct ib_device *ibdev,
ib_attr->max_total_mcast_qp_attach = 0;
ib_attr->max_ah = dev_attr->max_ah;
- ib_attr->max_fmr = 0;
- ib_attr->max_map_per_fmr = 0;
-
ib_attr->max_srq = dev_attr->max_srq;
ib_attr->max_srq_wr = dev_attr->max_srq_wqes;
ib_attr->max_srq_sge = dev_attr->max_srq_sges;
@@ -631,11 +628,12 @@ static u8 bnxt_re_stack_to_dev_nw_type(enum rdma_network_type ntype)
return nw_type;
}
-int bnxt_re_create_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr,
- u32 flags, struct ib_udata *udata)
+int bnxt_re_create_ah(struct ib_ah *ib_ah, struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata)
{
struct ib_pd *ib_pd = ib_ah->pd;
struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
+ struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
struct bnxt_re_dev *rdev = pd->rdev;
const struct ib_gid_attr *sgid_attr;
@@ -673,7 +671,8 @@ int bnxt_re_create_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr,
memcpy(ah->qplib_ah.dmac, ah_attr->roce.dmac, ETH_ALEN);
rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah,
- !(flags & RDMA_CREATE_AH_SLEEPABLE));
+ !(init_attr->flags &
+ RDMA_CREATE_AH_SLEEPABLE));
if (rc) {
ibdev_err(&rdev->ibdev, "Failed to allocate HW AH");
return rc;
@@ -856,7 +855,7 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
return -EFAULT;
- bytes = (qplib_qp->sq.max_wqe * BNXT_QPLIB_MAX_SQE_ENTRY_SIZE);
+ bytes = (qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size);
/* Consider mapping PSN search memory only for RC QPs. */
if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC) {
psn_sz = bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
@@ -879,7 +878,7 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
qplib_qp->qp_handle = ureq.qp_handle;
if (!qp->qplib_qp.srq) {
- bytes = (qplib_qp->rq.max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
+ bytes = (qplib_qp->rq.max_wqe * qplib_qp->rq.wqe_size);
bytes = PAGE_ALIGN(bytes);
umem = ib_umem_get(&rdev->ibdev, ureq.qprva, bytes,
IB_ACCESS_LOCAL_WRITE);
@@ -976,6 +975,7 @@ static struct bnxt_re_qp *bnxt_re_create_shadow_qp
qp->qplib_qp.sig_type = true;
/* Shadow QP SQ depth should be same as QP1 RQ depth */
+ qp->qplib_qp.sq.wqe_size = bnxt_re_get_swqe_size();
qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
qp->qplib_qp.sq.max_sge = 2;
/* Q full delta can be 1 since it is internal QP */
@@ -986,6 +986,7 @@ static struct bnxt_re_qp *bnxt_re_create_shadow_qp
qp->qplib_qp.scq = qp1_qp->scq;
qp->qplib_qp.rcq = qp1_qp->rcq;
+ qp->qplib_qp.rq.wqe_size = bnxt_re_get_rwqe_size();
qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
/* Q full delta can be 1 since it is internal QP */
@@ -1021,10 +1022,12 @@ static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp,
struct bnxt_qplib_dev_attr *dev_attr;
struct bnxt_qplib_qp *qplqp;
struct bnxt_re_dev *rdev;
+ struct bnxt_qplib_q *rq;
int entries;
rdev = qp->rdev;
qplqp = &qp->qplib_qp;
+ rq = &qplqp->rq;
dev_attr = &rdev->dev_attr;
if (init_attr->srq) {
@@ -1036,23 +1039,21 @@ static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp,
return -EINVAL;
}
qplqp->srq = &srq->qplib_srq;
- qplqp->rq.max_wqe = 0;
+ rq->max_wqe = 0;
} else {
+ rq->wqe_size = bnxt_re_get_rwqe_size();
/* Allocate 1 more than what's provided so posting max doesn't
* mean empty.
*/
entries = roundup_pow_of_two(init_attr->cap.max_recv_wr + 1);
- qplqp->rq.max_wqe = min_t(u32, entries,
- dev_attr->max_qp_wqes + 1);
-
- qplqp->rq.q_full_delta = qplqp->rq.max_wqe -
- init_attr->cap.max_recv_wr;
- qplqp->rq.max_sge = init_attr->cap.max_recv_sge;
- if (qplqp->rq.max_sge > dev_attr->max_qp_sges)
- qplqp->rq.max_sge = dev_attr->max_qp_sges;
+ rq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + 1);
+ rq->q_full_delta = rq->max_wqe - init_attr->cap.max_recv_wr;
+ rq->max_sge = init_attr->cap.max_recv_sge;
+ if (rq->max_sge > dev_attr->max_qp_sges)
+ rq->max_sge = dev_attr->max_qp_sges;
}
- qplqp->rq.sg_info.pgsize = PAGE_SIZE;
- qplqp->rq.sg_info.pgshft = PAGE_SHIFT;
+ rq->sg_info.pgsize = PAGE_SIZE;
+ rq->sg_info.pgshft = PAGE_SHIFT;
return 0;
}
@@ -1080,15 +1081,18 @@ static void bnxt_re_init_sq_attr(struct bnxt_re_qp *qp,
struct bnxt_qplib_dev_attr *dev_attr;
struct bnxt_qplib_qp *qplqp;
struct bnxt_re_dev *rdev;
+ struct bnxt_qplib_q *sq;
int entries;
rdev = qp->rdev;
qplqp = &qp->qplib_qp;
+ sq = &qplqp->sq;
dev_attr = &rdev->dev_attr;
- qplqp->sq.max_sge = init_attr->cap.max_send_sge;
- if (qplqp->sq.max_sge > dev_attr->max_qp_sges)
- qplqp->sq.max_sge = dev_attr->max_qp_sges;
+ sq->wqe_size = bnxt_re_get_swqe_size();
+ sq->max_sge = init_attr->cap.max_send_sge;
+ if (sq->max_sge > dev_attr->max_qp_sges)
+ sq->max_sge = dev_attr->max_qp_sges;
/*
* Change the SQ depth if user has requested minimum using
* configfs. Only supported for kernel consumers
@@ -1096,9 +1100,9 @@ static void bnxt_re_init_sq_attr(struct bnxt_re_qp *qp,
entries = init_attr->cap.max_send_wr;
/* Allocate 128 + 1 more than what's provided */
entries = roundup_pow_of_two(entries + BNXT_QPLIB_RESERVED_QP_WRS + 1);
- qplqp->sq.max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes +
- BNXT_QPLIB_RESERVED_QP_WRS + 1);
- qplqp->sq.q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1;
+ sq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes +
+ BNXT_QPLIB_RESERVED_QP_WRS + 1);
+ sq->q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1;
/*
* Reserving one slot for Phantom WQE. Application can
* post one extra entry in this case. But allowing this to avoid
@@ -1511,7 +1515,7 @@ static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
return -EFAULT;
- bytes = (qplib_srq->max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
+ bytes = (qplib_srq->max_wqe * qplib_srq->wqe_size);
bytes = PAGE_ALIGN(bytes);
umem = ib_umem_get(&rdev->ibdev, ureq.srqva, bytes,
IB_ACCESS_LOCAL_WRITE);
@@ -1534,15 +1538,20 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
struct ib_srq_init_attr *srq_init_attr,
struct ib_udata *udata)
{
- struct ib_pd *ib_pd = ib_srq->pd;
- struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
- struct bnxt_re_dev *rdev = pd->rdev;
- struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
- struct bnxt_re_srq *srq =
- container_of(ib_srq, struct bnxt_re_srq, ib_srq);
+ struct bnxt_qplib_dev_attr *dev_attr;
struct bnxt_qplib_nq *nq = NULL;
+ struct bnxt_re_dev *rdev;
+ struct bnxt_re_srq *srq;
+ struct bnxt_re_pd *pd;
+ struct ib_pd *ib_pd;
int rc, entries;
+ ib_pd = ib_srq->pd;
+ pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
+ rdev = pd->rdev;
+ dev_attr = &rdev->dev_attr;
+ srq = container_of(ib_srq, struct bnxt_re_srq, ib_srq);
+
if (srq_init_attr->attr.max_wr >= dev_attr->max_srq_wqes) {
ibdev_err(&rdev->ibdev, "Create CQ failed - max exceeded");
rc = -EINVAL;
@@ -1563,8 +1572,9 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
entries = roundup_pow_of_two(srq_init_attr->attr.max_wr + 1);
if (entries > dev_attr->max_srq_wqes + 1)
entries = dev_attr->max_srq_wqes + 1;
-
srq->qplib_srq.max_wqe = entries;
+
+ srq->qplib_srq.wqe_size = bnxt_re_get_rwqe_size();
srq->qplib_srq.max_sge = srq_init_attr->attr.max_sge;
srq->qplib_srq.threshold = srq_init_attr->attr.srq_limit;
srq->srq_limit = srq_init_attr->attr.srq_limit;
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
index 23d972da5652..e5fbbeba6d28 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -122,12 +122,6 @@ struct bnxt_re_frpl {
u64 *page_list;
};
-struct bnxt_re_fmr {
- struct bnxt_re_dev *rdev;
- struct ib_fmr ib_fmr;
- struct bnxt_qplib_mrw qplib_fmr;
-};
-
struct bnxt_re_mw {
struct bnxt_re_dev *rdev;
struct ib_mw ib_mw;
@@ -142,6 +136,16 @@ struct bnxt_re_ucontext {
spinlock_t sh_lock; /* protect shpg */
};
+static inline u16 bnxt_re_get_swqe_size(void)
+{
+ return sizeof(struct sq_send);
+}
+
+static inline u16 bnxt_re_get_rwqe_size(void)
+{
+ return sizeof(struct rq_wqe);
+}
+
int bnxt_re_query_device(struct ib_device *ibdev,
struct ib_device_attr *ib_attr,
struct ib_udata *udata);
@@ -160,7 +164,7 @@ enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
u8 port_num);
int bnxt_re_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
void bnxt_re_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
-int bnxt_re_create_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr, u32 flags,
+int bnxt_re_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
struct ib_udata *udata);
int bnxt_re_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
int bnxt_re_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 899a5d2c100e..c5e29577cd43 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -300,12 +300,12 @@ static void bnxt_qplib_service_nq(unsigned long data)
{
struct bnxt_qplib_nq *nq = (struct bnxt_qplib_nq *)data;
struct bnxt_qplib_hwq *hwq = &nq->hwq;
- struct nq_base *nqe, **nq_ptr;
- struct bnxt_qplib_cq *cq;
- int num_cqne_processed = 0;
int num_srqne_processed = 0;
+ int num_cqne_processed = 0;
+ struct bnxt_qplib_cq *cq;
int budget = nq->budget;
u32 sw_cons, raw_cons;
+ struct nq_base *nqe;
uintptr_t q_handle;
u16 type;
@@ -314,8 +314,7 @@ static void bnxt_qplib_service_nq(unsigned long data)
raw_cons = hwq->cons;
while (budget--) {
sw_cons = HWQ_CMP(raw_cons, hwq);
- nq_ptr = (struct nq_base **)hwq->pbl_ptr;
- nqe = &nq_ptr[NQE_PG(sw_cons)][NQE_IDX(sw_cons)];
+ nqe = bnxt_qplib_get_qe(hwq, sw_cons, NULL);
if (!NQE_CMP_VALID(nqe, raw_cons, hwq->max_elements))
break;
@@ -392,13 +391,11 @@ static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
{
struct bnxt_qplib_nq *nq = dev_instance;
struct bnxt_qplib_hwq *hwq = &nq->hwq;
- struct nq_base **nq_ptr;
u32 sw_cons;
/* Prefetch the NQ element */
sw_cons = HWQ_CMP(hwq->cons, hwq);
- nq_ptr = (struct nq_base **)nq->hwq.pbl_ptr;
- prefetch(&nq_ptr[NQE_PG(sw_cons)][NQE_IDX(sw_cons)]);
+ prefetch(bnxt_qplib_get_qe(hwq, sw_cons, NULL));
/* Fan out to CPU affinitized kthreads? */
tasklet_schedule(&nq->nq_tasklet);
@@ -612,12 +609,13 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
struct cmdq_create_srq req;
struct bnxt_qplib_pbl *pbl;
u16 cmd_flags = 0;
+ u16 pg_sz_lvl;
int rc, idx;
hwq_attr.res = res;
hwq_attr.sginfo = &srq->sg_info;
hwq_attr.depth = srq->max_wqe;
- hwq_attr.stride = BNXT_QPLIB_MAX_RQE_ENTRY_SIZE;
+ hwq_attr.stride = srq->wqe_size;
hwq_attr.type = HWQ_TYPE_QUEUE;
rc = bnxt_qplib_alloc_init_hwq(&srq->hwq, &hwq_attr);
if (rc)
@@ -638,22 +636,11 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
req.srq_size = cpu_to_le16((u16)srq->hwq.max_elements);
pbl = &srq->hwq.pbl[PBL_LVL_0];
- req.pg_size_lvl = cpu_to_le16((((u16)srq->hwq.level &
- CMDQ_CREATE_SRQ_LVL_MASK) <<
- CMDQ_CREATE_SRQ_LVL_SFT) |
- (pbl->pg_size == ROCE_PG_SIZE_4K ?
- CMDQ_CREATE_SRQ_PG_SIZE_PG_4K :
- pbl->pg_size == ROCE_PG_SIZE_8K ?
- CMDQ_CREATE_SRQ_PG_SIZE_PG_8K :
- pbl->pg_size == ROCE_PG_SIZE_64K ?
- CMDQ_CREATE_SRQ_PG_SIZE_PG_64K :
- pbl->pg_size == ROCE_PG_SIZE_2M ?
- CMDQ_CREATE_SRQ_PG_SIZE_PG_2M :
- pbl->pg_size == ROCE_PG_SIZE_8M ?
- CMDQ_CREATE_SRQ_PG_SIZE_PG_8M :
- pbl->pg_size == ROCE_PG_SIZE_1G ?
- CMDQ_CREATE_SRQ_PG_SIZE_PG_1G :
- CMDQ_CREATE_SRQ_PG_SIZE_PG_4K));
+ pg_sz_lvl = ((u16)bnxt_qplib_base_pg_size(&srq->hwq) <<
+ CMDQ_CREATE_SRQ_PG_SIZE_SFT);
+ pg_sz_lvl |= (srq->hwq.level & CMDQ_CREATE_SRQ_LVL_MASK) <<
+ CMDQ_CREATE_SRQ_LVL_SFT;
+ req.pg_size_lvl = cpu_to_le16(pg_sz_lvl);
req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
req.pd_id = cpu_to_le32(srq->pd->id);
req.eventq_id = cpu_to_le16(srq->eventq_hw_ring_id);
@@ -740,7 +727,7 @@ int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
struct bnxt_qplib_swqe *wqe)
{
struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
- struct rq_wqe *srqe, **srqe_ptr;
+ struct rq_wqe *srqe;
struct sq_sge *hw_sge;
u32 sw_prod, sw_cons, count = 0;
int i, rc = 0, next;
@@ -758,9 +745,8 @@ int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
spin_unlock(&srq_hwq->lock);
sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
- srqe_ptr = (struct rq_wqe **)srq_hwq->pbl_ptr;
- srqe = &srqe_ptr[RQE_PG(sw_prod)][RQE_IDX(sw_prod)];
- memset(srqe, 0, BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
+ srqe = bnxt_qplib_get_qe(srq_hwq, sw_prod, NULL);
+ memset(srqe, 0, srq->wqe_size);
/* Calculate wqe_size16 and data_len */
for (i = 0, hw_sge = (struct sq_sge *)srqe->data;
i < wqe->num_sge; i++, hw_sge++) {
@@ -809,6 +795,7 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
struct bnxt_qplib_pbl *pbl;
u16 cmd_flags = 0;
u32 qp_flags = 0;
+ u8 pg_sz_lvl;
int rc;
RCFW_CMD_PREP(req, CREATE_QP1, cmd_flags);
@@ -822,7 +809,7 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
hwq_attr.res = res;
hwq_attr.sginfo = &sq->sg_info;
hwq_attr.depth = sq->max_wqe;
- hwq_attr.stride = BNXT_QPLIB_MAX_SQE_ENTRY_SIZE;
+ hwq_attr.stride = sq->wqe_size;
hwq_attr.type = HWQ_TYPE_QUEUE;
rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
if (rc)
@@ -835,33 +822,18 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
}
pbl = &sq->hwq.pbl[PBL_LVL_0];
req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
- req.sq_pg_size_sq_lvl =
- ((sq->hwq.level & CMDQ_CREATE_QP1_SQ_LVL_MASK)
- << CMDQ_CREATE_QP1_SQ_LVL_SFT) |
- (pbl->pg_size == ROCE_PG_SIZE_4K ?
- CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_4K :
- pbl->pg_size == ROCE_PG_SIZE_8K ?
- CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_8K :
- pbl->pg_size == ROCE_PG_SIZE_64K ?
- CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_64K :
- pbl->pg_size == ROCE_PG_SIZE_2M ?
- CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_2M :
- pbl->pg_size == ROCE_PG_SIZE_8M ?
- CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_8M :
- pbl->pg_size == ROCE_PG_SIZE_1G ?
- CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_1G :
- CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_4K);
+ pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
+ CMDQ_CREATE_QP1_SQ_PG_SIZE_SFT);
+ pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP1_SQ_LVL_MASK);
+ req.sq_pg_size_sq_lvl = pg_sz_lvl;
if (qp->scq)
req.scq_cid = cpu_to_le32(qp->scq->id);
-
- qp_flags |= CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE;
-
/* RQ */
if (rq->max_wqe) {
hwq_attr.res = res;
hwq_attr.sginfo = &rq->sg_info;
- hwq_attr.stride = BNXT_QPLIB_MAX_RQE_ENTRY_SIZE;
+ hwq_attr.stride = rq->wqe_size;
hwq_attr.depth = qp->rq.max_wqe;
hwq_attr.type = HWQ_TYPE_QUEUE;
rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
@@ -876,32 +848,20 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
}
pbl = &rq->hwq.pbl[PBL_LVL_0];
req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
- req.rq_pg_size_rq_lvl =
- ((rq->hwq.level & CMDQ_CREATE_QP1_RQ_LVL_MASK) <<
- CMDQ_CREATE_QP1_RQ_LVL_SFT) |
- (pbl->pg_size == ROCE_PG_SIZE_4K ?
- CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_4K :
- pbl->pg_size == ROCE_PG_SIZE_8K ?
- CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_8K :
- pbl->pg_size == ROCE_PG_SIZE_64K ?
- CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_64K :
- pbl->pg_size == ROCE_PG_SIZE_2M ?
- CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_2M :
- pbl->pg_size == ROCE_PG_SIZE_8M ?
- CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_8M :
- pbl->pg_size == ROCE_PG_SIZE_1G ?
- CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_1G :
- CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_4K);
+ pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
+ CMDQ_CREATE_QP1_RQ_PG_SIZE_SFT);
+ pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP1_RQ_LVL_MASK);
+ req.rq_pg_size_rq_lvl = pg_sz_lvl;
if (qp->rcq)
req.rcq_cid = cpu_to_le32(qp->rcq->id);
}
-
/* Header buffer - allow hdr_buf pass in */
rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
if (rc) {
rc = -ENOMEM;
goto fail;
}
+ qp_flags |= CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE;
req.qp_flags = cpu_to_le32(qp_flags);
req.sq_size = cpu_to_le32(sq->hwq.max_elements);
req.rq_size = cpu_to_le32(rq->hwq.max_elements);
@@ -948,23 +908,47 @@ exit:
return rc;
}
+static void bnxt_qplib_init_psn_ptr(struct bnxt_qplib_qp *qp, int size)
+{
+ struct bnxt_qplib_hwq *hwq;
+ struct bnxt_qplib_q *sq;
+ u64 fpsne, psne, psn_pg;
+ u16 indx_pad = 0, indx;
+ u16 pg_num, pg_indx;
+ u64 *page;
+
+ sq = &qp->sq;
+ hwq = &sq->hwq;
+
+ fpsne = (u64)bnxt_qplib_get_qe(hwq, hwq->max_elements, &psn_pg);
+ if (!IS_ALIGNED(fpsne, PAGE_SIZE))
+ indx_pad = ALIGN(fpsne, PAGE_SIZE) / size;
+
+ page = (u64 *)psn_pg;
+ for (indx = 0; indx < hwq->max_elements; indx++) {
+ pg_num = (indx + indx_pad) / (PAGE_SIZE / size);
+ pg_indx = (indx + indx_pad) % (PAGE_SIZE / size);
+ psne = page[pg_num] + pg_indx * size;
+ sq->swq[indx].psn_ext = (struct sq_psn_search_ext *)psne;
+ sq->swq[indx].psn_search = (struct sq_psn_search *)psne;
+ }
+}
+
int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct bnxt_qplib_hwq_attr hwq_attr = {};
- unsigned long int psn_search, poff = 0;
struct bnxt_qplib_sg_info sginfo = {};
- struct sq_psn_search **psn_search_ptr;
struct bnxt_qplib_q *sq = &qp->sq;
struct bnxt_qplib_q *rq = &qp->rq;
- int i, rc, req_size, psn_sz = 0;
- struct sq_send **hw_sq_send_ptr;
struct creq_create_qp_resp resp;
+ int rc, req_size, psn_sz = 0;
struct bnxt_qplib_hwq *xrrq;
u16 cmd_flags = 0, max_ssge;
- struct cmdq_create_qp req;
struct bnxt_qplib_pbl *pbl;
+ struct cmdq_create_qp req;
u32 qp_flags = 0;
+ u8 pg_sz_lvl;
u16 max_rsge;
RCFW_CMD_PREP(req, CREATE_QP, cmd_flags);
@@ -983,7 +967,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
hwq_attr.res = res;
hwq_attr.sginfo = &sq->sg_info;
- hwq_attr.stride = BNXT_QPLIB_MAX_SQE_ENTRY_SIZE;
+ hwq_attr.stride = sq->wqe_size;
hwq_attr.depth = sq->max_wqe;
hwq_attr.aux_stride = psn_sz;
hwq_attr.aux_depth = hwq_attr.depth;
@@ -997,64 +981,25 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
rc = -ENOMEM;
goto fail_sq;
}
- hw_sq_send_ptr = (struct sq_send **)sq->hwq.pbl_ptr;
- if (psn_sz) {
- psn_search_ptr = (struct sq_psn_search **)
- &hw_sq_send_ptr[get_sqe_pg
- (sq->hwq.max_elements)];
- psn_search = (unsigned long int)
- &hw_sq_send_ptr[get_sqe_pg(sq->hwq.max_elements)]
- [get_sqe_idx(sq->hwq.max_elements)];
- if (psn_search & ~PAGE_MASK) {
- /* If the psn_search does not start on a page boundary,
- * then calculate the offset
- */
- poff = (psn_search & ~PAGE_MASK) /
- BNXT_QPLIB_MAX_PSNE_ENTRY_SIZE;
- }
- for (i = 0; i < sq->hwq.max_elements; i++) {
- sq->swq[i].psn_search =
- &psn_search_ptr[get_psne_pg(i + poff)]
- [get_psne_idx(i + poff)];
- /*psns_ext will be used only for P5 chips. */
- sq->swq[i].psn_ext =
- (struct sq_psn_search_ext *)
- &psn_search_ptr[get_psne_pg(i + poff)]
- [get_psne_idx(i + poff)];
- }
- }
+
+ if (psn_sz)
+ bnxt_qplib_init_psn_ptr(qp, psn_sz);
+
pbl = &sq->hwq.pbl[PBL_LVL_0];
req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
- req.sq_pg_size_sq_lvl =
- ((sq->hwq.level & CMDQ_CREATE_QP_SQ_LVL_MASK)
- << CMDQ_CREATE_QP_SQ_LVL_SFT) |
- (pbl->pg_size == ROCE_PG_SIZE_4K ?
- CMDQ_CREATE_QP_SQ_PG_SIZE_PG_4K :
- pbl->pg_size == ROCE_PG_SIZE_8K ?
- CMDQ_CREATE_QP_SQ_PG_SIZE_PG_8K :
- pbl->pg_size == ROCE_PG_SIZE_64K ?
- CMDQ_CREATE_QP_SQ_PG_SIZE_PG_64K :
- pbl->pg_size == ROCE_PG_SIZE_2M ?
- CMDQ_CREATE_QP_SQ_PG_SIZE_PG_2M :
- pbl->pg_size == ROCE_PG_SIZE_8M ?
- CMDQ_CREATE_QP_SQ_PG_SIZE_PG_8M :
- pbl->pg_size == ROCE_PG_SIZE_1G ?
- CMDQ_CREATE_QP_SQ_PG_SIZE_PG_1G :
- CMDQ_CREATE_QP_SQ_PG_SIZE_PG_4K);
+ pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
+ CMDQ_CREATE_QP_SQ_PG_SIZE_SFT);
+ pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP_SQ_LVL_MASK);
+ req.sq_pg_size_sq_lvl = pg_sz_lvl;
if (qp->scq)
req.scq_cid = cpu_to_le32(qp->scq->id);
- qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE;
- qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED;
- if (qp->sig_type)
- qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION;
-
/* RQ */
if (rq->max_wqe) {
hwq_attr.res = res;
hwq_attr.sginfo = &rq->sg_info;
- hwq_attr.stride = BNXT_QPLIB_MAX_RQE_ENTRY_SIZE;
+ hwq_attr.stride = rq->wqe_size;
hwq_attr.depth = rq->max_wqe;
hwq_attr.aux_stride = 0;
hwq_attr.aux_depth = 0;
@@ -1071,22 +1016,10 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
}
pbl = &rq->hwq.pbl[PBL_LVL_0];
req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
- req.rq_pg_size_rq_lvl =
- ((rq->hwq.level & CMDQ_CREATE_QP_RQ_LVL_MASK) <<
- CMDQ_CREATE_QP_RQ_LVL_SFT) |
- (pbl->pg_size == ROCE_PG_SIZE_4K ?
- CMDQ_CREATE_QP_RQ_PG_SIZE_PG_4K :
- pbl->pg_size == ROCE_PG_SIZE_8K ?
- CMDQ_CREATE_QP_RQ_PG_SIZE_PG_8K :
- pbl->pg_size == ROCE_PG_SIZE_64K ?
- CMDQ_CREATE_QP_RQ_PG_SIZE_PG_64K :
- pbl->pg_size == ROCE_PG_SIZE_2M ?
- CMDQ_CREATE_QP_RQ_PG_SIZE_PG_2M :
- pbl->pg_size == ROCE_PG_SIZE_8M ?
- CMDQ_CREATE_QP_RQ_PG_SIZE_PG_8M :
- pbl->pg_size == ROCE_PG_SIZE_1G ?
- CMDQ_CREATE_QP_RQ_PG_SIZE_PG_1G :
- CMDQ_CREATE_QP_RQ_PG_SIZE_PG_4K);
+ pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
+ CMDQ_CREATE_QP_RQ_PG_SIZE_SFT);
+ pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP_RQ_LVL_MASK);
+ req.rq_pg_size_rq_lvl = pg_sz_lvl;
} else {
/* SRQ */
if (qp->srq) {
@@ -1097,7 +1030,13 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
if (qp->rcq)
req.rcq_cid = cpu_to_le32(qp->rcq->id);
+
+ qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE;
+ qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED;
+ if (qp->sig_type)
+ qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION;
req.qp_flags = cpu_to_le32(qp_flags);
+
req.sq_size = cpu_to_le32(sq->hwq.max_elements);
req.rq_size = cpu_to_le32(rq->hwq.max_elements);
qp->sq_hdr_buf = NULL;
@@ -1483,12 +1422,11 @@ bail:
static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
{
struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
- struct cq_base *hw_cqe, **hw_cqe_ptr;
+ struct cq_base *hw_cqe;
int i;
for (i = 0; i < cq_hwq->max_elements; i++) {
- hw_cqe_ptr = (struct cq_base **)cq_hwq->pbl_ptr;
- hw_cqe = &hw_cqe_ptr[CQE_PG(i)][CQE_IDX(i)];
+ hw_cqe = bnxt_qplib_get_qe(cq_hwq, i, NULL);
if (!CQE_CMP_VALID(hw_cqe, i, cq_hwq->max_elements))
continue;
/*
@@ -1615,6 +1553,34 @@ void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
return NULL;
}
+static void bnxt_qplib_fill_psn_search(struct bnxt_qplib_qp *qp,
+ struct bnxt_qplib_swqe *wqe,
+ struct bnxt_qplib_swq *swq)
+{
+ struct sq_psn_search_ext *psns_ext;
+ struct sq_psn_search *psns;
+ u32 flg_npsn;
+ u32 op_spsn;
+
+ psns = swq->psn_search;
+ psns_ext = swq->psn_ext;
+
+ op_spsn = ((swq->start_psn << SQ_PSN_SEARCH_START_PSN_SFT) &
+ SQ_PSN_SEARCH_START_PSN_MASK);
+ op_spsn |= ((wqe->type << SQ_PSN_SEARCH_OPCODE_SFT) &
+ SQ_PSN_SEARCH_OPCODE_MASK);
+ flg_npsn = ((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
+ SQ_PSN_SEARCH_NEXT_PSN_MASK);
+
+ if (bnxt_qplib_is_chip_gen_p5(qp->cctx)) {
+ psns_ext->opcode_start_psn = cpu_to_le32(op_spsn);
+ psns_ext->flags_next_psn = cpu_to_le32(flg_npsn);
+ } else {
+ psns->opcode_start_psn = cpu_to_le32(op_spsn);
+ psns->flags_next_psn = cpu_to_le32(flg_npsn);
+ }
+}
+
void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp)
{
struct bnxt_qplib_q *sq = &qp->sq;
@@ -1625,16 +1591,16 @@ void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp)
int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
struct bnxt_qplib_swqe *wqe)
{
+ struct bnxt_qplib_nq_work *nq_work = NULL;
+ int i, rc = 0, data_len = 0, pkt_num = 0;
struct bnxt_qplib_q *sq = &qp->sq;
+ struct sq_send *hw_sq_send_hdr;
struct bnxt_qplib_swq *swq;
- struct sq_send *hw_sq_send_hdr, **hw_sq_send_ptr;
- struct sq_sge *hw_sge;
- struct bnxt_qplib_nq_work *nq_work = NULL;
bool sch_handler = false;
- u32 sw_prod;
+ struct sq_sge *hw_sge;
u8 wqe_size16;
- int i, rc = 0, data_len = 0, pkt_num = 0;
__le32 temp32;
+ u32 sw_prod;
if (qp->state != CMDQ_MODIFY_QP_NEW_STATE_RTS) {
if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
@@ -1663,11 +1629,8 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;
swq->start_psn = sq->psn & BTH_PSN_MASK;
- hw_sq_send_ptr = (struct sq_send **)sq->hwq.pbl_ptr;
- hw_sq_send_hdr = &hw_sq_send_ptr[get_sqe_pg(sw_prod)]
- [get_sqe_idx(sw_prod)];
-
- memset(hw_sq_send_hdr, 0, BNXT_QPLIB_MAX_SQE_ENTRY_SIZE);
+ hw_sq_send_hdr = bnxt_qplib_get_qe(&sq->hwq, sw_prod, NULL);
+ memset(hw_sq_send_hdr, 0, sq->wqe_size);
if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE) {
/* Copy the inline data */
@@ -1854,28 +1817,8 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
goto done;
}
swq->next_psn = sq->psn & BTH_PSN_MASK;
- if (swq->psn_search) {
- u32 opcd_spsn;
- u32 flg_npsn;
-
- opcd_spsn = ((swq->start_psn << SQ_PSN_SEARCH_START_PSN_SFT) &
- SQ_PSN_SEARCH_START_PSN_MASK);
- opcd_spsn |= ((wqe->type << SQ_PSN_SEARCH_OPCODE_SFT) &
- SQ_PSN_SEARCH_OPCODE_MASK);
- flg_npsn = ((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
- SQ_PSN_SEARCH_NEXT_PSN_MASK);
- if (bnxt_qplib_is_chip_gen_p5(qp->cctx)) {
- swq->psn_ext->opcode_start_psn =
- cpu_to_le32(opcd_spsn);
- swq->psn_ext->flags_next_psn =
- cpu_to_le32(flg_npsn);
- } else {
- swq->psn_search->opcode_start_psn =
- cpu_to_le32(opcd_spsn);
- swq->psn_search->flags_next_psn =
- cpu_to_le32(flg_npsn);
- }
- }
+ if (qp->type == CMDQ_CREATE_QP_TYPE_RC)
+ bnxt_qplib_fill_psn_search(qp, wqe, swq);
queue_err:
if (sch_handler) {
/* Store the ULP info in the software structures */
@@ -1918,13 +1861,13 @@ void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp)
int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
struct bnxt_qplib_swqe *wqe)
{
- struct bnxt_qplib_q *rq = &qp->rq;
- struct rq_wqe *rqe, **rqe_ptr;
- struct sq_sge *hw_sge;
struct bnxt_qplib_nq_work *nq_work = NULL;
+ struct bnxt_qplib_q *rq = &qp->rq;
bool sch_handler = false;
- u32 sw_prod;
+ struct sq_sge *hw_sge;
+ struct rq_wqe *rqe;
int i, rc = 0;
+ u32 sw_prod;
if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
sch_handler = true;
@@ -1941,10 +1884,8 @@ int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
rq->swq[sw_prod].wr_id = wqe->wr_id;
- rqe_ptr = (struct rq_wqe **)rq->hwq.pbl_ptr;
- rqe = &rqe_ptr[RQE_PG(sw_prod)][RQE_IDX(sw_prod)];
-
- memset(rqe, 0, BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
+ rqe = bnxt_qplib_get_qe(&rq->hwq, sw_prod, NULL);
+ memset(rqe, 0, rq->wqe_size);
/* Calculate wqe_size16 and data_len */
for (i = 0, hw_sge = (struct sq_sge *)rqe->data;
@@ -1997,9 +1938,10 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct bnxt_qplib_hwq_attr hwq_attr = {};
struct creq_create_cq_resp resp;
- struct cmdq_create_cq req;
struct bnxt_qplib_pbl *pbl;
+ struct cmdq_create_cq req;
u16 cmd_flags = 0;
+ u32 pg_sz_lvl;
int rc;
hwq_attr.res = res;
@@ -2020,22 +1962,13 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
}
req.dpi = cpu_to_le32(cq->dpi->dpi);
req.cq_handle = cpu_to_le64(cq->cq_handle);
-
req.cq_size = cpu_to_le32(cq->hwq.max_elements);
pbl = &cq->hwq.pbl[PBL_LVL_0];
- req.pg_size_lvl = cpu_to_le32(
- ((cq->hwq.level & CMDQ_CREATE_CQ_LVL_MASK) <<
- CMDQ_CREATE_CQ_LVL_SFT) |
- (pbl->pg_size == ROCE_PG_SIZE_4K ? CMDQ_CREATE_CQ_PG_SIZE_PG_4K :
- pbl->pg_size == ROCE_PG_SIZE_8K ? CMDQ_CREATE_CQ_PG_SIZE_PG_8K :
- pbl->pg_size == ROCE_PG_SIZE_64K ? CMDQ_CREATE_CQ_PG_SIZE_PG_64K :
- pbl->pg_size == ROCE_PG_SIZE_2M ? CMDQ_CREATE_CQ_PG_SIZE_PG_2M :
- pbl->pg_size == ROCE_PG_SIZE_8M ? CMDQ_CREATE_CQ_PG_SIZE_PG_8M :
- pbl->pg_size == ROCE_PG_SIZE_1G ? CMDQ_CREATE_CQ_PG_SIZE_PG_1G :
- CMDQ_CREATE_CQ_PG_SIZE_PG_4K));
-
+ pg_sz_lvl = (bnxt_qplib_base_pg_size(&cq->hwq) <<
+ CMDQ_CREATE_CQ_PG_SIZE_SFT);
+ pg_sz_lvl |= (cq->hwq.level & CMDQ_CREATE_CQ_LVL_MASK);
+ req.pg_size_lvl = cpu_to_le32(pg_sz_lvl);
req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
-
req.cq_fco_cnq_id = cpu_to_le32(
(cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) <<
CMDQ_CREATE_CQ_CNQ_ID_SFT);
@@ -2194,13 +2127,13 @@ void bnxt_qplib_mark_qp_error(void *qp_handle)
static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
u32 cq_cons, u32 sw_sq_cons, u32 cqe_sq_cons)
{
- struct bnxt_qplib_q *sq = &qp->sq;
- struct bnxt_qplib_swq *swq;
u32 peek_sw_cq_cons, peek_raw_cq_cons, peek_sq_cons_idx;
- struct cq_base *peek_hwcqe, **peek_hw_cqe_ptr;
+ struct bnxt_qplib_q *sq = &qp->sq;
struct cq_req *peek_req_hwcqe;
struct bnxt_qplib_qp *peek_qp;
struct bnxt_qplib_q *peek_sq;
+ struct bnxt_qplib_swq *swq;
+ struct cq_base *peek_hwcqe;
int i, rc = 0;
/* Normal mode */
@@ -2230,9 +2163,8 @@ static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
i = cq->hwq.max_elements;
while (i--) {
peek_sw_cq_cons = HWQ_CMP((peek_sw_cq_cons), &cq->hwq);
- peek_hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
- peek_hwcqe = &peek_hw_cqe_ptr[CQE_PG(peek_sw_cq_cons)]
- [CQE_IDX(peek_sw_cq_cons)];
+ peek_hwcqe = bnxt_qplib_get_qe(&cq->hwq,
+ peek_sw_cq_cons, NULL);
/* If the next hwcqe is VALID */
if (CQE_CMP_VALID(peek_hwcqe, peek_raw_cq_cons,
cq->hwq.max_elements)) {
@@ -2294,11 +2226,11 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
struct bnxt_qplib_cqe **pcqe, int *budget,
u32 cq_cons, struct bnxt_qplib_qp **lib_qp)
{
- struct bnxt_qplib_qp *qp;
- struct bnxt_qplib_q *sq;
- struct bnxt_qplib_cqe *cqe;
u32 sw_sq_cons, cqe_sq_cons;
struct bnxt_qplib_swq *swq;
+ struct bnxt_qplib_cqe *cqe;
+ struct bnxt_qplib_qp *qp;
+ struct bnxt_qplib_q *sq;
int rc = 0;
qp = (struct bnxt_qplib_qp *)((unsigned long)
@@ -2408,10 +2340,10 @@ static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
struct bnxt_qplib_cqe **pcqe,
int *budget)
{
- struct bnxt_qplib_qp *qp;
- struct bnxt_qplib_q *rq;
struct bnxt_qplib_srq *srq;
struct bnxt_qplib_cqe *cqe;
+ struct bnxt_qplib_qp *qp;
+ struct bnxt_qplib_q *rq;
u32 wr_id_idx;
int rc = 0;
@@ -2483,10 +2415,10 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
struct bnxt_qplib_cqe **pcqe,
int *budget)
{
- struct bnxt_qplib_qp *qp;
- struct bnxt_qplib_q *rq;
struct bnxt_qplib_srq *srq;
struct bnxt_qplib_cqe *cqe;
+ struct bnxt_qplib_qp *qp;
+ struct bnxt_qplib_q *rq;
u32 wr_id_idx;
int rc = 0;
@@ -2561,15 +2493,13 @@ done:
bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
{
- struct cq_base *hw_cqe, **hw_cqe_ptr;
+ struct cq_base *hw_cqe;
u32 sw_cons, raw_cons;
bool rc = true;
raw_cons = cq->hwq.cons;
sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
- hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
- hw_cqe = &hw_cqe_ptr[CQE_PG(sw_cons)][CQE_IDX(sw_cons)];
-
+ hw_cqe = bnxt_qplib_get_qe(&cq->hwq, sw_cons, NULL);
/* Check for Valid bit. If the CQE is valid, return false */
rc = !CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements);
return rc;
@@ -2813,7 +2743,7 @@ int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
int num_cqes, struct bnxt_qplib_qp **lib_qp)
{
- struct cq_base *hw_cqe, **hw_cqe_ptr;
+ struct cq_base *hw_cqe;
u32 sw_cons, raw_cons;
int budget, rc = 0;
@@ -2822,8 +2752,7 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
while (budget) {
sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
- hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
- hw_cqe = &hw_cqe_ptr[CQE_PG(sw_cons)][CQE_IDX(sw_cons)];
+ hw_cqe = bnxt_qplib_get_qe(&cq->hwq, sw_cons, NULL);
/* Check for Valid bit */
if (!CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements))
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
index 7edb70b6bb16..568ca390322c 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
@@ -45,6 +45,7 @@ struct bnxt_qplib_srq {
struct bnxt_qplib_db_info dbinfo;
u64 srq_handle;
u32 id;
+ u16 wqe_size;
u32 max_wqe;
u32 max_sge;
u32 threshold;
@@ -65,38 +66,7 @@ struct bnxt_qplib_sge {
u32 size;
};
-#define BNXT_QPLIB_MAX_SQE_ENTRY_SIZE sizeof(struct sq_send)
-
-#define SQE_CNT_PER_PG (PAGE_SIZE / BNXT_QPLIB_MAX_SQE_ENTRY_SIZE)
-#define SQE_MAX_IDX_PER_PG (SQE_CNT_PER_PG - 1)
-
-static inline u32 get_sqe_pg(u32 val)
-{
- return ((val & ~SQE_MAX_IDX_PER_PG) / SQE_CNT_PER_PG);
-}
-
-static inline u32 get_sqe_idx(u32 val)
-{
- return (val & SQE_MAX_IDX_PER_PG);
-}
-
-#define BNXT_QPLIB_MAX_PSNE_ENTRY_SIZE sizeof(struct sq_psn_search)
-
-#define PSNE_CNT_PER_PG (PAGE_SIZE / BNXT_QPLIB_MAX_PSNE_ENTRY_SIZE)
-#define PSNE_MAX_IDX_PER_PG (PSNE_CNT_PER_PG - 1)
-
-static inline u32 get_psne_pg(u32 val)
-{
- return ((val & ~PSNE_MAX_IDX_PER_PG) / PSNE_CNT_PER_PG);
-}
-
-static inline u32 get_psne_idx(u32 val)
-{
- return (val & PSNE_MAX_IDX_PER_PG);
-}
-
#define BNXT_QPLIB_QP_MAX_SGL 6
-
struct bnxt_qplib_swq {
u64 wr_id;
int next_idx;
@@ -226,19 +196,13 @@ struct bnxt_qplib_swqe {
};
};
-#define BNXT_QPLIB_MAX_RQE_ENTRY_SIZE sizeof(struct rq_wqe)
-
-#define RQE_CNT_PER_PG (PAGE_SIZE / BNXT_QPLIB_MAX_RQE_ENTRY_SIZE)
-#define RQE_MAX_IDX_PER_PG (RQE_CNT_PER_PG - 1)
-#define RQE_PG(x) (((x) & ~RQE_MAX_IDX_PER_PG) / RQE_CNT_PER_PG)
-#define RQE_IDX(x) ((x) & RQE_MAX_IDX_PER_PG)
-
struct bnxt_qplib_q {
struct bnxt_qplib_hwq hwq;
struct bnxt_qplib_swq *swq;
struct bnxt_qplib_db_info dbinfo;
struct bnxt_qplib_sg_info sg_info;
u32 max_wqe;
+ u16 wqe_size;
u16 q_full_delta;
u16 max_sge;
u32 psn;
@@ -256,7 +220,7 @@ struct bnxt_qplib_qp {
struct bnxt_qplib_dpi *dpi;
struct bnxt_qplib_chip_ctx *cctx;
u64 qp_handle;
-#define BNXT_QPLIB_QP_ID_INVALID 0xFFFFFFFF
+#define BNXT_QPLIB_QP_ID_INVALID 0xFFFFFFFF
u32 id;
u8 type;
u8 sig_type;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index f01e864bb611..4e211162acee 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -89,10 +89,9 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
struct creq_base *resp, void *sb, u8 is_block)
{
struct bnxt_qplib_cmdq_ctx *cmdq = &rcfw->cmdq;
- struct bnxt_qplib_cmdqe *cmdqe, **hwq_ptr;
struct bnxt_qplib_hwq *hwq = &cmdq->hwq;
struct bnxt_qplib_crsqe *crsqe;
- u32 cmdq_depth = rcfw->cmdq_depth;
+ struct bnxt_qplib_cmdqe *cmdqe;
u32 sw_prod, cmdq_prod;
struct pci_dev *pdev;
unsigned long flags;
@@ -163,13 +162,11 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
BNXT_QPLIB_CMDQE_UNITS;
}
- hwq_ptr = (struct bnxt_qplib_cmdqe **)hwq->pbl_ptr;
preq = (u8 *)req;
do {
/* Locate the next cmdq slot */
sw_prod = HWQ_CMP(hwq->prod, hwq);
- cmdqe = &hwq_ptr[get_cmdq_pg(sw_prod, cmdq_depth)]
- [get_cmdq_idx(sw_prod, cmdq_depth)];
+ cmdqe = bnxt_qplib_get_qe(hwq, sw_prod, NULL);
if (!cmdqe) {
dev_err(&pdev->dev,
"RCFW request failed with no cmdqe!\n");
@@ -378,7 +375,7 @@ static void bnxt_qplib_service_creq(unsigned long data)
struct bnxt_qplib_creq_ctx *creq = &rcfw->creq;
u32 type, budget = CREQ_ENTRY_POLL_BUDGET;
struct bnxt_qplib_hwq *hwq = &creq->hwq;
- struct creq_base *creqe, **hwq_ptr;
+ struct creq_base *creqe;
u32 sw_cons, raw_cons;
unsigned long flags;
@@ -387,8 +384,7 @@ static void bnxt_qplib_service_creq(unsigned long data)
raw_cons = hwq->cons;
while (budget > 0) {
sw_cons = HWQ_CMP(raw_cons, hwq);
- hwq_ptr = (struct creq_base **)hwq->pbl_ptr;
- creqe = &hwq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)];
+ creqe = bnxt_qplib_get_qe(hwq, sw_cons, NULL);
if (!CREQ_CMP_VALID(creqe, raw_cons, hwq->max_elements))
break;
/* The valid test of the entry must be done first before
@@ -434,7 +430,6 @@ static irqreturn_t bnxt_qplib_creq_irq(int irq, void *dev_instance)
{
struct bnxt_qplib_rcfw *rcfw = dev_instance;
struct bnxt_qplib_creq_ctx *creq;
- struct creq_base **creq_ptr;
struct bnxt_qplib_hwq *hwq;
u32 sw_cons;
@@ -442,8 +437,7 @@ static irqreturn_t bnxt_qplib_creq_irq(int irq, void *dev_instance)
hwq = &creq->hwq;
/* Prefetch the CREQ element */
sw_cons = HWQ_CMP(hwq->cons, hwq);
- creq_ptr = (struct creq_base **)creq->hwq.pbl_ptr;
- prefetch(&creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)]);
+ prefetch(bnxt_qplib_get_qe(hwq, sw_cons, NULL));
tasklet_schedule(&creq->creq_tasklet);
@@ -468,29 +462,13 @@ int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw)
return 0;
}
-static int __get_pbl_pg_idx(struct bnxt_qplib_pbl *pbl)
-{
- return (pbl->pg_size == ROCE_PG_SIZE_4K ?
- CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_4K :
- pbl->pg_size == ROCE_PG_SIZE_8K ?
- CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_8K :
- pbl->pg_size == ROCE_PG_SIZE_64K ?
- CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_64K :
- pbl->pg_size == ROCE_PG_SIZE_2M ?
- CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_2M :
- pbl->pg_size == ROCE_PG_SIZE_8M ?
- CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_8M :
- pbl->pg_size == ROCE_PG_SIZE_1G ?
- CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_1G :
- CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_4K);
-}
-
int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
struct bnxt_qplib_ctx *ctx, int is_virtfn)
{
- struct cmdq_initialize_fw req;
struct creq_initialize_fw_resp resp;
- u16 cmd_flags = 0, level;
+ struct cmdq_initialize_fw req;
+ u16 cmd_flags = 0;
+ u8 pgsz, lvl;
int rc;
RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags);
@@ -511,32 +489,30 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
if (bnxt_qplib_is_chip_gen_p5(rcfw->res->cctx))
goto config_vf_res;
- level = ctx->qpc_tbl.level;
- req.qpc_pg_size_qpc_lvl = (level << CMDQ_INITIALIZE_FW_QPC_LVL_SFT) |
- __get_pbl_pg_idx(&ctx->qpc_tbl.pbl[level]);
- level = ctx->mrw_tbl.level;
- req.mrw_pg_size_mrw_lvl = (level << CMDQ_INITIALIZE_FW_MRW_LVL_SFT) |
- __get_pbl_pg_idx(&ctx->mrw_tbl.pbl[level]);
- level = ctx->srqc_tbl.level;
- req.srq_pg_size_srq_lvl = (level << CMDQ_INITIALIZE_FW_SRQ_LVL_SFT) |
- __get_pbl_pg_idx(&ctx->srqc_tbl.pbl[level]);
- level = ctx->cq_tbl.level;
- req.cq_pg_size_cq_lvl = (level << CMDQ_INITIALIZE_FW_CQ_LVL_SFT) |
- __get_pbl_pg_idx(&ctx->cq_tbl.pbl[level]);
- level = ctx->srqc_tbl.level;
- req.srq_pg_size_srq_lvl = (level << CMDQ_INITIALIZE_FW_SRQ_LVL_SFT) |
- __get_pbl_pg_idx(&ctx->srqc_tbl.pbl[level]);
- level = ctx->cq_tbl.level;
- req.cq_pg_size_cq_lvl = (level << CMDQ_INITIALIZE_FW_CQ_LVL_SFT) |
- __get_pbl_pg_idx(&ctx->cq_tbl.pbl[level]);
- level = ctx->tim_tbl.level;
- req.tim_pg_size_tim_lvl = (level << CMDQ_INITIALIZE_FW_TIM_LVL_SFT) |
- __get_pbl_pg_idx(&ctx->tim_tbl.pbl[level]);
- level = ctx->tqm_ctx.pde.level;
- req.tqm_pg_size_tqm_lvl =
- (level << CMDQ_INITIALIZE_FW_TQM_LVL_SFT) |
- __get_pbl_pg_idx(&ctx->tqm_ctx.pde.pbl[level]);
-
+ lvl = ctx->qpc_tbl.level;
+ pgsz = bnxt_qplib_base_pg_size(&ctx->qpc_tbl);
+ req.qpc_pg_size_qpc_lvl = (pgsz << CMDQ_INITIALIZE_FW_QPC_PG_SIZE_SFT) |
+ lvl;
+ lvl = ctx->mrw_tbl.level;
+ pgsz = bnxt_qplib_base_pg_size(&ctx->mrw_tbl);
+ req.mrw_pg_size_mrw_lvl = (pgsz << CMDQ_INITIALIZE_FW_QPC_PG_SIZE_SFT) |
+ lvl;
+ lvl = ctx->srqc_tbl.level;
+ pgsz = bnxt_qplib_base_pg_size(&ctx->srqc_tbl);
+ req.srq_pg_size_srq_lvl = (pgsz << CMDQ_INITIALIZE_FW_QPC_PG_SIZE_SFT) |
+ lvl;
+ lvl = ctx->cq_tbl.level;
+ pgsz = bnxt_qplib_base_pg_size(&ctx->cq_tbl);
+ req.cq_pg_size_cq_lvl = (pgsz << CMDQ_INITIALIZE_FW_QPC_PG_SIZE_SFT) |
+ lvl;
+ lvl = ctx->tim_tbl.level;
+ pgsz = bnxt_qplib_base_pg_size(&ctx->tim_tbl);
+ req.tim_pg_size_tim_lvl = (pgsz << CMDQ_INITIALIZE_FW_QPC_PG_SIZE_SFT) |
+ lvl;
+ lvl = ctx->tqm_ctx.pde.level;
+ pgsz = bnxt_qplib_base_pg_size(&ctx->tqm_ctx.pde);
+ req.tqm_pg_size_tqm_lvl = (pgsz << CMDQ_INITIALIZE_FW_QPC_PG_SIZE_SFT) |
+ lvl;
req.qpc_page_dir =
cpu_to_le64(ctx->qpc_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
req.mrw_page_dir =
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
index 411fce3493b6..157387636d00 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -87,12 +87,6 @@ static inline u32 bnxt_qplib_cmdqe_page_size(u32 depth)
return (bnxt_qplib_cmdqe_npages(depth) * PAGE_SIZE);
}
-static inline u32 bnxt_qplib_cmdqe_cnt_per_pg(u32 depth)
-{
- return (bnxt_qplib_cmdqe_page_size(depth) /
- BNXT_QPLIB_CMDQE_UNITS);
-}
-
/* Set the cmd_size to a factor of CMDQE unit */
static inline void bnxt_qplib_set_cmd_slots(struct cmdq_base *req)
{
@@ -100,30 +94,12 @@ static inline void bnxt_qplib_set_cmd_slots(struct cmdq_base *req)
BNXT_QPLIB_CMDQE_UNITS;
}
-#define MAX_CMDQ_IDX(depth) ((depth) - 1)
-
-static inline u32 bnxt_qplib_max_cmdq_idx_per_pg(u32 depth)
-{
- return (bnxt_qplib_cmdqe_cnt_per_pg(depth) - 1);
-}
-
#define RCFW_MAX_COOKIE_VALUE 0x7FFF
#define RCFW_CMD_IS_BLOCKING 0x8000
#define RCFW_BLOCKED_CMD_WAIT_COUNT 0x4E20
#define HWRM_VERSION_RCFW_CMDQ_DEPTH_CHECK 0x1000900020011ULL
-static inline u32 get_cmdq_pg(u32 val, u32 depth)
-{
- return (val & ~(bnxt_qplib_max_cmdq_idx_per_pg(depth))) /
- (bnxt_qplib_cmdqe_cnt_per_pg(depth));
-}
-
-static inline u32 get_cmdq_idx(u32 val, u32 depth)
-{
- return val & (bnxt_qplib_max_cmdq_idx_per_pg(depth));
-}
-
/* Crsq buf is 1024-Byte */
struct bnxt_qplib_crsbe {
u8 data[1024];
@@ -133,76 +109,9 @@ struct bnxt_qplib_crsbe {
/* Allocate 1 per QP for async error notification for now */
#define BNXT_QPLIB_CREQE_MAX_CNT (64 * 1024)
#define BNXT_QPLIB_CREQE_UNITS 16 /* 16-Bytes per prod unit */
-#define BNXT_QPLIB_CREQE_CNT_PER_PG (PAGE_SIZE / BNXT_QPLIB_CREQE_UNITS)
-
-#define MAX_CREQ_IDX (BNXT_QPLIB_CREQE_MAX_CNT - 1)
-#define MAX_CREQ_IDX_PER_PG (BNXT_QPLIB_CREQE_CNT_PER_PG - 1)
-
-static inline u32 get_creq_pg(u32 val)
-{
- return (val & ~MAX_CREQ_IDX_PER_PG) / BNXT_QPLIB_CREQE_CNT_PER_PG;
-}
-
-static inline u32 get_creq_idx(u32 val)
-{
- return val & MAX_CREQ_IDX_PER_PG;
-}
-
-#define BNXT_QPLIB_CREQE_PER_PG (PAGE_SIZE / sizeof(struct creq_base))
-
#define CREQ_CMP_VALID(hdr, raw_cons, cp_bit) \
(!!((hdr)->v & CREQ_BASE_V) == \
!((raw_cons) & (cp_bit)))
-
-#define CREQ_DB_KEY_CP (0x2 << CMPL_DOORBELL_KEY_SFT)
-#define CREQ_DB_IDX_VALID CMPL_DOORBELL_IDX_VALID
-#define CREQ_DB_IRQ_DIS CMPL_DOORBELL_MASK
-#define CREQ_DB_CP_FLAGS_REARM (CREQ_DB_KEY_CP | \
- CREQ_DB_IDX_VALID)
-#define CREQ_DB_CP_FLAGS (CREQ_DB_KEY_CP | \
- CREQ_DB_IDX_VALID | \
- CREQ_DB_IRQ_DIS)
-
-static inline void bnxt_qplib_ring_creq_db64(void __iomem *db, u32 index,
- u32 xid, bool arm)
-{
- u64 val = 0;
-
- val = xid & DBC_DBC_XID_MASK;
- val |= DBC_DBC_PATH_ROCE;
- val |= arm ? DBC_DBC_TYPE_NQ_ARM : DBC_DBC_TYPE_NQ;
- val <<= 32;
- val |= index & DBC_DBC_INDEX_MASK;
-
- writeq(val, db);
-}
-
-static inline void bnxt_qplib_ring_creq_db_rearm(void __iomem *db, u32 raw_cons,
- u32 max_elements, u32 xid,
- bool gen_p5)
-{
- u32 index = raw_cons & (max_elements - 1);
-
- if (gen_p5)
- bnxt_qplib_ring_creq_db64(db, index, xid, true);
- else
- writel(CREQ_DB_CP_FLAGS_REARM | (index & DBC_DBC32_XID_MASK),
- db);
-}
-
-static inline void bnxt_qplib_ring_creq_db(void __iomem *db, u32 raw_cons,
- u32 max_elements, u32 xid,
- bool gen_p5)
-{
- u32 index = raw_cons & (max_elements - 1);
-
- if (gen_p5)
- bnxt_qplib_ring_creq_db64(db, index, xid, true);
- else
- writel(CREQ_DB_CP_FLAGS | (index & DBC_DBC32_XID_MASK),
- db);
-}
-
#define CREQ_ENTRY_POLL_BUDGET 0x100
/* HWQ */
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index cab1adf1fed9..7efa6e5dce62 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -347,6 +347,7 @@ done:
hwq->depth = hwq_attr->depth;
hwq->max_elements = depth;
hwq->element_size = stride;
+ hwq->qe_ppg = pg_size / stride;
/* For direct access to the elements */
lvl = hwq->level;
if (hwq_attr->sginfo->nopte && hwq->level)
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
index 95b645dbbc2d..c29cbd3a2d7b 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
@@ -80,6 +80,15 @@ enum bnxt_qplib_pbl_lvl {
#define ROCE_PG_SIZE_8M (8 * 1024 * 1024)
#define ROCE_PG_SIZE_1G (1024 * 1024 * 1024)
+enum bnxt_qplib_hwrm_pg_size {
+ BNXT_QPLIB_HWRM_PG_SIZE_4K = 0,
+ BNXT_QPLIB_HWRM_PG_SIZE_8K = 1,
+ BNXT_QPLIB_HWRM_PG_SIZE_64K = 2,
+ BNXT_QPLIB_HWRM_PG_SIZE_2M = 3,
+ BNXT_QPLIB_HWRM_PG_SIZE_8M = 4,
+ BNXT_QPLIB_HWRM_PG_SIZE_1G = 5,
+};
+
struct bnxt_qplib_reg_desc {
u8 bar_id;
resource_size_t bar_base;
@@ -126,6 +135,7 @@ struct bnxt_qplib_hwq {
u32 max_elements;
u32 depth;
u16 element_size; /* Size of each entry */
+ u16 qe_ppg; /* queue entries per page */
u32 prod; /* raw */
u32 cons; /* raw */
@@ -263,6 +273,49 @@ static inline u8 bnxt_qplib_get_ring_type(struct bnxt_qplib_chip_ctx *cctx)
RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL;
}
+static inline u8 bnxt_qplib_base_pg_size(struct bnxt_qplib_hwq *hwq)
+{
+ u8 pg_size = BNXT_QPLIB_HWRM_PG_SIZE_4K;
+ struct bnxt_qplib_pbl *pbl;
+
+ pbl = &hwq->pbl[PBL_LVL_0];
+ switch (pbl->pg_size) {
+ case ROCE_PG_SIZE_4K:
+ pg_size = BNXT_QPLIB_HWRM_PG_SIZE_4K;
+ break;
+ case ROCE_PG_SIZE_8K:
+ pg_size = BNXT_QPLIB_HWRM_PG_SIZE_8K;
+ break;
+ case ROCE_PG_SIZE_64K:
+ pg_size = BNXT_QPLIB_HWRM_PG_SIZE_64K;
+ break;
+ case ROCE_PG_SIZE_2M:
+ pg_size = BNXT_QPLIB_HWRM_PG_SIZE_2M;
+ break;
+ case ROCE_PG_SIZE_8M:
+ pg_size = BNXT_QPLIB_HWRM_PG_SIZE_8M;
+ break;
+ case ROCE_PG_SIZE_1G:
+ pg_size = BNXT_QPLIB_HWRM_PG_SIZE_1G;
+ break;
+ default:
+ break;
+ }
+
+ return pg_size;
+}
+
+static inline void *bnxt_qplib_get_qe(struct bnxt_qplib_hwq *hwq,
+ u32 indx, u64 *pg)
+{
+ u32 pg_num, pg_idx;
+
+ pg_num = (indx / hwq->qe_ppg);
+ pg_idx = (indx % hwq->qe_ppg);
+ if (pg)
+ *pg = (u64)&hwq->pbl_ptr[pg_num];
+ return (void *)(hwq->pbl_ptr[pg_num] + hwq->element_size * pg_idx);
+}
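The index arithmetic in bnxt_qplib_get_qe() can be read with a small worked example (numbers illustrative, not taken from the patch): with PAGE_SIZE = 4096 and element_size = 64, qe_ppg = 64, so index 130 resolves to pbl_ptr[130 / 64] = pbl_ptr[2] at byte offset 64 * (130 % 64) = 128 into that page - the same element the removed per-queue *_PG()/*_IDX() macro pairs used to compute.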
#define to_bnxt_qplib(ptr, type, member) \
container_of(ptr, type, member)
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index 66954ff6a2f2..4cd475ea97a2 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -132,9 +132,6 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
attr->max_raw_ethy_qp = le32_to_cpu(sb->max_raw_eth_qp);
attr->max_ah = le32_to_cpu(sb->max_ah);
- attr->max_fmr = le32_to_cpu(sb->max_fmr);
- attr->max_map_per_fmr = sb->max_map_per_fmr;
-
attr->max_srq = le16_to_cpu(sb->max_srq);
attr->max_srq_wqes = le32_to_cpu(sb->max_srq_wr) - 1;
attr->max_srq_sges = sb->max_srq_sge;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
index 13d9432d5ce2..6404f0da1051 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
@@ -64,8 +64,6 @@ struct bnxt_qplib_dev_attr {
u32 max_mw;
u32 max_raw_ethy_qp;
u32 max_ah;
- u32 max_fmr;
- u32 max_map_per_fmr;
u32 max_srq;
u32 max_srq_wqes;
u32 max_srq_sges;
diff --git a/drivers/infiniband/hw/bnxt_re/roce_hsi.h b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
index e4b09e7c2175..6f00f07420b7 100644
--- a/drivers/infiniband/hw/bnxt_re/roce_hsi.h
+++ b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
@@ -210,6 +210,20 @@ struct sq_send {
__le32 data[24];
};
+/* sq_send_hdr (size:256b/32B) */
+struct sq_send_hdr {
+ u8 wqe_type;
+ u8 flags;
+ u8 wqe_size;
+ u8 reserved8_1;
+ __le32 inv_key_or_imm_data;
+ __le32 length;
+ __le32 q_key;
+ __le32 dst_qp;
+ __le32 avid;
+ __le64 reserved64;
+};
+
/* Send Raw Ethernet and QP1 SQ WQE (40 bytes) */
struct sq_send_raweth_qp1 {
u8 wqe_type;
@@ -265,6 +279,21 @@ struct sq_send_raweth_qp1 {
__le32 data[24];
};
+/* sq_send_raweth_qp1_hdr (size:256b/32B) */
+struct sq_send_raweth_qp1_hdr {
+ u8 wqe_type;
+ u8 flags;
+ u8 wqe_size;
+ u8 reserved8;
+ __le16 lflags;
+ __le16 cfa_action;
+ __le32 length;
+ __le32 reserved32_1;
+ __le32 cfa_meta;
+ __le32 reserved32_2;
+ __le64 reserved64;
+};
+
/* RDMA SQ WQE (40 bytes) */
struct sq_rdma {
u8 wqe_type;
@@ -288,6 +317,20 @@ struct sq_rdma {
__le32 data[24];
};
+/* sq_rdma_hdr (size:256b/32B) */
+struct sq_rdma_hdr {
+ u8 wqe_type;
+ u8 flags;
+ u8 wqe_size;
+ u8 reserved8;
+ __le32 imm_data;
+ __le32 length;
+ __le32 reserved32_1;
+ __le64 remote_va;
+ __le32 remote_key;
+ __le32 reserved32_2;
+};
+
/* Atomic SQ WQE (40 bytes) */
struct sq_atomic {
u8 wqe_type;
@@ -307,6 +350,17 @@ struct sq_atomic {
__le32 data[24];
};
+/* sq_atomic_hdr (size:256b/32B) */
+struct sq_atomic_hdr {
+ u8 wqe_type;
+ u8 flags;
+ __le16 reserved16;
+ __le32 remote_key;
+ __le64 remote_va;
+ __le64 swap_data;
+ __le64 cmp_data;
+};
+
/* Local Invalidate SQ WQE (40 bytes) */
struct sq_localinvalidate {
u8 wqe_type;
@@ -324,6 +378,16 @@ struct sq_localinvalidate {
__le32 data[24];
};
+/* sq_localinvalidate_hdr (size:256b/32B) */
+struct sq_localinvalidate_hdr {
+ u8 wqe_type;
+ u8 flags;
+ __le16 reserved16;
+ __le32 inv_l_key;
+ __le64 reserved64;
+ u8 reserved128[16];
+};
+
/* FR-PMR SQ WQE (40 bytes) */
struct sq_fr_pmr {
u8 wqe_type;
@@ -380,6 +444,21 @@ struct sq_fr_pmr {
__le32 data[24];
};
+/* sq_fr_pmr_hdr (size:256b/32B) */
+struct sq_fr_pmr_hdr {
+ u8 wqe_type;
+ u8 flags;
+ u8 access_cntl;
+ u8 zero_based_page_size_log;
+ __le32 l_key;
+ u8 length[5];
+ u8 reserved8_1;
+ u8 reserved8_2;
+ u8 numlevels_pbl_page_size_log;
+ __le64 pblptr;
+ __le64 va;
+};
+
/* Bind SQ WQE (40 bytes) */
struct sq_bind {
u8 wqe_type;
@@ -417,6 +496,22 @@ struct sq_bind {
#define SQ_BIND_DATA_SFT 0
};
+/* sq_bind_hdr (size:256b/32B) */
+struct sq_bind_hdr {
+ u8 wqe_type;
+ u8 flags;
+ u8 access_cntl;
+ u8 reserved8_1;
+ u8 mw_type_zero_based;
+ u8 reserved8_2;
+ __le16 reserved16;
+ __le32 parent_l_key;
+ __le32 l_key;
+ __le64 va;
+ u8 length[5];
+ u8 reserved24[3];
+};
+
/* RQ/SRQ WQE Structures */
/* RQ/SRQ WQE (40 bytes) */
struct rq_wqe {
@@ -435,6 +530,17 @@ struct rq_wqe {
__le32 data[24];
};
+/* rq_wqe_hdr (size:256b/32B) */
+struct rq_wqe_hdr {
+ u8 wqe_type;
+ u8 flags;
+ u8 wqe_size;
+ u8 reserved8;
+ __le32 reserved32;
+ __le32 wr_id[2];
+ u8 reserved128[16];
+};
+
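The "(size:256b/32B)" annotations on the new *_hdr structures describe a fixed 32-byte hardware layout; a hypothetical compile-time sanity check (not part of the patch) could assert this directly:

/* Hypothetical layout checks for the 32-byte WQE header structures. */
static inline void bnxt_re_check_wqe_hdr_sizes(void)
{
	BUILD_BUG_ON(sizeof(struct sq_send_hdr) != 32);
	BUILD_BUG_ON(sizeof(struct sq_rdma_hdr) != 32);
	BUILD_BUG_ON(sizeof(struct sq_atomic_hdr) != 32);
	BUILD_BUG_ON(sizeof(struct rq_wqe_hdr) != 32);
}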
/* CQ CQE Structures */
/* Base CQE (32 bytes) */
struct cq_base {
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 599340c1f0b8..541dbcf22d0e 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -953,6 +953,7 @@ void c4iw_dealloc(struct uld_ctx *ctx)
static void c4iw_remove(struct uld_ctx *ctx)
{
pr_debug("c4iw_dev %p\n", ctx->dev);
+ debugfs_remove_recursive(ctx->dev->debugfs_root);
c4iw_unregister_device(ctx->dev);
c4iw_dealloc(ctx);
}
diff --git a/drivers/infiniband/hw/efa/efa.h b/drivers/infiniband/hw/efa/efa.h
index aa7396a1588a..1889dd172a25 100644
--- a/drivers/infiniband/hw/efa/efa.h
+++ b/drivers/infiniband/hw/efa/efa.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
- * Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef _EFA_H_
@@ -40,6 +40,7 @@ struct efa_sw_stats {
atomic64_t reg_mr_err;
atomic64_t alloc_ucontext_err;
atomic64_t create_ah_err;
+ atomic64_t mmap_err;
};
/* Don't use anything other than atomic64 */
@@ -153,8 +154,7 @@ int efa_mmap(struct ib_ucontext *ibucontext,
struct vm_area_struct *vma);
void efa_mmap_free(struct rdma_user_mmap_entry *rdma_entry);
int efa_create_ah(struct ib_ah *ibah,
- struct rdma_ah_attr *ah_attr,
- u32 flags,
+ struct rdma_ah_init_attr *init_attr,
struct ib_udata *udata);
void efa_destroy_ah(struct ib_ah *ibah, u32 flags);
int efa_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
diff --git a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
index 96b104ab5415..bef2bd291054 100644
--- a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
+++ b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
@@ -37,7 +37,7 @@ enum efa_admin_aq_feature_id {
EFA_ADMIN_NETWORK_ATTR = 3,
EFA_ADMIN_QUEUE_ATTR = 4,
EFA_ADMIN_HW_HINTS = 5,
- EFA_ADMIN_FEATURES_OPCODE_NUM = 8,
+ EFA_ADMIN_HOST_INFO = 6,
};
/* QP transport type */
@@ -799,6 +799,54 @@ struct efa_admin_mmio_req_read_less_resp {
u32 reg_val;
};
+enum efa_admin_os_type {
+ EFA_ADMIN_OS_LINUX = 0,
+};
+
+struct efa_admin_host_info {
+ /* OS distribution string format */
+ u8 os_dist_str[128];
+
+ /* Defined in enum efa_admin_os_type */
+ u32 os_type;
+
+ /* Kernel version string format */
+ u8 kernel_ver_str[32];
+
+ /* Kernel version numeric format */
+ u32 kernel_ver;
+
+ /*
+ * 7:0 : driver_module_type
+ * 15:8 : driver_sub_minor
+ * 23:16 : driver_minor
+ * 31:24 : driver_major
+ */
+ u32 driver_ver;
+
+ /*
+ * Device's Bus, Device and Function
+ * 2:0 : function
+ * 7:3 : device
+ * 15:8 : bus
+ */
+ u16 bdf;
+
+ /*
+ * Spec version
+ * 7:0 : spec_minor
+ * 15:8 : spec_major
+ */
+ u16 spec_ver;
+
+ /*
+ * 0 : intree - Intree driver
+ * 1 : gdr - GPUDirect RDMA supported
+ * 31:2 : reserved2
+ */
+ u32 flags;
+};
+
/* create_qp_cmd */
#define EFA_ADMIN_CREATE_QP_CMD_SQ_VIRT_MASK BIT(0)
#define EFA_ADMIN_CREATE_QP_CMD_RQ_VIRT_MASK BIT(1)
@@ -820,4 +868,17 @@ struct efa_admin_mmio_req_read_less_resp {
/* feature_device_attr_desc */
#define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RDMA_READ_MASK BIT(0)
+/* host_info */
+#define EFA_ADMIN_HOST_INFO_DRIVER_MODULE_TYPE_MASK GENMASK(7, 0)
+#define EFA_ADMIN_HOST_INFO_DRIVER_SUB_MINOR_MASK GENMASK(15, 8)
+#define EFA_ADMIN_HOST_INFO_DRIVER_MINOR_MASK GENMASK(23, 16)
+#define EFA_ADMIN_HOST_INFO_DRIVER_MAJOR_MASK GENMASK(31, 24)
+#define EFA_ADMIN_HOST_INFO_FUNCTION_MASK GENMASK(2, 0)
+#define EFA_ADMIN_HOST_INFO_DEVICE_MASK GENMASK(7, 3)
+#define EFA_ADMIN_HOST_INFO_BUS_MASK GENMASK(15, 8)
+#define EFA_ADMIN_HOST_INFO_SPEC_MINOR_MASK GENMASK(7, 0)
+#define EFA_ADMIN_HOST_INFO_SPEC_MAJOR_MASK GENMASK(15, 8)
+#define EFA_ADMIN_HOST_INFO_INTREE_MASK BIT(0)
+#define EFA_ADMIN_HOST_INFO_GDR_MASK BIT(1)
+
#endif /* _EFA_ADMIN_CMDS_H_ */
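The host_info masks above are consumed through the driver's EFA_SET() helper. As an illustration of the GENMASK()/FIELD_PREP() semantics those masks rely on (a sketch, not the driver's actual macro implementation), packing the driver version fields could be written as:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

/* Hypothetical helper, for illustration only. */
static u32 example_pack_driver_ver(u8 major, u8 minor, u8 sub_minor)
{
	u32 ver = 0;

	ver |= FIELD_PREP(EFA_ADMIN_HOST_INFO_DRIVER_MAJOR_MASK, major);
	ver |= FIELD_PREP(EFA_ADMIN_HOST_INFO_DRIVER_MINOR_MASK, minor);
	ver |= FIELD_PREP(EFA_ADMIN_HOST_INFO_DRIVER_SUB_MINOR_MASK, sub_minor);

	return ver;
}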
diff --git a/drivers/infiniband/hw/efa/efa_com.c b/drivers/infiniband/hw/efa/efa_com.c
index 7fce69f5568f..336bc2c57bb1 100644
--- a/drivers/infiniband/hw/efa/efa_com.c
+++ b/drivers/infiniband/hw/efa/efa_com.c
@@ -631,17 +631,20 @@ int efa_com_cmd_exec(struct efa_com_admin_queue *aq,
cmd->aq_common_descriptor.opcode, PTR_ERR(comp_ctx));
up(&aq->avail_cmds);
+ atomic64_inc(&aq->stats.cmd_err);
return PTR_ERR(comp_ctx);
}
err = efa_com_wait_and_process_admin_cq(comp_ctx, aq);
- if (err)
+ if (err) {
ibdev_err_ratelimited(
aq->efa_dev,
"Failed to process command %s (opcode %u) comp_status %d err %d\n",
efa_com_cmd_str(cmd->aq_common_descriptor.opcode),
cmd->aq_common_descriptor.opcode, comp_ctx->comp_status,
err);
+ atomic64_inc(&aq->stats.cmd_err);
+ }
up(&aq->avail_cmds);
diff --git a/drivers/infiniband/hw/efa/efa_com.h b/drivers/infiniband/hw/efa/efa_com.h
index c67dd8109d1c..5e4c88877ddb 100644
--- a/drivers/infiniband/hw/efa/efa_com.h
+++ b/drivers/infiniband/hw/efa/efa_com.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
- * Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef _EFA_COM_H_
@@ -47,6 +47,7 @@ struct efa_com_admin_sq {
struct efa_com_stats_admin {
atomic64_t submitted_cmd;
atomic64_t completed_cmd;
+ atomic64_t cmd_err;
atomic64_t no_completion;
};
diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.c b/drivers/infiniband/hw/efa/efa_com_cmd.c
index eea5574a62e8..fabd8df2e78f 100644
--- a/drivers/infiniband/hw/efa/efa_com_cmd.c
+++ b/drivers/infiniband/hw/efa/efa_com_cmd.c
@@ -351,7 +351,7 @@ int efa_com_destroy_ah(struct efa_com_dev *edev,
return 0;
}
-static bool
+bool
efa_com_check_supported_feature_id(struct efa_com_dev *edev,
enum efa_admin_aq_feature_id feature_id)
{
@@ -388,7 +388,7 @@ static int efa_com_get_feature_ex(struct efa_com_dev *edev,
if (control_buff_size)
EFA_SET(&get_cmd.aq_common_descriptor.flags,
- EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT, 1);
+ EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA, 1);
efa_com_set_dma_addr(control_buf_dma_addr,
&get_cmd.control_buffer.address.mem_addr_high,
@@ -517,12 +517,12 @@ int efa_com_get_hw_hints(struct efa_com_dev *edev,
return 0;
}
-static int efa_com_set_feature_ex(struct efa_com_dev *edev,
- struct efa_admin_set_feature_resp *set_resp,
- struct efa_admin_set_feature_cmd *set_cmd,
- enum efa_admin_aq_feature_id feature_id,
- dma_addr_t control_buf_dma_addr,
- u32 control_buff_size)
+int efa_com_set_feature_ex(struct efa_com_dev *edev,
+ struct efa_admin_set_feature_resp *set_resp,
+ struct efa_admin_set_feature_cmd *set_cmd,
+ enum efa_admin_aq_feature_id feature_id,
+ dma_addr_t control_buf_dma_addr,
+ u32 control_buff_size)
{
struct efa_com_admin_queue *aq;
int err;
@@ -540,7 +540,7 @@ static int efa_com_set_feature_ex(struct efa_com_dev *edev,
if (control_buff_size) {
set_cmd->aq_common_descriptor.flags = 0;
EFA_SET(&set_cmd->aq_common_descriptor.flags,
- EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT, 1);
+ EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA, 1);
efa_com_set_dma_addr(control_buf_dma_addr,
&set_cmd->control_buffer.address.mem_addr_high,
&set_cmd->control_buffer.address.mem_addr_low);
diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.h b/drivers/infiniband/hw/efa/efa_com_cmd.h
index 31db5a0cbd5b..41ce4a476ee6 100644
--- a/drivers/infiniband/hw/efa/efa_com_cmd.h
+++ b/drivers/infiniband/hw/efa/efa_com_cmd.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
- * Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef _EFA_COM_CMD_H_
@@ -270,6 +270,15 @@ int efa_com_get_device_attr(struct efa_com_dev *edev,
struct efa_com_get_device_attr_result *result);
int efa_com_get_hw_hints(struct efa_com_dev *edev,
struct efa_com_get_hw_hints_result *result);
+bool
+efa_com_check_supported_feature_id(struct efa_com_dev *edev,
+ enum efa_admin_aq_feature_id feature_id);
+int efa_com_set_feature_ex(struct efa_com_dev *edev,
+ struct efa_admin_set_feature_resp *set_resp,
+ struct efa_admin_set_feature_cmd *set_cmd,
+ enum efa_admin_aq_feature_id feature_id,
+ dma_addr_t control_buf_dma_addr,
+ u32 control_buff_size);
int efa_com_set_aenq_config(struct efa_com_dev *edev, u32 groups);
int efa_com_alloc_pd(struct efa_com_dev *edev,
struct efa_com_alloc_pd_result *result);
diff --git a/drivers/infiniband/hw/efa/efa_main.c b/drivers/infiniband/hw/efa/efa_main.c
index faf3ff1bca2a..82145574c928 100644
--- a/drivers/infiniband/hw/efa/efa_main.c
+++ b/drivers/infiniband/hw/efa/efa_main.c
@@ -1,10 +1,12 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/*
- * Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/utsname.h>
+#include <linux/version.h>
#include <rdma/ib_user_verbs.h>
@@ -187,6 +189,52 @@ static void efa_stats_init(struct efa_dev *dev)
atomic64_set(s, 0);
}
+static void efa_set_host_info(struct efa_dev *dev)
+{
+ struct efa_admin_set_feature_resp resp = {};
+ struct efa_admin_set_feature_cmd cmd = {};
+ struct efa_admin_host_info *hinf;
+ u32 bufsz = sizeof(*hinf);
+ dma_addr_t hinf_dma;
+
+ if (!efa_com_check_supported_feature_id(&dev->edev,
+ EFA_ADMIN_HOST_INFO))
+ return;
+
+ /* Failures in host info set shall not disturb probe */
+ hinf = dma_alloc_coherent(&dev->pdev->dev, bufsz, &hinf_dma,
+ GFP_KERNEL);
+ if (!hinf)
+ return;
+
+ strlcpy(hinf->os_dist_str, utsname()->release,
+ min(sizeof(hinf->os_dist_str), sizeof(utsname()->release)));
+ hinf->os_type = EFA_ADMIN_OS_LINUX;
+ strlcpy(hinf->kernel_ver_str, utsname()->version,
+ min(sizeof(hinf->kernel_ver_str), sizeof(utsname()->version)));
+ hinf->kernel_ver = LINUX_VERSION_CODE;
+ EFA_SET(&hinf->driver_ver, EFA_ADMIN_HOST_INFO_DRIVER_MAJOR, 0);
+ EFA_SET(&hinf->driver_ver, EFA_ADMIN_HOST_INFO_DRIVER_MINOR, 0);
+ EFA_SET(&hinf->driver_ver, EFA_ADMIN_HOST_INFO_DRIVER_SUB_MINOR, 0);
+ EFA_SET(&hinf->driver_ver, EFA_ADMIN_HOST_INFO_DRIVER_MODULE_TYPE, 0);
+ EFA_SET(&hinf->bdf, EFA_ADMIN_HOST_INFO_BUS, dev->pdev->bus->number);
+ EFA_SET(&hinf->bdf, EFA_ADMIN_HOST_INFO_DEVICE,
+ PCI_SLOT(dev->pdev->devfn));
+ EFA_SET(&hinf->bdf, EFA_ADMIN_HOST_INFO_FUNCTION,
+ PCI_FUNC(dev->pdev->devfn));
+ EFA_SET(&hinf->spec_ver, EFA_ADMIN_HOST_INFO_SPEC_MAJOR,
+ EFA_COMMON_SPEC_VERSION_MAJOR);
+ EFA_SET(&hinf->spec_ver, EFA_ADMIN_HOST_INFO_SPEC_MINOR,
+ EFA_COMMON_SPEC_VERSION_MINOR);
+ EFA_SET(&hinf->flags, EFA_ADMIN_HOST_INFO_INTREE, 1);
+ EFA_SET(&hinf->flags, EFA_ADMIN_HOST_INFO_GDR, 0);
+
+ efa_com_set_feature_ex(&dev->edev, &resp, &cmd, EFA_ADMIN_HOST_INFO,
+ hinf_dma, bufsz);
+
+ dma_free_coherent(&dev->pdev->dev, bufsz, hinf, hinf_dma);
+}
+
static const struct ib_device_ops efa_dev_ops = {
.owner = THIS_MODULE,
.driver_id = RDMA_DRIVER_EFA,
@@ -251,6 +299,8 @@ static int efa_ib_device_add(struct efa_dev *dev)
if (err)
goto err_release_doorbell_bar;
+ efa_set_host_info(dev);
+
dev->ibdev.node_type = RDMA_NODE_UNSPECIFIED;
dev->ibdev.phys_port_cnt = 1;
dev->ibdev.num_comp_vectors = 1;
diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
index 5c57098a4aee..08313f7c73bc 100644
--- a/drivers/infiniband/hw/efa/efa_verbs.c
+++ b/drivers/infiniband/hw/efa/efa_verbs.c
@@ -37,13 +37,16 @@ struct efa_user_mmap_entry {
op(EFA_RX_DROPS, "rx_drops") \
op(EFA_SUBMITTED_CMDS, "submitted_cmds") \
op(EFA_COMPLETED_CMDS, "completed_cmds") \
+ op(EFA_CMDS_ERR, "cmds_err") \
op(EFA_NO_COMPLETION_CMDS, "no_completion_cmds") \
op(EFA_KEEP_ALIVE_RCVD, "keep_alive_rcvd") \
op(EFA_ALLOC_PD_ERR, "alloc_pd_err") \
op(EFA_CREATE_QP_ERR, "create_qp_err") \
+ op(EFA_CREATE_CQ_ERR, "create_cq_err") \
op(EFA_REG_MR_ERR, "reg_mr_err") \
op(EFA_ALLOC_UCONTEXT_ERR, "alloc_ucontext_err") \
- op(EFA_CREATE_AH_ERR, "create_ah_err")
+ op(EFA_CREATE_AH_ERR, "create_ah_err") \
+ op(EFA_MMAP_ERR, "mmap_err")
#define EFA_STATS_ENUM(ename, name) ename,
#define EFA_STATS_STR(ename, name) [ename] = name,
@@ -1568,6 +1571,7 @@ static int __efa_mmap(struct efa_dev *dev, struct efa_ucontext *ucontext,
ibdev_dbg(&dev->ibdev,
"pgoff[%#lx] does not have valid entry\n",
vma->vm_pgoff);
+ atomic64_inc(&dev->stats.sw_stats.mmap_err);
return -EINVAL;
}
entry = to_emmap(rdma_entry);
@@ -1603,12 +1607,14 @@ static int __efa_mmap(struct efa_dev *dev, struct efa_ucontext *ucontext,
err = -EINVAL;
}
- if (err)
+ if (err) {
ibdev_dbg(
&dev->ibdev,
"Couldn't mmap address[%#llx] length[%#zx] mmap_flag[%d] err[%d]\n",
entry->address, rdma_entry->npages * PAGE_SIZE,
entry->mmap_flag, err);
+ atomic64_inc(&dev->stats.sw_stats.mmap_err);
+ }
rdma_user_mmap_entry_put(rdma_entry);
return err;
@@ -1639,10 +1645,10 @@ static int efa_ah_destroy(struct efa_dev *dev, struct efa_ah *ah)
}
int efa_create_ah(struct ib_ah *ibah,
- struct rdma_ah_attr *ah_attr,
- u32 flags,
+ struct rdma_ah_init_attr *init_attr,
struct ib_udata *udata)
{
+ struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
struct efa_dev *dev = to_edev(ibah->device);
struct efa_com_create_ah_params params = {};
struct efa_ibv_create_ah_resp resp = {};
@@ -1650,7 +1656,7 @@ int efa_create_ah(struct ib_ah *ibah,
struct efa_ah *ah = to_eah(ibah);
int err;
- if (!(flags & RDMA_CREATE_AH_SLEEPABLE)) {
+ if (!(init_attr->flags & RDMA_CREATE_AH_SLEEPABLE)) {
ibdev_dbg(&dev->ibdev,
"Create address handle is not supported in atomic context\n");
err = -EOPNOTSUPP;
@@ -1747,15 +1753,18 @@ int efa_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
as = &dev->edev.aq.stats;
stats->value[EFA_SUBMITTED_CMDS] = atomic64_read(&as->submitted_cmd);
stats->value[EFA_COMPLETED_CMDS] = atomic64_read(&as->completed_cmd);
+ stats->value[EFA_CMDS_ERR] = atomic64_read(&as->cmd_err);
stats->value[EFA_NO_COMPLETION_CMDS] = atomic64_read(&as->no_completion);
s = &dev->stats;
stats->value[EFA_KEEP_ALIVE_RCVD] = atomic64_read(&s->keep_alive_rcvd);
stats->value[EFA_ALLOC_PD_ERR] = atomic64_read(&s->sw_stats.alloc_pd_err);
stats->value[EFA_CREATE_QP_ERR] = atomic64_read(&s->sw_stats.create_qp_err);
+ stats->value[EFA_CREATE_CQ_ERR] = atomic64_read(&s->sw_stats.create_cq_err);
stats->value[EFA_REG_MR_ERR] = atomic64_read(&s->sw_stats.reg_mr_err);
stats->value[EFA_ALLOC_UCONTEXT_ERR] = atomic64_read(&s->sw_stats.alloc_ucontext_err);
stats->value[EFA_CREATE_AH_ERR] = atomic64_read(&s->sw_stats.create_ah_err);
+ stats->value[EFA_MMAP_ERR] = atomic64_read(&s->sw_stats.mmap_err);
return ARRAY_SIZE(efa_stats_names);
}
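The stats list above is an X-macro: the new cmds_err, create_cq_err and mmap_err counters are declared once, and both the enum and the name table expand from the same list. A minimal sketch of the pattern with made-up names:

/* One list definition drives both the enum and the string table. */
#define EXAMPLE_STATS(op)                 \
	op(EXAMPLE_CMDS_ERR, "cmds_err")  \
	op(EXAMPLE_MMAP_ERR, "mmap_err")

#define EXAMPLE_STATS_ENUM(ename, name) ename,
#define EXAMPLE_STATS_STR(ename, name)  [ename] = name,

enum example_stats_type {
	EXAMPLE_STATS(EXAMPLE_STATS_ENUM)
};

static const char *const example_stats_names[] = {
	EXAMPLE_STATS(EXAMPLE_STATS_STR)
};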
diff --git a/drivers/infiniband/hw/hfi1/Makefile b/drivers/infiniband/hw/hfi1/Makefile
index 0405d26d0833..2e89ec10efed 100644
--- a/drivers/infiniband/hw/hfi1/Makefile
+++ b/drivers/infiniband/hw/hfi1/Makefile
@@ -22,9 +22,13 @@ hfi1-y := \
init.o \
intr.o \
iowait.o \
+ ipoib_main.o \
+ ipoib_rx.o \
+ ipoib_tx.o \
mad.o \
mmu_rb.o \
msix.o \
+ netdev_rx.o \
opfn.o \
pcie.o \
pio.o \
diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
index 1aeea5d65c01..2a91b8d95e12 100644
--- a/drivers/infiniband/hw/hfi1/affinity.c
+++ b/drivers/infiniband/hw/hfi1/affinity.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015 - 2018 Intel Corporation.
+ * Copyright(c) 2015 - 2020 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -64,6 +64,7 @@ struct hfi1_affinity_node_list node_affinity = {
static const char * const irq_type_names[] = {
"SDMA",
"RCVCTXT",
+ "NETDEVCTXT",
"GENERAL",
"OTHER",
};
@@ -915,6 +916,11 @@ static int get_irq_affinity(struct hfi1_devdata *dd,
set = &entry->rcv_intr;
scnprintf(extra, 64, "ctxt %u", rcd->ctxt);
break;
+ case IRQ_NETDEVCTXT:
+ rcd = (struct hfi1_ctxtdata *)msix->arg;
+ set = &entry->def_intr;
+ scnprintf(extra, 64, "ctxt %u", rcd->ctxt);
+ break;
default:
dd_dev_err(dd, "Invalid IRQ type %d\n", msix->type);
return -EINVAL;
@@ -987,6 +993,10 @@ void hfi1_put_irq_affinity(struct hfi1_devdata *dd,
if (rcd->ctxt != HFI1_CTRL_CTXT)
set = &entry->rcv_intr;
break;
+ case IRQ_NETDEVCTXT:
+ rcd = (struct hfi1_ctxtdata *)msix->arg;
+ set = &entry->def_intr;
+ break;
default:
mutex_unlock(&node_affinity.lock);
return;
diff --git a/drivers/infiniband/hw/hfi1/affinity.h b/drivers/infiniband/hw/hfi1/affinity.h
index 6a7e6ea4e426..f94ed5d7c7a3 100644
--- a/drivers/infiniband/hw/hfi1/affinity.h
+++ b/drivers/infiniband/hw/hfi1/affinity.h
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015 - 2018 Intel Corporation.
+ * Copyright(c) 2015 - 2020 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -52,6 +52,7 @@
enum irq_type {
IRQ_SDMA,
IRQ_RCVCTXT,
+ IRQ_NETDEVCTXT,
IRQ_GENERAL,
IRQ_OTHER
};
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index e0b1238d31df..15f9c635f292 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015 - 2018 Intel Corporation.
+ * Copyright(c) 2015 - 2020 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -66,10 +66,7 @@
#include "affinity.h"
#include "debugfs.h"
#include "fault.h"
-
-uint kdeth_qp;
-module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
-MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");
+#include "netdev.h"
uint num_vls = HFI1_MAX_VLS_SUPPORTED;
module_param(num_vls, uint, S_IRUGO);
@@ -128,13 +125,15 @@ struct flag_table {
/*
* RSM instance allocation
- * 0 - Verbs
- * 1 - User Fecn Handling
- * 2 - Vnic
+ * 0 - User Fecn Handling
+ * 1 - Vnic
+ * 2 - AIP
+ * 3 - Verbs
*/
-#define RSM_INS_VERBS 0
-#define RSM_INS_FECN 1
-#define RSM_INS_VNIC 2
+#define RSM_INS_FECN 0
+#define RSM_INS_VNIC 1
+#define RSM_INS_AIP 2
+#define RSM_INS_VERBS 3
/* Bit offset into the GUID which carries HFI id information */
#define GUID_HFI_INDEX_SHIFT 39
@@ -175,6 +174,25 @@ struct flag_table {
/* QPN[m+n:1] QW 1, OFFSET 1 */
#define QPN_SELECT_OFFSET ((1ull << QW_SHIFT) | (1ull))
+/* RSM fields for AIP */
+/* LRH.BTH above is reused for this rule */
+
+/* BTH.DESTQP: QW 1, OFFSET 16 for match */
+#define BTH_DESTQP_QW 1ull
+#define BTH_DESTQP_BIT_OFFSET 16ull
+#define BTH_DESTQP_OFFSET(off) ((BTH_DESTQP_QW << QW_SHIFT) | (off))
+#define BTH_DESTQP_MATCH_OFFSET BTH_DESTQP_OFFSET(BTH_DESTQP_BIT_OFFSET)
+#define BTH_DESTQP_MASK 0xFFull
+#define BTH_DESTQP_VALUE 0x81ull
+
+/* DETH.SQPN: QW 1 Offset 56 for select */
+/* We use the 8 most significant Source QPN bits as entropy for AIP */
+#define DETH_AIP_SQPN_QW 3ull
+#define DETH_AIP_SQPN_BIT_OFFSET 56ull
+#define DETH_AIP_SQPN_OFFSET(off) ((DETH_AIP_SQPN_QW << QW_SHIFT) | (off))
+#define DETH_AIP_SQPN_SELECT_OFFSET \
+ DETH_AIP_SQPN_OFFSET(DETH_AIP_SQPN_BIT_OFFSET)
+
/* RSM fields for Vnic */
/* L2_TYPE: QW 0, OFFSET 61 - for match */
#define L2_TYPE_QW 0ull
@@ -8463,6 +8481,49 @@ static void hfi1_rcd_eoi_intr(struct hfi1_ctxtdata *rcd)
local_irq_restore(flags);
}
+/**
+ * hfi1_netdev_rx_napi - napi poll function to move eoi inline
+ * @napi - pointer to napi object
+ * @budget - netdev budget
+ */
+int hfi1_netdev_rx_napi(struct napi_struct *napi, int budget)
+{
+ struct hfi1_netdev_rxq *rxq = container_of(napi,
+ struct hfi1_netdev_rxq, napi);
+ struct hfi1_ctxtdata *rcd = rxq->rcd;
+ int work_done = 0;
+
+ work_done = rcd->do_interrupt(rcd, budget);
+
+ if (work_done < budget) {
+ napi_complete_done(napi, work_done);
+ hfi1_rcd_eoi_intr(rcd);
+ }
+
+ return work_done;
+}
+
+/* Receive packet napi handler for netdevs VNIC and AIP */
+irqreturn_t receive_context_interrupt_napi(int irq, void *data)
+{
+ struct hfi1_ctxtdata *rcd = data;
+
+ receive_interrupt_common(rcd);
+
+ if (likely(rcd->napi)) {
+ if (likely(napi_schedule_prep(rcd->napi)))
+ __napi_schedule_irqoff(rcd->napi);
+ else
+ __hfi1_rcd_eoi_intr(rcd);
+ } else {
+ WARN_ONCE(1, "Napi IRQ handler without napi set up ctxt=%d\n",
+ rcd->ctxt);
+ __hfi1_rcd_eoi_intr(rcd);
+ }
+
+ return IRQ_HANDLED;
+}
+
/*
* Receive packet IRQ handler. This routine expects to be on its own IRQ.
* This routine will try to handle packets immediately (latency), but if
@@ -13330,13 +13391,12 @@ static int set_up_interrupts(struct hfi1_devdata *dd)
* in array of contexts
* freectxts - number of free user contexts
* num_send_contexts - number of PIO send contexts being used
- * num_vnic_contexts - number of contexts reserved for VNIC
+ * num_netdev_contexts - number of contexts reserved for netdev
*/
static int set_up_context_variables(struct hfi1_devdata *dd)
{
unsigned long num_kernel_contexts;
- u16 num_vnic_contexts = HFI1_NUM_VNIC_CTXT;
- int total_contexts;
+ u16 num_netdev_contexts;
int ret;
unsigned ngroups;
int rmt_count;
@@ -13373,13 +13433,6 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
num_kernel_contexts = send_contexts - num_vls - 1;
}
- /* Accommodate VNIC contexts if possible */
- if ((num_kernel_contexts + num_vnic_contexts) > rcv_contexts) {
- dd_dev_err(dd, "No receive contexts available for VNIC\n");
- num_vnic_contexts = 0;
- }
- total_contexts = num_kernel_contexts + num_vnic_contexts;
-
/*
* User contexts:
* - default to 1 user context per real (non-HT) CPU core if
@@ -13392,28 +13445,32 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
/*
* Adjust the counts given a global max.
*/
- if (total_contexts + n_usr_ctxts > rcv_contexts) {
+ if (num_kernel_contexts + n_usr_ctxts > rcv_contexts) {
dd_dev_err(dd,
- "Reducing # user receive contexts to: %d, from %u\n",
- rcv_contexts - total_contexts,
+ "Reducing # user receive contexts to: %u, from %u\n",
+ (u32)(rcv_contexts - num_kernel_contexts),
n_usr_ctxts);
/* recalculate */
- n_usr_ctxts = rcv_contexts - total_contexts;
+ n_usr_ctxts = rcv_contexts - num_kernel_contexts;
}
+ num_netdev_contexts =
+ hfi1_num_netdev_contexts(dd, rcv_contexts -
+ (num_kernel_contexts + n_usr_ctxts),
+ &node_affinity.real_cpu_mask);
/*
* The RMT entries are currently allocated as shown below:
* 1. QOS (0 to 128 entries);
* 2. FECN (num_kernel_context - 1 + num_user_contexts +
- * num_vnic_contexts);
- * 3. VNIC (num_vnic_contexts).
- * It should be noted that FECN oversubscribe num_vnic_contexts
- * entries of RMT because both VNIC and PSM could allocate any receive
+ * num_netdev_contexts);
+ * 3. netdev (num_netdev_contexts).
+ * It should be noted that FECN oversubscribe num_netdev_contexts
+ * entries of RMT because both netdev and PSM could allocate any receive
 * context between dd->first_dyn_alloc_ctxt and dd->num_rcv_contexts,
* and PSM FECN must reserve an RMT entry for each possible PSM receive
* context.
*/
- rmt_count = qos_rmt_entries(dd, NULL, NULL) + (num_vnic_contexts * 2);
+ rmt_count = qos_rmt_entries(dd, NULL, NULL) + (num_netdev_contexts * 2);
if (HFI1_CAP_IS_KSET(TID_RDMA))
rmt_count += num_kernel_contexts - 1;
if (rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) {
@@ -13426,21 +13483,20 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
n_usr_ctxts = user_rmt_reduced;
}
- total_contexts += n_usr_ctxts;
-
- /* the first N are kernel contexts, the rest are user/vnic contexts */
- dd->num_rcv_contexts = total_contexts;
+ /* the first N are kernel contexts, the rest are user/netdev contexts */
+ dd->num_rcv_contexts =
+ num_kernel_contexts + n_usr_ctxts + num_netdev_contexts;
dd->n_krcv_queues = num_kernel_contexts;
dd->first_dyn_alloc_ctxt = num_kernel_contexts;
- dd->num_vnic_contexts = num_vnic_contexts;
+ dd->num_netdev_contexts = num_netdev_contexts;
dd->num_user_contexts = n_usr_ctxts;
dd->freectxts = n_usr_ctxts;
dd_dev_info(dd,
- "rcv contexts: chip %d, used %d (kernel %d, vnic %u, user %u)\n",
+ "rcv contexts: chip %d, used %d (kernel %d, netdev %u, user %u)\n",
rcv_contexts,
(int)dd->num_rcv_contexts,
(int)dd->n_krcv_queues,
- dd->num_vnic_contexts,
+ dd->num_netdev_contexts,
dd->num_user_contexts);
/*
@@ -14119,21 +14175,12 @@ static void init_early_variables(struct hfi1_devdata *dd)
static void init_kdeth_qp(struct hfi1_devdata *dd)
{
- /* user changed the KDETH_QP */
- if (kdeth_qp != 0 && kdeth_qp >= 0xff) {
- /* out of range or illegal value */
- dd_dev_err(dd, "Invalid KDETH queue pair prefix, ignoring");
- kdeth_qp = 0;
- }
- if (kdeth_qp == 0) /* not set, or failed range check */
- kdeth_qp = DEFAULT_KDETH_QP;
-
write_csr(dd, SEND_BTH_QP,
- (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK) <<
+ (RVT_KDETH_QP_PREFIX & SEND_BTH_QP_KDETH_QP_MASK) <<
SEND_BTH_QP_KDETH_QP_SHIFT);
write_csr(dd, RCV_BTH_QP,
- (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK) <<
+ (RVT_KDETH_QP_PREFIX & RCV_BTH_QP_KDETH_QP_MASK) <<
RCV_BTH_QP_KDETH_QP_SHIFT);
}
@@ -14249,6 +14296,12 @@ static void complete_rsm_map_table(struct hfi1_devdata *dd,
}
}
+/* Is a receive side mapping rule programmed at this index? */
+static bool has_rsm_rule(struct hfi1_devdata *dd, u8 rule_index)
+{
+ return read_csr(dd, RCV_RSM_CFG + (8 * rule_index)) != 0;
+}
+
/*
* Add a receive side mapping rule.
*/
@@ -14485,77 +14538,138 @@ static void init_fecn_handling(struct hfi1_devdata *dd,
rmt->used += total_cnt;
}
-/* Initialize RSM for VNIC */
-void hfi1_init_vnic_rsm(struct hfi1_devdata *dd)
+static inline bool hfi1_is_rmt_full(int start, int spare)
+{
+ return (start + spare) > NUM_MAP_ENTRIES;
+}
+
+static bool hfi1_netdev_update_rmt(struct hfi1_devdata *dd)
{
u8 i, j;
u8 ctx_id = 0;
u64 reg;
u32 regoff;
- struct rsm_rule_data rrd;
+ int rmt_start = hfi1_netdev_get_free_rmt_idx(dd);
+ int ctxt_count = hfi1_netdev_ctxt_count(dd);
- if (hfi1_vnic_is_rsm_full(dd, NUM_VNIC_MAP_ENTRIES)) {
- dd_dev_err(dd, "Vnic RSM disabled, rmt entries used = %d\n",
- dd->vnic.rmt_start);
- return;
+ /* We already have contexts mapped in RMT */
+ if (has_rsm_rule(dd, RSM_INS_VNIC) || has_rsm_rule(dd, RSM_INS_AIP)) {
+ dd_dev_info(dd, "Contexts are already mapped in RMT\n");
+ return true;
+ }
+
+ if (hfi1_is_rmt_full(rmt_start, NUM_NETDEV_MAP_ENTRIES)) {
+ dd_dev_err(dd, "Not enough RMT entries used = %d\n",
+ rmt_start);
+ return false;
}
- dev_dbg(&(dd)->pcidev->dev, "Vnic rsm start = %d, end %d\n",
- dd->vnic.rmt_start,
- dd->vnic.rmt_start + NUM_VNIC_MAP_ENTRIES);
+ dev_dbg(&(dd)->pcidev->dev, "RMT start = %d, end %d\n",
+ rmt_start,
+ rmt_start + NUM_NETDEV_MAP_ENTRIES);
/* Update RSM mapping table, 32 regs, 256 entries - 1 ctx per byte */
- regoff = RCV_RSM_MAP_TABLE + (dd->vnic.rmt_start / 8) * 8;
+ regoff = RCV_RSM_MAP_TABLE + (rmt_start / 8) * 8;
reg = read_csr(dd, regoff);
- for (i = 0; i < NUM_VNIC_MAP_ENTRIES; i++) {
- /* Update map register with vnic context */
- j = (dd->vnic.rmt_start + i) % 8;
+ for (i = 0; i < NUM_NETDEV_MAP_ENTRIES; i++) {
+ /* Update map register with netdev context */
+ j = (rmt_start + i) % 8;
reg &= ~(0xffllu << (j * 8));
- reg |= (u64)dd->vnic.ctxt[ctx_id++]->ctxt << (j * 8);
- /* Wrap up vnic ctx index */
- ctx_id %= dd->vnic.num_ctxt;
+ reg |= (u64)hfi1_netdev_get_ctxt(dd, ctx_id++)->ctxt << (j * 8);
+ /* Wrap up netdev ctx index */
+ ctx_id %= ctxt_count;
/* Write back map register */
- if (j == 7 || ((i + 1) == NUM_VNIC_MAP_ENTRIES)) {
+ if (j == 7 || ((i + 1) == NUM_NETDEV_MAP_ENTRIES)) {
dev_dbg(&(dd)->pcidev->dev,
- "Vnic rsm map reg[%d] =0x%llx\n",
+ "RMT[%d] =0x%llx\n",
regoff - RCV_RSM_MAP_TABLE, reg);
write_csr(dd, regoff, reg);
regoff += 8;
- if (i < (NUM_VNIC_MAP_ENTRIES - 1))
+ if (i < (NUM_NETDEV_MAP_ENTRIES - 1))
reg = read_csr(dd, regoff);
}
}
- /* Add rule for vnic */
- rrd.offset = dd->vnic.rmt_start;
- rrd.pkt_type = 4;
- /* Match 16B packets */
- rrd.field1_off = L2_TYPE_MATCH_OFFSET;
- rrd.mask1 = L2_TYPE_MASK;
- rrd.value1 = L2_16B_VALUE;
- /* Match ETH L4 packets */
- rrd.field2_off = L4_TYPE_MATCH_OFFSET;
- rrd.mask2 = L4_16B_TYPE_MASK;
- rrd.value2 = L4_16B_ETH_VALUE;
- /* Calc context from veswid and entropy */
- rrd.index1_off = L4_16B_HDR_VESWID_OFFSET;
- rrd.index1_width = ilog2(NUM_VNIC_MAP_ENTRIES);
- rrd.index2_off = L2_16B_ENTROPY_OFFSET;
- rrd.index2_width = ilog2(NUM_VNIC_MAP_ENTRIES);
- add_rsm_rule(dd, RSM_INS_VNIC, &rrd);
-
- /* Enable RSM if not already enabled */
+ return true;
+}
+
+static void hfi1_enable_rsm_rule(struct hfi1_devdata *dd,
+ int rule, struct rsm_rule_data *rrd)
+{
+ if (!hfi1_netdev_update_rmt(dd)) {
+ dd_dev_err(dd, "Failed to update RMT for RSM%d rule\n", rule);
+ return;
+ }
+
+ add_rsm_rule(dd, rule, rrd);
add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
}
+void hfi1_init_aip_rsm(struct hfi1_devdata *dd)
+{
+ /*
+ * go through with the initialisation only if this rule actually doesn't
+ * exist yet
+ */
+ if (atomic_fetch_inc(&dd->ipoib_rsm_usr_num) == 0) {
+ int rmt_start = hfi1_netdev_get_free_rmt_idx(dd);
+ struct rsm_rule_data rrd = {
+ .offset = rmt_start,
+ .pkt_type = IB_PACKET_TYPE,
+ .field1_off = LRH_BTH_MATCH_OFFSET,
+ .mask1 = LRH_BTH_MASK,
+ .value1 = LRH_BTH_VALUE,
+ .field2_off = BTH_DESTQP_MATCH_OFFSET,
+ .mask2 = BTH_DESTQP_MASK,
+ .value2 = BTH_DESTQP_VALUE,
+ .index1_off = DETH_AIP_SQPN_SELECT_OFFSET +
+ ilog2(NUM_NETDEV_MAP_ENTRIES),
+ .index1_width = ilog2(NUM_NETDEV_MAP_ENTRIES),
+ .index2_off = DETH_AIP_SQPN_SELECT_OFFSET,
+ .index2_width = ilog2(NUM_NETDEV_MAP_ENTRIES)
+ };
+
+ hfi1_enable_rsm_rule(dd, RSM_INS_AIP, &rrd);
+ }
+}
+
+/* Initialize RSM for VNIC */
+void hfi1_init_vnic_rsm(struct hfi1_devdata *dd)
+{
+ int rmt_start = hfi1_netdev_get_free_rmt_idx(dd);
+ struct rsm_rule_data rrd = {
+ /* Add rule for vnic */
+ .offset = rmt_start,
+ .pkt_type = 4,
+ /* Match 16B packets */
+ .field1_off = L2_TYPE_MATCH_OFFSET,
+ .mask1 = L2_TYPE_MASK,
+ .value1 = L2_16B_VALUE,
+ /* Match ETH L4 packets */
+ .field2_off = L4_TYPE_MATCH_OFFSET,
+ .mask2 = L4_16B_TYPE_MASK,
+ .value2 = L4_16B_ETH_VALUE,
+ /* Calc context from veswid and entropy */
+ .index1_off = L4_16B_HDR_VESWID_OFFSET,
+ .index1_width = ilog2(NUM_NETDEV_MAP_ENTRIES),
+ .index2_off = L2_16B_ENTROPY_OFFSET,
+ .index2_width = ilog2(NUM_NETDEV_MAP_ENTRIES)
+ };
+
+ hfi1_enable_rsm_rule(dd, RSM_INS_VNIC, &rrd);
+}
+
void hfi1_deinit_vnic_rsm(struct hfi1_devdata *dd)
{
clear_rsm_rule(dd, RSM_INS_VNIC);
+}
- /* Disable RSM if used only by vnic */
- if (dd->vnic.rmt_start == 0)
- clear_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
+void hfi1_deinit_aip_rsm(struct hfi1_devdata *dd)
+{
+ /* only actually clear the rule if it's the last user asking to do so */
+ if (atomic_fetch_add_unless(&dd->ipoib_rsm_usr_num, -1, 0) == 1)
+ clear_rsm_rule(dd, RSM_INS_AIP);
}
static int init_rxe(struct hfi1_devdata *dd)
@@ -14574,8 +14688,8 @@ static int init_rxe(struct hfi1_devdata *dd)
init_qos(dd, rmt);
init_fecn_handling(dd, rmt);
complete_rsm_map_table(dd, rmt);
- /* record number of used rsm map entries for vnic */
- dd->vnic.rmt_start = rmt->used;
+ /* record number of used rsm map entries for netdev */
+ hfi1_netdev_set_free_rmt_idx(dd, rmt->used);
kfree(rmt);
/*
@@ -15129,6 +15243,10 @@ int hfi1_init_dd(struct hfi1_devdata *dd)
(dd->revision >> CCE_REVISION_SW_SHIFT)
& CCE_REVISION_SW_MASK);
+ /* alloc netdev data */
+ if (hfi1_netdev_alloc(dd))
+ goto bail_cleanup;
+
ret = set_up_context_variables(dd);
if (ret)
goto bail_cleanup;
@@ -15229,6 +15347,7 @@ bail_clear_intr:
hfi1_comp_vectors_clean_up(dd);
msix_clean_up_interrupts(dd);
bail_cleanup:
+ hfi1_netdev_free(dd);
hfi1_pcie_ddcleanup(dd);
bail_free:
hfi1_free_devdata(dd);
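Worth noting in the chip.c hunks above: the AIP RSM rule is shared, so hfi1_init_aip_rsm()/hfi1_deinit_aip_rsm() count users with atomic_fetch_inc() and atomic_fetch_add_unless() so that only the first user installs the rule and only the last clears it. A standalone sketch of that first-user/last-user pattern (illustrative names):

#include <linux/atomic.h>
#include <linux/printk.h>

static atomic_t example_rule_users = ATOMIC_INIT(0);

static void example_rule_get(void)
{
	/* only the 0 -> 1 transition installs the shared rule */
	if (atomic_fetch_inc(&example_rule_users) == 0)
		pr_info("first user: install the rule\n");
}

static void example_rule_put(void)
{
	/* never drop below zero; only the 1 -> 0 transition clears */
	if (atomic_fetch_add_unless(&example_rule_users, -1, 0) == 1)
		pr_info("last user: clear the rule\n");
}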
diff --git a/drivers/infiniband/hw/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h
index 725509261016..2c6f2de74d4d 100644
--- a/drivers/infiniband/hw/hfi1/chip.h
+++ b/drivers/infiniband/hw/hfi1/chip.h
@@ -1,7 +1,7 @@
#ifndef _CHIP_H
#define _CHIP_H
/*
- * Copyright(c) 2015 - 2018 Intel Corporation.
+ * Copyright(c) 2015 - 2020 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -1447,6 +1447,7 @@ irqreturn_t general_interrupt(int irq, void *data);
irqreturn_t sdma_interrupt(int irq, void *data);
irqreturn_t receive_context_interrupt(int irq, void *data);
irqreturn_t receive_context_thread(int irq, void *data);
+irqreturn_t receive_context_interrupt_napi(int irq, void *data);
int set_intr_bits(struct hfi1_devdata *dd, u16 first, u16 last, bool set);
void init_qsfp_int(struct hfi1_devdata *dd);
@@ -1455,6 +1456,8 @@ void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr);
void remap_sdma_interrupts(struct hfi1_devdata *dd, int engine, int msix_intr);
void reset_interrupts(struct hfi1_devdata *dd);
u8 hfi1_get_qp_map(struct hfi1_devdata *dd, u8 idx);
+void hfi1_init_aip_rsm(struct hfi1_devdata *dd);
+void hfi1_deinit_aip_rsm(struct hfi1_devdata *dd);
/*
* Interrupt source table.
diff --git a/drivers/infiniband/hw/hfi1/common.h b/drivers/infiniband/hw/hfi1/common.h
index 40a1ff0c8a8e..ff423e546b80 100644
--- a/drivers/infiniband/hw/hfi1/common.h
+++ b/drivers/infiniband/hw/hfi1/common.h
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015 - 2018 Intel Corporation.
+ * Copyright(c) 2015 - 2020 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -72,13 +72,6 @@
* compilation unit
*/
-/*
- * If a packet's QP[23:16] bits match this value, then it is
- * a PSM packet and the hardware will expect a KDETH header
- * following the BTH.
- */
-#define DEFAULT_KDETH_QP 0x80
-
/* driver/hw feature set bitmask */
#define HFI1_CAP_USER_SHIFT 24
#define HFI1_CAP_MASK ((1UL << HFI1_CAP_USER_SHIFT) - 1)
@@ -149,7 +142,8 @@
HFI1_CAP_NO_INTEGRITY | \
HFI1_CAP_PKEY_CHECK | \
HFI1_CAP_TID_RDMA | \
- HFI1_CAP_OPFN) << \
+ HFI1_CAP_OPFN | \
+ HFI1_CAP_AIP) << \
HFI1_CAP_USER_SHIFT)
/*
* Set of capabilities that need to be enabled for kernel context in
@@ -166,6 +160,7 @@
HFI1_CAP_PKEY_CHECK | \
HFI1_CAP_MULTI_PKT_EGR | \
HFI1_CAP_EXTENDED_PSN | \
+ HFI1_CAP_AIP | \
((HFI1_CAP_HDRSUPP | \
HFI1_CAP_MULTI_PKT_EGR | \
HFI1_CAP_STATIC_RATE_CTRL | \
diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c
index 049d15befe58..a40701a6e1b6 100644
--- a/drivers/infiniband/hw/hfi1/driver.c
+++ b/drivers/infiniband/hw/hfi1/driver.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015-2018 Intel Corporation.
+ * Copyright(c) 2015-2020 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -54,6 +54,7 @@
#include <linux/module.h>
#include <linux/prefetch.h>
#include <rdma/ib_verbs.h>
+#include <linux/etherdevice.h>
#include "hfi.h"
#include "trace.h"
@@ -63,6 +64,9 @@
#include "vnic.h"
#include "fault.h"
+#include "ipoib.h"
+#include "netdev.h"
+
#undef pr_fmt
#define pr_fmt(fmt) DRIVER_NAME ": " fmt
@@ -748,6 +752,39 @@ static noinline int skip_rcv_packet(struct hfi1_packet *packet, int thread)
return ret;
}
+static void process_rcv_packet_napi(struct hfi1_packet *packet)
+{
+ packet->etype = rhf_rcv_type(packet->rhf);
+
+ /* total length */
+ packet->tlen = rhf_pkt_len(packet->rhf); /* in bytes */
+ /* retrieve eager buffer details */
+ packet->etail = rhf_egr_index(packet->rhf);
+ packet->ebuf = get_egrbuf(packet->rcd, packet->rhf,
+ &packet->updegr);
+ /*
+ * Prefetch the contents of the eager buffer. It is
+ * OK to send a negative length to prefetch_range().
+ * The +2 is the size of the RHF.
+ */
+ prefetch_range(packet->ebuf,
+ packet->tlen - ((packet->rcd->rcvhdrqentsize -
+ (rhf_hdrq_offset(packet->rhf)
+ + 2)) * 4));
+
+ packet->rcd->rhf_rcv_function_map[packet->etype](packet);
+ packet->numpkt++;
+
+ /* Set up for the next packet */
+ packet->rhqoff += packet->rsize;
+ if (packet->rhqoff >= packet->maxcnt)
+ packet->rhqoff = 0;
+
+ packet->rhf_addr = (__le32 *)packet->rcd->rcvhdrq + packet->rhqoff +
+ packet->rcd->rhf_offset;
+ packet->rhf = rhf_to_cpu(packet->rhf_addr);
+}
+
static inline int process_rcv_packet(struct hfi1_packet *packet, int thread)
{
int ret;
@@ -827,6 +864,36 @@ static inline void finish_packet(struct hfi1_packet *packet)
}
/*
+ * handle_receive_interrupt_napi_fp - receive a packet
+ * @rcd: the context
+ * @budget: polling budget
+ *
+ * Called from interrupt handler for receive interrupt.
+ * This is the fast path interrupt handler
+ * when executing in a napi softirq environment.
+ */
+int handle_receive_interrupt_napi_fp(struct hfi1_ctxtdata *rcd, int budget)
+{
+ struct hfi1_packet packet;
+
+ init_packet(rcd, &packet);
+ if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf)))
+ goto bail;
+
+ while (packet.numpkt < budget) {
+ process_rcv_packet_napi(&packet);
+ if (hfi1_seq_incr(rcd, rhf_rcv_seq(packet.rhf)))
+ break;
+
+ process_rcv_update(0, &packet);
+ }
+ hfi1_set_rcd_head(rcd, packet.rhqoff);
+bail:
+ finish_packet(&packet);
+ return packet.numpkt;
+}
+
+/*
* Handle receive interrupts when using the no dma rtail option.
*/
int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *rcd, int thread)
@@ -1074,6 +1141,63 @@ bail:
}
/*
+ * handle_receive_interrupt_napi_sp - receive a packet
+ * @rcd: the context
+ * @budget: polling budget
+ *
+ * Called from interrupt handler for errors or receive interrupt.
+ * This is the slow path interrupt handler
+ * when executing in a napi softirq environment.
+ */
+int handle_receive_interrupt_napi_sp(struct hfi1_ctxtdata *rcd, int budget)
+{
+ struct hfi1_devdata *dd = rcd->dd;
+ int last = RCV_PKT_OK;
+ bool needset = true;
+ struct hfi1_packet packet;
+
+ init_packet(rcd, &packet);
+ if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf)))
+ goto bail;
+
+ while (last != RCV_PKT_DONE && packet.numpkt < budget) {
+ if (hfi1_need_drop(dd)) {
+ /* On to the next packet */
+ packet.rhqoff += packet.rsize;
+ packet.rhf_addr = (__le32 *)rcd->rcvhdrq +
+ packet.rhqoff +
+ rcd->rhf_offset;
+ packet.rhf = rhf_to_cpu(packet.rhf_addr);
+
+ } else {
+ if (set_armed_to_active(&packet))
+ goto bail;
+ process_rcv_packet_napi(&packet);
+ }
+
+ if (hfi1_seq_incr(rcd, rhf_rcv_seq(packet.rhf)))
+ last = RCV_PKT_DONE;
+
+ if (needset) {
+ needset = false;
+ set_all_fastpath(dd, rcd);
+ }
+
+ process_rcv_update(last, &packet);
+ }
+
+ hfi1_set_rcd_head(rcd, packet.rhqoff);
+
+bail:
+ /*
+ * Always write head at end, and setup rcv interrupt, even
+ * if no packets were processed.
+ */
+ finish_packet(&packet);
+ return packet.numpkt;
+}
+
+/*
* We may discover in the interrupt that the hardware link state has
* changed from ARMED to ACTIVE (due to the arrival of a non-SC15 packet),
* and we need to update the driver's notion of the link state. We cannot
@@ -1550,6 +1674,82 @@ void handle_eflags(struct hfi1_packet *packet)
show_eflags_errs(packet);
}
+static void hfi1_ipoib_ib_rcv(struct hfi1_packet *packet)
+{
+ struct hfi1_ibport *ibp;
+ struct net_device *netdev;
+ struct hfi1_ctxtdata *rcd = packet->rcd;
+ struct napi_struct *napi = rcd->napi;
+ struct sk_buff *skb;
+ struct hfi1_netdev_rxq *rxq = container_of(napi,
+ struct hfi1_netdev_rxq, napi);
+ u32 extra_bytes;
+ u32 tlen, qpnum;
+ bool do_work, do_cnp;
+ struct hfi1_ipoib_dev_priv *priv;
+
+ trace_hfi1_rcvhdr(packet);
+
+ hfi1_setup_ib_header(packet);
+
+ packet->ohdr = &((struct ib_header *)packet->hdr)->u.oth;
+ packet->grh = NULL;
+
+ if (unlikely(rhf_err_flags(packet->rhf))) {
+ handle_eflags(packet);
+ return;
+ }
+
+ qpnum = ib_bth_get_qpn(packet->ohdr);
+ netdev = hfi1_netdev_get_data(rcd->dd, qpnum);
+ if (!netdev)
+ goto drop_no_nd;
+
+ trace_input_ibhdr(rcd->dd, packet, !!(rhf_dc_info(packet->rhf)));
+ trace_ctxt_rsm_hist(rcd->ctxt);
+
+ /* handle congestion notifications */
+ do_work = hfi1_may_ecn(packet);
+ if (unlikely(do_work)) {
+ do_cnp = (packet->opcode != IB_OPCODE_CNP);
+ (void)hfi1_process_ecn_slowpath(hfi1_ipoib_priv(netdev)->qp,
+ packet, do_cnp);
+ }
+
+ /*
+ * The split point is after the last byte of the DETH,
+ * so strip the padding, CRC and ICRC.
+ * tlen is the whole packet length, so we need to
+ * subtract the header size as well.
+ */
+ tlen = packet->tlen;
+ extra_bytes = ib_bth_get_pad(packet->ohdr) + (SIZE_OF_CRC << 2) +
+ packet->hlen;
+ if (unlikely(tlen < extra_bytes))
+ goto drop;
+
+ tlen -= extra_bytes;
+
+ skb = hfi1_ipoib_prepare_skb(rxq, tlen, packet->ebuf);
+ if (unlikely(!skb))
+ goto drop;
+
+ priv = hfi1_ipoib_priv(netdev);
+ hfi1_ipoib_update_rx_netstats(priv, 1, skb->len);
+
+ skb->dev = netdev;
+ skb->pkt_type = PACKET_HOST;
+ netif_receive_skb(skb);
+
+ return;
+
+drop:
+ ++netdev->stats.rx_dropped;
+drop_no_nd:
+ ibp = rcd_to_iport(packet->rcd);
+ ++ibp->rvp.n_pkt_drops;
+}
+
/*
* The following functions are called by the interrupt handler. They are type
* specific handlers for each packet type.
@@ -1572,28 +1772,10 @@ static void process_receive_ib(struct hfi1_packet *packet)
hfi1_ib_rcv(packet);
}
-static inline bool hfi1_is_vnic_packet(struct hfi1_packet *packet)
-{
- /* Packet received in VNIC context via RSM */
- if (packet->rcd->is_vnic)
- return true;
-
- if ((hfi1_16B_get_l2(packet->ebuf) == OPA_16B_L2_TYPE) &&
- (hfi1_16B_get_l4(packet->ebuf) == OPA_16B_L4_ETHR))
- return true;
-
- return false;
-}
-
static void process_receive_bypass(struct hfi1_packet *packet)
{
struct hfi1_devdata *dd = packet->rcd->dd;
- if (hfi1_is_vnic_packet(packet)) {
- hfi1_vnic_bypass_rcv(packet);
- return;
- }
-
if (hfi1_setup_bypass_packet(packet))
return;
@@ -1757,3 +1939,14 @@ const rhf_rcv_function_ptr normal_rhf_rcv_functions[] = {
[RHF_RCV_TYPE_INVALID6] = process_receive_invalid,
[RHF_RCV_TYPE_INVALID7] = process_receive_invalid,
};
+
+const rhf_rcv_function_ptr netdev_rhf_rcv_functions[] = {
+ [RHF_RCV_TYPE_EXPECTED] = process_receive_invalid,
+ [RHF_RCV_TYPE_EAGER] = process_receive_invalid,
+ [RHF_RCV_TYPE_IB] = hfi1_ipoib_ib_rcv,
+ [RHF_RCV_TYPE_ERROR] = process_receive_error,
+ [RHF_RCV_TYPE_BYPASS] = hfi1_vnic_bypass_rcv,
+ [RHF_RCV_TYPE_INVALID5] = process_receive_invalid,
+ [RHF_RCV_TYPE_INVALID6] = process_receive_invalid,
+ [RHF_RCV_TYPE_INVALID7] = process_receive_invalid,
+};
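The driver.c hunks above add the NAPI receive path (process_rcv_packet_napi, the fast/slow path handlers) and the netdev_rhf_rcv_functions dispatch table; the NAPI instance itself is registered in the new netdev_rx.c, which is not part of this excerpt. A hedged sketch of what that registration would look like on a kernel of this generation, assuming the poll prototype exported from netdev.h:

#include <linux/netdevice.h>

/* Assumed prototype, matching the poll function shown in chip.c above. */
int hfi1_netdev_rx_napi(struct napi_struct *napi, int budget);

static void example_register_rx_napi(struct net_device *dummy_netdev,
				     struct napi_struct *napi)
{
	/* (dev, napi, poll, weight) signature of this kernel generation */
	netif_napi_add(dummy_netdev, napi, hfi1_netdev_rx_napi, 64);
	napi_enable(napi);
}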
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index e7fdd70c6e78..8ca51e43cf53 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015-2017 Intel Corporation.
+ * Copyright(c) 2015-2020 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -1264,7 +1264,7 @@ static int get_base_info(struct hfi1_filedata *fd, unsigned long arg, u32 len)
memset(&binfo, 0, sizeof(binfo));
binfo.hw_version = dd->revision;
binfo.sw_version = HFI1_KERN_SWVERSION;
- binfo.bthqp = kdeth_qp;
+ binfo.bthqp = RVT_KDETH_QP_PREFIX;
binfo.jkey = uctxt->jkey;
/*
* If more than 64 contexts are enabled the allocated credit
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index b06c2594105a..b4c6bff60a4e 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -1,7 +1,7 @@
#ifndef _HFI1_KERNEL_H
#define _HFI1_KERNEL_H
/*
- * Copyright(c) 2015-2018 Intel Corporation.
+ * Copyright(c) 2015-2020 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -233,6 +233,8 @@ struct hfi1_ctxtdata {
intr_handler fast_handler;
/** slow handler */
intr_handler slow_handler;
+ /* napi pointer associated with netdev */
+ struct napi_struct *napi;
/* verbs rx_stats per rcd */
struct hfi1_opcode_stats_perctx *opstats;
/* clear interrupt mask */
@@ -383,11 +385,11 @@ struct hfi1_packet {
u32 rhqoff;
u32 dlid;
u32 slid;
+ int numpkt;
u16 tlen;
s16 etail;
u16 pkey;
u8 hlen;
- u8 numpkt;
u8 rsize;
u8 updegr;
u8 etype;
@@ -985,7 +987,7 @@ typedef void (*hfi1_make_req)(struct rvt_qp *qp,
struct hfi1_pkt_state *ps,
struct rvt_swqe *wqe);
extern const rhf_rcv_function_ptr normal_rhf_rcv_functions[];
-
+extern const rhf_rcv_function_ptr netdev_rhf_rcv_functions[];
/* return values for the RHF receive functions */
#define RHF_RCV_CONTINUE 0 /* keep going */
@@ -1045,23 +1047,10 @@ struct hfi1_asic_data {
#define NUM_MAP_ENTRIES 256
#define NUM_MAP_REGS 32
-/*
- * Number of VNIC contexts used. Ensure it is less than or equal to
- * max queues supported by VNIC (HFI1_VNIC_MAX_QUEUE).
- */
-#define HFI1_NUM_VNIC_CTXT 8
-
-/* Number of VNIC RSM entries */
-#define NUM_VNIC_MAP_ENTRIES 8
-
/* Virtual NIC information */
struct hfi1_vnic_data {
- struct hfi1_ctxtdata *ctxt[HFI1_NUM_VNIC_CTXT];
struct kmem_cache *txreq_cache;
- struct xarray vesws;
u8 num_vports;
- u8 rmt_start;
- u8 num_ctxt;
};
struct hfi1_vnic_vport_info;
@@ -1167,8 +1156,8 @@ struct hfi1_devdata {
u64 z_send_schedule;
u64 __percpu *send_schedule;
- /* number of reserved contexts for VNIC usage */
- u16 num_vnic_contexts;
+ /* number of reserved contexts for netdev usage */
+ u16 num_netdev_contexts;
/* number of receive contexts in use by the driver */
u32 num_rcv_contexts;
/* number of pio send contexts in use by the driver */
@@ -1417,12 +1406,12 @@ struct hfi1_devdata {
struct hfi1_vnic_data vnic;
/* Lock to protect IRQ SRC register access */
spinlock_t irq_src_lock;
-};
+ int vnic_num_vports;
+ struct net_device *dummy_netdev;
-static inline bool hfi1_vnic_is_rsm_full(struct hfi1_devdata *dd, int spare)
-{
- return (dd->vnic.rmt_start + spare) > NUM_MAP_ENTRIES;
-}
+ /* Keeps track of IPoIB RSM rule users */
+ atomic_t ipoib_rsm_usr_num;
+};
/* 8051 firmware version helper */
#define dc8051_ver(a, b, c) ((a) << 16 | (b) << 8 | (c))
@@ -1500,6 +1489,8 @@ struct hfi1_ctxtdata *hfi1_rcd_get_by_index(struct hfi1_devdata *dd, u16 ctxt);
int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread);
int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *rcd, int thread);
int handle_receive_interrupt_dma_rtail(struct hfi1_ctxtdata *rcd, int thread);
+int handle_receive_interrupt_napi_fp(struct hfi1_ctxtdata *rcd, int budget);
+int handle_receive_interrupt_napi_sp(struct hfi1_ctxtdata *rcd, int budget);
void set_all_slowpath(struct hfi1_devdata *dd);
extern const struct pci_device_id hfi1_pci_tbl[];
@@ -2250,7 +2241,6 @@ extern int num_user_contexts;
extern unsigned long n_krcvqs;
extern uint krcvqs[];
extern int krcvqsset;
-extern uint kdeth_qp;
extern uint loopback;
extern uint quick_linkup;
extern uint rcv_intr_timeout;
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index 3759d9233a1c..5eed4360695f 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015 - 2018 Intel Corporation.
+ * Copyright(c) 2015 - 2020 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -69,6 +69,7 @@
#include "affinity.h"
#include "vnic.h"
#include "exp_rcv.h"
+#include "netdev.h"
#undef pr_fmt
#define pr_fmt(fmt) DRIVER_NAME ": " fmt
@@ -374,6 +375,7 @@ int hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, int numa,
rcd->numa_id = numa;
rcd->rcv_array_groups = dd->rcv_entries.ngroups;
rcd->rhf_rcv_function_map = normal_rhf_rcv_functions;
+ rcd->msix_intr = CCE_NUM_MSIX_VECTORS;
mutex_init(&rcd->exp_mutex);
spin_lock_init(&rcd->exp_lock);
@@ -1316,6 +1318,7 @@ static struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev,
goto bail;
}
+ atomic_set(&dd->ipoib_rsm_usr_num, 0);
return dd;
bail:
@@ -1663,9 +1666,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* do the generic initialization */
initfail = hfi1_init(dd, 0);
- /* setup vnic */
- hfi1_vnic_setup(dd);
-
ret = hfi1_register_ib_device(dd);
/*
@@ -1704,7 +1704,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
hfi1_device_remove(dd);
if (!ret)
hfi1_unregister_ib_device(dd);
- hfi1_vnic_cleanup(dd);
postinit_cleanup(dd);
if (initfail)
ret = initfail;
@@ -1749,8 +1748,8 @@ static void remove_one(struct pci_dev *pdev)
/* unregister from IB core */
hfi1_unregister_ib_device(dd);
- /* cleanup vnic */
- hfi1_vnic_cleanup(dd);
+ /* free netdev data */
+ hfi1_netdev_free(dd);
/*
* Disable the IB link, disable interrupts on the device,
diff --git a/drivers/infiniband/hw/hfi1/ipoib.h b/drivers/infiniband/hw/hfi1/ipoib.h
new file mode 100644
index 000000000000..185c9b02c974
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/ipoib.h
@@ -0,0 +1,171 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/*
+ * Copyright(c) 2020 Intel Corporation.
+ *
+ */
+
+/*
+ * This file contains HFI1 support for IPOIB functionality
+ */
+
+#ifndef HFI1_IPOIB_H
+#define HFI1_IPOIB_H
+
+#include <linux/types.h>
+#include <linux/stddef.h>
+#include <linux/atomic.h>
+#include <linux/netdevice.h>
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+#include <linux/list.h>
+#include <linux/if_infiniband.h>
+
+#include "hfi.h"
+#include "iowait.h"
+#include "netdev.h"
+
+#include <rdma/ib_verbs.h>
+
+#define HFI1_IPOIB_ENTROPY_SHIFT 24
+
+#define HFI1_IPOIB_TXREQ_NAME_LEN 32
+
+#define HFI1_IPOIB_PSEUDO_LEN 20
+#define HFI1_IPOIB_ENCAP_LEN 4
+
+struct hfi1_ipoib_dev_priv;
+
+union hfi1_ipoib_flow {
+ u16 as_int;
+ struct {
+ u8 tx_queue;
+ u8 sc5;
+ } __attribute__((__packed__));
+};
+
+/**
+ * struct hfi1_ipoib_circ_buf - List of items to be processed
+ * @items: ring of items
+ * @head: ring head
+ * @tail: ring tail
+ * @max_items: max items + 1 that the ring can contain
+ * @producer_lock: producer sync lock
+ * @consumer_lock: consumer sync lock
+ */
+struct hfi1_ipoib_circ_buf {
+ void **items;
+ unsigned long head;
+ unsigned long tail;
+ unsigned long max_items;
+ spinlock_t producer_lock; /* head sync lock */
+ spinlock_t consumer_lock; /* tail sync lock */
+};
+
+/**
+ * struct hfi1_ipoib_txq - IPOIB per Tx queue information
+ * @priv: private pointer
+ * @sde: sdma engine
+ * @tx_list: tx request list
+ * @sent_txreqs: count of txreqs posted to sdma
+ * @flow: tracks when list needs to be flushed for a flow change
+ * @q_idx: ipoib Tx queue index
+ * @pkts_sent: indicator packets have been sent from this queue
+ * @wait: iowait structure
+ * @complete_txreqs: count of txreqs completed by sdma
+ * @napi: pointer to tx napi interface
+ * @tx_ring: ring of ipoib txreqs to be reaped by napi callback
+ */
+struct hfi1_ipoib_txq {
+ struct hfi1_ipoib_dev_priv *priv;
+ struct sdma_engine *sde;
+ struct list_head tx_list;
+ u64 sent_txreqs;
+ union hfi1_ipoib_flow flow;
+ u8 q_idx;
+ bool pkts_sent;
+ struct iowait wait;
+
+ atomic64_t ____cacheline_aligned_in_smp complete_txreqs;
+ struct napi_struct *napi;
+ struct hfi1_ipoib_circ_buf tx_ring;
+};
+
+struct hfi1_ipoib_dev_priv {
+ struct hfi1_devdata *dd;
+ struct net_device *netdev;
+ struct ib_device *device;
+ struct hfi1_ipoib_txq *txqs;
+ struct kmem_cache *txreq_cache;
+ struct napi_struct *tx_napis;
+ u16 pkey;
+ u16 pkey_index;
+ u32 qkey;
+ u8 port_num;
+
+ const struct net_device_ops *netdev_ops;
+ struct rvt_qp *qp;
+ struct pcpu_sw_netstats __percpu *netstats;
+};
+
+/* hfi1 ipoib rdma netdev's private data structure */
+struct hfi1_ipoib_rdma_netdev {
+ struct rdma_netdev rn; /* keep this first */
+ /* followed by device private data */
+ struct hfi1_ipoib_dev_priv dev_priv;
+};
+
+static inline struct hfi1_ipoib_dev_priv *
+hfi1_ipoib_priv(const struct net_device *dev)
+{
+ return &((struct hfi1_ipoib_rdma_netdev *)netdev_priv(dev))->dev_priv;
+}
+
+static inline void
+hfi1_ipoib_update_rx_netstats(struct hfi1_ipoib_dev_priv *priv,
+ u64 packets,
+ u64 bytes)
+{
+ struct pcpu_sw_netstats *netstats = this_cpu_ptr(priv->netstats);
+
+ u64_stats_update_begin(&netstats->syncp);
+ netstats->rx_packets += packets;
+ netstats->rx_bytes += bytes;
+ u64_stats_update_end(&netstats->syncp);
+}
+
+static inline void
+hfi1_ipoib_update_tx_netstats(struct hfi1_ipoib_dev_priv *priv,
+ u64 packets,
+ u64 bytes)
+{
+ struct pcpu_sw_netstats *netstats = this_cpu_ptr(priv->netstats);
+
+ u64_stats_update_begin(&netstats->syncp);
+ netstats->tx_packets += packets;
+ netstats->tx_bytes += bytes;
+ u64_stats_update_end(&netstats->syncp);
+}
+
+int hfi1_ipoib_send_dma(struct net_device *dev,
+ struct sk_buff *skb,
+ struct ib_ah *address,
+ u32 dqpn);
+
+int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv);
+void hfi1_ipoib_txreq_deinit(struct hfi1_ipoib_dev_priv *priv);
+
+int hfi1_ipoib_rxq_init(struct net_device *dev);
+void hfi1_ipoib_rxq_deinit(struct net_device *dev);
+
+void hfi1_ipoib_napi_tx_enable(struct net_device *dev);
+void hfi1_ipoib_napi_tx_disable(struct net_device *dev);
+
+struct sk_buff *hfi1_ipoib_prepare_skb(struct hfi1_netdev_rxq *rxq,
+ int size, void *data);
+
+int hfi1_ipoib_rn_get_params(struct ib_device *device,
+ u8 port_num,
+ enum rdma_netdev_t type,
+ struct rdma_netdev_alloc_params *params);
+
+#endif /* HFI1_IPOIB_H */
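One small illustration of the header above: union hfi1_ipoib_flow packs the tx queue and SC5 into a single u16 so a flow change can be detected with one compare. A sketch of how a cached flow might be checked (hypothetical helper, not part of the patch):

#include <linux/types.h>

/* Illustrative only: either field changing changes as_int. */
static bool example_flow_changed(const union hfi1_ipoib_flow *cached,
				 u8 new_sc5, u8 new_tx_queue)
{
	union hfi1_ipoib_flow now;

	now.tx_queue = new_tx_queue;
	now.sc5 = new_sc5;

	return cached->as_int != now.as_int;
}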
diff --git a/drivers/infiniband/hw/hfi1/ipoib_main.c b/drivers/infiniband/hw/hfi1/ipoib_main.c
new file mode 100644
index 000000000000..014351ebbefa
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/ipoib_main.c
@@ -0,0 +1,309 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/*
+ * Copyright(c) 2020 Intel Corporation.
+ *
+ */
+
+/*
+ * This file contains HFI1 support for ipoib functionality
+ */
+
+#include "ipoib.h"
+#include "hfi.h"
+
+static u32 qpn_from_mac(u8 *mac_arr)
+{
+ return (u32)mac_arr[1] << 16 | mac_arr[2] << 8 | mac_arr[3];
+}
+
+static int hfi1_ipoib_dev_init(struct net_device *dev)
+{
+ struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
+ int ret;
+
+ priv->netstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+
+ ret = priv->netdev_ops->ndo_init(dev);
+ if (ret)
+ return ret;
+
+ ret = hfi1_netdev_add_data(priv->dd,
+ qpn_from_mac(priv->netdev->dev_addr),
+ dev);
+ if (ret < 0) {
+ priv->netdev_ops->ndo_uninit(dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void hfi1_ipoib_dev_uninit(struct net_device *dev)
+{
+ struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
+
+ hfi1_netdev_remove_data(priv->dd, qpn_from_mac(priv->netdev->dev_addr));
+
+ priv->netdev_ops->ndo_uninit(dev);
+}
+
+static int hfi1_ipoib_dev_open(struct net_device *dev)
+{
+ struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
+ int ret;
+
+ ret = priv->netdev_ops->ndo_open(dev);
+ if (!ret) {
+ struct hfi1_ibport *ibp = to_iport(priv->device,
+ priv->port_num);
+ struct rvt_qp *qp;
+ u32 qpn = qpn_from_mac(priv->netdev->dev_addr);
+
+ rcu_read_lock();
+ qp = rvt_lookup_qpn(ib_to_rvt(priv->device), &ibp->rvp, qpn);
+ if (!qp) {
+ rcu_read_unlock();
+ priv->netdev_ops->ndo_stop(dev);
+ return -EINVAL;
+ }
+ rvt_get_qp(qp);
+ priv->qp = qp;
+ rcu_read_unlock();
+
+ hfi1_netdev_enable_queues(priv->dd);
+ hfi1_ipoib_napi_tx_enable(dev);
+ }
+
+ return ret;
+}
+
+static int hfi1_ipoib_dev_stop(struct net_device *dev)
+{
+ struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
+
+ if (!priv->qp)
+ return 0;
+
+ hfi1_ipoib_napi_tx_disable(dev);
+ hfi1_netdev_disable_queues(priv->dd);
+
+ rvt_put_qp(priv->qp);
+ priv->qp = NULL;
+
+ return priv->netdev_ops->ndo_stop(dev);
+}
+
+static void hfi1_ipoib_dev_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *storage)
+{
+ struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
+ u64 rx_packets = 0ull;
+ u64 rx_bytes = 0ull;
+ u64 tx_packets = 0ull;
+ u64 tx_bytes = 0ull;
+ int i;
+
+ netdev_stats_to_stats64(storage, &dev->stats);
+
+ for_each_possible_cpu(i) {
+ const struct pcpu_sw_netstats *stats;
+ unsigned int start;
+ u64 trx_packets;
+ u64 trx_bytes;
+ u64 ttx_packets;
+ u64 ttx_bytes;
+
+ stats = per_cpu_ptr(priv->netstats, i);
+ do {
+ start = u64_stats_fetch_begin_irq(&stats->syncp);
+ trx_packets = stats->rx_packets;
+ trx_bytes = stats->rx_bytes;
+ ttx_packets = stats->tx_packets;
+ ttx_bytes = stats->tx_bytes;
+ } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+
+ rx_packets += trx_packets;
+ rx_bytes += trx_bytes;
+ tx_packets += ttx_packets;
+ tx_bytes += ttx_bytes;
+ }
+
+ storage->rx_packets += rx_packets;
+ storage->rx_bytes += rx_bytes;
+ storage->tx_packets += tx_packets;
+ storage->tx_bytes += tx_bytes;
+}
+
+static const struct net_device_ops hfi1_ipoib_netdev_ops = {
+ .ndo_init = hfi1_ipoib_dev_init,
+ .ndo_uninit = hfi1_ipoib_dev_uninit,
+ .ndo_open = hfi1_ipoib_dev_open,
+ .ndo_stop = hfi1_ipoib_dev_stop,
+ .ndo_get_stats64 = hfi1_ipoib_dev_get_stats64,
+};
+
+static int hfi1_ipoib_send(struct net_device *dev,
+ struct sk_buff *skb,
+ struct ib_ah *address,
+ u32 dqpn)
+{
+ return hfi1_ipoib_send_dma(dev, skb, address, dqpn);
+}
+
+static int hfi1_ipoib_mcast_attach(struct net_device *dev,
+ struct ib_device *device,
+ union ib_gid *mgid,
+ u16 mlid,
+ int set_qkey,
+ u32 qkey)
+{
+ struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
+ u32 qpn = (u32)qpn_from_mac(priv->netdev->dev_addr);
+ struct hfi1_ibport *ibp = to_iport(priv->device, priv->port_num);
+ struct rvt_qp *qp;
+ int ret = -EINVAL;
+
+ rcu_read_lock();
+
+ qp = rvt_lookup_qpn(ib_to_rvt(priv->device), &ibp->rvp, qpn);
+ if (qp) {
+ rvt_get_qp(qp);
+ rcu_read_unlock();
+ if (set_qkey)
+ priv->qkey = qkey;
+
+ /* attach QP to multicast group */
+ ret = ib_attach_mcast(&qp->ibqp, mgid, mlid);
+ rvt_put_qp(qp);
+ } else {
+ rcu_read_unlock();
+ }
+
+ return ret;
+}
+
+static int hfi1_ipoib_mcast_detach(struct net_device *dev,
+ struct ib_device *device,
+ union ib_gid *mgid,
+ u16 mlid)
+{
+ struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
+ u32 qpn = (u32)qpn_from_mac(priv->netdev->dev_addr);
+ struct hfi1_ibport *ibp = to_iport(priv->device, priv->port_num);
+ struct rvt_qp *qp;
+ int ret = -EINVAL;
+
+ rcu_read_lock();
+
+ qp = rvt_lookup_qpn(ib_to_rvt(priv->device), &ibp->rvp, qpn);
+ if (qp) {
+ rvt_get_qp(qp);
+ rcu_read_unlock();
+ ret = ib_detach_mcast(&qp->ibqp, mgid, mlid);
+ rvt_put_qp(qp);
+ } else {
+ rcu_read_unlock();
+ }
+ return ret;
+}
+
+static void hfi1_ipoib_netdev_dtor(struct net_device *dev)
+{
+ struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
+
+ hfi1_ipoib_txreq_deinit(priv);
+ hfi1_ipoib_rxq_deinit(priv->netdev);
+
+ free_percpu(priv->netstats);
+}
+
+static void hfi1_ipoib_free_rdma_netdev(struct net_device *dev)
+{
+ hfi1_ipoib_netdev_dtor(dev);
+ free_netdev(dev);
+}
+
+static void hfi1_ipoib_set_id(struct net_device *dev, int id)
+{
+ struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
+
+ priv->pkey_index = (u16)id;
+ ib_query_pkey(priv->device,
+ priv->port_num,
+ priv->pkey_index,
+ &priv->pkey);
+}
+
+static int hfi1_ipoib_setup_rn(struct ib_device *device,
+ u8 port_num,
+ struct net_device *netdev,
+ void *param)
+{
+ struct hfi1_devdata *dd = dd_from_ibdev(device);
+ struct rdma_netdev *rn = netdev_priv(netdev);
+ struct hfi1_ipoib_dev_priv *priv;
+ int rc;
+
+ rn->send = hfi1_ipoib_send;
+ rn->attach_mcast = hfi1_ipoib_mcast_attach;
+ rn->detach_mcast = hfi1_ipoib_mcast_detach;
+ rn->set_id = hfi1_ipoib_set_id;
+ rn->hca = device;
+ rn->port_num = port_num;
+ rn->mtu = netdev->mtu;
+
+ priv = hfi1_ipoib_priv(netdev);
+ priv->dd = dd;
+ priv->netdev = netdev;
+ priv->device = device;
+ priv->port_num = port_num;
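+ /* Save the original netdev ops so the hfi1 handlers can chain to them */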
+ priv->netdev_ops = netdev->netdev_ops;
+
+ netdev->netdev_ops = &hfi1_ipoib_netdev_ops;
+
+ ib_query_pkey(device, port_num, priv->pkey_index, &priv->pkey);
+
+ rc = hfi1_ipoib_txreq_init(priv);
+ if (rc) {
+ dd_dev_err(dd, "IPoIB netdev TX init - failed(%d)\n", rc);
+ hfi1_ipoib_free_rdma_netdev(netdev);
+ return rc;
+ }
+
+ rc = hfi1_ipoib_rxq_init(netdev);
+ if (rc) {
+ dd_dev_err(dd, "IPoIB netdev RX init - failed(%d)\n", rc);
+ hfi1_ipoib_free_rdma_netdev(netdev);
+ return rc;
+ }
+
+ netdev->priv_destructor = hfi1_ipoib_netdev_dtor;
+ netdev->needs_free_netdev = true;
+
+ return 0;
+}
+
+int hfi1_ipoib_rn_get_params(struct ib_device *device,
+ u8 port_num,
+ enum rdma_netdev_t type,
+ struct rdma_netdev_alloc_params *params)
+{
+ struct hfi1_devdata *dd = dd_from_ibdev(device);
+
+ if (type != RDMA_NETDEV_IPOIB)
+ return -EOPNOTSUPP;
+
+ if (!HFI1_CAP_IS_KSET(AIP) || !dd->num_netdev_contexts)
+ return -EOPNOTSUPP;
+
+ if (!port_num || port_num > dd->num_pports)
+ return -EINVAL;
+
+ params->sizeof_priv = sizeof(struct hfi1_ipoib_rdma_netdev);
+ params->txqs = dd->num_sdma;
+ params->rxqs = dd->num_netdev_contexts;
+ params->param = NULL;
+ params->initialize_rdma_netdev = hfi1_ipoib_setup_rn;
+
+ return 0;
+}
diff --git a/drivers/infiniband/hw/hfi1/ipoib_rx.c b/drivers/infiniband/hw/hfi1/ipoib_rx.c
new file mode 100644
index 000000000000..3afa7545242c
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/ipoib_rx.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/*
+ * Copyright(c) 2020 Intel Corporation.
+ *
+ */
+
+#include "netdev.h"
+#include "ipoib.h"
+
+#define HFI1_IPOIB_SKB_PAD ((NET_SKB_PAD) + (NET_IP_ALIGN))
+
+static void copy_ipoib_buf(struct sk_buff *skb, void *data, int size)
+{
+ void *dst_data;
+
+ skb_checksum_none_assert(skb);
+ skb->protocol = *((__be16 *)data);
+
+ dst_data = skb_put(skb, size);
+ memcpy(dst_data, data, size);
+ skb->mac_header = HFI1_IPOIB_PSEUDO_LEN;
+ skb_pull(skb, HFI1_IPOIB_ENCAP_LEN);
+}
+
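+/* Build a large receive skb from the napi page-fragment cache, falling back to napi_alloc_skb */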
+static struct sk_buff *prepare_frag_skb(struct napi_struct *napi, int size)
+{
+ struct sk_buff *skb;
+ int skb_size = SKB_DATA_ALIGN(size + HFI1_IPOIB_SKB_PAD);
+ void *frag;
+
+ skb_size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ skb_size = SKB_DATA_ALIGN(skb_size);
+ frag = napi_alloc_frag(skb_size);
+
+ if (unlikely(!frag))
+ return napi_alloc_skb(napi, size);
+
+ skb = build_skb(frag, skb_size);
+
+ if (unlikely(!skb)) {
+ skb_free_frag(frag);
+ return NULL;
+ }
+
+ skb_reserve(skb, HFI1_IPOIB_SKB_PAD);
+ return skb;
+}
+
+struct sk_buff *hfi1_ipoib_prepare_skb(struct hfi1_netdev_rxq *rxq,
+ int size, void *data)
+{
+ struct napi_struct *napi = &rxq->napi;
+ int skb_size = size + HFI1_IPOIB_ENCAP_LEN;
+ struct sk_buff *skb;
+
+ /*
+ * For smaller allocations (up to 4k plus skb overhead) use the napi
+ * skb cache. Otherwise try the napi frag cache.
+ */
+ if (size <= SKB_WITH_OVERHEAD(PAGE_SIZE))
+ skb = napi_alloc_skb(napi, skb_size);
+ else
+ skb = prepare_frag_skb(napi, skb_size);
+
+ if (unlikely(!skb))
+ return NULL;
+
+ copy_ipoib_buf(skb, data, size);
+
+ return skb;
+}
+
+int hfi1_ipoib_rxq_init(struct net_device *netdev)
+{
+ struct hfi1_ipoib_dev_priv *ipoib_priv = hfi1_ipoib_priv(netdev);
+ struct hfi1_devdata *dd = ipoib_priv->dd;
+ int ret;
+
+ ret = hfi1_netdev_rx_init(dd);
+ if (ret)
+ return ret;
+
+ hfi1_init_aip_rsm(dd);
+
+ return ret;
+}
+
+void hfi1_ipoib_rxq_deinit(struct net_device *netdev)
+{
+ struct hfi1_ipoib_dev_priv *ipoib_priv = hfi1_ipoib_priv(netdev);
+ struct hfi1_devdata *dd = ipoib_priv->dd;
+
+ hfi1_deinit_aip_rsm(dd);
+ hfi1_netdev_rx_destroy(dd);
+}
diff --git a/drivers/infiniband/hw/hfi1/ipoib_tx.c b/drivers/infiniband/hw/hfi1/ipoib_tx.c
new file mode 100644
index 000000000000..883cb9d48022
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/ipoib_tx.c
@@ -0,0 +1,828 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/*
+ * Copyright(c) 2020 Intel Corporation.
+ *
+ */
+
+/*
+ * This file contains HFI1 support for IPOIB SDMA functionality
+ */
+
+#include <linux/log2.h>
+#include <linux/circ_buf.h>
+
+#include "sdma.h"
+#include "verbs.h"
+#include "trace_ibhdrs.h"
+#include "ipoib.h"
+
+/* Convenience helpers for circular buffer index arithmetic */
+#define CIRC_ADD(val, add, size) (((val) + (add)) & ((size) - 1))
+#define CIRC_NEXT(val, size) CIRC_ADD(val, 1, size)
+#define CIRC_PREV(val, size) CIRC_ADD(val, -1, size)
+
+/**
+ * struct ipoib_txreq - IPOIB transmit descriptor
+ * @txreq: sdma transmit request
+ * @sdma_hdr: 9b ib headers
+ * @sdma_status: status returned by sdma engine
+ * @priv: ipoib netdev private data
+ * @txq: txq on which skb was output
+ * @skb: skb to send
+ */
+struct ipoib_txreq {
+ struct sdma_txreq txreq;
+ struct hfi1_sdma_header sdma_hdr;
+ int sdma_status;
+ struct hfi1_ipoib_dev_priv *priv;
+ struct hfi1_ipoib_txq *txq;
+ struct sk_buff *skb;
+};
+
+struct ipoib_txparms {
+ struct hfi1_devdata *dd;
+ struct rdma_ah_attr *ah_attr;
+ struct hfi1_ibport *ibp;
+ struct hfi1_ipoib_txq *txq;
+ union hfi1_ipoib_flow flow;
+ u32 dqpn;
+ u8 hdr_dwords;
+ u8 entropy;
+};
+
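+/* Number of tx requests currently outstanding (sent but not yet completed) */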
+static u64 hfi1_ipoib_txreqs(const u64 sent, const u64 completed)
+{
+ return sent - completed;
+}
+
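+/* Stop the subqueue once in-flight txreqs reach the smaller of tx_queue_len and the ring size */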
+static void hfi1_ipoib_check_queue_depth(struct hfi1_ipoib_txq *txq)
+{
+ if (unlikely(hfi1_ipoib_txreqs(++txq->sent_txreqs,
+ atomic64_read(&txq->complete_txreqs)) >=
+ min_t(unsigned int, txq->priv->netdev->tx_queue_len,
+ txq->tx_ring.max_items - 1)))
+ netif_stop_subqueue(txq->priv->netdev, txq->q_idx);
+}
+
+static void hfi1_ipoib_check_queue_stopped(struct hfi1_ipoib_txq *txq)
+{
+ struct net_device *dev = txq->priv->netdev;
+
+ /* If the queue is already running just return */
+ if (likely(!__netif_subqueue_stopped(dev, txq->q_idx)))
+ return;
+
+ /* If shutting down just return as queue state is irrelevant */
+ if (unlikely(dev->reg_state != NETREG_REGISTERED))
+ return;
+
+ /*
+ * When the queue has been drained to less than half full it will be
+ * restarted.
+ * The size of the txreq ring is fixed at initialization.
+ * The tx queue len can be adjusted upward while the interface is
+ * running.
+ * The tx queue len can be large enough to overflow the txreq_ring.
+ * Use the minimum of the current tx_queue_len or the rings max txreqs
+ * to protect against ring overflow.
+ */
+ if (hfi1_ipoib_txreqs(txq->sent_txreqs,
+ atomic64_read(&txq->complete_txreqs))
+ < min_t(unsigned int, dev->tx_queue_len,
+ txq->tx_ring.max_items) >> 1)
+ netif_wake_subqueue(dev, txq->q_idx);
+}
+
+static void hfi1_ipoib_free_tx(struct ipoib_txreq *tx, int budget)
+{
+ struct hfi1_ipoib_dev_priv *priv = tx->priv;
+
+ if (likely(!tx->sdma_status)) {
+ hfi1_ipoib_update_tx_netstats(priv, 1, tx->skb->len);
+ } else {
+ ++priv->netdev->stats.tx_errors;
+ dd_dev_warn(priv->dd,
+ "%s: Status = 0x%x pbc 0x%llx txq = %d sde = %d\n",
+ __func__, tx->sdma_status,
+ le64_to_cpu(tx->sdma_hdr.pbc), tx->txq->q_idx,
+ tx->txq->sde->this_idx);
+ }
+
+ napi_consume_skb(tx->skb, budget);
+ sdma_txclean(priv->dd, &tx->txreq);
+ kmem_cache_free(priv->txreq_cache, tx);
+}
+
+static int hfi1_ipoib_drain_tx_ring(struct hfi1_ipoib_txq *txq, int budget)
+{
+ struct hfi1_ipoib_circ_buf *tx_ring = &txq->tx_ring;
+ unsigned long head;
+ unsigned long tail;
+ unsigned int max_tx;
+ int work_done;
+ int tx_count;
+
+ spin_lock_bh(&tx_ring->consumer_lock);
+
+ /* Read index before reading contents at that index. */
+ head = smp_load_acquire(&tx_ring->head);
+ tail = tx_ring->tail;
+ max_tx = tx_ring->max_items;
+
+ work_done = min_t(int, CIRC_CNT(head, tail, max_tx), budget);
+
+ for (tx_count = work_done; tx_count; tx_count--) {
+ hfi1_ipoib_free_tx(tx_ring->items[tail], budget);
+ tail = CIRC_NEXT(tail, max_tx);
+ }
+
+ atomic64_add(work_done, &txq->complete_txreqs);
+
+ /* Finished freeing tx items so store the tail value. */
+ smp_store_release(&tx_ring->tail, tail);
+
+ spin_unlock_bh(&tx_ring->consumer_lock);
+
+ hfi1_ipoib_check_queue_stopped(txq);
+
+ return work_done;
+}
+
+static int hfi1_ipoib_process_tx_ring(struct napi_struct *napi, int budget)
+{
+ struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(napi->dev);
+ struct hfi1_ipoib_txq *txq = &priv->txqs[napi - priv->tx_napis];
+
+ int work_done = hfi1_ipoib_drain_tx_ring(txq, budget);
+
+ if (work_done < budget)
+ napi_complete_done(napi, work_done);
+
+ return work_done;
+}
+
+static void hfi1_ipoib_add_tx(struct ipoib_txreq *tx)
+{
+ struct hfi1_ipoib_circ_buf *tx_ring = &tx->txq->tx_ring;
+ unsigned long head;
+ unsigned long tail;
+ size_t max_tx;
+
+ spin_lock(&tx_ring->producer_lock);
+
+ head = tx_ring->head;
+ tail = READ_ONCE(tx_ring->tail);
+ max_tx = tx_ring->max_items;
+
+ if (likely(CIRC_SPACE(head, tail, max_tx))) {
+ tx_ring->items[head] = tx;
+
+ /* Finish storing txreq before incrementing head. */
+ smp_store_release(&tx_ring->head, CIRC_ADD(head, 1, max_tx));
+ napi_schedule(tx->txq->napi);
+ } else {
+ struct hfi1_ipoib_txq *txq = tx->txq;
+ struct hfi1_ipoib_dev_priv *priv = tx->priv;
+
+ /* Ring was full */
+ hfi1_ipoib_free_tx(tx, 0);
+ atomic64_inc(&txq->complete_txreqs);
+ dd_dev_dbg(priv->dd, "txq %d full.\n", txq->q_idx);
+ }
+
+ spin_unlock(&tx_ring->producer_lock);
+}
+
+static void hfi1_ipoib_sdma_complete(struct sdma_txreq *txreq, int status)
+{
+ struct ipoib_txreq *tx = container_of(txreq, struct ipoib_txreq, txreq);
+
+ tx->sdma_status = status;
+
+ hfi1_ipoib_add_tx(tx);
+}
+
+static int hfi1_ipoib_build_ulp_payload(struct ipoib_txreq *tx,
+ struct ipoib_txparms *txp)
+{
+ struct hfi1_devdata *dd = txp->dd;
+ struct sdma_txreq *txreq = &tx->txreq;
+ struct sk_buff *skb = tx->skb;
+ int ret = 0;
+ int i;
+
+ if (skb_headlen(skb)) {
+ ret = sdma_txadd_kvaddr(dd, txreq, skb->data, skb_headlen(skb));
+ if (unlikely(ret))
+ return ret;
+ }
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ ret = sdma_txadd_page(dd,
+ txreq,
+ skb_frag_page(frag),
+ frag->bv_offset,
+ skb_frag_size(frag));
+ if (unlikely(ret))
+ break;
+ }
+
+ return ret;
+}
+
+static int hfi1_ipoib_build_tx_desc(struct ipoib_txreq *tx,
+ struct ipoib_txparms *txp)
+{
+ struct hfi1_devdata *dd = txp->dd;
+ struct sdma_txreq *txreq = &tx->txreq;
+ struct hfi1_sdma_header *sdma_hdr = &tx->sdma_hdr;
+ u16 pkt_bytes =
+ sizeof(sdma_hdr->pbc) + (txp->hdr_dwords << 2) + tx->skb->len;
+ int ret;
+
+ ret = sdma_txinit(txreq, 0, pkt_bytes, hfi1_ipoib_sdma_complete);
+ if (unlikely(ret))
+ return ret;
+
+ /* add pbc + headers */
+ ret = sdma_txadd_kvaddr(dd,
+ txreq,
+ sdma_hdr,
+ sizeof(sdma_hdr->pbc) + (txp->hdr_dwords << 2));
+ if (unlikely(ret))
+ return ret;
+
+ /* add the ulp payload */
+ return hfi1_ipoib_build_ulp_payload(tx, txp);
+}
+
+static void hfi1_ipoib_build_ib_tx_headers(struct ipoib_txreq *tx,
+ struct ipoib_txparms *txp)
+{
+ struct hfi1_ipoib_dev_priv *priv = tx->priv;
+ struct hfi1_sdma_header *sdma_hdr = &tx->sdma_hdr;
+ struct sk_buff *skb = tx->skb;
+ struct hfi1_pportdata *ppd = ppd_from_ibp(txp->ibp);
+ struct rdma_ah_attr *ah_attr = txp->ah_attr;
+ struct ib_other_headers *ohdr;
+ struct ib_grh *grh;
+ u16 dwords;
+ u16 slid;
+ u16 dlid;
+ u16 lrh0;
+ u32 bth0;
+ u32 sqpn = (u32)(priv->netdev->dev_addr[1] << 16 |
+ priv->netdev->dev_addr[2] << 8 |
+ priv->netdev->dev_addr[3]);
+ u16 payload_dwords;
+ u8 pad_cnt;
+
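+ /* Pad the payload out to the next 4-byte (dword) boundary */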
+ pad_cnt = -skb->len & 3;
+
+ /* Includes ICRC */
+ payload_dwords = ((skb->len + pad_cnt) >> 2) + SIZE_OF_CRC;
+
+ /* header size in dwords LRH+BTH+DETH = (8+12+8)/4. */
+ txp->hdr_dwords = 7;
+
+ if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
+ grh = &sdma_hdr->hdr.ibh.u.l.grh;
+ txp->hdr_dwords +=
+ hfi1_make_grh(txp->ibp,
+ grh,
+ rdma_ah_read_grh(ah_attr),
+ txp->hdr_dwords - LRH_9B_DWORDS,
+ payload_dwords);
+ lrh0 = HFI1_LRH_GRH;
+ ohdr = &sdma_hdr->hdr.ibh.u.l.oth;
+ } else {
+ lrh0 = HFI1_LRH_BTH;
+ ohdr = &sdma_hdr->hdr.ibh.u.oth;
+ }
+
+ lrh0 |= (rdma_ah_get_sl(ah_attr) & 0xf) << 4;
+ lrh0 |= (txp->flow.sc5 & 0xf) << 12;
+
+ dlid = opa_get_lid(rdma_ah_get_dlid(ah_attr), 9B);
+ if (dlid == be16_to_cpu(IB_LID_PERMISSIVE)) {
+ slid = be16_to_cpu(IB_LID_PERMISSIVE);
+ } else {
+ u16 lid = (u16)ppd->lid;
+
+ if (lid) {
+ lid |= rdma_ah_get_path_bits(ah_attr) &
+ ((1 << ppd->lmc) - 1);
+ slid = lid;
+ } else {
+ slid = be16_to_cpu(IB_LID_PERMISSIVE);
+ }
+ }
+
+ /* Includes ICRC */
+ dwords = txp->hdr_dwords + payload_dwords;
+
+ /* Build the lrh */
+ sdma_hdr->hdr.hdr_type = HFI1_PKT_TYPE_9B;
+ hfi1_make_ib_hdr(&sdma_hdr->hdr.ibh, lrh0, dwords, dlid, slid);
+
+ /* Build the bth */
+ bth0 = (IB_OPCODE_UD_SEND_ONLY << 24) | (pad_cnt << 20) | priv->pkey;
+
+ ohdr->bth[0] = cpu_to_be32(bth0);
+ ohdr->bth[1] = cpu_to_be32(txp->dqpn);
+ ohdr->bth[2] = cpu_to_be32(mask_psn((u32)txp->txq->sent_txreqs));
+
+ /* Build the deth */
+ ohdr->u.ud.deth[0] = cpu_to_be32(priv->qkey);
+ ohdr->u.ud.deth[1] = cpu_to_be32((txp->entropy <<
+ HFI1_IPOIB_ENTROPY_SHIFT) | sqpn);
+
+ /* Construct the pbc. */
+ sdma_hdr->pbc =
+ cpu_to_le64(create_pbc(ppd,
+ ib_is_sc5(txp->flow.sc5) <<
+ PBC_DC_INFO_SHIFT,
+ 0,
+ sc_to_vlt(priv->dd, txp->flow.sc5),
+ dwords - SIZE_OF_CRC +
+ (sizeof(sdma_hdr->pbc) >> 2)));
+}
+
+static struct ipoib_txreq *hfi1_ipoib_send_dma_common(struct net_device *dev,
+ struct sk_buff *skb,
+ struct ipoib_txparms *txp)
+{
+ struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
+ struct ipoib_txreq *tx;
+ int ret;
+
+ tx = kmem_cache_alloc_node(priv->txreq_cache,
+ GFP_ATOMIC,
+ priv->dd->node);
+ if (unlikely(!tx))
+ return ERR_PTR(-ENOMEM);
+
+ /* so that we can test if the sdma descriptors are there */
+ tx->txreq.num_desc = 0;
+ tx->priv = priv;
+ tx->txq = txp->txq;
+ tx->skb = skb;
+
+ hfi1_ipoib_build_ib_tx_headers(tx, txp);
+
+ ret = hfi1_ipoib_build_tx_desc(tx, txp);
+ if (likely(!ret)) {
+ if (txp->txq->flow.as_int != txp->flow.as_int) {
+ txp->txq->flow.tx_queue = txp->flow.tx_queue;
+ txp->txq->flow.sc5 = txp->flow.sc5;
+ txp->txq->sde =
+ sdma_select_engine_sc(priv->dd,
+ txp->flow.tx_queue,
+ txp->flow.sc5);
+ }
+
+ return tx;
+ }
+
+ sdma_txclean(priv->dd, &tx->txreq);
+ kmem_cache_free(priv->txreq_cache, tx);
+
+ return ERR_PTR(ret);
+}
+
+static int hfi1_ipoib_submit_tx_list(struct net_device *dev,
+ struct hfi1_ipoib_txq *txq)
+{
+ int ret;
+ u16 count_out;
+
+ ret = sdma_send_txlist(txq->sde,
+ iowait_get_ib_work(&txq->wait),
+ &txq->tx_list,
+ &count_out);
+ if (likely(!ret) || ret == -EBUSY || ret == -ECOMM)
+ return ret;
+
+ dd_dev_warn(txq->priv->dd, "cannot send skb tx list, err %d.\n", ret);
+
+ return ret;
+}
+
+static int hfi1_ipoib_flush_tx_list(struct net_device *dev,
+ struct hfi1_ipoib_txq *txq)
+{
+ int ret = 0;
+
+ if (!list_empty(&txq->tx_list)) {
+ /* Flush the current list */
+ ret = hfi1_ipoib_submit_tx_list(dev, txq);
+
+ if (unlikely(ret))
+ if (ret != -EBUSY)
+ ++dev->stats.tx_carrier_errors;
+ }
+
+ return ret;
+}
+
+static int hfi1_ipoib_submit_tx(struct hfi1_ipoib_txq *txq,
+ struct ipoib_txreq *tx)
+{
+ int ret;
+
+ ret = sdma_send_txreq(txq->sde,
+ iowait_get_ib_work(&txq->wait),
+ &tx->txreq,
+ txq->pkts_sent);
+ if (likely(!ret)) {
+ txq->pkts_sent = true;
+ iowait_starve_clear(txq->pkts_sent, &txq->wait);
+ }
+
+ return ret;
+}
+
+static int hfi1_ipoib_send_dma_single(struct net_device *dev,
+ struct sk_buff *skb,
+ struct ipoib_txparms *txp)
+{
+ struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
+ struct hfi1_ipoib_txq *txq = txp->txq;
+ struct ipoib_txreq *tx;
+ int ret;
+
+ tx = hfi1_ipoib_send_dma_common(dev, skb, txp);
+ if (IS_ERR(tx)) {
+ int ret = PTR_ERR(tx);
+
+ dev_kfree_skb_any(skb);
+
+ if (ret == -ENOMEM)
+ ++dev->stats.tx_errors;
+ else
+ ++dev->stats.tx_carrier_errors;
+
+ return NETDEV_TX_OK;
+ }
+
+ ret = hfi1_ipoib_submit_tx(txq, tx);
+ if (likely(!ret)) {
+ trace_sdma_output_ibhdr(tx->priv->dd,
+ &tx->sdma_hdr.hdr,
+ ib_is_sc5(txp->flow.sc5));
+ hfi1_ipoib_check_queue_depth(txq);
+ return NETDEV_TX_OK;
+ }
+
+ txq->pkts_sent = false;
+
+ if (ret == -EBUSY) {
+ list_add_tail(&tx->txreq.list, &txq->tx_list);
+
+ trace_sdma_output_ibhdr(tx->priv->dd,
+ &tx->sdma_hdr.hdr,
+ ib_is_sc5(txp->flow.sc5));
+ hfi1_ipoib_check_queue_depth(txq);
+ return NETDEV_TX_OK;
+ }
+
+ if (ret == -ECOMM) {
+ hfi1_ipoib_check_queue_depth(txq);
+ return NETDEV_TX_OK;
+ }
+
+ sdma_txclean(priv->dd, &tx->txreq);
+ dev_kfree_skb_any(skb);
+ kmem_cache_free(priv->txreq_cache, tx);
+ ++dev->stats.tx_carrier_errors;
+
+ return NETDEV_TX_OK;
+}
+
+static int hfi1_ipoib_send_dma_list(struct net_device *dev,
+ struct sk_buff *skb,
+ struct ipoib_txparms *txp)
+{
+ struct hfi1_ipoib_txq *txq = txp->txq;
+ struct ipoib_txreq *tx;
+
+ /* Has the flow changed? */
+ if (txq->flow.as_int != txp->flow.as_int)
+ (void)hfi1_ipoib_flush_tx_list(dev, txq);
+
+ tx = hfi1_ipoib_send_dma_common(dev, skb, txp);
+ if (IS_ERR(tx)) {
+ int ret = PTR_ERR(tx);
+
+ dev_kfree_skb_any(skb);
+
+ if (ret == -ENOMEM)
+ ++dev->stats.tx_errors;
+ else
+ ++dev->stats.tx_carrier_errors;
+
+ return NETDEV_TX_OK;
+ }
+
+ list_add_tail(&tx->txreq.list, &txq->tx_list);
+
+ hfi1_ipoib_check_queue_depth(txq);
+
+ trace_sdma_output_ibhdr(tx->priv->dd,
+ &tx->sdma_hdr.hdr,
+ ib_is_sc5(txp->flow.sc5));
+
+ if (!netdev_xmit_more())
+ (void)hfi1_ipoib_flush_tx_list(dev, txq);
+
+ return NETDEV_TX_OK;
+}
+
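+/*
+ * Derive a per-flow entropy byte by XORing the first four bytes of the
+ * transport header (the TCP/UDP port numbers) so traffic spreads across
+ * SDMA engines; otherwise fall back to the skb queue mapping.
+ */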
+static u8 hfi1_ipoib_calc_entropy(struct sk_buff *skb)
+{
+ if (skb_transport_header_was_set(skb)) {
+ u8 *hdr = (u8 *)skb_transport_header(skb);
+
+ return (hdr[0] ^ hdr[1] ^ hdr[2] ^ hdr[3]);
+ }
+
+ return (u8)skb_get_queue_mapping(skb);
+}
+
+int hfi1_ipoib_send_dma(struct net_device *dev,
+ struct sk_buff *skb,
+ struct ib_ah *address,
+ u32 dqpn)
+{
+ struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
+ struct ipoib_txparms txp;
+ struct rdma_netdev *rn = netdev_priv(dev);
+
+ if (unlikely(skb->len > rn->mtu + HFI1_IPOIB_ENCAP_LEN)) {
+ dd_dev_warn(priv->dd, "packet len %d (> %d) too long to send, dropping\n",
+ skb->len,
+ rn->mtu + HFI1_IPOIB_ENCAP_LEN);
+ ++dev->stats.tx_dropped;
+ ++dev->stats.tx_errors;
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ txp.dd = priv->dd;
+ txp.ah_attr = &ibah_to_rvtah(address)->attr;
+ txp.ibp = to_iport(priv->device, priv->port_num);
+ txp.txq = &priv->txqs[skb_get_queue_mapping(skb)];
+ txp.dqpn = dqpn;
+ txp.flow.sc5 = txp.ibp->sl_to_sc[rdma_ah_get_sl(txp.ah_attr)];
+ txp.flow.tx_queue = (u8)skb_get_queue_mapping(skb);
+ txp.entropy = hfi1_ipoib_calc_entropy(skb);
+
+ if (netdev_xmit_more() || !list_empty(&txp.txq->tx_list))
+ return hfi1_ipoib_send_dma_list(dev, skb, &txp);
+
+ return hfi1_ipoib_send_dma_single(dev, skb, &txp);
+}
+
+/*
+ * hfi1_ipoib_sdma_sleep - ipoib sdma sleep function
+ *
+ * This function gets called from sdma_send_txreq() when there are not enough
+ * sdma descriptors available to send the packet. It adds the Tx queue's wait
+ * structure to the sdma engine's dmawait list to be woken up when descriptors
+ * become available.
+ */
+static int hfi1_ipoib_sdma_sleep(struct sdma_engine *sde,
+ struct iowait_work *wait,
+ struct sdma_txreq *txreq,
+ uint seq,
+ bool pkts_sent)
+{
+ struct hfi1_ipoib_txq *txq =
+ container_of(wait->iow, struct hfi1_ipoib_txq, wait);
+
+ write_seqlock(&sde->waitlock);
+
+ if (likely(txq->priv->netdev->reg_state == NETREG_REGISTERED)) {
+ if (sdma_progress(sde, seq, txreq)) {
+ write_sequnlock(&sde->waitlock);
+ return -EAGAIN;
+ }
+
+ netif_stop_subqueue(txq->priv->netdev, txq->q_idx);
+
+ if (list_empty(&txq->wait.list))
+ iowait_queue(pkts_sent, wait->iow, &sde->dmawait);
+
+ write_sequnlock(&sde->waitlock);
+ return -EBUSY;
+ }
+
+ write_sequnlock(&sde->waitlock);
+ return -EINVAL;
+}
+
+/*
+ * hfi1_ipoib_sdma_wakeup - ipoib sdma wakeup function
+ *
+ * This function gets called when SDMA descriptors become available and the Tx
+ * queue's wait structure was previously added to the sdma engine's dmawait list.
+ */
+static void hfi1_ipoib_sdma_wakeup(struct iowait *wait, int reason)
+{
+ struct hfi1_ipoib_txq *txq =
+ container_of(wait, struct hfi1_ipoib_txq, wait);
+
+ if (likely(txq->priv->netdev->reg_state == NETREG_REGISTERED))
+ iowait_schedule(wait, system_highpri_wq, WORK_CPU_UNBOUND);
+}
+
+static void hfi1_ipoib_flush_txq(struct work_struct *work)
+{
+ struct iowait_work *ioww =
+ container_of(work, struct iowait_work, iowork);
+ struct iowait *wait = iowait_ioww_to_iow(ioww);
+ struct hfi1_ipoib_txq *txq =
+ container_of(wait, struct hfi1_ipoib_txq, wait);
+ struct net_device *dev = txq->priv->netdev;
+
+ if (likely(dev->reg_state == NETREG_REGISTERED) &&
+ likely(__netif_subqueue_stopped(dev, txq->q_idx)) &&
+ likely(!hfi1_ipoib_flush_tx_list(dev, txq)))
+ netif_wake_subqueue(dev, txq->q_idx);
+}
+
+int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
+{
+ struct net_device *dev = priv->netdev;
+ char buf[HFI1_IPOIB_TXREQ_NAME_LEN];
+ unsigned long tx_ring_size;
+ int i;
+
+ /*
+ * Ring holds 1 less than tx_ring_size
+ * Round up to next power of 2 in order to hold at least tx_queue_len
+ */
+ tx_ring_size = roundup_pow_of_two((unsigned long)dev->tx_queue_len + 1);
+
+ snprintf(buf, sizeof(buf), "hfi1_%u_ipoib_txreq_cache", priv->dd->unit);
+ priv->txreq_cache = kmem_cache_create(buf,
+ sizeof(struct ipoib_txreq),
+ 0,
+ 0,
+ NULL);
+ if (!priv->txreq_cache)
+ return -ENOMEM;
+
+ priv->tx_napis = kcalloc_node(dev->num_tx_queues,
+ sizeof(struct napi_struct),
+ GFP_ATOMIC,
+ priv->dd->node);
+ if (!priv->tx_napis)
+ goto free_txreq_cache;
+
+ priv->txqs = kcalloc_node(dev->num_tx_queues,
+ sizeof(struct hfi1_ipoib_txq),
+ GFP_ATOMIC,
+ priv->dd->node);
+ if (!priv->txqs)
+ goto free_tx_napis;
+
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ struct hfi1_ipoib_txq *txq = &priv->txqs[i];
+
+ iowait_init(&txq->wait,
+ 0,
+ hfi1_ipoib_flush_txq,
+ NULL,
+ hfi1_ipoib_sdma_sleep,
+ hfi1_ipoib_sdma_wakeup,
+ NULL,
+ NULL);
+ txq->priv = priv;
+ txq->sde = NULL;
+ INIT_LIST_HEAD(&txq->tx_list);
+ atomic64_set(&txq->complete_txreqs, 0);
+ txq->q_idx = i;
+ txq->flow.tx_queue = 0xff;
+ txq->flow.sc5 = 0xff;
+ txq->pkts_sent = false;
+
+ netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
+ priv->dd->node);
+
+ txq->tx_ring.items =
+ vzalloc_node(array_size(tx_ring_size,
+ sizeof(struct ipoib_txreq)),
+ priv->dd->node);
+ if (!txq->tx_ring.items)
+ goto free_txqs;
+
+ spin_lock_init(&txq->tx_ring.producer_lock);
+ spin_lock_init(&txq->tx_ring.consumer_lock);
+ txq->tx_ring.max_items = tx_ring_size;
+
+ txq->napi = &priv->tx_napis[i];
+ netif_tx_napi_add(dev, txq->napi,
+ hfi1_ipoib_process_tx_ring,
+ NAPI_POLL_WEIGHT);
+ }
+
+ return 0;
+
+free_txqs:
+ for (i--; i >= 0; i--) {
+ struct hfi1_ipoib_txq *txq = &priv->txqs[i];
+
+ netif_napi_del(txq->napi);
+ vfree(txq->tx_ring.items);
+ }
+
+ kfree(priv->txqs);
+ priv->txqs = NULL;
+
+free_tx_napis:
+ kfree(priv->tx_napis);
+ priv->tx_napis = NULL;
+
+free_txreq_cache:
+ kmem_cache_destroy(priv->txreq_cache);
+ priv->txreq_cache = NULL;
+ return -ENOMEM;
+}
+
+static void hfi1_ipoib_drain_tx_list(struct hfi1_ipoib_txq *txq)
+{
+ struct sdma_txreq *txreq;
+ struct sdma_txreq *txreq_tmp;
+ atomic64_t *complete_txreqs = &txq->complete_txreqs;
+
+ list_for_each_entry_safe(txreq, txreq_tmp, &txq->tx_list, list) {
+ struct ipoib_txreq *tx =
+ container_of(txreq, struct ipoib_txreq, txreq);
+
+ list_del(&txreq->list);
+ sdma_txclean(txq->priv->dd, &tx->txreq);
+ dev_kfree_skb_any(tx->skb);
+ kmem_cache_free(txq->priv->txreq_cache, tx);
+ atomic64_inc(complete_txreqs);
+ }
+
+ if (hfi1_ipoib_txreqs(txq->sent_txreqs, atomic64_read(complete_txreqs)))
+ dd_dev_warn(txq->priv->dd,
+ "txq %d not empty found %llu requests\n",
+ txq->q_idx,
+ hfi1_ipoib_txreqs(txq->sent_txreqs,
+ atomic64_read(complete_txreqs)));
+}
+
+void hfi1_ipoib_txreq_deinit(struct hfi1_ipoib_dev_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->netdev->num_tx_queues; i++) {
+ struct hfi1_ipoib_txq *txq = &priv->txqs[i];
+
+ iowait_cancel_work(&txq->wait);
+ iowait_sdma_drain(&txq->wait);
+ hfi1_ipoib_drain_tx_list(txq);
+ netif_napi_del(txq->napi);
+ (void)hfi1_ipoib_drain_tx_ring(txq, txq->tx_ring.max_items);
+ vfree(txq->tx_ring.items);
+ }
+
+ kfree(priv->txqs);
+ priv->txqs = NULL;
+
+ kfree(priv->tx_napis);
+ priv->tx_napis = NULL;
+
+ kmem_cache_destroy(priv->txreq_cache);
+ priv->txreq_cache = NULL;
+}
+
+void hfi1_ipoib_napi_tx_enable(struct net_device *dev)
+{
+ struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
+ int i;
+
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ struct hfi1_ipoib_txq *txq = &priv->txqs[i];
+
+ napi_enable(txq->napi);
+ }
+}
+
+void hfi1_ipoib_napi_tx_disable(struct net_device *dev)
+{
+ struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
+ int i;
+
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ struct hfi1_ipoib_txq *txq = &priv->txqs[i];
+
+ napi_disable(txq->napi);
+ (void)hfi1_ipoib_drain_tx_ring(txq, txq->tx_ring.max_items);
+ }
+}
diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.c b/drivers/infiniband/hw/hfi1/mmu_rb.c
index 14d2a90964c3..24ca17b77b72 100644
--- a/drivers/infiniband/hw/hfi1/mmu_rb.c
+++ b/drivers/infiniband/hw/hfi1/mmu_rb.c
@@ -333,7 +333,7 @@ static void do_remove(struct mmu_rb_handler *handler,
/*
* Work queue function to remove all nodes that have been queued up to
- * be removed. The key feature is that mm->mmap_sem is not being held
+ * be removed. The key feature is that mm->mmap_lock is not being held
* and the remove callback can sleep while taking it, if needed.
*/
static void handle_remove(struct work_struct *work)
diff --git a/drivers/infiniband/hw/hfi1/msix.c b/drivers/infiniband/hw/hfi1/msix.c
index db82db497b2c..d61ee853d215 100644
--- a/drivers/infiniband/hw/hfi1/msix.c
+++ b/drivers/infiniband/hw/hfi1/msix.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
- * Copyright(c) 2018 Intel Corporation.
+ * Copyright(c) 2018 - 2020 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -49,6 +49,7 @@
#include "hfi.h"
#include "affinity.h"
#include "sdma.h"
+#include "netdev.h"
/**
* msix_initialize() - Calculate, request and configure MSIx IRQs
@@ -69,7 +70,7 @@ int msix_initialize(struct hfi1_devdata *dd)
* one for each VNIC context
* ...any new IRQs should be added here.
*/
- total = 1 + dd->num_sdma + dd->n_krcv_queues + dd->num_vnic_contexts;
+ total = 1 + dd->num_sdma + dd->n_krcv_queues + dd->num_netdev_contexts;
if (total >= CCE_NUM_MSIX_VECTORS)
return -EINVAL;
@@ -140,7 +141,7 @@ static int msix_request_irq(struct hfi1_devdata *dd, void *arg,
ret = pci_request_irq(dd->pcidev, nr, handler, thread, arg, name);
if (ret) {
dd_dev_err(dd,
- "%s: request for IRQ %d failed, MSIx %lu, err %d\n",
+ "%s: request for IRQ %d failed, MSIx %lx, err %d\n",
name, irq, nr, ret);
spin_lock(&dd->msix_info.msix_lock);
__clear_bit(nr, dd->msix_info.in_use_msix);
@@ -160,7 +161,7 @@ static int msix_request_irq(struct hfi1_devdata *dd, void *arg,
/* This is a request, so a failure is not fatal */
ret = hfi1_get_irq_affinity(dd, me);
if (ret)
- dd_dev_err(dd, "unable to pin IRQ %d\n", ret);
+ dd_dev_err(dd, "%s: unable to pin IRQ %d\n", name, ret);
return nr;
}
@@ -171,7 +172,8 @@ static int msix_request_rcd_irq_common(struct hfi1_ctxtdata *rcd,
const char *name)
{
int nr = msix_request_irq(rcd->dd, rcd, handler, thread,
- IRQ_RCVCTXT, name);
+ rcd->is_vnic ? IRQ_NETDEVCTXT : IRQ_RCVCTXT,
+ name);
if (nr < 0)
return nr;
@@ -204,6 +206,21 @@ int msix_request_rcd_irq(struct hfi1_ctxtdata *rcd)
}
/**
+ * msix_netdev_request_rcd_irq() - Helper function for RCVAVAIL IRQs
+ * for netdev context
+ * @rcd: valid netdev context
+ */
+int msix_netdev_request_rcd_irq(struct hfi1_ctxtdata *rcd)
+{
+ char name[MAX_NAME_SIZE];
+
+ snprintf(name, sizeof(name), DRIVER_NAME "_%d nd kctxt%d",
+ rcd->dd->unit, rcd->ctxt);
+ return msix_request_rcd_irq_common(rcd, receive_context_interrupt_napi,
+ NULL, name);
+}
+
+/**
* msix_request_smda_ira() - Helper for getting SDMA IRQ resources
* @sde: valid sdma engine
*
@@ -355,15 +372,16 @@ void msix_clean_up_interrupts(struct hfi1_devdata *dd)
}
/**
- * msix_vnic_syncrhonize_irq() - Vnic IRQ synchronize
+ * msix_netdev_synchronize_irq() - netdev IRQ synchronize
* @dd: valid devdata
*/
-void msix_vnic_synchronize_irq(struct hfi1_devdata *dd)
+void msix_netdev_synchronize_irq(struct hfi1_devdata *dd)
{
int i;
+ int ctxt_count = hfi1_netdev_ctxt_count(dd);
- for (i = 0; i < dd->vnic.num_ctxt; i++) {
- struct hfi1_ctxtdata *rcd = dd->vnic.ctxt[i];
+ for (i = 0; i < ctxt_count; i++) {
+ struct hfi1_ctxtdata *rcd = hfi1_netdev_get_ctxt(dd, i);
struct hfi1_msix_entry *me;
me = &dd->msix_info.msix_entries[rcd->msix_intr];
diff --git a/drivers/infiniband/hw/hfi1/msix.h b/drivers/infiniband/hw/hfi1/msix.h
index 1a02ab7971c8..e63e944bf0fc 100644
--- a/drivers/infiniband/hw/hfi1/msix.h
+++ b/drivers/infiniband/hw/hfi1/msix.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
/*
- * Copyright(c) 2018 Intel Corporation.
+ * Copyright(c) 2018 - 2020 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -59,7 +59,8 @@ int msix_request_rcd_irq(struct hfi1_ctxtdata *rcd);
int msix_request_sdma_irq(struct sdma_engine *sde);
void msix_free_irq(struct hfi1_devdata *dd, u8 msix_intr);
-/* VNIC interface */
-void msix_vnic_synchronize_irq(struct hfi1_devdata *dd);
+/* Netdev interface */
+void msix_netdev_synchronize_irq(struct hfi1_devdata *dd);
+int msix_netdev_request_rcd_irq(struct hfi1_ctxtdata *rcd);
#endif
diff --git a/drivers/infiniband/hw/hfi1/netdev.h b/drivers/infiniband/hw/hfi1/netdev.h
new file mode 100644
index 000000000000..947543a3e0c4
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/netdev.h
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/*
+ * Copyright(c) 2020 Intel Corporation.
+ *
+ */
+
+#ifndef HFI1_NETDEV_H
+#define HFI1_NETDEV_H
+
+#include "hfi.h"
+
+#include <linux/netdevice.h>
+#include <linux/xarray.h>
+
+/**
+ * struct hfi1_netdev_rxq - Receive queue for the HFI
+ * dummy netdev. Both the IPoIB and VNIC netdevices operate on
+ * top of this device.
+ * @napi: napi object
+ * @priv: ptr to netdev_priv
+ * @rcd: ptr to receive context data
+ */
+struct hfi1_netdev_rxq {
+ struct napi_struct napi;
+ struct hfi1_netdev_priv *priv;
+ struct hfi1_ctxtdata *rcd;
+};
+
+/*
+ * Number of netdev contexts used. Ensure it is less than or equal to
+ * max queues supported by VNIC (HFI1_VNIC_MAX_QUEUE).
+ */
+#define HFI1_MAX_NETDEV_CTXTS 8
+
+/* Number of NETDEV RSM entries */
+#define NUM_NETDEV_MAP_ENTRIES HFI1_MAX_NETDEV_CTXTS
+
+/**
+ * struct hfi1_netdev_priv: data required to set up and run the HFI dummy netdev.
+ * @dd: hfi1_devdata
+ * @rxq: pointer to dummy netdev receive queues.
+ * @num_rx_q: number of receive queues
+ * @rmt_start: first free index in the RMT array
+ * @msix_start: first free MSI-X interrupt vector.
+ * @dev_tbl: netdev table of unique identifiers for the VNIC and IPoIB VLANs.
+ * @enabled: atomic counter of netdevs enabling receive queues.
+ * When 0 NAPI will be disabled.
+ * @netdevs: atomic counter of netdevs using dummy netdev.
+ * When 0 receive queues will be freed.
+ */
+struct hfi1_netdev_priv {
+ struct hfi1_devdata *dd;
+ struct hfi1_netdev_rxq *rxq;
+ int num_rx_q;
+ int rmt_start;
+ struct xarray dev_tbl;
+ /* count of enabled napi polls */
+ atomic_t enabled;
+ /* count of netdevs on top */
+ atomic_t netdevs;
+};
+
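+/* The private data is laid out immediately after the dummy net_device structure */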
+static inline
+struct hfi1_netdev_priv *hfi1_netdev_priv(struct net_device *dev)
+{
+ return (struct hfi1_netdev_priv *)&dev[1];
+}
+
+static inline
+int hfi1_netdev_ctxt_count(struct hfi1_devdata *dd)
+{
+ struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
+
+ return priv->num_rx_q;
+}
+
+static inline
+struct hfi1_ctxtdata *hfi1_netdev_get_ctxt(struct hfi1_devdata *dd, int ctxt)
+{
+ struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
+
+ return priv->rxq[ctxt].rcd;
+}
+
+static inline
+int hfi1_netdev_get_free_rmt_idx(struct hfi1_devdata *dd)
+{
+ struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
+
+ return priv->rmt_start;
+}
+
+static inline
+void hfi1_netdev_set_free_rmt_idx(struct hfi1_devdata *dd, int rmt_idx)
+{
+ struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
+
+ priv->rmt_start = rmt_idx;
+}
+
+u32 hfi1_num_netdev_contexts(struct hfi1_devdata *dd, u32 available_contexts,
+ struct cpumask *cpu_mask);
+
+void hfi1_netdev_enable_queues(struct hfi1_devdata *dd);
+void hfi1_netdev_disable_queues(struct hfi1_devdata *dd);
+int hfi1_netdev_rx_init(struct hfi1_devdata *dd);
+int hfi1_netdev_rx_destroy(struct hfi1_devdata *dd);
+int hfi1_netdev_alloc(struct hfi1_devdata *dd);
+void hfi1_netdev_free(struct hfi1_devdata *dd);
+int hfi1_netdev_add_data(struct hfi1_devdata *dd, int id, void *data);
+void *hfi1_netdev_remove_data(struct hfi1_devdata *dd, int id);
+void *hfi1_netdev_get_data(struct hfi1_devdata *dd, int id);
+void *hfi1_netdev_get_first_data(struct hfi1_devdata *dd, int *start_id);
+
+/* chip.c */
+int hfi1_netdev_rx_napi(struct napi_struct *napi, int budget);
+
+#endif /* HFI1_NETDEV_H */
diff --git a/drivers/infiniband/hw/hfi1/netdev_rx.c b/drivers/infiniband/hw/hfi1/netdev_rx.c
new file mode 100644
index 000000000000..63688e85e8da
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/netdev_rx.c
@@ -0,0 +1,481 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/*
+ * Copyright(c) 2020 Intel Corporation.
+ *
+ */
+
+/*
+ * This file contains HFI1 support for netdev RX functionality
+ */
+
+#include "sdma.h"
+#include "verbs.h"
+#include "netdev.h"
+#include "hfi.h"
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <rdma/ib_verbs.h>
+
+static int hfi1_netdev_setup_ctxt(struct hfi1_netdev_priv *priv,
+ struct hfi1_ctxtdata *uctxt)
+{
+ unsigned int rcvctrl_ops;
+ struct hfi1_devdata *dd = priv->dd;
+ int ret;
+
+ uctxt->rhf_rcv_function_map = netdev_rhf_rcv_functions;
+ uctxt->do_interrupt = &handle_receive_interrupt_napi_sp;
+
+ /* Now allocate the RcvHdr queue and eager buffers. */
+ ret = hfi1_create_rcvhdrq(dd, uctxt);
+ if (ret)
+ goto done;
+
+ ret = hfi1_setup_eagerbufs(uctxt);
+ if (ret)
+ goto done;
+
+ clear_rcvhdrtail(uctxt);
+
+ rcvctrl_ops = HFI1_RCVCTRL_CTXT_DIS;
+ rcvctrl_ops |= HFI1_RCVCTRL_INTRAVAIL_DIS;
+
+ if (!HFI1_CAP_KGET_MASK(uctxt->flags, MULTI_PKT_EGR))
+ rcvctrl_ops |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
+ if (HFI1_CAP_KGET_MASK(uctxt->flags, NODROP_EGR_FULL))
+ rcvctrl_ops |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
+ if (HFI1_CAP_KGET_MASK(uctxt->flags, NODROP_RHQ_FULL))
+ rcvctrl_ops |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
+ if (HFI1_CAP_KGET_MASK(uctxt->flags, DMA_RTAIL))
+ rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_ENB;
+
+ hfi1_rcvctrl(uctxt->dd, rcvctrl_ops, uctxt);
+done:
+ return ret;
+}
+
+static int hfi1_netdev_allocate_ctxt(struct hfi1_devdata *dd,
+ struct hfi1_ctxtdata **ctxt)
+{
+ struct hfi1_ctxtdata *uctxt;
+ int ret;
+
+ if (dd->flags & HFI1_FROZEN)
+ return -EIO;
+
+ ret = hfi1_create_ctxtdata(dd->pport, dd->node, &uctxt);
+ if (ret < 0) {
+ dd_dev_err(dd, "Unable to create ctxtdata, failing open\n");
+ return -ENOMEM;
+ }
+
+ uctxt->flags = HFI1_CAP_KGET(MULTI_PKT_EGR) |
+ HFI1_CAP_KGET(NODROP_RHQ_FULL) |
+ HFI1_CAP_KGET(NODROP_EGR_FULL) |
+ HFI1_CAP_KGET(DMA_RTAIL);
+ /* Netdev contexts are always NO_RDMA_RTAIL */
+ uctxt->fast_handler = handle_receive_interrupt_napi_fp;
+ uctxt->slow_handler = handle_receive_interrupt_napi_sp;
+ hfi1_set_seq_cnt(uctxt, 1);
+ uctxt->is_vnic = true;
+
+ hfi1_stats.sps_ctxts++;
+
+ dd_dev_info(dd, "created netdev context %d\n", uctxt->ctxt);
+ *ctxt = uctxt;
+
+ return 0;
+}
+
+static void hfi1_netdev_deallocate_ctxt(struct hfi1_devdata *dd,
+ struct hfi1_ctxtdata *uctxt)
+{
+ flush_wc();
+
+ /*
+ * Disable receive context and interrupt available, reset all
+ * RcvCtxtCtrl bits to default values.
+ */
+ hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
+ HFI1_RCVCTRL_TIDFLOW_DIS |
+ HFI1_RCVCTRL_INTRAVAIL_DIS |
+ HFI1_RCVCTRL_ONE_PKT_EGR_DIS |
+ HFI1_RCVCTRL_NO_RHQ_DROP_DIS |
+ HFI1_RCVCTRL_NO_EGR_DROP_DIS, uctxt);
+
+ if (uctxt->msix_intr != CCE_NUM_MSIX_VECTORS)
+ msix_free_irq(dd, uctxt->msix_intr);
+
+ uctxt->msix_intr = CCE_NUM_MSIX_VECTORS;
+ uctxt->event_flags = 0;
+
+ hfi1_clear_tids(uctxt);
+ hfi1_clear_ctxt_pkey(dd, uctxt);
+
+ hfi1_stats.sps_ctxts--;
+
+ hfi1_free_ctxt(uctxt);
+}
+
+static int hfi1_netdev_allot_ctxt(struct hfi1_netdev_priv *priv,
+ struct hfi1_ctxtdata **ctxt)
+{
+ int rc;
+ struct hfi1_devdata *dd = priv->dd;
+
+ rc = hfi1_netdev_allocate_ctxt(dd, ctxt);
+ if (rc) {
+ dd_dev_err(dd, "netdev ctxt alloc failed %d\n", rc);
+ return rc;
+ }
+
+ rc = hfi1_netdev_setup_ctxt(priv, *ctxt);
+ if (rc) {
+ dd_dev_err(dd, "netdev ctxt setup failed %d\n", rc);
+ hfi1_netdev_deallocate_ctxt(dd, *ctxt);
+ *ctxt = NULL;
+ }
+
+ return rc;
+}
+
+/**
+ * hfi1_num_netdev_contexts - Count of netdev recv contexts to use.
+ * @dd: device on which to allocate netdev contexts
+ * @available_contexts: count of available receive contexts
+ * @cpu_mask: mask of possible cpus to include for contexts
+ *
+ * Return: count of physical cores on a node or the remaining available recv
+ * contexts for netdev recv context usage up to the maximum of
+ * HFI1_MAX_NETDEV_CTXTS.
+ * A value of 0 can be returned when acceleration is explicitly turned off,
+ * a memory allocation error occurs or when there are no available contexts.
+ *
+ */
+u32 hfi1_num_netdev_contexts(struct hfi1_devdata *dd, u32 available_contexts,
+ struct cpumask *cpu_mask)
+{
+ cpumask_var_t node_cpu_mask;
+ unsigned int available_cpus;
+
+ if (!HFI1_CAP_IS_KSET(AIP))
+ return 0;
+
+ /* Always give user contexts priority over netdev contexts */
+ if (available_contexts == 0) {
+ dd_dev_info(dd, "No receive contexts available for netdevs.\n");
+ return 0;
+ }
+
+ if (!zalloc_cpumask_var(&node_cpu_mask, GFP_KERNEL)) {
+ dd_dev_err(dd, "Unable to allocate cpu_mask for netdevs.\n");
+ return 0;
+ }
+
+ cpumask_and(node_cpu_mask, cpu_mask,
+ cpumask_of_node(pcibus_to_node(dd->pcidev->bus)));
+
+ available_cpus = cpumask_weight(node_cpu_mask);
+
+ free_cpumask_var(node_cpu_mask);
+
+ return min3(available_cpus, available_contexts,
+ (u32)HFI1_MAX_NETDEV_CTXTS);
+}
+
+static int hfi1_netdev_rxq_init(struct net_device *dev)
+{
+ int i;
+ int rc;
+ struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dev);
+ struct hfi1_devdata *dd = priv->dd;
+
+ priv->num_rx_q = dd->num_netdev_contexts;
+ priv->rxq = kcalloc_node(priv->num_rx_q, sizeof(struct hfi1_netdev_rxq),
+ GFP_KERNEL, dd->node);
+
+ if (!priv->rxq) {
+ dd_dev_err(dd, "Unable to allocate netdev queue data\n");
+ return (-ENOMEM);
+ }
+
+ for (i = 0; i < priv->num_rx_q; i++) {
+ struct hfi1_netdev_rxq *rxq = &priv->rxq[i];
+
+ rc = hfi1_netdev_allot_ctxt(priv, &rxq->rcd);
+ if (rc)
+ goto bail_context_irq_failure;
+
+ hfi1_rcd_get(rxq->rcd);
+ rxq->priv = priv;
+ rxq->rcd->napi = &rxq->napi;
+ dd_dev_info(dd, "Setting rcv queue %d napi to context %d\n",
+ i, rxq->rcd->ctxt);
+ /*
+ * Disable BUSY_POLL on this NAPI as this is not supported
+ * right now.
+ */
+ set_bit(NAPI_STATE_NO_BUSY_POLL, &rxq->napi.state);
+ netif_napi_add(dev, &rxq->napi, hfi1_netdev_rx_napi, 64);
+ rc = msix_netdev_request_rcd_irq(rxq->rcd);
+ if (rc)
+ goto bail_context_irq_failure;
+ }
+
+ return 0;
+
+bail_context_irq_failure:
+ dd_dev_err(dd, "Unable to allot receive context\n");
+ for (; i >= 0; i--) {
+ struct hfi1_netdev_rxq *rxq = &priv->rxq[i];
+
+ if (rxq->rcd) {
+ hfi1_netdev_deallocate_ctxt(dd, rxq->rcd);
+ hfi1_rcd_put(rxq->rcd);
+ rxq->rcd = NULL;
+ }
+ }
+ kfree(priv->rxq);
+ priv->rxq = NULL;
+
+ return rc;
+}
+
+static void hfi1_netdev_rxq_deinit(struct net_device *dev)
+{
+ int i;
+ struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dev);
+ struct hfi1_devdata *dd = priv->dd;
+
+ for (i = 0; i < priv->num_rx_q; i++) {
+ struct hfi1_netdev_rxq *rxq = &priv->rxq[i];
+
+ netif_napi_del(&rxq->napi);
+ hfi1_netdev_deallocate_ctxt(dd, rxq->rcd);
+ hfi1_rcd_put(rxq->rcd);
+ rxq->rcd = NULL;
+ }
+
+ kfree(priv->rxq);
+ priv->rxq = NULL;
+ priv->num_rx_q = 0;
+}
+
+static void enable_queues(struct hfi1_netdev_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->num_rx_q; i++) {
+ struct hfi1_netdev_rxq *rxq = &priv->rxq[i];
+
+ dd_dev_info(priv->dd, "enabling queue %d on context %d\n", i,
+ rxq->rcd->ctxt);
+ napi_enable(&rxq->napi);
+ hfi1_rcvctrl(priv->dd,
+ HFI1_RCVCTRL_CTXT_ENB | HFI1_RCVCTRL_INTRAVAIL_ENB,
+ rxq->rcd);
+ }
+}
+
+static void disable_queues(struct hfi1_netdev_priv *priv)
+{
+ int i;
+
+ msix_netdev_synchronize_irq(priv->dd);
+
+ for (i = 0; i < priv->num_rx_q; i++) {
+ struct hfi1_netdev_rxq *rxq = &priv->rxq[i];
+
+ dd_dev_info(priv->dd, "disabling queue %d on context %d\n", i,
+ rxq->rcd->ctxt);
+
+ /* wait for napi if it was scheduled */
+ hfi1_rcvctrl(priv->dd,
+ HFI1_RCVCTRL_CTXT_DIS | HFI1_RCVCTRL_INTRAVAIL_DIS,
+ rxq->rcd);
+ napi_synchronize(&rxq->napi);
+ napi_disable(&rxq->napi);
+ }
+}
+
+/**
+ * hfi1_netdev_rx_init - Increments the netdevs counter. When called for the
+ *		 first time, it allocates receive queue data and calls netif_napi_add
+ * for each queue.
+ *
+ * @dd: hfi1 dev data
+ */
+int hfi1_netdev_rx_init(struct hfi1_devdata *dd)
+{
+ struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
+ int res;
+
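+ /* Only the first netdev to come up sets up the shared receive queues */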
+ if (atomic_fetch_inc(&priv->netdevs))
+ return 0;
+
+ mutex_lock(&hfi1_mutex);
+ init_dummy_netdev(dd->dummy_netdev);
+ res = hfi1_netdev_rxq_init(dd->dummy_netdev);
+ mutex_unlock(&hfi1_mutex);
+ return res;
+}
+
+/**
+ * hfi1_netdev_rx_destroy - Decrements the netdevs counter; when it reaches 0
+ *			   napi is deleted and the receive queue memory is freed.
+ *
+ * @dd: hfi1 dev data
+ */
+int hfi1_netdev_rx_destroy(struct hfi1_devdata *dd)
+{
+ struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
+
+ /* destroy the RX queues only if it is the last netdev going away */
+ if (atomic_fetch_add_unless(&priv->netdevs, -1, 0) == 1) {
+ mutex_lock(&hfi1_mutex);
+ hfi1_netdev_rxq_deinit(dd->dummy_netdev);
+ mutex_unlock(&hfi1_mutex);
+ }
+
+ return 0;
+}
+
+/**
+ * hfi1_netdev_alloc - Allocates the dummy netdev and private data. It is
+ *		      required because the RMT index and MSI-X interrupts can
+ *		      only be set up during driver initialization.
+ *
+ * @dd: hfi1 dev data
+ */
+int hfi1_netdev_alloc(struct hfi1_devdata *dd)
+{
+ struct hfi1_netdev_priv *priv;
+ const int netdev_size = sizeof(*dd->dummy_netdev) +
+ sizeof(struct hfi1_netdev_priv);
+
+ dd_dev_info(dd, "allocating netdev size %d\n", netdev_size);
+ dd->dummy_netdev = kcalloc_node(1, netdev_size, GFP_KERNEL, dd->node);
+
+ if (!dd->dummy_netdev)
+ return -ENOMEM;
+
+ priv = hfi1_netdev_priv(dd->dummy_netdev);
+ priv->dd = dd;
+ xa_init(&priv->dev_tbl);
+ atomic_set(&priv->enabled, 0);
+ atomic_set(&priv->netdevs, 0);
+
+ return 0;
+}
+
+void hfi1_netdev_free(struct hfi1_devdata *dd)
+{
+ if (dd->dummy_netdev) {
+ dd_dev_info(dd, "hfi1 netdev freed\n");
+ free_netdev(dd->dummy_netdev);
+ dd->dummy_netdev = NULL;
+ }
+}
+
+/**
+ * hfi1_netdev_enable_queues - napi enable function.
+ *		It enables the napi objects associated with the receive queues.
+ *		Every enable call increments an atomic counter; the matching
+ *		disable function decrements it and, once it reaches 0, calls
+ *		napi_disable for every queue.
+ *
+ * @dd: hfi1 dev data
+ */
+void hfi1_netdev_enable_queues(struct hfi1_devdata *dd)
+{
+ struct hfi1_netdev_priv *priv;
+
+ if (!dd->dummy_netdev)
+ return;
+
+ priv = hfi1_netdev_priv(dd->dummy_netdev);
+ if (atomic_fetch_inc(&priv->enabled))
+ return;
+
+ mutex_lock(&hfi1_mutex);
+ enable_queues(priv);
+ mutex_unlock(&hfi1_mutex);
+}
+
+void hfi1_netdev_disable_queues(struct hfi1_devdata *dd)
+{
+ struct hfi1_netdev_priv *priv;
+
+ if (!dd->dummy_netdev)
+ return;
+
+ priv = hfi1_netdev_priv(dd->dummy_netdev);
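+ /* Only quiesce the queues when the last enabled user goes away */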
+ if (atomic_dec_if_positive(&priv->enabled))
+ return;
+
+ mutex_lock(&hfi1_mutex);
+ disable_queues(priv);
+ mutex_unlock(&hfi1_mutex);
+}
+
+/**
+ * hfi1_netdev_add_data - Registers data with a unique identifier so it can
+ *		be requested later; this is needed by the VNIC and IPoIB
+ *		VLAN implementations.
+ *		The backing xarray provides its own locking.
+ *
+ * @dd: hfi1 dev data
+ * @id: requested integer id up to INT_MAX
+ * @data: data to be associated with index
+ */
+int hfi1_netdev_add_data(struct hfi1_devdata *dd, int id, void *data)
+{
+ struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
+
+ return xa_insert(&priv->dev_tbl, id, data, GFP_NOWAIT);
+}
+
+/**
+ * hfi1_netdev_remove_data - Removes data with previously given id.
+ * Returns the reference to the removed entry.
+ *
+ * @dd: hfi1 dev data
+ * @id: requested integer id up to INT_MAX
+ */
+void *hfi1_netdev_remove_data(struct hfi1_devdata *dd, int id)
+{
+ struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
+
+ return xa_erase(&priv->dev_tbl, id);
+}
+
+/**
+ * hfi1_netdev_get_data - Gets data with given id
+ *
+ * @dd: hfi1 dev data
+ * @id: requested integer id up to INT_MAX
+ */
+void *hfi1_netdev_get_data(struct hfi1_devdata *dd, int id)
+{
+ struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
+
+ return xa_load(&priv->dev_tbl, id);
+}
+
+/**
+ * hfi1_netdev_get_first_data - Gets the first entry with an id greater than
+ *			       or equal to *start_id.
+ *
+ * @dd: hfi1 dev data
+ * @start_id: requested integer id up to INT_MAX; updated to the id found
+ */
+void *hfi1_netdev_get_first_data(struct hfi1_devdata *dd, int *start_id)
+{
+ struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
+ unsigned long index = *start_id;
+ void *ret;
+
+ ret = xa_find(&priv->dev_tbl, &index, UINT_MAX, XA_PRESENT);
+ *start_id = (int)index;
+ return ret;
+}
diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c
index f8e733aa3bb8..0c2ae9f7b3e8 100644
--- a/drivers/infiniband/hw/hfi1/qp.c
+++ b/drivers/infiniband/hw/hfi1/qp.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015 - 2019 Intel Corporation.
+ * Copyright(c) 2015 - 2020 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -186,15 +186,6 @@ static void flush_iowait(struct rvt_qp *qp)
write_sequnlock_irqrestore(lock, flags);
}
-static inline int opa_mtu_enum_to_int(int mtu)
-{
- switch (mtu) {
- case OPA_MTU_8192: return 8192;
- case OPA_MTU_10240: return 10240;
- default: return -1;
- }
-}
-
/**
* This function is what we would push to the core layer if we wanted to be a
* "first class citizen". Instead we hide this here and rely on Verbs ULPs
@@ -202,15 +193,10 @@ static inline int opa_mtu_enum_to_int(int mtu)
*/
static inline int verbs_mtu_enum_to_int(struct ib_device *dev, enum ib_mtu mtu)
{
- int val;
-
/* Constraining 10KB packets to 8KB packets */
if (mtu == (enum ib_mtu)OPA_MTU_10240)
mtu = OPA_MTU_8192;
- val = opa_mtu_enum_to_int((int)mtu);
- if (val > 0)
- return val;
- return ib_mtu_enum_to_int(mtu);
+ return opa_mtu_enum_to_int((enum opa_mtu)mtu);
}
int hfi1_check_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c
index 8a2e0d9351e9..243b4ba0b6f6 100644
--- a/drivers/infiniband/hw/hfi1/tid_rdma.c
+++ b/drivers/infiniband/hw/hfi1/tid_rdma.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
- * Copyright(c) 2018 Intel Corporation.
+ * Copyright(c) 2018 - 2020 Intel Corporation.
*
*/
@@ -194,7 +194,7 @@ void tid_rdma_opfn_init(struct rvt_qp *qp, struct tid_rdma_params *p)
{
struct hfi1_qp_priv *priv = qp->priv;
- p->qp = (kdeth_qp << 16) | priv->rcd->ctxt;
+ p->qp = (RVT_KDETH_QP_PREFIX << 16) | priv->rcd->ctxt;
p->max_len = TID_RDMA_MAX_SEGMENT_SIZE;
p->jkey = priv->rcd->jkey;
p->max_read = TID_RDMA_MAX_READ_SEGS_PER_REQ;
diff --git a/drivers/infiniband/hw/hfi1/trace.c b/drivers/infiniband/hw/hfi1/trace.c
index 9a3d236bcc88..b219ea90fd6f 100644
--- a/drivers/infiniband/hw/hfi1/trace.c
+++ b/drivers/infiniband/hw/hfi1/trace.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015 - 2018 Intel Corporation.
+ * Copyright(c) 2015 - 2020 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -47,6 +47,7 @@
#define CREATE_TRACE_POINTS
#include "trace.h"
#include "exp_rcv.h"
+#include "ipoib.h"
static u8 __get_ib_hdr_len(struct ib_header *hdr)
{
@@ -126,6 +127,7 @@ const char *hfi1_trace_get_packet_l2_str(u8 l2)
#define RETH_PRN "reth vaddr:0x%.16llx rkey:0x%.8x dlen:0x%.8x"
#define AETH_PRN "aeth syn:0x%.2x %s msn:0x%.8x"
#define DETH_PRN "deth qkey:0x%.8x sqpn:0x%.6x"
+#define DETH_ENTROPY_PRN "deth qkey:0x%.8x sqpn:0x%.6x entropy:0x%.2x"
#define IETH_PRN "ieth rkey:0x%.8x"
#define ATOMICACKETH_PRN "origdata:%llx"
#define ATOMICETH_PRN "vaddr:0x%llx rkey:0x%.8x sdata:%llx cdata:%llx"
@@ -444,6 +446,12 @@ const char *parse_everbs_hdrs(
break;
/* deth */
case OP(UD, SEND_ONLY):
+ trace_seq_printf(p, DETH_ENTROPY_PRN,
+ be32_to_cpu(eh->ud.deth[0]),
+ be32_to_cpu(eh->ud.deth[1]) & RVT_QPN_MASK,
+ be32_to_cpu(eh->ud.deth[1]) >>
+ HFI1_IPOIB_ENTROPY_SHIFT);
+ break;
case OP(UD, SEND_ONLY_WITH_IMMEDIATE):
trace_seq_printf(p, DETH_PRN,
be32_to_cpu(eh->ud.deth[0]),
@@ -512,6 +520,38 @@ u16 hfi1_trace_get_tid_idx(u32 ent)
return EXP_TID_GET(ent, IDX);
}
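+/* Histogram of packets seen per receive context, reported by the ctxt_rsm_hist tracepoint */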
+struct hfi1_ctxt_hist {
+ atomic_t count;
+ atomic_t data[255];
+};
+
+struct hfi1_ctxt_hist hist = {
+ .count = ATOMIC_INIT(0)
+};
+
+const char *hfi1_trace_print_rsm_hist(struct trace_seq *p, unsigned int ctxt)
+{
+ int i, len = ARRAY_SIZE(hist.data);
+ const char *ret = trace_seq_buffer_ptr(p);
+ unsigned long packet_count = atomic_fetch_inc(&hist.count);
+
+ trace_seq_printf(p, "packet[%lu]", packet_count);
+ for (i = 0; i < len; ++i) {
+ unsigned long val;
+ atomic_t *count = &hist.data[i];
+
+ if (ctxt == i)
+ val = atomic_fetch_inc(count);
+ else
+ val = atomic_read(count);
+
+ if (val)
+ trace_seq_printf(p, "(%d:%lu)", i, val);
+ }
+ trace_seq_putc(p, 0);
+ return ret;
+}
+
__hfi1_trace_fn(AFFINITY);
__hfi1_trace_fn(PKT);
__hfi1_trace_fn(PROC);
diff --git a/drivers/infiniband/hw/hfi1/trace_ctxts.h b/drivers/infiniband/hw/hfi1/trace_ctxts.h
index b5fc5c6cd52f..d8c168dc3ea8 100644
--- a/drivers/infiniband/hw/hfi1/trace_ctxts.h
+++ b/drivers/infiniband/hw/hfi1/trace_ctxts.h
@@ -1,5 +1,5 @@
/*
-* Copyright(c) 2015, 2016 Intel Corporation.
+* Copyright(c) 2015 - 2020 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -138,6 +138,15 @@ TRACE_EVENT(hfi1_ctxt_info,
)
);
+const char *hfi1_trace_print_rsm_hist(struct trace_seq *p, unsigned int ctxt);
+TRACE_EVENT(ctxt_rsm_hist,
+ TP_PROTO(unsigned int ctxt),
+ TP_ARGS(ctxt),
+ TP_STRUCT__entry(__field(unsigned int, ctxt)),
+ TP_fast_assign(__entry->ctxt = ctxt;),
+ TP_printk("%s", hfi1_trace_print_rsm_hist(p, __entry->ctxt))
+);
+
#endif /* __HFI1_TRACE_CTXTS_H */
#undef TRACE_INCLUDE_PATH
diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
index 4da03f823474..f81ca20f4b69 100644
--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c
+++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
@@ -206,13 +206,6 @@ static int pin_rcv_pages(struct hfi1_filedata *fd, struct tid_user_buf *tidbuf)
return -EINVAL;
}
- /* Verify that access is OK for the user buffer */
- if (!access_ok((void __user *)vaddr,
- npages * PAGE_SIZE)) {
- dd_dev_err(dd, "Fail vaddr %p, %u pages, !access_ok\n",
- (void *)vaddr, npages);
- return -EFAULT;
- }
/* Allocate the array of struct page pointers needed for pinning */
pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
if (!pages)
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index 2f6323ad9c59..30865635b449 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015 - 2018 Intel Corporation.
+ * Copyright(c) 2015 - 2020 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -66,6 +66,7 @@
#include "vnic.h"
#include "fault.h"
#include "affinity.h"
+#include "ipoib.h"
static unsigned int hfi1_lkey_table_size = 16;
module_param_named(lkey_table_size, hfi1_lkey_table_size, uint,
@@ -1342,7 +1343,7 @@ static void hfi1_fill_device_attr(struct hfi1_devdata *dd)
IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE |
IB_DEVICE_MEM_MGT_EXTENSIONS |
- IB_DEVICE_RDMA_NETDEV_OPA_VNIC;
+ IB_DEVICE_RDMA_NETDEV_OPA;
rdi->dparms.props.page_size_cap = PAGE_SIZE;
rdi->dparms.props.vendor_id = dd->oui1 << 16 | dd->oui2 << 8 | dd->oui3;
rdi->dparms.props.vendor_part_id = dd->pcidev->device;
@@ -1360,7 +1361,6 @@ static void hfi1_fill_device_attr(struct hfi1_devdata *dd)
rdi->dparms.props.max_cq = hfi1_max_cqs;
rdi->dparms.props.max_ah = hfi1_max_ahs;
rdi->dparms.props.max_cqe = hfi1_max_cqes;
- rdi->dparms.props.max_map_per_fmr = 32767;
rdi->dparms.props.max_pd = hfi1_max_pds;
rdi->dparms.props.max_qp_rd_atom = HFI1_MAX_RDMA_ATOMIC;
rdi->dparms.props.max_qp_init_rd_atom = 255;
@@ -1439,6 +1439,8 @@ static int query_port(struct rvt_dev_info *rdi, u8 port_num,
4096 : hfi1_max_mtu), IB_MTU_4096);
props->active_mtu = !valid_ib_mtu(ppd->ibmtu) ? props->max_mtu :
mtu_to_enum(ppd->ibmtu, IB_MTU_4096);
+ props->phys_mtu = HFI1_CAP_IS_KSET(AIP) ? hfi1_max_mtu :
+ ib_mtu_enum_to_int(props->max_mtu);
return 0;
}
@@ -1793,6 +1795,7 @@ static const struct ib_device_ops hfi1_dev_ops = {
.modify_device = modify_device,
/* keep process mad in the driver */
.process_mad = hfi1_process_mad,
+ .rdma_netdev_get_params = hfi1_ipoib_rn_get_params,
};
/**
@@ -1863,9 +1866,8 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
dd->verbs_dev.rdi.dparms.qpn_start = 0;
dd->verbs_dev.rdi.dparms.qpn_inc = 1;
dd->verbs_dev.rdi.dparms.qos_shift = dd->qos_shift;
- dd->verbs_dev.rdi.dparms.qpn_res_start = kdeth_qp << 16;
- dd->verbs_dev.rdi.dparms.qpn_res_end =
- dd->verbs_dev.rdi.dparms.qpn_res_start + 65535;
+ dd->verbs_dev.rdi.dparms.qpn_res_start = RVT_KDETH_QP_BASE;
+ dd->verbs_dev.rdi.dparms.qpn_res_end = RVT_AIP_QP_MAX;
dd->verbs_dev.rdi.dparms.max_rdma_atomic = HFI1_MAX_RDMA_ATOMIC;
dd->verbs_dev.rdi.dparms.psn_mask = PSN_MASK;
dd->verbs_dev.rdi.dparms.psn_shift = PSN_SHIFT;
diff --git a/drivers/infiniband/hw/hfi1/vnic.h b/drivers/infiniband/hw/hfi1/vnic.h
index 5ae781514e32..66150a13f374 100644
--- a/drivers/infiniband/hw/hfi1/vnic.h
+++ b/drivers/infiniband/hw/hfi1/vnic.h
@@ -1,7 +1,7 @@
#ifndef _HFI1_VNIC_H
#define _HFI1_VNIC_H
/*
- * Copyright(c) 2017 Intel Corporation.
+ * Copyright(c) 2017 - 2020 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -69,6 +69,7 @@
#define HFI1_VNIC_SC_SHIFT 4
#define HFI1_VNIC_MAX_QUEUE 16
+#define HFI1_NUM_VNIC_CTXT 8
/**
* struct hfi1_vnic_sdma - VNIC per Tx ring SDMA information
@@ -104,7 +105,6 @@ struct hfi1_vnic_rx_queue {
struct hfi1_vnic_vport_info *vinfo;
struct net_device *netdev;
struct napi_struct napi;
- struct sk_buff_head skbq;
};
/**
@@ -146,7 +146,6 @@ struct hfi1_vnic_vport_info {
/* vnic hfi1 internal functions */
void hfi1_vnic_setup(struct hfi1_devdata *dd);
-void hfi1_vnic_cleanup(struct hfi1_devdata *dd);
int hfi1_vnic_txreq_init(struct hfi1_devdata *dd);
void hfi1_vnic_txreq_deinit(struct hfi1_devdata *dd);
diff --git a/drivers/infiniband/hw/hfi1/vnic_main.c b/drivers/infiniband/hw/hfi1/vnic_main.c
index 6b14581b9965..a90824de0f57 100644
--- a/drivers/infiniband/hw/hfi1/vnic_main.c
+++ b/drivers/infiniband/hw/hfi1/vnic_main.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2017 - 2018 Intel Corporation.
+ * Copyright(c) 2017 - 2020 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -53,6 +53,7 @@
#include <linux/if_vlan.h>
#include "vnic.h"
+#include "netdev.h"
#define HFI_TX_TIMEOUT_MS 1000
@@ -62,114 +63,6 @@
static DEFINE_SPINLOCK(vport_cntr_lock);
-static int setup_vnic_ctxt(struct hfi1_devdata *dd, struct hfi1_ctxtdata *uctxt)
-{
- unsigned int rcvctrl_ops = 0;
- int ret;
-
- uctxt->do_interrupt = &handle_receive_interrupt;
-
- /* Now allocate the RcvHdr queue and eager buffers. */
- ret = hfi1_create_rcvhdrq(dd, uctxt);
- if (ret)
- goto done;
-
- ret = hfi1_setup_eagerbufs(uctxt);
- if (ret)
- goto done;
-
- if (hfi1_rcvhdrtail_kvaddr(uctxt))
- clear_rcvhdrtail(uctxt);
-
- rcvctrl_ops = HFI1_RCVCTRL_CTXT_ENB;
- rcvctrl_ops |= HFI1_RCVCTRL_INTRAVAIL_ENB;
-
- if (!HFI1_CAP_KGET_MASK(uctxt->flags, MULTI_PKT_EGR))
- rcvctrl_ops |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
- if (HFI1_CAP_KGET_MASK(uctxt->flags, NODROP_EGR_FULL))
- rcvctrl_ops |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
- if (HFI1_CAP_KGET_MASK(uctxt->flags, NODROP_RHQ_FULL))
- rcvctrl_ops |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
- if (HFI1_CAP_KGET_MASK(uctxt->flags, DMA_RTAIL))
- rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_ENB;
-
- hfi1_rcvctrl(uctxt->dd, rcvctrl_ops, uctxt);
-done:
- return ret;
-}
-
-static int allocate_vnic_ctxt(struct hfi1_devdata *dd,
- struct hfi1_ctxtdata **vnic_ctxt)
-{
- struct hfi1_ctxtdata *uctxt;
- int ret;
-
- if (dd->flags & HFI1_FROZEN)
- return -EIO;
-
- ret = hfi1_create_ctxtdata(dd->pport, dd->node, &uctxt);
- if (ret < 0) {
- dd_dev_err(dd, "Unable to create ctxtdata, failing open\n");
- return -ENOMEM;
- }
-
- uctxt->flags = HFI1_CAP_KGET(MULTI_PKT_EGR) |
- HFI1_CAP_KGET(NODROP_RHQ_FULL) |
- HFI1_CAP_KGET(NODROP_EGR_FULL) |
- HFI1_CAP_KGET(DMA_RTAIL);
- uctxt->seq_cnt = 1;
- uctxt->is_vnic = true;
-
- msix_request_rcd_irq(uctxt);
-
- hfi1_stats.sps_ctxts++;
- dd_dev_dbg(dd, "created vnic context %d\n", uctxt->ctxt);
- *vnic_ctxt = uctxt;
-
- return 0;
-}
-
-static void deallocate_vnic_ctxt(struct hfi1_devdata *dd,
- struct hfi1_ctxtdata *uctxt)
-{
- dd_dev_dbg(dd, "closing vnic context %d\n", uctxt->ctxt);
- flush_wc();
-
- /*
- * Disable receive context and interrupt available, reset all
- * RcvCtxtCtrl bits to default values.
- */
- hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
- HFI1_RCVCTRL_TIDFLOW_DIS |
- HFI1_RCVCTRL_INTRAVAIL_DIS |
- HFI1_RCVCTRL_ONE_PKT_EGR_DIS |
- HFI1_RCVCTRL_NO_RHQ_DROP_DIS |
- HFI1_RCVCTRL_NO_EGR_DROP_DIS, uctxt);
-
- /* msix_intr will always be > 0, only clean up if this is true */
- if (uctxt->msix_intr)
- msix_free_irq(dd, uctxt->msix_intr);
-
- uctxt->event_flags = 0;
-
- hfi1_clear_tids(uctxt);
- hfi1_clear_ctxt_pkey(dd, uctxt);
-
- hfi1_stats.sps_ctxts--;
-
- hfi1_free_ctxt(uctxt);
-}
-
-void hfi1_vnic_setup(struct hfi1_devdata *dd)
-{
- xa_init(&dd->vnic.vesws);
-}
-
-void hfi1_vnic_cleanup(struct hfi1_devdata *dd)
-{
- WARN_ON(!xa_empty(&dd->vnic.vesws));
-}
-
#define SUM_GRP_COUNTERS(stats, qstats, x_grp) do { \
u64 *src64, *dst64; \
for (src64 = &qstats->x_grp.unicast, \
@@ -179,6 +72,9 @@ void hfi1_vnic_cleanup(struct hfi1_devdata *dd)
} \
} while (0)
+#define VNIC_MASK (0xFF)
+#define VNIC_ID(val) ((1ull << 24) | ((val) & VNIC_MASK))
+
/* hfi1_vnic_update_stats - update statistics */
static void hfi1_vnic_update_stats(struct hfi1_vnic_vport_info *vinfo,
struct opa_vnic_stats *stats)
@@ -454,71 +350,25 @@ static inline int hfi1_vnic_decap_skb(struct hfi1_vnic_rx_queue *rxq,
return rc;
}
-static inline struct sk_buff *hfi1_vnic_get_skb(struct hfi1_vnic_rx_queue *rxq)
+static struct hfi1_vnic_vport_info *get_vnic_port(struct hfi1_devdata *dd,
+ int vesw_id)
{
- unsigned char *pad_info;
- struct sk_buff *skb;
+ int vnic_id = VNIC_ID(vesw_id);
- skb = skb_dequeue(&rxq->skbq);
- if (unlikely(!skb))
- return NULL;
-
- /* remove tail padding and icrc */
- pad_info = skb->data + skb->len - 1;
- skb_trim(skb, (skb->len - OPA_VNIC_ICRC_TAIL_LEN -
- ((*pad_info) & 0x7)));
-
- return skb;
+ return hfi1_netdev_get_data(dd, vnic_id);
}
-/* hfi1_vnic_handle_rx - handle skb receive */
-static void hfi1_vnic_handle_rx(struct hfi1_vnic_rx_queue *rxq,
- int *work_done, int work_to_do)
+static struct hfi1_vnic_vport_info *get_first_vnic_port(struct hfi1_devdata *dd)
{
- struct hfi1_vnic_vport_info *vinfo = rxq->vinfo;
- struct sk_buff *skb;
- int rc;
-
- while (1) {
- if (*work_done >= work_to_do)
- break;
-
- skb = hfi1_vnic_get_skb(rxq);
- if (unlikely(!skb))
- break;
-
- rc = hfi1_vnic_decap_skb(rxq, skb);
- /* update rx counters */
- hfi1_vnic_update_rx_counters(vinfo, rxq->idx, skb, rc);
- if (unlikely(rc)) {
- dev_kfree_skb_any(skb);
- continue;
- }
-
- skb_checksum_none_assert(skb);
- skb->protocol = eth_type_trans(skb, rxq->netdev);
-
- napi_gro_receive(&rxq->napi, skb);
- (*work_done)++;
- }
-}
-
-/* hfi1_vnic_napi - napi receive polling callback function */
-static int hfi1_vnic_napi(struct napi_struct *napi, int budget)
-{
- struct hfi1_vnic_rx_queue *rxq = container_of(napi,
- struct hfi1_vnic_rx_queue, napi);
- struct hfi1_vnic_vport_info *vinfo = rxq->vinfo;
- int work_done = 0;
+ struct hfi1_vnic_vport_info *vinfo;
+ int next_id = VNIC_ID(0);
- v_dbg("napi %d budget %d\n", rxq->idx, budget);
- hfi1_vnic_handle_rx(rxq, &work_done, budget);
+ vinfo = hfi1_netdev_get_first_data(dd, &next_id);
- v_dbg("napi %d work_done %d\n", rxq->idx, work_done);
- if (work_done < budget)
- napi_complete(napi);
+ if (next_id > VNIC_ID(VNIC_MASK))
+ return NULL;
- return work_done;
+ return vinfo;
}
void hfi1_vnic_bypass_rcv(struct hfi1_packet *packet)
@@ -527,13 +377,14 @@ void hfi1_vnic_bypass_rcv(struct hfi1_packet *packet)
struct hfi1_vnic_vport_info *vinfo = NULL;
struct hfi1_vnic_rx_queue *rxq;
struct sk_buff *skb;
- int l4_type, vesw_id = -1;
+ int l4_type, vesw_id = -1, rc;
u8 q_idx;
+ unsigned char *pad_info;
l4_type = hfi1_16B_get_l4(packet->ebuf);
if (likely(l4_type == OPA_16B_L4_ETHR)) {
vesw_id = HFI1_VNIC_GET_VESWID(packet->ebuf);
- vinfo = xa_load(&dd->vnic.vesws, vesw_id);
+ vinfo = get_vnic_port(dd, vesw_id);
/*
* In case of invalid vesw id, count the error on
@@ -541,10 +392,8 @@ void hfi1_vnic_bypass_rcv(struct hfi1_packet *packet)
*/
if (unlikely(!vinfo)) {
struct hfi1_vnic_vport_info *vinfo_tmp;
- unsigned long index = 0;
- vinfo_tmp = xa_find(&dd->vnic.vesws, &index, ULONG_MAX,
- XA_PRESENT);
+ vinfo_tmp = get_first_vnic_port(dd);
if (vinfo_tmp) {
spin_lock(&vport_cntr_lock);
vinfo_tmp->stats[0].netstats.rx_nohandler++;
@@ -563,12 +412,6 @@ void hfi1_vnic_bypass_rcv(struct hfi1_packet *packet)
rxq = &vinfo->rxq[q_idx];
if (unlikely(!netif_oper_up(vinfo->netdev))) {
vinfo->stats[q_idx].rx_drop_state++;
- skb_queue_purge(&rxq->skbq);
- return;
- }
-
- if (unlikely(skb_queue_len(&rxq->skbq) > HFI1_VNIC_RCV_Q_SIZE)) {
- vinfo->stats[q_idx].netstats.rx_fifo_errors++;
return;
}
@@ -580,62 +423,65 @@ void hfi1_vnic_bypass_rcv(struct hfi1_packet *packet)
memcpy(skb->data, packet->ebuf, packet->tlen);
skb_put(skb, packet->tlen);
- skb_queue_tail(&rxq->skbq, skb);
- if (napi_schedule_prep(&rxq->napi)) {
- v_dbg("napi %d scheduling\n", q_idx);
- __napi_schedule(&rxq->napi);
+ pad_info = skb->data + skb->len - 1;
+ skb_trim(skb, (skb->len - OPA_VNIC_ICRC_TAIL_LEN -
+ ((*pad_info) & 0x7)));
+
+ rc = hfi1_vnic_decap_skb(rxq, skb);
+
+ /* update rx counters */
+ hfi1_vnic_update_rx_counters(vinfo, rxq->idx, skb, rc);
+ if (unlikely(rc)) {
+ dev_kfree_skb_any(skb);
+ return;
}
+
+ skb_checksum_none_assert(skb);
+ skb->protocol = eth_type_trans(skb, rxq->netdev);
+
+ napi_gro_receive(&rxq->napi, skb);
}
static int hfi1_vnic_up(struct hfi1_vnic_vport_info *vinfo)
{
struct hfi1_devdata *dd = vinfo->dd;
struct net_device *netdev = vinfo->netdev;
- int i, rc;
+ int rc;
/* ensure virtual eth switch id is valid */
if (!vinfo->vesw_id)
return -EINVAL;
- rc = xa_insert(&dd->vnic.vesws, vinfo->vesw_id, vinfo, GFP_KERNEL);
+ rc = hfi1_netdev_add_data(dd, VNIC_ID(vinfo->vesw_id), vinfo);
if (rc < 0)
return rc;
- for (i = 0; i < vinfo->num_rx_q; i++) {
- struct hfi1_vnic_rx_queue *rxq = &vinfo->rxq[i];
-
- skb_queue_head_init(&rxq->skbq);
- napi_enable(&rxq->napi);
- }
+ rc = hfi1_netdev_rx_init(dd);
+ if (rc)
+ goto err_remove;
netif_carrier_on(netdev);
netif_tx_start_all_queues(netdev);
set_bit(HFI1_VNIC_UP, &vinfo->flags);
return 0;
+
+err_remove:
+ hfi1_netdev_remove_data(dd, VNIC_ID(vinfo->vesw_id));
+ return rc;
}
static void hfi1_vnic_down(struct hfi1_vnic_vport_info *vinfo)
{
struct hfi1_devdata *dd = vinfo->dd;
- u8 i;
clear_bit(HFI1_VNIC_UP, &vinfo->flags);
netif_carrier_off(vinfo->netdev);
netif_tx_disable(vinfo->netdev);
- xa_erase(&dd->vnic.vesws, vinfo->vesw_id);
-
- /* ensure irqs see the change */
- msix_vnic_synchronize_irq(dd);
+ hfi1_netdev_remove_data(dd, VNIC_ID(vinfo->vesw_id));
- /* remove unread skbs */
- for (i = 0; i < vinfo->num_rx_q; i++) {
- struct hfi1_vnic_rx_queue *rxq = &vinfo->rxq[i];
-
- napi_disable(&rxq->napi);
- skb_queue_purge(&rxq->skbq);
- }
+ hfi1_netdev_rx_destroy(dd);
}
static int hfi1_netdev_open(struct net_device *netdev)
@@ -660,70 +506,31 @@ static int hfi1_netdev_close(struct net_device *netdev)
return 0;
}
-static int hfi1_vnic_allot_ctxt(struct hfi1_devdata *dd,
- struct hfi1_ctxtdata **vnic_ctxt)
-{
- int rc;
-
- rc = allocate_vnic_ctxt(dd, vnic_ctxt);
- if (rc) {
- dd_dev_err(dd, "vnic ctxt alloc failed %d\n", rc);
- return rc;
- }
-
- rc = setup_vnic_ctxt(dd, *vnic_ctxt);
- if (rc) {
- dd_dev_err(dd, "vnic ctxt setup failed %d\n", rc);
- deallocate_vnic_ctxt(dd, *vnic_ctxt);
- *vnic_ctxt = NULL;
- }
-
- return rc;
-}
-
static int hfi1_vnic_init(struct hfi1_vnic_vport_info *vinfo)
{
struct hfi1_devdata *dd = vinfo->dd;
- int i, rc = 0;
+ int rc = 0;
mutex_lock(&hfi1_mutex);
- if (!dd->vnic.num_vports) {
+ if (!dd->vnic_num_vports) {
rc = hfi1_vnic_txreq_init(dd);
if (rc)
goto txreq_fail;
}
- for (i = dd->vnic.num_ctxt; i < vinfo->num_rx_q; i++) {
- rc = hfi1_vnic_allot_ctxt(dd, &dd->vnic.ctxt[i]);
- if (rc)
- break;
- hfi1_rcd_get(dd->vnic.ctxt[i]);
- dd->vnic.ctxt[i]->vnic_q_idx = i;
- }
-
- if (i < vinfo->num_rx_q) {
- /*
- * If required amount of contexts is not
- * allocated successfully then remaining contexts
- * are released.
- */
- while (i-- > dd->vnic.num_ctxt) {
- deallocate_vnic_ctxt(dd, dd->vnic.ctxt[i]);
- hfi1_rcd_put(dd->vnic.ctxt[i]);
- dd->vnic.ctxt[i] = NULL;
- }
+ rc = hfi1_netdev_rx_init(dd);
+ if (rc) {
+ dd_dev_err(dd, "Unable to initialize netdev contexts\n");
goto alloc_fail;
}
- if (dd->vnic.num_ctxt != i) {
- dd->vnic.num_ctxt = i;
- hfi1_init_vnic_rsm(dd);
- }
+ hfi1_init_vnic_rsm(dd);
- dd->vnic.num_vports++;
+ dd->vnic_num_vports++;
hfi1_vnic_sdma_init(vinfo);
+
alloc_fail:
- if (!dd->vnic.num_vports)
+ if (!dd->vnic_num_vports)
hfi1_vnic_txreq_deinit(dd);
txreq_fail:
mutex_unlock(&hfi1_mutex);
@@ -733,20 +540,14 @@ txreq_fail:
static void hfi1_vnic_deinit(struct hfi1_vnic_vport_info *vinfo)
{
struct hfi1_devdata *dd = vinfo->dd;
- int i;
mutex_lock(&hfi1_mutex);
- if (--dd->vnic.num_vports == 0) {
- for (i = 0; i < dd->vnic.num_ctxt; i++) {
- deallocate_vnic_ctxt(dd, dd->vnic.ctxt[i]);
- hfi1_rcd_put(dd->vnic.ctxt[i]);
- dd->vnic.ctxt[i] = NULL;
- }
+ if (--dd->vnic_num_vports == 0) {
hfi1_deinit_vnic_rsm(dd);
- dd->vnic.num_ctxt = 0;
hfi1_vnic_txreq_deinit(dd);
}
mutex_unlock(&hfi1_mutex);
+ hfi1_netdev_rx_destroy(dd);
}
static void hfi1_vnic_set_vesw_id(struct net_device *netdev, int id)
@@ -804,7 +605,7 @@ struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device,
struct rdma_netdev *rn;
int i, size, rc;
- if (!dd->num_vnic_contexts)
+ if (!dd->num_netdev_contexts)
return ERR_PTR(-ENOMEM);
if (!port_num || (port_num > dd->num_pports))
@@ -815,15 +616,16 @@ struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device,
size = sizeof(struct opa_vnic_rdma_netdev) + sizeof(*vinfo);
netdev = alloc_netdev_mqs(size, name, name_assign_type, setup,
- dd->num_sdma, dd->num_vnic_contexts);
+ chip_sdma_engines(dd),
+ dd->num_netdev_contexts);
if (!netdev)
return ERR_PTR(-ENOMEM);
rn = netdev_priv(netdev);
vinfo = opa_vnic_dev_priv(netdev);
vinfo->dd = dd;
- vinfo->num_tx_q = dd->num_sdma;
- vinfo->num_rx_q = dd->num_vnic_contexts;
+ vinfo->num_tx_q = chip_sdma_engines(dd);
+ vinfo->num_rx_q = dd->num_netdev_contexts;
vinfo->netdev = netdev;
rn->free_rdma_netdev = hfi1_vnic_free_rn;
rn->set_id = hfi1_vnic_set_vesw_id;
@@ -841,7 +643,6 @@ struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device,
rxq->idx = i;
rxq->vinfo = vinfo;
rxq->netdev = netdev;
- netif_napi_add(netdev, &rxq->napi, hfi1_vnic_napi, 64);
}
rc = hfi1_vnic_init(vinfo);
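
The VNIC_ID()/VNIC_MASK macros added above encode a vESW id into the shared netdev context table: the low 8 bits carry the id and bit 24 tags the entry as a VNIC entry, which is also how get_first_vnic_port() rejects anything past VNIC_ID(VNIC_MASK). A stand-alone sketch of that encoding (not part of the patch) is:

#include <stdint.h>
#include <stdio.h>

#define VNIC_MASK	(0xFF)
#define VNIC_ID(val)	((1ull << 24) | ((val) & VNIC_MASK))

int main(void)
{
	uint64_t id = VNIC_ID(5);	/* vesw_id 5 */

	printf("id  = 0x%llx\n", (unsigned long long)id);
	/* any id above this bound cannot be a VNIC entry */
	printf("max = 0x%llx\n", (unsigned long long)VNIC_ID(VNIC_MASK));
	return 0;
}
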
diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c
index 8a522e14ef62..5b2f9314edd3 100644
--- a/drivers/infiniband/hw/hns/hns_roce_ah.c
+++ b/drivers/infiniband/hw/hns/hns_roce_ah.c
@@ -39,13 +39,14 @@
#define HNS_ROCE_VLAN_SL_BIT_MASK 7
#define HNS_ROCE_VLAN_SL_SHIFT 13
-int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr,
- u32 flags, struct ib_udata *udata)
+int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibah->device);
const struct ib_gid_attr *gid_attr;
struct device *dev = hr_dev->dev;
struct hns_roce_ah *ah = to_hr_ah(ibah);
+ struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
u16 vlan_id = 0xffff;
bool vlan_en = false;
diff --git a/drivers/infiniband/hw/hns/hns_roce_alloc.c b/drivers/infiniband/hw/hns/hns_roce_alloc.c
index da574c26e063..a522cb2d29ea 100644
--- a/drivers/infiniband/hw/hns/hns_roce_alloc.c
+++ b/drivers/infiniband/hw/hns/hns_roce_alloc.c
@@ -157,84 +157,78 @@ void hns_roce_bitmap_cleanup(struct hns_roce_bitmap *bitmap)
kfree(bitmap->table);
}
-void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size,
- struct hns_roce_buf *buf)
+void hns_roce_buf_free(struct hns_roce_dev *hr_dev, struct hns_roce_buf *buf)
{
- int i;
struct device *dev = hr_dev->dev;
+ u32 size = buf->size;
+ int i;
+
+ if (size == 0)
+ return;
- if (buf->nbufs == 1) {
+ buf->size = 0;
+
+ if (hns_roce_buf_is_direct(buf)) {
dma_free_coherent(dev, size, buf->direct.buf, buf->direct.map);
} else {
- for (i = 0; i < buf->nbufs; ++i)
+ for (i = 0; i < buf->npages; ++i)
if (buf->page_list[i].buf)
dma_free_coherent(dev, 1 << buf->page_shift,
buf->page_list[i].buf,
buf->page_list[i].map);
kfree(buf->page_list);
+ buf->page_list = NULL;
}
}
int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
struct hns_roce_buf *buf, u32 page_shift)
{
- int i = 0;
- dma_addr_t t;
+ struct hns_roce_buf_list *buf_list;
struct device *dev = hr_dev->dev;
- u32 page_size = 1 << page_shift;
- u32 order;
+ u32 page_size;
+ int i;
- /* SQ/RQ buf lease than one page, SQ + RQ = 8K */
+ /* The minimum shift of the page accessed by hw is HNS_HW_PAGE_SHIFT */
+ buf->page_shift = max_t(int, HNS_HW_PAGE_SHIFT, page_shift);
+
+ page_size = 1 << buf->page_shift;
+ buf->npages = DIV_ROUND_UP(size, page_size);
+
+ /* required size is not bigger than one trunk size */
if (size <= max_direct) {
- buf->nbufs = 1;
- /* Npages calculated by page_size */
- order = get_order(size);
- if (order <= page_shift - PAGE_SHIFT)
- order = 0;
- else
- order -= page_shift - PAGE_SHIFT;
- buf->npages = 1 << order;
- buf->page_shift = page_shift;
- /* MTT PA must be recorded in 4k alignment, t is 4k aligned */
- buf->direct.buf = dma_alloc_coherent(dev, size, &t,
+ buf->page_list = NULL;
+ buf->direct.buf = dma_alloc_coherent(dev, size,
+ &buf->direct.map,
GFP_KERNEL);
if (!buf->direct.buf)
return -ENOMEM;
-
- buf->direct.map = t;
-
- while (t & ((1 << buf->page_shift) - 1)) {
- --buf->page_shift;
- buf->npages *= 2;
- }
} else {
- buf->nbufs = (size + page_size - 1) / page_size;
- buf->npages = buf->nbufs;
- buf->page_shift = page_shift;
- buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list),
- GFP_KERNEL);
-
- if (!buf->page_list)
+ buf_list = kcalloc(buf->npages, sizeof(*buf_list), GFP_KERNEL);
+ if (!buf_list)
return -ENOMEM;
- for (i = 0; i < buf->nbufs; ++i) {
- buf->page_list[i].buf = dma_alloc_coherent(dev,
- page_size,
- &t,
- GFP_KERNEL);
-
- if (!buf->page_list[i].buf)
- goto err_free;
+ for (i = 0; i < buf->npages; i++) {
+ buf_list[i].buf = dma_alloc_coherent(dev, page_size,
+ &buf_list[i].map,
+ GFP_KERNEL);
+ if (!buf_list[i].buf)
+ break;
+ }
- buf->page_list[i].map = t;
+ if (i != buf->npages && i > 0) {
+ while (i-- > 0)
+ dma_free_coherent(dev, page_size,
+ buf_list[i].buf,
+ buf_list[i].map);
+ kfree(buf_list);
+ return -ENOMEM;
}
+ buf->page_list = buf_list;
}
+ buf->size = size;
return 0;
-
-err_free:
- hns_roce_buf_free(hr_dev, size, buf);
- return -ENOMEM;
}
int hns_roce_get_kmem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
@@ -246,33 +240,30 @@ int hns_roce_get_kmem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
end = start + buf_cnt;
if (end > buf->npages) {
dev_err(hr_dev->dev,
- "invalid kmem region,offset %d,buf_cnt %d,total %d!\n",
+ "Failed to check kmem bufs, end %d + %d total %d!\n",
start, buf_cnt, buf->npages);
return -EINVAL;
}
total = 0;
for (i = start; i < end; i++)
- if (buf->nbufs == 1)
- bufs[total++] = buf->direct.map +
- ((dma_addr_t)i << buf->page_shift);
- else
- bufs[total++] = buf->page_list[i].map;
+ bufs[total++] = hns_roce_buf_page(buf, i);
return total;
}
int hns_roce_get_umem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
int buf_cnt, int start, struct ib_umem *umem,
- int page_shift)
+ unsigned int page_shift)
{
struct ib_block_iter biter;
int total = 0;
int idx = 0;
u64 addr;
- if (page_shift < PAGE_SHIFT) {
- dev_err(hr_dev->dev, "invalid page shift %d!\n", page_shift);
+ if (page_shift < HNS_HW_PAGE_SHIFT) {
+ dev_err(hr_dev->dev, "Failed to check umem page shift %d!\n",
+ page_shift);
return -EINVAL;
}
@@ -292,49 +283,6 @@ done:
return total;
}
-void hns_roce_init_buf_region(struct hns_roce_buf_region *region, int hopnum,
- int offset, int buf_cnt)
-{
- if (hopnum == HNS_ROCE_HOP_NUM_0)
- region->hopnum = 0;
- else
- region->hopnum = hopnum;
-
- region->offset = offset;
- region->count = buf_cnt;
-}
-
-void hns_roce_free_buf_list(dma_addr_t **bufs, int region_cnt)
-{
- int i;
-
- for (i = 0; i < region_cnt; i++) {
- kfree(bufs[i]);
- bufs[i] = NULL;
- }
-}
-
-int hns_roce_alloc_buf_list(struct hns_roce_buf_region *regions,
- dma_addr_t **bufs, int region_cnt)
-{
- struct hns_roce_buf_region *r;
- int i;
-
- for (i = 0; i < region_cnt; i++) {
- r = &regions[i];
- bufs[i] = kcalloc(r->count, sizeof(dma_addr_t), GFP_KERNEL);
- if (!bufs[i])
- goto err_alloc;
- }
-
- return 0;
-
-err_alloc:
- hns_roce_free_buf_list(bufs, i);
-
- return -ENOMEM;
-}
-
void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev)
{
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
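
The reworked hns_roce_buf_alloc() earlier in this file clamps the caller's page shift to the 4K hardware minimum and derives the page count by rounding the buffer size up to whole pages. A stand-alone sketch of just that arithmetic (not part of the patch; the sizes are made up and the macros are local stand-ins for the kernel ones) is:

#include <stdio.h>

#define HNS_HW_PAGE_SHIFT	12
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define max_t(type, a, b)	((type)(a) > (type)(b) ? (type)(a) : (type)(b))

int main(void)
{
	unsigned int requested_shift = 10;	/* example: caller asks for 1K pages */
	unsigned int size = 20000;		/* example buffer size in bytes */
	unsigned int page_shift = max_t(int, HNS_HW_PAGE_SHIFT, requested_shift);
	unsigned int page_size = 1u << page_shift;
	unsigned int npages = DIV_ROUND_UP(size, page_size);

	printf("page_size=%u npages=%u\n", page_size, npages);	/* 4096, 5 */
	return 0;
}
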
diff --git a/drivers/infiniband/hw/hns/hns_roce_common.h b/drivers/infiniband/hw/hns/hns_roce_common.h
index 8e95a1aa1b4f..f5669ff8cfeb 100644
--- a/drivers/infiniband/hw/hns/hns_roce_common.h
+++ b/drivers/infiniband/hw/hns/hns_roce_common.h
@@ -33,10 +33,6 @@
#ifndef _HNS_ROCE_COMMON_H
#define _HNS_ROCE_COMMON_H
-#ifndef assert
-#define assert(cond)
-#endif
-
#define roce_write(dev, reg, val) writel((val), (dev)->reg_base + (reg))
#define roce_read(dev, reg) readl((dev)->reg_base + (reg))
#define roce_raw_write(value, addr) \
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index 5bfb52ffd590..e87d616f7988 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -39,51 +39,40 @@
#include <rdma/hns-abi.h>
#include "hns_roce_common.h"
-static int hns_roce_alloc_cqc(struct hns_roce_dev *hr_dev,
- struct hns_roce_cq *hr_cq)
+static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
struct hns_roce_cmd_mailbox *mailbox;
- struct hns_roce_hem_table *mtt_table;
struct hns_roce_cq_table *cq_table;
- struct device *dev = hr_dev->dev;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ u64 mtts[MTT_MIN_COUNT] = { 0 };
dma_addr_t dma_handle;
- u64 *mtts;
int ret;
- cq_table = &hr_dev->cq_table;
-
- /* Get the physical address of cq buf */
- if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
- mtt_table = &hr_dev->mr_table.mtt_cqe_table;
- else
- mtt_table = &hr_dev->mr_table.mtt_table;
-
- mtts = hns_roce_table_find(hr_dev, mtt_table, hr_cq->mtt.first_seg,
- &dma_handle);
-
- if (!mtts) {
- dev_err(dev, "Failed to find mtt for CQ buf.\n");
+ ret = hns_roce_mtr_find(hr_dev, &hr_cq->mtr, 0, mtts, ARRAY_SIZE(mtts),
+ &dma_handle);
+ if (ret < 1) {
+ ibdev_err(ibdev, "Failed to find CQ mtr\n");
return -EINVAL;
}
+ cq_table = &hr_dev->cq_table;
ret = hns_roce_bitmap_alloc(&cq_table->bitmap, &hr_cq->cqn);
if (ret) {
- dev_err(dev, "Num of CQ out of range.\n");
+ ibdev_err(ibdev, "Failed to alloc CQ bitmap, err %d\n", ret);
return ret;
}
/* Get CQC memory HEM(Hardware Entry Memory) table */
ret = hns_roce_table_get(hr_dev, &cq_table->table, hr_cq->cqn);
if (ret) {
- dev_err(dev,
- "Get context mem failed(%d) when CQ(0x%lx) alloc.\n",
- ret, hr_cq->cqn);
+ ibdev_err(ibdev, "Failed to get CQ(0x%lx) context, err %d\n",
+ hr_cq->cqn, ret);
goto err_out;
}
ret = xa_err(xa_store(&cq_table->array, hr_cq->cqn, hr_cq, GFP_KERNEL));
if (ret) {
- dev_err(dev, "Failed to xa_store CQ.\n");
+ ibdev_err(ibdev, "Failed to xa_store CQ\n");
goto err_put;
}
@@ -101,9 +90,9 @@ static int hns_roce_alloc_cqc(struct hns_roce_dev *hr_dev,
HNS_ROCE_CMD_CREATE_CQC, HNS_ROCE_CMD_TIMEOUT_MSECS);
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
if (ret) {
- dev_err(dev,
- "Send cmd mailbox failed(%d) when CQ(0x%lx) alloc.\n",
- ret, hr_cq->cqn);
+ ibdev_err(ibdev,
+ "Failed to send create cmd for CQ(0x%lx), err %d\n",
+ hr_cq->cqn, ret);
goto err_xa;
}
@@ -126,7 +115,7 @@ err_out:
return ret;
}
-void hns_roce_free_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
+static void free_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
struct device *dev = hr_dev->dev;
@@ -153,190 +142,86 @@ void hns_roce_free_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn, BITMAP_NO_RR);
}
-static int get_cq_umem(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
- struct hns_roce_ib_create_cq ucmd,
- struct ib_udata *udata)
-{
- struct hns_roce_buf *buf = &hr_cq->buf;
- struct hns_roce_mtt *mtt = &hr_cq->mtt;
- struct ib_umem **umem = &hr_cq->umem;
- u32 npages;
- int ret;
-
- *umem = ib_umem_get(&hr_dev->ib_dev, ucmd.buf_addr, buf->size,
- IB_ACCESS_LOCAL_WRITE);
- if (IS_ERR(*umem))
- return PTR_ERR(*umem);
-
- if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
- mtt->mtt_type = MTT_TYPE_CQE;
- else
- mtt->mtt_type = MTT_TYPE_WQE;
-
- npages = DIV_ROUND_UP(ib_umem_page_count(*umem),
- 1 << hr_dev->caps.cqe_buf_pg_sz);
- ret = hns_roce_mtt_init(hr_dev, npages, buf->page_shift, mtt);
- if (ret)
- goto err_buf;
-
- ret = hns_roce_ib_umem_write_mtt(hr_dev, mtt, *umem);
- if (ret)
- goto err_mtt;
-
- return 0;
-
-err_mtt:
- hns_roce_mtt_cleanup(hr_dev, mtt);
-
-err_buf:
- ib_umem_release(*umem);
- return ret;
-}
-
-static int alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
+static int alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
+ struct ib_udata *udata, unsigned long addr)
{
- struct hns_roce_buf *buf = &hr_cq->buf;
- struct hns_roce_mtt *mtt = &hr_cq->mtt;
- int ret;
-
- ret = hns_roce_buf_alloc(hr_dev, buf->size, (1 << buf->page_shift) * 2,
- buf, buf->page_shift);
- if (ret)
- goto out;
-
- if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
- mtt->mtt_type = MTT_TYPE_CQE;
- else
- mtt->mtt_type = MTT_TYPE_WQE;
-
- ret = hns_roce_mtt_init(hr_dev, buf->npages, buf->page_shift, mtt);
- if (ret)
- goto err_buf;
-
- ret = hns_roce_buf_write_mtt(hr_dev, mtt, buf);
- if (ret)
- goto err_mtt;
-
- return 0;
-
-err_mtt:
- hns_roce_mtt_cleanup(hr_dev, mtt);
-
-err_buf:
- hns_roce_buf_free(hr_dev, buf->size, buf);
-
-out:
- return ret;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct hns_roce_buf_attr buf_attr = {};
+ int err;
+
+ buf_attr.page_shift = hr_dev->caps.cqe_buf_pg_sz + HNS_HW_PAGE_SHIFT;
+ buf_attr.region[0].size = hr_cq->cq_depth * hr_dev->caps.cq_entry_sz;
+ buf_attr.region[0].hopnum = hr_dev->caps.cqe_hop_num;
+ buf_attr.region_count = 1;
+ buf_attr.fixed_page = true;
+
+ err = hns_roce_mtr_create(hr_dev, &hr_cq->mtr, &buf_attr,
+ hr_dev->caps.cqe_ba_pg_sz + HNS_HW_PAGE_SHIFT,
+ udata, addr);
+ if (err)
+ ibdev_err(ibdev, "Failed to alloc CQ mtr, err %d\n", err);
+
+ return err;
}
static void free_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
- hns_roce_buf_free(hr_dev, hr_cq->buf.size, &hr_cq->buf);
+ hns_roce_mtr_destroy(hr_dev, &hr_cq->mtr);
}
-static int create_user_cq(struct hns_roce_dev *hr_dev,
- struct hns_roce_cq *hr_cq,
- struct ib_udata *udata,
- struct hns_roce_ib_create_cq_resp *resp)
+static int alloc_cq_db(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
+ struct ib_udata *udata, unsigned long addr,
+ struct hns_roce_ib_create_cq_resp *resp)
{
- struct hns_roce_ib_create_cq ucmd;
- struct device *dev = hr_dev->dev;
- int ret;
- struct hns_roce_ucontext *context = rdma_udata_to_drv_context(
- udata, struct hns_roce_ucontext, ibucontext);
-
- if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
- dev_err(dev, "Failed to copy_from_udata.\n");
- return -EFAULT;
- }
+ bool has_db = hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB;
+ struct hns_roce_ucontext *uctx;
+ int err;
- /* Get user space address, write it into mtt table */
- ret = get_cq_umem(hr_dev, hr_cq, ucmd, udata);
- if (ret) {
- dev_err(dev, "Failed to get_cq_umem.\n");
- return ret;
- }
-
- if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB &&
- udata->outlen >= offsetofend(typeof(*resp), cap_flags)) {
- ret = hns_roce_db_map_user(context, udata, ucmd.db_addr,
- &hr_cq->db);
- if (ret) {
- dev_err(dev, "cq record doorbell map failed!\n");
- goto err_mtt;
+ if (udata) {
+ if (has_db &&
+ udata->outlen >= offsetofend(typeof(*resp), cap_flags)) {
+ uctx = rdma_udata_to_drv_context(udata,
+ struct hns_roce_ucontext, ibucontext);
+ err = hns_roce_db_map_user(uctx, udata, addr,
+ &hr_cq->db);
+ if (err)
+ return err;
+ hr_cq->flags |= HNS_ROCE_CQ_FLAG_RECORD_DB;
+ resp->cap_flags |= HNS_ROCE_CQ_FLAG_RECORD_DB;
}
- hr_cq->db_en = 1;
- resp->cap_flags |= HNS_ROCE_SUPPORT_CQ_RECORD_DB;
- }
-
- return 0;
-
-err_mtt:
- hns_roce_mtt_cleanup(hr_dev, &hr_cq->mtt);
- ib_umem_release(hr_cq->umem);
-
- return ret;
-}
-
-static int create_kernel_cq(struct hns_roce_dev *hr_dev,
- struct hns_roce_cq *hr_cq)
-{
- struct device *dev = hr_dev->dev;
- int ret;
-
- if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) {
- ret = hns_roce_alloc_db(hr_dev, &hr_cq->db, 1);
- if (ret)
- return ret;
-
- hr_cq->set_ci_db = hr_cq->db.db_record;
- *hr_cq->set_ci_db = 0;
- hr_cq->db_en = 1;
- }
-
- /* Init mtt table and write buff address to mtt table */
- ret = alloc_cq_buf(hr_dev, hr_cq);
- if (ret) {
- dev_err(dev, "Failed to alloc_cq_buf.\n");
- goto err_db;
+ } else {
+ if (has_db) {
+ err = hns_roce_alloc_db(hr_dev, &hr_cq->db, 1);
+ if (err)
+ return err;
+ hr_cq->set_ci_db = hr_cq->db.db_record;
+ *hr_cq->set_ci_db = 0;
+ hr_cq->flags |= HNS_ROCE_CQ_FLAG_RECORD_DB;
+ }
+ hr_cq->cq_db_l = hr_dev->reg_base + hr_dev->odb_offset +
+ DB_REG_OFFSET * hr_dev->priv_uar.index;
}
- hr_cq->cq_db_l = hr_dev->reg_base + hr_dev->odb_offset +
- DB_REG_OFFSET * hr_dev->priv_uar.index;
-
return 0;
-
-err_db:
- if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)
- hns_roce_free_db(hr_dev, &hr_cq->db);
-
- return ret;
}
-static void destroy_user_cq(struct hns_roce_dev *hr_dev,
- struct hns_roce_cq *hr_cq,
- struct ib_udata *udata,
- struct hns_roce_ib_create_cq_resp *resp)
+static void free_cq_db(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
+ struct ib_udata *udata)
{
- struct hns_roce_ucontext *context = rdma_udata_to_drv_context(
- udata, struct hns_roce_ucontext, ibucontext);
+ struct hns_roce_ucontext *uctx;
- if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB &&
- udata->outlen >= offsetofend(typeof(*resp), cap_flags))
- hns_roce_db_unmap_user(context, &hr_cq->db);
-
- hns_roce_mtt_cleanup(hr_dev, &hr_cq->mtt);
- ib_umem_release(hr_cq->umem);
-}
-
-static void destroy_kernel_cq(struct hns_roce_dev *hr_dev,
- struct hns_roce_cq *hr_cq)
-{
- hns_roce_mtt_cleanup(hr_dev, &hr_cq->mtt);
- free_cq_buf(hr_dev, hr_cq);
+ if (!(hr_cq->flags & HNS_ROCE_CQ_FLAG_RECORD_DB))
+ return;
- if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)
+ hr_cq->flags &= ~HNS_ROCE_CQ_FLAG_RECORD_DB;
+ if (udata) {
+ uctx = rdma_udata_to_drv_context(udata,
+ struct hns_roce_ucontext,
+ ibucontext);
+ hns_roce_db_unmap_user(uctx, &hr_cq->db);
+ } else {
hns_roce_free_db(hr_dev, &hr_cq->db);
+ }
}
int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
@@ -345,20 +230,21 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
struct hns_roce_ib_create_cq_resp resp = {};
struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
- struct device *dev = hr_dev->dev;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct hns_roce_ib_create_cq ucmd = {};
int vector = attr->comp_vector;
u32 cq_entries = attr->cqe;
int ret;
if (cq_entries < 1 || cq_entries > hr_dev->caps.max_cqes) {
- dev_err(dev, "Create CQ failed. entries=%d, max=%d\n",
- cq_entries, hr_dev->caps.max_cqes);
+ ibdev_err(ibdev, "Failed to check CQ count %d max=%d\n",
+ cq_entries, hr_dev->caps.max_cqes);
return -EINVAL;
}
if (vector >= hr_dev->caps.num_comp_vectors) {
- dev_err(dev, "Create CQ failed, vector=%d, max=%d\n",
- vector, hr_dev->caps.num_comp_vectors);
+ ibdev_err(ibdev, "Failed to check CQ vector=%d max=%d\n",
+ vector, hr_dev->caps.num_comp_vectors);
return -EINVAL;
}
@@ -367,30 +253,35 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
hr_cq->ib_cq.cqe = cq_entries - 1; /* used as cqe index */
hr_cq->cq_depth = cq_entries;
hr_cq->vector = vector;
- hr_cq->buf.size = hr_cq->cq_depth * hr_dev->caps.cq_entry_sz;
- hr_cq->buf.page_shift = PAGE_SHIFT + hr_dev->caps.cqe_buf_pg_sz;
spin_lock_init(&hr_cq->lock);
INIT_LIST_HEAD(&hr_cq->sq_list);
INIT_LIST_HEAD(&hr_cq->rq_list);
if (udata) {
- ret = create_user_cq(hr_dev, hr_cq, udata, &resp);
- if (ret) {
- dev_err(dev, "Create cq failed in user mode!\n");
- goto err_cq;
- }
- } else {
- ret = create_kernel_cq(hr_dev, hr_cq);
+ ret = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
if (ret) {
- dev_err(dev, "Create cq failed in kernel mode!\n");
- goto err_cq;
+ ibdev_err(ibdev, "Failed to copy CQ udata, err %d\n",
+ ret);
+ return ret;
}
}
- ret = hns_roce_alloc_cqc(hr_dev, hr_cq);
+ ret = alloc_cq_buf(hr_dev, hr_cq, udata, ucmd.buf_addr);
+ if (ret) {
+ ibdev_err(ibdev, "Failed to alloc CQ buf, err %d\n", ret);
+ return ret;
+ }
+
+ ret = alloc_cq_db(hr_dev, hr_cq, udata, ucmd.db_addr, &resp);
if (ret) {
- dev_err(dev, "Alloc CQ failed(%d).\n", ret);
- goto err_dbmap;
+ ibdev_err(ibdev, "Failed to alloc CQ db, err %d\n", ret);
+ goto err_cq_buf;
+ }
+
+ ret = alloc_cqc(hr_dev, hr_cq);
+ if (ret) {
+ ibdev_err(ibdev, "Failed to alloc CQ context, err %d\n", ret);
+ goto err_cq_db;
}
/*
@@ -412,15 +303,11 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
return 0;
err_cqc:
- hns_roce_free_cqc(hr_dev, hr_cq);
-
-err_dbmap:
- if (udata)
- destroy_user_cq(hr_dev, hr_cq, udata, &resp);
- else
- destroy_kernel_cq(hr_dev, hr_cq);
-
-err_cq:
+ free_cqc(hr_dev, hr_cq);
+err_cq_db:
+ free_cq_db(hr_dev, hr_cq, udata);
+err_cq_buf:
+ free_cq_buf(hr_dev, hr_cq);
return ret;
}
@@ -429,28 +316,12 @@ void hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
- if (hr_dev->hw->destroy_cq) {
+ if (hr_dev->hw->destroy_cq)
hr_dev->hw->destroy_cq(ib_cq, udata);
- return;
- }
-
- hns_roce_free_cqc(hr_dev, hr_cq);
- hns_roce_mtt_cleanup(hr_dev, &hr_cq->mtt);
- ib_umem_release(hr_cq->umem);
- if (udata) {
- if (hr_cq->db_en == 1)
- hns_roce_db_unmap_user(rdma_udata_to_drv_context(
- udata,
- struct hns_roce_ucontext,
- ibucontext),
- &hr_cq->db);
- } else {
- /* Free the buff of stored cq */
- free_cq_buf(hr_dev, hr_cq);
- if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)
- hns_roce_free_db(hr_dev, &hr_cq->db);
- }
+ free_cq_buf(hr_dev, hr_cq);
+ free_cq_db(hr_dev, hr_cq, udata);
+ free_cqc(hr_dev, hr_cq);
}
void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
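
After this rework, hns_roce_create_cq() allocates the CQ buffer, then the doorbell, then the CQ context, and the error path releases them in reverse order through the err_cqc/err_cq_db/err_cq_buf labels. A stand-alone sketch of that goto-unwind idiom (not part of the patch; alloc_a/b/c and free_a/b are placeholders) is:

#include <stdio.h>

static int alloc_a(void) { return 0; }
static int alloc_b(void) { return 0; }
static int alloc_c(void) { return -1; }	/* pretend the last step fails */
static void free_a(void) { puts("free_a"); }
static void free_b(void) { puts("free_b"); }

static int create(void)
{
	int ret;

	ret = alloc_a();
	if (ret)
		return ret;
	ret = alloc_b();
	if (ret)
		goto err_a;
	ret = alloc_c();
	if (ret)
		goto err_b;
	return 0;

err_b:
	free_b();	/* undo only the steps that already succeeded */
err_a:
	free_a();
	return ret;
}

int main(void)
{
	printf("create() = %d\n", create());
	return 0;
}
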
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index f6b3cf6b95d6..a77fa6730b2d 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -66,6 +66,8 @@
#define HNS_ROCE_CQE_WCMD_EMPTY_BIT 0x2
#define HNS_ROCE_MIN_CQE_CNT 16
+#define HNS_ROCE_RESERVED_SGE 1
+
#define HNS_ROCE_MAX_IRQ_NUM 128
#define HNS_ROCE_SGE_IN_WQE 2
@@ -131,12 +133,12 @@ enum {
};
enum {
- HNS_ROCE_SUPPORT_RQ_RECORD_DB = 1 << 0,
- HNS_ROCE_SUPPORT_SQ_RECORD_DB = 1 << 1,
+ HNS_ROCE_QP_CAP_RQ_RECORD_DB = BIT(0),
+ HNS_ROCE_QP_CAP_SQ_RECORD_DB = BIT(1),
};
-enum {
- HNS_ROCE_SUPPORT_CQ_RECORD_DB = 1 << 0,
+enum hns_roce_cq_flags {
+ HNS_ROCE_CQ_FLAG_RECORD_DB = BIT(0),
};
enum hns_roce_qp_state {
@@ -209,6 +211,8 @@ enum {
HNS_ROCE_OPCODE_RDMA_WITH_IMM_RECEIVE = 0x07,
};
+#define HNS_ROCE_CAP_FLAGS_EX_SHIFT 12
+
enum {
HNS_ROCE_CAP_FLAG_REREG_MR = BIT(0),
HNS_ROCE_CAP_FLAG_ROCE_V1_V2 = BIT(1),
@@ -222,13 +226,6 @@ enum {
HNS_ROCE_CAP_FLAG_ATOMIC = BIT(10),
};
-enum hns_roce_mtt_type {
- MTT_TYPE_WQE,
- MTT_TYPE_CQE,
- MTT_TYPE_SRQWQE,
- MTT_TYPE_IDX
-};
-
#define HNS_ROCE_DB_TYPE_COUNT 2
#define HNS_ROCE_DB_UNIT_SIZE 4
@@ -267,9 +264,12 @@ enum {
#define HNS_ROCE_PORT_DOWN 0
#define HNS_ROCE_PORT_UP 1
-#define HNS_ROCE_MTT_ENTRY_PER_SEG 8
+/* The minimum page size is 4K for hardware */
+#define HNS_HW_PAGE_SHIFT 12
+#define HNS_HW_PAGE_SIZE (1 << HNS_HW_PAGE_SHIFT)
-#define PAGE_ADDR_SHIFT 12
+/* The minimum page count for hardware access page directly. */
+#define HNS_HW_DIRECT_PAGE_COUNT 2
struct hns_roce_uar {
u64 pfn;
@@ -300,22 +300,6 @@ struct hns_roce_bitmap {
unsigned long *table;
};
-/* Order bitmap length -- bit num compute formula: 1 << (max_order - order) */
-/* Order = 0: bitmap is biggest, order = max bitmap is least (only a bit) */
-/* Every bit repesent to a partner free/used status in bitmap */
-/*
- * Initial, bits of other bitmap are all 0 except that a bit of max_order is 1
- * Bit = 1 represent to idle and available; bit = 0: not available
- */
-struct hns_roce_buddy {
- /* Members point to every order level bitmap */
- unsigned long **bits;
- /* Represent to avail bits of the order level bitmap */
- u32 *num_free;
- int max_order;
- spinlock_t lock;
-};
-
/* For Hardware Entry Memory */
struct hns_roce_hem_table {
/* HEM type: 0 = qpc, 1 = mtt, 2 = cqc, 3 = srq, 4 = other */
@@ -336,13 +320,6 @@ struct hns_roce_hem_table {
dma_addr_t *bt_l0_dma_addr;
};
-struct hns_roce_mtt {
- unsigned long first_seg;
- int order;
- int page_shift;
- enum hns_roce_mtt_type mtt_type;
-};
-
struct hns_roce_buf_region {
int offset; /* page offset */
u32 count; /* page count */
@@ -357,13 +334,34 @@ struct hns_roce_hem_list {
struct list_head mid_bt[HNS_ROCE_MAX_BT_REGION][HNS_ROCE_MAX_BT_LEVEL];
struct list_head btm_bt; /* link all bottom bt in @mid_bt */
dma_addr_t root_ba; /* pointer to the root ba table */
- int bt_pg_shift;
+};
+
+struct hns_roce_buf_attr {
+ struct {
+ size_t size; /* region size */
+ int hopnum; /* multi-hop addressing hop num */
+ } region[HNS_ROCE_MAX_BT_REGION];
+ int region_count; /* valid region count */
+ unsigned int page_shift; /* buffer page shift */
+ bool fixed_page; /* decide page shift is fixed-size or maximum size */
+ int user_access; /* umem access flag */
+ bool mtt_only; /* only alloc buffer-required MTT memory */
};
/* memory translate region */
struct hns_roce_mtr {
- struct hns_roce_hem_list hem_list;
- int buf_pg_shift;
+ struct hns_roce_hem_list hem_list; /* multi-hop addressing resource */
+ struct ib_umem *umem; /* user space buffer */
+ struct hns_roce_buf *kmem; /* kernel space buffer */
+ struct {
+ dma_addr_t root_ba; /* root BA table's address */
+ bool is_direct; /* addressing without BA table */
+ unsigned int ba_pg_shift; /* BA table page shift */
+ unsigned int buf_pg_shift; /* buffer page shift */
+ int buf_pg_count; /* buffer page count */
+ struct hns_roce_buf_region region[HNS_ROCE_MAX_BT_REGION];
+ unsigned int region_count;
+ } hem_cfg; /* config for hardware addressing */
};
struct hns_roce_mw {
@@ -381,43 +379,22 @@ struct hns_roce_mw {
struct hns_roce_mr {
struct ib_mr ibmr;
- struct ib_umem *umem;
u64 iova; /* MR's virtual orignal addr */
u64 size; /* Address range of MR */
u32 key; /* Key of MR */
u32 pd; /* PD num of MR */
u32 access; /* Access permission of MR */
- u32 npages;
int enabled; /* MR's active status */
int type; /* MR's register type */
- u64 *pbl_buf; /* MR's PBL space */
- dma_addr_t pbl_dma_addr; /* MR's PBL space PA */
- u32 pbl_size; /* PA number in the PBL */
- u64 pbl_ba; /* page table address */
- u32 l0_chunk_last_num; /* L0 last number */
- u32 l1_chunk_last_num; /* L1 last number */
- u64 **pbl_bt_l2; /* PBL BT L2 */
- u64 **pbl_bt_l1; /* PBL BT L1 */
- u64 *pbl_bt_l0; /* PBL BT L0 */
- dma_addr_t *pbl_l2_dma_addr; /* PBL BT L2 dma addr */
- dma_addr_t *pbl_l1_dma_addr; /* PBL BT L1 dma addr */
- dma_addr_t pbl_l0_dma_addr; /* PBL BT L0 dma addr */
- u32 pbl_ba_pg_sz; /* BT chunk page size */
- u32 pbl_buf_pg_sz; /* buf chunk page size */
u32 pbl_hop_num; /* multi-hop number */
+ struct hns_roce_mtr pbl_mtr;
+ u32 npages;
+ dma_addr_t *page_list;
};
struct hns_roce_mr_table {
struct hns_roce_bitmap mtpt_bitmap;
- struct hns_roce_buddy mtt_buddy;
- struct hns_roce_hem_table mtt_table;
struct hns_roce_hem_table mtpt_table;
- struct hns_roce_buddy mtt_cqe_buddy;
- struct hns_roce_hem_table mtt_cqe_table;
- struct hns_roce_buddy mtt_srqwqe_buddy;
- struct hns_roce_hem_table mtt_srqwqe_table;
- struct hns_roce_buddy mtt_idx_buddy;
- struct hns_roce_hem_table mtt_idx_table;
};
struct hns_roce_wq {
@@ -433,7 +410,7 @@ struct hns_roce_wq {
};
struct hns_roce_sge {
- int sge_cnt; /* SGE num */
+ unsigned int sge_cnt; /* SGE num */
int offset;
int sge_shift; /* SGE size */
};
@@ -446,10 +423,9 @@ struct hns_roce_buf_list {
struct hns_roce_buf {
struct hns_roce_buf_list direct;
struct hns_roce_buf_list *page_list;
- int nbufs;
u32 npages;
u32 size;
- int page_shift;
+ unsigned int page_shift;
};
struct hns_roce_db_pgdir {
@@ -482,12 +458,10 @@ struct hns_roce_db {
struct hns_roce_cq {
struct ib_cq ib_cq;
- struct hns_roce_buf buf;
- struct hns_roce_mtt mtt;
+ struct hns_roce_mtr mtr;
struct hns_roce_db db;
- u8 db_en;
+ u32 flags;
spinlock_t lock;
- struct ib_umem *umem;
u32 cq_depth;
u32 cons_index;
u32 *set_ci_db;
@@ -505,11 +479,8 @@ struct hns_roce_cq {
};
struct hns_roce_idx_que {
- struct hns_roce_buf idx_buf;
- int entry_sz;
- u32 buf_size;
- struct ib_umem *umem;
- struct hns_roce_mtt mtt;
+ struct hns_roce_mtr mtr;
+ int entry_shift;
unsigned long *bitmap;
};
@@ -524,10 +495,9 @@ struct hns_roce_srq {
atomic_t refcount;
struct completion free;
- struct hns_roce_buf buf;
+ struct hns_roce_mtr buf_mtr;
+
u64 *wrid;
- struct ib_umem *umem;
- struct hns_roce_mtt mtt;
struct hns_roce_idx_que idx_que;
spinlock_t lock;
int head;
@@ -656,20 +626,15 @@ struct hns_roce_work {
struct hns_roce_qp {
struct ib_qp ibqp;
- struct hns_roce_buf hr_buf;
struct hns_roce_wq rq;
struct hns_roce_db rdb;
struct hns_roce_db sdb;
- u8 rdb_en;
- u8 sdb_en;
+ unsigned long en_flags;
u32 doorbell_qpn;
u32 sq_signal_bits;
struct hns_roce_wq sq;
- struct ib_umem *umem;
- struct hns_roce_mtt mtt;
struct hns_roce_mtr mtr;
- int wqe_bt_pg_shift;
u32 buff_size;
struct mutex mutex;
@@ -769,17 +734,11 @@ struct hns_roce_eq {
int over_ignore;
int coalesce;
int arm_st;
- u64 eqe_ba;
- int eqe_ba_pg_sz;
- int eqe_buf_pg_sz;
int hop_num;
struct hns_roce_mtr mtr;
- struct hns_roce_buf buf;
- int eq_max_cnt;
+ u16 eq_max_cnt;
int eq_period;
int shift;
- dma_addr_t cur_eqe_ba;
- dma_addr_t nxt_eqe_ba;
int event_type;
int sub_type;
};
@@ -1102,15 +1061,67 @@ static inline struct hns_roce_qp
return xa_load(&hr_dev->qp_table_xa, qpn & (hr_dev->caps.num_qps - 1));
}
+static inline bool hns_roce_buf_is_direct(struct hns_roce_buf *buf)
+{
+ if (buf->page_list)
+ return false;
+
+ return true;
+}
+
static inline void *hns_roce_buf_offset(struct hns_roce_buf *buf, int offset)
{
- u32 page_size = 1 << buf->page_shift;
+ if (hns_roce_buf_is_direct(buf))
+ return (char *)(buf->direct.buf) + (offset & (buf->size - 1));
- if (buf->nbufs == 1)
- return (char *)(buf->direct.buf) + offset;
+ return (char *)(buf->page_list[offset >> buf->page_shift].buf) +
+ (offset & ((1 << buf->page_shift) - 1));
+}
+
+static inline dma_addr_t hns_roce_buf_page(struct hns_roce_buf *buf, int idx)
+{
+ if (hns_roce_buf_is_direct(buf))
+ return buf->direct.map + ((dma_addr_t)idx << buf->page_shift);
else
- return (char *)(buf->page_list[offset >> buf->page_shift].buf) +
- (offset & (page_size - 1));
+ return buf->page_list[idx].map;
+}
+
+#define hr_hw_page_align(x) ALIGN(x, 1 << HNS_HW_PAGE_SHIFT)
+
+static inline u64 to_hr_hw_page_addr(u64 addr)
+{
+ return addr >> HNS_HW_PAGE_SHIFT;
+}
+
+static inline u32 to_hr_hw_page_shift(u32 page_shift)
+{
+ return page_shift - HNS_HW_PAGE_SHIFT;
+}
+
+static inline u32 to_hr_hem_hopnum(u32 hopnum, u32 count)
+{
+ if (count > 0)
+ return hopnum == HNS_ROCE_HOP_NUM_0 ? 0 : hopnum;
+
+ return 0;
+}
+
+static inline u32 to_hr_hem_entries_size(u32 count, u32 buf_shift)
+{
+ return hr_hw_page_align(count << buf_shift);
+}
+
+static inline u32 to_hr_hem_entries_count(u32 count, u32 buf_shift)
+{
+ return hr_hw_page_align(count << buf_shift) >> buf_shift;
+}
+
+static inline u32 to_hr_hem_entries_shift(u32 count, u32 buf_shift)
+{
+ if (!count)
+ return 0;
+
+ return ilog2(to_hr_hem_entries_count(count, buf_shift));
}
int hns_roce_init_uar_table(struct hns_roce_dev *dev);
@@ -1125,25 +1136,18 @@ void hns_roce_cmd_event(struct hns_roce_dev *hr_dev, u16 token, u8 status,
int hns_roce_cmd_use_events(struct hns_roce_dev *hr_dev);
void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev);
-int hns_roce_mtt_init(struct hns_roce_dev *hr_dev, int npages, int page_shift,
- struct hns_roce_mtt *mtt);
-void hns_roce_mtt_cleanup(struct hns_roce_dev *hr_dev,
- struct hns_roce_mtt *mtt);
-int hns_roce_buf_write_mtt(struct hns_roce_dev *hr_dev,
- struct hns_roce_mtt *mtt, struct hns_roce_buf *buf);
-
-void hns_roce_mtr_init(struct hns_roce_mtr *mtr, int bt_pg_shift,
- int buf_pg_shift);
-int hns_roce_mtr_attach(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
- dma_addr_t **bufs, struct hns_roce_buf_region *regions,
- int region_cnt);
-void hns_roce_mtr_cleanup(struct hns_roce_dev *hr_dev,
- struct hns_roce_mtr *mtr);
-
/* hns roce hw need current block and next block addr from mtt */
#define MTT_MIN_COUNT 2
int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
int offset, u64 *mtt_buf, int mtt_max, u64 *base_addr);
+int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
+ struct hns_roce_buf_attr *buf_attr,
+ unsigned int page_shift, struct ib_udata *udata,
+ unsigned long user_addr);
+void hns_roce_mtr_destroy(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mtr *mtr);
+int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
+ dma_addr_t *pages, int page_cnt);
int hns_roce_init_pd_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev);
@@ -1171,8 +1175,8 @@ void hns_roce_bitmap_free_range(struct hns_roce_bitmap *bitmap,
unsigned long obj, int cnt,
int rr);
-int hns_roce_create_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr,
- u32 flags, struct ib_udata *udata);
+int hns_roce_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata);
int hns_roce_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
void hns_roce_destroy_ah(struct ib_ah *ah, u32 flags);
@@ -1200,25 +1204,15 @@ struct ib_mw *hns_roce_alloc_mw(struct ib_pd *pd, enum ib_mw_type,
struct ib_udata *udata);
int hns_roce_dealloc_mw(struct ib_mw *ibmw);
-void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size,
- struct hns_roce_buf *buf);
+void hns_roce_buf_free(struct hns_roce_dev *hr_dev, struct hns_roce_buf *buf);
int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
struct hns_roce_buf *buf, u32 page_shift);
-int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev,
- struct hns_roce_mtt *mtt, struct ib_umem *umem);
-
-void hns_roce_init_buf_region(struct hns_roce_buf_region *region, int hopnum,
- int offset, int buf_cnt);
-int hns_roce_alloc_buf_list(struct hns_roce_buf_region *regions,
- dma_addr_t **bufs, int count);
-void hns_roce_free_buf_list(dma_addr_t **bufs, int count);
-
int hns_roce_get_kmem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
int buf_cnt, int start, struct hns_roce_buf *buf);
int hns_roce_get_umem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
int buf_cnt, int start, struct ib_umem *umem,
- int page_shift);
+ unsigned int page_shift);
int hns_roce_create_srq(struct ib_srq *srq,
struct ib_srq_init_attr *srq_init_attr,
@@ -1254,8 +1248,6 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
struct ib_udata *udata);
void hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
-void hns_roce_free_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq);
-
int hns_roce_db_map_user(struct hns_roce_ucontext *context,
struct ib_udata *udata, unsigned long virt,
struct hns_roce_db *db);
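
The hr_hw_page_align()/to_hr_hem_entries_*() helpers added to hns_roce_device.h above express buffer sizes in 4K hardware pages independent of the entry size. A stand-alone sketch reproducing that arithmetic (not part of the patch; the entry counts are made up and ALIGN is a local stand-in) is:

#include <stdio.h>

#define HNS_HW_PAGE_SHIFT	12
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))
#define hr_hw_page_align(x)	ALIGN(x, 1 << HNS_HW_PAGE_SHIFT)

static unsigned int to_hr_hem_entries_size(unsigned int count, unsigned int buf_shift)
{
	return hr_hw_page_align(count << buf_shift);
}

static unsigned int to_hr_hem_entries_count(unsigned int count, unsigned int buf_shift)
{
	return hr_hw_page_align(count << buf_shift) >> buf_shift;
}

int main(void)
{
	/* example: 100 entries of 32 bytes (buf_shift = 5) */
	printf("size=%u entries=%u\n",
	       to_hr_hem_entries_size(100, 5),
	       to_hr_hem_entries_count(100, 5));	/* 4096, 128 */
	return 0;
}
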
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
index 263338b90d7a..c8db6f8ae018 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -75,18 +75,6 @@ bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type)
case HEM_TYPE_CQC_TIMER:
hop_num = hr_dev->caps.cqc_timer_hop_num;
break;
- case HEM_TYPE_CQE:
- hop_num = hr_dev->caps.cqe_hop_num;
- break;
- case HEM_TYPE_MTT:
- hop_num = hr_dev->caps.mtt_hop_num;
- break;
- case HEM_TYPE_SRQWQE:
- hop_num = hr_dev->caps.srqwqe_hop_num;
- break;
- case HEM_TYPE_IDX:
- hop_num = hr_dev->caps.idx_hop_num;
- break;
default:
return false;
}
@@ -195,38 +183,6 @@ static int get_hem_table_config(struct hns_roce_dev *hr_dev,
mhop->ba_l0_num = hr_dev->caps.srqc_bt_num;
mhop->hop_num = hr_dev->caps.srqc_hop_num;
break;
- case HEM_TYPE_MTT:
- mhop->buf_chunk_size = 1 << (hr_dev->caps.mtt_buf_pg_sz
- + PAGE_SHIFT);
- mhop->bt_chunk_size = 1 << (hr_dev->caps.mtt_ba_pg_sz
- + PAGE_SHIFT);
- mhop->ba_l0_num = mhop->bt_chunk_size / BA_BYTE_LEN;
- mhop->hop_num = hr_dev->caps.mtt_hop_num;
- break;
- case HEM_TYPE_CQE:
- mhop->buf_chunk_size = 1 << (hr_dev->caps.cqe_buf_pg_sz
- + PAGE_SHIFT);
- mhop->bt_chunk_size = 1 << (hr_dev->caps.cqe_ba_pg_sz
- + PAGE_SHIFT);
- mhop->ba_l0_num = mhop->bt_chunk_size / BA_BYTE_LEN;
- mhop->hop_num = hr_dev->caps.cqe_hop_num;
- break;
- case HEM_TYPE_SRQWQE:
- mhop->buf_chunk_size = 1 << (hr_dev->caps.srqwqe_buf_pg_sz
- + PAGE_SHIFT);
- mhop->bt_chunk_size = 1 << (hr_dev->caps.srqwqe_ba_pg_sz
- + PAGE_SHIFT);
- mhop->ba_l0_num = mhop->bt_chunk_size / BA_BYTE_LEN;
- mhop->hop_num = hr_dev->caps.srqwqe_hop_num;
- break;
- case HEM_TYPE_IDX:
- mhop->buf_chunk_size = 1 << (hr_dev->caps.idx_buf_pg_sz
- + PAGE_SHIFT);
- mhop->bt_chunk_size = 1 << (hr_dev->caps.idx_ba_pg_sz
- + PAGE_SHIFT);
- mhop->ba_l0_num = mhop->bt_chunk_size / BA_BYTE_LEN;
- mhop->hop_num = hr_dev->caps.idx_hop_num;
- break;
default:
dev_err(dev, "Table %d not support multi-hop addressing!\n",
type);
@@ -899,57 +855,6 @@ out:
return addr;
}
-int hns_roce_table_get_range(struct hns_roce_dev *hr_dev,
- struct hns_roce_hem_table *table,
- unsigned long start, unsigned long end)
-{
- struct hns_roce_hem_mhop mhop;
- unsigned long inc = table->table_chunk_size / table->obj_size;
- unsigned long i = 0;
- int ret;
-
- if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
- ret = hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop);
- if (ret)
- goto fail;
- inc = mhop.bt_chunk_size / table->obj_size;
- }
-
- /* Allocate MTT entry memory according to chunk(128K) */
- for (i = start; i <= end; i += inc) {
- ret = hns_roce_table_get(hr_dev, table, i);
- if (ret)
- goto fail;
- }
-
- return 0;
-
-fail:
- while (i > start) {
- i -= inc;
- hns_roce_table_put(hr_dev, table, i);
- }
- return ret;
-}
-
-void hns_roce_table_put_range(struct hns_roce_dev *hr_dev,
- struct hns_roce_hem_table *table,
- unsigned long start, unsigned long end)
-{
- struct hns_roce_hem_mhop mhop;
- unsigned long inc = table->table_chunk_size / table->obj_size;
- unsigned long i;
-
- if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
- if (hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop))
- return;
- inc = mhop.bt_chunk_size / table->obj_size;
- }
-
- for (i = start; i <= end; i += inc)
- hns_roce_table_put(hr_dev, table, i);
-}
-
int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, u32 type,
unsigned long obj_size, unsigned long nobj,
@@ -1112,12 +1017,6 @@ void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev)
{
- if ((hr_dev->caps.num_idx_segs))
- hns_roce_cleanup_hem_table(hr_dev,
- &hr_dev->mr_table.mtt_idx_table);
- if (hr_dev->caps.num_srqwqe_segs)
- hns_roce_cleanup_hem_table(hr_dev,
- &hr_dev->mr_table.mtt_srqwqe_table);
if (hr_dev->caps.srqc_entry_sz)
hns_roce_cleanup_hem_table(hr_dev,
&hr_dev->srq_table.table);
@@ -1137,10 +1036,6 @@ void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev)
hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.irrl_table);
hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.qp_table);
hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table);
- if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
- hns_roce_cleanup_hem_table(hr_dev,
- &hr_dev->mr_table.mtt_cqe_table);
- hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtt_table);
}
struct roce_hem_item {
@@ -1505,7 +1400,7 @@ err_exit:
int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_list *hem_list,
const struct hns_roce_buf_region *regions,
- int region_cnt)
+ int region_cnt, unsigned int bt_pg_shift)
{
const struct hns_roce_buf_region *r;
int ofs, end;
@@ -1519,7 +1414,7 @@ int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev,
return -EINVAL;
}
- unit = (1 << hem_list->bt_pg_shift) / BA_BYTE_LEN;
+ unit = (1 << bt_pg_shift) / BA_BYTE_LEN;
for (i = 0; i < region_cnt; i++) {
r = &regions[i];
if (!r->count)
@@ -1566,8 +1461,7 @@ void hns_roce_hem_list_release(struct hns_roce_dev *hr_dev,
hem_list->root_ba = 0;
}
-void hns_roce_hem_list_init(struct hns_roce_hem_list *hem_list,
- int bt_page_order)
+void hns_roce_hem_list_init(struct hns_roce_hem_list *hem_list)
{
int i, j;
@@ -1576,8 +1470,6 @@ void hns_roce_hem_list_init(struct hns_roce_hem_list *hem_list,
for (i = 0; i < HNS_ROCE_MAX_BT_REGION; i++)
for (j = 0; j < HNS_ROCE_MAX_BT_LEVEL; j++)
INIT_LIST_HEAD(&hem_list->mid_bt[i][j]);
-
- hem_list->bt_pg_shift = bt_page_order;
}
void *hns_roce_hem_list_find_mtt(struct hns_roce_dev *hr_dev,
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.h b/drivers/infiniband/hw/hns/hns_roce_hem.h
index 3bb8f78fb7b0..b34c940077bb 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.h
@@ -115,12 +115,6 @@ void hns_roce_table_put(struct hns_roce_dev *hr_dev,
void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, unsigned long obj,
dma_addr_t *dma_handle);
-int hns_roce_table_get_range(struct hns_roce_dev *hr_dev,
- struct hns_roce_hem_table *table,
- unsigned long start, unsigned long end);
-void hns_roce_table_put_range(struct hns_roce_dev *hr_dev,
- struct hns_roce_hem_table *table,
- unsigned long start, unsigned long end);
int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, u32 type,
unsigned long obj_size, unsigned long nobj,
@@ -133,14 +127,13 @@ int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_mhop *mhop);
bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type);
-void hns_roce_hem_list_init(struct hns_roce_hem_list *hem_list,
- int bt_page_order);
+void hns_roce_hem_list_init(struct hns_roce_hem_list *hem_list);
int hns_roce_hem_list_calc_root_ba(const struct hns_roce_buf_region *regions,
int region_cnt, int unit);
int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_list *hem_list,
const struct hns_roce_buf_region *regions,
- int region_cnt);
+ int region_cnt, unsigned int bt_pg_shift);
void hns_roce_hem_list_release(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_list *hem_list);
void *hns_roce_hem_list_find_mtt(struct hns_roce_dev *hr_dev,
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index 5ff028d77be3..d02207cd30df 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -503,16 +503,13 @@ static void hns_roce_set_odb(struct hns_roce_dev *hr_dev, u32 odb_alept,
static void hns_roce_set_sdb_ext(struct hns_roce_dev *hr_dev, u32 ext_sdb_alept,
u32 ext_sdb_alful)
{
+ struct hns_roce_v1_priv *priv = hr_dev->priv;
+ struct hns_roce_db_table *db = &priv->db_table;
struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_v1_priv *priv;
- struct hns_roce_db_table *db;
dma_addr_t sdb_dma_addr;
__le32 tmp;
u32 val;
- priv = (struct hns_roce_v1_priv *)hr_dev->priv;
- db = &priv->db_table;
-
/* Configure extend SDB threshold */
roce_write(hr_dev, ROCEE_EXT_DB_SQ_WL_EMPTY_REG, ext_sdb_alept);
roce_write(hr_dev, ROCEE_EXT_DB_SQ_WL_REG, ext_sdb_alful);
@@ -545,16 +542,13 @@ static void hns_roce_set_sdb_ext(struct hns_roce_dev *hr_dev, u32 ext_sdb_alept,
static void hns_roce_set_odb_ext(struct hns_roce_dev *hr_dev, u32 ext_odb_alept,
u32 ext_odb_alful)
{
+ struct hns_roce_v1_priv *priv = hr_dev->priv;
+ struct hns_roce_db_table *db = &priv->db_table;
struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_v1_priv *priv;
- struct hns_roce_db_table *db;
dma_addr_t odb_dma_addr;
__le32 tmp;
u32 val;
- priv = (struct hns_roce_v1_priv *)hr_dev->priv;
- db = &priv->db_table;
-
/* Configure extend ODB threshold */
roce_write(hr_dev, ROCEE_EXT_DB_OTHERS_WL_EMPTY_REG, ext_odb_alept);
roce_write(hr_dev, ROCEE_EXT_DB_OTHERS_WL_REG, ext_odb_alful);
@@ -583,16 +577,13 @@ static void hns_roce_set_odb_ext(struct hns_roce_dev *hr_dev, u32 ext_odb_alept,
static int hns_roce_db_ext_init(struct hns_roce_dev *hr_dev, u32 sdb_ext_mod,
u32 odb_ext_mod)
{
+ struct hns_roce_v1_priv *priv = hr_dev->priv;
+ struct hns_roce_db_table *db = &priv->db_table;
struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_v1_priv *priv;
- struct hns_roce_db_table *db;
dma_addr_t sdb_dma_addr;
dma_addr_t odb_dma_addr;
int ret = 0;
- priv = (struct hns_roce_v1_priv *)hr_dev->priv;
- db = &priv->db_table;
-
db->ext_db = kmalloc(sizeof(*db->ext_db), GFP_KERNEL);
if (!db->ext_db)
return -ENOMEM;
@@ -692,14 +683,14 @@ static struct hns_roce_qp *hns_roce_v1_create_lp_qp(struct hns_roce_dev *hr_dev,
static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
{
+ struct hns_roce_v1_priv *priv = hr_dev->priv;
+ struct hns_roce_free_mr *free_mr = &priv->free_mr;
struct hns_roce_caps *caps = &hr_dev->caps;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
struct device *dev = &hr_dev->pdev->dev;
struct ib_cq_init_attr cq_init_attr;
- struct hns_roce_free_mr *free_mr;
struct ib_qp_attr attr = { 0 };
- struct hns_roce_v1_priv *priv;
struct hns_roce_qp *hr_qp;
- struct ib_device *ibdev;
struct ib_cq *cq;
struct ib_pd *pd;
union ib_gid dgid;
@@ -712,14 +703,10 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
u8 port = 0;
u8 sl;
- priv = (struct hns_roce_v1_priv *)hr_dev->priv;
- free_mr = &priv->free_mr;
-
/* Reserved cq for loop qp */
cq_init_attr.cqe = HNS_ROCE_MIN_WQE_NUM * 2;
cq_init_attr.comp_vector = 0;
- ibdev = &hr_dev->ib_dev;
cq = rdma_zalloc_drv_obj(ibdev, ib_cq);
if (!cq)
return -ENOMEM;
@@ -868,16 +855,13 @@ alloc_cq_failed:
static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev)
{
+ struct hns_roce_v1_priv *priv = hr_dev->priv;
+ struct hns_roce_free_mr *free_mr = &priv->free_mr;
struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_free_mr *free_mr;
- struct hns_roce_v1_priv *priv;
struct hns_roce_qp *hr_qp;
int ret;
int i;
- priv = (struct hns_roce_v1_priv *)hr_dev->priv;
- free_mr = &priv->free_mr;
-
for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
hr_qp = free_mr->mr_free_qp[i];
if (!hr_qp)
@@ -897,18 +881,15 @@ static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev)
static int hns_roce_db_init(struct hns_roce_dev *hr_dev)
{
+ struct hns_roce_v1_priv *priv = hr_dev->priv;
+ struct hns_roce_db_table *db = &priv->db_table;
struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_v1_priv *priv;
- struct hns_roce_db_table *db;
u32 sdb_ext_mod;
u32 odb_ext_mod;
u32 sdb_evt_mod;
u32 odb_evt_mod;
int ret = 0;
- priv = (struct hns_roce_v1_priv *)hr_dev->priv;
- db = &priv->db_table;
-
memset(db, 0, sizeof(*db));
/* Default DB mode */
@@ -954,15 +935,12 @@ static void hns_roce_v1_recreate_lp_qp_work_fn(struct work_struct *work)
static int hns_roce_v1_recreate_lp_qp(struct hns_roce_dev *hr_dev)
{
- struct device *dev = &hr_dev->pdev->dev;
+ long end = HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS;
+ struct hns_roce_v1_priv *priv = hr_dev->priv;
+ struct hns_roce_free_mr *free_mr = &priv->free_mr;
struct hns_roce_recreate_lp_qp_work *lp_qp_work;
- struct hns_roce_free_mr *free_mr;
- struct hns_roce_v1_priv *priv;
+ struct device *dev = &hr_dev->pdev->dev;
struct completion comp;
- long end = HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS;
-
- priv = (struct hns_roce_v1_priv *)hr_dev->priv;
- free_mr = &priv->free_mr;
lp_qp_work = kzalloc(sizeof(struct hns_roce_recreate_lp_qp_work),
GFP_KERNEL);
@@ -1021,29 +999,21 @@ static int hns_roce_v1_send_lp_wqe(struct hns_roce_qp *hr_qp)
static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
{
- struct hns_roce_mr_free_work *mr_work;
- struct ib_wc wc[HNS_ROCE_V1_RESV_QP];
- struct hns_roce_free_mr *free_mr;
- struct hns_roce_cq *mr_free_cq;
- struct hns_roce_v1_priv *priv;
- struct hns_roce_dev *hr_dev;
- struct hns_roce_mr *hr_mr;
- struct hns_roce_qp *hr_qp;
- struct device *dev;
unsigned long end =
msecs_to_jiffies(HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS) + jiffies;
- int i;
- int ret;
+ struct hns_roce_mr_free_work *mr_work =
+ container_of(work, struct hns_roce_mr_free_work, work);
+ struct hns_roce_dev *hr_dev = to_hr_dev(mr_work->ib_dev);
+ struct hns_roce_v1_priv *priv = hr_dev->priv;
+ struct hns_roce_free_mr *free_mr = &priv->free_mr;
+ struct hns_roce_cq *mr_free_cq = free_mr->mr_free_cq;
+ struct hns_roce_mr *hr_mr = mr_work->mr;
+ struct device *dev = &hr_dev->pdev->dev;
+ struct ib_wc wc[HNS_ROCE_V1_RESV_QP];
+ struct hns_roce_qp *hr_qp;
int ne = 0;
-
- mr_work = container_of(work, struct hns_roce_mr_free_work, work);
- hr_mr = (struct hns_roce_mr *)mr_work->mr;
- hr_dev = to_hr_dev(mr_work->ib_dev);
- dev = &hr_dev->pdev->dev;
-
- priv = (struct hns_roce_v1_priv *)hr_dev->priv;
- free_mr = &priv->free_mr;
- mr_free_cq = free_mr->mr_free_cq;
+ int ret;
+ int i;
for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
hr_qp = free_mr->mr_free_qp[i];
@@ -1092,19 +1062,15 @@ free_work:
static int hns_roce_v1_dereg_mr(struct hns_roce_dev *hr_dev,
struct hns_roce_mr *mr, struct ib_udata *udata)
{
+ struct hns_roce_v1_priv *priv = hr_dev->priv;
+ struct hns_roce_free_mr *free_mr = &priv->free_mr;
+ long end = HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS;
struct device *dev = &hr_dev->pdev->dev;
struct hns_roce_mr_free_work *mr_work;
- struct hns_roce_free_mr *free_mr;
- struct hns_roce_v1_priv *priv;
- struct completion comp;
- long end = HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS;
unsigned long start = jiffies;
- int npages;
+ struct completion comp;
int ret = 0;
- priv = (struct hns_roce_v1_priv *)hr_dev->priv;
- free_mr = &priv->free_mr;
-
if (mr->enabled) {
if (hns_roce_hw_destroy_mpt(hr_dev, NULL,
key_to_hw_index(mr->key) &
@@ -1146,17 +1112,9 @@ free_mr:
dev_dbg(dev, "Free mr 0x%x use 0x%x us.\n",
mr->key, jiffies_to_usecs(jiffies) - jiffies_to_usecs(start));
- if (mr->size != ~0ULL) {
- npages = ib_umem_page_count(mr->umem);
- dma_free_coherent(dev, npages * 8, mr->pbl_buf,
- mr->pbl_dma_addr);
- }
-
hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap,
key_to_hw_index(mr->key), 0);
-
- ib_umem_release(mr->umem);
-
+ hns_roce_mtr_destroy(hr_dev, &mr->pbl_mtr);
kfree(mr);
return ret;
@@ -1164,12 +1122,9 @@ free_mr:
static void hns_roce_db_free(struct hns_roce_dev *hr_dev)
{
+ struct hns_roce_v1_priv *priv = hr_dev->priv;
+ struct hns_roce_db_table *db = &priv->db_table;
struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_v1_priv *priv;
- struct hns_roce_db_table *db;
-
- priv = (struct hns_roce_v1_priv *)hr_dev->priv;
- db = &priv->db_table;
if (db->sdb_ext_mod) {
dma_free_coherent(dev, HNS_ROCE_V1_EXT_SDB_SIZE,
@@ -1190,17 +1145,14 @@ static void hns_roce_db_free(struct hns_roce_dev *hr_dev)
static int hns_roce_raq_init(struct hns_roce_dev *hr_dev)
{
- int ret;
- u32 val;
- __le32 tmp;
+ struct hns_roce_v1_priv *priv = hr_dev->priv;
+ struct hns_roce_raq_table *raq = &priv->raq_table;
+ struct device *dev = &hr_dev->pdev->dev;
int raq_shift = 0;
dma_addr_t addr;
- struct hns_roce_v1_priv *priv;
- struct hns_roce_raq_table *raq;
- struct device *dev = &hr_dev->pdev->dev;
-
- priv = (struct hns_roce_v1_priv *)hr_dev->priv;
- raq = &priv->raq_table;
+ __le32 tmp;
+ u32 val;
+ int ret;
raq->e_raq_buf = kzalloc(sizeof(*(raq->e_raq_buf)), GFP_KERNEL);
if (!raq->e_raq_buf)
@@ -1280,12 +1232,9 @@ err_dma_alloc_raq:
static void hns_roce_raq_free(struct hns_roce_dev *hr_dev)
{
+ struct hns_roce_v1_priv *priv = hr_dev->priv;
+ struct hns_roce_raq_table *raq = &priv->raq_table;
struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_v1_priv *priv;
- struct hns_roce_raq_table *raq;
-
- priv = (struct hns_roce_v1_priv *)hr_dev->priv;
- raq = &priv->raq_table;
dma_free_coherent(dev, HNS_ROCE_V1_RAQ_SIZE, raq->e_raq_buf->buf,
raq->e_raq_buf->map);
@@ -1319,12 +1268,10 @@ static void hns_roce_port_enable(struct hns_roce_dev *hr_dev, int enable_flag)
static int hns_roce_bt_init(struct hns_roce_dev *hr_dev)
{
+ struct hns_roce_v1_priv *priv = hr_dev->priv;
struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_v1_priv *priv;
int ret;
- priv = (struct hns_roce_v1_priv *)hr_dev->priv;
-
priv->bt_table.qpc_buf.buf = dma_alloc_coherent(dev,
HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.qpc_buf.map,
GFP_KERNEL);
@@ -1362,10 +1309,8 @@ err_failed_alloc_mtpt_buf:
static void hns_roce_bt_free(struct hns_roce_dev *hr_dev)
{
+ struct hns_roce_v1_priv *priv = hr_dev->priv;
struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_v1_priv *priv;
-
- priv = (struct hns_roce_v1_priv *)hr_dev->priv;
dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
priv->bt_table.cqc_buf.buf, priv->bt_table.cqc_buf.map);
@@ -1379,12 +1324,9 @@ static void hns_roce_bt_free(struct hns_roce_dev *hr_dev)
static int hns_roce_tptr_init(struct hns_roce_dev *hr_dev)
{
+ struct hns_roce_v1_priv *priv = hr_dev->priv;
+ struct hns_roce_buf_list *tptr_buf = &priv->tptr_table.tptr_buf;
struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_buf_list *tptr_buf;
- struct hns_roce_v1_priv *priv;
-
- priv = (struct hns_roce_v1_priv *)hr_dev->priv;
- tptr_buf = &priv->tptr_table.tptr_buf;
/*
* This buffer will be used for CQ's tptr(tail pointer), also
@@ -1405,12 +1347,9 @@ static int hns_roce_tptr_init(struct hns_roce_dev *hr_dev)
static void hns_roce_tptr_free(struct hns_roce_dev *hr_dev)
{
+ struct hns_roce_v1_priv *priv = hr_dev->priv;
+ struct hns_roce_buf_list *tptr_buf = &priv->tptr_table.tptr_buf;
struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_buf_list *tptr_buf;
- struct hns_roce_v1_priv *priv;
-
- priv = (struct hns_roce_v1_priv *)hr_dev->priv;
- tptr_buf = &priv->tptr_table.tptr_buf;
dma_free_coherent(dev, HNS_ROCE_V1_TPTR_BUF_SIZE,
tptr_buf->buf, tptr_buf->map);
@@ -1418,14 +1357,11 @@ static void hns_roce_tptr_free(struct hns_roce_dev *hr_dev)
static int hns_roce_free_mr_init(struct hns_roce_dev *hr_dev)
{
+ struct hns_roce_v1_priv *priv = hr_dev->priv;
+ struct hns_roce_free_mr *free_mr = &priv->free_mr;
struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_free_mr *free_mr;
- struct hns_roce_v1_priv *priv;
int ret = 0;
- priv = (struct hns_roce_v1_priv *)hr_dev->priv;
- free_mr = &priv->free_mr;
-
free_mr->free_mr_wq = create_singlethread_workqueue("hns_roce_free_mr");
if (!free_mr->free_mr_wq) {
dev_err(dev, "Create free mr workqueue failed!\n");
@@ -1444,11 +1380,8 @@ static int hns_roce_free_mr_init(struct hns_roce_dev *hr_dev)
static void hns_roce_free_mr_free(struct hns_roce_dev *hr_dev)
{
- struct hns_roce_free_mr *free_mr;
- struct hns_roce_v1_priv *priv;
-
- priv = (struct hns_roce_v1_priv *)hr_dev->priv;
- free_mr = &priv->free_mr;
+ struct hns_roce_v1_priv *priv = hr_dev->priv;
+ struct hns_roce_free_mr *free_mr = &priv->free_mr;
flush_workqueue(free_mr->free_mr_wq);
destroy_workqueue(free_mr->free_mr_wq);
@@ -1826,9 +1759,12 @@ static void hns_roce_v1_set_mtu(struct hns_roce_dev *hr_dev, u8 phy_port,
static int hns_roce_v1_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
unsigned long mtpt_idx)
{
+ struct hns_roce_dev *hr_dev = to_hr_dev(mr->ibmr.device);
+ u64 pages[HNS_ROCE_MAX_INNER_MTPT_NUM] = { 0 };
+ struct ib_device *ibdev = &hr_dev->ib_dev;
struct hns_roce_v1_mpt_entry *mpt_entry;
- struct sg_dma_page_iter sg_iter;
- u64 *pages;
+ dma_addr_t pbl_ba;
+ int count;
int i;
/* MPT filled into mailbox buf */
@@ -1878,22 +1814,15 @@ static int hns_roce_v1_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
if (mr->type == MR_TYPE_DMA)
return 0;
- pages = (u64 *) __get_free_page(GFP_KERNEL);
- if (!pages)
- return -ENOMEM;
-
- i = 0;
- for_each_sg_dma_page(mr->umem->sg_head.sgl, &sg_iter, mr->umem->nmap, 0) {
- pages[i] = ((u64)sg_page_iter_dma_address(&sg_iter)) >> 12;
-
- /* Directly record to MTPT table firstly 7 entry */
- if (i >= HNS_ROCE_MAX_INNER_MTPT_NUM)
- break;
- i++;
+ count = hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, pages,
+ ARRAY_SIZE(pages), &pbl_ba);
+ if (count < 1) {
+ ibdev_err(ibdev, "failed to find PBL mtr, count = %d.", count);
+ return -ENOBUFS;
}
/* Register user mr */
- for (i = 0; i < HNS_ROCE_MAX_INNER_MTPT_NUM; i++) {
+ for (i = 0; i < count; i++) {
switch (i) {
case 0:
mpt_entry->pa0_l = cpu_to_le32((u32)(pages[i]));
@@ -1959,20 +1888,17 @@ static int hns_roce_v1_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
}
}
- free_page((unsigned long) pages);
-
- mpt_entry->pbl_addr_l = cpu_to_le32((u32)(mr->pbl_dma_addr));
-
+ mpt_entry->pbl_addr_l = cpu_to_le32(pbl_ba);
roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_PBL_ADDR_H_M,
- MPT_BYTE_12_PBL_ADDR_H_S,
- ((u32)(mr->pbl_dma_addr >> 32)));
+ MPT_BYTE_12_PBL_ADDR_H_S, upper_32_bits(pbl_ba));
return 0;
}
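Aside on the pattern used in the reworked hns_roce_v1_write_mtpt() above: a single hns_roce_mtr_find() call copies up to HNS_ROCE_MAX_INNER_MTPT_NUM page addresses into a fixed on-stack array and returns how many it filled, instead of walking the umem SG list into a temporarily allocated page. The standalone C sketch below illustrates that bounded copy-out idiom; all names and sizes are hypothetical, not driver code.

#include <stdint.h>
#include <stdio.h>

/* Sketch of the bounded lookup pattern: the caller passes a fixed-size
 * array and the helper returns how many entries it filled, so no temporary
 * page has to be allocated. Names are hypothetical. */
#define MAX_INNER 7

static int find_pages(const uint64_t *table, int table_len,
                      uint64_t *out, int max_out)
{
    int n = table_len < max_out ? table_len : max_out;

    for (int i = 0; i < n; i++)
        out[i] = table[i];

    return n;                    /* count actually copied, may be < max_out */
}

int main(void)
{
    uint64_t pbl[] = { 0x1000, 0x2000, 0x3000 };
    uint64_t pages[MAX_INNER] = { 0 };
    int count = find_pages(pbl, 3, pages, MAX_INNER);

    if (count < 1) {
        fprintf(stderr, "no pages found\n");
        return 1;
    }

    for (int i = 0; i < count; i++)
        printf("pa%d = 0x%llx\n", i, (unsigned long long)pages[i]);

    return 0;
}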
static void *get_cqe(struct hns_roce_cq *hr_cq, int n)
{
- return hns_roce_buf_offset(&hr_cq->buf, n * HNS_ROCE_V1_CQE_ENTRY_SIZE);
+ return hns_roce_buf_offset(hr_cq->mtr.kmem,
+ n * HNS_ROCE_V1_CQE_ENTRY_SIZE);
}
static void *get_sw_cqe(struct hns_roce_cq *hr_cq, int n)
@@ -2066,16 +1992,12 @@ static void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
struct hns_roce_cq *hr_cq, void *mb_buf,
u64 *mtts, dma_addr_t dma_handle)
{
- struct hns_roce_cq_context *cq_context = NULL;
- struct hns_roce_buf_list *tptr_buf;
- struct hns_roce_v1_priv *priv;
+ struct hns_roce_v1_priv *priv = hr_dev->priv;
+ struct hns_roce_buf_list *tptr_buf = &priv->tptr_table.tptr_buf;
+ struct hns_roce_cq_context *cq_context = mb_buf;
dma_addr_t tptr_dma_addr;
int offset;
- priv = (struct hns_roce_v1_priv *)hr_dev->priv;
- tptr_buf = &priv->tptr_table.tptr_buf;
-
- cq_context = mb_buf;
memset(cq_context, 0, sizeof(*cq_context));
/* Get the tptr for this CQ. */
@@ -2416,16 +2338,14 @@ static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, int obj,
int step_idx)
{
+ struct hns_roce_v1_priv *priv = hr_dev->priv;
struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_v1_priv *priv;
- unsigned long flags = 0;
long end = HW_SYNC_TIMEOUT_MSECS;
__le32 bt_cmd_val[2] = {0};
+ unsigned long flags = 0;
void __iomem *bt_cmd;
u64 bt_ba = 0;
- priv = (struct hns_roce_v1_priv *)hr_dev->priv;
-
switch (table->type) {
case HEM_TYPE_QPC:
bt_ba = priv->bt_table.qpc_buf.map >> 12;
@@ -2479,7 +2399,6 @@ static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev,
}
static int hns_roce_v1_qp_modify(struct hns_roce_dev *hr_dev,
- struct hns_roce_mtt *mtt,
enum hns_roce_qp_state cur_state,
enum hns_roce_qp_state new_state,
struct hns_roce_qp_context *context,
@@ -2560,6 +2479,29 @@ static int hns_roce_v1_qp_modify(struct hns_roce_dev *hr_dev,
return ret;
}
+static int find_wqe_mtt(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
+ u64 *sq_ba, u64 *rq_ba, dma_addr_t *bt_ba)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ int rq_pa_start;
+ int count;
+
+ count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, 0, sq_ba, 1, bt_ba);
+ if (count < 1) {
+ ibdev_err(ibdev, "Failed to find SQ ba\n");
+ return -ENOBUFS;
+ }
+ rq_pa_start = hr_qp->rq.offset >> hr_qp->mtr.hem_cfg.buf_pg_shift;
+ count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, rq_pa_start, rq_ba, 1,
+ NULL);
+ if (!count) {
+ ibdev_err(ibdev, "Failed to find RQ ba\n");
+ return -ENOBUFS;
+ }
+
+ return 0;
+}
+
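The new find_wqe_mtt() helper above replaces the old mtt table walk with two hns_roce_mtr_find() lookups, deriving the RQ's page index by shifting the RQ byte offset right by the buffer page shift. The standalone C sketch below (hypothetical names, not driver code) shows that offset-to-page-index conversion under the assumption of a power-of-two page size.

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: a queue buffer laid out as SQ followed by RQ,
 * addressed through a page table. Names and values are hypothetical. */
struct wqe_buf {
    uint64_t pages[8];      /* DMA addresses of the buffer pages */
    unsigned int pg_shift;  /* log2(page size), e.g. 12 for 4 KiB */
    unsigned int rq_offset; /* byte offset of the RQ inside the buffer */
};

/* Mirror of the offset -> page-index conversion used by find_wqe_mtt(). */
static uint64_t lookup_base(const struct wqe_buf *buf, unsigned int byte_off)
{
    unsigned int pg_idx = byte_off >> buf->pg_shift; /* which page */
    return buf->pages[pg_idx];
}

int main(void)
{
    struct wqe_buf buf = {
        .pages = { 0x100000, 0x101000, 0x102000, 0x103000 },
        .pg_shift = 12,
        .rq_offset = 2 * 4096, /* RQ starts two pages into the buffer */
    };

    printf("sq base: 0x%llx\n", (unsigned long long)lookup_base(&buf, 0));
    printf("rq base: 0x%llx\n",
           (unsigned long long)lookup_base(&buf, buf.rq_offset));
    return 0;
}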
static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
int attr_mask, enum ib_qp_state cur_state,
enum ib_qp_state new_state)
@@ -2567,25 +2509,20 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
struct hns_roce_sqp_context *context;
- struct device *dev = &hr_dev->pdev->dev;
dma_addr_t dma_handle = 0;
u32 __iomem *addr;
- int rq_pa_start;
+ u64 sq_ba = 0;
+ u64 rq_ba = 0;
__le32 tmp;
u32 reg_val;
- u64 *mtts;
context = kzalloc(sizeof(*context), GFP_KERNEL);
if (!context)
return -ENOMEM;
/* Search QP buf's MTTs */
- mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table,
- hr_qp->mtt.first_seg, &dma_handle);
- if (!mtts) {
- dev_err(dev, "qp buf pa find failed\n");
+ if (find_wqe_mtt(hr_dev, hr_qp, &sq_ba, &rq_ba, &dma_handle))
goto out;
- }
if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
roce_set_field(context->qp1c_bytes_4,
@@ -2599,11 +2536,11 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
roce_set_field(context->qp1c_bytes_4, QP1C_BYTES_4_PD_M,
QP1C_BYTES_4_PD_S, to_hr_pd(ibqp->pd)->pdn);
- context->sq_rq_bt_l = cpu_to_le32((u32)(dma_handle));
+ context->sq_rq_bt_l = cpu_to_le32(dma_handle);
roce_set_field(context->qp1c_bytes_12,
QP1C_BYTES_12_SQ_RQ_BT_H_M,
QP1C_BYTES_12_SQ_RQ_BT_H_S,
- ((u32)(dma_handle >> 32)));
+ upper_32_bits(dma_handle));
roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_HEAD_M,
QP1C_BYTES_16_RQ_HEAD_S, hr_qp->rq.head);
@@ -2624,14 +2561,12 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
roce_set_field(context->qp1c_bytes_20, QP1C_BYTES_20_PKEY_IDX_M,
QP1C_BYTES_20_PKEY_IDX_S, attr->pkey_index);
- rq_pa_start = (u32)hr_qp->rq.offset / PAGE_SIZE;
- context->cur_rq_wqe_ba_l =
- cpu_to_le32((u32)(mtts[rq_pa_start]));
+ context->cur_rq_wqe_ba_l = cpu_to_le32(rq_ba);
roce_set_field(context->qp1c_bytes_28,
QP1C_BYTES_28_CUR_RQ_WQE_BA_H_M,
QP1C_BYTES_28_CUR_RQ_WQE_BA_H_S,
- (mtts[rq_pa_start]) >> 32);
+ upper_32_bits(rq_ba));
roce_set_field(context->qp1c_bytes_28,
QP1C_BYTES_28_RQ_CUR_IDX_M,
QP1C_BYTES_28_RQ_CUR_IDX_S, 0);
@@ -2645,12 +2580,12 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
QP1C_BYTES_32_TX_CQ_NUM_S,
to_hr_cq(ibqp->send_cq)->cqn);
- context->cur_sq_wqe_ba_l = cpu_to_le32((u32)mtts[0]);
+ context->cur_sq_wqe_ba_l = cpu_to_le32(sq_ba);
roce_set_field(context->qp1c_bytes_40,
QP1C_BYTES_40_CUR_SQ_WQE_BA_H_M,
QP1C_BYTES_40_CUR_SQ_WQE_BA_H_S,
- (mtts[0]) >> 32);
+ upper_32_bits(sq_ba));
roce_set_field(context->qp1c_bytes_40,
QP1C_BYTES_40_SQ_CUR_IDX_M,
QP1C_BYTES_40_SQ_CUR_IDX_S, 0);
@@ -2704,6 +2639,28 @@ out:
return -EINVAL;
}
+static bool check_qp_state(enum ib_qp_state cur_state,
+ enum ib_qp_state new_state)
+{
+ static const bool sm[][IB_QPS_ERR + 1] = {
+ [IB_QPS_RESET] = { [IB_QPS_RESET] = true,
+ [IB_QPS_INIT] = true },
+ [IB_QPS_INIT] = { [IB_QPS_RESET] = true,
+ [IB_QPS_INIT] = true,
+ [IB_QPS_RTR] = true,
+ [IB_QPS_ERR] = true },
+ [IB_QPS_RTR] = { [IB_QPS_RESET] = true,
+ [IB_QPS_RTS] = true,
+ [IB_QPS_ERR] = true },
+ [IB_QPS_RTS] = { [IB_QPS_RESET] = true, [IB_QPS_ERR] = true },
+ [IB_QPS_SQD] = {},
+ [IB_QPS_SQE] = {},
+ [IB_QPS_ERR] = { [IB_QPS_RESET] = true, [IB_QPS_ERR] = true }
+ };
+
+ return sm[cur_state][new_state];
+}
+
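The added check_qp_state() encodes the allowed QP state transitions as a two-dimensional boolean table indexed by [cur_state][new_state]; any transition not listed defaults to false, replacing the long if-chain removed further down in this file. Below is a minimal standalone sketch of the same table-driven validation, with made-up state names standing in for the IB_QPS_* values.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical states standing in for the IB_QPS_* values. */
enum qs { QS_RESET, QS_INIT, QS_RTR, QS_RTS, QS_ERR, QS_MAX };

/* Table-driven transition check: unlisted entries default to false. */
static bool transition_ok(enum qs cur, enum qs next)
{
    static const bool allowed[QS_MAX][QS_MAX] = {
        [QS_RESET] = { [QS_RESET] = true, [QS_INIT] = true },
        [QS_INIT]  = { [QS_RESET] = true, [QS_INIT] = true,
                       [QS_RTR]   = true, [QS_ERR]  = true },
        [QS_RTR]   = { [QS_RESET] = true, [QS_RTS]  = true,
                       [QS_ERR]   = true },
        [QS_RTS]   = { [QS_RESET] = true, [QS_ERR]  = true },
        [QS_ERR]   = { [QS_RESET] = true, [QS_ERR]  = true },
    };

    return allowed[cur][next];
}

int main(void)
{
    printf("INIT -> RTR: %s\n", transition_ok(QS_INIT, QS_RTR) ? "ok" : "rejected");
    printf("RTS  -> RTR: %s\n", transition_ok(QS_RTS, QS_RTR) ? "ok" : "rejected");
    return 0;
}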
static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
int attr_mask, enum ib_qp_state cur_state,
enum ib_qp_state new_state)
@@ -2716,26 +2673,29 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
dma_addr_t dma_handle_2 = 0;
dma_addr_t dma_handle = 0;
__le32 doorbell[2] = {0};
- int rq_pa_start = 0;
u64 *mtts_2 = NULL;
int ret = -EINVAL;
- u64 *mtts = NULL;
+ u64 sq_ba = 0;
+ u64 rq_ba = 0;
int port;
u8 port_num;
u8 *dmac;
u8 *smac;
+ if (!check_qp_state(cur_state, new_state)) {
+ ibdev_err(ibqp->device,
+ "not support QP(%u) status from %d to %d\n",
+ ibqp->qp_num, cur_state, new_state);
+ return -EINVAL;
+ }
+
context = kzalloc(sizeof(*context), GFP_KERNEL);
if (!context)
return -ENOMEM;
/* Search qp buf's mtts */
- mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table,
- hr_qp->mtt.first_seg, &dma_handle);
- if (mtts == NULL) {
- dev_err(dev, "qp buf pa find failed\n");
+ if (find_wqe_mtt(hr_dev, hr_qp, &sq_ba, &rq_ba, &dma_handle))
goto out;
- }
/* Search IRRL's mtts */
mtts_2 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table,
@@ -2890,11 +2850,11 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
dmac = (u8 *)attr->ah_attr.roce.dmac;
- context->sq_rq_bt_l = cpu_to_le32((u32)(dma_handle));
+ context->sq_rq_bt_l = cpu_to_le32(dma_handle);
roce_set_field(context->qpc_bytes_24,
QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_M,
QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_S,
- ((u32)(dma_handle >> 32)));
+ upper_32_bits(dma_handle));
roce_set_bit(context->qpc_bytes_24,
QP_CONTEXT_QPC_BYTE_24_REMOTE_ENABLE_E2E_CREDITS_S,
1);
@@ -2993,14 +2953,12 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_M,
QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_S, 0);
- rq_pa_start = (u32)hr_qp->rq.offset / PAGE_SIZE;
- context->cur_rq_wqe_ba_l =
- cpu_to_le32((u32)(mtts[rq_pa_start]));
+ context->cur_rq_wqe_ba_l = cpu_to_le32(rq_ba);
roce_set_field(context->qpc_bytes_76,
QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_M,
QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_S,
- mtts[rq_pa_start] >> 32);
+ upper_32_bits(rq_ba));
roce_set_field(context->qpc_bytes_76,
QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_M,
QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_S, 0);
@@ -3062,8 +3020,7 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
QP_CONTEXT_QPC_BYTES_156_SL_S,
rdma_ah_get_sl(&attr->ah_attr));
hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
- } else if (cur_state == IB_QPS_RTR &&
- new_state == IB_QPS_RTS) {
+ } else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
/* If exist optional param, return error */
if ((attr_mask & IB_QP_ALT_PATH) ||
(attr_mask & IB_QP_ACCESS_FLAGS) ||
@@ -3075,12 +3032,12 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
goto out;
}
- context->rx_cur_sq_wqe_ba_l = cpu_to_le32((u32)(mtts[0]));
+ context->rx_cur_sq_wqe_ba_l = cpu_to_le32(sq_ba);
roce_set_field(context->qpc_bytes_120,
QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_M,
QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_S,
- (mtts[0]) >> 32);
+ upper_32_bits(sq_ba));
roce_set_field(context->qpc_bytes_124,
QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_M,
@@ -3223,28 +3180,18 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_M,
QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_S, 0);
- context->tx_cur_sq_wqe_ba_l = cpu_to_le32((u32)(mtts[0]));
+ context->tx_cur_sq_wqe_ba_l = cpu_to_le32(sq_ba);
roce_set_field(context->qpc_bytes_188,
QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_M,
QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_S,
- (mtts[0]) >> 32);
+ upper_32_bits(sq_ba));
roce_set_bit(context->qpc_bytes_188,
QP_CONTEXT_QPC_BYTES_188_PKT_RETRY_FLG_S, 0);
roce_set_field(context->qpc_bytes_188,
QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_M,
QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_S,
0);
- } else if (!((cur_state == IB_QPS_INIT && new_state == IB_QPS_RESET) ||
- (cur_state == IB_QPS_INIT && new_state == IB_QPS_ERR) ||
- (cur_state == IB_QPS_RTR && new_state == IB_QPS_RESET) ||
- (cur_state == IB_QPS_RTR && new_state == IB_QPS_ERR) ||
- (cur_state == IB_QPS_RTS && new_state == IB_QPS_RESET) ||
- (cur_state == IB_QPS_RTS && new_state == IB_QPS_ERR) ||
- (cur_state == IB_QPS_ERR && new_state == IB_QPS_RESET) ||
- (cur_state == IB_QPS_ERR && new_state == IB_QPS_ERR))) {
- dev_err(dev, "not support this status migration\n");
- goto out;
}
/* Every status migrate must change state */
@@ -3253,8 +3200,7 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
QP_CONTEXT_QPC_BYTES_144_QP_STATE_S, new_state);
/* SW pass context to HW */
- ret = hns_roce_v1_qp_modify(hr_dev, &hr_qp->mtt,
- to_hns_roce_state(cur_state),
+ ret = hns_roce_v1_qp_modify(hr_dev, to_hns_roce_state(cur_state),
to_hns_roce_state(new_state), context,
hr_qp);
if (ret) {
@@ -3636,8 +3582,6 @@ static void hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
u32 cqe_cnt_cur;
int wait_time = 0;
- hns_roce_free_cqc(hr_dev, hr_cq);
-
/*
* Before freeing cq buffer, we need to ensure that the outstanding CQE
* have been written by checking the CQE counter.
@@ -3660,14 +3604,6 @@ static void hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
}
wait_time++;
}
-
- hns_roce_mtt_cleanup(hr_dev, &hr_cq->mtt);
-
- ib_umem_release(hr_cq->umem);
- if (!udata) {
- /* Free the buff of stored cq */
- hns_roce_buf_free(hr_dev, hr_cq->buf.size, &hr_cq->buf);
- }
}
static void set_eq_cons_index_v1(struct hns_roce_eq *eq, int req_not)
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index c3316672b70e..c597d7281629 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -95,6 +95,7 @@ static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
{
struct hns_roce_mr *mr = to_hr_mr(wr->mr);
struct hns_roce_wqe_frmr_seg *fseg = wqe;
+ u64 pbl_ba;
/* use ib_access_flags */
roce_set_bit(rc_sq_wqe->byte_4, V2_RC_FRMR_WQE_BYTE_4_BIND_EN_S,
@@ -109,26 +110,27 @@ static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
wr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0);
/* Data structure reuse may lead to confusion */
- rc_sq_wqe->msg_len = cpu_to_le32(mr->pbl_ba & 0xffffffff);
- rc_sq_wqe->inv_key = cpu_to_le32(mr->pbl_ba >> 32);
+ pbl_ba = mr->pbl_mtr.hem_cfg.root_ba;
+ rc_sq_wqe->msg_len = cpu_to_le32(lower_32_bits(pbl_ba));
+ rc_sq_wqe->inv_key = cpu_to_le32(upper_32_bits(pbl_ba));
rc_sq_wqe->byte_16 = cpu_to_le32(wr->mr->length & 0xffffffff);
rc_sq_wqe->byte_20 = cpu_to_le32(wr->mr->length >> 32);
rc_sq_wqe->rkey = cpu_to_le32(wr->key);
rc_sq_wqe->va = cpu_to_le64(wr->mr->iova);
- fseg->pbl_size = cpu_to_le32(mr->pbl_size);
+ fseg->pbl_size = cpu_to_le32(mr->npages);
roce_set_field(fseg->mode_buf_pg_sz,
V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_M,
V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_S,
- mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
+ to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
roce_set_bit(fseg->mode_buf_pg_sz,
V2_RC_FRMR_WQE_BYTE_40_BLK_MODE_S, 0);
}
static void set_atomic_seg(const struct ib_send_wr *wr, void *wqe,
struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
- int valid_num_sge)
+ unsigned int valid_num_sge)
{
struct hns_roce_wqe_atomic_seg *aseg;
@@ -149,56 +151,33 @@ static void set_atomic_seg(const struct ib_send_wr *wr, void *wqe,
}
static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
- unsigned int *sge_ind, int valid_num_sge)
+ unsigned int *sge_ind, unsigned int valid_num_sge)
{
struct hns_roce_v2_wqe_data_seg *dseg;
- struct ib_sge *sg;
- int num_in_wqe = 0;
- int extend_sge_num;
- int fi_sge_num;
- int se_sge_num;
- int shift;
- int i;
+ unsigned int cnt = valid_num_sge;
+ struct ib_sge *sge = wr->sg_list;
+ unsigned int idx = *sge_ind;
- if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC)
- num_in_wqe = HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE;
- extend_sge_num = valid_num_sge - num_in_wqe;
- sg = wr->sg_list + num_in_wqe;
- shift = qp->hr_buf.page_shift;
+ if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
+ cnt -= HNS_ROCE_SGE_IN_WQE;
+ sge += HNS_ROCE_SGE_IN_WQE;
+ }
- /*
- * Check whether wr->num_sge sges are in the same page. If not, we
- * should calculate how many sges in the first page and the second
- * page.
- */
- dseg = hns_roce_get_extend_sge(qp, (*sge_ind) & (qp->sge.sge_cnt - 1));
- fi_sge_num = (round_up((uintptr_t)dseg, 1 << shift) -
- (uintptr_t)dseg) /
- sizeof(struct hns_roce_v2_wqe_data_seg);
- if (extend_sge_num > fi_sge_num) {
- se_sge_num = extend_sge_num - fi_sge_num;
- for (i = 0; i < fi_sge_num; i++) {
- set_data_seg_v2(dseg++, sg + i);
- (*sge_ind)++;
- }
- dseg = hns_roce_get_extend_sge(qp,
- (*sge_ind) & (qp->sge.sge_cnt - 1));
- for (i = 0; i < se_sge_num; i++) {
- set_data_seg_v2(dseg++, sg + fi_sge_num + i);
- (*sge_ind)++;
- }
- } else {
- for (i = 0; i < extend_sge_num; i++) {
- set_data_seg_v2(dseg++, sg + i);
- (*sge_ind)++;
- }
+ while (cnt > 0) {
+ dseg = hns_roce_get_extend_sge(qp, idx & (qp->sge.sge_cnt - 1));
+ set_data_seg_v2(dseg, sge);
+ idx++;
+ sge++;
+ cnt--;
}
+
+ *sge_ind = idx;
}
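The simplified set_extend_sge() above folds the old first-page/second-page split into one loop that masks the running index with (sge_cnt - 1), which behaves as a modulo only when sge_cnt is a power of two. A tiny standalone illustration of that ring-masking idiom:

#include <stdio.h>

/* Power-of-two ring indexing: idx & (cnt - 1) equals idx % cnt when cnt is
 * a power of two, and keeps wrapping correctly as idx increments past cnt. */
int main(void)
{
    const unsigned int sge_cnt = 8;              /* must be a power of two */
    unsigned int idx = 6;                        /* running producer index */

    for (int i = 0; i < 5; i++, idx++)
        printf("entry %u -> slot %u\n", idx, idx & (sge_cnt - 1));

    return 0;
}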
static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
void *wqe, unsigned int *sge_ind,
- int valid_num_sge)
+ unsigned int valid_num_sge)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_v2_wqe_data_seg *dseg = wqe;
@@ -208,15 +187,15 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
int i;
if (wr->send_flags & IB_SEND_INLINE && valid_num_sge) {
- if (le32_to_cpu(rc_sq_wqe->msg_len) >
- hr_dev->caps.max_sq_inline) {
+ if (unlikely(le32_to_cpu(rc_sq_wqe->msg_len) >
+ hr_dev->caps.max_sq_inline)) {
ibdev_err(ibdev, "inline len(1-%d)=%d, illegal",
rc_sq_wqe->msg_len,
hr_dev->caps.max_sq_inline);
return -EINVAL;
}
- if (wr->opcode == IB_WR_RDMA_READ) {
+ if (unlikely(wr->opcode == IB_WR_RDMA_READ)) {
ibdev_err(ibdev, "Not support inline data!\n");
return -EINVAL;
}
@@ -230,7 +209,7 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_INLINE_S,
1);
} else {
- if (valid_num_sge <= HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) {
+ if (valid_num_sge <= HNS_ROCE_SGE_IN_WQE) {
for (i = 0; i < wr->num_sge; i++) {
if (likely(wr->sg_list[i].length)) {
set_data_seg_v2(dseg, wr->sg_list + i);
@@ -243,8 +222,8 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
(*sge_ind) & (qp->sge.sge_cnt - 1));
- for (i = 0; i < wr->num_sge &&
- j < HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE; i++) {
+ for (i = 0; i < wr->num_sge && j < HNS_ROCE_SGE_IN_WQE;
+ i++) {
if (likely(wr->sg_list[i].length)) {
set_data_seg_v2(dseg, wr->sg_list + i);
dseg++;
@@ -290,10 +269,11 @@ static int check_send_valid(struct hns_roce_dev *hr_dev,
return 0;
}
-static inline int calc_wr_sge_num(const struct ib_send_wr *wr, u32 *sge_len)
+static unsigned int calc_wr_sge_num(const struct ib_send_wr *wr,
+ unsigned int *sge_len)
{
- int valid_num = 0;
- u32 len = 0;
+ unsigned int valid_num = 0;
+ unsigned int len = 0;
int i;
for (i = 0; i < wr->num_sge; i++) {
@@ -424,7 +404,7 @@ static inline int set_rc_wqe(struct hns_roce_qp *qp,
{
struct hns_roce_v2_rc_send_wqe *rc_sq_wqe = wqe;
unsigned int curr_idx = *sge_idx;
- int valid_num_sge;
+ unsigned int valid_num_sge;
u32 msg_len = 0;
int ret = 0;
@@ -521,8 +501,7 @@ static inline void update_sq_db(struct hns_roce_dev *hr_dev,
roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_CMD_M,
V2_DB_BYTE_4_CMD_S, HNS_ROCE_V2_SQ_DB);
roce_set_field(sq_db.parameter, V2_DB_PARAMETER_IDX_M,
- V2_DB_PARAMETER_IDX_S,
- qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1));
+ V2_DB_PARAMETER_IDX_S, qp->sq.head);
roce_set_field(sq_db.parameter, V2_DB_PARAMETER_SL_M,
V2_DB_PARAMETER_SL_S, qp->sl);
@@ -548,7 +527,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
spin_lock_irqsave(&qp->sq.lock, flags);
ret = check_send_valid(hr_dev, qp);
- if (ret) {
+ if (unlikely(ret)) {
*bad_wr = wr;
nreq = 0;
goto out;
@@ -584,7 +563,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
else if (ibqp->qp_type == IB_QPT_RC)
ret = set_rc_wqe(qp, wr, wqe, &sge_idx, owner_bit);
- if (ret) {
+ if (unlikely(ret)) {
*bad_wr = wr;
goto out;
}
@@ -634,15 +613,15 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
spin_lock_irqsave(&hr_qp->rq.lock, flags);
ret = check_recv_valid(hr_dev, hr_qp);
- if (ret) {
+ if (unlikely(ret)) {
*bad_wr = wr;
nreq = 0;
goto out;
}
for (nreq = 0; wr; ++nreq, wr = wr->next) {
- if (hns_roce_wq_overflow(&hr_qp->rq, nreq,
- hr_qp->ibqp.recv_cq)) {
+ if (unlikely(hns_roce_wq_overflow(&hr_qp->rq, nreq,
+ hr_qp->ibqp.recv_cq))) {
ret = -ENOMEM;
*bad_wr = wr;
goto out;
@@ -650,7 +629,7 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);
- if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
+ if (unlikely(wr->num_sge >= hr_qp->rq.max_gs)) {
ibdev_err(ibdev, "rq:num_sge=%d >= qp->sq.max_gs=%d\n",
wr->num_sge, hr_qp->rq.max_gs);
ret = -EINVAL;
@@ -667,13 +646,14 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
dseg++;
}
- if (i < hr_qp->rq.max_gs) {
+ if (wr->num_sge < hr_qp->rq.max_gs) {
dseg->lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
dseg->addr = 0;
+ dseg->len = cpu_to_le32(HNS_ROCE_INVALID_SGE_LENGTH);
}
/* rq support inline data */
- if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
+ if (hr_qp->rq_inl_buf.wqe_cnt) {
sge_list = hr_qp->rq_inl_buf.wqe_list[wqe_idx].sg_list;
hr_qp->rq_inl_buf.wqe_list[wqe_idx].sge_cnt =
(u32)wr->num_sge;
@@ -715,6 +695,129 @@ out:
return ret;
}
+static void *get_srq_wqe(struct hns_roce_srq *srq, int n)
+{
+ return hns_roce_buf_offset(srq->buf_mtr.kmem, n << srq->wqe_shift);
+}
+
+static void *get_idx_buf(struct hns_roce_idx_que *idx_que, int n)
+{
+ return hns_roce_buf_offset(idx_que->mtr.kmem,
+ n << idx_que->entry_shift);
+}
+
+static void hns_roce_free_srq_wqe(struct hns_roce_srq *srq, int wqe_index)
+{
+ /* always called with interrupts disabled. */
+ spin_lock(&srq->lock);
+
+ bitmap_clear(srq->idx_que.bitmap, wqe_index, 1);
+ srq->tail++;
+
+ spin_unlock(&srq->lock);
+}
+
+static int find_empty_entry(struct hns_roce_idx_que *idx_que,
+ unsigned long size)
+{
+ int wqe_idx;
+
+ if (unlikely(bitmap_full(idx_que->bitmap, size)))
+ return -ENOSPC;
+
+ wqe_idx = find_first_zero_bit(idx_que->bitmap, size);
+
+ bitmap_set(idx_que->bitmap, wqe_idx, 1);
+
+ return wqe_idx;
+}
+
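find_empty_entry() above allocates an SRQ index by scanning the index-queue bitmap for the first clear bit and setting it, while hns_roce_free_srq_wqe() clears it again on completion. The standalone sketch below mirrors that allocate/free pattern with a plain byte array instead of the kernel bitmap helpers; it is illustration only.

#include <stdio.h>

#define NSLOTS 16

/* Minimal stand-in for the idx_que bitmap: one byte per slot for clarity. */
static unsigned char used[NSLOTS];

static int alloc_slot(void)
{
    for (int i = 0; i < NSLOTS; i++) {
        if (!used[i]) {          /* first clear "bit" */
            used[i] = 1;         /* mark it allocated */
            return i;
        }
    }
    return -1;                   /* queue full, like -ENOSPC above */
}

static void free_slot(int i)
{
    used[i] = 0;                 /* mirror of bitmap_clear() on completion */
}

int main(void)
{
    int a = alloc_slot(), b = alloc_slot();

    printf("allocated %d and %d\n", a, b);
    free_slot(a);
    printf("reallocated %d\n", alloc_slot()); /* reuses the freed slot */
    return 0;
}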
+static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
+ const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
+ struct hns_roce_srq *srq = to_hr_srq(ibsrq);
+ struct hns_roce_v2_wqe_data_seg *dseg;
+ struct hns_roce_v2_db srq_db;
+ unsigned long flags;
+ __le32 *srq_idx;
+ int ret = 0;
+ int wqe_idx;
+ void *wqe;
+ int nreq;
+ int ind;
+ int i;
+
+ spin_lock_irqsave(&srq->lock, flags);
+
+ ind = srq->head & (srq->wqe_cnt - 1);
+
+ for (nreq = 0; wr; ++nreq, wr = wr->next) {
+ if (unlikely(wr->num_sge >= srq->max_gs)) {
+ ret = -EINVAL;
+ *bad_wr = wr;
+ break;
+ }
+
+ if (unlikely(srq->head == srq->tail)) {
+ ret = -ENOMEM;
+ *bad_wr = wr;
+ break;
+ }
+
+ wqe_idx = find_empty_entry(&srq->idx_que, srq->wqe_cnt);
+ if (unlikely(wqe_idx < 0)) {
+ ret = -ENOMEM;
+ *bad_wr = wr;
+ break;
+ }
+
+ wqe = get_srq_wqe(srq, wqe_idx);
+ dseg = (struct hns_roce_v2_wqe_data_seg *)wqe;
+
+ for (i = 0; i < wr->num_sge; ++i) {
+ dseg[i].len = cpu_to_le32(wr->sg_list[i].length);
+ dseg[i].lkey = cpu_to_le32(wr->sg_list[i].lkey);
+ dseg[i].addr = cpu_to_le64(wr->sg_list[i].addr);
+ }
+
+ if (wr->num_sge < srq->max_gs) {
+ dseg[i].len = cpu_to_le32(HNS_ROCE_INVALID_SGE_LENGTH);
+ dseg[i].lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
+ dseg[i].addr = 0;
+ }
+
+ srq_idx = get_idx_buf(&srq->idx_que, ind);
+ *srq_idx = cpu_to_le32(wqe_idx);
+
+ srq->wrid[wqe_idx] = wr->wr_id;
+ ind = (ind + 1) & (srq->wqe_cnt - 1);
+ }
+
+ if (likely(nreq)) {
+ srq->head += nreq;
+
+ /*
+ * Make sure that descriptors are written before
+ * doorbell record.
+ */
+ wmb();
+
+ srq_db.byte_4 =
+ cpu_to_le32(HNS_ROCE_V2_SRQ_DB << V2_DB_BYTE_4_CMD_S |
+ (srq->srqn & V2_DB_BYTE_4_TAG_M));
+ srq_db.parameter =
+ cpu_to_le32(srq->head & V2_DB_PARAMETER_IDX_M);
+
+ hns_roce_write64(hr_dev, (__le32 *)&srq_db, srq->db_reg_l);
+ }
+
+ spin_unlock_irqrestore(&srq->lock, flags);
+
+ return ret;
+}
+
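hns_roce_v2_post_srq_recv() above writes the WQE data segments and the index-queue entry first, then issues wmb() before ringing the doorbell, so the device can never observe the doorbell ahead of the descriptors. The sketch below shows the same publish-then-barrier-then-notify ordering in portable C11, with a release fence standing in for the kernel's wmb(); the structures are hypothetical.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Producer-side ordering sketch: publish descriptors, then a release
 * barrier, then the "doorbell". */
static uint32_t descriptor[4];
static _Atomic uint32_t doorbell;

static void post(uint32_t idx, uint32_t payload)
{
    descriptor[idx & 3] = payload;                 /* write the work entry  */
    atomic_thread_fence(memory_order_release);     /* ~ wmb()               */
    atomic_store_explicit(&doorbell, idx + 1,      /* tell the consumer     */
                          memory_order_relaxed);   /* about the new head    */
}

int main(void)
{
    post(0, 0xabcd);
    printf("doorbell now %u\n", atomic_load(&doorbell));
    return 0;
}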
static int hns_roce_v2_cmd_hw_reseted(struct hns_roce_dev *hr_dev,
unsigned long instance_stage,
unsigned long reset_stage)
@@ -742,7 +845,7 @@ static int hns_roce_v2_cmd_hw_resetting(struct hns_roce_dev *hr_dev,
unsigned long instance_stage,
unsigned long reset_stage)
{
- struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
struct hnae3_handle *handle = priv->handle;
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
@@ -768,7 +871,7 @@ static int hns_roce_v2_cmd_hw_resetting(struct hns_roce_dev *hr_dev,
static int hns_roce_v2_cmd_sw_resetting(struct hns_roce_dev *hr_dev)
{
- struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
struct hnae3_handle *handle = priv->handle;
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
@@ -785,7 +888,7 @@ static int hns_roce_v2_cmd_sw_resetting(struct hns_roce_dev *hr_dev)
static int hns_roce_v2_rst_process_cmd(struct hns_roce_dev *hr_dev)
{
- struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
struct hnae3_handle *handle = priv->handle;
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
unsigned long instance_stage; /* the current instance stage */
@@ -865,7 +968,7 @@ static void hns_roce_free_cmq_desc(struct hns_roce_dev *hr_dev,
static int hns_roce_init_cmq_ring(struct hns_roce_dev *hr_dev, bool ring_type)
{
- struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
struct hns_roce_v2_cmq_ring *ring = (ring_type == TYPE_CSQ) ?
&priv->cmq.csq : &priv->cmq.crq;
@@ -878,7 +981,7 @@ static int hns_roce_init_cmq_ring(struct hns_roce_dev *hr_dev, bool ring_type)
static void hns_roce_cmq_init_regs(struct hns_roce_dev *hr_dev, bool ring_type)
{
- struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
struct hns_roce_v2_cmq_ring *ring = (ring_type == TYPE_CSQ) ?
&priv->cmq.csq : &priv->cmq.crq;
dma_addr_t dma = ring->desc_dma_addr;
@@ -904,7 +1007,7 @@ static void hns_roce_cmq_init_regs(struct hns_roce_dev *hr_dev, bool ring_type)
static int hns_roce_v2_cmq_init(struct hns_roce_dev *hr_dev)
{
- struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
int ret;
/* Setup the queue entries for command queue */
@@ -948,7 +1051,7 @@ err_crq:
static void hns_roce_v2_cmq_exit(struct hns_roce_dev *hr_dev)
{
- struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);
hns_roce_free_cmq_desc(hr_dev, &priv->cmq.crq);
@@ -970,15 +1073,15 @@ static void hns_roce_cmq_setup_basic_desc(struct hns_roce_cmq_desc *desc,
static int hns_roce_cmq_csq_done(struct hns_roce_dev *hr_dev)
{
- struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
u32 head = roce_read(hr_dev, ROCEE_TX_CMQ_HEAD_REG);
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
return head == priv->cmq.csq.next_to_use;
}
static int hns_roce_cmq_csq_clean(struct hns_roce_dev *hr_dev)
{
- struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
struct hns_roce_cmq_desc *desc;
u16 ntc = csq->next_to_clean;
@@ -1003,7 +1106,7 @@ static int hns_roce_cmq_csq_clean(struct hns_roce_dev *hr_dev)
static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
struct hns_roce_cmq_desc *desc, int num)
{
- struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
struct hns_roce_cmq_desc *desc_to_use;
bool complete = false;
@@ -1131,7 +1234,7 @@ static int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev)
static bool hns_roce_func_clr_chk_rst(struct hns_roce_dev *hr_dev)
{
- struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
struct hnae3_handle *handle = priv->handle;
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
unsigned long reset_cnt;
@@ -1151,7 +1254,7 @@ static bool hns_roce_func_clr_chk_rst(struct hns_roce_dev *hr_dev)
static void hns_roce_func_clr_rst_prc(struct hns_roce_dev *hr_dev, int retval,
int flag)
{
- struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
struct hnae3_handle *handle = priv->handle;
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
unsigned long instance_stage;
@@ -1349,34 +1452,26 @@ static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev)
static int hns_roce_query_pf_timer_resource(struct hns_roce_dev *hr_dev)
{
struct hns_roce_pf_timer_res_a *req_a;
- struct hns_roce_cmq_desc desc[2];
- int ret, i;
+ struct hns_roce_cmq_desc desc;
+ int ret;
- for (i = 0; i < 2; i++) {
- hns_roce_cmq_setup_basic_desc(&desc[i],
- HNS_ROCE_OPC_QUERY_PF_TIMER_RES,
- true);
+ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_PF_TIMER_RES,
+ true);
- if (i == 0)
- desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
- else
- desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
- }
-
- ret = hns_roce_cmq_send(hr_dev, desc, 2);
+ ret = hns_roce_cmq_send(hr_dev, &desc, 1);
if (ret)
return ret;
- req_a = (struct hns_roce_pf_timer_res_a *)desc[0].data;
+ req_a = (struct hns_roce_pf_timer_res_a *)desc.data;
hr_dev->caps.qpc_timer_bt_num =
- roce_get_field(req_a->qpc_timer_bt_idx_num,
- PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_M,
- PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_S);
+ roce_get_field(req_a->qpc_timer_bt_idx_num,
+ PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_M,
+ PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_S);
hr_dev->caps.cqc_timer_bt_num =
- roce_get_field(req_a->cqc_timer_bt_idx_num,
- PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_M,
- PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_S);
+ roce_get_field(req_a->cqc_timer_bt_idx_num,
+ PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_M,
+ PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_S);
return 0;
}
@@ -1786,6 +1881,9 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
caps->flags = roce_get_field(resp_c->cap_flags_num_pds,
V2_QUERY_PF_CAPS_C_CAP_FLAGS_M,
V2_QUERY_PF_CAPS_C_CAP_FLAGS_S);
+ caps->flags |= le16_to_cpu(resp_d->cap_flags_ex) <<
+ HNS_ROCE_CAP_FLAGS_EX_SHIFT;
+
caps->num_cqs = 1 << roce_get_field(resp_c->max_gid_num_cqs,
V2_QUERY_PF_CAPS_C_NUM_CQS_M,
V2_QUERY_PF_CAPS_C_NUM_CQS_S);
@@ -1978,11 +2076,6 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
hr_dev->vendor_part_id = hr_dev->pci_dev->device;
hr_dev->sys_image_guid = be64_to_cpu(hr_dev->ib_dev.node_guid);
- caps->num_mtt_segs = HNS_ROCE_V2_MAX_MTT_SEGS;
- caps->num_cqe_segs = HNS_ROCE_V2_MAX_CQE_SEGS;
- caps->num_srqwqe_segs = HNS_ROCE_V2_MAX_SRQWQE_SEGS;
- caps->num_idx_segs = HNS_ROCE_V2_MAX_IDX_SEGS;
-
caps->pbl_ba_pg_sz = HNS_ROCE_BA_PG_SZ_SUPPORTED_16K;
caps->pbl_buf_pg_sz = 0;
caps->pbl_hop_num = HNS_ROCE_PBL_HOP_NUM;
@@ -2040,8 +2133,6 @@ static int hns_roce_config_link_table(struct hns_roce_dev *hr_dev,
page_num = link_tbl->npages;
entry = link_tbl->table.buf;
- memset(req_a, 0, sizeof(*req_a));
- memset(req_b, 0, sizeof(*req_b));
for (i = 0; i < 2; i++) {
hns_roce_cmq_setup_basic_desc(&desc[i], opcode, false);
@@ -2050,39 +2141,30 @@ static int hns_roce_config_link_table(struct hns_roce_dev *hr_dev,
desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
else
desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
-
- if (i == 0) {
- req_a->base_addr_l =
- cpu_to_le32(link_tbl->table.map & 0xffffffff);
- req_a->base_addr_h =
- cpu_to_le32(link_tbl->table.map >> 32);
- roce_set_field(req_a->depth_pgsz_init_en,
- CFG_LLM_QUE_DEPTH_M, CFG_LLM_QUE_DEPTH_S,
- link_tbl->npages);
- roce_set_field(req_a->depth_pgsz_init_en,
- CFG_LLM_QUE_PGSZ_M, CFG_LLM_QUE_PGSZ_S,
- link_tbl->pg_sz);
- req_a->head_ba_l = cpu_to_le32(entry[0].blk_ba0);
- req_a->head_ba_h_nxtptr =
- cpu_to_le32(entry[0].blk_ba1_nxt_ptr);
- roce_set_field(req_a->head_ptr, CFG_LLM_HEAD_PTR_M,
- CFG_LLM_HEAD_PTR_S, 0);
- } else {
- req_b->tail_ba_l =
- cpu_to_le32(entry[page_num - 1].blk_ba0);
- roce_set_field(req_b->tail_ba_h, CFG_LLM_TAIL_BA_H_M,
- CFG_LLM_TAIL_BA_H_S,
- entry[page_num - 1].blk_ba1_nxt_ptr &
- HNS_ROCE_LINK_TABLE_BA1_M);
- roce_set_field(req_b->tail_ptr, CFG_LLM_TAIL_PTR_M,
- CFG_LLM_TAIL_PTR_S,
- (entry[page_num - 2].blk_ba1_nxt_ptr &
- HNS_ROCE_LINK_TABLE_NXT_PTR_M) >>
- HNS_ROCE_LINK_TABLE_NXT_PTR_S);
- }
}
+
+ req_a->base_addr_l = cpu_to_le32(link_tbl->table.map & 0xffffffff);
+ req_a->base_addr_h = cpu_to_le32(link_tbl->table.map >> 32);
+ roce_set_field(req_a->depth_pgsz_init_en, CFG_LLM_QUE_DEPTH_M,
+ CFG_LLM_QUE_DEPTH_S, link_tbl->npages);
+ roce_set_field(req_a->depth_pgsz_init_en, CFG_LLM_QUE_PGSZ_M,
+ CFG_LLM_QUE_PGSZ_S, link_tbl->pg_sz);
roce_set_field(req_a->depth_pgsz_init_en, CFG_LLM_INIT_EN_M,
CFG_LLM_INIT_EN_S, 1);
+ req_a->head_ba_l = cpu_to_le32(entry[0].blk_ba0);
+ req_a->head_ba_h_nxtptr = cpu_to_le32(entry[0].blk_ba1_nxt_ptr);
+ roce_set_field(req_a->head_ptr, CFG_LLM_HEAD_PTR_M, CFG_LLM_HEAD_PTR_S,
+ 0);
+
+ req_b->tail_ba_l = cpu_to_le32(entry[page_num - 1].blk_ba0);
+ roce_set_field(req_b->tail_ba_h, CFG_LLM_TAIL_BA_H_M,
+ CFG_LLM_TAIL_BA_H_S,
+ entry[page_num - 1].blk_ba1_nxt_ptr &
+ HNS_ROCE_LINK_TABLE_BA1_M);
+ roce_set_field(req_b->tail_ptr, CFG_LLM_TAIL_PTR_M, CFG_LLM_TAIL_PTR_S,
+ (entry[page_num - 2].blk_ba1_nxt_ptr &
+ HNS_ROCE_LINK_TABLE_NXT_PTR_M) >>
+ HNS_ROCE_LINK_TABLE_NXT_PTR_S);
return hns_roce_cmq_send(hr_dev, desc, 2);
}
@@ -2438,12 +2520,9 @@ static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
reg_smac_l = *(u32 *)(&addr[0]);
reg_smac_h = *(u16 *)(&addr[4]);
- memset(smac_tb, 0, sizeof(*smac_tb));
- roce_set_field(smac_tb->tb_idx_rsv,
- CFG_SMAC_TB_IDX_M,
+ roce_set_field(smac_tb->tb_idx_rsv, CFG_SMAC_TB_IDX_M,
CFG_SMAC_TB_IDX_S, phy_port);
- roce_set_field(smac_tb->vf_smac_h_rsv,
- CFG_SMAC_TB_VF_SMAC_H_M,
+ roce_set_field(smac_tb->vf_smac_h_rsv, CFG_SMAC_TB_VF_SMAC_H_M,
CFG_SMAC_TB_VF_SMAC_H_S, reg_smac_h);
smac_tb->vf_smac_l = cpu_to_le32(reg_smac_l);
@@ -2453,32 +2532,30 @@ static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
static int set_mtpt_pbl(struct hns_roce_v2_mpt_entry *mpt_entry,
struct hns_roce_mr *mr)
{
- struct sg_dma_page_iter sg_iter;
- u64 page_addr;
- u64 *pages;
- int i;
+ struct hns_roce_dev *hr_dev = to_hr_dev(mr->ibmr.device);
+ u64 pages[HNS_ROCE_V2_MAX_INNER_MTPT_NUM] = { 0 };
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ dma_addr_t pbl_ba;
+ int i, count;
- mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
- mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
- roce_set_field(mpt_entry->byte_48_mode_ba,
- V2_MPT_BYTE_48_PBL_BA_H_M, V2_MPT_BYTE_48_PBL_BA_H_S,
- upper_32_bits(mr->pbl_ba >> 3));
+ count = hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, pages,
+ ARRAY_SIZE(pages), &pbl_ba);
+ if (count < 1) {
+ ibdev_err(ibdev, "failed to find PBL mtr, count = %d.\n",
+ count);
+ return -ENOBUFS;
+ }
- pages = (u64 *)__get_free_page(GFP_KERNEL);
- if (!pages)
- return -ENOMEM;
+ /* Aligned to the hardware address access unit */
+ for (i = 0; i < count; i++)
+ pages[i] >>= 6;
- i = 0;
- for_each_sg_dma_page(mr->umem->sg_head.sgl, &sg_iter, mr->umem->nmap, 0) {
- page_addr = sg_page_iter_dma_address(&sg_iter);
- pages[i] = page_addr >> 6;
+ mpt_entry->pbl_size = cpu_to_le32(mr->npages);
+ mpt_entry->pbl_ba_l = cpu_to_le32(pbl_ba >> 3);
+ roce_set_field(mpt_entry->byte_48_mode_ba,
+ V2_MPT_BYTE_48_PBL_BA_H_M, V2_MPT_BYTE_48_PBL_BA_H_S,
+ upper_32_bits(pbl_ba >> 3));
- /* Record the first 2 entry directly to MTPT table */
- if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1)
- goto found;
- i++;
- }
-found:
mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
roce_set_field(mpt_entry->byte_56_pa0_h, V2_MPT_BYTE_56_PA0_H_M,
V2_MPT_BYTE_56_PA0_H_S, upper_32_bits(pages[0]));
@@ -2489,9 +2566,7 @@ found:
roce_set_field(mpt_entry->byte_64_buf_pa1,
V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
- mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
-
- free_page((unsigned long)pages);
+ to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
return 0;
}
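In set_mtpt_pbl() above, the page addresses are shifted right by 6 ("aligned to the hardware address access unit") and the PBL base is stored shifted right by 3 and split into low/high fields, so known-zero alignment bits are dropped to fit the hardware fields. A standalone sketch of that packing idiom follows; the unit sizes and values are illustrative only.

#include <stdint.h>
#include <stdio.h>

/* When an address is known to be aligned, the low bits are zero and can be
 * shifted out so the value fits a narrower hardware field; the reader
 * shifts them back in. */
int main(void)
{
    uint64_t pbl_ba = 0x0000004fedcba000ULL;   /* 4 KiB aligned base      */
    uint64_t packed = pbl_ba >> 3;             /* stored in 8-byte units  */

    uint32_t lo = (uint32_t)packed;            /* lower_32_bits()         */
    uint32_t hi = (uint32_t)(packed >> 32);    /* upper_32_bits()         */

    uint64_t restored = (((uint64_t)hi << 32) | lo) << 3;

    printf("packed lo=0x%x hi=0x%x restored=0x%llx\n",
           lo, hi, (unsigned long long)restored);
    return 0;
}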
@@ -2513,7 +2588,7 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
roce_set_field(mpt_entry->byte_4_pd_hop_st,
V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
- mr->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
+ to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.ba_pg_shift));
roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
V2_MPT_BYTE_4_PD_S, mr->pd);
@@ -2599,11 +2674,19 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
static int hns_roce_v2_frmr_write_mtpt(void *mb_buf, struct hns_roce_mr *mr)
{
+ struct hns_roce_dev *hr_dev = to_hr_dev(mr->ibmr.device);
+ struct ib_device *ibdev = &hr_dev->ib_dev;
struct hns_roce_v2_mpt_entry *mpt_entry;
+ dma_addr_t pbl_ba = 0;
mpt_entry = mb_buf;
memset(mpt_entry, 0, sizeof(*mpt_entry));
+ if (hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, NULL, 0, &pbl_ba) < 0) {
+ ibdev_err(ibdev, "failed to find frmr mtr.\n");
+ return -ENOBUFS;
+ }
+
roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_FREE);
roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
@@ -2611,7 +2694,7 @@ static int hns_roce_v2_frmr_write_mtpt(void *mb_buf, struct hns_roce_mr *mr)
roce_set_field(mpt_entry->byte_4_pd_hop_st,
V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
- mr->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
+ to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.ba_pg_shift));
roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
V2_MPT_BYTE_4_PD_S, mr->pd);
@@ -2624,17 +2707,17 @@ static int hns_roce_v2_frmr_write_mtpt(void *mb_buf, struct hns_roce_mr *mr)
roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 0);
roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BPD_S, 1);
- mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
+ mpt_entry->pbl_size = cpu_to_le32(mr->npages);
- mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
+ mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(pbl_ba >> 3));
roce_set_field(mpt_entry->byte_48_mode_ba, V2_MPT_BYTE_48_PBL_BA_H_M,
V2_MPT_BYTE_48_PBL_BA_H_S,
- upper_32_bits(mr->pbl_ba >> 3));
+ upper_32_bits(pbl_ba >> 3));
roce_set_field(mpt_entry->byte_64_buf_pa1,
V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
- mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
+ to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
return 0;
}
@@ -2680,7 +2763,8 @@ static int hns_roce_v2_mw_write_mtpt(void *mb_buf, struct hns_roce_mw *mw)
static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n)
{
- return hns_roce_buf_offset(&hr_cq->buf, n * HNS_ROCE_V2_CQE_ENTRY_SIZE);
+ return hns_roce_buf_offset(hr_cq->mtr.kmem,
+ n * HNS_ROCE_V2_CQE_ENTRY_SIZE);
}
static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, int n)
@@ -2692,30 +2776,9 @@ static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, int n)
!!(n & hr_cq->cq_depth)) ? cqe : NULL;
}
-static struct hns_roce_v2_cqe *next_cqe_sw_v2(struct hns_roce_cq *hr_cq)
+static inline void hns_roce_v2_cq_set_ci(struct hns_roce_cq *hr_cq, u32 ci)
{
- return get_sw_cqe_v2(hr_cq, hr_cq->cons_index);
-}
-
-static void *get_srq_wqe(struct hns_roce_srq *srq, int n)
-{
- return hns_roce_buf_offset(&srq->buf, n << srq->wqe_shift);
-}
-
-static void hns_roce_free_srq_wqe(struct hns_roce_srq *srq, int wqe_index)
-{
- /* always called with interrupts disabled. */
- spin_lock(&srq->lock);
-
- bitmap_clear(srq->idx_que.bitmap, wqe_index, 1);
- srq->tail++;
-
- spin_unlock(&srq->lock);
-}
-
-static void hns_roce_v2_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
-{
- *hr_cq->set_ci_db = cons_index & V2_CQ_DB_PARAMETER_CONS_IDX_M;
+ *hr_cq->set_ci_db = ci & V2_CQ_DB_PARAMETER_CONS_IDX_M;
}
static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
@@ -2801,39 +2864,39 @@ static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
roce_set_field(cq_context->byte_8_cqn, V2_CQC_BYTE_8_CQN_M,
V2_CQC_BYTE_8_CQN_S, hr_cq->cqn);
- cq_context->cqe_cur_blk_addr = cpu_to_le32(mtts[0] >> PAGE_ADDR_SHIFT);
+ cq_context->cqe_cur_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[0]));
roce_set_field(cq_context->byte_16_hop_addr,
V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_M,
V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_S,
- mtts[0] >> (32 + PAGE_ADDR_SHIFT));
+ upper_32_bits(to_hr_hw_page_addr(mtts[0])));
roce_set_field(cq_context->byte_16_hop_addr,
V2_CQC_BYTE_16_CQE_HOP_NUM_M,
V2_CQC_BYTE_16_CQE_HOP_NUM_S, hr_dev->caps.cqe_hop_num ==
HNS_ROCE_HOP_NUM_0 ? 0 : hr_dev->caps.cqe_hop_num);
- cq_context->cqe_nxt_blk_addr = cpu_to_le32(mtts[1] >> PAGE_ADDR_SHIFT);
+ cq_context->cqe_nxt_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[1]));
roce_set_field(cq_context->byte_24_pgsz_addr,
V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_M,
V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_S,
- mtts[1] >> (32 + PAGE_ADDR_SHIFT));
+ upper_32_bits(to_hr_hw_page_addr(mtts[1])));
roce_set_field(cq_context->byte_24_pgsz_addr,
V2_CQC_BYTE_24_CQE_BA_PG_SZ_M,
V2_CQC_BYTE_24_CQE_BA_PG_SZ_S,
- hr_dev->caps.cqe_ba_pg_sz + PG_SHIFT_OFFSET);
+ to_hr_hw_page_shift(hr_cq->mtr.hem_cfg.ba_pg_shift));
roce_set_field(cq_context->byte_24_pgsz_addr,
V2_CQC_BYTE_24_CQE_BUF_PG_SZ_M,
V2_CQC_BYTE_24_CQE_BUF_PG_SZ_S,
- hr_dev->caps.cqe_buf_pg_sz + PG_SHIFT_OFFSET);
+ to_hr_hw_page_shift(hr_cq->mtr.hem_cfg.buf_pg_shift));
cq_context->cqe_ba = cpu_to_le32(dma_handle >> 3);
roce_set_field(cq_context->byte_40_cqe_ba, V2_CQC_BYTE_40_CQE_BA_M,
V2_CQC_BYTE_40_CQE_BA_S, (dma_handle >> (32 + 3)));
- if (hr_cq->db_en)
- roce_set_bit(cq_context->byte_44_db_record,
- V2_CQC_BYTE_44_DB_RECORD_EN_S, 1);
+ roce_set_bit(cq_context->byte_44_db_record,
+ V2_CQC_BYTE_44_DB_RECORD_EN_S,
+ (hr_cq->flags & HNS_ROCE_CQ_FLAG_RECORD_DB) ? 1 : 0);
roce_set_field(cq_context->byte_44_db_record,
V2_CQC_BYTE_44_DB_RECORD_ADDR_M,
@@ -2873,8 +2936,7 @@ static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
roce_set_field(doorbell[0], V2_CQ_DB_BYTE_4_CMD_M, V2_DB_BYTE_4_CMD_S,
HNS_ROCE_V2_CQ_DB_NTR);
roce_set_field(doorbell[1], V2_CQ_DB_PARAMETER_CONS_IDX_M,
- V2_CQ_DB_PARAMETER_CONS_IDX_S,
- hr_cq->cons_index & ((hr_cq->cq_depth << 1) - 1));
+ V2_CQ_DB_PARAMETER_CONS_IDX_S, hr_cq->cons_index);
roce_set_field(doorbell[1], V2_CQ_DB_PARAMETER_CMD_SN_M,
V2_CQ_DB_PARAMETER_CMD_SN_S, hr_cq->arm_sn & 0x3);
roce_set_bit(doorbell[1], V2_CQ_DB_PARAMETER_NOTIFY_S,
@@ -2911,7 +2973,7 @@ static int hns_roce_handle_recv_inl_wqe(struct hns_roce_v2_cqe *cqe,
wqe_buf += size;
}
- if (data_len) {
+ if (unlikely(data_len)) {
wc->status = IB_WC_LOC_LEN_ERR;
return -EAGAIN;
}
@@ -2968,6 +3030,62 @@ out:
return npolled;
}
+static void get_cqe_status(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
+ struct hns_roce_v2_cqe *cqe, struct ib_wc *wc)
+{
+ static const struct {
+ u32 cqe_status;
+ enum ib_wc_status wc_status;
+ } map[] = {
+ { HNS_ROCE_CQE_V2_SUCCESS, IB_WC_SUCCESS },
+ { HNS_ROCE_CQE_V2_LOCAL_LENGTH_ERR, IB_WC_LOC_LEN_ERR },
+ { HNS_ROCE_CQE_V2_LOCAL_QP_OP_ERR, IB_WC_LOC_QP_OP_ERR },
+ { HNS_ROCE_CQE_V2_LOCAL_PROT_ERR, IB_WC_LOC_PROT_ERR },
+ { HNS_ROCE_CQE_V2_WR_FLUSH_ERR, IB_WC_WR_FLUSH_ERR },
+ { HNS_ROCE_CQE_V2_MW_BIND_ERR, IB_WC_MW_BIND_ERR },
+ { HNS_ROCE_CQE_V2_BAD_RESP_ERR, IB_WC_BAD_RESP_ERR },
+ { HNS_ROCE_CQE_V2_LOCAL_ACCESS_ERR, IB_WC_LOC_ACCESS_ERR },
+ { HNS_ROCE_CQE_V2_REMOTE_INVAL_REQ_ERR, IB_WC_REM_INV_REQ_ERR },
+ { HNS_ROCE_CQE_V2_REMOTE_ACCESS_ERR, IB_WC_REM_ACCESS_ERR },
+ { HNS_ROCE_CQE_V2_REMOTE_OP_ERR, IB_WC_REM_OP_ERR },
+ { HNS_ROCE_CQE_V2_TRANSPORT_RETRY_EXC_ERR,
+ IB_WC_RETRY_EXC_ERR },
+ { HNS_ROCE_CQE_V2_RNR_RETRY_EXC_ERR, IB_WC_RNR_RETRY_EXC_ERR },
+ { HNS_ROCE_CQE_V2_REMOTE_ABORT_ERR, IB_WC_REM_ABORT_ERR },
+ };
+
+ u32 cqe_status = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_STATUS_M,
+ V2_CQE_BYTE_4_STATUS_S);
+ int i;
+
+ wc->status = IB_WC_GENERAL_ERR;
+ for (i = 0; i < ARRAY_SIZE(map); i++)
+ if (cqe_status == map[i].cqe_status) {
+ wc->status = map[i].wc_status;
+ break;
+ }
+
+ if (likely(wc->status == IB_WC_SUCCESS ||
+ wc->status == IB_WC_WR_FLUSH_ERR))
+ return;
+
+ ibdev_err(&hr_dev->ib_dev, "error cqe status 0x%x:\n", cqe_status);
+ print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 4, cqe,
+ sizeof(*cqe), false);
+
+ /*
+ * Hip08 hardware cannot flush the WQEs in the SQ/RQ once the QP has
+ * entered the error state, so the driver has to assist with the flush as
+ * a workaround for this hardware limitation. The flush operation uses a
+ * mailbox to convey the QP state to the hardware, and the mailbox call
+ * can sleep because of the mutex protecting it, so the flush is deferred:
+ * once a WC error is detected, the flush work is scheduled.
+ */
+ if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
+ init_flush_work(hr_dev, qp);
+}
+
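The new get_cqe_status() above translates the hardware CQE status via a small array of {cqe_status, wc_status} pairs, falling back to IB_WC_GENERAL_ERR for anything unrecognized, which replaces the long switch statement removed further down. A minimal standalone sketch of the mapping-table pattern, with made-up status values:

#include <stdio.h>

/* Table-driven status translation: scan a small array of pairs and fall
 * back to a generic error. Values and names are made up for illustration. */
enum hw_status { HW_OK = 0, HW_LEN_ERR = 1, HW_FLUSH = 5 };
enum wc_status { WC_SUCCESS, WC_LEN_ERR, WC_FLUSH_ERR, WC_GENERAL_ERR };

static enum wc_status translate(enum hw_status hw)
{
    static const struct {
        enum hw_status hw;
        enum wc_status wc;
    } map[] = {
        { HW_OK,      WC_SUCCESS   },
        { HW_LEN_ERR, WC_LEN_ERR   },
        { HW_FLUSH,   WC_FLUSH_ERR },
    };

    for (unsigned int i = 0; i < sizeof(map) / sizeof(map[0]); i++)
        if (map[i].hw == hw)
            return map[i].wc;

    return WC_GENERAL_ERR;   /* anything unknown is a generic error */
}

int main(void)
{
    printf("%d %d %d\n", translate(HW_OK), translate(HW_FLUSH),
           translate((enum hw_status)9));
    return 0;
}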
static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
struct hns_roce_qp **cur_qp, struct ib_wc *wc)
{
@@ -2979,12 +3097,11 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
int is_send;
u16 wqe_ctr;
u32 opcode;
- u32 status;
int qpn;
int ret;
/* Find cqe according to consumer index */
- cqe = next_cqe_sw_v2(hr_cq);
+ cqe = get_sw_cqe_v2(hr_cq, hr_cq->cons_index);
if (!cqe)
return -EAGAIN;
@@ -3009,7 +3126,6 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
*cur_qp = hr_qp;
}
- hr_qp = *cur_qp;
wc->qp = &(*cur_qp)->ibqp;
wc->vendor_err = 0;
@@ -3044,77 +3160,8 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
++wq->tail;
}
- status = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_STATUS_M,
- V2_CQE_BYTE_4_STATUS_S);
- switch (status & HNS_ROCE_V2_CQE_STATUS_MASK) {
- case HNS_ROCE_CQE_V2_SUCCESS:
- wc->status = IB_WC_SUCCESS;
- break;
- case HNS_ROCE_CQE_V2_LOCAL_LENGTH_ERR:
- wc->status = IB_WC_LOC_LEN_ERR;
- break;
- case HNS_ROCE_CQE_V2_LOCAL_QP_OP_ERR:
- wc->status = IB_WC_LOC_QP_OP_ERR;
- break;
- case HNS_ROCE_CQE_V2_LOCAL_PROT_ERR:
- wc->status = IB_WC_LOC_PROT_ERR;
- break;
- case HNS_ROCE_CQE_V2_WR_FLUSH_ERR:
- wc->status = IB_WC_WR_FLUSH_ERR;
- break;
- case HNS_ROCE_CQE_V2_MW_BIND_ERR:
- wc->status = IB_WC_MW_BIND_ERR;
- break;
- case HNS_ROCE_CQE_V2_BAD_RESP_ERR:
- wc->status = IB_WC_BAD_RESP_ERR;
- break;
- case HNS_ROCE_CQE_V2_LOCAL_ACCESS_ERR:
- wc->status = IB_WC_LOC_ACCESS_ERR;
- break;
- case HNS_ROCE_CQE_V2_REMOTE_INVAL_REQ_ERR:
- wc->status = IB_WC_REM_INV_REQ_ERR;
- break;
- case HNS_ROCE_CQE_V2_REMOTE_ACCESS_ERR:
- wc->status = IB_WC_REM_ACCESS_ERR;
- break;
- case HNS_ROCE_CQE_V2_REMOTE_OP_ERR:
- wc->status = IB_WC_REM_OP_ERR;
- break;
- case HNS_ROCE_CQE_V2_TRANSPORT_RETRY_EXC_ERR:
- wc->status = IB_WC_RETRY_EXC_ERR;
- break;
- case HNS_ROCE_CQE_V2_RNR_RETRY_EXC_ERR:
- wc->status = IB_WC_RNR_RETRY_EXC_ERR;
- break;
- case HNS_ROCE_CQE_V2_REMOTE_ABORT_ERR:
- wc->status = IB_WC_REM_ABORT_ERR;
- break;
- default:
- wc->status = IB_WC_GENERAL_ERR;
- break;
- }
-
- /*
- * Hip08 hardware cannot flush the WQEs in SQ/RQ if the QP state gets
- * into errored mode. Hence, as a workaround to this hardware
- * limitation, driver needs to assist in flushing. But the flushing
- * operation uses mailbox to convey the QP state to the hardware and
- * which can sleep due to the mutex protection around the mailbox calls.
- * Hence, use the deferred flush for now. Once wc error detected, the
- * flushing operation is needed.
- */
- if (wc->status != IB_WC_SUCCESS &&
- wc->status != IB_WC_WR_FLUSH_ERR) {
- ibdev_err(&hr_dev->ib_dev, "error cqe status is: 0x%x\n",
- status & HNS_ROCE_V2_CQE_STATUS_MASK);
-
- if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &hr_qp->flush_flag))
- init_flush_work(hr_dev, hr_qp);
-
- return 0;
- }
-
- if (wc->status == IB_WC_WR_FLUSH_ERR)
+ get_cqe_status(hr_dev, *cur_qp, cqe, wc);
+ if (unlikely(wc->status != IB_WC_SUCCESS))
return 0;
if (is_send) {
@@ -3213,7 +3260,7 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_INV) &&
(roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_RQ_INLINE_S))) {
ret = hns_roce_handle_recv_inl_wqe(cqe, cur_qp, wc);
- if (ret)
+ if (unlikely(ret))
return -EAGAIN;
}
@@ -3514,29 +3561,18 @@ static void set_qpc_wqe_cnt(struct hns_roce_qp *hr_qp,
struct hns_roce_v2_qp_context *context,
struct hns_roce_v2_qp_context *qpc_mask)
{
- if (hr_qp->ibqp.qp_type == IB_QPT_GSI)
- roce_set_field(context->byte_4_sqpn_tst,
- V2_QPC_BYTE_4_SGE_SHIFT_M,
- V2_QPC_BYTE_4_SGE_SHIFT_S,
- ilog2((unsigned int)hr_qp->sge.sge_cnt));
- else
- roce_set_field(context->byte_4_sqpn_tst,
- V2_QPC_BYTE_4_SGE_SHIFT_M,
- V2_QPC_BYTE_4_SGE_SHIFT_S,
- hr_qp->sq.max_gs >
- HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE ?
- ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);
+ roce_set_field(context->byte_4_sqpn_tst,
+ V2_QPC_BYTE_4_SGE_SHIFT_M, V2_QPC_BYTE_4_SGE_SHIFT_S,
+ to_hr_hem_entries_shift(hr_qp->sge.sge_cnt,
+ hr_qp->sge.sge_shift));
roce_set_field(context->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S,
- ilog2((unsigned int)hr_qp->sq.wqe_cnt));
+ ilog2(hr_qp->sq.wqe_cnt));
roce_set_field(context->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S,
- (hr_qp->ibqp.qp_type == IB_QPT_XRC_INI ||
- hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT ||
- hr_qp->ibqp.srq) ? 0 :
- ilog2((unsigned int)hr_qp->rq.wqe_cnt));
+ ilog2(hr_qp->rq.wqe_cnt));
}
static void modify_qp_reset_to_init(struct ib_qp *ibqp,
@@ -3572,7 +3608,7 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
V2_QPC_BYTE_24_VLAN_ID_S, 0xfff);
- if (hr_qp->rdb_en)
+ if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
roce_set_bit(context->byte_68_rq_db,
V2_QPC_BYTE_68_RQ_RECORD_EN_S, 1);
@@ -3734,30 +3770,19 @@ static bool check_wqe_rq_mtt_count(struct hns_roce_dev *hr_dev,
return true;
}
-static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
- const struct ib_qp_attr *attr, int attr_mask,
- struct hns_roce_v2_qp_context *context,
- struct hns_roce_v2_qp_context *qpc_mask)
+static int config_qp_rq_buf(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *hr_qp,
+ struct hns_roce_v2_qp_context *context,
+ struct hns_roce_v2_qp_context *qpc_mask)
{
- const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
- struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
- struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
- struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct ib_qp *ibqp = &hr_qp->ibqp;
u64 mtts[MTT_MIN_COUNT] = { 0 };
- dma_addr_t dma_handle_3;
- dma_addr_t dma_handle_2;
u64 wqe_sge_ba;
u32 page_size;
- u8 port_num;
- u64 *mtts_3;
- u64 *mtts_2;
int count;
- u8 *dmac;
- u8 *smac;
- int port;
/* Search qp buf's mtts */
- page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
+ page_size = 1 << hr_qp->mtr.hem_cfg.buf_pg_shift;
count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr,
hr_qp->rq.offset / page_size, mtts,
MTT_MIN_COUNT, &wqe_sge_ba);
@@ -3765,29 +3790,6 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
if (!check_wqe_rq_mtt_count(hr_dev, hr_qp, count, page_size))
return -EINVAL;
- /* Search IRRL's mtts */
- mtts_2 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table,
- hr_qp->qpn, &dma_handle_2);
- if (!mtts_2) {
- ibdev_err(ibdev, "failed to find QP irrl_table\n");
- return -EINVAL;
- }
-
- /* Search TRRL's mtts */
- mtts_3 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.trrl_table,
- hr_qp->qpn, &dma_handle_3);
- if (!mtts_3) {
- ibdev_err(ibdev, "failed to find QP trrl_table\n");
- return -EINVAL;
- }
-
- if (attr_mask & IB_QP_ALT_PATH) {
- ibdev_err(ibdev, "INIT2RTR attr_mask (0x%x) error\n",
- attr_mask);
- return -EINVAL;
- }
-
- dmac = (u8 *)attr->ah_attr.roce.dmac;
context->wqe_sge_ba = cpu_to_le32(wqe_sge_ba >> 3);
qpc_mask->wqe_sge_ba = 0;
@@ -3804,17 +3806,16 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
roce_set_field(context->byte_12_sq_hop, V2_QPC_BYTE_12_SQ_HOP_NUM_M,
V2_QPC_BYTE_12_SQ_HOP_NUM_S,
- hr_dev->caps.wqe_sq_hop_num == HNS_ROCE_HOP_NUM_0 ?
- 0 : hr_dev->caps.wqe_sq_hop_num);
+ to_hr_hem_hopnum(hr_dev->caps.wqe_sq_hop_num,
+ hr_qp->sq.wqe_cnt));
roce_set_field(qpc_mask->byte_12_sq_hop, V2_QPC_BYTE_12_SQ_HOP_NUM_M,
V2_QPC_BYTE_12_SQ_HOP_NUM_S, 0);
roce_set_field(context->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_SGE_HOP_NUM_M,
V2_QPC_BYTE_20_SGE_HOP_NUM_S,
- ((ibqp->qp_type == IB_QPT_GSI) ||
- hr_qp->sq.max_gs > HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) ?
- hr_dev->caps.wqe_sge_hop_num : 0);
+ to_hr_hem_hopnum(hr_dev->caps.wqe_sge_hop_num,
+ hr_qp->sge.sge_cnt));
roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_SGE_HOP_NUM_M,
V2_QPC_BYTE_20_SGE_HOP_NUM_S, 0);
@@ -3822,8 +3823,9 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
roce_set_field(context->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_RQ_HOP_NUM_M,
V2_QPC_BYTE_20_RQ_HOP_NUM_S,
- hr_dev->caps.wqe_rq_hop_num == HNS_ROCE_HOP_NUM_0 ?
- 0 : hr_dev->caps.wqe_rq_hop_num);
+ to_hr_hem_hopnum(hr_dev->caps.wqe_rq_hop_num,
+ hr_qp->rq.wqe_cnt));
+
roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_RQ_HOP_NUM_M,
V2_QPC_BYTE_20_RQ_HOP_NUM_S, 0);
@@ -3831,7 +3833,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
roce_set_field(context->byte_16_buf_ba_pg_sz,
V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M,
V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S,
- hr_qp->wqe_bt_pg_shift + PG_SHIFT_OFFSET);
+ to_hr_hw_page_shift(hr_qp->mtr.hem_cfg.ba_pg_shift));
roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz,
V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M,
V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S, 0);
@@ -3839,50 +3841,181 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
roce_set_field(context->byte_16_buf_ba_pg_sz,
V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S,
- hr_dev->caps.mtt_buf_pg_sz + PG_SHIFT_OFFSET);
+ to_hr_hw_page_shift(hr_qp->mtr.hem_cfg.buf_pg_shift));
roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz,
V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S, 0);
- context->rq_cur_blk_addr = cpu_to_le32(mtts[0] >> PAGE_ADDR_SHIFT);
+ context->rq_cur_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[0]));
qpc_mask->rq_cur_blk_addr = 0;
roce_set_field(context->byte_92_srq_info,
V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M,
V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S,
- mtts[0] >> (32 + PAGE_ADDR_SHIFT));
+ upper_32_bits(to_hr_hw_page_addr(mtts[0])));
roce_set_field(qpc_mask->byte_92_srq_info,
V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M,
V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S, 0);
- context->rq_nxt_blk_addr = cpu_to_le32(mtts[1] >> PAGE_ADDR_SHIFT);
+ context->rq_nxt_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[1]));
qpc_mask->rq_nxt_blk_addr = 0;
roce_set_field(context->byte_104_rq_sge,
V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S,
- mtts[1] >> (32 + PAGE_ADDR_SHIFT));
+ upper_32_bits(to_hr_hw_page_addr(mtts[1])));
roce_set_field(qpc_mask->byte_104_rq_sge,
V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S, 0);
+ roce_set_field(context->byte_84_rq_ci_pi,
+ V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
+ V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, hr_qp->rq.head);
+ roce_set_field(qpc_mask->byte_84_rq_ci_pi,
+ V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
+ V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
+
+ roce_set_field(qpc_mask->byte_84_rq_ci_pi,
+ V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M,
+ V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S, 0);
+
+ return 0;
+}
+
+static int config_qp_sq_buf(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *hr_qp,
+ struct hns_roce_v2_qp_context *context,
+ struct hns_roce_v2_qp_context *qpc_mask)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ u64 sge_cur_blk = 0;
+ u64 sq_cur_blk = 0;
+ u32 page_size;
+ int count;
+
+ /* search qp buf's mtts */
+ count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, 0, &sq_cur_blk, 1, NULL);
+ if (count < 1) {
+ ibdev_err(ibdev, "failed to find QP(0x%lx) SQ buf.\n",
+ hr_qp->qpn);
+ return -EINVAL;
+ }
+ if (hr_qp->sge.sge_cnt > 0) {
+ page_size = 1 << hr_qp->mtr.hem_cfg.buf_pg_shift;
+ count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr,
+ hr_qp->sge.offset / page_size,
+ &sge_cur_blk, 1, NULL);
+ if (count < 1) {
+ ibdev_err(ibdev, "failed to find QP(0x%lx) SGE buf.\n",
+ hr_qp->qpn);
+ return -EINVAL;
+ }
+ }
+
+ /*
+ * In the v2 engine, software passes the context and the context mask to
+ * the hardware when modifying a QP. To modify some fields in the
+ * context, software must clear all bits of those fields in the context
+ * mask at the same time; bits left at 0x1 keep their current value.
+ */
+ context->sq_cur_blk_addr = cpu_to_le32(to_hr_hw_page_addr(sq_cur_blk));
+ roce_set_field(context->byte_168_irrl_idx,
+ V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
+ V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S,
+ upper_32_bits(to_hr_hw_page_addr(sq_cur_blk)));
+ qpc_mask->sq_cur_blk_addr = 0;
+ roce_set_field(qpc_mask->byte_168_irrl_idx,
+ V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
+ V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S, 0);
+
+ context->sq_cur_sge_blk_addr =
+ cpu_to_le32(to_hr_hw_page_addr(sge_cur_blk));
+ roce_set_field(context->byte_184_irrl_idx,
+ V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
+ V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S,
+ upper_32_bits(to_hr_hw_page_addr(sge_cur_blk)));
+ qpc_mask->sq_cur_sge_blk_addr = 0;
+ roce_set_field(qpc_mask->byte_184_irrl_idx,
+ V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
+ V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S, 0);
+
+ context->rx_sq_cur_blk_addr =
+ cpu_to_le32(to_hr_hw_page_addr(sq_cur_blk));
+ roce_set_field(context->byte_232_irrl_sge,
+ V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
+ V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S,
+ upper_32_bits(to_hr_hw_page_addr(sq_cur_blk)));
+ qpc_mask->rx_sq_cur_blk_addr = 0;
+ roce_set_field(qpc_mask->byte_232_irrl_sge,
+ V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
+ V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S, 0);
+
+ return 0;
+}
+
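The comment inside config_qp_sq_buf() above describes the context/context-mask convention used by all of these QPC writes: the new value goes into the context, and the same bits are cleared in the mask so the hardware applies them, while bits left at 1 keep their current value. A minimal user-space sketch of that pattern, using a made-up 4-bit field rather than any real V2_QPC_BYTE_* definition:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical field occupying bits [7:4] of a 32-bit context word. */
#define DEMO_FIELD_SHIFT 4
#define DEMO_FIELD_MASK  (0xfu << DEMO_FIELD_SHIFT)

static void demo_set_field(uint32_t *ctx, uint32_t *mask, uint32_t val)
{
	/* Write the new value into the context word... */
	*ctx = (*ctx & ~DEMO_FIELD_MASK) |
	       ((val << DEMO_FIELD_SHIFT) & DEMO_FIELD_MASK);
	/* ...and clear the same bits in the mask: 0 means "apply this field",
	 * while bits left at 1 keep the value the hardware already has. */
	*mask &= ~DEMO_FIELD_MASK;
}

int main(void)
{
	uint32_t ctx = 0;
	uint32_t mask = 0xffffffff;	/* start as "change nothing" */

	demo_set_field(&ctx, &mask, 0x9);
	printf("ctx=0x%08x mask=0x%08x\n", ctx, mask);
	return 0;
}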
+static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
+ const struct ib_qp_attr *attr, int attr_mask,
+ struct hns_roce_v2_qp_context *context,
+ struct hns_roce_v2_qp_context *qpc_mask)
+{
+ const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ dma_addr_t trrl_ba;
+ dma_addr_t irrl_ba;
+ u8 port_num;
+ u64 *mtts;
+ u8 *dmac;
+ u8 *smac;
+ int port;
+ int ret;
+
+ ret = config_qp_rq_buf(hr_dev, hr_qp, context, qpc_mask);
+ if (ret) {
+ ibdev_err(ibdev, "failed to config rq buf, ret = %d.\n", ret);
+ return ret;
+ }
+
+ /* Search IRRL's mtts */
+ mtts = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table,
+ hr_qp->qpn, &irrl_ba);
+ if (!mtts) {
+ ibdev_err(ibdev, "failed to find qp irrl_table.\n");
+ return -EINVAL;
+ }
+
+ /* Search TRRL's mtts */
+ mtts = hns_roce_table_find(hr_dev, &hr_dev->qp_table.trrl_table,
+ hr_qp->qpn, &trrl_ba);
+ if (!mtts) {
+ ibdev_err(ibdev, "failed to find qp trrl_table.\n");
+ return -EINVAL;
+ }
+
+ if (attr_mask & IB_QP_ALT_PATH) {
+ ibdev_err(ibdev, "INIT2RTR attr_mask (0x%x) error.\n",
+ attr_mask);
+ return -EINVAL;
+ }
+
roce_set_field(context->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M,
- V2_QPC_BYTE_132_TRRL_BA_S, dma_handle_3 >> 4);
+ V2_QPC_BYTE_132_TRRL_BA_S, trrl_ba >> 4);
roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M,
V2_QPC_BYTE_132_TRRL_BA_S, 0);
- context->trrl_ba = cpu_to_le32(dma_handle_3 >> (16 + 4));
+ context->trrl_ba = cpu_to_le32(trrl_ba >> (16 + 4));
qpc_mask->trrl_ba = 0;
roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_TRRL_BA_M,
V2_QPC_BYTE_140_TRRL_BA_S,
- (u32)(dma_handle_3 >> (32 + 16 + 4)));
+ (u32)(trrl_ba >> (32 + 16 + 4)));
roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_TRRL_BA_M,
V2_QPC_BYTE_140_TRRL_BA_S, 0);
- context->irrl_ba = cpu_to_le32(dma_handle_2 >> 6);
+ context->irrl_ba = cpu_to_le32(irrl_ba >> 6);
qpc_mask->irrl_ba = 0;
roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_IRRL_BA_M,
V2_QPC_BYTE_208_IRRL_BA_S,
- dma_handle_2 >> (32 + 6));
+ irrl_ba >> (32 + 6));
roce_set_field(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_IRRL_BA_M,
V2_QPC_BYTE_208_IRRL_BA_S, 0);
@@ -3897,6 +4030,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) : hr_qp->port;
smac = (u8 *)hr_dev->dev_addr[port];
+ dmac = (u8 *)attr->ah_attr.roce.dmac;
/* when dmac equals smac or loop_idc is 1, it should loopback */
if (ether_addr_equal_unaligned(dmac, smac) ||
hr_dev->loop_idc == 0x1) {
@@ -3919,6 +4053,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
grh->sgid_index));
roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S, 0);
+
memcpy(&(context->dmac), dmac, sizeof(u32));
roce_set_field(context->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
V2_QPC_BYTE_52_DMAC_S, *((u16 *)(&dmac[4])));
@@ -3928,7 +4063,8 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
/* mtu * (2^LP_PKTN_INI) should not be bigger than 1 message length (64KB) */
roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
- V2_QPC_BYTE_56_LP_PKTN_INI_S, 0);
+ V2_QPC_BYTE_56_LP_PKTN_INI_S,
+ ilog2(hr_dev->caps.max_sq_inline / IB_MTU_4096));
roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
V2_QPC_BYTE_56_LP_PKTN_INI_S, 0);
@@ -3942,16 +4078,6 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
V2_QPC_BYTE_24_MTU_S, 0);
- roce_set_field(context->byte_84_rq_ci_pi,
- V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
- V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, hr_qp->rq.head);
- roce_set_field(qpc_mask->byte_84_rq_ci_pi,
- V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
- V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
-
- roce_set_field(qpc_mask->byte_84_rq_ci_pi,
- V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M,
- V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S, 0);
roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0);
roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M,
@@ -3987,30 +4113,7 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
struct ib_device *ibdev = &hr_dev->ib_dev;
- u64 sge_cur_blk = 0;
- u64 sq_cur_blk = 0;
- u32 page_size;
- int count;
-
- /* Search qp buf's mtts */
- count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, 0, &sq_cur_blk, 1, NULL);
- if (count < 1) {
- ibdev_err(ibdev, "failed to find buf pa of QP(0x%lx)\n",
- hr_qp->qpn);
- return -EINVAL;
- }
-
- if (hr_qp->sge.offset) {
- page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
- count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr,
- hr_qp->sge.offset / page_size,
- &sge_cur_blk, 1, NULL);
- if (count < 1) {
- ibdev_err(ibdev, "failed to find sge pa of QP(0x%lx)\n",
- hr_qp->qpn);
- return -EINVAL;
- }
- }
+ int ret;
/* Not support alternate path and path migration */
if (attr_mask & (IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE)) {
@@ -4018,48 +4121,11 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
return -EINVAL;
}
- /*
- * In v2 engine, software pass context and context mask to hardware
- * when modifying qp. If software need modify some fields in context,
- * we should set all bits of the relevant fields in context mask to
- * 0 at the same time, else set them to 0x1.
- */
- context->sq_cur_blk_addr = cpu_to_le32(sq_cur_blk >> PAGE_ADDR_SHIFT);
- roce_set_field(context->byte_168_irrl_idx,
- V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
- V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S,
- sq_cur_blk >> (32 + PAGE_ADDR_SHIFT));
- qpc_mask->sq_cur_blk_addr = 0;
- roce_set_field(qpc_mask->byte_168_irrl_idx,
- V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
- V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S, 0);
-
- context->sq_cur_sge_blk_addr = ((ibqp->qp_type == IB_QPT_GSI) ||
- hr_qp->sq.max_gs > HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) ?
- cpu_to_le32(sge_cur_blk >>
- PAGE_ADDR_SHIFT) : 0;
- roce_set_field(context->byte_184_irrl_idx,
- V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
- V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S,
- ((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs >
- HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) ?
- (sge_cur_blk >>
- (32 + PAGE_ADDR_SHIFT)) : 0);
- qpc_mask->sq_cur_sge_blk_addr = 0;
- roce_set_field(qpc_mask->byte_184_irrl_idx,
- V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
- V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S, 0);
-
- context->rx_sq_cur_blk_addr =
- cpu_to_le32(sq_cur_blk >> PAGE_ADDR_SHIFT);
- roce_set_field(context->byte_232_irrl_sge,
- V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
- V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S,
- sq_cur_blk >> (32 + PAGE_ADDR_SHIFT));
- qpc_mask->rx_sq_cur_blk_addr = 0;
- roce_set_field(qpc_mask->byte_232_irrl_sge,
- V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
- V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S, 0);
+ ret = config_qp_sq_buf(hr_dev, hr_qp, context, qpc_mask);
+ if (ret) {
+ ibdev_err(ibdev, "failed to config sq buf, ret %d\n", ret);
+ return ret;
+ }
/*
* Set some fields in context to zero, because the default values
@@ -4108,21 +4174,6 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
return 0;
}
-static inline bool hns_roce_v2_check_qp_stat(enum ib_qp_state cur_state,
- enum ib_qp_state new_state)
-{
-
- if ((cur_state != IB_QPS_RESET &&
- (new_state == IB_QPS_ERR || new_state == IB_QPS_RESET)) ||
- ((cur_state == IB_QPS_RTS || cur_state == IB_QPS_SQD) &&
- (new_state == IB_QPS_RTS || new_state == IB_QPS_SQD)) ||
- (cur_state == IB_QPS_SQE && new_state == IB_QPS_RTS))
- return true;
-
- return false;
-
-}
-
static int hns_roce_v2_set_path(struct ib_qp *ibqp,
const struct ib_qp_attr *attr,
int attr_mask,
@@ -4226,6 +4277,28 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
return 0;
}
+static bool check_qp_state(enum ib_qp_state cur_state,
+ enum ib_qp_state new_state)
+{
+ static const bool sm[][IB_QPS_ERR + 1] = {
+ [IB_QPS_RESET] = { [IB_QPS_RESET] = true,
+ [IB_QPS_INIT] = true },
+ [IB_QPS_INIT] = { [IB_QPS_RESET] = true,
+ [IB_QPS_INIT] = true,
+ [IB_QPS_RTR] = true,
+ [IB_QPS_ERR] = true },
+ [IB_QPS_RTR] = { [IB_QPS_RESET] = true,
+ [IB_QPS_RTS] = true,
+ [IB_QPS_ERR] = true },
+ [IB_QPS_RTS] = { [IB_QPS_RESET] = true, [IB_QPS_ERR] = true },
+ [IB_QPS_SQD] = {},
+ [IB_QPS_SQE] = {},
+ [IB_QPS_ERR] = { [IB_QPS_RESET] = true, [IB_QPS_ERR] = true }
+ };
+
+ return sm[cur_state][new_state];
+}
+
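check_qp_state() above encodes the legal QP state transitions as a two-dimensional validity table instead of the boolean expression it replaces. A self-contained sketch of the same lookup idea, with hypothetical state names standing in for the IB_QPS_* values and a small usage check:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical states standing in for IB_QPS_RESET/INIT/RTR/RTS/ERR. */
enum qp_state { ST_RESET, ST_INIT, ST_RTR, ST_RTS, ST_ERR, ST_NUM };

static bool demo_check_transition(enum qp_state cur, enum qp_state next)
{
	/* Rows are the current state, columns the requested next state;
	 * anything not listed defaults to false, i.e. an illegal move. */
	static const bool allowed[ST_NUM][ST_NUM] = {
		[ST_RESET] = { [ST_RESET] = true, [ST_INIT] = true },
		[ST_INIT]  = { [ST_RESET] = true, [ST_INIT] = true,
			       [ST_RTR] = true,   [ST_ERR] = true },
		[ST_RTR]   = { [ST_RESET] = true, [ST_RTS] = true,
			       [ST_ERR] = true },
		[ST_RTS]   = { [ST_RESET] = true, [ST_ERR] = true },
		[ST_ERR]   = { [ST_RESET] = true, [ST_ERR] = true },
	};

	return allowed[cur][next];
}

int main(void)
{
	printf("INIT -> RTR allowed: %d\n", demo_check_transition(ST_INIT, ST_RTR));
	printf("RTS  -> RTR allowed: %d\n", demo_check_transition(ST_RTS, ST_RTR));
	return 0;
}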
static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
const struct ib_qp_attr *attr,
int attr_mask,
@@ -4237,6 +4310,11 @@ static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
int ret = 0;
+ if (!check_qp_state(cur_state, new_state)) {
+ ibdev_err(&hr_dev->ib_dev, "Illegal state for QP!\n");
+ return -EINVAL;
+ }
+
if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
memset(qpc_mask, 0, sizeof(*qpc_mask));
modify_qp_reset_to_init(ibqp, attr, attr_mask, context,
@@ -4247,23 +4325,11 @@ static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
ret = modify_qp_init_to_rtr(ibqp, attr, attr_mask, context,
qpc_mask);
- if (ret)
- goto out;
} else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
ret = modify_qp_rtr_to_rts(ibqp, attr, attr_mask, context,
qpc_mask);
- if (ret)
- goto out;
- } else if (hns_roce_v2_check_qp_stat(cur_state, new_state)) {
- /* Nothing */
- ;
- } else {
- ibdev_err(&hr_dev->ib_dev, "Illegal state for QP!\n");
- ret = -EINVAL;
- goto out;
}
-out:
return ret;
}
@@ -4554,19 +4620,20 @@ out:
return ret;
}
-static inline enum ib_qp_state to_ib_qp_st(enum hns_roce_v2_qp_state state)
+static int to_ib_qp_st(enum hns_roce_v2_qp_state state)
{
- switch (state) {
- case HNS_ROCE_QP_ST_RST: return IB_QPS_RESET;
- case HNS_ROCE_QP_ST_INIT: return IB_QPS_INIT;
- case HNS_ROCE_QP_ST_RTR: return IB_QPS_RTR;
- case HNS_ROCE_QP_ST_RTS: return IB_QPS_RTS;
- case HNS_ROCE_QP_ST_SQ_DRAINING:
- case HNS_ROCE_QP_ST_SQD: return IB_QPS_SQD;
- case HNS_ROCE_QP_ST_SQER: return IB_QPS_SQE;
- case HNS_ROCE_QP_ST_ERR: return IB_QPS_ERR;
- default: return -1;
- }
+ static const enum ib_qp_state map[] = {
+ [HNS_ROCE_QP_ST_RST] = IB_QPS_RESET,
+ [HNS_ROCE_QP_ST_INIT] = IB_QPS_INIT,
+ [HNS_ROCE_QP_ST_RTR] = IB_QPS_RTR,
+ [HNS_ROCE_QP_ST_RTS] = IB_QPS_RTS,
+ [HNS_ROCE_QP_ST_SQD] = IB_QPS_SQD,
+ [HNS_ROCE_QP_ST_SQER] = IB_QPS_SQE,
+ [HNS_ROCE_QP_ST_ERR] = IB_QPS_ERR,
+ [HNS_ROCE_QP_ST_SQ_DRAINING] = IB_QPS_SQD
+ };
+
+ return (state < ARRAY_SIZE(map)) ? map[state] : -1;
}
static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev,
@@ -4639,7 +4706,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
qp_attr->path_mig_state = IB_MIG_ARMED;
qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
if (hr_qp->ibqp.qp_type == IB_QPT_UD)
- qp_attr->qkey = V2_QKEY_VAL;
+ qp_attr->qkey = le32_to_cpu(context.qkey_xrcd);
qp_attr->rq_psn = roce_get_field(context.byte_108_rx_reqepsn,
V2_QPC_BYTE_108_RX_REQ_EPSN_M,
@@ -4838,6 +4905,184 @@ out:
return ret;
}
+static void hns_roce_v2_write_srqc(struct hns_roce_dev *hr_dev,
+ struct hns_roce_srq *srq, u32 pdn, u16 xrcd,
+ u32 cqn, void *mb_buf, u64 *mtts_wqe,
+ u64 *mtts_idx, dma_addr_t dma_handle_wqe,
+ dma_addr_t dma_handle_idx)
+{
+ struct hns_roce_srq_context *srq_context;
+
+ srq_context = mb_buf;
+ memset(srq_context, 0, sizeof(*srq_context));
+
+ roce_set_field(srq_context->byte_4_srqn_srqst, SRQC_BYTE_4_SRQ_ST_M,
+ SRQC_BYTE_4_SRQ_ST_S, 1);
+
+ roce_set_field(srq_context->byte_4_srqn_srqst,
+ SRQC_BYTE_4_SRQ_WQE_HOP_NUM_M,
+ SRQC_BYTE_4_SRQ_WQE_HOP_NUM_S,
+ to_hr_hem_hopnum(hr_dev->caps.srqwqe_hop_num,
+ srq->wqe_cnt));
+ roce_set_field(srq_context->byte_4_srqn_srqst,
+ SRQC_BYTE_4_SRQ_SHIFT_M, SRQC_BYTE_4_SRQ_SHIFT_S,
+ ilog2(srq->wqe_cnt));
+
+ roce_set_field(srq_context->byte_4_srqn_srqst, SRQC_BYTE_4_SRQN_M,
+ SRQC_BYTE_4_SRQN_S, srq->srqn);
+
+ roce_set_field(srq_context->byte_8_limit_wl, SRQC_BYTE_8_SRQ_LIMIT_WL_M,
+ SRQC_BYTE_8_SRQ_LIMIT_WL_S, 0);
+
+ roce_set_field(srq_context->byte_12_xrcd, SRQC_BYTE_12_SRQ_XRCD_M,
+ SRQC_BYTE_12_SRQ_XRCD_S, xrcd);
+
+ srq_context->wqe_bt_ba = cpu_to_le32((u32)(dma_handle_wqe >> 3));
+
+ roce_set_field(srq_context->byte_24_wqe_bt_ba,
+ SRQC_BYTE_24_SRQ_WQE_BT_BA_M,
+ SRQC_BYTE_24_SRQ_WQE_BT_BA_S,
+ dma_handle_wqe >> 35);
+
+ roce_set_field(srq_context->byte_28_rqws_pd, SRQC_BYTE_28_PD_M,
+ SRQC_BYTE_28_PD_S, pdn);
+ roce_set_field(srq_context->byte_28_rqws_pd, SRQC_BYTE_28_RQWS_M,
+ SRQC_BYTE_28_RQWS_S, srq->max_gs <= 0 ? 0 :
+ fls(srq->max_gs - 1));
+
+ srq_context->idx_bt_ba = cpu_to_le32(dma_handle_idx >> 3);
+ roce_set_field(srq_context->rsv_idx_bt_ba,
+ SRQC_BYTE_36_SRQ_IDX_BT_BA_M,
+ SRQC_BYTE_36_SRQ_IDX_BT_BA_S,
+ dma_handle_idx >> 35);
+
+ srq_context->idx_cur_blk_addr =
+ cpu_to_le32(to_hr_hw_page_addr(mtts_idx[0]));
+ roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
+ SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_M,
+ SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_S,
+ upper_32_bits(to_hr_hw_page_addr(mtts_idx[0])));
+ roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
+ SRQC_BYTE_44_SRQ_IDX_HOP_NUM_M,
+ SRQC_BYTE_44_SRQ_IDX_HOP_NUM_S,
+ to_hr_hem_hopnum(hr_dev->caps.idx_hop_num,
+ srq->wqe_cnt));
+
+ roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
+ SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_M,
+ SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_S,
+ to_hr_hw_page_shift(srq->idx_que.mtr.hem_cfg.ba_pg_shift));
+ roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
+ SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_M,
+ SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_S,
+ to_hr_hw_page_shift(srq->idx_que.mtr.hem_cfg.buf_pg_shift));
+
+ srq_context->idx_nxt_blk_addr =
+ cpu_to_le32(to_hr_hw_page_addr(mtts_idx[1]));
+ roce_set_field(srq_context->rsv_idxnxtblkaddr,
+ SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_M,
+ SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_S,
+ upper_32_bits(to_hr_hw_page_addr(mtts_idx[1])));
+ roce_set_field(srq_context->byte_56_xrc_cqn,
+ SRQC_BYTE_56_SRQ_XRC_CQN_M, SRQC_BYTE_56_SRQ_XRC_CQN_S,
+ cqn);
+ roce_set_field(srq_context->byte_56_xrc_cqn,
+ SRQC_BYTE_56_SRQ_WQE_BA_PG_SZ_M,
+ SRQC_BYTE_56_SRQ_WQE_BA_PG_SZ_S,
+ to_hr_hw_page_shift(srq->buf_mtr.hem_cfg.ba_pg_shift));
+ roce_set_field(srq_context->byte_56_xrc_cqn,
+ SRQC_BYTE_56_SRQ_WQE_BUF_PG_SZ_M,
+ SRQC_BYTE_56_SRQ_WQE_BUF_PG_SZ_S,
+ to_hr_hw_page_shift(srq->buf_mtr.hem_cfg.buf_pg_shift));
+
+ roce_set_bit(srq_context->db_record_addr_record_en,
+ SRQC_BYTE_60_SRQ_RECORD_EN_S, 0);
+}
+
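hns_roce_v2_write_srqc() above repeatedly splits a 64-bit DMA address across two context fields, e.g. wqe_bt_ba takes the address shifted right by 3 while the companion field takes the bits above that via '>> 35'. A small stand-alone sketch of that split-and-recombine, with a hypothetical 8-byte-aligned address (not driver code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical 64-bit, 8-byte-aligned bus address. */
	uint64_t ba = 0x0000004321abcd08ULL;

	/* The low field keeps bits [34:3] (the 32-bit store truncates the
	 * rest); bits [2:0] may be dropped because of the 8-byte alignment. */
	uint32_t lo = (uint32_t)(ba >> 3);
	/* The high field keeps bits [63:35]. */
	uint32_t hi = (uint32_t)(ba >> 35);

	/* Recombining the two fields shows no information was lost. */
	uint64_t back = ((uint64_t)hi << 35) | ((uint64_t)lo << 3);

	printf("ba=0x%016llx back=0x%016llx\n",
	       (unsigned long long)ba, (unsigned long long)back);
	return 0;
}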
+static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq,
+ struct ib_srq_attr *srq_attr,
+ enum ib_srq_attr_mask srq_attr_mask,
+ struct ib_udata *udata)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
+ struct hns_roce_srq *srq = to_hr_srq(ibsrq);
+ struct hns_roce_srq_context *srq_context;
+ struct hns_roce_srq_context *srqc_mask;
+ struct hns_roce_cmd_mailbox *mailbox;
+ int ret;
+
+ if (srq_attr_mask & IB_SRQ_LIMIT) {
+ if (srq_attr->srq_limit >= srq->wqe_cnt)
+ return -EINVAL;
+
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ srq_context = mailbox->buf;
+ srqc_mask = (struct hns_roce_srq_context *)mailbox->buf + 1;
+
+ memset(srqc_mask, 0xff, sizeof(*srqc_mask));
+
+ roce_set_field(srq_context->byte_8_limit_wl,
+ SRQC_BYTE_8_SRQ_LIMIT_WL_M,
+ SRQC_BYTE_8_SRQ_LIMIT_WL_S, srq_attr->srq_limit);
+ roce_set_field(srqc_mask->byte_8_limit_wl,
+ SRQC_BYTE_8_SRQ_LIMIT_WL_M,
+ SRQC_BYTE_8_SRQ_LIMIT_WL_S, 0);
+
+ ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, srq->srqn, 0,
+ HNS_ROCE_CMD_MODIFY_SRQC,
+ HNS_ROCE_CMD_TIMEOUT_MSECS);
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+ if (ret) {
+ ibdev_err(&hr_dev->ib_dev,
+ "failed to handle cmd of modifying SRQ, ret = %d.\n",
+ ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
+ struct hns_roce_srq *srq = to_hr_srq(ibsrq);
+ struct hns_roce_srq_context *srq_context;
+ struct hns_roce_cmd_mailbox *mailbox;
+ int limit_wl;
+ int ret;
+
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ srq_context = mailbox->buf;
+ ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, srq->srqn, 0,
+ HNS_ROCE_CMD_QUERY_SRQC,
+ HNS_ROCE_CMD_TIMEOUT_MSECS);
+ if (ret) {
+ ibdev_err(&hr_dev->ib_dev,
+ "failed to process cmd of querying SRQ, ret = %d.\n",
+ ret);
+ goto out;
+ }
+
+ limit_wl = roce_get_field(srq_context->byte_8_limit_wl,
+ SRQC_BYTE_8_SRQ_LIMIT_WL_M,
+ SRQC_BYTE_8_SRQ_LIMIT_WL_S);
+
+ attr->srq_limit = limit_wl;
+ attr->max_wr = srq->wqe_cnt - 1;
+ attr->max_sge = srq->max_gs - HNS_ROCE_RESERVED_SGE;
+
+out:
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+ return ret;
+}
+
static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
struct hns_roce_dev *hr_dev = to_hr_dev(cq->device);
@@ -4989,24 +5234,14 @@ static void set_eq_cons_index_v2(struct hns_roce_eq *eq)
hns_roce_write64(hr_dev, doorbell, eq->doorbell);
}
-static inline void *get_eqe_buf(struct hns_roce_eq *eq, unsigned long offset)
-{
- u32 buf_chk_sz;
-
- buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
- if (eq->buf.nbufs == 1)
- return eq->buf.direct.buf + offset % buf_chk_sz;
- else
- return eq->buf.page_list[offset / buf_chk_sz].buf +
- offset % buf_chk_sz;
-}
-
static struct hns_roce_aeqe *next_aeqe_sw_v2(struct hns_roce_eq *eq)
{
struct hns_roce_aeqe *aeqe;
- aeqe = get_eqe_buf(eq, (eq->cons_index & (eq->entries - 1)) *
- HNS_ROCE_AEQ_ENTRY_SIZE);
+ aeqe = hns_roce_buf_offset(eq->mtr.kmem,
+ (eq->cons_index & (eq->entries - 1)) *
+ HNS_ROCE_AEQ_ENTRY_SIZE);
+
return (roce_get_bit(aeqe->asyn, HNS_ROCE_V2_AEQ_AEQE_OWNER_S) ^
!!(eq->cons_index & eq->entries)) ? aeqe : NULL;
}
@@ -5103,8 +5338,9 @@ static struct hns_roce_ceqe *next_ceqe_sw_v2(struct hns_roce_eq *eq)
{
struct hns_roce_ceqe *ceqe;
- ceqe = get_eqe_buf(eq, (eq->cons_index & (eq->entries - 1)) *
- HNS_ROCE_CEQ_ENTRY_SIZE);
+ ceqe = hns_roce_buf_offset(eq->mtr.kmem,
+ (eq->cons_index & (eq->entries - 1)) *
+ HNS_ROCE_CEQ_ENTRY_SIZE);
return (!!(roce_get_bit(ceqe->comp, HNS_ROCE_V2_CEQ_CEQE_OWNER_S))) ^
(!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
}
@@ -5263,17 +5499,15 @@ static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, int eqn)
static void free_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
{
- if (!eq->hop_num || eq->hop_num == HNS_ROCE_HOP_NUM_0)
- hns_roce_mtr_cleanup(hr_dev, &eq->mtr);
- hns_roce_buf_free(hr_dev, eq->buf.size, &eq->buf);
+ hns_roce_mtr_destroy(hr_dev, &eq->mtr);
}
-static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev,
- struct hns_roce_eq *eq,
- void *mb_buf)
+static int config_eqc(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq,
+ void *mb_buf)
{
+ u64 eqe_ba[MTT_MIN_COUNT] = { 0 };
struct hns_roce_eq_context *eqc;
- u64 ba[MTT_MIN_COUNT] = { 0 };
+ u64 bt_ba = 0;
int count;
eqc = mb_buf;
@@ -5281,31 +5515,18 @@ static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev,
/* init eqc */
eq->doorbell = hr_dev->reg_base + ROCEE_VF_EQ_DB_CFG0_REG;
- eq->hop_num = hr_dev->caps.eqe_hop_num;
eq->cons_index = 0;
eq->over_ignore = HNS_ROCE_V2_EQ_OVER_IGNORE_0;
eq->coalesce = HNS_ROCE_V2_EQ_COALESCE_0;
eq->arm_st = HNS_ROCE_V2_EQ_ALWAYS_ARMED;
- eq->eqe_ba_pg_sz = hr_dev->caps.eqe_ba_pg_sz;
- eq->eqe_buf_pg_sz = hr_dev->caps.eqe_buf_pg_sz;
eq->shift = ilog2((unsigned int)eq->entries);
- /* if not muti-hop, eqe buffer only use one trunk */
- if (!eq->hop_num || eq->hop_num == HNS_ROCE_HOP_NUM_0) {
- eq->eqe_ba = eq->buf.direct.map;
- eq->cur_eqe_ba = eq->eqe_ba;
- if (eq->buf.npages > 1)
- eq->nxt_eqe_ba = eq->eqe_ba + (1 << eq->eqe_buf_pg_sz);
- else
- eq->nxt_eqe_ba = eq->eqe_ba;
- } else {
- count = hns_roce_mtr_find(hr_dev, &eq->mtr, 0, ba,
- MTT_MIN_COUNT, &eq->eqe_ba);
- eq->cur_eqe_ba = ba[0];
- if (count > 1)
- eq->nxt_eqe_ba = ba[1];
- else
- eq->nxt_eqe_ba = ba[0];
+ /* if not multi-hop, the eqe buffer only uses one trunk */
+ count = hns_roce_mtr_find(hr_dev, &eq->mtr, 0, eqe_ba, MTT_MIN_COUNT,
+ &bt_ba);
+ if (count < 1) {
+ dev_err(hr_dev->dev, "failed to find EQE mtr\n");
+ return -ENOBUFS;
}
/* set eqc state */
@@ -5339,12 +5560,12 @@ static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev,
/* set eqe_ba_pg_sz */
roce_set_field(eqc->byte_8, HNS_ROCE_EQC_BA_PG_SZ_M,
HNS_ROCE_EQC_BA_PG_SZ_S,
- eq->eqe_ba_pg_sz + PG_SHIFT_OFFSET);
+ to_hr_hw_page_shift(eq->mtr.hem_cfg.ba_pg_shift));
/* set eqe_buf_pg_sz */
roce_set_field(eqc->byte_8, HNS_ROCE_EQC_BUF_PG_SZ_M,
HNS_ROCE_EQC_BUF_PG_SZ_S,
- eq->eqe_buf_pg_sz + PG_SHIFT_OFFSET);
+ to_hr_hw_page_shift(eq->mtr.hem_cfg.buf_pg_shift));
/* set eq_producer_idx */
roce_set_field(eqc->byte_8, HNS_ROCE_EQC_PROD_INDX_M,
@@ -5363,13 +5584,13 @@ static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev,
HNS_ROCE_EQC_REPORT_TIMER_S,
HNS_ROCE_EQ_INIT_REPORT_TIMER);
- /* set eqe_ba [34:3] */
+ /* set bt_ba [34:3] */
roce_set_field(eqc->eqe_ba0, HNS_ROCE_EQC_EQE_BA_L_M,
- HNS_ROCE_EQC_EQE_BA_L_S, eq->eqe_ba >> 3);
+ HNS_ROCE_EQC_EQE_BA_L_S, bt_ba >> 3);
- /* set eqe_ba [64:35] */
+ /* set bt_ba [64:35] */
roce_set_field(eqc->eqe_ba1, HNS_ROCE_EQC_EQE_BA_H_M,
- HNS_ROCE_EQC_EQE_BA_H_S, eq->eqe_ba >> 35);
+ HNS_ROCE_EQC_EQE_BA_H_S, bt_ba >> 35);
/* set eq shift */
roce_set_field(eqc->byte_28, HNS_ROCE_EQC_SHIFT_M, HNS_ROCE_EQC_SHIFT_S,
@@ -5381,15 +5602,15 @@ static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev,
/* set cur_eqe_ba [27:12] */
roce_set_field(eqc->byte_28, HNS_ROCE_EQC_CUR_EQE_BA_L_M,
- HNS_ROCE_EQC_CUR_EQE_BA_L_S, eq->cur_eqe_ba >> 12);
+ HNS_ROCE_EQC_CUR_EQE_BA_L_S, eqe_ba[0] >> 12);
/* set cur_eqe_ba [59:28] */
roce_set_field(eqc->byte_32, HNS_ROCE_EQC_CUR_EQE_BA_M_M,
- HNS_ROCE_EQC_CUR_EQE_BA_M_S, eq->cur_eqe_ba >> 28);
+ HNS_ROCE_EQC_CUR_EQE_BA_M_S, eqe_ba[0] >> 28);
/* set cur_eqe_ba [63:60] */
roce_set_field(eqc->byte_36, HNS_ROCE_EQC_CUR_EQE_BA_H_M,
- HNS_ROCE_EQC_CUR_EQE_BA_H_S, eq->cur_eqe_ba >> 60);
+ HNS_ROCE_EQC_CUR_EQE_BA_H_S, eqe_ba[0] >> 60);
/* set eq consumer idx */
roce_set_field(eqc->byte_36, HNS_ROCE_EQC_CONS_INDX_M,
@@ -5397,97 +5618,38 @@ static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev,
/* set nex_eqe_ba[43:12] */
roce_set_field(eqc->nxt_eqe_ba0, HNS_ROCE_EQC_NXT_EQE_BA_L_M,
- HNS_ROCE_EQC_NXT_EQE_BA_L_S, eq->nxt_eqe_ba >> 12);
+ HNS_ROCE_EQC_NXT_EQE_BA_L_S, eqe_ba[1] >> 12);
/* set nex_eqe_ba[63:44] */
roce_set_field(eqc->nxt_eqe_ba1, HNS_ROCE_EQC_NXT_EQE_BA_H_M,
- HNS_ROCE_EQC_NXT_EQE_BA_H_S, eq->nxt_eqe_ba >> 44);
-}
+ HNS_ROCE_EQC_NXT_EQE_BA_H_S, eqe_ba[1] >> 44);
-static int map_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq,
- u32 page_shift)
-{
- struct hns_roce_buf_region region = {};
- dma_addr_t *buf_list = NULL;
- int ba_num;
- int ret;
-
- ba_num = DIV_ROUND_UP(PAGE_ALIGN(eq->entries * eq->eqe_size),
- 1 << page_shift);
- hns_roce_init_buf_region(&region, hr_dev->caps.eqe_hop_num, 0, ba_num);
-
- /* alloc a tmp list for storing eq buf address */
- ret = hns_roce_alloc_buf_list(&region, &buf_list, 1);
- if (ret) {
- dev_err(hr_dev->dev, "alloc eq buf_list error\n");
- return ret;
- }
-
- ba_num = hns_roce_get_kmem_bufs(hr_dev, buf_list, region.count,
- region.offset, &eq->buf);
- if (ba_num != region.count) {
- dev_err(hr_dev->dev, "get eqe buf err,expect %d,ret %d.\n",
- region.count, ba_num);
- ret = -ENOBUFS;
- goto done;
- }
-
- hns_roce_mtr_init(&eq->mtr, PAGE_SHIFT + hr_dev->caps.eqe_ba_pg_sz,
- page_shift);
- ret = hns_roce_mtr_attach(hr_dev, &eq->mtr, &buf_list, &region, 1);
- if (ret)
- dev_err(hr_dev->dev, "mtr attach error for eqe\n");
-
- goto done;
-
- hns_roce_mtr_cleanup(hr_dev, &eq->mtr);
-done:
- hns_roce_free_buf_list(&buf_list, 1);
-
- return ret;
+ return 0;
}
static int alloc_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
{
- struct hns_roce_buf *buf = &eq->buf;
- bool is_mhop = false;
- u32 page_shift;
- u32 mhop_num;
- u32 max_size;
- int ret;
+ struct hns_roce_buf_attr buf_attr = {};
+ int err;
- page_shift = PAGE_SHIFT + hr_dev->caps.eqe_buf_pg_sz;
- mhop_num = hr_dev->caps.eqe_hop_num;
- if (!mhop_num) {
- max_size = 1 << page_shift;
- buf->size = max_size;
- } else if (mhop_num == HNS_ROCE_HOP_NUM_0) {
- max_size = eq->entries * eq->eqe_size;
- buf->size = max_size;
- } else {
- max_size = 1 << page_shift;
- buf->size = PAGE_ALIGN(eq->entries * eq->eqe_size);
- is_mhop = true;
- }
+ if (hr_dev->caps.eqe_hop_num == HNS_ROCE_HOP_NUM_0)
+ eq->hop_num = 0;
+ else
+ eq->hop_num = hr_dev->caps.eqe_hop_num;
- ret = hns_roce_buf_alloc(hr_dev, buf->size, max_size, buf, page_shift);
- if (ret) {
- dev_err(hr_dev->dev, "alloc eq buf error\n");
- return ret;
- }
+ buf_attr.page_shift = hr_dev->caps.eqe_buf_pg_sz + HNS_HW_PAGE_SHIFT;
+ buf_attr.region[0].size = eq->entries * eq->eqe_size;
+ buf_attr.region[0].hopnum = eq->hop_num;
+ buf_attr.region_count = 1;
+ buf_attr.fixed_page = true;
- if (is_mhop) {
- ret = map_eq_buf(hr_dev, eq, page_shift);
- if (ret) {
- dev_err(hr_dev->dev, "map roce buf error\n");
- goto err_alloc;
- }
- }
+ err = hns_roce_mtr_create(hr_dev, &eq->mtr, &buf_attr,
+ hr_dev->caps.eqe_ba_pg_sz +
+ HNS_HW_PAGE_SHIFT, NULL, 0);
+ if (err)
+ dev_err(hr_dev->dev, "Failed to alloc EQE mtr, err %d\n", err);
- return 0;
-err_alloc:
- hns_roce_buf_free(hr_dev, buf->size, buf);
- return ret;
+ return err;
}
static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev,
@@ -5499,15 +5661,16 @@ static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev,
/* Allocate mailbox memory */
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
- if (IS_ERR(mailbox))
- return PTR_ERR(mailbox);
+ if (IS_ERR_OR_NULL(mailbox))
+ return -ENOMEM;
ret = alloc_eq_buf(hr_dev, eq);
- if (ret) {
- ret = -ENOMEM;
+ if (ret)
goto free_cmd_mbox;
- }
- hns_roce_config_eqc(hr_dev, eq, mailbox->buf);
+
+ ret = config_eqc(hr_dev, eq, mailbox->buf);
+ if (ret)
+ goto err_cmd_mbox;
ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, eq->eqn, 0,
eq_cmd, HNS_ROCE_CMD_TIMEOUT_MSECS);
@@ -5731,294 +5894,6 @@ static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev)
destroy_workqueue(hr_dev->irq_workq);
}
-static void hns_roce_v2_write_srqc(struct hns_roce_dev *hr_dev,
- struct hns_roce_srq *srq, u32 pdn, u16 xrcd,
- u32 cqn, void *mb_buf, u64 *mtts_wqe,
- u64 *mtts_idx, dma_addr_t dma_handle_wqe,
- dma_addr_t dma_handle_idx)
-{
- struct hns_roce_srq_context *srq_context;
-
- srq_context = mb_buf;
- memset(srq_context, 0, sizeof(*srq_context));
-
- roce_set_field(srq_context->byte_4_srqn_srqst, SRQC_BYTE_4_SRQ_ST_M,
- SRQC_BYTE_4_SRQ_ST_S, 1);
-
- roce_set_field(srq_context->byte_4_srqn_srqst,
- SRQC_BYTE_4_SRQ_WQE_HOP_NUM_M,
- SRQC_BYTE_4_SRQ_WQE_HOP_NUM_S,
- (hr_dev->caps.srqwqe_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 :
- hr_dev->caps.srqwqe_hop_num));
- roce_set_field(srq_context->byte_4_srqn_srqst,
- SRQC_BYTE_4_SRQ_SHIFT_M, SRQC_BYTE_4_SRQ_SHIFT_S,
- ilog2(srq->wqe_cnt));
-
- roce_set_field(srq_context->byte_4_srqn_srqst, SRQC_BYTE_4_SRQN_M,
- SRQC_BYTE_4_SRQN_S, srq->srqn);
-
- roce_set_field(srq_context->byte_8_limit_wl, SRQC_BYTE_8_SRQ_LIMIT_WL_M,
- SRQC_BYTE_8_SRQ_LIMIT_WL_S, 0);
-
- roce_set_field(srq_context->byte_12_xrcd, SRQC_BYTE_12_SRQ_XRCD_M,
- SRQC_BYTE_12_SRQ_XRCD_S, xrcd);
-
- srq_context->wqe_bt_ba = cpu_to_le32((u32)(dma_handle_wqe >> 3));
-
- roce_set_field(srq_context->byte_24_wqe_bt_ba,
- SRQC_BYTE_24_SRQ_WQE_BT_BA_M,
- SRQC_BYTE_24_SRQ_WQE_BT_BA_S,
- dma_handle_wqe >> 35);
-
- roce_set_field(srq_context->byte_28_rqws_pd, SRQC_BYTE_28_PD_M,
- SRQC_BYTE_28_PD_S, pdn);
- roce_set_field(srq_context->byte_28_rqws_pd, SRQC_BYTE_28_RQWS_M,
- SRQC_BYTE_28_RQWS_S, srq->max_gs <= 0 ? 0 :
- fls(srq->max_gs - 1));
-
- srq_context->idx_bt_ba = cpu_to_le32(dma_handle_idx >> 3);
- roce_set_field(srq_context->rsv_idx_bt_ba,
- SRQC_BYTE_36_SRQ_IDX_BT_BA_M,
- SRQC_BYTE_36_SRQ_IDX_BT_BA_S,
- dma_handle_idx >> 35);
-
- srq_context->idx_cur_blk_addr =
- cpu_to_le32(mtts_idx[0] >> PAGE_ADDR_SHIFT);
- roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
- SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_M,
- SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_S,
- mtts_idx[0] >> (32 + PAGE_ADDR_SHIFT));
- roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
- SRQC_BYTE_44_SRQ_IDX_HOP_NUM_M,
- SRQC_BYTE_44_SRQ_IDX_HOP_NUM_S,
- hr_dev->caps.idx_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 :
- hr_dev->caps.idx_hop_num);
-
- roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
- SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_M,
- SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_S,
- hr_dev->caps.idx_ba_pg_sz + PG_SHIFT_OFFSET);
- roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
- SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_M,
- SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_S,
- hr_dev->caps.idx_buf_pg_sz + PG_SHIFT_OFFSET);
-
- srq_context->idx_nxt_blk_addr =
- cpu_to_le32(mtts_idx[1] >> PAGE_ADDR_SHIFT);
- roce_set_field(srq_context->rsv_idxnxtblkaddr,
- SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_M,
- SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_S,
- mtts_idx[1] >> (32 + PAGE_ADDR_SHIFT));
- roce_set_field(srq_context->byte_56_xrc_cqn,
- SRQC_BYTE_56_SRQ_XRC_CQN_M, SRQC_BYTE_56_SRQ_XRC_CQN_S,
- cqn);
- roce_set_field(srq_context->byte_56_xrc_cqn,
- SRQC_BYTE_56_SRQ_WQE_BA_PG_SZ_M,
- SRQC_BYTE_56_SRQ_WQE_BA_PG_SZ_S,
- hr_dev->caps.srqwqe_ba_pg_sz + PG_SHIFT_OFFSET);
- roce_set_field(srq_context->byte_56_xrc_cqn,
- SRQC_BYTE_56_SRQ_WQE_BUF_PG_SZ_M,
- SRQC_BYTE_56_SRQ_WQE_BUF_PG_SZ_S,
- hr_dev->caps.srqwqe_buf_pg_sz + PG_SHIFT_OFFSET);
-
- roce_set_bit(srq_context->db_record_addr_record_en,
- SRQC_BYTE_60_SRQ_RECORD_EN_S, 0);
-}
-
-static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq,
- struct ib_srq_attr *srq_attr,
- enum ib_srq_attr_mask srq_attr_mask,
- struct ib_udata *udata)
-{
- struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
- struct hns_roce_srq *srq = to_hr_srq(ibsrq);
- struct hns_roce_srq_context *srq_context;
- struct hns_roce_srq_context *srqc_mask;
- struct hns_roce_cmd_mailbox *mailbox;
- int ret;
-
- if (srq_attr_mask & IB_SRQ_LIMIT) {
- if (srq_attr->srq_limit >= srq->wqe_cnt)
- return -EINVAL;
-
- mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
- if (IS_ERR(mailbox))
- return PTR_ERR(mailbox);
-
- srq_context = mailbox->buf;
- srqc_mask = (struct hns_roce_srq_context *)mailbox->buf + 1;
-
- memset(srqc_mask, 0xff, sizeof(*srqc_mask));
-
- roce_set_field(srq_context->byte_8_limit_wl,
- SRQC_BYTE_8_SRQ_LIMIT_WL_M,
- SRQC_BYTE_8_SRQ_LIMIT_WL_S, srq_attr->srq_limit);
- roce_set_field(srqc_mask->byte_8_limit_wl,
- SRQC_BYTE_8_SRQ_LIMIT_WL_M,
- SRQC_BYTE_8_SRQ_LIMIT_WL_S, 0);
-
- ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, srq->srqn, 0,
- HNS_ROCE_CMD_MODIFY_SRQC,
- HNS_ROCE_CMD_TIMEOUT_MSECS);
- hns_roce_free_cmd_mailbox(hr_dev, mailbox);
- if (ret) {
- ibdev_err(&hr_dev->ib_dev,
- "failed to process cmd when modifying SRQ, ret = %d\n",
- ret);
- return ret;
- }
- }
-
- return 0;
-}
-
-static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
-{
- struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
- struct hns_roce_srq *srq = to_hr_srq(ibsrq);
- struct hns_roce_srq_context *srq_context;
- struct hns_roce_cmd_mailbox *mailbox;
- int limit_wl;
- int ret;
-
- mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
- if (IS_ERR(mailbox))
- return PTR_ERR(mailbox);
-
- srq_context = mailbox->buf;
- ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, srq->srqn, 0,
- HNS_ROCE_CMD_QUERY_SRQC,
- HNS_ROCE_CMD_TIMEOUT_MSECS);
- if (ret) {
- ibdev_err(&hr_dev->ib_dev,
- "failed to process cmd when querying SRQ, ret = %d\n",
- ret);
- goto out;
- }
-
- limit_wl = roce_get_field(srq_context->byte_8_limit_wl,
- SRQC_BYTE_8_SRQ_LIMIT_WL_M,
- SRQC_BYTE_8_SRQ_LIMIT_WL_S);
-
- attr->srq_limit = limit_wl;
- attr->max_wr = srq->wqe_cnt - 1;
- attr->max_sge = srq->max_gs;
-
- memcpy(srq_context, mailbox->buf, sizeof(*srq_context));
-
-out:
- hns_roce_free_cmd_mailbox(hr_dev, mailbox);
- return ret;
-}
-
-static int find_empty_entry(struct hns_roce_idx_que *idx_que,
- unsigned long size)
-{
- int wqe_idx;
-
- if (unlikely(bitmap_full(idx_que->bitmap, size)))
- return -ENOSPC;
-
- wqe_idx = find_first_zero_bit(idx_que->bitmap, size);
-
- bitmap_set(idx_que->bitmap, wqe_idx, 1);
-
- return wqe_idx;
-}
-
-static void fill_idx_queue(struct hns_roce_idx_que *idx_que,
- int cur_idx, int wqe_idx)
-{
- unsigned int *addr;
-
- addr = (unsigned int *)hns_roce_buf_offset(&idx_que->idx_buf,
- cur_idx * idx_que->entry_sz);
- *addr = wqe_idx;
-}
-
-static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
- const struct ib_recv_wr *wr,
- const struct ib_recv_wr **bad_wr)
-{
- struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
- struct hns_roce_srq *srq = to_hr_srq(ibsrq);
- struct hns_roce_v2_wqe_data_seg *dseg;
- struct hns_roce_v2_db srq_db;
- unsigned long flags;
- int ret = 0;
- int wqe_idx;
- void *wqe;
- int nreq;
- int ind;
- int i;
-
- spin_lock_irqsave(&srq->lock, flags);
-
- ind = srq->head & (srq->wqe_cnt - 1);
-
- for (nreq = 0; wr; ++nreq, wr = wr->next) {
- if (unlikely(wr->num_sge > srq->max_gs)) {
- ret = -EINVAL;
- *bad_wr = wr;
- break;
- }
-
- if (unlikely(srq->head == srq->tail)) {
- ret = -ENOMEM;
- *bad_wr = wr;
- break;
- }
-
- wqe_idx = find_empty_entry(&srq->idx_que, srq->wqe_cnt);
- if (wqe_idx < 0) {
- ret = -ENOMEM;
- *bad_wr = wr;
- break;
- }
-
- fill_idx_queue(&srq->idx_que, ind, wqe_idx);
- wqe = get_srq_wqe(srq, wqe_idx);
- dseg = (struct hns_roce_v2_wqe_data_seg *)wqe;
-
- for (i = 0; i < wr->num_sge; ++i) {
- dseg[i].len = cpu_to_le32(wr->sg_list[i].length);
- dseg[i].lkey = cpu_to_le32(wr->sg_list[i].lkey);
- dseg[i].addr = cpu_to_le64(wr->sg_list[i].addr);
- }
-
- if (i < srq->max_gs) {
- dseg[i].len = 0;
- dseg[i].lkey = cpu_to_le32(0x100);
- dseg[i].addr = 0;
- }
-
- srq->wrid[wqe_idx] = wr->wr_id;
- ind = (ind + 1) & (srq->wqe_cnt - 1);
- }
-
- if (likely(nreq)) {
- srq->head += nreq;
-
- /*
- * Make sure that descriptors are written before
- * doorbell record.
- */
- wmb();
-
- srq_db.byte_4 =
- cpu_to_le32(HNS_ROCE_V2_SRQ_DB << V2_DB_BYTE_4_CMD_S |
- (srq->srqn & V2_DB_BYTE_4_TAG_M));
- srq_db.parameter = cpu_to_le32(srq->head);
-
- hns_roce_write64(hr_dev, (__le32 *)&srq_db, srq->db_reg_l);
-
- }
-
- spin_unlock_irqrestore(&srq->lock, flags);
-
- return ret;
-}
-
static const struct hns_roce_dfx_hw hns_roce_dfx_hw_v2 = {
.query_cqc_info = hns_roce_v2_query_cqc_info,
};
@@ -6161,7 +6036,7 @@ error_failed_kzalloc:
static void __hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
bool reset)
{
- struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)handle->priv;
+ struct hns_roce_dev *hr_dev = handle->priv;
if (!hr_dev)
return;
@@ -6241,7 +6116,7 @@ static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle)
handle->rinfo.reset_state = HNS_ROCE_STATE_RST_DOWN;
clear_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state);
- hr_dev = (struct hns_roce_dev *)handle->priv;
+ hr_dev = handle->priv;
if (!hr_dev)
return 0;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index 82dd9f6f4845..e176b0aaa4ac 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -92,7 +92,9 @@
#define HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ PAGE_SIZE
#define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFFF000
#define HNS_ROCE_V2_MAX_INNER_MTPT_NUM 2
-#define HNS_ROCE_INVALID_LKEY 0x100
+#define HNS_ROCE_INVALID_LKEY 0x0
+#define HNS_ROCE_INVALID_SGE_LENGTH 0x80000000
+
#define HNS_ROCE_CMQ_TX_TIMEOUT 30000
#define HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE 2
#define HNS_ROCE_V2_RSV_QPS 8
@@ -1241,10 +1243,9 @@ struct hns_roce_func_clear {
};
#define FUNC_CLEAR_RST_FUN_DONE_S 0
-/* Each physical function manages up to 248 virtual functions;
- * it takes up to 100ms for each function to execute clear;
- * if an abnormal reset occurs, it is executed twice at most;
- * so it takes up to 249 * 2 * 100ms.
+/* Each physical function manages up to 248 virtual functions; it takes up to
+ * 100ms for each function to execute the clear. If an abnormal reset occurs,
+ * the clear is executed twice at most, so it takes up to 249 * 2 * 100ms.
*/
#define HNS_ROCE_V2_FUNC_CLEAR_TIMEOUT_MSECS (249 * 2 * 100)
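For a rough sense of scale (simple arithmetic on the figures in the comment above, not a value taken from elsewhere): 249 functions * 2 passes * 100 ms = 49,800 ms, so this timeout allows just under 50 seconds for a worst-case function clear.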
#define HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_INTERVAL 40
@@ -1648,7 +1649,7 @@ struct hns_roce_query_pf_caps_c {
struct hns_roce_query_pf_caps_d {
__le32 wq_hop_num_max_srqs;
__le16 srq_depth;
- __le16 rsv;
+ __le16 cap_flags_ex;
__le32 num_ceqs_ceq_depth;
__le32 arm_st_aeq_depth;
__le32 num_uars_rsv_pds;
@@ -1978,7 +1979,7 @@ int hns_roce_v2_query_cqc_info(struct hns_roce_dev *hr_dev, u32 cqn,
static inline void hns_roce_write64(struct hns_roce_dev *hr_dev, __le32 val[2],
void __iomem *dest)
{
- struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
struct hnae3_handle *handle = priv->handle;
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index d0031d559213..50763cf4fa3d 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -233,7 +233,6 @@ static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num,
enum ib_mtu mtu;
u8 port;
- assert(port_num > 0);
port = port_num - 1;
/* props being zeroed by the caller, avoid zeroing it here */
@@ -579,33 +578,12 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
int ret;
struct device *dev = hr_dev->dev;
- ret = hns_roce_init_hem_table(hr_dev, &hr_dev->mr_table.mtt_table,
- HEM_TYPE_MTT, hr_dev->caps.mtt_entry_sz,
- hr_dev->caps.num_mtt_segs, 1);
- if (ret) {
- dev_err(dev, "Failed to init MTT context memory, aborting.\n");
- return ret;
- }
-
- if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE)) {
- ret = hns_roce_init_hem_table(hr_dev,
- &hr_dev->mr_table.mtt_cqe_table,
- HEM_TYPE_CQE,
- hr_dev->caps.mtt_entry_sz,
- hr_dev->caps.num_cqe_segs, 1);
- if (ret) {
- dev_err(dev,
- "Failed to init CQE context memory, aborting.\n");
- goto err_unmap_cqe;
- }
- }
-
ret = hns_roce_init_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table,
HEM_TYPE_MTPT, hr_dev->caps.mtpt_entry_sz,
hr_dev->caps.num_mtpts, 1);
if (ret) {
dev_err(dev, "Failed to init MTPT context memory, aborting.\n");
- goto err_unmap_mtt;
+ return ret;
}
ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qp_table.qp_table,
@@ -660,32 +638,6 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
}
}
- if (hr_dev->caps.num_srqwqe_segs) {
- ret = hns_roce_init_hem_table(hr_dev,
- &hr_dev->mr_table.mtt_srqwqe_table,
- HEM_TYPE_SRQWQE,
- hr_dev->caps.mtt_entry_sz,
- hr_dev->caps.num_srqwqe_segs, 1);
- if (ret) {
- dev_err(dev,
- "Failed to init MTT srqwqe memory, aborting.\n");
- goto err_unmap_srq;
- }
- }
-
- if (hr_dev->caps.num_idx_segs) {
- ret = hns_roce_init_hem_table(hr_dev,
- &hr_dev->mr_table.mtt_idx_table,
- HEM_TYPE_IDX,
- hr_dev->caps.idx_entry_sz,
- hr_dev->caps.num_idx_segs, 1);
- if (ret) {
- dev_err(dev,
- "Failed to init MTT idx memory, aborting.\n");
- goto err_unmap_srqwqe;
- }
- }
-
if (hr_dev->caps.sccc_entry_sz) {
ret = hns_roce_init_hem_table(hr_dev,
&hr_dev->qp_table.sccc_table,
@@ -695,7 +647,7 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
if (ret) {
dev_err(dev,
"Failed to init SCC context memory, aborting.\n");
- goto err_unmap_idx;
+ goto err_unmap_srq;
}
}
@@ -733,17 +685,6 @@ err_unmap_ctx:
if (hr_dev->caps.sccc_entry_sz)
hns_roce_cleanup_hem_table(hr_dev,
&hr_dev->qp_table.sccc_table);
-
-err_unmap_idx:
- if (hr_dev->caps.num_idx_segs)
- hns_roce_cleanup_hem_table(hr_dev,
- &hr_dev->mr_table.mtt_idx_table);
-
-err_unmap_srqwqe:
- if (hr_dev->caps.num_srqwqe_segs)
- hns_roce_cleanup_hem_table(hr_dev,
- &hr_dev->mr_table.mtt_srqwqe_table);
-
err_unmap_srq:
if (hr_dev->caps.srqc_entry_sz)
hns_roce_cleanup_hem_table(hr_dev, &hr_dev->srq_table.table);
@@ -765,14 +706,6 @@ err_unmap_qp:
err_unmap_dmpt:
hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table);
-err_unmap_mtt:
- if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
- hns_roce_cleanup_hem_table(hr_dev,
- &hr_dev->mr_table.mtt_cqe_table);
-
-err_unmap_cqe:
- hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtt_table);
-
return ret;
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index 176f34692f88..4c0bbb12770d 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -66,645 +66,89 @@ int hns_roce_hw_destroy_mpt(struct hns_roce_dev *hr_dev,
HNS_ROCE_CMD_TIMEOUT_MSECS);
}
-static int hns_roce_buddy_alloc(struct hns_roce_buddy *buddy, int order,
- unsigned long *seg)
+static int alloc_mr_key(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
+ u32 pd, u64 iova, u64 size, u32 access)
{
- int o;
- u32 m;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ unsigned long obj = 0;
+ int err;
- spin_lock(&buddy->lock);
-
- for (o = order; o <= buddy->max_order; ++o) {
- if (buddy->num_free[o]) {
- m = 1 << (buddy->max_order - o);
- *seg = find_first_bit(buddy->bits[o], m);
- if (*seg < m)
- goto found;
- }
- }
- spin_unlock(&buddy->lock);
- return -EINVAL;
-
- found:
- clear_bit(*seg, buddy->bits[o]);
- --buddy->num_free[o];
-
- while (o > order) {
- --o;
- *seg <<= 1;
- set_bit(*seg ^ 1, buddy->bits[o]);
- ++buddy->num_free[o];
- }
-
- spin_unlock(&buddy->lock);
-
- *seg <<= order;
- return 0;
-}
-
-static void hns_roce_buddy_free(struct hns_roce_buddy *buddy, unsigned long seg,
- int order)
-{
- seg >>= order;
-
- spin_lock(&buddy->lock);
-
- while (test_bit(seg ^ 1, buddy->bits[order])) {
- clear_bit(seg ^ 1, buddy->bits[order]);
- --buddy->num_free[order];
- seg >>= 1;
- ++order;
- }
-
- set_bit(seg, buddy->bits[order]);
- ++buddy->num_free[order];
-
- spin_unlock(&buddy->lock);
-}
-
-static int hns_roce_buddy_init(struct hns_roce_buddy *buddy, int max_order)
-{
- int i, s;
-
- buddy->max_order = max_order;
- spin_lock_init(&buddy->lock);
- buddy->bits = kcalloc(buddy->max_order + 1,
- sizeof(*buddy->bits),
- GFP_KERNEL);
- buddy->num_free = kcalloc(buddy->max_order + 1,
- sizeof(*buddy->num_free),
- GFP_KERNEL);
- if (!buddy->bits || !buddy->num_free)
- goto err_out;
-
- for (i = 0; i <= buddy->max_order; ++i) {
- s = BITS_TO_LONGS(1 << (buddy->max_order - i));
- buddy->bits[i] = kcalloc(s, sizeof(long), GFP_KERNEL |
- __GFP_NOWARN);
- if (!buddy->bits[i]) {
- buddy->bits[i] = vzalloc(array_size(s, sizeof(long)));
- if (!buddy->bits[i])
- goto err_out_free;
- }
- }
-
- set_bit(0, buddy->bits[buddy->max_order]);
- buddy->num_free[buddy->max_order] = 1;
-
- return 0;
-
-err_out_free:
- for (i = 0; i <= buddy->max_order; ++i)
- kvfree(buddy->bits[i]);
-
-err_out:
- kfree(buddy->bits);
- kfree(buddy->num_free);
- return -ENOMEM;
-}
-
-static void hns_roce_buddy_cleanup(struct hns_roce_buddy *buddy)
-{
- int i;
-
- for (i = 0; i <= buddy->max_order; ++i)
- kvfree(buddy->bits[i]);
-
- kfree(buddy->bits);
- kfree(buddy->num_free);
-}
-
-static int hns_roce_alloc_mtt_range(struct hns_roce_dev *hr_dev, int order,
- unsigned long *seg, u32 mtt_type)
-{
- struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
- struct hns_roce_hem_table *table;
- struct hns_roce_buddy *buddy;
- int ret;
-
- switch (mtt_type) {
- case MTT_TYPE_WQE:
- buddy = &mr_table->mtt_buddy;
- table = &mr_table->mtt_table;
- break;
- case MTT_TYPE_CQE:
- buddy = &mr_table->mtt_cqe_buddy;
- table = &mr_table->mtt_cqe_table;
- break;
- case MTT_TYPE_SRQWQE:
- buddy = &mr_table->mtt_srqwqe_buddy;
- table = &mr_table->mtt_srqwqe_table;
- break;
- case MTT_TYPE_IDX:
- buddy = &mr_table->mtt_idx_buddy;
- table = &mr_table->mtt_idx_table;
- break;
- default:
- dev_err(hr_dev->dev, "Unsupport MTT table type: %d\n",
- mtt_type);
- return -EINVAL;
- }
-
- ret = hns_roce_buddy_alloc(buddy, order, seg);
- if (ret)
- return ret;
-
- ret = hns_roce_table_get_range(hr_dev, table, *seg,
- *seg + (1 << order) - 1);
- if (ret) {
- hns_roce_buddy_free(buddy, *seg, order);
- return ret;
- }
-
- return 0;
-}
-
-int hns_roce_mtt_init(struct hns_roce_dev *hr_dev, int npages, int page_shift,
- struct hns_roce_mtt *mtt)
-{
- int ret;
- int i;
-
- /* Page num is zero, correspond to DMA memory register */
- if (!npages) {
- mtt->order = -1;
- mtt->page_shift = HNS_ROCE_HEM_PAGE_SHIFT;
- return 0;
- }
-
- /* Note: if page_shift is zero, FAST memory register */
- mtt->page_shift = page_shift;
-
- /* Compute MTT entry necessary */
- for (mtt->order = 0, i = HNS_ROCE_MTT_ENTRY_PER_SEG; i < npages;
- i <<= 1)
- ++mtt->order;
-
- /* Allocate MTT entry */
- ret = hns_roce_alloc_mtt_range(hr_dev, mtt->order, &mtt->first_seg,
- mtt->mtt_type);
- if (ret)
- return -ENOMEM;
-
- return 0;
-}
-
-void hns_roce_mtt_cleanup(struct hns_roce_dev *hr_dev, struct hns_roce_mtt *mtt)
-{
- struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
-
- if (mtt->order < 0)
- return;
-
- switch (mtt->mtt_type) {
- case MTT_TYPE_WQE:
- hns_roce_buddy_free(&mr_table->mtt_buddy, mtt->first_seg,
- mtt->order);
- hns_roce_table_put_range(hr_dev, &mr_table->mtt_table,
- mtt->first_seg,
- mtt->first_seg + (1 << mtt->order) - 1);
- break;
- case MTT_TYPE_CQE:
- hns_roce_buddy_free(&mr_table->mtt_cqe_buddy, mtt->first_seg,
- mtt->order);
- hns_roce_table_put_range(hr_dev, &mr_table->mtt_cqe_table,
- mtt->first_seg,
- mtt->first_seg + (1 << mtt->order) - 1);
- break;
- case MTT_TYPE_SRQWQE:
- hns_roce_buddy_free(&mr_table->mtt_srqwqe_buddy, mtt->first_seg,
- mtt->order);
- hns_roce_table_put_range(hr_dev, &mr_table->mtt_srqwqe_table,
- mtt->first_seg,
- mtt->first_seg + (1 << mtt->order) - 1);
- break;
- case MTT_TYPE_IDX:
- hns_roce_buddy_free(&mr_table->mtt_idx_buddy, mtt->first_seg,
- mtt->order);
- hns_roce_table_put_range(hr_dev, &mr_table->mtt_idx_table,
- mtt->first_seg,
- mtt->first_seg + (1 << mtt->order) - 1);
- break;
- default:
- dev_err(hr_dev->dev,
- "Unsupport mtt type %d, clean mtt failed\n",
- mtt->mtt_type);
- break;
- }
-}
-
-static void hns_roce_loop_free(struct hns_roce_dev *hr_dev,
- struct hns_roce_mr *mr, int err_loop_index,
- int loop_i, int loop_j)
-{
- struct device *dev = hr_dev->dev;
- u32 mhop_num;
- u32 pbl_bt_sz;
- u64 bt_idx;
- int i, j;
-
- pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
- mhop_num = hr_dev->caps.pbl_hop_num;
-
- i = loop_i;
- if (mhop_num == 3 && err_loop_index == 2) {
- for (; i >= 0; i--) {
- dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
- mr->pbl_l1_dma_addr[i]);
-
- for (j = 0; j < pbl_bt_sz / BA_BYTE_LEN; j++) {
- if (i == loop_i && j >= loop_j)
- break;
-
- bt_idx = i * pbl_bt_sz / BA_BYTE_LEN + j;
- dma_free_coherent(dev, pbl_bt_sz,
- mr->pbl_bt_l2[bt_idx],
- mr->pbl_l2_dma_addr[bt_idx]);
- }
- }
- } else if (mhop_num == 3 && err_loop_index == 1) {
- for (i -= 1; i >= 0; i--) {
- dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
- mr->pbl_l1_dma_addr[i]);
-
- for (j = 0; j < pbl_bt_sz / BA_BYTE_LEN; j++) {
- bt_idx = i * pbl_bt_sz / BA_BYTE_LEN + j;
- dma_free_coherent(dev, pbl_bt_sz,
- mr->pbl_bt_l2[bt_idx],
- mr->pbl_l2_dma_addr[bt_idx]);
- }
- }
- } else if (mhop_num == 2 && err_loop_index == 1) {
- for (i -= 1; i >= 0; i--)
- dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
- mr->pbl_l1_dma_addr[i]);
- } else {
- dev_warn(dev, "not support: mhop_num=%d, err_loop_index=%d.",
- mhop_num, err_loop_index);
- return;
- }
-
- dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l0, mr->pbl_l0_dma_addr);
- mr->pbl_bt_l0 = NULL;
- mr->pbl_l0_dma_addr = 0;
-}
-static int pbl_1hop_alloc(struct hns_roce_dev *hr_dev, int npages,
- struct hns_roce_mr *mr, u32 pbl_bt_sz)
-{
- struct device *dev = hr_dev->dev;
-
- if (npages > pbl_bt_sz / 8) {
- dev_err(dev, "npages %d is larger than buf_pg_sz!",
- npages);
- return -EINVAL;
- }
- mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
- &(mr->pbl_dma_addr),
- GFP_KERNEL);
- if (!mr->pbl_buf)
+ /* Allocate a key for mr from mr_table */
+ err = hns_roce_bitmap_alloc(&hr_dev->mr_table.mtpt_bitmap, &obj);
+ if (err) {
+ ibdev_err(ibdev,
+ "failed to alloc bitmap for MR key, ret = %d.\n",
+ err);
return -ENOMEM;
-
- mr->pbl_size = npages;
- mr->pbl_ba = mr->pbl_dma_addr;
- mr->pbl_hop_num = 1;
- mr->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
- mr->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;
- return 0;
-
-}
-
-
-static int pbl_2hop_alloc(struct hns_roce_dev *hr_dev, int npages,
- struct hns_roce_mr *mr, u32 pbl_bt_sz)
-{
- struct device *dev = hr_dev->dev;
- int npages_allocated;
- u64 pbl_last_bt_num;
- u64 pbl_bt_cnt = 0;
- u64 size;
- int i;
-
- pbl_last_bt_num = (npages + pbl_bt_sz / 8 - 1) / (pbl_bt_sz / 8);
-
- /* alloc L1 BT */
- for (i = 0; i < pbl_bt_sz / 8; i++) {
- if (pbl_bt_cnt + 1 < pbl_last_bt_num) {
- size = pbl_bt_sz;
- } else {
- npages_allocated = i * (pbl_bt_sz / 8);
- size = (npages - npages_allocated) * 8;
- }
- mr->pbl_bt_l1[i] = dma_alloc_coherent(dev, size,
- &(mr->pbl_l1_dma_addr[i]),
- GFP_KERNEL);
- if (!mr->pbl_bt_l1[i]) {
- hns_roce_loop_free(hr_dev, mr, 1, i, 0);
- return -ENOMEM;
- }
-
- *(mr->pbl_bt_l0 + i) = mr->pbl_l1_dma_addr[i];
-
- pbl_bt_cnt++;
- if (pbl_bt_cnt >= pbl_last_bt_num)
- break;
}
- mr->l0_chunk_last_num = i + 1;
-
- return 0;
-}
-
-static int pbl_3hop_alloc(struct hns_roce_dev *hr_dev, int npages,
- struct hns_roce_mr *mr, u32 pbl_bt_sz)
-{
- struct device *dev = hr_dev->dev;
- int mr_alloc_done = 0;
- int npages_allocated;
- u64 pbl_last_bt_num;
- u64 pbl_bt_cnt = 0;
- u64 bt_idx;
- u64 size;
- int i;
- int j = 0;
-
- pbl_last_bt_num = (npages + pbl_bt_sz / 8 - 1) / (pbl_bt_sz / 8);
-
- mr->pbl_l2_dma_addr = kcalloc(pbl_last_bt_num,
- sizeof(*mr->pbl_l2_dma_addr),
- GFP_KERNEL);
- if (!mr->pbl_l2_dma_addr)
- return -ENOMEM;
-
- mr->pbl_bt_l2 = kcalloc(pbl_last_bt_num,
- sizeof(*mr->pbl_bt_l2),
- GFP_KERNEL);
- if (!mr->pbl_bt_l2)
- goto err_kcalloc_bt_l2;
-
- /* alloc L1, L2 BT */
- for (i = 0; i < pbl_bt_sz / 8; i++) {
- mr->pbl_bt_l1[i] = dma_alloc_coherent(dev, pbl_bt_sz,
- &(mr->pbl_l1_dma_addr[i]),
- GFP_KERNEL);
- if (!mr->pbl_bt_l1[i]) {
- hns_roce_loop_free(hr_dev, mr, 1, i, 0);
- goto err_dma_alloc_l0;
- }
-
- *(mr->pbl_bt_l0 + i) = mr->pbl_l1_dma_addr[i];
-
- for (j = 0; j < pbl_bt_sz / 8; j++) {
- bt_idx = i * pbl_bt_sz / 8 + j;
-
- if (pbl_bt_cnt + 1 < pbl_last_bt_num) {
- size = pbl_bt_sz;
- } else {
- npages_allocated = bt_idx *
- (pbl_bt_sz / 8);
- size = (npages - npages_allocated) * 8;
- }
- mr->pbl_bt_l2[bt_idx] = dma_alloc_coherent(
- dev, size,
- &(mr->pbl_l2_dma_addr[bt_idx]),
- GFP_KERNEL);
- if (!mr->pbl_bt_l2[bt_idx]) {
- hns_roce_loop_free(hr_dev, mr, 2, i, j);
- goto err_dma_alloc_l0;
- }
-
- *(mr->pbl_bt_l1[i] + j) =
- mr->pbl_l2_dma_addr[bt_idx];
-
- pbl_bt_cnt++;
- if (pbl_bt_cnt >= pbl_last_bt_num) {
- mr_alloc_done = 1;
- break;
- }
- }
+ mr->iova = iova; /* MR va starting addr */
+ mr->size = size; /* MR addr range */
+	mr->pd = pd;			/* MR's PD number */
+ mr->access = access; /* MR access permit */
+ mr->enabled = 0; /* MR active status */
+ mr->key = hw_index_to_key(obj); /* MR key */
- if (mr_alloc_done)
- break;
+ err = hns_roce_table_get(hr_dev, &hr_dev->mr_table.mtpt_table, obj);
+ if (err) {
+ ibdev_err(ibdev, "failed to alloc mtpt, ret = %d.\n", err);
+ goto err_free_bitmap;
}
- mr->l0_chunk_last_num = i + 1;
- mr->l1_chunk_last_num = j + 1;
-
-
return 0;
-
-err_dma_alloc_l0:
- kfree(mr->pbl_bt_l2);
- mr->pbl_bt_l2 = NULL;
-
-err_kcalloc_bt_l2:
- kfree(mr->pbl_l2_dma_addr);
- mr->pbl_l2_dma_addr = NULL;
-
- return -ENOMEM;
+err_free_bitmap:
+ hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap, obj, BITMAP_NO_RR);
+ return err;
}
-
-/* PBL multi hop addressing */
-static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages,
- struct hns_roce_mr *mr)
+static void free_mr_key(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
{
- struct device *dev = hr_dev->dev;
- u32 pbl_bt_sz;
- u32 mhop_num;
-
- mhop_num = (mr->type == MR_TYPE_FRMR ? 1 : hr_dev->caps.pbl_hop_num);
- pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
-
- if (mhop_num == HNS_ROCE_HOP_NUM_0)
- return 0;
-
- if (mhop_num == 1)
- return pbl_1hop_alloc(hr_dev, npages, mr, pbl_bt_sz);
-
- mr->pbl_l1_dma_addr = kcalloc(pbl_bt_sz / 8,
- sizeof(*mr->pbl_l1_dma_addr),
- GFP_KERNEL);
- if (!mr->pbl_l1_dma_addr)
- return -ENOMEM;
-
- mr->pbl_bt_l1 = kcalloc(pbl_bt_sz / 8, sizeof(*mr->pbl_bt_l1),
- GFP_KERNEL);
- if (!mr->pbl_bt_l1)
- goto err_kcalloc_bt_l1;
-
- /* alloc L0 BT */
- mr->pbl_bt_l0 = dma_alloc_coherent(dev, pbl_bt_sz,
- &(mr->pbl_l0_dma_addr),
- GFP_KERNEL);
- if (!mr->pbl_bt_l0)
- goto err_kcalloc_l2_dma;
-
- if (mhop_num == 2) {
- if (pbl_2hop_alloc(hr_dev, npages, mr, pbl_bt_sz))
- goto err_kcalloc_l2_dma;
- }
-
- if (mhop_num == 3) {
- if (pbl_3hop_alloc(hr_dev, npages, mr, pbl_bt_sz))
- goto err_kcalloc_l2_dma;
- }
-
+ unsigned long obj = key_to_hw_index(mr->key);
- mr->pbl_size = npages;
- mr->pbl_ba = mr->pbl_l0_dma_addr;
- mr->pbl_hop_num = hr_dev->caps.pbl_hop_num;
- mr->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
- mr->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;
-
- return 0;
-
-err_kcalloc_l2_dma:
- kfree(mr->pbl_bt_l1);
- mr->pbl_bt_l1 = NULL;
-
-err_kcalloc_bt_l1:
- kfree(mr->pbl_l1_dma_addr);
- mr->pbl_l1_dma_addr = NULL;
-
- return -ENOMEM;
+ hns_roce_table_put(hr_dev, &hr_dev->mr_table.mtpt_table, obj);
+ hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap, obj, BITMAP_NO_RR);
}
-static int hns_roce_mr_alloc(struct hns_roce_dev *hr_dev, u32 pd, u64 iova,
- u64 size, u32 access, int npages,
- struct hns_roce_mr *mr)
+static int alloc_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
+ size_t length, struct ib_udata *udata, u64 start,
+ int access)
{
- struct device *dev = hr_dev->dev;
- unsigned long index = 0;
- int ret;
-
- /* Allocate a key for mr from mr_table */
- ret = hns_roce_bitmap_alloc(&hr_dev->mr_table.mtpt_bitmap, &index);
- if (ret)
- return -ENOMEM;
-
- mr->iova = iova; /* MR va starting addr */
- mr->size = size; /* MR addr range */
- mr->pd = pd; /* MR num */
- mr->access = access; /* MR access permit */
- mr->enabled = 0; /* MR active status */
- mr->key = hw_index_to_key(index); /* MR key */
-
- if (size == ~0ull) {
- mr->pbl_buf = NULL;
- mr->pbl_dma_addr = 0;
- /* PBL multi-hop addressing parameters */
- mr->pbl_bt_l2 = NULL;
- mr->pbl_bt_l1 = NULL;
- mr->pbl_bt_l0 = NULL;
- mr->pbl_l2_dma_addr = NULL;
- mr->pbl_l1_dma_addr = NULL;
- mr->pbl_l0_dma_addr = 0;
- } else {
- if (!hr_dev->caps.pbl_hop_num) {
- mr->pbl_buf = dma_alloc_coherent(dev,
- npages * BA_BYTE_LEN,
- &(mr->pbl_dma_addr),
- GFP_KERNEL);
- if (!mr->pbl_buf)
- return -ENOMEM;
- } else {
- ret = hns_roce_mhop_alloc(hr_dev, npages, mr);
- }
- }
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ bool is_fast = mr->type == MR_TYPE_FRMR;
+ struct hns_roce_buf_attr buf_attr = {};
+ int err;
+
+ mr->pbl_hop_num = is_fast ? 1 : hr_dev->caps.pbl_hop_num;
+ buf_attr.page_shift = is_fast ? PAGE_SHIFT :
+ hr_dev->caps.pbl_buf_pg_sz + HNS_HW_PAGE_SHIFT;
+ buf_attr.region[0].size = length;
+ buf_attr.region[0].hopnum = mr->pbl_hop_num;
+ buf_attr.region_count = 1;
+ buf_attr.fixed_page = true;
+ buf_attr.user_access = access;
+	/* a fast MR's buffer is allocated before mapping, not at creation */
+ buf_attr.mtt_only = is_fast;
+
+ err = hns_roce_mtr_create(hr_dev, &mr->pbl_mtr, &buf_attr,
+ hr_dev->caps.pbl_ba_pg_sz + HNS_HW_PAGE_SHIFT,
+ udata, start);
+ if (err)
+ ibdev_err(ibdev, "failed to alloc pbl mtr, ret = %d.\n", err);
+ else
+ mr->npages = mr->pbl_mtr.hem_cfg.buf_pg_count;
- return ret;
+ return err;
}
-static void hns_roce_mhop_free(struct hns_roce_dev *hr_dev,
- struct hns_roce_mr *mr)
+static void free_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
{
- struct device *dev = hr_dev->dev;
- int npages_allocated;
- int npages;
- int i, j;
- u32 pbl_bt_sz;
- u32 mhop_num;
- u64 bt_idx;
-
- npages = mr->pbl_size;
- pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
- mhop_num = (mr->type == MR_TYPE_FRMR) ? 1 : hr_dev->caps.pbl_hop_num;
-
- if (mhop_num == HNS_ROCE_HOP_NUM_0)
- return;
-
- if (mhop_num == 1) {
- dma_free_coherent(dev, (unsigned int)(npages * BA_BYTE_LEN),
- mr->pbl_buf, mr->pbl_dma_addr);
- return;
- }
-
- dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l0,
- mr->pbl_l0_dma_addr);
-
- if (mhop_num == 2) {
- for (i = 0; i < mr->l0_chunk_last_num; i++) {
- if (i == mr->l0_chunk_last_num - 1) {
- npages_allocated =
- i * (pbl_bt_sz / BA_BYTE_LEN);
-
- dma_free_coherent(dev,
- (npages - npages_allocated) * BA_BYTE_LEN,
- mr->pbl_bt_l1[i],
- mr->pbl_l1_dma_addr[i]);
-
- break;
- }
-
- dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
- mr->pbl_l1_dma_addr[i]);
- }
- } else if (mhop_num == 3) {
- for (i = 0; i < mr->l0_chunk_last_num; i++) {
- dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
- mr->pbl_l1_dma_addr[i]);
-
- for (j = 0; j < pbl_bt_sz / BA_BYTE_LEN; j++) {
- bt_idx = i * (pbl_bt_sz / BA_BYTE_LEN) + j;
-
- if ((i == mr->l0_chunk_last_num - 1)
- && j == mr->l1_chunk_last_num - 1) {
- npages_allocated = bt_idx *
- (pbl_bt_sz / BA_BYTE_LEN);
-
- dma_free_coherent(dev,
- (npages - npages_allocated) *
- BA_BYTE_LEN,
- mr->pbl_bt_l2[bt_idx],
- mr->pbl_l2_dma_addr[bt_idx]);
-
- break;
- }
-
- dma_free_coherent(dev, pbl_bt_sz,
- mr->pbl_bt_l2[bt_idx],
- mr->pbl_l2_dma_addr[bt_idx]);
- }
- }
- }
-
- kfree(mr->pbl_bt_l1);
- kfree(mr->pbl_l1_dma_addr);
- mr->pbl_bt_l1 = NULL;
- mr->pbl_l1_dma_addr = NULL;
- if (mhop_num == 3) {
- kfree(mr->pbl_bt_l2);
- kfree(mr->pbl_l2_dma_addr);
- mr->pbl_bt_l2 = NULL;
- mr->pbl_l2_dma_addr = NULL;
- }
+ hns_roce_mtr_destroy(hr_dev, &mr->pbl_mtr);
}
static void hns_roce_mr_free(struct hns_roce_dev *hr_dev,
struct hns_roce_mr *mr)
{
- struct device *dev = hr_dev->dev;
- int npages = 0;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
int ret;
if (mr->enabled) {
@@ -712,27 +156,12 @@ static void hns_roce_mr_free(struct hns_roce_dev *hr_dev,
key_to_hw_index(mr->key) &
(hr_dev->caps.num_mtpts - 1));
if (ret)
- dev_warn(dev, "DESTROY_MPT failed (%d)\n", ret);
+ ibdev_warn(ibdev, "failed to destroy mpt, ret = %d.\n",
+ ret);
}
- if (mr->size != ~0ULL) {
- if (mr->type == MR_TYPE_MR)
- npages = ib_umem_page_count(mr->umem);
-
- if (!hr_dev->caps.pbl_hop_num)
- dma_free_coherent(dev,
- (unsigned int)(npages * BA_BYTE_LEN),
- mr->pbl_buf, mr->pbl_dma_addr);
- else
- hns_roce_mhop_free(hr_dev, mr);
- }
-
- if (mr->enabled)
- hns_roce_table_put(hr_dev, &hr_dev->mr_table.mtpt_table,
- key_to_hw_index(mr->key));
-
- hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap,
- key_to_hw_index(mr->key), BITMAP_NO_RR);
+ free_mr_pbl(hr_dev, mr);
+ free_mr_key(hr_dev, mr);
}
static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
@@ -742,18 +171,12 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
unsigned long mtpt_idx = key_to_hw_index(mr->key);
struct device *dev = hr_dev->dev;
struct hns_roce_cmd_mailbox *mailbox;
- struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
-
- /* Prepare HEM entry memory */
- ret = hns_roce_table_get(hr_dev, &mr_table->mtpt_table, mtpt_idx);
- if (ret)
- return ret;
/* Allocate mailbox memory */
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
if (IS_ERR(mailbox)) {
ret = PTR_ERR(mailbox);
- goto err_table;
+ return ret;
}
if (mr->type != MR_TYPE_FRMR)
@@ -780,137 +203,6 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
err_page:
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
-err_table:
- hns_roce_table_put(hr_dev, &mr_table->mtpt_table, mtpt_idx);
- return ret;
-}
-
-static int hns_roce_write_mtt_chunk(struct hns_roce_dev *hr_dev,
- struct hns_roce_mtt *mtt, u32 start_index,
- u32 npages, u64 *page_list)
-{
- struct hns_roce_hem_table *table;
- dma_addr_t dma_handle;
- __le64 *mtts;
- u32 bt_page_size;
- u32 i;
-
- switch (mtt->mtt_type) {
- case MTT_TYPE_WQE:
- table = &hr_dev->mr_table.mtt_table;
- bt_page_size = 1 << (hr_dev->caps.mtt_ba_pg_sz + PAGE_SHIFT);
- break;
- case MTT_TYPE_CQE:
- table = &hr_dev->mr_table.mtt_cqe_table;
- bt_page_size = 1 << (hr_dev->caps.cqe_ba_pg_sz + PAGE_SHIFT);
- break;
- case MTT_TYPE_SRQWQE:
- table = &hr_dev->mr_table.mtt_srqwqe_table;
- bt_page_size = 1 << (hr_dev->caps.srqwqe_ba_pg_sz + PAGE_SHIFT);
- break;
- case MTT_TYPE_IDX:
- table = &hr_dev->mr_table.mtt_idx_table;
- bt_page_size = 1 << (hr_dev->caps.idx_ba_pg_sz + PAGE_SHIFT);
- break;
- default:
- return -EINVAL;
- }
-
- /* All MTTs must fit in the same page */
- if (start_index / (bt_page_size / sizeof(u64)) !=
- (start_index + npages - 1) / (bt_page_size / sizeof(u64)))
- return -EINVAL;
-
- if (start_index & (HNS_ROCE_MTT_ENTRY_PER_SEG - 1))
- return -EINVAL;
-
- mtts = hns_roce_table_find(hr_dev, table,
- mtt->first_seg +
- start_index / HNS_ROCE_MTT_ENTRY_PER_SEG,
- &dma_handle);
- if (!mtts)
- return -ENOMEM;
-
- /* Save page addr, low 12 bits : 0 */
- for (i = 0; i < npages; ++i) {
- if (!hr_dev->caps.mtt_hop_num)
- mtts[i] = cpu_to_le64(page_list[i] >> PAGE_ADDR_SHIFT);
- else
- mtts[i] = cpu_to_le64(page_list[i]);
- }
-
- return 0;
-}
-
-static int hns_roce_write_mtt(struct hns_roce_dev *hr_dev,
- struct hns_roce_mtt *mtt, u32 start_index,
- u32 npages, u64 *page_list)
-{
- int chunk;
- int ret;
- u32 bt_page_size;
-
- if (mtt->order < 0)
- return -EINVAL;
-
- switch (mtt->mtt_type) {
- case MTT_TYPE_WQE:
- bt_page_size = 1 << (hr_dev->caps.mtt_ba_pg_sz + PAGE_SHIFT);
- break;
- case MTT_TYPE_CQE:
- bt_page_size = 1 << (hr_dev->caps.cqe_ba_pg_sz + PAGE_SHIFT);
- break;
- case MTT_TYPE_SRQWQE:
- bt_page_size = 1 << (hr_dev->caps.srqwqe_ba_pg_sz + PAGE_SHIFT);
- break;
- case MTT_TYPE_IDX:
- bt_page_size = 1 << (hr_dev->caps.idx_ba_pg_sz + PAGE_SHIFT);
- break;
- default:
- dev_err(hr_dev->dev,
- "Unsupport mtt type %d, write mtt failed\n",
- mtt->mtt_type);
- return -EINVAL;
- }
-
- while (npages > 0) {
- chunk = min_t(int, bt_page_size / sizeof(u64), npages);
-
- ret = hns_roce_write_mtt_chunk(hr_dev, mtt, start_index, chunk,
- page_list);
- if (ret)
- return ret;
-
- npages -= chunk;
- start_index += chunk;
- page_list += chunk;
- }
-
- return 0;
-}
-
-int hns_roce_buf_write_mtt(struct hns_roce_dev *hr_dev,
- struct hns_roce_mtt *mtt, struct hns_roce_buf *buf)
-{
- u64 *page_list;
- int ret;
- u32 i;
-
- page_list = kmalloc_array(buf->npages, sizeof(*page_list), GFP_KERNEL);
- if (!page_list)
- return -ENOMEM;
-
- for (i = 0; i < buf->npages; ++i) {
- if (buf->nbufs == 1)
- page_list[i] = buf->direct.map + (i << buf->page_shift);
- else
- page_list[i] = buf->page_list[i].map;
-
- }
- ret = hns_roce_write_mtt(hr_dev, mtt, 0, buf->npages, page_list);
-
- kfree(page_list);
-
return ret;
}
@@ -923,50 +215,6 @@ int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev)
hr_dev->caps.num_mtpts,
hr_dev->caps.num_mtpts - 1,
hr_dev->caps.reserved_mrws, 0);
- if (ret)
- return ret;
-
- ret = hns_roce_buddy_init(&mr_table->mtt_buddy,
- ilog2(hr_dev->caps.num_mtt_segs));
- if (ret)
- goto err_buddy;
-
- if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE)) {
- ret = hns_roce_buddy_init(&mr_table->mtt_cqe_buddy,
- ilog2(hr_dev->caps.num_cqe_segs));
- if (ret)
- goto err_buddy_cqe;
- }
-
- if (hr_dev->caps.num_srqwqe_segs) {
- ret = hns_roce_buddy_init(&mr_table->mtt_srqwqe_buddy,
- ilog2(hr_dev->caps.num_srqwqe_segs));
- if (ret)
- goto err_buddy_srqwqe;
- }
-
- if (hr_dev->caps.num_idx_segs) {
- ret = hns_roce_buddy_init(&mr_table->mtt_idx_buddy,
- ilog2(hr_dev->caps.num_idx_segs));
- if (ret)
- goto err_buddy_idx;
- }
-
- return 0;
-
-err_buddy_idx:
- if (hr_dev->caps.num_srqwqe_segs)
- hns_roce_buddy_cleanup(&mr_table->mtt_srqwqe_buddy);
-
-err_buddy_srqwqe:
- if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
- hns_roce_buddy_cleanup(&mr_table->mtt_cqe_buddy);
-
-err_buddy_cqe:
- hns_roce_buddy_cleanup(&mr_table->mtt_buddy);
-
-err_buddy:
- hns_roce_bitmap_cleanup(&mr_table->mtpt_bitmap);
return ret;
}
@@ -974,30 +222,24 @@ void hns_roce_cleanup_mr_table(struct hns_roce_dev *hr_dev)
{
struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
- if (hr_dev->caps.num_idx_segs)
- hns_roce_buddy_cleanup(&mr_table->mtt_idx_buddy);
- if (hr_dev->caps.num_srqwqe_segs)
- hns_roce_buddy_cleanup(&mr_table->mtt_srqwqe_buddy);
- hns_roce_buddy_cleanup(&mr_table->mtt_buddy);
- if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
- hns_roce_buddy_cleanup(&mr_table->mtt_cqe_buddy);
hns_roce_bitmap_cleanup(&mr_table->mtpt_bitmap);
}
struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc)
{
+ struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
struct hns_roce_mr *mr;
int ret;
- mr = kmalloc(sizeof(*mr), GFP_KERNEL);
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (mr == NULL)
return ERR_PTR(-ENOMEM);
mr->type = MR_TYPE_DMA;
/* Allocate memory region key */
- ret = hns_roce_mr_alloc(to_hr_dev(pd->device), to_hr_pd(pd)->pdn, 0,
- ~0ULL, acc, 0, mr);
+ hns_roce_hem_list_init(&mr->pbl_mtr.hem_list);
+ ret = alloc_mr_key(hr_dev, mr, to_hr_pd(pd)->pdn, 0, 0, acc);
if (ret)
goto err_free;
@@ -1006,203 +248,52 @@ struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc)
goto err_mr;
mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
- mr->umem = NULL;
return &mr->ibmr;
-
err_mr:
- hns_roce_mr_free(to_hr_dev(pd->device), mr);
+ free_mr_key(hr_dev, mr);
err_free:
kfree(mr);
return ERR_PTR(ret);
}
-int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev,
- struct hns_roce_mtt *mtt, struct ib_umem *umem)
-{
- struct device *dev = hr_dev->dev;
- struct sg_dma_page_iter sg_iter;
- unsigned int order;
- int npage = 0;
- int ret = 0;
- int i;
- u64 page_addr;
- u64 *pages;
- u32 bt_page_size;
- u32 n;
-
- switch (mtt->mtt_type) {
- case MTT_TYPE_WQE:
- order = hr_dev->caps.mtt_ba_pg_sz;
- break;
- case MTT_TYPE_CQE:
- order = hr_dev->caps.cqe_ba_pg_sz;
- break;
- case MTT_TYPE_SRQWQE:
- order = hr_dev->caps.srqwqe_ba_pg_sz;
- break;
- case MTT_TYPE_IDX:
- order = hr_dev->caps.idx_ba_pg_sz;
- break;
- default:
- dev_err(dev, "Unsupport mtt type %d, write mtt failed\n",
- mtt->mtt_type);
- return -EINVAL;
- }
-
- bt_page_size = 1 << (order + PAGE_SHIFT);
-
- pages = (u64 *) __get_free_pages(GFP_KERNEL, order);
- if (!pages)
- return -ENOMEM;
-
- i = n = 0;
-
- for_each_sg_dma_page(umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
- page_addr = sg_page_iter_dma_address(&sg_iter);
- if (!(npage % (1 << (mtt->page_shift - PAGE_SHIFT)))) {
- if (page_addr & ((1 << mtt->page_shift) - 1)) {
- dev_err(dev,
- "page_addr is not page_shift %d alignment!\n",
- mtt->page_shift);
- ret = -EINVAL;
- goto out;
- }
- pages[i++] = page_addr;
- }
- npage++;
- if (i == bt_page_size / sizeof(u64)) {
- ret = hns_roce_write_mtt(hr_dev, mtt, n, i, pages);
- if (ret)
- goto out;
- n += i;
- i = 0;
- }
- }
-
- if (i)
- ret = hns_roce_write_mtt(hr_dev, mtt, n, i, pages);
-
-out:
- free_pages((unsigned long) pages, order);
- return ret;
-}
-
-static int hns_roce_ib_umem_write_mr(struct hns_roce_dev *hr_dev,
- struct hns_roce_mr *mr,
- struct ib_umem *umem)
-{
- struct sg_dma_page_iter sg_iter;
- int i = 0, j = 0;
- u64 page_addr;
- u32 pbl_bt_sz;
-
- if (hr_dev->caps.pbl_hop_num == HNS_ROCE_HOP_NUM_0)
- return 0;
-
- pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
- for_each_sg_dma_page(umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
- page_addr = sg_page_iter_dma_address(&sg_iter);
- if (!hr_dev->caps.pbl_hop_num) {
- /* for hip06, page addr is aligned to 4K */
- mr->pbl_buf[i++] = page_addr >> 12;
- } else if (hr_dev->caps.pbl_hop_num == 1) {
- mr->pbl_buf[i++] = page_addr;
- } else {
- if (hr_dev->caps.pbl_hop_num == 2)
- mr->pbl_bt_l1[i][j] = page_addr;
- else if (hr_dev->caps.pbl_hop_num == 3)
- mr->pbl_bt_l2[i][j] = page_addr;
-
- j++;
- if (j >= (pbl_bt_sz / BA_BYTE_LEN)) {
- i++;
- j = 0;
- }
- }
- }
-
- /* Memory barrier */
- mb();
-
- return 0;
-}
-
struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
- struct device *dev = hr_dev->dev;
struct hns_roce_mr *mr;
- int bt_size;
int ret;
- int n;
- int i;
- mr = kmalloc(sizeof(*mr), GFP_KERNEL);
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr)
return ERR_PTR(-ENOMEM);
- mr->umem = ib_umem_get(pd->device, start, length, access_flags);
- if (IS_ERR(mr->umem)) {
- ret = PTR_ERR(mr->umem);
- goto err_free;
- }
-
- n = ib_umem_page_count(mr->umem);
-
- if (!hr_dev->caps.pbl_hop_num) {
- if (n > HNS_ROCE_MAX_MTPT_PBL_NUM) {
- dev_err(dev,
- " MR len %lld err. MR is limited to 4G at most!\n",
- length);
- ret = -EINVAL;
- goto err_umem;
- }
- } else {
- u64 pbl_size = 1;
-
- bt_size = (1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT)) /
- BA_BYTE_LEN;
- for (i = 0; i < hr_dev->caps.pbl_hop_num; i++)
- pbl_size *= bt_size;
- if (n > pbl_size) {
- dev_err(dev,
- " MR len %lld err. MR page num is limited to %lld!\n",
- length, pbl_size);
- ret = -EINVAL;
- goto err_umem;
- }
- }
-
mr->type = MR_TYPE_MR;
-
- ret = hns_roce_mr_alloc(hr_dev, to_hr_pd(pd)->pdn, virt_addr, length,
- access_flags, n, mr);
+ ret = alloc_mr_key(hr_dev, mr, to_hr_pd(pd)->pdn, virt_addr, length,
+ access_flags);
if (ret)
- goto err_umem;
+ goto err_alloc_mr;
- ret = hns_roce_ib_umem_write_mr(hr_dev, mr, mr->umem);
+ ret = alloc_mr_pbl(hr_dev, mr, length, udata, start, access_flags);
if (ret)
- goto err_mr;
+ goto err_alloc_key;
ret = hns_roce_mr_enable(hr_dev, mr);
if (ret)
- goto err_mr;
+ goto err_alloc_pbl;
mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
+ mr->ibmr.length = length;
return &mr->ibmr;
-err_mr:
- hns_roce_mr_free(hr_dev, mr);
-
-err_umem:
- ib_umem_release(mr->umem);
-
-err_free:
+err_alloc_pbl:
+ free_mr_pbl(hr_dev, mr);
+err_alloc_key:
+ free_mr_key(hr_dev, mr);
+err_alloc_mr:
kfree(mr);
return ERR_PTR(ret);
}
@@ -1214,84 +305,36 @@ static int rereg_mr_trans(struct ib_mr *ibmr, int flags,
u32 pdn, struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
+ struct ib_device *ibdev = &hr_dev->ib_dev;
struct hns_roce_mr *mr = to_hr_mr(ibmr);
- struct device *dev = hr_dev->dev;
- int npages;
int ret;
- if (mr->size != ~0ULL) {
- npages = ib_umem_page_count(mr->umem);
-
- if (hr_dev->caps.pbl_hop_num)
- hns_roce_mhop_free(hr_dev, mr);
- else
- dma_free_coherent(dev, npages * 8,
- mr->pbl_buf, mr->pbl_dma_addr);
- }
- ib_umem_release(mr->umem);
-
- mr->umem = ib_umem_get(ibmr->device, start, length, mr_access_flags);
- if (IS_ERR(mr->umem)) {
- ret = PTR_ERR(mr->umem);
- mr->umem = NULL;
- return -ENOMEM;
- }
- npages = ib_umem_page_count(mr->umem);
-
- if (hr_dev->caps.pbl_hop_num) {
- ret = hns_roce_mhop_alloc(hr_dev, npages, mr);
- if (ret)
- goto release_umem;
- } else {
- mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
- &(mr->pbl_dma_addr),
- GFP_KERNEL);
- if (!mr->pbl_buf) {
- ret = -ENOMEM;
- goto release_umem;
- }
+ free_mr_pbl(hr_dev, mr);
+ ret = alloc_mr_pbl(hr_dev, mr, length, udata, start, mr_access_flags);
+ if (ret) {
+ ibdev_err(ibdev, "failed to create mr PBL, ret = %d.\n", ret);
+ return ret;
}
ret = hr_dev->hw->rereg_write_mtpt(hr_dev, mr, flags, pdn,
mr_access_flags, virt_addr,
length, mailbox->buf);
- if (ret)
- goto release_umem;
-
-
- ret = hns_roce_ib_umem_write_mr(hr_dev, mr, mr->umem);
if (ret) {
- if (mr->size != ~0ULL) {
- npages = ib_umem_page_count(mr->umem);
-
- if (hr_dev->caps.pbl_hop_num)
- hns_roce_mhop_free(hr_dev, mr);
- else
- dma_free_coherent(dev, npages * 8,
- mr->pbl_buf,
- mr->pbl_dma_addr);
- }
-
- goto release_umem;
+ ibdev_err(ibdev, "failed to write mtpt, ret = %d.\n", ret);
+ free_mr_pbl(hr_dev, mr);
}
- return 0;
-
-release_umem:
- ib_umem_release(mr->umem);
return ret;
-
}
-
int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length,
u64 virt_addr, int mr_access_flags, struct ib_pd *pd,
struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
+ struct ib_device *ib_dev = &hr_dev->ib_dev;
struct hns_roce_mr *mr = to_hr_mr(ibmr);
struct hns_roce_cmd_mailbox *mailbox;
- struct device *dev = hr_dev->dev;
unsigned long mtpt_idx;
u32 pdn = 0;
int ret;
@@ -1312,7 +355,7 @@ int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length,
ret = hns_roce_hw_destroy_mpt(hr_dev, NULL, mtpt_idx);
if (ret)
- dev_warn(dev, "DESTROY_MPT failed (%d)\n", ret);
+ ibdev_warn(ib_dev, "failed to destroy MPT, ret = %d.\n", ret);
mr->enabled = 0;
@@ -1336,8 +379,7 @@ int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length,
ret = hns_roce_hw_create_mpt(hr_dev, mailbox, mtpt_idx);
if (ret) {
- dev_err(dev, "CREATE_MPT failed (%d)\n", ret);
- ib_umem_release(mr->umem);
+ ibdev_err(ib_dev, "failed to create MPT, ret = %d.\n", ret);
goto free_cmd_mbox;
}
@@ -1365,8 +407,6 @@ int hns_roce_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
ret = hr_dev->hw->dereg_mr(hr_dev, mr, udata);
} else {
hns_roce_mr_free(hr_dev, mr);
-
- ib_umem_release(mr->umem);
kfree(mr);
}
@@ -1380,12 +420,8 @@ struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
struct device *dev = hr_dev->dev;
struct hns_roce_mr *mr;
u64 length;
- u32 page_size;
int ret;
- page_size = 1 << (hr_dev->caps.pbl_buf_pg_sz + PAGE_SHIFT);
- length = max_num_sg * page_size;
-
if (mr_type != IB_MR_TYPE_MEM_REG)
return ERR_PTR(-EINVAL);
@@ -1402,23 +438,28 @@ struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
mr->type = MR_TYPE_FRMR;
/* Allocate memory region key */
- ret = hns_roce_mr_alloc(hr_dev, to_hr_pd(pd)->pdn, 0, length,
- 0, max_num_sg, mr);
+ length = max_num_sg * (1 << PAGE_SHIFT);
+ ret = alloc_mr_key(hr_dev, mr, to_hr_pd(pd)->pdn, 0, length, 0);
if (ret)
goto err_free;
+ ret = alloc_mr_pbl(hr_dev, mr, length, NULL, 0, 0);
+ if (ret)
+ goto err_key;
+
ret = hns_roce_mr_enable(hr_dev, mr);
if (ret)
- goto err_mr;
+ goto err_pbl;
mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
- mr->umem = NULL;
+ mr->ibmr.length = length;
return &mr->ibmr;
-err_mr:
- hns_roce_mr_free(to_hr_dev(pd->device), mr);
-
+err_key:
+ free_mr_key(hr_dev, mr);
+err_pbl:
+ free_mr_pbl(hr_dev, mr);
err_free:
kfree(mr);
return ERR_PTR(ret);
@@ -1428,19 +469,54 @@ static int hns_roce_set_page(struct ib_mr *ibmr, u64 addr)
{
struct hns_roce_mr *mr = to_hr_mr(ibmr);
- mr->pbl_buf[mr->npages++] = addr;
+ if (likely(mr->npages < mr->pbl_mtr.hem_cfg.buf_pg_count)) {
+ mr->page_list[mr->npages++] = addr;
+ return 0;
+ }
- return 0;
+ return -ENOBUFS;
}
int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
unsigned int *sg_offset)
{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
+ struct ib_device *ibdev = &hr_dev->ib_dev;
struct hns_roce_mr *mr = to_hr_mr(ibmr);
+ struct hns_roce_mtr *mtr = &mr->pbl_mtr;
+ int ret = 0;
mr->npages = 0;
+ mr->page_list = kvcalloc(mr->pbl_mtr.hem_cfg.buf_pg_count,
+ sizeof(dma_addr_t), GFP_KERNEL);
+ if (!mr->page_list)
+ return ret;
+
+ ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page);
+ if (ret < 1) {
+ ibdev_err(ibdev, "failed to store sg pages %d %d, cnt = %d.\n",
+ mr->npages, mr->pbl_mtr.hem_cfg.buf_pg_count, ret);
+ goto err_page_list;
+ }
+
+ mtr->hem_cfg.region[0].offset = 0;
+ mtr->hem_cfg.region[0].count = mr->npages;
+ mtr->hem_cfg.region[0].hopnum = mr->pbl_hop_num;
+ mtr->hem_cfg.region_count = 1;
+ ret = hns_roce_mtr_map(hr_dev, mtr, mr->page_list, mr->npages);
+ if (ret) {
+ ibdev_err(ibdev, "failed to map sg mtr, ret = %d.\n", ret);
+ ret = 0;
+ } else {
+ mr->pbl_mtr.hem_cfg.buf_pg_shift = ilog2(ibmr->page_size);
+ ret = mr->npages;
+ }
+
+err_page_list:
+ kvfree(mr->page_list);
+ mr->page_list = NULL;
- return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page);
+ return ret;
}
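[Editor's aside, not part of the patch: a minimal sketch of how a kernel ULP typically reaches hns_roce_alloc_mr() and the rewritten hns_roce_map_mr_sg() through the generic verbs layer; pd, sgl, sg_nents and max_num_sg are placeholders supplied by the caller, and error handling is omitted.]

	struct ib_mr *mr;
	int mapped;

	/* ib_alloc_mr() dispatches to hns_roce_alloc_mr() for this device */
	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, max_num_sg);

	/* ib_map_mr_sg() dispatches to hns_roce_map_mr_sg(); on success it
	 * returns the number of pages written to the PBL via hns_roce_mtr_map(),
	 * while 0 or a negative value means the mapping failed.
	 */
	mapped = ib_map_mr_sg(mr, sgl, sg_nents, NULL, PAGE_SIZE);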
static void hns_roce_mw_free(struct hns_roce_dev *hr_dev,
@@ -1564,32 +640,23 @@ int hns_roce_dealloc_mw(struct ib_mw *ibmw)
return 0;
}
-void hns_roce_mtr_init(struct hns_roce_mtr *mtr, int bt_pg_shift,
- int buf_pg_shift)
-{
- hns_roce_hem_list_init(&mtr->hem_list, bt_pg_shift);
- mtr->buf_pg_shift = buf_pg_shift;
-}
-
-void hns_roce_mtr_cleanup(struct hns_roce_dev *hr_dev,
- struct hns_roce_mtr *mtr)
-{
- hns_roce_hem_list_release(hr_dev, &mtr->hem_list);
-}
-
-static int hns_roce_write_mtr(struct hns_roce_dev *hr_dev,
- struct hns_roce_mtr *mtr, dma_addr_t *bufs,
- struct hns_roce_buf_region *r)
+static int mtr_map_region(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
+ dma_addr_t *pages, struct hns_roce_buf_region *region)
{
+ __le64 *mtts;
int offset;
int count;
int npage;
- u64 *mtts;
+ u64 addr;
int end;
int i;
- offset = r->offset;
- end = offset + r->count;
+ /* if hopnum is 0, buffer cannot store BAs, so skip write mtt */
+ if (!region->hopnum)
+ return 0;
+
+ offset = region->offset;
+ end = offset + region->count;
npage = 0;
while (offset < end) {
mtts = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list,
@@ -1597,13 +664,13 @@ static int hns_roce_write_mtr(struct hns_roce_dev *hr_dev,
if (!mtts)
return -ENOBUFS;
- /* Save page addr, low 12 bits : 0 */
for (i = 0; i < count; i++) {
if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
- mtts[i] = bufs[npage] >> PAGE_ADDR_SHIFT;
+ addr = to_hr_hw_page_addr(pages[npage]);
else
- mtts[i] = bufs[npage];
+ addr = pages[npage];
+ mtts[i] = cpu_to_le64(addr);
npage++;
}
offset += count;
@@ -1612,69 +679,416 @@ static int hns_roce_write_mtr(struct hns_roce_dev *hr_dev,
return 0;
}
-int hns_roce_mtr_attach(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
- dma_addr_t **bufs, struct hns_roce_buf_region *regions,
- int region_cnt)
+static inline bool mtr_has_mtt(struct hns_roce_buf_attr *attr)
{
- struct hns_roce_buf_region *r;
- int ret;
int i;
- ret = hns_roce_hem_list_request(hr_dev, &mtr->hem_list, regions,
- region_cnt);
- if (ret)
- return ret;
+ for (i = 0; i < attr->region_count; i++)
+ if (attr->region[i].hopnum != HNS_ROCE_HOP_NUM_0 &&
+ attr->region[i].hopnum > 0)
+ return true;
- for (i = 0; i < region_cnt; i++) {
- r = &regions[i];
- ret = hns_roce_write_mtr(hr_dev, mtr, bufs[i], r);
+	/* because the mtr has only one root base address, when hopnum is 0 the
+	 * root base address equals the first buffer address, so all allocated
+	 * memory must be in one continuous space accessed in direct mode.
+	 */
+ return false;
+}
+
+static inline size_t mtr_bufs_size(struct hns_roce_buf_attr *attr)
+{
+ size_t size = 0;
+ int i;
+
+ for (i = 0; i < attr->region_count; i++)
+ size += attr->region[i].size;
+
+ return size;
+}
+
+static inline int mtr_umem_page_count(struct ib_umem *umem,
+ unsigned int page_shift)
+{
+ int count = ib_umem_page_count(umem);
+
+ if (page_shift >= PAGE_SHIFT)
+ count >>= page_shift - PAGE_SHIFT;
+ else
+ count <<= PAGE_SHIFT - page_shift;
+
+ return count;
+}
+
+static inline size_t mtr_kmem_direct_size(bool is_direct, size_t alloc_size,
+ unsigned int page_shift)
+{
+ if (is_direct)
+ return ALIGN(alloc_size, 1 << page_shift);
+ else
+ return HNS_HW_DIRECT_PAGE_COUNT << page_shift;
+}
+
+/*
+ * check whether the given pages form a continuous address space.
+ * Returns 0 on success, or the index of the first page that breaks contiguity.
+ */
+static inline int mtr_check_direct_pages(dma_addr_t *pages, int page_count,
+ unsigned int page_shift)
+{
+ size_t page_size = 1 << page_shift;
+ int i;
+
+ for (i = 1; i < page_count; i++)
+ if (pages[i] - pages[i - 1] != page_size)
+ return i;
+
+ return 0;
+}
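[Editor's aside, not part of the patch: a tiny worked example of what this helper reports, assuming 4 KB hardware pages (page_shift = 12); the array contents are invented for illustration.]

	dma_addr_t pages[] = { 0x1000, 0x2000, 0x3000, 0x8000 };

	/* pages[0..2] form one contiguous 4 KB run; pages[3] breaks it, so the
	 * helper returns 3 and the caller (mtr_get_pages()) logs the bad page
	 * and treats the buffer as unusable in direct mode.
	 */
	int bad = mtr_check_direct_pages(pages, 4, 12);	/* bad == 3 */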
+
+static void mtr_free_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr)
+{
+ /* release user buffers */
+ if (mtr->umem) {
+ ib_umem_release(mtr->umem);
+ mtr->umem = NULL;
+ }
+
+ /* release kernel buffers */
+ if (mtr->kmem) {
+ hns_roce_buf_free(hr_dev, mtr->kmem);
+ kfree(mtr->kmem);
+ mtr->kmem = NULL;
+ }
+}
+
+static int mtr_alloc_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
+ struct hns_roce_buf_attr *buf_attr, bool is_direct,
+ struct ib_udata *udata, unsigned long user_addr)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ unsigned int max_pg_shift = buf_attr->page_shift;
+ unsigned int best_pg_shift = 0;
+ int all_pg_count = 0;
+ size_t direct_size;
+ size_t total_size;
+ unsigned long tmp;
+ int ret = 0;
+
+ total_size = mtr_bufs_size(buf_attr);
+ if (total_size < 1) {
+ ibdev_err(ibdev, "Failed to check mtr size\n");
+ return -EINVAL;
+ }
+
+ if (udata) {
+ mtr->kmem = NULL;
+ mtr->umem = ib_umem_get(ibdev, user_addr, total_size,
+ buf_attr->user_access);
+ if (IS_ERR_OR_NULL(mtr->umem)) {
+ ibdev_err(ibdev, "Failed to get umem, ret %ld\n",
+ PTR_ERR(mtr->umem));
+ return -ENOMEM;
+ }
+ if (buf_attr->fixed_page) {
+ best_pg_shift = max_pg_shift;
+ } else {
+ tmp = GENMASK(max_pg_shift, 0);
+ ret = ib_umem_find_best_pgsz(mtr->umem, tmp, user_addr);
+ best_pg_shift = (ret <= PAGE_SIZE) ?
+ PAGE_SHIFT : ilog2(ret);
+ }
+ all_pg_count = mtr_umem_page_count(mtr->umem, best_pg_shift);
+ ret = 0;
+ } else {
+ mtr->umem = NULL;
+ mtr->kmem = kzalloc(sizeof(*mtr->kmem), GFP_KERNEL);
+ if (!mtr->kmem) {
+ ibdev_err(ibdev, "Failed to alloc kmem\n");
+ return -ENOMEM;
+ }
+ direct_size = mtr_kmem_direct_size(is_direct, total_size,
+ max_pg_shift);
+ ret = hns_roce_buf_alloc(hr_dev, total_size, direct_size,
+ mtr->kmem, max_pg_shift);
if (ret) {
- dev_err(hr_dev->dev,
- "write mtr[%d/%d] err %d,offset=%d.\n",
- i, region_cnt, ret, r->offset);
- goto err_write;
+ ibdev_err(ibdev, "Failed to alloc kmem, ret %d\n", ret);
+ goto err_alloc_mem;
+ } else {
+ best_pg_shift = max_pg_shift;
+ all_pg_count = mtr->kmem->npages;
}
}
- return 0;
+	/* must be bigger than the minimum hardware page shift */
+ if (best_pg_shift < HNS_HW_PAGE_SHIFT || all_pg_count < 1) {
+ ret = -EINVAL;
+ ibdev_err(ibdev, "Failed to check mtr page shift %d count %d\n",
+ best_pg_shift, all_pg_count);
+ goto err_alloc_mem;
+ }
-err_write:
- hns_roce_hem_list_release(hr_dev, &mtr->hem_list);
+ mtr->hem_cfg.buf_pg_shift = best_pg_shift;
+ mtr->hem_cfg.buf_pg_count = all_pg_count;
+ return 0;
+err_alloc_mem:
+ mtr_free_bufs(hr_dev, mtr);
return ret;
}
+static int mtr_get_pages(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
+ dma_addr_t *pages, int count, unsigned int page_shift)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ int npage;
+ int err;
+
+ if (mtr->umem)
+ npage = hns_roce_get_umem_bufs(hr_dev, pages, count, 0,
+ mtr->umem, page_shift);
+ else
+ npage = hns_roce_get_kmem_bufs(hr_dev, pages, count, 0,
+ mtr->kmem);
+
+ if (mtr->hem_cfg.is_direct && npage > 1) {
+ err = mtr_check_direct_pages(pages, npage, page_shift);
+ if (err) {
+ ibdev_err(ibdev, "Failed to check %s direct page-%d\n",
+ mtr->umem ? "user" : "kernel", err);
+ npage = err;
+ }
+ }
+
+ return npage;
+}
+
+int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
+ dma_addr_t *pages, int page_cnt)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct hns_roce_buf_region *r;
+ int err;
+ int i;
+
+ for (i = 0; i < mtr->hem_cfg.region_count; i++) {
+ r = &mtr->hem_cfg.region[i];
+ if (r->offset + r->count > page_cnt) {
+ err = -EINVAL;
+ ibdev_err(ibdev,
+ "Failed to check mtr%d end %d + %d, max %d\n",
+ i, r->offset, r->count, page_cnt);
+ return err;
+ }
+
+ err = mtr_map_region(hr_dev, mtr, &pages[r->offset], r);
+ if (err) {
+ ibdev_err(ibdev,
+ "Failed to map mtr%d offset %d, err %d\n",
+ i, r->offset, err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
int offset, u64 *mtt_buf, int mtt_max, u64 *base_addr)
{
- u64 *mtts = mtt_buf;
int mtt_count;
int total = 0;
- u64 *addr;
+ __le64 *mtts;
int npage;
+ u64 addr;
int left;
- if (mtts == NULL || mtt_max < 1)
+ if (!mtt_buf || mtt_max < 1)
goto done;
+ /* no mtt memory in direct mode, so just return the buffer address */
+ if (mtr->hem_cfg.is_direct) {
+ npage = offset;
+ for (total = 0; total < mtt_max; total++, npage++) {
+ addr = mtr->hem_cfg.root_ba +
+ (npage << mtr->hem_cfg.buf_pg_shift);
+
+ if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
+ mtt_buf[total] = to_hr_hw_page_addr(addr);
+ else
+ mtt_buf[total] = addr;
+ }
+
+ goto done;
+ }
+
left = mtt_max;
while (left > 0) {
mtt_count = 0;
- addr = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list,
+ mtts = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list,
offset + total,
&mtt_count, NULL);
- if (!addr || !mtt_count)
+ if (!mtts || !mtt_count)
goto done;
npage = min(mtt_count, left);
- memcpy(&mtts[total], addr, BA_BYTE_LEN * npage);
left -= npage;
- total += npage;
+ for (mtt_count = 0; mtt_count < npage; mtt_count++)
+ mtt_buf[total++] = le64_to_cpu(mtts[mtt_count]);
}
done:
if (base_addr)
- *base_addr = mtr->hem_list.root_ba;
+ *base_addr = mtr->hem_cfg.root_ba;
return total;
}
+
+/* convert buffer size to page index and page count */
+static unsigned int mtr_init_region(struct hns_roce_buf_attr *attr,
+ int page_cnt,
+ struct hns_roce_buf_region *regions,
+ int region_cnt, unsigned int page_shift)
+{
+ unsigned int page_size = 1 << page_shift;
+ int max_region = attr->region_count;
+ struct hns_roce_buf_region *r;
+ unsigned int i = 0;
+ int page_idx = 0;
+
+ for (; i < region_cnt && i < max_region && page_idx < page_cnt; i++) {
+ r = &regions[i];
+ r->hopnum = attr->region[i].hopnum == HNS_ROCE_HOP_NUM_0 ?
+ 0 : attr->region[i].hopnum;
+ r->offset = page_idx;
+ r->count = DIV_ROUND_UP(attr->region[i].size, page_size);
+ page_idx += r->count;
+ }
+
+ return i;
+}
+
+/**
+ * hns_roce_mtr_create - Create hns memory translate region.
+ *
+ * @hr_dev: pointer to the hns roce device
+ * @mtr: memory translate region
+ * @buf_attr: buffer attribute for creating mtr
+ * @page_shift: page shift for the multi-hop base address table
+ * @udata: user space context; if it's NULL, the mtr is for kernel space
+ * @user_addr: userspace virtual address to start at
+ */
+int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
+ struct hns_roce_buf_attr *buf_attr,
+ unsigned int page_shift, struct ib_udata *udata,
+ unsigned long user_addr)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ dma_addr_t *pages = NULL;
+ int region_cnt = 0;
+ int all_pg_cnt;
+ int get_pg_cnt;
+ bool has_mtt;
+ int err = 0;
+
+ has_mtt = mtr_has_mtt(buf_attr);
+	/* if the buffer only needs mtt, just init the hem cfg */
+ if (buf_attr->mtt_only) {
+ mtr->hem_cfg.buf_pg_shift = buf_attr->page_shift;
+ mtr->hem_cfg.buf_pg_count = mtr_bufs_size(buf_attr) >>
+ buf_attr->page_shift;
+ mtr->umem = NULL;
+ mtr->kmem = NULL;
+ } else {
+ err = mtr_alloc_bufs(hr_dev, mtr, buf_attr, !has_mtt, udata,
+ user_addr);
+ if (err) {
+ ibdev_err(ibdev, "Failed to alloc mtr bufs, err %d\n",
+ err);
+ return err;
+ }
+ }
+
+ /* alloc mtt memory */
+ all_pg_cnt = mtr->hem_cfg.buf_pg_count;
+ hns_roce_hem_list_init(&mtr->hem_list);
+ mtr->hem_cfg.is_direct = !has_mtt;
+ mtr->hem_cfg.ba_pg_shift = page_shift;
+ mtr->hem_cfg.region_count = 0;
+ region_cnt = mtr_init_region(buf_attr, all_pg_cnt,
+ mtr->hem_cfg.region,
+ ARRAY_SIZE(mtr->hem_cfg.region),
+ mtr->hem_cfg.buf_pg_shift);
+ if (region_cnt < 1) {
+ err = -ENOBUFS;
+ ibdev_err(ibdev, "failed to init mtr region %d\n", region_cnt);
+ goto err_alloc_bufs;
+ }
+
+ mtr->hem_cfg.region_count = region_cnt;
+
+ if (has_mtt) {
+ err = hns_roce_hem_list_request(hr_dev, &mtr->hem_list,
+ mtr->hem_cfg.region, region_cnt,
+ page_shift);
+ if (err) {
+ ibdev_err(ibdev, "Failed to request mtr hem, err %d\n",
+ err);
+ goto err_alloc_bufs;
+ }
+ mtr->hem_cfg.root_ba = mtr->hem_list.root_ba;
+ }
+
+ /* no buffer to map */
+ if (buf_attr->mtt_only)
+ return 0;
+
+ /* alloc a tmp array to store buffer's dma address */
+ pages = kvcalloc(all_pg_cnt, sizeof(dma_addr_t), GFP_KERNEL);
+ if (!pages) {
+ err = -ENOMEM;
+ ibdev_err(ibdev, "Failed to alloc mtr page list %d\n",
+ all_pg_cnt);
+ goto err_alloc_hem_list;
+ }
+
+ get_pg_cnt = mtr_get_pages(hr_dev, mtr, pages, all_pg_cnt,
+ mtr->hem_cfg.buf_pg_shift);
+ if (get_pg_cnt != all_pg_cnt) {
+ ibdev_err(ibdev, "Failed to get mtr page %d != %d\n",
+ get_pg_cnt, all_pg_cnt);
+ err = -ENOBUFS;
+ goto err_alloc_page_list;
+ }
+
+ if (!has_mtt) {
+ mtr->hem_cfg.root_ba = pages[0];
+ } else {
+ /* write buffer's dma address to BA table */
+ err = hns_roce_mtr_map(hr_dev, mtr, pages, all_pg_cnt);
+ if (err) {
+ ibdev_err(ibdev, "Failed to map mtr pages, err %d\n",
+ err);
+ goto err_alloc_page_list;
+ }
+ }
+
+ /* drop tmp array */
+ kvfree(pages);
+ return 0;
+err_alloc_page_list:
+ kvfree(pages);
+err_alloc_hem_list:
+ hns_roce_hem_list_release(hr_dev, &mtr->hem_list);
+err_alloc_bufs:
+ mtr_free_bufs(hr_dev, mtr);
+ return err;
+}
+
+void hns_roce_mtr_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr)
+{
+ /* release multi-hop addressing resource */
+ hns_roce_hem_list_release(hr_dev, &mtr->hem_list);
+
+ /* free buffers */
+ mtr_free_bufs(hr_dev, mtr);
+}
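[Editor's aside, not part of the patch: the calling pattern every consumer of the new MTR interface in this series follows (alloc_mr_pbl() above and alloc_qp_buf() below do exactly this). hr_dev, mtr, length, udata, user_addr, mtt_buf and root_ba are placeholders, and the capability fields shown are just one plausible configuration.]

	struct hns_roce_buf_attr buf_attr = {};
	struct hns_roce_mtr mtr;
	u64 mtt_buf[4], root_ba;
	int ret;

	/* describe the buffer: one region with a PBL-style hop number and page size */
	buf_attr.region[0].size = length;
	buf_attr.region[0].hopnum = hr_dev->caps.pbl_hop_num;
	buf_attr.region_count = 1;
	buf_attr.page_shift = HNS_HW_PAGE_SHIFT + hr_dev->caps.pbl_buf_pg_sz;
	buf_attr.fixed_page = true;

	/* allocate the buffer (user or kernel) and write its pages to the BA tables */
	ret = hns_roce_mtr_create(hr_dev, &mtr, &buf_attr,
				  HNS_HW_PAGE_SHIFT + hr_dev->caps.pbl_ba_pg_sz,
				  udata, user_addr);

	/* later: fetch base addresses for the hardware context, then tear down */
	hns_roce_mtr_find(hr_dev, &mtr, 0, mtt_buf, ARRAY_SIZE(mtt_buf), &root_ba);
	hns_roce_mtr_destroy(hr_dev, &mtr);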
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 6317901c4b4f..a0a47bd66975 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -355,16 +355,16 @@ static void free_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
hns_roce_bitmap_free_range(&qp_table->bitmap, hr_qp->qpn, 1, BITMAP_RR);
}
-static int set_rq_size(struct hns_roce_dev *hr_dev,
- struct ib_qp_cap *cap, bool is_user, int has_rq,
- struct hns_roce_qp *hr_qp)
+static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
+ struct hns_roce_qp *hr_qp, int has_rq)
{
- u32 max_cnt;
+ u32 cnt;
/* If srq exist, set zero for relative number of rq */
if (!has_rq) {
hr_qp->rq.wqe_cnt = 0;
hr_qp->rq.max_gs = 0;
+ hr_qp->rq_inl_buf.wqe_cnt = 0;
cap->max_recv_wr = 0;
cap->max_recv_sge = 0;
@@ -379,17 +379,15 @@ static int set_rq_size(struct hns_roce_dev *hr_dev,
return -EINVAL;
}
- max_cnt = max(cap->max_recv_wr, hr_dev->caps.min_wqes);
-
- hr_qp->rq.wqe_cnt = roundup_pow_of_two(max_cnt);
- if ((u32)hr_qp->rq.wqe_cnt > hr_dev->caps.max_wqes) {
+ cnt = roundup_pow_of_two(max(cap->max_recv_wr, hr_dev->caps.min_wqes));
+ if (cnt > hr_dev->caps.max_wqes) {
ibdev_err(&hr_dev->ib_dev, "rq depth %u too large\n",
cap->max_recv_wr);
return -EINVAL;
}
- max_cnt = max(1U, cap->max_recv_sge);
- hr_qp->rq.max_gs = roundup_pow_of_two(max_cnt);
+ hr_qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge) +
+ HNS_ROCE_RESERVED_SGE);
if (hr_dev->caps.max_rq_sg <= HNS_ROCE_SGE_IN_WQE)
hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz);
@@ -397,8 +395,57 @@ static int set_rq_size(struct hns_roce_dev *hr_dev,
hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz *
hr_qp->rq.max_gs);
- cap->max_recv_wr = hr_qp->rq.wqe_cnt;
- cap->max_recv_sge = hr_qp->rq.max_gs;
+ hr_qp->rq.wqe_cnt = cnt;
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE)
+ hr_qp->rq_inl_buf.wqe_cnt = cnt;
+ else
+ hr_qp->rq_inl_buf.wqe_cnt = 0;
+
+ cap->max_recv_wr = cnt;
+ cap->max_recv_sge = hr_qp->rq.max_gs - HNS_ROCE_RESERVED_SGE;
+
+ return 0;
+}
+
+static int set_extend_sge_param(struct hns_roce_dev *hr_dev, u32 sq_wqe_cnt,
+ struct hns_roce_qp *hr_qp,
+ struct ib_qp_cap *cap)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ u32 cnt;
+
+ cnt = max(1U, cap->max_send_sge);
+ if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
+ hr_qp->sq.max_gs = roundup_pow_of_two(cnt);
+ hr_qp->sge.sge_cnt = 0;
+
+ return 0;
+ }
+
+ hr_qp->sq.max_gs = cnt;
+
+	/* UD SQ WQEs place their SGEs in the extended SGE space */
+ if (hr_qp->ibqp.qp_type == IB_QPT_GSI ||
+ hr_qp->ibqp.qp_type == IB_QPT_UD) {
+ cnt = roundup_pow_of_two(sq_wqe_cnt * hr_qp->sq.max_gs);
+ } else if (hr_qp->sq.max_gs > HNS_ROCE_SGE_IN_WQE) {
+ cnt = roundup_pow_of_two(sq_wqe_cnt *
+ (hr_qp->sq.max_gs - HNS_ROCE_SGE_IN_WQE));
+
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08_A) {
+ if (cnt > hr_dev->caps.max_extend_sg) {
+ ibdev_err(ibdev,
+ "failed to check exSGE num, exSGE num = %d.\n",
+ cnt);
+ return -EINVAL;
+ }
+ }
+ } else {
+ cnt = 0;
+ }
+
+ hr_qp->sge.sge_shift = HNS_ROCE_SGE_SHIFT;
+ hr_qp->sge.sge_cnt = cnt;
return 0;
}
@@ -430,174 +477,79 @@ static int set_user_sq_size(struct hns_roce_dev *hr_dev,
struct ib_qp_cap *cap, struct hns_roce_qp *hr_qp,
struct hns_roce_ib_create_qp *ucmd)
{
- u32 ex_sge_num;
- u32 page_size;
- u32 max_cnt;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ u32 cnt = 0;
int ret;
- if (check_shl_overflow(1, ucmd->log_sq_bb_count, &hr_qp->sq.wqe_cnt) ||
- hr_qp->sq.wqe_cnt > hr_dev->caps.max_wqes)
+ if (check_shl_overflow(1, ucmd->log_sq_bb_count, &cnt) ||
+ cnt > hr_dev->caps.max_wqes)
return -EINVAL;
ret = check_sq_size_with_integrity(hr_dev, cap, ucmd);
if (ret) {
- ibdev_err(&hr_dev->ib_dev, "Failed to check user SQ size limit\n");
+ ibdev_err(ibdev, "failed to check user SQ size, ret = %d.\n",
+ ret);
return ret;
}
- hr_qp->sq.wqe_shift = ucmd->log_sq_stride;
-
- max_cnt = max(1U, cap->max_send_sge);
- if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
- hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
- else
- hr_qp->sq.max_gs = max_cnt;
-
- if (hr_qp->sq.max_gs > HNS_ROCE_SGE_IN_WQE)
- hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
- (hr_qp->sq.max_gs - 2));
-
- if (hr_qp->sq.max_gs > HNS_ROCE_SGE_IN_WQE &&
- hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08_A) {
- if (hr_qp->sge.sge_cnt > hr_dev->caps.max_extend_sg) {
- ibdev_err(&hr_dev->ib_dev,
- "Failed to check extended SGE size limit %d\n",
- hr_qp->sge.sge_cnt);
- return -EINVAL;
- }
- }
-
- hr_qp->sge.sge_shift = 4;
- ex_sge_num = hr_qp->sge.sge_cnt;
+ ret = set_extend_sge_param(hr_dev, cnt, hr_qp, cap);
+ if (ret)
+ return ret;
- /* Get buf size, SQ and RQ are aligned to page_szie */
- if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
- hr_qp->buff_size = round_up((hr_qp->rq.wqe_cnt <<
- hr_qp->rq.wqe_shift), PAGE_SIZE) +
- round_up((hr_qp->sq.wqe_cnt <<
- hr_qp->sq.wqe_shift), PAGE_SIZE);
-
- hr_qp->sq.offset = 0;
- hr_qp->rq.offset = round_up((hr_qp->sq.wqe_cnt <<
- hr_qp->sq.wqe_shift), PAGE_SIZE);
- } else {
- page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
- hr_qp->sge.sge_cnt = ex_sge_num ?
- max(page_size / (1 << hr_qp->sge.sge_shift), ex_sge_num) : 0;
- hr_qp->buff_size = round_up((hr_qp->rq.wqe_cnt <<
- hr_qp->rq.wqe_shift), page_size) +
- round_up((hr_qp->sge.sge_cnt <<
- hr_qp->sge.sge_shift), page_size) +
- round_up((hr_qp->sq.wqe_cnt <<
- hr_qp->sq.wqe_shift), page_size);
-
- hr_qp->sq.offset = 0;
- if (ex_sge_num) {
- hr_qp->sge.offset = round_up((hr_qp->sq.wqe_cnt <<
- hr_qp->sq.wqe_shift),
- page_size);
- hr_qp->rq.offset = hr_qp->sge.offset +
- round_up((hr_qp->sge.sge_cnt <<
- hr_qp->sge.sge_shift),
- page_size);
- } else {
- hr_qp->rq.offset = round_up((hr_qp->sq.wqe_cnt <<
- hr_qp->sq.wqe_shift),
- page_size);
- }
- }
+ hr_qp->sq.wqe_shift = ucmd->log_sq_stride;
+ hr_qp->sq.wqe_cnt = cnt;
return 0;
}
-static int split_wqe_buf_region(struct hns_roce_dev *hr_dev,
- struct hns_roce_qp *hr_qp,
- struct hns_roce_buf_region *regions,
- int region_max, int page_shift)
+static int set_wqe_buf_attr(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *hr_qp,
+ struct hns_roce_buf_attr *buf_attr)
{
- int page_size = 1 << page_shift;
- bool is_extend_sge;
- int region_cnt = 0;
int buf_size;
- int buf_cnt;
+ int idx = 0;
- if (hr_qp->buff_size < 1 || region_max < 1)
- return region_cnt;
+ hr_qp->buff_size = 0;
- if (hr_qp->sge.sge_cnt > 0)
- is_extend_sge = true;
- else
- is_extend_sge = false;
-
- /* sq region */
- if (is_extend_sge)
- buf_size = hr_qp->sge.offset - hr_qp->sq.offset;
- else
- buf_size = hr_qp->rq.offset - hr_qp->sq.offset;
-
- if (buf_size > 0 && region_cnt < region_max) {
- buf_cnt = DIV_ROUND_UP(buf_size, page_size);
- hns_roce_init_buf_region(&regions[region_cnt],
- hr_dev->caps.wqe_sq_hop_num,
- hr_qp->sq.offset / page_size,
- buf_cnt);
- region_cnt++;
- }
-
- /* sge region */
- if (is_extend_sge) {
- buf_size = hr_qp->rq.offset - hr_qp->sge.offset;
- if (buf_size > 0 && region_cnt < region_max) {
- buf_cnt = DIV_ROUND_UP(buf_size, page_size);
- hns_roce_init_buf_region(&regions[region_cnt],
- hr_dev->caps.wqe_sge_hop_num,
- hr_qp->sge.offset / page_size,
- buf_cnt);
- region_cnt++;
- }
- }
-
- /* rq region */
- buf_size = hr_qp->buff_size - hr_qp->rq.offset;
- if (buf_size > 0) {
- buf_cnt = DIV_ROUND_UP(buf_size, page_size);
- hns_roce_init_buf_region(&regions[region_cnt],
- hr_dev->caps.wqe_rq_hop_num,
- hr_qp->rq.offset / page_size,
- buf_cnt);
- region_cnt++;
- }
-
- return region_cnt;
-}
-
-static int set_extend_sge_param(struct hns_roce_dev *hr_dev,
- struct hns_roce_qp *hr_qp)
-{
- struct device *dev = hr_dev->dev;
-
- if (hr_qp->sq.max_gs > 2) {
- hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
- (hr_qp->sq.max_gs - 2));
- hr_qp->sge.sge_shift = 4;
- }
-
- /* ud sqwqe's sge use extend sge */
- if (hr_dev->hw_rev != HNS_ROCE_HW_VER1 &&
- hr_qp->ibqp.qp_type == IB_QPT_GSI) {
- hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
- hr_qp->sq.max_gs);
- hr_qp->sge.sge_shift = 4;
- }
+ /* SQ WQE */
+ hr_qp->sq.offset = 0;
+ buf_size = to_hr_hem_entries_size(hr_qp->sq.wqe_cnt,
+ hr_qp->sq.wqe_shift);
+ if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) {
+ buf_attr->region[idx].size = buf_size;
+ buf_attr->region[idx].hopnum = hr_dev->caps.wqe_sq_hop_num;
+ idx++;
+ hr_qp->buff_size += buf_size;
+ }
+
+ /* extend SGE WQE in SQ */
+ hr_qp->sge.offset = hr_qp->buff_size;
+ buf_size = to_hr_hem_entries_size(hr_qp->sge.sge_cnt,
+ hr_qp->sge.sge_shift);
+ if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) {
+ buf_attr->region[idx].size = buf_size;
+ buf_attr->region[idx].hopnum = hr_dev->caps.wqe_sge_hop_num;
+ idx++;
+ hr_qp->buff_size += buf_size;
+ }
+
+ /* RQ WQE */
+ hr_qp->rq.offset = hr_qp->buff_size;
+ buf_size = to_hr_hem_entries_size(hr_qp->rq.wqe_cnt,
+ hr_qp->rq.wqe_shift);
+ if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) {
+ buf_attr->region[idx].size = buf_size;
+ buf_attr->region[idx].hopnum = hr_dev->caps.wqe_rq_hop_num;
+ idx++;
+ hr_qp->buff_size += buf_size;
+ }
+
+ if (hr_qp->buff_size < 1)
+ return -EINVAL;
- if (hr_qp->sq.max_gs > 2 &&
- hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08_A) {
- if (hr_qp->sge.sge_cnt > hr_dev->caps.max_extend_sg) {
- dev_err(dev, "The extended sge cnt error! sge_cnt=%d\n",
- hr_qp->sge.sge_cnt);
- return -EINVAL;
- }
- }
+ buf_attr->page_shift = HNS_HW_PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz;
+ buf_attr->fixed_page = true;
+ buf_attr->region_count = idx;
return 0;
}
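[Editor's aside, not part of the patch: a worked example of the split this helper produces, assuming to_hr_hem_entries_size() rounds (count << shift) up to the 4 KB hardware page; the queue sizes are invented for illustration.]

	/* sq.wqe_cnt = 128 (wqe_shift = 6), sge.sge_cnt = 256 (sge_shift = 4),
	 * rq.wqe_cnt = 128 (wqe_shift = 7):
	 *
	 *   hr_qp->sq.offset  = 0       buf_attr->region[0].size =  8192 (SQ WQE)
	 *   hr_qp->sge.offset = 8192    buf_attr->region[1].size =  4096 (extend SGE)
	 *   hr_qp->rq.offset  = 12288   buf_attr->region[2].size = 16384 (RQ WQE)
	 *
	 * hr_qp->buff_size = 28672, buf_attr->region_count = 3, and each region
	 * keeps its own hop number from hr_dev->caps.
	 */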
@@ -605,62 +557,35 @@ static int set_extend_sge_param(struct hns_roce_dev *hr_dev,
static int set_kernel_sq_size(struct hns_roce_dev *hr_dev,
struct ib_qp_cap *cap, struct hns_roce_qp *hr_qp)
{
- u32 page_size;
- u32 max_cnt;
- int size;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ u32 cnt;
int ret;
if (!cap->max_send_wr || cap->max_send_wr > hr_dev->caps.max_wqes ||
cap->max_send_sge > hr_dev->caps.max_sq_sg ||
cap->max_inline_data > hr_dev->caps.max_sq_inline) {
- ibdev_err(&hr_dev->ib_dev,
- "SQ WR or sge or inline data error!\n");
+ ibdev_err(ibdev,
+ "failed to check SQ WR, SGE or inline num, ret = %d.\n",
+ -EINVAL);
return -EINVAL;
}
- hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz);
-
- max_cnt = max(cap->max_send_wr, hr_dev->caps.min_wqes);
-
- hr_qp->sq.wqe_cnt = roundup_pow_of_two(max_cnt);
- if ((u32)hr_qp->sq.wqe_cnt > hr_dev->caps.max_wqes) {
- ibdev_err(&hr_dev->ib_dev,
- "while setting kernel sq size, sq.wqe_cnt too large\n");
+ cnt = roundup_pow_of_two(max(cap->max_send_wr, hr_dev->caps.min_wqes));
+ if (cnt > hr_dev->caps.max_wqes) {
+ ibdev_err(ibdev, "failed to check WQE num, WQE num = %d.\n",
+ cnt);
return -EINVAL;
}
- /* Get data_seg numbers */
- max_cnt = max(1U, cap->max_send_sge);
- if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
- hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
- else
- hr_qp->sq.max_gs = max_cnt;
+ hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz);
+ hr_qp->sq.wqe_cnt = cnt;
- ret = set_extend_sge_param(hr_dev, hr_qp);
- if (ret) {
- ibdev_err(&hr_dev->ib_dev, "set extend sge parameters fail\n");
+ ret = set_extend_sge_param(hr_dev, cnt, hr_qp, cap);
+ if (ret)
return ret;
- }
- /* Get buf size, SQ and RQ are aligned to PAGE_SIZE */
- page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
- hr_qp->sq.offset = 0;
- size = round_up(hr_qp->sq.wqe_cnt << hr_qp->sq.wqe_shift, page_size);
-
- if (hr_dev->hw_rev != HNS_ROCE_HW_VER1 && hr_qp->sge.sge_cnt) {
- hr_qp->sge.sge_cnt = max(page_size/(1 << hr_qp->sge.sge_shift),
- (u32)hr_qp->sge.sge_cnt);
- hr_qp->sge.offset = size;
- size += round_up(hr_qp->sge.sge_cnt << hr_qp->sge.sge_shift,
- page_size);
- }
-
- hr_qp->rq.offset = size;
- size += round_up((hr_qp->rq.wqe_cnt << hr_qp->rq.wqe_shift), page_size);
- hr_qp->buff_size = size;
-
- /* Get wr and sge number which send */
- cap->max_send_wr = hr_qp->sq.wqe_cnt;
+ /* sync the parameters of kernel QP to user's configuration */
+ cap->max_send_wr = cnt;
cap->max_send_sge = hr_qp->sq.max_gs;
/* We don't support inline sends for kernel QPs (yet) */
@@ -691,8 +616,8 @@ static int alloc_rq_inline_buf(struct hns_roce_qp *hr_qp,
struct ib_qp_init_attr *init_attr)
{
u32 max_recv_sge = init_attr->cap.max_recv_sge;
+ u32 wqe_cnt = hr_qp->rq_inl_buf.wqe_cnt;
struct hns_roce_rinl_wqe *wqe_list;
- u32 wqe_cnt = hr_qp->rq.wqe_cnt;
int i;
/* allocate recv inline buf */
@@ -714,7 +639,6 @@ static int alloc_rq_inline_buf(struct hns_roce_qp *hr_qp,
wqe_list[i].sg_list = &wqe_list[0].sg_list[i * max_recv_sge];
hr_qp->rq_inl_buf.wqe_list = wqe_list;
- hr_qp->rq_inl_buf.wqe_cnt = wqe_cnt;
return 0;
@@ -727,140 +651,55 @@ err:
static void free_rq_inline_buf(struct hns_roce_qp *hr_qp)
{
- kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
+ if (hr_qp->rq_inl_buf.wqe_list)
+ kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
kfree(hr_qp->rq_inl_buf.wqe_list);
}
-static int map_wqe_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
- u32 page_shift, bool is_user)
-{
-/* WQE buffer include 3 parts: SQ, extend SGE and RQ. */
-#define HNS_ROCE_WQE_REGION_MAX 3
- struct hns_roce_buf_region regions[HNS_ROCE_WQE_REGION_MAX] = {};
- dma_addr_t *buf_list[HNS_ROCE_WQE_REGION_MAX] = {};
- struct ib_device *ibdev = &hr_dev->ib_dev;
- struct hns_roce_buf_region *r;
- int region_count;
- int buf_count;
- int ret;
- int i;
-
- region_count = split_wqe_buf_region(hr_dev, hr_qp, regions,
- ARRAY_SIZE(regions), page_shift);
-
- /* alloc a tmp list to store WQE buffers address */
- ret = hns_roce_alloc_buf_list(regions, buf_list, region_count);
- if (ret) {
- ibdev_err(ibdev, "Failed to alloc WQE buffer list\n");
- return ret;
- }
-
- for (i = 0; i < region_count; i++) {
- r = &regions[i];
- if (is_user)
- buf_count = hns_roce_get_umem_bufs(hr_dev, buf_list[i],
- r->count, r->offset, hr_qp->umem,
- page_shift);
- else
- buf_count = hns_roce_get_kmem_bufs(hr_dev, buf_list[i],
- r->count, r->offset, &hr_qp->hr_buf);
-
- if (buf_count != r->count) {
- ibdev_err(ibdev, "Failed to get %s WQE buf, expect %d = %d.\n",
- is_user ? "user" : "kernel",
- r->count, buf_count);
- ret = -ENOBUFS;
- goto done;
- }
- }
-
- hr_qp->wqe_bt_pg_shift = hr_dev->caps.mtt_ba_pg_sz;
- hns_roce_mtr_init(&hr_qp->mtr, PAGE_SHIFT + hr_qp->wqe_bt_pg_shift,
- page_shift);
- ret = hns_roce_mtr_attach(hr_dev, &hr_qp->mtr, buf_list, regions,
- region_count);
- if (ret)
- ibdev_err(ibdev, "Failed to attach WQE's mtr\n");
-
- goto done;
-
- hns_roce_mtr_cleanup(hr_dev, &hr_qp->mtr);
-done:
- hns_roce_free_buf_list(buf_list, region_count);
-
- return ret;
-}
-
static int alloc_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata, unsigned long addr)
{
- u32 page_shift = PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz;
struct ib_device *ibdev = &hr_dev->ib_dev;
- bool is_rq_buf_inline;
+ struct hns_roce_buf_attr buf_attr = {};
int ret;
- is_rq_buf_inline = (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) &&
- hns_roce_qp_has_rq(init_attr);
- if (is_rq_buf_inline) {
+ if (!udata && hr_qp->rq_inl_buf.wqe_cnt) {
ret = alloc_rq_inline_buf(hr_qp, init_attr);
if (ret) {
- ibdev_err(ibdev, "Failed to alloc inline RQ buffer\n");
+ ibdev_err(ibdev,
+ "failed to alloc inline buf, ret = %d.\n",
+ ret);
return ret;
}
- }
-
- if (udata) {
- hr_qp->umem = ib_umem_get(ibdev, addr, hr_qp->buff_size, 0);
- if (IS_ERR(hr_qp->umem)) {
- ret = PTR_ERR(hr_qp->umem);
- goto err_inline;
- }
} else {
- ret = hns_roce_buf_alloc(hr_dev, hr_qp->buff_size,
- (1 << page_shift) * 2,
- &hr_qp->hr_buf, page_shift);
- if (ret)
- goto err_inline;
+ hr_qp->rq_inl_buf.wqe_list = NULL;
}
- ret = map_wqe_buf(hr_dev, hr_qp, page_shift, udata);
- if (ret)
- goto err_alloc;
+ ret = set_wqe_buf_attr(hr_dev, hr_qp, &buf_attr);
+ if (ret) {
+ ibdev_err(ibdev, "failed to split WQE buf, ret = %d.\n", ret);
+ goto err_inline;
+ }
+ ret = hns_roce_mtr_create(hr_dev, &hr_qp->mtr, &buf_attr,
+ HNS_HW_PAGE_SHIFT + hr_dev->caps.mtt_ba_pg_sz,
+ udata, addr);
+ if (ret) {
+ ibdev_err(ibdev, "failed to create WQE mtr, ret = %d.\n", ret);
+ goto err_inline;
+ }
return 0;
-
err_inline:
- if (is_rq_buf_inline)
- free_rq_inline_buf(hr_qp);
-
-err_alloc:
- if (udata) {
- ib_umem_release(hr_qp->umem);
- hr_qp->umem = NULL;
- } else {
- hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
- }
-
- ibdev_err(ibdev, "Failed to alloc WQE buffer, ret %d.\n", ret);
+ free_rq_inline_buf(hr_qp);
return ret;
}
static void free_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
- hns_roce_mtr_cleanup(hr_dev, &hr_qp->mtr);
- if (hr_qp->umem) {
- ib_umem_release(hr_qp->umem);
- hr_qp->umem = NULL;
- }
-
- if (hr_qp->hr_buf.nbufs > 0)
- hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
-
- if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) &&
- hr_qp->rq.wqe_cnt)
- free_rq_inline_buf(hr_qp);
+ hns_roce_mtr_destroy(hr_dev, &hr_qp->mtr);
+ free_rq_inline_buf(hr_qp);
}
static inline bool user_qp_has_sdb(struct hns_roce_dev *hr_dev,
@@ -912,8 +751,8 @@ static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
"Failed to map user SQ doorbell\n");
goto err_out;
}
- hr_qp->sdb_en = 1;
- resp->cap_flags |= HNS_ROCE_SUPPORT_SQ_RECORD_DB;
+ hr_qp->en_flags |= HNS_ROCE_QP_CAP_SQ_RECORD_DB;
+ resp->cap_flags |= HNS_ROCE_QP_CAP_SQ_RECORD_DB;
}
if (user_qp_has_rdb(hr_dev, init_attr, udata, resp)) {
@@ -924,8 +763,8 @@ static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
"Failed to map user RQ doorbell\n");
goto err_sdb;
}
- hr_qp->rdb_en = 1;
- resp->cap_flags |= HNS_ROCE_SUPPORT_RQ_RECORD_DB;
+ hr_qp->en_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
+ resp->cap_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
}
} else {
/* QP doorbell register address */
@@ -942,13 +781,13 @@ static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
goto err_out;
}
*hr_qp->rdb.db_record = 0;
- hr_qp->rdb_en = 1;
+ hr_qp->en_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
}
}
return 0;
err_sdb:
- if (udata && hr_qp->sdb_en)
+ if (udata && hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB)
hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
err_out:
return ret;
@@ -961,12 +800,12 @@ static void free_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
udata, struct hns_roce_ucontext, ibucontext);
if (udata) {
- if (hr_qp->rdb_en)
+ if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
hns_roce_db_unmap_user(uctx, &hr_qp->rdb);
- if (hr_qp->sdb_en)
+ if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB)
hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
} else {
- if (hr_qp->rdb_en)
+ if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
hns_roce_free_db(hr_dev, &hr_qp->rdb);
}
}
@@ -1003,8 +842,7 @@ err_sq:
return ret;
}
-static void free_kernel_wrid(struct hns_roce_dev *hr_dev,
- struct hns_roce_qp *hr_qp)
+static void free_kernel_wrid(struct hns_roce_qp *hr_qp)
{
kfree(hr_qp->rq.wrid);
kfree(hr_qp->sq.wrid);
@@ -1025,10 +863,11 @@ static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
else
hr_qp->sq_signal_bits = IB_SIGNAL_REQ_WR;
- ret = set_rq_size(hr_dev, &init_attr->cap, udata,
- hns_roce_qp_has_rq(init_attr), hr_qp);
+ ret = set_rq_size(hr_dev, &init_attr->cap, hr_qp,
+ hns_roce_qp_has_rq(init_attr));
if (ret) {
- ibdev_err(ibdev, "Failed to set user RQ size\n");
+ ibdev_err(ibdev, "failed to set user RQ size, ret = %d.\n",
+ ret);
return ret;
}
@@ -1156,7 +995,7 @@ err_buf:
err_db:
free_qp_db(hr_dev, hr_qp, udata);
err_wrid:
- free_kernel_wrid(hr_dev, hr_qp);
+ free_kernel_wrid(hr_qp);
return ret;
}
@@ -1170,7 +1009,7 @@ void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
free_qpc(hr_dev, hr_qp);
free_qpn(hr_dev, hr_qp);
free_qp_buf(hr_dev, hr_qp);
- free_kernel_wrid(hr_dev, hr_qp);
+ free_kernel_wrid(hr_qp);
free_qp_db(hr_dev, hr_qp, udata);
kfree(hr_qp);
@@ -1339,10 +1178,10 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (ibqp->uobject &&
(attr_mask & IB_QP_STATE) && new_state == IB_QPS_ERR) {
- if (hr_qp->sdb_en == 1) {
+ if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB) {
hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr);
- if (hr_qp->rdb_en == 1)
+ if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
} else {
ibdev_warn(&hr_dev->ib_dev,
@@ -1431,10 +1270,9 @@ void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
}
}
-static void *get_wqe(struct hns_roce_qp *hr_qp, int offset)
+static inline void *get_wqe(struct hns_roce_qp *hr_qp, int offset)
{
-
- return hns_roce_buf_offset(&hr_qp->hr_buf, offset);
+ return hns_roce_buf_offset(hr_qp->mtr.kmem, offset);
}
void *hns_roce_get_recv_wqe(struct hns_roce_qp *hr_qp, int n)
@@ -1449,8 +1287,7 @@ void *hns_roce_get_send_wqe(struct hns_roce_qp *hr_qp, int n)
void *hns_roce_get_extend_sge(struct hns_roce_qp *hr_qp, int n)
{
- return hns_roce_buf_offset(&hr_qp->hr_buf, hr_qp->sge.offset +
- (n << hr_qp->sge.sge_shift));
+ return get_wqe(hr_qp, hr_qp->sge.offset + (n << hr_qp->sge.sge_shift));
}
bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
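Note on the hns_roce_qp.c hunks above: WQE buffer mapping moves from the open-coded split/alloc/attach sequence in map_wqe_buf() to a single hns_roce_mtr_create() call that serves both user (udata) and kernel QPs, and the separate sdb_en/rdb_en booleans collapse into the en_flags bitmask that is also reported to userspace through cap_flags. A condensed sketch of the new allocation path, using only names that appear in the diff (error reporting trimmed, so this is not a drop-in replacement):

	struct hns_roce_buf_attr buf_attr = {};
	int ret;

	/* kernel QPs may carry an inline RQ buffer; user QPs never do */
	if (!udata && hr_qp->rq_inl_buf.wqe_cnt) {
		ret = alloc_rq_inline_buf(hr_qp, init_attr);
		if (ret)
			return ret;
	}

	/* describe the SQ/SGE/RQ regions once ... */
	ret = set_wqe_buf_attr(hr_dev, hr_qp, &buf_attr);
	if (ret)
		goto err_inline;

	/* ... and let the MTR layer map user or kernel pages uniformly */
	ret = hns_roce_mtr_create(hr_dev, &hr_qp->mtr, &buf_attr,
				  HNS_HW_PAGE_SHIFT + hr_dev->caps.mtt_ba_pg_sz,
				  udata, addr);
	if (ret)
		goto err_inline;

	return 0;

err_inline:
	free_rq_inline_buf(hr_qp);
	return ret;

Teardown becomes symmetric: free_qp_buf() only needs hns_roce_mtr_destroy() plus free_rq_inline_buf(), because the umem-versus-kernel-buffer distinction now lives inside the mtr.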
diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c
index 5b3dd1a337d4..f40a000e94ee 100644
--- a/drivers/infiniband/hw/hns/hns_roce_srq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_srq.c
@@ -77,56 +77,56 @@ static int hns_roce_hw_destroy_srq(struct hns_roce_dev *dev,
HNS_ROCE_CMD_TIMEOUT_MSECS);
}
-static int hns_roce_srq_alloc(struct hns_roce_dev *hr_dev, u32 pdn, u32 cqn,
- u16 xrcd, struct hns_roce_mtt *hr_mtt,
- u64 db_rec_addr, struct hns_roce_srq *srq)
+static int alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
+ u32 pdn, u32 cqn, u16 xrcd, u64 db_rec_addr)
{
struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
struct hns_roce_cmd_mailbox *mailbox;
- dma_addr_t dma_handle_wqe;
- dma_addr_t dma_handle_idx;
- u64 *mtts_wqe;
- u64 *mtts_idx;
+ u64 mtts_wqe[MTT_MIN_COUNT] = { 0 };
+ u64 mtts_idx[MTT_MIN_COUNT] = { 0 };
+ dma_addr_t dma_handle_wqe = 0;
+ dma_addr_t dma_handle_idx = 0;
int ret;
/* Get the physical address of srq buf */
- mtts_wqe = hns_roce_table_find(hr_dev,
- &hr_dev->mr_table.mtt_srqwqe_table,
- srq->mtt.first_seg,
- &dma_handle_wqe);
- if (!mtts_wqe) {
- dev_err(hr_dev->dev, "Failed to find mtt for srq buf.\n");
- return -EINVAL;
+ ret = hns_roce_mtr_find(hr_dev, &srq->buf_mtr, 0, mtts_wqe,
+ ARRAY_SIZE(mtts_wqe), &dma_handle_wqe);
+ if (ret < 1) {
+ ibdev_err(ibdev, "Failed to find mtr for SRQ WQE\n");
+ return -ENOBUFS;
}
/* Get physical address of idx que buf */
- mtts_idx = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_idx_table,
- srq->idx_que.mtt.first_seg,
- &dma_handle_idx);
- if (!mtts_idx) {
- dev_err(hr_dev->dev,
- "Failed to find mtt for srq idx queue buf.\n");
- return -EINVAL;
+ ret = hns_roce_mtr_find(hr_dev, &srq->idx_que.mtr, 0, mtts_idx,
+ ARRAY_SIZE(mtts_idx), &dma_handle_idx);
+ if (ret < 1) {
+ ibdev_err(ibdev, "Failed to find mtr for SRQ idx\n");
+ return -ENOBUFS;
}
ret = hns_roce_bitmap_alloc(&srq_table->bitmap, &srq->srqn);
if (ret) {
- dev_err(hr_dev->dev,
- "Failed to alloc a bit from srq bitmap.\n");
+ ibdev_err(ibdev, "Failed to alloc SRQ number, err %d\n", ret);
return -ENOMEM;
}
ret = hns_roce_table_get(hr_dev, &srq_table->table, srq->srqn);
- if (ret)
+ if (ret) {
+ ibdev_err(ibdev, "Failed to get SRQC table, err %d\n", ret);
goto err_out;
+ }
ret = xa_err(xa_store(&srq_table->xa, srq->srqn, srq, GFP_KERNEL));
- if (ret)
+ if (ret) {
+ ibdev_err(ibdev, "Failed to store SRQC, err %d\n", ret);
goto err_put;
+ }
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
- if (IS_ERR(mailbox)) {
- ret = PTR_ERR(mailbox);
+ if (IS_ERR_OR_NULL(mailbox)) {
+ ret = -ENOMEM;
+ ibdev_err(ibdev, "Failed to alloc mailbox for SRQC\n");
goto err_xa;
}
@@ -136,8 +136,10 @@ static int hns_roce_srq_alloc(struct hns_roce_dev *hr_dev, u32 pdn, u32 cqn,
ret = hns_roce_hw_create_srq(hr_dev, mailbox, srq->srqn);
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
- if (ret)
+ if (ret) {
+ ibdev_err(ibdev, "Failed to config SRQC, err %d\n", ret);
goto err_xa;
+ }
atomic_set(&srq->refcount, 1);
init_completion(&srq->free);
@@ -154,8 +156,7 @@ err_out:
return ret;
}
-static void hns_roce_srq_free(struct hns_roce_dev *hr_dev,
- struct hns_roce_srq *srq)
+static void free_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
{
struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
int ret;
@@ -175,187 +176,104 @@ static void hns_roce_srq_free(struct hns_roce_dev *hr_dev,
hns_roce_bitmap_free(&srq_table->bitmap, srq->srqn, BITMAP_NO_RR);
}
-static int create_user_srq(struct hns_roce_srq *srq, struct ib_udata *udata,
- int srq_buf_size)
+static int alloc_srq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
+ struct ib_udata *udata, unsigned long addr)
{
- struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);
- struct hns_roce_ib_create_srq ucmd;
- struct hns_roce_buf *buf;
- int ret;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct hns_roce_buf_attr buf_attr = {};
+ int err;
+
+ srq->wqe_shift = ilog2(roundup_pow_of_two(max(HNS_ROCE_SGE_SIZE,
+ HNS_ROCE_SGE_SIZE *
+ srq->max_gs)));
+
+ buf_attr.page_shift = hr_dev->caps.srqwqe_buf_pg_sz + HNS_HW_PAGE_SHIFT;
+ buf_attr.region[0].size = to_hr_hem_entries_size(srq->wqe_cnt,
+ srq->wqe_shift);
+ buf_attr.region[0].hopnum = hr_dev->caps.srqwqe_hop_num;
+ buf_attr.region_count = 1;
+ buf_attr.fixed_page = true;
+
+ err = hns_roce_mtr_create(hr_dev, &srq->buf_mtr, &buf_attr,
+ hr_dev->caps.srqwqe_ba_pg_sz +
+ HNS_HW_PAGE_SHIFT, udata, addr);
+ if (err)
+ ibdev_err(ibdev, "Failed to alloc SRQ buf mtr, err %d\n", err);
+
+ return err;
+}
- if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
- return -EFAULT;
-
- srq->umem =
- ib_umem_get(srq->ibsrq.device, ucmd.buf_addr, srq_buf_size, 0);
- if (IS_ERR(srq->umem))
- return PTR_ERR(srq->umem);
-
- buf = &srq->buf;
- buf->npages = (ib_umem_page_count(srq->umem) +
- (1 << hr_dev->caps.srqwqe_buf_pg_sz) - 1) /
- (1 << hr_dev->caps.srqwqe_buf_pg_sz);
- buf->page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz;
- ret = hns_roce_mtt_init(hr_dev, buf->npages, buf->page_shift,
- &srq->mtt);
- if (ret)
- goto err_user_buf;
+static void free_srq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
+{
+ hns_roce_mtr_destroy(hr_dev, &srq->buf_mtr);
+}
- ret = hns_roce_ib_umem_write_mtt(hr_dev, &srq->mtt, srq->umem);
- if (ret)
- goto err_user_srq_mtt;
-
- /* config index queue BA */
- srq->idx_que.umem = ib_umem_get(srq->ibsrq.device, ucmd.que_addr,
- srq->idx_que.buf_size, 0);
- if (IS_ERR(srq->idx_que.umem)) {
- dev_err(hr_dev->dev, "ib_umem_get error for index queue\n");
- ret = PTR_ERR(srq->idx_que.umem);
- goto err_user_srq_mtt;
+static int alloc_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
+ struct ib_udata *udata, unsigned long addr)
+{
+ struct hns_roce_idx_que *idx_que = &srq->idx_que;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct hns_roce_buf_attr buf_attr = {};
+ int err;
+
+ srq->idx_que.entry_shift = ilog2(HNS_ROCE_IDX_QUE_ENTRY_SZ);
+
+ buf_attr.page_shift = hr_dev->caps.idx_buf_pg_sz + HNS_HW_PAGE_SHIFT;
+ buf_attr.region[0].size = to_hr_hem_entries_size(srq->wqe_cnt,
+ srq->idx_que.entry_shift);
+ buf_attr.region[0].hopnum = hr_dev->caps.idx_hop_num;
+ buf_attr.region_count = 1;
+ buf_attr.fixed_page = true;
+
+ err = hns_roce_mtr_create(hr_dev, &idx_que->mtr, &buf_attr,
+ hr_dev->caps.idx_ba_pg_sz + HNS_HW_PAGE_SHIFT,
+ udata, addr);
+ if (err) {
+ ibdev_err(ibdev, "Failed to alloc SRQ idx mtr, err %d\n", err);
+ return err;
}
- buf = &srq->idx_que.idx_buf;
- buf->npages = DIV_ROUND_UP(ib_umem_page_count(srq->idx_que.umem),
- 1 << hr_dev->caps.idx_buf_pg_sz);
- buf->page_shift = PAGE_SHIFT + hr_dev->caps.idx_buf_pg_sz;
- ret = hns_roce_mtt_init(hr_dev, buf->npages, buf->page_shift,
- &srq->idx_que.mtt);
- if (ret) {
- dev_err(hr_dev->dev, "hns_roce_mtt_init error for idx que\n");
- goto err_user_idx_mtt;
- }
+ if (!udata) {
+ idx_que->bitmap = bitmap_zalloc(srq->wqe_cnt, GFP_KERNEL);
+ if (!idx_que->bitmap) {
+ ibdev_err(ibdev, "Failed to alloc SRQ idx bitmap\n");
+ err = -ENOMEM;
+ goto err_idx_mtr;
+ }
- ret = hns_roce_ib_umem_write_mtt(hr_dev, &srq->idx_que.mtt,
- srq->idx_que.umem);
- if (ret) {
- dev_err(hr_dev->dev,
- "hns_roce_ib_umem_write_mtt error for idx que\n");
- goto err_user_idx_buf;
}
return 0;
+err_idx_mtr:
+ hns_roce_mtr_destroy(hr_dev, &idx_que->mtr);
-err_user_idx_buf:
- hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);
-
-err_user_idx_mtt:
- ib_umem_release(srq->idx_que.umem);
-
-err_user_srq_mtt:
- hns_roce_mtt_cleanup(hr_dev, &srq->mtt);
-
-err_user_buf:
- ib_umem_release(srq->umem);
-
- return ret;
+ return err;
}
-static int hns_roce_create_idx_que(struct ib_pd *pd, struct hns_roce_srq *srq,
- u32 page_shift)
+static void free_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
{
- struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
struct hns_roce_idx_que *idx_que = &srq->idx_que;
- idx_que->bitmap = bitmap_zalloc(srq->wqe_cnt, GFP_KERNEL);
- if (!idx_que->bitmap)
- return -ENOMEM;
-
- idx_que->buf_size = srq->idx_que.buf_size;
-
- if (hns_roce_buf_alloc(hr_dev, idx_que->buf_size, (1 << page_shift) * 2,
- &idx_que->idx_buf, page_shift)) {
- bitmap_free(idx_que->bitmap);
- return -ENOMEM;
- }
-
- return 0;
+ bitmap_free(idx_que->bitmap);
+ idx_que->bitmap = NULL;
+ hns_roce_mtr_destroy(hr_dev, &idx_que->mtr);
}
-static int create_kernel_srq(struct hns_roce_srq *srq, int srq_buf_size)
+static int alloc_srq_wrid(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
{
- struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);
- u32 page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz;
- int ret;
-
- if (hns_roce_buf_alloc(hr_dev, srq_buf_size, (1 << page_shift) * 2,
- &srq->buf, page_shift))
- return -ENOMEM;
-
srq->head = 0;
srq->tail = srq->wqe_cnt - 1;
-
- ret = hns_roce_mtt_init(hr_dev, srq->buf.npages, srq->buf.page_shift,
- &srq->mtt);
- if (ret)
- goto err_kernel_buf;
-
- ret = hns_roce_buf_write_mtt(hr_dev, &srq->mtt, &srq->buf);
- if (ret)
- goto err_kernel_srq_mtt;
-
- page_shift = PAGE_SHIFT + hr_dev->caps.idx_buf_pg_sz;
- ret = hns_roce_create_idx_que(srq->ibsrq.pd, srq, page_shift);
- if (ret) {
- dev_err(hr_dev->dev, "Create idx queue fail(%d)!\n", ret);
- goto err_kernel_srq_mtt;
- }
-
- /* Init mtt table for idx_que */
- ret = hns_roce_mtt_init(hr_dev, srq->idx_que.idx_buf.npages,
- srq->idx_que.idx_buf.page_shift,
- &srq->idx_que.mtt);
- if (ret)
- goto err_kernel_create_idx;
-
- /* Write buffer address into the mtt table */
- ret = hns_roce_buf_write_mtt(hr_dev, &srq->idx_que.mtt,
- &srq->idx_que.idx_buf);
- if (ret)
- goto err_kernel_idx_buf;
-
srq->wrid = kvmalloc_array(srq->wqe_cnt, sizeof(u64), GFP_KERNEL);
- if (!srq->wrid) {
- ret = -ENOMEM;
- goto err_kernel_idx_buf;
- }
+ if (!srq->wrid)
+ return -ENOMEM;
return 0;
-
-err_kernel_idx_buf:
- hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);
-
-err_kernel_create_idx:
- hns_roce_buf_free(hr_dev, srq->idx_que.buf_size,
- &srq->idx_que.idx_buf);
- kfree(srq->idx_que.bitmap);
-
-err_kernel_srq_mtt:
- hns_roce_mtt_cleanup(hr_dev, &srq->mtt);
-
-err_kernel_buf:
- hns_roce_buf_free(hr_dev, srq_buf_size, &srq->buf);
-
- return ret;
-}
-
-static void destroy_user_srq(struct hns_roce_dev *hr_dev,
- struct hns_roce_srq *srq)
-{
- hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);
- ib_umem_release(srq->idx_que.umem);
- hns_roce_mtt_cleanup(hr_dev, &srq->mtt);
- ib_umem_release(srq->umem);
}
-static void destroy_kernel_srq(struct hns_roce_dev *hr_dev,
- struct hns_roce_srq *srq, int srq_buf_size)
+static void free_srq_wrid(struct hns_roce_srq *srq)
{
- kvfree(srq->wrid);
- hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);
- hns_roce_buf_free(hr_dev, srq->idx_que.buf_size, &srq->idx_que.idx_buf);
- kfree(srq->idx_que.bitmap);
- hns_roce_mtt_cleanup(hr_dev, &srq->mtt);
- hns_roce_buf_free(hr_dev, srq_buf_size, &srq->buf);
+ kfree(srq->wrid);
+ srq->wrid = NULL;
}
int hns_roce_create_srq(struct ib_srq *ib_srq,
@@ -365,8 +283,8 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
struct hns_roce_dev *hr_dev = to_hr_dev(ib_srq->device);
struct hns_roce_ib_create_srq_resp resp = {};
struct hns_roce_srq *srq = to_hr_srq(ib_srq);
- int srq_desc_size;
- int srq_buf_size;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct hns_roce_ib_create_srq ucmd = {};
int ret = 0;
u32 cqn;
@@ -379,43 +297,47 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
spin_lock_init(&srq->lock);
srq->wqe_cnt = roundup_pow_of_two(init_attr->attr.max_wr + 1);
- srq->max_gs = init_attr->attr.max_sge;
-
- srq_desc_size = roundup_pow_of_two(max(HNS_ROCE_SGE_SIZE,
- HNS_ROCE_SGE_SIZE * srq->max_gs));
-
- srq->wqe_shift = ilog2(srq_desc_size);
-
- srq_buf_size = srq->wqe_cnt * srq_desc_size;
-
- srq->idx_que.entry_sz = HNS_ROCE_IDX_QUE_ENTRY_SZ;
- srq->idx_que.buf_size = srq->wqe_cnt * srq->idx_que.entry_sz;
- srq->mtt.mtt_type = MTT_TYPE_SRQWQE;
- srq->idx_que.mtt.mtt_type = MTT_TYPE_IDX;
+ srq->max_gs = init_attr->attr.max_sge + HNS_ROCE_RESERVED_SGE;
if (udata) {
- ret = create_user_srq(srq, udata, srq_buf_size);
+ ret = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
if (ret) {
- dev_err(hr_dev->dev, "Create user srq failed\n");
- goto err_srq;
+ ibdev_err(ibdev, "Failed to copy SRQ udata, err %d\n",
+ ret);
+ return ret;
}
- } else {
- ret = create_kernel_srq(srq, srq_buf_size);
+ }
+
+ ret = alloc_srq_buf(hr_dev, srq, udata, ucmd.buf_addr);
+ if (ret) {
+ ibdev_err(ibdev, "Failed to alloc SRQ buffer, err %d\n", ret);
+ return ret;
+ }
+
+ ret = alloc_srq_idx(hr_dev, srq, udata, ucmd.que_addr);
+ if (ret) {
+ ibdev_err(ibdev, "Failed to alloc SRQ idx, err %d\n", ret);
+ goto err_buf_alloc;
+ }
+
+ if (!udata) {
+ ret = alloc_srq_wrid(hr_dev, srq);
if (ret) {
- dev_err(hr_dev->dev, "Create kernel srq failed\n");
- goto err_srq;
+ ibdev_err(ibdev, "Failed to alloc SRQ wrid, err %d\n",
+ ret);
+ goto err_idx_alloc;
}
}
cqn = ib_srq_has_cq(init_attr->srq_type) ?
to_hr_cq(init_attr->ext.cq)->cqn : 0;
-
srq->db_reg_l = hr_dev->reg_base + SRQ_DB_REG;
- ret = hns_roce_srq_alloc(hr_dev, to_hr_pd(ib_srq->pd)->pdn, cqn, 0,
- &srq->mtt, 0, srq);
- if (ret)
- goto err_wrid;
+ ret = alloc_srqc(hr_dev, srq, to_hr_pd(ib_srq->pd)->pdn, cqn, 0, 0);
+ if (ret) {
+ ibdev_err(ibdev, "Failed to alloc SRQ context, err %d\n", ret);
+ goto err_wrid_alloc;
+ }
srq->event = hns_roce_ib_srq_event;
resp.srqn = srq->srqn;
@@ -431,15 +353,13 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
return 0;
err_srqc_alloc:
- hns_roce_srq_free(hr_dev, srq);
-
-err_wrid:
- if (udata)
- destroy_user_srq(hr_dev, srq);
- else
- destroy_kernel_srq(hr_dev, srq, srq_buf_size);
-
-err_srq:
+ free_srqc(hr_dev, srq);
+err_wrid_alloc:
+ free_srq_wrid(srq);
+err_idx_alloc:
+ free_srq_idx(hr_dev, srq);
+err_buf_alloc:
+ free_srq_buf(hr_dev, srq);
return ret;
}
@@ -448,18 +368,10 @@ void hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
struct hns_roce_srq *srq = to_hr_srq(ibsrq);
- hns_roce_srq_free(hr_dev, srq);
- hns_roce_mtt_cleanup(hr_dev, &srq->mtt);
-
- if (udata) {
- hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);
- } else {
- kvfree(srq->wrid);
- hns_roce_buf_free(hr_dev, srq->wqe_cnt << srq->wqe_shift,
- &srq->buf);
- }
- ib_umem_release(srq->idx_que.umem);
- ib_umem_release(srq->umem);
+ free_srqc(hr_dev, srq);
+ free_srq_idx(hr_dev, srq);
+ free_srq_wrid(srq);
+ free_srq_buf(hr_dev, srq);
}
int hns_roce_init_srq_table(struct hns_roce_dev *hr_dev)
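The hns_roce_srq.c rewrite follows the same pattern: the user and kernel creation paths (create_user_srq()/create_kernel_srq()) are replaced by symmetric helper pairs — alloc/free_srq_buf(), alloc/free_srq_idx() and alloc/free_srq_wrid() — and hns_roce_create_srq()/hns_roce_destroy_srq() call them in order. Each queue is described once through a hns_roce_buf_attr and handed to hns_roce_mtr_create(), which picks the umem or kernel path depending on whether udata is present. A minimal single-region sketch for the WQE buffer, with field values copied from the hunk above (only names shown in the diff are assumed):

	struct hns_roce_buf_attr buf_attr = {};
	int err;

	buf_attr.page_shift = hr_dev->caps.srqwqe_buf_pg_sz + HNS_HW_PAGE_SHIFT;
	buf_attr.region[0].size = to_hr_hem_entries_size(srq->wqe_cnt,
							 srq->wqe_shift);
	buf_attr.region[0].hopnum = hr_dev->caps.srqwqe_hop_num;
	buf_attr.region_count = 1;
	buf_attr.fixed_page = true;

	/* udata == NULL selects the kernel allocation path inside the mtr */
	err = hns_roce_mtr_create(hr_dev, &srq->buf_mtr, &buf_attr,
				  hr_dev->caps.srqwqe_ba_pg_sz + HNS_HW_PAGE_SHIFT,
				  udata, addr);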
diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h
index 3c62c9327a9c..49d92638e0db 100644
--- a/drivers/infiniband/hw/i40iw/i40iw.h
+++ b/drivers/infiniband/hw/i40iw/i40iw.h
@@ -382,15 +382,6 @@ static inline struct i40iw_mr *to_iwmr(struct ib_mr *ibmr)
}
/**
- * to_iwmr_from_ibfmr - get device memory region
- * @ibfmr: ib fmr
- **/
-static inline struct i40iw_mr *to_iwmr_from_ibfmr(struct ib_fmr *ibfmr)
-{
- return container_of(ibfmr, struct i40iw_mr, ibfmr);
-}
-
-/**
* to_iwmw - get device memory window
* @ibmw: ib memory window
**/
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 1b6fb1380961..19af29a48c55 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -83,7 +83,6 @@ static int i40iw_query_device(struct ib_device *ibdev,
props->max_qp_rd_atom = I40IW_MAX_IRD_SIZE;
props->max_qp_init_rd_atom = props->max_qp_rd_atom;
props->atomic_cap = IB_ATOMIC_NONE;
- props->max_map_per_fmr = 1;
props->max_fast_reg_page_list_len = I40IW_MAX_PAGES_PER_FMR;
return 0;
}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.h b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
index 3a413752ccc3..331bc21cbcc7 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
@@ -89,7 +89,6 @@ struct i40iw_mr {
union {
struct ib_mr ibmr;
struct ib_mw ibmw;
- struct ib_fmr ibfmr;
};
struct ib_umem *region;
u16 type;
diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
index 02a169f8027b..5f8f8d5c0ce0 100644
--- a/drivers/infiniband/hw/mlx4/ah.c
+++ b/drivers/infiniband/hw/mlx4/ah.c
@@ -141,10 +141,11 @@ static int create_iboe_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
return 0;
}
-int mlx4_ib_create_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr,
- u32 flags, struct ib_udata *udata)
-
+int mlx4_ib_create_ah(struct ib_ah *ib_ah, struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata)
{
+ struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
+
if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH))
return -EINVAL;
@@ -167,12 +168,14 @@ int mlx4_ib_create_ah_slave(struct ib_ah *ah, struct rdma_ah_attr *ah_attr,
int slave_sgid_index, u8 *s_mac, u16 vlan_tag)
{
struct rdma_ah_attr slave_attr = *ah_attr;
+ struct rdma_ah_init_attr init_attr = {};
struct mlx4_ib_ah *mah = to_mah(ah);
int ret;
slave_attr.grh.sgid_attr = NULL;
slave_attr.grh.sgid_index = slave_sgid_index;
- ret = mlx4_ib_create_ah(ah, &slave_attr, 0, NULL);
+ init_attr.ah_attr = &slave_attr;
+ ret = mlx4_ib_create_ah(ah, &init_attr, NULL);
if (ret)
return ret;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 275722cec8c6..816d28854a8e 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -558,7 +558,6 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
props->max_mcast_grp;
- props->max_map_per_fmr = dev->dev->caps.max_fmr_maps;
props->hca_core_clock = dev->dev->caps.hca_core_clock * 1000UL;
props->timestamp_mask = 0xFFFFFFFFFFFFULL;
props->max_ah = INT_MAX;
@@ -2600,13 +2599,6 @@ static const struct ib_device_ops mlx4_ib_dev_wq_ops = {
.modify_wq = mlx4_ib_modify_wq,
};
-static const struct ib_device_ops mlx4_ib_dev_fmr_ops = {
- .alloc_fmr = mlx4_ib_fmr_alloc,
- .dealloc_fmr = mlx4_ib_fmr_dealloc,
- .map_phys_fmr = mlx4_ib_map_phys_fmr,
- .unmap_fmr = mlx4_ib_unmap_fmr,
-};
-
static const struct ib_device_ops mlx4_ib_dev_mw_ops = {
.alloc_mw = mlx4_ib_alloc_mw,
.dealloc_mw = mlx4_ib_dealloc_mw,
@@ -2724,9 +2716,6 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_wq_ops);
}
- if (!mlx4_is_slave(ibdev->dev))
- ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_fmr_ops);
-
if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
ibdev->ib_dev.uverbs_cmd_mask |=
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index d188573187fa..6f4ea1067095 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -146,11 +146,6 @@ struct mlx4_ib_mw {
struct mlx4_mw mmw;
};
-struct mlx4_ib_fmr {
- struct ib_fmr ibfmr;
- struct mlx4_fmr mfmr;
-};
-
#define MAX_REGS_PER_FLOW 2
struct mlx4_flow_reg_id {
@@ -679,11 +674,6 @@ static inline struct mlx4_ib_mw *to_mmw(struct ib_mw *ibmw)
return container_of(ibmw, struct mlx4_ib_mw, ibmw);
}
-static inline struct mlx4_ib_fmr *to_mfmr(struct ib_fmr *ibfmr)
-{
- return container_of(ibfmr, struct mlx4_ib_fmr, ibfmr);
-}
-
static inline struct mlx4_ib_flow *to_mflow(struct ib_flow *ibflow)
{
return container_of(ibflow, struct mlx4_ib_flow, ibflow);
@@ -752,7 +742,7 @@ int mlx4_ib_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
-int mlx4_ib_create_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr, u32 flags,
+int mlx4_ib_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
struct ib_udata *udata);
int mlx4_ib_create_ah_slave(struct ib_ah *ah, struct rdma_ah_attr *ah_attr,
int slave_sgid_index, u8 *s_mac, u16 vlan_tag);
@@ -794,12 +784,6 @@ int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
int mlx4_ib_mad_init(struct mlx4_ib_dev *dev);
void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev);
-struct ib_fmr *mlx4_ib_fmr_alloc(struct ib_pd *pd, int mr_access_flags,
- struct ib_fmr_attr *fmr_attr);
-int mlx4_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, int npages,
- u64 iova);
-int mlx4_ib_unmap_fmr(struct list_head *fmr_list);
-int mlx4_ib_fmr_dealloc(struct ib_fmr *fmr);
int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
struct ib_port_attr *props, int netw_view);
int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index b0121c90c561..7e0b205c05eb 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -380,7 +380,7 @@ static struct ib_umem *mlx4_get_umem_mr(struct ib_device *device, u64 start,
unsigned long untagged_start = untagged_addr(start);
struct vm_area_struct *vma;
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
/*
* FIXME: Ideally this would iterate over all the vmas that
* cover the memory, but for now it requires a single vma to
@@ -395,7 +395,7 @@ static struct ib_umem *mlx4_get_umem_mr(struct ib_device *device, u64 start,
access_flags |= IB_ACCESS_LOCAL_WRITE;
}
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
}
return ib_umem_get(device, start, length, access_flags);
@@ -698,99 +698,6 @@ err_free:
return ERR_PTR(err);
}
-struct ib_fmr *mlx4_ib_fmr_alloc(struct ib_pd *pd, int acc,
- struct ib_fmr_attr *fmr_attr)
-{
- struct mlx4_ib_dev *dev = to_mdev(pd->device);
- struct mlx4_ib_fmr *fmr;
- int err = -ENOMEM;
-
- fmr = kmalloc(sizeof *fmr, GFP_KERNEL);
- if (!fmr)
- return ERR_PTR(-ENOMEM);
-
- err = mlx4_fmr_alloc(dev->dev, to_mpd(pd)->pdn, convert_access(acc),
- fmr_attr->max_pages, fmr_attr->max_maps,
- fmr_attr->page_shift, &fmr->mfmr);
- if (err)
- goto err_free;
-
- err = mlx4_fmr_enable(to_mdev(pd->device)->dev, &fmr->mfmr);
- if (err)
- goto err_mr;
-
- fmr->ibfmr.rkey = fmr->ibfmr.lkey = fmr->mfmr.mr.key;
-
- return &fmr->ibfmr;
-
-err_mr:
- (void) mlx4_mr_free(to_mdev(pd->device)->dev, &fmr->mfmr.mr);
-
-err_free:
- kfree(fmr);
-
- return ERR_PTR(err);
-}
-
-int mlx4_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
- int npages, u64 iova)
-{
- struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);
- struct mlx4_ib_dev *dev = to_mdev(ifmr->ibfmr.device);
-
- return mlx4_map_phys_fmr(dev->dev, &ifmr->mfmr, page_list, npages, iova,
- &ifmr->ibfmr.lkey, &ifmr->ibfmr.rkey);
-}
-
-int mlx4_ib_unmap_fmr(struct list_head *fmr_list)
-{
- struct ib_fmr *ibfmr;
- int err;
- struct mlx4_dev *mdev = NULL;
-
- list_for_each_entry(ibfmr, fmr_list, list) {
- if (mdev && to_mdev(ibfmr->device)->dev != mdev)
- return -EINVAL;
- mdev = to_mdev(ibfmr->device)->dev;
- }
-
- if (!mdev)
- return 0;
-
- list_for_each_entry(ibfmr, fmr_list, list) {
- struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);
-
- mlx4_fmr_unmap(mdev, &ifmr->mfmr, &ifmr->ibfmr.lkey, &ifmr->ibfmr.rkey);
- }
-
- /*
- * Make sure all MPT status updates are visible before issuing
- * SYNC_TPT firmware command.
- */
- wmb();
-
- err = mlx4_SYNC_TPT(mdev);
- if (err)
- pr_warn("SYNC_TPT error %d when "
- "unmapping FMRs\n", err);
-
- return 0;
-}
-
-int mlx4_ib_fmr_dealloc(struct ib_fmr *ibfmr)
-{
- struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);
- struct mlx4_ib_dev *dev = to_mdev(ibfmr->device);
- int err;
-
- err = mlx4_fmr_free(dev->dev, &ifmr->mfmr);
-
- if (!err)
- kfree(ifmr);
-
- return err;
-}
-
static int mlx4_set_page(struct ib_mr *ibmr, u64 addr)
{
struct mlx4_ib_mr *mr = to_mmr(ibmr);
diff --git a/drivers/infiniband/hw/mlx5/Makefile b/drivers/infiniband/hw/mlx5/Makefile
index 2a334800f109..8cca61c671f8 100644
--- a/drivers/infiniband/hw/mlx5/Makefile
+++ b/drivers/infiniband/hw/mlx5/Makefile
@@ -1,11 +1,26 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_MLX5_INFINIBAND) += mlx5_ib.o
+obj-$(CONFIG_MLX5_INFINIBAND) += mlx5_ib.o
+
+mlx5_ib-y := ah.o \
+ cmd.o \
+ cong.o \
+ cq.o \
+ doorbell.o \
+ gsi.o \
+ ib_virt.o \
+ mad.o \
+ main.o \
+ mem.o \
+ mr.o \
+ qp.o \
+ qpc.o \
+ restrack.o \
+ srq.o \
+ srq_cmd.o \
+ wr.o
-mlx5_ib-y := main.o cq.o doorbell.o qp.o mem.o srq_cmd.o \
- srq.o mr.o ah.o mad.o gsi.o ib_virt.o cmd.o \
- cong.o restrack.o
mlx5_ib-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += odp.o
mlx5_ib-$(CONFIG_MLX5_ESWITCH) += ib_rep.o
-mlx5_ib-$(CONFIG_INFINIBAND_USER_ACCESS) += devx.o
-mlx5_ib-$(CONFIG_INFINIBAND_USER_ACCESS) += flow.o
-mlx5_ib-$(CONFIG_INFINIBAND_USER_ACCESS) += qos.o
+mlx5_ib-$(CONFIG_INFINIBAND_USER_ACCESS) += devx.o \
+ flow.o \
+ qos.o
diff --git a/drivers/infiniband/hw/mlx5/ah.c b/drivers/infiniband/hw/mlx5/ah.c
index 80642dd359bc..59e5ec39b447 100644
--- a/drivers/infiniband/hw/mlx5/ah.c
+++ b/drivers/infiniband/hw/mlx5/ah.c
@@ -32,9 +32,28 @@
#include "mlx5_ib.h"
+static __be16 mlx5_ah_get_udp_sport(const struct mlx5_ib_dev *dev,
+ const struct rdma_ah_attr *ah_attr)
+{
+ enum ib_gid_type gid_type = ah_attr->grh.sgid_attr->gid_type;
+ __be16 sport;
+
+ if ((gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) &&
+ (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) &&
+ (ah_attr->grh.flow_label & IB_GRH_FLOWLABEL_MASK))
+ sport = cpu_to_be16(
+ rdma_flow_label_to_udp_sport(ah_attr->grh.flow_label));
+ else
+ sport = mlx5_get_roce_udp_sport_min(dev,
+ ah_attr->grh.sgid_attr);
+
+ return sport;
+}
+
static void create_ib_ah(struct mlx5_ib_dev *dev, struct mlx5_ib_ah *ah,
- struct rdma_ah_attr *ah_attr)
+ struct rdma_ah_init_attr *init_attr)
{
+ struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
enum ib_gid_type gid_type;
if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
@@ -51,12 +70,15 @@ static void create_ib_ah(struct mlx5_ib_dev *dev, struct mlx5_ib_ah *ah,
ah->av.stat_rate_sl = (rdma_ah_get_static_rate(ah_attr) << 4);
if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
+ if (init_attr->xmit_slave)
+ ah->xmit_port =
+ mlx5_lag_get_slave_port(dev->mdev,
+ init_attr->xmit_slave);
gid_type = ah_attr->grh.sgid_attr->gid_type;
memcpy(ah->av.rmac, ah_attr->roce.dmac,
sizeof(ah_attr->roce.dmac));
- ah->av.udp_sport =
- mlx5_get_roce_udp_sport(dev, ah_attr->grh.sgid_attr);
+ ah->av.udp_sport = mlx5_ah_get_udp_sport(dev, ah_attr);
ah->av.stat_rate_sl |= (rdma_ah_get_sl(ah_attr) & 0x7) << 1;
if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
#define MLX5_ECN_ENABLED BIT(1)
@@ -68,10 +90,11 @@ static void create_ib_ah(struct mlx5_ib_dev *dev, struct mlx5_ib_ah *ah,
}
}
-int mlx5_ib_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr,
- u32 flags, struct ib_udata *udata)
+int mlx5_ib_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata)
{
+ struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
struct mlx5_ib_ah *ah = to_mah(ibah);
struct mlx5_ib_dev *dev = to_mdev(ibah->device);
enum rdma_ah_attr_type ah_type = ah_attr->type;
@@ -97,7 +120,7 @@ int mlx5_ib_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr,
return err;
}
- create_ib_ah(dev, ah, ah_attr);
+ create_ib_ah(dev, ah, init_attr);
return 0;
}
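Both the mlx4 and mlx5 create_ah entry points now take a struct rdma_ah_init_attr, which bundles the rdma_ah_attr with an optional xmit_slave netdev used for LAG port selection; the mlx4_ib_create_ah_slave() hunk earlier shows a caller building one with only ah_attr filled in. On the mlx5 side, the new mlx5_ah_get_udp_sport() helper derives the RoCE v2 UDP source port from the GRH flow label when one is set, instead of always using the per-GID minimum. A sketch of that decision, restating the hunk above with comments (no APIs beyond those in the diff are assumed):

	if ((gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) &&
	    (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) &&
	    (ah_attr->grh.flow_label & IB_GRH_FLOWLABEL_MASK))
		/* deterministic mapping of the flow label into the UDP sport range */
		sport = cpu_to_be16(
			rdma_flow_label_to_udp_sport(ah_attr->grh.flow_label));
	else
		/* fall back to the per-GID minimum port, as before */
		sport = mlx5_get_roce_udp_sport_min(dev, ah_attr->grh.sgid_attr);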
diff --git a/drivers/infiniband/hw/mlx5/cmd.c b/drivers/infiniband/hw/mlx5/cmd.c
index 4c26492ab8a3..cc24c711e92a 100644
--- a/drivers/infiniband/hw/mlx5/cmd.c
+++ b/drivers/infiniband/hw/mlx5/cmd.c
@@ -1,46 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
- * Copyright (c) 2017, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2017-2020, Mellanox Technologies inc. All rights reserved.
*/
#include "cmd.h"
int mlx5_cmd_dump_fill_mkey(struct mlx5_core_dev *dev, u32 *mkey)
{
- u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {};
int err;
MLX5_SET(query_special_contexts_in, in, opcode,
MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_inout(dev, query_special_contexts, in, out);
if (!err)
*mkey = MLX5_GET(query_special_contexts_out, out,
dump_fill_mkey);
@@ -50,12 +23,12 @@ int mlx5_cmd_dump_fill_mkey(struct mlx5_core_dev *dev, u32 *mkey)
int mlx5_cmd_null_mkey(struct mlx5_core_dev *dev, u32 *null_mkey)
{
u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {};
- u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {};
+ u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {};
int err;
MLX5_SET(query_special_contexts_in, in, opcode,
MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_inout(dev, query_special_contexts, in, out);
if (!err)
*null_mkey = MLX5_GET(query_special_contexts_out, out,
null_mkey);
@@ -63,23 +36,15 @@ int mlx5_cmd_null_mkey(struct mlx5_core_dev *dev, u32 *null_mkey)
}
int mlx5_cmd_query_cong_params(struct mlx5_core_dev *dev, int cong_point,
- void *out, int out_size)
+ void *out)
{
- u32 in[MLX5_ST_SZ_DW(query_cong_params_in)] = { };
+ u32 in[MLX5_ST_SZ_DW(query_cong_params_in)] = {};
MLX5_SET(query_cong_params_in, in, opcode,
MLX5_CMD_OP_QUERY_CONG_PARAMS);
MLX5_SET(query_cong_params_in, in, cong_protocol, cong_point);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size);
-}
-
-int mlx5_cmd_modify_cong_params(struct mlx5_core_dev *dev,
- void *in, int in_size)
-{
- u32 out[MLX5_ST_SZ_DW(modify_cong_params_out)] = { };
-
- return mlx5_cmd_exec(dev, in, in_size, out, sizeof(out));
+ return mlx5_cmd_exec_inout(dev, query_cong_params, in, out);
}
int mlx5_cmd_alloc_memic(struct mlx5_dm *dm, phys_addr_t *addr,
@@ -133,7 +98,7 @@ int mlx5_cmd_alloc_memic(struct mlx5_dm *dm, phys_addr_t *addr,
MLX5_SET64(alloc_memic_in, in, range_start_addr,
hw_start_addr + (page_idx * PAGE_SIZE));
- ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ ret = mlx5_cmd_exec_inout(dev, alloc_memic, in, out);
if (ret) {
spin_lock(&dm->lock);
bitmap_clear(dm->memic_alloc_pages,
@@ -162,8 +127,7 @@ void mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr, u64 length)
struct mlx5_core_dev *dev = dm->dev;
u64 hw_start_addr = MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr);
u32 num_pages = DIV_ROUND_UP(length, PAGE_SIZE);
- u32 out[MLX5_ST_SZ_DW(dealloc_memic_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(dealloc_memic_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(dealloc_memic_in)] = {};
u64 start_page_idx;
int err;
@@ -174,7 +138,7 @@ void mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr, u64 length)
MLX5_SET64(dealloc_memic_in, in, memic_start_addr, addr);
MLX5_SET(dealloc_memic_in, in, memic_size, length);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_in(dev, dealloc_memic, in);
if (err)
return;
@@ -198,49 +162,46 @@ int mlx5_cmd_query_ext_ppcnt_counters(struct mlx5_core_dev *dev, void *out)
void mlx5_cmd_destroy_tir(struct mlx5_core_dev *dev, u32 tirn, u16 uid)
{
- u32 in[MLX5_ST_SZ_DW(destroy_tir_in)] = {};
- u32 out[MLX5_ST_SZ_DW(destroy_tir_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(destroy_tir_in)] = {};
MLX5_SET(destroy_tir_in, in, opcode, MLX5_CMD_OP_DESTROY_TIR);
MLX5_SET(destroy_tir_in, in, tirn, tirn);
MLX5_SET(destroy_tir_in, in, uid, uid);
- mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec_in(dev, destroy_tir, in);
}
void mlx5_cmd_destroy_tis(struct mlx5_core_dev *dev, u32 tisn, u16 uid)
{
- u32 in[MLX5_ST_SZ_DW(destroy_tis_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(destroy_tis_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_tis_in)] = {};
MLX5_SET(destroy_tis_in, in, opcode, MLX5_CMD_OP_DESTROY_TIS);
MLX5_SET(destroy_tis_in, in, tisn, tisn);
MLX5_SET(destroy_tis_in, in, uid, uid);
- mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec_in(dev, destroy_tis, in);
}
void mlx5_cmd_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn, u16 uid)
{
- u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {};
- u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {};
MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
MLX5_SET(destroy_rqt_in, in, rqtn, rqtn);
MLX5_SET(destroy_rqt_in, in, uid, uid);
- mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec_in(dev, destroy_rqt, in);
}
int mlx5_cmd_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn,
u16 uid)
{
- u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {};
int err;
MLX5_SET(alloc_transport_domain_in, in, opcode,
MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN);
MLX5_SET(alloc_transport_domain_in, in, uid, uid);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_inout(dev, alloc_transport_domain, in, out);
if (!err)
*tdn = MLX5_GET(alloc_transport_domain_out, out,
transport_domain);
@@ -251,32 +212,29 @@ int mlx5_cmd_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn,
void mlx5_cmd_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn,
u16 uid)
{
- u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(dealloc_transport_domain_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)] = {};
MLX5_SET(dealloc_transport_domain_in, in, opcode,
MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
MLX5_SET(dealloc_transport_domain_in, in, uid, uid);
MLX5_SET(dealloc_transport_domain_in, in, transport_domain, tdn);
- mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec_in(dev, dealloc_transport_domain, in);
}
void mlx5_cmd_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn, u16 uid)
{
- u32 out[MLX5_ST_SZ_DW(dealloc_pd_out)] = {};
- u32 in[MLX5_ST_SZ_DW(dealloc_pd_in)] = {};
+ u32 in[MLX5_ST_SZ_DW(dealloc_pd_in)] = {};
MLX5_SET(dealloc_pd_in, in, opcode, MLX5_CMD_OP_DEALLOC_PD);
MLX5_SET(dealloc_pd_in, in, pd, pdn);
MLX5_SET(dealloc_pd_in, in, uid, uid);
- mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec_in(dev, dealloc_pd, in);
}
int mlx5_cmd_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid,
u32 qpn, u16 uid)
{
- u32 out[MLX5_ST_SZ_DW(attach_to_mcg_out)] = {};
- u32 in[MLX5_ST_SZ_DW(attach_to_mcg_in)] = {};
+ u32 in[MLX5_ST_SZ_DW(attach_to_mcg_in)] = {};
void *gid;
MLX5_SET(attach_to_mcg_in, in, opcode, MLX5_CMD_OP_ATTACH_TO_MCG);
@@ -284,14 +242,13 @@ int mlx5_cmd_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid,
MLX5_SET(attach_to_mcg_in, in, uid, uid);
gid = MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid);
memcpy(gid, mgid, sizeof(*mgid));
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, attach_to_mcg, in);
}
int mlx5_cmd_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid,
u32 qpn, u16 uid)
{
- u32 out[MLX5_ST_SZ_DW(detach_from_mcg_out)] = {};
- u32 in[MLX5_ST_SZ_DW(detach_from_mcg_in)] = {};
+ u32 in[MLX5_ST_SZ_DW(detach_from_mcg_in)] = {};
void *gid;
MLX5_SET(detach_from_mcg_in, in, opcode, MLX5_CMD_OP_DETACH_FROM_MCG);
@@ -299,18 +256,18 @@ int mlx5_cmd_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid,
MLX5_SET(detach_from_mcg_in, in, uid, uid);
gid = MLX5_ADDR_OF(detach_from_mcg_in, in, multicast_gid);
memcpy(gid, mgid, sizeof(*mgid));
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, detach_from_mcg, in);
}
int mlx5_cmd_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn, u16 uid)
{
u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {};
- u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)] = {};
+ u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)] = {};
int err;
MLX5_SET(alloc_xrcd_in, in, opcode, MLX5_CMD_OP_ALLOC_XRCD);
MLX5_SET(alloc_xrcd_in, in, uid, uid);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_inout(dev, alloc_xrcd, in, out);
if (!err)
*xrcdn = MLX5_GET(alloc_xrcd_out, out, xrcd);
return err;
@@ -318,30 +275,12 @@ int mlx5_cmd_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn, u16 uid)
int mlx5_cmd_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn, u16 uid)
{
- u32 out[MLX5_ST_SZ_DW(dealloc_xrcd_out)] = {};
- u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)] = {};
+ u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)] = {};
MLX5_SET(dealloc_xrcd_in, in, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
MLX5_SET(dealloc_xrcd_in, in, xrcd, xrcdn);
MLX5_SET(dealloc_xrcd_in, in, uid, uid);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-
-int mlx5_cmd_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id,
- u16 uid)
-{
- u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {0};
- int err;
-
- MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER);
- MLX5_SET(alloc_q_counter_in, in, uid, uid);
-
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
- if (!err)
- *counter_id = MLX5_GET(alloc_q_counter_out, out,
- counter_set_id);
- return err;
+ return mlx5_cmd_exec_in(dev, dealloc_xrcd, in);
}
int mlx5_cmd_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
@@ -367,7 +306,7 @@ int mlx5_cmd_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
data = MLX5_ADDR_OF(mad_ifc_in, in, mad);
memcpy(data, inb, MLX5_FLD_SZ_BYTES(mad_ifc_in, mad));
- err = mlx5_cmd_exec(dev, in, inlen, out, outlen);
+ err = mlx5_cmd_exec_inout(dev, mad_ifc, in, out);
if (err)
goto out;
diff --git a/drivers/infiniband/hw/mlx5/cmd.h b/drivers/infiniband/hw/mlx5/cmd.h
index 945ebce73613..f4d8558db434 100644
--- a/drivers/infiniband/hw/mlx5/cmd.h
+++ b/drivers/infiniband/hw/mlx5/cmd.h
@@ -40,10 +40,8 @@
int mlx5_cmd_dump_fill_mkey(struct mlx5_core_dev *dev, u32 *mkey);
int mlx5_cmd_null_mkey(struct mlx5_core_dev *dev, u32 *null_mkey);
int mlx5_cmd_query_cong_params(struct mlx5_core_dev *dev, int cong_point,
- void *out, int out_size);
+ void *out);
int mlx5_cmd_query_ext_ppcnt_counters(struct mlx5_core_dev *dev, void *out);
-int mlx5_cmd_modify_cong_params(struct mlx5_core_dev *mdev,
- void *in, int in_size);
int mlx5_cmd_alloc_memic(struct mlx5_dm *dm, phys_addr_t *addr,
u64 length, u32 alignment);
void mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr, u64 length);
@@ -61,8 +59,6 @@ int mlx5_cmd_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid,
u32 qpn, u16 uid);
int mlx5_cmd_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn, u16 uid);
int mlx5_cmd_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn, u16 uid);
-int mlx5_cmd_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id,
- u16 uid);
int mlx5_cmd_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
u16 opmod, u8 port);
#endif /* MLX5_IB_CMD_H */
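Throughout cmd.c the open-coded mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)) calls are converted to the mlx5_cmd_exec_in()/mlx5_cmd_exec_inout() helpers, which take the IFC command name and derive the buffer layouts from it; commands whose output is ignored no longer declare an out[] array at all. A short sketch of the two styles side by side, using the dealloc_pd command from the hunk above as the example:

	u32 in[MLX5_ST_SZ_DW(dealloc_pd_in)] = {};
	int err;

	MLX5_SET(dealloc_pd_in, in, opcode, MLX5_CMD_OP_DEALLOC_PD);
	MLX5_SET(dealloc_pd_in, in, pd, pdn);
	MLX5_SET(dealloc_pd_in, in, uid, uid);

	/* old style: caller also declares and sizes the output buffer
	 *	u32 out[MLX5_ST_SZ_DW(dealloc_pd_out)] = {};
	 *	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	 */

	/* new style: the helper infers dealloc_pd_out and its size from the name */
	err = mlx5_cmd_exec_in(dev, dealloc_pd, in);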
diff --git a/drivers/infiniband/hw/mlx5/cong.c b/drivers/infiniband/hw/mlx5/cong.c
index de4da92b81a6..b9291e482428 100644
--- a/drivers/infiniband/hw/mlx5/cong.c
+++ b/drivers/infiniband/hw/mlx5/cong.c
@@ -290,7 +290,7 @@ static int mlx5_ib_get_cc_params(struct mlx5_ib_dev *dev, u8 port_num,
node = mlx5_ib_param_to_node(offset);
- err = mlx5_cmd_query_cong_params(mdev, node, out, outlen);
+ err = mlx5_cmd_query_cong_params(mdev, node, out);
if (err)
goto free;
@@ -339,7 +339,7 @@ static int mlx5_ib_set_cc_params(struct mlx5_ib_dev *dev, u8 port_num,
MLX5_SET(field_select_r_roce_rp, field, field_select_r_roce_rp,
attr_mask);
- err = mlx5_cmd_modify_cong_params(mdev, in, inlen);
+ err = mlx5_cmd_exec_in(dev->mdev, modify_cong_params, in);
kvfree(in);
alloc_err:
mlx5_ib_put_native_port_mdev(dev, port_num + 1);
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 146ba2966744..0c18cb6a2f14 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -36,6 +36,7 @@
#include <rdma/ib_cache.h>
#include "mlx5_ib.h"
#include "srq.h"
+#include "qp.h"
static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe)
{
@@ -201,7 +202,7 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
case MLX5_CQE_RESP_WR_IMM:
wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
wc->wc_flags = IB_WC_WITH_IMM;
- wc->ex.imm_data = cqe->imm_inval_pkey;
+ wc->ex.imm_data = cqe->immediate;
break;
case MLX5_CQE_RESP_SEND:
wc->opcode = IB_WC_RECV;
@@ -213,12 +214,12 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
case MLX5_CQE_RESP_SEND_IMM:
wc->opcode = IB_WC_RECV;
wc->wc_flags = IB_WC_WITH_IMM;
- wc->ex.imm_data = cqe->imm_inval_pkey;
+ wc->ex.imm_data = cqe->immediate;
break;
case MLX5_CQE_RESP_SEND_INV:
wc->opcode = IB_WC_RECV;
wc->wc_flags = IB_WC_WITH_INVALIDATE;
- wc->ex.invalidate_rkey = be32_to_cpu(cqe->imm_inval_pkey);
+ wc->ex.invalidate_rkey = be32_to_cpu(cqe->inval_rkey);
break;
}
wc->src_qp = be32_to_cpu(cqe->flags_rqpn) & 0xffffff;
@@ -226,7 +227,7 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
wc->wc_flags |= g ? IB_WC_GRH : 0;
if (unlikely(is_qp1(qp->ibqp.qp_type))) {
- u16 pkey = be32_to_cpu(cqe->imm_inval_pkey) & 0xffff;
+ u16 pkey = be32_to_cpu(cqe->pkey) & 0xffff;
ib_find_cached_pkey(&dev->ib_dev, qp->port, pkey,
&wc->pkey_index);
@@ -484,7 +485,7 @@ repoll:
* because CQs will be locked while QPs are removed
* from the table.
*/
- mqp = __mlx5_qp_lookup(dev->mdev, qpn);
+ mqp = radix_tree_lookup(&dev->qp_table.tree, qpn);
*cur_qp = to_mibqp(mqp);
}
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index 46e1ab771f10..9454a66c12cc 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -14,6 +14,7 @@
#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>
#include "mlx5_ib.h"
+#include "qp.h"
#include <linux/xarray.h>
#define UVERBS_MODULE_NAME mlx5_ib
@@ -494,6 +495,10 @@ static u64 devx_get_obj_id(const void *in)
obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
MLX5_GET(rst2init_qp_in, in, qpn));
break;
+ case MLX5_CMD_OP_INIT2INIT_QP:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
+ MLX5_GET(init2init_qp_in, in, qpn));
+ break;
case MLX5_CMD_OP_INIT2RTR_QP:
obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
MLX5_GET(init2rtr_qp_in, in, qpn));
@@ -614,7 +619,7 @@ static bool devx_is_valid_obj_id(struct uverbs_attr_bundle *attrs,
enum ib_qp_type qp_type = qp->ibqp.qp_type;
if (qp_type == IB_QPT_RAW_PACKET ||
- (qp->flags & MLX5_IB_QP_UNDERLAY)) {
+ (qp->flags & IB_QP_CREATE_SOURCE_QPN)) {
struct mlx5_ib_raw_packet_qp *raw_packet_qp =
&qp->raw_packet_qp;
struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
@@ -819,6 +824,7 @@ static bool devx_is_obj_modify_cmd(const void *in)
case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
case MLX5_CMD_OP_RST2INIT_QP:
case MLX5_CMD_OP_INIT2RTR_QP:
+ case MLX5_CMD_OP_INIT2INIT_QP:
case MLX5_CMD_OP_RTR2RTS_QP:
case MLX5_CMD_OP_RTS2RTS_QP:
case MLX5_CMD_OP_SQERR2RTS_QP:
@@ -1356,7 +1362,7 @@ static int devx_obj_cleanup(struct ib_uobject *uobject,
}
if (obj->flags & DEVX_OBJ_FLAGS_DCT)
- ret = mlx5_core_destroy_dct(obj->ib_dev->mdev, &obj->core_dct);
+ ret = mlx5_core_destroy_dct(obj->ib_dev, &obj->core_dct);
else if (obj->flags & DEVX_OBJ_FLAGS_CQ)
ret = mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq);
else
@@ -1450,9 +1456,8 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
if (opcode == MLX5_CMD_OP_CREATE_DCT) {
obj->flags |= DEVX_OBJ_FLAGS_DCT;
- err = mlx5_core_create_dct(dev->mdev, &obj->core_dct,
- cmd_in, cmd_in_len,
- cmd_out, cmd_out_len);
+ err = mlx5_core_create_dct(dev, &obj->core_dct, cmd_in,
+ cmd_in_len, cmd_out, cmd_out_len);
} else if (opcode == MLX5_CMD_OP_CREATE_CQ) {
obj->flags |= DEVX_OBJ_FLAGS_CQ;
obj->core_cq.comp = devx_cq_comp;
@@ -1499,7 +1504,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
obj_destroy:
if (obj->flags & DEVX_OBJ_FLAGS_DCT)
- mlx5_core_destroy_dct(obj->ib_dev->mdev, &obj->core_dct);
+ mlx5_core_destroy_dct(obj->ib_dev, &obj->core_dct);
else if (obj->flags & DEVX_OBJ_FLAGS_CQ)
mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq);
else
@@ -2217,14 +2222,12 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_UMEM_REG)(
obj->mdev = dev->mdev;
uobj->object = obj;
devx_obj_build_destroy_cmd(cmd.in, cmd.out, obj->dinbox, &obj->dinlen, &obj_id);
- err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID, &obj_id, sizeof(obj_id));
- if (err)
- goto err_umem_destroy;
+ uverbs_finalize_uobj_create(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE);
- return 0;
+ err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID, &obj_id,
+ sizeof(obj_id));
+ return err;
-err_umem_destroy:
- mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, cmd.out, sizeof(cmd.out));
err_umem_release:
ib_umem_release(obj->umem);
err_obj_free:
diff --git a/drivers/infiniband/hw/mlx5/flow.c b/drivers/infiniband/hw/mlx5/flow.c
index 862b7bf3e646..216a1108ad34 100644
--- a/drivers/infiniband/hw/mlx5/flow.c
+++ b/drivers/infiniband/hw/mlx5/flow.c
@@ -67,46 +67,41 @@ static const struct uverbs_attr_spec mlx5_ib_flow_type[] = {
},
};
-#define MLX5_IB_CREATE_FLOW_MAX_FLOW_ACTIONS 2
-static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
- struct uverbs_attr_bundle *attrs)
+static int get_dests(struct uverbs_attr_bundle *attrs,
+ struct mlx5_ib_flow_matcher *fs_matcher, int *dest_id,
+ int *dest_type, struct ib_qp **qp, u32 *flags)
{
- struct mlx5_flow_context flow_context = {.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG};
- struct mlx5_ib_flow_handler *flow_handler;
- struct mlx5_ib_flow_matcher *fs_matcher;
- struct ib_uobject **arr_flow_actions;
- struct ib_uflow_resources *uflow_res;
- struct mlx5_flow_act flow_act = {};
- void *devx_obj;
- int dest_id, dest_type;
- void *cmd_in;
- int inlen;
bool dest_devx, dest_qp;
- struct ib_qp *qp = NULL;
- struct ib_uobject *uobj =
- uverbs_attr_get_uobject(attrs, MLX5_IB_ATTR_CREATE_FLOW_HANDLE);
- struct mlx5_ib_dev *dev = mlx5_udata_to_mdev(&attrs->driver_udata);
- int len, ret, i;
- u32 counter_id = 0;
- u32 *offset_attr;
- u32 offset = 0;
-
- if (!capable(CAP_NET_RAW))
- return -EPERM;
+ void *devx_obj;
+ int err;
- dest_devx =
- uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX);
+ dest_devx = uverbs_attr_is_valid(attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX);
dest_qp = uverbs_attr_is_valid(attrs,
MLX5_IB_ATTR_CREATE_FLOW_DEST_QP);
- fs_matcher = uverbs_attr_get_obj(attrs,
- MLX5_IB_ATTR_CREATE_FLOW_MATCHER);
- if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_BYPASS &&
- ((dest_devx && dest_qp) || (!dest_devx && !dest_qp)))
+ *flags = 0;
+ err = uverbs_get_flags32(flags, attrs, MLX5_IB_ATTR_CREATE_FLOW_FLAGS,
+ MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DEFAULT_MISS |
+ MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP);
+ if (err)
+ return err;
+
+ /* Both flags are not allowed */
+ if (*flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DEFAULT_MISS &&
+ *flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP)
return -EINVAL;
- /* Allow only DEVX object as dest when inserting to FDB */
- if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB && !dest_devx)
+ if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_BYPASS) {
+ if (dest_devx && (dest_qp || *flags))
+ return -EINVAL;
+ else if (dest_qp && *flags)
+ return -EINVAL;
+ }
+
+ /* Allow only DEVX object, drop as dest for FDB */
+ if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB && !(dest_devx ||
+ (*flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP)))
return -EINVAL;
/* Allow only DEVX object or QP as dest when inserting to RDMA_RX */
@@ -114,43 +109,86 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
((!dest_devx && !dest_qp) || (dest_devx && dest_qp)))
return -EINVAL;
+ *qp = NULL;
if (dest_devx) {
- devx_obj = uverbs_attr_get_obj(
- attrs, MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX);
- if (IS_ERR(devx_obj))
- return PTR_ERR(devx_obj);
+ devx_obj =
+ uverbs_attr_get_obj(attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX);
/* Verify that the given DEVX object is a flow
* steering destination.
*/
- if (!mlx5_ib_devx_is_flow_dest(devx_obj, &dest_id, &dest_type))
+ if (!mlx5_ib_devx_is_flow_dest(devx_obj, dest_id, dest_type))
return -EINVAL;
/* Allow only flow table as dest when inserting to FDB or RDMA_RX */
if ((fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB ||
fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX) &&
- dest_type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
+ *dest_type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
return -EINVAL;
} else if (dest_qp) {
struct mlx5_ib_qp *mqp;
- qp = uverbs_attr_get_obj(attrs,
- MLX5_IB_ATTR_CREATE_FLOW_DEST_QP);
- if (IS_ERR(qp))
- return PTR_ERR(qp);
+ *qp = uverbs_attr_get_obj(attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_DEST_QP);
+ if (IS_ERR(*qp))
+ return PTR_ERR(*qp);
- if (qp->qp_type != IB_QPT_RAW_PACKET)
+ if ((*qp)->qp_type != IB_QPT_RAW_PACKET)
return -EINVAL;
- mqp = to_mqp(qp);
- if (mqp->flags & MLX5_IB_QP_RSS)
- dest_id = mqp->rss_qp.tirn;
+ mqp = to_mqp(*qp);
+ if (mqp->is_rss)
+ *dest_id = mqp->rss_qp.tirn;
else
- dest_id = mqp->raw_packet_qp.rq.tirn;
- dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
- } else {
- dest_type = MLX5_FLOW_DESTINATION_TYPE_PORT;
+ *dest_id = mqp->raw_packet_qp.rq.tirn;
+ *dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ } else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS) {
+ *dest_type = MLX5_FLOW_DESTINATION_TYPE_PORT;
}
+ if (*dest_type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
+ fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS)
+ return -EINVAL;
+
+ return 0;
+}
+
+#define MLX5_IB_CREATE_FLOW_MAX_FLOW_ACTIONS 2
+static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_flow_context flow_context = {.flow_tag =
+ MLX5_FS_DEFAULT_FLOW_TAG};
+ u32 *offset_attr, offset = 0, counter_id = 0;
+ int dest_id, dest_type, inlen, len, ret, i;
+ struct mlx5_ib_flow_handler *flow_handler;
+ struct mlx5_ib_flow_matcher *fs_matcher;
+ struct ib_uobject **arr_flow_actions;
+ struct ib_uflow_resources *uflow_res;
+ struct mlx5_flow_act flow_act = {};
+ struct ib_qp *qp = NULL;
+ void *devx_obj, *cmd_in;
+ struct ib_uobject *uobj;
+ struct mlx5_ib_dev *dev;
+ u32 flags;
+
+ if (!capable(CAP_NET_RAW))
+ return -EPERM;
+
+ fs_matcher = uverbs_attr_get_obj(attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_MATCHER);
+ uobj = uverbs_attr_get_uobject(attrs, MLX5_IB_ATTR_CREATE_FLOW_HANDLE);
+ dev = mlx5_udata_to_mdev(&attrs->driver_udata);
+
+ if (get_dests(attrs, fs_matcher, &dest_id, &dest_type, &qp, &flags))
+ return -EINVAL;
+
+ if (flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DEFAULT_MISS)
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS;
+
+ if (flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP)
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
+
len = uverbs_attr_get_uobjs_arr(attrs,
MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX, &arr_flow_actions);
if (len) {
@@ -180,10 +218,6 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
}
- if (dest_type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
- fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS)
- return -EINVAL;
-
cmd_in = uverbs_attr_get_alloced_ptr(
attrs, MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE);
inlen = uverbs_attr_get_len(attrs,
@@ -404,7 +438,10 @@ static bool mlx5_ib_modify_header_supported(struct mlx5_ib_dev *dev)
{
return MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
max_modify_header_actions) ||
- MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, max_modify_header_actions);
+ MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev,
+ max_modify_header_actions) ||
+ MLX5_CAP_FLOWTABLE_RDMA_TX(dev->mdev,
+ max_modify_header_actions);
}
static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_ACTION_CREATE_MODIFY_HEADER)(
@@ -427,7 +464,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_ACTION_CREATE_MODIFY_HEADER)(
num_actions = uverbs_attr_ptr_get_array_size(
attrs, MLX5_IB_ATTR_CREATE_MODIFY_HEADER_ACTIONS_PRM,
- MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto));
+ MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto));
if (num_actions < 0)
return num_actions;
@@ -626,7 +663,10 @@ DECLARE_UVERBS_NAMED_METHOD(
UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX_OFFSET,
UVERBS_ATTR_MIN_SIZE(sizeof(u32)),
UA_OPTIONAL,
- UA_ALLOC_AND_COPY));
+ UA_ALLOC_AND_COPY),
+ UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_CREATE_FLOW_FLAGS,
+ enum mlx5_ib_create_flow_flags,
+ UA_OPTIONAL));
DECLARE_UVERBS_NAMED_METHOD_DESTROY(
MLX5_IB_METHOD_DESTROY_FLOW,
@@ -648,7 +688,7 @@ DECLARE_UVERBS_NAMED_METHOD(
UA_MANDATORY),
UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_CREATE_MODIFY_HEADER_ACTIONS_PRM,
UVERBS_ATTR_MIN_SIZE(MLX5_UN_SZ_BYTES(
- set_action_in_add_action_in_auto)),
+ set_add_copy_action_in_auto)),
UA_MANDATORY,
UA_ALLOC_AND_COPY),
UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_CREATE_MODIFY_HEADER_FT_TYPE,
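(The flow.c hunks above move destination parsing into a get_dests() helper that returns dest_id/dest_type/qp through output parameters and validates the namespace/destination combination before the handler builds anything. Below is a minimal standalone sketch of that validation contract, not driver code; the enum and function names are illustrative assumptions.)

/* Minimal sketch, not driver code: the destination-validation rules that
 * get_dests() enforces in the reworked handler.  Names are illustrative.
 */
#include <stdbool.h>

enum ns_sketch  { NS_BYPASS, NS_FDB, NS_RDMA_RX, NS_EGRESS };
enum dst_sketch { DST_NONE, DST_TIR, DST_FLOW_TABLE, DST_PORT };

static bool dests_valid(enum ns_sketch ns, bool have_devx, bool have_qp,
			bool drop, enum dst_sketch dst)
{
	/* FDB insertion needs a DEVX destination unless the rule drops. */
	if (ns == NS_FDB && !(have_devx || drop))
		return false;
	/* RDMA_RX insertion takes exactly one of DEVX object or QP. */
	if (ns == NS_RDMA_RX &&
	    ((!have_devx && !have_qp) || (have_devx && have_qp)))
		return false;
	/* FDB and RDMA_RX only steer to flow tables. */
	if ((ns == NS_FDB || ns == NS_RDMA_RX) && have_devx &&
	    dst != DST_FLOW_TABLE)
		return false;
	/* A TIR destination is not allowed on the egress path. */
	if (ns == NS_EGRESS && dst == DST_TIR)
		return false;
	return true;
}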
diff --git a/drivers/infiniband/hw/mlx5/gsi.c b/drivers/infiniband/hw/mlx5/gsi.c
index 1ae6fd95acaa..40d418153891 100644
--- a/drivers/infiniband/hw/mlx5/gsi.c
+++ b/drivers/infiniband/hw/mlx5/gsi.c
@@ -119,17 +119,15 @@ struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
struct mlx5_ib_gsi_qp *gsi;
struct ib_qp_init_attr hw_init_attr = *init_attr;
const u8 port_num = init_attr->port_num;
- const int num_pkeys = pd->device->attrs.max_pkeys;
- const int num_qps = mlx5_ib_deth_sqpn_cap(dev) ? num_pkeys : 0;
+ int num_qps = 0;
int ret;
- mlx5_ib_dbg(dev, "creating GSI QP\n");
-
- if (port_num > ARRAY_SIZE(dev->devr.ports) || port_num < 1) {
- mlx5_ib_warn(dev,
- "invalid port number %d during GSI QP creation\n",
- port_num);
- return ERR_PTR(-EINVAL);
+ if (mlx5_ib_deth_sqpn_cap(dev)) {
+ if (MLX5_CAP_GEN(dev->mdev,
+ port_type) == MLX5_CAP_PORT_TYPE_IB)
+ num_qps = pd->device->attrs.max_pkeys;
+ else if (dev->lag_active)
+ num_qps = MLX5_MAX_PORTS;
}
gsi = kzalloc(sizeof(*gsi), GFP_KERNEL);
@@ -270,7 +268,7 @@ static struct ib_qp *create_gsi_ud_qp(struct mlx5_ib_gsi_qp *gsi)
}
static int modify_to_rts(struct mlx5_ib_gsi_qp *gsi, struct ib_qp *qp,
- u16 qp_index)
+ u16 pkey_index)
{
struct mlx5_ib_dev *dev = to_mdev(qp->device);
struct ib_qp_attr attr;
@@ -279,7 +277,7 @@ static int modify_to_rts(struct mlx5_ib_gsi_qp *gsi, struct ib_qp *qp,
mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_QKEY | IB_QP_PORT;
attr.qp_state = IB_QPS_INIT;
- attr.pkey_index = qp_index;
+ attr.pkey_index = pkey_index;
attr.qkey = IB_QP1_QKEY;
attr.port_num = gsi->port_num;
ret = ib_modify_qp(qp, &attr, mask);
@@ -313,12 +311,17 @@ static void setup_qp(struct mlx5_ib_gsi_qp *gsi, u16 qp_index)
{
struct ib_device *device = gsi->rx_qp->device;
struct mlx5_ib_dev *dev = to_mdev(device);
+ int pkey_index = qp_index;
+ struct mlx5_ib_qp *mqp;
struct ib_qp *qp;
unsigned long flags;
u16 pkey;
int ret;
- ret = ib_query_pkey(device, gsi->port_num, qp_index, &pkey);
+ if (MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_IB)
+ pkey_index = 0;
+
+ ret = ib_query_pkey(device, gsi->port_num, pkey_index, &pkey);
if (ret) {
mlx5_ib_warn(dev, "unable to read P_Key at port %d, index %d\n",
gsi->port_num, qp_index);
@@ -347,7 +350,10 @@ static void setup_qp(struct mlx5_ib_gsi_qp *gsi, u16 qp_index)
return;
}
- ret = modify_to_rts(gsi, qp, qp_index);
+ mqp = to_mqp(qp);
+ if (dev->lag_active)
+ mqp->gsi_lag_port = qp_index + 1;
+ ret = modify_to_rts(gsi, qp, pkey_index);
if (ret)
goto err_destroy_qp;
@@ -466,11 +472,15 @@ static int mlx5_ib_gsi_silent_drop(struct mlx5_ib_gsi_qp *gsi,
static struct ib_qp *get_tx_qp(struct mlx5_ib_gsi_qp *gsi, struct ib_ud_wr *wr)
{
struct mlx5_ib_dev *dev = to_mdev(gsi->rx_qp->device);
+ struct mlx5_ib_ah *ah = to_mah(wr->ah);
int qp_index = wr->pkey_index;
- if (!mlx5_ib_deth_sqpn_cap(dev))
+ if (!gsi->num_qps)
return gsi->rx_qp;
+ if (dev->lag_active && ah->xmit_port)
+ qp_index = ah->xmit_port - 1;
+
if (qp_index >= gsi->num_qps)
return NULL;
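(The gsi.c hunks above change TX QP selection: with no dedicated TX QPs the traffic falls back to the RX QP, and when LAG is active the QP index follows the AH's transmit port rather than the pkey index. A minimal sketch of that selection logic follows; it is not driver code and the names and sentinel return values are illustrative assumptions.)

/* Minimal sketch, not driver code: picking a GSI TX QP index as the
 * reworked get_tx_qp() does.  Names are illustrative.
 */
#include <stdbool.h>

static int pick_tx_qp_index(int num_qps, int pkey_index,
			    bool lag_active, int xmit_port)
{
	int qp_index = pkey_index;

	if (!num_qps)
		return -1;			/* caller falls back to the RX QP */

	if (lag_active && xmit_port)
		qp_index = xmit_port - 1;	/* xmit_port is 1-based */

	if (qp_index >= num_qps)
		return -2;			/* no usable TX QP */

	return qp_index;
}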
diff --git a/drivers/infiniband/hw/mlx5/ib_rep.h b/drivers/infiniband/hw/mlx5/ib_rep.h
index 3b6750cba796..5b30d3fa8f8d 100644
--- a/drivers/infiniband/hw/mlx5/ib_rep.h
+++ b/drivers/infiniband/hw/mlx5/ib_rep.h
@@ -9,9 +9,9 @@
#include <linux/mlx5/eswitch.h>
#include "mlx5_ib.h"
-#ifdef CONFIG_MLX5_ESWITCH
extern const struct mlx5_ib_profile raw_eth_profile;
+#ifdef CONFIG_MLX5_ESWITCH
u8 mlx5_ib_eswitch_mode(struct mlx5_eswitch *esw);
struct mlx5_ib_dev *mlx5_ib_get_rep_ibdev(struct mlx5_eswitch *esw,
u16 vport_num);
diff --git a/drivers/infiniband/hw/mlx5/ib_virt.c b/drivers/infiniband/hw/mlx5/ib_virt.c
index b61165359954..46b2d370fb3f 100644
--- a/drivers/infiniband/hw/mlx5/ib_virt.c
+++ b/drivers/infiniband/hw/mlx5/ib_virt.c
@@ -134,7 +134,7 @@ int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
if (!out)
return -ENOMEM;
- err = mlx5_core_query_vport_counter(mdev, true, vf, port, out, out_sz);
+ err = mlx5_core_query_vport_counter(mdev, true, vf, port, out);
if (err)
goto ex;
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index 14e0c17de6a9..454ce5de2de7 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -30,7 +30,6 @@
* SOFTWARE.
*/
-#include <linux/mlx5/cmd.h>
#include <linux/mlx5/vport.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>
@@ -188,8 +187,8 @@ static int process_pma_cmd(struct mlx5_ib_dev *dev, u8 port_num,
goto done;
}
- err = mlx5_core_query_vport_counter(mdev, 0, 0,
- mdev_port_num, out_cnt, sz);
+ err = mlx5_core_query_vport_counter(mdev, 0, 0, mdev_port_num,
+ out_cnt);
if (!err)
pma_cnt_ext_assign(pma_cnt_ext, out_cnt);
} else {
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 6679756506e6..343a8b8361e7 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -53,12 +53,15 @@
#include <linux/list.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
+#include <rdma/lag.h>
#include <linux/in.h>
#include <linux/etherdevice.h>
#include "mlx5_ib.h"
#include "ib_rep.h"
#include "cmd.h"
#include "srq.h"
+#include "qp.h"
+#include "wr.h"
#include <linux/mlx5/fs_helpers.h>
#include <linux/mlx5/accel.h>
#include <rdma/uverbs_std_types.h>
@@ -69,17 +72,10 @@
#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>
-#define DRIVER_NAME "mlx5_ib"
-#define DRIVER_VERSION "5.0-0"
-
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
-MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver");
+MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) IB driver");
MODULE_LICENSE("Dual BSD/GPL");
-static char mlx5_version[] =
- DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v"
- DRIVER_VERSION "\n";
-
struct mlx5_ib_event_work {
struct work_struct work;
union {
@@ -627,8 +623,8 @@ static int mlx5_ib_del_gid(const struct ib_gid_attr *attr,
attr->index, NULL, NULL);
}
-__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev,
- const struct ib_gid_attr *attr)
+__be16 mlx5_get_roce_udp_sport_min(const struct mlx5_ib_dev *dev,
+ const struct ib_gid_attr *attr)
{
if (attr->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
return 0;
@@ -1003,7 +999,6 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->max_mcast_qp_attach = MLX5_CAP_GEN(mdev, max_qp_mcg);
props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
props->max_mcast_grp;
- props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */
props->max_ah = INT_MAX;
props->hca_core_clock = MLX5_CAP_GEN(mdev, device_frequency_khz);
props->timestamp_mask = 0x7FFFFFFFFFFFFFFFULL;
@@ -1963,6 +1958,9 @@ uar_done:
resp.response_length += sizeof(resp.dump_fill_mkey);
}
+ if (MLX5_CAP_GEN(dev->mdev, ece_support))
+ resp.comp_mask |= MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_ECE;
+
err = ib_copy_to_udata(udata, &resp, resp.response_length);
if (err)
goto out_mdev;
@@ -1973,7 +1971,7 @@ uar_done:
context->lib_caps = req.lib_caps;
print_lib_caps(dev, context->lib_caps);
- if (dev->lag_active) {
+ if (mlx5_ib_lag_should_assign_affinity(dev)) {
u8 port = mlx5_core_native_port_num(dev->mdev) - 1;
atomic_set(&context->tx_port_affinity,
@@ -2443,7 +2441,7 @@ static int handle_alloc_dm_sw_icm(struct ib_ucontext *ctx,
act_size = roundup_pow_of_two(act_size);
dm->size = act_size;
- err = mlx5_dm_sw_icm_alloc(dev, type, act_size,
+ err = mlx5_dm_sw_icm_alloc(dev, type, act_size, attr->alignment,
to_mucontext(ctx)->devx_uid, &dm->dev_addr,
&dm->icm_dm.obj_id);
if (err)
@@ -2560,7 +2558,7 @@ static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
struct mlx5_ib_alloc_pd_resp resp;
int err;
u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {};
- u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {};
+ u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {};
u16 uid = 0;
struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
udata, struct mlx5_ib_ucontext, ibucontext);
@@ -2568,8 +2566,7 @@ static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
uid = context ? context->devx_uid : 0;
MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
MLX5_SET(alloc_pd_in, in, uid, uid);
- err = mlx5_cmd_exec(to_mdev(ibdev)->mdev, in, sizeof(in),
- out, sizeof(out));
+ err = mlx5_cmd_exec_inout(to_mdev(ibdev)->mdev, alloc_pd, in, out);
if (err)
return err;
@@ -3697,12 +3694,13 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
if (!dest_num)
rule_dst = NULL;
} else {
+ if (flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP)
+ flow_act.action |=
+ MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
if (is_egress)
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW;
- else
- flow_act.action |=
- dest_num ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST :
- MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
+ else if (dest_num)
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
}
if ((spec->flow_context.flags & FLOW_CONTEXT_HAS_TAG) &&
@@ -3746,30 +3744,6 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
return _create_flow_rule(dev, ft_prio, flow_attr, dst, 0, NULL);
}
-static struct mlx5_ib_flow_handler *create_dont_trap_rule(struct mlx5_ib_dev *dev,
- struct mlx5_ib_flow_prio *ft_prio,
- struct ib_flow_attr *flow_attr,
- struct mlx5_flow_destination *dst)
-{
- struct mlx5_ib_flow_handler *handler_dst = NULL;
- struct mlx5_ib_flow_handler *handler = NULL;
-
- handler = create_flow_rule(dev, ft_prio, flow_attr, NULL);
- if (!IS_ERR(handler)) {
- handler_dst = create_flow_rule(dev, ft_prio,
- flow_attr, dst);
- if (IS_ERR(handler_dst)) {
- mlx5_del_flow_rules(handler->rule);
- ft_prio->refcount--;
- kfree(handler);
- handler = handler_dst;
- } else {
- list_add(&handler_dst->list, &handler->list);
- }
- }
-
- return handler;
-}
enum {
LEFTOVERS_MC,
LEFTOVERS_UC,
@@ -3966,22 +3940,18 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
dst->type = MLX5_FLOW_DESTINATION_TYPE_PORT;
} else {
dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
- if (mqp->flags & MLX5_IB_QP_RSS)
+ if (mqp->is_rss)
dst->tir_num = mqp->rss_qp.tirn;
else
dst->tir_num = mqp->raw_packet_qp.rq.tirn;
}
if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
- if (flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) {
- handler = create_dont_trap_rule(dev, ft_prio,
- flow_attr, dst);
- } else {
- underlay_qpn = (mqp->flags & MLX5_IB_QP_UNDERLAY) ?
- mqp->underlay_qpn : 0;
- handler = _create_flow_rule(dev, ft_prio, flow_attr,
- dst, underlay_qpn, ucmd);
- }
+ underlay_qpn = (mqp->flags & IB_QP_CREATE_SOURCE_QPN) ?
+ mqp->underlay_qpn :
+ 0;
+ handler = _create_flow_rule(dev, ft_prio, flow_attr, dst,
+ underlay_qpn, ucmd);
} else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
handler = create_leftovers_rule(dev, ft_prio, flow_attr,
@@ -4225,18 +4195,17 @@ mlx5_ib_raw_fs_rule_add(struct mlx5_ib_dev *dev,
if (dest_type == MLX5_FLOW_DESTINATION_TYPE_TIR) {
dst[dst_num].type = dest_type;
- dst[dst_num].tir_num = dest_id;
+ dst[dst_num++].tir_num = dest_id;
flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
} else if (dest_type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) {
dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM;
- dst[dst_num].ft_num = dest_id;
+ dst[dst_num++].ft_num = dest_id;
flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- } else {
- dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_PORT;
+ } else if (dest_type == MLX5_FLOW_DESTINATION_TYPE_PORT) {
+ dst[dst_num++].type = MLX5_FLOW_DESTINATION_TYPE_PORT;
flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW;
}
- dst_num++;
if (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
@@ -4446,7 +4415,7 @@ static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
uid = ibqp->pd ?
to_mpd(ibqp->pd)->uid : 0;
- if (mqp->flags & MLX5_IB_QP_UNDERLAY) {
+ if (mqp->flags & IB_QP_CREATE_SOURCE_QPN) {
mlx5_ib_dbg(dev, "Attaching a multi cast group to underlay QP is not supported\n");
return -EOPNOTSUPP;
}
@@ -4632,8 +4601,7 @@ static void delay_drop_handler(struct work_struct *work)
atomic_inc(&delay_drop->events_cnt);
mutex_lock(&delay_drop->lock);
- err = mlx5_core_set_delay_drop(delay_drop->dev->mdev,
- delay_drop->timeout);
+ err = mlx5_core_set_delay_drop(delay_drop->dev, delay_drop->timeout);
if (err) {
mlx5_ib_warn(delay_drop->dev, "Failed to set delay drop, timeout=%u\n",
delay_drop->timeout);
@@ -5439,15 +5407,21 @@ static bool is_mdev_switchdev_mode(const struct mlx5_core_dev *mdev)
static void mlx5_ib_dealloc_counters(struct mlx5_ib_dev *dev)
{
+ u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {};
int num_cnt_ports;
int i;
num_cnt_ports = is_mdev_switchdev_mode(dev->mdev) ? 1 : dev->num_ports;
+ MLX5_SET(dealloc_q_counter_in, in, opcode,
+ MLX5_CMD_OP_DEALLOC_Q_COUNTER);
+
for (i = 0; i < num_cnt_ports; i++) {
- if (dev->port[i].cnts.set_id_valid)
- mlx5_core_dealloc_q_counter(dev->mdev,
- dev->port[i].cnts.set_id);
+ if (dev->port[i].cnts.set_id) {
+ MLX5_SET(dealloc_q_counter_in, in, counter_set_id,
+ dev->port[i].cnts.set_id);
+ mlx5_cmd_exec_in(dev->mdev, dealloc_q_counter, in);
+ }
kfree(dev->port[i].cnts.names);
kfree(dev->port[i].cnts.offsets);
}
@@ -5556,11 +5530,14 @@ static void mlx5_ib_fill_counters(struct mlx5_ib_dev *dev,
static int mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev)
{
+ u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {};
int num_cnt_ports;
int err = 0;
int i;
bool is_shared;
+ MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER);
is_shared = MLX5_CAP_GEN(dev->mdev, log_max_uctx) != 0;
num_cnt_ports = is_mdev_switchdev_mode(dev->mdev) ? 1 : dev->num_ports;
@@ -5572,17 +5549,19 @@ static int mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev)
mlx5_ib_fill_counters(dev, dev->port[i].cnts.names,
dev->port[i].cnts.offsets);
- err = mlx5_cmd_alloc_q_counter(dev->mdev,
- &dev->port[i].cnts.set_id,
- is_shared ?
- MLX5_SHARED_RESOURCE_UID : 0);
+ MLX5_SET(alloc_q_counter_in, in, uid,
+ is_shared ? MLX5_SHARED_RESOURCE_UID : 0);
+
+ err = mlx5_cmd_exec_inout(dev->mdev, alloc_q_counter, in, out);
if (err) {
mlx5_ib_warn(dev,
"couldn't allocate queue counter for port %d, err %d\n",
i + 1, err);
goto err_alloc;
}
- dev->port[i].cnts.set_id_valid = true;
+
+ dev->port[i].cnts.set_id =
+ MLX5_GET(alloc_q_counter_out, out, counter_set_id);
}
return 0;
@@ -5638,27 +5617,23 @@ static int mlx5_ib_query_q_counters(struct mlx5_core_dev *mdev,
struct rdma_hw_stats *stats,
u16 set_id)
{
- int outlen = MLX5_ST_SZ_BYTES(query_q_counter_out);
- void *out;
+ u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {};
__be32 val;
int ret, i;
- out = kvzalloc(outlen, GFP_KERNEL);
- if (!out)
- return -ENOMEM;
-
- ret = mlx5_core_query_q_counter(mdev, set_id, 0, out, outlen);
+ MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
+ MLX5_SET(query_q_counter_in, in, counter_set_id, set_id);
+ ret = mlx5_cmd_exec_inout(mdev, query_q_counter, in, out);
if (ret)
- goto free;
+ return ret;
for (i = 0; i < cnts->num_q_counters; i++) {
- val = *(__be32 *)(out + cnts->offsets[i]);
+ val = *(__be32 *)((void *)out + cnts->offsets[i]);
stats->value[i] = (u64)be32_to_cpu(val);
}
-free:
- kvfree(out);
- return ret;
+ return 0;
}
static int mlx5_ib_query_ext_ppcnt_counters(struct mlx5_ib_dev *dev,
@@ -5765,20 +5740,38 @@ static int mlx5_ib_counter_update_stats(struct rdma_counter *counter)
counter->stats, counter->id);
}
+static int mlx5_ib_counter_dealloc(struct rdma_counter *counter)
+{
+ struct mlx5_ib_dev *dev = to_mdev(counter->device);
+ u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {};
+
+ if (!counter->id)
+ return 0;
+
+ MLX5_SET(dealloc_q_counter_in, in, opcode,
+ MLX5_CMD_OP_DEALLOC_Q_COUNTER);
+ MLX5_SET(dealloc_q_counter_in, in, counter_set_id, counter->id);
+ return mlx5_cmd_exec_in(dev->mdev, dealloc_q_counter, in);
+}
+
static int mlx5_ib_counter_bind_qp(struct rdma_counter *counter,
struct ib_qp *qp)
{
struct mlx5_ib_dev *dev = to_mdev(qp->device);
- u16 cnt_set_id = 0;
int err;
if (!counter->id) {
- err = mlx5_cmd_alloc_q_counter(dev->mdev,
- &cnt_set_id,
- MLX5_SHARED_RESOURCE_UID);
+ u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {};
+
+ MLX5_SET(alloc_q_counter_in, in, opcode,
+ MLX5_CMD_OP_ALLOC_Q_COUNTER);
+ MLX5_SET(alloc_q_counter_in, in, uid, MLX5_SHARED_RESOURCE_UID);
+ err = mlx5_cmd_exec_inout(dev->mdev, alloc_q_counter, in, out);
if (err)
return err;
- counter->id = cnt_set_id;
+ counter->id =
+ MLX5_GET(alloc_q_counter_out, out, counter_set_id);
}
err = mlx5_ib_qp_set_counter(qp, counter);
@@ -5788,7 +5781,7 @@ static int mlx5_ib_counter_bind_qp(struct rdma_counter *counter,
return 0;
fail_set_counter:
- mlx5_core_dealloc_q_counter(dev->mdev, cnt_set_id);
+ mlx5_ib_counter_dealloc(counter);
counter->id = 0;
return err;
@@ -5799,13 +5792,6 @@ static int mlx5_ib_counter_unbind_qp(struct ib_qp *qp)
return mlx5_ib_qp_set_counter(qp, NULL);
}
-static int mlx5_ib_counter_dealloc(struct rdma_counter *counter)
-{
- struct mlx5_ib_dev *dev = to_mdev(counter->device);
-
- return mlx5_core_dealloc_q_counter(dev->mdev, counter->id);
-}
-
static int mlx5_ib_rn_get_params(struct ib_device *device, u8 port_num,
enum rdma_netdev_t type,
struct rdma_netdev_alloc_params *params)
@@ -6203,26 +6189,20 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_VAR_OBJ_ALLOC)(
mmap_offset = mlx5_entry_to_mmap_offset(entry);
length = entry->rdma_entry.npages * PAGE_SIZE;
uobj->object = entry;
+ uverbs_finalize_uobj_create(attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_HANDLE);
err = uverbs_copy_to(attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_OFFSET,
&mmap_offset, sizeof(mmap_offset));
if (err)
- goto err;
+ return err;
err = uverbs_copy_to(attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_PAGE_ID,
&entry->page_idx, sizeof(entry->page_idx));
if (err)
- goto err;
+ return err;
err = uverbs_copy_to(attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_LENGTH,
&length, sizeof(length));
- if (err)
- goto err;
-
- return 0;
-
-err:
- rdma_user_mmap_entry_remove(&entry->rdma_entry);
return err;
}
@@ -6336,26 +6316,20 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_UAR_OBJ_ALLOC)(
mmap_offset = mlx5_entry_to_mmap_offset(entry);
length = entry->rdma_entry.npages * PAGE_SIZE;
uobj->object = entry;
+ uverbs_finalize_uobj_create(attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_HANDLE);
err = uverbs_copy_to(attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_OFFSET,
&mmap_offset, sizeof(mmap_offset));
if (err)
- goto err;
+ return err;
err = uverbs_copy_to(attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_PAGE_ID,
&entry->page_idx, sizeof(entry->page_idx));
if (err)
- goto err;
+ return err;
err = uverbs_copy_to(attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_LENGTH,
&length, sizeof(length));
- if (err)
- goto err;
-
- return 0;
-
-err:
- rdma_user_mmap_entry_remove(&entry->rdma_entry);
return err;
}
@@ -6549,6 +6523,7 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
dev->ib_dev.phys_port_cnt = dev->num_ports;
dev->ib_dev.num_comp_vectors = mlx5_comp_vectors_count(mdev);
dev->ib_dev.dev.parent = mdev->device;
+ dev->ib_dev.lag_flags = RDMA_LAG_FLAGS_HASH_ALL_SLAVES;
mutex_init(&dev->cap_mask_mutex);
INIT_LIST_HEAD(&dev->qp_list);
@@ -6638,8 +6613,8 @@ static const struct ib_device_ops mlx5_ib_dev_ops = {
.modify_qp = mlx5_ib_modify_qp,
.modify_srq = mlx5_ib_modify_srq,
.poll_cq = mlx5_ib_poll_cq,
- .post_recv = mlx5_ib_post_recv,
- .post_send = mlx5_ib_post_send,
+ .post_recv = mlx5_ib_post_recv_nodrain,
+ .post_send = mlx5_ib_post_send_nodrain,
.post_srq_recv = mlx5_ib_post_srq_recv,
.process_mad = mlx5_ib_process_mad,
.query_ah = mlx5_ib_query_ah,
@@ -7140,6 +7115,8 @@ void *__mlx5_ib_add(struct mlx5_ib_dev *dev,
int err;
int i;
+ dev->profile = profile;
+
for (i = 0; i < MLX5_IB_STAGE_MAX; i++) {
if (profile->stage[i].init) {
err = profile->stage[i].init(dev);
@@ -7148,7 +7125,6 @@ void *__mlx5_ib_add(struct mlx5_ib_dev *dev,
}
}
- dev->profile = profile;
dev->ib_active = true;
return dev;
@@ -7175,6 +7151,9 @@ static const struct mlx5_ib_profile pf_profile = {
STAGE_CREATE(MLX5_IB_STAGE_ROCE,
mlx5_ib_stage_roce_init,
mlx5_ib_stage_roce_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_QP,
+ mlx5_init_qp_table,
+ mlx5_cleanup_qp_table),
STAGE_CREATE(MLX5_IB_STAGE_SRQ,
mlx5_init_srq_table,
mlx5_cleanup_srq_table),
@@ -7232,6 +7211,9 @@ const struct mlx5_ib_profile raw_eth_profile = {
STAGE_CREATE(MLX5_IB_STAGE_ROCE,
mlx5_ib_stage_raw_eth_roce_init,
mlx5_ib_stage_raw_eth_roce_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_QP,
+ mlx5_init_qp_table,
+ mlx5_cleanup_qp_table),
STAGE_CREATE(MLX5_IB_STAGE_SRQ,
mlx5_init_srq_table,
mlx5_cleanup_srq_table),
@@ -7316,8 +7298,6 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
int port_type_cap;
int num_ports;
- printk_once(KERN_INFO "%s", mlx5_version);
-
if (MLX5_ESWITCH_MANAGER(mdev) &&
mlx5_ib_eswitch_mode(mdev->priv.eswitch) == MLX5_ESWITCH_OFFLOADS) {
if (!mlx5_core_mp_enabled(mdev))
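(One pattern running through the main.c counter changes above: the separate set_id_valid flag is dropped and an id of 0 now means "never allocated", so dealloc paths can bail out early, as mlx5_ib_counter_dealloc() does. A minimal sketch of that sentinel pattern follows; it is not driver code and the names are illustrative assumptions.)

/* Minimal sketch, not driver code: counter id 0 as the "not allocated"
 * sentinel.  The hardware call is left as a callback for illustration.
 */
struct counter_sketch { unsigned int id; };

static int dealloc_counter(struct counter_sketch *c,
			   int (*hw_dealloc)(unsigned int id))
{
	if (!c->id)
		return 0;	/* nothing was ever allocated for this counter */
	return hw_dealloc(c->id);
}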
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index a4e522385de0..5dbe3eb0d9cb 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -337,7 +337,6 @@ struct mlx5_ib_rwq {
struct ib_umem *umem;
size_t buf_size;
unsigned int page_shift;
- int create_type;
struct mlx5_db db;
u32 user_index;
u32 wqe_count;
@@ -346,17 +345,6 @@ struct mlx5_ib_rwq {
u32 create_flags; /* Use enum mlx5_ib_wq_flags */
};
-enum {
- MLX5_QP_USER,
- MLX5_QP_KERNEL,
- MLX5_QP_EMPTY
-};
-
-enum {
- MLX5_WQ_USER,
- MLX5_WQ_KERNEL
-};
-
struct mlx5_ib_rwq_ind_table {
struct ib_rwq_ind_table ib_rwq_ind_tbl;
u32 rqtn;
@@ -443,34 +431,37 @@ struct mlx5_ib_qp {
/* serialize qp state modifications
*/
struct mutex mutex;
+ /* cached variant of create_flags from struct ib_qp_init_attr */
u32 flags;
u8 port;
u8 state;
- int wq_sig;
- int scat_cqe;
int max_inline_data;
struct mlx5_bf bf;
- int has_rq;
+ u8 has_rq:1;
+ u8 is_rss:1;
/* only for user space QPs. For kernel
* we have it from the bf object
*/
int bfregn;
- int create_type;
-
struct list_head qps_list;
struct list_head cq_recv_list;
struct list_head cq_send_list;
struct mlx5_rate_limit rl;
u32 underlay_qpn;
u32 flags_en;
- /* storage for qp sub type when core qp type is IB_QPT_DRIVER */
- enum ib_qp_type qp_sub_type;
+ /*
+ * IB/core doesn't store low-level QP types, so
+ * store both MLX and IBTA types in the field below.
+	 * IB_QPT_DRIVER will be broken down into DCI/DCT subtypes.
+ */
+ enum ib_qp_type type;
	/* A flag to indicate that a new counter has been configured
	 * but has not yet taken effect
*/
u32 counter_pending;
+ u16 gsi_lag_port;
};
struct mlx5_ib_cq_buf {
@@ -481,24 +472,6 @@ struct mlx5_ib_cq_buf {
int nent;
};
-enum mlx5_ib_qp_flags {
- MLX5_IB_QP_LSO = IB_QP_CREATE_IPOIB_UD_LSO,
- MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK = IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
- MLX5_IB_QP_CROSS_CHANNEL = IB_QP_CREATE_CROSS_CHANNEL,
- MLX5_IB_QP_MANAGED_SEND = IB_QP_CREATE_MANAGED_SEND,
- MLX5_IB_QP_MANAGED_RECV = IB_QP_CREATE_MANAGED_RECV,
- MLX5_IB_QP_SIGNATURE_HANDLING = 1 << 5,
- /* QP uses 1 as its source QP number */
- MLX5_IB_QP_SQPN_QP1 = 1 << 6,
- MLX5_IB_QP_CAP_SCATTER_FCS = 1 << 7,
- MLX5_IB_QP_RSS = 1 << 8,
- MLX5_IB_QP_CVLAN_STRIPPING = 1 << 9,
- MLX5_IB_QP_UNDERLAY = 1 << 10,
- MLX5_IB_QP_PCI_WRITE_END_PADDING = 1 << 11,
- MLX5_IB_QP_TUNNEL_OFFLOAD = 1 << 12,
- MLX5_IB_QP_PACKET_BASED_CREDIT = 1 << 13,
-};
-
struct mlx5_umr_wr {
struct ib_send_wr wr;
u64 virt_addr;
@@ -702,12 +675,6 @@ struct umr_common {
struct semaphore sem;
};
-enum {
- MLX5_FMR_INVALID,
- MLX5_FMR_VALID,
- MLX5_FMR_BUSY,
-};
-
struct mlx5_cache_ent {
struct list_head head;
	/* sync access to the cache entry
@@ -780,7 +747,6 @@ struct mlx5_ib_counters {
u32 num_cong_counters;
u32 num_ext_ppcnt_counters;
u16 set_id;
- bool set_id_valid;
};
struct mlx5_ib_multiport_info;
@@ -870,6 +836,7 @@ enum mlx5_ib_stages {
MLX5_IB_STAGE_CAPS,
MLX5_IB_STAGE_NON_DEFAULT_CB,
MLX5_IB_STAGE_ROCE,
+ MLX5_IB_STAGE_QP,
MLX5_IB_STAGE_SRQ,
MLX5_IB_STAGE_DEVICE_RESOURCES,
MLX5_IB_STAGE_DEVICE_NOTIFIER,
@@ -1065,6 +1032,7 @@ struct mlx5_ib_dev {
struct mlx5_dm dm;
u16 devx_whitelist_uid;
struct mlx5_srq_table srq_table;
+ struct mlx5_qp_table qp_table;
struct mlx5_async_ctx async_ctx;
struct mlx5_devx_event_table devx_event_table;
struct mlx5_var_table var_table;
@@ -1180,7 +1148,7 @@ void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db)
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
-int mlx5_ib_create_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr, u32 flags,
+int mlx5_ib_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
struct ib_udata *udata);
int mlx5_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
void mlx5_ib_destroy_ah(struct ib_ah *ah, u32 flags);
@@ -1204,10 +1172,6 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr
int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata);
void mlx5_ib_drain_sq(struct ib_qp *qp);
void mlx5_ib_drain_rq(struct ib_qp *qp);
-int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
- const struct ib_send_wr **bad_wr);
-int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
- const struct ib_recv_wr **bad_wr);
int mlx5_ib_read_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
size_t buflen, size_t *bc);
int mlx5_ib_read_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
@@ -1283,8 +1247,6 @@ int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
struct ib_port_attr *props);
int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
struct ib_port_attr *props);
-int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev);
-void mlx5_ib_cleanup_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
unsigned long max_page_shift,
int *count, int *shift,
@@ -1382,8 +1344,8 @@ int mlx5_ib_get_vf_guid(struct ib_device *device, int vf, u8 port,
int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
u64 guid, int type);
-__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev,
- const struct ib_gid_attr *attr);
+__be16 mlx5_get_roce_udp_sport_min(const struct mlx5_ib_dev *dev,
+ const struct ib_gid_attr *attr);
void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num);
void mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num);
@@ -1580,4 +1542,11 @@ static inline bool mlx5_ib_can_use_umr(struct mlx5_ib_dev *dev,
int mlx5_ib_enable_driver(struct ib_device *dev);
int mlx5_ib_test_wc(struct mlx5_ib_dev *dev);
+
+static inline bool mlx5_ib_lag_should_assign_affinity(struct mlx5_ib_dev *dev)
+{
+ return dev->lag_active ||
+ (MLX5_CAP_GEN(dev->mdev, num_lag_ports) > 1 &&
+ MLX5_CAP_GEN(dev->mdev, lag_tx_port_affinity));
+}
#endif /* MLX5_IB_H */
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 3de7606d4a1a..7d2ec9ee5097 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -36,6 +36,7 @@
#include "mlx5_ib.h"
#include "cmd.h"
+#include "qp.h"
#include <linux/mlx5/eq.h>
@@ -446,8 +447,7 @@ static void mlx5_ib_page_fault_resume(struct mlx5_ib_dev *dev,
{
int wq_num = pfault->event_subtype == MLX5_PFAULT_SUBTYPE_WQE ?
pfault->wqe.wq_num : pfault->token;
- u32 out[MLX5_ST_SZ_DW(page_fault_resume_out)] = { };
- u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)] = { };
+ u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)] = {};
int err;
MLX5_SET(page_fault_resume_in, in, opcode, MLX5_CMD_OP_PAGE_FAULT_RESUME);
@@ -456,7 +456,7 @@ static void mlx5_ib_page_fault_resume(struct mlx5_ib_dev *dev,
MLX5_SET(page_fault_resume_in, in, wq_number, wq_num);
MLX5_SET(page_fault_resume_in, in, error, !!error);
- err = mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_in(dev->mdev, page_fault_resume, in);
if (err)
mlx5_ib_err(dev, "Failed to resolve the page fault on WQ 0x%x err %d\n",
wq_num, err);
@@ -1135,8 +1135,7 @@ static int mlx5_ib_mr_initiator_pfault_handler(
if (qp->ibqp.qp_type == IB_QPT_XRC_INI)
*wqe += sizeof(struct mlx5_wqe_xrc_seg);
- if (qp->ibqp.qp_type == IB_QPT_UD ||
- qp->qp_sub_type == MLX5_IB_QPT_DCI) {
+ if (qp->type == IB_QPT_UD || qp->type == MLX5_IB_QPT_DCI) {
av = *wqe;
if (av->dqp_dct & cpu_to_be32(MLX5_EXTENDED_UD_AV))
*wqe += sizeof(struct mlx5_av);
@@ -1189,7 +1188,7 @@ static int mlx5_ib_mr_responder_pfault_handler_rq(struct mlx5_ib_dev *dev,
struct mlx5_ib_wq *wq = &qp->rq;
int wqe_size = 1 << wq->wqe_shift;
- if (qp->wq_sig) {
+ if (qp->flags_en & MLX5_QP_FLAG_SIGNATURE) {
mlx5_ib_err(dev, "ODP fault with WQE signatures is not supported\n");
return -EFAULT;
}
@@ -1219,7 +1218,7 @@ static inline struct mlx5_core_rsc_common *odp_get_rsc(struct mlx5_ib_dev *dev,
case MLX5_WQE_PF_TYPE_REQ_SEND_OR_WRITE:
case MLX5_WQE_PF_TYPE_RESP:
case MLX5_WQE_PF_TYPE_REQ_READ_OR_ATOMIC:
- common = mlx5_core_res_hold(dev->mdev, wq_num, MLX5_RES_QP);
+ common = mlx5_core_res_hold(dev, wq_num, MLX5_RES_QP);
break;
default:
break;
diff --git a/drivers/infiniband/hw/mlx5/qos.c b/drivers/infiniband/hw/mlx5/qos.c
index cac878a70edb..dce92554142a 100644
--- a/drivers/infiniband/hw/mlx5/qos.c
+++ b/drivers/infiniband/hw/mlx5/qos.c
@@ -69,17 +69,14 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_PP_OBJ_ALLOC)(
if (err)
goto err;
- err = uverbs_copy_to(attrs, MLX5_IB_ATTR_PP_OBJ_ALLOC_INDEX,
- &pp_entry->index, sizeof(pp_entry->index));
- if (err)
- goto clean;
-
pp_entry->mdev = dev->mdev;
uobj->object = pp_entry;
- return 0;
+ uverbs_finalize_uobj_create(attrs, MLX5_IB_ATTR_PP_OBJ_ALLOC_HANDLE);
+
+ err = uverbs_copy_to(attrs, MLX5_IB_ATTR_PP_OBJ_ALLOC_INDEX,
+ &pp_entry->index, sizeof(pp_entry->index));
+ return err;
-clean:
- mlx5_rl_remove_rate_raw(dev->mdev, pp_entry->index);
err:
kfree(pp_entry);
return err;
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 2210759843ba..81bf6b975e0e 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -39,9 +39,8 @@
#include "mlx5_ib.h"
#include "ib_rep.h"
#include "cmd.h"
-
-/* not supported currently */
-static int wq_signature;
+#include "qp.h"
+#include "wr.h"
enum {
MLX5_IB_ACK_REQ_FREQ = 8,
@@ -54,32 +53,6 @@ enum {
MLX5_IB_LINK_TYPE_ETH = 1
};
-enum {
- MLX5_IB_SQ_STRIDE = 6,
- MLX5_IB_SQ_UMR_INLINE_THRESHOLD = 64,
-};
-
-static const u32 mlx5_ib_opcode[] = {
- [IB_WR_SEND] = MLX5_OPCODE_SEND,
- [IB_WR_LSO] = MLX5_OPCODE_LSO,
- [IB_WR_SEND_WITH_IMM] = MLX5_OPCODE_SEND_IMM,
- [IB_WR_RDMA_WRITE] = MLX5_OPCODE_RDMA_WRITE,
- [IB_WR_RDMA_WRITE_WITH_IMM] = MLX5_OPCODE_RDMA_WRITE_IMM,
- [IB_WR_RDMA_READ] = MLX5_OPCODE_RDMA_READ,
- [IB_WR_ATOMIC_CMP_AND_SWP] = MLX5_OPCODE_ATOMIC_CS,
- [IB_WR_ATOMIC_FETCH_AND_ADD] = MLX5_OPCODE_ATOMIC_FA,
- [IB_WR_SEND_WITH_INV] = MLX5_OPCODE_SEND_INVAL,
- [IB_WR_LOCAL_INV] = MLX5_OPCODE_UMR,
- [IB_WR_REG_MR] = MLX5_OPCODE_UMR,
- [IB_WR_MASKED_ATOMIC_CMP_AND_SWP] = MLX5_OPCODE_ATOMIC_MASKED_CS,
- [IB_WR_MASKED_ATOMIC_FETCH_AND_ADD] = MLX5_OPCODE_ATOMIC_MASKED_FA,
- [MLX5_IB_WR_UMR] = MLX5_OPCODE_UMR,
-};
-
-struct mlx5_wqe_eth_pad {
- u8 rsvd0[16];
-};
-
enum raw_qp_set_mask_map {
MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID = 1UL << 0,
MLX5_RAW_QP_RATE_LIMIT = 1UL << 1,
@@ -391,17 +364,26 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
cap->max_recv_wr = 0;
cap->max_recv_sge = 0;
} else {
+ int wq_sig = !!(qp->flags_en & MLX5_QP_FLAG_SIGNATURE);
+
if (ucmd) {
qp->rq.wqe_cnt = ucmd->rq_wqe_count;
if (ucmd->rq_wqe_shift > BITS_PER_BYTE * sizeof(ucmd->rq_wqe_shift))
return -EINVAL;
qp->rq.wqe_shift = ucmd->rq_wqe_shift;
- if ((1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) < qp->wq_sig)
+ if ((1 << qp->rq.wqe_shift) /
+ sizeof(struct mlx5_wqe_data_seg) <
+ wq_sig)
return -EINVAL;
- qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
+ qp->rq.max_gs =
+ (1 << qp->rq.wqe_shift) /
+ sizeof(struct mlx5_wqe_data_seg) -
+ wq_sig;
qp->rq.max_post = qp->rq.wqe_cnt;
} else {
- wqe_size = qp->wq_sig ? sizeof(struct mlx5_wqe_signature_seg) : 0;
+ wqe_size =
+ wq_sig ? sizeof(struct mlx5_wqe_signature_seg) :
+ 0;
wqe_size += cap->max_recv_sge * sizeof(struct mlx5_wqe_data_seg);
wqe_size = roundup_pow_of_two(wqe_size);
wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size;
@@ -415,7 +397,10 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
return -EINVAL;
}
qp->rq.wqe_shift = ilog2(wqe_size);
- qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
+ qp->rq.max_gs =
+ (1 << qp->rq.wqe_shift) /
+ sizeof(struct mlx5_wqe_data_seg) -
+ wq_sig;
qp->rq.max_post = qp->rq.wqe_cnt;
}
}
@@ -595,7 +580,7 @@ static int set_user_buf_size(struct mlx5_ib_dev *dev,
}
if (attr->qp_type == IB_QPT_RAW_PACKET ||
- qp->flags & MLX5_IB_QP_UNDERLAY) {
+ qp->flags & IB_QP_CREATE_SOURCE_QPN) {
base->ubuffer.buf_size = qp->rq.wqe_cnt << qp->rq.wqe_shift;
qp->raw_packet_qp.sq.ubuffer.buf_size = qp->sq.wqe_cnt << 6;
} else {
@@ -750,10 +735,7 @@ static int to_mlx5_st(enum ib_qp_type type)
case IB_QPT_SMI: return MLX5_QP_ST_QP0;
case MLX5_IB_QPT_HW_GSI: return MLX5_QP_ST_QP1;
case MLX5_IB_QPT_DCI: return MLX5_QP_ST_DCI;
- case IB_QPT_RAW_IPV6: return MLX5_QP_ST_RAW_IPV6;
- case IB_QPT_RAW_PACKET:
- case IB_QPT_RAW_ETHERTYPE: return MLX5_QP_ST_RAW_ETHERTYPE;
- case IB_QPT_MAX:
+ case IB_QPT_RAW_PACKET: return MLX5_QP_ST_RAW_ETHERTYPE;
default: return -EINVAL;
}
}
@@ -890,7 +872,6 @@ static int create_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
goto err_umem;
}
- rwq->create_type = MLX5_WQ_USER;
return 0;
err_umem:
@@ -905,15 +886,14 @@ static int adjust_bfregn(struct mlx5_ib_dev *dev,
bfregn % MLX5_NON_FP_BFREGS_PER_UAR;
}
-static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
- struct mlx5_ib_qp *qp, struct ib_udata *udata,
- struct ib_qp_init_attr *attr,
- u32 **in,
- struct mlx5_ib_create_qp_resp *resp, int *inlen,
- struct mlx5_ib_qp_base *base)
+static int _create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
+ struct mlx5_ib_qp *qp, struct ib_udata *udata,
+ struct ib_qp_init_attr *attr, u32 **in,
+ struct mlx5_ib_create_qp_resp *resp, int *inlen,
+ struct mlx5_ib_qp_base *base,
+ struct mlx5_ib_create_qp *ucmd)
{
struct mlx5_ib_ucontext *context;
- struct mlx5_ib_create_qp ucmd;
struct mlx5_ib_ubuffer *ubuffer = &base->ubuffer;
int page_shift = 0;
int uar_index = 0;
@@ -927,30 +907,24 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
u16 uid;
u32 uar_flags;
- err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
- if (err) {
- mlx5_ib_dbg(dev, "copy failed\n");
- return err;
- }
-
context = rdma_udata_to_drv_context(udata, struct mlx5_ib_ucontext,
ibucontext);
- uar_flags = ucmd.flags & (MLX5_QP_FLAG_UAR_PAGE_INDEX |
- MLX5_QP_FLAG_BFREG_INDEX);
+ uar_flags = qp->flags_en &
+ (MLX5_QP_FLAG_UAR_PAGE_INDEX | MLX5_QP_FLAG_BFREG_INDEX);
switch (uar_flags) {
case MLX5_QP_FLAG_UAR_PAGE_INDEX:
- uar_index = ucmd.bfreg_index;
+ uar_index = ucmd->bfreg_index;
bfregn = MLX5_IB_INVALID_BFREG;
break;
case MLX5_QP_FLAG_BFREG_INDEX:
uar_index = bfregn_to_uar_index(dev, &context->bfregi,
- ucmd.bfreg_index, true);
+ ucmd->bfreg_index, true);
if (uar_index < 0)
return uar_index;
bfregn = MLX5_IB_INVALID_BFREG;
break;
case 0:
- if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL)
+ if (qp->flags & IB_QP_CREATE_CROSS_CHANNEL)
return -EINVAL;
bfregn = alloc_bfreg(dev, &context->bfregi);
if (bfregn < 0)
@@ -969,12 +943,12 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
- err = set_user_buf_size(dev, qp, &ucmd, base, attr);
+ err = set_user_buf_size(dev, qp, ucmd, base, attr);
if (err)
goto err_bfreg;
- if (ucmd.buf_addr && ubuffer->buf_size) {
- ubuffer->buf_addr = ucmd.buf_addr;
+ if (ucmd->buf_addr && ubuffer->buf_size) {
+ ubuffer->buf_addr = ucmd->buf_addr;
err = mlx5_ib_umem_get(dev, udata, ubuffer->buf_addr,
ubuffer->buf_size, &ubuffer->umem,
&npages, &page_shift, &ncont, &offset);
@@ -992,8 +966,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
goto err_umem;
}
- uid = (attr->qp_type != IB_QPT_XRC_TGT &&
- attr->qp_type != IB_QPT_XRC_INI) ? to_mpd(pd)->uid : 0;
+ uid = (attr->qp_type != IB_QPT_XRC_INI) ? to_mpd(pd)->uid : 0;
MLX5_SET(create_qp_in, *in, uid, uid);
pas = (__be64 *)MLX5_ADDR_OF(create_qp_in, *in, pas);
if (ubuffer->umem)
@@ -1011,24 +984,14 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
resp->bfreg_index = MLX5_IB_INVALID_BFREG;
qp->bfregn = bfregn;
- err = mlx5_ib_db_map_user(context, udata, ucmd.db_addr, &qp->db);
+ err = mlx5_ib_db_map_user(context, udata, ucmd->db_addr, &qp->db);
if (err) {
mlx5_ib_dbg(dev, "map failed\n");
goto err_free;
}
- err = ib_copy_to_udata(udata, resp, min(udata->outlen, sizeof(*resp)));
- if (err) {
- mlx5_ib_dbg(dev, "copy failed\n");
- goto err_unmap;
- }
- qp->create_type = MLX5_QP_USER;
-
return 0;
-err_unmap:
- mlx5_ib_db_unmap_user(context, &qp->db);
-
err_free:
kvfree(*in);
@@ -1041,72 +1004,50 @@ err_bfreg:
return err;
}
-static void destroy_qp_user(struct mlx5_ib_dev *dev, struct ib_pd *pd,
- struct mlx5_ib_qp *qp, struct mlx5_ib_qp_base *base,
- struct ib_udata *udata)
+static void destroy_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
+ struct mlx5_ib_qp_base *base, struct ib_udata *udata)
{
- struct mlx5_ib_ucontext *context =
- rdma_udata_to_drv_context(
- udata,
- struct mlx5_ib_ucontext,
- ibucontext);
-
- mlx5_ib_db_unmap_user(context, &qp->db);
- ib_umem_release(base->ubuffer.umem);
-
- /*
- * Free only the BFREGs which are handled by the kernel.
- * BFREGs of UARs allocated dynamically are handled by user.
- */
- if (qp->bfregn != MLX5_IB_INVALID_BFREG)
- mlx5_ib_free_bfreg(dev, &context->bfregi, qp->bfregn);
-}
+ struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
+ udata, struct mlx5_ib_ucontext, ibucontext);
-/* get_sq_edge - Get the next nearby edge.
- *
- * An 'edge' is defined as the first following address after the end
- * of the fragment or the SQ. Accordingly, during the WQE construction
- * which repetitively increases the pointer to write the next data, it
- * simply should check if it gets to an edge.
- *
- * @sq - SQ buffer.
- * @idx - Stride index in the SQ buffer.
- *
- * Return:
- * The new edge.
- */
-static void *get_sq_edge(struct mlx5_ib_wq *sq, u32 idx)
-{
- void *fragment_end;
+ if (udata) {
+ /* User QP */
+ mlx5_ib_db_unmap_user(context, &qp->db);
+ ib_umem_release(base->ubuffer.umem);
- fragment_end = mlx5_frag_buf_get_wqe
- (&sq->fbc,
- mlx5_frag_buf_get_idx_last_contig_stride(&sq->fbc, idx));
+ /*
+ * Free only the BFREGs which are handled by the kernel.
+ * BFREGs of UARs allocated dynamically are handled by user.
+ */
+ if (qp->bfregn != MLX5_IB_INVALID_BFREG)
+ mlx5_ib_free_bfreg(dev, &context->bfregi, qp->bfregn);
+ return;
+ }
- return fragment_end + MLX5_SEND_WQE_BB;
+ /* Kernel QP */
+ kvfree(qp->sq.wqe_head);
+ kvfree(qp->sq.w_list);
+ kvfree(qp->sq.wrid);
+ kvfree(qp->sq.wr_data);
+ kvfree(qp->rq.wrid);
+ if (qp->db.db)
+ mlx5_db_free(dev->mdev, &qp->db);
+ if (qp->buf.frags)
+ mlx5_frag_buf_free(dev->mdev, &qp->buf);
}
-static int create_kernel_qp(struct mlx5_ib_dev *dev,
- struct ib_qp_init_attr *init_attr,
- struct mlx5_ib_qp *qp,
- u32 **in, int *inlen,
- struct mlx5_ib_qp_base *base)
+static int _create_kernel_qp(struct mlx5_ib_dev *dev,
+ struct ib_qp_init_attr *init_attr,
+ struct mlx5_ib_qp *qp, u32 **in, int *inlen,
+ struct mlx5_ib_qp_base *base)
{
int uar_index;
void *qpc;
int err;
- if (init_attr->create_flags & ~(IB_QP_CREATE_INTEGRITY_EN |
- IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
- IB_QP_CREATE_IPOIB_UD_LSO |
- IB_QP_CREATE_NETIF_QP |
- MLX5_IB_QP_CREATE_SQPN_QP1 |
- MLX5_IB_QP_CREATE_WC_TEST))
- return -EINVAL;
-
if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
qp->bf.bfreg = &dev->fp_bfreg;
- else if (init_attr->create_flags & MLX5_IB_QP_CREATE_WC_TEST)
+ else if (qp->flags & MLX5_IB_QP_CREATE_WC_TEST)
qp->bf.bfreg = &dev->wc_bfreg;
else
qp->bf.bfreg = &dev->bfreg;
@@ -1166,10 +1107,8 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
MLX5_SET(qpc, qpc, fre, 1);
MLX5_SET(qpc, qpc, rlky, 1);
- if (init_attr->create_flags & MLX5_IB_QP_CREATE_SQPN_QP1) {
+ if (qp->flags & MLX5_IB_QP_CREATE_SQPN_QP1)
MLX5_SET(qpc, qpc, deth_sqpn, 1);
- qp->flags |= MLX5_IB_QP_SQPN_QP1;
- }
mlx5_fill_page_frag_array(&qp->buf,
(__be64 *)MLX5_ADDR_OF(create_qp_in,
@@ -1197,7 +1136,6 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
err = -ENOMEM;
goto err_wrid;
}
- qp->create_type = MLX5_QP_KERNEL;
return 0;
@@ -1217,36 +1155,15 @@ err_buf:
return err;
}
-static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
-{
- kvfree(qp->sq.wqe_head);
- kvfree(qp->sq.w_list);
- kvfree(qp->sq.wrid);
- kvfree(qp->sq.wr_data);
- kvfree(qp->rq.wrid);
- mlx5_db_free(dev->mdev, &qp->db);
- mlx5_frag_buf_free(dev->mdev, &qp->buf);
-}
-
static u32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
{
- if (attr->srq || (attr->qp_type == IB_QPT_XRC_TGT) ||
- (attr->qp_type == MLX5_IB_QPT_DCI) ||
- (attr->qp_type == IB_QPT_XRC_INI))
+ if (attr->srq || (qp->type == IB_QPT_XRC_TGT) ||
+ (qp->type == MLX5_IB_QPT_DCI) || (qp->type == IB_QPT_XRC_INI))
return MLX5_SRQ_RQ;
else if (!qp->has_rq)
return MLX5_ZERO_LEN_RQ;
- else
- return MLX5_NON_ZERO_RQ;
-}
-
-static int is_connected(enum ib_qp_type qp_type)
-{
- if (qp_type == IB_QPT_RC || qp_type == IB_QPT_UC ||
- qp_type == MLX5_IB_QPT_DCI)
- return 1;
- return 0;
+ return MLX5_NON_ZERO_RQ;
}
static int create_raw_packet_qp_tis(struct mlx5_ib_dev *dev,
@@ -1254,15 +1171,15 @@ static int create_raw_packet_qp_tis(struct mlx5_ib_dev *dev,
struct mlx5_ib_sq *sq, u32 tdn,
struct ib_pd *pd)
{
- u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
MLX5_SET(create_tis_in, in, uid, to_mpd(pd)->uid);
MLX5_SET(tisc, tisc, transport_domain, tdn);
- if (qp->flags & MLX5_IB_QP_UNDERLAY)
+ if (qp->flags & IB_QP_CREATE_SOURCE_QPN)
MLX5_SET(tisc, tisc, underlay_qpn, qp->underlay_qpn);
- return mlx5_core_create_tis(dev->mdev, in, sizeof(in), &sq->tisn);
+ return mlx5_core_create_tis(dev->mdev, in, &sq->tisn);
}
static void destroy_raw_packet_qp_tis(struct mlx5_ib_dev *dev,
@@ -1336,7 +1253,7 @@ static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
pas = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
mlx5_ib_populate_pas(dev, sq->ubuffer.umem, page_shift, pas, 0);
- err = mlx5_core_create_sq_tracked(dev->mdev, in, inlen, &sq->base.mqp);
+ err = mlx5_core_create_sq_tracked(dev, in, inlen, &sq->base.mqp);
kvfree(in);
@@ -1356,7 +1273,7 @@ static void destroy_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
struct mlx5_ib_sq *sq)
{
destroy_flow_rule_vport_sq(sq);
- mlx5_core_destroy_sq_tracked(dev->mdev, &sq->base.mqp);
+ mlx5_core_destroy_sq_tracked(dev, &sq->base.mqp);
ib_umem_release(sq->ubuffer.umem);
}
@@ -1408,7 +1325,7 @@ static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
MLX5_SET(rqc, rqc, user_index, MLX5_GET(qpc, qpc, user_index));
MLX5_SET(rqc, rqc, cqn, MLX5_GET(qpc, qpc, cqn_rcv));
- if (mqp->flags & MLX5_IB_QP_CAP_SCATTER_FCS)
+ if (mqp->flags & IB_QP_CREATE_SCATTER_FCS)
MLX5_SET(rqc, rqc, scatter_fcs, 1);
wq = MLX5_ADDR_OF(rqc, rqc, wq);
@@ -1426,7 +1343,7 @@ static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
qp_pas = (__be64 *)MLX5_ADDR_OF(create_qp_in, qpin, pas);
memcpy(pas, qp_pas, rq_pas_size);
- err = mlx5_core_create_rq_tracked(dev->mdev, in, inlen, &rq->base.mqp);
+ err = mlx5_core_create_rq_tracked(dev, in, inlen, &rq->base.mqp);
kvfree(in);
@@ -1436,14 +1353,7 @@ static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
static void destroy_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
struct mlx5_ib_rq *rq)
{
- mlx5_core_destroy_rq_tracked(dev->mdev, &rq->base.mqp);
-}
-
-static bool tunnel_offload_supported(struct mlx5_core_dev *dev)
-{
- return (MLX5_CAP_ETH(dev, tunnel_stateless_vxlan) ||
- MLX5_CAP_ETH(dev, tunnel_stateless_gre) ||
- MLX5_CAP_ETH(dev, tunnel_stateless_geneve_rx));
+ mlx5_core_destroy_rq_tracked(dev, &rq->base.mqp);
}
static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
@@ -1459,9 +1369,8 @@ static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
static int create_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
struct mlx5_ib_rq *rq, u32 tdn,
- u32 *qp_flags_en,
- struct ib_pd *pd,
- u32 *out, int outlen)
+ u32 *qp_flags_en, struct ib_pd *pd,
+ u32 *out)
{
u8 lb_flag = 0;
u32 *in;
@@ -1494,9 +1403,8 @@ static int create_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
}
MLX5_SET(tirc, tirc, self_lb_block, lb_flag);
-
- err = mlx5_core_create_tir_out(dev->mdev, in, inlen, out, outlen);
-
+ MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
+ err = mlx5_cmd_exec_inout(dev->mdev, create_tir, in, out);
rq->tirn = MLX5_GET(create_tir_out, out, tirn);
if (!err && MLX5_GET(tirc, tirc, self_lb_block)) {
err = mlx5_ib_enable_lb(dev, false, true);
@@ -1525,6 +1433,8 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
u16 uid = to_mpd(pd)->uid;
u32 out[MLX5_ST_SZ_DW(create_tir_out)] = {};
+ if (!qp->sq.wqe_cnt && !qp->rq.wqe_cnt)
+ return -EINVAL;
if (qp->sq.wqe_cnt) {
err = create_raw_packet_qp_tis(dev, qp, sq, tdn, pd);
if (err)
@@ -1548,17 +1458,16 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
if (qp->rq.wqe_cnt) {
rq->base.container_mibqp = qp;
- if (qp->flags & MLX5_IB_QP_CVLAN_STRIPPING)
+ if (qp->flags & IB_QP_CREATE_CVLAN_STRIPPING)
rq->flags |= MLX5_IB_RQ_CVLAN_STRIPPING;
- if (qp->flags & MLX5_IB_QP_PCI_WRITE_END_PADDING)
+ if (qp->flags & IB_QP_CREATE_PCI_WRITE_END_PADDING)
rq->flags |= MLX5_IB_RQ_PCI_WRITE_END_PADDING;
err = create_raw_packet_qp_rq(dev, rq, in, inlen, pd);
if (err)
goto err_destroy_sq;
- err = create_raw_packet_qp_tir(
- dev, rq, tdn, &qp->flags_en, pd, out,
- MLX5_ST_SZ_BYTES(create_tir_out));
+ err = create_raw_packet_qp_tir(dev, rq, tdn, &qp->flags_en, pd,
+ out);
if (err)
goto err_destroy_rq;
@@ -1586,14 +1495,8 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
qp->trans_qp.base.mqp.qpn = qp->sq.wqe_cnt ? sq->base.mqp.qpn :
rq->base.mqp.qpn;
- err = ib_copy_to_udata(udata, resp, min(udata->outlen, sizeof(*resp)));
- if (err)
- goto err_destroy_tir;
-
return 0;
-err_destroy_tir:
- destroy_raw_packet_qp_tir(dev, rq, qp->flags_en, pd);
err_destroy_rq:
destroy_raw_packet_qp_rq(dev, rq);
err_destroy_sq:
@@ -1645,14 +1548,27 @@ static void destroy_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *q
to_mpd(qp->ibqp.pd)->uid);
}
-static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
- struct ib_pd *pd,
- struct ib_qp_init_attr *init_attr,
- struct ib_udata *udata)
+struct mlx5_create_qp_params {
+ struct ib_udata *udata;
+ size_t inlen;
+ size_t outlen;
+ size_t ucmd_size;
+ void *ucmd;
+ u8 is_rss_raw : 1;
+ struct ib_qp_init_attr *attr;
+ u32 uidx;
+ struct mlx5_ib_create_qp_resp resp;
+};
+
+static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct ib_pd *pd,
+ struct mlx5_ib_qp *qp,
+ struct mlx5_create_qp_params *params)
{
+ struct ib_qp_init_attr *init_attr = params->attr;
+ struct mlx5_ib_create_qp_rss *ucmd = params->ucmd;
+ struct ib_udata *udata = params->udata;
struct mlx5_ib_ucontext *mucontext = rdma_udata_to_drv_context(
udata, struct mlx5_ib_ucontext, ibucontext);
- struct mlx5_ib_create_qp_resp resp = {};
int inlen;
int outlen;
int err;
@@ -1662,79 +1578,28 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
void *hfso;
u32 selected_fields = 0;
u32 outer_l4;
- size_t min_resp_len;
u32 tdn = mucontext->tdn;
- struct mlx5_ib_create_qp_rss ucmd = {};
- size_t required_cmd_sz;
u8 lb_flag = 0;
- if (init_attr->qp_type != IB_QPT_RAW_PACKET)
- return -EOPNOTSUPP;
-
- if (init_attr->create_flags || init_attr->send_cq)
- return -EINVAL;
-
- min_resp_len = offsetof(typeof(resp), bfreg_index) + sizeof(resp.bfreg_index);
- if (udata->outlen < min_resp_len)
- return -EINVAL;
-
- required_cmd_sz = offsetof(typeof(ucmd), flags) + sizeof(ucmd.flags);
- if (udata->inlen < required_cmd_sz) {
- mlx5_ib_dbg(dev, "invalid inlen\n");
- return -EINVAL;
- }
-
- if (udata->inlen > sizeof(ucmd) &&
- !ib_is_udata_cleared(udata, sizeof(ucmd),
- udata->inlen - sizeof(ucmd))) {
- mlx5_ib_dbg(dev, "inlen is not supported\n");
- return -EOPNOTSUPP;
- }
-
- if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen))) {
- mlx5_ib_dbg(dev, "copy failed\n");
- return -EFAULT;
- }
-
- if (ucmd.comp_mask) {
+ if (ucmd->comp_mask) {
mlx5_ib_dbg(dev, "invalid comp mask\n");
return -EOPNOTSUPP;
}
- if (ucmd.flags & ~(MLX5_QP_FLAG_TUNNEL_OFFLOADS |
- MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
- MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC)) {
- mlx5_ib_dbg(dev, "invalid flags\n");
- return -EOPNOTSUPP;
- }
-
- if (ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS &&
- !tunnel_offload_supported(dev->mdev)) {
- mlx5_ib_dbg(dev, "tunnel offloads isn't supported\n");
- return -EOPNOTSUPP;
- }
-
- if (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_INNER &&
- !(ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS)) {
+ if (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_INNER &&
+ !(ucmd->flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS)) {
mlx5_ib_dbg(dev, "Tunnel offloads must be set for inner RSS\n");
return -EOPNOTSUPP;
}
- if (ucmd.flags & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC || dev->is_rep) {
- lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
+ if (dev->is_rep)
qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC;
- }
- if (ucmd.flags & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC) {
- lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST;
- qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC;
- }
+ if (qp->flags_en & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC)
+ lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
- err = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
- if (err) {
- mlx5_ib_dbg(dev, "copy failed\n");
- return -EINVAL;
- }
+ if (qp->flags_en & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC)
+ lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST;
inlen = MLX5_ST_SZ_BYTES(create_tir_in);
outlen = MLX5_ST_SZ_BYTES(create_tir_out);
@@ -1753,29 +1618,29 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
- if (ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS)
+ if (ucmd->flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS)
MLX5_SET(tirc, tirc, tunneled_offload_en, 1);
MLX5_SET(tirc, tirc, self_lb_block, lb_flag);
- if (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_INNER)
+ if (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_INNER)
hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_inner);
else
hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
- switch (ucmd.rx_hash_function) {
+ switch (ucmd->rx_hash_function) {
case MLX5_RX_HASH_FUNC_TOEPLITZ:
{
void *rss_key = MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key);
size_t len = MLX5_FLD_SZ_BYTES(tirc, rx_hash_toeplitz_key);
- if (len != ucmd.rx_key_len) {
+ if (len != ucmd->rx_key_len) {
err = -EINVAL;
goto err;
}
MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_TOEPLITZ);
- memcpy(rss_key, ucmd.rx_hash_key, len);
+ memcpy(rss_key, ucmd->rx_hash_key, len);
break;
}
default:
@@ -1783,7 +1648,7 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
goto err;
}
- if (!ucmd.rx_hash_fields_mask) {
+ if (!ucmd->rx_hash_fields_mask) {
/* special case when this TIR serves as steering entry without hashing */
if (!init_attr->rwq_ind_tbl->log_ind_tbl_size)
goto create_tir;
@@ -1791,29 +1656,31 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
goto err;
}
- if (((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
- (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4)) &&
- ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6) ||
- (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))) {
+ if (((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
+ (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4)) &&
+ ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6) ||
+ (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))) {
err = -EINVAL;
goto err;
}
/* If none of IPV4 & IPV6 SRC/DST was set - this bit field is ignored */
- if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
- (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4))
+ if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
+ (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4))
MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
MLX5_L3_PROT_TYPE_IPV4);
- else if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6) ||
- (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))
+ else if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6) ||
+ (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))
MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
MLX5_L3_PROT_TYPE_IPV6);
- outer_l4 = ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
- (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP)) << 0 |
- ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP) ||
- (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP)) << 1 |
- (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_IPSEC_SPI) << 2;
+ outer_l4 = ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
+ (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP))
+ << 0 |
+ ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP) ||
+ (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP))
+ << 1 |
+ (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_IPSEC_SPI) << 2;
/* Check that only one l4 protocol is set */
if (outer_l4 & (outer_l4 - 1)) {
@@ -1822,38 +1689,39 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
}
/* If none of TCP & UDP SRC/DST was set - this bit field is ignored */
- if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
- (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP))
+ if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
+ (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP))
MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
MLX5_L4_PROT_TYPE_TCP);
- else if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP) ||
- (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP))
+ else if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP) ||
+ (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP))
MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
MLX5_L4_PROT_TYPE_UDP);
- if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
- (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6))
+ if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
+ (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6))
selected_fields |= MLX5_HASH_FIELD_SEL_SRC_IP;
- if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4) ||
- (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))
+ if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4) ||
+ (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))
selected_fields |= MLX5_HASH_FIELD_SEL_DST_IP;
- if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
- (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP))
+ if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
+ (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP))
selected_fields |= MLX5_HASH_FIELD_SEL_L4_SPORT;
- if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP) ||
- (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP))
+ if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP) ||
+ (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP))
selected_fields |= MLX5_HASH_FIELD_SEL_L4_DPORT;
- if (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_IPSEC_SPI)
+ if (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_IPSEC_SPI)
selected_fields |= MLX5_HASH_FIELD_SEL_IPSEC_SPI;
MLX5_SET(rx_hash_field_select, hfso, selected_fields, selected_fields);
create_tir:
- err = mlx5_core_create_tir_out(dev->mdev, in, inlen, out, outlen);
+ MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
+ err = mlx5_cmd_exec_inout(dev->mdev, create_tir, in, out);
qp->rss_qp.tirn = MLX5_GET(create_tir_out, out, tirn);
if (!err && MLX5_GET(tirc, tirc, self_lb_block)) {
@@ -1868,73 +1736,43 @@ create_tir:
goto err;
if (mucontext->devx_uid) {
- resp.comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_TIRN;
- resp.tirn = qp->rss_qp.tirn;
+ params->resp.comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_TIRN;
+ params->resp.tirn = qp->rss_qp.tirn;
if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner)) {
- resp.tir_icm_addr =
+ params->resp.tir_icm_addr =
MLX5_GET(create_tir_out, out, icm_address_31_0);
- resp.tir_icm_addr |= (u64)MLX5_GET(create_tir_out, out,
- icm_address_39_32)
- << 32;
- resp.tir_icm_addr |= (u64)MLX5_GET(create_tir_out, out,
- icm_address_63_40)
- << 40;
- resp.comp_mask |=
+ params->resp.tir_icm_addr |=
+ (u64)MLX5_GET(create_tir_out, out,
+ icm_address_39_32)
+ << 32;
+ params->resp.tir_icm_addr |=
+ (u64)MLX5_GET(create_tir_out, out,
+ icm_address_63_40)
+ << 40;
+ params->resp.comp_mask |=
MLX5_IB_CREATE_QP_RESP_MASK_TIR_ICM_ADDR;
}
}
- err = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
- if (err)
- goto err_copy;
-
kvfree(in);
/* qpn is reserved for that QP */
qp->trans_qp.base.mqp.qpn = 0;
- qp->flags |= MLX5_IB_QP_RSS;
+ qp->is_rss = true;
return 0;
-err_copy:
- mlx5_cmd_destroy_tir(dev->mdev, qp->rss_qp.tirn, mucontext->devx_uid);
err:
kvfree(in);
return err;
}
-static void configure_responder_scat_cqe(struct ib_qp_init_attr *init_attr,
- void *qpc)
-{
- int rcqe_sz;
-
- if (init_attr->qp_type == MLX5_IB_QPT_DCI)
- return;
-
- rcqe_sz = mlx5_ib_get_cqe_size(init_attr->recv_cq);
-
- if (init_attr->qp_type == MLX5_IB_QPT_DCT) {
- if (rcqe_sz == 128)
- MLX5_SET(dctc, qpc, cs_res, MLX5_RES_SCAT_DATA64_CQE);
-
- return;
- }
-
- MLX5_SET(qpc, qpc, cs_res,
- rcqe_sz == 128 ? MLX5_RES_SCAT_DATA64_CQE :
- MLX5_RES_SCAT_DATA32_CQE);
-}
-
static void configure_requester_scat_cqe(struct mlx5_ib_dev *dev,
struct ib_qp_init_attr *init_attr,
struct mlx5_ib_create_qp *ucmd,
void *qpc)
{
- enum ib_qp_type qpt = init_attr->qp_type;
int scqe_sz;
bool allow_scat_cqe = false;
- if (qpt == IB_QPT_UC || qpt == IB_QPT_UD)
- return;
-
if (ucmd)
allow_scat_cqe = ucmd->flags & MLX5_QP_FLAG_ALLOW_SCATTER_CQE;
@@ -1999,269 +1837,182 @@ static int get_atomic_mode(struct mlx5_ib_dev *dev,
return atomic_mode;
}
-static inline bool check_flags_mask(uint64_t input, uint64_t supported)
-{
- return (input & ~supported) == 0;
-}
-
-static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
- struct ib_qp_init_attr *init_attr,
- struct ib_udata *udata, struct mlx5_ib_qp *qp)
+static int create_xrc_tgt_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
+ struct mlx5_create_qp_params *params)
{
+ struct mlx5_ib_create_qp *ucmd = params->ucmd;
+ struct ib_qp_init_attr *attr = params->attr;
+ u32 uidx = params->uidx;
struct mlx5_ib_resources *devr = &dev->devr;
+ u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
int inlen = MLX5_ST_SZ_BYTES(create_qp_in);
struct mlx5_core_dev *mdev = dev->mdev;
- struct mlx5_ib_create_qp_resp resp = {};
- struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
- udata, struct mlx5_ib_ucontext, ibucontext);
- struct mlx5_ib_cq *send_cq;
- struct mlx5_ib_cq *recv_cq;
- unsigned long flags;
- u32 uidx = MLX5_IB_DEFAULT_UIDX;
- struct mlx5_ib_create_qp ucmd;
struct mlx5_ib_qp_base *base;
- int mlx5_st;
+ unsigned long flags;
void *qpc;
u32 *in;
int err;
mutex_init(&qp->mutex);
- spin_lock_init(&qp->sq.lock);
- spin_lock_init(&qp->rq.lock);
- mlx5_st = to_mlx5_st(init_attr->qp_type);
- if (mlx5_st < 0)
- return -EINVAL;
+ if (attr->sq_sig_type == IB_SIGNAL_ALL_WR)
+ qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
- if (init_attr->rwq_ind_tbl) {
- if (!udata)
- return -ENOSYS;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
- err = create_rss_raw_qp_tir(dev, qp, pd, init_attr, udata);
- return err;
- }
+ if (MLX5_CAP_GEN(mdev, ece_support))
+ MLX5_SET(create_qp_in, in, ece, ucmd->ece_options);
+ qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
- if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
- if (!MLX5_CAP_GEN(mdev, block_lb_mc)) {
- mlx5_ib_dbg(dev, "block multicast loopback isn't supported\n");
- return -EINVAL;
- } else {
- qp->flags |= MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK;
- }
- }
+ MLX5_SET(qpc, qpc, st, MLX5_QP_ST_XRC);
+ MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
+ MLX5_SET(qpc, qpc, pd, to_mpd(devr->p0)->pdn);
- if (init_attr->create_flags &
- (IB_QP_CREATE_CROSS_CHANNEL |
- IB_QP_CREATE_MANAGED_SEND |
- IB_QP_CREATE_MANAGED_RECV)) {
- if (!MLX5_CAP_GEN(mdev, cd)) {
- mlx5_ib_dbg(dev, "cross-channel isn't supported\n");
- return -EINVAL;
- }
- if (init_attr->create_flags & IB_QP_CREATE_CROSS_CHANNEL)
- qp->flags |= MLX5_IB_QP_CROSS_CHANNEL;
- if (init_attr->create_flags & IB_QP_CREATE_MANAGED_SEND)
- qp->flags |= MLX5_IB_QP_MANAGED_SEND;
- if (init_attr->create_flags & IB_QP_CREATE_MANAGED_RECV)
- qp->flags |= MLX5_IB_QP_MANAGED_RECV;
- }
-
- if (init_attr->qp_type == IB_QPT_UD &&
- (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO))
- if (!MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) {
- mlx5_ib_dbg(dev, "ipoib UD lso qp isn't supported\n");
- return -EOPNOTSUPP;
- }
+ if (qp->flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
+ MLX5_SET(qpc, qpc, block_lb_mc, 1);
+ if (qp->flags & IB_QP_CREATE_CROSS_CHANNEL)
+ MLX5_SET(qpc, qpc, cd_master, 1);
+ if (qp->flags & IB_QP_CREATE_MANAGED_SEND)
+ MLX5_SET(qpc, qpc, cd_slave_send, 1);
+ if (qp->flags & IB_QP_CREATE_MANAGED_RECV)
+ MLX5_SET(qpc, qpc, cd_slave_receive, 1);
- if (init_attr->create_flags & IB_QP_CREATE_SCATTER_FCS) {
- if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
- mlx5_ib_dbg(dev, "Scatter FCS is supported only for Raw Packet QPs");
- return -EOPNOTSUPP;
- }
- if (!MLX5_CAP_GEN(dev->mdev, eth_net_offloads) ||
- !MLX5_CAP_ETH(dev->mdev, scatter_fcs)) {
- mlx5_ib_dbg(dev, "Scatter FCS isn't supported\n");
- return -EOPNOTSUPP;
- }
- qp->flags |= MLX5_IB_QP_CAP_SCATTER_FCS;
- }
+ MLX5_SET(qpc, qpc, rq_type, MLX5_SRQ_RQ);
+ MLX5_SET(qpc, qpc, no_sq, 1);
+ MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn);
+ MLX5_SET(qpc, qpc, cqn_snd, to_mcq(devr->c0)->mcq.cqn);
+ MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s0)->msrq.srqn);
+ MLX5_SET(qpc, qpc, xrcd, to_mxrcd(attr->xrcd)->xrcdn);
+ MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma);
- if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
- qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
+ /* 0xffffff means we ask to work with cqe version 0 */
+ if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1)
+ MLX5_SET(qpc, qpc, user_index, uidx);
- if (init_attr->create_flags & IB_QP_CREATE_CVLAN_STRIPPING) {
- if (!(MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
- MLX5_CAP_ETH(dev->mdev, vlan_cap)) ||
- (init_attr->qp_type != IB_QPT_RAW_PACKET))
- return -EOPNOTSUPP;
- qp->flags |= MLX5_IB_QP_CVLAN_STRIPPING;
+ if (qp->flags & IB_QP_CREATE_PCI_WRITE_END_PADDING) {
+ MLX5_SET(qpc, qpc, end_padding_mode,
+ MLX5_WQ_END_PAD_MODE_ALIGN);
+ /* Special case to clean flag */
+ qp->flags &= ~IB_QP_CREATE_PCI_WRITE_END_PADDING;
}
- if (udata) {
- if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
- mlx5_ib_dbg(dev, "copy failed\n");
- return -EFAULT;
- }
+ base = &qp->trans_qp.base;
+ err = mlx5_qpc_create_qp(dev, &base->mqp, in, inlen, out);
+ kvfree(in);
+ if (err)
+ return err;
- if (!check_flags_mask(ucmd.flags,
- MLX5_QP_FLAG_ALLOW_SCATTER_CQE |
- MLX5_QP_FLAG_BFREG_INDEX |
- MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE |
- MLX5_QP_FLAG_SCATTER_CQE |
- MLX5_QP_FLAG_SIGNATURE |
- MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC |
- MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
- MLX5_QP_FLAG_TUNNEL_OFFLOADS |
- MLX5_QP_FLAG_UAR_PAGE_INDEX |
- MLX5_QP_FLAG_TYPE_DCI |
- MLX5_QP_FLAG_TYPE_DCT))
- return -EINVAL;
+ base->container_mibqp = qp;
+ base->mqp.event = mlx5_ib_qp_event;
+ if (MLX5_CAP_GEN(mdev, ece_support))
+ params->resp.ece_options = MLX5_GET(create_qp_out, out, ece);
- err = get_qp_user_index(ucontext, &ucmd, udata->inlen, &uidx);
- if (err)
- return err;
+ spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
+ list_add_tail(&qp->qps_list, &dev->qp_list);
+ spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
- qp->wq_sig = !!(ucmd.flags & MLX5_QP_FLAG_SIGNATURE);
- if (MLX5_CAP_GEN(dev->mdev, sctr_data_cqe))
- qp->scat_cqe = !!(ucmd.flags & MLX5_QP_FLAG_SCATTER_CQE);
- if (ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS) {
- if (init_attr->qp_type != IB_QPT_RAW_PACKET ||
- !tunnel_offload_supported(mdev)) {
- mlx5_ib_dbg(dev, "Tunnel offload isn't supported\n");
- return -EOPNOTSUPP;
- }
- qp->flags_en |= MLX5_QP_FLAG_TUNNEL_OFFLOADS;
- }
+ qp->trans_qp.xrcdn = to_mxrcd(attr->xrcd)->xrcdn;
+ return 0;
+}
- if (ucmd.flags & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC) {
- if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
- mlx5_ib_dbg(dev, "Self-LB UC isn't supported\n");
- return -EOPNOTSUPP;
- }
- qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC;
- }
+static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
+ struct mlx5_ib_qp *qp,
+ struct mlx5_create_qp_params *params)
+{
+ struct ib_qp_init_attr *init_attr = params->attr;
+ struct mlx5_ib_create_qp *ucmd = params->ucmd;
+ u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
+ struct ib_udata *udata = params->udata;
+ u32 uidx = params->uidx;
+ struct mlx5_ib_resources *devr = &dev->devr;
+ int inlen = MLX5_ST_SZ_BYTES(create_qp_in);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_ib_cq *send_cq;
+ struct mlx5_ib_cq *recv_cq;
+ unsigned long flags;
+ struct mlx5_ib_qp_base *base;
+ int mlx5_st;
+ void *qpc;
+ u32 *in;
+ int err;
- if (ucmd.flags & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC) {
- if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
- mlx5_ib_dbg(dev, "Self-LB UM isn't supported\n");
- return -EOPNOTSUPP;
- }
- qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC;
- }
+ mutex_init(&qp->mutex);
+ spin_lock_init(&qp->sq.lock);
+ spin_lock_init(&qp->rq.lock);
- if (ucmd.flags & MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE) {
- if (init_attr->qp_type != IB_QPT_RC ||
- !MLX5_CAP_GEN(dev->mdev, qp_packet_based)) {
- mlx5_ib_dbg(dev, "packet based credit mode isn't supported\n");
- return -EOPNOTSUPP;
- }
- qp->flags |= MLX5_IB_QP_PACKET_BASED_CREDIT;
- }
+ mlx5_st = to_mlx5_st(qp->type);
+ if (mlx5_st < 0)
+ return -EINVAL;
- if (init_attr->create_flags & IB_QP_CREATE_SOURCE_QPN) {
- if (init_attr->qp_type != IB_QPT_UD ||
- (MLX5_CAP_GEN(dev->mdev, port_type) !=
- MLX5_CAP_PORT_TYPE_IB) ||
- !mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS)) {
- mlx5_ib_dbg(dev, "Source QP option isn't supported\n");
- return -EOPNOTSUPP;
- }
+ if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
+ qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
- qp->flags |= MLX5_IB_QP_UNDERLAY;
- qp->underlay_qpn = init_attr->source_qpn;
- }
- } else {
- qp->wq_sig = !!wq_signature;
- }
+ if (qp->flags & IB_QP_CREATE_SOURCE_QPN)
+ qp->underlay_qpn = init_attr->source_qpn;
base = (init_attr->qp_type == IB_QPT_RAW_PACKET ||
- qp->flags & MLX5_IB_QP_UNDERLAY) ?
+ qp->flags & IB_QP_CREATE_SOURCE_QPN) ?
&qp->raw_packet_qp.rq.base :
&qp->trans_qp.base;
qp->has_rq = qp_has_rq(init_attr);
- err = set_rq_size(dev, &init_attr->cap, qp->has_rq,
- qp, udata ? &ucmd : NULL);
+ err = set_rq_size(dev, &init_attr->cap, qp->has_rq, qp, ucmd);
if (err) {
mlx5_ib_dbg(dev, "err %d\n", err);
return err;
}
- if (pd) {
- if (udata) {
- __u32 max_wqes =
- 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
- mlx5_ib_dbg(dev, "requested sq_wqe_count (%d)\n", ucmd.sq_wqe_count);
- if (ucmd.rq_wqe_shift != qp->rq.wqe_shift ||
- ucmd.rq_wqe_count != qp->rq.wqe_cnt) {
- mlx5_ib_dbg(dev, "invalid rq params\n");
- return -EINVAL;
- }
- if (ucmd.sq_wqe_count > max_wqes) {
- mlx5_ib_dbg(dev, "requested sq_wqe_count (%d) > max allowed (%d)\n",
- ucmd.sq_wqe_count, max_wqes);
- return -EINVAL;
- }
- if (init_attr->create_flags &
- MLX5_IB_QP_CREATE_SQPN_QP1) {
- mlx5_ib_dbg(dev, "user-space is not allowed to create UD QPs spoofing as QP1\n");
- return -EINVAL;
- }
- err = create_user_qp(dev, pd, qp, udata, init_attr, &in,
- &resp, &inlen, base);
- if (err)
- mlx5_ib_dbg(dev, "err %d\n", err);
- } else {
- err = create_kernel_qp(dev, init_attr, qp, &in, &inlen,
- base);
- if (err)
- mlx5_ib_dbg(dev, "err %d\n", err);
- }
+ if (ucmd->rq_wqe_shift != qp->rq.wqe_shift ||
+ ucmd->rq_wqe_count != qp->rq.wqe_cnt)
+ return -EINVAL;
- if (err)
- return err;
- } else {
- in = kvzalloc(inlen, GFP_KERNEL);
- if (!in)
- return -ENOMEM;
+ if (ucmd->sq_wqe_count > (1 << MLX5_CAP_GEN(mdev, log_max_qp_sz)))
+ return -EINVAL;
- qp->create_type = MLX5_QP_EMPTY;
- }
+ err = _create_user_qp(dev, pd, qp, udata, init_attr, &in, &params->resp,
+ &inlen, base, ucmd);
+ if (err)
+ return err;
if (is_sqp(init_attr->qp_type))
qp->port = init_attr->port_num;
+ if (MLX5_CAP_GEN(mdev, ece_support))
+ MLX5_SET(create_qp_in, in, ece, ucmd->ece_options);
qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
MLX5_SET(qpc, qpc, st, mlx5_st);
MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
+ MLX5_SET(qpc, qpc, pd, to_mpd(pd)->pdn);
- if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR)
- MLX5_SET(qpc, qpc, pd, to_mpd(pd ? pd : devr->p0)->pdn);
- else
- MLX5_SET(qpc, qpc, latency_sensitive, 1);
-
-
- if (qp->wq_sig)
+ if (qp->flags_en & MLX5_QP_FLAG_SIGNATURE)
MLX5_SET(qpc, qpc, wq_signature, 1);
- if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
+ if (qp->flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
MLX5_SET(qpc, qpc, block_lb_mc, 1);
- if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL)
+ if (qp->flags & IB_QP_CREATE_CROSS_CHANNEL)
MLX5_SET(qpc, qpc, cd_master, 1);
- if (qp->flags & MLX5_IB_QP_MANAGED_SEND)
+ if (qp->flags & IB_QP_CREATE_MANAGED_SEND)
MLX5_SET(qpc, qpc, cd_slave_send, 1);
- if (qp->flags & MLX5_IB_QP_MANAGED_RECV)
+ if (qp->flags & IB_QP_CREATE_MANAGED_RECV)
MLX5_SET(qpc, qpc, cd_slave_receive, 1);
- if (qp->flags & MLX5_IB_QP_PACKET_BASED_CREDIT)
+ if (qp->flags_en & MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE)
MLX5_SET(qpc, qpc, req_e2e_credit_mode, 1);
- if (qp->scat_cqe && is_connected(init_attr->qp_type)) {
- configure_responder_scat_cqe(init_attr, qpc);
- configure_requester_scat_cqe(dev, init_attr,
- udata ? &ucmd : NULL,
- qpc);
+ if ((qp->flags_en & MLX5_QP_FLAG_SCATTER_CQE) &&
+ (init_attr->qp_type == IB_QPT_RC ||
+ init_attr->qp_type == IB_QPT_UC)) {
+ int rcqe_sz = mlx5_ib_get_cqe_size(init_attr->recv_cq);
+
+ MLX5_SET(qpc, qpc, cs_res,
+ rcqe_sz == 128 ? MLX5_RES_SCAT_DATA64_CQE :
+ MLX5_RES_SCAT_DATA32_CQE);
}
+ if ((qp->flags_en & MLX5_QP_FLAG_SCATTER_CQE) &&
+ (qp->type == MLX5_IB_QPT_DCI || qp->type == IB_QPT_RC))
+ configure_requester_scat_cqe(dev, init_attr, ucmd, qpc);
if (qp->rq.wqe_cnt) {
MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4);
@@ -2282,12 +2033,6 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
/* Set default resources */
switch (init_attr->qp_type) {
- case IB_QPT_XRC_TGT:
- MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn);
- MLX5_SET(qpc, qpc, cqn_snd, to_mcq(devr->c0)->mcq.cqn);
- MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s0)->msrq.srqn);
- MLX5_SET(qpc, qpc, xrcd, to_mxrcd(init_attr->xrcd)->xrcdn);
- break;
case IB_QPT_XRC_INI:
MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn);
MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x1)->xrcdn);
@@ -2315,52 +2060,163 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1)
MLX5_SET(qpc, qpc, user_index, uidx);
- /* we use IB_QP_CREATE_IPOIB_UD_LSO to indicates ipoib qp */
- if (init_attr->qp_type == IB_QPT_UD &&
- (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)) {
- MLX5_SET(qpc, qpc, ulp_stateless_offload_mode, 1);
- qp->flags |= MLX5_IB_QP_LSO;
+ if (qp->flags & IB_QP_CREATE_PCI_WRITE_END_PADDING &&
+ init_attr->qp_type != IB_QPT_RAW_PACKET) {
+ MLX5_SET(qpc, qpc, end_padding_mode,
+ MLX5_WQ_END_PAD_MODE_ALIGN);
+ /* Special case to clean flag */
+ qp->flags &= ~IB_QP_CREATE_PCI_WRITE_END_PADDING;
}
- if (init_attr->create_flags & IB_QP_CREATE_PCI_WRITE_END_PADDING) {
- if (!MLX5_CAP_GEN(dev->mdev, end_pad)) {
- mlx5_ib_dbg(dev, "scatter end padding is not supported\n");
- err = -EOPNOTSUPP;
- goto err;
- } else if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
- MLX5_SET(qpc, qpc, end_padding_mode,
- MLX5_WQ_END_PAD_MODE_ALIGN);
- } else {
- qp->flags |= MLX5_IB_QP_PCI_WRITE_END_PADDING;
- }
+ if (init_attr->qp_type == IB_QPT_RAW_PACKET ||
+ qp->flags & IB_QP_CREATE_SOURCE_QPN) {
+ qp->raw_packet_qp.sq.ubuffer.buf_addr = ucmd->sq_buf_addr;
+ raw_packet_qp_copy_info(qp, &qp->raw_packet_qp);
+ err = create_raw_packet_qp(dev, qp, in, inlen, pd, udata,
+ &params->resp);
+ } else
+ err = mlx5_qpc_create_qp(dev, &base->mqp, in, inlen, out);
+
+ kvfree(in);
+ if (err)
+ goto err_create;
+
+ base->container_mibqp = qp;
+ base->mqp.event = mlx5_ib_qp_event;
+ if (MLX5_CAP_GEN(mdev, ece_support))
+ params->resp.ece_options = MLX5_GET(create_qp_out, out, ece);
+
+ get_cqs(qp->type, init_attr->send_cq, init_attr->recv_cq,
+ &send_cq, &recv_cq);
+ spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
+ mlx5_ib_lock_cqs(send_cq, recv_cq);
+ /* Maintain device to QPs access, needed for further handling via reset
+ * flow
+ */
+ list_add_tail(&qp->qps_list, &dev->qp_list);
+ /* Maintain CQ to QPs access, needed for further handling via reset flow
+ */
+ if (send_cq)
+ list_add_tail(&qp->cq_send_list, &send_cq->list_send_qp);
+ if (recv_cq)
+ list_add_tail(&qp->cq_recv_list, &recv_cq->list_recv_qp);
+ mlx5_ib_unlock_cqs(send_cq, recv_cq);
+ spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
+
+ return 0;
+
+err_create:
+ destroy_qp(dev, qp, base, udata);
+ return err;
+}
+
+static int create_kernel_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
+ struct mlx5_ib_qp *qp,
+ struct mlx5_create_qp_params *params)
+{
+ struct ib_qp_init_attr *attr = params->attr;
+ u32 uidx = params->uidx;
+ struct mlx5_ib_resources *devr = &dev->devr;
+ u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
+ int inlen = MLX5_ST_SZ_BYTES(create_qp_in);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_ib_cq *send_cq;
+ struct mlx5_ib_cq *recv_cq;
+ unsigned long flags;
+ struct mlx5_ib_qp_base *base;
+ int mlx5_st;
+ void *qpc;
+ u32 *in;
+ int err;
+
+ mutex_init(&qp->mutex);
+ spin_lock_init(&qp->sq.lock);
+ spin_lock_init(&qp->rq.lock);
+
+ mlx5_st = to_mlx5_st(qp->type);
+ if (mlx5_st < 0)
+ return -EINVAL;
+
+ if (attr->sq_sig_type == IB_SIGNAL_ALL_WR)
+ qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
+
+ base = &qp->trans_qp.base;
+
+ qp->has_rq = qp_has_rq(attr);
+ err = set_rq_size(dev, &attr->cap, qp->has_rq, qp, NULL);
+ if (err) {
+ mlx5_ib_dbg(dev, "err %d\n", err);
+ return err;
}
- if (inlen < 0) {
- err = -EINVAL;
- goto err;
+ err = _create_kernel_qp(dev, attr, qp, &in, &inlen, base);
+ if (err)
+ return err;
+
+ if (is_sqp(attr->qp_type))
+ qp->port = attr->port_num;
+
+ qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
+
+ MLX5_SET(qpc, qpc, st, mlx5_st);
+ MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
+
+ if (attr->qp_type != MLX5_IB_QPT_REG_UMR)
+ MLX5_SET(qpc, qpc, pd, to_mpd(pd ? pd : devr->p0)->pdn);
+ else
+ MLX5_SET(qpc, qpc, latency_sensitive, 1);
+
+
+ if (qp->flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
+ MLX5_SET(qpc, qpc, block_lb_mc, 1);
+
+ if (qp->rq.wqe_cnt) {
+ MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4);
+ MLX5_SET(qpc, qpc, log_rq_size, ilog2(qp->rq.wqe_cnt));
}
- if (init_attr->qp_type == IB_QPT_RAW_PACKET ||
- qp->flags & MLX5_IB_QP_UNDERLAY) {
- qp->raw_packet_qp.sq.ubuffer.buf_addr = ucmd.sq_buf_addr;
- raw_packet_qp_copy_info(qp, &qp->raw_packet_qp);
- err = create_raw_packet_qp(dev, qp, in, inlen, pd, udata,
- &resp);
+ MLX5_SET(qpc, qpc, rq_type, get_rx_type(qp, attr));
+
+ if (qp->sq.wqe_cnt)
+ MLX5_SET(qpc, qpc, log_sq_size, ilog2(qp->sq.wqe_cnt));
+ else
+ MLX5_SET(qpc, qpc, no_sq, 1);
+
+ if (attr->srq) {
+ MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x0)->xrcdn);
+ MLX5_SET(qpc, qpc, srqn_rmpn_xrqn,
+ to_msrq(attr->srq)->msrq.srqn);
} else {
- err = mlx5_core_create_qp(dev->mdev, &base->mqp, in, inlen);
+ MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x1)->xrcdn);
+ MLX5_SET(qpc, qpc, srqn_rmpn_xrqn,
+ to_msrq(devr->s1)->msrq.srqn);
}
- if (err) {
- mlx5_ib_dbg(dev, "create qp failed\n");
- goto err_create;
- }
+ if (attr->send_cq)
+ MLX5_SET(qpc, qpc, cqn_snd, to_mcq(attr->send_cq)->mcq.cqn);
+
+ if (attr->recv_cq)
+ MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(attr->recv_cq)->mcq.cqn);
+ MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma);
+
+ /* 0xffffff means we ask to work with cqe version 0 */
+ if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1)
+ MLX5_SET(qpc, qpc, user_index, uidx);
+
+ /* we use IB_QP_CREATE_IPOIB_UD_LSO to indicate an ipoib qp */
+ if (qp->flags & IB_QP_CREATE_IPOIB_UD_LSO)
+ MLX5_SET(qpc, qpc, ulp_stateless_offload_mode, 1);
+
+ err = mlx5_qpc_create_qp(dev, &base->mqp, in, inlen, out);
kvfree(in);
+ if (err)
+ goto err_create;
base->container_mibqp = qp;
base->mqp.event = mlx5_ib_qp_event;
- get_cqs(init_attr->qp_type, init_attr->send_cq, init_attr->recv_cq,
+ get_cqs(qp->type, attr->send_cq, attr->recv_cq,
&send_cq, &recv_cq);
spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
mlx5_ib_lock_cqs(send_cq, recv_cq);
@@ -2380,13 +2236,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
return 0;
err_create:
- if (qp->create_type == MLX5_QP_USER)
- destroy_qp_user(dev, pd, qp, base, udata);
- else if (qp->create_type == MLX5_QP_KERNEL)
- destroy_qp_kernel(dev, qp);
-
-err:
- kvfree(in);
+ destroy_qp(dev, qp, base, NULL);
return err;
}
@@ -2448,11 +2298,6 @@ static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *re
}
}
-static struct mlx5_ib_pd *get_pd(struct mlx5_ib_qp *qp)
-{
- return to_mpd(qp->ibqp.pd);
-}
-
static void get_cqs(enum ib_qp_type qp_type,
struct ib_cq *ib_send_cq, struct ib_cq *ib_recv_cq,
struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq)
@@ -2473,14 +2318,10 @@ static void get_cqs(enum ib_qp_type qp_type,
case IB_QPT_RC:
case IB_QPT_UC:
case IB_QPT_UD:
- case IB_QPT_RAW_IPV6:
- case IB_QPT_RAW_ETHERTYPE:
case IB_QPT_RAW_PACKET:
*send_cq = ib_send_cq ? to_mcq(ib_send_cq) : NULL;
*recv_cq = ib_recv_cq ? to_mcq(ib_recv_cq) : NULL;
break;
-
- case IB_QPT_MAX:
default:
*send_cq = NULL;
*recv_cq = NULL;
@@ -2506,16 +2347,15 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
}
base = (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
- qp->flags & MLX5_IB_QP_UNDERLAY) ?
+ qp->flags & IB_QP_CREATE_SOURCE_QPN) ?
&qp->raw_packet_qp.rq.base :
&qp->trans_qp.base;
if (qp->state != IB_QPS_RESET) {
if (qp->ibqp.qp_type != IB_QPT_RAW_PACKET &&
- !(qp->flags & MLX5_IB_QP_UNDERLAY)) {
- err = mlx5_core_qp_modify(dev->mdev,
- MLX5_CMD_OP_2RST_QP, 0,
- NULL, &base->mqp);
+ !(qp->flags & IB_QP_CREATE_SOURCE_QPN)) {
+ err = mlx5_core_qp_modify(dev, MLX5_CMD_OP_2RST_QP, 0,
+ NULL, &base->mqp, NULL);
} else {
struct mlx5_modify_raw_qp_param raw_qp_param = {
.operation = MLX5_CMD_OP_2RST_QP
@@ -2541,7 +2381,7 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
if (recv_cq)
list_del(&qp->cq_recv_list);
- if (qp->create_type == MLX5_QP_KERNEL) {
+ if (!udata) {
__mlx5_ib_cq_clean(recv_cq, base->mqp.qpn,
qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
if (send_cq != recv_cq)
@@ -2552,263 +2392,463 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
- qp->flags & MLX5_IB_QP_UNDERLAY) {
+ qp->flags & IB_QP_CREATE_SOURCE_QPN) {
destroy_raw_packet_qp(dev, qp);
} else {
- err = mlx5_core_destroy_qp(dev->mdev, &base->mqp);
+ err = mlx5_core_destroy_qp(dev, &base->mqp);
if (err)
mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n",
base->mqp.qpn);
}
- if (qp->create_type == MLX5_QP_KERNEL)
- destroy_qp_kernel(dev, qp);
- else if (qp->create_type == MLX5_QP_USER)
- destroy_qp_user(dev, &get_pd(qp)->ibpd, qp, base, udata);
+ destroy_qp(dev, qp, base, udata);
}
-static const char *ib_qp_type_str(enum ib_qp_type type)
+static int create_dct(struct mlx5_ib_dev *dev, struct ib_pd *pd,
+ struct mlx5_ib_qp *qp,
+ struct mlx5_create_qp_params *params)
{
- switch (type) {
- case IB_QPT_SMI:
- return "IB_QPT_SMI";
- case IB_QPT_GSI:
- return "IB_QPT_GSI";
+ struct ib_qp_init_attr *attr = params->attr;
+ struct mlx5_ib_create_qp *ucmd = params->ucmd;
+ u32 uidx = params->uidx;
+ void *dctc;
+
+ qp->dct.in = kzalloc(MLX5_ST_SZ_BYTES(create_dct_in), GFP_KERNEL);
+ if (!qp->dct.in)
+ return -ENOMEM;
+
+ MLX5_SET(create_dct_in, qp->dct.in, uid, to_mpd(pd)->uid);
+ dctc = MLX5_ADDR_OF(create_dct_in, qp->dct.in, dct_context_entry);
+ MLX5_SET(dctc, dctc, pd, to_mpd(pd)->pdn);
+ MLX5_SET(dctc, dctc, srqn_xrqn, to_msrq(attr->srq)->msrq.srqn);
+ MLX5_SET(dctc, dctc, cqn, to_mcq(attr->recv_cq)->mcq.cqn);
+ MLX5_SET64(dctc, dctc, dc_access_key, ucmd->access_key);
+ MLX5_SET(dctc, dctc, user_index, uidx);
+ if (MLX5_CAP_GEN(dev->mdev, ece_support))
+ MLX5_SET(dctc, dctc, ece, ucmd->ece_options);
+
+ if (qp->flags_en & MLX5_QP_FLAG_SCATTER_CQE) {
+ int rcqe_sz = mlx5_ib_get_cqe_size(attr->recv_cq);
+
+ if (rcqe_sz == 128)
+ MLX5_SET(dctc, dctc, cs_res, MLX5_RES_SCAT_DATA64_CQE);
+ }
+
+ qp->state = IB_QPS_RESET;
+
+ return 0;
+}
+
+static int check_qp_type(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
+ enum ib_qp_type *type)
+{
+ if (attr->qp_type == IB_QPT_DRIVER && !MLX5_CAP_GEN(dev->mdev, dct))
+ goto out;
+
+ switch (attr->qp_type) {
+ case IB_QPT_XRC_TGT:
+ case IB_QPT_XRC_INI:
+ if (!MLX5_CAP_GEN(dev->mdev, xrc))
+ goto out;
+ fallthrough;
case IB_QPT_RC:
- return "IB_QPT_RC";
case IB_QPT_UC:
- return "IB_QPT_UC";
- case IB_QPT_UD:
- return "IB_QPT_UD";
- case IB_QPT_RAW_IPV6:
- return "IB_QPT_RAW_IPV6";
- case IB_QPT_RAW_ETHERTYPE:
- return "IB_QPT_RAW_ETHERTYPE";
- case IB_QPT_XRC_INI:
- return "IB_QPT_XRC_INI";
- case IB_QPT_XRC_TGT:
- return "IB_QPT_XRC_TGT";
+ case IB_QPT_SMI:
+ case MLX5_IB_QPT_HW_GSI:
+ case IB_QPT_DRIVER:
+ case IB_QPT_GSI:
+ if (dev->profile == &raw_eth_profile)
+ goto out;
case IB_QPT_RAW_PACKET:
- return "IB_QPT_RAW_PACKET";
+ case IB_QPT_UD:
case MLX5_IB_QPT_REG_UMR:
- return "MLX5_IB_QPT_REG_UMR";
- case IB_QPT_DRIVER:
- return "IB_QPT_DRIVER";
- case IB_QPT_MAX:
+ break;
default:
- return "Invalid QP type";
+ goto out;
}
+
+ *type = attr->qp_type;
+ return 0;
+
+out:
+ mlx5_ib_dbg(dev, "Unsupported QP type %d\n", attr->qp_type);
+ return -EOPNOTSUPP;
}
-static struct ib_qp *mlx5_ib_create_dct(struct ib_pd *pd,
- struct ib_qp_init_attr *attr,
- struct mlx5_ib_create_qp *ucmd,
- struct ib_udata *udata)
+static int check_valid_flow(struct mlx5_ib_dev *dev, struct ib_pd *pd,
+ struct ib_qp_init_attr *attr,
+ struct ib_udata *udata)
{
struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
udata, struct mlx5_ib_ucontext, ibucontext);
- struct mlx5_ib_qp *qp;
- int err = 0;
- u32 uidx = MLX5_IB_DEFAULT_UIDX;
- void *dctc;
- if (!attr->srq || !attr->recv_cq)
- return ERR_PTR(-EINVAL);
+ if (!udata) {
+ /* Kernel create_qp callers */
+ if (attr->rwq_ind_tbl)
+ return -EOPNOTSUPP;
- err = get_qp_user_index(ucontext, ucmd, sizeof(*ucmd), &uidx);
- if (err)
- return ERR_PTR(err);
+ switch (attr->qp_type) {
+ case IB_QPT_RAW_PACKET:
+ case IB_QPT_DRIVER:
+ return -EOPNOTSUPP;
+ default:
+ return 0;
+ }
+ }
- qp = kzalloc(sizeof(*qp), GFP_KERNEL);
- if (!qp)
- return ERR_PTR(-ENOMEM);
+ /* Userspace create_qp callers */
+ if (attr->qp_type == IB_QPT_RAW_PACKET && !ucontext->cqe_version) {
+ mlx5_ib_dbg(dev,
+ "Raw Packet QP is only supported for CQE version > 0\n");
+ return -EINVAL;
+ }
- qp->dct.in = kzalloc(MLX5_ST_SZ_BYTES(create_dct_in), GFP_KERNEL);
- if (!qp->dct.in) {
- err = -ENOMEM;
- goto err_free;
+ if (attr->qp_type != IB_QPT_RAW_PACKET && attr->rwq_ind_tbl) {
+ mlx5_ib_dbg(dev,
+ "Wrong QP type %d for the RWQ indirect table\n",
+ attr->qp_type);
+ return -EINVAL;
}
- MLX5_SET(create_dct_in, qp->dct.in, uid, to_mpd(pd)->uid);
- dctc = MLX5_ADDR_OF(create_dct_in, qp->dct.in, dct_context_entry);
- qp->qp_sub_type = MLX5_IB_QPT_DCT;
- MLX5_SET(dctc, dctc, pd, to_mpd(pd)->pdn);
- MLX5_SET(dctc, dctc, srqn_xrqn, to_msrq(attr->srq)->msrq.srqn);
- MLX5_SET(dctc, dctc, cqn, to_mcq(attr->recv_cq)->mcq.cqn);
- MLX5_SET64(dctc, dctc, dc_access_key, ucmd->access_key);
- MLX5_SET(dctc, dctc, user_index, uidx);
+ switch (attr->qp_type) {
+ case IB_QPT_SMI:
+ case MLX5_IB_QPT_HW_GSI:
+ case MLX5_IB_QPT_REG_UMR:
+ case IB_QPT_GSI:
+ mlx5_ib_dbg(dev, "Kernel doesn't support QP type %d\n",
+ attr->qp_type);
+ return -EINVAL;
+ default:
+ break;
+ }
- if (ucmd->flags & MLX5_QP_FLAG_SCATTER_CQE)
- configure_responder_scat_cqe(attr, dctc);
+ /*
+ * We don't need to see this warning, it means that kernel code
+ * is missing an ib_pd. Placed here to catch developers' mistakes.
+ */
+ WARN_ONCE(!pd && attr->qp_type != IB_QPT_XRC_TGT,
+ "There is a missing PD pointer assignment\n");
+ return 0;
+}
- qp->state = IB_QPS_RESET;
+static void process_vendor_flag(struct mlx5_ib_dev *dev, int *flags, int flag,
+ bool cond, struct mlx5_ib_qp *qp)
+{
+ if (!(*flags & flag))
+ return;
- return &qp->ibqp;
-err_free:
- kfree(qp);
- return ERR_PTR(err);
+ if (cond) {
+ qp->flags_en |= flag;
+ *flags &= ~flag;
+ return;
+ }
+
+ if (flag == MLX5_QP_FLAG_SCATTER_CQE) {
+ /*
+ * We don't return an error if this flag was provided
+ * but mlx5 doesn't have the right capability.
+ */
+ *flags &= ~MLX5_QP_FLAG_SCATTER_CQE;
+ return;
+ }
+ mlx5_ib_dbg(dev, "Vendor create QP flag 0x%X is not supported\n", flag);
}
-static int set_mlx_qp_type(struct mlx5_ib_dev *dev,
- struct ib_qp_init_attr *init_attr,
- struct mlx5_ib_create_qp *ucmd,
- struct ib_udata *udata)
+static int process_vendor_flags(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
+ void *ucmd, struct ib_qp_init_attr *attr)
{
- enum { MLX_QP_FLAGS = MLX5_QP_FLAG_TYPE_DCT | MLX5_QP_FLAG_TYPE_DCI };
- int err;
+ struct mlx5_core_dev *mdev = dev->mdev;
+ bool cond;
+ int flags;
- if (!udata)
+ if (attr->rwq_ind_tbl)
+ flags = ((struct mlx5_ib_create_qp_rss *)ucmd)->flags;
+ else
+ flags = ((struct mlx5_ib_create_qp *)ucmd)->flags;
+
+ switch (flags & (MLX5_QP_FLAG_TYPE_DCT | MLX5_QP_FLAG_TYPE_DCI)) {
+ case MLX5_QP_FLAG_TYPE_DCI:
+ qp->type = MLX5_IB_QPT_DCI;
+ break;
+ case MLX5_QP_FLAG_TYPE_DCT:
+ qp->type = MLX5_IB_QPT_DCT;
+ break;
+ default:
+ if (qp->type != IB_QPT_DRIVER)
+ break;
+ /*
+ * It is IB_QPT_DRIVER and either no subtype or a
+ * wrong subtype was provided.
+ */
return -EINVAL;
+ }
- if (udata->inlen < sizeof(*ucmd)) {
- mlx5_ib_dbg(dev, "create_qp user command is smaller than expected\n");
+ process_vendor_flag(dev, &flags, MLX5_QP_FLAG_TYPE_DCI, true, qp);
+ process_vendor_flag(dev, &flags, MLX5_QP_FLAG_TYPE_DCT, true, qp);
+
+ process_vendor_flag(dev, &flags, MLX5_QP_FLAG_SIGNATURE, true, qp);
+ process_vendor_flag(dev, &flags, MLX5_QP_FLAG_SCATTER_CQE,
+ MLX5_CAP_GEN(mdev, sctr_data_cqe), qp);
+
+ if (qp->type == IB_QPT_RAW_PACKET) {
+ cond = MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) ||
+ MLX5_CAP_ETH(mdev, tunnel_stateless_gre) ||
+ MLX5_CAP_ETH(mdev, tunnel_stateless_geneve_rx);
+ process_vendor_flag(dev, &flags, MLX5_QP_FLAG_TUNNEL_OFFLOADS,
+ cond, qp);
+ process_vendor_flag(dev, &flags,
+ MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC, true,
+ qp);
+ process_vendor_flag(dev, &flags,
+ MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC, true,
+ qp);
+ }
+
+ if (qp->type == IB_QPT_RC)
+ process_vendor_flag(dev, &flags,
+ MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE,
+ MLX5_CAP_GEN(mdev, qp_packet_based), qp);
+
+ process_vendor_flag(dev, &flags, MLX5_QP_FLAG_BFREG_INDEX, true, qp);
+ process_vendor_flag(dev, &flags, MLX5_QP_FLAG_UAR_PAGE_INDEX, true, qp);
+
+ cond = qp->flags_en & ~(MLX5_QP_FLAG_TUNNEL_OFFLOADS |
+ MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
+ MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC);
+ if (attr->rwq_ind_tbl && cond) {
+ mlx5_ib_dbg(dev, "RSS RAW QP has unsupported flags 0x%X\n",
+ cond);
return -EINVAL;
}
- err = ib_copy_from_udata(ucmd, udata, sizeof(*ucmd));
- if (err)
- return err;
- if ((ucmd->flags & MLX_QP_FLAGS) == MLX5_QP_FLAG_TYPE_DCI) {
- init_attr->qp_type = MLX5_IB_QPT_DCI;
- } else {
- if ((ucmd->flags & MLX_QP_FLAGS) == MLX5_QP_FLAG_TYPE_DCT) {
- init_attr->qp_type = MLX5_IB_QPT_DCT;
- } else {
- mlx5_ib_dbg(dev, "Invalid QP flags\n");
- return -EINVAL;
- }
+ if (flags)
+ mlx5_ib_dbg(dev, "udata has unsupported flags 0x%X\n", flags);
+
+ return (flags) ? -EINVAL : 0;
}
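
process_vendor_flag() consumes each recognised bit from the user's flag word only when the backing capability is present, so whatever is still set afterwards is by definition unsupported and process_vendor_flags() can reject it with a single final check. A driver-independent sketch of that pattern, with made-up flag names and capabilities:

#include <errno.h>
#include <stdbool.h>

#define FLAG_A 0x1
#define FLAG_B 0x2

/* Clear 'flag' from *mask and record it in *accepted, but only if the
 * capability backing it holds; otherwise leave the bit set so the
 * caller can fail the request with one check at the end. */
static void consume_flag(int *mask, int flag, bool cap_ok, int *accepted)
{
	if (!(*mask & flag))
		return;
	if (cap_ok) {
		*accepted |= flag;
		*mask &= ~flag;
	}
}

static int process_flags(int mask, bool cap_a, bool cap_b, int *accepted)
{
	consume_flag(&mask, FLAG_A, cap_a, accepted);
	consume_flag(&mask, FLAG_B, cap_b, accepted);
	return mask ? -EINVAL : 0;	/* leftover bits => unsupported */
}
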
- if (!MLX5_CAP_GEN(dev->mdev, dct)) {
- mlx5_ib_dbg(dev, "DC transport is not supported\n");
- return -EOPNOTSUPP;
+static void process_create_flag(struct mlx5_ib_dev *dev, int *flags, int flag,
+ bool cond, struct mlx5_ib_qp *qp)
+{
+ if (!(*flags & flag))
+ return;
+
+ if (cond) {
+ qp->flags |= flag;
+ *flags &= ~flag;
+ return;
}
- return 0;
+ if (flag == MLX5_IB_QP_CREATE_WC_TEST) {
+ /*
+ * Special case: if the condition isn't met, it isn't an error,
+ * just a different in-kernel flow.
+ */
+ *flags &= ~MLX5_IB_QP_CREATE_WC_TEST;
+ return;
+ }
+ mlx5_ib_dbg(dev, "Verbs create QP flag 0x%X is not supported\n", flag);
}
-struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
- struct ib_qp_init_attr *verbs_init_attr,
- struct ib_udata *udata)
+static int process_create_flags(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
+ struct ib_qp_init_attr *attr)
{
- struct mlx5_ib_dev *dev;
- struct mlx5_ib_qp *qp;
- u16 xrcdn = 0;
- int err;
- struct ib_qp_init_attr mlx_init_attr;
- struct ib_qp_init_attr *init_attr = verbs_init_attr;
- struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
- udata, struct mlx5_ib_ucontext, ibucontext);
+ enum ib_qp_type qp_type = qp->type;
+ struct mlx5_core_dev *mdev = dev->mdev;
+ int create_flags = attr->create_flags;
+ bool cond;
- if (pd) {
- dev = to_mdev(pd->device);
+ if (qp->type == IB_QPT_UD && dev->profile == &raw_eth_profile)
+ if (create_flags & ~MLX5_IB_QP_CREATE_WC_TEST)
+ return -EINVAL;
- if (init_attr->qp_type == IB_QPT_RAW_PACKET) {
- if (!ucontext) {
- mlx5_ib_dbg(dev, "Raw Packet QP is not supported for kernel consumers\n");
- return ERR_PTR(-EINVAL);
- } else if (!ucontext->cqe_version) {
- mlx5_ib_dbg(dev, "Raw Packet QP is only supported for CQE version > 0\n");
- return ERR_PTR(-EINVAL);
- }
- }
- } else {
- /* being cautious here */
- if (init_attr->qp_type != IB_QPT_XRC_TGT &&
- init_attr->qp_type != MLX5_IB_QPT_REG_UMR) {
- pr_warn("%s: no PD for transport %s\n", __func__,
- ib_qp_type_str(init_attr->qp_type));
- return ERR_PTR(-EINVAL);
- }
- dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device);
+ if (qp_type == MLX5_IB_QPT_DCT)
+ return (create_flags) ? -EINVAL : 0;
+
+ if (qp_type == IB_QPT_RAW_PACKET && attr->rwq_ind_tbl)
+ return (create_flags) ? -EINVAL : 0;
+
+ process_create_flag(dev, &create_flags,
+ IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
+ MLX5_CAP_GEN(mdev, block_lb_mc), qp);
+ process_create_flag(dev, &create_flags, IB_QP_CREATE_CROSS_CHANNEL,
+ MLX5_CAP_GEN(mdev, cd), qp);
+ process_create_flag(dev, &create_flags, IB_QP_CREATE_MANAGED_SEND,
+ MLX5_CAP_GEN(mdev, cd), qp);
+ process_create_flag(dev, &create_flags, IB_QP_CREATE_MANAGED_RECV,
+ MLX5_CAP_GEN(mdev, cd), qp);
+
+ if (qp_type == IB_QPT_UD) {
+ process_create_flag(dev, &create_flags,
+ IB_QP_CREATE_IPOIB_UD_LSO,
+ MLX5_CAP_GEN(mdev, ipoib_basic_offloads),
+ qp);
+ cond = MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_IB;
+ process_create_flag(dev, &create_flags, IB_QP_CREATE_SOURCE_QPN,
+ cond, qp);
+ }
+
+ if (qp_type == IB_QPT_RAW_PACKET) {
+ cond = MLX5_CAP_GEN(mdev, eth_net_offloads) &&
+ MLX5_CAP_ETH(mdev, scatter_fcs);
+ process_create_flag(dev, &create_flags,
+ IB_QP_CREATE_SCATTER_FCS, cond, qp);
+
+ cond = MLX5_CAP_GEN(mdev, eth_net_offloads) &&
+ MLX5_CAP_ETH(mdev, vlan_cap);
+ process_create_flag(dev, &create_flags,
+ IB_QP_CREATE_CVLAN_STRIPPING, cond, qp);
+ }
+
+ process_create_flag(dev, &create_flags,
+ IB_QP_CREATE_PCI_WRITE_END_PADDING,
+ MLX5_CAP_GEN(mdev, end_pad), qp);
+
+ process_create_flag(dev, &create_flags, MLX5_IB_QP_CREATE_WC_TEST,
+ qp_type != MLX5_IB_QPT_REG_UMR, qp);
+ process_create_flag(dev, &create_flags, MLX5_IB_QP_CREATE_SQPN_QP1,
+ true, qp);
+
+ if (create_flags)
+ mlx5_ib_dbg(dev, "Create QP has unsupported flags 0x%X\n",
+ create_flags);
+
+ return (create_flags) ? -EINVAL : 0;
+}
+
+static int process_udata_size(struct mlx5_ib_dev *dev,
+ struct mlx5_create_qp_params *params)
+{
+ size_t ucmd = sizeof(struct mlx5_ib_create_qp);
+ struct ib_udata *udata = params->udata;
+ size_t outlen = udata->outlen;
+ size_t inlen = udata->inlen;
+
+ params->outlen = min(outlen, sizeof(struct mlx5_ib_create_qp_resp));
+ params->ucmd_size = ucmd;
+ if (!params->is_rss_raw) {
+ /* User has old rdma-core, which doesn't support ECE */
+ size_t min_inlen =
+ offsetof(struct mlx5_ib_create_qp, ece_options);
+
+ /*
+ * We will check in check_ucmd_data() that user
+ * cleared everything after inlen.
+ */
+ params->inlen = (inlen < min_inlen) ? 0 : min(inlen, ucmd);
+ goto out;
}
- if (init_attr->qp_type == IB_QPT_DRIVER) {
- struct mlx5_ib_create_qp ucmd;
+ /* RSS RAW QP */
+ if (inlen < offsetofend(struct mlx5_ib_create_qp_rss, flags))
+ return -EINVAL;
- init_attr = &mlx_init_attr;
- memcpy(init_attr, verbs_init_attr, sizeof(*verbs_init_attr));
- err = set_mlx_qp_type(dev, init_attr, &ucmd, udata);
- if (err)
- return ERR_PTR(err);
+ if (outlen < offsetofend(struct mlx5_ib_create_qp_resp, bfreg_index))
+ return -EINVAL;
- if (init_attr->qp_type == MLX5_IB_QPT_DCI) {
- if (init_attr->cap.max_recv_wr ||
- init_attr->cap.max_recv_sge) {
- mlx5_ib_dbg(dev, "DCI QP requires zero size receive queue\n");
- return ERR_PTR(-EINVAL);
- }
- } else {
- return mlx5_ib_create_dct(pd, init_attr, &ucmd, udata);
- }
+ ucmd = sizeof(struct mlx5_ib_create_qp_rss);
+ params->ucmd_size = ucmd;
+ if (inlen > ucmd && !ib_is_udata_cleared(udata, ucmd, inlen - ucmd))
+ return -EINVAL;
+
+ params->inlen = min(ucmd, inlen);
+out:
+ if (!params->inlen)
+ mlx5_ib_dbg(dev, "udata is too small\n");
+
+ return (params->inlen) ? 0 : -EINVAL;
+}
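
process_udata_size() is the usual old-userspace/new-kernel ABI negotiation: accept an input at least as long as the last mandatory field, never copy more than the struct this kernel knows, and clamp the response to what the caller can hold. A generic sketch of the clamping rule, assuming a hypothetical two-version command layout:

#include <stddef.h>
#include <stdint.h>

struct cmd_v2 {
	uint32_t must_have;	/* present since v1 */
	uint32_t optional;	/* added in v2; old users may omit it */
};

/* Reject inputs shorter than the mandatory part and never copy more
 * than the struct the consumer understands. Returns the number of
 * bytes to copy, or 0 for "too small". */
static size_t clamp_cmd_len(size_t user_len)
{
	size_t min_len = offsetof(struct cmd_v2, optional);

	if (user_len < min_len)
		return 0;
	return user_len < sizeof(struct cmd_v2) ? user_len
						: sizeof(struct cmd_v2);
}
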
+
+static int create_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
+ struct mlx5_ib_qp *qp,
+ struct mlx5_create_qp_params *params)
+{
+ int err;
+
+ if (params->is_rss_raw) {
+ err = create_rss_raw_qp_tir(dev, pd, qp, params);
+ goto out;
}
- switch (init_attr->qp_type) {
- case IB_QPT_XRC_TGT:
- case IB_QPT_XRC_INI:
- if (!MLX5_CAP_GEN(dev->mdev, xrc)) {
- mlx5_ib_dbg(dev, "XRC not supported\n");
- return ERR_PTR(-ENOSYS);
- }
- init_attr->recv_cq = NULL;
- if (init_attr->qp_type == IB_QPT_XRC_TGT) {
- xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn;
- init_attr->send_cq = NULL;
- }
+ if (qp->type == MLX5_IB_QPT_DCT) {
+ err = create_dct(dev, pd, qp, params);
+ goto out;
+ }
- /* fall through */
- case IB_QPT_RAW_PACKET:
- case IB_QPT_RC:
- case IB_QPT_UC:
- case IB_QPT_UD:
- case IB_QPT_SMI:
- case MLX5_IB_QPT_HW_GSI:
- case MLX5_IB_QPT_REG_UMR:
- case MLX5_IB_QPT_DCI:
- qp = kzalloc(sizeof(*qp), GFP_KERNEL);
- if (!qp)
- return ERR_PTR(-ENOMEM);
+ if (qp->type == IB_QPT_XRC_TGT) {
+ err = create_xrc_tgt_qp(dev, qp, params);
+ goto out;
+ }
- err = create_qp_common(dev, pd, init_attr, udata, qp);
- if (err) {
- mlx5_ib_dbg(dev, "create_qp_common failed\n");
- kfree(qp);
- return ERR_PTR(err);
- }
+ if (params->udata)
+ err = create_user_qp(dev, pd, qp, params);
+ else
+ err = create_kernel_qp(dev, pd, qp, params);
- if (is_qp0(init_attr->qp_type))
- qp->ibqp.qp_num = 0;
- else if (is_qp1(init_attr->qp_type))
- qp->ibqp.qp_num = 1;
- else
- qp->ibqp.qp_num = qp->trans_qp.base.mqp.qpn;
+out:
+ if (err) {
+ mlx5_ib_err(dev, "Create QP type %d failed\n", qp->type);
+ return err;
+ }
- mlx5_ib_dbg(dev, "ib qpnum 0x%x, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n",
- qp->ibqp.qp_num, qp->trans_qp.base.mqp.qpn,
- init_attr->recv_cq ? to_mcq(init_attr->recv_cq)->mcq.cqn : -1,
- init_attr->send_cq ? to_mcq(init_attr->send_cq)->mcq.cqn : -1);
+ if (is_qp0(qp->type))
+ qp->ibqp.qp_num = 0;
+ else if (is_qp1(qp->type))
+ qp->ibqp.qp_num = 1;
+ else
+ qp->ibqp.qp_num = qp->trans_qp.base.mqp.qpn;
- qp->trans_qp.xrcdn = xrcdn;
+ mlx5_ib_dbg(dev,
+ "QP type %d, ib qpn 0x%X, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x, ece 0x%x\n",
+ qp->type, qp->ibqp.qp_num, qp->trans_qp.base.mqp.qpn,
+ params->attr->recv_cq ? to_mcq(params->attr->recv_cq)->mcq.cqn :
+ -1,
+ params->attr->send_cq ? to_mcq(params->attr->send_cq)->mcq.cqn :
+ -1,
+ params->resp.ece_options);
- break;
+ return 0;
+}
- case IB_QPT_GSI:
- return mlx5_ib_gsi_create_qp(pd, init_attr);
+static int check_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
+ struct ib_qp_init_attr *attr)
+{
+ int ret = 0;
- case IB_QPT_RAW_IPV6:
- case IB_QPT_RAW_ETHERTYPE:
- case IB_QPT_MAX:
+ switch (qp->type) {
+ case MLX5_IB_QPT_DCT:
+ ret = (!attr->srq || !attr->recv_cq) ? -EINVAL : 0;
+ break;
+ case MLX5_IB_QPT_DCI:
+ ret = (attr->cap.max_recv_wr || attr->cap.max_recv_sge) ?
+ -EINVAL :
+ 0;
+ break;
+ case IB_QPT_RAW_PACKET:
+ ret = (attr->rwq_ind_tbl && attr->send_cq) ? -EINVAL : 0;
+ break;
default:
- mlx5_ib_dbg(dev, "unsupported qp type %d\n",
- init_attr->qp_type);
- /* Don't support raw QPs */
- return ERR_PTR(-EOPNOTSUPP);
+ break;
}
- if (verbs_init_attr->qp_type == IB_QPT_DRIVER)
- qp->qp_sub_type = init_attr->qp_type;
+ if (ret)
+ mlx5_ib_dbg(dev, "QP type %d has wrong attributes\n", qp->type);
- return &qp->ibqp;
+ return ret;
+}
+
+static int get_qp_uidx(struct mlx5_ib_qp *qp,
+ struct mlx5_create_qp_params *params)
+{
+ struct mlx5_ib_create_qp *ucmd = params->ucmd;
+ struct ib_udata *udata = params->udata;
+ struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
+ udata, struct mlx5_ib_ucontext, ibucontext);
+
+ if (params->is_rss_raw)
+ return 0;
+
+ return get_qp_user_index(ucontext, ucmd, sizeof(*ucmd), &params->uidx);
}
static int mlx5_ib_destroy_dct(struct mlx5_ib_qp *mqp)
@@ -2818,7 +2858,7 @@ static int mlx5_ib_destroy_dct(struct mlx5_ib_qp *mqp)
if (mqp->state == IB_QPS_RTR) {
int err;
- err = mlx5_core_destroy_dct(dev->mdev, &mqp->dct.mdct);
+ err = mlx5_core_destroy_dct(dev, &mqp->dct.mdct);
if (err) {
mlx5_ib_warn(dev, "failed to destroy DCT %d\n", err);
return err;
@@ -2830,6 +2870,150 @@ static int mlx5_ib_destroy_dct(struct mlx5_ib_qp *mqp)
return 0;
}
+static int check_ucmd_data(struct mlx5_ib_dev *dev,
+ struct mlx5_create_qp_params *params)
+{
+ struct ib_qp_init_attr *attr = params->attr;
+ struct ib_udata *udata = params->udata;
+ size_t size, last;
+ int ret;
+
+ if (params->is_rss_raw)
+ /*
+ * These QPs don't have "reserved" field in their
+ * create_qp input struct, so their data is always valid.
+ */
+ last = sizeof(struct mlx5_ib_create_qp_rss);
+ else
+ /* IB_QPT_RAW_PACKET doesn't have ECE data */
+ switch (attr->qp_type) {
+ case IB_QPT_RAW_PACKET:
+ last = offsetof(struct mlx5_ib_create_qp, ece_options);
+ break;
+ default:
+ last = offsetof(struct mlx5_ib_create_qp, reserved);
+ }
+
+ if (udata->inlen <= last)
+ return 0;
+
+ /*
+ * The user provides different create_qp structures based on the
+ * flow, and we need to know whether the memory after our
+ * struct create_qp ends was cleared.
+ */
+ size = udata->inlen - last;
+ ret = ib_is_udata_cleared(params->udata, last, size);
+ if (!ret)
+ mlx5_ib_dbg(
+ dev,
+ "udata is not cleared, inlen = %lu, ucmd = %lu, last = %lu, size = %lu\n",
+ udata->inlen, params->ucmd_size, last, size);
+ return ret ? 0 : -EINVAL;
+}
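
check_ucmd_data() relies on ib_is_udata_cleared() to insist that any bytes past the structure this kernel understands are zero, which is what keeps later uAPI growth safe. A minimal buffer-based sketch of the same idea (not the udata helper itself):

#include <stdbool.h>
#include <stddef.h>

/* Return true if every byte in buf[from..len) is zero, i.e. the caller
 * did not put data in fields this consumer does not know about. */
static bool tail_is_cleared(const unsigned char *buf, size_t len, size_t from)
{
	size_t i;

	for (i = from; i < len; i++)
		if (buf[i])
			return false;
	return true;
}
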
+
+struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attr,
+ struct ib_udata *udata)
+{
+ struct mlx5_create_qp_params params = {};
+ struct mlx5_ib_dev *dev;
+ struct mlx5_ib_qp *qp;
+ enum ib_qp_type type;
+ int err;
+
+ dev = pd ? to_mdev(pd->device) :
+ to_mdev(to_mxrcd(attr->xrcd)->ibxrcd.device);
+
+ err = check_qp_type(dev, attr, &type);
+ if (err)
+ return ERR_PTR(err);
+
+ err = check_valid_flow(dev, pd, attr, udata);
+ if (err)
+ return ERR_PTR(err);
+
+ if (attr->qp_type == IB_QPT_GSI)
+ return mlx5_ib_gsi_create_qp(pd, attr);
+
+ params.udata = udata;
+ params.uidx = MLX5_IB_DEFAULT_UIDX;
+ params.attr = attr;
+ params.is_rss_raw = !!attr->rwq_ind_tbl;
+
+ if (udata) {
+ err = process_udata_size(dev, &params);
+ if (err)
+ return ERR_PTR(err);
+
+ err = check_ucmd_data(dev, &params);
+ if (err)
+ return ERR_PTR(err);
+
+ params.ucmd = kzalloc(params.ucmd_size, GFP_KERNEL);
+ if (!params.ucmd)
+ return ERR_PTR(-ENOMEM);
+
+ err = ib_copy_from_udata(params.ucmd, udata, params.inlen);
+ if (err)
+ goto free_ucmd;
+ }
+
+ qp = kzalloc(sizeof(*qp), GFP_KERNEL);
+ if (!qp) {
+ err = -ENOMEM;
+ goto free_ucmd;
+ }
+
+ qp->type = type;
+ if (udata) {
+ err = process_vendor_flags(dev, qp, params.ucmd, attr);
+ if (err)
+ goto free_qp;
+
+ err = get_qp_uidx(qp, &params);
+ if (err)
+ goto free_qp;
+ }
+ err = process_create_flags(dev, qp, attr);
+ if (err)
+ goto free_qp;
+
+ err = check_qp_attr(dev, qp, attr);
+ if (err)
+ goto free_qp;
+
+ err = create_qp(dev, pd, qp, &params);
+ if (err)
+ goto free_qp;
+
+ kfree(params.ucmd);
+ params.ucmd = NULL;
+
+ if (udata)
+ /*
+ * It is safe to copy response for all user create QP flows,
+ * including MLX5_IB_QPT_DCT, which doesn't need it.
+ * In that case, resp will be filled with zeros.
+ */
+ err = ib_copy_to_udata(udata, &params.resp, params.outlen);
+ if (err)
+ goto destroy_qp;
+
+ return &qp->ibqp;
+
+destroy_qp:
+ if (qp->type == MLX5_IB_QPT_DCT)
+ mlx5_ib_destroy_dct(qp);
+ else
+ destroy_qp_common(dev, qp, udata);
+ qp = NULL;
+free_qp:
+ kfree(qp);
+free_ucmd:
+ kfree(params.ucmd);
+ return ERR_PTR(err);
+}
+
int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
{
struct mlx5_ib_dev *dev = to_mdev(qp->device);
@@ -2838,7 +3022,7 @@ int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
if (unlikely(qp->qp_type == IB_QPT_GSI))
return mlx5_ib_gsi_destroy_qp(qp);
- if (mqp->qp_sub_type == MLX5_IB_QPT_DCT)
+ if (mqp->type == MLX5_IB_QPT_DCT)
return mlx5_ib_destroy_dct(mqp);
destroy_qp_common(dev, mqp, udata);
@@ -2848,14 +3032,13 @@ int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
return 0;
}
-static int to_mlx5_access_flags(struct mlx5_ib_qp *qp,
- const struct ib_qp_attr *attr,
- int attr_mask, __be32 *hw_access_flags_be)
+static int set_qpc_atomic_flags(struct mlx5_ib_qp *qp,
+ const struct ib_qp_attr *attr, int attr_mask,
+ void *qpc)
{
- u8 dest_rd_atomic;
- u32 access_flags, hw_access_flags = 0;
-
struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device);
+ u8 dest_rd_atomic;
+ u32 access_flags;
if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
dest_rd_atomic = attr->max_dest_rd_atomic;
@@ -2870,8 +3053,8 @@ static int to_mlx5_access_flags(struct mlx5_ib_qp *qp,
if (!dest_rd_atomic)
access_flags &= IB_ACCESS_REMOTE_WRITE;
- if (access_flags & IB_ACCESS_REMOTE_READ)
- hw_access_flags |= MLX5_QP_BIT_RRE;
+ MLX5_SET(qpc, qpc, rre, !!(access_flags & IB_ACCESS_REMOTE_READ));
+
if (access_flags & IB_ACCESS_REMOTE_ATOMIC) {
int atomic_mode;
@@ -2879,15 +3062,11 @@ static int to_mlx5_access_flags(struct mlx5_ib_qp *qp,
if (atomic_mode < 0)
return -EOPNOTSUPP;
- hw_access_flags |= MLX5_QP_BIT_RAE;
- hw_access_flags |= atomic_mode << MLX5_ATOMIC_MODE_OFFSET;
+ MLX5_SET(qpc, qpc, rae, 1);
+ MLX5_SET(qpc, qpc, atomic_mode, atomic_mode);
}
- if (access_flags & IB_ACCESS_REMOTE_WRITE)
- hw_access_flags |= MLX5_QP_BIT_RWE;
-
- *hw_access_flags_be = cpu_to_be32(hw_access_flags);
-
+ MLX5_SET(qpc, qpc, rwe, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
return 0;
}
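
The helper above first clamps the effective access flags: with max_dest_rd_atomic of zero there are no responder resources, so only remote write may survive before the rre/rae/rwe bits are written into the qpc. A tiny illustration with made-up flag values rather than the IB verbs constants:

#include <stdint.h>

#define ACC_REMOTE_WRITE  0x1
#define ACC_REMOTE_READ   0x2
#define ACC_REMOTE_ATOMIC 0x4

/* With no responder resources, incoming READ/ATOMIC cannot be served,
 * so only REMOTE_WRITE may stay enabled. */
static uint32_t clamp_access_flags(uint32_t access, uint32_t dest_rd_atomic)
{
	return dest_rd_atomic ? access : (access & ACC_REMOTE_WRITE);
}
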
@@ -2933,7 +3112,7 @@ static int modify_raw_packet_eth_prio(struct mlx5_core_dev *dev,
tisc = MLX5_ADDR_OF(modify_tis_in, in, ctx);
MLX5_SET(tisc, tisc, prio, ((sl & 0x7) << 1));
- err = mlx5_core_modify_tis(dev, sq->tisn, in, inlen);
+ err = mlx5_core_modify_tis(dev, sq->tisn, in);
kvfree(in);
@@ -2960,18 +3139,29 @@ static int modify_raw_packet_tx_affinity(struct mlx5_core_dev *dev,
tisc = MLX5_ADDR_OF(modify_tis_in, in, ctx);
MLX5_SET(tisc, tisc, lag_tx_port_affinity, tx_affinity);
- err = mlx5_core_modify_tis(dev, sq->tisn, in, inlen);
+ err = mlx5_core_modify_tis(dev, sq->tisn, in);
kvfree(in);
return err;
}
+static void mlx5_set_path_udp_sport(void *path, const struct rdma_ah_attr *ah,
+ u32 lqpn, u32 rqpn)
+
+{
+ u32 fl = ah->grh.flow_label;
+
+ if (!fl)
+ fl = rdma_calc_flow_label(lqpn, rqpn);
+
+ MLX5_SET(ads, path, udp_sport, rdma_flow_label_to_udp_sport(fl));
+}
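
When the address handle carries no flow label, the new code derives one from the local and remote QPNs so RoCEv2 traffic from different QP pairs spreads across ECMP paths through the UDP source port. rdma_calc_flow_label() and rdma_flow_label_to_udp_sport() are existing RDMA core helpers; the sketch below only illustrates the idea of folding two QPNs into the dynamic port range and is not their implementation:

#include <stdint.h>

/* Fold two 24-bit QPNs into a pseudo-random value and map it into the
 * IANA dynamic port range (49152-65535). Purely illustrative. */
static uint16_t qp_pair_to_udp_sport(uint32_t lqpn, uint32_t rqpn)
{
	uint32_t v = (lqpn ^ (rqpn << 1)) * 2654435761u;	/* Knuth hash */

	return (uint16_t)(49152 + (v % 16384));
}
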
+
static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
- const struct rdma_ah_attr *ah,
- struct mlx5_qp_path *path, u8 port, int attr_mask,
- u32 path_flags, const struct ib_qp_attr *attr,
- bool alt)
+ const struct rdma_ah_attr *ah, void *path, u8 port,
+ int attr_mask, u32 path_flags,
+ const struct ib_qp_attr *attr, bool alt)
{
const struct ib_global_route *grh = rdma_ah_read_grh(ah);
int err;
@@ -2980,8 +3170,8 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
u8 sl = rdma_ah_get_sl(ah);
if (attr_mask & IB_QP_PKEY_INDEX)
- path->pkey_index = cpu_to_be16(alt ? attr->alt_pkey_index :
- attr->pkey_index);
+ MLX5_SET(ads, path, pkey_index,
+ alt ? attr->alt_pkey_index : attr->pkey_index);
if (ah_flags & IB_AH_GRH) {
if (grh->sgid_index >=
@@ -2997,45 +3187,49 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
if (!(ah_flags & IB_AH_GRH))
return -EINVAL;
- memcpy(path->rmac, ah->roce.dmac, sizeof(ah->roce.dmac));
- if (qp->ibqp.qp_type == IB_QPT_RC ||
- qp->ibqp.qp_type == IB_QPT_UC ||
- qp->ibqp.qp_type == IB_QPT_XRC_INI ||
- qp->ibqp.qp_type == IB_QPT_XRC_TGT)
- path->udp_sport =
- mlx5_get_roce_udp_sport(dev, ah->grh.sgid_attr);
- path->dci_cfi_prio_sl = (sl & 0x7) << 4;
+ ether_addr_copy(MLX5_ADDR_OF(ads, path, rmac_47_32),
+ ah->roce.dmac);
+ if ((qp->ibqp.qp_type == IB_QPT_RC ||
+ qp->ibqp.qp_type == IB_QPT_UC ||
+ qp->ibqp.qp_type == IB_QPT_XRC_INI ||
+ qp->ibqp.qp_type == IB_QPT_XRC_TGT) &&
+ (grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) &&
+ (attr_mask & IB_QP_DEST_QPN))
+ mlx5_set_path_udp_sport(path, ah,
+ qp->ibqp.qp_num,
+ attr->dest_qp_num);
+ MLX5_SET(ads, path, eth_prio, sl & 0x7);
gid_type = ah->grh.sgid_attr->gid_type;
if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
- path->ecn_dscp = (grh->traffic_class >> 2) & 0x3f;
+ MLX5_SET(ads, path, dscp, grh->traffic_class >> 2);
} else {
- path->fl_free_ar = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
- path->fl_free_ar |=
- (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x40 : 0;
- path->rlid = cpu_to_be16(rdma_ah_get_dlid(ah));
- path->grh_mlid = rdma_ah_get_path_bits(ah) & 0x7f;
- if (ah_flags & IB_AH_GRH)
- path->grh_mlid |= 1 << 7;
- path->dci_cfi_prio_sl = sl & 0xf;
+ MLX5_SET(ads, path, fl, !!(path_flags & MLX5_PATH_FLAG_FL));
+ MLX5_SET(ads, path, free_ar,
+ !!(path_flags & MLX5_PATH_FLAG_FREE_AR));
+ MLX5_SET(ads, path, rlid, rdma_ah_get_dlid(ah));
+ MLX5_SET(ads, path, mlid, rdma_ah_get_path_bits(ah));
+ MLX5_SET(ads, path, grh, !!(ah_flags & IB_AH_GRH));
+ MLX5_SET(ads, path, sl, sl);
}
if (ah_flags & IB_AH_GRH) {
- path->mgid_index = grh->sgid_index;
- path->hop_limit = grh->hop_limit;
- path->tclass_flowlabel =
- cpu_to_be32((grh->traffic_class << 20) |
- (grh->flow_label));
- memcpy(path->rgid, grh->dgid.raw, 16);
+ MLX5_SET(ads, path, src_addr_index, grh->sgid_index);
+ MLX5_SET(ads, path, hop_limit, grh->hop_limit);
+ MLX5_SET(ads, path, tclass, grh->traffic_class);
+ MLX5_SET(ads, path, flow_label, grh->flow_label);
+ memcpy(MLX5_ADDR_OF(ads, path, rgid_rip), grh->dgid.raw,
+ sizeof(grh->dgid.raw));
}
err = ib_rate_to_mlx5(dev, rdma_ah_get_static_rate(ah));
if (err < 0)
return err;
- path->static_rate = err;
- path->port = port;
+ MLX5_SET(ads, path, stat_rate, err);
+ MLX5_SET(ads, path, vhca_port_num, port);
if (attr_mask & IB_QP_TIMEOUT)
- path->ackto_lt = (alt ? attr->alt_timeout : attr->timeout) << 3;
+ MLX5_SET(ads, path, ack_timeout,
+ alt ? attr->alt_timeout : attr->timeout);
if ((qp->ibqp.qp_type == IB_QPT_RAW_PACKET) && qp->sq.wqe_cnt)
return modify_raw_packet_eth_prio(dev->mdev,
@@ -3052,10 +3246,12 @@ static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_Q
MLX5_QP_OPTPAR_RAE |
MLX5_QP_OPTPAR_RWE |
MLX5_QP_OPTPAR_PKEY_INDEX |
- MLX5_QP_OPTPAR_PRI_PORT,
+ MLX5_QP_OPTPAR_PRI_PORT |
+ MLX5_QP_OPTPAR_LAG_TX_AFF,
[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE |
MLX5_QP_OPTPAR_PKEY_INDEX |
- MLX5_QP_OPTPAR_PRI_PORT,
+ MLX5_QP_OPTPAR_PRI_PORT |
+ MLX5_QP_OPTPAR_LAG_TX_AFF,
[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX |
MLX5_QP_OPTPAR_Q_KEY |
MLX5_QP_OPTPAR_PRI_PORT,
@@ -3063,17 +3259,20 @@ static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_Q
MLX5_QP_OPTPAR_RAE |
MLX5_QP_OPTPAR_RWE |
MLX5_QP_OPTPAR_PKEY_INDEX |
- MLX5_QP_OPTPAR_PRI_PORT,
+ MLX5_QP_OPTPAR_PRI_PORT |
+ MLX5_QP_OPTPAR_LAG_TX_AFF,
},
[MLX5_QP_STATE_RTR] = {
[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
MLX5_QP_OPTPAR_RRE |
MLX5_QP_OPTPAR_RAE |
MLX5_QP_OPTPAR_RWE |
- MLX5_QP_OPTPAR_PKEY_INDEX,
+ MLX5_QP_OPTPAR_PKEY_INDEX |
+ MLX5_QP_OPTPAR_LAG_TX_AFF,
[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
MLX5_QP_OPTPAR_RWE |
- MLX5_QP_OPTPAR_PKEY_INDEX,
+ MLX5_QP_OPTPAR_PKEY_INDEX |
+ MLX5_QP_OPTPAR_LAG_TX_AFF,
[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX |
MLX5_QP_OPTPAR_Q_KEY,
[MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_PKEY_INDEX |
@@ -3082,7 +3281,8 @@ static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_Q
MLX5_QP_OPTPAR_RRE |
MLX5_QP_OPTPAR_RAE |
MLX5_QP_OPTPAR_RWE |
- MLX5_QP_OPTPAR_PKEY_INDEX,
+ MLX5_QP_OPTPAR_PKEY_INDEX |
+ MLX5_QP_OPTPAR_LAG_TX_AFF,
},
},
[MLX5_QP_STATE_RTR] = {
@@ -3240,7 +3440,7 @@ static int modify_raw_packet_qp_rq(
"RAW PACKET QP counters are not supported on current FW\n");
}
- err = mlx5_core_modify_rq(dev->mdev, rq->base.mqp.qpn, in, inlen);
+ err = mlx5_core_modify_rq(dev->mdev, rq->base.mqp.qpn, in);
if (err)
goto out;
@@ -3303,7 +3503,7 @@ static int modify_raw_packet_qp_sq(
MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, rl_index);
}
- err = mlx5_core_modify_sq(dev, sq->base.mqp.qpn, in, inlen);
+ err = mlx5_core_modify_sq(dev, sq->base.mqp.qpn, in);
if (err) {
/* Remove new rate from table if failed */
if (new_rate_added)
@@ -3416,43 +3616,80 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
return 0;
}
-static unsigned int get_tx_affinity(struct mlx5_ib_dev *dev,
- struct mlx5_ib_pd *pd,
- struct mlx5_ib_qp_base *qp_base,
- u8 port_num, struct ib_udata *udata)
+static unsigned int get_tx_affinity_rr(struct mlx5_ib_dev *dev,
+ struct ib_udata *udata)
{
struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
udata, struct mlx5_ib_ucontext, ibucontext);
- unsigned int tx_port_affinity;
+ u8 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
+ atomic_t *tx_port_affinity;
+
+ if (ucontext)
+ tx_port_affinity = &ucontext->tx_port_affinity;
+ else
+ tx_port_affinity = &dev->port[port_num].roce.tx_port_affinity;
+
+ return (unsigned int)atomic_add_return(1, tx_port_affinity) %
+ MLX5_MAX_PORTS + 1;
+}
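
get_tx_affinity_rr() spreads QPs across the bonded ports by bumping a shared atomic counter and mapping the result onto 1..MLX5_MAX_PORTS. A user-space C11 sketch of the same round-robin, with the port count as a stand-in constant:

#include <stdatomic.h>

#define NUM_PORTS 2	/* stand-in for MLX5_MAX_PORTS */

static atomic_uint rr_counter;

/* Returns a 1-based port index, cycling 1, 2, 1, 2, ... across callers. */
static unsigned int next_tx_port(void)
{
	return atomic_fetch_add(&rr_counter, 1) % NUM_PORTS + 1;
}
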
+
+static bool qp_supports_affinity(struct ib_qp *qp)
+{
+ if ((qp->qp_type == IB_QPT_RC) ||
+ (qp->qp_type == IB_QPT_UD) ||
+ (qp->qp_type == IB_QPT_UC) ||
+ (qp->qp_type == IB_QPT_RAW_PACKET) ||
+ (qp->qp_type == IB_QPT_XRC_INI) ||
+ (qp->qp_type == IB_QPT_XRC_TGT))
+ return true;
+ return false;
+}
+
+static unsigned int get_tx_affinity(struct ib_qp *qp,
+ const struct ib_qp_attr *attr,
+ int attr_mask, u8 init,
+ struct ib_udata *udata)
+{
+ struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
+ udata, struct mlx5_ib_ucontext, ibucontext);
+ struct mlx5_ib_dev *dev = to_mdev(qp->device);
+ struct mlx5_ib_qp *mqp = to_mqp(qp);
+ struct mlx5_ib_qp_base *qp_base;
+ unsigned int tx_affinity;
+
+ if (!(mlx5_ib_lag_should_assign_affinity(dev) &&
+ qp_supports_affinity(qp)))
+ return 0;
+
+ if (mqp->flags & MLX5_IB_QP_CREATE_SQPN_QP1)
+ tx_affinity = mqp->gsi_lag_port;
+ else if (init)
+ tx_affinity = get_tx_affinity_rr(dev, udata);
+ else if ((attr_mask & IB_QP_AV) && attr->xmit_slave)
+ tx_affinity =
+ mlx5_lag_get_slave_port(dev->mdev, attr->xmit_slave);
+ else
+ return 0;
- if (ucontext) {
- tx_port_affinity = (unsigned int)atomic_add_return(
- 1, &ucontext->tx_port_affinity) %
- MLX5_MAX_PORTS +
- 1;
+ qp_base = &mqp->trans_qp.base;
+ if (ucontext)
mlx5_ib_dbg(dev, "Set tx affinity 0x%x to qpn 0x%x ucontext %p\n",
- tx_port_affinity, qp_base->mqp.qpn, ucontext);
- } else {
- tx_port_affinity =
- (unsigned int)atomic_add_return(
- 1, &dev->port[port_num].roce.tx_port_affinity) %
- MLX5_MAX_PORTS +
- 1;
+ tx_affinity, qp_base->mqp.qpn, ucontext);
+ else
mlx5_ib_dbg(dev, "Set tx affinity 0x%x to qpn 0x%x\n",
- tx_port_affinity, qp_base->mqp.qpn);
- }
-
- return tx_port_affinity;
+ tx_affinity, qp_base->mqp.qpn);
+ return tx_affinity;
}
static int __mlx5_ib_qp_set_counter(struct ib_qp *qp,
struct rdma_counter *counter)
{
struct mlx5_ib_dev *dev = to_mdev(qp->device);
+ u32 in[MLX5_ST_SZ_DW(rts2rts_qp_in)] = {};
struct mlx5_ib_qp *mqp = to_mqp(qp);
- struct mlx5_qp_context context = {};
struct mlx5_ib_qp_base *base;
u32 set_id;
+ u32 *qpc;
if (counter)
set_id = counter->id;
@@ -3460,12 +3697,15 @@ static int __mlx5_ib_qp_set_counter(struct ib_qp *qp,
set_id = mlx5_ib_get_counters_id(dev, mqp->port - 1);
base = &mqp->trans_qp.base;
- context.qp_counter_set_usr_page &= cpu_to_be32(0xffffff);
- context.qp_counter_set_usr_page |= cpu_to_be32(set_id << 24);
- return mlx5_core_qp_modify(dev->mdev,
- MLX5_CMD_OP_RTS2RTS_QP,
- MLX5_QP_OPTPAR_COUNTER_SET_ID,
- &context, &base->mqp);
+ MLX5_SET(rts2rts_qp_in, in, opcode, MLX5_CMD_OP_RTS2RTS_QP);
+ MLX5_SET(rts2rts_qp_in, in, qpn, base->mqp.qpn);
+ MLX5_SET(rts2rts_qp_in, in, uid, base->mqp.uid);
+ MLX5_SET(rts2rts_qp_in, in, opt_param_mask,
+ MLX5_QP_OPTPAR_COUNTER_SET_ID);
+
+ qpc = MLX5_ADDR_OF(rts2rts_qp_in, in, qpc);
+ MLX5_SET(qpc, qpc, counter_set_id, set_id);
+ return mlx5_cmd_exec_in(dev->mdev, rts2rts_qp, in);
}
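Aside: the hunk above shows the general shape of the conversion in this patch: the
open-coded struct mlx5_qp_context packing is replaced by MLX5_SET()/MLX5_ADDR_OF(), so
field positions come from the mlx5_ifc layout instead of manual shifts. The removed
lines also make the old layout visible: counter_set_id sat in the top byte of a
big-endian dword, hence the '<< 24' plus cpu_to_be32(). A standalone illustration of
that byte placement (htonl() stands in for cpu_to_be32() here):

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

int main(void)
{
	uint32_t qp_counter_set_usr_page = 0;
	uint8_t set_id = 0x5a;

	qp_counter_set_usr_page &= htonl(0x00ffffff);             /* keep usr_page bits */
	qp_counter_set_usr_page |= htonl((uint32_t)set_id << 24); /* place counter_set_id */
	/* first byte on the wire is the counter set id, regardless of host endianness */
	printf("0x%02x\n", ((uint8_t *)&qp_counter_set_usr_page)[0]);
	return 0;
}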
static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
@@ -3473,6 +3713,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
enum ib_qp_state cur_state,
enum ib_qp_state new_state,
const struct mlx5_ib_modify_qp *ucmd,
+ struct mlx5_ib_modify_qp_resp *resp,
struct ib_udata *udata)
{
static const u16 optab[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE] = {
@@ -3516,67 +3757,60 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
struct mlx5_ib_qp *qp = to_mqp(ibqp);
struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
struct mlx5_ib_cq *send_cq, *recv_cq;
- struct mlx5_qp_context *context;
struct mlx5_ib_pd *pd;
enum mlx5_qp_state mlx5_cur, mlx5_new;
- enum mlx5_qp_optpar optpar;
+ void *qpc, *pri_path, *alt_path;
+ enum mlx5_qp_optpar optpar = 0;
u32 set_id = 0;
int mlx5_st;
int err;
u16 op;
u8 tx_affinity = 0;
- mlx5_st = to_mlx5_st(ibqp->qp_type == IB_QPT_DRIVER ?
- qp->qp_sub_type : ibqp->qp_type);
+ mlx5_st = to_mlx5_st(qp->type);
if (mlx5_st < 0)
return -EINVAL;
- context = kzalloc(sizeof(*context), GFP_KERNEL);
- if (!context)
+ qpc = kzalloc(MLX5_ST_SZ_BYTES(qpc), GFP_KERNEL);
+ if (!qpc)
return -ENOMEM;
- pd = get_pd(qp);
- context->flags = cpu_to_be32(mlx5_st << 16);
+ pd = to_mpd(qp->ibqp.pd);
+ MLX5_SET(qpc, qpc, st, mlx5_st);
if (!(attr_mask & IB_QP_PATH_MIG_STATE)) {
- context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
+ MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
} else {
switch (attr->path_mig_state) {
case IB_MIG_MIGRATED:
- context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
+ MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
break;
case IB_MIG_REARM:
- context->flags |= cpu_to_be32(MLX5_QP_PM_REARM << 11);
+ MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_REARM);
break;
case IB_MIG_ARMED:
- context->flags |= cpu_to_be32(MLX5_QP_PM_ARMED << 11);
+ MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_ARMED);
break;
}
}
- if ((cur_state == IB_QPS_RESET) && (new_state == IB_QPS_INIT)) {
- if ((ibqp->qp_type == IB_QPT_RC) ||
- (ibqp->qp_type == IB_QPT_UD &&
- !(qp->flags & MLX5_IB_QP_SQPN_QP1)) ||
- (ibqp->qp_type == IB_QPT_UC) ||
- (ibqp->qp_type == IB_QPT_RAW_PACKET) ||
- (ibqp->qp_type == IB_QPT_XRC_INI) ||
- (ibqp->qp_type == IB_QPT_XRC_TGT)) {
- if (dev->lag_active) {
- u8 p = mlx5_core_native_port_num(dev->mdev) - 1;
- tx_affinity = get_tx_affinity(dev, pd, base, p,
- udata);
- context->flags |= cpu_to_be32(tx_affinity << 24);
- }
- }
- }
+ tx_affinity = get_tx_affinity(ibqp, attr, attr_mask,
+ cur_state == IB_QPS_RESET &&
+ new_state == IB_QPS_INIT, udata);
+
+ MLX5_SET(qpc, qpc, lag_tx_port_affinity, tx_affinity);
+ if (tx_affinity && new_state == IB_QPS_RTR &&
+ MLX5_CAP_GEN(dev->mdev, init2_lag_tx_port_affinity))
+ optpar |= MLX5_QP_OPTPAR_LAG_TX_AFF;
if (is_sqp(ibqp->qp_type)) {
- context->mtu_msgmax = (IB_MTU_256 << 5) | 8;
+ MLX5_SET(qpc, qpc, mtu, IB_MTU_256);
+ MLX5_SET(qpc, qpc, log_msg_max, 8);
} else if ((ibqp->qp_type == IB_QPT_UD &&
- !(qp->flags & MLX5_IB_QP_UNDERLAY)) ||
+ !(qp->flags & IB_QP_CREATE_SOURCE_QPN)) ||
ibqp->qp_type == MLX5_IB_QPT_REG_UMR) {
- context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
+ MLX5_SET(qpc, qpc, mtu, IB_MTU_4096);
+ MLX5_SET(qpc, qpc, log_msg_max, 12);
} else if (attr_mask & IB_QP_PATH_MTU) {
if (attr->path_mtu < IB_MTU_256 ||
attr->path_mtu > IB_MTU_4096) {
@@ -3584,40 +3818,45 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
err = -EINVAL;
goto out;
}
- context->mtu_msgmax = (attr->path_mtu << 5) |
- (u8)MLX5_CAP_GEN(dev->mdev, log_max_msg);
+ MLX5_SET(qpc, qpc, mtu, attr->path_mtu);
+ MLX5_SET(qpc, qpc, log_msg_max,
+ MLX5_CAP_GEN(dev->mdev, log_max_msg));
}
if (attr_mask & IB_QP_DEST_QPN)
- context->log_pg_sz_remote_qpn = cpu_to_be32(attr->dest_qp_num);
+ MLX5_SET(qpc, qpc, remote_qpn, attr->dest_qp_num);
+
+ pri_path = MLX5_ADDR_OF(qpc, qpc, primary_address_path);
+ alt_path = MLX5_ADDR_OF(qpc, qpc, secondary_address_path);
if (attr_mask & IB_QP_PKEY_INDEX)
- context->pri_path.pkey_index = cpu_to_be16(attr->pkey_index);
+ MLX5_SET(ads, pri_path, pkey_index, attr->pkey_index);
/* todo implement counter_index functionality */
if (is_sqp(ibqp->qp_type))
- context->pri_path.port = qp->port;
+ MLX5_SET(ads, pri_path, vhca_port_num, qp->port);
if (attr_mask & IB_QP_PORT)
- context->pri_path.port = attr->port_num;
+ MLX5_SET(ads, pri_path, vhca_port_num, attr->port_num);
if (attr_mask & IB_QP_AV) {
- err = mlx5_set_path(dev, qp, &attr->ah_attr, &context->pri_path,
- attr_mask & IB_QP_PORT ? attr->port_num : qp->port,
+ err = mlx5_set_path(dev, qp, &attr->ah_attr, pri_path,
+ attr_mask & IB_QP_PORT ? attr->port_num :
+ qp->port,
attr_mask, 0, attr, false);
if (err)
goto out;
}
if (attr_mask & IB_QP_TIMEOUT)
- context->pri_path.ackto_lt |= attr->timeout << 3;
+ MLX5_SET(ads, pri_path, ack_timeout, attr->timeout);
if (attr_mask & IB_QP_ALT_PATH) {
- err = mlx5_set_path(dev, qp, &attr->alt_ah_attr,
- &context->alt_path,
+ err = mlx5_set_path(dev, qp, &attr->alt_ah_attr, alt_path,
attr->alt_port_num,
- attr_mask | IB_QP_PKEY_INDEX | IB_QP_TIMEOUT,
+ attr_mask | IB_QP_PKEY_INDEX |
+ IB_QP_TIMEOUT,
0, attr, true);
if (err)
goto out;
@@ -3626,75 +3865,68 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
get_cqs(qp->ibqp.qp_type, qp->ibqp.send_cq, qp->ibqp.recv_cq,
&send_cq, &recv_cq);
- context->flags_pd = cpu_to_be32(pd ? pd->pdn : to_mpd(dev->devr.p0)->pdn);
- context->cqn_send = send_cq ? cpu_to_be32(send_cq->mcq.cqn) : 0;
- context->cqn_recv = recv_cq ? cpu_to_be32(recv_cq->mcq.cqn) : 0;
- context->params1 = cpu_to_be32(MLX5_IB_ACK_REQ_FREQ << 28);
+ MLX5_SET(qpc, qpc, pd, pd ? pd->pdn : to_mpd(dev->devr.p0)->pdn);
+ if (send_cq)
+ MLX5_SET(qpc, qpc, cqn_snd, send_cq->mcq.cqn);
+ if (recv_cq)
+ MLX5_SET(qpc, qpc, cqn_rcv, recv_cq->mcq.cqn);
+
+ MLX5_SET(qpc, qpc, log_ack_req_freq, MLX5_IB_ACK_REQ_FREQ);
if (attr_mask & IB_QP_RNR_RETRY)
- context->params1 |= cpu_to_be32(attr->rnr_retry << 13);
+ MLX5_SET(qpc, qpc, rnr_retry, attr->rnr_retry);
if (attr_mask & IB_QP_RETRY_CNT)
- context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
+ MLX5_SET(qpc, qpc, retry_count, attr->retry_cnt);
- if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
- if (attr->max_rd_atomic)
- context->params1 |=
- cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
- }
+ if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && attr->max_rd_atomic)
+ MLX5_SET(qpc, qpc, log_sra_max, ilog2(attr->max_rd_atomic));
if (attr_mask & IB_QP_SQ_PSN)
- context->next_send_psn = cpu_to_be32(attr->sq_psn);
+ MLX5_SET(qpc, qpc, next_send_psn, attr->sq_psn);
- if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
- if (attr->max_dest_rd_atomic)
- context->params2 |=
- cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);
- }
+ if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC && attr->max_dest_rd_atomic)
+ MLX5_SET(qpc, qpc, log_rra_max,
+ ilog2(attr->max_dest_rd_atomic));
if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
- __be32 access_flags;
-
- err = to_mlx5_access_flags(qp, attr, attr_mask, &access_flags);
+ err = set_qpc_atomic_flags(qp, attr, attr_mask, qpc);
if (err)
goto out;
-
- context->params2 |= access_flags;
}
if (attr_mask & IB_QP_MIN_RNR_TIMER)
- context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
+ MLX5_SET(qpc, qpc, min_rnr_nak, attr->min_rnr_timer);
if (attr_mask & IB_QP_RQ_PSN)
- context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);
+ MLX5_SET(qpc, qpc, next_rcv_psn, attr->rq_psn);
if (attr_mask & IB_QP_QKEY)
- context->qkey = cpu_to_be32(attr->qkey);
+ MLX5_SET(qpc, qpc, q_key, attr->qkey);
if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
- context->db_rec_addr = cpu_to_be64(qp->db.dma);
+ MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma);
if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
u8 port_num = (attr_mask & IB_QP_PORT ? attr->port_num :
qp->port) - 1;
/* Underlay port should be used - index 0 function per port */
- if (qp->flags & MLX5_IB_QP_UNDERLAY)
+ if (qp->flags & IB_QP_CREATE_SOURCE_QPN)
port_num = 0;
if (ibqp->counter)
set_id = ibqp->counter->id;
else
set_id = mlx5_ib_get_counters_id(dev, port_num);
- context->qp_counter_set_usr_page |=
- cpu_to_be32(set_id << 24);
+ MLX5_SET(qpc, qpc, counter_set_id, set_id);
}
if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
- context->sq_crq_size |= cpu_to_be16(1 << 4);
+ MLX5_SET(qpc, qpc, rlky, 1);
- if (qp->flags & MLX5_IB_QP_SQPN_QP1)
- context->deth_sqpn = cpu_to_be32(1);
+ if (qp->flags & MLX5_IB_QP_CREATE_SQPN_QP1)
+ MLX5_SET(qpc, qpc, deth_sqpn, 1);
mlx5_cur = to_mlx5_state(cur_state);
mlx5_new = to_mlx5_state(new_state);
@@ -3706,11 +3938,11 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
}
op = optab[mlx5_cur][mlx5_new];
- optpar = ib_mask_to_mlx5_opt(attr_mask);
+ optpar |= ib_mask_to_mlx5_opt(attr_mask);
optpar &= opt_mask[mlx5_cur][mlx5_new][mlx5_st];
if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
- qp->flags & MLX5_IB_QP_UNDERLAY) {
+ qp->flags & IB_QP_CREATE_SOURCE_QPN) {
struct mlx5_modify_raw_qp_param raw_qp_param = {};
raw_qp_param.operation = op;
@@ -3752,8 +3984,15 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
err = modify_raw_packet_qp(dev, qp, &raw_qp_param, tx_affinity);
} else {
- err = mlx5_core_qp_modify(dev->mdev, op, optpar, context,
- &base->mqp);
+ if (udata) {
+ /* For the kernel flows, the resp will stay zero */
+ resp->ece_options =
+ MLX5_CAP_GEN(dev->mdev, ece_support) ?
+ ucmd->ece_options : 0;
+ resp->response_length = sizeof(*resp);
+ }
+ err = mlx5_core_qp_modify(dev, op, optpar, qpc, &base->mqp,
+ &resp->ece_options);
}
if (err)
@@ -3800,7 +4039,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
}
out:
- kfree(context);
+ kfree(qpc);
return err;
}
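Aside: max_rd_atomic and max_dest_rd_atomic are carried in the QPC as log2 values
(log_sra_max / log_rra_max), which is why the modify path above stores ilog2() of the
requested depth and the query path later reconstructs it as 1 << log. A tiny standalone
sketch of that round trip, assuming power-of-two depths:

#include <stdio.h>

/* floor(log2(x)) for x > 0; stands in for the kernel's ilog2() */
static unsigned int ilog2_sketch(unsigned int x)
{
	unsigned int l = 0;

	while (x >>= 1)
		l++;
	return l;
}

int main(void)
{
	unsigned int depth;

	for (depth = 1; depth <= 16; depth <<= 1) {
		unsigned int log_sra_max = ilog2_sketch(depth);	/* stored on modify */
		printf("depth %2u -> log %u -> query reads back %u\n",
		       depth, log_sra_max, 1u << log_sra_max);	/* 1 << log on query */
	}
	return 0;
}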
@@ -3858,7 +4097,8 @@ static bool modify_dci_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state new
* Other transitions and attributes are illegal
*/
static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
- int attr_mask, struct ib_udata *udata)
+ int attr_mask, struct mlx5_ib_modify_qp *ucmd,
+ struct ib_udata *udata)
{
struct mlx5_ib_qp *qp = to_mqp(ibqp);
struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
@@ -3874,6 +4114,15 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
new_state = attr->qp_state;
dctc = MLX5_ADDR_OF(create_dct_in, qp->dct.in, dct_context_entry);
+ if (MLX5_CAP_GEN(dev->mdev, ece_support) && ucmd->ece_options)
+ /*
+	 * The DCT doesn't initialize the QP until the modify command is
+	 * executed, so overwrite the previously set ECE field whenever the
+	 * user provided any value except zero, which means not set/invalid.
+ */
+ MLX5_SET(dctc, dctc, ece, ucmd->ece_options);
+
if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
u16 set_id;
@@ -3906,17 +4155,23 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
set_id = mlx5_ib_get_counters_id(dev, attr->port_num - 1);
MLX5_SET(dctc, dctc, counter_set_id, set_id);
-
} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
struct mlx5_ib_modify_qp_resp resp = {};
- u32 out[MLX5_ST_SZ_DW(create_dct_out)] = {0};
- u32 min_resp_len = offsetof(typeof(resp), dctn) +
- sizeof(resp.dctn);
+ u32 out[MLX5_ST_SZ_DW(create_dct_out)] = {};
+ u32 min_resp_len = offsetofend(typeof(resp), dctn);
if (udata->outlen < min_resp_len)
return -EINVAL;
resp.response_length = min_resp_len;
+ /*
+ * If we don't have enough space for the ECE options,
+ * simply indicate it with resp.response_length.
+ */
+ resp.response_length = (udata->outlen < sizeof(resp)) ?
+ min_resp_len :
+ sizeof(resp);
+
required |= IB_QP_MIN_RNR_TIMER | IB_QP_AV | IB_QP_PATH_MTU;
if (!is_valid_mask(attr_mask, required, 0))
return -EINVAL;
@@ -3927,15 +4182,17 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
MLX5_SET(dctc, dctc, my_addr_index, attr->ah_attr.grh.sgid_index);
MLX5_SET(dctc, dctc, hop_limit, attr->ah_attr.grh.hop_limit);
- err = mlx5_core_create_dct(dev->mdev, &qp->dct.mdct, qp->dct.in,
+ err = mlx5_core_create_dct(dev, &qp->dct.mdct, qp->dct.in,
MLX5_ST_SZ_BYTES(create_dct_in), out,
sizeof(out));
if (err)
return err;
resp.dctn = qp->dct.mdct.mqp.qpn;
+ if (MLX5_CAP_GEN(dev->mdev, ece_support))
+ resp.ece_options = MLX5_GET(create_dct_out, out, ece);
err = ib_copy_to_udata(udata, &resp, resp.response_length);
if (err) {
- mlx5_core_destroy_dct(dev->mdev, &qp->dct.mdct);
+ mlx5_core_destroy_dct(dev, &qp->dct.mdct);
return err;
}
} else {
@@ -3953,11 +4210,11 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata)
{
struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
+ struct mlx5_ib_modify_qp_resp resp = {};
struct mlx5_ib_qp *qp = to_mqp(ibqp);
struct mlx5_ib_modify_qp ucmd = {};
enum ib_qp_type qp_type;
enum ib_qp_state cur_state, new_state;
- size_t required_cmd_sz;
int err = -EINVAL;
int port;
@@ -3965,9 +4222,7 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
return -ENOSYS;
if (udata && udata->inlen) {
- required_cmd_sz = offsetof(typeof(ucmd), reserved) +
- sizeof(ucmd.reserved);
- if (udata->inlen < required_cmd_sz)
+ if (udata->inlen < offsetofend(typeof(ucmd), ece_options))
return -EINVAL;
if (udata->inlen > sizeof(ucmd) &&
@@ -3980,23 +4235,20 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
return -EFAULT;
if (ucmd.comp_mask ||
- memchr_inv(&ucmd.reserved, 0, sizeof(ucmd.reserved)) ||
memchr_inv(&ucmd.burst_info.reserved, 0,
sizeof(ucmd.burst_info.reserved)))
return -EOPNOTSUPP;
+
}
if (unlikely(ibqp->qp_type == IB_QPT_GSI))
return mlx5_ib_gsi_modify_qp(ibqp, attr, attr_mask);
- if (ibqp->qp_type == IB_QPT_DRIVER)
- qp_type = qp->qp_sub_type;
- else
- qp_type = (unlikely(ibqp->qp_type == MLX5_IB_QPT_HW_GSI)) ?
- IB_QPT_GSI : ibqp->qp_type;
+ qp_type = (unlikely(ibqp->qp_type == MLX5_IB_QPT_HW_GSI)) ? IB_QPT_GSI :
+ qp->type;
if (qp_type == MLX5_IB_QPT_DCT)
- return mlx5_ib_modify_dct(ibqp, attr, attr_mask, udata);
+ return mlx5_ib_modify_dct(ibqp, attr, attr_mask, &ucmd, udata);
mutex_lock(&qp->mutex);
@@ -4007,7 +4259,7 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
}
- if (qp->flags & MLX5_IB_QP_UNDERLAY) {
+ if (qp->flags & IB_QP_CREATE_SOURCE_QPN) {
if (attr_mask & ~(IB_QP_STATE | IB_QP_CUR_STATE)) {
mlx5_ib_dbg(dev, "invalid attr_mask 0x%x when underlay QP is used\n",
attr_mask);
@@ -4067,1439 +4319,19 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
}
err = __mlx5_ib_modify_qp(ibqp, attr, attr_mask, cur_state,
- new_state, &ucmd, udata);
-
-out:
- mutex_unlock(&qp->mutex);
- return err;
-}
-
-static void _handle_post_send_edge(struct mlx5_ib_wq *sq, void **seg,
- u32 wqe_sz, void **cur_edge)
-{
- u32 idx;
-
- idx = (sq->cur_post + (wqe_sz >> 2)) & (sq->wqe_cnt - 1);
- *cur_edge = get_sq_edge(sq, idx);
-
- *seg = mlx5_frag_buf_get_wqe(&sq->fbc, idx);
-}
-
-/* handle_post_send_edge - Check if we get to SQ edge. If yes, update to the
- * next nearby edge and get new address translation for current WQE position.
- * @sq - SQ buffer.
- * @seg: Current WQE position (16B aligned).
- * @wqe_sz: Total current WQE size [16B].
- * @cur_edge: Updated current edge.
- */
-static inline void handle_post_send_edge(struct mlx5_ib_wq *sq, void **seg,
- u32 wqe_sz, void **cur_edge)
-{
- if (likely(*seg != *cur_edge))
- return;
-
- _handle_post_send_edge(sq, seg, wqe_sz, cur_edge);
-}
-
-/* memcpy_send_wqe - copy data from src to WQE and update the relevant WQ's
- * pointers. At the end @seg is aligned to 16B regardless the copied size.
- * @sq - SQ buffer.
- * @cur_edge: Updated current edge.
- * @seg: Current WQE position (16B aligned).
- * @wqe_sz: Total current WQE size [16B].
- * @src: Pointer to copy from.
- * @n: Number of bytes to copy.
- */
-static inline void memcpy_send_wqe(struct mlx5_ib_wq *sq, void **cur_edge,
- void **seg, u32 *wqe_sz, const void *src,
- size_t n)
-{
- while (likely(n)) {
- size_t leftlen = *cur_edge - *seg;
- size_t copysz = min_t(size_t, leftlen, n);
- size_t stride;
-
- memcpy(*seg, src, copysz);
-
- n -= copysz;
- src += copysz;
- stride = !n ? ALIGN(copysz, 16) : copysz;
- *seg += stride;
- *wqe_sz += stride >> 4;
- handle_post_send_edge(sq, seg, *wqe_sz, cur_edge);
- }
-}
-
-static int mlx5_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
-{
- struct mlx5_ib_cq *cq;
- unsigned cur;
-
- cur = wq->head - wq->tail;
- if (likely(cur + nreq < wq->max_post))
- return 0;
-
- cq = to_mcq(ib_cq);
- spin_lock(&cq->lock);
- cur = wq->head - wq->tail;
- spin_unlock(&cq->lock);
-
- return cur + nreq >= wq->max_post;
-}
-
-static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
- u64 remote_addr, u32 rkey)
-{
- rseg->raddr = cpu_to_be64(remote_addr);
- rseg->rkey = cpu_to_be32(rkey);
- rseg->reserved = 0;
-}
-
-static void set_eth_seg(const struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
- void **seg, int *size, void **cur_edge)
-{
- struct mlx5_wqe_eth_seg *eseg = *seg;
-
- memset(eseg, 0, sizeof(struct mlx5_wqe_eth_seg));
-
- if (wr->send_flags & IB_SEND_IP_CSUM)
- eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM |
- MLX5_ETH_WQE_L4_CSUM;
-
- if (wr->opcode == IB_WR_LSO) {
- struct ib_ud_wr *ud_wr = container_of(wr, struct ib_ud_wr, wr);
- size_t left, copysz;
- void *pdata = ud_wr->header;
- size_t stride;
-
- left = ud_wr->hlen;
- eseg->mss = cpu_to_be16(ud_wr->mss);
- eseg->inline_hdr.sz = cpu_to_be16(left);
-
- /* memcpy_send_wqe should get a 16B align address. Hence, we
- * first copy up to the current edge and then, if needed,
- * fall-through to memcpy_send_wqe.
- */
- copysz = min_t(u64, *cur_edge - (void *)eseg->inline_hdr.start,
- left);
- memcpy(eseg->inline_hdr.start, pdata, copysz);
- stride = ALIGN(sizeof(struct mlx5_wqe_eth_seg) -
- sizeof(eseg->inline_hdr.start) + copysz, 16);
- *size += stride / 16;
- *seg += stride;
-
- if (copysz < left) {
- handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
- left -= copysz;
- pdata += copysz;
- memcpy_send_wqe(&qp->sq, cur_edge, seg, size, pdata,
- left);
- }
-
- return;
- }
-
- *seg += sizeof(struct mlx5_wqe_eth_seg);
- *size += sizeof(struct mlx5_wqe_eth_seg) / 16;
-}
-
-static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
- const struct ib_send_wr *wr)
-{
- memcpy(&dseg->av, &to_mah(ud_wr(wr)->ah)->av, sizeof(struct mlx5_av));
- dseg->av.dqp_dct = cpu_to_be32(ud_wr(wr)->remote_qpn | MLX5_EXTENDED_UD_AV);
- dseg->av.key.qkey.qkey = cpu_to_be32(ud_wr(wr)->remote_qkey);
-}
-
-static void set_data_ptr_seg(struct mlx5_wqe_data_seg *dseg, struct ib_sge *sg)
-{
- dseg->byte_count = cpu_to_be32(sg->length);
- dseg->lkey = cpu_to_be32(sg->lkey);
- dseg->addr = cpu_to_be64(sg->addr);
-}
-
-static u64 get_xlt_octo(u64 bytes)
-{
- return ALIGN(bytes, MLX5_IB_UMR_XLT_ALIGNMENT) /
- MLX5_IB_UMR_OCTOWORD;
-}
-
-static __be64 frwr_mkey_mask(bool atomic)
-{
- u64 result;
-
- result = MLX5_MKEY_MASK_LEN |
- MLX5_MKEY_MASK_PAGE_SIZE |
- MLX5_MKEY_MASK_START_ADDR |
- MLX5_MKEY_MASK_EN_RINVAL |
- MLX5_MKEY_MASK_KEY |
- MLX5_MKEY_MASK_LR |
- MLX5_MKEY_MASK_LW |
- MLX5_MKEY_MASK_RR |
- MLX5_MKEY_MASK_RW |
- MLX5_MKEY_MASK_SMALL_FENCE |
- MLX5_MKEY_MASK_FREE;
-
- if (atomic)
- result |= MLX5_MKEY_MASK_A;
-
- return cpu_to_be64(result);
-}
-
-static __be64 sig_mkey_mask(void)
-{
- u64 result;
-
- result = MLX5_MKEY_MASK_LEN |
- MLX5_MKEY_MASK_PAGE_SIZE |
- MLX5_MKEY_MASK_START_ADDR |
- MLX5_MKEY_MASK_EN_SIGERR |
- MLX5_MKEY_MASK_EN_RINVAL |
- MLX5_MKEY_MASK_KEY |
- MLX5_MKEY_MASK_LR |
- MLX5_MKEY_MASK_LW |
- MLX5_MKEY_MASK_RR |
- MLX5_MKEY_MASK_RW |
- MLX5_MKEY_MASK_SMALL_FENCE |
- MLX5_MKEY_MASK_FREE |
- MLX5_MKEY_MASK_BSF_EN;
-
- return cpu_to_be64(result);
-}
-
-static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
- struct mlx5_ib_mr *mr, u8 flags, bool atomic)
-{
- int size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size;
-
- memset(umr, 0, sizeof(*umr));
-
- umr->flags = flags;
- umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size));
- umr->mkey_mask = frwr_mkey_mask(atomic);
-}
-
-static void set_linv_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr)
-{
- memset(umr, 0, sizeof(*umr));
- umr->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
- umr->flags = MLX5_UMR_INLINE;
-}
-
-static __be64 get_umr_enable_mr_mask(void)
-{
- u64 result;
-
- result = MLX5_MKEY_MASK_KEY |
- MLX5_MKEY_MASK_FREE;
-
- return cpu_to_be64(result);
-}
-
-static __be64 get_umr_disable_mr_mask(void)
-{
- u64 result;
-
- result = MLX5_MKEY_MASK_FREE;
-
- return cpu_to_be64(result);
-}
-
-static __be64 get_umr_update_translation_mask(void)
-{
- u64 result;
-
- result = MLX5_MKEY_MASK_LEN |
- MLX5_MKEY_MASK_PAGE_SIZE |
- MLX5_MKEY_MASK_START_ADDR;
-
- return cpu_to_be64(result);
-}
-
-static __be64 get_umr_update_access_mask(int atomic)
-{
- u64 result;
-
- result = MLX5_MKEY_MASK_LR |
- MLX5_MKEY_MASK_LW |
- MLX5_MKEY_MASK_RR |
- MLX5_MKEY_MASK_RW;
-
- if (atomic)
- result |= MLX5_MKEY_MASK_A;
-
- return cpu_to_be64(result);
-}
-
-static __be64 get_umr_update_pd_mask(void)
-{
- u64 result;
-
- result = MLX5_MKEY_MASK_PD;
-
- return cpu_to_be64(result);
-}
-
-static int umr_check_mkey_mask(struct mlx5_ib_dev *dev, u64 mask)
-{
- if ((mask & MLX5_MKEY_MASK_PAGE_SIZE &&
- MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled)) ||
- (mask & MLX5_MKEY_MASK_A &&
- MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled)))
- return -EPERM;
- return 0;
-}
-
-static int set_reg_umr_segment(struct mlx5_ib_dev *dev,
- struct mlx5_wqe_umr_ctrl_seg *umr,
- const struct ib_send_wr *wr, int atomic)
-{
- const struct mlx5_umr_wr *umrwr = umr_wr(wr);
-
- memset(umr, 0, sizeof(*umr));
-
- if (!umrwr->ignore_free_state) {
- if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE)
- /* fail if free */
- umr->flags = MLX5_UMR_CHECK_FREE;
- else
- /* fail if not free */
- umr->flags = MLX5_UMR_CHECK_NOT_FREE;
- }
-
- umr->xlt_octowords = cpu_to_be16(get_xlt_octo(umrwr->xlt_size));
- if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_XLT) {
- u64 offset = get_xlt_octo(umrwr->offset);
-
- umr->xlt_offset = cpu_to_be16(offset & 0xffff);
- umr->xlt_offset_47_16 = cpu_to_be32(offset >> 16);
- umr->flags |= MLX5_UMR_TRANSLATION_OFFSET_EN;
- }
- if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION)
- umr->mkey_mask |= get_umr_update_translation_mask();
- if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS) {
- umr->mkey_mask |= get_umr_update_access_mask(atomic);
- umr->mkey_mask |= get_umr_update_pd_mask();
- }
- if (wr->send_flags & MLX5_IB_SEND_UMR_ENABLE_MR)
- umr->mkey_mask |= get_umr_enable_mr_mask();
- if (wr->send_flags & MLX5_IB_SEND_UMR_DISABLE_MR)
- umr->mkey_mask |= get_umr_disable_mr_mask();
-
- if (!wr->num_sge)
- umr->flags |= MLX5_UMR_INLINE;
-
- return umr_check_mkey_mask(dev, be64_to_cpu(umr->mkey_mask));
-}
-
-static u8 get_umr_flags(int acc)
-{
- return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC : 0) |
- (acc & IB_ACCESS_REMOTE_WRITE ? MLX5_PERM_REMOTE_WRITE : 0) |
- (acc & IB_ACCESS_REMOTE_READ ? MLX5_PERM_REMOTE_READ : 0) |
- (acc & IB_ACCESS_LOCAL_WRITE ? MLX5_PERM_LOCAL_WRITE : 0) |
- MLX5_PERM_LOCAL_READ | MLX5_PERM_UMR_EN;
-}
-
-static void set_reg_mkey_seg(struct mlx5_mkey_seg *seg,
- struct mlx5_ib_mr *mr,
- u32 key, int access)
-{
- int ndescs = ALIGN(mr->ndescs + mr->meta_ndescs, 8) >> 1;
-
- memset(seg, 0, sizeof(*seg));
-
- if (mr->access_mode == MLX5_MKC_ACCESS_MODE_MTT)
- seg->log2_page_size = ilog2(mr->ibmr.page_size);
- else if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
- /* KLMs take twice the size of MTTs */
- ndescs *= 2;
-
- seg->flags = get_umr_flags(access) | mr->access_mode;
- seg->qpn_mkey7_0 = cpu_to_be32((key & 0xff) | 0xffffff00);
- seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL);
- seg->start_addr = cpu_to_be64(mr->ibmr.iova);
- seg->len = cpu_to_be64(mr->ibmr.length);
- seg->xlt_oct_size = cpu_to_be32(ndescs);
-}
-
-static void set_linv_mkey_seg(struct mlx5_mkey_seg *seg)
-{
- memset(seg, 0, sizeof(*seg));
- seg->status = MLX5_MKEY_STATUS_FREE;
-}
-
-static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg,
- const struct ib_send_wr *wr)
-{
- const struct mlx5_umr_wr *umrwr = umr_wr(wr);
-
- memset(seg, 0, sizeof(*seg));
- if (wr->send_flags & MLX5_IB_SEND_UMR_DISABLE_MR)
- seg->status = MLX5_MKEY_STATUS_FREE;
-
- seg->flags = convert_access(umrwr->access_flags);
- if (umrwr->pd)
- seg->flags_pd = cpu_to_be32(to_mpd(umrwr->pd)->pdn);
- if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION &&
- !umrwr->length)
- seg->flags_pd |= cpu_to_be32(MLX5_MKEY_LEN64);
-
- seg->start_addr = cpu_to_be64(umrwr->virt_addr);
- seg->len = cpu_to_be64(umrwr->length);
- seg->log2_page_size = umrwr->page_shift;
- seg->qpn_mkey7_0 = cpu_to_be32(0xffffff00 |
- mlx5_mkey_variant(umrwr->mkey));
-}
-
-static void set_reg_data_seg(struct mlx5_wqe_data_seg *dseg,
- struct mlx5_ib_mr *mr,
- struct mlx5_ib_pd *pd)
-{
- int bcount = mr->desc_size * (mr->ndescs + mr->meta_ndescs);
-
- dseg->addr = cpu_to_be64(mr->desc_map);
- dseg->byte_count = cpu_to_be32(ALIGN(bcount, 64));
- dseg->lkey = cpu_to_be32(pd->ibpd.local_dma_lkey);
-}
-
-static __be32 send_ieth(const struct ib_send_wr *wr)
-{
- switch (wr->opcode) {
- case IB_WR_SEND_WITH_IMM:
- case IB_WR_RDMA_WRITE_WITH_IMM:
- return wr->ex.imm_data;
-
- case IB_WR_SEND_WITH_INV:
- return cpu_to_be32(wr->ex.invalidate_rkey);
-
- default:
- return 0;
- }
-}
-
-static u8 calc_sig(void *wqe, int size)
-{
- u8 *p = wqe;
- u8 res = 0;
- int i;
-
- for (i = 0; i < size; i++)
- res ^= p[i];
-
- return ~res;
-}
-
-static u8 wq_sig(void *wqe)
-{
- return calc_sig(wqe, (*((u8 *)wqe + 8) & 0x3f) << 4);
-}
-
-static int set_data_inl_seg(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr,
- void **wqe, int *wqe_sz, void **cur_edge)
-{
- struct mlx5_wqe_inline_seg *seg;
- size_t offset;
- int inl = 0;
- int i;
-
- seg = *wqe;
- *wqe += sizeof(*seg);
- offset = sizeof(*seg);
-
- for (i = 0; i < wr->num_sge; i++) {
- size_t len = wr->sg_list[i].length;
- void *addr = (void *)(unsigned long)(wr->sg_list[i].addr);
-
- inl += len;
-
- if (unlikely(inl > qp->max_inline_data))
- return -ENOMEM;
-
- while (likely(len)) {
- size_t leftlen;
- size_t copysz;
+ new_state, &ucmd, &resp, udata);
- handle_post_send_edge(&qp->sq, wqe,
- *wqe_sz + (offset >> 4),
- cur_edge);
-
- leftlen = *cur_edge - *wqe;
- copysz = min_t(size_t, leftlen, len);
-
- memcpy(*wqe, addr, copysz);
- len -= copysz;
- addr += copysz;
- *wqe += copysz;
- offset += copysz;
- }
- }
-
- seg->byte_count = cpu_to_be32(inl | MLX5_INLINE_SEG);
-
- *wqe_sz += ALIGN(inl + sizeof(seg->byte_count), 16) / 16;
-
- return 0;
-}
-
-static u16 prot_field_size(enum ib_signature_type type)
-{
- switch (type) {
- case IB_SIG_TYPE_T10_DIF:
- return MLX5_DIF_SIZE;
- default:
- return 0;
- }
-}
-
-static u8 bs_selector(int block_size)
-{
- switch (block_size) {
- case 512: return 0x1;
- case 520: return 0x2;
- case 4096: return 0x3;
- case 4160: return 0x4;
- case 1073741824: return 0x5;
- default: return 0;
- }
-}
-
-static void mlx5_fill_inl_bsf(struct ib_sig_domain *domain,
- struct mlx5_bsf_inl *inl)
-{
- /* Valid inline section and allow BSF refresh */
- inl->vld_refresh = cpu_to_be16(MLX5_BSF_INL_VALID |
- MLX5_BSF_REFRESH_DIF);
- inl->dif_apptag = cpu_to_be16(domain->sig.dif.app_tag);
- inl->dif_reftag = cpu_to_be32(domain->sig.dif.ref_tag);
- /* repeating block */
- inl->rp_inv_seed = MLX5_BSF_REPEAT_BLOCK;
- inl->sig_type = domain->sig.dif.bg_type == IB_T10DIF_CRC ?
- MLX5_DIF_CRC : MLX5_DIF_IPCS;
-
- if (domain->sig.dif.ref_remap)
- inl->dif_inc_ref_guard_check |= MLX5_BSF_INC_REFTAG;
-
- if (domain->sig.dif.app_escape) {
- if (domain->sig.dif.ref_escape)
- inl->dif_inc_ref_guard_check |= MLX5_BSF_APPREF_ESCAPE;
- else
- inl->dif_inc_ref_guard_check |= MLX5_BSF_APPTAG_ESCAPE;
- }
-
- inl->dif_app_bitmask_check =
- cpu_to_be16(domain->sig.dif.apptag_check_mask);
-}
-
-static int mlx5_set_bsf(struct ib_mr *sig_mr,
- struct ib_sig_attrs *sig_attrs,
- struct mlx5_bsf *bsf, u32 data_size)
-{
- struct mlx5_core_sig_ctx *msig = to_mmr(sig_mr)->sig;
- struct mlx5_bsf_basic *basic = &bsf->basic;
- struct ib_sig_domain *mem = &sig_attrs->mem;
- struct ib_sig_domain *wire = &sig_attrs->wire;
-
- memset(bsf, 0, sizeof(*bsf));
-
- /* Basic + Extended + Inline */
- basic->bsf_size_sbs = 1 << 7;
- /* Input domain check byte mask */
- basic->check_byte_mask = sig_attrs->check_mask;
- basic->raw_data_size = cpu_to_be32(data_size);
-
- /* Memory domain */
- switch (sig_attrs->mem.sig_type) {
- case IB_SIG_TYPE_NONE:
- break;
- case IB_SIG_TYPE_T10_DIF:
- basic->mem.bs_selector = bs_selector(mem->sig.dif.pi_interval);
- basic->m_bfs_psv = cpu_to_be32(msig->psv_memory.psv_idx);
- mlx5_fill_inl_bsf(mem, &bsf->m_inl);
- break;
- default:
- return -EINVAL;
- }
-
- /* Wire domain */
- switch (sig_attrs->wire.sig_type) {
- case IB_SIG_TYPE_NONE:
- break;
- case IB_SIG_TYPE_T10_DIF:
- if (mem->sig.dif.pi_interval == wire->sig.dif.pi_interval &&
- mem->sig_type == wire->sig_type) {
- /* Same block structure */
- basic->bsf_size_sbs |= 1 << 4;
- if (mem->sig.dif.bg_type == wire->sig.dif.bg_type)
- basic->wire.copy_byte_mask |= MLX5_CPY_GRD_MASK;
- if (mem->sig.dif.app_tag == wire->sig.dif.app_tag)
- basic->wire.copy_byte_mask |= MLX5_CPY_APP_MASK;
- if (mem->sig.dif.ref_tag == wire->sig.dif.ref_tag)
- basic->wire.copy_byte_mask |= MLX5_CPY_REF_MASK;
- } else
- basic->wire.bs_selector = bs_selector(wire->sig.dif.pi_interval);
-
- basic->w_bfs_psv = cpu_to_be32(msig->psv_wire.psv_idx);
- mlx5_fill_inl_bsf(wire, &bsf->w_inl);
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int set_sig_data_segment(const struct ib_send_wr *send_wr,
- struct ib_mr *sig_mr,
- struct ib_sig_attrs *sig_attrs,
- struct mlx5_ib_qp *qp, void **seg, int *size,
- void **cur_edge)
-{
- struct mlx5_bsf *bsf;
- u32 data_len;
- u32 data_key;
- u64 data_va;
- u32 prot_len = 0;
- u32 prot_key = 0;
- u64 prot_va = 0;
- bool prot = false;
- int ret;
- int wqe_size;
- struct mlx5_ib_mr *mr = to_mmr(sig_mr);
- struct mlx5_ib_mr *pi_mr = mr->pi_mr;
-
- data_len = pi_mr->data_length;
- data_key = pi_mr->ibmr.lkey;
- data_va = pi_mr->data_iova;
- if (pi_mr->meta_ndescs) {
- prot_len = pi_mr->meta_length;
- prot_key = pi_mr->ibmr.lkey;
- prot_va = pi_mr->pi_iova;
- prot = true;
- }
-
- if (!prot || (data_key == prot_key && data_va == prot_va &&
- data_len == prot_len)) {
- /**
- * Source domain doesn't contain signature information
- * or data and protection are interleaved in memory.
- * So need construct:
- * ------------------
- * | data_klm |
- * ------------------
- * | BSF |
- * ------------------
- **/
- struct mlx5_klm *data_klm = *seg;
-
- data_klm->bcount = cpu_to_be32(data_len);
- data_klm->key = cpu_to_be32(data_key);
- data_klm->va = cpu_to_be64(data_va);
- wqe_size = ALIGN(sizeof(*data_klm), 64);
- } else {
- /**
- * Source domain contains signature information
- * So need construct a strided block format:
- * ---------------------------
- * | stride_block_ctrl |
- * ---------------------------
- * | data_klm |
- * ---------------------------
- * | prot_klm |
- * ---------------------------
- * | BSF |
- * ---------------------------
- **/
- struct mlx5_stride_block_ctrl_seg *sblock_ctrl;
- struct mlx5_stride_block_entry *data_sentry;
- struct mlx5_stride_block_entry *prot_sentry;
- u16 block_size = sig_attrs->mem.sig.dif.pi_interval;
- int prot_size;
-
- sblock_ctrl = *seg;
- data_sentry = (void *)sblock_ctrl + sizeof(*sblock_ctrl);
- prot_sentry = (void *)data_sentry + sizeof(*data_sentry);
-
- prot_size = prot_field_size(sig_attrs->mem.sig_type);
- if (!prot_size) {
- pr_err("Bad block size given: %u\n", block_size);
- return -EINVAL;
- }
- sblock_ctrl->bcount_per_cycle = cpu_to_be32(block_size +
- prot_size);
- sblock_ctrl->op = cpu_to_be32(MLX5_STRIDE_BLOCK_OP);
- sblock_ctrl->repeat_count = cpu_to_be32(data_len / block_size);
- sblock_ctrl->num_entries = cpu_to_be16(2);
-
- data_sentry->bcount = cpu_to_be16(block_size);
- data_sentry->key = cpu_to_be32(data_key);
- data_sentry->va = cpu_to_be64(data_va);
- data_sentry->stride = cpu_to_be16(block_size);
-
- prot_sentry->bcount = cpu_to_be16(prot_size);
- prot_sentry->key = cpu_to_be32(prot_key);
- prot_sentry->va = cpu_to_be64(prot_va);
- prot_sentry->stride = cpu_to_be16(prot_size);
-
- wqe_size = ALIGN(sizeof(*sblock_ctrl) + sizeof(*data_sentry) +
- sizeof(*prot_sentry), 64);
- }
-
- *seg += wqe_size;
- *size += wqe_size / 16;
- handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
-
- bsf = *seg;
- ret = mlx5_set_bsf(sig_mr, sig_attrs, bsf, data_len);
- if (ret)
- return -EINVAL;
-
- *seg += sizeof(*bsf);
- *size += sizeof(*bsf) / 16;
- handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
-
- return 0;
-}
-
-static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg,
- struct ib_mr *sig_mr, int access_flags,
- u32 size, u32 length, u32 pdn)
-{
- u32 sig_key = sig_mr->rkey;
- u8 sigerr = to_mmr(sig_mr)->sig->sigerr_count & 1;
-
- memset(seg, 0, sizeof(*seg));
-
- seg->flags = get_umr_flags(access_flags) | MLX5_MKC_ACCESS_MODE_KLMS;
- seg->qpn_mkey7_0 = cpu_to_be32((sig_key & 0xff) | 0xffffff00);
- seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL | sigerr << 26 |
- MLX5_MKEY_BSF_EN | pdn);
- seg->len = cpu_to_be64(length);
- seg->xlt_oct_size = cpu_to_be32(get_xlt_octo(size));
- seg->bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE);
-}
-
-static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
- u32 size)
-{
- memset(umr, 0, sizeof(*umr));
-
- umr->flags = MLX5_FLAGS_INLINE | MLX5_FLAGS_CHECK_FREE;
- umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size));
- umr->bsf_octowords = cpu_to_be16(MLX5_MKEY_BSF_OCTO_SIZE);
- umr->mkey_mask = sig_mkey_mask();
-}
-
-static int set_pi_umr_wr(const struct ib_send_wr *send_wr,
- struct mlx5_ib_qp *qp, void **seg, int *size,
- void **cur_edge)
-{
- const struct ib_reg_wr *wr = reg_wr(send_wr);
- struct mlx5_ib_mr *sig_mr = to_mmr(wr->mr);
- struct mlx5_ib_mr *pi_mr = sig_mr->pi_mr;
- struct ib_sig_attrs *sig_attrs = sig_mr->ibmr.sig_attrs;
- u32 pdn = get_pd(qp)->pdn;
- u32 xlt_size;
- int region_len, ret;
-
- if (unlikely(send_wr->num_sge != 0) ||
- unlikely(wr->access & IB_ACCESS_REMOTE_ATOMIC) ||
- unlikely(!sig_mr->sig) || unlikely(!qp->ibqp.integrity_en) ||
- unlikely(!sig_mr->sig->sig_status_checked))
- return -EINVAL;
-
- /* length of the protected region, data + protection */
- region_len = pi_mr->ibmr.length;
-
- /**
- * KLM octoword size - if protection was provided
- * then we use strided block format (3 octowords),
- * else we use single KLM (1 octoword)
- **/
- if (sig_attrs->mem.sig_type != IB_SIG_TYPE_NONE)
- xlt_size = 0x30;
- else
- xlt_size = sizeof(struct mlx5_klm);
-
- set_sig_umr_segment(*seg, xlt_size);
- *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
- *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
- handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
-
- set_sig_mkey_segment(*seg, wr->mr, wr->access, xlt_size, region_len,
- pdn);
- *seg += sizeof(struct mlx5_mkey_seg);
- *size += sizeof(struct mlx5_mkey_seg) / 16;
- handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
-
- ret = set_sig_data_segment(send_wr, wr->mr, sig_attrs, qp, seg, size,
- cur_edge);
- if (ret)
- return ret;
-
- sig_mr->sig->sig_status_checked = false;
- return 0;
-}
-
-static int set_psv_wr(struct ib_sig_domain *domain,
- u32 psv_idx, void **seg, int *size)
-{
- struct mlx5_seg_set_psv *psv_seg = *seg;
-
- memset(psv_seg, 0, sizeof(*psv_seg));
- psv_seg->psv_num = cpu_to_be32(psv_idx);
- switch (domain->sig_type) {
- case IB_SIG_TYPE_NONE:
- break;
- case IB_SIG_TYPE_T10_DIF:
- psv_seg->transient_sig = cpu_to_be32(domain->sig.dif.bg << 16 |
- domain->sig.dif.app_tag);
- psv_seg->ref_tag = cpu_to_be32(domain->sig.dif.ref_tag);
- break;
- default:
- pr_err("Bad signature type (%d) is given.\n",
- domain->sig_type);
- return -EINVAL;
- }
-
- *seg += sizeof(*psv_seg);
- *size += sizeof(*psv_seg) / 16;
-
- return 0;
-}
-
-static int set_reg_wr(struct mlx5_ib_qp *qp,
- const struct ib_reg_wr *wr,
- void **seg, int *size, void **cur_edge,
- bool check_not_free)
-{
- struct mlx5_ib_mr *mr = to_mmr(wr->mr);
- struct mlx5_ib_pd *pd = to_mpd(qp->ibqp.pd);
- struct mlx5_ib_dev *dev = to_mdev(pd->ibpd.device);
- int mr_list_size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size;
- bool umr_inline = mr_list_size <= MLX5_IB_SQ_UMR_INLINE_THRESHOLD;
- bool atomic = wr->access & IB_ACCESS_REMOTE_ATOMIC;
- u8 flags = 0;
-
- if (!mlx5_ib_can_use_umr(dev, atomic, wr->access)) {
- mlx5_ib_warn(to_mdev(qp->ibqp.device),
- "Fast update of %s for MR is disabled\n",
- (MLX5_CAP_GEN(dev->mdev,
- umr_modify_entity_size_disabled)) ?
- "entity size" :
- "atomic access");
- return -EINVAL;
- }
-
- if (unlikely(wr->wr.send_flags & IB_SEND_INLINE)) {
- mlx5_ib_warn(to_mdev(qp->ibqp.device),
- "Invalid IB_SEND_INLINE send flag\n");
- return -EINVAL;
- }
-
- if (check_not_free)
- flags |= MLX5_UMR_CHECK_NOT_FREE;
- if (umr_inline)
- flags |= MLX5_UMR_INLINE;
-
- set_reg_umr_seg(*seg, mr, flags, atomic);
- *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
- *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
- handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
-
- set_reg_mkey_seg(*seg, mr, wr->key, wr->access);
- *seg += sizeof(struct mlx5_mkey_seg);
- *size += sizeof(struct mlx5_mkey_seg) / 16;
- handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
-
- if (umr_inline) {
- memcpy_send_wqe(&qp->sq, cur_edge, seg, size, mr->descs,
- mr_list_size);
- *size = ALIGN(*size, MLX5_SEND_WQE_BB >> 4);
- } else {
- set_reg_data_seg(*seg, mr, pd);
- *seg += sizeof(struct mlx5_wqe_data_seg);
- *size += (sizeof(struct mlx5_wqe_data_seg) / 16);
- }
- return 0;
-}
-
-static void set_linv_wr(struct mlx5_ib_qp *qp, void **seg, int *size,
- void **cur_edge)
-{
- set_linv_umr_seg(*seg);
- *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
- *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
- handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
- set_linv_mkey_seg(*seg);
- *seg += sizeof(struct mlx5_mkey_seg);
- *size += sizeof(struct mlx5_mkey_seg) / 16;
- handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
-}
-
-static void dump_wqe(struct mlx5_ib_qp *qp, u32 idx, int size_16)
-{
- __be32 *p = NULL;
- int i, j;
-
- pr_debug("dump WQE index %u:\n", idx);
- for (i = 0, j = 0; i < size_16 * 4; i += 4, j += 4) {
- if ((i & 0xf) == 0) {
- p = mlx5_frag_buf_get_wqe(&qp->sq.fbc, idx);
- pr_debug("WQBB at %p:\n", (void *)p);
- j = 0;
- idx = (idx + 1) & (qp->sq.wqe_cnt - 1);
- }
- pr_debug("%08x %08x %08x %08x\n", be32_to_cpu(p[j]),
- be32_to_cpu(p[j + 1]), be32_to_cpu(p[j + 2]),
- be32_to_cpu(p[j + 3]));
- }
-}
-
-static int __begin_wqe(struct mlx5_ib_qp *qp, void **seg,
- struct mlx5_wqe_ctrl_seg **ctrl,
- const struct ib_send_wr *wr, unsigned int *idx,
- int *size, void **cur_edge, int nreq,
- bool send_signaled, bool solicited)
-{
- if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)))
- return -ENOMEM;
-
- *idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
- *seg = mlx5_frag_buf_get_wqe(&qp->sq.fbc, *idx);
- *ctrl = *seg;
- *(uint32_t *)(*seg + 8) = 0;
- (*ctrl)->imm = send_ieth(wr);
- (*ctrl)->fm_ce_se = qp->sq_signal_bits |
- (send_signaled ? MLX5_WQE_CTRL_CQ_UPDATE : 0) |
- (solicited ? MLX5_WQE_CTRL_SOLICITED : 0);
-
- *seg += sizeof(**ctrl);
- *size = sizeof(**ctrl) / 16;
- *cur_edge = qp->sq.cur_edge;
-
- return 0;
-}
-
-static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
- struct mlx5_wqe_ctrl_seg **ctrl,
- const struct ib_send_wr *wr, unsigned *idx,
- int *size, void **cur_edge, int nreq)
-{
- return __begin_wqe(qp, seg, ctrl, wr, idx, size, cur_edge, nreq,
- wr->send_flags & IB_SEND_SIGNALED,
- wr->send_flags & IB_SEND_SOLICITED);
-}
-
-static void finish_wqe(struct mlx5_ib_qp *qp,
- struct mlx5_wqe_ctrl_seg *ctrl,
- void *seg, u8 size, void *cur_edge,
- unsigned int idx, u64 wr_id, int nreq, u8 fence,
- u32 mlx5_opcode)
-{
- u8 opmod = 0;
-
- ctrl->opmod_idx_opcode = cpu_to_be32(((u32)(qp->sq.cur_post) << 8) |
- mlx5_opcode | ((u32)opmod << 24));
- ctrl->qpn_ds = cpu_to_be32(size | (qp->trans_qp.base.mqp.qpn << 8));
- ctrl->fm_ce_se |= fence;
- if (unlikely(qp->wq_sig))
- ctrl->signature = wq_sig(ctrl);
-
- qp->sq.wrid[idx] = wr_id;
- qp->sq.w_list[idx].opcode = mlx5_opcode;
- qp->sq.wqe_head[idx] = qp->sq.head + nreq;
- qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB);
- qp->sq.w_list[idx].next = qp->sq.cur_post;
-
- /* We save the edge which was possibly updated during the WQE
- * construction, into SQ's cache.
- */
- seg = PTR_ALIGN(seg, MLX5_SEND_WQE_BB);
- qp->sq.cur_edge = (unlikely(seg == cur_edge)) ?
- get_sq_edge(&qp->sq, qp->sq.cur_post &
- (qp->sq.wqe_cnt - 1)) :
- cur_edge;
-}
-
-static int _mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
- const struct ib_send_wr **bad_wr, bool drain)
-{
- struct mlx5_wqe_ctrl_seg *ctrl = NULL; /* compiler warning */
- struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
- struct mlx5_core_dev *mdev = dev->mdev;
- struct ib_reg_wr reg_pi_wr;
- struct mlx5_ib_qp *qp;
- struct mlx5_ib_mr *mr;
- struct mlx5_ib_mr *pi_mr;
- struct mlx5_ib_mr pa_pi_mr;
- struct ib_sig_attrs *sig_attrs;
- struct mlx5_wqe_xrc_seg *xrc;
- struct mlx5_bf *bf;
- void *cur_edge;
- int uninitialized_var(size);
- unsigned long flags;
- unsigned idx;
- int err = 0;
- int num_sge;
- void *seg;
- int nreq;
- int i;
- u8 next_fence = 0;
- u8 fence;
-
- if (unlikely(mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR &&
- !drain)) {
- *bad_wr = wr;
- return -EIO;
- }
-
- if (unlikely(ibqp->qp_type == IB_QPT_GSI))
- return mlx5_ib_gsi_post_send(ibqp, wr, bad_wr);
-
- qp = to_mqp(ibqp);
- bf = &qp->bf;
-
- spin_lock_irqsave(&qp->sq.lock, flags);
-
- for (nreq = 0; wr; nreq++, wr = wr->next) {
- if (unlikely(wr->opcode >= ARRAY_SIZE(mlx5_ib_opcode))) {
- mlx5_ib_warn(dev, "\n");
- err = -EINVAL;
- *bad_wr = wr;
- goto out;
- }
-
- num_sge = wr->num_sge;
- if (unlikely(num_sge > qp->sq.max_gs)) {
- mlx5_ib_warn(dev, "\n");
- err = -EINVAL;
- *bad_wr = wr;
- goto out;
- }
-
- err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, &cur_edge,
- nreq);
- if (err) {
- mlx5_ib_warn(dev, "\n");
- err = -ENOMEM;
- *bad_wr = wr;
- goto out;
- }
-
- if (wr->opcode == IB_WR_REG_MR ||
- wr->opcode == IB_WR_REG_MR_INTEGRITY) {
- fence = dev->umr_fence;
- next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
- } else {
- if (wr->send_flags & IB_SEND_FENCE) {
- if (qp->next_fence)
- fence = MLX5_FENCE_MODE_SMALL_AND_FENCE;
- else
- fence = MLX5_FENCE_MODE_FENCE;
- } else {
- fence = qp->next_fence;
- }
- }
-
- switch (ibqp->qp_type) {
- case IB_QPT_XRC_INI:
- xrc = seg;
- seg += sizeof(*xrc);
- size += sizeof(*xrc) / 16;
- /* fall through */
- case IB_QPT_RC:
- switch (wr->opcode) {
- case IB_WR_RDMA_READ:
- case IB_WR_RDMA_WRITE:
- case IB_WR_RDMA_WRITE_WITH_IMM:
- set_raddr_seg(seg, rdma_wr(wr)->remote_addr,
- rdma_wr(wr)->rkey);
- seg += sizeof(struct mlx5_wqe_raddr_seg);
- size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
- break;
-
- case IB_WR_ATOMIC_CMP_AND_SWP:
- case IB_WR_ATOMIC_FETCH_AND_ADD:
- case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
- mlx5_ib_warn(dev, "Atomic operations are not supported yet\n");
- err = -ENOSYS;
- *bad_wr = wr;
- goto out;
-
- case IB_WR_LOCAL_INV:
- qp->sq.wr_data[idx] = IB_WR_LOCAL_INV;
- ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey);
- set_linv_wr(qp, &seg, &size, &cur_edge);
- num_sge = 0;
- break;
-
- case IB_WR_REG_MR:
- qp->sq.wr_data[idx] = IB_WR_REG_MR;
- ctrl->imm = cpu_to_be32(reg_wr(wr)->key);
- err = set_reg_wr(qp, reg_wr(wr), &seg, &size,
- &cur_edge, true);
- if (err) {
- *bad_wr = wr;
- goto out;
- }
- num_sge = 0;
- break;
-
- case IB_WR_REG_MR_INTEGRITY:
- qp->sq.wr_data[idx] = IB_WR_REG_MR_INTEGRITY;
-
- mr = to_mmr(reg_wr(wr)->mr);
- pi_mr = mr->pi_mr;
-
- if (pi_mr) {
- memset(&reg_pi_wr, 0,
- sizeof(struct ib_reg_wr));
-
- reg_pi_wr.mr = &pi_mr->ibmr;
- reg_pi_wr.access = reg_wr(wr)->access;
- reg_pi_wr.key = pi_mr->ibmr.rkey;
-
- ctrl->imm = cpu_to_be32(reg_pi_wr.key);
- /* UMR for data + prot registration */
- err = set_reg_wr(qp, &reg_pi_wr, &seg,
- &size, &cur_edge,
- false);
- if (err) {
- *bad_wr = wr;
- goto out;
- }
- finish_wqe(qp, ctrl, seg, size,
- cur_edge, idx, wr->wr_id,
- nreq, fence,
- MLX5_OPCODE_UMR);
-
- err = begin_wqe(qp, &seg, &ctrl, wr,
- &idx, &size, &cur_edge,
- nreq);
- if (err) {
- mlx5_ib_warn(dev, "\n");
- err = -ENOMEM;
- *bad_wr = wr;
- goto out;
- }
- } else {
- memset(&pa_pi_mr, 0,
- sizeof(struct mlx5_ib_mr));
- /* No UMR, use local_dma_lkey */
- pa_pi_mr.ibmr.lkey =
- mr->ibmr.pd->local_dma_lkey;
-
- pa_pi_mr.ndescs = mr->ndescs;
- pa_pi_mr.data_length = mr->data_length;
- pa_pi_mr.data_iova = mr->data_iova;
- if (mr->meta_ndescs) {
- pa_pi_mr.meta_ndescs =
- mr->meta_ndescs;
- pa_pi_mr.meta_length =
- mr->meta_length;
- pa_pi_mr.pi_iova = mr->pi_iova;
- }
-
- pa_pi_mr.ibmr.length = mr->ibmr.length;
- mr->pi_mr = &pa_pi_mr;
- }
- ctrl->imm = cpu_to_be32(mr->ibmr.rkey);
- /* UMR for sig MR */
- err = set_pi_umr_wr(wr, qp, &seg, &size,
- &cur_edge);
- if (err) {
- mlx5_ib_warn(dev, "\n");
- *bad_wr = wr;
- goto out;
- }
- finish_wqe(qp, ctrl, seg, size, cur_edge, idx,
- wr->wr_id, nreq, fence,
- MLX5_OPCODE_UMR);
-
- /*
- * SET_PSV WQEs are not signaled and solicited
- * on error
- */
- sig_attrs = mr->ibmr.sig_attrs;
- err = __begin_wqe(qp, &seg, &ctrl, wr, &idx,
- &size, &cur_edge, nreq, false,
- true);
- if (err) {
- mlx5_ib_warn(dev, "\n");
- err = -ENOMEM;
- *bad_wr = wr;
- goto out;
- }
- err = set_psv_wr(&sig_attrs->mem,
- mr->sig->psv_memory.psv_idx,
- &seg, &size);
- if (err) {
- mlx5_ib_warn(dev, "\n");
- *bad_wr = wr;
- goto out;
- }
- finish_wqe(qp, ctrl, seg, size, cur_edge, idx,
- wr->wr_id, nreq, next_fence,
- MLX5_OPCODE_SET_PSV);
-
- err = __begin_wqe(qp, &seg, &ctrl, wr, &idx,
- &size, &cur_edge, nreq, false,
- true);
- if (err) {
- mlx5_ib_warn(dev, "\n");
- err = -ENOMEM;
- *bad_wr = wr;
- goto out;
- }
- err = set_psv_wr(&sig_attrs->wire,
- mr->sig->psv_wire.psv_idx,
- &seg, &size);
- if (err) {
- mlx5_ib_warn(dev, "\n");
- *bad_wr = wr;
- goto out;
- }
- finish_wqe(qp, ctrl, seg, size, cur_edge, idx,
- wr->wr_id, nreq, next_fence,
- MLX5_OPCODE_SET_PSV);
-
- qp->next_fence =
- MLX5_FENCE_MODE_INITIATOR_SMALL;
- num_sge = 0;
- goto skip_psv;
-
- default:
- break;
- }
- break;
-
- case IB_QPT_UC:
- switch (wr->opcode) {
- case IB_WR_RDMA_WRITE:
- case IB_WR_RDMA_WRITE_WITH_IMM:
- set_raddr_seg(seg, rdma_wr(wr)->remote_addr,
- rdma_wr(wr)->rkey);
- seg += sizeof(struct mlx5_wqe_raddr_seg);
- size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
- break;
-
- default:
- break;
- }
- break;
-
- case IB_QPT_SMI:
- if (unlikely(!mdev->port_caps[qp->port - 1].has_smi)) {
- mlx5_ib_warn(dev, "Send SMP MADs is not allowed\n");
- err = -EPERM;
- *bad_wr = wr;
- goto out;
- }
- /* fall through */
- case MLX5_IB_QPT_HW_GSI:
- set_datagram_seg(seg, wr);
- seg += sizeof(struct mlx5_wqe_datagram_seg);
- size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
- handle_post_send_edge(&qp->sq, &seg, size, &cur_edge);
-
- break;
- case IB_QPT_UD:
- set_datagram_seg(seg, wr);
- seg += sizeof(struct mlx5_wqe_datagram_seg);
- size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
- handle_post_send_edge(&qp->sq, &seg, size, &cur_edge);
-
- /* handle qp that supports ud offload */
- if (qp->flags & IB_QP_CREATE_IPOIB_UD_LSO) {
- struct mlx5_wqe_eth_pad *pad;
-
- pad = seg;
- memset(pad, 0, sizeof(struct mlx5_wqe_eth_pad));
- seg += sizeof(struct mlx5_wqe_eth_pad);
- size += sizeof(struct mlx5_wqe_eth_pad) / 16;
- set_eth_seg(wr, qp, &seg, &size, &cur_edge);
- handle_post_send_edge(&qp->sq, &seg, size,
- &cur_edge);
- }
- break;
- case MLX5_IB_QPT_REG_UMR:
- if (wr->opcode != MLX5_IB_WR_UMR) {
- err = -EINVAL;
- mlx5_ib_warn(dev, "bad opcode\n");
- goto out;
- }
- qp->sq.wr_data[idx] = MLX5_IB_WR_UMR;
- ctrl->imm = cpu_to_be32(umr_wr(wr)->mkey);
- err = set_reg_umr_segment(dev, seg, wr, !!(MLX5_CAP_GEN(mdev, atomic)));
- if (unlikely(err))
- goto out;
- seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
- size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
- handle_post_send_edge(&qp->sq, &seg, size, &cur_edge);
- set_reg_mkey_segment(seg, wr);
- seg += sizeof(struct mlx5_mkey_seg);
- size += sizeof(struct mlx5_mkey_seg) / 16;
- handle_post_send_edge(&qp->sq, &seg, size, &cur_edge);
- break;
-
- default:
- break;
- }
-
- if (wr->send_flags & IB_SEND_INLINE && num_sge) {
- err = set_data_inl_seg(qp, wr, &seg, &size, &cur_edge);
- if (unlikely(err)) {
- mlx5_ib_warn(dev, "\n");
- *bad_wr = wr;
- goto out;
- }
- } else {
- for (i = 0; i < num_sge; i++) {
- handle_post_send_edge(&qp->sq, &seg, size,
- &cur_edge);
- if (likely(wr->sg_list[i].length)) {
- set_data_ptr_seg
- ((struct mlx5_wqe_data_seg *)seg,
- wr->sg_list + i);
- size += sizeof(struct mlx5_wqe_data_seg) / 16;
- seg += sizeof(struct mlx5_wqe_data_seg);
- }
- }
- }
-
- qp->next_fence = next_fence;
- finish_wqe(qp, ctrl, seg, size, cur_edge, idx, wr->wr_id, nreq,
- fence, mlx5_ib_opcode[wr->opcode]);
-skip_psv:
- if (0)
- dump_wqe(qp, idx, size);
- }
-
-out:
- if (likely(nreq)) {
- qp->sq.head += nreq;
-
- /* Make sure that descriptors are written before
- * updating doorbell record and ringing the doorbell
- */
- wmb();
-
- qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post);
-
- /* Make sure doorbell record is visible to the HCA before
- * we hit doorbell */
- wmb();
-
- mlx5_write64((__be32 *)ctrl, bf->bfreg->map + bf->offset);
- /* Make sure doorbells don't leak out of SQ spinlock
- * and reach the HCA out of order.
- */
- bf->offset ^= bf->buf_size;
- }
-
- spin_unlock_irqrestore(&qp->sq.lock, flags);
-
- return err;
-}
-
-int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
- const struct ib_send_wr **bad_wr)
-{
- return _mlx5_ib_post_send(ibqp, wr, bad_wr, false);
-}
-
-static void set_sig_seg(struct mlx5_rwqe_sig *sig, int size)
-{
- sig->signature = calc_sig(sig, size);
-}
-
-static int _mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
- const struct ib_recv_wr **bad_wr, bool drain)
-{
- struct mlx5_ib_qp *qp = to_mqp(ibqp);
- struct mlx5_wqe_data_seg *scat;
- struct mlx5_rwqe_sig *sig;
- struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
- struct mlx5_core_dev *mdev = dev->mdev;
- unsigned long flags;
- int err = 0;
- int nreq;
- int ind;
- int i;
-
- if (unlikely(mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR &&
- !drain)) {
- *bad_wr = wr;
- return -EIO;
- }
-
- if (unlikely(ibqp->qp_type == IB_QPT_GSI))
- return mlx5_ib_gsi_post_recv(ibqp, wr, bad_wr);
-
- spin_lock_irqsave(&qp->rq.lock, flags);
-
- ind = qp->rq.head & (qp->rq.wqe_cnt - 1);
-
- for (nreq = 0; wr; nreq++, wr = wr->next) {
- if (mlx5_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
- err = -ENOMEM;
- *bad_wr = wr;
- goto out;
- }
-
- if (unlikely(wr->num_sge > qp->rq.max_gs)) {
- err = -EINVAL;
- *bad_wr = wr;
- goto out;
- }
-
- scat = mlx5_frag_buf_get_wqe(&qp->rq.fbc, ind);
- if (qp->wq_sig)
- scat++;
-
- for (i = 0; i < wr->num_sge; i++)
- set_data_ptr_seg(scat + i, wr->sg_list + i);
-
- if (i < qp->rq.max_gs) {
- scat[i].byte_count = 0;
- scat[i].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
- scat[i].addr = 0;
- }
-
- if (qp->wq_sig) {
- sig = (struct mlx5_rwqe_sig *)scat;
- set_sig_seg(sig, (qp->rq.max_gs + 1) << 2);
- }
-
- qp->rq.wrid[ind] = wr->wr_id;
-
- ind = (ind + 1) & (qp->rq.wqe_cnt - 1);
- }
+ /* resp.response_length is set in ECE supported flows only */
+ if (!err && resp.response_length &&
+ udata->outlen >= resp.response_length)
+		/* Return -EFAULT to the user and expect them to destroy the QP. */
+ err = ib_copy_to_udata(udata, &resp, resp.response_length);
out:
- if (likely(nreq)) {
- qp->rq.head += nreq;
-
- /* Make sure that descriptors are written before
- * doorbell record.
- */
- wmb();
-
- *qp->db.db = cpu_to_be32(qp->rq.head & 0xffff);
- }
-
- spin_unlock_irqrestore(&qp->rq.lock, flags);
-
+ mutex_unlock(&qp->mutex);
return err;
}
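Aside: with the change above, mlx5_ib_modify_qp() reports the negotiated ECE options back
through the udata response, and only when the caller's output buffer is large enough;
resp.response_length tells userspace how much of the response was actually written. A
hypothetical userspace-side check (the struct below is a sketch for illustration, not the
real uapi layout):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct modify_qp_resp_sketch {		/* hypothetical mirror of the response */
	uint32_t response_length;
	uint32_t ece_options;
};

static void consume_resp(const struct modify_qp_resp_sketch *resp)
{
	size_t need = offsetof(struct modify_qp_resp_sketch, ece_options) +
		      sizeof(resp->ece_options);

	/* only trust fields the kernel says it wrote */
	if (resp->response_length >= need)
		printf("negotiated ECE options: 0x%x\n", resp->ece_options);
	else
		printf("kernel did not report ECE options\n");
}

int main(void)
{
	struct modify_qp_resp_sketch resp = {
		.response_length = sizeof(resp),
		.ece_options = 0x2,	/* arbitrary example value */
	};

	consume_resp(&resp);
	return 0;
}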
-int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
- const struct ib_recv_wr **bad_wr)
-{
- return _mlx5_ib_post_recv(ibqp, wr, bad_wr, false);
-}
-
static inline enum ib_qp_state to_ib_qp_state(enum mlx5_qp_state mlx5_state)
{
switch (mlx5_state) {
@@ -5525,50 +4357,35 @@ static inline enum ib_mig_state to_ib_mig_state(int mlx5_mig_state)
}
}
-static int to_ib_qp_access_flags(int mlx5_flags)
-{
- int ib_flags = 0;
-
- if (mlx5_flags & MLX5_QP_BIT_RRE)
- ib_flags |= IB_ACCESS_REMOTE_READ;
- if (mlx5_flags & MLX5_QP_BIT_RWE)
- ib_flags |= IB_ACCESS_REMOTE_WRITE;
- if (mlx5_flags & MLX5_QP_BIT_RAE)
- ib_flags |= IB_ACCESS_REMOTE_ATOMIC;
-
- return ib_flags;
-}
-
static void to_rdma_ah_attr(struct mlx5_ib_dev *ibdev,
- struct rdma_ah_attr *ah_attr,
- struct mlx5_qp_path *path)
+ struct rdma_ah_attr *ah_attr, void *path)
{
+ int port = MLX5_GET(ads, path, vhca_port_num);
+ int static_rate;
memset(ah_attr, 0, sizeof(*ah_attr));
- if (!path->port || path->port > ibdev->num_ports)
+ if (!port || port > ibdev->num_ports)
return;
- ah_attr->type = rdma_ah_find_type(&ibdev->ib_dev, path->port);
+ ah_attr->type = rdma_ah_find_type(&ibdev->ib_dev, port);
- rdma_ah_set_port_num(ah_attr, path->port);
- rdma_ah_set_sl(ah_attr, path->dci_cfi_prio_sl & 0xf);
+ rdma_ah_set_port_num(ah_attr, port);
+ rdma_ah_set_sl(ah_attr, MLX5_GET(ads, path, sl));
- rdma_ah_set_dlid(ah_attr, be16_to_cpu(path->rlid));
- rdma_ah_set_path_bits(ah_attr, path->grh_mlid & 0x7f);
- rdma_ah_set_static_rate(ah_attr,
- path->static_rate ? path->static_rate - 5 : 0);
+ rdma_ah_set_dlid(ah_attr, MLX5_GET(ads, path, rlid));
+ rdma_ah_set_path_bits(ah_attr, MLX5_GET(ads, path, mlid));
- if (path->grh_mlid & (1 << 7) ||
+ static_rate = MLX5_GET(ads, path, stat_rate);
+ rdma_ah_set_static_rate(ah_attr, static_rate ? static_rate - 5 : 0);
+ if (MLX5_GET(ads, path, grh) ||
ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
- u32 tc_fl = be32_to_cpu(path->tclass_flowlabel);
-
- rdma_ah_set_grh(ah_attr, NULL,
- tc_fl & 0xfffff,
- path->mgid_index,
- path->hop_limit,
- (tc_fl >> 20) & 0xff);
- rdma_ah_set_dgid_raw(ah_attr, path->rgid);
+ rdma_ah_set_grh(ah_attr, NULL, MLX5_GET(ads, path, flow_label),
+ MLX5_GET(ads, path, src_addr_index),
+ MLX5_GET(ads, path, hop_limit),
+ MLX5_GET(ads, path, tclass));
+ memcpy(ah_attr, MLX5_ADDR_OF(ads, path, rgid_rip),
+ MLX5_FLD_SZ_BYTES(ads, rgid_rip));
}
}
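Aside: the query-side decode above reads the address path ("ads") fields with MLX5_GET();
note the static rate handling, where a hardware value of 0 means "no static rate" and any
other value is offset by 5 from the IB rate encoding. A standalone sketch of that decode
(the offset constant is named here only for illustration):

#include <stdio.h>

#define STAT_RATE_OFFSET 5	/* assumption: offset applied by the mlx5 encoding */

/* mirrors: rdma_ah_set_static_rate(ah_attr, static_rate ? static_rate - 5 : 0) */
static unsigned int hw_to_ib_static_rate(unsigned int hw)
{
	return hw ? hw - STAT_RATE_OFFSET : 0;
}

int main(void)
{
	unsigned int samples[] = { 0, 7, 8, 10 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("hw stat_rate %u -> ib static rate %u\n",
		       samples[i], hw_to_ib_static_rate(samples[i]));
	return 0;
}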
@@ -5690,61 +4507,58 @@ static int query_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
struct ib_qp_attr *qp_attr)
{
int outlen = MLX5_ST_SZ_BYTES(query_qp_out);
- struct mlx5_qp_context *context;
- int mlx5_state;
+ void *qpc, *pri_path, *alt_path;
u32 *outb;
- int err = 0;
+ int err;
outb = kzalloc(outlen, GFP_KERNEL);
if (!outb)
return -ENOMEM;
- err = mlx5_core_qp_query(dev->mdev, &qp->trans_qp.base.mqp, outb,
- outlen);
+ err = mlx5_core_qp_query(dev, &qp->trans_qp.base.mqp, outb, outlen);
if (err)
goto out;
- /* FIXME: use MLX5_GET rather than mlx5_qp_context manual struct */
- context = (struct mlx5_qp_context *)MLX5_ADDR_OF(query_qp_out, outb, qpc);
+ qpc = MLX5_ADDR_OF(query_qp_out, outb, qpc);
- mlx5_state = be32_to_cpu(context->flags) >> 28;
+ qp->state = to_ib_qp_state(MLX5_GET(qpc, qpc, state));
+ if (MLX5_GET(qpc, qpc, state) == MLX5_QP_STATE_SQ_DRAINING)
+ qp_attr->sq_draining = 1;
- qp->state = to_ib_qp_state(mlx5_state);
- qp_attr->path_mtu = context->mtu_msgmax >> 5;
- qp_attr->path_mig_state =
- to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
- qp_attr->qkey = be32_to_cpu(context->qkey);
- qp_attr->rq_psn = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff;
- qp_attr->sq_psn = be32_to_cpu(context->next_send_psn) & 0xffffff;
- qp_attr->dest_qp_num = be32_to_cpu(context->log_pg_sz_remote_qpn) & 0xffffff;
- qp_attr->qp_access_flags =
- to_ib_qp_access_flags(be32_to_cpu(context->params2));
+ qp_attr->path_mtu = MLX5_GET(qpc, qpc, mtu);
+ qp_attr->path_mig_state = to_ib_mig_state(MLX5_GET(qpc, qpc, pm_state));
+ qp_attr->qkey = MLX5_GET(qpc, qpc, q_key);
+ qp_attr->rq_psn = MLX5_GET(qpc, qpc, next_rcv_psn);
+ qp_attr->sq_psn = MLX5_GET(qpc, qpc, next_send_psn);
+ qp_attr->dest_qp_num = MLX5_GET(qpc, qpc, remote_qpn);
- if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
- to_rdma_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
- to_rdma_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
- qp_attr->alt_pkey_index =
- be16_to_cpu(context->alt_path.pkey_index);
- qp_attr->alt_port_num =
- rdma_ah_get_port_num(&qp_attr->alt_ah_attr);
- }
+ if (MLX5_GET(qpc, qpc, rre))
+ qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ;
+ if (MLX5_GET(qpc, qpc, rwe))
+ qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_WRITE;
+ if (MLX5_GET(qpc, qpc, rae))
+ qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_ATOMIC;
- qp_attr->pkey_index = be16_to_cpu(context->pri_path.pkey_index);
- qp_attr->port_num = context->pri_path.port;
+ qp_attr->max_rd_atomic = 1 << MLX5_GET(qpc, qpc, log_sra_max);
+ qp_attr->max_dest_rd_atomic = 1 << MLX5_GET(qpc, qpc, log_rra_max);
+ qp_attr->min_rnr_timer = MLX5_GET(qpc, qpc, min_rnr_nak);
+ qp_attr->retry_cnt = MLX5_GET(qpc, qpc, retry_count);
+ qp_attr->rnr_retry = MLX5_GET(qpc, qpc, rnr_retry);
- /* qp_attr->en_sqd_async_notify is only applicable in modify qp */
- qp_attr->sq_draining = mlx5_state == MLX5_QP_STATE_SQ_DRAINING;
+ pri_path = MLX5_ADDR_OF(qpc, qpc, primary_address_path);
+ alt_path = MLX5_ADDR_OF(qpc, qpc, secondary_address_path);
- qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7);
+ if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
+ to_rdma_ah_attr(dev, &qp_attr->ah_attr, pri_path);
+ to_rdma_ah_attr(dev, &qp_attr->alt_ah_attr, alt_path);
+ qp_attr->alt_pkey_index = MLX5_GET(ads, alt_path, pkey_index);
+ qp_attr->alt_port_num = MLX5_GET(ads, alt_path, vhca_port_num);
+ }
- qp_attr->max_dest_rd_atomic =
- 1 << ((be32_to_cpu(context->params2) >> 21) & 0x7);
- qp_attr->min_rnr_timer =
- (be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f;
- qp_attr->timeout = context->pri_path.ackto_lt >> 3;
- qp_attr->retry_cnt = (be32_to_cpu(context->params1) >> 16) & 0x7;
- qp_attr->rnr_retry = (be32_to_cpu(context->params1) >> 13) & 0x7;
- qp_attr->alt_timeout = context->alt_path.ackto_lt >> 3;
+ qp_attr->pkey_index = MLX5_GET(ads, pri_path, pkey_index);
+ qp_attr->port_num = MLX5_GET(ads, pri_path, vhca_port_num);
+ qp_attr->timeout = MLX5_GET(ads, pri_path, ack_timeout);
+ qp_attr->alt_timeout = MLX5_GET(ads, alt_path, ack_timeout);
out:
kfree(outb);
@@ -5778,7 +4592,7 @@ static int mlx5_ib_dct_query_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *mqp,
if (!out)
return -ENOMEM;
- err = mlx5_core_dct_query(dev->mdev, dct, out, outlen);
+ err = mlx5_core_dct_query(dev, dct, out, outlen);
if (err)
goto out;
@@ -5835,14 +4649,14 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
memset(qp_init_attr, 0, sizeof(*qp_init_attr));
memset(qp_attr, 0, sizeof(*qp_attr));
- if (unlikely(qp->qp_sub_type == MLX5_IB_QPT_DCT))
+ if (unlikely(qp->type == MLX5_IB_QPT_DCT))
return mlx5_ib_dct_query_qp(dev, qp, qp_attr,
qp_attr_mask, qp_init_attr);
mutex_lock(&qp->mutex);
if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
- qp->flags & MLX5_IB_QP_UNDERLAY) {
+ qp->flags & IB_QP_CREATE_SOURCE_QPN) {
err = query_raw_packet_qp_state(dev, qp, &raw_packet_qp_state);
if (err)
goto out;
@@ -5876,18 +4690,7 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
qp_init_attr->cap = qp_attr->cap;
- qp_init_attr->create_flags = 0;
- if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
- qp_init_attr->create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;
-
- if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL)
- qp_init_attr->create_flags |= IB_QP_CREATE_CROSS_CHANNEL;
- if (qp->flags & MLX5_IB_QP_MANAGED_SEND)
- qp_init_attr->create_flags |= IB_QP_CREATE_MANAGED_SEND;
- if (qp->flags & MLX5_IB_QP_MANAGED_RECV)
- qp_init_attr->create_flags |= IB_QP_CREATE_MANAGED_RECV;
- if (qp->flags & MLX5_IB_QP_SQPN_QP1)
- qp_init_attr->create_flags |= MLX5_IB_QP_CREATE_SQPN_QP1;
+ qp_init_attr->create_flags = qp->flags;
qp_init_attr->sq_sig_type = qp->sq_signal_bits & MLX5_WQE_CTRL_CQ_UPDATE ?
IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
@@ -5964,7 +4767,7 @@ static int set_delay_drop(struct mlx5_ib_dev *dev)
if (dev->delay_drop.activate)
goto out;
- err = mlx5_core_set_delay_drop(dev->mdev, dev->delay_drop.timeout);
+ err = mlx5_core_set_delay_drop(dev, dev->delay_drop.timeout);
if (err)
goto out;
@@ -6070,13 +4873,13 @@ static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd,
}
rq_pas0 = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
mlx5_ib_populate_pas(dev, rwq->umem, rwq->page_shift, rq_pas0, 0);
- err = mlx5_core_create_rq_tracked(dev->mdev, in, inlen, &rwq->core_qp);
+ err = mlx5_core_create_rq_tracked(dev, in, inlen, &rwq->core_qp);
if (!err && init_attr->create_flags & IB_WQ_FLAGS_DELAY_DROP) {
err = set_delay_drop(dev);
if (err) {
mlx5_ib_warn(dev, "Failed to enable delay drop err=%d\n",
err);
- mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp);
+ mlx5_core_destroy_rq_tracked(dev, &rwq->core_qp);
} else {
rwq->create_flags |= MLX5_IB_WQ_FLAGS_DELAY_DROP;
}
@@ -6258,7 +5061,7 @@ struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
return &rwq->ibwq;
err_copy:
- mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp);
+ mlx5_core_destroy_rq_tracked(dev, &rwq->core_qp);
err_user_rq:
destroy_user_rq(dev, pd, rwq, udata);
err:
@@ -6271,7 +5074,7 @@ void mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata)
struct mlx5_ib_dev *dev = to_mdev(wq->device);
struct mlx5_ib_rwq *rwq = to_mrwq(wq);
- mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp);
+ mlx5_core_destroy_rq_tracked(dev, &rwq->core_qp);
destroy_user_rq(dev, wq->pd, rwq, udata);
kfree(rwq);
}
@@ -6449,7 +5252,7 @@ int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
"Receive WQ counters are not supported on current FW\n");
}
- err = mlx5_core_modify_rq(dev->mdev, rwq->core_qp.qpn, in, inlen);
+ err = mlx5_core_modify_rq(dev->mdev, rwq->core_qp.qpn, in);
if (!err)
rwq->ibwq.state = (wq_state == MLX5_RQC_STATE_ERR) ? IB_WQS_ERR : wq_state;
@@ -6548,7 +5351,7 @@ void mlx5_ib_drain_sq(struct ib_qp *qp)
sdrain.cqe.done = mlx5_ib_drain_qp_done;
init_completion(&sdrain.done);
- ret = _mlx5_ib_post_send(qp, &swr.wr, &bad_swr, true);
+ ret = mlx5_ib_post_send_drain(qp, &swr.wr, &bad_swr);
if (ret) {
WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
return;
@@ -6578,7 +5381,7 @@ void mlx5_ib_drain_rq(struct ib_qp *qp)
rdrain.cqe.done = mlx5_ib_drain_qp_done;
init_completion(&rdrain.done);
- ret = _mlx5_ib_post_recv(qp, &rwr, &bad_rwr, true);
+ ret = mlx5_ib_post_recv_drain(qp, &rwr, &bad_rwr);
if (ret) {
WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
return;
diff --git a/drivers/infiniband/hw/mlx5/qp.h b/drivers/infiniband/hw/mlx5/qp.h
new file mode 100644
index 000000000000..82ea2b94dfa6
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/qp.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright (c) 2013-2020, Mellanox Technologies inc. All rights reserved.
+ */
+
+#ifndef _MLX5_IB_QP_H
+#define _MLX5_IB_QP_H
+
+#include "mlx5_ib.h"
+
+int mlx5_init_qp_table(struct mlx5_ib_dev *dev);
+void mlx5_cleanup_qp_table(struct mlx5_ib_dev *dev);
+
+int mlx5_core_create_dct(struct mlx5_ib_dev *dev, struct mlx5_core_dct *qp,
+ u32 *in, int inlen, u32 *out, int outlen);
+int mlx5_qpc_create_qp(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp,
+ u32 *in, int inlen, u32 *out);
+int mlx5_core_qp_modify(struct mlx5_ib_dev *dev, u16 opcode, u32 opt_param_mask,
+ void *qpc, struct mlx5_core_qp *qp, u32 *ece);
+int mlx5_core_destroy_qp(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp);
+int mlx5_core_destroy_dct(struct mlx5_ib_dev *dev, struct mlx5_core_dct *dct);
+int mlx5_core_qp_query(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp,
+ u32 *out, int outlen);
+int mlx5_core_dct_query(struct mlx5_ib_dev *dev, struct mlx5_core_dct *dct,
+ u32 *out, int outlen);
+
+int mlx5_core_set_delay_drop(struct mlx5_ib_dev *dev, u32 timeout_usec);
+
+void mlx5_core_destroy_rq_tracked(struct mlx5_ib_dev *dev,
+ struct mlx5_core_qp *rq);
+int mlx5_core_create_sq_tracked(struct mlx5_ib_dev *dev, u32 *in, int inlen,
+ struct mlx5_core_qp *sq);
+void mlx5_core_destroy_sq_tracked(struct mlx5_ib_dev *dev,
+ struct mlx5_core_qp *sq);
+
+int mlx5_core_create_rq_tracked(struct mlx5_ib_dev *dev, u32 *in, int inlen,
+ struct mlx5_core_qp *rq);
+
+struct mlx5_core_rsc_common *mlx5_core_res_hold(struct mlx5_ib_dev *dev,
+ int res_num,
+ enum mlx5_res_type res_type);
+void mlx5_core_res_put(struct mlx5_core_rsc_common *res);
+
+int mlx5_core_xrcd_alloc(struct mlx5_ib_dev *dev, u32 *xrcdn);
+int mlx5_core_xrcd_dealloc(struct mlx5_ib_dev *dev, u32 xrcdn);
+#endif /* _MLX5_IB_QP_H */
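The new qp.h keeps every firmware QP, DCT and XRCD command behind a struct mlx5_ib_dev handle, so mlx5_ib callers go through this header instead of reaching for mlx5_core exports. A minimal sketch of how this API is consumed together with the MLX5_GET accessors, mirroring query_qp_attr() above (the helper name example_read_qp_state is illustrative only, not part of the patch):

static int example_read_qp_state(struct mlx5_ib_dev *dev,
				 struct mlx5_core_qp *mqp, u8 *state)
{
	int outlen = MLX5_ST_SZ_BYTES(query_qp_out);
	void *qpc;
	u32 *outb;
	int err;

	outb = kzalloc(outlen, GFP_KERNEL);	/* output mailbox for QUERY_QP */
	if (!outb)
		return -ENOMEM;

	err = mlx5_core_qp_query(dev, mqp, outb, outlen);
	if (!err) {
		qpc = MLX5_ADDR_OF(query_qp_out, outb, qpc);
		*state = MLX5_GET(qpc, qpc, state);	/* decode a single field */
	}

	kfree(outb);
	return err;
}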
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/infiniband/hw/mlx5/qpc.c
index c3aea4cc2fff..c19d91d6dce8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/infiniband/hw/mlx5/qpc.c
@@ -1,46 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
- * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2013-2020, Mellanox Technologies inc. All rights reserved.
*/
#include <linux/gfp.h>
-#include <linux/export.h>
-#include <linux/mlx5/cmd.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/driver.h>
-#include <linux/mlx5/transobj.h>
+#include "mlx5_ib.h"
+#include "qp.h"
-#include "mlx5_core.h"
-#include "lib/eq.h"
-
-static int mlx5_core_drain_dct(struct mlx5_core_dev *dev,
+static int mlx5_core_drain_dct(struct mlx5_ib_dev *dev,
struct mlx5_core_dct *dct);
static struct mlx5_core_rsc_common *
@@ -124,11 +93,9 @@ static int rsc_event_notifier(struct notifier_block *nb,
{
struct mlx5_core_rsc_common *common;
struct mlx5_qp_table *table;
- struct mlx5_core_dev *dev;
struct mlx5_core_dct *dct;
u8 event_type = (u8)type;
struct mlx5_core_qp *qp;
- struct mlx5_priv *priv;
struct mlx5_eqe *eqe;
u32 rsn;
@@ -155,22 +122,12 @@ static int rsc_event_notifier(struct notifier_block *nb,
}
table = container_of(nb, struct mlx5_qp_table, nb);
- priv = container_of(table, struct mlx5_priv, qp_table);
- dev = container_of(priv, struct mlx5_core_dev, priv);
-
- mlx5_core_dbg(dev, "event (%d) arrived on resource 0x%x\n", eqe->type, rsn);
-
common = mlx5_get_rsc(table, rsn);
- if (!common) {
- mlx5_core_dbg(dev, "Async event for unknown resource 0x%x\n", rsn);
+ if (!common)
return NOTIFY_OK;
- }
- if (!is_event_type_allowed((rsn >> MLX5_USER_INDEX_LEN), event_type)) {
- mlx5_core_warn(dev, "event 0x%.2x is not allowed on resource 0x%.8x\n",
- event_type, rsn);
+ if (!is_event_type_allowed((rsn >> MLX5_USER_INDEX_LEN), event_type))
goto out;
- }
switch (common->res) {
case MLX5_RES_QP:
@@ -185,7 +142,7 @@ static int rsc_event_notifier(struct notifier_block *nb,
complete(&dct->drained);
break;
default:
- mlx5_core_warn(dev, "invalid resource type for 0x%x\n", rsn);
+ break;
}
out:
mlx5_core_put_rsc(common);
@@ -193,11 +150,10 @@ out:
return NOTIFY_OK;
}
-static int create_resource_common(struct mlx5_core_dev *dev,
- struct mlx5_core_qp *qp,
- int rsc_type)
+static int create_resource_common(struct mlx5_ib_dev *dev,
+ struct mlx5_core_qp *qp, int rsc_type)
{
- struct mlx5_qp_table *table = &dev->priv.qp_table;
+ struct mlx5_qp_table *table = &dev->qp_table;
int err;
qp->common.res = rsc_type;
@@ -216,10 +172,10 @@ static int create_resource_common(struct mlx5_core_dev *dev,
return 0;
}
-static void destroy_resource_common(struct mlx5_core_dev *dev,
+static void destroy_resource_common(struct mlx5_ib_dev *dev,
struct mlx5_core_qp *qp)
{
- struct mlx5_qp_table *table = &dev->priv.qp_table;
+ struct mlx5_qp_table *table = &dev->qp_table;
unsigned long flags;
spin_lock_irqsave(&table->lock, flags);
@@ -230,24 +186,19 @@ static void destroy_resource_common(struct mlx5_core_dev *dev,
wait_for_completion(&qp->common.free);
}
-static int _mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
+static int _mlx5_core_destroy_dct(struct mlx5_ib_dev *dev,
struct mlx5_core_dct *dct, bool need_cleanup)
{
- u32 out[MLX5_ST_SZ_DW(destroy_dct_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(destroy_dct_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_dct_in)] = {};
struct mlx5_core_qp *qp = &dct->mqp;
int err;
err = mlx5_core_drain_dct(dev, dct);
if (err) {
- if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+ if (dev->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
goto destroy;
- } else {
- mlx5_core_warn(
- dev, "failed drain DCT 0x%x with error 0x%x\n",
- qp->qpn, err);
- return err;
- }
+
+ return err;
}
wait_for_completion(&dct->drained);
destroy:
@@ -256,15 +207,12 @@ destroy:
MLX5_SET(destroy_dct_in, in, opcode, MLX5_CMD_OP_DESTROY_DCT);
MLX5_SET(destroy_dct_in, in, dctn, qp->qpn);
MLX5_SET(destroy_dct_in, in, uid, qp->uid);
- err = mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
- (void *)&out, sizeof(out));
+ err = mlx5_cmd_exec_in(dev->mdev, destroy_dct, in);
return err;
}
-int mlx5_core_create_dct(struct mlx5_core_dev *dev,
- struct mlx5_core_dct *dct,
- u32 *in, int inlen,
- u32 *out, int outlen)
+int mlx5_core_create_dct(struct mlx5_ib_dev *dev, struct mlx5_core_dct *dct,
+ u32 *in, int inlen, u32 *out, int outlen)
{
struct mlx5_core_qp *qp = &dct->mqp;
int err;
@@ -272,11 +220,9 @@ int mlx5_core_create_dct(struct mlx5_core_dev *dev,
init_completion(&dct->drained);
MLX5_SET(create_dct_in, in, opcode, MLX5_CMD_OP_CREATE_DCT);
- err = mlx5_cmd_exec(dev, in, inlen, out, outlen);
- if (err) {
- mlx5_core_warn(dev, "create DCT failed, ret %d\n", err);
+ err = mlx5_cmd_exec(dev->mdev, in, inlen, out, outlen);
+ if (err)
return err;
- }
qp->qpn = MLX5_GET(create_dct_out, out, dctn);
qp->uid = MLX5_GET(create_dct_in, in, uid);
@@ -289,108 +235,83 @@ err_cmd:
_mlx5_core_destroy_dct(dev, dct, false);
return err;
}
-EXPORT_SYMBOL_GPL(mlx5_core_create_dct);
-int mlx5_core_create_qp(struct mlx5_core_dev *dev,
- struct mlx5_core_qp *qp,
- u32 *in, int inlen)
+int mlx5_qpc_create_qp(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp,
+ u32 *in, int inlen, u32 *out)
{
- u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {0};
- u32 dout[MLX5_ST_SZ_DW(destroy_qp_out)];
- u32 din[MLX5_ST_SZ_DW(destroy_qp_in)];
+ u32 din[MLX5_ST_SZ_DW(destroy_qp_in)] = {};
int err;
MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
- err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+ err = mlx5_cmd_exec(dev->mdev, in, inlen, out,
+ MLX5_ST_SZ_BYTES(create_qp_out));
if (err)
return err;
qp->uid = MLX5_GET(create_qp_in, in, uid);
qp->qpn = MLX5_GET(create_qp_out, out, qpn);
- mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn);
err = create_resource_common(dev, qp, MLX5_RES_QP);
if (err)
goto err_cmd;
- err = mlx5_debug_qp_add(dev, qp);
- if (err)
- mlx5_core_dbg(dev, "failed adding QP 0x%x to debug file system\n",
- qp->qpn);
-
- atomic_inc(&dev->num_qps);
+ mlx5_debug_qp_add(dev->mdev, qp);
return 0;
err_cmd:
- memset(din, 0, sizeof(din));
- memset(dout, 0, sizeof(dout));
MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP);
MLX5_SET(destroy_qp_in, din, qpn, qp->qpn);
MLX5_SET(destroy_qp_in, din, uid, qp->uid);
- mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
+ mlx5_cmd_exec_in(dev->mdev, destroy_qp, din);
return err;
}
-EXPORT_SYMBOL_GPL(mlx5_core_create_qp);
-static int mlx5_core_drain_dct(struct mlx5_core_dev *dev,
+static int mlx5_core_drain_dct(struct mlx5_ib_dev *dev,
struct mlx5_core_dct *dct)
{
- u32 out[MLX5_ST_SZ_DW(drain_dct_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(drain_dct_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(drain_dct_in)] = {};
struct mlx5_core_qp *qp = &dct->mqp;
MLX5_SET(drain_dct_in, in, opcode, MLX5_CMD_OP_DRAIN_DCT);
MLX5_SET(drain_dct_in, in, dctn, qp->qpn);
MLX5_SET(drain_dct_in, in, uid, qp->uid);
- return mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
- (void *)&out, sizeof(out));
+ return mlx5_cmd_exec_in(dev->mdev, drain_dct, in);
}
-int mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
+int mlx5_core_destroy_dct(struct mlx5_ib_dev *dev,
struct mlx5_core_dct *dct)
{
return _mlx5_core_destroy_dct(dev, dct, true);
}
-EXPORT_SYMBOL_GPL(mlx5_core_destroy_dct);
-int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
- struct mlx5_core_qp *qp)
+int mlx5_core_destroy_qp(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp)
{
- u32 out[MLX5_ST_SZ_DW(destroy_qp_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {0};
- int err;
+ u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {};
- mlx5_debug_qp_remove(dev, qp);
+ mlx5_debug_qp_remove(dev->mdev, qp);
destroy_resource_common(dev, qp);
MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
MLX5_SET(destroy_qp_in, in, qpn, qp->qpn);
MLX5_SET(destroy_qp_in, in, uid, qp->uid);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
- if (err)
- return err;
-
- atomic_dec(&dev->num_qps);
+ mlx5_cmd_exec_in(dev->mdev, destroy_qp, in);
return 0;
}
-EXPORT_SYMBOL_GPL(mlx5_core_destroy_qp);
-int mlx5_core_set_delay_drop(struct mlx5_core_dev *dev,
+int mlx5_core_set_delay_drop(struct mlx5_ib_dev *dev,
u32 timeout_usec)
{
- u32 out[MLX5_ST_SZ_DW(set_delay_drop_params_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(set_delay_drop_params_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(set_delay_drop_params_in)] = {};
MLX5_SET(set_delay_drop_params_in, in, opcode,
MLX5_CMD_OP_SET_DELAY_DROP_PARAMS);
MLX5_SET(set_delay_drop_params_in, in, delay_drop_timeout,
timeout_usec / 100);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev->mdev, set_delay_drop_params, in);
}
-EXPORT_SYMBOL_GPL(mlx5_core_set_delay_drop);
struct mbox_info {
u32 *in;
@@ -420,9 +341,30 @@ static void mbox_free(struct mbox_info *mbox)
kfree(mbox->out);
}
+static int get_ece_from_mbox(void *out, u16 opcode)
+{
+ int ece = 0;
+
+ switch (opcode) {
+ case MLX5_CMD_OP_INIT2RTR_QP:
+ ece = MLX5_GET(init2rtr_qp_out, out, ece);
+ break;
+ case MLX5_CMD_OP_RTR2RTS_QP:
+ ece = MLX5_GET(rtr2rts_qp_out, out, ece);
+ break;
+ case MLX5_CMD_OP_RTS2RTS_QP:
+ ece = MLX5_GET(rts2rts_qp_out, out, ece);
+ break;
+ default:
+ break;
+ }
+
+ return ece;
+}
+
static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
u32 opt_param_mask, void *qpc,
- struct mbox_info *mbox, u16 uid)
+ struct mbox_info *mbox, u16 uid, u32 ece)
{
mbox->out = NULL;
mbox->in = NULL;
@@ -470,18 +412,21 @@ static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
return -ENOMEM;
MOD_QP_IN_SET_QPC(init2rtr_qp, mbox->in, opcode, qpn,
opt_param_mask, qpc, uid);
+ MLX5_SET(init2rtr_qp_in, mbox->in, ece, ece);
break;
case MLX5_CMD_OP_RTR2RTS_QP:
if (MBOX_ALLOC(mbox, rtr2rts_qp))
return -ENOMEM;
MOD_QP_IN_SET_QPC(rtr2rts_qp, mbox->in, opcode, qpn,
opt_param_mask, qpc, uid);
+ MLX5_SET(rtr2rts_qp_in, mbox->in, ece, ece);
break;
case MLX5_CMD_OP_RTS2RTS_QP:
if (MBOX_ALLOC(mbox, rts2rts_qp))
return -ENOMEM;
MOD_QP_IN_SET_QPC(rts2rts_qp, mbox->in, opcode, qpn,
opt_param_mask, qpc, uid);
+ MLX5_SET(rts2rts_qp_in, mbox->in, ece, ece);
break;
case MLX5_CMD_OP_SQERR2RTS_QP:
if (MBOX_ALLOC(mbox, sqerr2rts_qp))
@@ -496,120 +441,116 @@ static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
opt_param_mask, qpc, uid);
break;
default:
- mlx5_core_err(dev, "Unknown transition for modify QP: OP(0x%x) QPN(0x%x)\n",
- opcode, qpn);
return -EINVAL;
}
return 0;
}
-int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode,
- u32 opt_param_mask, void *qpc,
- struct mlx5_core_qp *qp)
+int mlx5_core_qp_modify(struct mlx5_ib_dev *dev, u16 opcode, u32 opt_param_mask,
+ void *qpc, struct mlx5_core_qp *qp, u32 *ece)
{
struct mbox_info mbox;
int err;
- err = modify_qp_mbox_alloc(dev, opcode, qp->qpn,
- opt_param_mask, qpc, &mbox, qp->uid);
+ err = modify_qp_mbox_alloc(dev->mdev, opcode, qp->qpn, opt_param_mask,
+ qpc, &mbox, qp->uid, (ece) ? *ece : 0);
if (err)
return err;
- err = mlx5_cmd_exec(dev, mbox.in, mbox.inlen, mbox.out, mbox.outlen);
+ err = mlx5_cmd_exec(dev->mdev, mbox.in, mbox.inlen, mbox.out,
+ mbox.outlen);
+
+ if (ece)
+ *ece = get_ece_from_mbox(mbox.out, opcode);
+
mbox_free(&mbox);
return err;
}
-EXPORT_SYMBOL_GPL(mlx5_core_qp_modify);
-void mlx5_init_qp_table(struct mlx5_core_dev *dev)
+int mlx5_init_qp_table(struct mlx5_ib_dev *dev)
{
- struct mlx5_qp_table *table = &dev->priv.qp_table;
+ struct mlx5_qp_table *table = &dev->qp_table;
- memset(table, 0, sizeof(*table));
spin_lock_init(&table->lock);
INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
- mlx5_qp_debugfs_init(dev);
+ mlx5_qp_debugfs_init(dev->mdev);
table->nb.notifier_call = rsc_event_notifier;
- mlx5_notifier_register(dev, &table->nb);
+ mlx5_notifier_register(dev->mdev, &table->nb);
+
+ return 0;
}
-void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev)
+void mlx5_cleanup_qp_table(struct mlx5_ib_dev *dev)
{
- struct mlx5_qp_table *table = &dev->priv.qp_table;
+ struct mlx5_qp_table *table = &dev->qp_table;
- mlx5_notifier_unregister(dev, &table->nb);
- mlx5_qp_debugfs_cleanup(dev);
+ mlx5_notifier_unregister(dev->mdev, &table->nb);
+ mlx5_qp_debugfs_cleanup(dev->mdev);
}
-int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
+int mlx5_core_qp_query(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp,
u32 *out, int outlen)
{
- u32 in[MLX5_ST_SZ_DW(query_qp_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(query_qp_in)] = {};
MLX5_SET(query_qp_in, in, opcode, MLX5_CMD_OP_QUERY_QP);
MLX5_SET(query_qp_in, in, qpn, qp->qpn);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
+ return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, outlen);
}
-EXPORT_SYMBOL_GPL(mlx5_core_qp_query);
-int mlx5_core_dct_query(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct,
+int mlx5_core_dct_query(struct mlx5_ib_dev *dev, struct mlx5_core_dct *dct,
u32 *out, int outlen)
{
- u32 in[MLX5_ST_SZ_DW(query_dct_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(query_dct_in)] = {};
struct mlx5_core_qp *qp = &dct->mqp;
MLX5_SET(query_dct_in, in, opcode, MLX5_CMD_OP_QUERY_DCT);
MLX5_SET(query_dct_in, in, dctn, qp->qpn);
- return mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
- (void *)out, outlen);
+ return mlx5_cmd_exec(dev->mdev, (void *)&in, sizeof(in), (void *)out,
+ outlen);
}
-EXPORT_SYMBOL_GPL(mlx5_core_dct_query);
-int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn)
+int mlx5_core_xrcd_alloc(struct mlx5_ib_dev *dev, u32 *xrcdn)
{
- u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)] = {};
int err;
MLX5_SET(alloc_xrcd_in, in, opcode, MLX5_CMD_OP_ALLOC_XRCD);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_inout(dev->mdev, alloc_xrcd, in, out);
if (!err)
*xrcdn = MLX5_GET(alloc_xrcd_out, out, xrcd);
return err;
}
-EXPORT_SYMBOL_GPL(mlx5_core_xrcd_alloc);
-int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn)
+int mlx5_core_xrcd_dealloc(struct mlx5_ib_dev *dev, u32 xrcdn)
{
- u32 out[MLX5_ST_SZ_DW(dealloc_xrcd_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)] = {};
MLX5_SET(dealloc_xrcd_in, in, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
MLX5_SET(dealloc_xrcd_in, in, xrcd, xrcdn);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev->mdev, dealloc_xrcd, in);
}
-EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc);
-static void destroy_rq_tracked(struct mlx5_core_dev *dev, u32 rqn, u16 uid)
+static void destroy_rq_tracked(struct mlx5_ib_dev *dev, u32 rqn, u16 uid)
{
- u32 in[MLX5_ST_SZ_DW(destroy_rq_in)] = {};
- u32 out[MLX5_ST_SZ_DW(destroy_rq_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(destroy_rq_in)] = {};
MLX5_SET(destroy_rq_in, in, opcode, MLX5_CMD_OP_DESTROY_RQ);
MLX5_SET(destroy_rq_in, in, rqn, rqn);
MLX5_SET(destroy_rq_in, in, uid, uid);
- mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec_in(dev->mdev, destroy_rq, in);
}
-int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
+int mlx5_core_create_rq_tracked(struct mlx5_ib_dev *dev, u32 *in, int inlen,
struct mlx5_core_qp *rq)
{
int err;
u32 rqn;
- err = mlx5_core_create_rq(dev, in, inlen, &rqn);
+ err = mlx5_core_create_rq(dev->mdev, in, inlen, &rqn);
if (err)
return err;
@@ -626,39 +567,37 @@ err_destroy_rq:
return err;
}
-EXPORT_SYMBOL(mlx5_core_create_rq_tracked);
-void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev,
+void mlx5_core_destroy_rq_tracked(struct mlx5_ib_dev *dev,
struct mlx5_core_qp *rq)
{
destroy_resource_common(dev, rq);
destroy_rq_tracked(dev, rq->qpn, rq->uid);
}
-EXPORT_SYMBOL(mlx5_core_destroy_rq_tracked);
-static void destroy_sq_tracked(struct mlx5_core_dev *dev, u32 sqn, u16 uid)
+static void destroy_sq_tracked(struct mlx5_ib_dev *dev, u32 sqn, u16 uid)
{
- u32 in[MLX5_ST_SZ_DW(destroy_sq_in)] = {};
- u32 out[MLX5_ST_SZ_DW(destroy_sq_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(destroy_sq_in)] = {};
MLX5_SET(destroy_sq_in, in, opcode, MLX5_CMD_OP_DESTROY_SQ);
MLX5_SET(destroy_sq_in, in, sqn, sqn);
MLX5_SET(destroy_sq_in, in, uid, uid);
- mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec_in(dev->mdev, destroy_sq, in);
}
-int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
+int mlx5_core_create_sq_tracked(struct mlx5_ib_dev *dev, u32 *in, int inlen,
struct mlx5_core_qp *sq)
{
+ u32 out[MLX5_ST_SZ_DW(create_sq_out)] = {};
int err;
- u32 sqn;
- err = mlx5_core_create_sq(dev, in, inlen, &sqn);
+ MLX5_SET(create_sq_in, in, opcode, MLX5_CMD_OP_CREATE_SQ);
+ err = mlx5_cmd_exec(dev->mdev, in, inlen, out, sizeof(out));
if (err)
return err;
+ sq->qpn = MLX5_GET(create_sq_out, out, sqn);
sq->uid = MLX5_GET(create_sq_in, in, uid);
- sq->qpn = sqn;
err = create_resource_common(dev, sq, MLX5_RES_SQ);
if (err)
goto err_destroy_sq;
@@ -670,68 +609,25 @@ err_destroy_sq:
return err;
}
-EXPORT_SYMBOL(mlx5_core_create_sq_tracked);
-void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev,
+void mlx5_core_destroy_sq_tracked(struct mlx5_ib_dev *dev,
struct mlx5_core_qp *sq)
{
destroy_resource_common(dev, sq);
destroy_sq_tracked(dev, sq->qpn, sq->uid);
}
-EXPORT_SYMBOL(mlx5_core_destroy_sq_tracked);
-
-int mlx5_core_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id)
-{
- u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {0};
- int err;
-
- MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
- if (!err)
- *counter_id = MLX5_GET(alloc_q_counter_out, out,
- counter_set_id);
- return err;
-}
-EXPORT_SYMBOL_GPL(mlx5_core_alloc_q_counter);
-
-int mlx5_core_dealloc_q_counter(struct mlx5_core_dev *dev, u16 counter_id)
-{
- u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(dealloc_q_counter_out)] = {0};
-
- MLX5_SET(dealloc_q_counter_in, in, opcode,
- MLX5_CMD_OP_DEALLOC_Q_COUNTER);
- MLX5_SET(dealloc_q_counter_in, in, counter_set_id, counter_id);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-EXPORT_SYMBOL_GPL(mlx5_core_dealloc_q_counter);
-
-int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id,
- int reset, void *out, int out_size)
-{
- u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {0};
-
- MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
- MLX5_SET(query_q_counter_in, in, clear, reset);
- MLX5_SET(query_q_counter_in, in, counter_set_id, counter_id);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size);
-}
-EXPORT_SYMBOL_GPL(mlx5_core_query_q_counter);
-struct mlx5_core_rsc_common *mlx5_core_res_hold(struct mlx5_core_dev *dev,
+struct mlx5_core_rsc_common *mlx5_core_res_hold(struct mlx5_ib_dev *dev,
int res_num,
enum mlx5_res_type res_type)
{
u32 rsn = res_num | (res_type << MLX5_USER_INDEX_LEN);
- struct mlx5_qp_table *table = &dev->priv.qp_table;
+ struct mlx5_qp_table *table = &dev->qp_table;
return mlx5_get_rsc(table, rsn);
}
-EXPORT_SYMBOL_GPL(mlx5_core_res_hold);
void mlx5_core_res_put(struct mlx5_core_rsc_common *res)
{
mlx5_core_put_rsc(res);
}
-EXPORT_SYMBOL_GPL(mlx5_core_res_put);
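mlx5_core_qp_modify() above now threads an optional ECE value through the modify mailbox: the caller's value is stamped into the init2rtr/rtr2rts/rts2rts input layouts, and for those same transitions the firmware's reply is read back via get_ece_from_mbox(). A hedged sketch of the calling convention (the surrounding variables ucmd_ece, opt_param_mask, qpc and resp are illustrative, not taken from the patch):

	u32 ece = ucmd_ece;	/* ECE options requested by userspace */
	int err;

	/* Transitions that do not carry ECE can simply pass NULL here. */
	err = mlx5_core_qp_modify(dev, MLX5_CMD_OP_INIT2RTR_QP, opt_param_mask,
				  qpc, &qp->trans_qp.base.mqp, &ece);
	if (!err)
		resp.ece_options = ece;	/* ECE options granted by firmware */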
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index b1a8a9175040..6d1ff13d2283 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -310,12 +310,18 @@ int mlx5_ib_create_srq(struct ib_srq *ib_srq,
srq->msrq.event = mlx5_ib_srq_event;
srq->ibsrq.ext.xrc.srq_num = srq->msrq.srqn;
- if (udata)
- if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof(__u32))) {
+ if (udata) {
+ struct mlx5_ib_create_srq_resp resp = {
+ .srqn = srq->msrq.srqn,
+ };
+
+ if (ib_copy_to_udata(udata, &resp, min(udata->outlen,
+ sizeof(resp)))) {
mlx5_ib_dbg(dev, "copy to user failed\n");
err = -EFAULT;
goto err_core;
}
+ }
init_attr->attr.max_wr = srq->msrq.max - 1;
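The copy above bounds the response by min(udata->outlen, sizeof(resp)), so userspace built against an older, shorter layout of struct mlx5_ib_create_srq_resp still receives a valid (truncated) reply. The same idiom as a standalone fragment (names as in the hunk above):

	struct mlx5_ib_create_srq_resp resp = {
		.srqn = srq->msrq.srqn,
	};

	/* Copy only as many bytes as the caller left room for. */
	if (ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp))))
		return -EFAULT;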
diff --git a/drivers/infiniband/hw/mlx5/srq_cmd.c b/drivers/infiniband/hw/mlx5/srq_cmd.c
index 8fc3630a9d4c..6f5eadc4d183 100644
--- a/drivers/infiniband/hw/mlx5/srq_cmd.c
+++ b/drivers/infiniband/hw/mlx5/srq_cmd.c
@@ -5,9 +5,9 @@
#include <linux/kernel.h>
#include <linux/mlx5/driver.h>
-#include <linux/mlx5/cmd.h>
#include "mlx5_ib.h"
#include "srq.h"
+#include "qp.h"
static int get_pas_size(struct mlx5_srq_attr *in)
{
@@ -132,38 +132,33 @@ static int create_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
static int destroy_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
{
- u32 srq_in[MLX5_ST_SZ_DW(destroy_srq_in)] = {0};
- u32 srq_out[MLX5_ST_SZ_DW(destroy_srq_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_srq_in)] = {};
- MLX5_SET(destroy_srq_in, srq_in, opcode,
- MLX5_CMD_OP_DESTROY_SRQ);
- MLX5_SET(destroy_srq_in, srq_in, srqn, srq->srqn);
- MLX5_SET(destroy_srq_in, srq_in, uid, srq->uid);
+ MLX5_SET(destroy_srq_in, in, opcode, MLX5_CMD_OP_DESTROY_SRQ);
+ MLX5_SET(destroy_srq_in, in, srqn, srq->srqn);
+ MLX5_SET(destroy_srq_in, in, uid, srq->uid);
- return mlx5_cmd_exec(dev->mdev, srq_in, sizeof(srq_in), srq_out,
- sizeof(srq_out));
+ return mlx5_cmd_exec_in(dev->mdev, destroy_srq, in);
}
static int arm_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
u16 lwm, int is_srq)
{
- u32 srq_in[MLX5_ST_SZ_DW(arm_rq_in)] = {0};
- u32 srq_out[MLX5_ST_SZ_DW(arm_rq_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(arm_rq_in)] = {};
- MLX5_SET(arm_rq_in, srq_in, opcode, MLX5_CMD_OP_ARM_RQ);
- MLX5_SET(arm_rq_in, srq_in, op_mod, MLX5_ARM_RQ_IN_OP_MOD_SRQ);
- MLX5_SET(arm_rq_in, srq_in, srq_number, srq->srqn);
- MLX5_SET(arm_rq_in, srq_in, lwm, lwm);
- MLX5_SET(arm_rq_in, srq_in, uid, srq->uid);
+ MLX5_SET(arm_rq_in, in, opcode, MLX5_CMD_OP_ARM_RQ);
+ MLX5_SET(arm_rq_in, in, op_mod, MLX5_ARM_RQ_IN_OP_MOD_SRQ);
+ MLX5_SET(arm_rq_in, in, srq_number, srq->srqn);
+ MLX5_SET(arm_rq_in, in, lwm, lwm);
+ MLX5_SET(arm_rq_in, in, uid, srq->uid);
- return mlx5_cmd_exec(dev->mdev, srq_in, sizeof(srq_in), srq_out,
- sizeof(srq_out));
+ return mlx5_cmd_exec_in(dev->mdev, arm_rq, in);
}
static int query_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
struct mlx5_srq_attr *out)
{
- u32 srq_in[MLX5_ST_SZ_DW(query_srq_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(query_srq_in)] = {};
u32 *srq_out;
void *srqc;
int err;
@@ -172,11 +167,9 @@ static int query_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
if (!srq_out)
return -ENOMEM;
- MLX5_SET(query_srq_in, srq_in, opcode,
- MLX5_CMD_OP_QUERY_SRQ);
- MLX5_SET(query_srq_in, srq_in, srqn, srq->srqn);
- err = mlx5_cmd_exec(dev->mdev, srq_in, sizeof(srq_in), srq_out,
- MLX5_ST_SZ_BYTES(query_srq_out));
+ MLX5_SET(query_srq_in, in, opcode, MLX5_CMD_OP_QUERY_SRQ);
+ MLX5_SET(query_srq_in, in, srqn, srq->srqn);
+ err = mlx5_cmd_exec_inout(dev->mdev, query_srq, in, srq_out);
if (err)
goto out;
@@ -234,39 +227,35 @@ out:
static int destroy_xrc_srq_cmd(struct mlx5_ib_dev *dev,
struct mlx5_core_srq *srq)
{
- u32 xrcsrq_in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)] = {0};
- u32 xrcsrq_out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)] = {};
- MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, opcode,
- MLX5_CMD_OP_DESTROY_XRC_SRQ);
- MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
- MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, uid, srq->uid);
+ MLX5_SET(destroy_xrc_srq_in, in, opcode, MLX5_CMD_OP_DESTROY_XRC_SRQ);
+ MLX5_SET(destroy_xrc_srq_in, in, xrc_srqn, srq->srqn);
+ MLX5_SET(destroy_xrc_srq_in, in, uid, srq->uid);
- return mlx5_cmd_exec(dev->mdev, xrcsrq_in, sizeof(xrcsrq_in),
- xrcsrq_out, sizeof(xrcsrq_out));
+ return mlx5_cmd_exec_in(dev->mdev, destroy_xrc_srq, in);
}
static int arm_xrc_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
u16 lwm)
{
- u32 xrcsrq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {0};
- u32 xrcsrq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {};
- MLX5_SET(arm_xrc_srq_in, xrcsrq_in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ);
- MLX5_SET(arm_xrc_srq_in, xrcsrq_in, op_mod, MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ);
- MLX5_SET(arm_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
- MLX5_SET(arm_xrc_srq_in, xrcsrq_in, lwm, lwm);
- MLX5_SET(arm_xrc_srq_in, xrcsrq_in, uid, srq->uid);
+ MLX5_SET(arm_xrc_srq_in, in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ);
+ MLX5_SET(arm_xrc_srq_in, in, op_mod,
+ MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ);
+ MLX5_SET(arm_xrc_srq_in, in, xrc_srqn, srq->srqn);
+ MLX5_SET(arm_xrc_srq_in, in, lwm, lwm);
+ MLX5_SET(arm_xrc_srq_in, in, uid, srq->uid);
- return mlx5_cmd_exec(dev->mdev, xrcsrq_in, sizeof(xrcsrq_in),
- xrcsrq_out, sizeof(xrcsrq_out));
+ return mlx5_cmd_exec_in(dev->mdev, arm_xrc_srq, in);
}
static int query_xrc_srq_cmd(struct mlx5_ib_dev *dev,
struct mlx5_core_srq *srq,
struct mlx5_srq_attr *out)
{
- u32 xrcsrq_in[MLX5_ST_SZ_DW(query_xrc_srq_in)];
+ u32 in[MLX5_ST_SZ_DW(query_xrc_srq_in)] = {};
u32 *xrcsrq_out;
void *xrc_srqc;
int err;
@@ -274,14 +263,11 @@ static int query_xrc_srq_cmd(struct mlx5_ib_dev *dev,
xrcsrq_out = kvzalloc(MLX5_ST_SZ_BYTES(query_xrc_srq_out), GFP_KERNEL);
if (!xrcsrq_out)
return -ENOMEM;
- memset(xrcsrq_in, 0, sizeof(xrcsrq_in));
- MLX5_SET(query_xrc_srq_in, xrcsrq_in, opcode,
- MLX5_CMD_OP_QUERY_XRC_SRQ);
- MLX5_SET(query_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
+ MLX5_SET(query_xrc_srq_in, in, opcode, MLX5_CMD_OP_QUERY_XRC_SRQ);
+ MLX5_SET(query_xrc_srq_in, in, xrc_srqn, srq->srqn);
- err = mlx5_cmd_exec(dev->mdev, xrcsrq_in, sizeof(xrcsrq_in),
- xrcsrq_out, MLX5_ST_SZ_BYTES(query_xrc_srq_out));
+ err = mlx5_cmd_exec_inout(dev->mdev, query_xrc_srq, in, xrcsrq_out);
if (err)
goto out;
@@ -341,13 +327,12 @@ out:
static int destroy_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
{
- u32 in[MLX5_ST_SZ_DW(destroy_rmp_in)] = {};
- u32 out[MLX5_ST_SZ_DW(destroy_rmp_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(destroy_rmp_in)] = {};
MLX5_SET(destroy_rmp_in, in, opcode, MLX5_CMD_OP_DESTROY_RMP);
MLX5_SET(destroy_rmp_in, in, rmpn, srq->srqn);
MLX5_SET(destroy_rmp_in, in, uid, srq->uid);
- return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev->mdev, destroy_rmp, in);
}
static int arm_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
@@ -384,7 +369,7 @@ static int arm_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
MLX5_SET(modify_rmp_in, in, opcode, MLX5_CMD_OP_MODIFY_RMP);
- err = mlx5_cmd_exec(dev->mdev, in, inlen, out, outlen);
+ err = mlx5_cmd_exec_inout(dev->mdev, modify_rmp, in, out);
out:
kvfree(in);
@@ -414,7 +399,7 @@ static int query_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
MLX5_SET(query_rmp_in, rmp_in, opcode, MLX5_CMD_OP_QUERY_RMP);
MLX5_SET(query_rmp_in, rmp_in, rmpn, srq->srqn);
- err = mlx5_cmd_exec(dev->mdev, rmp_in, inlen, rmp_out, outlen);
+ err = mlx5_cmd_exec_inout(dev->mdev, query_rmp, rmp_in, rmp_out);
if (err)
goto out;
@@ -477,36 +462,34 @@ static int create_xrq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
static int destroy_xrq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
{
- u32 in[MLX5_ST_SZ_DW(destroy_xrq_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(destroy_xrq_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_xrq_in)] = {};
MLX5_SET(destroy_xrq_in, in, opcode, MLX5_CMD_OP_DESTROY_XRQ);
- MLX5_SET(destroy_xrq_in, in, xrqn, srq->srqn);
+ MLX5_SET(destroy_xrq_in, in, xrqn, srq->srqn);
MLX5_SET(destroy_xrq_in, in, uid, srq->uid);
- return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev->mdev, destroy_xrq, in);
}
static int arm_xrq_cmd(struct mlx5_ib_dev *dev,
struct mlx5_core_srq *srq,
u16 lwm)
{
- u32 out[MLX5_ST_SZ_DW(arm_rq_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(arm_rq_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(arm_rq_in)] = {};
- MLX5_SET(arm_rq_in, in, opcode, MLX5_CMD_OP_ARM_RQ);
- MLX5_SET(arm_rq_in, in, op_mod, MLX5_ARM_RQ_IN_OP_MOD_XRQ);
+ MLX5_SET(arm_rq_in, in, opcode, MLX5_CMD_OP_ARM_RQ);
+ MLX5_SET(arm_rq_in, in, op_mod, MLX5_ARM_RQ_IN_OP_MOD_XRQ);
MLX5_SET(arm_rq_in, in, srq_number, srq->srqn);
- MLX5_SET(arm_rq_in, in, lwm, lwm);
+ MLX5_SET(arm_rq_in, in, lwm, lwm);
MLX5_SET(arm_rq_in, in, uid, srq->uid);
- return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev->mdev, arm_rq, in);
}
static int query_xrq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
struct mlx5_srq_attr *out)
{
- u32 in[MLX5_ST_SZ_DW(query_xrq_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(query_xrq_in)] = {};
u32 *xrq_out;
int outlen = MLX5_ST_SZ_BYTES(query_xrq_out);
void *xrqc;
@@ -519,7 +502,7 @@ static int query_xrq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
MLX5_SET(query_xrq_in, in, opcode, MLX5_CMD_OP_QUERY_XRQ);
MLX5_SET(query_xrq_in, in, xrqn, srq->srqn);
- err = mlx5_cmd_exec(dev->mdev, in, sizeof(in), xrq_out, outlen);
+ err = mlx5_cmd_exec_inout(dev->mdev, query_xrq, in, xrq_out);
if (err)
goto out;
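Every conversion in srq_cmd.c above follows the same shape: drop the hand-rolled out[] array and the explicit sizeof() bookkeeping, and let mlx5_cmd_exec_in()/mlx5_cmd_exec_inout() derive the mailbox sizes from the command name. A condensed before/after sketch for one command (ARM_RQ), based on the hunks above; the note on what the helper supplies internally is an assumption, see include/linux/mlx5/driver.h for the exact macros:

	/* Before: explicit output mailbox and byte counts. */
	u32 out[MLX5_ST_SZ_DW(arm_rq_out)] = {};
	u32 in[MLX5_ST_SZ_DW(arm_rq_in)] = {};
	int err;

	MLX5_SET(arm_rq_in, in, opcode, MLX5_CMD_OP_ARM_RQ);
	MLX5_SET(arm_rq_in, in, srq_number, srq->srqn);
	err = mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));

	/* After: the helper derives both mailbox sizes (and a zeroed output
	 * buffer) from the command name, so out[] and the sizeof() arguments
	 * disappear.
	 */
	err = mlx5_cmd_exec_in(dev->mdev, arm_rq, in);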
diff --git a/drivers/infiniband/hw/mlx5/wr.c b/drivers/infiniband/hw/mlx5/wr.c
new file mode 100644
index 000000000000..2c6df1c43b55
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/wr.c
@@ -0,0 +1,1504 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2020, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include <linux/gfp.h>
+#include <linux/mlx5/qp.h>
+#include <linux/mlx5/driver.h>
+#include "wr.h"
+
+static const u32 mlx5_ib_opcode[] = {
+ [IB_WR_SEND] = MLX5_OPCODE_SEND,
+ [IB_WR_LSO] = MLX5_OPCODE_LSO,
+ [IB_WR_SEND_WITH_IMM] = MLX5_OPCODE_SEND_IMM,
+ [IB_WR_RDMA_WRITE] = MLX5_OPCODE_RDMA_WRITE,
+ [IB_WR_RDMA_WRITE_WITH_IMM] = MLX5_OPCODE_RDMA_WRITE_IMM,
+ [IB_WR_RDMA_READ] = MLX5_OPCODE_RDMA_READ,
+ [IB_WR_ATOMIC_CMP_AND_SWP] = MLX5_OPCODE_ATOMIC_CS,
+ [IB_WR_ATOMIC_FETCH_AND_ADD] = MLX5_OPCODE_ATOMIC_FA,
+ [IB_WR_SEND_WITH_INV] = MLX5_OPCODE_SEND_INVAL,
+ [IB_WR_LOCAL_INV] = MLX5_OPCODE_UMR,
+ [IB_WR_REG_MR] = MLX5_OPCODE_UMR,
+ [IB_WR_MASKED_ATOMIC_CMP_AND_SWP] = MLX5_OPCODE_ATOMIC_MASKED_CS,
+ [IB_WR_MASKED_ATOMIC_FETCH_AND_ADD] = MLX5_OPCODE_ATOMIC_MASKED_FA,
+ [MLX5_IB_WR_UMR] = MLX5_OPCODE_UMR,
+};
+
+/* handle_post_send_edge - Check if we have reached the SQ edge. If so, advance
+ * to the next fragment edge and refresh the address translation for the
+ * current WQE position.
+ * @sq: SQ buffer.
+ * @seg: Current WQE position (16B aligned).
+ * @wqe_sz: Total current WQE size [16B].
+ * @cur_edge: Updated current edge.
+ */
+static inline void handle_post_send_edge(struct mlx5_ib_wq *sq, void **seg,
+ u32 wqe_sz, void **cur_edge)
+{
+ u32 idx;
+
+ if (likely(*seg != *cur_edge))
+ return;
+
+ idx = (sq->cur_post + (wqe_sz >> 2)) & (sq->wqe_cnt - 1);
+ *cur_edge = get_sq_edge(sq, idx);
+
+ *seg = mlx5_frag_buf_get_wqe(&sq->fbc, idx);
+}
+
+/* memcpy_send_wqe - copy data from src to the WQE and update the relevant WQ
+ * pointers. On return, @seg is aligned to 16B regardless of the copied size.
+ * @sq: SQ buffer.
+ * @cur_edge: Updated current edge.
+ * @seg: Current WQE position (16B aligned).
+ * @wqe_sz: Total current WQE size [16B].
+ * @src: Pointer to copy from.
+ * @n: Number of bytes to copy.
+ */
+static inline void memcpy_send_wqe(struct mlx5_ib_wq *sq, void **cur_edge,
+ void **seg, u32 *wqe_sz, const void *src,
+ size_t n)
+{
+ while (likely(n)) {
+ size_t leftlen = *cur_edge - *seg;
+ size_t copysz = min_t(size_t, leftlen, n);
+ size_t stride;
+
+ memcpy(*seg, src, copysz);
+
+ n -= copysz;
+ src += copysz;
+ stride = !n ? ALIGN(copysz, 16) : copysz;
+ *seg += stride;
+ *wqe_sz += stride >> 4;
+ handle_post_send_edge(sq, seg, *wqe_sz, cur_edge);
+ }
+}
+
+static int mlx5_wq_overflow(struct mlx5_ib_wq *wq, int nreq,
+ struct ib_cq *ib_cq)
+{
+ struct mlx5_ib_cq *cq;
+ unsigned int cur;
+
+ cur = wq->head - wq->tail;
+ if (likely(cur + nreq < wq->max_post))
+ return 0;
+
+ cq = to_mcq(ib_cq);
+ spin_lock(&cq->lock);
+ cur = wq->head - wq->tail;
+ spin_unlock(&cq->lock);
+
+ return cur + nreq >= wq->max_post;
+}
+
+static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
+ u64 remote_addr, u32 rkey)
+{
+ rseg->raddr = cpu_to_be64(remote_addr);
+ rseg->rkey = cpu_to_be32(rkey);
+ rseg->reserved = 0;
+}
+
+static void set_eth_seg(const struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
+ void **seg, int *size, void **cur_edge)
+{
+ struct mlx5_wqe_eth_seg *eseg = *seg;
+
+ memset(eseg, 0, sizeof(struct mlx5_wqe_eth_seg));
+
+ if (wr->send_flags & IB_SEND_IP_CSUM)
+ eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM |
+ MLX5_ETH_WQE_L4_CSUM;
+
+ if (wr->opcode == IB_WR_LSO) {
+ struct ib_ud_wr *ud_wr = container_of(wr, struct ib_ud_wr, wr);
+ size_t left, copysz;
+ void *pdata = ud_wr->header;
+ size_t stride;
+
+ left = ud_wr->hlen;
+ eseg->mss = cpu_to_be16(ud_wr->mss);
+ eseg->inline_hdr.sz = cpu_to_be16(left);
+
+ /* memcpy_send_wqe expects a 16B-aligned address, so we first copy up
+ * to the current edge and only then, if needed, fall through to
+ * memcpy_send_wqe.
+ */
+ copysz = min_t(u64, *cur_edge - (void *)eseg->inline_hdr.start,
+ left);
+ memcpy(eseg->inline_hdr.start, pdata, copysz);
+ stride = ALIGN(sizeof(struct mlx5_wqe_eth_seg) -
+ sizeof(eseg->inline_hdr.start) + copysz, 16);
+ *size += stride / 16;
+ *seg += stride;
+
+ if (copysz < left) {
+ handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
+ left -= copysz;
+ pdata += copysz;
+ memcpy_send_wqe(&qp->sq, cur_edge, seg, size, pdata,
+ left);
+ }
+
+ return;
+ }
+
+ *seg += sizeof(struct mlx5_wqe_eth_seg);
+ *size += sizeof(struct mlx5_wqe_eth_seg) / 16;
+}
+
+static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
+ const struct ib_send_wr *wr)
+{
+ memcpy(&dseg->av, &to_mah(ud_wr(wr)->ah)->av, sizeof(struct mlx5_av));
+ dseg->av.dqp_dct =
+ cpu_to_be32(ud_wr(wr)->remote_qpn | MLX5_EXTENDED_UD_AV);
+ dseg->av.key.qkey.qkey = cpu_to_be32(ud_wr(wr)->remote_qkey);
+}
+
+static void set_data_ptr_seg(struct mlx5_wqe_data_seg *dseg, struct ib_sge *sg)
+{
+ dseg->byte_count = cpu_to_be32(sg->length);
+ dseg->lkey = cpu_to_be32(sg->lkey);
+ dseg->addr = cpu_to_be64(sg->addr);
+}
+
+static u64 get_xlt_octo(u64 bytes)
+{
+ return ALIGN(bytes, MLX5_IB_UMR_XLT_ALIGNMENT) /
+ MLX5_IB_UMR_OCTOWORD;
+}
+
+static __be64 frwr_mkey_mask(bool atomic)
+{
+ u64 result;
+
+ result = MLX5_MKEY_MASK_LEN |
+ MLX5_MKEY_MASK_PAGE_SIZE |
+ MLX5_MKEY_MASK_START_ADDR |
+ MLX5_MKEY_MASK_EN_RINVAL |
+ MLX5_MKEY_MASK_KEY |
+ MLX5_MKEY_MASK_LR |
+ MLX5_MKEY_MASK_LW |
+ MLX5_MKEY_MASK_RR |
+ MLX5_MKEY_MASK_RW |
+ MLX5_MKEY_MASK_SMALL_FENCE |
+ MLX5_MKEY_MASK_FREE;
+
+ if (atomic)
+ result |= MLX5_MKEY_MASK_A;
+
+ return cpu_to_be64(result);
+}
+
+static __be64 sig_mkey_mask(void)
+{
+ u64 result;
+
+ result = MLX5_MKEY_MASK_LEN |
+ MLX5_MKEY_MASK_PAGE_SIZE |
+ MLX5_MKEY_MASK_START_ADDR |
+ MLX5_MKEY_MASK_EN_SIGERR |
+ MLX5_MKEY_MASK_EN_RINVAL |
+ MLX5_MKEY_MASK_KEY |
+ MLX5_MKEY_MASK_LR |
+ MLX5_MKEY_MASK_LW |
+ MLX5_MKEY_MASK_RR |
+ MLX5_MKEY_MASK_RW |
+ MLX5_MKEY_MASK_SMALL_FENCE |
+ MLX5_MKEY_MASK_FREE |
+ MLX5_MKEY_MASK_BSF_EN;
+
+ return cpu_to_be64(result);
+}
+
+static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
+ struct mlx5_ib_mr *mr, u8 flags, bool atomic)
+{
+ int size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size;
+
+ memset(umr, 0, sizeof(*umr));
+
+ umr->flags = flags;
+ umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size));
+ umr->mkey_mask = frwr_mkey_mask(atomic);
+}
+
+static void set_linv_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr)
+{
+ memset(umr, 0, sizeof(*umr));
+ umr->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
+ umr->flags = MLX5_UMR_INLINE;
+}
+
+static __be64 get_umr_enable_mr_mask(void)
+{
+ u64 result;
+
+ result = MLX5_MKEY_MASK_KEY |
+ MLX5_MKEY_MASK_FREE;
+
+ return cpu_to_be64(result);
+}
+
+static __be64 get_umr_disable_mr_mask(void)
+{
+ u64 result;
+
+ result = MLX5_MKEY_MASK_FREE;
+
+ return cpu_to_be64(result);
+}
+
+static __be64 get_umr_update_translation_mask(void)
+{
+ u64 result;
+
+ result = MLX5_MKEY_MASK_LEN |
+ MLX5_MKEY_MASK_PAGE_SIZE |
+ MLX5_MKEY_MASK_START_ADDR;
+
+ return cpu_to_be64(result);
+}
+
+static __be64 get_umr_update_access_mask(int atomic)
+{
+ u64 result;
+
+ result = MLX5_MKEY_MASK_LR |
+ MLX5_MKEY_MASK_LW |
+ MLX5_MKEY_MASK_RR |
+ MLX5_MKEY_MASK_RW;
+
+ if (atomic)
+ result |= MLX5_MKEY_MASK_A;
+
+ return cpu_to_be64(result);
+}
+
+static __be64 get_umr_update_pd_mask(void)
+{
+ u64 result;
+
+ result = MLX5_MKEY_MASK_PD;
+
+ return cpu_to_be64(result);
+}
+
+static int umr_check_mkey_mask(struct mlx5_ib_dev *dev, u64 mask)
+{
+ if ((mask & MLX5_MKEY_MASK_PAGE_SIZE &&
+ MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled)) ||
+ (mask & MLX5_MKEY_MASK_A &&
+ MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled)))
+ return -EPERM;
+ return 0;
+}
+
+static int set_reg_umr_segment(struct mlx5_ib_dev *dev,
+ struct mlx5_wqe_umr_ctrl_seg *umr,
+ const struct ib_send_wr *wr, int atomic)
+{
+ const struct mlx5_umr_wr *umrwr = umr_wr(wr);
+
+ memset(umr, 0, sizeof(*umr));
+
+ if (!umrwr->ignore_free_state) {
+ if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE)
+ /* fail if free */
+ umr->flags = MLX5_UMR_CHECK_FREE;
+ else
+ /* fail if not free */
+ umr->flags = MLX5_UMR_CHECK_NOT_FREE;
+ }
+
+ umr->xlt_octowords = cpu_to_be16(get_xlt_octo(umrwr->xlt_size));
+ if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_XLT) {
+ u64 offset = get_xlt_octo(umrwr->offset);
+
+ umr->xlt_offset = cpu_to_be16(offset & 0xffff);
+ umr->xlt_offset_47_16 = cpu_to_be32(offset >> 16);
+ umr->flags |= MLX5_UMR_TRANSLATION_OFFSET_EN;
+ }
+ if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION)
+ umr->mkey_mask |= get_umr_update_translation_mask();
+ if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS) {
+ umr->mkey_mask |= get_umr_update_access_mask(atomic);
+ umr->mkey_mask |= get_umr_update_pd_mask();
+ }
+ if (wr->send_flags & MLX5_IB_SEND_UMR_ENABLE_MR)
+ umr->mkey_mask |= get_umr_enable_mr_mask();
+ if (wr->send_flags & MLX5_IB_SEND_UMR_DISABLE_MR)
+ umr->mkey_mask |= get_umr_disable_mr_mask();
+
+ if (!wr->num_sge)
+ umr->flags |= MLX5_UMR_INLINE;
+
+ return umr_check_mkey_mask(dev, be64_to_cpu(umr->mkey_mask));
+}
+
+static u8 get_umr_flags(int acc)
+{
+ return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC : 0) |
+ (acc & IB_ACCESS_REMOTE_WRITE ? MLX5_PERM_REMOTE_WRITE : 0) |
+ (acc & IB_ACCESS_REMOTE_READ ? MLX5_PERM_REMOTE_READ : 0) |
+ (acc & IB_ACCESS_LOCAL_WRITE ? MLX5_PERM_LOCAL_WRITE : 0) |
+ MLX5_PERM_LOCAL_READ | MLX5_PERM_UMR_EN;
+}
+
+static void set_reg_mkey_seg(struct mlx5_mkey_seg *seg,
+ struct mlx5_ib_mr *mr,
+ u32 key, int access)
+{
+ int ndescs = ALIGN(mr->ndescs + mr->meta_ndescs, 8) >> 1;
+
+ memset(seg, 0, sizeof(*seg));
+
+ if (mr->access_mode == MLX5_MKC_ACCESS_MODE_MTT)
+ seg->log2_page_size = ilog2(mr->ibmr.page_size);
+ else if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
+ /* KLMs take twice the size of MTTs */
+ ndescs *= 2;
+
+ seg->flags = get_umr_flags(access) | mr->access_mode;
+ seg->qpn_mkey7_0 = cpu_to_be32((key & 0xff) | 0xffffff00);
+ seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL);
+ seg->start_addr = cpu_to_be64(mr->ibmr.iova);
+ seg->len = cpu_to_be64(mr->ibmr.length);
+ seg->xlt_oct_size = cpu_to_be32(ndescs);
+}
+
+static void set_linv_mkey_seg(struct mlx5_mkey_seg *seg)
+{
+ memset(seg, 0, sizeof(*seg));
+ seg->status = MLX5_MKEY_STATUS_FREE;
+}
+
+static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg,
+ const struct ib_send_wr *wr)
+{
+ const struct mlx5_umr_wr *umrwr = umr_wr(wr);
+
+ memset(seg, 0, sizeof(*seg));
+ if (wr->send_flags & MLX5_IB_SEND_UMR_DISABLE_MR)
+ seg->status = MLX5_MKEY_STATUS_FREE;
+
+ seg->flags = convert_access(umrwr->access_flags);
+ if (umrwr->pd)
+ seg->flags_pd = cpu_to_be32(to_mpd(umrwr->pd)->pdn);
+ if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION &&
+ !umrwr->length)
+ seg->flags_pd |= cpu_to_be32(MLX5_MKEY_LEN64);
+
+ seg->start_addr = cpu_to_be64(umrwr->virt_addr);
+ seg->len = cpu_to_be64(umrwr->length);
+ seg->log2_page_size = umrwr->page_shift;
+ seg->qpn_mkey7_0 = cpu_to_be32(0xffffff00 |
+ mlx5_mkey_variant(umrwr->mkey));
+}
+
+static void set_reg_data_seg(struct mlx5_wqe_data_seg *dseg,
+ struct mlx5_ib_mr *mr,
+ struct mlx5_ib_pd *pd)
+{
+ int bcount = mr->desc_size * (mr->ndescs + mr->meta_ndescs);
+
+ dseg->addr = cpu_to_be64(mr->desc_map);
+ dseg->byte_count = cpu_to_be32(ALIGN(bcount, 64));
+ dseg->lkey = cpu_to_be32(pd->ibpd.local_dma_lkey);
+}
+
+static __be32 send_ieth(const struct ib_send_wr *wr)
+{
+ switch (wr->opcode) {
+ case IB_WR_SEND_WITH_IMM:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ return wr->ex.imm_data;
+
+ case IB_WR_SEND_WITH_INV:
+ return cpu_to_be32(wr->ex.invalidate_rkey);
+
+ default:
+ return 0;
+ }
+}
+
+static u8 calc_sig(void *wqe, int size)
+{
+ u8 *p = wqe;
+ u8 res = 0;
+ int i;
+
+ for (i = 0; i < size; i++)
+ res ^= p[i];
+
+ return ~res;
+}
+
+static u8 wq_sig(void *wqe)
+{
+ return calc_sig(wqe, (*((u8 *)wqe + 8) & 0x3f) << 4);
+}
+
+static int set_data_inl_seg(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr,
+ void **wqe, int *wqe_sz, void **cur_edge)
+{
+ struct mlx5_wqe_inline_seg *seg;
+ size_t offset;
+ int inl = 0;
+ int i;
+
+ seg = *wqe;
+ *wqe += sizeof(*seg);
+ offset = sizeof(*seg);
+
+ for (i = 0; i < wr->num_sge; i++) {
+ size_t len = wr->sg_list[i].length;
+ void *addr = (void *)(unsigned long)(wr->sg_list[i].addr);
+
+ inl += len;
+
+ if (unlikely(inl > qp->max_inline_data))
+ return -ENOMEM;
+
+ while (likely(len)) {
+ size_t leftlen;
+ size_t copysz;
+
+ handle_post_send_edge(&qp->sq, wqe,
+ *wqe_sz + (offset >> 4),
+ cur_edge);
+
+ leftlen = *cur_edge - *wqe;
+ copysz = min_t(size_t, leftlen, len);
+
+ memcpy(*wqe, addr, copysz);
+ len -= copysz;
+ addr += copysz;
+ *wqe += copysz;
+ offset += copysz;
+ }
+ }
+
+ seg->byte_count = cpu_to_be32(inl | MLX5_INLINE_SEG);
+
+ *wqe_sz += ALIGN(inl + sizeof(seg->byte_count), 16) / 16;
+
+ return 0;
+}
+
+static u16 prot_field_size(enum ib_signature_type type)
+{
+ switch (type) {
+ case IB_SIG_TYPE_T10_DIF:
+ return MLX5_DIF_SIZE;
+ default:
+ return 0;
+ }
+}
+
+static u8 bs_selector(int block_size)
+{
+ switch (block_size) {
+ case 512: return 0x1;
+ case 520: return 0x2;
+ case 4096: return 0x3;
+ case 4160: return 0x4;
+ case 1073741824: return 0x5;
+ default: return 0;
+ }
+}
+
+static void mlx5_fill_inl_bsf(struct ib_sig_domain *domain,
+ struct mlx5_bsf_inl *inl)
+{
+ /* Valid inline section and allow BSF refresh */
+ inl->vld_refresh = cpu_to_be16(MLX5_BSF_INL_VALID |
+ MLX5_BSF_REFRESH_DIF);
+ inl->dif_apptag = cpu_to_be16(domain->sig.dif.app_tag);
+ inl->dif_reftag = cpu_to_be32(domain->sig.dif.ref_tag);
+ /* repeating block */
+ inl->rp_inv_seed = MLX5_BSF_REPEAT_BLOCK;
+ inl->sig_type = domain->sig.dif.bg_type == IB_T10DIF_CRC ?
+ MLX5_DIF_CRC : MLX5_DIF_IPCS;
+
+ if (domain->sig.dif.ref_remap)
+ inl->dif_inc_ref_guard_check |= MLX5_BSF_INC_REFTAG;
+
+ if (domain->sig.dif.app_escape) {
+ if (domain->sig.dif.ref_escape)
+ inl->dif_inc_ref_guard_check |= MLX5_BSF_APPREF_ESCAPE;
+ else
+ inl->dif_inc_ref_guard_check |= MLX5_BSF_APPTAG_ESCAPE;
+ }
+
+ inl->dif_app_bitmask_check =
+ cpu_to_be16(domain->sig.dif.apptag_check_mask);
+}
+
+static int mlx5_set_bsf(struct ib_mr *sig_mr,
+ struct ib_sig_attrs *sig_attrs,
+ struct mlx5_bsf *bsf, u32 data_size)
+{
+ struct mlx5_core_sig_ctx *msig = to_mmr(sig_mr)->sig;
+ struct mlx5_bsf_basic *basic = &bsf->basic;
+ struct ib_sig_domain *mem = &sig_attrs->mem;
+ struct ib_sig_domain *wire = &sig_attrs->wire;
+
+ memset(bsf, 0, sizeof(*bsf));
+
+ /* Basic + Extended + Inline */
+ basic->bsf_size_sbs = 1 << 7;
+ /* Input domain check byte mask */
+ basic->check_byte_mask = sig_attrs->check_mask;
+ basic->raw_data_size = cpu_to_be32(data_size);
+
+ /* Memory domain */
+ switch (sig_attrs->mem.sig_type) {
+ case IB_SIG_TYPE_NONE:
+ break;
+ case IB_SIG_TYPE_T10_DIF:
+ basic->mem.bs_selector = bs_selector(mem->sig.dif.pi_interval);
+ basic->m_bfs_psv = cpu_to_be32(msig->psv_memory.psv_idx);
+ mlx5_fill_inl_bsf(mem, &bsf->m_inl);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Wire domain */
+ switch (sig_attrs->wire.sig_type) {
+ case IB_SIG_TYPE_NONE:
+ break;
+ case IB_SIG_TYPE_T10_DIF:
+ if (mem->sig.dif.pi_interval == wire->sig.dif.pi_interval &&
+ mem->sig_type == wire->sig_type) {
+ /* Same block structure */
+ basic->bsf_size_sbs |= 1 << 4;
+ if (mem->sig.dif.bg_type == wire->sig.dif.bg_type)
+ basic->wire.copy_byte_mask |= MLX5_CPY_GRD_MASK;
+ if (mem->sig.dif.app_tag == wire->sig.dif.app_tag)
+ basic->wire.copy_byte_mask |= MLX5_CPY_APP_MASK;
+ if (mem->sig.dif.ref_tag == wire->sig.dif.ref_tag)
+ basic->wire.copy_byte_mask |= MLX5_CPY_REF_MASK;
+ } else
+ basic->wire.bs_selector =
+ bs_selector(wire->sig.dif.pi_interval);
+
+ basic->w_bfs_psv = cpu_to_be32(msig->psv_wire.psv_idx);
+ mlx5_fill_inl_bsf(wire, &bsf->w_inl);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+
+static int set_sig_data_segment(const struct ib_send_wr *send_wr,
+ struct ib_mr *sig_mr,
+ struct ib_sig_attrs *sig_attrs,
+ struct mlx5_ib_qp *qp, void **seg, int *size,
+ void **cur_edge)
+{
+ struct mlx5_bsf *bsf;
+ u32 data_len;
+ u32 data_key;
+ u64 data_va;
+ u32 prot_len = 0;
+ u32 prot_key = 0;
+ u64 prot_va = 0;
+ bool prot = false;
+ int ret;
+ int wqe_size;
+ struct mlx5_ib_mr *mr = to_mmr(sig_mr);
+ struct mlx5_ib_mr *pi_mr = mr->pi_mr;
+
+ data_len = pi_mr->data_length;
+ data_key = pi_mr->ibmr.lkey;
+ data_va = pi_mr->data_iova;
+ if (pi_mr->meta_ndescs) {
+ prot_len = pi_mr->meta_length;
+ prot_key = pi_mr->ibmr.lkey;
+ prot_va = pi_mr->pi_iova;
+ prot = true;
+ }
+
+ if (!prot || (data_key == prot_key && data_va == prot_va &&
+ data_len == prot_len)) {
+ /**
+ * The source domain doesn't contain signature information, or data and
+ * protection are interleaved in memory, so we need to construct:
+ * ------------------
+ * | data_klm |
+ * ------------------
+ * | BSF |
+ * ------------------
+ **/
+ struct mlx5_klm *data_klm = *seg;
+
+ data_klm->bcount = cpu_to_be32(data_len);
+ data_klm->key = cpu_to_be32(data_key);
+ data_klm->va = cpu_to_be64(data_va);
+ wqe_size = ALIGN(sizeof(*data_klm), 64);
+ } else {
+ /**
+ * Source domain contains signature information
+	 * So we need to construct a strided block format:
+ * ---------------------------
+ * | stride_block_ctrl |
+ * ---------------------------
+ * | data_klm |
+ * ---------------------------
+ * | prot_klm |
+ * ---------------------------
+ * | BSF |
+ * ---------------------------
+ **/
+ struct mlx5_stride_block_ctrl_seg *sblock_ctrl;
+ struct mlx5_stride_block_entry *data_sentry;
+ struct mlx5_stride_block_entry *prot_sentry;
+ u16 block_size = sig_attrs->mem.sig.dif.pi_interval;
+ int prot_size;
+
+ sblock_ctrl = *seg;
+ data_sentry = (void *)sblock_ctrl + sizeof(*sblock_ctrl);
+ prot_sentry = (void *)data_sentry + sizeof(*data_sentry);
+
+ prot_size = prot_field_size(sig_attrs->mem.sig_type);
+ if (!prot_size) {
+ pr_err("Bad block size given: %u\n", block_size);
+ return -EINVAL;
+ }
+ sblock_ctrl->bcount_per_cycle = cpu_to_be32(block_size +
+ prot_size);
+ sblock_ctrl->op = cpu_to_be32(MLX5_STRIDE_BLOCK_OP);
+ sblock_ctrl->repeat_count = cpu_to_be32(data_len / block_size);
+ sblock_ctrl->num_entries = cpu_to_be16(2);
+
+ data_sentry->bcount = cpu_to_be16(block_size);
+ data_sentry->key = cpu_to_be32(data_key);
+ data_sentry->va = cpu_to_be64(data_va);
+ data_sentry->stride = cpu_to_be16(block_size);
+
+ prot_sentry->bcount = cpu_to_be16(prot_size);
+ prot_sentry->key = cpu_to_be32(prot_key);
+ prot_sentry->va = cpu_to_be64(prot_va);
+ prot_sentry->stride = cpu_to_be16(prot_size);
+
+ wqe_size = ALIGN(sizeof(*sblock_ctrl) + sizeof(*data_sentry) +
+ sizeof(*prot_sentry), 64);
+ }
+
+ *seg += wqe_size;
+ *size += wqe_size / 16;
+ handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
+
+ bsf = *seg;
+ ret = mlx5_set_bsf(sig_mr, sig_attrs, bsf, data_len);
+ if (ret)
+ return -EINVAL;
+
+ *seg += sizeof(*bsf);
+ *size += sizeof(*bsf) / 16;
+ handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
+
+ return 0;
+}
+
+static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg,
+ struct ib_mr *sig_mr, int access_flags,
+ u32 size, u32 length, u32 pdn)
+{
+ u32 sig_key = sig_mr->rkey;
+ u8 sigerr = to_mmr(sig_mr)->sig->sigerr_count & 1;
+
+ memset(seg, 0, sizeof(*seg));
+
+ seg->flags = get_umr_flags(access_flags) | MLX5_MKC_ACCESS_MODE_KLMS;
+ seg->qpn_mkey7_0 = cpu_to_be32((sig_key & 0xff) | 0xffffff00);
+ seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL | sigerr << 26 |
+ MLX5_MKEY_BSF_EN | pdn);
+ seg->len = cpu_to_be64(length);
+ seg->xlt_oct_size = cpu_to_be32(get_xlt_octo(size));
+ seg->bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE);
+}
+
+static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
+ u32 size)
+{
+ memset(umr, 0, sizeof(*umr));
+
+ umr->flags = MLX5_FLAGS_INLINE | MLX5_FLAGS_CHECK_FREE;
+ umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size));
+ umr->bsf_octowords = cpu_to_be16(MLX5_MKEY_BSF_OCTO_SIZE);
+ umr->mkey_mask = sig_mkey_mask();
+}
+
+static int set_pi_umr_wr(const struct ib_send_wr *send_wr,
+ struct mlx5_ib_qp *qp, void **seg, int *size,
+ void **cur_edge)
+{
+ const struct ib_reg_wr *wr = reg_wr(send_wr);
+ struct mlx5_ib_mr *sig_mr = to_mmr(wr->mr);
+ struct mlx5_ib_mr *pi_mr = sig_mr->pi_mr;
+ struct ib_sig_attrs *sig_attrs = sig_mr->ibmr.sig_attrs;
+ u32 pdn = to_mpd(qp->ibqp.pd)->pdn;
+ u32 xlt_size;
+ int region_len, ret;
+
+ if (unlikely(send_wr->num_sge != 0) ||
+ unlikely(wr->access & IB_ACCESS_REMOTE_ATOMIC) ||
+ unlikely(!sig_mr->sig) || unlikely(!qp->ibqp.integrity_en) ||
+ unlikely(!sig_mr->sig->sig_status_checked))
+ return -EINVAL;
+
+ /* length of the protected region, data + protection */
+ region_len = pi_mr->ibmr.length;
+
+ /**
+	 * KLM octoword size - if protection was provided
+	 * then we use the strided block format (3 octowords),
+	 * otherwise we use a single KLM (1 octoword)
+ **/
+ if (sig_attrs->mem.sig_type != IB_SIG_TYPE_NONE)
+ xlt_size = 0x30;
+ else
+ xlt_size = sizeof(struct mlx5_klm);
+
+ set_sig_umr_segment(*seg, xlt_size);
+ *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
+ *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
+ handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
+
+ set_sig_mkey_segment(*seg, wr->mr, wr->access, xlt_size, region_len,
+ pdn);
+ *seg += sizeof(struct mlx5_mkey_seg);
+ *size += sizeof(struct mlx5_mkey_seg) / 16;
+ handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
+
+ ret = set_sig_data_segment(send_wr, wr->mr, sig_attrs, qp, seg, size,
+ cur_edge);
+ if (ret)
+ return ret;
+
+ sig_mr->sig->sig_status_checked = false;
+ return 0;
+}
+
+static int set_psv_wr(struct ib_sig_domain *domain,
+ u32 psv_idx, void **seg, int *size)
+{
+ struct mlx5_seg_set_psv *psv_seg = *seg;
+
+ memset(psv_seg, 0, sizeof(*psv_seg));
+ psv_seg->psv_num = cpu_to_be32(psv_idx);
+ switch (domain->sig_type) {
+ case IB_SIG_TYPE_NONE:
+ break;
+ case IB_SIG_TYPE_T10_DIF:
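+		/* initial guard tag (bg) in the upper 16 bits, app_tag in the lower */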
+ psv_seg->transient_sig = cpu_to_be32(domain->sig.dif.bg << 16 |
+ domain->sig.dif.app_tag);
+ psv_seg->ref_tag = cpu_to_be32(domain->sig.dif.ref_tag);
+ break;
+ default:
+ pr_err("Bad signature type (%d) is given.\n",
+ domain->sig_type);
+ return -EINVAL;
+ }
+
+ *seg += sizeof(*psv_seg);
+ *size += sizeof(*psv_seg) / 16;
+
+ return 0;
+}
+
+static int set_reg_wr(struct mlx5_ib_qp *qp,
+ const struct ib_reg_wr *wr,
+ void **seg, int *size, void **cur_edge,
+ bool check_not_free)
+{
+ struct mlx5_ib_mr *mr = to_mmr(wr->mr);
+ struct mlx5_ib_pd *pd = to_mpd(qp->ibqp.pd);
+ struct mlx5_ib_dev *dev = to_mdev(pd->ibpd.device);
+ int mr_list_size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size;
+ bool umr_inline = mr_list_size <= MLX5_IB_SQ_UMR_INLINE_THRESHOLD;
+ bool atomic = wr->access & IB_ACCESS_REMOTE_ATOMIC;
+ u8 flags = 0;
+
+ if (!mlx5_ib_can_use_umr(dev, atomic, wr->access)) {
+ mlx5_ib_warn(to_mdev(qp->ibqp.device),
+ "Fast update of %s for MR is disabled\n",
+ (MLX5_CAP_GEN(dev->mdev,
+ umr_modify_entity_size_disabled)) ?
+ "entity size" :
+ "atomic access");
+ return -EINVAL;
+ }
+
+ if (unlikely(wr->wr.send_flags & IB_SEND_INLINE)) {
+ mlx5_ib_warn(to_mdev(qp->ibqp.device),
+ "Invalid IB_SEND_INLINE send flag\n");
+ return -EINVAL;
+ }
+
+ if (check_not_free)
+ flags |= MLX5_UMR_CHECK_NOT_FREE;
+ if (umr_inline)
+ flags |= MLX5_UMR_INLINE;
+
+ set_reg_umr_seg(*seg, mr, flags, atomic);
+ *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
+ *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
+ handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
+
+ set_reg_mkey_seg(*seg, mr, wr->key, wr->access);
+ *seg += sizeof(struct mlx5_mkey_seg);
+ *size += sizeof(struct mlx5_mkey_seg) / 16;
+ handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
+
+ if (umr_inline) {
+ memcpy_send_wqe(&qp->sq, cur_edge, seg, size, mr->descs,
+ mr_list_size);
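+		/* pad to a whole WQE basic block; *size is in 16-byte units */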
+ *size = ALIGN(*size, MLX5_SEND_WQE_BB >> 4);
+ } else {
+ set_reg_data_seg(*seg, mr, pd);
+ *seg += sizeof(struct mlx5_wqe_data_seg);
+ *size += (sizeof(struct mlx5_wqe_data_seg) / 16);
+ }
+ return 0;
+}
+
+static void set_linv_wr(struct mlx5_ib_qp *qp, void **seg, int *size,
+ void **cur_edge)
+{
+ set_linv_umr_seg(*seg);
+ *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
+ *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
+ handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
+ set_linv_mkey_seg(*seg);
+ *seg += sizeof(struct mlx5_mkey_seg);
+ *size += sizeof(struct mlx5_mkey_seg) / 16;
+ handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
+}
+
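+/* Debug helper: print the WQE at @idx, 16 bytes per line, fetching each
+ * 64-byte WQEBB in turn and wrapping at the end of the SQ.
+ */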
+static void dump_wqe(struct mlx5_ib_qp *qp, u32 idx, int size_16)
+{
+ __be32 *p = NULL;
+ int i, j;
+
+ pr_debug("dump WQE index %u:\n", idx);
+ for (i = 0, j = 0; i < size_16 * 4; i += 4, j += 4) {
+ if ((i & 0xf) == 0) {
+ p = mlx5_frag_buf_get_wqe(&qp->sq.fbc, idx);
+ pr_debug("WQBB at %p:\n", (void *)p);
+ j = 0;
+ idx = (idx + 1) & (qp->sq.wqe_cnt - 1);
+ }
+ pr_debug("%08x %08x %08x %08x\n", be32_to_cpu(p[j]),
+ be32_to_cpu(p[j + 1]), be32_to_cpu(p[j + 2]),
+ be32_to_cpu(p[j + 3]));
+ }
+}
+
+static int __begin_wqe(struct mlx5_ib_qp *qp, void **seg,
+ struct mlx5_wqe_ctrl_seg **ctrl,
+ const struct ib_send_wr *wr, unsigned int *idx,
+ int *size, void **cur_edge, int nreq,
+ bool send_signaled, bool solicited)
+{
+ if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)))
+ return -ENOMEM;
+
+ *idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
+ *seg = mlx5_frag_buf_get_wqe(&qp->sq.fbc, *idx);
+ *ctrl = *seg;
+ *(uint32_t *)(*seg + 8) = 0;
+ (*ctrl)->imm = send_ieth(wr);
+ (*ctrl)->fm_ce_se = qp->sq_signal_bits |
+ (send_signaled ? MLX5_WQE_CTRL_CQ_UPDATE : 0) |
+ (solicited ? MLX5_WQE_CTRL_SOLICITED : 0);
+
+ *seg += sizeof(**ctrl);
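+	/* *size counts 16-byte units and feeds the ds field written by finish_wqe() */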
+ *size = sizeof(**ctrl) / 16;
+ *cur_edge = qp->sq.cur_edge;
+
+ return 0;
+}
+
+static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
+ struct mlx5_wqe_ctrl_seg **ctrl,
+ const struct ib_send_wr *wr, unsigned int *idx, int *size,
+ void **cur_edge, int nreq)
+{
+ return __begin_wqe(qp, seg, ctrl, wr, idx, size, cur_edge, nreq,
+ wr->send_flags & IB_SEND_SIGNALED,
+ wr->send_flags & IB_SEND_SOLICITED);
+}
+
+static void finish_wqe(struct mlx5_ib_qp *qp,
+ struct mlx5_wqe_ctrl_seg *ctrl,
+ void *seg, u8 size, void *cur_edge,
+ unsigned int idx, u64 wr_id, int nreq, u8 fence,
+ u32 mlx5_opcode)
+{
+ u8 opmod = 0;
+
+ ctrl->opmod_idx_opcode = cpu_to_be32(((u32)(qp->sq.cur_post) << 8) |
+ mlx5_opcode | ((u32)opmod << 24));
+ ctrl->qpn_ds = cpu_to_be32(size | (qp->trans_qp.base.mqp.qpn << 8));
+ ctrl->fm_ce_se |= fence;
+ if (unlikely(qp->flags_en & MLX5_QP_FLAG_SIGNATURE))
+ ctrl->signature = wq_sig(ctrl);
+
+ qp->sq.wrid[idx] = wr_id;
+ qp->sq.w_list[idx].opcode = mlx5_opcode;
+ qp->sq.wqe_head[idx] = qp->sq.head + nreq;
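+	/* cur_post advances by the number of WQE basic blocks this WQE consumed */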
+ qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB);
+ qp->sq.w_list[idx].next = qp->sq.cur_post;
+
+	/* Save the edge, which may have been updated during WQE
+	 * construction, back into the SQ's cache.
+ */
+ seg = PTR_ALIGN(seg, MLX5_SEND_WQE_BB);
+ qp->sq.cur_edge = (unlikely(seg == cur_edge)) ?
+ get_sq_edge(&qp->sq, qp->sq.cur_post &
+ (qp->sq.wqe_cnt - 1)) :
+ cur_edge;
+}
+
+static void handle_rdma_op(const struct ib_send_wr *wr, void **seg, int *size)
+{
+ set_raddr_seg(*seg, rdma_wr(wr)->remote_addr, rdma_wr(wr)->rkey);
+ *seg += sizeof(struct mlx5_wqe_raddr_seg);
+ *size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
+}
+
+static void handle_local_inv(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr,
+ struct mlx5_wqe_ctrl_seg **ctrl, void **seg,
+ int *size, void **cur_edge, unsigned int idx)
+{
+ qp->sq.wr_data[idx] = IB_WR_LOCAL_INV;
+ (*ctrl)->imm = cpu_to_be32(wr->ex.invalidate_rkey);
+ set_linv_wr(qp, seg, size, cur_edge);
+}
+
+static int handle_reg_mr(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr,
+ struct mlx5_wqe_ctrl_seg **ctrl, void **seg, int *size,
+ void **cur_edge, unsigned int idx)
+{
+ qp->sq.wr_data[idx] = IB_WR_REG_MR;
+ (*ctrl)->imm = cpu_to_be32(reg_wr(wr)->key);
+ return set_reg_wr(qp, reg_wr(wr), seg, size, cur_edge, true);
+}
+
+static int handle_psv(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
+ const struct ib_send_wr *wr,
+ struct mlx5_wqe_ctrl_seg **ctrl, void **seg, int *size,
+ void **cur_edge, unsigned int *idx, int nreq,
+ struct ib_sig_domain *domain, u32 psv_index,
+ u8 next_fence)
+{
+ int err;
+
+ /*
+	 * SET_PSV WQEs are not signaled and are solicited on error.
+ */
+ err = __begin_wqe(qp, seg, ctrl, wr, idx, size, cur_edge, nreq,
+ false, true);
+ if (unlikely(err)) {
+ mlx5_ib_warn(dev, "\n");
+ err = -ENOMEM;
+ goto out;
+ }
+ err = set_psv_wr(domain, psv_index, seg, size);
+ if (unlikely(err)) {
+ mlx5_ib_warn(dev, "\n");
+ goto out;
+ }
+ finish_wqe(qp, *ctrl, *seg, *size, *cur_edge, *idx, wr->wr_id, nreq,
+ next_fence, MLX5_OPCODE_SET_PSV);
+
+out:
+ return err;
+}
+
+static int handle_reg_mr_integrity(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_qp *qp,
+ const struct ib_send_wr *wr,
+ struct mlx5_wqe_ctrl_seg **ctrl, void **seg,
+ int *size, void **cur_edge,
+ unsigned int *idx, int nreq, u8 fence,
+ u8 next_fence)
+{
+ struct mlx5_ib_mr *mr;
+ struct mlx5_ib_mr *pi_mr;
+ struct mlx5_ib_mr pa_pi_mr;
+ struct ib_sig_attrs *sig_attrs;
+ struct ib_reg_wr reg_pi_wr;
+ int err;
+
+ qp->sq.wr_data[*idx] = IB_WR_REG_MR_INTEGRITY;
+
+ mr = to_mmr(reg_wr(wr)->mr);
+ pi_mr = mr->pi_mr;
+
+ if (pi_mr) {
+ memset(&reg_pi_wr, 0,
+ sizeof(struct ib_reg_wr));
+
+ reg_pi_wr.mr = &pi_mr->ibmr;
+ reg_pi_wr.access = reg_wr(wr)->access;
+ reg_pi_wr.key = pi_mr->ibmr.rkey;
+
+ (*ctrl)->imm = cpu_to_be32(reg_pi_wr.key);
+ /* UMR for data + prot registration */
+ err = set_reg_wr(qp, &reg_pi_wr, seg, size, cur_edge, false);
+ if (unlikely(err))
+ goto out;
+
+ finish_wqe(qp, *ctrl, *seg, *size, *cur_edge, *idx, wr->wr_id,
+ nreq, fence, MLX5_OPCODE_UMR);
+
+ err = begin_wqe(qp, seg, ctrl, wr, idx, size, cur_edge, nreq);
+ if (unlikely(err)) {
+ mlx5_ib_warn(dev, "\n");
+ err = -ENOMEM;
+ goto out;
+ }
+ } else {
+ memset(&pa_pi_mr, 0, sizeof(struct mlx5_ib_mr));
+ /* No UMR, use local_dma_lkey */
+ pa_pi_mr.ibmr.lkey = mr->ibmr.pd->local_dma_lkey;
+ pa_pi_mr.ndescs = mr->ndescs;
+ pa_pi_mr.data_length = mr->data_length;
+ pa_pi_mr.data_iova = mr->data_iova;
+ if (mr->meta_ndescs) {
+ pa_pi_mr.meta_ndescs = mr->meta_ndescs;
+ pa_pi_mr.meta_length = mr->meta_length;
+ pa_pi_mr.pi_iova = mr->pi_iova;
+ }
+
+ pa_pi_mr.ibmr.length = mr->ibmr.length;
+ mr->pi_mr = &pa_pi_mr;
+ }
+ (*ctrl)->imm = cpu_to_be32(mr->ibmr.rkey);
+ /* UMR for sig MR */
+ err = set_pi_umr_wr(wr, qp, seg, size, cur_edge);
+ if (unlikely(err)) {
+ mlx5_ib_warn(dev, "\n");
+ goto out;
+ }
+ finish_wqe(qp, *ctrl, *seg, *size, *cur_edge, *idx, wr->wr_id, nreq,
+ fence, MLX5_OPCODE_UMR);
+
+ sig_attrs = mr->ibmr.sig_attrs;
+ err = handle_psv(dev, qp, wr, ctrl, seg, size, cur_edge, idx, nreq,
+ &sig_attrs->mem, mr->sig->psv_memory.psv_idx,
+ next_fence);
+ if (unlikely(err))
+ goto out;
+
+ err = handle_psv(dev, qp, wr, ctrl, seg, size, cur_edge, idx, nreq,
+ &sig_attrs->wire, mr->sig->psv_wire.psv_idx,
+ next_fence);
+ if (unlikely(err))
+ goto out;
+
+ qp->next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
+
+out:
+ return err;
+}
+
+static int handle_qpt_rc(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
+ const struct ib_send_wr *wr,
+ struct mlx5_wqe_ctrl_seg **ctrl, void **seg, int *size,
+ void **cur_edge, unsigned int *idx, int nreq, u8 fence,
+ u8 next_fence, int *num_sge)
+{
+ int err = 0;
+
+ switch (wr->opcode) {
+ case IB_WR_RDMA_READ:
+ case IB_WR_RDMA_WRITE:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ handle_rdma_op(wr, seg, size);
+ break;
+
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
+ mlx5_ib_warn(dev, "Atomic operations are not supported yet\n");
+ err = -EOPNOTSUPP;
+ goto out;
+
+ case IB_WR_LOCAL_INV:
+ handle_local_inv(qp, wr, ctrl, seg, size, cur_edge, *idx);
+ *num_sge = 0;
+ break;
+
+ case IB_WR_REG_MR:
+ err = handle_reg_mr(qp, wr, ctrl, seg, size, cur_edge, *idx);
+ if (unlikely(err))
+ goto out;
+ *num_sge = 0;
+ break;
+
+ case IB_WR_REG_MR_INTEGRITY:
+ err = handle_reg_mr_integrity(dev, qp, wr, ctrl, seg, size,
+ cur_edge, idx, nreq, fence,
+ next_fence);
+ if (unlikely(err))
+ goto out;
+ *num_sge = 0;
+ break;
+
+ default:
+ break;
+ }
+
+out:
+ return err;
+}
+
+static void handle_qpt_uc(const struct ib_send_wr *wr, void **seg, int *size)
+{
+ switch (wr->opcode) {
+ case IB_WR_RDMA_WRITE:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ handle_rdma_op(wr, seg, size);
+ break;
+ default:
+ break;
+ }
+}
+
+static void handle_qpt_hw_gsi(struct mlx5_ib_qp *qp,
+ const struct ib_send_wr *wr, void **seg,
+ int *size, void **cur_edge)
+{
+ set_datagram_seg(*seg, wr);
+ *seg += sizeof(struct mlx5_wqe_datagram_seg);
+ *size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
+ handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
+}
+
+static void handle_qpt_ud(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr,
+ void **seg, int *size, void **cur_edge)
+{
+ set_datagram_seg(*seg, wr);
+ *seg += sizeof(struct mlx5_wqe_datagram_seg);
+ *size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
+ handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
+
+ /* handle qp that supports ud offload */
+ if (qp->flags & IB_QP_CREATE_IPOIB_UD_LSO) {
+ struct mlx5_wqe_eth_pad *pad;
+
+ pad = *seg;
+ memset(pad, 0, sizeof(struct mlx5_wqe_eth_pad));
+ *seg += sizeof(struct mlx5_wqe_eth_pad);
+ *size += sizeof(struct mlx5_wqe_eth_pad) / 16;
+ set_eth_seg(wr, qp, seg, size, cur_edge);
+ handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
+ }
+}
+
+static int handle_qpt_reg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
+ const struct ib_send_wr *wr,
+ struct mlx5_wqe_ctrl_seg **ctrl, void **seg,
+ int *size, void **cur_edge, unsigned int idx)
+{
+ int err = 0;
+
+ if (unlikely(wr->opcode != MLX5_IB_WR_UMR)) {
+ err = -EINVAL;
+ mlx5_ib_warn(dev, "bad opcode %d\n", wr->opcode);
+ goto out;
+ }
+
+ qp->sq.wr_data[idx] = MLX5_IB_WR_UMR;
+ (*ctrl)->imm = cpu_to_be32(umr_wr(wr)->mkey);
+ err = set_reg_umr_segment(dev, *seg, wr,
+ !!(MLX5_CAP_GEN(dev->mdev, atomic)));
+ if (unlikely(err))
+ goto out;
+ *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
+ *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
+ handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
+ set_reg_mkey_segment(*seg, wr);
+ *seg += sizeof(struct mlx5_mkey_seg);
+ *size += sizeof(struct mlx5_mkey_seg) / 16;
+ handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
+out:
+ return err;
+}
+
+int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr, bool drain)
+{
+ struct mlx5_wqe_ctrl_seg *ctrl = NULL; /* compiler warning */
+ struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_ib_qp *qp;
+ struct mlx5_wqe_xrc_seg *xrc;
+ struct mlx5_bf *bf;
+ void *cur_edge;
+ int uninitialized_var(size);
+ unsigned long flags;
+ unsigned int idx;
+ int err = 0;
+ int num_sge;
+ void *seg;
+ int nreq;
+ int i;
+ u8 next_fence = 0;
+ u8 fence;
+
+ if (unlikely(mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR &&
+ !drain)) {
+ *bad_wr = wr;
+ return -EIO;
+ }
+
+ if (unlikely(ibqp->qp_type == IB_QPT_GSI))
+ return mlx5_ib_gsi_post_send(ibqp, wr, bad_wr);
+
+ qp = to_mqp(ibqp);
+ bf = &qp->bf;
+
+ spin_lock_irqsave(&qp->sq.lock, flags);
+
+ for (nreq = 0; wr; nreq++, wr = wr->next) {
+ if (unlikely(wr->opcode >= ARRAY_SIZE(mlx5_ib_opcode))) {
+ mlx5_ib_warn(dev, "\n");
+ err = -EINVAL;
+ *bad_wr = wr;
+ goto out;
+ }
+
+ num_sge = wr->num_sge;
+ if (unlikely(num_sge > qp->sq.max_gs)) {
+ mlx5_ib_warn(dev, "\n");
+ err = -EINVAL;
+ *bad_wr = wr;
+ goto out;
+ }
+
+ err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, &cur_edge,
+ nreq);
+ if (err) {
+ mlx5_ib_warn(dev, "\n");
+ err = -ENOMEM;
+ *bad_wr = wr;
+ goto out;
+ }
+
+ if (wr->opcode == IB_WR_REG_MR ||
+ wr->opcode == IB_WR_REG_MR_INTEGRITY) {
+ fence = dev->umr_fence;
+ next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
+ } else {
+ if (wr->send_flags & IB_SEND_FENCE) {
+ if (qp->next_fence)
+ fence = MLX5_FENCE_MODE_SMALL_AND_FENCE;
+ else
+ fence = MLX5_FENCE_MODE_FENCE;
+ } else {
+ fence = qp->next_fence;
+ }
+ }
+
+ switch (ibqp->qp_type) {
+ case IB_QPT_XRC_INI:
+ xrc = seg;
+ seg += sizeof(*xrc);
+ size += sizeof(*xrc) / 16;
+ fallthrough;
+ case IB_QPT_RC:
+ err = handle_qpt_rc(dev, qp, wr, &ctrl, &seg, &size,
+ &cur_edge, &idx, nreq, fence,
+ next_fence, &num_sge);
+ if (unlikely(err)) {
+ *bad_wr = wr;
+ goto out;
+ } else if (wr->opcode == IB_WR_REG_MR_INTEGRITY) {
+ goto skip_psv;
+ }
+ break;
+
+ case IB_QPT_UC:
+ handle_qpt_uc(wr, &seg, &size);
+ break;
+ case IB_QPT_SMI:
+ if (unlikely(!mdev->port_caps[qp->port - 1].has_smi)) {
+ mlx5_ib_warn(dev, "Send SMP MADs is not allowed\n");
+ err = -EPERM;
+ *bad_wr = wr;
+ goto out;
+ }
+ fallthrough;
+ case MLX5_IB_QPT_HW_GSI:
+ handle_qpt_hw_gsi(qp, wr, &seg, &size, &cur_edge);
+ break;
+ case IB_QPT_UD:
+ handle_qpt_ud(qp, wr, &seg, &size, &cur_edge);
+ break;
+ case MLX5_IB_QPT_REG_UMR:
+ err = handle_qpt_reg_umr(dev, qp, wr, &ctrl, &seg,
+ &size, &cur_edge, idx);
+ if (unlikely(err))
+ goto out;
+ break;
+
+ default:
+ break;
+ }
+
+ if (wr->send_flags & IB_SEND_INLINE && num_sge) {
+ err = set_data_inl_seg(qp, wr, &seg, &size, &cur_edge);
+ if (unlikely(err)) {
+ mlx5_ib_warn(dev, "\n");
+ *bad_wr = wr;
+ goto out;
+ }
+ } else {
+ for (i = 0; i < num_sge; i++) {
+ handle_post_send_edge(&qp->sq, &seg, size,
+ &cur_edge);
+ if (unlikely(!wr->sg_list[i].length))
+ continue;
+
+ set_data_ptr_seg(
+ (struct mlx5_wqe_data_seg *)seg,
+ wr->sg_list + i);
+ size += sizeof(struct mlx5_wqe_data_seg) / 16;
+ seg += sizeof(struct mlx5_wqe_data_seg);
+ }
+ }
+
+ qp->next_fence = next_fence;
+ finish_wqe(qp, ctrl, seg, size, cur_edge, idx, wr->wr_id, nreq,
+ fence, mlx5_ib_opcode[wr->opcode]);
+skip_psv:
+ if (0)
+ dump_wqe(qp, idx, size);
+ }
+
+out:
+ if (likely(nreq)) {
+ qp->sq.head += nreq;
+
+ /* Make sure that descriptors are written before
+ * updating doorbell record and ringing the doorbell
+ */
+ wmb();
+
+ qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post);
+
+ /* Make sure doorbell record is visible to the HCA before
+ * we hit doorbell.
+ */
+ wmb();
+
+ mlx5_write64((__be32 *)ctrl, bf->bfreg->map + bf->offset);
+ /* Make sure doorbells don't leak out of SQ spinlock
+ * and reach the HCA out of order.
+ */
+ bf->offset ^= bf->buf_size;
+ }
+
+ spin_unlock_irqrestore(&qp->sq.lock, flags);
+
+ return err;
+}
+
+static void set_sig_seg(struct mlx5_rwqe_sig *sig, int max_gs)
+{
+ sig->signature = calc_sig(sig, (max_gs + 1) << 2);
+}
+
+int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr, bool drain)
+{
+ struct mlx5_ib_qp *qp = to_mqp(ibqp);
+ struct mlx5_wqe_data_seg *scat;
+ struct mlx5_rwqe_sig *sig;
+ struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ unsigned long flags;
+ int err = 0;
+ int nreq;
+ int ind;
+ int i;
+
+ if (unlikely(mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR &&
+ !drain)) {
+ *bad_wr = wr;
+ return -EIO;
+ }
+
+ if (unlikely(ibqp->qp_type == IB_QPT_GSI))
+ return mlx5_ib_gsi_post_recv(ibqp, wr, bad_wr);
+
+ spin_lock_irqsave(&qp->rq.lock, flags);
+
+ ind = qp->rq.head & (qp->rq.wqe_cnt - 1);
+
+ for (nreq = 0; wr; nreq++, wr = wr->next) {
+ if (mlx5_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
+ err = -ENOMEM;
+ *bad_wr = wr;
+ goto out;
+ }
+
+ if (unlikely(wr->num_sge > qp->rq.max_gs)) {
+ err = -EINVAL;
+ *bad_wr = wr;
+ goto out;
+ }
+
+ scat = mlx5_frag_buf_get_wqe(&qp->rq.fbc, ind);
+ if (qp->flags_en & MLX5_QP_FLAG_SIGNATURE)
+ scat++;
+
+ for (i = 0; i < wr->num_sge; i++)
+ set_data_ptr_seg(scat + i, wr->sg_list + i);
+
+ if (i < qp->rq.max_gs) {
+ scat[i].byte_count = 0;
+ scat[i].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
+ scat[i].addr = 0;
+ }
+
+ if (qp->flags_en & MLX5_QP_FLAG_SIGNATURE) {
+ sig = (struct mlx5_rwqe_sig *)scat;
+ set_sig_seg(sig, qp->rq.max_gs);
+ }
+
+ qp->rq.wrid[ind] = wr->wr_id;
+
+ ind = (ind + 1) & (qp->rq.wqe_cnt - 1);
+ }
+
+out:
+ if (likely(nreq)) {
+ qp->rq.head += nreq;
+
+ /* Make sure that descriptors are written before
+ * doorbell record.
+ */
+ wmb();
+
+ *qp->db.db = cpu_to_be32(qp->rq.head & 0xffff);
+ }
+
+ spin_unlock_irqrestore(&qp->rq.lock, flags);
+
+ return err;
+}
diff --git a/drivers/infiniband/hw/mlx5/wr.h b/drivers/infiniband/hw/mlx5/wr.h
new file mode 100644
index 000000000000..4f0057516402
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/wr.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright (c) 2020, Mellanox Technologies inc. All rights reserved.
+ */
+
+#ifndef _MLX5_IB_WR_H
+#define _MLX5_IB_WR_H
+
+#include "mlx5_ib.h"
+
+enum {
+ MLX5_IB_SQ_UMR_INLINE_THRESHOLD = 64,
+};
+
+struct mlx5_wqe_eth_pad {
+ u8 rsvd0[16];
+};
+
+/* get_sq_edge - Get the next nearby edge.
+ *
+ * An 'edge' is defined as the first address past the end of the
+ * current fragment or of the SQ itself. Accordingly, WQE construction,
+ * which repeatedly advances the write pointer, only has to check
+ * whether that pointer has reached an edge.
+ *
+ * @sq - SQ buffer.
+ * @idx - Stride index in the SQ buffer.
+ *
+ * Return:
+ * The new edge.
+ */
+static inline void *get_sq_edge(struct mlx5_ib_wq *sq, u32 idx)
+{
+ void *fragment_end;
+
+ fragment_end = mlx5_frag_buf_get_wqe
+ (&sq->fbc,
+ mlx5_frag_buf_get_idx_last_contig_stride(&sq->fbc, idx));
+
+ return fragment_end + MLX5_SEND_WQE_BB;
+}
+
+int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr, bool drain);
+int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr, bool drain);
+
+static inline int mlx5_ib_post_send_nodrain(struct ib_qp *ibqp,
+ const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
+{
+ return mlx5_ib_post_send(ibqp, wr, bad_wr, false);
+}
+
+static inline int mlx5_ib_post_send_drain(struct ib_qp *ibqp,
+ const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
+{
+ return mlx5_ib_post_send(ibqp, wr, bad_wr, true);
+}
+
+static inline int mlx5_ib_post_recv_nodrain(struct ib_qp *ibqp,
+ const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
+{
+ return mlx5_ib_post_recv(ibqp, wr, bad_wr, false);
+}
+
+static inline int mlx5_ib_post_recv_drain(struct ib_qp *ibqp,
+ const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
+{
+ return mlx5_ib_post_recv(ibqp, wr, bad_wr, true);
+}
+#endif /* _MLX5_IB_WR_H */
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index 599794c5a78f..7550e9d03dec 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -478,16 +478,6 @@ int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
u32 access, struct mthca_mr *mr);
void mthca_free_mr(struct mthca_dev *dev, struct mthca_mr *mr);
-int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
- u32 access, struct mthca_fmr *fmr);
-int mthca_tavor_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
- int list_len, u64 iova);
-void mthca_tavor_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr);
-int mthca_arbel_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
- int list_len, u64 iova);
-void mthca_arbel_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr);
-int mthca_free_fmr(struct mthca_dev *dev, struct mthca_fmr *fmr);
-
int mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt);
void mthca_unmap_eq_icm(struct mthca_dev *dev);
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
index 4250b2c18c64..ce0e0867e488 100644
--- a/drivers/infiniband/hw/mthca/mthca_mr.c
+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
@@ -541,7 +541,7 @@ int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
return err;
}
-/* Free mr or fmr */
+/* Free mr */
static void mthca_free_region(struct mthca_dev *dev, u32 lkey)
{
mthca_table_put(dev, dev->mr_table.mpt_table,
@@ -564,266 +564,6 @@ void mthca_free_mr(struct mthca_dev *dev, struct mthca_mr *mr)
mthca_free_mtt(dev, mr->mtt);
}
-int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
- u32 access, struct mthca_fmr *mr)
-{
- struct mthca_mpt_entry *mpt_entry;
- struct mthca_mailbox *mailbox;
- u64 mtt_seg;
- u32 key, idx;
- int list_len = mr->attr.max_pages;
- int err = -ENOMEM;
- int i;
-
- if (mr->attr.page_shift < 12 || mr->attr.page_shift >= 32)
- return -EINVAL;
-
- /* For Arbel, all MTTs must fit in the same page. */
- if (mthca_is_memfree(dev) &&
- mr->attr.max_pages * sizeof *mr->mem.arbel.mtts > PAGE_SIZE)
- return -EINVAL;
-
- mr->maps = 0;
-
- key = mthca_alloc(&dev->mr_table.mpt_alloc);
- if (key == -1)
- return -ENOMEM;
- key = adjust_key(dev, key);
-
- idx = key & (dev->limits.num_mpts - 1);
- mr->ibmr.rkey = mr->ibmr.lkey = hw_index_to_key(dev, key);
-
- if (mthca_is_memfree(dev)) {
- err = mthca_table_get(dev, dev->mr_table.mpt_table, key);
- if (err)
- goto err_out_mpt_free;
-
- mr->mem.arbel.mpt = mthca_table_find(dev->mr_table.mpt_table, key, NULL);
- BUG_ON(!mr->mem.arbel.mpt);
- } else
- mr->mem.tavor.mpt = dev->mr_table.tavor_fmr.mpt_base +
- sizeof *(mr->mem.tavor.mpt) * idx;
-
- mr->mtt = __mthca_alloc_mtt(dev, list_len, dev->mr_table.fmr_mtt_buddy);
- if (IS_ERR(mr->mtt)) {
- err = PTR_ERR(mr->mtt);
- goto err_out_table;
- }
-
- mtt_seg = mr->mtt->first_seg * dev->limits.mtt_seg_size;
-
- if (mthca_is_memfree(dev)) {
- mr->mem.arbel.mtts = mthca_table_find(dev->mr_table.mtt_table,
- mr->mtt->first_seg,
- &mr->mem.arbel.dma_handle);
- BUG_ON(!mr->mem.arbel.mtts);
- } else
- mr->mem.tavor.mtts = dev->mr_table.tavor_fmr.mtt_base + mtt_seg;
-
- mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
- if (IS_ERR(mailbox)) {
- err = PTR_ERR(mailbox);
- goto err_out_free_mtt;
- }
-
- mpt_entry = mailbox->buf;
-
- mpt_entry->flags = cpu_to_be32(MTHCA_MPT_FLAG_SW_OWNS |
- MTHCA_MPT_FLAG_MIO |
- MTHCA_MPT_FLAG_REGION |
- access);
-
- mpt_entry->page_size = cpu_to_be32(mr->attr.page_shift - 12);
- mpt_entry->key = cpu_to_be32(key);
- mpt_entry->pd = cpu_to_be32(pd);
- memset(&mpt_entry->start, 0,
- sizeof *mpt_entry - offsetof(struct mthca_mpt_entry, start));
- mpt_entry->mtt_seg = cpu_to_be64(dev->mr_table.mtt_base + mtt_seg);
-
- if (0) {
- mthca_dbg(dev, "Dumping MPT entry %08x:\n", mr->ibmr.lkey);
- for (i = 0; i < sizeof (struct mthca_mpt_entry) / 4; ++i) {
- if (i % 4 == 0)
- printk("[%02x] ", i * 4);
- printk(" %08x", be32_to_cpu(((__be32 *) mpt_entry)[i]));
- if ((i + 1) % 4 == 0)
- printk("\n");
- }
- }
-
- err = mthca_SW2HW_MPT(dev, mailbox,
- key & (dev->limits.num_mpts - 1));
- if (err) {
- mthca_warn(dev, "SW2HW_MPT failed (%d)\n", err);
- goto err_out_mailbox_free;
- }
-
- mthca_free_mailbox(dev, mailbox);
- return 0;
-
-err_out_mailbox_free:
- mthca_free_mailbox(dev, mailbox);
-
-err_out_free_mtt:
- mthca_free_mtt(dev, mr->mtt);
-
-err_out_table:
- mthca_table_put(dev, dev->mr_table.mpt_table, key);
-
-err_out_mpt_free:
- mthca_free(&dev->mr_table.mpt_alloc, key);
- return err;
-}
-
-int mthca_free_fmr(struct mthca_dev *dev, struct mthca_fmr *fmr)
-{
- if (fmr->maps)
- return -EBUSY;
-
- mthca_free_region(dev, fmr->ibmr.lkey);
- mthca_free_mtt(dev, fmr->mtt);
-
- return 0;
-}
-
-static inline int mthca_check_fmr(struct mthca_fmr *fmr, u64 *page_list,
- int list_len, u64 iova)
-{
- int i, page_mask;
-
- if (list_len > fmr->attr.max_pages)
- return -EINVAL;
-
- page_mask = (1 << fmr->attr.page_shift) - 1;
-
- /* We are getting page lists, so va must be page aligned. */
- if (iova & page_mask)
- return -EINVAL;
-
- /* Trust the user not to pass misaligned data in page_list */
- if (0)
- for (i = 0; i < list_len; ++i) {
- if (page_list[i] & ~page_mask)
- return -EINVAL;
- }
-
- if (fmr->maps >= fmr->attr.max_maps)
- return -EINVAL;
-
- return 0;
-}
-
-
-int mthca_tavor_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
- int list_len, u64 iova)
-{
- struct mthca_fmr *fmr = to_mfmr(ibfmr);
- struct mthca_dev *dev = to_mdev(ibfmr->device);
- struct mthca_mpt_entry mpt_entry;
- u32 key;
- int i, err;
-
- err = mthca_check_fmr(fmr, page_list, list_len, iova);
- if (err)
- return err;
-
- ++fmr->maps;
-
- key = tavor_key_to_hw_index(fmr->ibmr.lkey);
- key += dev->limits.num_mpts;
- fmr->ibmr.lkey = fmr->ibmr.rkey = tavor_hw_index_to_key(key);
-
- writeb(MTHCA_MPT_STATUS_SW, fmr->mem.tavor.mpt);
-
- for (i = 0; i < list_len; ++i) {
- __be64 mtt_entry = cpu_to_be64(page_list[i] |
- MTHCA_MTT_FLAG_PRESENT);
- mthca_write64_raw(mtt_entry, fmr->mem.tavor.mtts + i);
- }
-
- mpt_entry.lkey = cpu_to_be32(key);
- mpt_entry.length = cpu_to_be64(list_len * (1ull << fmr->attr.page_shift));
- mpt_entry.start = cpu_to_be64(iova);
-
- __raw_writel((__force u32) mpt_entry.lkey, &fmr->mem.tavor.mpt->key);
- memcpy_toio(&fmr->mem.tavor.mpt->start, &mpt_entry.start,
- offsetof(struct mthca_mpt_entry, window_count) -
- offsetof(struct mthca_mpt_entry, start));
-
- writeb(MTHCA_MPT_STATUS_HW, fmr->mem.tavor.mpt);
-
- return 0;
-}
-
-int mthca_arbel_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
- int list_len, u64 iova)
-{
- struct mthca_fmr *fmr = to_mfmr(ibfmr);
- struct mthca_dev *dev = to_mdev(ibfmr->device);
- u32 key;
- int i, err;
-
- err = mthca_check_fmr(fmr, page_list, list_len, iova);
- if (err)
- return err;
-
- ++fmr->maps;
-
- key = arbel_key_to_hw_index(fmr->ibmr.lkey);
- if (dev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
- key += SINAI_FMR_KEY_INC;
- else
- key += dev->limits.num_mpts;
- fmr->ibmr.lkey = fmr->ibmr.rkey = arbel_hw_index_to_key(key);
-
- *(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_SW;
-
- wmb();
-
- dma_sync_single_for_cpu(&dev->pdev->dev, fmr->mem.arbel.dma_handle,
- list_len * sizeof(u64), DMA_TO_DEVICE);
-
- for (i = 0; i < list_len; ++i)
- fmr->mem.arbel.mtts[i] = cpu_to_be64(page_list[i] |
- MTHCA_MTT_FLAG_PRESENT);
-
- dma_sync_single_for_device(&dev->pdev->dev, fmr->mem.arbel.dma_handle,
- list_len * sizeof(u64), DMA_TO_DEVICE);
-
- fmr->mem.arbel.mpt->key = cpu_to_be32(key);
- fmr->mem.arbel.mpt->lkey = cpu_to_be32(key);
- fmr->mem.arbel.mpt->length = cpu_to_be64(list_len * (1ull << fmr->attr.page_shift));
- fmr->mem.arbel.mpt->start = cpu_to_be64(iova);
-
- wmb();
-
- *(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_HW;
-
- wmb();
-
- return 0;
-}
-
-void mthca_tavor_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr)
-{
- if (!fmr->maps)
- return;
-
- fmr->maps = 0;
-
- writeb(MTHCA_MPT_STATUS_SW, fmr->mem.tavor.mpt);
-}
-
-void mthca_arbel_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr)
-{
- if (!fmr->maps)
- return;
-
- fmr->maps = 0;
-
- *(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_SW;
-}
-
int mthca_init_mr_table(struct mthca_dev *dev)
{
phys_addr_t addr;
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 69a3e4f62fb1..9fa2f9164a47 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -118,16 +118,6 @@ static int mthca_query_device(struct ib_device *ibdev, struct ib_device_attr *pr
props->max_mcast_qp_attach = MTHCA_QP_PER_MGM;
props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
props->max_mcast_grp;
- /*
- * If Sinai memory key optimization is being used, then only
- * the 8-bit key portion will change. For other HCAs, the
- * unused index bits will also be used for FMR remapping.
- */
- if (mdev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
- props->max_map_per_fmr = 255;
- else
- props->max_map_per_fmr =
- (1 << (32 - ilog2(mdev->limits.num_mpts))) - 1;
err = 0;
out:
@@ -388,14 +378,15 @@ static void mthca_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
mthca_pd_free(to_mdev(pd->device), to_mpd(pd));
}
-static int mthca_ah_create(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr,
- u32 flags, struct ib_udata *udata)
+static int mthca_ah_create(struct ib_ah *ibah,
+ struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata)
{
struct mthca_ah *ah = to_mah(ibah);
- return mthca_create_ah(to_mdev(ibah->device), to_mpd(ibah->pd), ah_attr,
- ah);
+ return mthca_create_ah(to_mdev(ibah->device), to_mpd(ibah->pd),
+ init_attr->ah_attr, ah);
}
static void mthca_ah_destroy(struct ib_ah *ah, u32 flags)
@@ -957,69 +948,6 @@ static int mthca_dereg_mr(struct ib_mr *mr, struct ib_udata *udata)
return 0;
}
-static struct ib_fmr *mthca_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
- struct ib_fmr_attr *fmr_attr)
-{
- struct mthca_fmr *fmr;
- int err;
-
- fmr = kmalloc(sizeof *fmr, GFP_KERNEL);
- if (!fmr)
- return ERR_PTR(-ENOMEM);
-
- memcpy(&fmr->attr, fmr_attr, sizeof *fmr_attr);
- err = mthca_fmr_alloc(to_mdev(pd->device), to_mpd(pd)->pd_num,
- convert_access(mr_access_flags), fmr);
-
- if (err) {
- kfree(fmr);
- return ERR_PTR(err);
- }
-
- return &fmr->ibmr;
-}
-
-static int mthca_dealloc_fmr(struct ib_fmr *fmr)
-{
- struct mthca_fmr *mfmr = to_mfmr(fmr);
- int err;
-
- err = mthca_free_fmr(to_mdev(fmr->device), mfmr);
- if (err)
- return err;
-
- kfree(mfmr);
- return 0;
-}
-
-static int mthca_unmap_fmr(struct list_head *fmr_list)
-{
- struct ib_fmr *fmr;
- int err;
- struct mthca_dev *mdev = NULL;
-
- list_for_each_entry(fmr, fmr_list, list) {
- if (mdev && to_mdev(fmr->device) != mdev)
- return -EINVAL;
- mdev = to_mdev(fmr->device);
- }
-
- if (!mdev)
- return 0;
-
- if (mthca_is_memfree(mdev)) {
- list_for_each_entry(fmr, fmr_list, list)
- mthca_arbel_fmr_unmap(mdev, to_mfmr(fmr));
-
- wmb();
- } else
- list_for_each_entry(fmr, fmr_list, list)
- mthca_tavor_fmr_unmap(mdev, to_mfmr(fmr));
-
- err = mthca_SYNC_TPT(mdev);
- return err;
-}
-
static ssize_t hw_rev_show(struct device *device,
struct device_attribute *attr, char *buf)
{
@@ -1203,20 +1131,6 @@ static const struct ib_device_ops mthca_dev_tavor_srq_ops = {
INIT_RDMA_OBJ_SIZE(ib_srq, mthca_srq, ibsrq),
};
-static const struct ib_device_ops mthca_dev_arbel_fmr_ops = {
- .alloc_fmr = mthca_alloc_fmr,
- .dealloc_fmr = mthca_dealloc_fmr,
- .map_phys_fmr = mthca_arbel_map_phys_fmr,
- .unmap_fmr = mthca_unmap_fmr,
-};
-
-static const struct ib_device_ops mthca_dev_tavor_fmr_ops = {
- .alloc_fmr = mthca_alloc_fmr,
- .dealloc_fmr = mthca_dealloc_fmr,
- .map_phys_fmr = mthca_tavor_map_phys_fmr,
- .unmap_fmr = mthca_unmap_fmr,
-};
-
static const struct ib_device_ops mthca_dev_arbel_ops = {
.post_recv = mthca_arbel_post_receive,
.post_send = mthca_arbel_post_send,
@@ -1275,15 +1189,6 @@ int mthca_register_device(struct mthca_dev *dev)
&mthca_dev_tavor_srq_ops);
}
- if (dev->mthca_flags & MTHCA_FLAG_FMR) {
- if (mthca_is_memfree(dev))
- ib_set_device_ops(&dev->ib_dev,
- &mthca_dev_arbel_fmr_ops);
- else
- ib_set_device_ops(&dev->ib_dev,
- &mthca_dev_tavor_fmr_ops);
- }
-
ib_set_device_ops(&dev->ib_dev, &mthca_dev_ops);
if (mthca_is_memfree(dev))
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.h b/drivers/infiniband/hw/mthca/mthca_provider.h
index 596acc45569b..84c64bff0d92 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.h
+++ b/drivers/infiniband/hw/mthca/mthca_provider.h
@@ -76,24 +76,6 @@ struct mthca_mr {
struct mthca_mtt *mtt;
};
-struct mthca_fmr {
- struct ib_fmr ibmr;
- struct ib_fmr_attr attr;
- struct mthca_mtt *mtt;
- int maps;
- union {
- struct {
- struct mthca_mpt_entry __iomem *mpt;
- u64 __iomem *mtts;
- } tavor;
- struct {
- struct mthca_mpt_entry *mpt;
- __be64 *mtts;
- dma_addr_t dma_handle;
- } arbel;
- } mem;
-};
-
struct mthca_pd {
struct ib_pd ibpd;
u32 pd_num;
@@ -301,11 +283,6 @@ static inline struct mthca_ucontext *to_mucontext(struct ib_ucontext *ibucontext
return container_of(ibucontext, struct mthca_ucontext, ibucontext);
}
-static inline struct mthca_fmr *to_mfmr(struct ib_fmr *ibmr)
-{
- return container_of(ibmr, struct mthca_fmr, ibmr);
-}
-
static inline struct mthca_mr *to_mmr(struct ib_mr *ibmr)
{
return container_of(ibmr, struct mthca_mr, ibmr);
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h
index 7baedc74e39d..fcfe0e82197a 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma.h
@@ -98,7 +98,6 @@ struct ocrdma_dev_attr {
u64 max_mr_size;
u32 max_num_mr_pbl;
int max_mw;
- int max_fmr;
int max_map_per_fmr;
int max_pages_per_frmr;
u16 max_ord_per_qp;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
index 2b7f00ac41b0..6eea02b18968 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
@@ -155,7 +155,7 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
return status;
}
-int ocrdma_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr, u32 flags,
+int ocrdma_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
struct ib_udata *udata)
{
u32 *ahid_addr;
@@ -165,6 +165,7 @@ int ocrdma_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr, u32 flags,
u16 vlan_tag = 0xffff;
const struct ib_gid_attr *sgid_attr;
struct ocrdma_pd *pd = get_ocrdma_pd(ibah->pd);
+ struct rdma_ah_attr *attr = init_attr->ah_attr;
struct ocrdma_dev *dev = get_ocrdma_dev(ibah->device);
if ((attr->type != RDMA_AH_ATTR_TYPE_ROCE) ||
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
index 9780afcde780..8b73b3489f3a 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
@@ -51,7 +51,7 @@ enum {
OCRDMA_AH_L3_TYPE_SHIFT = 0x1D /* 29 bits */
};
-int ocrdma_create_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr, u32 flags,
+int ocrdma_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
struct ib_udata *udata);
void ocrdma_destroy_ah(struct ib_ah *ah, u32 flags);
int ocrdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index d82d3ec3649e..e07bf0b2209a 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -1190,7 +1190,6 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
attr->max_mr = rsp->max_mr;
attr->max_mr_size = ((u64)rsp->max_mr_size_hi << 32) |
rsp->max_mr_size_lo;
- attr->max_fmr = 0;
attr->max_pages_per_frmr = rsp->max_pages_per_frmr;
attr->max_num_mr_pbl = rsp->max_num_mr_pbl;
attr->max_cqe = rsp->max_cq_cqes_per_cq &
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 10e343894595..d11c74390a12 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -99,8 +99,6 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
attr->max_mw = dev->attr.max_mw;
attr->max_pd = dev->attr.max_pd;
attr->atomic_cap = 0;
- attr->max_fmr = 0;
- attr->max_map_per_fmr = 0;
attr->max_qp_rd_atom =
min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp);
attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp;
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index dcdc85a1ab25..ccaedfd53e49 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -632,7 +632,6 @@ static int qedr_set_device_attr(struct qedr_dev *dev)
attr->max_mr_size = qed_attr->max_mr_size;
attr->max_cqe = min_t(u64, qed_attr->max_cqe, QEDR_MAX_CQES);
attr->max_mw = qed_attr->max_mw;
- attr->max_fmr = qed_attr->max_fmr;
attr->max_mr_mw_fmr_pbl = qed_attr->max_mr_mw_fmr_pbl;
attr->max_mr_mw_fmr_size = qed_attr->max_mr_mw_fmr_size;
attr->max_pd = qed_attr->max_pd;
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
index 5488dbd59d3c..fdf90ecb2699 100644
--- a/drivers/infiniband/hw/qedr/qedr.h
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -103,7 +103,6 @@ struct qedr_device_attr {
u64 max_mr_size;
u32 max_cqe;
u32 max_mw;
- u32 max_fmr;
u32 max_mr_mw_fmr_pbl;
u64 max_mr_mw_fmr_size;
u32 max_pd;
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index a5bd3adaf90a..9b9e80266367 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -145,8 +145,6 @@ int qedr_query_device(struct ib_device *ibdev,
attr->max_mw = qattr->max_mw;
attr->max_pd = qattr->max_pd;
attr->atomic_cap = dev->atomic_cap;
- attr->max_fmr = qattr->max_fmr;
- attr->max_map_per_fmr = 16;
attr->max_qp_init_rd_atom =
1 << (fls(qattr->max_qp_req_rd_atomic_resc) - 1);
attr->max_qp_rd_atom =
@@ -2750,12 +2748,12 @@ int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
return 0;
}
-int qedr_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr, u32 flags,
+int qedr_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
struct ib_udata *udata)
{
struct qedr_ah *ah = get_qedr_ah(ibah);
- rdma_copy_ah_attr(&ah->attr, attr);
+ rdma_copy_ah_attr(&ah->attr, init_attr->ah_attr);
return 0;
}
diff --git a/drivers/infiniband/hw/qedr/verbs.h b/drivers/infiniband/hw/qedr/verbs.h
index 18027844eb87..5e02387e068d 100644
--- a/drivers/infiniband/hw/qedr/verbs.h
+++ b/drivers/infiniband/hw/qedr/verbs.h
@@ -70,7 +70,7 @@ int qedr_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);
void qedr_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata);
int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
const struct ib_recv_wr **bad_recv_wr);
-int qedr_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr, u32 flags,
+int qedr_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
struct ib_udata *udata);
void qedr_destroy_ah(struct ib_ah *ibah, u32 flags);
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index b0144229cf3b..ff87a67dd7b7 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -40,10 +40,10 @@
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/jiffies.h>
-#include <asm/pgtable.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/uio.h>
+#include <linux/pgtable.h>
#include <rdma/ib.h>
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 91d64dd71a8a..8bcbc884e5b6 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -2375,7 +2375,6 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
struct qib_devdata *dd = ppd->dd;
u64 val, guid, ibc;
unsigned long flags;
- int ret = 0;
/*
* SerDes model not in Pd, but still need to
@@ -2510,7 +2509,7 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
val | ERR_MASK_N(IBStatusChanged));
/* Always zero until we start messing with SerDes for real */
- return ret;
+ return 0;
}
/**
@@ -6875,7 +6874,7 @@ static int init_sdma_7322_regs(struct qib_pportdata *ppd)
struct qib_devdata *dd = ppd->dd;
unsigned lastbuf, erstbuf;
u64 senddmabufmask[3] = { 0 };
- int n, ret = 0;
+ int n;
qib_write_kreg_port(ppd, krp_senddmabase, ppd->sdma_descq_phys);
qib_sdma_7322_setlengen(ppd);
@@ -6904,7 +6903,7 @@ static int init_sdma_7322_regs(struct qib_pportdata *ppd)
qib_write_kreg_port(ppd, krp_senddmabufmask0, senddmabufmask[0]);
qib_write_kreg_port(ppd, krp_senddmabufmask1, senddmabufmask[1]);
qib_write_kreg_port(ppd, krp_senddmabufmask2, senddmabufmask[2]);
- return ret;
+ return 0;
}
/* sdma_lock must be held */
diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c
index 342e3172ca40..4c24e83f3175 100644
--- a/drivers/infiniband/hw/qib/qib_user_pages.c
+++ b/drivers/infiniband/hw/qib/qib_user_pages.c
@@ -106,18 +106,18 @@ int qib_get_user_pages(unsigned long start_page, size_t num_pages,
goto bail;
}
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
for (got = 0; got < num_pages; got += ret) {
ret = pin_user_pages(start_page + got * PAGE_SIZE,
num_pages - got,
FOLL_LONGTERM | FOLL_WRITE | FOLL_FORCE,
p + got, NULL);
if (ret < 0) {
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
goto bail_release;
}
}
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
return 0;
bail_release:
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index 7508abb6a0fa..7acf9ba5358a 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -1460,7 +1460,6 @@ static void qib_fill_device_attr(struct qib_devdata *dd)
rdi->dparms.props.max_cq = ib_qib_max_cqs;
rdi->dparms.props.max_cqe = ib_qib_max_cqes;
rdi->dparms.props.max_ah = ib_qib_max_ahs;
- rdi->dparms.props.max_map_per_fmr = 32767;
rdi->dparms.props.max_qp_rd_atom = QIB_MAX_RDMA_ATOMIC;
rdi->dparms.props.max_qp_init_rd_atom = 255;
rdi->dparms.props.max_srq = ib_qib_max_srqs;
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
index 71f82339446c..b8a77ce11590 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
@@ -322,7 +322,6 @@ int usnic_ib_query_device(struct ib_device *ibdev,
props->max_mcast_grp = 0;
props->max_mcast_qp_attach = 0;
props->max_total_mcast_qp_attach = 0;
- props->max_map_per_fmr = 0;
/* Owned by Userspace
* max_qp_wr, max_sge, max_sge_rd, max_cqe */
mutex_unlock(&us_ibdev->usdev_lock);
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c
index bd9f944b68fc..760b254ba42d 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom.c
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.c
@@ -123,7 +123,7 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
npages = PAGE_ALIGN(size + (addr & ~PAGE_MASK)) >> PAGE_SHIFT;
uiomr->owning_mm = mm = current->mm;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
locked = atomic64_add_return(npages, &current->mm->pinned_vm);
lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
@@ -187,7 +187,7 @@ out:
} else
mmgrab(uiomr->owning_mm);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
free_page((unsigned long) page_list);
return ret;
}
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
index faf7ecd7b3fa..ccbded2d26ce 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
@@ -509,9 +509,10 @@ void pvrdma_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
*
* @return: 0 on success, otherwise errno.
*/
-int pvrdma_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr,
- u32 flags, struct ib_udata *udata)
+int pvrdma_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata)
{
+ struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
struct pvrdma_dev *dev = to_vdev(ibah->device);
struct pvrdma_ah *ah = to_vah(ibah);
const struct ib_global_route *grh;
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
index e4a48f5c0c85..267702226f10 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
@@ -414,7 +414,7 @@ int pvrdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
void pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
int pvrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int pvrdma_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
-int pvrdma_create_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr, u32 flags,
+int pvrdma_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
struct ib_udata *udata);
void pvrdma_destroy_ah(struct ib_ah *ah, u32 flags);
diff --git a/drivers/infiniband/sw/rdmavt/ah.c b/drivers/infiniband/sw/rdmavt/ah.c
index ee02c6176007..40480add7dd3 100644
--- a/drivers/infiniband/sw/rdmavt/ah.c
+++ b/drivers/infiniband/sw/rdmavt/ah.c
@@ -98,14 +98,14 @@ EXPORT_SYMBOL(rvt_check_ah);
*
* Return: 0 on success
*/
-int rvt_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr,
- u32 create_flags, struct ib_udata *udata)
+int rvt_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata)
{
struct rvt_ah *ah = ibah_to_rvtah(ibah);
struct rvt_dev_info *dev = ib_to_rvt(ibah->device);
unsigned long flags;
- if (rvt_check_ah(ibah->device, ah_attr))
+ if (rvt_check_ah(ibah->device, init_attr->ah_attr))
return -EINVAL;
spin_lock_irqsave(&dev->n_ahs_lock, flags);
@@ -117,10 +117,11 @@ int rvt_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr,
dev->n_ahs_allocated++;
spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
- rdma_copy_ah_attr(&ah->attr, ah_attr);
+ rdma_copy_ah_attr(&ah->attr, init_attr->ah_attr);
if (dev->driver_f.notify_new_ah)
- dev->driver_f.notify_new_ah(ibah->device, ah_attr, ah);
+ dev->driver_f.notify_new_ah(ibah->device,
+ init_attr->ah_attr, ah);
return 0;
}
diff --git a/drivers/infiniband/sw/rdmavt/ah.h b/drivers/infiniband/sw/rdmavt/ah.h
index bbb4d3bdec4e..40b7123fec76 100644
--- a/drivers/infiniband/sw/rdmavt/ah.h
+++ b/drivers/infiniband/sw/rdmavt/ah.h
@@ -50,8 +50,8 @@
#include <rdma/rdma_vt.h>
-int rvt_create_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr,
- u32 create_flags, struct ib_udata *udata);
+int rvt_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata);
void rvt_destroy_ah(struct ib_ah *ibah, u32 destroy_flags);
int rvt_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
int rvt_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
diff --git a/drivers/infiniband/sw/rdmavt/mmap.c b/drivers/infiniband/sw/rdmavt/mmap.c
index 37853aa3bcf7..f5d0e33cf3d7 100644
--- a/drivers/infiniband/sw/rdmavt/mmap.c
+++ b/drivers/infiniband/sw/rdmavt/mmap.c
@@ -48,7 +48,6 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
-#include <asm/pgtable.h>
#include <rdma/uverbs_ioctl.h>
#include "mmap.h"
diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
index 72f6534fbb52..60864e5ca7cb 100644
--- a/drivers/infiniband/sw/rdmavt/mr.c
+++ b/drivers/infiniband/sw/rdmavt/mr.c
@@ -97,7 +97,6 @@ int rvt_driver_mr_init(struct rvt_dev_info *rdi)
RCU_INIT_POINTER(rdi->lkey_table.table[i], NULL);
rdi->dparms.props.max_mr = rdi->lkey_table.max;
- rdi->dparms.props.max_fmr = rdi->lkey_table.max;
return 0;
}
@@ -714,160 +713,6 @@ bail:
EXPORT_SYMBOL(rvt_invalidate_rkey);
/**
- * rvt_alloc_fmr - allocate a fast memory region
- * @pd: the protection domain for this memory region
- * @mr_access_flags: access flags for this memory region
- * @fmr_attr: fast memory region attributes
- *
- * Return: the memory region on success, otherwise returns an errno.
- */
-struct ib_fmr *rvt_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
- struct ib_fmr_attr *fmr_attr)
-{
- struct rvt_fmr *fmr;
- int m;
- struct ib_fmr *ret;
- int rval = -ENOMEM;
-
- /* Allocate struct plus pointers to first level page tables. */
- m = (fmr_attr->max_pages + RVT_SEGSZ - 1) / RVT_SEGSZ;
- fmr = kzalloc(struct_size(fmr, mr.map, m), GFP_KERNEL);
- if (!fmr)
- goto bail;
-
- rval = rvt_init_mregion(&fmr->mr, pd, fmr_attr->max_pages,
- PERCPU_REF_INIT_ATOMIC);
- if (rval)
- goto bail;
-
- /*
- * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey &
- * rkey.
- */
- rval = rvt_alloc_lkey(&fmr->mr, 0);
- if (rval)
- goto bail_mregion;
- fmr->ibfmr.rkey = fmr->mr.lkey;
- fmr->ibfmr.lkey = fmr->mr.lkey;
- /*
- * Resources are allocated but no valid mapping (RKEY can't be
- * used).
- */
- fmr->mr.access_flags = mr_access_flags;
- fmr->mr.max_segs = fmr_attr->max_pages;
- fmr->mr.page_shift = fmr_attr->page_shift;
-
- ret = &fmr->ibfmr;
-done:
- return ret;
-
-bail_mregion:
- rvt_deinit_mregion(&fmr->mr);
-bail:
- kfree(fmr);
- ret = ERR_PTR(rval);
- goto done;
-}
-
-/**
- * rvt_map_phys_fmr - set up a fast memory region
- * @ibfmr: the fast memory region to set up
- * @page_list: the list of pages to associate with the fast memory region
- * @list_len: the number of pages to associate with the fast memory region
- * @iova: the virtual address of the start of the fast memory region
- *
- * This may be called from interrupt context.
- *
- * Return: 0 on success
- */
-
-int rvt_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
- int list_len, u64 iova)
-{
- struct rvt_fmr *fmr = to_ifmr(ibfmr);
- struct rvt_lkey_table *rkt;
- unsigned long flags;
- int m, n;
- unsigned long i;
- u32 ps;
- struct rvt_dev_info *rdi = ib_to_rvt(ibfmr->device);
-
- i = atomic_long_read(&fmr->mr.refcount.count);
- if (i > 2)
- return -EBUSY;
-
- if (list_len > fmr->mr.max_segs)
- return -EINVAL;
-
- rkt = &rdi->lkey_table;
- spin_lock_irqsave(&rkt->lock, flags);
- fmr->mr.user_base = iova;
- fmr->mr.iova = iova;
- ps = 1 << fmr->mr.page_shift;
- fmr->mr.length = list_len * ps;
- m = 0;
- n = 0;
- for (i = 0; i < list_len; i++) {
- fmr->mr.map[m]->segs[n].vaddr = (void *)page_list[i];
- fmr->mr.map[m]->segs[n].length = ps;
- trace_rvt_mr_fmr_seg(&fmr->mr, m, n, (void *)page_list[i], ps);
- if (++n == RVT_SEGSZ) {
- m++;
- n = 0;
- }
- }
- spin_unlock_irqrestore(&rkt->lock, flags);
- return 0;
-}
-
-/**
- * rvt_unmap_fmr - unmap fast memory regions
- * @fmr_list: the list of fast memory regions to unmap
- *
- * Return: 0 on success.
- */
-int rvt_unmap_fmr(struct list_head *fmr_list)
-{
- struct rvt_fmr *fmr;
- struct rvt_lkey_table *rkt;
- unsigned long flags;
- struct rvt_dev_info *rdi;
-
- list_for_each_entry(fmr, fmr_list, ibfmr.list) {
- rdi = ib_to_rvt(fmr->ibfmr.device);
- rkt = &rdi->lkey_table;
- spin_lock_irqsave(&rkt->lock, flags);
- fmr->mr.user_base = 0;
- fmr->mr.iova = 0;
- fmr->mr.length = 0;
- spin_unlock_irqrestore(&rkt->lock, flags);
- }
- return 0;
-}
-
-/**
- * rvt_dealloc_fmr - deallocate a fast memory region
- * @ibfmr: the fast memory region to deallocate
- *
- * Return: 0 on success.
- */
-int rvt_dealloc_fmr(struct ib_fmr *ibfmr)
-{
- struct rvt_fmr *fmr = to_ifmr(ibfmr);
- int ret = 0;
-
- rvt_free_lkey(&fmr->mr);
- rvt_put_mr(&fmr->mr); /* will set completion if last */
- ret = rvt_check_refs(&fmr->mr, __func__);
- if (ret)
- goto out;
- rvt_deinit_mregion(&fmr->mr);
- kfree(fmr);
-out:
- return ret;
-}
-
-/**
* rvt_sge_adjacent - is isge compressible
* @last_sge: last outgoing SGE written
* @sge: SGE to check
diff --git a/drivers/infiniband/sw/rdmavt/mr.h b/drivers/infiniband/sw/rdmavt/mr.h
index 2c8d0752e8e3..780fc63af98b 100644
--- a/drivers/infiniband/sw/rdmavt/mr.h
+++ b/drivers/infiniband/sw/rdmavt/mr.h
@@ -49,10 +49,6 @@
*/
#include <rdma/rdma_vt.h>
-struct rvt_fmr {
- struct ib_fmr ibfmr;
- struct rvt_mregion mr; /* must be last */
-};
struct rvt_mr {
struct ib_mr ibmr;
@@ -60,11 +56,6 @@ struct rvt_mr {
struct rvt_mregion mr; /* must be last */
};
-static inline struct rvt_fmr *to_ifmr(struct ib_fmr *ibfmr)
-{
- return container_of(ibfmr, struct rvt_fmr, ibfmr);
-}
-
static inline struct rvt_mr *to_imr(struct ib_mr *ibmr)
{
return container_of(ibmr, struct rvt_mr, ibmr);
@@ -83,11 +74,5 @@ struct ib_mr *rvt_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
u32 max_num_sg, struct ib_udata *udata);
int rvt_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
int sg_nents, unsigned int *sg_offset);
-struct ib_fmr *rvt_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
- struct ib_fmr_attr *fmr_attr);
-int rvt_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
- int list_len, u64 iova);
-int rvt_unmap_fmr(struct list_head *fmr_list);
-int rvt_dealloc_fmr(struct ib_fmr *ibfmr);
#endif /* DEF_RVTMR_H */
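
[Editorial aside, not part of the patch] With the FMR verbs above removed, in-kernel
users register memory through fast registration work requests (FRWR) instead,
via ib_alloc_mr()/ib_map_mr_sg() and an IB_WR_REG_MR work request. A minimal
sketch of that path, assuming a connected QP and an already DMA-mapped
scatterlist; error handling, invalidation and completion processing are elided:

#include <rdma/ib_verbs.h>

/* Sketch only: FRWR registration replacing FMR-style mapping. */
static int demo_frwr_register(struct ib_pd *pd, struct ib_qp *qp,
			      struct scatterlist *sg, int sg_nents)
{
	struct ib_reg_wr reg_wr = {};
	struct ib_mr *mr;
	int n;

	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, sg_nents);
	if (IS_ERR(mr))
		return PTR_ERR(mr);

	/* Build the MR page list from the scatterlist. */
	n = ib_map_mr_sg(mr, sg, sg_nents, NULL, PAGE_SIZE);
	if (n < sg_nents) {
		ib_dereg_mr(mr);
		return n < 0 ? n : -EINVAL;
	}

	/* Refresh the key so a stale rkey cannot be replayed. */
	ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));

	reg_wr.wr.opcode = IB_WR_REG_MR;
	reg_wr.mr = mr;
	reg_wr.key = mr->rkey;
	reg_wr.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE;

	return ib_post_send(qp, &reg_wr.wr, NULL);
}
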
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 500a7ee04c44..511b72809e14 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2016 - 2019 Intel Corporation.
+ * Copyright(c) 2016 - 2020 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -525,15 +525,18 @@ static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
* @rdi: rvt device info structure
* @qpt: queue pair number table pointer
* @port_num: IB port number, 1 based, comes from core
+ * @exclude_prefix: prefix of special queue pair number being allocated
*
* Return: The queue pair number
*/
static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
- enum ib_qp_type type, u8 port_num)
+ enum ib_qp_type type, u8 port_num, u8 exclude_prefix)
{
u32 i, offset, max_scan, qpn;
struct rvt_qpn_map *map;
u32 ret;
+ u32 max_qpn = exclude_prefix == RVT_AIP_QP_PREFIX ?
+ RVT_AIP_QPN_MAX : RVT_QPN_MAX;
if (rdi->driver_f.alloc_qpn)
return rdi->driver_f.alloc_qpn(rdi, qpt, type, port_num);
@@ -553,7 +556,7 @@ static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
}
qpn = qpt->last + qpt->incr;
- if (qpn >= RVT_QPN_MAX)
+ if (qpn >= max_qpn)
qpn = qpt->incr | ((qpt->last & 1) ^ 1);
/* offset carries bit 0 */
offset = qpn & RVT_BITS_PER_PAGE_MASK;
@@ -987,6 +990,9 @@ static void rvt_free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
{
struct rvt_qpn_map *map;
+ if ((qpn & RVT_AIP_QP_PREFIX_MASK) == RVT_AIP_QP_BASE)
+ qpn &= RVT_AIP_QP_SUFFIX;
+
map = qpt->map + (qpn & RVT_QPN_MASK) / RVT_BITS_PER_PAGE;
if (map->page)
clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
@@ -1074,13 +1080,15 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device);
void *priv = NULL;
size_t sqsize;
+ u8 exclude_prefix = 0;
if (!rdi)
return ERR_PTR(-EINVAL);
if (init_attr->cap.max_send_sge > rdi->dparms.props.max_send_sge ||
init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr ||
- init_attr->create_flags)
+ (init_attr->create_flags &&
+ init_attr->create_flags != IB_QP_CREATE_NETDEV_USE))
return ERR_PTR(-EINVAL);
/* Check receive queue parameters if no SRQ is specified. */
@@ -1199,14 +1207,20 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
goto bail_driver_priv;
}
+ if (init_attr->create_flags & IB_QP_CREATE_NETDEV_USE)
+ exclude_prefix = RVT_AIP_QP_PREFIX;
+
err = alloc_qpn(rdi, &rdi->qp_dev->qpn_table,
init_attr->qp_type,
- init_attr->port_num);
+ init_attr->port_num,
+ exclude_prefix);
if (err < 0) {
ret = ERR_PTR(err);
goto bail_rq_wq;
}
qp->ibqp.qp_num = err;
+ if (init_attr->create_flags & IB_QP_CREATE_NETDEV_USE)
+ qp->ibqp.qp_num |= RVT_AIP_QP_BASE;
qp->port_num = init_attr->port_num;
rvt_init_qp(rdi, qp, init_attr->qp_type);
if (rdi->driver_f.qp_priv_init) {
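
[Editorial aside, not part of the patch] The accelerated-IP (AIP) handling added
above amounts to tagging netdev-use QP numbers with a fixed prefix at create
time and stripping it again before the QPN bitmap bit is cleared. The sketch
below uses made-up DEMO_* constants purely for illustration; the real
RVT_AIP_* values are defined in include/rdma/rdmavt_qp.h and are not
reproduced here.

#include <linux/types.h>

/* Sketch only: illustrative AIP prefix handling with made-up constants. */
#define DEMO_AIP_QP_PREFIX_MASK	0xFF0000	/* made-up value */
#define DEMO_AIP_QP_BASE	0x810000	/* made-up value */
#define DEMO_AIP_QP_SUFFIX	0x00FFFF	/* made-up value */

static u32 demo_tag_aip_qpn(u32 qpn)
{
	/* rvt_create_qp(): mark a netdev-use QP number */
	return qpn | DEMO_AIP_QP_BASE;
}

static u32 demo_untag_aip_qpn(u32 qpn)
{
	/* rvt_free_qpn(): strip the prefix before clearing the bitmap bit */
	if ((qpn & DEMO_AIP_QP_PREFIX_MASK) == DEMO_AIP_QP_BASE)
		qpn &= DEMO_AIP_QP_SUFFIX;
	return qpn;
}
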
diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c
index 72b031ab7092..f904bb34477a 100644
--- a/drivers/infiniband/sw/rdmavt/vt.c
+++ b/drivers/infiniband/sw/rdmavt/vt.c
@@ -378,7 +378,6 @@ enum {
static const struct ib_device_ops rvt_dev_ops = {
.uverbs_abi_ver = RVT_UVERBS_ABI_VERSION,
- .alloc_fmr = rvt_alloc_fmr,
.alloc_mr = rvt_alloc_mr,
.alloc_pd = rvt_alloc_pd,
.alloc_ucontext = rvt_alloc_ucontext,
@@ -387,7 +386,6 @@ static const struct ib_device_ops rvt_dev_ops = {
.create_cq = rvt_create_cq,
.create_qp = rvt_create_qp,
.create_srq = rvt_create_srq,
- .dealloc_fmr = rvt_dealloc_fmr,
.dealloc_pd = rvt_dealloc_pd,
.dealloc_ucontext = rvt_dealloc_ucontext,
.dereg_mr = rvt_dereg_mr,
@@ -399,7 +397,6 @@ static const struct ib_device_ops rvt_dev_ops = {
.get_dma_mr = rvt_get_dma_mr,
.get_port_immutable = rvt_get_port_immutable,
.map_mr_sg = rvt_map_mr_sg,
- .map_phys_fmr = rvt_map_phys_fmr,
.mmap = rvt_mmap,
.modify_ah = rvt_modify_ah,
.modify_device = rvt_modify_device,
@@ -420,7 +417,6 @@ static const struct ib_device_ops rvt_dev_ops = {
.reg_user_mr = rvt_reg_user_mr,
.req_notify_cq = rvt_req_notify_cq,
.resize_cq = rvt_resize_cq,
- .unmap_fmr = rvt_unmap_fmr,
INIT_RDMA_OBJ_SIZE(ib_ah, rvt_ah, ibah),
INIT_RDMA_OBJ_SIZE(ib_cq, rvt_cq, ibcq),
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index 4afdd2e20883..5642eefb4ba1 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -77,6 +77,7 @@ static void rxe_init_device_param(struct rxe_dev *rxe)
{
rxe->max_inline_data = RXE_MAX_INLINE_DATA;
+ rxe->attr.vendor_id = RXE_VENDOR_ID;
rxe->attr.max_mr_size = RXE_MAX_MR_SIZE;
rxe->attr.page_size_cap = RXE_PAGE_SIZE_CAP;
rxe->attr.max_qp = RXE_MAX_QP;
diff --git a/drivers/infiniband/sw/rxe/rxe_mmap.c b/drivers/infiniband/sw/rxe/rxe_mmap.c
index 6a413d73b95d..7887f623f62c 100644
--- a/drivers/infiniband/sw/rxe/rxe_mmap.c
+++ b/drivers/infiniband/sw/rxe/rxe_mmap.c
@@ -35,7 +35,6 @@
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/errno.h>
-#include <asm/pgtable.h>
#include <rdma/uverbs_ioctl.h>
#include "rxe.h"
diff --git a/drivers/infiniband/sw/rxe/rxe_param.h b/drivers/infiniband/sw/rxe/rxe_param.h
index f59616b02477..99e9d8ba9767 100644
--- a/drivers/infiniband/sw/rxe/rxe_param.h
+++ b/drivers/infiniband/sw/rxe/rxe_param.h
@@ -127,6 +127,9 @@ enum rxe_device_param {
/* Delay before calling arbiter timer */
RXE_NSEC_ARB_TIMER_DELAY = 200,
+
+ /* IBTA v1.4 A3.3.1 VENDOR INFORMATION section */
+ RXE_VENDOR_ID = 0XFFFFFF,
};
/* default/initial rxe port parameters */
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 9dd4bd7aea92..b8a22af724e8 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -195,15 +195,16 @@ static void rxe_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
rxe_drop_ref(pd);
}
-static int rxe_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr,
- u32 flags, struct ib_udata *udata)
+static int rxe_create_ah(struct ib_ah *ibah,
+ struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata)
{
int err;
struct rxe_dev *rxe = to_rdev(ibah->device);
struct rxe_ah *ah = to_rah(ibah);
- err = rxe_av_chk_attr(rxe, attr);
+ err = rxe_av_chk_attr(rxe, init_attr->ah_attr);
if (err)
return err;
@@ -211,7 +212,7 @@ static int rxe_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr,
if (err)
return err;
- rxe_init_av(attr, &ah->av);
+ rxe_init_av(init_attr->ah_attr, &ah->av);
return 0;
}
diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h
index af5e9f8c0fcd..e9753831ac3f 100644
--- a/drivers/infiniband/sw/siw/siw.h
+++ b/drivers/infiniband/sw/siw/siw.h
@@ -30,7 +30,6 @@
#define SIW_MAX_MR (SIW_MAX_QP * 10)
#define SIW_MAX_PD SIW_MAX_QP
#define SIW_MAX_MW 0 /* to be set if MW's are supported */
-#define SIW_MAX_FMR SIW_MAX_MR
#define SIW_MAX_SRQ SIW_MAX_QP
#define SIW_MAX_SRQ_WR (SIW_MAX_QP_WR * 10)
#define SIW_MAX_CONTEXT SIW_MAX_PD
@@ -59,7 +58,6 @@ struct siw_dev_cap {
int max_mr;
int max_pd;
int max_mw;
- int max_fmr;
int max_srq;
int max_srq_wr;
int max_srq_sge;
@@ -139,7 +137,7 @@ struct siw_pble {
struct siw_pbl {
unsigned int num_buf;
unsigned int max_buf;
- struct siw_pble pbe[1];
+ struct siw_pble pbe[];
};
/*
diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c
index 559e5fd3bad8..1662216be66d 100644
--- a/drivers/infiniband/sw/siw/siw_cm.c
+++ b/drivers/infiniband/sw/siw/siw_cm.c
@@ -947,16 +947,8 @@ static void siw_accept_newconn(struct siw_cep *cep)
siw_cep_get(new_cep);
new_s->sk->sk_user_data = new_cep;
- if (siw_tcp_nagle == false) {
- int val = 1;
-
- rv = kernel_setsockopt(new_s, SOL_TCP, TCP_NODELAY,
- (char *)&val, sizeof(val));
- if (rv) {
- siw_dbg_cep(cep, "setsockopt NODELAY error: %d\n", rv);
- goto error;
- }
- }
+ if (siw_tcp_nagle == false)
+ tcp_sock_set_nodelay(new_s->sk);
new_cep->state = SIW_EPSTATE_AWAIT_MPAREQ;
rv = siw_cm_queue_work(new_cep, SIW_CM_WORK_MPATIMEOUT);
@@ -1312,17 +1304,14 @@ static void siw_cm_llp_state_change(struct sock *sk)
static int kernel_bindconnect(struct socket *s, struct sockaddr *laddr,
struct sockaddr *raddr)
{
- int rv, flags = 0, s_val = 1;
+ int rv, flags = 0;
size_t size = laddr->sa_family == AF_INET ?
sizeof(struct sockaddr_in) : sizeof(struct sockaddr_in6);
/*
* Make address available again asap.
*/
- rv = kernel_setsockopt(s, SOL_SOCKET, SO_REUSEADDR, (char *)&s_val,
- sizeof(s_val));
- if (rv < 0)
- return rv;
+ sock_set_reuseaddr(s->sk);
rv = s->ops->bind(s, laddr, size);
if (rv < 0)
@@ -1389,16 +1378,8 @@ int siw_connect(struct iw_cm_id *id, struct iw_cm_conn_param *params)
siw_dbg_qp(qp, "kernel_bindconnect: error %d\n", rv);
goto error;
}
- if (siw_tcp_nagle == false) {
- int val = 1;
-
- rv = kernel_setsockopt(s, SOL_TCP, TCP_NODELAY, (char *)&val,
- sizeof(val));
- if (rv) {
- siw_dbg_qp(qp, "setsockopt NODELAY error: %d\n", rv);
- goto error;
- }
- }
+ if (siw_tcp_nagle == false)
+ tcp_sock_set_nodelay(s->sk);
cep = siw_cep_alloc(sdev);
if (!cep) {
rv = -ENOMEM;
@@ -1781,7 +1762,7 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
struct siw_cep *cep = NULL;
struct siw_device *sdev = to_siw_dev(id->device);
int addr_family = id->local_addr.ss_family;
- int rv = 0, s_val;
+ int rv = 0;
if (addr_family != AF_INET && addr_family != AF_INET6)
return -EAFNOSUPPORT;
@@ -1793,13 +1774,8 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
/*
* Allow binding local port when still in TIME_WAIT from last close.
*/
- s_val = 1;
- rv = kernel_setsockopt(s, SOL_SOCKET, SO_REUSEADDR, (char *)&s_val,
- sizeof(s_val));
- if (rv) {
- siw_dbg(id->device, "setsockopt error: %d\n", rv);
- goto error;
- }
+ sock_set_reuseaddr(s->sk);
+
if (addr_family == AF_INET) {
struct sockaddr_in *laddr = &to_sockaddr_in(id->local_addr);
diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
index 5cd40fb9e20c..a0b8cc643c5c 100644
--- a/drivers/infiniband/sw/siw/siw_main.c
+++ b/drivers/infiniband/sw/siw/siw_main.c
@@ -413,7 +413,6 @@ static struct siw_device *siw_device_create(struct net_device *netdev)
sdev->attrs.max_mr = SIW_MAX_MR;
sdev->attrs.max_pd = SIW_MAX_PD;
sdev->attrs.max_mw = SIW_MAX_MW;
- sdev->attrs.max_fmr = SIW_MAX_FMR;
sdev->attrs.max_srq = SIW_MAX_SRQ;
sdev->attrs.max_srq_wr = SIW_MAX_SRQ_WR;
sdev->attrs.max_srq_sge = SIW_MAX_SGE;
diff --git a/drivers/infiniband/sw/siw/siw_mem.c b/drivers/infiniband/sw/siw/siw_mem.c
index e2061dc0b043..34a910cf0edb 100644
--- a/drivers/infiniband/sw/siw/siw_mem.c
+++ b/drivers/infiniband/sw/siw/siw_mem.c
@@ -349,14 +349,11 @@ dma_addr_t siw_pbl_get_buffer(struct siw_pbl *pbl, u64 off, int *len, int *idx)
struct siw_pbl *siw_pbl_alloc(u32 num_buf)
{
struct siw_pbl *pbl;
- int buf_size = sizeof(*pbl);
if (num_buf == 0)
return ERR_PTR(-EINVAL);
- buf_size += ((num_buf - 1) * sizeof(struct siw_pble));
-
- pbl = kzalloc(buf_size, GFP_KERNEL);
+ pbl = kzalloc(struct_size(pbl, pbe, num_buf), GFP_KERNEL);
if (!pbl)
return ERR_PTR(-ENOMEM);
@@ -397,7 +394,7 @@ struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable)
if (!writable)
foll_flags |= FOLL_FORCE;
- down_read(&mm_s->mmap_sem);
+ mmap_read_lock(mm_s);
mlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
@@ -441,7 +438,7 @@ struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable)
num_pages -= got;
}
out_sem_up:
- up_read(&mm_s->mmap_sem);
+ mmap_read_unlock(mm_s);
if (rv > 0)
return umem;
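
[Editorial aside, not part of the patch] Two mechanical conversions appear in
this file: the open-coded trailing-array size computation becomes
struct_size(), and the old mmap_sem calls become the mmap_read_lock()/
mmap_read_unlock() wrappers. For the former, struct_size(pbl, pbe, num_buf)
expands to an overflow-checked sizeof(*pbl) + num_buf * sizeof(pbl->pbe[0]),
pairing with the flexible array member introduced in siw.h. A minimal sketch,
assuming the siw_pbl definition from siw.h:

#include <linux/err.h>
#include <linux/overflow.h>
#include <linux/slab.h>
#include "siw.h"

/* Sketch only: what the struct_size() conversion buys. */
static struct siw_pbl *demo_pbl_alloc(u32 num_buf)
{
	struct siw_pbl *pbl;

	/*
	 * sizeof(*pbl) + num_buf * sizeof(pbl->pbe[0]); on overflow the
	 * result saturates to SIZE_MAX, so kzalloc() fails cleanly instead
	 * of allocating a too-small buffer.
	 */
	pbl = kzalloc(struct_size(pbl, pbe, num_buf), GFP_KERNEL);
	if (!pbl)
		return ERR_PTR(-ENOMEM);
	pbl->max_buf = num_buf;
	return pbl;
}
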
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index aeb842bc7a1e..987e2ba05dbc 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -136,7 +136,6 @@ int siw_query_device(struct ib_device *base_dev, struct ib_device_attr *attr,
attr->max_cq = sdev->attrs.max_cq;
attr->max_cqe = sdev->attrs.max_cqe;
attr->max_fast_reg_page_list_len = SIW_MAX_SGE_PBL;
- attr->max_fmr = sdev->attrs.max_fmr;
attr->max_mr = sdev->attrs.max_mr;
attr->max_mw = sdev->attrs.max_mw;
attr->max_mr_size = ~0ull;
diff --git a/drivers/infiniband/ulp/Makefile b/drivers/infiniband/ulp/Makefile
index 437813c7b481..4d0004b58377 100644
--- a/drivers/infiniband/ulp/Makefile
+++ b/drivers/infiniband/ulp/Makefile
@@ -5,3 +5,4 @@ obj-$(CONFIG_INFINIBAND_SRPT) += srpt/
obj-$(CONFIG_INFINIBAND_ISER) += iser/
obj-$(CONFIG_INFINIBAND_ISERT) += isert/
obj-$(CONFIG_INFINIBAND_OPA_VNIC) += opa_vnic/
+obj-$(CONFIG_INFINIBAND_RTRS) += rtrs/
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index ceec24d45185..3cfb682b91b0 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -86,7 +86,7 @@ struct workqueue_struct *ipoib_workqueue;
struct ib_sa_client ipoib_sa_client;
-static void ipoib_add_one(struct ib_device *device);
+static int ipoib_add_one(struct ib_device *device);
static void ipoib_remove_one(struct ib_device *device, void *client_data);
static void ipoib_neigh_reclaim(struct rcu_head *rp);
static struct net_device *ipoib_get_net_dev_by_params(
@@ -479,9 +479,6 @@ static struct net_device *ipoib_get_net_dev_by_params(
if (ret)
return NULL;
- if (!dev_list)
- return NULL;
-
/* See if we can find a unique device matching the L2 parameters */
matches = __ipoib_get_net_dev_by_params(dev_list, port, pkey_index,
gid, NULL, &net_dev);
@@ -529,6 +526,7 @@ int ipoib_set_mode(struct net_device *dev, const char *buf)
"will cause multicast packet drops\n");
netdev_update_features(dev);
dev_set_mtu(dev, ipoib_cm_max_mtu(dev));
+ netif_set_real_num_tx_queues(dev, 1);
rtnl_unlock();
priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM;
@@ -540,6 +538,7 @@ int ipoib_set_mode(struct net_device *dev, const char *buf)
clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
netdev_update_features(dev);
dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu));
+ netif_set_real_num_tx_queues(dev, dev->num_tx_queues);
rtnl_unlock();
ipoib_flush_paths(dev);
return (!rtnl_trylock()) ? -EBUSY : 0;
@@ -1860,7 +1859,7 @@ static int ipoib_parent_init(struct net_device *ndev)
priv->port);
return result;
}
- priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
+ priv->max_ib_mtu = rdma_mtu_from_attr(priv->ca, priv->port, &attr);
result = ib_query_pkey(priv->ca, priv->port, 0, &priv->pkey);
if (result) {
@@ -1901,6 +1900,7 @@ static int ipoib_ndo_init(struct net_device *ndev)
{
struct ipoib_dev_priv *priv = ipoib_priv(ndev);
int rc;
+ struct rdma_netdev *rn = netdev_priv(ndev);
if (priv->parent) {
ipoib_child_init(ndev);
@@ -1913,6 +1913,7 @@ static int ipoib_ndo_init(struct net_device *ndev)
/* MTU will be reset when mcast join happens */
ndev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
priv->mcast_mtu = priv->admin_mtu = ndev->mtu;
+ rn->mtu = priv->mcast_mtu;
ndev->max_mtu = IPOIB_CM_MTU;
ndev->neigh_priv_len = sizeof(struct ipoib_neigh);
@@ -2074,9 +2075,17 @@ static const struct net_device_ops ipoib_netdev_ops_vf = {
.ndo_do_ioctl = ipoib_ioctl,
};
+static const struct net_device_ops ipoib_netdev_default_pf = {
+ .ndo_init = ipoib_dev_init_default,
+ .ndo_uninit = ipoib_dev_uninit_default,
+ .ndo_open = ipoib_ib_dev_open_default,
+ .ndo_stop = ipoib_ib_dev_stop_default,
+};
+
void ipoib_setup_common(struct net_device *dev)
{
dev->header_ops = &ipoib_header_ops;
+ dev->netdev_ops = &ipoib_netdev_default_pf;
ipoib_set_ethtool_ops(dev);
@@ -2126,13 +2135,6 @@ static void ipoib_build_priv(struct net_device *dev)
INIT_DELAYED_WORK(&priv->neigh_reap_task, ipoib_reap_neigh);
}
-static const struct net_device_ops ipoib_netdev_default_pf = {
- .ndo_init = ipoib_dev_init_default,
- .ndo_uninit = ipoib_dev_uninit_default,
- .ndo_open = ipoib_ib_dev_open_default,
- .ndo_stop = ipoib_ib_dev_stop_default,
-};
-
static struct net_device *ipoib_alloc_netdev(struct ib_device *hca, u8 port,
const char *name)
{
@@ -2170,7 +2172,6 @@ int ipoib_intf_init(struct ib_device *hca, u8 port, const char *name,
if (rc != -EOPNOTSUPP)
goto out;
- dev->netdev_ops = &ipoib_netdev_default_pf;
rn->send = ipoib_send;
rn->attach_mcast = ipoib_mcast_attach;
rn->detach_mcast = ipoib_mcast_detach;
@@ -2516,7 +2517,7 @@ sysfs_failed:
return ERR_PTR(-ENOMEM);
}
-static void ipoib_add_one(struct ib_device *device)
+static int ipoib_add_one(struct ib_device *device)
{
struct list_head *dev_list;
struct net_device *dev;
@@ -2526,7 +2527,7 @@ static void ipoib_add_one(struct ib_device *device)
dev_list = kmalloc(sizeof(*dev_list), GFP_KERNEL);
if (!dev_list)
- return;
+ return -ENOMEM;
INIT_LIST_HEAD(dev_list);
@@ -2543,10 +2544,11 @@ static void ipoib_add_one(struct ib_device *device)
if (!count) {
kfree(dev_list);
- return;
+ return -EOPNOTSUPP;
}
ib_set_client_data(device, &ipoib_client, dev_list);
+ return 0;
}
static void ipoib_remove_one(struct ib_device *device, void *client_data)
@@ -2554,9 +2556,6 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data)
struct ipoib_dev_priv *priv, *tmp, *cpriv, *tcpriv;
struct list_head *dev_list = client_data;
- if (!dev_list)
- return;
-
list_for_each_entry_safe(priv, tmp, dev_list, list) {
LIST_HEAD(head);
ipoib_parent_unregister_pre(priv->dev);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index b9e9562f5034..9bfa514473d5 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -135,12 +135,11 @@ static void ipoib_mcast_free(struct ipoib_mcast *mcast)
kfree(mcast);
}
-static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev,
- int can_sleep)
+static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev)
{
struct ipoib_mcast *mcast;
- mcast = kzalloc(sizeof(*mcast), can_sleep ? GFP_KERNEL : GFP_ATOMIC);
+ mcast = kzalloc(sizeof(*mcast), GFP_ATOMIC);
if (!mcast)
return NULL;
@@ -218,6 +217,7 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
struct rdma_ah_attr av;
int ret;
int set_qkey = 0;
+ int mtu;
mcast->mcmember = *mcmember;
@@ -240,13 +240,12 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
priv->broadcast->mcmember.flow_label = mcmember->flow_label;
priv->broadcast->mcmember.hop_limit = mcmember->hop_limit;
/* assume if the admin and the mcast are the same both can be changed */
+ mtu = rdma_mtu_enum_to_int(priv->ca, priv->port,
+ priv->broadcast->mcmember.mtu);
if (priv->mcast_mtu == priv->admin_mtu)
- priv->admin_mtu =
- priv->mcast_mtu =
- IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu));
- else
- priv->mcast_mtu =
- IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu));
+ priv->admin_mtu = IPOIB_UD_MTU(mtu);
+ priv->mcast_mtu = IPOIB_UD_MTU(mtu);
+ rn->mtu = priv->mcast_mtu;
priv->qkey = be32_to_cpu(priv->broadcast->mcmember.qkey);
spin_unlock_irq(&priv->lock);
@@ -599,7 +598,7 @@ void ipoib_mcast_join_task(struct work_struct *work)
if (!priv->broadcast) {
struct ipoib_mcast *broadcast;
- broadcast = ipoib_mcast_alloc(dev, 0);
+ broadcast = ipoib_mcast_alloc(dev);
if (!broadcast) {
ipoib_warn(priv, "failed to allocate broadcast group\n");
/*
@@ -782,7 +781,7 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
ipoib_dbg_mcast(priv, "setting up send only multicast group for %pI6\n",
mgid);
- mcast = ipoib_mcast_alloc(dev, 0);
+ mcast = ipoib_mcast_alloc(dev);
if (!mcast) {
ipoib_warn(priv, "unable to allocate memory "
"for multicast structure\n");
@@ -936,7 +935,7 @@ void ipoib_mcast_restart_task(struct work_struct *work)
ipoib_dbg_mcast(priv, "adding multicast entry for mgid %pI6\n",
mgid.raw);
- nmcast = ipoib_mcast_alloc(dev, 0);
+ nmcast = ipoib_mcast_alloc(dev);
if (!nmcast) {
ipoib_warn(priv, "unable to allocate memory for multicast structure\n");
continue;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index b69304d28f06..587252fd6f57 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -206,6 +206,9 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
if (priv->hca_caps & IB_DEVICE_MANAGED_FLOW_STEERING)
init_attr.create_flags |= IB_QP_CREATE_NETIF_QP;
+ if (priv->hca_caps & IB_DEVICE_RDMA_NETDEV_OPA)
+ init_attr.create_flags |= IB_QP_CREATE_NETDEV_USE;
+
priv->qp = ib_create_qp(priv->pd, &init_attr);
if (IS_ERR(priv->qp)) {
pr_warn("%s: failed to create QP\n", ca->name);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index 8ac8e18fbe0c..30865605e098 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -97,6 +97,7 @@ int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv,
{
struct net_device *ndev = priv->dev;
int result;
+ struct rdma_netdev *rn = netdev_priv(ndev);
ASSERT_RTNL();
@@ -117,6 +118,8 @@ int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv,
goto out_early;
}
+ rn->mtu = priv->mcast_mtu;
+
priv->parent = ppriv->dev;
priv->pkey = pkey;
priv->child_type = type;
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 029c00163442..1d77c7f42e38 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -65,7 +65,6 @@
#include <linux/in6.h>
#include <rdma/ib_verbs.h>
-#include <rdma/ib_fmr_pool.h>
#include <rdma/rdma_cm.h>
#define DRV_NAME "iser"
@@ -313,33 +312,6 @@ struct iser_comp {
};
/**
- * struct iser_reg_ops - Memory registration operations
- * per-device registration schemes
- *
- * @alloc_reg_res: Allocate registration resources
- * @free_reg_res: Free registration resources
- * @reg_mem: Register memory buffers
- * @unreg_mem: Un-register memory buffers
- * @reg_desc_get: Get a registration descriptor for pool
- * @reg_desc_put: Get a registration descriptor to pool
- */
-struct iser_reg_ops {
- int (*alloc_reg_res)(struct ib_conn *ib_conn,
- unsigned cmds_max,
- unsigned int size);
- void (*free_reg_res)(struct ib_conn *ib_conn);
- int (*reg_mem)(struct iscsi_iser_task *iser_task,
- struct iser_data_buf *mem,
- struct iser_reg_resources *rsc,
- struct iser_mem_reg *reg);
- void (*unreg_mem)(struct iscsi_iser_task *iser_task,
- enum iser_data_dir cmd_dir);
- struct iser_fr_desc * (*reg_desc_get)(struct ib_conn *ib_conn);
- void (*reg_desc_put)(struct ib_conn *ib_conn,
- struct iser_fr_desc *desc);
-};
-
-/**
* struct iser_device - iSER device handle
*
* @ib_device: RDMA device
@@ -351,8 +323,6 @@ struct iser_reg_ops {
* @comps_used: Number of completion contexts used, Min between online
* cpus and device max completion vectors
* @comps: Dinamically allocated array of completion handlers
- * @reg_ops: Registration ops
- * @remote_inv_sup: Remote invalidate is supported on this device
*/
struct iser_device {
struct ib_device *ib_device;
@@ -362,26 +332,18 @@ struct iser_device {
int refcount;
int comps_used;
struct iser_comp *comps;
- const struct iser_reg_ops *reg_ops;
- bool remote_inv_sup;
};
/**
* struct iser_reg_resources - Fast registration resources
*
* @mr: memory region
- * @fmr_pool: pool of fmrs
* @sig_mr: signature memory region
- * @page_vec: fast reg page list used by fmr pool
* @mr_valid: is mr valid indicator
*/
struct iser_reg_resources {
- union {
- struct ib_mr *mr;
- struct ib_fmr_pool *fmr_pool;
- };
+ struct ib_mr *mr;
struct ib_mr *sig_mr;
- struct iser_page_vec *page_vec;
u8 mr_valid:1;
};
@@ -403,7 +365,7 @@ struct iser_fr_desc {
* struct iser_fr_pool - connection fast registration pool
*
* @list: list of fastreg descriptors
- * @lock: protects fmr/fastreg pool
+ * @lock: protects fastreg pool
* @size: size of the pool
*/
struct iser_fr_pool {
@@ -518,12 +480,6 @@ struct iscsi_iser_task {
struct iser_data_buf prot[ISER_DIRS_NUM];
};
-struct iser_page_vec {
- u64 *pages;
- int npages;
- struct ib_mr fake_mr;
-};
-
/**
* struct iser_global - iSER global context
*
@@ -548,8 +504,6 @@ extern int iser_pi_guard;
extern unsigned int iser_max_sectors;
extern bool iser_always_reg;
-int iser_assign_reg_ops(struct iser_device *device);
-
int iser_send_control(struct iscsi_conn *conn,
struct iscsi_task *task);
@@ -591,22 +545,17 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
struct iser_data_buf *mem,
enum iser_data_dir cmd_dir);
-int iser_reg_rdma_mem(struct iscsi_iser_task *task,
- enum iser_data_dir dir,
- bool all_imm);
-void iser_unreg_rdma_mem(struct iscsi_iser_task *task,
- enum iser_data_dir dir);
+int iser_reg_mem_fastreg(struct iscsi_iser_task *task,
+ enum iser_data_dir dir,
+ bool all_imm);
+void iser_unreg_mem_fastreg(struct iscsi_iser_task *task,
+ enum iser_data_dir dir);
int iser_connect(struct iser_conn *iser_conn,
struct sockaddr *src_addr,
struct sockaddr *dst_addr,
int non_blocking);
-void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task,
- enum iser_data_dir cmd_dir);
-void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
- enum iser_data_dir cmd_dir);
-
int iser_post_recvl(struct iser_conn *iser_conn);
int iser_post_recvm(struct iser_conn *iser_conn, int count);
int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
@@ -625,26 +574,12 @@ int iser_initialize_task_headers(struct iscsi_task *task,
struct iser_tx_desc *tx_desc);
int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
struct iscsi_session *session);
-int iser_alloc_fmr_pool(struct ib_conn *ib_conn,
- unsigned cmds_max,
- unsigned int size);
-void iser_free_fmr_pool(struct ib_conn *ib_conn);
int iser_alloc_fastreg_pool(struct ib_conn *ib_conn,
unsigned cmds_max,
unsigned int size);
void iser_free_fastreg_pool(struct ib_conn *ib_conn);
u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
enum iser_data_dir cmd_dir, sector_t *sector);
-struct iser_fr_desc *
-iser_reg_desc_get_fr(struct ib_conn *ib_conn);
-void
-iser_reg_desc_put_fr(struct ib_conn *ib_conn,
- struct iser_fr_desc *desc);
-struct iser_fr_desc *
-iser_reg_desc_get_fmr(struct ib_conn *ib_conn);
-void
-iser_reg_desc_put_fmr(struct ib_conn *ib_conn,
- struct iser_fr_desc *desc);
static inline struct iser_conn *
to_iser_conn(struct ib_conn *ib_conn)
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 4a7045bb0831..27a6f75a9912 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -72,7 +72,7 @@ static int iser_prepare_read_cmd(struct iscsi_task *task)
return err;
}
- err = iser_reg_rdma_mem(iser_task, ISER_DIR_IN, false);
+ err = iser_reg_mem_fastreg(iser_task, ISER_DIR_IN, false);
if (err) {
iser_err("Failed to set up Data-IN RDMA\n");
return err;
@@ -126,8 +126,8 @@ iser_prepare_write_cmd(struct iscsi_task *task,
return err;
}
- err = iser_reg_rdma_mem(iser_task, ISER_DIR_OUT,
- buf_out->data_len == imm_sz);
+ err = iser_reg_mem_fastreg(iser_task, ISER_DIR_OUT,
+ buf_out->data_len == imm_sz);
if (err != 0) {
iser_err("Failed to register write cmd RDMA mem\n");
return err;
@@ -250,8 +250,8 @@ int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
iser_conn->qp_max_recv_dtos_mask = session->cmds_max - 1; /* cmds_max is 2^N */
iser_conn->min_posted_rx = iser_conn->qp_max_recv_dtos >> 2;
- if (device->reg_ops->alloc_reg_res(ib_conn, session->scsi_cmds_max,
- iser_conn->pages_per_mr))
+ if (iser_alloc_fastreg_pool(ib_conn, session->scsi_cmds_max,
+ iser_conn->pages_per_mr))
goto create_rdma_reg_res_failed;
if (iser_alloc_login_buf(iser_conn))
@@ -293,7 +293,7 @@ rx_desc_dma_map_failed:
rx_desc_alloc_fail:
iser_free_login_buf(iser_conn);
alloc_login_buf_fail:
- device->reg_ops->free_reg_res(ib_conn);
+ iser_free_fastreg_pool(ib_conn);
create_rdma_reg_res_failed:
iser_err("failed allocating rx descriptors / data buffers\n");
return -ENOMEM;
@@ -306,8 +306,7 @@ void iser_free_rx_descriptors(struct iser_conn *iser_conn)
struct ib_conn *ib_conn = &iser_conn->ib_conn;
struct iser_device *device = ib_conn->device;
- if (device->reg_ops->free_reg_res)
- device->reg_ops->free_reg_res(ib_conn);
+ iser_free_fastreg_pool(ib_conn);
rx_desc = iser_conn->rx_descs;
for (i = 0; i < iser_conn->qp_max_recv_dtos; i++, rx_desc++)
@@ -768,7 +767,7 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
int prot_count = scsi_prot_sg_count(iser_task->sc);
if (iser_task->dir[ISER_DIR_IN]) {
- iser_unreg_rdma_mem(iser_task, ISER_DIR_IN);
+ iser_unreg_mem_fastreg(iser_task, ISER_DIR_IN);
iser_dma_unmap_task_data(iser_task,
&iser_task->data[ISER_DIR_IN],
DMA_FROM_DEVICE);
@@ -779,7 +778,7 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
}
if (iser_task->dir[ISER_DIR_OUT]) {
- iser_unreg_rdma_mem(iser_task, ISER_DIR_OUT);
+ iser_unreg_mem_fastreg(iser_task, ISER_DIR_OUT);
iser_dma_unmap_task_data(iser_task,
&iser_task->data[ISER_DIR_OUT],
DMA_TO_DEVICE);
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 999ef7cdd05e..d4e057fac219 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -38,62 +38,13 @@
#include <linux/scatterlist.h>
#include "iscsi_iser.h"
-static
-int iser_fast_reg_fmr(struct iscsi_iser_task *iser_task,
- struct iser_data_buf *mem,
- struct iser_reg_resources *rsc,
- struct iser_mem_reg *mem_reg);
-static
-int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
- struct iser_data_buf *mem,
- struct iser_reg_resources *rsc,
- struct iser_mem_reg *mem_reg);
-
-static const struct iser_reg_ops fastreg_ops = {
- .alloc_reg_res = iser_alloc_fastreg_pool,
- .free_reg_res = iser_free_fastreg_pool,
- .reg_mem = iser_fast_reg_mr,
- .unreg_mem = iser_unreg_mem_fastreg,
- .reg_desc_get = iser_reg_desc_get_fr,
- .reg_desc_put = iser_reg_desc_put_fr,
-};
-
-static const struct iser_reg_ops fmr_ops = {
- .alloc_reg_res = iser_alloc_fmr_pool,
- .free_reg_res = iser_free_fmr_pool,
- .reg_mem = iser_fast_reg_fmr,
- .unreg_mem = iser_unreg_mem_fmr,
- .reg_desc_get = iser_reg_desc_get_fmr,
- .reg_desc_put = iser_reg_desc_put_fmr,
-};
void iser_reg_comp(struct ib_cq *cq, struct ib_wc *wc)
{
iser_err_comp(wc, "memreg");
}
-int iser_assign_reg_ops(struct iser_device *device)
-{
- struct ib_device *ib_dev = device->ib_device;
-
- /* Assign function handles - based on FMR support */
- if (ib_dev->ops.alloc_fmr && ib_dev->ops.dealloc_fmr &&
- ib_dev->ops.map_phys_fmr && ib_dev->ops.unmap_fmr) {
- iser_info("FMR supported, using FMR for registration\n");
- device->reg_ops = &fmr_ops;
- } else if (ib_dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
- iser_info("FastReg supported, using FastReg for registration\n");
- device->reg_ops = &fastreg_ops;
- device->remote_inv_sup = iser_always_reg;
- } else {
- iser_err("IB device does not support FMRs nor FastRegs, can't register memory\n");
- return -1;
- }
-
- return 0;
-}
-
-struct iser_fr_desc *
+static struct iser_fr_desc *
iser_reg_desc_get_fr(struct ib_conn *ib_conn)
{
struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
@@ -109,7 +60,7 @@ iser_reg_desc_get_fr(struct ib_conn *ib_conn)
return desc;
}
-void
+static void
iser_reg_desc_put_fr(struct ib_conn *ib_conn,
struct iser_fr_desc *desc)
{
@@ -121,44 +72,6 @@ iser_reg_desc_put_fr(struct ib_conn *ib_conn,
spin_unlock_irqrestore(&fr_pool->lock, flags);
}
-struct iser_fr_desc *
-iser_reg_desc_get_fmr(struct ib_conn *ib_conn)
-{
- struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
-
- return list_first_entry(&fr_pool->list,
- struct iser_fr_desc, list);
-}
-
-void
-iser_reg_desc_put_fmr(struct ib_conn *ib_conn,
- struct iser_fr_desc *desc)
-{
-}
-
-static void iser_data_buf_dump(struct iser_data_buf *data,
- struct ib_device *ibdev)
-{
- struct scatterlist *sg;
- int i;
-
- for_each_sg(data->sg, sg, data->dma_nents, i)
- iser_dbg("sg[%d] dma_addr:0x%lX page:0x%p "
- "off:0x%x sz:0x%x dma_len:0x%x\n",
- i, (unsigned long)sg_dma_address(sg),
- sg_page(sg), sg->offset, sg->length, sg_dma_len(sg));
-}
-
-static void iser_dump_page_vec(struct iser_page_vec *page_vec)
-{
- int i;
-
- iser_err("page vec npages %d data length %lld\n",
- page_vec->npages, page_vec->fake_mr.length);
- for (i = 0; i < page_vec->npages; i++)
- iser_err("vec[%d]: %llx\n", i, page_vec->pages[i]);
-}
-
int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
struct iser_data_buf *data,
enum iser_data_dir iser_dir,
@@ -213,84 +126,9 @@ iser_reg_dma(struct iser_device *device, struct iser_data_buf *mem,
return 0;
}
-static int iser_set_page(struct ib_mr *mr, u64 addr)
-{
- struct iser_page_vec *page_vec =
- container_of(mr, struct iser_page_vec, fake_mr);
-
- page_vec->pages[page_vec->npages++] = addr;
-
- return 0;
-}
-
-static
-int iser_fast_reg_fmr(struct iscsi_iser_task *iser_task,
- struct iser_data_buf *mem,
- struct iser_reg_resources *rsc,
- struct iser_mem_reg *reg)
-{
- struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
- struct iser_device *device = ib_conn->device;
- struct iser_page_vec *page_vec = rsc->page_vec;
- struct ib_fmr_pool *fmr_pool = rsc->fmr_pool;
- struct ib_pool_fmr *fmr;
- int ret, plen;
-
- page_vec->npages = 0;
- page_vec->fake_mr.page_size = SZ_4K;
- plen = ib_sg_to_pages(&page_vec->fake_mr, mem->sg,
- mem->dma_nents, NULL, iser_set_page);
- if (unlikely(plen < mem->dma_nents)) {
- iser_err("page vec too short to hold this SG\n");
- iser_data_buf_dump(mem, device->ib_device);
- iser_dump_page_vec(page_vec);
- return -EINVAL;
- }
-
- fmr = ib_fmr_pool_map_phys(fmr_pool, page_vec->pages,
- page_vec->npages, page_vec->pages[0]);
- if (IS_ERR(fmr)) {
- ret = PTR_ERR(fmr);
- iser_err("ib_fmr_pool_map_phys failed: %d\n", ret);
- return ret;
- }
-
- reg->sge.lkey = fmr->fmr->lkey;
- reg->rkey = fmr->fmr->rkey;
- reg->sge.addr = page_vec->fake_mr.iova;
- reg->sge.length = page_vec->fake_mr.length;
- reg->mem_h = fmr;
-
- iser_dbg("fmr reg: lkey=0x%x, rkey=0x%x, addr=0x%llx,"
- " length=0x%x\n", reg->sge.lkey, reg->rkey,
- reg->sge.addr, reg->sge.length);
-
- return 0;
-}
-
-/**
- * Unregister (previosuly registered using FMR) memory.
- * If memory is non-FMR does nothing.
- */
-void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task,
- enum iser_data_dir cmd_dir)
-{
- struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];
-
- if (!reg->mem_h)
- return;
-
- iser_dbg("PHYSICAL Mem.Unregister mem_h %p\n", reg->mem_h);
-
- ib_fmr_pool_unmap((struct ib_pool_fmr *)reg->mem_h);
-
- reg->mem_h = NULL;
-}
-
void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
enum iser_data_dir cmd_dir)
{
- struct iser_device *device = iser_task->iser_conn->ib_conn.device;
struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];
struct iser_fr_desc *desc;
struct ib_mr_status mr_status;
@@ -312,7 +150,7 @@ void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
ib_check_mr_status(desc->rsc.sig_mr, IB_MR_CHECK_SIG_STATUS,
&mr_status);
}
- device->reg_ops->reg_desc_put(&iser_task->iser_conn->ib_conn, desc);
+ iser_reg_desc_put_fr(&iser_task->iser_conn->ib_conn, reg->mem_h);
reg->mem_h = NULL;
}
@@ -509,15 +347,14 @@ iser_reg_data_sg(struct iscsi_iser_task *task,
if (use_dma_key)
return iser_reg_dma(device, mem, reg);
- return device->reg_ops->reg_mem(task, mem, &desc->rsc, reg);
+ return iser_fast_reg_mr(task, mem, &desc->rsc, reg);
}
-int iser_reg_rdma_mem(struct iscsi_iser_task *task,
- enum iser_data_dir dir,
- bool all_imm)
+int iser_reg_mem_fastreg(struct iscsi_iser_task *task,
+ enum iser_data_dir dir,
+ bool all_imm)
{
struct ib_conn *ib_conn = &task->iser_conn->ib_conn;
- struct iser_device *device = ib_conn->device;
struct iser_data_buf *mem = &task->data[dir];
struct iser_mem_reg *reg = &task->rdma_reg[dir];
struct iser_fr_desc *desc = NULL;
@@ -528,7 +365,7 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *task,
scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL;
if (!use_dma_key) {
- desc = device->reg_ops->reg_desc_get(ib_conn);
+ desc = iser_reg_desc_get_fr(ib_conn);
reg->mem_h = desc;
}
@@ -549,15 +386,8 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *task,
err_reg:
if (desc)
- device->reg_ops->reg_desc_put(ib_conn, desc);
+ iser_reg_desc_put_fr(ib_conn, desc);
return err;
}
-void iser_unreg_rdma_mem(struct iscsi_iser_task *task,
- enum iser_data_dir dir)
-{
- struct iser_device *device = task->iser_conn->ib_conn.device;
-
- device->reg_ops->unreg_mem(task, dir);
-}
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 127887c6c03f..c1f44c41f501 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -68,11 +68,12 @@ static void iser_event_handler(struct ib_event_handler *handler,
static int iser_create_device_ib_res(struct iser_device *device)
{
struct ib_device *ib_dev = device->ib_device;
- int ret, i, max_cqe;
+ int i, max_cqe;
- ret = iser_assign_reg_ops(device);
- if (ret)
- return ret;
+ if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS)) {
+ iser_err("IB device does not support memory registrations\n");
+ return -1;
+ }
device->comps_used = min_t(int, num_online_cpus(),
ib_dev->num_comp_vectors);
@@ -147,96 +148,6 @@ static void iser_free_device_ib_res(struct iser_device *device)
device->pd = NULL;
}
-/**
- * iser_alloc_fmr_pool - Creates FMR pool and page_vector
- * @ib_conn: connection RDMA resources
- * @cmds_max: max number of SCSI commands for this connection
- * @size: max number of pages per map request
- *
- * Return: 0 on success, or errno code on failure
- */
-int iser_alloc_fmr_pool(struct ib_conn *ib_conn,
- unsigned cmds_max,
- unsigned int size)
-{
- struct iser_device *device = ib_conn->device;
- struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
- struct iser_page_vec *page_vec;
- struct iser_fr_desc *desc;
- struct ib_fmr_pool *fmr_pool;
- struct ib_fmr_pool_param params;
- int ret;
-
- INIT_LIST_HEAD(&fr_pool->list);
- spin_lock_init(&fr_pool->lock);
-
- desc = kzalloc(sizeof(*desc), GFP_KERNEL);
- if (!desc)
- return -ENOMEM;
-
- page_vec = kmalloc(sizeof(*page_vec) + (sizeof(u64) * size),
- GFP_KERNEL);
- if (!page_vec) {
- ret = -ENOMEM;
- goto err_frpl;
- }
-
- page_vec->pages = (u64 *)(page_vec + 1);
-
- params.page_shift = ilog2(SZ_4K);
- params.max_pages_per_fmr = size;
- /* make the pool size twice the max number of SCSI commands *
- * the ML is expected to queue, watermark for unmap at 50% */
- params.pool_size = cmds_max * 2;
- params.dirty_watermark = cmds_max;
- params.cache = 0;
- params.flush_function = NULL;
- params.access = (IB_ACCESS_LOCAL_WRITE |
- IB_ACCESS_REMOTE_WRITE |
- IB_ACCESS_REMOTE_READ);
-
- fmr_pool = ib_create_fmr_pool(device->pd, &params);
- if (IS_ERR(fmr_pool)) {
- ret = PTR_ERR(fmr_pool);
- iser_err("FMR allocation failed, err %d\n", ret);
- goto err_fmr;
- }
-
- desc->rsc.page_vec = page_vec;
- desc->rsc.fmr_pool = fmr_pool;
- list_add(&desc->list, &fr_pool->list);
-
- return 0;
-
-err_fmr:
- kfree(page_vec);
-err_frpl:
- kfree(desc);
-
- return ret;
-}
-
-/**
- * iser_free_fmr_pool - releases the FMR pool and page vec
- * @ib_conn: connection RDMA resources
- */
-void iser_free_fmr_pool(struct ib_conn *ib_conn)
-{
- struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
- struct iser_fr_desc *desc;
-
- desc = list_first_entry(&fr_pool->list,
- struct iser_fr_desc, list);
- list_del(&desc->list);
-
- iser_info("freeing conn %p fmr pool %p\n",
- ib_conn, desc->rsc.fmr_pool);
-
- ib_destroy_fmr_pool(desc->rsc.fmr_pool);
- kfree(desc->rsc.page_vec);
- kfree(desc);
-}
-
static struct iser_fr_desc *
iser_create_fastreg_desc(struct iser_device *device,
struct ib_pd *pd,
@@ -667,13 +578,12 @@ iser_calc_scsi_params(struct iser_conn *iser_conn,
u32 max_num_sg;
/*
- * FRs without SG_GAPS or FMRs can only map up to a (device) page per
- * entry, but if the first entry is misaligned we'll end up using two
- * entries (head and tail) for a single page worth data, so one
- * additional entry is required.
+ * FRs without SG_GAPS can only map up to a (device) page per entry,
+ * but if the first entry is misaligned we'll end up using two entries
+ * (head and tail) for a single page worth data, so one additional
+ * entry is required.
*/
- if ((attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) &&
- (attr->device_cap_flags & IB_DEVICE_SG_GAPS_REG))
+ if (attr->device_cap_flags & IB_DEVICE_SG_GAPS_REG)
reserved_mr_pages = 0;
else
reserved_mr_pages = 1;
@@ -684,14 +594,8 @@ iser_calc_scsi_params(struct iser_conn *iser_conn,
max_num_sg = attr->max_fast_reg_page_list_len;
sg_tablesize = DIV_ROUND_UP(max_sectors * SECTOR_SIZE, SZ_4K);
- if (attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS)
- sup_sg_tablesize =
- min_t(
- uint, ISCSI_ISER_MAX_SG_TABLESIZE,
- max_num_sg - reserved_mr_pages);
- else
- sup_sg_tablesize = ISCSI_ISER_MAX_SG_TABLESIZE;
-
+ sup_sg_tablesize = min_t(uint, ISCSI_ISER_MAX_SG_TABLESIZE,
+ max_num_sg - reserved_mr_pages);
iser_conn->scsi_sg_tablesize = min(sg_tablesize, sup_sg_tablesize);
iser_conn->pages_per_mr =
iser_conn->scsi_sg_tablesize + reserved_mr_pages;
@@ -755,7 +659,7 @@ static void iser_route_handler(struct rdma_cm_id *cma_id)
struct iser_cm_hdr req_hdr;
struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;
struct ib_conn *ib_conn = &iser_conn->ib_conn;
- struct iser_device *device = ib_conn->device;
+ struct ib_device *ib_dev = ib_conn->device->ib_device;
if (iser_conn->state != ISER_CONN_PENDING)
/* bailout */
@@ -766,14 +670,14 @@ static void iser_route_handler(struct rdma_cm_id *cma_id)
goto failure;
memset(&conn_param, 0, sizeof conn_param);
- conn_param.responder_resources = device->ib_device->attrs.max_qp_rd_atom;
+ conn_param.responder_resources = ib_dev->attrs.max_qp_rd_atom;
conn_param.initiator_depth = 1;
conn_param.retry_count = 7;
conn_param.rnr_retry_count = 6;
memset(&req_hdr, 0, sizeof(req_hdr));
req_hdr.flags = ISER_ZBVA_NOT_SUP;
- if (!device->remote_inv_sup)
+ if (!iser_always_reg)
req_hdr.flags |= ISER_SEND_W_INV_NOT_SUP;
conn_param.private_data = (void *)&req_hdr;
conn_param.private_data_len = sizeof(struct iser_cm_hdr);
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index a1a035270cab..b7df38ee8ae0 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -15,6 +15,7 @@
#include <linux/in.h>
#include <linux/in6.h>
#include <rdma/ib_verbs.h>
+#include <rdma/ib_cm.h>
#include <rdma/rdma_cm.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
@@ -502,7 +503,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
if (!np->enabled) {
spin_unlock_bh(&np->np_thread_lock);
isert_dbg("iscsi_np is not enabled, reject connect request\n");
- return rdma_reject(cma_id, NULL, 0);
+ return rdma_reject(cma_id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED);
}
spin_unlock_bh(&np->np_thread_lock);
@@ -553,7 +554,7 @@ out_rsp_dma_map:
isert_free_login_buf(isert_conn);
out:
kfree(isert_conn);
- rdma_reject(cma_id, NULL, 0);
+ rdma_reject(cma_id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED);
return ret;
}
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
index 6e8d650c17c7..874a8eb7638c 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
@@ -113,7 +113,7 @@ struct opa_vnic_vema_port {
struct mutex lock;
};
-static void opa_vnic_vema_add_one(struct ib_device *device);
+static int opa_vnic_vema_add_one(struct ib_device *device);
static void opa_vnic_vema_rem_one(struct ib_device *device,
void *client_data);
@@ -989,18 +989,18 @@ static void opa_vnic_ctrl_config_dev(struct opa_vnic_ctrl_port *cport, bool en)
*
* Allocate the vnic control port and initialize it.
*/
-static void opa_vnic_vema_add_one(struct ib_device *device)
+static int opa_vnic_vema_add_one(struct ib_device *device)
{
struct opa_vnic_ctrl_port *cport;
int rc, size = sizeof(*cport);
if (!rdma_cap_opa_vnic(device))
- return;
+ return -EOPNOTSUPP;
size += device->phys_port_cnt * sizeof(struct opa_vnic_vema_port);
cport = kzalloc(size, GFP_KERNEL);
if (!cport)
- return;
+ return -ENOMEM;
cport->num_ports = device->phys_port_cnt;
cport->ibdev = device;
@@ -1012,6 +1012,7 @@ static void opa_vnic_vema_add_one(struct ib_device *device)
ib_set_client_data(device, &opa_vnic_client, cport);
opa_vnic_ctrl_config_dev(cport, true);
+ return 0;
}
/**
@@ -1026,9 +1027,6 @@ static void opa_vnic_vema_rem_one(struct ib_device *device,
{
struct opa_vnic_ctrl_port *cport = client_data;
- if (!cport)
- return;
-
c_info("removing VNIC client\n");
opa_vnic_ctrl_config_dev(cport, false);
vema_unregister(cport);
diff --git a/drivers/infiniband/ulp/rtrs/Kconfig b/drivers/infiniband/ulp/rtrs/Kconfig
new file mode 100644
index 000000000000..9092b62e6dc8
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/Kconfig
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+config INFINIBAND_RTRS
+ tristate
+ depends on INFINIBAND_ADDR_TRANS
+
+config INFINIBAND_RTRS_CLIENT
+ tristate "RTRS client module"
+ depends on INFINIBAND_ADDR_TRANS
+ select INFINIBAND_RTRS
+ help
+ RDMA transport client module.
+
+ RDMA Transport (RTRS) client implements a reliable transport layer
+ with multipathing functionality; it is intended to be the base layer
+ for a block storage initiator over RDMA.
+
+config INFINIBAND_RTRS_SERVER
+ tristate "RTRS server module"
+ depends on INFINIBAND_ADDR_TRANS
+ select INFINIBAND_RTRS
+ help
+ RDMA transport server module.
+
+ RDMA Transport (RTRS) server module processes connection and IO
+ requests received from the RTRS client module and passes the IO
+ requests on to its user, e.g. the RNBD server.
diff --git a/drivers/infiniband/ulp/rtrs/Makefile b/drivers/infiniband/ulp/rtrs/Makefile
new file mode 100644
index 000000000000..3898509be270
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/Makefile
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+rtrs-client-y := rtrs-clt.o \
+ rtrs-clt-stats.o \
+ rtrs-clt-sysfs.o
+
+rtrs-server-y := rtrs-srv.o \
+ rtrs-srv-stats.o \
+ rtrs-srv-sysfs.o
+
+rtrs-core-y := rtrs.o
+
+obj-$(CONFIG_INFINIBAND_RTRS) += rtrs-core.o
+obj-$(CONFIG_INFINIBAND_RTRS_CLIENT) += rtrs-client.o
+obj-$(CONFIG_INFINIBAND_RTRS_SERVER) += rtrs-server.o
diff --git a/drivers/infiniband/ulp/rtrs/README b/drivers/infiniband/ulp/rtrs/README
new file mode 100644
index 000000000000..5d9ea142e5dd
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/README
@@ -0,0 +1,213 @@
+****************************
+RDMA Transport (RTRS)
+****************************
+
+RTRS (RDMA Transport) is a reliable high-speed transport library
+which establishes an optimal number of connections between client and
+server machines over RDMA (InfiniBand, RoCE, iWARP) transports. It is
+optimized to transfer (read/write) IO blocks.
+
+In its core interface it follows the BIO semantics of providing the
+possibility to either write data from an sg list to the remote side
+or to request ("read") data transfer from the remote side into a given
+sg list.
+
+RTRS provides I/O fail-over and load-balancing capabilities by using
+multipath I/O (see "add_path" and "mp_policy" configuration entries in
+Documentation/ABI/testing/sysfs-class-rtrs-client).
+
+RTRS is used by the RNBD (RDMA Network Block Device) modules.
+
+==================
+Transport protocol
+==================
+
+Overview
+--------
+An established connection between a client and a server is called an rtrs
+session. A session is associated with a set of memory chunks reserved on the
+server side for a given client for rdma transfer. A session
+consists of multiple paths, each representing a separate physical link
+between client and server. Those are used for load balancing and failover.
+Each path consists of as many connections (QPs) as there are cpus on
+the client.
+
+When processing an incoming write or read request, the rtrs client uses memory
+chunks reserved for it on the server side. Their number, size and addresses
+need to be exchanged between client and server during the connection
+establishment phase. Apart from the memory-related information, the client
+needs to inform the server about the session name and to identify each path
+and connection individually.
+
+On an established session the client sends write or read messages to the
+server. The server uses the immediate field to tell the client which request
+is being acknowledged and to carry an errno. The client uses the immediate
+field to tell the server which of the memory chunks has been accessed and at
+which offset the message can be found.
+
+Module parameter always_invalidate is introduced for the security problem
+discussed in LPC RDMA MC 2019. When always_invalidate=Y, on the server side we
+invalidate each rdma buffer before we hand it over to RNBD server and
+then pass it to the block layer. A new rkey is generated and registered for the
+buffer after it returns back from the block layer and RNBD server.
+The new rkey is sent back to the client along with the IO result.
+This procedure is the default behaviour of the driver. The invalidation and
+registration on each IO cause a performance drop of up to 20%. A user of the
+driver may choose to load the modules with this mechanism switched off
+(always_invalidate=N) if they understand and can accept the risk of a malicious
+client being able to corrupt the memory of a server it is connected to. This might
+be a reasonable option in a scenario where all the clients and all the servers
+are located within a secure datacenter.
+
+
+Connection establishment
+------------------------
+
+1. Client starts establishing connections belonging to a path of a session one
+by one via attaching RTRS_MSG_CON_REQ messages to the rdma_connect requests.
+Those include uuid of the session and uuid of the path to be
+established. They are used by the server to find a persisting session/path or
+to create a new one when necessary. The message also contains the protocol
+version and magic for compatibility, total number of connections per session
+(as many as cpus on the client), the id of the current connection and
+the reconnect counter, which is used to resolve situations where the
+client is trying to reconnect a path while the server is still destroying the
+old one. (An illustrative field layout is sketched after the handshake
+diagram below.)
+
+2. Server accepts the connection requests one by one and attaches
+RTRS_MSG_CONN_RSP messages to the rdma_accept. Apart from magic and
+protocol version, the messages include error code, queue depth supported by
+the server (number of memory chunks which are going to be allocated for that
+session) and the maximum size of one IO. The RTRS_MSG_NEW_RKEY_F flag is set
+when always_invalidate=Y.
+
+3. After all connections of a path are established, the client sends the
+RTRS_MSG_INFO_REQ message, containing the name of the session, to the server.
+This message
+requests the address information from the server.
+
+4. Server replies to the session info request message with RTRS_MSG_INFO_RSP,
+which contains the addresses and keys of the RDMA buffers allocated for that
+session.
+
+5. A session becomes connected after all paths to be established are connected
+(i.e. steps 1-4 finished for all paths requested for a session).
+
+6. Server and client periodically exchange heartbeat messages (empty rdma
+messages with an immediate field) which are used to detect a crash on the
+remote side or a network outage in the absence of IO.
+
+7. On any RDMA-related error or in the case of a heartbeat timeout, the
+corresponding path is disconnected, all the inflight IO is failed over to a
+healthy path, if any, and the reconnect mechanism is triggered.
+
+CLT SRV
+*for each connection belonging to a path and for each path:
+RTRS_MSG_CON_REQ ------------------->
+ <------------------- RTRS_MSG_CON_RSP
+...
+*after all connections are established:
+RTRS_MSG_INFO_REQ ------------------->
+ <------------------- RTRS_MSG_INFO_RSP
+*heartbeat is started from both sides:
+ -------------------> [RTRS_HB_MSG_IMM]
+[RTRS_HB_MSG_ACK] <-------------------
+[RTRS_HB_MSG_IMM] <-------------------
+ -------------------> [RTRS_HB_MSG_ACK]
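
[Editorial aside, not part of the patch] As an illustration of the fields item 1
enumerates, a connection request roughly carries the following. The
authoritative wire format lives in rtrs-pri.h and may differ in field order,
types, padding and reserved space.

#include <linux/types.h>
#include <linux/uuid.h>

/* Illustrative layout only -- see rtrs-pri.h for the real rtrs_msg_conn_req. */
struct demo_rtrs_conn_req {
	__le16	magic;		/* protocol magic, for compatibility checks */
	__le16	version;	/* protocol version */
	__le16	cid;		/* id of this connection within the path */
	__le16	cid_num;	/* total connections (== nr of client CPUs) */
	__le16	recon_cnt;	/* reconnect counter for stale-path races */
	uuid_t	sess_uuid;	/* session uuid */
	uuid_t	paths_uuid;	/* uuid of the path being established */
};
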
+
+IO path
+-------
+
+* Write (always_invalidate=N) *
+
+1. When processing a write request the client selects one of the memory chunks
+on the server side and rdma writes there the user data, user header and the
+RTRS_MSG_RDMA_WRITE message. Apart from the type (write), the message only
+contains size of the user header. The client tells the server which chunk has
+been accessed and at what offset the RTRS_MSG_RDMA_WRITE can be found by
+using the IMM field.
+
+2. When confirming a write request the server sends an "empty" rdma message with
+an immediate field. The 32 bit field is used to specify the outstanding
+inflight IO and the error code (an illustrative packing is sketched after the
+diagram below).
+
+CLT SRV
+usr_data + usr_hdr + rtrs_msg_rdma_write -----------------> [RTRS_IO_REQ_IMM]
+[RTRS_IO_RSP_IMM] <----------------- (id + errno)
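
[Editorial aside, not part of the patch] One possible way to pack the request id
and the errno into the 32 bit immediate field described in item 2 above; the
real encoding is defined by the rtrs protocol headers and is likely different,
so the split below is purely illustrative.

#include <linux/types.h>

#define DEMO_IMM_ID_SHIFT	16	/* made-up split: high 16 bits = id */

static inline u32 demo_io_rsp_to_imm(u32 msg_id, int err)
{
	/* err is 0 or a negative errno; store its magnitude in the low bits */
	return (msg_id << DEMO_IMM_ID_SHIFT) | (u16)(-err);
}

static inline void demo_io_rsp_from_imm(u32 imm, u32 *msg_id, int *err)
{
	*msg_id = imm >> DEMO_IMM_ID_SHIFT;
	*err = -(int)(imm & ((1u << DEMO_IMM_ID_SHIFT) - 1));
}
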
+
+* Write (always_invalidate=Y) *
+
+1. When processing a write request the client selects one of the memory chunks
+on the server side and rdma writes there the user data, user header and the
+RTRS_MSG_RDMA_WRITE message. Apart from the type (write), the message only
+contains size of the user header. The client tells the server which chunk has
+been accessed and at what offset the RTRS_MSG_RDMA_WRITE can be found by
+using the IMM field. The server first invalidates the rkey associated with the
+memory chunk and, when that finishes, passes the IO to the RNBD server module.
+
+2. When confirming a write request the server sends an "empty" rdma message
+with an immediate field. The 32 bit field is used to specify the outstanding
+inflight IO and the error code. The new rkey is sent back using a
+SEND_WITH_IMM WR. When the client receives the new rkey message, it validates
+the message, updates the rkey for the rbuffer, finishes the IO and then posts
+the recv buffer back for later use.
+
+CLT SRV
+usr_data + usr_hdr + rtrs_msg_rdma_write -----------------> [RTRS_IO_REQ_IMM]
+[RTRS_MSG_RKEY_RSP] <----------------- (RTRS_MSG_RKEY_RSP)
+[RTRS_IO_RSP_IMM] <----------------- (id + errno)
+
+
+* Read (always_invalidate=N)*
+
+1. When processing a read request the client selects one of the memory chunks
+on the server side and rdma writes there the user header and the
+RTRS_MSG_RDMA_READ message. This message contains the type (read), size of
+the user header, flags (specifying if memory invalidation is necessary) and the
+list of addresses along with keys for the data to be read into.
+
+2. When confirming a read request the server transfers the requested data first,
+attaches an invalidation message if requested and finally an "empty" rdma
+message with an immediate field. The 32 bit field is used to specify the
+outstanding inflight IO and the error code.
+
+CLT SRV
+usr_hdr + rtrs_msg_rdma_read --------------> [RTRS_IO_REQ_IMM]
+[RTRS_IO_RSP_IMM] <-------------- usr_data + (id + errno)
+or in case client requested invalidation:
+[RTRS_IO_RSP_IMM_W_INV] <-------------- usr_data + (INV) + (id + errno)
+
+* Read (always_invalidate=Y)*
+
+1. When processing a read request the client selects one of the memory chunks
+on the server side and rdma writes there the user header and the
+RTRS_MSG_RDMA_READ message. This message contains the type (read), size of
+the user header, flags (specifying if memory invalidation is necessary) and the
+list of addresses along with keys for the data to be read into.
+The server first invalidates the rkey associated with the memory chunk and,
+when that finishes, passes the IO to the RNBD server module.
+
+2. When confirming a read request the server transfers the requested data first,
+attaches an invalidation message if requested and finally an "empty" rdma
+message with an immediate field. The 32 bit field is used to specify the
+outstanding inflight IO and the error code. The new rkey is sent back using a
+SEND_WITH_IMM WR. When the client receives the new rkey message, it validates
+the message, updates the rkey for the rbuffer, finishes the IO and then posts
+the recv buffer back for later use.
+
+CLT SRV
+usr_hdr + rtrs_msg_rdma_read --------------> [RTRS_IO_REQ_IMM]
+[RTRS_IO_RSP_IMM] <-------------- usr_data + (id + errno)
+[RTRS_MSG_RKEY_RSP] <----------------- (RTRS_MSG_RKEY_RSP)
+or in case client requested invalidation:
+[RTRS_IO_RSP_IMM_W_INV] <-------------- usr_data + (INV) + (id + errno)
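+
+On the client side the rkey refresh is handled in the RECV completion path
+(see rtrs_clt_rkey_rsp_done() later in this patch): the message is validated,
+the rkey of the addressed rbuffer is updated, the IO is completed and the
+receive buffer is posted back. A condensed sketch of that sequence, error
+handling omitted:
+
+  msg = iu->buf;                               /* RTRS_MSG_RKEY_RSP        */
+  buf_id = le16_to_cpu(msg->buf_id);
+  rtrs_from_imm(be32_to_cpu(wc->ex.imm_data), &imm_type, &imm_payload);
+  rtrs_from_io_rsp_imm(imm_payload, &msg_id, &err);
+  sess->rbufs[buf_id].rkey = le32_to_cpu(msg->rkey);
+  process_io_rsp(sess, msg_id, err, w_inval);  /* complete the IO          */
+  rtrs_iu_post_recv(&con->c, iu);              /* re-post the recv buffer  */
+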
+=========================================
+Contributors List (in alphabetical order)
+=========================================
+Danil Kipnis <danil.kipnis@profitbricks.com>
+Fabian Holler <mail@fholler.de>
+Guoqing Jiang <guoqing.jiang@cloud.ionos.com>
+Jack Wang <jinpu.wang@profitbricks.com>
+Kleber Souza <kleber.souza@profitbricks.com>
+Lutz Pogrell <lutz.pogrell@cloud.ionos.com>
+Milind Dumbare <Milind.dumbare@gmail.com>
+Roman Penyaev <roman.penyaev@profitbricks.com>
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c b/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
new file mode 100644
index 000000000000..26bbe5d6dff5
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
@@ -0,0 +1,200 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
+
+#include "rtrs-clt.h"
+
+void rtrs_clt_update_wc_stats(struct rtrs_clt_con *con)
+{
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct rtrs_clt_stats *stats = sess->stats;
+ struct rtrs_clt_stats_pcpu *s;
+ int cpu;
+
+ cpu = raw_smp_processor_id();
+ s = this_cpu_ptr(stats->pcpu_stats);
+ if (unlikely(con->cpu != cpu)) {
+ s->cpu_migr.to++;
+
+ /* Careful here, override s pointer */
+ s = per_cpu_ptr(stats->pcpu_stats, con->cpu);
+ atomic_inc(&s->cpu_migr.from);
+ }
+}
+
+void rtrs_clt_inc_failover_cnt(struct rtrs_clt_stats *stats)
+{
+ struct rtrs_clt_stats_pcpu *s;
+
+ s = this_cpu_ptr(stats->pcpu_stats);
+ s->rdma.failover_cnt++;
+}
+
+int rtrs_clt_stats_migration_cnt_to_str(struct rtrs_clt_stats *stats,
+ char *buf, size_t len)
+{
+ struct rtrs_clt_stats_pcpu *s;
+
+ size_t used;
+ int cpu;
+
+ used = scnprintf(buf, len, " ");
+ for_each_possible_cpu(cpu)
+ used += scnprintf(buf + used, len - used, " CPU%u", cpu);
+
+ used += scnprintf(buf + used, len - used, "\nfrom:");
+ for_each_possible_cpu(cpu) {
+ s = per_cpu_ptr(stats->pcpu_stats, cpu);
+ used += scnprintf(buf + used, len - used, " %d",
+ atomic_read(&s->cpu_migr.from));
+ }
+
+ used += scnprintf(buf + used, len - used, "\nto :");
+ for_each_possible_cpu(cpu) {
+ s = per_cpu_ptr(stats->pcpu_stats, cpu);
+ used += scnprintf(buf + used, len - used, " %d",
+ s->cpu_migr.to);
+ }
+ used += scnprintf(buf + used, len - used, "\n");
+
+ return used;
+}
+
+int rtrs_clt_stats_reconnects_to_str(struct rtrs_clt_stats *stats, char *buf,
+ size_t len)
+{
+ return scnprintf(buf, len, "%d %d\n",
+ stats->reconnects.successful_cnt,
+ stats->reconnects.fail_cnt);
+}
+
+ssize_t rtrs_clt_stats_rdma_to_str(struct rtrs_clt_stats *stats,
+ char *page, size_t len)
+{
+ struct rtrs_clt_stats_rdma sum;
+ struct rtrs_clt_stats_rdma *r;
+ int cpu;
+
+ memset(&sum, 0, sizeof(sum));
+
+ for_each_possible_cpu(cpu) {
+ r = &per_cpu_ptr(stats->pcpu_stats, cpu)->rdma;
+
+ sum.dir[READ].cnt += r->dir[READ].cnt;
+ sum.dir[READ].size_total += r->dir[READ].size_total;
+ sum.dir[WRITE].cnt += r->dir[WRITE].cnt;
+ sum.dir[WRITE].size_total += r->dir[WRITE].size_total;
+ sum.failover_cnt += r->failover_cnt;
+ }
+
+ return scnprintf(page, len, "%llu %llu %llu %llu %u %llu\n",
+ sum.dir[READ].cnt, sum.dir[READ].size_total,
+ sum.dir[WRITE].cnt, sum.dir[WRITE].size_total,
+ atomic_read(&stats->inflight), sum.failover_cnt);
+}
+
+ssize_t rtrs_clt_reset_all_help(struct rtrs_clt_stats *s,
+ char *page, size_t len)
+{
+ return scnprintf(page, len, "echo 1 to reset all statistics\n");
+}
+
+int rtrs_clt_reset_rdma_stats(struct rtrs_clt_stats *stats, bool enable)
+{
+ struct rtrs_clt_stats_pcpu *s;
+ int cpu;
+
+ if (!enable)
+ return -EINVAL;
+
+ for_each_possible_cpu(cpu) {
+ s = per_cpu_ptr(stats->pcpu_stats, cpu);
+ memset(&s->rdma, 0, sizeof(s->rdma));
+ }
+
+ return 0;
+}
+
+int rtrs_clt_reset_cpu_migr_stats(struct rtrs_clt_stats *stats, bool enable)
+{
+ struct rtrs_clt_stats_pcpu *s;
+ int cpu;
+
+ if (!enable)
+ return -EINVAL;
+
+ for_each_possible_cpu(cpu) {
+ s = per_cpu_ptr(stats->pcpu_stats, cpu);
+ memset(&s->cpu_migr, 0, sizeof(s->cpu_migr));
+ }
+
+ return 0;
+}
+
+int rtrs_clt_reset_reconnects_stat(struct rtrs_clt_stats *stats, bool enable)
+{
+ if (!enable)
+ return -EINVAL;
+
+ memset(&stats->reconnects, 0, sizeof(stats->reconnects));
+
+ return 0;
+}
+
+int rtrs_clt_reset_all_stats(struct rtrs_clt_stats *s, bool enable)
+{
+ if (enable) {
+ rtrs_clt_reset_rdma_stats(s, enable);
+ rtrs_clt_reset_cpu_migr_stats(s, enable);
+ rtrs_clt_reset_reconnects_stat(s, enable);
+ atomic_set(&s->inflight, 0);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline void rtrs_clt_update_rdma_stats(struct rtrs_clt_stats *stats,
+ size_t size, int d)
+{
+ struct rtrs_clt_stats_pcpu *s;
+
+ s = this_cpu_ptr(stats->pcpu_stats);
+ s->rdma.dir[d].cnt++;
+ s->rdma.dir[d].size_total += size;
+}
+
+void rtrs_clt_update_all_stats(struct rtrs_clt_io_req *req, int dir)
+{
+ struct rtrs_clt_con *con = req->con;
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct rtrs_clt_stats *stats = sess->stats;
+ unsigned int len;
+
+ len = req->usr_len + req->data_len;
+ rtrs_clt_update_rdma_stats(stats, len, dir);
+ if (sess->clt->mp_policy == MP_POLICY_MIN_INFLIGHT)
+ atomic_inc(&stats->inflight);
+}
+
+int rtrs_clt_init_stats(struct rtrs_clt_stats *stats)
+{
+ stats->pcpu_stats = alloc_percpu(typeof(*stats->pcpu_stats));
+ if (!stats->pcpu_stats)
+ return -ENOMEM;
+
+ /*
+ * successful_cnt will be set to 0 after session
+ * is established for the first time
+ */
+ stats->reconnects.successful_cnt = -1;
+
+ return 0;
+}
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
new file mode 100644
index 000000000000..298b747d0330
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
@@ -0,0 +1,483 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
+
+#include "rtrs-pri.h"
+#include "rtrs-clt.h"
+#include "rtrs-log.h"
+
+#define MIN_MAX_RECONN_ATT -1
+#define MAX_MAX_RECONN_ATT 9999
+
+static void rtrs_clt_sess_release(struct kobject *kobj)
+{
+ struct rtrs_clt_sess *sess;
+
+ sess = container_of(kobj, struct rtrs_clt_sess, kobj);
+
+ free_sess(sess);
+}
+
+static struct kobj_type ktype_sess = {
+ .sysfs_ops = &kobj_sysfs_ops,
+ .release = rtrs_clt_sess_release
+};
+
+static void rtrs_clt_sess_stats_release(struct kobject *kobj)
+{
+ struct rtrs_clt_stats *stats;
+
+ stats = container_of(kobj, struct rtrs_clt_stats, kobj_stats);
+
+ free_percpu(stats->pcpu_stats);
+
+ kfree(stats);
+}
+
+static struct kobj_type ktype_stats = {
+ .sysfs_ops = &kobj_sysfs_ops,
+ .release = rtrs_clt_sess_stats_release,
+};
+
+static ssize_t max_reconnect_attempts_show(struct device *dev,
+ struct device_attribute *attr,
+ char *page)
+{
+ struct rtrs_clt *clt = container_of(dev, struct rtrs_clt, dev);
+
+ return sprintf(page, "%d\n", rtrs_clt_get_max_reconnect_attempts(clt));
+}
+
+static ssize_t max_reconnect_attempts_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ int value;
+ int ret;
+ struct rtrs_clt *clt = container_of(dev, struct rtrs_clt, dev);
+
+ ret = kstrtoint(buf, 10, &value);
+ if (ret) {
+ rtrs_err(clt, "%s: failed to convert string '%s' to int\n",
+ attr->attr.name, buf);
+ return ret;
+ }
+ if (value > MAX_MAX_RECONN_ATT ||
+ value < MIN_MAX_RECONN_ATT) {
+ rtrs_err(clt,
+ "%s: invalid range (provided: '%s', accepted: min: %d, max: %d)\n",
+ attr->attr.name, buf, MIN_MAX_RECONN_ATT,
+ MAX_MAX_RECONN_ATT);
+ return -EINVAL;
+ }
+ rtrs_clt_set_max_reconnect_attempts(clt, value);
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(max_reconnect_attempts);
+
+static ssize_t mpath_policy_show(struct device *dev,
+ struct device_attribute *attr,
+ char *page)
+{
+ struct rtrs_clt *clt;
+
+ clt = container_of(dev, struct rtrs_clt, dev);
+
+ switch (clt->mp_policy) {
+ case MP_POLICY_RR:
+ return sprintf(page, "round-robin (RR: %d)\n", clt->mp_policy);
+ case MP_POLICY_MIN_INFLIGHT:
+ return sprintf(page, "min-inflight (MI: %d)\n", clt->mp_policy);
+ default:
+ return sprintf(page, "Unknown (%d)\n", clt->mp_policy);
+ }
+}
+
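+/*
+ * The multipath policy can be selected either by the numeric value of
+ * MP_POLICY_RR / MP_POLICY_MIN_INFLIGHT or by name (illustrative usage):
+ *
+ *   echo "round-robin"  > mpath_policy    (or "rr")
+ *   echo "min-inflight" > mpath_policy    (or "mi")
+ */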
+static ssize_t mpath_policy_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct rtrs_clt *clt;
+ int value;
+ int ret;
+
+ clt = container_of(dev, struct rtrs_clt, dev);
+
+ ret = kstrtoint(buf, 10, &value);
+ if (!ret && (value == MP_POLICY_RR ||
+ value == MP_POLICY_MIN_INFLIGHT)) {
+ clt->mp_policy = value;
+ return count;
+ }
+
+ if (!strncasecmp(buf, "round-robin", 11) ||
+ !strncasecmp(buf, "rr", 2))
+ clt->mp_policy = MP_POLICY_RR;
+ else if (!strncasecmp(buf, "min-inflight", 12) ||
+ !strncasecmp(buf, "mi", 2))
+ clt->mp_policy = MP_POLICY_MIN_INFLIGHT;
+ else
+ return -EINVAL;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(mpath_policy);
+
+static ssize_t add_path_show(struct device *dev,
+ struct device_attribute *attr, char *page)
+{
+ return scnprintf(page, PAGE_SIZE,
+ "Usage: echo [<source addr>@]<destination addr> > %s\n\n*addr ::= [ ip:<ipv4|ipv6> | gid:<gid> ]\n",
+ attr->attr.name);
+}
+
+static ssize_t add_path_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct sockaddr_storage srcaddr, dstaddr;
+ struct rtrs_addr addr = {
+ .src = &srcaddr,
+ .dst = &dstaddr
+ };
+ struct rtrs_clt *clt;
+ const char *nl;
+ size_t len;
+ int err;
+
+ clt = container_of(dev, struct rtrs_clt, dev);
+
+ nl = strchr(buf, '\n');
+ if (nl)
+ len = nl - buf;
+ else
+ len = count;
+ err = rtrs_addr_to_sockaddr(buf, len, clt->port, &addr);
+ if (err)
+ return -EINVAL;
+
+ err = rtrs_clt_create_path_from_sysfs(clt, &addr);
+ if (err)
+ return err;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(add_path);
+
+static ssize_t rtrs_clt_state_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *page)
+{
+ struct rtrs_clt_sess *sess;
+
+ sess = container_of(kobj, struct rtrs_clt_sess, kobj);
+ if (sess->state == RTRS_CLT_CONNECTED)
+ return sprintf(page, "connected\n");
+
+ return sprintf(page, "disconnected\n");
+}
+
+static struct kobj_attribute rtrs_clt_state_attr =
+ __ATTR(state, 0444, rtrs_clt_state_show, NULL);
+
+static ssize_t rtrs_clt_reconnect_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ return scnprintf(page, PAGE_SIZE, "Usage: echo 1 > %s\n",
+ attr->attr.name);
+}
+
+static ssize_t rtrs_clt_reconnect_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rtrs_clt_sess *sess;
+ int ret;
+
+ sess = container_of(kobj, struct rtrs_clt_sess, kobj);
+ if (!sysfs_streq(buf, "1")) {
+ rtrs_err(sess->clt, "%s: unknown value: '%s'\n",
+ attr->attr.name, buf);
+ return -EINVAL;
+ }
+ ret = rtrs_clt_reconnect_from_sysfs(sess);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static struct kobj_attribute rtrs_clt_reconnect_attr =
+ __ATTR(reconnect, 0644, rtrs_clt_reconnect_show,
+ rtrs_clt_reconnect_store);
+
+static ssize_t rtrs_clt_disconnect_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ return scnprintf(page, PAGE_SIZE, "Usage: echo 1 > %s\n",
+ attr->attr.name);
+}
+
+static ssize_t rtrs_clt_disconnect_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rtrs_clt_sess *sess;
+ int ret;
+
+ sess = container_of(kobj, struct rtrs_clt_sess, kobj);
+ if (!sysfs_streq(buf, "1")) {
+ rtrs_err(sess->clt, "%s: unknown value: '%s'\n",
+ attr->attr.name, buf);
+ return -EINVAL;
+ }
+ ret = rtrs_clt_disconnect_from_sysfs(sess);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static struct kobj_attribute rtrs_clt_disconnect_attr =
+ __ATTR(disconnect, 0644, rtrs_clt_disconnect_show,
+ rtrs_clt_disconnect_store);
+
+static ssize_t rtrs_clt_remove_path_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ return scnprintf(page, PAGE_SIZE, "Usage: echo 1 > %s\n",
+ attr->attr.name);
+}
+
+static ssize_t rtrs_clt_remove_path_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rtrs_clt_sess *sess;
+ int ret;
+
+ sess = container_of(kobj, struct rtrs_clt_sess, kobj);
+ if (!sysfs_streq(buf, "1")) {
+ rtrs_err(sess->clt, "%s: unknown value: '%s'\n",
+ attr->attr.name, buf);
+ return -EINVAL;
+ }
+ ret = rtrs_clt_remove_path_from_sysfs(sess, &attr->attr);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static struct kobj_attribute rtrs_clt_remove_path_attr =
+ __ATTR(remove_path, 0644, rtrs_clt_remove_path_show,
+ rtrs_clt_remove_path_store);
+
+STAT_ATTR(struct rtrs_clt_stats, cpu_migration,
+ rtrs_clt_stats_migration_cnt_to_str,
+ rtrs_clt_reset_cpu_migr_stats);
+
+STAT_ATTR(struct rtrs_clt_stats, reconnects,
+ rtrs_clt_stats_reconnects_to_str,
+ rtrs_clt_reset_reconnects_stat);
+
+STAT_ATTR(struct rtrs_clt_stats, rdma,
+ rtrs_clt_stats_rdma_to_str,
+ rtrs_clt_reset_rdma_stats);
+
+STAT_ATTR(struct rtrs_clt_stats, reset_all,
+ rtrs_clt_reset_all_help,
+ rtrs_clt_reset_all_stats);
+
+static struct attribute *rtrs_clt_stats_attrs[] = {
+ &cpu_migration_attr.attr,
+ &reconnects_attr.attr,
+ &rdma_attr.attr,
+ &reset_all_attr.attr,
+ NULL
+};
+
+static struct attribute_group rtrs_clt_stats_attr_group = {
+ .attrs = rtrs_clt_stats_attrs,
+};
+
+static ssize_t rtrs_clt_hca_port_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ struct rtrs_clt_sess *sess;
+
+ sess = container_of(kobj, typeof(*sess), kobj);
+
+ return scnprintf(page, PAGE_SIZE, "%u\n", sess->hca_port);
+}
+
+static struct kobj_attribute rtrs_clt_hca_port_attr =
+ __ATTR(hca_port, 0444, rtrs_clt_hca_port_show, NULL);
+
+static ssize_t rtrs_clt_hca_name_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ struct rtrs_clt_sess *sess;
+
+ sess = container_of(kobj, struct rtrs_clt_sess, kobj);
+
+ return scnprintf(page, PAGE_SIZE, "%s\n", sess->hca_name);
+}
+
+static struct kobj_attribute rtrs_clt_hca_name_attr =
+ __ATTR(hca_name, 0444, rtrs_clt_hca_name_show, NULL);
+
+static ssize_t rtrs_clt_src_addr_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ struct rtrs_clt_sess *sess;
+ int cnt;
+
+ sess = container_of(kobj, struct rtrs_clt_sess, kobj);
+ cnt = sockaddr_to_str((struct sockaddr *)&sess->s.src_addr,
+ page, PAGE_SIZE);
+ return cnt + scnprintf(page + cnt, PAGE_SIZE - cnt, "\n");
+}
+
+static struct kobj_attribute rtrs_clt_src_addr_attr =
+ __ATTR(src_addr, 0444, rtrs_clt_src_addr_show, NULL);
+
+static ssize_t rtrs_clt_dst_addr_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ struct rtrs_clt_sess *sess;
+ int cnt;
+
+ sess = container_of(kobj, struct rtrs_clt_sess, kobj);
+ cnt = sockaddr_to_str((struct sockaddr *)&sess->s.dst_addr,
+ page, PAGE_SIZE);
+ return cnt + scnprintf(page + cnt, PAGE_SIZE - cnt, "\n");
+}
+
+static struct kobj_attribute rtrs_clt_dst_addr_attr =
+ __ATTR(dst_addr, 0444, rtrs_clt_dst_addr_show, NULL);
+
+static struct attribute *rtrs_clt_sess_attrs[] = {
+ &rtrs_clt_hca_name_attr.attr,
+ &rtrs_clt_hca_port_attr.attr,
+ &rtrs_clt_src_addr_attr.attr,
+ &rtrs_clt_dst_addr_attr.attr,
+ &rtrs_clt_state_attr.attr,
+ &rtrs_clt_reconnect_attr.attr,
+ &rtrs_clt_disconnect_attr.attr,
+ &rtrs_clt_remove_path_attr.attr,
+ NULL,
+};
+
+static struct attribute_group rtrs_clt_sess_attr_group = {
+ .attrs = rtrs_clt_sess_attrs,
+};
+
+int rtrs_clt_create_sess_files(struct rtrs_clt_sess *sess)
+{
+ struct rtrs_clt *clt = sess->clt;
+ char str[NAME_MAX];
+ int err, cnt;
+
+ cnt = sockaddr_to_str((struct sockaddr *)&sess->s.src_addr,
+ str, sizeof(str));
+ cnt += scnprintf(str + cnt, sizeof(str) - cnt, "@");
+ sockaddr_to_str((struct sockaddr *)&sess->s.dst_addr,
+ str + cnt, sizeof(str) - cnt);
+
+ err = kobject_init_and_add(&sess->kobj, &ktype_sess, clt->kobj_paths,
+ "%s", str);
+ if (err) {
+ pr_err("kobject_init_and_add: %d\n", err);
+ return err;
+ }
+ err = sysfs_create_group(&sess->kobj, &rtrs_clt_sess_attr_group);
+ if (err) {
+ pr_err("sysfs_create_group(): %d\n", err);
+ goto put_kobj;
+ }
+ err = kobject_init_and_add(&sess->stats->kobj_stats, &ktype_stats,
+ &sess->kobj, "stats");
+ if (err) {
+ pr_err("kobject_init_and_add: %d\n", err);
+ goto remove_group;
+ }
+
+ err = sysfs_create_group(&sess->stats->kobj_stats,
+ &rtrs_clt_stats_attr_group);
+ if (err) {
+ pr_err("failed to create stats sysfs group, err: %d\n", err);
+ goto put_kobj_stats;
+ }
+
+ return 0;
+
+put_kobj_stats:
+ kobject_del(&sess->stats->kobj_stats);
+ kobject_put(&sess->stats->kobj_stats);
+remove_group:
+ sysfs_remove_group(&sess->kobj, &rtrs_clt_sess_attr_group);
+put_kobj:
+ kobject_del(&sess->kobj);
+ kobject_put(&sess->kobj);
+
+ return err;
+}
+
+void rtrs_clt_destroy_sess_files(struct rtrs_clt_sess *sess,
+ const struct attribute *sysfs_self)
+{
+ kobject_del(&sess->stats->kobj_stats);
+ kobject_put(&sess->stats->kobj_stats);
+ if (sysfs_self)
+ sysfs_remove_file_self(&sess->kobj, sysfs_self);
+ kobject_del(&sess->kobj);
+}
+
+static struct attribute *rtrs_clt_attrs[] = {
+ &dev_attr_max_reconnect_attempts.attr,
+ &dev_attr_mpath_policy.attr,
+ &dev_attr_add_path.attr,
+ NULL,
+};
+
+static struct attribute_group rtrs_clt_attr_group = {
+ .attrs = rtrs_clt_attrs,
+};
+
+int rtrs_clt_create_sysfs_root_files(struct rtrs_clt *clt)
+{
+ return sysfs_create_group(&clt->dev.kobj, &rtrs_clt_attr_group);
+}
+
+void rtrs_clt_destroy_sysfs_root_folders(struct rtrs_clt *clt)
+{
+ if (clt->kobj_paths) {
+ kobject_del(clt->kobj_paths);
+ kobject_put(clt->kobj_paths);
+ }
+}
+
+void rtrs_clt_destroy_sysfs_root_files(struct rtrs_clt *clt)
+{
+ sysfs_remove_group(&clt->dev.kobj, &rtrs_clt_attr_group);
+}
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
new file mode 100644
index 000000000000..564388a85603
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
@@ -0,0 +1,2992 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
+
+#include <linux/module.h>
+#include <linux/rculist.h>
+
+#include "rtrs-clt.h"
+#include "rtrs-log.h"
+
+#define RTRS_CONNECT_TIMEOUT_MS 30000
+/*
+ * Wait a bit before trying to reconnect after a failure
+ * in order to give server time to finish clean up which
+ * leads to "false positives" failed reconnect attempts
+ */
+#define RTRS_RECONNECT_BACKOFF 1000
+
+MODULE_DESCRIPTION("RDMA Transport Client");
+MODULE_LICENSE("GPL");
+
+static const struct rtrs_rdma_dev_pd_ops dev_pd_ops;
+static struct rtrs_rdma_dev_pd dev_pd = {
+ .ops = &dev_pd_ops
+};
+
+static struct workqueue_struct *rtrs_wq;
+static struct class *rtrs_clt_dev_class;
+
+static inline bool rtrs_clt_is_connected(const struct rtrs_clt *clt)
+{
+ struct rtrs_clt_sess *sess;
+ bool connected = false;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(sess, &clt->paths_list, s.entry)
+ connected |= READ_ONCE(sess->state) == RTRS_CLT_CONNECTED;
+ rcu_read_unlock();
+
+ return connected;
+}
+
+static struct rtrs_permit *
+__rtrs_get_permit(struct rtrs_clt *clt, enum rtrs_clt_con_type con_type)
+{
+ size_t max_depth = clt->queue_depth;
+ struct rtrs_permit *permit;
+ int bit;
+
+ /*
+ * Adapted from null_blk get_tag(). Callers from different cpus may
+ * grab the same bit, since find_first_zero_bit is not atomic.
+ * But then the test_and_set_bit_lock will fail for all the
+ * callers but one, so that they will loop again.
+ * This way an explicit spinlock is not required.
+ */
+ do {
+ bit = find_first_zero_bit(clt->permits_map, max_depth);
+ if (unlikely(bit >= max_depth))
+ return NULL;
+ } while (unlikely(test_and_set_bit_lock(bit, clt->permits_map)));
+
+ permit = get_permit(clt, bit);
+ WARN_ON(permit->mem_id != bit);
+ permit->cpu_id = raw_smp_processor_id();
+ permit->con_type = con_type;
+
+ return permit;
+}
+
+static inline void __rtrs_put_permit(struct rtrs_clt *clt,
+ struct rtrs_permit *permit)
+{
+ clear_bit_unlock(permit->mem_id, clt->permits_map);
+}
+
+/**
+ * rtrs_clt_get_permit() - allocates permit for future RDMA operation
+ * @clt: Current session
+ * @con_type: Type of connection to use with the permit
+ * @can_wait: Wait type
+ *
+ * Description:
+ * Allocates permit for the following RDMA operation. Permit is used
+ * to preallocate all resources and to propagate memory pressure
+ * up earlier.
+ *
+ * Context:
+ * Can sleep if @can_wait is set
+ */
+struct rtrs_permit *rtrs_clt_get_permit(struct rtrs_clt *clt,
+ enum rtrs_clt_con_type con_type,
+ int can_wait)
+{
+ struct rtrs_permit *permit;
+ DEFINE_WAIT(wait);
+
+ permit = __rtrs_get_permit(clt, con_type);
+ if (likely(permit) || !can_wait)
+ return permit;
+
+ do {
+ prepare_to_wait(&clt->permits_wait, &wait,
+ TASK_UNINTERRUPTIBLE);
+ permit = __rtrs_get_permit(clt, con_type);
+ if (likely(permit))
+ break;
+
+ io_schedule();
+ } while (1);
+
+ finish_wait(&clt->permits_wait, &wait);
+
+ return permit;
+}
+EXPORT_SYMBOL(rtrs_clt_get_permit);
+
+/**
+ * rtrs_clt_put_permit() - puts allocated permit
+ * @clt: Current session
+ * @permit: Permit to be freed
+ *
+ * Context:
+ * Does not matter
+ */
+void rtrs_clt_put_permit(struct rtrs_clt *clt, struct rtrs_permit *permit)
+{
+ if (WARN_ON(!test_bit(permit->mem_id, clt->permits_map)))
+ return;
+
+ __rtrs_put_permit(clt, permit);
+
+ /*
+ * rtrs_clt_get_permit() adds itself to the &clt->permits_wait list
+ * before calling schedule(). So if rtrs_clt_get_permit() is sleeping
+ * it must have added itself to &clt->permits_wait before
+ * __rtrs_put_permit() finished.
+ * Hence it is safe to guard wake_up() with a waitqueue_active() test.
+ */
+ if (waitqueue_active(&clt->permits_wait))
+ wake_up(&clt->permits_wait);
+}
+EXPORT_SYMBOL(rtrs_clt_put_permit);
+
+void *rtrs_permit_to_pdu(struct rtrs_permit *permit)
+{
+ return permit + 1;
+}
+EXPORT_SYMBOL(rtrs_permit_to_pdu);
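+
+/*
+ * Typical usage of the permit API by an upper layer such as RNBD, shown as
+ * an illustrative sketch only (caller-side names are assumptions):
+ *
+ *	permit = rtrs_clt_get_permit(clt, RTRS_IO_CON, wait);
+ *	if (!permit)
+ *		return -EBUSY;	(only possible if wait was not requested)
+ *	pdu = rtrs_permit_to_pdu(permit);
+ *	... fill the pdu, issue the IO via rtrs_clt_request() ...
+ *	rtrs_clt_put_permit(clt, permit);
+ */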
+
+/**
+ * rtrs_permit_to_clt_con() - returns RDMA connection pointer by the permit
+ * @sess: client session pointer
+ * @permit: permit for the allocation of the RDMA buffer
+ * Note:
+ * IO connections start from 1.
+ * Connection 0 is for user messages.
+ */
+static
+struct rtrs_clt_con *rtrs_permit_to_clt_con(struct rtrs_clt_sess *sess,
+ struct rtrs_permit *permit)
+{
+ int id = 0;
+
+ if (likely(permit->con_type == RTRS_IO_CON))
+ id = (permit->cpu_id % (sess->s.con_num - 1)) + 1;
+
+ return to_clt_con(sess->s.con[id]);
+}
+
+/**
+ * __rtrs_clt_change_state() - change the session state through session state
+ * machine.
+ *
+ * @sess: client session to change the state of.
+ * @new_state: state to change to.
+ *
+ * returns true if successful, false if the requested state can not be set.
+ *
+ * Locks:
+ * state_wq lock must be held.
+ */
+static bool __rtrs_clt_change_state(struct rtrs_clt_sess *sess,
+ enum rtrs_clt_state new_state)
+{
+ enum rtrs_clt_state old_state;
+ bool changed = false;
+
+ lockdep_assert_held(&sess->state_wq.lock);
+
+ old_state = sess->state;
+ switch (new_state) {
+ case RTRS_CLT_CONNECTING:
+ switch (old_state) {
+ case RTRS_CLT_RECONNECTING:
+ changed = true;
+ fallthrough;
+ default:
+ break;
+ }
+ break;
+ case RTRS_CLT_RECONNECTING:
+ switch (old_state) {
+ case RTRS_CLT_CONNECTED:
+ case RTRS_CLT_CONNECTING_ERR:
+ case RTRS_CLT_CLOSED:
+ changed = true;
+ fallthrough;
+ default:
+ break;
+ }
+ break;
+ case RTRS_CLT_CONNECTED:
+ switch (old_state) {
+ case RTRS_CLT_CONNECTING:
+ changed = true;
+ fallthrough;
+ default:
+ break;
+ }
+ break;
+ case RTRS_CLT_CONNECTING_ERR:
+ switch (old_state) {
+ case RTRS_CLT_CONNECTING:
+ changed = true;
+ fallthrough;
+ default:
+ break;
+ }
+ break;
+ case RTRS_CLT_CLOSING:
+ switch (old_state) {
+ case RTRS_CLT_CONNECTING:
+ case RTRS_CLT_CONNECTING_ERR:
+ case RTRS_CLT_RECONNECTING:
+ case RTRS_CLT_CONNECTED:
+ changed = true;
+ fallthrough;
+ default:
+ break;
+ }
+ break;
+ case RTRS_CLT_CLOSED:
+ switch (old_state) {
+ case RTRS_CLT_CLOSING:
+ changed = true;
+ fallthrough;
+ default:
+ break;
+ }
+ break;
+ case RTRS_CLT_DEAD:
+ switch (old_state) {
+ case RTRS_CLT_CLOSED:
+ changed = true;
+ fallthrough;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ if (changed) {
+ sess->state = new_state;
+ wake_up_locked(&sess->state_wq);
+ }
+
+ return changed;
+}
+
+static bool rtrs_clt_change_state_from_to(struct rtrs_clt_sess *sess,
+ enum rtrs_clt_state old_state,
+ enum rtrs_clt_state new_state)
+{
+ bool changed = false;
+
+ spin_lock_irq(&sess->state_wq.lock);
+ if (sess->state == old_state)
+ changed = __rtrs_clt_change_state(sess, new_state);
+ spin_unlock_irq(&sess->state_wq.lock);
+
+ return changed;
+}
+
+static void rtrs_rdma_error_recovery(struct rtrs_clt_con *con)
+{
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+
+ if (rtrs_clt_change_state_from_to(sess,
+ RTRS_CLT_CONNECTED,
+ RTRS_CLT_RECONNECTING)) {
+ struct rtrs_clt *clt = sess->clt;
+ unsigned int delay_ms;
+
+ /*
+ * Normal scenario, reconnect if we were successfully connected
+ */
+ delay_ms = clt->reconnect_delay_sec * 1000;
+ queue_delayed_work(rtrs_wq, &sess->reconnect_dwork,
+ msecs_to_jiffies(delay_ms));
+ } else {
+ /*
+ * Error can happen just on establishing new connection,
+ * so notify waiter with error state, waiter is responsible
+ * for cleaning the rest and reconnect if needed.
+ */
+ rtrs_clt_change_state_from_to(sess,
+ RTRS_CLT_CONNECTING,
+ RTRS_CLT_CONNECTING_ERR);
+ }
+}
+
+static void rtrs_clt_fast_reg_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct rtrs_clt_con *con = cq->cq_context;
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ rtrs_err(con->c.sess, "Failed IB_WR_REG_MR: %s\n",
+ ib_wc_status_msg(wc->status));
+ rtrs_rdma_error_recovery(con);
+ }
+}
+
+static struct ib_cqe fast_reg_cqe = {
+ .done = rtrs_clt_fast_reg_done
+};
+
+static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
+ bool notify, bool can_wait);
+
+static void rtrs_clt_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct rtrs_clt_io_req *req =
+ container_of(wc->wr_cqe, typeof(*req), inv_cqe);
+ struct rtrs_clt_con *con = cq->cq_context;
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ rtrs_err(con->c.sess, "Failed IB_WR_LOCAL_INV: %s\n",
+ ib_wc_status_msg(wc->status));
+ rtrs_rdma_error_recovery(con);
+ }
+ req->need_inv = false;
+ if (likely(req->need_inv_comp))
+ complete(&req->inv_comp);
+ else
+ /* Complete request from INV callback */
+ complete_rdma_req(req, req->inv_errno, true, false);
+}
+
+static int rtrs_inv_rkey(struct rtrs_clt_io_req *req)
+{
+ struct rtrs_clt_con *con = req->con;
+ struct ib_send_wr wr = {
+ .opcode = IB_WR_LOCAL_INV,
+ .wr_cqe = &req->inv_cqe,
+ .send_flags = IB_SEND_SIGNALED,
+ .ex.invalidate_rkey = req->mr->rkey,
+ };
+ req->inv_cqe.done = rtrs_clt_inv_rkey_done;
+
+ return ib_post_send(con->c.qp, &wr, NULL);
+}
+
+static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
+ bool notify, bool can_wait)
+{
+ struct rtrs_clt_con *con = req->con;
+ struct rtrs_clt_sess *sess;
+ int err;
+
+ if (WARN_ON(!req->in_use))
+ return;
+ if (WARN_ON(!req->con))
+ return;
+ sess = to_clt_sess(con->c.sess);
+
+ if (req->sg_cnt) {
+ if (unlikely(req->dir == DMA_FROM_DEVICE && req->need_inv)) {
+ /*
+ * We are here to invalidate read requests
+ * ourselves. In normal scenario server should
+ * send INV for all read requests, but
+ * we are here, thus two things could happen:
+ *
+ * 1. this is failover, when errno != 0
+ * and can_wait == 1,
+ *
+ * 2. something totally bad happened and
+ * server forgot to send INV, so we
+ * should do that ourselves.
+ */
+
+ if (likely(can_wait)) {
+ req->need_inv_comp = true;
+ } else {
+ /* This should be IO path, so always notify */
+ WARN_ON(!notify);
+ /* Save errno for INV callback */
+ req->inv_errno = errno;
+ }
+
+ err = rtrs_inv_rkey(req);
+ if (unlikely(err)) {
+ rtrs_err(con->c.sess, "Send INV WR key=%#x: %d\n",
+ req->mr->rkey, err);
+ } else if (likely(can_wait)) {
+ wait_for_completion(&req->inv_comp);
+ } else {
+ /*
+ * Something went wrong, so request will be
+ * completed from INV callback.
+ */
+ WARN_ON_ONCE(1);
+
+ return;
+ }
+ }
+ ib_dma_unmap_sg(sess->s.dev->ib_dev, req->sglist,
+ req->sg_cnt, req->dir);
+ }
+ if (sess->clt->mp_policy == MP_POLICY_MIN_INFLIGHT)
+ atomic_dec(&sess->stats->inflight);
+
+ req->in_use = false;
+ req->con = NULL;
+
+ if (notify)
+ req->conf(req->priv, errno);
+}
+
+static int rtrs_post_send_rdma(struct rtrs_clt_con *con,
+ struct rtrs_clt_io_req *req,
+ struct rtrs_rbuf *rbuf, u32 off,
+ u32 imm, struct ib_send_wr *wr)
+{
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ enum ib_send_flags flags;
+ struct ib_sge sge;
+
+ if (unlikely(!req->sg_size)) {
+ rtrs_wrn(con->c.sess,
+ "Doing RDMA Write failed, no data supplied\n");
+ return -EINVAL;
+ }
+
+ /* user data and user message in the first list element */
+ sge.addr = req->iu->dma_addr;
+ sge.length = req->sg_size;
+ sge.lkey = sess->s.dev->ib_pd->local_dma_lkey;
+
+ /*
+ * From time to time we have to post signalled sends,
+ * or send queue will fill up and only QP reset can help.
+ */
+ flags = atomic_inc_return(&con->io_cnt) % sess->queue_depth ?
+ 0 : IB_SEND_SIGNALED;
+
+ ib_dma_sync_single_for_device(sess->s.dev->ib_dev, req->iu->dma_addr,
+ req->sg_size, DMA_TO_DEVICE);
+
+ return rtrs_iu_post_rdma_write_imm(&con->c, req->iu, &sge, 1,
+ rbuf->rkey, rbuf->addr + off,
+ imm, flags, wr);
+}
+
+static void process_io_rsp(struct rtrs_clt_sess *sess, u32 msg_id,
+ s16 errno, bool w_inval)
+{
+ struct rtrs_clt_io_req *req;
+
+ if (WARN_ON(msg_id >= sess->queue_depth))
+ return;
+
+ req = &sess->reqs[msg_id];
+ /* Drop need_inv if server responded with send with invalidation */
+ req->need_inv &= !w_inval;
+ complete_rdma_req(req, errno, true, false);
+}
+
+static void rtrs_clt_recv_done(struct rtrs_clt_con *con, struct ib_wc *wc)
+{
+ struct rtrs_iu *iu;
+ int err;
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+
+ WARN_ON(sess->flags != RTRS_MSG_NEW_RKEY_F);
+ iu = container_of(wc->wr_cqe, struct rtrs_iu,
+ cqe);
+ err = rtrs_iu_post_recv(&con->c, iu);
+ if (unlikely(err)) {
+ rtrs_err(con->c.sess, "post iu failed %d\n", err);
+ rtrs_rdma_error_recovery(con);
+ }
+}
+
+static void rtrs_clt_rkey_rsp_done(struct rtrs_clt_con *con, struct ib_wc *wc)
+{
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct rtrs_msg_rkey_rsp *msg;
+ u32 imm_type, imm_payload;
+ bool w_inval = false;
+ struct rtrs_iu *iu;
+ u32 buf_id;
+ int err;
+
+ WARN_ON(sess->flags != RTRS_MSG_NEW_RKEY_F);
+
+ iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
+
+ if (unlikely(wc->byte_len < sizeof(*msg))) {
+ rtrs_err(con->c.sess, "rkey response is malformed: size %d\n",
+ wc->byte_len);
+ goto out;
+ }
+ ib_dma_sync_single_for_cpu(sess->s.dev->ib_dev, iu->dma_addr,
+ iu->size, DMA_FROM_DEVICE);
+ msg = iu->buf;
+ if (unlikely(le16_to_cpu(msg->type) != RTRS_MSG_RKEY_RSP)) {
+ rtrs_err(sess->clt, "rkey response is malformed: type %d\n",
+ le16_to_cpu(msg->type));
+ goto out;
+ }
+ buf_id = le16_to_cpu(msg->buf_id);
+ if (WARN_ON(buf_id >= sess->queue_depth))
+ goto out;
+
+ rtrs_from_imm(be32_to_cpu(wc->ex.imm_data), &imm_type, &imm_payload);
+ if (likely(imm_type == RTRS_IO_RSP_IMM ||
+ imm_type == RTRS_IO_RSP_W_INV_IMM)) {
+ u32 msg_id;
+
+ w_inval = (imm_type == RTRS_IO_RSP_W_INV_IMM);
+ rtrs_from_io_rsp_imm(imm_payload, &msg_id, &err);
+
+ if (WARN_ON(buf_id != msg_id))
+ goto out;
+ sess->rbufs[buf_id].rkey = le32_to_cpu(msg->rkey);
+ process_io_rsp(sess, msg_id, err, w_inval);
+ }
+ ib_dma_sync_single_for_device(sess->s.dev->ib_dev, iu->dma_addr,
+ iu->size, DMA_FROM_DEVICE);
+ return rtrs_clt_recv_done(con, wc);
+out:
+ rtrs_rdma_error_recovery(con);
+}
+
+static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc);
+
+static struct ib_cqe io_comp_cqe = {
+ .done = rtrs_clt_rdma_done
+};
+
+/*
+ * Post x2 empty WRs: first is for this RDMA with IMM,
+ * second is for RECV with INV, which happened earlier.
+ */
+static int rtrs_post_recv_empty_x2(struct rtrs_con *con, struct ib_cqe *cqe)
+{
+ struct ib_recv_wr wr_arr[2], *wr;
+ int i;
+
+ memset(wr_arr, 0, sizeof(wr_arr));
+ for (i = 0; i < ARRAY_SIZE(wr_arr); i++) {
+ wr = &wr_arr[i];
+ wr->wr_cqe = cqe;
+ if (i)
+ /* Chain backwards */
+ wr->next = &wr_arr[i - 1];
+ }
+
+ return ib_post_recv(con->qp, wr, NULL);
+}
+
+static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct rtrs_clt_con *con = cq->cq_context;
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ u32 imm_type, imm_payload;
+ bool w_inval = false;
+ int err;
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ if (wc->status != IB_WC_WR_FLUSH_ERR) {
+ rtrs_err(sess->clt, "RDMA failed: %s\n",
+ ib_wc_status_msg(wc->status));
+ rtrs_rdma_error_recovery(con);
+ }
+ return;
+ }
+ rtrs_clt_update_wc_stats(con);
+
+ switch (wc->opcode) {
+ case IB_WC_RECV_RDMA_WITH_IMM:
+ /*
+ * post_recv() RDMA write completions of IO reqs (read/write)
+ * and hb
+ */
+ if (WARN_ON(wc->wr_cqe->done != rtrs_clt_rdma_done))
+ return;
+ rtrs_from_imm(be32_to_cpu(wc->ex.imm_data),
+ &imm_type, &imm_payload);
+ if (likely(imm_type == RTRS_IO_RSP_IMM ||
+ imm_type == RTRS_IO_RSP_W_INV_IMM)) {
+ u32 msg_id;
+
+ w_inval = (imm_type == RTRS_IO_RSP_W_INV_IMM);
+ rtrs_from_io_rsp_imm(imm_payload, &msg_id, &err);
+
+ process_io_rsp(sess, msg_id, err, w_inval);
+ } else if (imm_type == RTRS_HB_MSG_IMM) {
+ WARN_ON(con->c.cid);
+ rtrs_send_hb_ack(&sess->s);
+ if (sess->flags == RTRS_MSG_NEW_RKEY_F)
+ return rtrs_clt_recv_done(con, wc);
+ } else if (imm_type == RTRS_HB_ACK_IMM) {
+ WARN_ON(con->c.cid);
+ sess->s.hb_missed_cnt = 0;
+ if (sess->flags == RTRS_MSG_NEW_RKEY_F)
+ return rtrs_clt_recv_done(con, wc);
+ } else {
+ rtrs_wrn(con->c.sess, "Unknown IMM type %u\n",
+ imm_type);
+ }
+ if (w_inval)
+ /*
+ * Post x2 empty WRs: first is for this RDMA with IMM,
+ * second is for RECV with INV, which happened earlier.
+ */
+ err = rtrs_post_recv_empty_x2(&con->c, &io_comp_cqe);
+ else
+ err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
+ if (unlikely(err)) {
+ rtrs_err(con->c.sess, "rtrs_post_recv_empty(): %d\n",
+ err);
+ rtrs_rdma_error_recovery(con);
+ break;
+ }
+ break;
+ case IB_WC_RECV:
+ /*
+ * Key invalidations from server side
+ */
+ WARN_ON(!(wc->wc_flags & IB_WC_WITH_INVALIDATE ||
+ wc->wc_flags & IB_WC_WITH_IMM));
+ WARN_ON(wc->wr_cqe->done != rtrs_clt_rdma_done);
+ if (sess->flags == RTRS_MSG_NEW_RKEY_F) {
+ if (wc->wc_flags & IB_WC_WITH_INVALIDATE)
+ return rtrs_clt_recv_done(con, wc);
+
+ return rtrs_clt_rkey_rsp_done(con, wc);
+ }
+ break;
+ case IB_WC_RDMA_WRITE:
+ /*
+ * post_send() RDMA write completions of IO reqs (read/write)
+ * and hb
+ */
+ break;
+
+ default:
+ rtrs_wrn(sess->clt, "Unexpected WC type: %d\n", wc->opcode);
+ return;
+ }
+}
+
+static int post_recv_io(struct rtrs_clt_con *con, size_t q_size)
+{
+ int err, i;
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+
+ for (i = 0; i < q_size; i++) {
+ if (sess->flags == RTRS_MSG_NEW_RKEY_F) {
+ struct rtrs_iu *iu = &con->rsp_ius[i];
+
+ err = rtrs_iu_post_recv(&con->c, iu);
+ } else {
+ err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
+ }
+ if (unlikely(err))
+ return err;
+ }
+
+ return 0;
+}
+
+static int post_recv_sess(struct rtrs_clt_sess *sess)
+{
+ size_t q_size = 0;
+ int err, cid;
+
+ for (cid = 0; cid < sess->s.con_num; cid++) {
+ if (cid == 0)
+ q_size = SERVICE_CON_QUEUE_DEPTH;
+ else
+ q_size = sess->queue_depth;
+
+ /*
+ * x2 for RDMA read responses + FR key invalidations,
+ * RDMA writes do not require any FR registrations.
+ */
+ q_size *= 2;
+
+ err = post_recv_io(to_clt_con(sess->s.con[cid]), q_size);
+ if (unlikely(err)) {
+ rtrs_err(sess->clt, "post_recv_io(), err: %d\n", err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+struct path_it {
+ int i;
+ struct list_head skip_list;
+ struct rtrs_clt *clt;
+ struct rtrs_clt_sess *(*next_path)(struct path_it *it);
+};
+
+/**
+ * list_next_or_null_rr_rcu - get next list element in round-robin fashion.
+ * @head: the head for the list.
+ * @ptr: the list head to take the next element from.
+ * @type: the type of the struct this is embedded in.
+ * @memb: the name of the list_head within the struct.
+ *
+ * Next element returned in round-robin fashion, i.e. head will be skipped,
+ * but if list is observed as empty, NULL will be returned.
+ *
+ * This primitive may safely run concurrently with the _rcu list-mutation
+ * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
+ */
+#define list_next_or_null_rr_rcu(head, ptr, type, memb) \
+({ \
+ list_next_or_null_rcu(head, ptr, type, memb) ?: \
+ list_next_or_null_rcu(head, READ_ONCE((ptr)->next), \
+ type, memb); \
+})
+
+/**
+ * get_next_path_rr() - Returns path in round-robin fashion.
+ * @it: the path pointer
+ *
+ * Related to @MP_POLICY_RR
+ *
+ * Locks:
+ * rcu_read_lock() must be held.
+ */
+static struct rtrs_clt_sess *get_next_path_rr(struct path_it *it)
+{
+ struct rtrs_clt_sess __rcu **ppcpu_path;
+ struct rtrs_clt_sess *path;
+ struct rtrs_clt *clt;
+
+ clt = it->clt;
+
+ /*
+ * Here we use two RCU objects: @paths_list and @pcpu_path
+ * pointer. See rtrs_clt_remove_path_from_arr() for details
+ * how that is handled.
+ */
+
+ ppcpu_path = this_cpu_ptr(clt->pcpu_path);
+ path = rcu_dereference(*ppcpu_path);
+ if (unlikely(!path))
+ path = list_first_or_null_rcu(&clt->paths_list,
+ typeof(*path), s.entry);
+ else
+ path = list_next_or_null_rr_rcu(&clt->paths_list,
+ &path->s.entry,
+ typeof(*path),
+ s.entry);
+ rcu_assign_pointer(*ppcpu_path, path);
+
+ return path;
+}
+
+/**
+ * get_next_path_min_inflight() - Returns path with minimal inflight count.
+ * @it: the path pointer
+ *
+ * Related to @MP_POLICY_MIN_INFLIGHT
+ *
+ * Locks:
+ * rcu_read_lock() must be held.
+ */
+static struct rtrs_clt_sess *get_next_path_min_inflight(struct path_it *it)
+{
+ struct rtrs_clt_sess *min_path = NULL;
+ struct rtrs_clt *clt = it->clt;
+ struct rtrs_clt_sess *sess;
+ int min_inflight = INT_MAX;
+ int inflight;
+
+ list_for_each_entry_rcu(sess, &clt->paths_list, s.entry) {
+ if (unlikely(!list_empty(raw_cpu_ptr(sess->mp_skip_entry))))
+ continue;
+
+ inflight = atomic_read(&sess->stats->inflight);
+
+ if (inflight < min_inflight) {
+ min_inflight = inflight;
+ min_path = sess;
+ }
+ }
+
+ /*
+ * add the path to the skip list, so that next time we can get
+ * a different one
+ */
+ if (min_path)
+ list_add(raw_cpu_ptr(min_path->mp_skip_entry), &it->skip_list);
+
+ return min_path;
+}
+
+static inline void path_it_init(struct path_it *it, struct rtrs_clt *clt)
+{
+ INIT_LIST_HEAD(&it->skip_list);
+ it->clt = clt;
+ it->i = 0;
+
+ if (clt->mp_policy == MP_POLICY_RR)
+ it->next_path = get_next_path_rr;
+ else
+ it->next_path = get_next_path_min_inflight;
+}
+
+static inline void path_it_deinit(struct path_it *it)
+{
+ struct list_head *skip, *tmp;
+ /*
+ * The skip_list is used only for the MIN_INFLIGHT policy.
+ * We need to remove paths from it, so that next IO can insert
+ * paths (->mp_skip_entry) into a skip_list again.
+ */
+ list_for_each_safe(skip, tmp, &it->skip_list)
+ list_del_init(skip);
+}
+
+/**
+ * rtrs_clt_init_req() - Initialize an rtrs_clt_io_req holding information
+ * about an inflight IO.
+ * The user buffer holding user control message (not data) is copied into
+ * the corresponding buffer of rtrs_iu (req->iu->buf), which later on will
+ * also hold the control message of rtrs.
+ * @req: an io request holding information about IO.
+ * @sess: client session
+ * @conf: confirmation callback function to notify upper layer.
+ * @permit: permit for allocation of RDMA remote buffer
+ * @priv: private pointer
+ * @vec: kernel vector containing control message
+ * @usr_len: length of the user message
+ * @sg: scatter list for IO data
+ * @sg_cnt: number of scatter list entries
+ * @data_len: length of the IO data
+ * @dir: direction of the IO.
+ */
+static void rtrs_clt_init_req(struct rtrs_clt_io_req *req,
+ struct rtrs_clt_sess *sess,
+ void (*conf)(void *priv, int errno),
+ struct rtrs_permit *permit, void *priv,
+ const struct kvec *vec, size_t usr_len,
+ struct scatterlist *sg, size_t sg_cnt,
+ size_t data_len, int dir)
+{
+ struct iov_iter iter;
+ size_t len;
+
+ req->permit = permit;
+ req->in_use = true;
+ req->usr_len = usr_len;
+ req->data_len = data_len;
+ req->sglist = sg;
+ req->sg_cnt = sg_cnt;
+ req->priv = priv;
+ req->dir = dir;
+ req->con = rtrs_permit_to_clt_con(sess, permit);
+ req->conf = conf;
+ req->need_inv = false;
+ req->need_inv_comp = false;
+ req->inv_errno = 0;
+
+ iov_iter_kvec(&iter, READ, vec, 1, usr_len);
+ len = _copy_from_iter(req->iu->buf, usr_len, &iter);
+ WARN_ON(len != usr_len);
+
+ reinit_completion(&req->inv_comp);
+}
+
+static struct rtrs_clt_io_req *
+rtrs_clt_get_req(struct rtrs_clt_sess *sess,
+ void (*conf)(void *priv, int errno),
+ struct rtrs_permit *permit, void *priv,
+ const struct kvec *vec, size_t usr_len,
+ struct scatterlist *sg, size_t sg_cnt,
+ size_t data_len, int dir)
+{
+ struct rtrs_clt_io_req *req;
+
+ req = &sess->reqs[permit->mem_id];
+ rtrs_clt_init_req(req, sess, conf, permit, priv, vec, usr_len,
+ sg, sg_cnt, data_len, dir);
+ return req;
+}
+
+static struct rtrs_clt_io_req *
+rtrs_clt_get_copy_req(struct rtrs_clt_sess *alive_sess,
+ struct rtrs_clt_io_req *fail_req)
+{
+ struct rtrs_clt_io_req *req;
+ struct kvec vec = {
+ .iov_base = fail_req->iu->buf,
+ .iov_len = fail_req->usr_len
+ };
+
+ req = &alive_sess->reqs[fail_req->permit->mem_id];
+ rtrs_clt_init_req(req, alive_sess, fail_req->conf, fail_req->permit,
+ fail_req->priv, &vec, fail_req->usr_len,
+ fail_req->sglist, fail_req->sg_cnt,
+ fail_req->data_len, fail_req->dir);
+ return req;
+}
+
+static int rtrs_post_rdma_write_sg(struct rtrs_clt_con *con,
+ struct rtrs_clt_io_req *req,
+ struct rtrs_rbuf *rbuf,
+ u32 size, u32 imm)
+{
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct ib_sge *sge = req->sge;
+ enum ib_send_flags flags;
+ struct scatterlist *sg;
+ size_t num_sge;
+ int i;
+
+ for_each_sg(req->sglist, sg, req->sg_cnt, i) {
+ sge[i].addr = sg_dma_address(sg);
+ sge[i].length = sg_dma_len(sg);
+ sge[i].lkey = sess->s.dev->ib_pd->local_dma_lkey;
+ }
+ sge[i].addr = req->iu->dma_addr;
+ sge[i].length = size;
+ sge[i].lkey = sess->s.dev->ib_pd->local_dma_lkey;
+
+ num_sge = 1 + req->sg_cnt;
+
+ /*
+ * From time to time we have to post signalled sends,
+ * or send queue will fill up and only QP reset can help.
+ */
+ flags = atomic_inc_return(&con->io_cnt) % sess->queue_depth ?
+ 0 : IB_SEND_SIGNALED;
+
+ ib_dma_sync_single_for_device(sess->s.dev->ib_dev, req->iu->dma_addr,
+ size, DMA_TO_DEVICE);
+
+ return rtrs_iu_post_rdma_write_imm(&con->c, req->iu, sge, num_sge,
+ rbuf->rkey, rbuf->addr, imm,
+ flags, NULL);
+}
+
+static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
+{
+ struct rtrs_clt_con *con = req->con;
+ struct rtrs_sess *s = con->c.sess;
+ struct rtrs_clt_sess *sess = to_clt_sess(s);
+ struct rtrs_msg_rdma_write *msg;
+
+ struct rtrs_rbuf *rbuf;
+ int ret, count = 0;
+ u32 imm, buf_id;
+
+ const size_t tsize = sizeof(*msg) + req->data_len + req->usr_len;
+
+ if (unlikely(tsize > sess->chunk_size)) {
+ rtrs_wrn(s, "Write request failed, size too big %zu > %d\n",
+ tsize, sess->chunk_size);
+ return -EMSGSIZE;
+ }
+ if (req->sg_cnt) {
+ count = ib_dma_map_sg(sess->s.dev->ib_dev, req->sglist,
+ req->sg_cnt, req->dir);
+ if (unlikely(!count)) {
+ rtrs_wrn(s, "Write request failed, map failed\n");
+ return -EINVAL;
+ }
+ }
+ /* put rtrs msg after sg and user message */
+ msg = req->iu->buf + req->usr_len;
+ msg->type = cpu_to_le16(RTRS_MSG_WRITE);
+ msg->usr_len = cpu_to_le16(req->usr_len);
+
+ /* rtrs message on server side will be after user data and message */
+ imm = req->permit->mem_off + req->data_len + req->usr_len;
+ imm = rtrs_to_io_req_imm(imm);
+ buf_id = req->permit->mem_id;
+ req->sg_size = tsize;
+ rbuf = &sess->rbufs[buf_id];
+
+ /*
+ * Update stats now, after request is successfully sent it is not
+ * safe anymore to touch it.
+ */
+ rtrs_clt_update_all_stats(req, WRITE);
+
+ ret = rtrs_post_rdma_write_sg(req->con, req, rbuf,
+ req->usr_len + sizeof(*msg),
+ imm);
+ if (unlikely(ret)) {
+ rtrs_err(s, "Write request failed: %d\n", ret);
+ if (sess->clt->mp_policy == MP_POLICY_MIN_INFLIGHT)
+ atomic_dec(&sess->stats->inflight);
+ if (req->sg_cnt)
+ ib_dma_unmap_sg(sess->s.dev->ib_dev, req->sglist,
+ req->sg_cnt, req->dir);
+ }
+
+ return ret;
+}
+
+static int rtrs_map_sg_fr(struct rtrs_clt_io_req *req, size_t count)
+{
+ int nr;
+
+ /* Align the MR to a 4K page size to match the block virt boundary */
+ nr = ib_map_mr_sg(req->mr, req->sglist, count, NULL, SZ_4K);
+ if (nr < 0)
+ return nr;
+ if (unlikely(nr < req->sg_cnt))
+ return -EINVAL;
+ ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey));
+
+ return nr;
+}
+
+static int rtrs_clt_read_req(struct rtrs_clt_io_req *req)
+{
+ struct rtrs_clt_con *con = req->con;
+ struct rtrs_sess *s = con->c.sess;
+ struct rtrs_clt_sess *sess = to_clt_sess(s);
+ struct rtrs_msg_rdma_read *msg;
+ struct rtrs_ib_dev *dev;
+
+ struct ib_reg_wr rwr;
+ struct ib_send_wr *wr = NULL;
+
+ int ret, count = 0;
+ u32 imm, buf_id;
+
+ const size_t tsize = sizeof(*msg) + req->data_len + req->usr_len;
+
+ s = &sess->s;
+ dev = sess->s.dev;
+
+ if (unlikely(tsize > sess->chunk_size)) {
+ rtrs_wrn(s,
+ "Read request failed, message size is %zu, bigger than CHUNK_SIZE %d\n",
+ tsize, sess->chunk_size);
+ return -EMSGSIZE;
+ }
+
+ if (req->sg_cnt) {
+ count = ib_dma_map_sg(dev->ib_dev, req->sglist, req->sg_cnt,
+ req->dir);
+ if (unlikely(!count)) {
+ rtrs_wrn(s,
+ "Read request failed, dma map failed\n");
+ return -EINVAL;
+ }
+ }
+ /* put our message into req->buf after user message*/
+ msg = req->iu->buf + req->usr_len;
+ msg->type = cpu_to_le16(RTRS_MSG_READ);
+ msg->usr_len = cpu_to_le16(req->usr_len);
+
+ if (count) {
+ ret = rtrs_map_sg_fr(req, count);
+ if (ret < 0) {
+ rtrs_err_rl(s,
+ "Read request failed, failed to map fast reg. data, err: %d\n",
+ ret);
+ ib_dma_unmap_sg(dev->ib_dev, req->sglist, req->sg_cnt,
+ req->dir);
+ return ret;
+ }
+ rwr = (struct ib_reg_wr) {
+ .wr.opcode = IB_WR_REG_MR,
+ .wr.wr_cqe = &fast_reg_cqe,
+ .mr = req->mr,
+ .key = req->mr->rkey,
+ .access = (IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_WRITE),
+ };
+ wr = &rwr.wr;
+
+ msg->sg_cnt = cpu_to_le16(1);
+ msg->flags = cpu_to_le16(RTRS_MSG_NEED_INVAL_F);
+
+ msg->desc[0].addr = cpu_to_le64(req->mr->iova);
+ msg->desc[0].key = cpu_to_le32(req->mr->rkey);
+ msg->desc[0].len = cpu_to_le32(req->mr->length);
+
+ /* Further invalidation is required */
+ req->need_inv = !!RTRS_MSG_NEED_INVAL_F;
+
+ } else {
+ msg->sg_cnt = 0;
+ msg->flags = 0;
+ }
+ /*
+ * rtrs message will be after the space reserved for disk data and
+ * user message
+ */
+ imm = req->permit->mem_off + req->data_len + req->usr_len;
+ imm = rtrs_to_io_req_imm(imm);
+ buf_id = req->permit->mem_id;
+
+ req->sg_size = sizeof(*msg);
+ req->sg_size += le16_to_cpu(msg->sg_cnt) * sizeof(struct rtrs_sg_desc);
+ req->sg_size += req->usr_len;
+
+ /*
+ * Update stats now, after request is successfully sent it is not
+ * safe anymore to touch it.
+ */
+ rtrs_clt_update_all_stats(req, READ);
+
+ ret = rtrs_post_send_rdma(req->con, req, &sess->rbufs[buf_id],
+ req->data_len, imm, wr);
+ if (unlikely(ret)) {
+ rtrs_err(s, "Read request failed: %d\n", ret);
+ if (sess->clt->mp_policy == MP_POLICY_MIN_INFLIGHT)
+ atomic_dec(&sess->stats->inflight);
+ req->need_inv = false;
+ if (req->sg_cnt)
+ ib_dma_unmap_sg(dev->ib_dev, req->sglist,
+ req->sg_cnt, req->dir);
+ }
+
+ return ret;
+}
+
+/**
+ * rtrs_clt_failover_req() - Try to find an active path for a failed request
+ * @clt: clt context
+ * @fail_req: a failed io request.
+ */
+static int rtrs_clt_failover_req(struct rtrs_clt *clt,
+ struct rtrs_clt_io_req *fail_req)
+{
+ struct rtrs_clt_sess *alive_sess;
+ struct rtrs_clt_io_req *req;
+ int err = -ECONNABORTED;
+ struct path_it it;
+
+ rcu_read_lock();
+ for (path_it_init(&it, clt);
+ (alive_sess = it.next_path(&it)) && it.i < it.clt->paths_num;
+ it.i++) {
+ if (unlikely(READ_ONCE(alive_sess->state) !=
+ RTRS_CLT_CONNECTED))
+ continue;
+ req = rtrs_clt_get_copy_req(alive_sess, fail_req);
+ if (req->dir == DMA_TO_DEVICE)
+ err = rtrs_clt_write_req(req);
+ else
+ err = rtrs_clt_read_req(req);
+ if (unlikely(err)) {
+ req->in_use = false;
+ continue;
+ }
+ /* Success path */
+ rtrs_clt_inc_failover_cnt(alive_sess->stats);
+ break;
+ }
+ path_it_deinit(&it);
+ rcu_read_unlock();
+
+ return err;
+}
+
+static void fail_all_outstanding_reqs(struct rtrs_clt_sess *sess)
+{
+ struct rtrs_clt *clt = sess->clt;
+ struct rtrs_clt_io_req *req;
+ int i, err;
+
+ if (!sess->reqs)
+ return;
+ for (i = 0; i < sess->queue_depth; ++i) {
+ req = &sess->reqs[i];
+ if (!req->in_use)
+ continue;
+
+ /*
+ * Safely (without notification) complete failed request.
+ * After completion this request is still usable and can
+ * be failed over to another path.
+ */
+ complete_rdma_req(req, -ECONNABORTED, false, true);
+
+ err = rtrs_clt_failover_req(clt, req);
+ if (unlikely(err))
+ /* Failover failed, notify anyway */
+ req->conf(req->priv, err);
+ }
+}
+
+static void free_sess_reqs(struct rtrs_clt_sess *sess)
+{
+ struct rtrs_clt_io_req *req;
+ int i;
+
+ if (!sess->reqs)
+ return;
+ for (i = 0; i < sess->queue_depth; ++i) {
+ req = &sess->reqs[i];
+ if (req->mr)
+ ib_dereg_mr(req->mr);
+ kfree(req->sge);
+ rtrs_iu_free(req->iu, DMA_TO_DEVICE,
+ sess->s.dev->ib_dev, 1);
+ }
+ kfree(sess->reqs);
+ sess->reqs = NULL;
+}
+
+static int alloc_sess_reqs(struct rtrs_clt_sess *sess)
+{
+ struct rtrs_clt_io_req *req;
+ struct rtrs_clt *clt = sess->clt;
+ int i, err = -ENOMEM;
+
+ sess->reqs = kcalloc(sess->queue_depth, sizeof(*sess->reqs),
+ GFP_KERNEL);
+ if (!sess->reqs)
+ return -ENOMEM;
+
+ for (i = 0; i < sess->queue_depth; ++i) {
+ req = &sess->reqs[i];
+ req->iu = rtrs_iu_alloc(1, sess->max_hdr_size, GFP_KERNEL,
+ sess->s.dev->ib_dev,
+ DMA_TO_DEVICE,
+ rtrs_clt_rdma_done);
+ if (!req->iu)
+ goto out;
+
+ req->sge = kmalloc_array(clt->max_segments + 1,
+ sizeof(*req->sge), GFP_KERNEL);
+ if (!req->sge)
+ goto out;
+
+ req->mr = ib_alloc_mr(sess->s.dev->ib_pd, IB_MR_TYPE_MEM_REG,
+ sess->max_pages_per_mr);
+ if (IS_ERR(req->mr)) {
+ err = PTR_ERR(req->mr);
+ req->mr = NULL;
+ pr_err("Failed to alloc sess->max_pages_per_mr %d\n",
+ sess->max_pages_per_mr);
+ goto out;
+ }
+
+ init_completion(&req->inv_comp);
+ }
+
+ return 0;
+
+out:
+ free_sess_reqs(sess);
+
+ return err;
+}
+
+static int alloc_permits(struct rtrs_clt *clt)
+{
+ unsigned int chunk_bits;
+ int err, i;
+
+ clt->permits_map = kcalloc(BITS_TO_LONGS(clt->queue_depth),
+ sizeof(long), GFP_KERNEL);
+ if (!clt->permits_map) {
+ err = -ENOMEM;
+ goto out_err;
+ }
+ clt->permits = kcalloc(clt->queue_depth, permit_size(clt), GFP_KERNEL);
+ if (!clt->permits) {
+ err = -ENOMEM;
+ goto err_map;
+ }
+ chunk_bits = ilog2(clt->queue_depth - 1) + 1;
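+	/*
+	 * Example: queue_depth == 128 gives chunk_bits == 7, so the chunk
+	 * index (mem_id) lands in the top 7 bits of the IMM payload and the
+	 * remaining low bits carry the offset of the rtrs message inside
+	 * the chunk (see mem_off below and rtrs_to_io_req_imm()).
+	 */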
+ for (i = 0; i < clt->queue_depth; i++) {
+ struct rtrs_permit *permit;
+
+ permit = get_permit(clt, i);
+ permit->mem_id = i;
+ permit->mem_off = i << (MAX_IMM_PAYL_BITS - chunk_bits);
+ }
+
+ return 0;
+
+err_map:
+ kfree(clt->permits_map);
+ clt->permits_map = NULL;
+out_err:
+ return err;
+}
+
+static void free_permits(struct rtrs_clt *clt)
+{
+ kfree(clt->permits_map);
+ clt->permits_map = NULL;
+ kfree(clt->permits);
+ clt->permits = NULL;
+}
+
+static void query_fast_reg_mode(struct rtrs_clt_sess *sess)
+{
+ struct ib_device *ib_dev;
+ u64 max_pages_per_mr;
+ int mr_page_shift;
+
+ ib_dev = sess->s.dev->ib_dev;
+
+ /*
+ * Use the smallest page size supported by the HCA, down to a
+ * minimum of 4096 bytes. We're unlikely to build large sglists
+ * out of smaller entries.
+ */
+ mr_page_shift = max(12, ffs(ib_dev->attrs.page_size_cap) - 1);
+ max_pages_per_mr = ib_dev->attrs.max_mr_size;
+ do_div(max_pages_per_mr, (1ull << mr_page_shift));
+ sess->max_pages_per_mr =
+ min3(sess->max_pages_per_mr, (u32)max_pages_per_mr,
+ ib_dev->attrs.max_fast_reg_page_list_len);
+ sess->max_send_sge = ib_dev->attrs.max_send_sge;
+}
+
+static bool rtrs_clt_change_state_get_old(struct rtrs_clt_sess *sess,
+ enum rtrs_clt_state new_state,
+ enum rtrs_clt_state *old_state)
+{
+ bool changed;
+
+ spin_lock_irq(&sess->state_wq.lock);
+ *old_state = sess->state;
+ changed = __rtrs_clt_change_state(sess, new_state);
+ spin_unlock_irq(&sess->state_wq.lock);
+
+ return changed;
+}
+
+static bool rtrs_clt_change_state(struct rtrs_clt_sess *sess,
+ enum rtrs_clt_state new_state)
+{
+ enum rtrs_clt_state old_state;
+
+ return rtrs_clt_change_state_get_old(sess, new_state, &old_state);
+}
+
+static void rtrs_clt_hb_err_handler(struct rtrs_con *c)
+{
+ struct rtrs_clt_con *con = container_of(c, typeof(*con), c);
+
+ rtrs_rdma_error_recovery(con);
+}
+
+static void rtrs_clt_init_hb(struct rtrs_clt_sess *sess)
+{
+ rtrs_init_hb(&sess->s, &io_comp_cqe,
+ RTRS_HB_INTERVAL_MS,
+ RTRS_HB_MISSED_MAX,
+ rtrs_clt_hb_err_handler,
+ rtrs_wq);
+}
+
+static void rtrs_clt_start_hb(struct rtrs_clt_sess *sess)
+{
+ rtrs_start_hb(&sess->s);
+}
+
+static void rtrs_clt_stop_hb(struct rtrs_clt_sess *sess)
+{
+ rtrs_stop_hb(&sess->s);
+}
+
+static void rtrs_clt_reconnect_work(struct work_struct *work);
+static void rtrs_clt_close_work(struct work_struct *work);
+
+static struct rtrs_clt_sess *alloc_sess(struct rtrs_clt *clt,
+ const struct rtrs_addr *path,
+ size_t con_num, u16 max_segments,
+ size_t max_segment_size)
+{
+ struct rtrs_clt_sess *sess;
+ int err = -ENOMEM;
+ int cpu;
+
+ sess = kzalloc(sizeof(*sess), GFP_KERNEL);
+ if (!sess)
+ goto err;
+
+ /* Extra connection for user messages */
+ con_num += 1;
+
+ sess->s.con = kcalloc(con_num, sizeof(*sess->s.con), GFP_KERNEL);
+ if (!sess->s.con)
+ goto err_free_sess;
+
+ sess->stats = kzalloc(sizeof(*sess->stats), GFP_KERNEL);
+ if (!sess->stats)
+ goto err_free_con;
+
+ mutex_init(&sess->init_mutex);
+ uuid_gen(&sess->s.uuid);
+ memcpy(&sess->s.dst_addr, path->dst,
+ rdma_addr_size((struct sockaddr *)path->dst));
+
+ /*
+ * rdma_resolve_addr() passes src_addr to cma_bind_addr, which
+ * checks that the sa_family is non-zero. If the user passed src_addr=NULL,
+ * the sess->src_addr will contain only zeros, which is fine.
+ */
+ if (path->src)
+ memcpy(&sess->s.src_addr, path->src,
+ rdma_addr_size((struct sockaddr *)path->src));
+ strlcpy(sess->s.sessname, clt->sessname, sizeof(sess->s.sessname));
+ sess->s.con_num = con_num;
+ sess->clt = clt;
+ sess->max_pages_per_mr = max_segments * max_segment_size >> 12;
+ init_waitqueue_head(&sess->state_wq);
+ sess->state = RTRS_CLT_CONNECTING;
+ atomic_set(&sess->connected_cnt, 0);
+ INIT_WORK(&sess->close_work, rtrs_clt_close_work);
+ INIT_DELAYED_WORK(&sess->reconnect_dwork, rtrs_clt_reconnect_work);
+ rtrs_clt_init_hb(sess);
+
+ sess->mp_skip_entry = alloc_percpu(typeof(*sess->mp_skip_entry));
+ if (!sess->mp_skip_entry)
+ goto err_free_stats;
+
+ for_each_possible_cpu(cpu)
+ INIT_LIST_HEAD(per_cpu_ptr(sess->mp_skip_entry, cpu));
+
+ err = rtrs_clt_init_stats(sess->stats);
+ if (err)
+ goto err_free_percpu;
+
+ return sess;
+
+err_free_percpu:
+ free_percpu(sess->mp_skip_entry);
+err_free_stats:
+ kfree(sess->stats);
+err_free_con:
+ kfree(sess->s.con);
+err_free_sess:
+ kfree(sess);
+err:
+ return ERR_PTR(err);
+}
+
+void free_sess(struct rtrs_clt_sess *sess)
+{
+ free_percpu(sess->mp_skip_entry);
+ mutex_destroy(&sess->init_mutex);
+ kfree(sess->s.con);
+ kfree(sess->rbufs);
+ kfree(sess);
+}
+
+static int create_con(struct rtrs_clt_sess *sess, unsigned int cid)
+{
+ struct rtrs_clt_con *con;
+
+ con = kzalloc(sizeof(*con), GFP_KERNEL);
+ if (!con)
+ return -ENOMEM;
+
+ /* Map first two connections to the first CPU */
+ con->cpu = (cid ? cid - 1 : 0) % nr_cpu_ids;
+ con->c.cid = cid;
+ con->c.sess = &sess->s;
+ atomic_set(&con->io_cnt, 0);
+
+ sess->s.con[cid] = &con->c;
+
+ return 0;
+}
+
+static void destroy_con(struct rtrs_clt_con *con)
+{
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+
+ sess->s.con[con->c.cid] = NULL;
+ kfree(con);
+}
+
+static int create_con_cq_qp(struct rtrs_clt_con *con)
+{
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ u16 wr_queue_size;
+ int err, cq_vector;
+ struct rtrs_msg_rkey_rsp *rsp;
+
+ /*
+ * This function can fail, but destroy_con_cq_qp() should still be
+ * called, because create_con_cq_qp() is called on the cm event path,
+ * so the caller/waiter never knows whether we failed before or after
+ * create_con_cq_qp(). To solve this dilemma without introducing any
+ * additional flags, simply allow destroy_con_cq_qp() to be called
+ * many times.
+ */
+
+ if (con->c.cid == 0) {
+ /*
+ * One completion for each receive and two for each send
+ * (send request + registration)
+ * + 2 for drain and heartbeat
+ * in case qp gets into error state
+ */
+ wr_queue_size = SERVICE_CON_QUEUE_DEPTH * 3 + 2;
+ /* We must be the first here */
+ if (WARN_ON(sess->s.dev))
+ return -EINVAL;
+
+ /*
+ * The whole session uses device from user connection.
+ * Be careful not to close user connection before ib dev
+ * is gracefully put.
+ */
+ sess->s.dev = rtrs_ib_dev_find_or_add(con->c.cm_id->device,
+ &dev_pd);
+ if (!sess->s.dev) {
+ rtrs_wrn(sess->clt,
+ "rtrs_ib_dev_find_get_or_add(): no memory\n");
+ return -ENOMEM;
+ }
+ sess->s.dev_ref = 1;
+ query_fast_reg_mode(sess);
+ } else {
+ /*
+ * Here we assume that session members are correctly set.
+ * This is always true if user connection (cid == 0) is
+ * established first.
+ */
+ if (WARN_ON(!sess->s.dev))
+ return -EINVAL;
+ if (WARN_ON(!sess->queue_depth))
+ return -EINVAL;
+
+ /* Shared between connections */
+ sess->s.dev_ref++;
+ wr_queue_size =
+ min_t(int, sess->s.dev->ib_dev->attrs.max_qp_wr,
+ /* QD * (REQ + RSP + FR REGS or INVS) + drain */
+ sess->queue_depth * 3 + 1);
+ }
+ /* alloc iu to recv new rkey reply when server reports flags set */
+ if (sess->flags == RTRS_MSG_NEW_RKEY_F || con->c.cid == 0) {
+ con->rsp_ius = rtrs_iu_alloc(wr_queue_size, sizeof(*rsp),
+ GFP_KERNEL, sess->s.dev->ib_dev,
+ DMA_FROM_DEVICE,
+ rtrs_clt_rdma_done);
+ if (!con->rsp_ius)
+ return -ENOMEM;
+ con->queue_size = wr_queue_size;
+ }
+ cq_vector = con->cpu % sess->s.dev->ib_dev->num_comp_vectors;
+ err = rtrs_cq_qp_create(&sess->s, &con->c, sess->max_send_sge,
+ cq_vector, wr_queue_size, wr_queue_size,
+ IB_POLL_SOFTIRQ);
+ /*
+ * In case of error we do not bother to clean previous allocations,
+ * since destroy_con_cq_qp() must be called.
+ */
+ return err;
+}
+
+static void destroy_con_cq_qp(struct rtrs_clt_con *con)
+{
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+
+ /*
+ * Be careful here: destroy_con_cq_qp() can be called even if
+ * create_con_cq_qp() failed, see comments there.
+ */
+
+ rtrs_cq_qp_destroy(&con->c);
+ if (con->rsp_ius) {
+ rtrs_iu_free(con->rsp_ius, DMA_FROM_DEVICE,
+ sess->s.dev->ib_dev, con->queue_size);
+ con->rsp_ius = NULL;
+ con->queue_size = 0;
+ }
+ if (sess->s.dev_ref && !--sess->s.dev_ref) {
+ rtrs_ib_dev_put(sess->s.dev);
+ sess->s.dev = NULL;
+ }
+}
+
+static void stop_cm(struct rtrs_clt_con *con)
+{
+ rdma_disconnect(con->c.cm_id);
+ if (con->c.qp)
+ ib_drain_qp(con->c.qp);
+}
+
+static void destroy_cm(struct rtrs_clt_con *con)
+{
+ rdma_destroy_id(con->c.cm_id);
+ con->c.cm_id = NULL;
+}
+
+static int rtrs_rdma_addr_resolved(struct rtrs_clt_con *con)
+{
+ struct rtrs_sess *s = con->c.sess;
+ int err;
+
+ err = create_con_cq_qp(con);
+ if (err) {
+ rtrs_err(s, "create_con_cq_qp(), err: %d\n", err);
+ return err;
+ }
+ err = rdma_resolve_route(con->c.cm_id, RTRS_CONNECT_TIMEOUT_MS);
+ if (err) {
+ rtrs_err(s, "Resolving route failed, err: %d\n", err);
+ destroy_con_cq_qp(con);
+ }
+
+ return err;
+}
+
+static int rtrs_rdma_route_resolved(struct rtrs_clt_con *con)
+{
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct rtrs_clt *clt = sess->clt;
+ struct rtrs_msg_conn_req msg;
+ struct rdma_conn_param param;
+
+ int err;
+
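+ /*
+ * 7 is the maximum for these 3-bit CM retry counters; an RNR retry
+ * count of 7 means retry indefinitely.
+ */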
+ param = (struct rdma_conn_param) {
+ .retry_count = 7,
+ .rnr_retry_count = 7,
+ .private_data = &msg,
+ .private_data_len = sizeof(msg),
+ };
+
+ msg = (struct rtrs_msg_conn_req) {
+ .magic = cpu_to_le16(RTRS_MAGIC),
+ .version = cpu_to_le16(RTRS_PROTO_VER),
+ .cid = cpu_to_le16(con->c.cid),
+ .cid_num = cpu_to_le16(sess->s.con_num),
+ .recon_cnt = cpu_to_le16(sess->s.recon_cnt),
+ };
+ uuid_copy(&msg.sess_uuid, &sess->s.uuid);
+ uuid_copy(&msg.paths_uuid, &clt->paths_uuid);
+
+ err = rdma_connect(con->c.cm_id, &param);
+ if (err)
+ rtrs_err(clt, "rdma_connect(): %d\n", err);
+
+ return err;
+}
+
+static int rtrs_rdma_conn_established(struct rtrs_clt_con *con,
+ struct rdma_cm_event *ev)
+{
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct rtrs_clt *clt = sess->clt;
+ const struct rtrs_msg_conn_rsp *msg;
+ u16 version, queue_depth;
+ int errno;
+ u8 len;
+
+ msg = ev->param.conn.private_data;
+ len = ev->param.conn.private_data_len;
+ if (len < sizeof(*msg)) {
+ rtrs_err(clt, "Invalid RTRS connection response\n");
+ return -ECONNRESET;
+ }
+ if (le16_to_cpu(msg->magic) != RTRS_MAGIC) {
+ rtrs_err(clt, "Invalid RTRS magic\n");
+ return -ECONNRESET;
+ }
+ version = le16_to_cpu(msg->version);
+ if (version >> 8 != RTRS_PROTO_VER_MAJOR) {
+ rtrs_err(clt, "Unsupported major RTRS version: %d, expected %d\n",
+ version >> 8, RTRS_PROTO_VER_MAJOR);
+ return -ECONNRESET;
+ }
+ errno = le16_to_cpu(msg->errno);
+ if (errno) {
+ rtrs_err(clt, "Invalid RTRS message: errno %d\n",
+ errno);
+ return -ECONNRESET;
+ }
+ if (con->c.cid == 0) {
+ queue_depth = le16_to_cpu(msg->queue_depth);
+
+ if (queue_depth > MAX_SESS_QUEUE_DEPTH) {
+ rtrs_err(clt, "Invalid RTRS message: queue=%d\n",
+ queue_depth);
+ return -ECONNRESET;
+ }
+ if (!sess->rbufs || sess->queue_depth < queue_depth) {
+ kfree(sess->rbufs);
+ sess->rbufs = kcalloc(queue_depth, sizeof(*sess->rbufs),
+ GFP_KERNEL);
+ if (!sess->rbufs)
+ return -ENOMEM;
+ }
+ sess->queue_depth = queue_depth;
+ sess->max_hdr_size = le32_to_cpu(msg->max_hdr_size);
+ sess->max_io_size = le32_to_cpu(msg->max_io_size);
+ sess->flags = le32_to_cpu(msg->flags);
+ sess->chunk_size = sess->max_io_size + sess->max_hdr_size;
+
+ /*
+ * Global queue depth and IO size are always a minimum.
+ * If during a reconnect the server sends us a slightly higher
+ * value, the client does not care and keeps using the cached
+ * minimum.
+ *
+ * Since several sessions (paths) can re-establish connections
+ * in parallel, take the lock.
+ */
+ mutex_lock(&clt->paths_mutex);
+ clt->queue_depth = min_not_zero(sess->queue_depth,
+ clt->queue_depth);
+ clt->max_io_size = min_not_zero(sess->max_io_size,
+ clt->max_io_size);
+ mutex_unlock(&clt->paths_mutex);
+
+ /*
+ * Cache the hca_port and hca_name for sysfs
+ */
+ sess->hca_port = con->c.cm_id->port_num;
+ scnprintf(sess->hca_name, sizeof(sess->hca_name), "%s",
+ sess->s.dev->ib_dev->name);
+ sess->s.src_addr = con->c.cm_id->route.addr.src_addr;
+ }
+
+ return 0;
+}
+
+static inline void flag_success_on_conn(struct rtrs_clt_con *con)
+{
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+
+ atomic_inc(&sess->connected_cnt);
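+ /* cm_err == 1 marks an established connection, see flag_error_on_conn() */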
+ con->cm_err = 1;
+}
+
+static int rtrs_rdma_conn_rejected(struct rtrs_clt_con *con,
+ struct rdma_cm_event *ev)
+{
+ struct rtrs_sess *s = con->c.sess;
+ const struct rtrs_msg_conn_rsp *msg;
+ const char *rej_msg;
+ int status, errno;
+ u8 data_len;
+
+ status = ev->status;
+ rej_msg = rdma_reject_msg(con->c.cm_id, status);
+ msg = rdma_consumer_reject_data(con->c.cm_id, ev, &data_len);
+
+ if (msg && data_len >= sizeof(*msg)) {
+ errno = (int16_t)le16_to_cpu(msg->errno);
+ if (errno == -EBUSY)
+ rtrs_err(s,
+ "Previous session is still exists on the server, please reconnect later\n");
+ else
+ rtrs_err(s,
+ "Connect rejected: status %d (%s), rtrs errno %d\n",
+ status, rej_msg, errno);
+ } else {
+ rtrs_err(s,
+ "Connect rejected but with malformed message: status %d (%s)\n",
+ status, rej_msg);
+ }
+
+ return -ECONNRESET;
+}
+
+static void rtrs_clt_close_conns(struct rtrs_clt_sess *sess, bool wait)
+{
+ if (rtrs_clt_change_state(sess, RTRS_CLT_CLOSING))
+ queue_work(rtrs_wq, &sess->close_work);
+ if (wait)
+ flush_work(&sess->close_work);
+}
+
+static inline void flag_error_on_conn(struct rtrs_clt_con *con, int cm_err)
+{
+ if (con->cm_err == 1) {
+ struct rtrs_clt_sess *sess;
+
+ sess = to_clt_sess(con->c.sess);
+ if (atomic_dec_and_test(&sess->connected_cnt))
+ wake_up(&sess->state_wq);
+ }
+ con->cm_err = cm_err;
+}
+
+static int rtrs_clt_rdma_cm_handler(struct rdma_cm_id *cm_id,
+ struct rdma_cm_event *ev)
+{
+ struct rtrs_clt_con *con = cm_id->context;
+ struct rtrs_sess *s = con->c.sess;
+ struct rtrs_clt_sess *sess = to_clt_sess(s);
+ int cm_err = 0;
+
+ switch (ev->event) {
+ case RDMA_CM_EVENT_ADDR_RESOLVED:
+ cm_err = rtrs_rdma_addr_resolved(con);
+ break;
+ case RDMA_CM_EVENT_ROUTE_RESOLVED:
+ cm_err = rtrs_rdma_route_resolved(con);
+ break;
+ case RDMA_CM_EVENT_ESTABLISHED:
+ con->cm_err = rtrs_rdma_conn_established(con, ev);
+ if (likely(!con->cm_err)) {
+ /*
+ * Report success and wake up. Here we abuse state_wq,
+ * i.e. wake up without state change, but we set cm_err.
+ */
+ flag_success_on_conn(con);
+ wake_up(&sess->state_wq);
+ return 0;
+ }
+ break;
+ case RDMA_CM_EVENT_REJECTED:
+ cm_err = rtrs_rdma_conn_rejected(con, ev);
+ break;
+ case RDMA_CM_EVENT_CONNECT_ERROR:
+ case RDMA_CM_EVENT_UNREACHABLE:
+ rtrs_wrn(s, "CM error event %d\n", ev->event);
+ cm_err = -ECONNRESET;
+ break;
+ case RDMA_CM_EVENT_ADDR_ERROR:
+ case RDMA_CM_EVENT_ROUTE_ERROR:
+ cm_err = -EHOSTUNREACH;
+ break;
+ case RDMA_CM_EVENT_DISCONNECTED:
+ case RDMA_CM_EVENT_ADDR_CHANGE:
+ case RDMA_CM_EVENT_TIMEWAIT_EXIT:
+ cm_err = -ECONNRESET;
+ break;
+ case RDMA_CM_EVENT_DEVICE_REMOVAL:
+ /*
+ * Device removal is a special case. Queue close and return 0.
+ */
+ rtrs_clt_close_conns(sess, false);
+ return 0;
+ default:
+ rtrs_err(s, "Unexpected RDMA CM event (%d)\n", ev->event);
+ cm_err = -ECONNRESET;
+ break;
+ }
+
+ if (cm_err) {
+ /*
+ * A cm error makes sense only while a connection is being
+ * established; in other cases we rely on the normal reconnect
+ * procedure.
+ */
+ flag_error_on_conn(con, cm_err);
+ rtrs_rdma_error_recovery(con);
+ }
+
+ return 0;
+}
+
+static int create_cm(struct rtrs_clt_con *con)
+{
+ struct rtrs_sess *s = con->c.sess;
+ struct rtrs_clt_sess *sess = to_clt_sess(s);
+ struct rdma_cm_id *cm_id;
+ int err;
+
+ cm_id = rdma_create_id(&init_net, rtrs_clt_rdma_cm_handler, con,
+ sess->s.dst_addr.ss_family == AF_IB ?
+ RDMA_PS_IB : RDMA_PS_TCP, IB_QPT_RC);
+ if (IS_ERR(cm_id)) {
+ err = PTR_ERR(cm_id);
+ rtrs_err(s, "Failed to create CM ID, err: %d\n", err);
+
+ return err;
+ }
+ con->c.cm_id = cm_id;
+ con->cm_err = 0;
+ /* allow the port to be reused */
+ err = rdma_set_reuseaddr(cm_id, 1);
+ if (err != 0) {
+ rtrs_err(s, "Set address reuse failed, err: %d\n", err);
+ goto destroy_cm;
+ }
+ err = rdma_resolve_addr(cm_id, (struct sockaddr *)&sess->s.src_addr,
+ (struct sockaddr *)&sess->s.dst_addr,
+ RTRS_CONNECT_TIMEOUT_MS);
+ if (err) {
+ rtrs_err(s, "Failed to resolve address, err: %d\n", err);
+ goto destroy_cm;
+ }
+ /*
+ * Combine connection status and session events. This is needed
+ * to wait for two possible cases: cm_err has something meaningful
+ * or the session state was really changed to an error by device removal.
+ */
+ err = wait_event_interruptible_timeout(
+ sess->state_wq,
+ con->cm_err || sess->state != RTRS_CLT_CONNECTING,
+ msecs_to_jiffies(RTRS_CONNECT_TIMEOUT_MS));
+ if (err == 0 || err == -ERESTARTSYS) {
+ if (err == 0)
+ err = -ETIMEDOUT;
+ /* Timed out or interrupted */
+ goto errr;
+ }
+ if (con->cm_err < 0) {
+ err = con->cm_err;
+ goto errr;
+ }
+ if (READ_ONCE(sess->state) != RTRS_CLT_CONNECTING) {
+ /* Device removal */
+ err = -ECONNABORTED;
+ goto errr;
+ }
+
+ return 0;
+
+errr:
+ stop_cm(con);
+ /* It is safe to call destroy even if cq_qp was never initialized */
+ destroy_con_cq_qp(con);
+destroy_cm:
+ destroy_cm(con);
+
+ return err;
+}
+
+static void rtrs_clt_sess_up(struct rtrs_clt_sess *sess)
+{
+ struct rtrs_clt *clt = sess->clt;
+ int up;
+
+ /*
+ * We can fire the RECONNECTED event only when all paths were
+ * connected on rtrs_clt_open(), then each was disconnected and
+ * the first one connected again. That is why we play this nasty
+ * game with the counter value.
+ */
+
+ mutex_lock(&clt->paths_ev_mutex);
+ up = ++clt->paths_up;
+ /*
+ * Here it is safe to access paths_num directly, since the up counter
+ * exceeds MAX_PATHS_NUM only while rtrs_clt_open() is in progress,
+ * and thus path removals are impossible.
+ */
+ if (up > MAX_PATHS_NUM && up == MAX_PATHS_NUM + clt->paths_num)
+ clt->paths_up = clt->paths_num;
+ else if (up == 1)
+ clt->link_ev(clt->priv, RTRS_CLT_LINK_EV_RECONNECTED);
+ mutex_unlock(&clt->paths_ev_mutex);
+
+ /* Mark session as established */
+ sess->established = true;
+ sess->reconnect_attempts = 0;
+ sess->stats->reconnects.successful_cnt++;
+}
+
+static void rtrs_clt_sess_down(struct rtrs_clt_sess *sess)
+{
+ struct rtrs_clt *clt = sess->clt;
+
+ if (!sess->established)
+ return;
+
+ sess->established = false;
+ mutex_lock(&clt->paths_ev_mutex);
+ WARN_ON(!clt->paths_up);
+ if (--clt->paths_up == 0)
+ clt->link_ev(clt->priv, RTRS_CLT_LINK_EV_DISCONNECTED);
+ mutex_unlock(&clt->paths_ev_mutex);
+}
+
+static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_sess *sess)
+{
+ struct rtrs_clt_con *con;
+ unsigned int cid;
+
+ WARN_ON(READ_ONCE(sess->state) == RTRS_CLT_CONNECTED);
+
+ /*
+ * Possible race with rtrs_clt_open(), when DEVICE_REMOVAL comes
+ * exactly in between. Start destroying after it finishes.
+ */
+ mutex_lock(&sess->init_mutex);
+ mutex_unlock(&sess->init_mutex);
+
+ /*
+ * All IO paths must observe !CONNECTED state before we
+ * free everything.
+ */
+ synchronize_rcu();
+
+ rtrs_clt_stop_hb(sess);
+
+ /*
+ * The order is utterly crucial: first disconnect and complete all
+ * rdma requests with an error (thus setting in_use=false for the
+ * requests), then fail outstanding requests checking in_use for each,
+ * and eventually notify the upper layer about the session disconnection.
+ */
+
+ for (cid = 0; cid < sess->s.con_num; cid++) {
+ if (!sess->s.con[cid])
+ break;
+ con = to_clt_con(sess->s.con[cid]);
+ stop_cm(con);
+ }
+ fail_all_outstanding_reqs(sess);
+ free_sess_reqs(sess);
+ rtrs_clt_sess_down(sess);
+
+ /*
+ * Wait for graceful shutdown, namely when the peer side invokes
+ * rdma_disconnect(). 'connected_cnt' is decremented only on
+ * CM events, thus if the other side has crashed and hb has detected
+ * something is wrong, we will be stuck here for exactly the timeout
+ * in ms, since CM does not fire anything. That is fine, we are not
+ * in a hurry.
+ */
+ wait_event_timeout(sess->state_wq, !atomic_read(&sess->connected_cnt),
+ msecs_to_jiffies(RTRS_CONNECT_TIMEOUT_MS));
+
+ for (cid = 0; cid < sess->s.con_num; cid++) {
+ if (!sess->s.con[cid])
+ break;
+ con = to_clt_con(sess->s.con[cid]);
+ destroy_con_cq_qp(con);
+ destroy_cm(con);
+ destroy_con(con);
+ }
+}
+
+static inline bool xchg_sessions(struct rtrs_clt_sess __rcu **rcu_ppcpu_path,
+ struct rtrs_clt_sess *sess,
+ struct rtrs_clt_sess *next)
+{
+ struct rtrs_clt_sess **ppcpu_path;
+
+ /* Call cmpxchg() without sparse warnings */
+ ppcpu_path = (typeof(ppcpu_path))rcu_ppcpu_path;
+ return sess == cmpxchg(ppcpu_path, sess, next);
+}
+
+static void rtrs_clt_remove_path_from_arr(struct rtrs_clt_sess *sess)
+{
+ struct rtrs_clt *clt = sess->clt;
+ struct rtrs_clt_sess *next;
+ bool wait_for_grace = false;
+ int cpu;
+
+ mutex_lock(&clt->paths_mutex);
+ list_del_rcu(&sess->s.entry);
+
+ /* Make sure everybody observes path removal. */
+ synchronize_rcu();
+
+ /*
+ * At this point nobody sees @sess in the list, but still we have
+ * dangling pointer @pcpu_path which _can_ point to @sess. Since
+ * nobody can observe @sess in the list, we guarantee that IO path
+ * will not assign @sess to @pcpu_path, i.e. @pcpu_path can be equal
+ * to @sess, but can never again become @sess.
+ */
+
+ /*
+ * Decrement the paths number only after the grace period, because
+ * the caller of do_each_path() must first observe the list without
+ * the path and only then the decremented paths number.
+ *
+ * Otherwise there can be the following situation:
+ * o Two paths exist and IO is coming.
+ * o One path is removed:
+ * CPU#0 CPU#1
+ * do_each_path(): rtrs_clt_remove_path_from_arr():
+ * path = get_next_path()
+ * ^^^ list_del_rcu(path)
+ * [!CONNECTED path] clt->paths_num--
+ * ^^^^^^^^^
+ * load clt->paths_num from 2 to 1
+ * ^^^^^^^^^
+ * sees 1
+ *
+ * The path is observed as !CONNECTED, but the do_each_path() loop
+ * ends, because the expression i < clt->paths_num is false.
+ */
+ clt->paths_num--;
+
+ /*
+ * Get the @next path following @sess, which is going to be
+ * removed. If @sess is the last element, then @next is NULL.
+ */
+ rcu_read_lock();
+ next = list_next_or_null_rr_rcu(&clt->paths_list, &sess->s.entry,
+ typeof(*next), s.entry);
+ rcu_read_unlock();
+
+ /*
+ * @pcpu paths can still point to the path which is going to be
+ * removed, so change the pointer manually.
+ */
+ for_each_possible_cpu(cpu) {
+ struct rtrs_clt_sess __rcu **ppcpu_path;
+
+ ppcpu_path = per_cpu_ptr(clt->pcpu_path, cpu);
+ if (rcu_dereference_protected(*ppcpu_path,
+ lockdep_is_held(&clt->paths_mutex)) != sess)
+ /*
+ * synchronize_rcu() was called just after deleting the
+ * entry from the list, thus the IO code path cannot
+ * change the pointer back to the one which is going
+ * to be removed; we are safe here.
+ */
+ continue;
+
+ /*
+ * We race with IO code path, which also changes pointer,
+ * thus we have to be careful not to overwrite it.
+ */
+ if (xchg_sessions(ppcpu_path, sess, next))
+ /*
+ * @ppcpu_path was successfully replaced with @next,
+ * which means someone could also have picked up
+ * @sess and be dereferencing it right now, so waiting
+ * for a grace period is required.
+ */
+ wait_for_grace = true;
+ }
+ if (wait_for_grace)
+ synchronize_rcu();
+
+ mutex_unlock(&clt->paths_mutex);
+}
+
+static void rtrs_clt_add_path_to_arr(struct rtrs_clt_sess *sess,
+ struct rtrs_addr *addr)
+{
+ struct rtrs_clt *clt = sess->clt;
+
+ mutex_lock(&clt->paths_mutex);
+ clt->paths_num++;
+
+ list_add_tail_rcu(&sess->s.entry, &clt->paths_list);
+ mutex_unlock(&clt->paths_mutex);
+}
+
+static void rtrs_clt_close_work(struct work_struct *work)
+{
+ struct rtrs_clt_sess *sess;
+
+ sess = container_of(work, struct rtrs_clt_sess, close_work);
+
+ cancel_delayed_work_sync(&sess->reconnect_dwork);
+ rtrs_clt_stop_and_destroy_conns(sess);
+ rtrs_clt_change_state(sess, RTRS_CLT_CLOSED);
+}
+
+static int init_conns(struct rtrs_clt_sess *sess)
+{
+ unsigned int cid;
+ int err;
+
+ /*
+ * On every new session connection increase the reconnect counter
+ * to avoid clashes with previous sessions that are not yet closed
+ * on the server side.
+ */
+ sess->s.recon_cnt++;
+
+ /* Establish all RDMA connections */
+ for (cid = 0; cid < sess->s.con_num; cid++) {
+ err = create_con(sess, cid);
+ if (err)
+ goto destroy;
+
+ err = create_cm(to_clt_con(sess->s.con[cid]));
+ if (err) {
+ destroy_con(to_clt_con(sess->s.con[cid]));
+ goto destroy;
+ }
+ }
+ err = alloc_sess_reqs(sess);
+ if (err)
+ goto destroy;
+
+ rtrs_clt_start_hb(sess);
+
+ return 0;
+
+destroy:
+ while (cid--) {
+ struct rtrs_clt_con *con = to_clt_con(sess->s.con[cid]);
+
+ stop_cm(con);
+ destroy_con_cq_qp(con);
+ destroy_cm(con);
+ destroy_con(con);
+ }
+ /*
+ * If we've never taken the async path and got an error, say,
+ * doing rdma_resolve_addr(), switch to the CONNECTING_ERR state
+ * manually to keep reconnecting.
+ */
+ rtrs_clt_change_state(sess, RTRS_CLT_CONNECTING_ERR);
+
+ return err;
+}
+
+static void rtrs_clt_info_req_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct rtrs_clt_con *con = cq->cq_context;
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct rtrs_iu *iu;
+
+ iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
+ rtrs_iu_free(iu, DMA_TO_DEVICE, sess->s.dev->ib_dev, 1);
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ rtrs_err(sess->clt, "Sess info request send failed: %s\n",
+ ib_wc_status_msg(wc->status));
+ rtrs_clt_change_state(sess, RTRS_CLT_CONNECTING_ERR);
+ return;
+ }
+
+ rtrs_clt_update_wc_stats(con);
+}
+
+static int process_info_rsp(struct rtrs_clt_sess *sess,
+ const struct rtrs_msg_info_rsp *msg)
+{
+ unsigned int sg_cnt, total_len;
+ int i, sgi;
+
+ sg_cnt = le16_to_cpu(msg->sg_cnt);
+ if (unlikely(!sg_cnt))
+ return -EINVAL;
+ /*
+ * Check if IB immediate data size is enough to hold the mem_id and
+ * the offset inside the memory chunk.
+ */
+ if (unlikely((ilog2(sg_cnt - 1) + 1) +
+ (ilog2(sess->chunk_size - 1) + 1) >
+ MAX_IMM_PAYL_BITS)) {
+ rtrs_err(sess->clt,
+ "RDMA immediate size (%db) not enough to encode %d buffers of size %dB\n",
+ MAX_IMM_PAYL_BITS, sg_cnt, sess->chunk_size);
+ return -EINVAL;
+ }
+ if (unlikely(!sg_cnt || (sess->queue_depth % sg_cnt))) {
+ rtrs_err(sess->clt, "Incorrect sg_cnt %d, is not multiple\n",
+ sg_cnt);
+ return -EINVAL;
+ }
+ total_len = 0;
+ for (sgi = 0, i = 0; sgi < sg_cnt && i < sess->queue_depth; sgi++) {
+ const struct rtrs_sg_desc *desc = &msg->desc[sgi];
+ u32 len, rkey;
+ u64 addr;
+
+ addr = le64_to_cpu(desc->addr);
+ rkey = le32_to_cpu(desc->key);
+ len = le32_to_cpu(desc->len);
+
+ total_len += len;
+
+ if (unlikely(!len || (len % sess->chunk_size))) {
+ rtrs_err(sess->clt, "Incorrect [%d].len %d\n", sgi,
+ len);
+ return -EINVAL;
+ }
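+ /* Carve the descriptor into chunk_size pieces, one rbuf entry per chunk */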
+ for ( ; len && i < sess->queue_depth; i++) {
+ sess->rbufs[i].addr = addr;
+ sess->rbufs[i].rkey = rkey;
+
+ len -= sess->chunk_size;
+ addr += sess->chunk_size;
+ }
+ }
+ /* Sanity check */
+ if (unlikely(sgi != sg_cnt || i != sess->queue_depth)) {
+ rtrs_err(sess->clt, "Incorrect sg vector, not fully mapped\n");
+ return -EINVAL;
+ }
+ if (unlikely(total_len != sess->chunk_size * sess->queue_depth)) {
+ rtrs_err(sess->clt, "Incorrect total_len %d\n", total_len);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void rtrs_clt_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct rtrs_clt_con *con = cq->cq_context;
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct rtrs_msg_info_rsp *msg;
+ enum rtrs_clt_state state;
+ struct rtrs_iu *iu;
+ size_t rx_sz;
+ int err;
+
+ state = RTRS_CLT_CONNECTING_ERR;
+
+ WARN_ON(con->c.cid);
+ iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ rtrs_err(sess->clt, "Sess info response recv failed: %s\n",
+ ib_wc_status_msg(wc->status));
+ goto out;
+ }
+ WARN_ON(wc->opcode != IB_WC_RECV);
+
+ if (unlikely(wc->byte_len < sizeof(*msg))) {
+ rtrs_err(sess->clt, "Sess info response is malformed: size %d\n",
+ wc->byte_len);
+ goto out;
+ }
+ ib_dma_sync_single_for_cpu(sess->s.dev->ib_dev, iu->dma_addr,
+ iu->size, DMA_FROM_DEVICE);
+ msg = iu->buf;
+ if (unlikely(le16_to_cpu(msg->type) != RTRS_MSG_INFO_RSP)) {
+ rtrs_err(sess->clt, "Sess info response is malformed: type %d\n",
+ le16_to_cpu(msg->type));
+ goto out;
+ }
+ rx_sz = sizeof(*msg);
+ rx_sz += sizeof(msg->desc[0]) * le16_to_cpu(msg->sg_cnt);
+ if (unlikely(wc->byte_len < rx_sz)) {
+ rtrs_err(sess->clt, "Sess info response is malformed: size %d\n",
+ wc->byte_len);
+ goto out;
+ }
+ err = process_info_rsp(sess, msg);
+ if (unlikely(err))
+ goto out;
+
+ err = post_recv_sess(sess);
+ if (unlikely(err))
+ goto out;
+
+ state = RTRS_CLT_CONNECTED;
+
+out:
+ rtrs_clt_update_wc_stats(con);
+ rtrs_iu_free(iu, DMA_FROM_DEVICE, sess->s.dev->ib_dev, 1);
+ rtrs_clt_change_state(sess, state);
+}
+
+static int rtrs_send_sess_info(struct rtrs_clt_sess *sess)
+{
+ struct rtrs_clt_con *usr_con = to_clt_con(sess->s.con[0]);
+ struct rtrs_msg_info_req *msg;
+ struct rtrs_iu *tx_iu, *rx_iu;
+ size_t rx_sz;
+ int err;
+
+ rx_sz = sizeof(struct rtrs_msg_info_rsp);
+ rx_sz += sizeof(u64) * MAX_SESS_QUEUE_DEPTH;
+
+ tx_iu = rtrs_iu_alloc(1, sizeof(struct rtrs_msg_info_req), GFP_KERNEL,
+ sess->s.dev->ib_dev, DMA_TO_DEVICE,
+ rtrs_clt_info_req_done);
+ rx_iu = rtrs_iu_alloc(1, rx_sz, GFP_KERNEL, sess->s.dev->ib_dev,
+ DMA_FROM_DEVICE, rtrs_clt_info_rsp_done);
+ if (unlikely(!tx_iu || !rx_iu)) {
+ err = -ENOMEM;
+ goto out;
+ }
+ /* Prepare for getting info response */
+ err = rtrs_iu_post_recv(&usr_con->c, rx_iu);
+ if (unlikely(err)) {
+ rtrs_err(sess->clt, "rtrs_iu_post_recv(), err: %d\n", err);
+ goto out;
+ }
+ rx_iu = NULL;
+
+ msg = tx_iu->buf;
+ msg->type = cpu_to_le16(RTRS_MSG_INFO_REQ);
+ memcpy(msg->sessname, sess->s.sessname, sizeof(msg->sessname));
+
+ ib_dma_sync_single_for_device(sess->s.dev->ib_dev, tx_iu->dma_addr,
+ tx_iu->size, DMA_TO_DEVICE);
+
+ /* Send info request */
+ err = rtrs_iu_post_send(&usr_con->c, tx_iu, sizeof(*msg), NULL);
+ if (unlikely(err)) {
+ rtrs_err(sess->clt, "rtrs_iu_post_send(), err: %d\n", err);
+ goto out;
+ }
+ tx_iu = NULL;
+
+ /* Wait for state change */
+ wait_event_interruptible_timeout(sess->state_wq,
+ sess->state != RTRS_CLT_CONNECTING,
+ msecs_to_jiffies(
+ RTRS_CONNECT_TIMEOUT_MS));
+ if (unlikely(READ_ONCE(sess->state) != RTRS_CLT_CONNECTED)) {
+ if (READ_ONCE(sess->state) == RTRS_CLT_CONNECTING_ERR)
+ err = -ECONNRESET;
+ else
+ err = -ETIMEDOUT;
+ goto out;
+ }
+
+out:
+ if (tx_iu)
+ rtrs_iu_free(tx_iu, DMA_TO_DEVICE, sess->s.dev->ib_dev, 1);
+ if (rx_iu)
+ rtrs_iu_free(rx_iu, DMA_FROM_DEVICE, sess->s.dev->ib_dev, 1);
+ if (unlikely(err))
+ /* If we've never taken the async path, e.g. because of allocation failures */
+ rtrs_clt_change_state(sess, RTRS_CLT_CONNECTING_ERR);
+
+ return err;
+}
+
+/**
+ * init_sess() - establishes all session connections and does the handshake
+ * @sess: client session.
+ * In case of an error the full close or reconnect procedure should be taken,
+ * because async reconnect or close works could have been started.
+ */
+static int init_sess(struct rtrs_clt_sess *sess)
+{
+ int err;
+
+ mutex_lock(&sess->init_mutex);
+ err = init_conns(sess);
+ if (err) {
+ rtrs_err(sess->clt, "init_conns(), err: %d\n", err);
+ goto out;
+ }
+ err = rtrs_send_sess_info(sess);
+ if (err) {
+ rtrs_err(sess->clt, "rtrs_send_sess_info(), err: %d\n", err);
+ goto out;
+ }
+ rtrs_clt_sess_up(sess);
+out:
+ mutex_unlock(&sess->init_mutex);
+
+ return err;
+}
+
+static void rtrs_clt_reconnect_work(struct work_struct *work)
+{
+ struct rtrs_clt_sess *sess;
+ struct rtrs_clt *clt;
+ unsigned int delay_ms;
+ int err;
+
+ sess = container_of(to_delayed_work(work), struct rtrs_clt_sess,
+ reconnect_dwork);
+ clt = sess->clt;
+
+ if (READ_ONCE(sess->state) != RTRS_CLT_RECONNECTING)
+ return;
+
+ if (sess->reconnect_attempts >= clt->max_reconnect_attempts) {
+ /* Close a session completely if max attempts is reached */
+ rtrs_clt_close_conns(sess, false);
+ return;
+ }
+ sess->reconnect_attempts++;
+
+ /* Stop everything */
+ rtrs_clt_stop_and_destroy_conns(sess);
+ msleep(RTRS_RECONNECT_BACKOFF);
+ if (rtrs_clt_change_state(sess, RTRS_CLT_CONNECTING)) {
+ err = init_sess(sess);
+ if (err)
+ goto reconnect_again;
+ }
+
+ return;
+
+reconnect_again:
+ if (rtrs_clt_change_state(sess, RTRS_CLT_RECONNECTING)) {
+ sess->stats->reconnects.fail_cnt++;
+ delay_ms = clt->reconnect_delay_sec * 1000;
+ queue_delayed_work(rtrs_wq, &sess->reconnect_dwork,
+ msecs_to_jiffies(delay_ms));
+ }
+}
+
+static void rtrs_clt_dev_release(struct device *dev)
+{
+ struct rtrs_clt *clt = container_of(dev, struct rtrs_clt, dev);
+
+ kfree(clt);
+}
+
+static struct rtrs_clt *alloc_clt(const char *sessname, size_t paths_num,
+ u16 port, size_t pdu_sz, void *priv,
+ void (*link_ev)(void *priv,
+ enum rtrs_clt_link_ev ev),
+ unsigned int max_segments,
+ size_t max_segment_size,
+ unsigned int reconnect_delay_sec,
+ unsigned int max_reconnect_attempts)
+{
+ struct rtrs_clt *clt;
+ int err;
+
+ if (!paths_num || paths_num > MAX_PATHS_NUM)
+ return ERR_PTR(-EINVAL);
+
+ if (strlen(sessname) >= sizeof(clt->sessname))
+ return ERR_PTR(-EINVAL);
+
+ clt = kzalloc(sizeof(*clt), GFP_KERNEL);
+ if (!clt)
+ return ERR_PTR(-ENOMEM);
+
+ clt->pcpu_path = alloc_percpu(typeof(*clt->pcpu_path));
+ if (!clt->pcpu_path) {
+ kfree(clt);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ uuid_gen(&clt->paths_uuid);
+ INIT_LIST_HEAD_RCU(&clt->paths_list);
+ clt->paths_num = paths_num;
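+ /* Sentinel value, brought back to paths_num once every path is up, see rtrs_clt_sess_up() */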
+ clt->paths_up = MAX_PATHS_NUM;
+ clt->port = port;
+ clt->pdu_sz = pdu_sz;
+ clt->max_segments = max_segments;
+ clt->max_segment_size = max_segment_size;
+ clt->reconnect_delay_sec = reconnect_delay_sec;
+ clt->max_reconnect_attempts = max_reconnect_attempts;
+ clt->priv = priv;
+ clt->link_ev = link_ev;
+ clt->mp_policy = MP_POLICY_MIN_INFLIGHT;
+ strlcpy(clt->sessname, sessname, sizeof(clt->sessname));
+ init_waitqueue_head(&clt->permits_wait);
+ mutex_init(&clt->paths_ev_mutex);
+ mutex_init(&clt->paths_mutex);
+
+ clt->dev.class = rtrs_clt_dev_class;
+ clt->dev.release = rtrs_clt_dev_release;
+ err = dev_set_name(&clt->dev, "%s", sessname);
+ if (err) {
+ free_percpu(clt->pcpu_path);
+ kfree(clt);
+ return ERR_PTR(err);
+ }
+ /*
+ * Suppress user space notification until
+ * sysfs files are created
+ */
+ dev_set_uevent_suppress(&clt->dev, true);
+ err = device_register(&clt->dev);
+ if (err) {
+ free_percpu(clt->pcpu_path);
+ put_device(&clt->dev);
+ return ERR_PTR(err);
+ }
+
+ clt->kobj_paths = kobject_create_and_add("paths", &clt->dev.kobj);
+ if (!clt->kobj_paths) {
+ free_percpu(clt->pcpu_path);
+ device_unregister(&clt->dev);
+ return ERR_PTR(-ENOMEM);
+ }
+ err = rtrs_clt_create_sysfs_root_files(clt);
+ if (err) {
+ free_percpu(clt->pcpu_path);
+ kobject_del(clt->kobj_paths);
+ kobject_put(clt->kobj_paths);
+ device_unregister(&clt->dev);
+ return ERR_PTR(err);
+ }
+ dev_set_uevent_suppress(&clt->dev, false);
+ kobject_uevent(&clt->dev.kobj, KOBJ_ADD);
+
+ return clt;
+}
+
+static void wait_for_inflight_permits(struct rtrs_clt *clt)
+{
+ if (clt->permits_map) {
+ size_t sz = clt->queue_depth;
+
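+ /* find_first_bit() == sz means the bitmap is empty, i.e. no permits are in flight */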
+ wait_event(clt->permits_wait,
+ find_first_bit(clt->permits_map, sz) >= sz);
+ }
+}
+
+static void free_clt(struct rtrs_clt *clt)
+{
+ wait_for_inflight_permits(clt);
+ free_permits(clt);
+ free_percpu(clt->pcpu_path);
+ mutex_destroy(&clt->paths_ev_mutex);
+ mutex_destroy(&clt->paths_mutex);
+ /* release callback will free clt in last put */
+ device_unregister(&clt->dev);
+}
+
+/**
+ * rtrs_clt_open() - Open a session to an RTRS server
+ * @ops: holds the link event callback and the private pointer.
+ * @sessname: name of the session
+ * @paths: Paths to be established defined by their src and dst addresses
+ * @paths_num: Number of elements in the @paths array
+ * @port: port to be used by the RTRS session
+ * @pdu_sz: Size of extra payload which can be accessed after permit allocation.
+ * @reconnect_delay_sec: time between reconnect tries
+ * @max_segments: Max. number of segments per IO request
+ * @max_segment_size: Max. size of one segment
+ * @max_reconnect_attempts: Number of times to reconnect on error before giving
+ * up, 0 for disabled, -1 for forever
+ *
+ * Starts session establishment with the rtrs_server. The function can block
+ * up to ~2000ms before it returns.
+ *
+ * Return a valid pointer on success, otherwise an ERR_PTR.
+ */
+struct rtrs_clt *rtrs_clt_open(struct rtrs_clt_ops *ops,
+ const char *sessname,
+ const struct rtrs_addr *paths,
+ size_t paths_num, u16 port,
+ size_t pdu_sz, u8 reconnect_delay_sec,
+ u16 max_segments,
+ size_t max_segment_size,
+ s16 max_reconnect_attempts)
+{
+ struct rtrs_clt_sess *sess, *tmp;
+ struct rtrs_clt *clt;
+ int err, i;
+
+ clt = alloc_clt(sessname, paths_num, port, pdu_sz, ops->priv,
+ ops->link_ev,
+ max_segments, max_segment_size, reconnect_delay_sec,
+ max_reconnect_attempts);
+ if (IS_ERR(clt)) {
+ err = PTR_ERR(clt);
+ goto out;
+ }
+ for (i = 0; i < paths_num; i++) {
+ struct rtrs_clt_sess *sess;
+
+ sess = alloc_sess(clt, &paths[i], nr_cpu_ids,
+ max_segments, max_segment_size);
+ if (IS_ERR(sess)) {
+ err = PTR_ERR(sess);
+ goto close_all_sess;
+ }
+ list_add_tail_rcu(&sess->s.entry, &clt->paths_list);
+
+ err = init_sess(sess);
+ if (err) {
+ list_del_rcu(&sess->s.entry);
+ rtrs_clt_close_conns(sess, true);
+ free_sess(sess);
+ goto close_all_sess;
+ }
+
+ err = rtrs_clt_create_sess_files(sess);
+ if (err) {
+ list_del_rcu(&sess->s.entry);
+ rtrs_clt_close_conns(sess, true);
+ free_sess(sess);
+ goto close_all_sess;
+ }
+ }
+ err = alloc_permits(clt);
+ if (err)
+ goto close_all_sess;
+
+ return clt;
+
+close_all_sess:
+ list_for_each_entry_safe(sess, tmp, &clt->paths_list, s.entry) {
+ rtrs_clt_destroy_sess_files(sess, NULL);
+ rtrs_clt_close_conns(sess, true);
+ kobject_put(&sess->kobj);
+ }
+ rtrs_clt_destroy_sysfs_root_files(clt);
+ rtrs_clt_destroy_sysfs_root_folders(clt);
+ free_clt(clt);
+
+out:
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL(rtrs_clt_open);
+
+/**
+ * rtrs_clt_close() - Close a session
+ * @clt: Session handle. Session is freed upon return.
+ */
+void rtrs_clt_close(struct rtrs_clt *clt)
+{
+ struct rtrs_clt_sess *sess, *tmp;
+
+ /* Firstly forbid sysfs access */
+ rtrs_clt_destroy_sysfs_root_files(clt);
+ rtrs_clt_destroy_sysfs_root_folders(clt);
+
+ /* Now it is safe to iterate over all paths without locks */
+ list_for_each_entry_safe(sess, tmp, &clt->paths_list, s.entry) {
+ rtrs_clt_destroy_sess_files(sess, NULL);
+ rtrs_clt_close_conns(sess, true);
+ kobject_put(&sess->kobj);
+ }
+ free_clt(clt);
+}
+EXPORT_SYMBOL(rtrs_clt_close);
+
+int rtrs_clt_reconnect_from_sysfs(struct rtrs_clt_sess *sess)
+{
+ enum rtrs_clt_state old_state;
+ int err = -EBUSY;
+ bool changed;
+
+ changed = rtrs_clt_change_state_get_old(sess, RTRS_CLT_RECONNECTING,
+ &old_state);
+ if (changed) {
+ sess->reconnect_attempts = 0;
+ queue_delayed_work(rtrs_wq, &sess->reconnect_dwork, 0);
+ }
+ if (changed || old_state == RTRS_CLT_RECONNECTING) {
+ /*
+ * flush_delayed_work() queues pending work for immediate
+ * execution, so do the flush if we have queued something
+ * right now or work is pending.
+ */
+ flush_delayed_work(&sess->reconnect_dwork);
+ err = (READ_ONCE(sess->state) ==
+ RTRS_CLT_CONNECTED ? 0 : -ENOTCONN);
+ }
+
+ return err;
+}
+
+int rtrs_clt_disconnect_from_sysfs(struct rtrs_clt_sess *sess)
+{
+ rtrs_clt_close_conns(sess, true);
+
+ return 0;
+}
+
+int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_sess *sess,
+ const struct attribute *sysfs_self)
+{
+ enum rtrs_clt_state old_state;
+ bool changed;
+
+ /*
+ * Keep stopping the path until the state is changed to DEAD or
+ * observed as DEAD:
+ * 1. State was changed to DEAD - we were fast and nobody
+ * invoked rtrs_clt_reconnect(), which could start
+ * reconnecting again.
+ * 2. State was observed as DEAD - someone else is removing
+ * the path in parallel.
+ */
+ do {
+ rtrs_clt_close_conns(sess, true);
+ changed = rtrs_clt_change_state_get_old(sess,
+ RTRS_CLT_DEAD,
+ &old_state);
+ } while (!changed && old_state != RTRS_CLT_DEAD);
+
+ if (likely(changed)) {
+ rtrs_clt_destroy_sess_files(sess, sysfs_self);
+ rtrs_clt_remove_path_from_arr(sess);
+ kobject_put(&sess->kobj);
+ }
+
+ return 0;
+}
+
+void rtrs_clt_set_max_reconnect_attempts(struct rtrs_clt *clt, int value)
+{
+ clt->max_reconnect_attempts = (unsigned int)value;
+}
+
+int rtrs_clt_get_max_reconnect_attempts(const struct rtrs_clt *clt)
+{
+ return (int)clt->max_reconnect_attempts;
+}
+
+/**
+ * rtrs_clt_request() - Request data transfer to/from server via RDMA.
+ *
+ * @dir: READ/WRITE
+ * @ops: callback function to be called as confirmation and the private pointer.
+ * @clt: Session
+ * @permit: Preallocated permit
+ * @vec: Message that is sent to the server together with the request.
+ * The sum of the lengths of all @vec elements is limited to <= IO_MSG_SIZE.
+ * Since the msg is copied internally it can be allocated on the stack.
+ * @nr: Number of elements in @vec.
+ * @data_len: length of data sent to/from the server
+ * @sg: Pages to be sent/received to/from the server.
+ * @sg_cnt: Number of elements in @sg
+ *
+ * Return:
+ * 0: Success
+ * <0: Error
+ *
+ * On dir=READ the rtrs client will request a data transfer from the server
+ * to the client. The data that the server responds with will be stored in
+ * @sg when the user receives an %RTRS_CLT_RDMA_EV_RDMA_REQUEST_WRITE_COMPL event.
+ * On dir=WRITE the rtrs client will RDMA-write the data in @sg to the server side.
+ */
+int rtrs_clt_request(int dir, struct rtrs_clt_req_ops *ops,
+ struct rtrs_clt *clt, struct rtrs_permit *permit,
+ const struct kvec *vec, size_t nr, size_t data_len,
+ struct scatterlist *sg, unsigned int sg_cnt)
+{
+ struct rtrs_clt_io_req *req;
+ struct rtrs_clt_sess *sess;
+
+ enum dma_data_direction dma_dir;
+ int err = -ECONNABORTED, i;
+ size_t usr_len, hdr_len;
+ struct path_it it;
+
+ /* Get kvec length */
+ for (i = 0, usr_len = 0; i < nr; i++)
+ usr_len += vec[i].iov_len;
+
+ if (dir == READ) {
+ hdr_len = sizeof(struct rtrs_msg_rdma_read) +
+ sg_cnt * sizeof(struct rtrs_sg_desc);
+ dma_dir = DMA_FROM_DEVICE;
+ } else {
+ hdr_len = sizeof(struct rtrs_msg_rdma_write);
+ dma_dir = DMA_TO_DEVICE;
+ }
+
+ rcu_read_lock();
+ for (path_it_init(&it, clt);
+ (sess = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) {
+ if (unlikely(READ_ONCE(sess->state) != RTRS_CLT_CONNECTED))
+ continue;
+
+ if (unlikely(usr_len + hdr_len > sess->max_hdr_size)) {
+ rtrs_wrn_rl(sess->clt,
+ "%s request failed, user message size is %zu and header length %zu, but max size is %u\n",
+ dir == READ ? "Read" : "Write",
+ usr_len, hdr_len, sess->max_hdr_size);
+ err = -EMSGSIZE;
+ break;
+ }
+ req = rtrs_clt_get_req(sess, ops->conf_fn, permit, ops->priv,
+ vec, usr_len, sg, sg_cnt, data_len,
+ dma_dir);
+ if (dir == READ)
+ err = rtrs_clt_read_req(req);
+ else
+ err = rtrs_clt_write_req(req);
+ if (unlikely(err)) {
+ req->in_use = false;
+ continue;
+ }
+ /* Success path */
+ break;
+ }
+ path_it_deinit(&it);
+ rcu_read_unlock();
+
+ return err;
+}
+EXPORT_SYMBOL(rtrs_clt_request);
+
+/**
+ * rtrs_clt_query() - queries RTRS session attributes
+ * @clt: session pointer
+ * @attr: query results for session attributes.
+ * Returns:
+ * 0 on success
+ * -ECOMM no connection to the server
+ */
+int rtrs_clt_query(struct rtrs_clt *clt, struct rtrs_attrs *attr)
+{
+ if (!rtrs_clt_is_connected(clt))
+ return -ECOMM;
+
+ attr->queue_depth = clt->queue_depth;
+ attr->max_io_size = clt->max_io_size;
+ attr->sess_kobj = &clt->dev.kobj;
+ strlcpy(attr->sessname, clt->sessname, sizeof(attr->sessname));
+
+ return 0;
+}
+EXPORT_SYMBOL(rtrs_clt_query);
+
+int rtrs_clt_create_path_from_sysfs(struct rtrs_clt *clt,
+ struct rtrs_addr *addr)
+{
+ struct rtrs_clt_sess *sess;
+ int err;
+
+ sess = alloc_sess(clt, addr, nr_cpu_ids, clt->max_segments,
+ clt->max_segment_size);
+ if (IS_ERR(sess))
+ return PTR_ERR(sess);
+
+ /*
+ * It is totally safe to add a path in the CONNECTING state: incoming
+ * IO will never grab it. Also it is very important to add the
+ * path before init, since init fires the LINK_CONNECTED event.
+ */
+ rtrs_clt_add_path_to_arr(sess, addr);
+
+ err = init_sess(sess);
+ if (err)
+ goto close_sess;
+
+ err = rtrs_clt_create_sess_files(sess);
+ if (err)
+ goto close_sess;
+
+ return 0;
+
+close_sess:
+ rtrs_clt_remove_path_from_arr(sess);
+ rtrs_clt_close_conns(sess, true);
+ free_sess(sess);
+
+ return err;
+}
+
+static int rtrs_clt_ib_dev_init(struct rtrs_ib_dev *dev)
+{
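+ /* Fast memory registration (FRWR) requires the MEM_MGT_EXTENSIONS device capability */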
+ if (!(dev->ib_dev->attrs.device_cap_flags &
+ IB_DEVICE_MEM_MGT_EXTENSIONS)) {
+ pr_err("Memory registrations not supported.\n");
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static const struct rtrs_rdma_dev_pd_ops dev_pd_ops = {
+ .init = rtrs_clt_ib_dev_init
+};
+
+static int __init rtrs_client_init(void)
+{
+ rtrs_rdma_dev_pd_init(0, &dev_pd);
+
+ rtrs_clt_dev_class = class_create(THIS_MODULE, "rtrs-client");
+ if (IS_ERR(rtrs_clt_dev_class)) {
+ pr_err("Failed to create rtrs-client dev class\n");
+ return PTR_ERR(rtrs_clt_dev_class);
+ }
+ rtrs_wq = alloc_workqueue("rtrs_client_wq", WQ_MEM_RECLAIM, 0);
+ if (!rtrs_wq) {
+ class_destroy(rtrs_clt_dev_class);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void __exit rtrs_client_exit(void)
+{
+ destroy_workqueue(rtrs_wq);
+ class_destroy(rtrs_clt_dev_class);
+ rtrs_rdma_dev_pd_deinit(&dev_pd);
+}
+
+module_init(rtrs_client_init);
+module_exit(rtrs_client_exit);
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.h b/drivers/infiniband/ulp/rtrs/rtrs-clt.h
new file mode 100644
index 000000000000..167acd3c90fc
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.h
@@ -0,0 +1,252 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+
+#ifndef RTRS_CLT_H
+#define RTRS_CLT_H
+
+#include <linux/device.h>
+#include "rtrs-pri.h"
+
+/**
+ * enum rtrs_clt_state - Client states.
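+ * @RTRS_CLT_CONNECTING: path connections are being established
+ * @RTRS_CLT_CONNECTING_ERR: an error occurred while establishing connections
+ * @RTRS_CLT_RECONNECTING: a reconnect was requested or is in progress
+ * @RTRS_CLT_CONNECTED: path is connected and can carry IO
+ * @RTRS_CLT_CLOSING: close work has been queued for the path
+ * @RTRS_CLT_CLOSED: all path connections are closed
+ * @RTRS_CLT_DEAD: path is being removed and must not be reused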
+ */
+enum rtrs_clt_state {
+ RTRS_CLT_CONNECTING,
+ RTRS_CLT_CONNECTING_ERR,
+ RTRS_CLT_RECONNECTING,
+ RTRS_CLT_CONNECTED,
+ RTRS_CLT_CLOSING,
+ RTRS_CLT_CLOSED,
+ RTRS_CLT_DEAD,
+};
+
+enum rtrs_mp_policy {
+ MP_POLICY_RR,
+ MP_POLICY_MIN_INFLIGHT,
+};
+
+/* see Documentation/ABI/testing/sysfs-class-rtrs-client for details */
+struct rtrs_clt_stats_reconnects {
+ int successful_cnt;
+ int fail_cnt;
+};
+
+/* see Documentation/ABI/testing/sysfs-class-rtrs-client for details */
+struct rtrs_clt_stats_cpu_migr {
+ atomic_t from;
+ int to;
+};
+
+/* Stats for read and write operations.
+ * see Documentation/ABI/testing/sysfs-class-rtrs-client for details
+ */
+struct rtrs_clt_stats_rdma {
+ struct {
+ u64 cnt;
+ u64 size_total;
+ } dir[2];
+
+ u64 failover_cnt;
+};
+
+struct rtrs_clt_stats_pcpu {
+ struct rtrs_clt_stats_cpu_migr cpu_migr;
+ struct rtrs_clt_stats_rdma rdma;
+};
+
+struct rtrs_clt_stats {
+ struct kobject kobj_stats;
+ struct rtrs_clt_stats_pcpu __percpu *pcpu_stats;
+ struct rtrs_clt_stats_reconnects reconnects;
+ atomic_t inflight;
+};
+
+struct rtrs_clt_con {
+ struct rtrs_con c;
+ struct rtrs_iu *rsp_ius;
+ u32 queue_size;
+ unsigned int cpu;
+ atomic_t io_cnt;
+ int cm_err;
+};
+
+/**
+ * rtrs_permit - permits the memory allocation for a future RDMA operation.
+ * Combine with IRQ pinning to keep IO on the same CPU.
+ */
+struct rtrs_permit {
+ enum rtrs_clt_con_type con_type;
+ unsigned int cpu_id;
+ unsigned int mem_id;
+ unsigned int mem_off;
+};
+
+/**
+ * rtrs_clt_io_req - describes one inflight IO request
+ */
+struct rtrs_clt_io_req {
+ struct list_head list;
+ struct rtrs_iu *iu;
+ struct scatterlist *sglist; /* list holding user data */
+ unsigned int sg_cnt;
+ unsigned int sg_size;
+ unsigned int data_len;
+ unsigned int usr_len;
+ void *priv;
+ bool in_use;
+ struct rtrs_clt_con *con;
+ struct rtrs_sg_desc *desc;
+ struct ib_sge *sge;
+ struct rtrs_permit *permit;
+ enum dma_data_direction dir;
+ void (*conf)(void *priv, int errno);
+ unsigned long start_jiffies;
+
+ struct ib_mr *mr;
+ struct ib_cqe inv_cqe;
+ struct completion inv_comp;
+ int inv_errno;
+ bool need_inv_comp;
+ bool need_inv;
+};
+
+struct rtrs_rbuf {
+ u64 addr;
+ u32 rkey;
+};
+
+struct rtrs_clt_sess {
+ struct rtrs_sess s;
+ struct rtrs_clt *clt;
+ wait_queue_head_t state_wq;
+ enum rtrs_clt_state state;
+ atomic_t connected_cnt;
+ struct mutex init_mutex;
+ struct rtrs_clt_io_req *reqs;
+ struct delayed_work reconnect_dwork;
+ struct work_struct close_work;
+ unsigned int reconnect_attempts;
+ bool established;
+ struct rtrs_rbuf *rbufs;
+ size_t max_io_size;
+ u32 max_hdr_size;
+ u32 chunk_size;
+ size_t queue_depth;
+ u32 max_pages_per_mr;
+ int max_send_sge;
+ u32 flags;
+ struct kobject kobj;
+ struct rtrs_clt_stats *stats;
+ /* cache hca_port and hca_name to display in sysfs */
+ u8 hca_port;
+ char hca_name[IB_DEVICE_NAME_MAX];
+ struct list_head __percpu
+ *mp_skip_entry;
+};
+
+struct rtrs_clt {
+ struct list_head paths_list; /* rcu protected list */
+ size_t paths_num;
+ struct rtrs_clt_sess
+ __rcu * __percpu *pcpu_path;
+ uuid_t paths_uuid;
+ int paths_up;
+ struct mutex paths_mutex;
+ struct mutex paths_ev_mutex;
+ char sessname[NAME_MAX];
+ u16 port;
+ unsigned int max_reconnect_attempts;
+ unsigned int reconnect_delay_sec;
+ unsigned int max_segments;
+ size_t max_segment_size;
+ void *permits;
+ unsigned long *permits_map;
+ size_t queue_depth;
+ size_t max_io_size;
+ wait_queue_head_t permits_wait;
+ size_t pdu_sz;
+ void *priv;
+ void (*link_ev)(void *priv,
+ enum rtrs_clt_link_ev ev);
+ struct device dev;
+ struct kobject *kobj_paths;
+ enum rtrs_mp_policy mp_policy;
+};
+
+static inline struct rtrs_clt_con *to_clt_con(struct rtrs_con *c)
+{
+ return container_of(c, struct rtrs_clt_con, c);
+}
+
+static inline struct rtrs_clt_sess *to_clt_sess(struct rtrs_sess *s)
+{
+ return container_of(s, struct rtrs_clt_sess, s);
+}
+
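+/* Each permit is immediately followed by pdu_sz bytes of user PDU payload */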
+static inline int permit_size(struct rtrs_clt *clt)
+{
+ return sizeof(struct rtrs_permit) + clt->pdu_sz;
+}
+
+static inline struct rtrs_permit *get_permit(struct rtrs_clt *clt, int idx)
+{
+ return (struct rtrs_permit *)(clt->permits + permit_size(clt) * idx);
+}
+
+int rtrs_clt_reconnect_from_sysfs(struct rtrs_clt_sess *sess);
+int rtrs_clt_disconnect_from_sysfs(struct rtrs_clt_sess *sess);
+int rtrs_clt_create_path_from_sysfs(struct rtrs_clt *clt,
+ struct rtrs_addr *addr);
+int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_sess *sess,
+ const struct attribute *sysfs_self);
+
+void rtrs_clt_set_max_reconnect_attempts(struct rtrs_clt *clt, int value);
+int rtrs_clt_get_max_reconnect_attempts(const struct rtrs_clt *clt);
+void free_sess(struct rtrs_clt_sess *sess);
+
+/* rtrs-clt-stats.c */
+
+int rtrs_clt_init_stats(struct rtrs_clt_stats *stats);
+
+void rtrs_clt_inc_failover_cnt(struct rtrs_clt_stats *s);
+
+void rtrs_clt_update_wc_stats(struct rtrs_clt_con *con);
+void rtrs_clt_update_all_stats(struct rtrs_clt_io_req *req, int dir);
+
+int rtrs_clt_reset_rdma_lat_distr_stats(struct rtrs_clt_stats *stats,
+ bool enable);
+ssize_t rtrs_clt_stats_rdma_lat_distr_to_str(struct rtrs_clt_stats *stats,
+ char *page, size_t len);
+int rtrs_clt_reset_cpu_migr_stats(struct rtrs_clt_stats *stats, bool enable);
+int rtrs_clt_stats_migration_cnt_to_str(struct rtrs_clt_stats *stats, char *buf,
+ size_t len);
+int rtrs_clt_reset_reconnects_stat(struct rtrs_clt_stats *stats, bool enable);
+int rtrs_clt_stats_reconnects_to_str(struct rtrs_clt_stats *stats, char *buf,
+ size_t len);
+int rtrs_clt_reset_wc_comp_stats(struct rtrs_clt_stats *stats, bool enable);
+int rtrs_clt_stats_wc_completion_to_str(struct rtrs_clt_stats *stats, char *buf,
+ size_t len);
+int rtrs_clt_reset_rdma_stats(struct rtrs_clt_stats *stats, bool enable);
+ssize_t rtrs_clt_stats_rdma_to_str(struct rtrs_clt_stats *stats,
+ char *page, size_t len);
+int rtrs_clt_reset_all_stats(struct rtrs_clt_stats *stats, bool enable);
+ssize_t rtrs_clt_reset_all_help(struct rtrs_clt_stats *stats,
+ char *page, size_t len);
+
+/* rtrs-clt-sysfs.c */
+
+int rtrs_clt_create_sysfs_root_files(struct rtrs_clt *clt);
+void rtrs_clt_destroy_sysfs_root_folders(struct rtrs_clt *clt);
+void rtrs_clt_destroy_sysfs_root_files(struct rtrs_clt *clt);
+
+int rtrs_clt_create_sess_files(struct rtrs_clt_sess *sess);
+void rtrs_clt_destroy_sess_files(struct rtrs_clt_sess *sess,
+ const struct attribute *sysfs_self);
+
+#endif /* RTRS_CLT_H */
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-log.h b/drivers/infiniband/ulp/rtrs/rtrs-log.h
new file mode 100644
index 000000000000..53c785b992f2
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-log.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+#ifndef RTRS_LOG_H
+#define RTRS_LOG_H
+
+#define rtrs_log(fn, obj, fmt, ...) \
+ fn("<%s>: " fmt, obj->sessname, ##__VA_ARGS__)
+
+#define rtrs_err(obj, fmt, ...) \
+ rtrs_log(pr_err, obj, fmt, ##__VA_ARGS__)
+#define rtrs_err_rl(obj, fmt, ...) \
+ rtrs_log(pr_err_ratelimited, obj, fmt, ##__VA_ARGS__)
+#define rtrs_wrn(obj, fmt, ...) \
+ rtrs_log(pr_warn, obj, fmt, ##__VA_ARGS__)
+#define rtrs_wrn_rl(obj, fmt, ...) \
+ rtrs_log(pr_warn_ratelimited, obj, fmt, ##__VA_ARGS__)
+#define rtrs_info(obj, fmt, ...) \
+ rtrs_log(pr_info, obj, fmt, ##__VA_ARGS__)
+#define rtrs_info_rl(obj, fmt, ...) \
+ rtrs_log(pr_info_ratelimited, obj, fmt, ##__VA_ARGS__)
+
+#endif /* RTRS_LOG_H */
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-pri.h b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
new file mode 100644
index 000000000000..0a93c87ef92b
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
@@ -0,0 +1,399 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+
+#ifndef RTRS_PRI_H
+#define RTRS_PRI_H
+
+#include <linux/uuid.h>
+#include <rdma/rdma_cm.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib.h>
+
+#include "rtrs.h"
+
+#define RTRS_PROTO_VER_MAJOR 2
+#define RTRS_PROTO_VER_MINOR 0
+
+#define RTRS_PROTO_VER_STRING __stringify(RTRS_PROTO_VER_MAJOR) "." \
+ __stringify(RTRS_PROTO_VER_MINOR)
+
+enum rtrs_imm_const {
+ MAX_IMM_TYPE_BITS = 4,
+ MAX_IMM_TYPE_MASK = ((1 << MAX_IMM_TYPE_BITS) - 1),
+ MAX_IMM_PAYL_BITS = 28,
+ MAX_IMM_PAYL_MASK = ((1 << MAX_IMM_PAYL_BITS) - 1),
+};
+
+enum rtrs_imm_type {
+ RTRS_IO_REQ_IMM = 0, /* client to server */
+ RTRS_IO_RSP_IMM = 1, /* server to client */
+ RTRS_IO_RSP_W_INV_IMM = 2, /* server to client */
+
+ RTRS_HB_MSG_IMM = 8, /* HB: HeartBeat */
+ RTRS_HB_ACK_IMM = 9,
+
+ RTRS_LAST_IMM,
+};
+
+enum {
+ SERVICE_CON_QUEUE_DEPTH = 512,
+
+ MAX_PATHS_NUM = 128,
+
+ /*
+ * With the size of struct rtrs_permit allocated on the client, 4K
+ * is the maximum number of rtrs_permits we can allocate. This number is
+ * also used on the client to allocate the IU for the user connection
+ * to receive the RDMA addresses from the server.
+ */
+ MAX_SESS_QUEUE_DEPTH = 4096,
+
+ RTRS_HB_INTERVAL_MS = 5000,
+ RTRS_HB_MISSED_MAX = 5,
+
+ RTRS_MAGIC = 0x1BBD,
+ RTRS_PROTO_VER = (RTRS_PROTO_VER_MAJOR << 8) | RTRS_PROTO_VER_MINOR,
+};
+
+struct rtrs_ib_dev;
+
+struct rtrs_rdma_dev_pd_ops {
+ struct rtrs_ib_dev *(*alloc)(void);
+ void (*free)(struct rtrs_ib_dev *dev);
+ int (*init)(struct rtrs_ib_dev *dev);
+ void (*deinit)(struct rtrs_ib_dev *dev);
+};
+
+struct rtrs_rdma_dev_pd {
+ struct mutex mutex;
+ struct list_head list;
+ enum ib_pd_flags pd_flags;
+ const struct rtrs_rdma_dev_pd_ops *ops;
+};
+
+struct rtrs_ib_dev {
+ struct ib_device *ib_dev;
+ struct ib_pd *ib_pd;
+ struct kref ref;
+ struct list_head entry;
+ struct rtrs_rdma_dev_pd *pool;
+};
+
+struct rtrs_con {
+ struct rtrs_sess *sess;
+ struct ib_qp *qp;
+ struct ib_cq *cq;
+ struct rdma_cm_id *cm_id;
+ unsigned int cid;
+};
+
+struct rtrs_sess {
+ struct list_head entry;
+ struct sockaddr_storage dst_addr;
+ struct sockaddr_storage src_addr;
+ char sessname[NAME_MAX];
+ uuid_t uuid;
+ struct rtrs_con **con;
+ unsigned int con_num;
+ unsigned int recon_cnt;
+ struct rtrs_ib_dev *dev;
+ int dev_ref;
+ struct ib_cqe *hb_cqe;
+ void (*hb_err_handler)(struct rtrs_con *con);
+ struct workqueue_struct *hb_wq;
+ struct delayed_work hb_dwork;
+ unsigned int hb_interval_ms;
+ unsigned int hb_missed_cnt;
+ unsigned int hb_missed_max;
+};
+
+/* rtrs information unit */
+struct rtrs_iu {
+ struct list_head list;
+ struct ib_cqe cqe;
+ dma_addr_t dma_addr;
+ void *buf;
+ size_t size;
+ enum dma_data_direction direction;
+};
+
+/**
+ * enum rtrs_msg_types - RTRS message types, see also rtrs/README
+ * @RTRS_MSG_INFO_REQ: Client additional info request to the server
+ * @RTRS_MSG_INFO_RSP: Server additional info response to the client
+ * @RTRS_MSG_WRITE: Client writes data per RDMA to server
+ * @RTRS_MSG_READ: Client requests data transfer from server
+ * @RTRS_MSG_RKEY_RSP: Server refreshed rkey for rbuf
+ */
+enum rtrs_msg_types {
+ RTRS_MSG_INFO_REQ,
+ RTRS_MSG_INFO_RSP,
+ RTRS_MSG_WRITE,
+ RTRS_MSG_READ,
+ RTRS_MSG_RKEY_RSP,
+};
+
+/**
+ * enum rtrs_msg_flags - RTRS message flags.
+ * @RTRS_MSG_NEED_INVAL_F: Send invalidation in response.
+ * @RTRS_MSG_NEW_RKEY_F: Send refreshed rkey in response.
+ */
+enum rtrs_msg_flags {
+ RTRS_MSG_NEED_INVAL_F = 1 << 0,
+ RTRS_MSG_NEW_RKEY_F = 1 << 1,
+};
+
+/**
+ * struct rtrs_sg_desc - RDMA-Buffer entry description
+ * @addr: Address of RDMA destination buffer
+ * @key: Authorization rkey to write to the buffer
+ * @len: Size of the buffer
+ */
+struct rtrs_sg_desc {
+ __le64 addr;
+ __le32 key;
+ __le32 len;
+};
+
+/**
+ * struct rtrs_msg_conn_req - Client connection request to the server
+ * @magic: RTRS magic
+ * @version: RTRS protocol version
+ * @cid: Current connection id
+ * @cid_num: Number of connections per session
+ * @recon_cnt: Reconnections counter
+ * @sess_uuid: UUID of a session (path)
+ * @paths_uuid: UUID of a group of sessions (paths)
+ *
+ * NOTE: max size 56 bytes, see man rdma_connect().
+ */
+struct rtrs_msg_conn_req {
+ /* Is set to 0 by cma.c in case of AF_IB, do not touch that.
+ * see https://www.spinics.net/lists/linux-rdma/msg22397.html
+ */
+ u8 __cma_version;
+ /* On the sender side this should be set to 0, or cma_save_ip_info()
+ * will extract garbage and fail.
+ */
+ u8 __ip_version;
+ __le16 magic;
+ __le16 version;
+ __le16 cid;
+ __le16 cid_num;
+ __le16 recon_cnt;
+ uuid_t sess_uuid;
+ uuid_t paths_uuid;
+ u8 reserved[12];
+};
+
+/**
+ * struct rtrs_msg_conn_rsp - Server connection response to the client
+ * @magic: RTRS magic
+ * @version: RTRS protocol version
+ * @errno: If rdma_accept() then 0, if rdma_reject() indicates error
+ * @queue_depth: max inflight messages (queue-depth) in this session
+ * @max_io_size: max io size server supports
+ * @max_hdr_size: max msg header size server supports
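+ * @flags: RTRS message flags, e.g. %RTRS_MSG_NEW_RKEY_F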
+ *
+ * NOTE: size is 56 bytes, max possible is 136 bytes, see man rdma_accept().
+ */
+struct rtrs_msg_conn_rsp {
+ __le16 magic;
+ __le16 version;
+ __le16 errno;
+ __le16 queue_depth;
+ __le32 max_io_size;
+ __le32 max_hdr_size;
+ __le32 flags;
+ u8 reserved[36];
+};
+
+/**
+ * struct rtrs_msg_info_req
+ * @type: @RTRS_MSG_INFO_REQ
+ * @sessname: Session name chosen by client
+ */
+struct rtrs_msg_info_req {
+ __le16 type;
+ u8 sessname[NAME_MAX];
+ u8 reserved[15];
+};
+
+/**
+ * struct rtrs_msg_info_rsp
+ * @type: @RTRS_MSG_INFO_RSP
+ * @sg_cnt: Number of @desc entries
+ * @desc: RDMA buffers on the server that the client can write to
+ */
+struct rtrs_msg_info_rsp {
+ __le16 type;
+ __le16 sg_cnt;
+ u8 reserved[4];
+ struct rtrs_sg_desc desc[];
+};
+
+/**
+ * struct rtrs_msg_rkey_rsp
+ * @type: @RTRS_MSG_RKEY_RSP
+ * @buf_id: RDMA buf_id of the new rkey
+ * @rkey: new remote key for RDMA buffers id from server
+ */
+struct rtrs_msg_rkey_rsp {
+ __le16 type;
+ __le16 buf_id;
+ __le32 rkey;
+};
+
+/**
+ * struct rtrs_msg_rdma_read - RDMA data transfer request from client
+ * @type: always @RTRS_MSG_READ
+ * @usr_len: length of user payload
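+ * @flags: RTRS message flags, see enum rtrs_msg_flags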
+ * @sg_cnt: number of @desc entries
+ * @desc: RDMA buffers where the server can write the result to
+ */
+struct rtrs_msg_rdma_read {
+ __le16 type;
+ __le16 usr_len;
+ __le16 flags;
+ __le16 sg_cnt;
+ struct rtrs_sg_desc desc[];
+};
+
+/**
+ * struct rtrs_msg_rdma_write - Message transferred to server with RDMA-Write
+ * @type: always @RTRS_MSG_WRITE
+ * @usr_len: length of user payload
+ */
+struct rtrs_msg_rdma_write {
+ __le16 type;
+ __le16 usr_len;
+};
+
+/**
+ * struct rtrs_msg_rdma_hdr - header for read or write request
+ * @type: @RTRS_MSG_WRITE | @RTRS_MSG_READ
+ */
+struct rtrs_msg_rdma_hdr {
+ __le16 type;
+};
+
+/* rtrs.c */
+
+struct rtrs_iu *rtrs_iu_alloc(u32 queue_size, size_t size, gfp_t t,
+ struct ib_device *dev, enum dma_data_direction,
+ void (*done)(struct ib_cq *cq, struct ib_wc *wc));
+void rtrs_iu_free(struct rtrs_iu *iu, enum dma_data_direction dir,
+ struct ib_device *dev, u32 queue_size);
+int rtrs_iu_post_recv(struct rtrs_con *con, struct rtrs_iu *iu);
+int rtrs_iu_post_send(struct rtrs_con *con, struct rtrs_iu *iu, size_t size,
+ struct ib_send_wr *head);
+int rtrs_iu_post_rdma_write_imm(struct rtrs_con *con, struct rtrs_iu *iu,
+ struct ib_sge *sge, unsigned int num_sge,
+ u32 rkey, u64 rdma_addr, u32 imm_data,
+ enum ib_send_flags flags,
+ struct ib_send_wr *head);
+
+int rtrs_post_recv_empty(struct rtrs_con *con, struct ib_cqe *cqe);
+int rtrs_post_rdma_write_imm_empty(struct rtrs_con *con, struct ib_cqe *cqe,
+ u32 imm_data, enum ib_send_flags flags,
+ struct ib_send_wr *head);
+
+int rtrs_cq_qp_create(struct rtrs_sess *rtrs_sess, struct rtrs_con *con,
+ u32 max_send_sge, int cq_vector, u16 cq_size,
+ u16 wr_queue_size, enum ib_poll_context poll_ctx);
+void rtrs_cq_qp_destroy(struct rtrs_con *con);
+
+void rtrs_init_hb(struct rtrs_sess *sess, struct ib_cqe *cqe,
+ unsigned int interval_ms, unsigned int missed_max,
+ void (*err_handler)(struct rtrs_con *con),
+ struct workqueue_struct *wq);
+void rtrs_start_hb(struct rtrs_sess *sess);
+void rtrs_stop_hb(struct rtrs_sess *sess);
+void rtrs_send_hb_ack(struct rtrs_sess *sess);
+
+void rtrs_rdma_dev_pd_init(enum ib_pd_flags pd_flags,
+ struct rtrs_rdma_dev_pd *pool);
+void rtrs_rdma_dev_pd_deinit(struct rtrs_rdma_dev_pd *pool);
+
+struct rtrs_ib_dev *rtrs_ib_dev_find_or_add(struct ib_device *ib_dev,
+ struct rtrs_rdma_dev_pd *pool);
+int rtrs_ib_dev_put(struct rtrs_ib_dev *dev);
+
+static inline u32 rtrs_to_imm(u32 type, u32 payload)
+{
+ BUILD_BUG_ON(MAX_IMM_PAYL_BITS + MAX_IMM_TYPE_BITS != 32);
+ BUILD_BUG_ON(RTRS_LAST_IMM > (1<<MAX_IMM_TYPE_BITS));
+ return ((type & MAX_IMM_TYPE_MASK) << MAX_IMM_PAYL_BITS) |
+ (payload & MAX_IMM_PAYL_MASK);
+}
+
+static inline void rtrs_from_imm(u32 imm, u32 *type, u32 *payload)
+{
+ *payload = imm & MAX_IMM_PAYL_MASK;
+ *type = imm >> MAX_IMM_PAYL_BITS;
+}
+
+static inline u32 rtrs_to_io_req_imm(u32 addr)
+{
+ return rtrs_to_imm(RTRS_IO_REQ_IMM, addr);
+}
+
+static inline u32 rtrs_to_io_rsp_imm(u32 msg_id, int errno, bool w_inval)
+{
+ enum rtrs_imm_type type;
+ u32 payload;
+
+ /* 9 bits for errno, 19 bits for msg_id */
+ payload = (abs(errno) & 0x1ff) << 19 | (msg_id & 0x7ffff);
+ type = w_inval ? RTRS_IO_RSP_W_INV_IMM : RTRS_IO_RSP_IMM;
+
+ return rtrs_to_imm(type, payload);
+}
+
+static inline void rtrs_from_io_rsp_imm(u32 payload, u32 *msg_id, int *errno)
+{
+ /* 9 bits for errno, 19 bits for msg_id */
+ *msg_id = payload & 0x7ffff;
+ *errno = -(int)((payload >> 19) & 0x1ff);
+}
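+
+/*
+ * Worked example (illustration only): for msg_id 3 and errno -EIO (-5),
+ * rtrs_to_io_rsp_imm() builds payload = (5 << 19) | 3, and
+ * rtrs_from_io_rsp_imm() recovers msg_id 3 and errno -5 from that payload.
+ */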
+
+#define STAT_STORE_FUNC(type, set_value, reset) \
+static ssize_t set_value##_store(struct kobject *kobj, \
+ struct kobj_attribute *attr, \
+ const char *buf, size_t count) \
+{ \
+ int ret = -EINVAL; \
+ type *stats = container_of(kobj, type, kobj_stats); \
+ \
+ if (sysfs_streq(buf, "1")) \
+ ret = reset(stats, true); \
+ else if (sysfs_streq(buf, "0")) \
+ ret = reset(stats, false); \
+ if (ret) \
+ return ret; \
+ \
+ return count; \
+}
+
+#define STAT_SHOW_FUNC(type, get_value, print) \
+static ssize_t get_value##_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, \
+ char *page) \
+{ \
+ type *stats = container_of(kobj, type, kobj_stats); \
+ \
+ return print(stats, page, PAGE_SIZE); \
+}
+
+#define STAT_ATTR(type, stat, print, reset) \
+STAT_STORE_FUNC(type, stat, reset) \
+STAT_SHOW_FUNC(type, stat, print) \
+static struct kobj_attribute stat##_attr = __ATTR_RW(stat)
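+
+/*
+ * Usage note: rtrs-srv-sysfs.c in this patch instantiates
+ * STAT_ATTR(struct rtrs_srv_stats, rdma, ...), creating an "rdma" attribute
+ * whose show() prints the stats and whose store() calls the reset handler
+ * with true for "1" and false for "0".
+ */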
+
+#endif /* RTRS_PRI_H */
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv-stats.c b/drivers/infiniband/ulp/rtrs/rtrs-srv-stats.c
new file mode 100644
index 000000000000..e102b1368d0c
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv-stats.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
+
+#include "rtrs-srv.h"
+
+int rtrs_srv_reset_rdma_stats(struct rtrs_srv_stats *stats, bool enable)
+{
+ if (enable) {
+ struct rtrs_srv_stats_rdma_stats *r = &stats->rdma_stats;
+
+ memset(r, 0, sizeof(*r));
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+ssize_t rtrs_srv_stats_rdma_to_str(struct rtrs_srv_stats *stats,
+ char *page, size_t len)
+{
+ struct rtrs_srv_stats_rdma_stats *r = &stats->rdma_stats;
+ struct rtrs_srv_sess *sess = stats->sess;
+
+ return scnprintf(page, len, "%lld %lld %lld %lld %u\n",
+ (s64)atomic64_read(&r->dir[READ].cnt),
+ (s64)atomic64_read(&r->dir[READ].size_total),
+ (s64)atomic64_read(&r->dir[WRITE].cnt),
+ (s64)atomic64_read(&r->dir[WRITE].size_total),
+ atomic_read(&sess->ids_inflight));
+}
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
new file mode 100644
index 000000000000..3d7877534bcc
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
@@ -0,0 +1,321 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
+
+#include "rtrs-pri.h"
+#include "rtrs-srv.h"
+#include "rtrs-log.h"
+
+static void rtrs_srv_release(struct kobject *kobj)
+{
+ struct rtrs_srv_sess *sess;
+
+ sess = container_of(kobj, struct rtrs_srv_sess, kobj);
+ kfree(sess);
+}
+
+static struct kobj_type ktype = {
+ .sysfs_ops = &kobj_sysfs_ops,
+ .release = rtrs_srv_release,
+};
+
+static ssize_t rtrs_srv_disconnect_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ return scnprintf(page, PAGE_SIZE, "Usage: echo 1 > %s\n",
+ attr->attr.name);
+}
+
+static ssize_t rtrs_srv_disconnect_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rtrs_srv_sess *sess;
+ struct rtrs_sess *s;
+ char str[MAXHOSTNAMELEN];
+
+ sess = container_of(kobj, struct rtrs_srv_sess, kobj);
+ s = &sess->s;
+ if (!sysfs_streq(buf, "1")) {
+ rtrs_err(s, "%s: invalid value: '%s'\n",
+ attr->attr.name, buf);
+ return -EINVAL;
+ }
+
+ sockaddr_to_str((struct sockaddr *)&sess->s.dst_addr, str, sizeof(str));
+
+ rtrs_info(s, "disconnect for path %s requested\n", str);
+ close_sess(sess);
+
+ return count;
+}
+
+static struct kobj_attribute rtrs_srv_disconnect_attr =
+ __ATTR(disconnect, 0644,
+ rtrs_srv_disconnect_show, rtrs_srv_disconnect_store);
+
+static ssize_t rtrs_srv_hca_port_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ struct rtrs_srv_sess *sess;
+ struct rtrs_con *usr_con;
+
+ sess = container_of(kobj, typeof(*sess), kobj);
+ usr_con = sess->s.con[0];
+
+ return scnprintf(page, PAGE_SIZE, "%u\n",
+ usr_con->cm_id->port_num);
+}
+
+static struct kobj_attribute rtrs_srv_hca_port_attr =
+ __ATTR(hca_port, 0444, rtrs_srv_hca_port_show, NULL);
+
+static ssize_t rtrs_srv_hca_name_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ struct rtrs_srv_sess *sess;
+
+ sess = container_of(kobj, struct rtrs_srv_sess, kobj);
+
+ return scnprintf(page, PAGE_SIZE, "%s\n",
+ sess->s.dev->ib_dev->name);
+}
+
+static struct kobj_attribute rtrs_srv_hca_name_attr =
+ __ATTR(hca_name, 0444, rtrs_srv_hca_name_show, NULL);
+
+static ssize_t rtrs_srv_src_addr_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ struct rtrs_srv_sess *sess;
+ int cnt;
+
+ sess = container_of(kobj, struct rtrs_srv_sess, kobj);
+ cnt = sockaddr_to_str((struct sockaddr *)&sess->s.dst_addr,
+ page, PAGE_SIZE);
+ return cnt + scnprintf(page + cnt, PAGE_SIZE - cnt, "\n");
+}
+
+static struct kobj_attribute rtrs_srv_src_addr_attr =
+ __ATTR(src_addr, 0444, rtrs_srv_src_addr_show, NULL);
+
+static ssize_t rtrs_srv_dst_addr_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ struct rtrs_srv_sess *sess;
+ int cnt;
+
+ sess = container_of(kobj, struct rtrs_srv_sess, kobj);
+ cnt = sockaddr_to_str((struct sockaddr *)&sess->s.src_addr,
+ page, PAGE_SIZE);
+ return cnt + scnprintf(page + cnt, PAGE_SIZE - cnt, "\n");
+}
+
+static struct kobj_attribute rtrs_srv_dst_addr_attr =
+ __ATTR(dst_addr, 0444, rtrs_srv_dst_addr_show, NULL);
+
+static struct attribute *rtrs_srv_sess_attrs[] = {
+ &rtrs_srv_hca_name_attr.attr,
+ &rtrs_srv_hca_port_attr.attr,
+ &rtrs_srv_src_addr_attr.attr,
+ &rtrs_srv_dst_addr_attr.attr,
+ &rtrs_srv_disconnect_attr.attr,
+ NULL,
+};
+
+static struct attribute_group rtrs_srv_sess_attr_group = {
+ .attrs = rtrs_srv_sess_attrs,
+};
+
+STAT_ATTR(struct rtrs_srv_stats, rdma,
+ rtrs_srv_stats_rdma_to_str,
+ rtrs_srv_reset_rdma_stats);
+
+static struct attribute *rtrs_srv_stats_attrs[] = {
+ &rdma_attr.attr,
+ NULL,
+};
+
+static struct attribute_group rtrs_srv_stats_attr_group = {
+ .attrs = rtrs_srv_stats_attrs,
+};
+
+static void rtrs_srv_dev_release(struct device *dev)
+{
+ struct rtrs_srv *srv = container_of(dev, struct rtrs_srv, dev);
+
+ kfree(srv);
+}
+
+static int rtrs_srv_create_once_sysfs_root_folders(struct rtrs_srv_sess *sess)
+{
+ struct rtrs_srv *srv = sess->srv;
+ int err = 0;
+
+ mutex_lock(&srv->paths_mutex);
+ if (srv->dev_ref++) {
+ /*
+ * Device needs to be registered only on the first session
+ */
+ goto unlock;
+ }
+ srv->dev.class = rtrs_dev_class;
+ srv->dev.release = rtrs_srv_dev_release;
+ err = dev_set_name(&srv->dev, "%s", sess->s.sessname);
+ if (err)
+ goto unlock;
+
+ /*
+ * Suppress user space notification until
+ * sysfs files are created
+ */
+ dev_set_uevent_suppress(&srv->dev, true);
+ err = device_register(&srv->dev);
+ if (err) {
+ pr_err("device_register(): %d\n", err);
+ goto put;
+ }
+ srv->kobj_paths = kobject_create_and_add("paths", &srv->dev.kobj);
+ if (!srv->kobj_paths) {
+ err = -ENOMEM;
+ pr_err("kobject_create_and_add(): %d\n", err);
+ device_unregister(&srv->dev);
+ goto unlock;
+ }
+ dev_set_uevent_suppress(&srv->dev, false);
+ kobject_uevent(&srv->dev.kobj, KOBJ_ADD);
+ goto unlock;
+
+put:
+ put_device(&srv->dev);
+unlock:
+ mutex_unlock(&srv->paths_mutex);
+
+ return err;
+}
+
+static void
+rtrs_srv_destroy_once_sysfs_root_folders(struct rtrs_srv_sess *sess)
+{
+ struct rtrs_srv *srv = sess->srv;
+
+ mutex_lock(&srv->paths_mutex);
+ if (!--srv->dev_ref) {
+ kobject_del(srv->kobj_paths);
+ kobject_put(srv->kobj_paths);
+ mutex_unlock(&srv->paths_mutex);
+ device_unregister(&srv->dev);
+ } else {
+ mutex_unlock(&srv->paths_mutex);
+ }
+}
+
+static void rtrs_srv_sess_stats_release(struct kobject *kobj)
+{
+ struct rtrs_srv_stats *stats;
+
+ stats = container_of(kobj, struct rtrs_srv_stats, kobj_stats);
+
+ kfree(stats);
+}
+
+static struct kobj_type ktype_stats = {
+ .sysfs_ops = &kobj_sysfs_ops,
+ .release = rtrs_srv_sess_stats_release,
+};
+
+static int rtrs_srv_create_stats_files(struct rtrs_srv_sess *sess)
+{
+ int err;
+ struct rtrs_sess *s = &sess->s;
+
+ err = kobject_init_and_add(&sess->stats->kobj_stats, &ktype_stats,
+ &sess->kobj, "stats");
+ if (err) {
+ rtrs_err(s, "kobject_init_and_add(): %d\n", err);
+ return err;
+ }
+ err = sysfs_create_group(&sess->stats->kobj_stats,
+ &rtrs_srv_stats_attr_group);
+ if (err) {
+ rtrs_err(s, "sysfs_create_group(): %d\n", err);
+ goto err;
+ }
+
+ return 0;
+
+err:
+ kobject_del(&sess->stats->kobj_stats);
+ kobject_put(&sess->stats->kobj_stats);
+
+ return err;
+}
+
+int rtrs_srv_create_sess_files(struct rtrs_srv_sess *sess)
+{
+ struct rtrs_srv *srv = sess->srv;
+ struct rtrs_sess *s = &sess->s;
+ char str[NAME_MAX];
+ int err, cnt;
+
+ cnt = sockaddr_to_str((struct sockaddr *)&sess->s.dst_addr,
+ str, sizeof(str));
+ cnt += scnprintf(str + cnt, sizeof(str) - cnt, "@");
+ sockaddr_to_str((struct sockaddr *)&sess->s.src_addr,
+ str + cnt, sizeof(str) - cnt);
+
+ err = rtrs_srv_create_once_sysfs_root_folders(sess);
+ if (err)
+ return err;
+
+ err = kobject_init_and_add(&sess->kobj, &ktype, srv->kobj_paths,
+ "%s", str);
+ if (err) {
+ rtrs_err(s, "kobject_init_and_add(): %d\n", err);
+ goto destroy_root;
+ }
+ err = sysfs_create_group(&sess->kobj, &rtrs_srv_sess_attr_group);
+ if (err) {
+ rtrs_err(s, "sysfs_create_group(): %d\n", err);
+ goto put_kobj;
+ }
+ err = rtrs_srv_create_stats_files(sess);
+ if (err)
+ goto remove_group;
+
+ return 0;
+
+remove_group:
+ sysfs_remove_group(&sess->kobj, &rtrs_srv_sess_attr_group);
+put_kobj:
+ kobject_del(&sess->kobj);
+ kobject_put(&sess->kobj);
+destroy_root:
+ rtrs_srv_destroy_once_sysfs_root_folders(sess);
+
+ return err;
+}
+
+void rtrs_srv_destroy_sess_files(struct rtrs_srv_sess *sess)
+{
+ if (sess->kobj.state_in_sysfs) {
+ kobject_del(&sess->stats->kobj_stats);
+ kobject_put(&sess->stats->kobj_stats);
+ kobject_del(&sess->kobj);
+ kobject_put(&sess->kobj);
+
+ rtrs_srv_destroy_once_sysfs_root_folders(sess);
+ }
+}
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
new file mode 100644
index 000000000000..0d9241f5d9e6
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
@@ -0,0 +1,2178 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
+
+#include <linux/module.h>
+#include <linux/mempool.h>
+
+#include "rtrs-srv.h"
+#include "rtrs-log.h"
+#include <rdma/ib_cm.h>
+
+MODULE_DESCRIPTION("RDMA Transport Server");
+MODULE_LICENSE("GPL");
+
+/* Must be power of 2, see mask from mr->page_size in ib_sg_to_pages() */
+#define DEFAULT_MAX_CHUNK_SIZE (128 << 10)
+#define DEFAULT_SESS_QUEUE_DEPTH 512
+#define MAX_HDR_SIZE PAGE_SIZE
+
+/* We guarantee to serve 10 paths at least */
+#define CHUNK_POOL_SZ 10
+
+static struct rtrs_rdma_dev_pd dev_pd;
+static mempool_t *chunk_pool;
+struct class *rtrs_dev_class;
+
+static int __read_mostly max_chunk_size = DEFAULT_MAX_CHUNK_SIZE;
+static int __read_mostly sess_queue_depth = DEFAULT_SESS_QUEUE_DEPTH;
+
+static bool always_invalidate = true;
+module_param(always_invalidate, bool, 0444);
+MODULE_PARM_DESC(always_invalidate,
+ "Invalidate memory registration for contiguous memory regions before accessing.");
+
+module_param_named(max_chunk_size, max_chunk_size, int, 0444);
+MODULE_PARM_DESC(max_chunk_size,
+		 "Max size for each IO request, in bytes (default: "
+		 __stringify(DEFAULT_MAX_CHUNK_SIZE) ", i.e. 128KB)");
+
+module_param_named(sess_queue_depth, sess_queue_depth, int, 0444);
+MODULE_PARM_DESC(sess_queue_depth,
+ "Number of buffers for pending I/O requests to allocate per session. Maximum: "
+ __stringify(MAX_SESS_QUEUE_DEPTH) " (default: "
+ __stringify(DEFAULT_SESS_QUEUE_DEPTH) ")");
+
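+/*
+ * The parameters above are 0444, i.e. read-only through sysfs, so they can
+ * only be set at module load time. Hypothetical example (assuming the server
+ * module is built as rtrs-server):
+ *
+ *   modprobe rtrs-server max_chunk_size=65536 sess_queue_depth=256
+ */
+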
+static cpumask_t cq_affinity_mask = { CPU_BITS_ALL };
+
+static struct workqueue_struct *rtrs_wq;
+
+static inline struct rtrs_srv_con *to_srv_con(struct rtrs_con *c)
+{
+ return container_of(c, struct rtrs_srv_con, c);
+}
+
+static inline struct rtrs_srv_sess *to_srv_sess(struct rtrs_sess *s)
+{
+ return container_of(s, struct rtrs_srv_sess, s);
+}
+
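+/*
+ * Valid session state transitions enforced by __rtrs_srv_change_state():
+ * CONNECTING -> CONNECTED, CONNECTING/CONNECTED -> CLOSING, CLOSING -> CLOSED.
+ */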
+static bool __rtrs_srv_change_state(struct rtrs_srv_sess *sess,
+ enum rtrs_srv_state new_state)
+{
+ enum rtrs_srv_state old_state;
+ bool changed = false;
+
+ lockdep_assert_held(&sess->state_lock);
+ old_state = sess->state;
+ switch (new_state) {
+ case RTRS_SRV_CONNECTED:
+ switch (old_state) {
+ case RTRS_SRV_CONNECTING:
+ changed = true;
+ fallthrough;
+ default:
+ break;
+ }
+ break;
+ case RTRS_SRV_CLOSING:
+ switch (old_state) {
+ case RTRS_SRV_CONNECTING:
+ case RTRS_SRV_CONNECTED:
+ changed = true;
+ fallthrough;
+ default:
+ break;
+ }
+ break;
+ case RTRS_SRV_CLOSED:
+ switch (old_state) {
+ case RTRS_SRV_CLOSING:
+ changed = true;
+ fallthrough;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ if (changed)
+ sess->state = new_state;
+
+ return changed;
+}
+
+static bool rtrs_srv_change_state_get_old(struct rtrs_srv_sess *sess,
+ enum rtrs_srv_state new_state,
+ enum rtrs_srv_state *old_state)
+{
+ bool changed;
+
+ spin_lock_irq(&sess->state_lock);
+ *old_state = sess->state;
+ changed = __rtrs_srv_change_state(sess, new_state);
+ spin_unlock_irq(&sess->state_lock);
+
+ return changed;
+}
+
+static bool rtrs_srv_change_state(struct rtrs_srv_sess *sess,
+ enum rtrs_srv_state new_state)
+{
+ enum rtrs_srv_state old_state;
+
+ return rtrs_srv_change_state_get_old(sess, new_state, &old_state);
+}
+
+static void free_id(struct rtrs_srv_op *id)
+{
+ if (!id)
+ return;
+ kfree(id);
+}
+
+static void rtrs_srv_free_ops_ids(struct rtrs_srv_sess *sess)
+{
+ struct rtrs_srv *srv = sess->srv;
+ int i;
+
+ WARN_ON(atomic_read(&sess->ids_inflight));
+ if (sess->ops_ids) {
+ for (i = 0; i < srv->queue_depth; i++)
+ free_id(sess->ops_ids[i]);
+ kfree(sess->ops_ids);
+ sess->ops_ids = NULL;
+ }
+}
+
+static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc);
+
+static struct ib_cqe io_comp_cqe = {
+ .done = rtrs_srv_rdma_done
+};
+
+static int rtrs_srv_alloc_ops_ids(struct rtrs_srv_sess *sess)
+{
+ struct rtrs_srv *srv = sess->srv;
+ struct rtrs_srv_op *id;
+ int i;
+
+ sess->ops_ids = kcalloc(srv->queue_depth, sizeof(*sess->ops_ids),
+ GFP_KERNEL);
+ if (!sess->ops_ids)
+ goto err;
+
+ for (i = 0; i < srv->queue_depth; ++i) {
+ id = kzalloc(sizeof(*id), GFP_KERNEL);
+ if (!id)
+ goto err;
+
+ sess->ops_ids[i] = id;
+ }
+ init_waitqueue_head(&sess->ids_waitq);
+ atomic_set(&sess->ids_inflight, 0);
+
+ return 0;
+
+err:
+ rtrs_srv_free_ops_ids(sess);
+ return -ENOMEM;
+}
+
+static inline void rtrs_srv_get_ops_ids(struct rtrs_srv_sess *sess)
+{
+ atomic_inc(&sess->ids_inflight);
+}
+
+static inline void rtrs_srv_put_ops_ids(struct rtrs_srv_sess *sess)
+{
+ if (atomic_dec_and_test(&sess->ids_inflight))
+ wake_up(&sess->ids_waitq);
+}
+
+static void rtrs_srv_wait_ops_ids(struct rtrs_srv_sess *sess)
+{
+ wait_event(sess->ids_waitq, !atomic_read(&sess->ids_inflight));
+}
+
+
+static void rtrs_srv_reg_mr_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct rtrs_srv_con *con = cq->cq_context;
+ struct rtrs_sess *s = con->c.sess;
+ struct rtrs_srv_sess *sess = to_srv_sess(s);
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ rtrs_err(s, "REG MR failed: %s\n",
+ ib_wc_status_msg(wc->status));
+ close_sess(sess);
+ return;
+ }
+}
+
+static struct ib_cqe local_reg_cqe = {
+ .done = rtrs_srv_reg_mr_done
+};
+
+static int rdma_write_sg(struct rtrs_srv_op *id)
+{
+ struct rtrs_sess *s = id->con->c.sess;
+ struct rtrs_srv_sess *sess = to_srv_sess(s);
+ dma_addr_t dma_addr = sess->dma_addr[id->msg_id];
+ struct rtrs_srv_mr *srv_mr;
+ struct rtrs_srv *srv = sess->srv;
+ struct ib_send_wr inv_wr, imm_wr;
+ struct ib_rdma_wr *wr = NULL;
+ enum ib_send_flags flags;
+ size_t sg_cnt;
+ int err, offset;
+ bool need_inval;
+ u32 rkey = 0;
+ struct ib_reg_wr rwr;
+ struct ib_sge *plist;
+ struct ib_sge list;
+
+ sg_cnt = le16_to_cpu(id->rd_msg->sg_cnt);
+ need_inval = le16_to_cpu(id->rd_msg->flags) & RTRS_MSG_NEED_INVAL_F;
+ if (unlikely(sg_cnt != 1))
+ return -EINVAL;
+
+ offset = 0;
+
+ wr = &id->tx_wr;
+ plist = &id->tx_sg;
+ plist->addr = dma_addr + offset;
+ plist->length = le32_to_cpu(id->rd_msg->desc[0].len);
+
+ /* WR will fail with length error
+ * if this is 0
+ */
+ if (unlikely(plist->length == 0)) {
+ rtrs_err(s, "Invalid RDMA-Write sg list length 0\n");
+ return -EINVAL;
+ }
+
+ plist->lkey = sess->s.dev->ib_pd->local_dma_lkey;
+ offset += plist->length;
+
+ wr->wr.sg_list = plist;
+ wr->wr.num_sge = 1;
+ wr->remote_addr = le64_to_cpu(id->rd_msg->desc[0].addr);
+ wr->rkey = le32_to_cpu(id->rd_msg->desc[0].key);
+ if (rkey == 0)
+ rkey = wr->rkey;
+ else
+ /* Only one key is actually used */
+ WARN_ON_ONCE(rkey != wr->rkey);
+
+ wr->wr.opcode = IB_WR_RDMA_WRITE;
+ wr->wr.ex.imm_data = 0;
+ wr->wr.send_flags = 0;
+
+ if (need_inval && always_invalidate) {
+ wr->wr.next = &rwr.wr;
+ rwr.wr.next = &inv_wr;
+ inv_wr.next = &imm_wr;
+ } else if (always_invalidate) {
+ wr->wr.next = &rwr.wr;
+ rwr.wr.next = &imm_wr;
+ } else if (need_inval) {
+ wr->wr.next = &inv_wr;
+ inv_wr.next = &imm_wr;
+ } else {
+ wr->wr.next = &imm_wr;
+ }
+ /*
+ * From time to time we have to post signaled sends,
+ * or send queue will fill up and only QP reset can help.
+ */
+ flags = (atomic_inc_return(&id->con->wr_cnt) % srv->queue_depth) ?
+ 0 : IB_SEND_SIGNALED;
+
+ if (need_inval) {
+ inv_wr.sg_list = NULL;
+ inv_wr.num_sge = 0;
+ inv_wr.opcode = IB_WR_SEND_WITH_INV;
+ inv_wr.send_flags = 0;
+ inv_wr.ex.invalidate_rkey = rkey;
+ }
+
+ imm_wr.next = NULL;
+ if (always_invalidate) {
+ struct rtrs_msg_rkey_rsp *msg;
+
+ srv_mr = &sess->mrs[id->msg_id];
+ rwr.wr.opcode = IB_WR_REG_MR;
+ rwr.wr.num_sge = 0;
+ rwr.mr = srv_mr->mr;
+ rwr.wr.send_flags = 0;
+ rwr.key = srv_mr->mr->rkey;
+ rwr.access = (IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_WRITE);
+ msg = srv_mr->iu->buf;
+ msg->buf_id = cpu_to_le16(id->msg_id);
+ msg->type = cpu_to_le16(RTRS_MSG_RKEY_RSP);
+ msg->rkey = cpu_to_le32(srv_mr->mr->rkey);
+
+ list.addr = srv_mr->iu->dma_addr;
+ list.length = sizeof(*msg);
+ list.lkey = sess->s.dev->ib_pd->local_dma_lkey;
+ imm_wr.sg_list = &list;
+ imm_wr.num_sge = 1;
+ imm_wr.opcode = IB_WR_SEND_WITH_IMM;
+ ib_dma_sync_single_for_device(sess->s.dev->ib_dev,
+ srv_mr->iu->dma_addr,
+ srv_mr->iu->size, DMA_TO_DEVICE);
+ } else {
+ imm_wr.sg_list = NULL;
+ imm_wr.num_sge = 0;
+ imm_wr.opcode = IB_WR_RDMA_WRITE_WITH_IMM;
+ }
+ imm_wr.send_flags = flags;
+ imm_wr.ex.imm_data = cpu_to_be32(rtrs_to_io_rsp_imm(id->msg_id,
+ 0, need_inval));
+
+ imm_wr.wr_cqe = &io_comp_cqe;
+ ib_dma_sync_single_for_device(sess->s.dev->ib_dev, dma_addr,
+ offset, DMA_BIDIRECTIONAL);
+
+ err = ib_post_send(id->con->c.qp, &id->tx_wr.wr, NULL);
+ if (unlikely(err))
+ rtrs_err(s,
+ "Posting RDMA-Write-Request to QP failed, err: %d\n",
+ err);
+
+ return err;
+}
+
+/**
+ * send_io_resp_imm() - respond to client with empty IMM on failed READ/WRITE
+ * requests or on successful WRITE request.
+ * @con: the connection to send back result
+ * @id: the id associated with the IO
+ * @errno: the error number of the IO.
+ *
+ * Return 0 on success, errno otherwise.
+ */
+static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
+ int errno)
+{
+ struct rtrs_sess *s = con->c.sess;
+ struct rtrs_srv_sess *sess = to_srv_sess(s);
+ struct ib_send_wr inv_wr, imm_wr, *wr = NULL;
+ struct ib_reg_wr rwr;
+ struct rtrs_srv *srv = sess->srv;
+ struct rtrs_srv_mr *srv_mr;
+ bool need_inval = false;
+ enum ib_send_flags flags;
+ u32 imm;
+ int err;
+
+ if (id->dir == READ) {
+ struct rtrs_msg_rdma_read *rd_msg = id->rd_msg;
+ size_t sg_cnt;
+
+ need_inval = le16_to_cpu(rd_msg->flags) &
+ RTRS_MSG_NEED_INVAL_F;
+ sg_cnt = le16_to_cpu(rd_msg->sg_cnt);
+
+ if (need_inval) {
+ if (likely(sg_cnt)) {
+ inv_wr.sg_list = NULL;
+ inv_wr.num_sge = 0;
+ inv_wr.opcode = IB_WR_SEND_WITH_INV;
+ inv_wr.send_flags = 0;
+ /* Only one key is actually used */
+ inv_wr.ex.invalidate_rkey =
+ le32_to_cpu(rd_msg->desc[0].key);
+ } else {
+ WARN_ON_ONCE(1);
+ need_inval = false;
+ }
+ }
+ }
+
+ if (need_inval && always_invalidate) {
+ wr = &inv_wr;
+ inv_wr.next = &rwr.wr;
+ rwr.wr.next = &imm_wr;
+ } else if (always_invalidate) {
+ wr = &rwr.wr;
+ rwr.wr.next = &imm_wr;
+ } else if (need_inval) {
+ wr = &inv_wr;
+ inv_wr.next = &imm_wr;
+ } else {
+ wr = &imm_wr;
+ }
+ /*
+ * From time to time we have to post signalled sends,
+ * or send queue will fill up and only QP reset can help.
+ */
+ flags = (atomic_inc_return(&con->wr_cnt) % srv->queue_depth) ?
+ 0 : IB_SEND_SIGNALED;
+ imm = rtrs_to_io_rsp_imm(id->msg_id, errno, need_inval);
+ imm_wr.next = NULL;
+ if (always_invalidate) {
+ struct ib_sge list;
+ struct rtrs_msg_rkey_rsp *msg;
+
+ srv_mr = &sess->mrs[id->msg_id];
+ rwr.wr.next = &imm_wr;
+ rwr.wr.opcode = IB_WR_REG_MR;
+ rwr.wr.num_sge = 0;
+ rwr.wr.send_flags = 0;
+ rwr.mr = srv_mr->mr;
+ rwr.key = srv_mr->mr->rkey;
+ rwr.access = (IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_WRITE);
+ msg = srv_mr->iu->buf;
+ msg->buf_id = cpu_to_le16(id->msg_id);
+ msg->type = cpu_to_le16(RTRS_MSG_RKEY_RSP);
+ msg->rkey = cpu_to_le32(srv_mr->mr->rkey);
+
+ list.addr = srv_mr->iu->dma_addr;
+ list.length = sizeof(*msg);
+ list.lkey = sess->s.dev->ib_pd->local_dma_lkey;
+ imm_wr.sg_list = &list;
+ imm_wr.num_sge = 1;
+ imm_wr.opcode = IB_WR_SEND_WITH_IMM;
+ ib_dma_sync_single_for_device(sess->s.dev->ib_dev,
+ srv_mr->iu->dma_addr,
+ srv_mr->iu->size, DMA_TO_DEVICE);
+ } else {
+ imm_wr.sg_list = NULL;
+ imm_wr.num_sge = 0;
+ imm_wr.opcode = IB_WR_RDMA_WRITE_WITH_IMM;
+ }
+ imm_wr.send_flags = flags;
+ imm_wr.wr_cqe = &io_comp_cqe;
+
+ imm_wr.ex.imm_data = cpu_to_be32(imm);
+
+ err = ib_post_send(id->con->c.qp, wr, NULL);
+ if (unlikely(err))
+ rtrs_err_rl(s, "Posting RDMA-Reply to QP failed, err: %d\n",
+ err);
+
+ return err;
+}
+
+void close_sess(struct rtrs_srv_sess *sess)
+{
+ enum rtrs_srv_state old_state;
+
+ if (rtrs_srv_change_state_get_old(sess, RTRS_SRV_CLOSING,
+ &old_state))
+ queue_work(rtrs_wq, &sess->close_work);
+ WARN_ON(sess->state != RTRS_SRV_CLOSING);
+}
+
+static inline const char *rtrs_srv_state_str(enum rtrs_srv_state state)
+{
+ switch (state) {
+ case RTRS_SRV_CONNECTING:
+ return "RTRS_SRV_CONNECTING";
+ case RTRS_SRV_CONNECTED:
+ return "RTRS_SRV_CONNECTED";
+ case RTRS_SRV_CLOSING:
+ return "RTRS_SRV_CLOSING";
+ case RTRS_SRV_CLOSED:
+ return "RTRS_SRV_CLOSED";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+/**
+ * rtrs_srv_resp_rdma() - Finish an RDMA request
+ *
+ * @id: Internal RTRS operation identifier
+ * @status: Response Code sent to the other side for this operation.
+ *		0 = success, <0 = error
+ * Context: any
+ *
+ * Finish an RDMA operation. A message is sent to the client and the
+ * corresponding memory areas will be released.
+ */
+bool rtrs_srv_resp_rdma(struct rtrs_srv_op *id, int status)
+{
+ struct rtrs_srv_sess *sess;
+ struct rtrs_srv_con *con;
+ struct rtrs_sess *s;
+ int err;
+
+ if (WARN_ON(!id))
+ return true;
+
+ con = id->con;
+ s = con->c.sess;
+ sess = to_srv_sess(s);
+
+ id->status = status;
+
+ if (unlikely(sess->state != RTRS_SRV_CONNECTED)) {
+ rtrs_err_rl(s,
+ "Sending I/O response failed, session is disconnected, sess state %s\n",
+ rtrs_srv_state_str(sess->state));
+ goto out;
+ }
+ if (always_invalidate) {
+ struct rtrs_srv_mr *mr = &sess->mrs[id->msg_id];
+
+ ib_update_fast_reg_key(mr->mr, ib_inc_rkey(mr->mr->rkey));
+ }
+ if (unlikely(atomic_sub_return(1,
+ &con->sq_wr_avail) < 0)) {
+ pr_err("IB send queue full\n");
+ atomic_add(1, &con->sq_wr_avail);
+ spin_lock(&con->rsp_wr_wait_lock);
+ list_add_tail(&id->wait_list, &con->rsp_wr_wait_list);
+ spin_unlock(&con->rsp_wr_wait_lock);
+ return false;
+ }
+
+ if (status || id->dir == WRITE || !id->rd_msg->sg_cnt)
+ err = send_io_resp_imm(con, id, status);
+ else
+ err = rdma_write_sg(id);
+
+ if (unlikely(err)) {
+ rtrs_err_rl(s, "IO response failed: %d\n", err);
+ close_sess(sess);
+ }
+out:
+ rtrs_srv_put_ops_ids(sess);
+ return true;
+}
+EXPORT_SYMBOL(rtrs_srv_resp_rdma);
+
+/**
+ * rtrs_srv_set_sess_priv() - Set private pointer in rtrs_srv.
+ * @srv: Session pointer
+ * @priv: The private pointer that is associated with the session.
+ */
+void rtrs_srv_set_sess_priv(struct rtrs_srv *srv, void *priv)
+{
+ srv->priv = priv;
+}
+EXPORT_SYMBOL(rtrs_srv_set_sess_priv);
+
+static void unmap_cont_bufs(struct rtrs_srv_sess *sess)
+{
+ int i;
+
+ for (i = 0; i < sess->mrs_num; i++) {
+ struct rtrs_srv_mr *srv_mr;
+
+ srv_mr = &sess->mrs[i];
+ rtrs_iu_free(srv_mr->iu, DMA_TO_DEVICE,
+ sess->s.dev->ib_dev, 1);
+ ib_dereg_mr(srv_mr->mr);
+ ib_dma_unmap_sg(sess->s.dev->ib_dev, srv_mr->sgt.sgl,
+ srv_mr->sgt.nents, DMA_BIDIRECTIONAL);
+ sg_free_table(&srv_mr->sgt);
+ }
+ kfree(sess->mrs);
+}
+
+static int map_cont_bufs(struct rtrs_srv_sess *sess)
+{
+ struct rtrs_srv *srv = sess->srv;
+ struct rtrs_sess *ss = &sess->s;
+ int i, mri, err, mrs_num;
+ unsigned int chunk_bits;
+ int chunks_per_mr = 1;
+
+ /*
+ * Here we map queue_depth chunks to MR. Firstly we have to
+ * figure out how many chunks can we map per MR.
+ */
+ if (always_invalidate) {
+ /*
+		 * In order to invalidate each chunk of memory separately,
+		 * we need one memory region per chunk.
+ */
+ mrs_num = srv->queue_depth;
+ } else {
+ chunks_per_mr =
+ sess->s.dev->ib_dev->attrs.max_fast_reg_page_list_len;
+ mrs_num = DIV_ROUND_UP(srv->queue_depth, chunks_per_mr);
+ chunks_per_mr = DIV_ROUND_UP(srv->queue_depth, mrs_num);
+ }
+
+ sess->mrs = kcalloc(mrs_num, sizeof(*sess->mrs), GFP_KERNEL);
+ if (!sess->mrs)
+ return -ENOMEM;
+
+ sess->mrs_num = mrs_num;
+
+ for (mri = 0; mri < mrs_num; mri++) {
+ struct rtrs_srv_mr *srv_mr = &sess->mrs[mri];
+ struct sg_table *sgt = &srv_mr->sgt;
+ struct scatterlist *s;
+ struct ib_mr *mr;
+ int nr, chunks;
+
+ chunks = chunks_per_mr * mri;
+ if (!always_invalidate)
+ chunks_per_mr = min_t(int, chunks_per_mr,
+ srv->queue_depth - chunks);
+
+ err = sg_alloc_table(sgt, chunks_per_mr, GFP_KERNEL);
+ if (err)
+ goto err;
+
+ for_each_sg(sgt->sgl, s, chunks_per_mr, i)
+ sg_set_page(s, srv->chunks[chunks + i],
+ max_chunk_size, 0);
+
+ nr = ib_dma_map_sg(sess->s.dev->ib_dev, sgt->sgl,
+ sgt->nents, DMA_BIDIRECTIONAL);
+ if (nr < sgt->nents) {
+ err = nr < 0 ? nr : -EINVAL;
+ goto free_sg;
+ }
+ mr = ib_alloc_mr(sess->s.dev->ib_pd, IB_MR_TYPE_MEM_REG,
+ sgt->nents);
+ if (IS_ERR(mr)) {
+ err = PTR_ERR(mr);
+ goto unmap_sg;
+ }
+ nr = ib_map_mr_sg(mr, sgt->sgl, sgt->nents,
+ NULL, max_chunk_size);
+ if (nr < 0 || nr < sgt->nents) {
+ err = nr < 0 ? nr : -EINVAL;
+ goto dereg_mr;
+ }
+
+ if (always_invalidate) {
+ srv_mr->iu = rtrs_iu_alloc(1,
+ sizeof(struct rtrs_msg_rkey_rsp),
+ GFP_KERNEL, sess->s.dev->ib_dev,
+ DMA_TO_DEVICE, rtrs_srv_rdma_done);
+ if (!srv_mr->iu) {
+ err = -ENOMEM;
+ rtrs_err(ss, "rtrs_iu_alloc(), err: %d\n", err);
+ goto free_iu;
+ }
+ }
+ /* Eventually dma addr for each chunk can be cached */
+ for_each_sg(sgt->sgl, s, sgt->orig_nents, i)
+ sess->dma_addr[chunks + i] = sg_dma_address(s);
+
+ ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
+ srv_mr->mr = mr;
+
+ continue;
+err:
+ while (mri--) {
+ srv_mr = &sess->mrs[mri];
+ sgt = &srv_mr->sgt;
+ mr = srv_mr->mr;
+free_iu:
+ rtrs_iu_free(srv_mr->iu, DMA_TO_DEVICE,
+ sess->s.dev->ib_dev, 1);
+dereg_mr:
+ ib_dereg_mr(mr);
+unmap_sg:
+ ib_dma_unmap_sg(sess->s.dev->ib_dev, sgt->sgl,
+ sgt->nents, DMA_BIDIRECTIONAL);
+free_sg:
+ sg_free_table(sgt);
+ }
+ kfree(sess->mrs);
+
+ return err;
+ }
+
+ chunk_bits = ilog2(srv->queue_depth - 1) + 1;
+ sess->mem_bits = (MAX_IMM_PAYL_BITS - chunk_bits);
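+	/*
+	 * Illustration: rtrs_srv_rdma_done() later recovers
+	 * msg_id = payload >> mem_bits and off = payload & ((1 << mem_bits) - 1),
+	 * i.e. the top chunk_bits of the immediate payload select the chunk and
+	 * the low mem_bits give the offset within it.
+	 */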
+
+ return 0;
+}
+
+static void rtrs_srv_hb_err_handler(struct rtrs_con *c)
+{
+ close_sess(to_srv_sess(c->sess));
+}
+
+static void rtrs_srv_init_hb(struct rtrs_srv_sess *sess)
+{
+ rtrs_init_hb(&sess->s, &io_comp_cqe,
+ RTRS_HB_INTERVAL_MS,
+ RTRS_HB_MISSED_MAX,
+ rtrs_srv_hb_err_handler,
+ rtrs_wq);
+}
+
+static void rtrs_srv_start_hb(struct rtrs_srv_sess *sess)
+{
+ rtrs_start_hb(&sess->s);
+}
+
+static void rtrs_srv_stop_hb(struct rtrs_srv_sess *sess)
+{
+ rtrs_stop_hb(&sess->s);
+}
+
+static void rtrs_srv_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct rtrs_srv_con *con = cq->cq_context;
+ struct rtrs_sess *s = con->c.sess;
+ struct rtrs_srv_sess *sess = to_srv_sess(s);
+ struct rtrs_iu *iu;
+
+ iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
+ rtrs_iu_free(iu, DMA_TO_DEVICE, sess->s.dev->ib_dev, 1);
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ rtrs_err(s, "Sess info response send failed: %s\n",
+ ib_wc_status_msg(wc->status));
+ close_sess(sess);
+ return;
+ }
+ WARN_ON(wc->opcode != IB_WC_SEND);
+}
+
+static void rtrs_srv_sess_up(struct rtrs_srv_sess *sess)
+{
+ struct rtrs_srv *srv = sess->srv;
+ struct rtrs_srv_ctx *ctx = srv->ctx;
+ int up;
+
+ mutex_lock(&srv->paths_ev_mutex);
+ up = ++srv->paths_up;
+ if (up == 1)
+ ctx->ops.link_ev(srv, RTRS_SRV_LINK_EV_CONNECTED, NULL);
+ mutex_unlock(&srv->paths_ev_mutex);
+
+ /* Mark session as established */
+ sess->established = true;
+}
+
+static void rtrs_srv_sess_down(struct rtrs_srv_sess *sess)
+{
+ struct rtrs_srv *srv = sess->srv;
+ struct rtrs_srv_ctx *ctx = srv->ctx;
+
+ if (!sess->established)
+ return;
+
+ sess->established = false;
+ mutex_lock(&srv->paths_ev_mutex);
+ WARN_ON(!srv->paths_up);
+ if (--srv->paths_up == 0)
+ ctx->ops.link_ev(srv, RTRS_SRV_LINK_EV_DISCONNECTED, srv->priv);
+ mutex_unlock(&srv->paths_ev_mutex);
+}
+
+static int post_recv_sess(struct rtrs_srv_sess *sess);
+
+static int process_info_req(struct rtrs_srv_con *con,
+ struct rtrs_msg_info_req *msg)
+{
+ struct rtrs_sess *s = con->c.sess;
+ struct rtrs_srv_sess *sess = to_srv_sess(s);
+ struct ib_send_wr *reg_wr = NULL;
+ struct rtrs_msg_info_rsp *rsp;
+ struct rtrs_iu *tx_iu;
+ struct ib_reg_wr *rwr;
+ int mri, err;
+ size_t tx_sz;
+
+ err = post_recv_sess(sess);
+ if (unlikely(err)) {
+ rtrs_err(s, "post_recv_sess(), err: %d\n", err);
+ return err;
+ }
+ rwr = kcalloc(sess->mrs_num, sizeof(*rwr), GFP_KERNEL);
+ if (unlikely(!rwr))
+ return -ENOMEM;
+ strlcpy(sess->s.sessname, msg->sessname, sizeof(sess->s.sessname));
+
+ tx_sz = sizeof(*rsp);
+ tx_sz += sizeof(rsp->desc[0]) * sess->mrs_num;
+ tx_iu = rtrs_iu_alloc(1, tx_sz, GFP_KERNEL, sess->s.dev->ib_dev,
+ DMA_TO_DEVICE, rtrs_srv_info_rsp_done);
+ if (unlikely(!tx_iu)) {
+ err = -ENOMEM;
+ goto rwr_free;
+ }
+
+ rsp = tx_iu->buf;
+ rsp->type = cpu_to_le16(RTRS_MSG_INFO_RSP);
+ rsp->sg_cnt = cpu_to_le16(sess->mrs_num);
+
+ for (mri = 0; mri < sess->mrs_num; mri++) {
+ struct ib_mr *mr = sess->mrs[mri].mr;
+
+ rsp->desc[mri].addr = cpu_to_le64(mr->iova);
+ rsp->desc[mri].key = cpu_to_le32(mr->rkey);
+ rsp->desc[mri].len = cpu_to_le32(mr->length);
+
+ /*
+ * Fill in reg MR request and chain them *backwards*
+ */
+ rwr[mri].wr.next = mri ? &rwr[mri - 1].wr : NULL;
+ rwr[mri].wr.opcode = IB_WR_REG_MR;
+ rwr[mri].wr.wr_cqe = &local_reg_cqe;
+ rwr[mri].wr.num_sge = 0;
+ rwr[mri].wr.send_flags = mri ? 0 : IB_SEND_SIGNALED;
+ rwr[mri].mr = mr;
+ rwr[mri].key = mr->rkey;
+ rwr[mri].access = (IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_WRITE);
+ reg_wr = &rwr[mri].wr;
+ }
+
+ err = rtrs_srv_create_sess_files(sess);
+ if (unlikely(err))
+ goto iu_free;
+ kobject_get(&sess->kobj);
+ get_device(&sess->srv->dev);
+ rtrs_srv_change_state(sess, RTRS_SRV_CONNECTED);
+ rtrs_srv_start_hb(sess);
+
+ /*
+	 * We do not account for the number of established connections at the
+	 * current moment; we rely on the client, which sends the info request when
+ * all connections are successfully established. Thus, simply notify
+ * listener with a proper event if we are the first path.
+ */
+ rtrs_srv_sess_up(sess);
+
+ ib_dma_sync_single_for_device(sess->s.dev->ib_dev, tx_iu->dma_addr,
+ tx_iu->size, DMA_TO_DEVICE);
+
+ /* Send info response */
+ err = rtrs_iu_post_send(&con->c, tx_iu, tx_sz, reg_wr);
+ if (unlikely(err)) {
+ rtrs_err(s, "rtrs_iu_post_send(), err: %d\n", err);
+iu_free:
+ rtrs_iu_free(tx_iu, DMA_TO_DEVICE, sess->s.dev->ib_dev, 1);
+ }
+rwr_free:
+ kfree(rwr);
+
+ return err;
+}
+
+static void rtrs_srv_info_req_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct rtrs_srv_con *con = cq->cq_context;
+ struct rtrs_sess *s = con->c.sess;
+ struct rtrs_srv_sess *sess = to_srv_sess(s);
+ struct rtrs_msg_info_req *msg;
+ struct rtrs_iu *iu;
+ int err;
+
+ WARN_ON(con->c.cid);
+
+ iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ rtrs_err(s, "Sess info request receive failed: %s\n",
+ ib_wc_status_msg(wc->status));
+ goto close;
+ }
+ WARN_ON(wc->opcode != IB_WC_RECV);
+
+ if (unlikely(wc->byte_len < sizeof(*msg))) {
+ rtrs_err(s, "Sess info request is malformed: size %d\n",
+ wc->byte_len);
+ goto close;
+ }
+ ib_dma_sync_single_for_cpu(sess->s.dev->ib_dev, iu->dma_addr,
+ iu->size, DMA_FROM_DEVICE);
+ msg = iu->buf;
+ if (unlikely(le16_to_cpu(msg->type) != RTRS_MSG_INFO_REQ)) {
+ rtrs_err(s, "Sess info request is malformed: type %d\n",
+ le16_to_cpu(msg->type));
+ goto close;
+ }
+ err = process_info_req(con, msg);
+ if (unlikely(err))
+ goto close;
+
+out:
+ rtrs_iu_free(iu, DMA_FROM_DEVICE, sess->s.dev->ib_dev, 1);
+ return;
+close:
+ close_sess(sess);
+ goto out;
+}
+
+static int post_recv_info_req(struct rtrs_srv_con *con)
+{
+ struct rtrs_sess *s = con->c.sess;
+ struct rtrs_srv_sess *sess = to_srv_sess(s);
+ struct rtrs_iu *rx_iu;
+ int err;
+
+ rx_iu = rtrs_iu_alloc(1, sizeof(struct rtrs_msg_info_req),
+ GFP_KERNEL, sess->s.dev->ib_dev,
+ DMA_FROM_DEVICE, rtrs_srv_info_req_done);
+ if (unlikely(!rx_iu))
+ return -ENOMEM;
+ /* Prepare for getting info response */
+ err = rtrs_iu_post_recv(&con->c, rx_iu);
+ if (unlikely(err)) {
+ rtrs_err(s, "rtrs_iu_post_recv(), err: %d\n", err);
+ rtrs_iu_free(rx_iu, DMA_FROM_DEVICE, sess->s.dev->ib_dev, 1);
+ return err;
+ }
+
+ return 0;
+}
+
+static int post_recv_io(struct rtrs_srv_con *con, size_t q_size)
+{
+ int i, err;
+
+ for (i = 0; i < q_size; i++) {
+ err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
+ if (unlikely(err))
+ return err;
+ }
+
+ return 0;
+}
+
+static int post_recv_sess(struct rtrs_srv_sess *sess)
+{
+ struct rtrs_srv *srv = sess->srv;
+ struct rtrs_sess *s = &sess->s;
+ size_t q_size;
+ int err, cid;
+
+ for (cid = 0; cid < sess->s.con_num; cid++) {
+ if (cid == 0)
+ q_size = SERVICE_CON_QUEUE_DEPTH;
+ else
+ q_size = srv->queue_depth;
+
+ err = post_recv_io(to_srv_con(sess->s.con[cid]), q_size);
+ if (unlikely(err)) {
+ rtrs_err(s, "post_recv_io(), err: %d\n", err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static void process_read(struct rtrs_srv_con *con,
+ struct rtrs_msg_rdma_read *msg,
+ u32 buf_id, u32 off)
+{
+ struct rtrs_sess *s = con->c.sess;
+ struct rtrs_srv_sess *sess = to_srv_sess(s);
+ struct rtrs_srv *srv = sess->srv;
+ struct rtrs_srv_ctx *ctx = srv->ctx;
+ struct rtrs_srv_op *id;
+
+ size_t usr_len, data_len;
+ void *data;
+ int ret;
+
+ if (unlikely(sess->state != RTRS_SRV_CONNECTED)) {
+ rtrs_err_rl(s,
+ "Processing read request failed, session is disconnected, sess state %s\n",
+ rtrs_srv_state_str(sess->state));
+ return;
+ }
+ if (unlikely(msg->sg_cnt != 1 && msg->sg_cnt != 0)) {
+ rtrs_err_rl(s,
+ "Processing read request failed, invalid message\n");
+ return;
+ }
+ rtrs_srv_get_ops_ids(sess);
+ rtrs_srv_update_rdma_stats(sess->stats, off, READ);
+ id = sess->ops_ids[buf_id];
+ id->con = con;
+ id->dir = READ;
+ id->msg_id = buf_id;
+ id->rd_msg = msg;
+ usr_len = le16_to_cpu(msg->usr_len);
+ data_len = off - usr_len;
+ data = page_address(srv->chunks[buf_id]);
+ ret = ctx->ops.rdma_ev(srv, srv->priv, id, READ, data, data_len,
+ data + data_len, usr_len);
+
+ if (unlikely(ret)) {
+ rtrs_err_rl(s,
+ "Processing read request failed, user module cb reported for msg_id %d, err: %d\n",
+ buf_id, ret);
+ goto send_err_msg;
+ }
+
+ return;
+
+send_err_msg:
+ ret = send_io_resp_imm(con, id, ret);
+ if (ret < 0) {
+ rtrs_err_rl(s,
+ "Sending err msg for failed RDMA-Write-Req failed, msg_id %d, err: %d\n",
+ buf_id, ret);
+ close_sess(sess);
+ }
+ rtrs_srv_put_ops_ids(sess);
+}
+
+static void process_write(struct rtrs_srv_con *con,
+ struct rtrs_msg_rdma_write *req,
+ u32 buf_id, u32 off)
+{
+ struct rtrs_sess *s = con->c.sess;
+ struct rtrs_srv_sess *sess = to_srv_sess(s);
+ struct rtrs_srv *srv = sess->srv;
+ struct rtrs_srv_ctx *ctx = srv->ctx;
+ struct rtrs_srv_op *id;
+
+ size_t data_len, usr_len;
+ void *data;
+ int ret;
+
+ if (unlikely(sess->state != RTRS_SRV_CONNECTED)) {
+ rtrs_err_rl(s,
+ "Processing write request failed, session is disconnected, sess state %s\n",
+ rtrs_srv_state_str(sess->state));
+ return;
+ }
+ rtrs_srv_get_ops_ids(sess);
+ rtrs_srv_update_rdma_stats(sess->stats, off, WRITE);
+ id = sess->ops_ids[buf_id];
+ id->con = con;
+ id->dir = WRITE;
+ id->msg_id = buf_id;
+
+ usr_len = le16_to_cpu(req->usr_len);
+ data_len = off - usr_len;
+ data = page_address(srv->chunks[buf_id]);
+ ret = ctx->ops.rdma_ev(srv, srv->priv, id, WRITE, data, data_len,
+ data + data_len, usr_len);
+ if (unlikely(ret)) {
+ rtrs_err_rl(s,
+ "Processing write request failed, user module callback reports err: %d\n",
+ ret);
+ goto send_err_msg;
+ }
+
+ return;
+
+send_err_msg:
+ ret = send_io_resp_imm(con, id, ret);
+ if (ret < 0) {
+ rtrs_err_rl(s,
+ "Processing write request failed, sending I/O response failed, msg_id %d, err: %d\n",
+ buf_id, ret);
+ close_sess(sess);
+ }
+ rtrs_srv_put_ops_ids(sess);
+}
+
+static void process_io_req(struct rtrs_srv_con *con, void *msg,
+ u32 id, u32 off)
+{
+ struct rtrs_sess *s = con->c.sess;
+ struct rtrs_srv_sess *sess = to_srv_sess(s);
+ struct rtrs_msg_rdma_hdr *hdr;
+ unsigned int type;
+
+ ib_dma_sync_single_for_cpu(sess->s.dev->ib_dev, sess->dma_addr[id],
+ max_chunk_size, DMA_BIDIRECTIONAL);
+ hdr = msg;
+ type = le16_to_cpu(hdr->type);
+
+ switch (type) {
+ case RTRS_MSG_WRITE:
+ process_write(con, msg, id, off);
+ break;
+ case RTRS_MSG_READ:
+ process_read(con, msg, id, off);
+ break;
+ default:
+ rtrs_err(s,
+ "Processing I/O request failed, unknown message type received: 0x%02x\n",
+ type);
+ goto err;
+ }
+
+ return;
+
+err:
+ close_sess(sess);
+}
+
+static void rtrs_srv_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct rtrs_srv_mr *mr =
+ container_of(wc->wr_cqe, typeof(*mr), inv_cqe);
+ struct rtrs_srv_con *con = cq->cq_context;
+ struct rtrs_sess *s = con->c.sess;
+ struct rtrs_srv_sess *sess = to_srv_sess(s);
+ struct rtrs_srv *srv = sess->srv;
+ u32 msg_id, off;
+ void *data;
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ rtrs_err(s, "Failed IB_WR_LOCAL_INV: %s\n",
+ ib_wc_status_msg(wc->status));
+ close_sess(sess);
+ }
+ msg_id = mr->msg_id;
+ off = mr->msg_off;
+ data = page_address(srv->chunks[msg_id]) + off;
+ process_io_req(con, data, msg_id, off);
+}
+
+static int rtrs_srv_inv_rkey(struct rtrs_srv_con *con,
+ struct rtrs_srv_mr *mr)
+{
+ struct ib_send_wr wr = {
+ .opcode = IB_WR_LOCAL_INV,
+ .wr_cqe = &mr->inv_cqe,
+ .send_flags = IB_SEND_SIGNALED,
+ .ex.invalidate_rkey = mr->mr->rkey,
+ };
+ mr->inv_cqe.done = rtrs_srv_inv_rkey_done;
+
+ return ib_post_send(con->c.qp, &wr, NULL);
+}
+
+static void rtrs_rdma_process_wr_wait_list(struct rtrs_srv_con *con)
+{
+ spin_lock(&con->rsp_wr_wait_lock);
+ while (!list_empty(&con->rsp_wr_wait_list)) {
+ struct rtrs_srv_op *id;
+ int ret;
+
+ id = list_entry(con->rsp_wr_wait_list.next,
+ struct rtrs_srv_op, wait_list);
+ list_del(&id->wait_list);
+
+ spin_unlock(&con->rsp_wr_wait_lock);
+ ret = rtrs_srv_resp_rdma(id, id->status);
+ spin_lock(&con->rsp_wr_wait_lock);
+
+ if (!ret) {
+ list_add(&id->wait_list, &con->rsp_wr_wait_list);
+ break;
+ }
+ }
+ spin_unlock(&con->rsp_wr_wait_lock);
+}
+
+static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct rtrs_srv_con *con = cq->cq_context;
+ struct rtrs_sess *s = con->c.sess;
+ struct rtrs_srv_sess *sess = to_srv_sess(s);
+ struct rtrs_srv *srv = sess->srv;
+ u32 imm_type, imm_payload;
+ int err;
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ if (wc->status != IB_WC_WR_FLUSH_ERR) {
+ rtrs_err(s,
+ "%s (wr_cqe: %p, type: %d, vendor_err: 0x%x, len: %u)\n",
+ ib_wc_status_msg(wc->status), wc->wr_cqe,
+ wc->opcode, wc->vendor_err, wc->byte_len);
+ close_sess(sess);
+ }
+ return;
+ }
+
+ switch (wc->opcode) {
+ case IB_WC_RECV_RDMA_WITH_IMM:
+ /*
+ * post_recv() RDMA write completions of IO reqs (read/write)
+ * and hb
+ */
+ if (WARN_ON(wc->wr_cqe != &io_comp_cqe))
+ return;
+ err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
+ if (unlikely(err)) {
+ rtrs_err(s, "rtrs_post_recv(), err: %d\n", err);
+ close_sess(sess);
+ break;
+ }
+ rtrs_from_imm(be32_to_cpu(wc->ex.imm_data),
+ &imm_type, &imm_payload);
+ if (likely(imm_type == RTRS_IO_REQ_IMM)) {
+ u32 msg_id, off;
+ void *data;
+
+ msg_id = imm_payload >> sess->mem_bits;
+ off = imm_payload & ((1 << sess->mem_bits) - 1);
+ if (unlikely(msg_id >= srv->queue_depth ||
+ off >= max_chunk_size)) {
+ rtrs_err(s, "Wrong msg_id %u, off %u\n",
+ msg_id, off);
+ close_sess(sess);
+ return;
+ }
+ if (always_invalidate) {
+ struct rtrs_srv_mr *mr = &sess->mrs[msg_id];
+
+ mr->msg_off = off;
+ mr->msg_id = msg_id;
+ err = rtrs_srv_inv_rkey(con, mr);
+ if (unlikely(err)) {
+				rtrs_err(s, "rtrs_srv_inv_rkey(), err: %d\n",
+ err);
+ close_sess(sess);
+ break;
+ }
+ } else {
+ data = page_address(srv->chunks[msg_id]) + off;
+ process_io_req(con, data, msg_id, off);
+ }
+ } else if (imm_type == RTRS_HB_MSG_IMM) {
+ WARN_ON(con->c.cid);
+ rtrs_send_hb_ack(&sess->s);
+ } else if (imm_type == RTRS_HB_ACK_IMM) {
+ WARN_ON(con->c.cid);
+ sess->s.hb_missed_cnt = 0;
+ } else {
+ rtrs_wrn(s, "Unknown IMM type %u\n", imm_type);
+ }
+ break;
+ case IB_WC_RDMA_WRITE:
+ case IB_WC_SEND:
+ /*
+ * post_send() RDMA write completions of IO reqs (read/write)
+ * and hb
+ */
+ atomic_add(srv->queue_depth, &con->sq_wr_avail);
+
+ if (unlikely(!list_empty_careful(&con->rsp_wr_wait_list)))
+ rtrs_rdma_process_wr_wait_list(con);
+
+ break;
+ default:
+ rtrs_wrn(s, "Unexpected WC type: %d\n", wc->opcode);
+ return;
+ }
+}
+
+/**
+ * rtrs_srv_get_sess_name() - Get rtrs_srv peer hostname.
+ * @srv: Session
+ * @sessname: Sessname buffer
+ * @len: Length of sessname buffer
+ */
+int rtrs_srv_get_sess_name(struct rtrs_srv *srv, char *sessname, size_t len)
+{
+ struct rtrs_srv_sess *sess;
+ int err = -ENOTCONN;
+
+ mutex_lock(&srv->paths_mutex);
+ list_for_each_entry(sess, &srv->paths_list, s.entry) {
+ if (sess->state != RTRS_SRV_CONNECTED)
+ continue;
+ strlcpy(sessname, sess->s.sessname,
+ min_t(size_t, sizeof(sess->s.sessname), len));
+ err = 0;
+ break;
+ }
+ mutex_unlock(&srv->paths_mutex);
+
+ return err;
+}
+EXPORT_SYMBOL(rtrs_srv_get_sess_name);
+
+/**
+ * rtrs_srv_get_queue_depth() - Get rtrs_srv qdepth.
+ * @srv: Session
+ */
+int rtrs_srv_get_queue_depth(struct rtrs_srv *srv)
+{
+ return srv->queue_depth;
+}
+EXPORT_SYMBOL(rtrs_srv_get_queue_depth);
+
+static int find_next_bit_ring(struct rtrs_srv_sess *sess)
+{
+ struct ib_device *ib_dev = sess->s.dev->ib_dev;
+ int v;
+
+ v = cpumask_next(sess->cur_cq_vector, &cq_affinity_mask);
+ if (v >= nr_cpu_ids || v >= ib_dev->num_comp_vectors)
+ v = cpumask_first(&cq_affinity_mask);
+ return v;
+}
+
+static int rtrs_srv_get_next_cq_vector(struct rtrs_srv_sess *sess)
+{
+ sess->cur_cq_vector = find_next_bit_ring(sess);
+
+ return sess->cur_cq_vector;
+}
+
+static struct rtrs_srv *__alloc_srv(struct rtrs_srv_ctx *ctx,
+ const uuid_t *paths_uuid)
+{
+ struct rtrs_srv *srv;
+ int i;
+
+ srv = kzalloc(sizeof(*srv), GFP_KERNEL);
+ if (!srv)
+ return NULL;
+
+ refcount_set(&srv->refcount, 1);
+ INIT_LIST_HEAD(&srv->paths_list);
+ mutex_init(&srv->paths_mutex);
+ mutex_init(&srv->paths_ev_mutex);
+ uuid_copy(&srv->paths_uuid, paths_uuid);
+ srv->queue_depth = sess_queue_depth;
+ srv->ctx = ctx;
+
+ srv->chunks = kcalloc(srv->queue_depth, sizeof(*srv->chunks),
+ GFP_KERNEL);
+ if (!srv->chunks)
+ goto err_free_srv;
+
+ for (i = 0; i < srv->queue_depth; i++) {
+ srv->chunks[i] = mempool_alloc(chunk_pool, GFP_KERNEL);
+ if (!srv->chunks[i])
+ goto err_free_chunks;
+ }
+ list_add(&srv->ctx_list, &ctx->srv_list);
+
+ return srv;
+
+err_free_chunks:
+ while (i--)
+ mempool_free(srv->chunks[i], chunk_pool);
+ kfree(srv->chunks);
+
+err_free_srv:
+ kfree(srv);
+
+ return NULL;
+}
+
+static void free_srv(struct rtrs_srv *srv)
+{
+ int i;
+
+ WARN_ON(refcount_read(&srv->refcount));
+ for (i = 0; i < srv->queue_depth; i++)
+ mempool_free(srv->chunks[i], chunk_pool);
+ kfree(srv->chunks);
+ mutex_destroy(&srv->paths_mutex);
+ mutex_destroy(&srv->paths_ev_mutex);
+ /* last put to release the srv structure */
+ put_device(&srv->dev);
+}
+
+static inline struct rtrs_srv *__find_srv_and_get(struct rtrs_srv_ctx *ctx,
+ const uuid_t *paths_uuid)
+{
+ struct rtrs_srv *srv;
+
+ list_for_each_entry(srv, &ctx->srv_list, ctx_list) {
+ if (uuid_equal(&srv->paths_uuid, paths_uuid) &&
+ refcount_inc_not_zero(&srv->refcount))
+ return srv;
+ }
+
+ return NULL;
+}
+
+static struct rtrs_srv *get_or_create_srv(struct rtrs_srv_ctx *ctx,
+ const uuid_t *paths_uuid)
+{
+ struct rtrs_srv *srv;
+
+ mutex_lock(&ctx->srv_mutex);
+ srv = __find_srv_and_get(ctx, paths_uuid);
+ if (!srv)
+ srv = __alloc_srv(ctx, paths_uuid);
+ mutex_unlock(&ctx->srv_mutex);
+
+ return srv;
+}
+
+static void put_srv(struct rtrs_srv *srv)
+{
+ if (refcount_dec_and_test(&srv->refcount)) {
+ struct rtrs_srv_ctx *ctx = srv->ctx;
+
+ WARN_ON(srv->dev.kobj.state_in_sysfs);
+
+ mutex_lock(&ctx->srv_mutex);
+ list_del(&srv->ctx_list);
+ mutex_unlock(&ctx->srv_mutex);
+ free_srv(srv);
+ }
+}
+
+static void __add_path_to_srv(struct rtrs_srv *srv,
+ struct rtrs_srv_sess *sess)
+{
+ list_add_tail(&sess->s.entry, &srv->paths_list);
+ srv->paths_num++;
+ WARN_ON(srv->paths_num >= MAX_PATHS_NUM);
+}
+
+static void del_path_from_srv(struct rtrs_srv_sess *sess)
+{
+ struct rtrs_srv *srv = sess->srv;
+
+ if (WARN_ON(!srv))
+ return;
+
+ mutex_lock(&srv->paths_mutex);
+ list_del(&sess->s.entry);
+ WARN_ON(!srv->paths_num);
+ srv->paths_num--;
+ mutex_unlock(&srv->paths_mutex);
+}
+
+/* Return 0 if the addresses are the same, non-zero otherwise */
+static int sockaddr_cmp(const struct sockaddr *a, const struct sockaddr *b)
+{
+ switch (a->sa_family) {
+ case AF_IB:
+ return memcmp(&((struct sockaddr_ib *)a)->sib_addr,
+ &((struct sockaddr_ib *)b)->sib_addr,
+ sizeof(struct ib_addr)) &&
+ (b->sa_family == AF_IB);
+ case AF_INET:
+ return memcmp(&((struct sockaddr_in *)a)->sin_addr,
+ &((struct sockaddr_in *)b)->sin_addr,
+ sizeof(struct in_addr)) &&
+ (b->sa_family == AF_INET);
+ case AF_INET6:
+ return memcmp(&((struct sockaddr_in6 *)a)->sin6_addr,
+ &((struct sockaddr_in6 *)b)->sin6_addr,
+ sizeof(struct in6_addr)) &&
+ (b->sa_family == AF_INET6);
+ default:
+ return -ENOENT;
+ }
+}
+
+static bool __is_path_w_addr_exists(struct rtrs_srv *srv,
+ struct rdma_addr *addr)
+{
+ struct rtrs_srv_sess *sess;
+
+ list_for_each_entry(sess, &srv->paths_list, s.entry)
+ if (!sockaddr_cmp((struct sockaddr *)&sess->s.dst_addr,
+ (struct sockaddr *)&addr->dst_addr) &&
+ !sockaddr_cmp((struct sockaddr *)&sess->s.src_addr,
+ (struct sockaddr *)&addr->src_addr))
+ return true;
+
+ return false;
+}
+
+static void free_sess(struct rtrs_srv_sess *sess)
+{
+ if (sess->kobj.state_in_sysfs)
+ kobject_put(&sess->kobj);
+ else
+ kfree(sess);
+}
+
+static void rtrs_srv_close_work(struct work_struct *work)
+{
+ struct rtrs_srv_sess *sess;
+ struct rtrs_srv_con *con;
+ int i;
+
+ sess = container_of(work, typeof(*sess), close_work);
+
+ rtrs_srv_destroy_sess_files(sess);
+ rtrs_srv_stop_hb(sess);
+
+ for (i = 0; i < sess->s.con_num; i++) {
+ if (!sess->s.con[i])
+ continue;
+ con = to_srv_con(sess->s.con[i]);
+ rdma_disconnect(con->c.cm_id);
+ ib_drain_qp(con->c.qp);
+ }
+ /* Wait for all inflights */
+ rtrs_srv_wait_ops_ids(sess);
+
+ /* Notify upper layer if we are the last path */
+ rtrs_srv_sess_down(sess);
+
+ unmap_cont_bufs(sess);
+ rtrs_srv_free_ops_ids(sess);
+
+ for (i = 0; i < sess->s.con_num; i++) {
+ if (!sess->s.con[i])
+ continue;
+ con = to_srv_con(sess->s.con[i]);
+ rtrs_cq_qp_destroy(&con->c);
+ rdma_destroy_id(con->c.cm_id);
+ kfree(con);
+ }
+ rtrs_ib_dev_put(sess->s.dev);
+
+ del_path_from_srv(sess);
+ put_srv(sess->srv);
+ sess->srv = NULL;
+ rtrs_srv_change_state(sess, RTRS_SRV_CLOSED);
+
+ kfree(sess->dma_addr);
+ kfree(sess->s.con);
+ free_sess(sess);
+}
+
+static int rtrs_rdma_do_accept(struct rtrs_srv_sess *sess,
+ struct rdma_cm_id *cm_id)
+{
+ struct rtrs_srv *srv = sess->srv;
+ struct rtrs_msg_conn_rsp msg;
+ struct rdma_conn_param param;
+ int err;
+
+ param = (struct rdma_conn_param) {
+ .rnr_retry_count = 7,
+ .private_data = &msg,
+ .private_data_len = sizeof(msg),
+ };
+
+ msg = (struct rtrs_msg_conn_rsp) {
+ .magic = cpu_to_le16(RTRS_MAGIC),
+ .version = cpu_to_le16(RTRS_PROTO_VER),
+ .queue_depth = cpu_to_le16(srv->queue_depth),
+ .max_io_size = cpu_to_le32(max_chunk_size - MAX_HDR_SIZE),
+ .max_hdr_size = cpu_to_le32(MAX_HDR_SIZE),
+ };
+
+ if (always_invalidate)
+ msg.flags = cpu_to_le32(RTRS_MSG_NEW_RKEY_F);
+
+ err = rdma_accept(cm_id, &param);
+ if (err)
+ pr_err("rdma_accept(), err: %d\n", err);
+
+ return err;
+}
+
+static int rtrs_rdma_do_reject(struct rdma_cm_id *cm_id, int errno)
+{
+ struct rtrs_msg_conn_rsp msg;
+ int err;
+
+ msg = (struct rtrs_msg_conn_rsp) {
+ .magic = cpu_to_le16(RTRS_MAGIC),
+ .version = cpu_to_le16(RTRS_PROTO_VER),
+ .errno = cpu_to_le16(errno),
+ };
+
+ err = rdma_reject(cm_id, &msg, sizeof(msg), IB_CM_REJ_CONSUMER_DEFINED);
+ if (err)
+ pr_err("rdma_reject(), err: %d\n", err);
+
+ /* Bounce errno back */
+ return errno;
+}
+
+static struct rtrs_srv_sess *
+__find_sess(struct rtrs_srv *srv, const uuid_t *sess_uuid)
+{
+ struct rtrs_srv_sess *sess;
+
+ list_for_each_entry(sess, &srv->paths_list, s.entry) {
+ if (uuid_equal(&sess->s.uuid, sess_uuid))
+ return sess;
+ }
+
+ return NULL;
+}
+
+static int create_con(struct rtrs_srv_sess *sess,
+ struct rdma_cm_id *cm_id,
+ unsigned int cid)
+{
+ struct rtrs_srv *srv = sess->srv;
+ struct rtrs_sess *s = &sess->s;
+ struct rtrs_srv_con *con;
+
+ u16 cq_size, wr_queue_size;
+ int err, cq_vector;
+
+ con = kzalloc(sizeof(*con), GFP_KERNEL);
+ if (!con) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ spin_lock_init(&con->rsp_wr_wait_lock);
+ INIT_LIST_HEAD(&con->rsp_wr_wait_list);
+ con->c.cm_id = cm_id;
+ con->c.sess = &sess->s;
+ con->c.cid = cid;
+ atomic_set(&con->wr_cnt, 0);
+
+ if (con->c.cid == 0) {
+ /*
+ * All receive and all send (each requiring invalidate)
+ * + 2 for drain and heartbeat
+ */
+ wr_queue_size = SERVICE_CON_QUEUE_DEPTH * 3 + 2;
+ cq_size = wr_queue_size;
+ } else {
+ /*
+		 * In the worst case all receive requests and all write
+		 * requests are posted, each read request additionally needs
+		 * an invalidate request, and one extra CQE is reserved for
+		 * draining the QP when it goes into the error state.
+ */
+ cq_size = srv->queue_depth * 3 + 1;
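+		/* e.g. 512 * 3 + 1 = 1537 CQEs with the default queue depth */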
+ /*
+ * In theory we might have queue_depth * 32
+ * outstanding requests if an unsafe global key is used
+ * and we have queue_depth read requests each consisting
+ * of 32 different addresses. div 3 for mlx5.
+ */
+ wr_queue_size = sess->s.dev->ib_dev->attrs.max_qp_wr / 3;
+ }
+ atomic_set(&con->sq_wr_avail, wr_queue_size);
+ cq_vector = rtrs_srv_get_next_cq_vector(sess);
+
+ /* TODO: SOFTIRQ can be faster, but be careful with softirq context */
+ err = rtrs_cq_qp_create(&sess->s, &con->c, 1, cq_vector, cq_size,
+ wr_queue_size, IB_POLL_WORKQUEUE);
+ if (err) {
+ rtrs_err(s, "rtrs_cq_qp_create(), err: %d\n", err);
+ goto free_con;
+ }
+ if (con->c.cid == 0) {
+ err = post_recv_info_req(con);
+ if (err)
+ goto free_cqqp;
+ }
+ WARN_ON(sess->s.con[cid]);
+ sess->s.con[cid] = &con->c;
+
+ /*
+ * Change context from server to current connection. The other
+ * way is to use cm_id->qp->qp_context, which does not work on OFED.
+ */
+ cm_id->context = &con->c;
+
+ return 0;
+
+free_cqqp:
+ rtrs_cq_qp_destroy(&con->c);
+free_con:
+ kfree(con);
+
+err:
+ return err;
+}
+
+static struct rtrs_srv_sess *__alloc_sess(struct rtrs_srv *srv,
+ struct rdma_cm_id *cm_id,
+ unsigned int con_num,
+ unsigned int recon_cnt,
+ const uuid_t *uuid)
+{
+ struct rtrs_srv_sess *sess;
+ int err = -ENOMEM;
+
+ if (srv->paths_num >= MAX_PATHS_NUM) {
+ err = -ECONNRESET;
+ goto err;
+ }
+ if (__is_path_w_addr_exists(srv, &cm_id->route.addr)) {
+ err = -EEXIST;
+ pr_err("Path with same addr exists\n");
+ goto err;
+ }
+ sess = kzalloc(sizeof(*sess), GFP_KERNEL);
+ if (!sess)
+ goto err;
+
+ sess->stats = kzalloc(sizeof(*sess->stats), GFP_KERNEL);
+ if (!sess->stats)
+ goto err_free_sess;
+
+ sess->stats->sess = sess;
+
+ sess->dma_addr = kcalloc(srv->queue_depth, sizeof(*sess->dma_addr),
+ GFP_KERNEL);
+ if (!sess->dma_addr)
+ goto err_free_stats;
+
+ sess->s.con = kcalloc(con_num, sizeof(*sess->s.con), GFP_KERNEL);
+ if (!sess->s.con)
+ goto err_free_dma_addr;
+
+ sess->state = RTRS_SRV_CONNECTING;
+ sess->srv = srv;
+ sess->cur_cq_vector = -1;
+ sess->s.dst_addr = cm_id->route.addr.dst_addr;
+ sess->s.src_addr = cm_id->route.addr.src_addr;
+ sess->s.con_num = con_num;
+ sess->s.recon_cnt = recon_cnt;
+ uuid_copy(&sess->s.uuid, uuid);
+ spin_lock_init(&sess->state_lock);
+ INIT_WORK(&sess->close_work, rtrs_srv_close_work);
+ rtrs_srv_init_hb(sess);
+
+ sess->s.dev = rtrs_ib_dev_find_or_add(cm_id->device, &dev_pd);
+ if (!sess->s.dev) {
+ err = -ENOMEM;
+ goto err_free_con;
+ }
+ err = map_cont_bufs(sess);
+ if (err)
+ goto err_put_dev;
+
+ err = rtrs_srv_alloc_ops_ids(sess);
+ if (err)
+ goto err_unmap_bufs;
+
+ __add_path_to_srv(srv, sess);
+
+ return sess;
+
+err_unmap_bufs:
+ unmap_cont_bufs(sess);
+err_put_dev:
+ rtrs_ib_dev_put(sess->s.dev);
+err_free_con:
+ kfree(sess->s.con);
+err_free_dma_addr:
+ kfree(sess->dma_addr);
+err_free_stats:
+ kfree(sess->stats);
+err_free_sess:
+ kfree(sess);
+err:
+ return ERR_PTR(err);
+}
+
+static int rtrs_rdma_connect(struct rdma_cm_id *cm_id,
+ const struct rtrs_msg_conn_req *msg,
+ size_t len)
+{
+ struct rtrs_srv_ctx *ctx = cm_id->context;
+ struct rtrs_srv_sess *sess;
+ struct rtrs_srv *srv;
+
+ u16 version, con_num, cid;
+ u16 recon_cnt;
+ int err;
+
+ if (len < sizeof(*msg)) {
+ pr_err("Invalid RTRS connection request\n");
+ goto reject_w_econnreset;
+ }
+ if (le16_to_cpu(msg->magic) != RTRS_MAGIC) {
+ pr_err("Invalid RTRS magic\n");
+ goto reject_w_econnreset;
+ }
+ version = le16_to_cpu(msg->version);
+ if (version >> 8 != RTRS_PROTO_VER_MAJOR) {
+ pr_err("Unsupported major RTRS version: %d, expected %d\n",
+ version >> 8, RTRS_PROTO_VER_MAJOR);
+ goto reject_w_econnreset;
+ }
+ con_num = le16_to_cpu(msg->cid_num);
+ if (con_num > 4096) {
+ /* Sanity check */
+ pr_err("Too many connections requested: %d\n", con_num);
+ goto reject_w_econnreset;
+ }
+ cid = le16_to_cpu(msg->cid);
+ if (cid >= con_num) {
+ /* Sanity check */
+ pr_err("Incorrect cid: %d >= %d\n", cid, con_num);
+ goto reject_w_econnreset;
+ }
+ recon_cnt = le16_to_cpu(msg->recon_cnt);
+ srv = get_or_create_srv(ctx, &msg->paths_uuid);
+ if (!srv) {
+ err = -ENOMEM;
+ goto reject_w_err;
+ }
+ mutex_lock(&srv->paths_mutex);
+ sess = __find_sess(srv, &msg->sess_uuid);
+ if (sess) {
+ struct rtrs_sess *s = &sess->s;
+
+ /* Session already holds a reference */
+ put_srv(srv);
+
+ if (sess->state != RTRS_SRV_CONNECTING) {
+ rtrs_err(s, "Session in wrong state: %s\n",
+ rtrs_srv_state_str(sess->state));
+ mutex_unlock(&srv->paths_mutex);
+ goto reject_w_econnreset;
+ }
+ /*
+ * Sanity checks
+ */
+ if (con_num != s->con_num || cid >= s->con_num) {
+ rtrs_err(s, "Incorrect request: %d, %d\n",
+ cid, con_num);
+ mutex_unlock(&srv->paths_mutex);
+ goto reject_w_econnreset;
+ }
+ if (s->con[cid]) {
+ rtrs_err(s, "Connection already exists: %d\n",
+ cid);
+ mutex_unlock(&srv->paths_mutex);
+ goto reject_w_econnreset;
+ }
+ } else {
+ sess = __alloc_sess(srv, cm_id, con_num, recon_cnt,
+ &msg->sess_uuid);
+ if (IS_ERR(sess)) {
+ mutex_unlock(&srv->paths_mutex);
+ put_srv(srv);
+ err = PTR_ERR(sess);
+ goto reject_w_err;
+ }
+ }
+ err = create_con(sess, cm_id, cid);
+ if (err) {
+ (void)rtrs_rdma_do_reject(cm_id, err);
+ /*
+ * Since the session has other connections we follow the normal path
+ * through the workqueue, but still return an error to tell cma.c
+ * to call rdma_destroy_id() for the current connection.
+ */
+ goto close_and_return_err;
+ }
+ err = rtrs_rdma_do_accept(sess, cm_id);
+ if (err) {
+ (void)rtrs_rdma_do_reject(cm_id, err);
+ /*
+ * Since the current connection was successfully added to the
+ * session we follow the normal path through the workqueue to close
+ * the session, so return 0 to tell cma.c that we call
+ * rdma_destroy_id() ourselves.
+ */
+ err = 0;
+ goto close_and_return_err;
+ }
+ mutex_unlock(&srv->paths_mutex);
+
+ return 0;
+
+reject_w_err:
+ return rtrs_rdma_do_reject(cm_id, err);
+
+reject_w_econnreset:
+ return rtrs_rdma_do_reject(cm_id, -ECONNRESET);
+
+close_and_return_err:
+ close_sess(sess);
+ mutex_unlock(&srv->paths_mutex);
+
+ return err;
+}
+
+static int rtrs_srv_rdma_cm_handler(struct rdma_cm_id *cm_id,
+ struct rdma_cm_event *ev)
+{
+ struct rtrs_srv_sess *sess = NULL;
+ struct rtrs_sess *s = NULL;
+
+ if (ev->event != RDMA_CM_EVENT_CONNECT_REQUEST) {
+ struct rtrs_con *c = cm_id->context;
+
+ s = c->sess;
+ sess = to_srv_sess(s);
+ }
+
+ switch (ev->event) {
+ case RDMA_CM_EVENT_CONNECT_REQUEST:
+ /*
+ * In case of error cma.c will destroy cm_id,
+ * see cma_process_remove()
+ */
+ return rtrs_rdma_connect(cm_id, ev->param.conn.private_data,
+ ev->param.conn.private_data_len);
+ case RDMA_CM_EVENT_ESTABLISHED:
+ /* Nothing here */
+ break;
+ case RDMA_CM_EVENT_REJECTED:
+ case RDMA_CM_EVENT_CONNECT_ERROR:
+ case RDMA_CM_EVENT_UNREACHABLE:
+ rtrs_err(s, "CM error (CM event: %s, err: %d)\n",
+ rdma_event_msg(ev->event), ev->status);
+ close_sess(sess);
+ break;
+ case RDMA_CM_EVENT_DISCONNECTED:
+ case RDMA_CM_EVENT_ADDR_CHANGE:
+ case RDMA_CM_EVENT_TIMEWAIT_EXIT:
+ close_sess(sess);
+ break;
+ case RDMA_CM_EVENT_DEVICE_REMOVAL:
+ close_sess(sess);
+ break;
+ default:
+ pr_err("Ignoring unexpected CM event %s, err %d\n",
+ rdma_event_msg(ev->event), ev->status);
+ break;
+ }
+
+ return 0;
+}
+
+static struct rdma_cm_id *rtrs_srv_cm_init(struct rtrs_srv_ctx *ctx,
+ struct sockaddr *addr,
+ enum rdma_ucm_port_space ps)
+{
+ struct rdma_cm_id *cm_id;
+ int ret;
+
+ cm_id = rdma_create_id(&init_net, rtrs_srv_rdma_cm_handler,
+ ctx, ps, IB_QPT_RC);
+ if (IS_ERR(cm_id)) {
+ ret = PTR_ERR(cm_id);
+ pr_err("Creating id for RDMA connection failed, err: %d\n",
+ ret);
+ goto err_out;
+ }
+ ret = rdma_bind_addr(cm_id, addr);
+ if (ret) {
+ pr_err("Binding RDMA address failed, err: %d\n", ret);
+ goto err_cm;
+ }
+ ret = rdma_listen(cm_id, 64);
+ if (ret) {
+ pr_err("Listening on RDMA connection failed, err: %d\n",
+ ret);
+ goto err_cm;
+ }
+
+ return cm_id;
+
+err_cm:
+ rdma_destroy_id(cm_id);
+err_out:
+
+ return ERR_PTR(ret);
+}
+
+static int rtrs_srv_rdma_init(struct rtrs_srv_ctx *ctx, u16 port)
+{
+ struct sockaddr_in6 sin = {
+ .sin6_family = AF_INET6,
+ .sin6_addr = IN6ADDR_ANY_INIT,
+ .sin6_port = htons(port),
+ };
+ struct sockaddr_ib sib = {
+ .sib_family = AF_IB,
+ .sib_sid = cpu_to_be64(RDMA_IB_IP_PS_IB | port),
+ .sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL),
+ .sib_pkey = cpu_to_be16(0xffff),
+ };
+ struct rdma_cm_id *cm_ip, *cm_ib;
+ int ret;
+
+ /*
+ * We accept both IPoIB and IB connections, so we need to keep
+ * two CM IDs, one for each socket type and port space.
+ * If the CM initialization of one of the IDs fails, we abort
+ * everything.
+ */
+ cm_ip = rtrs_srv_cm_init(ctx, (struct sockaddr *)&sin, RDMA_PS_TCP);
+ if (IS_ERR(cm_ip))
+ return PTR_ERR(cm_ip);
+
+ cm_ib = rtrs_srv_cm_init(ctx, (struct sockaddr *)&sib, RDMA_PS_IB);
+ if (IS_ERR(cm_ib)) {
+ ret = PTR_ERR(cm_ib);
+ goto free_cm_ip;
+ }
+
+ ctx->cm_id_ip = cm_ip;
+ ctx->cm_id_ib = cm_ib;
+
+ return 0;
+
+free_cm_ip:
+ rdma_destroy_id(cm_ip);
+
+ return ret;
+}
+
+static struct rtrs_srv_ctx *alloc_srv_ctx(struct rtrs_srv_ops *ops)
+{
+ struct rtrs_srv_ctx *ctx;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return NULL;
+
+ ctx->ops = *ops;
+ mutex_init(&ctx->srv_mutex);
+ INIT_LIST_HEAD(&ctx->srv_list);
+
+ return ctx;
+}
+
+static void free_srv_ctx(struct rtrs_srv_ctx *ctx)
+{
+ WARN_ON(!list_empty(&ctx->srv_list));
+ mutex_destroy(&ctx->srv_mutex);
+ kfree(ctx);
+}
+
+/**
+ * rtrs_srv_open() - open RTRS server context
+ * @ops: callback functions
+ * @port: port to listen on
+ *
+ * Creates server context with specified callbacks.
+ *
+ * Returns a valid pointer on success, otherwise an error pointer (ERR_PTR).
+ */
+struct rtrs_srv_ctx *rtrs_srv_open(struct rtrs_srv_ops *ops, u16 port)
+{
+ struct rtrs_srv_ctx *ctx;
+ int err;
+
+ ctx = alloc_srv_ctx(ops);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+
+ err = rtrs_srv_rdma_init(ctx, port);
+ if (err) {
+ free_srv_ctx(ctx);
+ return ERR_PTR(err);
+ }
+
+ return ctx;
+}
+EXPORT_SYMBOL(rtrs_srv_open);
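+
+/*
+ * Minimal usage sketch (illustrative; my_rdma_ev()/my_link_ev() are
+ * hypothetical user callbacks, not defined here):
+ *
+ *   static struct rtrs_srv_ops ops = {
+ *           .rdma_ev = my_rdma_ev,
+ *           .link_ev = my_link_ev,
+ *   };
+ *   struct rtrs_srv_ctx *ctx;
+ *
+ *   ctx = rtrs_srv_open(&ops, port);
+ *   if (IS_ERR(ctx))
+ *           return PTR_ERR(ctx);
+ *   ...
+ *   rtrs_srv_close(ctx);
+ */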
+
+static void close_sessions(struct rtrs_srv *srv)
+{
+ struct rtrs_srv_sess *sess;
+
+ mutex_lock(&srv->paths_mutex);
+ list_for_each_entry(sess, &srv->paths_list, s.entry)
+ close_sess(sess);
+ mutex_unlock(&srv->paths_mutex);
+}
+
+static void close_ctx(struct rtrs_srv_ctx *ctx)
+{
+ struct rtrs_srv *srv;
+
+ mutex_lock(&ctx->srv_mutex);
+ list_for_each_entry(srv, &ctx->srv_list, ctx_list)
+ close_sessions(srv);
+ mutex_unlock(&ctx->srv_mutex);
+ flush_workqueue(rtrs_wq);
+}
+
+/**
+ * rtrs_srv_close() - close RTRS server context
+ * @ctx: pointer to server context
+ *
+ * Closes RTRS server context with all client sessions.
+ */
+void rtrs_srv_close(struct rtrs_srv_ctx *ctx)
+{
+ rdma_destroy_id(ctx->cm_id_ip);
+ rdma_destroy_id(ctx->cm_id_ib);
+ close_ctx(ctx);
+ free_srv_ctx(ctx);
+}
+EXPORT_SYMBOL(rtrs_srv_close);
+
+static int check_module_params(void)
+{
+ if (sess_queue_depth < 1 || sess_queue_depth > MAX_SESS_QUEUE_DEPTH) {
+ pr_err("Invalid sess_queue_depth value %d, has to be >= %d, <= %d.\n",
+ sess_queue_depth, 1, MAX_SESS_QUEUE_DEPTH);
+ return -EINVAL;
+ }
+ if (max_chunk_size < 4096 || !is_power_of_2(max_chunk_size)) {
+ pr_err("Invalid max_chunk_size value %d, has to be >= %d and should be power of two.\n",
+ max_chunk_size, 4096);
+ return -EINVAL;
+ }
+
+ /*
+ * Check if IB immediate data size is enough to hold the mem_id and the
+ * offset inside the memory chunk
+ */
+ if ((ilog2(sess_queue_depth - 1) + 1) +
+ (ilog2(max_chunk_size - 1) + 1) > MAX_IMM_PAYL_BITS) {
+ pr_err("RDMA immediate size (%db) not enough to encode %d buffers of size %dB. Reduce 'sess_queue_depth' or 'max_chunk_size' parameters.\n",
+ MAX_IMM_PAYL_BITS, sess_queue_depth, max_chunk_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
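+
+/*
+ * Worked example of the check above (illustrative numbers only): with
+ * sess_queue_depth = 512 and max_chunk_size = 128 KiB the encoding needs
+ * (ilog2(511) + 1) + (ilog2(131071) + 1) = 9 + 17 = 26 bits of immediate
+ * payload, which must fit into MAX_IMM_PAYL_BITS.
+ */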
+
+static int __init rtrs_server_init(void)
+{
+ int err;
+
+ pr_info("Loading module %s, proto %s: (max_chunk_size: %d (pure IO %ld, headers %ld) , sess_queue_depth: %d, always_invalidate: %d)\n",
+ KBUILD_MODNAME, RTRS_PROTO_VER_STRING,
+ max_chunk_size, max_chunk_size - MAX_HDR_SIZE, MAX_HDR_SIZE,
+ sess_queue_depth, always_invalidate);
+
+ rtrs_rdma_dev_pd_init(0, &dev_pd);
+
+ err = check_module_params();
+ if (err) {
+ pr_err("Failed to load module, invalid module parameters, err: %d\n",
+ err);
+ return err;
+ }
+ chunk_pool = mempool_create_page_pool(sess_queue_depth * CHUNK_POOL_SZ,
+ get_order(max_chunk_size));
+ if (!chunk_pool)
+ return -ENOMEM;
+ rtrs_dev_class = class_create(THIS_MODULE, "rtrs-server");
+ if (IS_ERR(rtrs_dev_class)) {
+ err = PTR_ERR(rtrs_dev_class);
+ goto out_chunk_pool;
+ }
+ rtrs_wq = alloc_workqueue("rtrs_server_wq", WQ_MEM_RECLAIM, 0);
+ if (!rtrs_wq) {
+ err = -ENOMEM;
+ goto out_dev_class;
+ }
+
+ return 0;
+
+out_dev_class:
+ class_destroy(rtrs_dev_class);
+out_chunk_pool:
+ mempool_destroy(chunk_pool);
+
+ return err;
+}
+
+static void __exit rtrs_server_exit(void)
+{
+ destroy_workqueue(rtrs_wq);
+ class_destroy(rtrs_dev_class);
+ mempool_destroy(chunk_pool);
+ rtrs_rdma_dev_pd_deinit(&dev_pd);
+}
+
+module_init(rtrs_server_init);
+module_exit(rtrs_server_exit);
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.h b/drivers/infiniband/ulp/rtrs/rtrs-srv.h
new file mode 100644
index 000000000000..dc95b0932f0d
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.h
@@ -0,0 +1,148 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+
+#ifndef RTRS_SRV_H
+#define RTRS_SRV_H
+
+#include <linux/device.h>
+#include <linux/refcount.h>
+#include "rtrs-pri.h"
+
+/*
+ * enum rtrs_srv_state - Server states.
+ */
+enum rtrs_srv_state {
+ RTRS_SRV_CONNECTING,
+ RTRS_SRV_CONNECTED,
+ RTRS_SRV_CLOSING,
+ RTRS_SRV_CLOSED,
+};
+
+/* Stats for read and write operations.
+ * See Documentation/ABI/testing/sysfs-class-rtrs-server for details.
+ */
+struct rtrs_srv_stats_rdma_stats {
+ struct {
+ atomic64_t cnt;
+ atomic64_t size_total;
+ } dir[2];
+};
+
+struct rtrs_srv_stats {
+ struct kobject kobj_stats;
+ struct rtrs_srv_stats_rdma_stats rdma_stats;
+ struct rtrs_srv_sess *sess;
+};
+
+struct rtrs_srv_con {
+ struct rtrs_con c;
+ atomic_t wr_cnt;
+ atomic_t sq_wr_avail;
+ struct list_head rsp_wr_wait_list;
+ spinlock_t rsp_wr_wait_lock;
+};
+
+/* IO context in rtrs_srv, each io has one */
+struct rtrs_srv_op {
+ struct rtrs_srv_con *con;
+ u32 msg_id;
+ u8 dir;
+ struct rtrs_msg_rdma_read *rd_msg;
+ struct ib_rdma_wr tx_wr;
+ struct ib_sge tx_sg;
+ struct list_head wait_list;
+ int status;
+};
+
+/*
+ * Server-side memory region context; when always_invalidate=Y, we need
+ * queue_depth memory regions so that each one can be invalidated.
+ */
+struct rtrs_srv_mr {
+ struct ib_mr *mr;
+ struct sg_table sgt;
+ struct ib_cqe inv_cqe; /* only for always_invalidate=true */
+ u32 msg_id; /* only for always_invalidate=true */
+ u32 msg_off; /* only for always_invalidate=true */
+ struct rtrs_iu *iu; /* send buffer for new rkey msg */
+};
+
+struct rtrs_srv_sess {
+ struct rtrs_sess s;
+ struct rtrs_srv *srv;
+ struct work_struct close_work;
+ enum rtrs_srv_state state;
+ spinlock_t state_lock;
+ int cur_cq_vector;
+ struct rtrs_srv_op **ops_ids;
+ atomic_t ids_inflight;
+ wait_queue_head_t ids_waitq;
+ struct rtrs_srv_mr *mrs;
+ unsigned int mrs_num;
+ dma_addr_t *dma_addr;
+ bool established;
+ unsigned int mem_bits;
+ struct kobject kobj;
+ struct rtrs_srv_stats *stats;
+};
+
+struct rtrs_srv {
+ struct list_head paths_list;
+ int paths_up;
+ struct mutex paths_ev_mutex;
+ size_t paths_num;
+ struct mutex paths_mutex;
+ uuid_t paths_uuid;
+ refcount_t refcount;
+ struct rtrs_srv_ctx *ctx;
+ struct list_head ctx_list;
+ void *priv;
+ size_t queue_depth;
+ struct page **chunks;
+ struct device dev;
+ unsigned int dev_ref;
+ struct kobject *kobj_paths;
+};
+
+struct rtrs_srv_ctx {
+ struct rtrs_srv_ops ops;
+ struct rdma_cm_id *cm_id_ip;
+ struct rdma_cm_id *cm_id_ib;
+ struct mutex srv_mutex;
+ struct list_head srv_list;
+};
+
+extern struct class *rtrs_dev_class;
+
+void close_sess(struct rtrs_srv_sess *sess);
+
+static inline void rtrs_srv_update_rdma_stats(struct rtrs_srv_stats *s,
+ size_t size, int d)
+{
+ atomic64_inc(&s->rdma_stats.dir[d].cnt);
+ atomic64_add(size, &s->rdma_stats.dir[d].size_total);
+}
+
+/* functions which are implemented in rtrs-srv-stats.c */
+int rtrs_srv_reset_rdma_stats(struct rtrs_srv_stats *stats, bool enable);
+ssize_t rtrs_srv_stats_rdma_to_str(struct rtrs_srv_stats *stats,
+ char *page, size_t len);
+int rtrs_srv_reset_wc_completion_stats(struct rtrs_srv_stats *stats,
+ bool enable);
+int rtrs_srv_stats_wc_completion_to_str(struct rtrs_srv_stats *stats, char *buf,
+ size_t len);
+int rtrs_srv_reset_all_stats(struct rtrs_srv_stats *stats, bool enable);
+ssize_t rtrs_srv_reset_all_help(struct rtrs_srv_stats *stats,
+ char *page, size_t len);
+
+/* functions which are implemented in rtrs-srv-sysfs.c */
+int rtrs_srv_create_sess_files(struct rtrs_srv_sess *sess);
+void rtrs_srv_destroy_sess_files(struct rtrs_srv_sess *sess);
+
+#endif /* RTRS_SRV_H */
diff --git a/drivers/infiniband/ulp/rtrs/rtrs.c b/drivers/infiniband/ulp/rtrs/rtrs.c
new file mode 100644
index 000000000000..ff1093d6e4bc
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs.c
@@ -0,0 +1,612 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
+
+#include <linux/module.h>
+#include <linux/inet.h>
+
+#include "rtrs-pri.h"
+#include "rtrs-log.h"
+
+MODULE_DESCRIPTION("RDMA Transport Core");
+MODULE_LICENSE("GPL");
+
+struct rtrs_iu *rtrs_iu_alloc(u32 queue_size, size_t size, gfp_t gfp_mask,
+ struct ib_device *dma_dev,
+ enum dma_data_direction dir,
+ void (*done)(struct ib_cq *cq, struct ib_wc *wc))
+{
+ struct rtrs_iu *ius, *iu;
+ int i;
+
+ ius = kcalloc(queue_size, sizeof(*ius), gfp_mask);
+ if (!ius)
+ return NULL;
+ for (i = 0; i < queue_size; i++) {
+ iu = &ius[i];
+ iu->buf = kzalloc(size, gfp_mask);
+ if (!iu->buf)
+ goto err;
+
+ iu->dma_addr = ib_dma_map_single(dma_dev, iu->buf, size, dir);
+ if (ib_dma_mapping_error(dma_dev, iu->dma_addr))
+ goto err;
+
+ iu->cqe.done = done;
+ iu->size = size;
+ iu->direction = dir;
+ }
+ return ius;
+err:
+ rtrs_iu_free(ius, dir, dma_dev, i);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(rtrs_iu_alloc);
+
+void rtrs_iu_free(struct rtrs_iu *ius, enum dma_data_direction dir,
+ struct ib_device *ibdev, u32 queue_size)
+{
+ struct rtrs_iu *iu;
+ int i;
+
+ if (!ius)
+ return;
+
+ for (i = 0; i < queue_size; i++) {
+ iu = &ius[i];
+ ib_dma_unmap_single(ibdev, iu->dma_addr, iu->size, dir);
+ kfree(iu->buf);
+ }
+ kfree(ius);
+}
+EXPORT_SYMBOL_GPL(rtrs_iu_free);
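+
+/*
+ * Illustrative alloc/free pairing (hypothetical sizes and completion
+ * handler, error handling elided):
+ *
+ *   ius = rtrs_iu_alloc(queue_size, msg_size, GFP_KERNEL,
+ *                       sess->dev->ib_dev, DMA_FROM_DEVICE, recv_done);
+ *   ...
+ *   rtrs_iu_free(ius, DMA_FROM_DEVICE, sess->dev->ib_dev, queue_size);
+ */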
+
+int rtrs_iu_post_recv(struct rtrs_con *con, struct rtrs_iu *iu)
+{
+ struct rtrs_sess *sess = con->sess;
+ struct ib_recv_wr wr;
+ struct ib_sge list;
+
+ list.addr = iu->dma_addr;
+ list.length = iu->size;
+ list.lkey = sess->dev->ib_pd->local_dma_lkey;
+
+ if (list.length == 0) {
+ rtrs_wrn(con->sess,
+ "Posting receive work request failed, sg list is empty\n");
+ return -EINVAL;
+ }
+ wr = (struct ib_recv_wr) {
+ .wr_cqe = &iu->cqe,
+ .sg_list = &list,
+ .num_sge = 1,
+ };
+
+ return ib_post_recv(con->qp, &wr, NULL);
+}
+EXPORT_SYMBOL_GPL(rtrs_iu_post_recv);
+
+int rtrs_post_recv_empty(struct rtrs_con *con, struct ib_cqe *cqe)
+{
+ struct ib_recv_wr wr;
+
+ wr = (struct ib_recv_wr) {
+ .wr_cqe = cqe,
+ };
+
+ return ib_post_recv(con->qp, &wr, NULL);
+}
+EXPORT_SYMBOL_GPL(rtrs_post_recv_empty);
+
+int rtrs_iu_post_send(struct rtrs_con *con, struct rtrs_iu *iu, size_t size,
+ struct ib_send_wr *head)
+{
+ struct rtrs_sess *sess = con->sess;
+ struct ib_send_wr wr;
+ struct ib_sge list;
+
+ if (WARN_ON(size == 0))
+ return -EINVAL;
+
+ list.addr = iu->dma_addr;
+ list.length = size;
+ list.lkey = sess->dev->ib_pd->local_dma_lkey;
+
+ wr = (struct ib_send_wr) {
+ .wr_cqe = &iu->cqe,
+ .sg_list = &list,
+ .num_sge = 1,
+ .opcode = IB_WR_SEND,
+ .send_flags = IB_SEND_SIGNALED,
+ };
+
+ if (head) {
+ struct ib_send_wr *tail = head;
+
+ while (tail->next)
+ tail = tail->next;
+ tail->next = &wr;
+ } else {
+ head = &wr;
+ }
+
+ return ib_post_send(con->qp, head, NULL);
+}
+EXPORT_SYMBOL_GPL(rtrs_iu_post_send);
+
+int rtrs_iu_post_rdma_write_imm(struct rtrs_con *con, struct rtrs_iu *iu,
+ struct ib_sge *sge, unsigned int num_sge,
+ u32 rkey, u64 rdma_addr, u32 imm_data,
+ enum ib_send_flags flags,
+ struct ib_send_wr *head)
+{
+ struct ib_rdma_wr wr;
+ int i;
+
+ wr = (struct ib_rdma_wr) {
+ .wr.wr_cqe = &iu->cqe,
+ .wr.sg_list = sge,
+ .wr.num_sge = num_sge,
+ .rkey = rkey,
+ .remote_addr = rdma_addr,
+ .wr.opcode = IB_WR_RDMA_WRITE_WITH_IMM,
+ .wr.ex.imm_data = cpu_to_be32(imm_data),
+ .wr.send_flags = flags,
+ };
+
+ /*
+ * If one of the sges has 0 size, the operation will fail with a
+ * length error
+ */
+ for (i = 0; i < num_sge; i++)
+ if (WARN_ON(sge[i].length == 0))
+ return -EINVAL;
+
+ if (head) {
+ struct ib_send_wr *tail = head;
+
+ while (tail->next)
+ tail = tail->next;
+ tail->next = &wr.wr;
+ } else {
+ head = &wr.wr;
+ }
+
+ return ib_post_send(con->qp, head, NULL);
+}
+EXPORT_SYMBOL_GPL(rtrs_iu_post_rdma_write_imm);
+
+int rtrs_post_rdma_write_imm_empty(struct rtrs_con *con, struct ib_cqe *cqe,
+ u32 imm_data, enum ib_send_flags flags,
+ struct ib_send_wr *head)
+{
+ struct ib_send_wr wr;
+
+ wr = (struct ib_send_wr) {
+ .wr_cqe = cqe,
+ .send_flags = flags,
+ .opcode = IB_WR_RDMA_WRITE_WITH_IMM,
+ .ex.imm_data = cpu_to_be32(imm_data),
+ };
+
+ if (head) {
+ struct ib_send_wr *tail = head;
+
+ while (tail->next)
+ tail = tail->next;
+ tail->next = &wr;
+ } else {
+ head = &wr;
+ }
+
+ return ib_post_send(con->qp, head, NULL);
+}
+EXPORT_SYMBOL_GPL(rtrs_post_rdma_write_imm_empty);
+
+static void qp_event_handler(struct ib_event *ev, void *ctx)
+{
+ struct rtrs_con *con = ctx;
+
+ switch (ev->event) {
+ case IB_EVENT_COMM_EST:
+ rtrs_info(con->sess, "QP event %s (%d) received\n",
+ ib_event_msg(ev->event), ev->event);
+ rdma_notify(con->cm_id, IB_EVENT_COMM_EST);
+ break;
+ default:
+ rtrs_info(con->sess, "Unhandled QP event %s (%d) received\n",
+ ib_event_msg(ev->event), ev->event);
+ break;
+ }
+}
+
+static int create_cq(struct rtrs_con *con, int cq_vector, u16 cq_size,
+ enum ib_poll_context poll_ctx)
+{
+ struct rdma_cm_id *cm_id = con->cm_id;
+ struct ib_cq *cq;
+
+ cq = ib_alloc_cq(cm_id->device, con, cq_size,
+ cq_vector, poll_ctx);
+ if (IS_ERR(cq)) {
+ rtrs_err(con->sess, "Creating completion queue failed, errno: %ld\n",
+ PTR_ERR(cq));
+ return PTR_ERR(cq);
+ }
+ con->cq = cq;
+
+ return 0;
+}
+
+static int create_qp(struct rtrs_con *con, struct ib_pd *pd,
+ u16 wr_queue_size, u32 max_sge)
+{
+ struct ib_qp_init_attr init_attr = {NULL};
+ struct rdma_cm_id *cm_id = con->cm_id;
+ int ret;
+
+ init_attr.cap.max_send_wr = wr_queue_size;
+ init_attr.cap.max_recv_wr = wr_queue_size;
+ init_attr.cap.max_recv_sge = 1;
+ init_attr.event_handler = qp_event_handler;
+ init_attr.qp_context = con;
+ init_attr.cap.max_send_sge = max_sge;
+
+ init_attr.qp_type = IB_QPT_RC;
+ init_attr.send_cq = con->cq;
+ init_attr.recv_cq = con->cq;
+ init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
+
+ ret = rdma_create_qp(cm_id, pd, &init_attr);
+ if (ret) {
+ rtrs_err(con->sess, "Creating QP failed, err: %d\n", ret);
+ return ret;
+ }
+ con->qp = cm_id->qp;
+
+ return ret;
+}
+
+int rtrs_cq_qp_create(struct rtrs_sess *sess, struct rtrs_con *con,
+ u32 max_send_sge, int cq_vector, u16 cq_size,
+ u16 wr_queue_size, enum ib_poll_context poll_ctx)
+{
+ int err;
+
+ err = create_cq(con, cq_vector, cq_size, poll_ctx);
+ if (err)
+ return err;
+
+ err = create_qp(con, sess->dev->ib_pd, wr_queue_size, max_send_sge);
+ if (err) {
+ ib_free_cq(con->cq);
+ con->cq = NULL;
+ return err;
+ }
+ con->sess = sess;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rtrs_cq_qp_create);
+
+void rtrs_cq_qp_destroy(struct rtrs_con *con)
+{
+ if (con->qp) {
+ rdma_destroy_qp(con->cm_id);
+ con->qp = NULL;
+ }
+ if (con->cq) {
+ ib_free_cq(con->cq);
+ con->cq = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(rtrs_cq_qp_destroy);
+
+static void schedule_hb(struct rtrs_sess *sess)
+{
+ queue_delayed_work(sess->hb_wq, &sess->hb_dwork,
+ msecs_to_jiffies(sess->hb_interval_ms));
+}
+
+void rtrs_send_hb_ack(struct rtrs_sess *sess)
+{
+ struct rtrs_con *usr_con = sess->con[0];
+ u32 imm;
+ int err;
+
+ imm = rtrs_to_imm(RTRS_HB_ACK_IMM, 0);
+ err = rtrs_post_rdma_write_imm_empty(usr_con, sess->hb_cqe, imm,
+ IB_SEND_SIGNALED, NULL);
+ if (err) {
+ sess->hb_err_handler(usr_con);
+ return;
+ }
+}
+EXPORT_SYMBOL_GPL(rtrs_send_hb_ack);
+
+static void hb_work(struct work_struct *work)
+{
+ struct rtrs_con *usr_con;
+ struct rtrs_sess *sess;
+ u32 imm;
+ int err;
+
+ sess = container_of(to_delayed_work(work), typeof(*sess), hb_dwork);
+ usr_con = sess->con[0];
+
+ if (sess->hb_missed_cnt > sess->hb_missed_max) {
+ sess->hb_err_handler(usr_con);
+ return;
+ }
+ if (sess->hb_missed_cnt++) {
+ /* Reschedule work without sending hb */
+ schedule_hb(sess);
+ return;
+ }
+ imm = rtrs_to_imm(RTRS_HB_MSG_IMM, 0);
+ err = rtrs_post_rdma_write_imm_empty(usr_con, sess->hb_cqe, imm,
+ IB_SEND_SIGNALED, NULL);
+ if (err) {
+ sess->hb_err_handler(usr_con);
+ return;
+ }
+
+ schedule_hb(sess);
+}
+
+void rtrs_init_hb(struct rtrs_sess *sess, struct ib_cqe *cqe,
+ unsigned int interval_ms, unsigned int missed_max,
+ void (*err_handler)(struct rtrs_con *con),
+ struct workqueue_struct *wq)
+{
+ sess->hb_cqe = cqe;
+ sess->hb_interval_ms = interval_ms;
+ sess->hb_err_handler = err_handler;
+ sess->hb_wq = wq;
+ sess->hb_missed_max = missed_max;
+ sess->hb_missed_cnt = 0;
+ INIT_DELAYED_WORK(&sess->hb_dwork, hb_work);
+}
+EXPORT_SYMBOL_GPL(rtrs_init_hb);
+
+void rtrs_start_hb(struct rtrs_sess *sess)
+{
+ schedule_hb(sess);
+}
+EXPORT_SYMBOL_GPL(rtrs_start_hb);
+
+void rtrs_stop_hb(struct rtrs_sess *sess)
+{
+ cancel_delayed_work_sync(&sess->hb_dwork);
+ sess->hb_missed_cnt = 0;
+ sess->hb_missed_max = 0;
+}
+EXPORT_SYMBOL_GPL(rtrs_stop_hb);
+
+static int rtrs_str_gid_to_sockaddr(const char *addr, size_t len,
+ short port, struct sockaddr_storage *dst)
+{
+ struct sockaddr_ib *dst_ib = (struct sockaddr_ib *)dst;
+ int ret;
+
+ /*
+ * We can use some of the IPv6 functions since GID is a valid
+ * IPv6 address format
+ */
+ ret = in6_pton(addr, len, dst_ib->sib_addr.sib_raw, '\0', NULL);
+ if (ret == 0)
+ return -EINVAL;
+
+ dst_ib->sib_family = AF_IB;
+ /*
+ * Use the same TCP server port number as the IB service ID
+ * on the IB port space range
+ */
+ dst_ib->sib_sid = cpu_to_be64(RDMA_IB_IP_PS_IB | port);
+ dst_ib->sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL);
+ dst_ib->sib_pkey = cpu_to_be16(0xffff);
+
+ return 0;
+}
+
+/**
+ * rtrs_str_to_sockaddr() - Convert rtrs address string to sockaddr
+ * @addr: String representation of an addr (IPv4, IPv6 or IB GID):
+ * - "ip:192.168.1.1"
+ * - "ip:fe80::200:5aee:feaa:20a2"
+ * - "gid:fe80::200:5aee:feaa:20a2"
+ * @len: String address length
+ * @port: Destination port
+ * @dst: Destination sockaddr structure
+ *
+ * Returns 0 if conversion successful. Non-zero on error.
+ */
+static int rtrs_str_to_sockaddr(const char *addr, size_t len,
+ u16 port, struct sockaddr_storage *dst)
+{
+ if (strncmp(addr, "gid:", 4) == 0) {
+ return rtrs_str_gid_to_sockaddr(addr + 4, len - 4, port, dst);
+ } else if (strncmp(addr, "ip:", 3) == 0) {
+ char port_str[8];
+ char *cpy;
+ int err;
+
+ snprintf(port_str, sizeof(port_str), "%u", port);
+ cpy = kstrndup(addr + 3, len - 3, GFP_KERNEL);
+ err = cpy ? inet_pton_with_scope(&init_net, AF_UNSPEC,
+ cpy, port_str, dst) : -ENOMEM;
+ kfree(cpy);
+
+ return err;
+ }
+ return -EPROTONOSUPPORT;
+}
+
+/**
+ * sockaddr_to_str() - convert sockaddr to a string.
+ * @addr: the sockaddr structure to be converted.
+ * @buf: buffer that receives the string representation.
+ * @len: buffer length.
+ *
+ * The return value is the number of characters written into buf, not
+ * including the trailing '\0'. If len == 0 the function returns 0.
+ */
+int sockaddr_to_str(const struct sockaddr *addr, char *buf, size_t len)
+{
+
+ switch (addr->sa_family) {
+ case AF_IB:
+ return scnprintf(buf, len, "gid:%pI6",
+ &((struct sockaddr_ib *)addr)->sib_addr.sib_raw);
+ case AF_INET:
+ return scnprintf(buf, len, "ip:%pI4",
+ &((struct sockaddr_in *)addr)->sin_addr);
+ case AF_INET6:
+ return scnprintf(buf, len, "ip:%pI6c",
+ &((struct sockaddr_in6 *)addr)->sin6_addr);
+ }
+ return scnprintf(buf, len, "<invalid address family>");
+}
+EXPORT_SYMBOL(sockaddr_to_str);
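+
+/*
+ * Example output (illustrative addresses): an AF_INET address is rendered as
+ * "ip:192.168.1.1", an AF_INET6 address as "ip:fe80::200:5aee:feaa:20a2" and
+ * an AF_IB address as "gid:fe80::200:5aee:feaa:20a2".
+ */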
+
+/**
+ * rtrs_addr_to_sockaddr() - convert path string "src,dst" or "src@dst"
+ * to sockaddr structures
+ * @str: string containing the source and destination addresses of a path,
+ * separated by ',' or '@', e.g. "ip:1.1.1.1,ip:1.1.1.2" or
+ * "ip:1.1.1.1@ip:1.1.1.2". If str contains only one address it is
+ * considered to be the destination.
+ * @len: string length
+ * @port: Destination port number.
+ * @addr: addr->src and addr->dst will be set to the source and destination
+ * addresses; addr->src is set to NULL if str doesn't contain any
+ * source address.
+ *
+ * Returns zero if the conversion is successful. Non-zero otherwise.
+ */
+int rtrs_addr_to_sockaddr(const char *str, size_t len, u16 port,
+ struct rtrs_addr *addr)
+{
+ const char *d;
+
+ d = strchr(str, ',');
+ if (!d)
+ d = strchr(str, '@');
+ if (d) {
+ if (rtrs_str_to_sockaddr(str, d - str, 0, addr->src))
+ return -EINVAL;
+ d += 1;
+ len -= d - str;
+ str = d;
+
+ } else {
+ addr->src = NULL;
+ }
+ return rtrs_str_to_sockaddr(str, len, port, addr->dst);
+}
+EXPORT_SYMBOL(rtrs_addr_to_sockaddr);
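+
+/*
+ * Example (illustrative): for str = "ip:192.168.1.1@ip:192.168.1.2" and
+ * port = 1234, addr->src is parsed from "ip:192.168.1.1" (port 0) and
+ * addr->dst from "ip:192.168.1.2" with port 1234. For str = "ip:192.168.1.2"
+ * alone, addr->src is set to NULL and only addr->dst is filled in.
+ */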
+
+void rtrs_rdma_dev_pd_init(enum ib_pd_flags pd_flags,
+ struct rtrs_rdma_dev_pd *pool)
+{
+ WARN_ON(pool->ops && (!pool->ops->alloc ^ !pool->ops->free));
+ INIT_LIST_HEAD(&pool->list);
+ mutex_init(&pool->mutex);
+ pool->pd_flags = pd_flags;
+}
+EXPORT_SYMBOL(rtrs_rdma_dev_pd_init);
+
+void rtrs_rdma_dev_pd_deinit(struct rtrs_rdma_dev_pd *pool)
+{
+ mutex_destroy(&pool->mutex);
+ WARN_ON(!list_empty(&pool->list));
+}
+EXPORT_SYMBOL(rtrs_rdma_dev_pd_deinit);
+
+static void dev_free(struct kref *ref)
+{
+ struct rtrs_rdma_dev_pd *pool;
+ struct rtrs_ib_dev *dev;
+
+ dev = container_of(ref, typeof(*dev), ref);
+ pool = dev->pool;
+
+ mutex_lock(&pool->mutex);
+ list_del(&dev->entry);
+ mutex_unlock(&pool->mutex);
+
+ if (pool->ops && pool->ops->deinit)
+ pool->ops->deinit(dev);
+
+ ib_dealloc_pd(dev->ib_pd);
+
+ if (pool->ops && pool->ops->free)
+ pool->ops->free(dev);
+ else
+ kfree(dev);
+}
+
+int rtrs_ib_dev_put(struct rtrs_ib_dev *dev)
+{
+ return kref_put(&dev->ref, dev_free);
+}
+EXPORT_SYMBOL(rtrs_ib_dev_put);
+
+static int rtrs_ib_dev_get(struct rtrs_ib_dev *dev)
+{
+ return kref_get_unless_zero(&dev->ref);
+}
+
+struct rtrs_ib_dev *
+rtrs_ib_dev_find_or_add(struct ib_device *ib_dev,
+ struct rtrs_rdma_dev_pd *pool)
+{
+ struct rtrs_ib_dev *dev;
+
+ mutex_lock(&pool->mutex);
+ list_for_each_entry(dev, &pool->list, entry) {
+ if (dev->ib_dev->node_guid == ib_dev->node_guid &&
+ rtrs_ib_dev_get(dev))
+ goto out_unlock;
+ }
+ mutex_unlock(&pool->mutex);
+ if (pool->ops && pool->ops->alloc)
+ dev = pool->ops->alloc();
+ else
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (IS_ERR_OR_NULL(dev))
+ goto out_err;
+
+ kref_init(&dev->ref);
+ dev->pool = pool;
+ dev->ib_dev = ib_dev;
+ dev->ib_pd = ib_alloc_pd(ib_dev, pool->pd_flags);
+ if (IS_ERR(dev->ib_pd))
+ goto out_free_dev;
+
+ if (pool->ops && pool->ops->init && pool->ops->init(dev))
+ goto out_free_pd;
+
+ mutex_lock(&pool->mutex);
+ list_add(&dev->entry, &pool->list);
+out_unlock:
+ mutex_unlock(&pool->mutex);
+ return dev;
+
+out_free_pd:
+ ib_dealloc_pd(dev->ib_pd);
+out_free_dev:
+ if (pool->ops && pool->ops->free)
+ pool->ops->free(dev);
+ else
+ kfree(dev);
+out_err:
+ return NULL;
+}
+EXPORT_SYMBOL(rtrs_ib_dev_find_or_add);
diff --git a/drivers/infiniband/ulp/rtrs/rtrs.h b/drivers/infiniband/ulp/rtrs/rtrs.h
new file mode 100644
index 000000000000..9af750f4d783
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs.h
@@ -0,0 +1,196 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+#ifndef RTRS_H
+#define RTRS_H
+
+#include <linux/socket.h>
+#include <linux/scatterlist.h>
+
+struct rtrs_permit;
+struct rtrs_clt;
+struct rtrs_srv_ctx;
+struct rtrs_srv;
+struct rtrs_srv_op;
+
+/*
+ * RDMA transport (RTRS) client API
+ */
+
+/**
+ * enum rtrs_clt_link_ev - Events about connectivity state of a client
+ * @RTRS_CLT_LINK_EV_RECONNECTED: Client was reconnected.
+ * @RTRS_CLT_LINK_EV_DISCONNECTED: Client was disconnected.
+ */
+enum rtrs_clt_link_ev {
+ RTRS_CLT_LINK_EV_RECONNECTED,
+ RTRS_CLT_LINK_EV_DISCONNECTED,
+};
+
+/**
+ * Source and destination address of a path to be established
+ */
+struct rtrs_addr {
+ struct sockaddr_storage *src;
+ struct sockaddr_storage *dst;
+};
+
+/**
+ * struct rtrs_clt_ops - holds the link event callback and a private pointer.
+ * @priv: User supplied private data.
+ * @link_ev: Event notification callback for connection state changes.
+ * It is called with the user data that was passed to rtrs_clt_open()
+ * and the event that occurred.
+ */
+struct rtrs_clt_ops {
+ void *priv;
+ void (*link_ev)(void *priv, enum rtrs_clt_link_ev ev);
+};
+
+struct rtrs_clt *rtrs_clt_open(struct rtrs_clt_ops *ops,
+ const char *sessname,
+ const struct rtrs_addr *paths,
+ size_t path_cnt, u16 port,
+ size_t pdu_sz, u8 reconnect_delay_sec,
+ u16 max_segments,
+ size_t max_segment_size,
+ s16 max_reconnect_attempts);
+
+void rtrs_clt_close(struct rtrs_clt *sess);
+
+/**
+ * rtrs_permit_to_pdu() - converts rtrs_permit to opaque pdu pointer
+ * @permit: RTRS permit pointer; it is associated with the memory allocation
+ * reserved for a future RDMA operation.
+ */
+void *rtrs_permit_to_pdu(struct rtrs_permit *permit);
+
+enum {
+ RTRS_PERMIT_NOWAIT = 0,
+ RTRS_PERMIT_WAIT = 1,
+};
+
+/**
+ * enum rtrs_clt_con_type - type of IB connection to use with a given
+ * rtrs_permit
+ * @RTRS_ADMIN_CON: use the connection reserved for "service" messages
+ * @RTRS_IO_CON: use a connection reserved for IO
+ */
+enum rtrs_clt_con_type {
+ RTRS_ADMIN_CON,
+ RTRS_IO_CON
+};
+
+struct rtrs_permit *rtrs_clt_get_permit(struct rtrs_clt *sess,
+ enum rtrs_clt_con_type con_type,
+ int wait);
+
+void rtrs_clt_put_permit(struct rtrs_clt *sess, struct rtrs_permit *permit);
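+
+/*
+ * Illustrative permit usage (hypothetical flow, error handling elided):
+ *
+ *   struct rtrs_permit *permit;
+ *
+ *   permit = rtrs_clt_get_permit(sess, RTRS_IO_CON, RTRS_PERMIT_WAIT);
+ *   ... issue rtrs_clt_request() with the permit ...
+ *   rtrs_clt_put_permit(sess, permit);
+ */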
+
+/**
+ * struct rtrs_clt_req_ops - holds the request confirmation callback
+ * and a private pointer.
+ * @priv: User supplied private data.
+ * @conf_fn: Confirmation callback; it is called with the user provided
+ * data passed back for the corresponding request and the error number
+ * of the completed operation.
+ */
+struct rtrs_clt_req_ops {
+ void *priv;
+ void (*conf_fn)(void *priv, int errno);
+};
+
+int rtrs_clt_request(int dir, struct rtrs_clt_req_ops *ops,
+ struct rtrs_clt *sess, struct rtrs_permit *permit,
+ const struct kvec *vec, size_t nr, size_t len,
+ struct scatterlist *sg, unsigned int sg_cnt);
+
+/**
+ * rtrs_attrs - RTRS session attributes
+ */
+struct rtrs_attrs {
+ u32 queue_depth;
+ u32 max_io_size;
+ u8 sessname[NAME_MAX];
+ struct kobject *sess_kobj;
+};
+
+int rtrs_clt_query(struct rtrs_clt *sess, struct rtrs_attrs *attr);
+
+/*
+ * Here goes RTRS server API
+ */
+
+/**
+ * enum rtrs_srv_link_ev - Server link events
+ * @RTRS_SRV_LINK_EV_CONNECTED: Connection from client established
+ * @RTRS_SRV_LINK_EV_DISCONNECTED: Connection was disconnected, all
+ * connection RTRS resources were freed.
+ */
+enum rtrs_srv_link_ev {
+ RTRS_SRV_LINK_EV_CONNECTED,
+ RTRS_SRV_LINK_EV_DISCONNECTED,
+};
+
+struct rtrs_srv_ops {
+ /**
+ * rdma_ev(): Event notification for RDMA operations
+ * If the callback returns a value != 0, an error
+ * message for the data transfer will be sent to
+ * the client.
+ *
+ * @sess: Session
+ * @priv: Private data set by rtrs_srv_set_sess_priv()
+ * @id: internal RTRS operation id
+ * @dir: READ/WRITE
+ * @data: Pointer to (bidirectional) rdma memory area:
+ * - in case of %RTRS_SRV_RDMA_EV_RECV contains
+ * data sent by the client
+ * - in case of %RTRS_SRV_RDMA_EV_WRITE_REQ points
+ * to the memory area where the response is to be
+ * written to
+ * @datalen: Size of the memory area in @data
+ * @usr: The extra user message sent by the client (%vec)
+ * @usrlen: Size of the user message
+ */
+ int (*rdma_ev)(struct rtrs_srv *sess, void *priv,
+ struct rtrs_srv_op *id, int dir,
+ void *data, size_t datalen, const void *usr,
+ size_t usrlen);
+ /**
+ * link_ev(): Events about connectivity state changes
+ * If the callback returns != 0 and the event
+ * %RTRS_SRV_LINK_EV_CONNECTED the corresponding
+ * session will be destroyed.
+ * @sess: Session
+ * @ev: event
+ * @priv: Private data from user if previously set with
+ * rtrs_srv_set_sess_priv()
+ */
+ int (*link_ev)(struct rtrs_srv *sess, enum rtrs_srv_link_ev ev,
+ void *priv);
+};
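+
+/*
+ * Illustrative rdma_ev() skeleton (hypothetical user code; queue_request()
+ * is a made-up helper): the callback typically queues the request and
+ * returns 0, and the transfer is completed later via rtrs_srv_resp_rdma().
+ *
+ *   static int my_rdma_ev(struct rtrs_srv *sess, void *priv,
+ *                         struct rtrs_srv_op *id, int dir,
+ *                         void *data, size_t datalen,
+ *                         const void *usr, size_t usrlen)
+ *   {
+ *           queue_request(priv, id, dir, data, datalen);
+ *           return 0;
+ *   }
+ */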
+
+struct rtrs_srv_ctx *rtrs_srv_open(struct rtrs_srv_ops *ops, u16 port);
+
+void rtrs_srv_close(struct rtrs_srv_ctx *ctx);
+
+bool rtrs_srv_resp_rdma(struct rtrs_srv_op *id, int errno);
+
+void rtrs_srv_set_sess_priv(struct rtrs_srv *sess, void *priv);
+
+int rtrs_srv_get_sess_name(struct rtrs_srv *sess, char *sessname, size_t len);
+
+int rtrs_srv_get_queue_depth(struct rtrs_srv *sess);
+
+int rtrs_addr_to_sockaddr(const char *str, size_t len, u16 port,
+ struct rtrs_addr *addr);
+
+int sockaddr_to_str(const struct sockaddr *addr, char *buf, size_t len);
+#endif
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index cd1181c39ed2..d8fcd21ab472 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -71,7 +71,6 @@ static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
-static bool prefer_fr = true;
static bool register_always = true;
static bool never_register;
static int topspin_workarounds = 1;
@@ -95,10 +94,6 @@ module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
"Enable workarounds for Topspin/Cisco SRP target bugs if != 0");
-module_param(prefer_fr, bool, 0444);
-MODULE_PARM_DESC(prefer_fr,
-"Whether to use fast registration if both FMR and fast registration are supported");
-
module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always,
"Use memory registration even for contiguous memory regions");
@@ -146,7 +141,7 @@ module_param(ch_count, uint, 0444);
MODULE_PARM_DESC(ch_count,
"Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");
-static void srp_add_one(struct ib_device *device);
+static int srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device, void *client_data);
static void srp_rename_dev(struct ib_device *device, void *client_data);
static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc);
@@ -388,24 +383,6 @@ static int srp_new_cm_id(struct srp_rdma_ch *ch)
srp_new_ib_cm_id(ch);
}
-static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
-{
- struct srp_device *dev = target->srp_host->srp_dev;
- struct ib_fmr_pool_param fmr_param;
-
- memset(&fmr_param, 0, sizeof(fmr_param));
- fmr_param.pool_size = target->mr_pool_size;
- fmr_param.dirty_watermark = fmr_param.pool_size / 4;
- fmr_param.cache = 1;
- fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
- fmr_param.page_shift = ilog2(dev->mr_page_size);
- fmr_param.access = (IB_ACCESS_LOCAL_WRITE |
- IB_ACCESS_REMOTE_WRITE |
- IB_ACCESS_REMOTE_READ);
-
- return ib_create_fmr_pool(dev->pd, &fmr_param);
-}
-
/**
* srp_destroy_fr_pool() - free the resources owned by a pool
* @pool: Fast registration pool to be destroyed.
@@ -556,7 +533,6 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
struct ib_qp_init_attr *init_attr;
struct ib_cq *recv_cq, *send_cq;
struct ib_qp *qp;
- struct ib_fmr_pool *fmr_pool = NULL;
struct srp_fr_pool *fr_pool = NULL;
const int m = 1 + dev->use_fast_reg * target->mr_per_cmd * 2;
int ret;
@@ -619,14 +595,6 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
"FR pool allocation failed (%d)\n", ret);
goto err_qp;
}
- } else if (dev->use_fmr) {
- fmr_pool = srp_alloc_fmr_pool(target);
- if (IS_ERR(fmr_pool)) {
- ret = PTR_ERR(fmr_pool);
- shost_printk(KERN_WARNING, target->scsi_host, PFX
- "FMR pool allocation failed (%d)\n", ret);
- goto err_qp;
- }
}
if (ch->qp)
@@ -644,10 +612,6 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
if (ch->fr_pool)
srp_destroy_fr_pool(ch->fr_pool);
ch->fr_pool = fr_pool;
- } else if (dev->use_fmr) {
- if (ch->fmr_pool)
- ib_destroy_fmr_pool(ch->fmr_pool);
- ch->fmr_pool = fmr_pool;
}
kfree(init_attr);
@@ -702,9 +666,6 @@ static void srp_free_ch_ib(struct srp_target_port *target,
if (dev->use_fast_reg) {
if (ch->fr_pool)
srp_destroy_fr_pool(ch->fr_pool);
- } else if (dev->use_fmr) {
- if (ch->fmr_pool)
- ib_destroy_fmr_pool(ch->fmr_pool);
}
srp_destroy_qp(ch);
@@ -1017,12 +978,8 @@ static void srp_free_req_data(struct srp_target_port *target,
for (i = 0; i < target->req_ring_size; ++i) {
req = &ch->req_ring[i];
- if (dev->use_fast_reg) {
+ if (dev->use_fast_reg)
kfree(req->fr_list);
- } else {
- kfree(req->fmr_list);
- kfree(req->map_page);
- }
if (req->indirect_dma_addr) {
ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
target->indirect_size,
@@ -1056,16 +1013,8 @@ static int srp_alloc_req_data(struct srp_rdma_ch *ch)
GFP_KERNEL);
if (!mr_list)
goto out;
- if (srp_dev->use_fast_reg) {
+ if (srp_dev->use_fast_reg)
req->fr_list = mr_list;
- } else {
- req->fmr_list = mr_list;
- req->map_page = kmalloc_array(srp_dev->max_pages_per_mr,
- sizeof(void *),
- GFP_KERNEL);
- if (!req->map_page)
- goto out;
- }
req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
if (!req->indirect_desc)
goto out;
@@ -1272,11 +1221,6 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd,
if (req->nmdesc)
srp_fr_pool_put(ch->fr_pool, req->fr_list,
req->nmdesc);
- } else if (dev->use_fmr) {
- struct ib_pool_fmr **pfmr;
-
- for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
- ib_fmr_pool_unmap(*pfmr);
}
ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
@@ -1472,50 +1416,6 @@ static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
state->ndesc++;
}
-static int srp_map_finish_fmr(struct srp_map_state *state,
- struct srp_rdma_ch *ch)
-{
- struct srp_target_port *target = ch->target;
- struct srp_device *dev = target->srp_host->srp_dev;
- struct ib_pool_fmr *fmr;
- u64 io_addr = 0;
-
- if (state->fmr.next >= state->fmr.end) {
- shost_printk(KERN_ERR, ch->target->scsi_host,
- PFX "Out of MRs (mr_per_cmd = %d)\n",
- ch->target->mr_per_cmd);
- return -ENOMEM;
- }
-
- WARN_ON_ONCE(!dev->use_fmr);
-
- if (state->npages == 0)
- return 0;
-
- if (state->npages == 1 && target->global_rkey) {
- srp_map_desc(state, state->base_dma_addr, state->dma_len,
- target->global_rkey);
- goto reset_state;
- }
-
- fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
- state->npages, io_addr);
- if (IS_ERR(fmr))
- return PTR_ERR(fmr);
-
- *state->fmr.next++ = fmr;
- state->nmdesc++;
-
- srp_map_desc(state, state->base_dma_addr & ~dev->mr_page_mask,
- state->dma_len, fmr->fmr->rkey);
-
-reset_state:
- state->npages = 0;
- state->dma_len = 0;
-
- return 0;
-}
-
static void srp_reg_mr_err_done(struct ib_cq *cq, struct ib_wc *wc)
{
srp_handle_qp_err(cq, wc, "FAST REG");
@@ -1606,74 +1506,6 @@ static int srp_map_finish_fr(struct srp_map_state *state,
return n;
}
-static int srp_map_sg_entry(struct srp_map_state *state,
- struct srp_rdma_ch *ch,
- struct scatterlist *sg)
-{
- struct srp_target_port *target = ch->target;
- struct srp_device *dev = target->srp_host->srp_dev;
- dma_addr_t dma_addr = sg_dma_address(sg);
- unsigned int dma_len = sg_dma_len(sg);
- unsigned int len = 0;
- int ret;
-
- WARN_ON_ONCE(!dma_len);
-
- while (dma_len) {
- unsigned offset = dma_addr & ~dev->mr_page_mask;
-
- if (state->npages == dev->max_pages_per_mr ||
- (state->npages > 0 && offset != 0)) {
- ret = srp_map_finish_fmr(state, ch);
- if (ret)
- return ret;
- }
-
- len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);
-
- if (!state->npages)
- state->base_dma_addr = dma_addr;
- state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
- state->dma_len += len;
- dma_addr += len;
- dma_len -= len;
- }
-
- /*
- * If the end of the MR is not on a page boundary then we need to
- * close it out and start a new one -- we can only merge at page
- * boundaries.
- */
- ret = 0;
- if ((dma_addr & ~dev->mr_page_mask) != 0)
- ret = srp_map_finish_fmr(state, ch);
- return ret;
-}
-
-static int srp_map_sg_fmr(struct srp_map_state *state, struct srp_rdma_ch *ch,
- struct srp_request *req, struct scatterlist *scat,
- int count)
-{
- struct scatterlist *sg;
- int i, ret;
-
- state->pages = req->map_page;
- state->fmr.next = req->fmr_list;
- state->fmr.end = req->fmr_list + ch->target->mr_per_cmd;
-
- for_each_sg(scat, sg, count, i) {
- ret = srp_map_sg_entry(state, ch, sg);
- if (ret)
- return ret;
- }
-
- ret = srp_map_finish_fmr(state, ch);
- if (ret)
- return ret;
-
- return 0;
-}
-
static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
struct srp_request *req, struct scatterlist *scat,
int count)
@@ -1733,7 +1565,6 @@ static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
struct srp_device *dev = target->srp_host->srp_dev;
struct srp_map_state state;
struct srp_direct_buf idb_desc;
- u64 idb_pages[1];
struct scatterlist idb_sg[1];
int ret;
@@ -1756,14 +1587,6 @@ static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
if (ret < 0)
return ret;
WARN_ON_ONCE(ret < 1);
- } else if (dev->use_fmr) {
- state.pages = idb_pages;
- state.pages[0] = (req->indirect_dma_addr &
- dev->mr_page_mask);
- state.npages = 1;
- ret = srp_map_finish_fmr(&state, ch);
- if (ret < 0)
- return ret;
} else {
return -EINVAL;
}
@@ -1787,9 +1610,6 @@ static void srp_check_mapping(struct srp_map_state *state,
if (dev->use_fast_reg)
for (i = 0, pfr = req->fr_list; i < state->nmdesc; i++, pfr++)
mr_len += (*pfr)->mr->length;
- else if (dev->use_fmr)
- for (i = 0; i < state->nmdesc; i++)
- mr_len += be32_to_cpu(req->indirect_desc[i].len);
if (desc_len != scsi_bufflen(req->scmnd) ||
mr_len > scsi_bufflen(req->scmnd))
pr_err("Inconsistent: scsi len %d <> desc len %lld <> mr len %lld; ndesc %d; nmdesc = %d\n",
@@ -1904,8 +1724,6 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
state.desc = req->indirect_desc;
if (dev->use_fast_reg)
ret = srp_map_sg_fr(&state, ch, req, scat, count);
- else if (dev->use_fmr)
- ret = srp_map_sg_fmr(&state, ch, req, scat, count);
else
ret = srp_map_sg_dma(&state, ch, req, scat, count);
req->nmdesc = state.nmdesc;
@@ -3424,6 +3242,7 @@ enum {
SRP_OPT_IP_DEST = 1 << 16,
SRP_OPT_TARGET_CAN_QUEUE= 1 << 17,
SRP_OPT_MAX_IT_IU_SIZE = 1 << 18,
+ SRP_OPT_CH_COUNT = 1 << 19,
};
static unsigned int srp_opt_mandatory[] = {
@@ -3457,6 +3276,7 @@ static const match_table_t srp_opt_tokens = {
{ SRP_OPT_IP_SRC, "src=%s" },
{ SRP_OPT_IP_DEST, "dest=%s" },
{ SRP_OPT_MAX_IT_IU_SIZE, "max_it_iu_size=%d" },
+ { SRP_OPT_CH_COUNT, "ch_count=%u", },
{ SRP_OPT_ERR, NULL }
};
@@ -3758,6 +3578,14 @@ static int srp_parse_options(struct net *net, const char *buf,
target->max_it_iu_size = token;
break;
+ case SRP_OPT_CH_COUNT:
+ if (match_int(args, &token) || token < 1) {
+ pr_warn("bad channel count %s\n", p);
+ goto out;
+ }
+ target->ch_count = token;
+ break;
+
default:
pr_warn("unknown parameter or missing value '%s' in target creation request\n",
p);
@@ -3864,13 +3692,13 @@ static ssize_t srp_create_target(struct device *dev,
goto out;
}
- if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
+ if (!srp_dev->has_fr && !target->allow_ext_sg &&
target->cmd_sg_cnt < target->sg_tablesize) {
pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
target->sg_tablesize = target->cmd_sg_cnt;
}
- if (srp_dev->use_fast_reg || srp_dev->use_fmr) {
+ if (srp_dev->use_fast_reg) {
bool gaps_reg = (ibdev->attrs.device_cap_flags &
IB_DEVICE_SG_GAPS_REG);
@@ -3878,12 +3706,12 @@ static ssize_t srp_create_target(struct device *dev,
(ilog2(srp_dev->mr_page_size) - 9);
if (!gaps_reg) {
/*
- * FR and FMR can only map one HCA page per entry. If
- * the start address is not aligned on a HCA page
- * boundary two entries will be used for the head and
- * the tail although these two entries combined
- * contain at most one HCA page of data. Hence the "+
- * 1" in the calculation below.
+ * FR can only map one HCA page per entry. If the start
+ * address is not aligned on a HCA page boundary two
+ * entries will be used for the head and the tail
+ * although these two entries combined contain at most
+ * one HCA page of data. Hence the "+ 1" in the
+ * calculation below.
*
* The indirect data buffer descriptor is contiguous
* so the memory for that buffer will only be
@@ -3921,11 +3749,13 @@ static ssize_t srp_create_target(struct device *dev,
goto out;
ret = -ENOMEM;
- target->ch_count = max_t(unsigned, num_online_nodes(),
- min(ch_count ? :
- min(4 * num_online_nodes(),
- ibdev->num_comp_vectors),
- num_online_cpus()));
+ if (target->ch_count == 0)
+ target->ch_count =
+ max_t(unsigned int, num_online_nodes(),
+ min(ch_count ?:
+ min(4 * num_online_nodes(),
+ ibdev->num_comp_vectors),
+ num_online_cpus()));
target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
GFP_KERNEL);
if (!target->ch)
@@ -4132,7 +3962,7 @@ static void srp_rename_dev(struct ib_device *device, void *client_data)
}
}
-static void srp_add_one(struct ib_device *device)
+static int srp_add_one(struct ib_device *device)
{
struct srp_device *srp_dev;
struct ib_device_attr *attr = &device->attrs;
@@ -4144,7 +3974,7 @@ static void srp_add_one(struct ib_device *device)
srp_dev = kzalloc(sizeof(*srp_dev), GFP_KERNEL);
if (!srp_dev)
- return;
+ return -ENOMEM;
/*
* Use the smallest page size supported by the HCA, down to a
@@ -4162,23 +3992,15 @@ static void srp_add_one(struct ib_device *device)
srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
max_pages_per_mr);
- srp_dev->has_fmr = (device->ops.alloc_fmr &&
- device->ops.dealloc_fmr &&
- device->ops.map_phys_fmr &&
- device->ops.unmap_fmr);
srp_dev->has_fr = (attr->device_cap_flags &
IB_DEVICE_MEM_MGT_EXTENSIONS);
- if (!never_register && !srp_dev->has_fmr && !srp_dev->has_fr) {
- dev_warn(&device->dev, "neither FMR nor FR is supported\n");
- } else if (!never_register &&
- attr->max_mr_size >= 2 * srp_dev->mr_page_size) {
- srp_dev->use_fast_reg = (srp_dev->has_fr &&
- (!srp_dev->has_fmr || prefer_fr));
- srp_dev->use_fmr = !srp_dev->use_fast_reg && srp_dev->has_fmr;
- }
+ if (!never_register && !srp_dev->has_fr)
+ dev_warn(&device->dev, "FR is not supported\n");
+ else if (!never_register &&
+ attr->max_mr_size >= 2 * srp_dev->mr_page_size)
+ srp_dev->use_fast_reg = srp_dev->has_fr;
- if (never_register || !register_always ||
- (!srp_dev->has_fmr && !srp_dev->has_fr))
+ if (never_register || !register_always || !srp_dev->has_fr)
flags |= IB_PD_UNSAFE_GLOBAL_RKEY;
if (srp_dev->use_fast_reg) {
@@ -4197,8 +4019,12 @@ static void srp_add_one(struct ib_device *device)
srp_dev->dev = device;
srp_dev->pd = ib_alloc_pd(device, flags);
- if (IS_ERR(srp_dev->pd))
- goto free_dev;
+ if (IS_ERR(srp_dev->pd)) {
+ int ret = PTR_ERR(srp_dev->pd);
+
+ kfree(srp_dev);
+ return ret;
+ }
if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
srp_dev->global_rkey = srp_dev->pd->unsafe_global_rkey;
@@ -4212,10 +4038,7 @@ static void srp_add_one(struct ib_device *device)
}
ib_set_client_data(device, &srp_client, srp_dev);
- return;
-
-free_dev:
- kfree(srp_dev);
+ return 0;
}
static void srp_remove_one(struct ib_device *device, void *client_data)
@@ -4225,8 +4048,6 @@ static void srp_remove_one(struct ib_device *device, void *client_data)
struct srp_target_port *target;
srp_dev = client_data;
- if (!srp_dev)
- return;
list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
device_unregister(&host->dev);
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index 6fabcc2faf1f..6818cac0a3b7 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -44,7 +44,6 @@
#include <rdma/ib_verbs.h>
#include <rdma/ib_sa.h>
#include <rdma/ib_cm.h>
-#include <rdma/ib_fmr_pool.h>
#include <rdma/rdma_cm.h>
enum {
@@ -95,8 +94,7 @@ enum srp_iu_type {
/*
* @mr_page_mask: HCA memory registration page mask.
* @mr_page_size: HCA memory registration page size.
- * @mr_max_size: Maximum size in bytes of a single FMR / FR registration
- * request.
+ * @mr_max_size: Maximum size in bytes of a single FR registration request.
*/
struct srp_device {
struct list_head dev_list;
@@ -107,9 +105,7 @@ struct srp_device {
int mr_page_size;
int mr_max_size;
int max_pages_per_mr;
- bool has_fmr;
bool has_fr;
- bool use_fmr;
bool use_fast_reg;
};
@@ -127,11 +123,7 @@ struct srp_host {
struct srp_request {
struct scsi_cmnd *scmnd;
struct srp_iu *cmd;
- union {
- struct ib_pool_fmr **fmr_list;
- struct srp_fr_desc **fr_list;
- };
- u64 *map_page;
+ struct srp_fr_desc **fr_list;
struct srp_direct_buf *indirect_desc;
dma_addr_t indirect_dma_addr;
short nmdesc;
@@ -155,10 +147,7 @@ struct srp_rdma_ch {
struct ib_cq *send_cq;
struct ib_cq *recv_cq;
struct ib_qp *qp;
- union {
- struct ib_fmr_pool *fmr_pool;
- struct srp_fr_pool *fr_pool;
- };
+ struct srp_fr_pool *fr_pool;
uint32_t max_it_iu_len;
uint32_t max_ti_iu_len;
u8 max_imm_sge;
@@ -319,20 +308,16 @@ struct srp_fr_pool {
* @pages: Array with DMA addresses of pages being considered for
* memory registration.
* @base_dma_addr: DMA address of the first page that has not yet been mapped.
- * @dma_len: Number of bytes that will be registered with the next
- * FMR or FR memory registration call.
+ * @dma_len: Number of bytes that will be registered with the next FR
+ * memory registration call.
* @total_len: Total number of bytes in the sg-list being mapped.
* @npages: Number of page addresses in the pages[] array.
- * @nmdesc: Number of FMR or FR memory descriptors used for mapping.
+ * @nmdesc: Number of FR memory descriptors used for mapping.
* @ndesc: Number of SRP buffer descriptors that have been filled in.
*/
struct srp_map_state {
union {
struct {
- struct ib_pool_fmr **next;
- struct ib_pool_fmr **end;
- } fmr;
- struct {
struct srp_fr_desc **next;
struct srp_fr_desc **end;
} fr;
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 98552749d71c..ef7fcd3e8e15 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -81,7 +81,7 @@ MODULE_PARM_DESC(srpt_srq_size,
static int srpt_get_u64_x(char *buffer, const struct kernel_param *kp)
{
- return sprintf(buffer, "0x%016llx", *(u64 *)kp->arg);
+ return sprintf(buffer, "0x%016llx\n", *(u64 *)kp->arg);
}
module_param_call(srpt_service_guid, NULL, srpt_get_u64_x, &srpt_service_guid,
0444);
@@ -135,14 +135,11 @@ static bool srpt_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state new)
static void srpt_event_handler(struct ib_event_handler *handler,
struct ib_event *event)
{
- struct srpt_device *sdev;
+ struct srpt_device *sdev =
+ container_of(handler, struct srpt_device, event_handler);
struct srpt_port *sport;
u8 port_num;
- sdev = ib_get_client_data(event->device, &srpt_client);
- if (!sdev || sdev->device != event->device)
- return;
-
pr_debug("ASYNC event= %d on device= %s\n", event->event,
dev_name(&sdev->device->dev));
@@ -217,8 +214,9 @@ static const char *get_ch_state_name(enum rdma_ch_state s)
*/
static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch)
{
- pr_debug("QP event %d on ch=%p sess_name=%s state=%d\n",
- event->event, ch, ch->sess_name, ch->state);
+ pr_debug("QP event %d on ch=%p sess_name=%s-%d state=%s\n",
+ event->event, ch, ch->sess_name, ch->qp->qp_num,
+ get_ch_state_name(ch->state));
switch (event->event) {
case IB_EVENT_COMM_EST:
@@ -610,6 +608,11 @@ static int srpt_refresh_port(struct srpt_port *sport)
dev_name(&sport->sdev->device->dev), sport->port,
PTR_ERR(sport->mad_agent));
sport->mad_agent = NULL;
+ memset(&port_modify, 0, sizeof(port_modify));
+ port_modify.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
+ ib_modify_port(sport->sdev->device, sport->port, 0,
+ &port_modify);
+
}
}
@@ -633,9 +636,8 @@ static void srpt_unregister_mad_agent(struct srpt_device *sdev)
for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
sport = &sdev->port[i - 1];
WARN_ON(sport->port != i);
- if (ib_modify_port(sdev->device, i, 0, &port_modify) < 0)
- pr_err("disabling MAD processing failed.\n");
if (sport->mad_agent) {
+ ib_modify_port(sdev->device, i, 0, &port_modify);
ib_unregister_mad_agent(sport->mad_agent);
sport->mad_agent = NULL;
}
@@ -1814,18 +1816,13 @@ retry:
*/
qp_init->cap.max_send_wr = min(sq_size / 2, attrs->max_qp_wr);
qp_init->cap.max_rdma_ctxs = sq_size / 2;
- qp_init->cap.max_send_sge = min(attrs->max_send_sge,
- SRPT_MAX_SG_PER_WQE);
- qp_init->cap.max_recv_sge = min(attrs->max_recv_sge,
- SRPT_MAX_SG_PER_WQE);
+ qp_init->cap.max_send_sge = attrs->max_send_sge;
+ qp_init->cap.max_recv_sge = 1;
qp_init->port_num = ch->sport->port;
- if (sdev->use_srq) {
+ if (sdev->use_srq)
qp_init->srq = sdev->srq;
- } else {
+ else
qp_init->cap.max_recv_wr = ch->rq_size;
- qp_init->cap.max_recv_sge = min(attrs->max_recv_sge,
- SRPT_MAX_SG_PER_WQE);
- }
if (ch->using_rdma_cm) {
ret = rdma_create_qp(ch->rdma_cm.cm_id, sdev->pd, qp_init);
@@ -1984,8 +1981,8 @@ static void __srpt_close_all_ch(struct srpt_port *sport)
list_for_each_entry(nexus, &sport->nexus_list, entry) {
list_for_each_entry(ch, &nexus->ch_list, list) {
if (srpt_disconnect_ch(ch) >= 0)
- pr_info("Closing channel %s because target %s_%d has been disabled\n",
- ch->sess_name,
+ pr_info("Closing channel %s-%d because target %s_%d has been disabled\n",
+ ch->sess_name, ch->qp->qp_num,
dev_name(&sport->sdev->device->dev),
sport->port);
srpt_close_ch(ch);
@@ -2496,7 +2493,8 @@ reject:
SRP_BUF_FORMAT_INDIRECT);
if (rdma_cm_id)
- rdma_reject(rdma_cm_id, rej, sizeof(*rej));
+ rdma_reject(rdma_cm_id, rej, sizeof(*rej),
+ IB_CM_REJ_CONSUMER_DEFINED);
else
ib_send_cm_rej(ib_cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
rej, sizeof(*rej));
@@ -3104,7 +3102,7 @@ static int srpt_use_srq(struct srpt_device *sdev, bool use_srq)
* srpt_add_one - InfiniBand device addition callback function
* @device: Describes a HCA.
*/
-static void srpt_add_one(struct ib_device *device)
+static int srpt_add_one(struct ib_device *device)
{
struct srpt_device *sdev;
struct srpt_port *sport;
@@ -3115,14 +3113,16 @@ static void srpt_add_one(struct ib_device *device)
sdev = kzalloc(struct_size(sdev, port, device->phys_port_cnt),
GFP_KERNEL);
if (!sdev)
- goto err;
+ return -ENOMEM;
sdev->device = device;
mutex_init(&sdev->sdev_mutex);
sdev->pd = ib_alloc_pd(device, 0);
- if (IS_ERR(sdev->pd))
+ if (IS_ERR(sdev->pd)) {
+ ret = PTR_ERR(sdev->pd);
goto free_dev;
+ }
sdev->lkey = sdev->pd->local_dma_lkey;
@@ -3138,6 +3138,7 @@ static void srpt_add_one(struct ib_device *device)
if (IS_ERR(sdev->cm_id)) {
pr_info("ib_create_cm_id() failed: %ld\n",
PTR_ERR(sdev->cm_id));
+ ret = PTR_ERR(sdev->cm_id);
sdev->cm_id = NULL;
if (!rdma_cm_id)
goto err_ring;
@@ -3182,7 +3183,8 @@ static void srpt_add_one(struct ib_device *device)
mutex_init(&sport->port_gid_id.mutex);
INIT_LIST_HEAD(&sport->port_gid_id.tpg_list);
- if (srpt_refresh_port(sport)) {
+ ret = srpt_refresh_port(sport);
+ if (ret) {
pr_err("MAD registration failed for %s-%d.\n",
dev_name(&sdev->device->dev), i);
goto err_event;
@@ -3193,10 +3195,9 @@ static void srpt_add_one(struct ib_device *device)
list_add_tail(&sdev->list, &srpt_dev_list);
spin_unlock(&srpt_dev_lock);
-out:
ib_set_client_data(device, &srpt_client, sdev);
pr_debug("added %s.\n", dev_name(&device->dev));
- return;
+ return 0;
err_event:
ib_unregister_event_handler(&sdev->event_handler);
@@ -3208,10 +3209,8 @@ err_ring:
ib_dealloc_pd(sdev->pd);
free_dev:
kfree(sdev);
-err:
- sdev = NULL;
pr_info("%s(%s) failed.\n", __func__, dev_name(&device->dev));
- goto out;
+ return ret;
}
/**
@@ -3224,12 +3223,6 @@ static void srpt_remove_one(struct ib_device *device, void *client_data)
struct srpt_device *sdev = client_data;
int i;
- if (!sdev) {
- pr_info("%s(%s): nothing to do.\n", __func__,
- dev_name(&device->dev));
- return;
- }
-
srpt_unregister_mad_agent(sdev);
ib_unregister_event_handler(&sdev->event_handler);
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
index 2e1a69840857..f31c349d07a1 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.h
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
@@ -105,11 +105,6 @@ enum {
SRP_CMD_ACA = 0x4,
SRPT_DEF_SG_TABLESIZE = 128,
- /*
- * An experimentally determined value that avoids that QP creation
- * fails due to "swiotlb buffer is full" on systems using the swiotlb.
- */
- SRPT_MAX_SG_PER_WQE = 16,
MIN_SRPT_SQ_SIZE = 16,
DEF_SRPT_SQ_SIZE = 4096,
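
The ib_srpt changes above convert srpt_add_one() to return an errno instead of void, which is why the NULL-client-data check disappears from srpt_remove_one(): once the add callback can fail, the core treats the failure as fatal for that device/client pairing, so the remove callback never sees a NULL pointer. A minimal sketch of that client contract as this patch uses it (hypothetical client name and payload, not part of this patch):

#include <linux/slab.h>
#include <rdma/ib_verbs.h>

struct demo_priv {
	struct ib_device *device;
};

static struct ib_client demo_client;

static int demo_add_one(struct ib_device *device)
{
	struct demo_priv *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;	/* core unwinds; remove() is not called */

	priv->device = device;
	ib_set_client_data(device, &demo_client, priv);
	return 0;
}

static void demo_remove_one(struct ib_device *device, void *client_data)
{
	kfree(client_data);	/* only reached if add() returned 0 */
}

static struct ib_client demo_client = {
	.name	= "demo",
	.add	= demo_add_one,
	.remove	= demo_remove_one,
};
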
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 0d57e51b8ba1..e494295d1c7b 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -282,7 +282,8 @@ static void evdev_pass_values(struct evdev_client *client,
spin_unlock(&client->buffer_lock);
if (wakeup)
- wake_up_interruptible(&evdev->wait);
+ wake_up_interruptible_poll(&evdev->wait,
+ EPOLLIN | EPOLLOUT | EPOLLRDNORM | EPOLLWRNORM);
}
/*
@@ -429,7 +430,7 @@ static void evdev_hangup(struct evdev *evdev)
kill_fasync(&client->fasync, SIGIO, POLL_HUP);
spin_unlock(&evdev->client_lock);
- wake_up_interruptible(&evdev->wait);
+ wake_up_interruptible_poll(&evdev->wait, EPOLLHUP | EPOLLERR);
}
static int evdev_release(struct inode *inode, struct file *file)
@@ -945,7 +946,7 @@ static int evdev_revoke(struct evdev *evdev, struct evdev_client *client,
client->revoked = true;
evdev_ungrab(evdev, client);
input_flush_device(&evdev->handle, file);
- wake_up_interruptible(&evdev->wait);
+ wake_up_interruptible_poll(&evdev->wait, EPOLLHUP | EPOLLERR);
return 0;
}
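
The evdev.c hunks above replace wake_up_interruptible() with wake_up_interruptible_poll(), supplying an explicit EPOLL* mask so that epoll waiters subscribed only to other events are not woken needlessly. A minimal sketch of the same producer/poll pairing with hypothetical names (only the standard <linux/wait.h> and <linux/poll.h> helpers are assumed):

#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wait);
static bool demo_data_ready;

/* Producer side: wake only readers, i.e. waiters interested in EPOLLIN. */
static void demo_push_data(void)
{
	demo_data_ready = true;
	wake_up_interruptible_poll(&demo_wait, EPOLLIN | EPOLLRDNORM);
}

/* ->poll() handler: register on the same queue and report readiness. */
static __poll_t demo_poll(struct file *file, poll_table *wait)
{
	__poll_t mask = 0;

	poll_wait(file, &demo_wait, wait);
	if (demo_data_ready)
		mask |= EPOLLIN | EPOLLRDNORM;

	return mask;
}
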
diff --git a/drivers/input/joystick/Kconfig b/drivers/input/joystick/Kconfig
index 940b744639c7..6f73f02059b5 100644
--- a/drivers/input/joystick/Kconfig
+++ b/drivers/input/joystick/Kconfig
@@ -45,6 +45,7 @@ config JOYSTICK_A3D
config JOYSTICK_ADI
tristate "Logitech ADI digital joysticks and gamepads"
select GAMEPORT
+ depends on ADI!=m # avoid module name conflict
help
Say Y here if you have a Logitech controller using the ADI
protocol over the PC gameport.
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 28de965a08d5..793ecbbda32c 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -701,7 +701,7 @@ config KEYBOARD_SPEAR
Say Y here if you want to use the SPEAR keyboard.
To compile this driver as a module, choose M here: the
- module will be called spear-keboard.
+ module will be called spear-keyboard.
config KEYBOARD_TC3589X
tristate "TC3589X Keypad support"
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index 7e3eae54c192..6ec28265771d 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -24,6 +24,7 @@
#include <linux/libps2.h>
#include <linux/mutex.h>
#include <linux/dmi.h>
+#include <linux/property.h>
#define DRIVER_DESC "AT and PS/2 keyboard driver"
@@ -63,6 +64,11 @@ static bool atkbd_terminal;
module_param_named(terminal, atkbd_terminal, bool, 0);
MODULE_PARM_DESC(terminal, "Enable break codes on an IBM Terminal keyboard connected via AT/PS2");
+#define MAX_FUNCTION_ROW_KEYS 24
+
+#define SCANCODE(keymap) ((keymap >> 16) & 0xFFFF)
+#define KEYCODE(keymap) (keymap & 0xFFFF)
+
/*
* Scancode to keycode tables. These are just the default setting, and
* are loadable via a userland utility.
@@ -230,6 +236,9 @@ struct atkbd {
/* Serializes reconnect(), attr->set() and event work */
struct mutex mutex;
+
+ u32 function_row_physmap[MAX_FUNCTION_ROW_KEYS];
+ int num_function_row_keys;
};
/*
@@ -283,6 +292,7 @@ static struct device_attribute atkbd_attr_##_name = \
__ATTR(_name, S_IRUGO, atkbd_do_show_##_name, NULL);
ATKBD_DEFINE_RO_ATTR(err_count);
+ATKBD_DEFINE_RO_ATTR(function_row_physmap);
static struct attribute *atkbd_attributes[] = {
&atkbd_attr_extra.attr,
@@ -292,11 +302,42 @@ static struct attribute *atkbd_attributes[] = {
&atkbd_attr_softrepeat.attr,
&atkbd_attr_softraw.attr,
&atkbd_attr_err_count.attr,
+ &atkbd_attr_function_row_physmap.attr,
NULL
};
+static ssize_t atkbd_show_function_row_physmap(struct atkbd *atkbd, char *buf)
+{
+ ssize_t size = 0;
+ int i;
+
+ if (!atkbd->num_function_row_keys)
+ return 0;
+
+ for (i = 0; i < atkbd->num_function_row_keys; i++)
+ size += scnprintf(buf + size, PAGE_SIZE - size, "%02X ",
+ atkbd->function_row_physmap[i]);
+ size += scnprintf(buf + size, PAGE_SIZE - size, "\n");
+ return size;
+}
+
+static umode_t atkbd_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int i)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct serio *serio = to_serio_port(dev);
+ struct atkbd *atkbd = serio_get_drvdata(serio);
+
+ if (attr == &atkbd_attr_function_row_physmap.attr &&
+ !atkbd->num_function_row_keys)
+ return 0;
+
+ return attr->mode;
+}
+
static struct attribute_group atkbd_attribute_group = {
.attrs = atkbd_attributes,
+ .is_visible = atkbd_attr_is_visible,
};
static const unsigned int xl_table[] = {
@@ -994,6 +1035,39 @@ static unsigned int atkbd_oqo_01plus_scancode_fixup(struct atkbd *atkbd,
return code;
}
+static int atkbd_get_keymap_from_fwnode(struct atkbd *atkbd)
+{
+ struct device *dev = &atkbd->ps2dev.serio->dev;
+ int i, n;
+ u32 *ptr;
+ u16 scancode, keycode;
+
+ /* Parse "linux,keymap" property */
+ n = device_property_count_u32(dev, "linux,keymap");
+ if (n <= 0 || n > ATKBD_KEYMAP_SIZE)
+ return -ENXIO;
+
+ ptr = kcalloc(n, sizeof(u32), GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ if (device_property_read_u32_array(dev, "linux,keymap", ptr, n)) {
+ dev_err(dev, "problem parsing FW keymap property\n");
+ kfree(ptr);
+ return -EINVAL;
+ }
+
+ memset(atkbd->keycode, 0, sizeof(atkbd->keycode));
+ for (i = 0; i < n; i++) {
+ scancode = SCANCODE(ptr[i]);
+ keycode = KEYCODE(ptr[i]);
+ atkbd->keycode[scancode] = keycode;
+ }
+
+ kfree(ptr);
+ return 0;
+}
+
/*
* atkbd_set_keycode_table() initializes keyboard's keycode table
* according to the selected scancode set
@@ -1001,13 +1075,16 @@ static unsigned int atkbd_oqo_01plus_scancode_fixup(struct atkbd *atkbd,
static void atkbd_set_keycode_table(struct atkbd *atkbd)
{
+ struct device *dev = &atkbd->ps2dev.serio->dev;
unsigned int scancode;
int i, j;
memset(atkbd->keycode, 0, sizeof(atkbd->keycode));
bitmap_zero(atkbd->force_release_mask, ATKBD_KEYMAP_SIZE);
- if (atkbd->translated) {
+ if (!atkbd_get_keymap_from_fwnode(atkbd)) {
+ dev_dbg(dev, "Using FW keymap\n");
+ } else if (atkbd->translated) {
for (i = 0; i < 128; i++) {
scancode = atkbd_unxlate_table[i];
atkbd->keycode[i] = atkbd_set2_keycode[scancode];
@@ -1121,6 +1198,22 @@ static void atkbd_set_device_attrs(struct atkbd *atkbd)
}
}
+static void atkbd_parse_fwnode_data(struct serio *serio)
+{
+ struct atkbd *atkbd = serio_get_drvdata(serio);
+ struct device *dev = &serio->dev;
+ int n;
+
+ /* Parse "function-row-physmap" property */
+ n = device_property_count_u32(dev, "function-row-physmap");
+ if (n > 0 && n <= MAX_FUNCTION_ROW_KEYS &&
+ !device_property_read_u32_array(dev, "function-row-physmap",
+ atkbd->function_row_physmap, n)) {
+ atkbd->num_function_row_keys = n;
+ dev_dbg(dev, "FW reported %d function-row key locations\n", n);
+ }
+}
+
/*
* atkbd_connect() is called when the serio module finds an interface
* that isn't handled yet by an appropriate device driver. We check if
@@ -1184,6 +1277,8 @@ static int atkbd_connect(struct serio *serio, struct serio_driver *drv)
atkbd->id = 0xab00;
}
+ atkbd_parse_fwnode_data(serio);
+
atkbd_set_keycode_table(atkbd);
atkbd_set_device_attrs(atkbd);
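
In the atkbd.c changes above, each 32-bit entry of the firmware "linux,keymap" property packs the scancode into the upper 16 bits and the keycode into the lower 16 bits, as the new SCANCODE()/KEYCODE() macros show. A standalone illustration of that encoding with made-up values (scancode 0x3B and keycode 59, i.e. KEY_F1, chosen arbitrarily):

#include <stdint.h>
#include <stdio.h>

#define SCANCODE(keymap)	(((keymap) >> 16) & 0xFFFF)
#define KEYCODE(keymap)		((keymap) & 0xFFFF)

int main(void)
{
	/* Hypothetical keymap entry: scancode 0x3B mapped to keycode 59. */
	uint32_t entry = (0x3Bu << 16) | 59u;

	printf("scancode=0x%02X keycode=%u\n",
	       (unsigned int)SCANCODE(entry), (unsigned int)KEYCODE(entry));
	return 0;
}
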
diff --git a/drivers/input/keyboard/imx_sc_key.c b/drivers/input/keyboard/imx_sc_key.c
index 9f809aeb785c..d18839f1f4f6 100644
--- a/drivers/input/keyboard/imx_sc_key.c
+++ b/drivers/input/keyboard/imx_sc_key.c
@@ -99,6 +99,15 @@ static void imx_sc_check_for_events(struct work_struct *work)
msecs_to_jiffies(REPEAT_INTERVAL));
}
+static void imx_sc_key_action(void *data)
+{
+ struct imx_key_drv_data *priv = data;
+
+ imx_scu_irq_group_enable(SC_IRQ_GROUP_WAKE, SC_IRQ_BUTTON, false);
+ imx_scu_irq_unregister_notifier(&priv->key_notifier);
+ cancel_delayed_work_sync(&priv->check_work);
+}
+
static int imx_sc_key_probe(struct platform_device *pdev)
{
struct imx_key_drv_data *priv;
@@ -149,27 +158,16 @@ static int imx_sc_key_probe(struct platform_device *pdev)
return error;
}
+ error = devm_add_action_or_reset(&pdev->dev, imx_sc_key_action, &priv);
+ if (error)
+ return error;
+
priv->key_notifier.notifier_call = imx_sc_key_notify;
error = imx_scu_irq_register_notifier(&priv->key_notifier);
- if (error) {
- imx_scu_irq_group_enable(SC_IRQ_GROUP_WAKE, SC_IRQ_BUTTON,
- false);
+ if (error)
dev_err(&pdev->dev, "failed to register scu notifier\n");
- return error;
- }
-
- return 0;
-}
-
-static int imx_sc_key_remove(struct platform_device *pdev)
-{
- struct imx_key_drv_data *priv = platform_get_drvdata(pdev);
-
- imx_scu_irq_group_enable(SC_IRQ_GROUP_WAKE, SC_IRQ_BUTTON, false);
- imx_scu_irq_unregister_notifier(&priv->key_notifier);
- cancel_delayed_work_sync(&priv->check_work);
- return 0;
+ return error;
}
static const struct of_device_id imx_sc_key_ids[] = {
@@ -184,7 +182,6 @@ static struct platform_driver imx_sc_key_driver = {
.of_match_table = imx_sc_key_ids,
},
.probe = imx_sc_key_probe,
- .remove = imx_sc_key_remove,
};
module_platform_driver(imx_sc_key_driver);
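
The imx_sc_key.c conversion above drops the .remove callback in favor of devm_add_action_or_reset(), so the same teardown runs both when a later probe step fails and when the device is unbound, in reverse order of registration. A generic sketch of the pattern with a hypothetical driver and cleanup (not this driver's code):

#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_priv {
	struct delayed_work work;
};

static void demo_work_fn(struct work_struct *work)
{
	/* Periodic polling would go here in a real driver. */
}

/* Runs automatically on later probe failure and on device removal. */
static void demo_cleanup(void *data)
{
	struct demo_priv *priv = data;

	cancel_delayed_work_sync(&priv->work);
}

static int demo_probe(struct platform_device *pdev)
{
	struct demo_priv *priv;
	int error;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	INIT_DELAYED_WORK(&priv->work, demo_work_fn);

	/* "or_reset": if registration fails, demo_cleanup() runs right away. */
	error = devm_add_action_or_reset(&pdev->dev, demo_cleanup, priv);
	if (error)
		return error;

	/* ...remaining probe steps; no explicit .remove needed for this. */
	return 0;
}
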
diff --git a/drivers/input/keyboard/tca6416-keypad.c b/drivers/input/keyboard/tca6416-keypad.c
index 21758767ccf0..9b0f9665dcb0 100644
--- a/drivers/input/keyboard/tca6416-keypad.c
+++ b/drivers/input/keyboard/tca6416-keypad.c
@@ -374,5 +374,5 @@ static void __exit tca6416_keypad_exit(void)
module_exit(tca6416_keypad_exit);
MODULE_AUTHOR("Sriramakrishnan <srk@ti.com>");
-MODULE_DESCRIPTION("Keypad driver over tca6146 IO expander");
+MODULE_DESCRIPTION("Keypad driver over tca6416 IO expander");
MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 7e2e658d551c..362e8a01980c 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -117,16 +117,6 @@ config INPUT_E3X0_BUTTON
To compile this driver as a module, choose M here: the
module will be called e3x0_button.
-config INPUT_MSM_VIBRATOR
- tristate "Qualcomm MSM vibrator driver"
- select INPUT_FF_MEMLESS
- help
- Support for the vibrator that is found on various Qualcomm MSM
- SOCs.
-
- To compile this driver as a module, choose M here: the module
- will be called msm_vibrator.
-
config INPUT_PCSPKR
tristate "PC Speaker support"
depends on PCSPKR_PLATFORM
@@ -265,17 +255,6 @@ config INPUT_APANEL
To compile this driver as a module, choose M here: the module will
be called apanel.
-config INPUT_GP2A
- tristate "Sharp GP2AP002A00F I2C Proximity/Opto sensor driver"
- depends on I2C
- depends on GPIOLIB || COMPILE_TEST
- help
- Say Y here if you have a Sharp GP2AP002A00F proximity/als combo-chip
- hooked to an I2C bus.
-
- To compile this driver as a module, choose M here: the
- module will be called gp2ap002a00f.
-
config INPUT_GPIO_BEEPER
tristate "Generic GPIO Beeper support"
depends on GPIOLIB || COMPILE_TEST
@@ -739,6 +718,17 @@ config INPUT_IMS_PCU
To compile this driver as a module, choose M here: the module will be
called ims_pcu.
+config INPUT_IQS269A
+ tristate "Azoteq IQS269A capacitive touch controller"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ Say Y to enable support for the Azoteq IQS269A capacitive
+ touch controller.
+
+ To compile this driver as a module, choose M here: the
+ module will be called iqs269a.
+
config INPUT_CMA3000
tristate "VTI CMA3000 Tri-axis accelerometer"
help
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index 8fd187f314bd..a48e5f2d859d 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -33,13 +33,13 @@ obj-$(CONFIG_INPUT_E3X0_BUTTON) += e3x0-button.o
obj-$(CONFIG_INPUT_DRV260X_HAPTICS) += drv260x.o
obj-$(CONFIG_INPUT_DRV2665_HAPTICS) += drv2665.o
obj-$(CONFIG_INPUT_DRV2667_HAPTICS) += drv2667.o
-obj-$(CONFIG_INPUT_GP2A) += gp2ap002a00f.o
obj-$(CONFIG_INPUT_GPIO_BEEPER) += gpio-beeper.o
obj-$(CONFIG_INPUT_GPIO_DECODER) += gpio_decoder.o
obj-$(CONFIG_INPUT_GPIO_VIBRA) += gpio-vibra.o
obj-$(CONFIG_INPUT_HISI_POWERKEY) += hisi_powerkey.o
obj-$(CONFIG_HP_SDC_RTC) += hp_sdc_rtc.o
obj-$(CONFIG_INPUT_IMS_PCU) += ims-pcu.o
+obj-$(CONFIG_INPUT_IQS269A) += iqs269a.o
obj-$(CONFIG_INPUT_IXP4XX_BEEPER) += ixp4xx-beeper.o
obj-$(CONFIG_INPUT_KEYSPAN_REMOTE) += keyspan_remote.o
obj-$(CONFIG_INPUT_KXTJ9) += kxtj9.o
@@ -50,7 +50,6 @@ obj-$(CONFIG_INPUT_MAX8925_ONKEY) += max8925_onkey.o
obj-$(CONFIG_INPUT_MAX8997_HAPTIC) += max8997_haptic.o
obj-$(CONFIG_INPUT_MC13783_PWRBUTTON) += mc13783-pwrbutton.o
obj-$(CONFIG_INPUT_MMA8450) += mma8450.o
-obj-$(CONFIG_INPUT_MSM_VIBRATOR) += msm-vibrator.o
obj-$(CONFIG_INPUT_PALMAS_PWRBUTTON) += palmas-pwrbutton.o
obj-$(CONFIG_INPUT_PCAP) += pcap_keys.o
obj-$(CONFIG_INPUT_PCF50633_PMU) += pcf50633-input.o
diff --git a/drivers/input/misc/gp2ap002a00f.c b/drivers/input/misc/gp2ap002a00f.c
deleted file mode 100644
index 90abda8eea67..000000000000
--- a/drivers/input/misc/gp2ap002a00f.c
+++ /dev/null
@@ -1,281 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2011 Sony Ericsson Mobile Communications Inc.
- *
- * Author: Courtney Cavin <courtney.cavin@sonyericsson.com>
- * Prepared for up-stream by: Oskar Andero <oskar.andero@sonyericsson.com>
- */
-
-#include <linux/i2c.h>
-#include <linux/irq.h>
-#include <linux/slab.h>
-#include <linux/input.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/gpio.h>
-#include <linux/delay.h>
-#include <linux/input/gp2ap002a00f.h>
-
-struct gp2a_data {
- struct input_dev *input;
- const struct gp2a_platform_data *pdata;
- struct i2c_client *i2c_client;
-};
-
-enum gp2a_addr {
- GP2A_ADDR_PROX = 0x0,
- GP2A_ADDR_GAIN = 0x1,
- GP2A_ADDR_HYS = 0x2,
- GP2A_ADDR_CYCLE = 0x3,
- GP2A_ADDR_OPMOD = 0x4,
- GP2A_ADDR_CON = 0x6
-};
-
-enum gp2a_controls {
- /* Software Shutdown control: 0 = shutdown, 1 = normal operation */
- GP2A_CTRL_SSD = 0x01
-};
-
-static int gp2a_report(struct gp2a_data *dt)
-{
- int vo = gpio_get_value(dt->pdata->vout_gpio);
-
- input_report_switch(dt->input, SW_FRONT_PROXIMITY, !vo);
- input_sync(dt->input);
-
- return 0;
-}
-
-static irqreturn_t gp2a_irq(int irq, void *handle)
-{
- struct gp2a_data *dt = handle;
-
- gp2a_report(dt);
-
- return IRQ_HANDLED;
-}
-
-static int gp2a_enable(struct gp2a_data *dt)
-{
- return i2c_smbus_write_byte_data(dt->i2c_client, GP2A_ADDR_OPMOD,
- GP2A_CTRL_SSD);
-}
-
-static int gp2a_disable(struct gp2a_data *dt)
-{
- return i2c_smbus_write_byte_data(dt->i2c_client, GP2A_ADDR_OPMOD,
- 0x00);
-}
-
-static int gp2a_device_open(struct input_dev *dev)
-{
- struct gp2a_data *dt = input_get_drvdata(dev);
- int error;
-
- error = gp2a_enable(dt);
- if (error < 0) {
- dev_err(&dt->i2c_client->dev,
- "unable to activate, err %d\n", error);
- return error;
- }
-
- gp2a_report(dt);
-
- return 0;
-}
-
-static void gp2a_device_close(struct input_dev *dev)
-{
- struct gp2a_data *dt = input_get_drvdata(dev);
- int error;
-
- error = gp2a_disable(dt);
- if (error < 0)
- dev_err(&dt->i2c_client->dev,
- "unable to deactivate, err %d\n", error);
-}
-
-static int gp2a_initialize(struct gp2a_data *dt)
-{
- int error;
-
- error = i2c_smbus_write_byte_data(dt->i2c_client, GP2A_ADDR_GAIN,
- 0x08);
- if (error < 0)
- return error;
-
- error = i2c_smbus_write_byte_data(dt->i2c_client, GP2A_ADDR_HYS,
- 0xc2);
- if (error < 0)
- return error;
-
- error = i2c_smbus_write_byte_data(dt->i2c_client, GP2A_ADDR_CYCLE,
- 0x04);
- if (error < 0)
- return error;
-
- error = gp2a_disable(dt);
-
- return error;
-}
-
-static int gp2a_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- const struct gp2a_platform_data *pdata = dev_get_platdata(&client->dev);
- struct gp2a_data *dt;
- int error;
-
- if (!pdata)
- return -EINVAL;
-
- if (pdata->hw_setup) {
- error = pdata->hw_setup(client);
- if (error < 0)
- return error;
- }
-
- error = gpio_request_one(pdata->vout_gpio, GPIOF_IN, GP2A_I2C_NAME);
- if (error)
- goto err_hw_shutdown;
-
- dt = kzalloc(sizeof(struct gp2a_data), GFP_KERNEL);
- if (!dt) {
- error = -ENOMEM;
- goto err_free_gpio;
- }
-
- dt->pdata = pdata;
- dt->i2c_client = client;
-
- error = gp2a_initialize(dt);
- if (error < 0)
- goto err_free_mem;
-
- dt->input = input_allocate_device();
- if (!dt->input) {
- error = -ENOMEM;
- goto err_free_mem;
- }
-
- input_set_drvdata(dt->input, dt);
-
- dt->input->open = gp2a_device_open;
- dt->input->close = gp2a_device_close;
- dt->input->name = GP2A_I2C_NAME;
- dt->input->id.bustype = BUS_I2C;
- dt->input->dev.parent = &client->dev;
-
- input_set_capability(dt->input, EV_SW, SW_FRONT_PROXIMITY);
-
- error = request_threaded_irq(client->irq, NULL, gp2a_irq,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
- IRQF_ONESHOT,
- GP2A_I2C_NAME, dt);
- if (error) {
- dev_err(&client->dev, "irq request failed\n");
- goto err_free_input_dev;
- }
-
- error = input_register_device(dt->input);
- if (error) {
- dev_err(&client->dev, "device registration failed\n");
- goto err_free_irq;
- }
-
- device_init_wakeup(&client->dev, pdata->wakeup);
- i2c_set_clientdata(client, dt);
-
- return 0;
-
-err_free_irq:
- free_irq(client->irq, dt);
-err_free_input_dev:
- input_free_device(dt->input);
-err_free_mem:
- kfree(dt);
-err_free_gpio:
- gpio_free(pdata->vout_gpio);
-err_hw_shutdown:
- if (pdata->hw_shutdown)
- pdata->hw_shutdown(client);
- return error;
-}
-
-static int gp2a_remove(struct i2c_client *client)
-{
- struct gp2a_data *dt = i2c_get_clientdata(client);
- const struct gp2a_platform_data *pdata = dt->pdata;
-
- free_irq(client->irq, dt);
-
- input_unregister_device(dt->input);
- kfree(dt);
-
- gpio_free(pdata->vout_gpio);
-
- if (pdata->hw_shutdown)
- pdata->hw_shutdown(client);
-
- return 0;
-}
-
-static int __maybe_unused gp2a_suspend(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct gp2a_data *dt = i2c_get_clientdata(client);
- int retval = 0;
-
- if (device_may_wakeup(&client->dev)) {
- enable_irq_wake(client->irq);
- } else {
- mutex_lock(&dt->input->mutex);
- if (dt->input->users)
- retval = gp2a_disable(dt);
- mutex_unlock(&dt->input->mutex);
- }
-
- return retval;
-}
-
-static int __maybe_unused gp2a_resume(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct gp2a_data *dt = i2c_get_clientdata(client);
- int retval = 0;
-
- if (device_may_wakeup(&client->dev)) {
- disable_irq_wake(client->irq);
- } else {
- mutex_lock(&dt->input->mutex);
- if (dt->input->users)
- retval = gp2a_enable(dt);
- mutex_unlock(&dt->input->mutex);
- }
-
- return retval;
-}
-
-static SIMPLE_DEV_PM_OPS(gp2a_pm, gp2a_suspend, gp2a_resume);
-
-static const struct i2c_device_id gp2a_i2c_id[] = {
- { GP2A_I2C_NAME, 0 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, gp2a_i2c_id);
-
-static struct i2c_driver gp2a_i2c_driver = {
- .driver = {
- .name = GP2A_I2C_NAME,
- .pm = &gp2a_pm,
- },
- .probe = gp2a_probe,
- .remove = gp2a_remove,
- .id_table = gp2a_i2c_id,
-};
-
-module_i2c_driver(gp2a_i2c_driver);
-
-MODULE_AUTHOR("Courtney Cavin <courtney.cavin@sonyericsson.com>");
-MODULE_DESCRIPTION("Sharp GP2AP002A00F I2C Proximity/Opto sensor driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/misc/iqs269a.c b/drivers/input/misc/iqs269a.c
new file mode 100644
index 000000000000..6699eb160a0f
--- /dev/null
+++ b/drivers/input/misc/iqs269a.c
@@ -0,0 +1,1833 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Azoteq IQS269A Capacitive Touch Controller
+ *
+ * Copyright (C) 2020 Jeff LaBundy <jeff@labundy.com>
+ *
+ * This driver registers up to 3 input devices: one representing capacitive or
+ * inductive keys as well as Hall-effect switches, and one for each of the two
+ * axial sliders presented by the device.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#define IQS269_VER_INFO 0x00
+#define IQS269_VER_INFO_PROD_NUM 0x4F
+
+#define IQS269_SYS_FLAGS 0x02
+#define IQS269_SYS_FLAGS_SHOW_RESET BIT(15)
+#define IQS269_SYS_FLAGS_PWR_MODE_MASK GENMASK(12, 11)
+#define IQS269_SYS_FLAGS_PWR_MODE_SHIFT 11
+#define IQS269_SYS_FLAGS_IN_ATI BIT(10)
+
+#define IQS269_CHx_COUNTS 0x08
+
+#define IQS269_SLIDER_X 0x30
+
+#define IQS269_CAL_DATA_A 0x35
+#define IQS269_CAL_DATA_A_HALL_BIN_L_MASK GENMASK(15, 12)
+#define IQS269_CAL_DATA_A_HALL_BIN_L_SHIFT 12
+#define IQS269_CAL_DATA_A_HALL_BIN_R_MASK GENMASK(11, 8)
+#define IQS269_CAL_DATA_A_HALL_BIN_R_SHIFT 8
+
+#define IQS269_SYS_SETTINGS 0x80
+#define IQS269_SYS_SETTINGS_CLK_DIV BIT(15)
+#define IQS269_SYS_SETTINGS_ULP_AUTO BIT(14)
+#define IQS269_SYS_SETTINGS_DIS_AUTO BIT(13)
+#define IQS269_SYS_SETTINGS_PWR_MODE_MASK GENMASK(12, 11)
+#define IQS269_SYS_SETTINGS_PWR_MODE_SHIFT 11
+#define IQS269_SYS_SETTINGS_PWR_MODE_MAX 3
+#define IQS269_SYS_SETTINGS_ULP_UPDATE_MASK GENMASK(10, 8)
+#define IQS269_SYS_SETTINGS_ULP_UPDATE_SHIFT 8
+#define IQS269_SYS_SETTINGS_ULP_UPDATE_MAX 7
+#define IQS269_SYS_SETTINGS_RESEED_OFFSET BIT(6)
+#define IQS269_SYS_SETTINGS_EVENT_MODE BIT(5)
+#define IQS269_SYS_SETTINGS_EVENT_MODE_LP BIT(4)
+#define IQS269_SYS_SETTINGS_REDO_ATI BIT(2)
+#define IQS269_SYS_SETTINGS_ACK_RESET BIT(0)
+
+#define IQS269_FILT_STR_LP_LTA_MASK GENMASK(7, 6)
+#define IQS269_FILT_STR_LP_LTA_SHIFT 6
+#define IQS269_FILT_STR_LP_CNT_MASK GENMASK(5, 4)
+#define IQS269_FILT_STR_LP_CNT_SHIFT 4
+#define IQS269_FILT_STR_NP_LTA_MASK GENMASK(3, 2)
+#define IQS269_FILT_STR_NP_LTA_SHIFT 2
+#define IQS269_FILT_STR_NP_CNT_MASK GENMASK(1, 0)
+#define IQS269_FILT_STR_MAX 3
+
+#define IQS269_EVENT_MASK_SYS BIT(6)
+#define IQS269_EVENT_MASK_DEEP BIT(2)
+#define IQS269_EVENT_MASK_TOUCH BIT(1)
+#define IQS269_EVENT_MASK_PROX BIT(0)
+
+#define IQS269_RATE_NP_MS_MAX 255
+#define IQS269_RATE_LP_MS_MAX 255
+#define IQS269_RATE_ULP_MS_MAX 4080
+#define IQS269_TIMEOUT_PWR_MS_MAX 130560
+#define IQS269_TIMEOUT_LTA_MS_MAX 130560
+
+#define IQS269_MISC_A_ATI_BAND_DISABLE BIT(15)
+#define IQS269_MISC_A_ATI_LP_ONLY BIT(14)
+#define IQS269_MISC_A_ATI_BAND_TIGHTEN BIT(13)
+#define IQS269_MISC_A_FILT_DISABLE BIT(12)
+#define IQS269_MISC_A_GPIO3_SELECT_MASK GENMASK(10, 8)
+#define IQS269_MISC_A_GPIO3_SELECT_SHIFT 8
+#define IQS269_MISC_A_DUAL_DIR BIT(6)
+#define IQS269_MISC_A_TX_FREQ_MASK GENMASK(5, 4)
+#define IQS269_MISC_A_TX_FREQ_SHIFT 4
+#define IQS269_MISC_A_TX_FREQ_MAX 3
+#define IQS269_MISC_A_GLOBAL_CAP_SIZE BIT(0)
+
+#define IQS269_MISC_B_RESEED_UI_SEL_MASK GENMASK(7, 6)
+#define IQS269_MISC_B_RESEED_UI_SEL_SHIFT 6
+#define IQS269_MISC_B_RESEED_UI_SEL_MAX 3
+#define IQS269_MISC_B_TRACKING_UI_ENABLE BIT(4)
+#define IQS269_MISC_B_FILT_STR_SLIDER GENMASK(1, 0)
+
+#define IQS269_CHx_SETTINGS 0x8C
+
+#define IQS269_CHx_ENG_A_MEAS_CAP_SIZE BIT(15)
+#define IQS269_CHx_ENG_A_RX_GND_INACTIVE BIT(13)
+#define IQS269_CHx_ENG_A_LOCAL_CAP_SIZE BIT(12)
+#define IQS269_CHx_ENG_A_ATI_MODE_MASK GENMASK(9, 8)
+#define IQS269_CHx_ENG_A_ATI_MODE_SHIFT 8
+#define IQS269_CHx_ENG_A_ATI_MODE_MAX 3
+#define IQS269_CHx_ENG_A_INV_LOGIC BIT(7)
+#define IQS269_CHx_ENG_A_PROJ_BIAS_MASK GENMASK(6, 5)
+#define IQS269_CHx_ENG_A_PROJ_BIAS_SHIFT 5
+#define IQS269_CHx_ENG_A_PROJ_BIAS_MAX 3
+#define IQS269_CHx_ENG_A_SENSE_MODE_MASK GENMASK(3, 0)
+#define IQS269_CHx_ENG_A_SENSE_MODE_MAX 15
+
+#define IQS269_CHx_ENG_B_LOCAL_CAP_ENABLE BIT(13)
+#define IQS269_CHx_ENG_B_SENSE_FREQ_MASK GENMASK(10, 9)
+#define IQS269_CHx_ENG_B_SENSE_FREQ_SHIFT 9
+#define IQS269_CHx_ENG_B_SENSE_FREQ_MAX 3
+#define IQS269_CHx_ENG_B_STATIC_ENABLE BIT(8)
+#define IQS269_CHx_ENG_B_ATI_BASE_MASK GENMASK(7, 6)
+#define IQS269_CHx_ENG_B_ATI_BASE_75 0x00
+#define IQS269_CHx_ENG_B_ATI_BASE_100 0x40
+#define IQS269_CHx_ENG_B_ATI_BASE_150 0x80
+#define IQS269_CHx_ENG_B_ATI_BASE_200 0xC0
+#define IQS269_CHx_ENG_B_ATI_TARGET_MASK GENMASK(5, 0)
+#define IQS269_CHx_ENG_B_ATI_TARGET_MAX 2016
+
+#define IQS269_CHx_WEIGHT_MAX 255
+#define IQS269_CHx_THRESH_MAX 255
+#define IQS269_CHx_HYST_DEEP_MASK GENMASK(7, 4)
+#define IQS269_CHx_HYST_DEEP_SHIFT 4
+#define IQS269_CHx_HYST_TOUCH_MASK GENMASK(3, 0)
+#define IQS269_CHx_HYST_MAX 15
+
+#define IQS269_CHx_HALL_INACTIVE 6
+#define IQS269_CHx_HALL_ACTIVE 7
+
+#define IQS269_HALL_PAD_R BIT(0)
+#define IQS269_HALL_PAD_L BIT(1)
+#define IQS269_HALL_PAD_INV BIT(6)
+
+#define IQS269_HALL_UI 0xF5
+#define IQS269_HALL_UI_ENABLE BIT(15)
+
+#define IQS269_MAX_REG 0xFF
+
+#define IQS269_NUM_CH 8
+#define IQS269_NUM_SL 2
+
+#define IQS269_ATI_POLL_SLEEP_US (iqs269->delay_mult * 10000)
+#define IQS269_ATI_POLL_TIMEOUT_US (iqs269->delay_mult * 500000)
+#define IQS269_ATI_STABLE_DELAY_MS (iqs269->delay_mult * 150)
+
+#define IQS269_PWR_MODE_POLL_SLEEP_US IQS269_ATI_POLL_SLEEP_US
+#define IQS269_PWR_MODE_POLL_TIMEOUT_US IQS269_ATI_POLL_TIMEOUT_US
+
+#define iqs269_irq_wait() usleep_range(100, 150)
+
+enum iqs269_local_cap_size {
+ IQS269_LOCAL_CAP_SIZE_0,
+ IQS269_LOCAL_CAP_SIZE_GLOBAL_ONLY,
+ IQS269_LOCAL_CAP_SIZE_GLOBAL_0pF5,
+};
+
+enum iqs269_st_offs {
+ IQS269_ST_OFFS_PROX,
+ IQS269_ST_OFFS_DIR,
+ IQS269_ST_OFFS_TOUCH,
+ IQS269_ST_OFFS_DEEP,
+};
+
+enum iqs269_th_offs {
+ IQS269_TH_OFFS_PROX,
+ IQS269_TH_OFFS_TOUCH,
+ IQS269_TH_OFFS_DEEP,
+};
+
+enum iqs269_event_id {
+ IQS269_EVENT_PROX_DN,
+ IQS269_EVENT_PROX_UP,
+ IQS269_EVENT_TOUCH_DN,
+ IQS269_EVENT_TOUCH_UP,
+ IQS269_EVENT_DEEP_DN,
+ IQS269_EVENT_DEEP_UP,
+};
+
+struct iqs269_switch_desc {
+ unsigned int code;
+ bool enabled;
+};
+
+struct iqs269_event_desc {
+ const char *name;
+ enum iqs269_st_offs st_offs;
+ enum iqs269_th_offs th_offs;
+ bool dir_up;
+ u8 mask;
+};
+
+static const struct iqs269_event_desc iqs269_events[] = {
+ [IQS269_EVENT_PROX_DN] = {
+ .name = "event-prox",
+ .st_offs = IQS269_ST_OFFS_PROX,
+ .th_offs = IQS269_TH_OFFS_PROX,
+ .mask = IQS269_EVENT_MASK_PROX,
+ },
+ [IQS269_EVENT_PROX_UP] = {
+ .name = "event-prox-alt",
+ .st_offs = IQS269_ST_OFFS_PROX,
+ .th_offs = IQS269_TH_OFFS_PROX,
+ .dir_up = true,
+ .mask = IQS269_EVENT_MASK_PROX,
+ },
+ [IQS269_EVENT_TOUCH_DN] = {
+ .name = "event-touch",
+ .st_offs = IQS269_ST_OFFS_TOUCH,
+ .th_offs = IQS269_TH_OFFS_TOUCH,
+ .mask = IQS269_EVENT_MASK_TOUCH,
+ },
+ [IQS269_EVENT_TOUCH_UP] = {
+ .name = "event-touch-alt",
+ .st_offs = IQS269_ST_OFFS_TOUCH,
+ .th_offs = IQS269_TH_OFFS_TOUCH,
+ .dir_up = true,
+ .mask = IQS269_EVENT_MASK_TOUCH,
+ },
+ [IQS269_EVENT_DEEP_DN] = {
+ .name = "event-deep",
+ .st_offs = IQS269_ST_OFFS_DEEP,
+ .th_offs = IQS269_TH_OFFS_DEEP,
+ .mask = IQS269_EVENT_MASK_DEEP,
+ },
+ [IQS269_EVENT_DEEP_UP] = {
+ .name = "event-deep-alt",
+ .st_offs = IQS269_ST_OFFS_DEEP,
+ .th_offs = IQS269_TH_OFFS_DEEP,
+ .dir_up = true,
+ .mask = IQS269_EVENT_MASK_DEEP,
+ },
+};
+
+struct iqs269_ver_info {
+ u8 prod_num;
+ u8 sw_num;
+ u8 hw_num;
+ u8 padding;
+} __packed;
+
+struct iqs269_sys_reg {
+ __be16 general;
+ u8 active;
+ u8 filter;
+ u8 reseed;
+ u8 event_mask;
+ u8 rate_np;
+ u8 rate_lp;
+ u8 rate_ulp;
+ u8 timeout_pwr;
+ u8 timeout_rdy;
+ u8 timeout_lta;
+ __be16 misc_a;
+ __be16 misc_b;
+ u8 blocking;
+ u8 padding;
+ u8 slider_select[IQS269_NUM_SL];
+ u8 timeout_tap;
+ u8 timeout_swipe;
+ u8 thresh_swipe;
+ u8 redo_ati;
+} __packed;
+
+struct iqs269_ch_reg {
+ u8 rx_enable;
+ u8 tx_enable;
+ __be16 engine_a;
+ __be16 engine_b;
+ __be16 ati_comp;
+ u8 thresh[3];
+ u8 hyst;
+ u8 assoc_select;
+ u8 assoc_weight;
+} __packed;
+
+struct iqs269_flags {
+ __be16 system;
+ u8 gesture;
+ u8 padding;
+ u8 states[4];
+} __packed;
+
+struct iqs269_private {
+ struct i2c_client *client;
+ struct regmap *regmap;
+ struct mutex lock;
+ struct iqs269_switch_desc switches[ARRAY_SIZE(iqs269_events)];
+ struct iqs269_ch_reg ch_reg[IQS269_NUM_CH];
+ struct iqs269_sys_reg sys_reg;
+ struct input_dev *keypad;
+ struct input_dev *slider[IQS269_NUM_SL];
+ unsigned int keycode[ARRAY_SIZE(iqs269_events) * IQS269_NUM_CH];
+ unsigned int suspend_mode;
+ unsigned int delay_mult;
+ unsigned int ch_num;
+ bool hall_enable;
+ bool ati_current;
+};
+
+static int iqs269_ati_mode_set(struct iqs269_private *iqs269,
+ unsigned int ch_num, unsigned int mode)
+{
+ u16 engine_a;
+
+ if (ch_num >= IQS269_NUM_CH)
+ return -EINVAL;
+
+ if (mode > IQS269_CHx_ENG_A_ATI_MODE_MAX)
+ return -EINVAL;
+
+ mutex_lock(&iqs269->lock);
+
+ engine_a = be16_to_cpu(iqs269->ch_reg[ch_num].engine_a);
+
+ engine_a &= ~IQS269_CHx_ENG_A_ATI_MODE_MASK;
+ engine_a |= (mode << IQS269_CHx_ENG_A_ATI_MODE_SHIFT);
+
+ iqs269->ch_reg[ch_num].engine_a = cpu_to_be16(engine_a);
+ iqs269->ati_current = false;
+
+ mutex_unlock(&iqs269->lock);
+
+ return 0;
+}
+
+static int iqs269_ati_mode_get(struct iqs269_private *iqs269,
+ unsigned int ch_num, unsigned int *mode)
+{
+ u16 engine_a;
+
+ if (ch_num >= IQS269_NUM_CH)
+ return -EINVAL;
+
+ mutex_lock(&iqs269->lock);
+ engine_a = be16_to_cpu(iqs269->ch_reg[ch_num].engine_a);
+ mutex_unlock(&iqs269->lock);
+
+ engine_a &= IQS269_CHx_ENG_A_ATI_MODE_MASK;
+ *mode = (engine_a >> IQS269_CHx_ENG_A_ATI_MODE_SHIFT);
+
+ return 0;
+}
+
+static int iqs269_ati_base_set(struct iqs269_private *iqs269,
+ unsigned int ch_num, unsigned int base)
+{
+ u16 engine_b;
+
+ if (ch_num >= IQS269_NUM_CH)
+ return -EINVAL;
+
+ switch (base) {
+ case 75:
+ base = IQS269_CHx_ENG_B_ATI_BASE_75;
+ break;
+
+ case 100:
+ base = IQS269_CHx_ENG_B_ATI_BASE_100;
+ break;
+
+ case 150:
+ base = IQS269_CHx_ENG_B_ATI_BASE_150;
+ break;
+
+ case 200:
+ base = IQS269_CHx_ENG_B_ATI_BASE_200;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ mutex_lock(&iqs269->lock);
+
+ engine_b = be16_to_cpu(iqs269->ch_reg[ch_num].engine_b);
+
+ engine_b &= ~IQS269_CHx_ENG_B_ATI_BASE_MASK;
+ engine_b |= base;
+
+ iqs269->ch_reg[ch_num].engine_b = cpu_to_be16(engine_b);
+ iqs269->ati_current = false;
+
+ mutex_unlock(&iqs269->lock);
+
+ return 0;
+}
+
+static int iqs269_ati_base_get(struct iqs269_private *iqs269,
+ unsigned int ch_num, unsigned int *base)
+{
+ u16 engine_b;
+
+ if (ch_num >= IQS269_NUM_CH)
+ return -EINVAL;
+
+ mutex_lock(&iqs269->lock);
+ engine_b = be16_to_cpu(iqs269->ch_reg[ch_num].engine_b);
+ mutex_unlock(&iqs269->lock);
+
+ switch (engine_b & IQS269_CHx_ENG_B_ATI_BASE_MASK) {
+ case IQS269_CHx_ENG_B_ATI_BASE_75:
+ *base = 75;
+ return 0;
+
+ case IQS269_CHx_ENG_B_ATI_BASE_100:
+ *base = 100;
+ return 0;
+
+ case IQS269_CHx_ENG_B_ATI_BASE_150:
+ *base = 150;
+ return 0;
+
+ case IQS269_CHx_ENG_B_ATI_BASE_200:
+ *base = 200;
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int iqs269_ati_target_set(struct iqs269_private *iqs269,
+ unsigned int ch_num, unsigned int target)
+{
+ u16 engine_b;
+
+ if (ch_num >= IQS269_NUM_CH)
+ return -EINVAL;
+
+ if (target > IQS269_CHx_ENG_B_ATI_TARGET_MAX)
+ return -EINVAL;
+
+ mutex_lock(&iqs269->lock);
+
+ engine_b = be16_to_cpu(iqs269->ch_reg[ch_num].engine_b);
+
+ engine_b &= ~IQS269_CHx_ENG_B_ATI_TARGET_MASK;
+ engine_b |= target / 32;
+
+ iqs269->ch_reg[ch_num].engine_b = cpu_to_be16(engine_b);
+ iqs269->ati_current = false;
+
+ mutex_unlock(&iqs269->lock);
+
+ return 0;
+}
+
+static int iqs269_ati_target_get(struct iqs269_private *iqs269,
+ unsigned int ch_num, unsigned int *target)
+{
+ u16 engine_b;
+
+ if (ch_num >= IQS269_NUM_CH)
+ return -EINVAL;
+
+ mutex_lock(&iqs269->lock);
+ engine_b = be16_to_cpu(iqs269->ch_reg[ch_num].engine_b);
+ mutex_unlock(&iqs269->lock);
+
+ *target = (engine_b & IQS269_CHx_ENG_B_ATI_TARGET_MASK) * 32;
+
+ return 0;
+}
+
+static int iqs269_parse_mask(const struct fwnode_handle *fwnode,
+ const char *propname, u8 *mask)
+{
+ unsigned int val[IQS269_NUM_CH];
+ int count, error, i;
+
+ count = fwnode_property_count_u32(fwnode, propname);
+ if (count < 0)
+ return 0;
+
+ if (count > IQS269_NUM_CH)
+ return -EINVAL;
+
+ error = fwnode_property_read_u32_array(fwnode, propname, val, count);
+ if (error)
+ return error;
+
+ *mask = 0;
+
+ for (i = 0; i < count; i++) {
+ if (val[i] >= IQS269_NUM_CH)
+ return -EINVAL;
+
+ *mask |= BIT(val[i]);
+ }
+
+ return 0;
+}
+
+static int iqs269_parse_chan(struct iqs269_private *iqs269,
+ const struct fwnode_handle *ch_node)
+{
+ struct i2c_client *client = iqs269->client;
+ struct fwnode_handle *ev_node;
+ struct iqs269_ch_reg *ch_reg;
+ u16 engine_a, engine_b;
+ unsigned int reg, val;
+ int error, i;
+
+ error = fwnode_property_read_u32(ch_node, "reg", &reg);
+ if (error) {
+ dev_err(&client->dev, "Failed to read channel number: %d\n",
+ error);
+ return error;
+ } else if (reg >= IQS269_NUM_CH) {
+ dev_err(&client->dev, "Invalid channel number: %u\n", reg);
+ return -EINVAL;
+ }
+
+ iqs269->sys_reg.active |= BIT(reg);
+ if (!fwnode_property_present(ch_node, "azoteq,reseed-disable"))
+ iqs269->sys_reg.reseed |= BIT(reg);
+
+ if (fwnode_property_present(ch_node, "azoteq,blocking-enable"))
+ iqs269->sys_reg.blocking |= BIT(reg);
+
+ if (fwnode_property_present(ch_node, "azoteq,slider0-select"))
+ iqs269->sys_reg.slider_select[0] |= BIT(reg);
+
+ if (fwnode_property_present(ch_node, "azoteq,slider1-select"))
+ iqs269->sys_reg.slider_select[1] |= BIT(reg);
+
+ ch_reg = &iqs269->ch_reg[reg];
+
+ error = regmap_raw_read(iqs269->regmap,
+ IQS269_CHx_SETTINGS + reg * sizeof(*ch_reg) / 2,
+ ch_reg, sizeof(*ch_reg));
+ if (error)
+ return error;
+
+ error = iqs269_parse_mask(ch_node, "azoteq,rx-enable",
+ &ch_reg->rx_enable);
+ if (error) {
+ dev_err(&client->dev, "Invalid channel %u RX enable mask: %d\n",
+ reg, error);
+ return error;
+ }
+
+ error = iqs269_parse_mask(ch_node, "azoteq,tx-enable",
+ &ch_reg->tx_enable);
+ if (error) {
+ dev_err(&client->dev, "Invalid channel %u TX enable mask: %d\n",
+ reg, error);
+ return error;
+ }
+
+ engine_a = be16_to_cpu(ch_reg->engine_a);
+ engine_b = be16_to_cpu(ch_reg->engine_b);
+
+ engine_a |= IQS269_CHx_ENG_A_MEAS_CAP_SIZE;
+ if (fwnode_property_present(ch_node, "azoteq,meas-cap-decrease"))
+ engine_a &= ~IQS269_CHx_ENG_A_MEAS_CAP_SIZE;
+
+ engine_a |= IQS269_CHx_ENG_A_RX_GND_INACTIVE;
+ if (fwnode_property_present(ch_node, "azoteq,rx-float-inactive"))
+ engine_a &= ~IQS269_CHx_ENG_A_RX_GND_INACTIVE;
+
+ engine_a &= ~IQS269_CHx_ENG_A_LOCAL_CAP_SIZE;
+ engine_b &= ~IQS269_CHx_ENG_B_LOCAL_CAP_ENABLE;
+ if (!fwnode_property_read_u32(ch_node, "azoteq,local-cap-size", &val)) {
+ switch (val) {
+ case IQS269_LOCAL_CAP_SIZE_0:
+ break;
+
+ case IQS269_LOCAL_CAP_SIZE_GLOBAL_0pF5:
+ engine_a |= IQS269_CHx_ENG_A_LOCAL_CAP_SIZE;
+
+ /* fall through */
+
+ case IQS269_LOCAL_CAP_SIZE_GLOBAL_ONLY:
+ engine_b |= IQS269_CHx_ENG_B_LOCAL_CAP_ENABLE;
+ break;
+
+ default:
+ dev_err(&client->dev,
+ "Invalid channel %u local cap. size: %u\n", reg,
+ val);
+ return -EINVAL;
+ }
+ }
+
+ engine_a &= ~IQS269_CHx_ENG_A_INV_LOGIC;
+ if (fwnode_property_present(ch_node, "azoteq,invert-enable"))
+ engine_a |= IQS269_CHx_ENG_A_INV_LOGIC;
+
+ if (!fwnode_property_read_u32(ch_node, "azoteq,proj-bias", &val)) {
+ if (val > IQS269_CHx_ENG_A_PROJ_BIAS_MAX) {
+ dev_err(&client->dev,
+ "Invalid channel %u bias current: %u\n", reg,
+ val);
+ return -EINVAL;
+ }
+
+ engine_a &= ~IQS269_CHx_ENG_A_PROJ_BIAS_MASK;
+ engine_a |= (val << IQS269_CHx_ENG_A_PROJ_BIAS_SHIFT);
+ }
+
+ if (!fwnode_property_read_u32(ch_node, "azoteq,sense-mode", &val)) {
+ if (val > IQS269_CHx_ENG_A_SENSE_MODE_MAX) {
+ dev_err(&client->dev,
+ "Invalid channel %u sensing mode: %u\n", reg,
+ val);
+ return -EINVAL;
+ }
+
+ engine_a &= ~IQS269_CHx_ENG_A_SENSE_MODE_MASK;
+ engine_a |= val;
+ }
+
+ if (!fwnode_property_read_u32(ch_node, "azoteq,sense-freq", &val)) {
+ if (val > IQS269_CHx_ENG_B_SENSE_FREQ_MAX) {
+ dev_err(&client->dev,
+ "Invalid channel %u sensing frequency: %u\n",
+ reg, val);
+ return -EINVAL;
+ }
+
+ engine_b &= ~IQS269_CHx_ENG_B_SENSE_FREQ_MASK;
+ engine_b |= (val << IQS269_CHx_ENG_B_SENSE_FREQ_SHIFT);
+ }
+
+ engine_b &= ~IQS269_CHx_ENG_B_STATIC_ENABLE;
+ if (fwnode_property_present(ch_node, "azoteq,static-enable"))
+ engine_b |= IQS269_CHx_ENG_B_STATIC_ENABLE;
+
+ ch_reg->engine_a = cpu_to_be16(engine_a);
+ ch_reg->engine_b = cpu_to_be16(engine_b);
+
+ if (!fwnode_property_read_u32(ch_node, "azoteq,ati-mode", &val)) {
+ error = iqs269_ati_mode_set(iqs269, reg, val);
+ if (error) {
+ dev_err(&client->dev,
+ "Invalid channel %u ATI mode: %u\n", reg, val);
+ return error;
+ }
+ }
+
+ if (!fwnode_property_read_u32(ch_node, "azoteq,ati-base", &val)) {
+ error = iqs269_ati_base_set(iqs269, reg, val);
+ if (error) {
+ dev_err(&client->dev,
+ "Invalid channel %u ATI base: %u\n", reg, val);
+ return error;
+ }
+ }
+
+ if (!fwnode_property_read_u32(ch_node, "azoteq,ati-target", &val)) {
+ error = iqs269_ati_target_set(iqs269, reg, val);
+ if (error) {
+ dev_err(&client->dev,
+ "Invalid channel %u ATI target: %u\n", reg,
+ val);
+ return error;
+ }
+ }
+
+ error = iqs269_parse_mask(ch_node, "azoteq,assoc-select",
+ &ch_reg->assoc_select);
+ if (error) {
+ dev_err(&client->dev, "Invalid channel %u association: %d\n",
+ reg, error);
+ return error;
+ }
+
+ if (!fwnode_property_read_u32(ch_node, "azoteq,assoc-weight", &val)) {
+ if (val > IQS269_CHx_WEIGHT_MAX) {
+ dev_err(&client->dev,
+ "Invalid channel %u associated weight: %u\n",
+ reg, val);
+ return -EINVAL;
+ }
+
+ ch_reg->assoc_weight = val;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(iqs269_events); i++) {
+ ev_node = fwnode_get_named_child_node(ch_node,
+ iqs269_events[i].name);
+ if (!ev_node)
+ continue;
+
+ if (!fwnode_property_read_u32(ev_node, "azoteq,thresh", &val)) {
+ if (val > IQS269_CHx_THRESH_MAX) {
+ dev_err(&client->dev,
+ "Invalid channel %u threshold: %u\n",
+ reg, val);
+ return -EINVAL;
+ }
+
+ ch_reg->thresh[iqs269_events[i].th_offs] = val;
+ }
+
+ if (!fwnode_property_read_u32(ev_node, "azoteq,hyst", &val)) {
+ u8 *hyst = &ch_reg->hyst;
+
+ if (val > IQS269_CHx_HYST_MAX) {
+ dev_err(&client->dev,
+ "Invalid channel %u hysteresis: %u\n",
+ reg, val);
+ return -EINVAL;
+ }
+
+ if (i == IQS269_EVENT_DEEP_DN ||
+ i == IQS269_EVENT_DEEP_UP) {
+ *hyst &= ~IQS269_CHx_HYST_DEEP_MASK;
+ *hyst |= (val << IQS269_CHx_HYST_DEEP_SHIFT);
+ } else if (i == IQS269_EVENT_TOUCH_DN ||
+ i == IQS269_EVENT_TOUCH_UP) {
+ *hyst &= ~IQS269_CHx_HYST_TOUCH_MASK;
+ *hyst |= val;
+ }
+ }
+
+ if (fwnode_property_read_u32(ev_node, "linux,code", &val))
+ continue;
+
+ switch (reg) {
+ case IQS269_CHx_HALL_ACTIVE:
+ if (iqs269->hall_enable) {
+ iqs269->switches[i].code = val;
+ iqs269->switches[i].enabled = true;
+ }
+
+ /* fall through */
+
+ case IQS269_CHx_HALL_INACTIVE:
+ if (iqs269->hall_enable)
+ break;
+
+ /* fall through */
+
+ default:
+ iqs269->keycode[i * IQS269_NUM_CH + reg] = val;
+ }
+
+ iqs269->sys_reg.event_mask &= ~iqs269_events[i].mask;
+ }
+
+ return 0;
+}
+
+static int iqs269_parse_prop(struct iqs269_private *iqs269)
+{
+ struct iqs269_sys_reg *sys_reg = &iqs269->sys_reg;
+ struct i2c_client *client = iqs269->client;
+ struct fwnode_handle *ch_node;
+ u16 general, misc_a, misc_b;
+ unsigned int val;
+ int error;
+
+ iqs269->hall_enable = device_property_present(&client->dev,
+ "azoteq,hall-enable");
+
+ if (!device_property_read_u32(&client->dev, "azoteq,suspend-mode",
+ &val)) {
+ if (val > IQS269_SYS_SETTINGS_PWR_MODE_MAX) {
+ dev_err(&client->dev, "Invalid suspend mode: %u\n",
+ val);
+ return -EINVAL;
+ }
+
+ iqs269->suspend_mode = val;
+ }
+
+ error = regmap_raw_read(iqs269->regmap, IQS269_SYS_SETTINGS, sys_reg,
+ sizeof(*sys_reg));
+ if (error)
+ return error;
+
+ if (!device_property_read_u32(&client->dev, "azoteq,filt-str-lp-lta",
+ &val)) {
+ if (val > IQS269_FILT_STR_MAX) {
+ dev_err(&client->dev, "Invalid filter strength: %u\n",
+ val);
+ return -EINVAL;
+ }
+
+ sys_reg->filter &= ~IQS269_FILT_STR_LP_LTA_MASK;
+ sys_reg->filter |= (val << IQS269_FILT_STR_LP_LTA_SHIFT);
+ }
+
+ if (!device_property_read_u32(&client->dev, "azoteq,filt-str-lp-cnt",
+ &val)) {
+ if (val > IQS269_FILT_STR_MAX) {
+ dev_err(&client->dev, "Invalid filter strength: %u\n",
+ val);
+ return -EINVAL;
+ }
+
+ sys_reg->filter &= ~IQS269_FILT_STR_LP_CNT_MASK;
+ sys_reg->filter |= (val << IQS269_FILT_STR_LP_CNT_SHIFT);
+ }
+
+ if (!device_property_read_u32(&client->dev, "azoteq,filt-str-np-lta",
+ &val)) {
+ if (val > IQS269_FILT_STR_MAX) {
+ dev_err(&client->dev, "Invalid filter strength: %u\n",
+ val);
+ return -EINVAL;
+ }
+
+ sys_reg->filter &= ~IQS269_FILT_STR_NP_LTA_MASK;
+ sys_reg->filter |= (val << IQS269_FILT_STR_NP_LTA_SHIFT);
+ }
+
+ if (!device_property_read_u32(&client->dev, "azoteq,filt-str-np-cnt",
+ &val)) {
+ if (val > IQS269_FILT_STR_MAX) {
+ dev_err(&client->dev, "Invalid filter strength: %u\n",
+ val);
+ return -EINVAL;
+ }
+
+ sys_reg->filter &= ~IQS269_FILT_STR_NP_CNT_MASK;
+ sys_reg->filter |= val;
+ }
+
+ if (!device_property_read_u32(&client->dev, "azoteq,rate-np-ms",
+ &val)) {
+ if (val > IQS269_RATE_NP_MS_MAX) {
+ dev_err(&client->dev, "Invalid report rate: %u\n", val);
+ return -EINVAL;
+ }
+
+ sys_reg->rate_np = val;
+ }
+
+ if (!device_property_read_u32(&client->dev, "azoteq,rate-lp-ms",
+ &val)) {
+ if (val > IQS269_RATE_LP_MS_MAX) {
+ dev_err(&client->dev, "Invalid report rate: %u\n", val);
+ return -EINVAL;
+ }
+
+ sys_reg->rate_lp = val;
+ }
+
+ if (!device_property_read_u32(&client->dev, "azoteq,rate-ulp-ms",
+ &val)) {
+ if (val > IQS269_RATE_ULP_MS_MAX) {
+ dev_err(&client->dev, "Invalid report rate: %u\n", val);
+ return -EINVAL;
+ }
+
+ sys_reg->rate_ulp = val / 16;
+ }
+
+ if (!device_property_read_u32(&client->dev, "azoteq,timeout-pwr-ms",
+ &val)) {
+ if (val > IQS269_TIMEOUT_PWR_MS_MAX) {
+ dev_err(&client->dev, "Invalid timeout: %u\n", val);
+ return -EINVAL;
+ }
+
+ sys_reg->timeout_pwr = val / 512;
+ }
+
+ if (!device_property_read_u32(&client->dev, "azoteq,timeout-lta-ms",
+ &val)) {
+ if (val > IQS269_TIMEOUT_LTA_MS_MAX) {
+ dev_err(&client->dev, "Invalid timeout: %u\n", val);
+ return -EINVAL;
+ }
+
+ sys_reg->timeout_lta = val / 512;
+ }
+
+ misc_a = be16_to_cpu(sys_reg->misc_a);
+ misc_b = be16_to_cpu(sys_reg->misc_b);
+
+ misc_a &= ~IQS269_MISC_A_ATI_BAND_DISABLE;
+ if (device_property_present(&client->dev, "azoteq,ati-band-disable"))
+ misc_a |= IQS269_MISC_A_ATI_BAND_DISABLE;
+
+ misc_a &= ~IQS269_MISC_A_ATI_LP_ONLY;
+ if (device_property_present(&client->dev, "azoteq,ati-lp-only"))
+ misc_a |= IQS269_MISC_A_ATI_LP_ONLY;
+
+ misc_a &= ~IQS269_MISC_A_ATI_BAND_TIGHTEN;
+ if (device_property_present(&client->dev, "azoteq,ati-band-tighten"))
+ misc_a |= IQS269_MISC_A_ATI_BAND_TIGHTEN;
+
+ misc_a &= ~IQS269_MISC_A_FILT_DISABLE;
+ if (device_property_present(&client->dev, "azoteq,filt-disable"))
+ misc_a |= IQS269_MISC_A_FILT_DISABLE;
+
+ if (!device_property_read_u32(&client->dev, "azoteq,gpio3-select",
+ &val)) {
+ if (val >= IQS269_NUM_CH) {
+ dev_err(&client->dev, "Invalid GPIO3 selection: %u\n",
+ val);
+ return -EINVAL;
+ }
+
+ misc_a &= ~IQS269_MISC_A_GPIO3_SELECT_MASK;
+ misc_a |= (val << IQS269_MISC_A_GPIO3_SELECT_SHIFT);
+ }
+
+ misc_a &= ~IQS269_MISC_A_DUAL_DIR;
+ if (device_property_present(&client->dev, "azoteq,dual-direction"))
+ misc_a |= IQS269_MISC_A_DUAL_DIR;
+
+ if (!device_property_read_u32(&client->dev, "azoteq,tx-freq", &val)) {
+ if (val > IQS269_MISC_A_TX_FREQ_MAX) {
+ dev_err(&client->dev,
+ "Invalid excitation frequency: %u\n", val);
+ return -EINVAL;
+ }
+
+ misc_a &= ~IQS269_MISC_A_TX_FREQ_MASK;
+ misc_a |= (val << IQS269_MISC_A_TX_FREQ_SHIFT);
+ }
+
+ misc_a &= ~IQS269_MISC_A_GLOBAL_CAP_SIZE;
+ if (device_property_present(&client->dev, "azoteq,global-cap-increase"))
+ misc_a |= IQS269_MISC_A_GLOBAL_CAP_SIZE;
+
+ if (!device_property_read_u32(&client->dev, "azoteq,reseed-select",
+ &val)) {
+ if (val > IQS269_MISC_B_RESEED_UI_SEL_MAX) {
+ dev_err(&client->dev, "Invalid reseed selection: %u\n",
+ val);
+ return -EINVAL;
+ }
+
+ misc_b &= ~IQS269_MISC_B_RESEED_UI_SEL_MASK;
+ misc_b |= (val << IQS269_MISC_B_RESEED_UI_SEL_SHIFT);
+ }
+
+ misc_b &= ~IQS269_MISC_B_TRACKING_UI_ENABLE;
+ if (device_property_present(&client->dev, "azoteq,tracking-enable"))
+ misc_b |= IQS269_MISC_B_TRACKING_UI_ENABLE;
+
+ if (!device_property_read_u32(&client->dev, "azoteq,filt-str-slider",
+ &val)) {
+ if (val > IQS269_FILT_STR_MAX) {
+ dev_err(&client->dev, "Invalid filter strength: %u\n",
+ val);
+ return -EINVAL;
+ }
+
+ misc_b &= ~IQS269_MISC_B_FILT_STR_SLIDER;
+ misc_b |= val;
+ }
+
+ sys_reg->misc_a = cpu_to_be16(misc_a);
+ sys_reg->misc_b = cpu_to_be16(misc_b);
+
+ sys_reg->active = 0;
+ sys_reg->reseed = 0;
+
+ sys_reg->blocking = 0;
+
+ sys_reg->slider_select[0] = 0;
+ sys_reg->slider_select[1] = 0;
+
+ sys_reg->event_mask = ~((u8)IQS269_EVENT_MASK_SYS);
+
+ device_for_each_child_node(&client->dev, ch_node) {
+ error = iqs269_parse_chan(iqs269, ch_node);
+ if (error) {
+ fwnode_handle_put(ch_node);
+ return error;
+ }
+ }
+
+ /*
+ * Volunteer all active channels to participate in ATI when REDO-ATI is
+ * manually triggered.
+ */
+ sys_reg->redo_ati = sys_reg->active;
+
+ general = be16_to_cpu(sys_reg->general);
+
+ if (device_property_present(&client->dev, "azoteq,clk-div")) {
+ general |= IQS269_SYS_SETTINGS_CLK_DIV;
+ iqs269->delay_mult = 4;
+ } else {
+ general &= ~IQS269_SYS_SETTINGS_CLK_DIV;
+ iqs269->delay_mult = 1;
+ }
+
+ /*
+ * Configure the device to automatically switch between normal and low-
+ * power modes as a function of sensing activity. Ultra-low-power mode,
+ * if enabled, is reserved for suspend.
+ */
+ general &= ~IQS269_SYS_SETTINGS_ULP_AUTO;
+ general &= ~IQS269_SYS_SETTINGS_DIS_AUTO;
+ general &= ~IQS269_SYS_SETTINGS_PWR_MODE_MASK;
+
+ if (!device_property_read_u32(&client->dev, "azoteq,ulp-update",
+ &val)) {
+ if (val > IQS269_SYS_SETTINGS_ULP_UPDATE_MAX) {
+ dev_err(&client->dev, "Invalid update rate: %u\n", val);
+ return -EINVAL;
+ }
+
+ general &= ~IQS269_SYS_SETTINGS_ULP_UPDATE_MASK;
+ general |= (val << IQS269_SYS_SETTINGS_ULP_UPDATE_SHIFT);
+ }
+
+ general &= ~IQS269_SYS_SETTINGS_RESEED_OFFSET;
+ if (device_property_present(&client->dev, "azoteq,reseed-offset"))
+ general |= IQS269_SYS_SETTINGS_RESEED_OFFSET;
+
+ general |= IQS269_SYS_SETTINGS_EVENT_MODE;
+
+ /*
+ * As per the datasheet, enable streaming during normal-power mode if
+ * either slider is in use. In that case, the device returns to event
+ * mode during low-power mode.
+ */
+ if (sys_reg->slider_select[0] || sys_reg->slider_select[1])
+ general |= IQS269_SYS_SETTINGS_EVENT_MODE_LP;
+
+ general |= IQS269_SYS_SETTINGS_REDO_ATI;
+ general |= IQS269_SYS_SETTINGS_ACK_RESET;
+
+ sys_reg->general = cpu_to_be16(general);
+
+ return 0;
+}
+
+static int iqs269_dev_init(struct iqs269_private *iqs269)
+{
+ struct iqs269_sys_reg *sys_reg = &iqs269->sys_reg;
+ struct iqs269_ch_reg *ch_reg;
+ unsigned int val;
+ int error, i;
+
+ mutex_lock(&iqs269->lock);
+
+ error = regmap_update_bits(iqs269->regmap, IQS269_HALL_UI,
+ IQS269_HALL_UI_ENABLE,
+ iqs269->hall_enable ? ~0 : 0);
+ if (error)
+ goto err_mutex;
+
+ for (i = 0; i < IQS269_NUM_CH; i++) {
+ if (!(sys_reg->active & BIT(i)))
+ continue;
+
+ ch_reg = &iqs269->ch_reg[i];
+
+ error = regmap_raw_write(iqs269->regmap,
+ IQS269_CHx_SETTINGS + i *
+ sizeof(*ch_reg) / 2, ch_reg,
+ sizeof(*ch_reg));
+ if (error)
+ goto err_mutex;
+ }
+
+ /*
+ * The REDO-ATI and ATI channel selection fields must be written in the
+ * same block write, so every field between registers 0x80 through 0x8B
+ * (inclusive) must be written as well.
+ */
+ error = regmap_raw_write(iqs269->regmap, IQS269_SYS_SETTINGS, sys_reg,
+ sizeof(*sys_reg));
+ if (error)
+ goto err_mutex;
+
+ error = regmap_read_poll_timeout(iqs269->regmap, IQS269_SYS_FLAGS, val,
+ !(val & IQS269_SYS_FLAGS_IN_ATI),
+ IQS269_ATI_POLL_SLEEP_US,
+ IQS269_ATI_POLL_TIMEOUT_US);
+ if (error)
+ goto err_mutex;
+
+ msleep(IQS269_ATI_STABLE_DELAY_MS);
+ iqs269->ati_current = true;
+
+err_mutex:
+ mutex_unlock(&iqs269->lock);
+
+ return error;
+}
+
+static int iqs269_input_init(struct iqs269_private *iqs269)
+{
+ struct i2c_client *client = iqs269->client;
+ struct iqs269_flags flags;
+ unsigned int sw_code, keycode;
+ int error, i, j;
+ u8 dir_mask, state;
+
+ iqs269->keypad = devm_input_allocate_device(&client->dev);
+ if (!iqs269->keypad)
+ return -ENOMEM;
+
+ iqs269->keypad->keycodemax = ARRAY_SIZE(iqs269->keycode);
+ iqs269->keypad->keycode = iqs269->keycode;
+ iqs269->keypad->keycodesize = sizeof(*iqs269->keycode);
+
+ iqs269->keypad->name = "iqs269a_keypad";
+ iqs269->keypad->id.bustype = BUS_I2C;
+
+ if (iqs269->hall_enable) {
+ error = regmap_raw_read(iqs269->regmap, IQS269_SYS_FLAGS,
+ &flags, sizeof(flags));
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to read initial status: %d\n", error);
+ return error;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(iqs269_events); i++) {
+ dir_mask = flags.states[IQS269_ST_OFFS_DIR];
+ if (!iqs269_events[i].dir_up)
+ dir_mask = ~dir_mask;
+
+ state = flags.states[iqs269_events[i].st_offs] & dir_mask;
+
+ sw_code = iqs269->switches[i].code;
+
+ for (j = 0; j < IQS269_NUM_CH; j++) {
+ keycode = iqs269->keycode[i * IQS269_NUM_CH + j];
+
+ /*
+ * Hall-effect sensing repurposes a pair of dedicated
+ * channels, only one of which reports events.
+ */
+ switch (j) {
+ case IQS269_CHx_HALL_ACTIVE:
+ if (iqs269->hall_enable &&
+ iqs269->switches[i].enabled) {
+ input_set_capability(iqs269->keypad,
+ EV_SW, sw_code);
+ input_report_switch(iqs269->keypad,
+ sw_code,
+ state & BIT(j));
+ }
+
+ /* fall through */
+
+ case IQS269_CHx_HALL_INACTIVE:
+ if (iqs269->hall_enable)
+ continue;
+
+ /* fall through */
+
+ default:
+ if (keycode != KEY_RESERVED)
+ input_set_capability(iqs269->keypad,
+ EV_KEY, keycode);
+ }
+ }
+ }
+
+ input_sync(iqs269->keypad);
+
+ error = input_register_device(iqs269->keypad);
+ if (error) {
+ dev_err(&client->dev, "Failed to register keypad: %d\n", error);
+ return error;
+ }
+
+ for (i = 0; i < IQS269_NUM_SL; i++) {
+ if (!iqs269->sys_reg.slider_select[i])
+ continue;
+
+ iqs269->slider[i] = devm_input_allocate_device(&client->dev);
+ if (!iqs269->slider[i])
+ return -ENOMEM;
+
+ iqs269->slider[i]->name = i ? "iqs269a_slider_1"
+ : "iqs269a_slider_0";
+ iqs269->slider[i]->id.bustype = BUS_I2C;
+
+ input_set_capability(iqs269->slider[i], EV_KEY, BTN_TOUCH);
+ input_set_abs_params(iqs269->slider[i], ABS_X, 0, 255, 0, 0);
+
+ error = input_register_device(iqs269->slider[i]);
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to register slider %d: %d\n", i, error);
+ return error;
+ }
+ }
+
+ return 0;
+}
+
+static int iqs269_report(struct iqs269_private *iqs269)
+{
+ struct i2c_client *client = iqs269->client;
+ struct iqs269_flags flags;
+ unsigned int sw_code, keycode;
+ int error, i, j;
+ u8 slider_x[IQS269_NUM_SL];
+ u8 dir_mask, state;
+
+ error = regmap_raw_read(iqs269->regmap, IQS269_SYS_FLAGS, &flags,
+ sizeof(flags));
+ if (error) {
+ dev_err(&client->dev, "Failed to read device status: %d\n",
+ error);
+ return error;
+ }
+
+ /*
+ * The device resets itself if its own watchdog bites, which can happen
+ * in the event of an I2C communication error. In this case, the device
+ * asserts a SHOW_RESET interrupt and all registers must be restored.
+ */
+ if (be16_to_cpu(flags.system) & IQS269_SYS_FLAGS_SHOW_RESET) {
+ dev_err(&client->dev, "Unexpected device reset\n");
+
+ error = iqs269_dev_init(iqs269);
+ if (error)
+ dev_err(&client->dev,
+ "Failed to re-initialize device: %d\n", error);
+
+ return error;
+ }
+
+ error = regmap_raw_read(iqs269->regmap, IQS269_SLIDER_X, slider_x,
+ sizeof(slider_x));
+ if (error) {
+ dev_err(&client->dev, "Failed to read slider position: %d\n",
+ error);
+ return error;
+ }
+
+ for (i = 0; i < IQS269_NUM_SL; i++) {
+ if (!iqs269->sys_reg.slider_select[i])
+ continue;
+
+ /*
+ * Report BTN_TOUCH if any channel that participates in the
+ * slider is in a state of touch.
+ */
+ if (flags.states[IQS269_ST_OFFS_TOUCH] &
+ iqs269->sys_reg.slider_select[i]) {
+ input_report_key(iqs269->slider[i], BTN_TOUCH, 1);
+ input_report_abs(iqs269->slider[i], ABS_X, slider_x[i]);
+ } else {
+ input_report_key(iqs269->slider[i], BTN_TOUCH, 0);
+ }
+
+ input_sync(iqs269->slider[i]);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(iqs269_events); i++) {
+ dir_mask = flags.states[IQS269_ST_OFFS_DIR];
+ if (!iqs269_events[i].dir_up)
+ dir_mask = ~dir_mask;
+
+ state = flags.states[iqs269_events[i].st_offs] & dir_mask;
+
+ sw_code = iqs269->switches[i].code;
+
+ for (j = 0; j < IQS269_NUM_CH; j++) {
+ keycode = iqs269->keycode[i * IQS269_NUM_CH + j];
+
+ switch (j) {
+ case IQS269_CHx_HALL_ACTIVE:
+ if (iqs269->hall_enable &&
+ iqs269->switches[i].enabled)
+ input_report_switch(iqs269->keypad,
+ sw_code,
+ state & BIT(j));
+
+ /* fall through */
+
+ case IQS269_CHx_HALL_INACTIVE:
+ if (iqs269->hall_enable)
+ continue;
+
+ /* fall through */
+
+ default:
+ input_report_key(iqs269->keypad, keycode,
+ state & BIT(j));
+ }
+ }
+ }
+
+ input_sync(iqs269->keypad);
+
+ return 0;
+}
+
+static irqreturn_t iqs269_irq(int irq, void *context)
+{
+ struct iqs269_private *iqs269 = context;
+
+ if (iqs269_report(iqs269))
+ return IRQ_NONE;
+
+ /*
+ * The device does not deassert its interrupt (RDY) pin until shortly
+ * after receiving an I2C stop condition; the following delay ensures
+ * the interrupt handler does not return before this time.
+ */
+ iqs269_irq_wait();
+
+ return IRQ_HANDLED;
+}
+
+static ssize_t counts_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iqs269_private *iqs269 = dev_get_drvdata(dev);
+ struct i2c_client *client = iqs269->client;
+ __le16 counts;
+ int error;
+
+ if (!iqs269->ati_current || iqs269->hall_enable)
+ return -EPERM;
+
+ /*
+ * Unsolicited I2C communication prompts the device to assert its RDY
+ * pin, so disable the interrupt line until the operation is finished
+ * and RDY has been deasserted.
+ */
+ disable_irq(client->irq);
+
+ error = regmap_raw_read(iqs269->regmap,
+ IQS269_CHx_COUNTS + iqs269->ch_num * 2,
+ &counts, sizeof(counts));
+
+ iqs269_irq_wait();
+ enable_irq(client->irq);
+
+ if (error)
+ return error;
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", le16_to_cpu(counts));
+}
+
+static ssize_t hall_bin_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iqs269_private *iqs269 = dev_get_drvdata(dev);
+ struct i2c_client *client = iqs269->client;
+ unsigned int val;
+ int error;
+
+ disable_irq(client->irq);
+
+ error = regmap_read(iqs269->regmap, IQS269_CAL_DATA_A, &val);
+
+ iqs269_irq_wait();
+ enable_irq(client->irq);
+
+ if (error)
+ return error;
+
+ switch (iqs269->ch_reg[IQS269_CHx_HALL_ACTIVE].rx_enable &
+ iqs269->ch_reg[IQS269_CHx_HALL_INACTIVE].rx_enable) {
+ case IQS269_HALL_PAD_R:
+ val &= IQS269_CAL_DATA_A_HALL_BIN_R_MASK;
+ val >>= IQS269_CAL_DATA_A_HALL_BIN_R_SHIFT;
+ break;
+
+ case IQS269_HALL_PAD_L:
+ val &= IQS269_CAL_DATA_A_HALL_BIN_L_MASK;
+ val >>= IQS269_CAL_DATA_A_HALL_BIN_L_SHIFT;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", val);
+}
+
+static ssize_t hall_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iqs269_private *iqs269 = dev_get_drvdata(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", iqs269->hall_enable);
+}
+
+static ssize_t hall_enable_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct iqs269_private *iqs269 = dev_get_drvdata(dev);
+ unsigned int val;
+ int error;
+
+ error = kstrtouint(buf, 10, &val);
+ if (error)
+ return error;
+
+ mutex_lock(&iqs269->lock);
+
+ iqs269->hall_enable = val;
+ iqs269->ati_current = false;
+
+ mutex_unlock(&iqs269->lock);
+
+ return count;
+}
+
+static ssize_t ch_number_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iqs269_private *iqs269 = dev_get_drvdata(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", iqs269->ch_num);
+}
+
+static ssize_t ch_number_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct iqs269_private *iqs269 = dev_get_drvdata(dev);
+ unsigned int val;
+ int error;
+
+ error = kstrtouint(buf, 10, &val);
+ if (error)
+ return error;
+
+ if (val >= IQS269_NUM_CH)
+ return -EINVAL;
+
+ iqs269->ch_num = val;
+
+ return count;
+}
+
+static ssize_t rx_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iqs269_private *iqs269 = dev_get_drvdata(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n",
+ iqs269->ch_reg[iqs269->ch_num].rx_enable);
+}
+
+static ssize_t rx_enable_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct iqs269_private *iqs269 = dev_get_drvdata(dev);
+ unsigned int val;
+ int error;
+
+ error = kstrtouint(buf, 10, &val);
+ if (error)
+ return error;
+
+ if (val > 0xFF)
+ return -EINVAL;
+
+ mutex_lock(&iqs269->lock);
+
+ iqs269->ch_reg[iqs269->ch_num].rx_enable = val;
+ iqs269->ati_current = false;
+
+ mutex_unlock(&iqs269->lock);
+
+ return count;
+}
+
+static ssize_t ati_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iqs269_private *iqs269 = dev_get_drvdata(dev);
+ unsigned int val;
+ int error;
+
+ error = iqs269_ati_mode_get(iqs269, iqs269->ch_num, &val);
+ if (error)
+ return error;
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", val);
+}
+
+static ssize_t ati_mode_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct iqs269_private *iqs269 = dev_get_drvdata(dev);
+ unsigned int val;
+ int error;
+
+ error = kstrtouint(buf, 10, &val);
+ if (error)
+ return error;
+
+ error = iqs269_ati_mode_set(iqs269, iqs269->ch_num, val);
+ if (error)
+ return error;
+
+ return count;
+}
+
+static ssize_t ati_base_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iqs269_private *iqs269 = dev_get_drvdata(dev);
+ unsigned int val;
+ int error;
+
+ error = iqs269_ati_base_get(iqs269, iqs269->ch_num, &val);
+ if (error)
+ return error;
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", val);
+}
+
+static ssize_t ati_base_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct iqs269_private *iqs269 = dev_get_drvdata(dev);
+ unsigned int val;
+ int error;
+
+ error = kstrtouint(buf, 10, &val);
+ if (error)
+ return error;
+
+ error = iqs269_ati_base_set(iqs269, iqs269->ch_num, val);
+ if (error)
+ return error;
+
+ return count;
+}
+
+static ssize_t ati_target_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iqs269_private *iqs269 = dev_get_drvdata(dev);
+ unsigned int val;
+ int error;
+
+ error = iqs269_ati_target_get(iqs269, iqs269->ch_num, &val);
+ if (error)
+ return error;
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", val);
+}
+
+static ssize_t ati_target_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct iqs269_private *iqs269 = dev_get_drvdata(dev);
+ unsigned int val;
+ int error;
+
+ error = kstrtouint(buf, 10, &val);
+ if (error)
+ return error;
+
+ error = iqs269_ati_target_set(iqs269, iqs269->ch_num, val);
+ if (error)
+ return error;
+
+ return count;
+}
+
+static ssize_t ati_trigger_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iqs269_private *iqs269 = dev_get_drvdata(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", iqs269->ati_current);
+}
+
+static ssize_t ati_trigger_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct iqs269_private *iqs269 = dev_get_drvdata(dev);
+ struct i2c_client *client = iqs269->client;
+ unsigned int val;
+ int error;
+
+ error = kstrtouint(buf, 10, &val);
+ if (error)
+ return error;
+
+ if (!val)
+ return count;
+
+ disable_irq(client->irq);
+
+ error = iqs269_dev_init(iqs269);
+
+ iqs269_irq_wait();
+ enable_irq(client->irq);
+
+ if (error)
+ return error;
+
+ return count;
+}
+
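+/* sysfs attributes exposing raw counts, hall state and per-channel ATI tuning. */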
+static DEVICE_ATTR_RO(counts);
+static DEVICE_ATTR_RO(hall_bin);
+static DEVICE_ATTR_RW(hall_enable);
+static DEVICE_ATTR_RW(ch_number);
+static DEVICE_ATTR_RW(rx_enable);
+static DEVICE_ATTR_RW(ati_mode);
+static DEVICE_ATTR_RW(ati_base);
+static DEVICE_ATTR_RW(ati_target);
+static DEVICE_ATTR_RW(ati_trigger);
+
+static struct attribute *iqs269_attrs[] = {
+ &dev_attr_counts.attr,
+ &dev_attr_hall_bin.attr,
+ &dev_attr_hall_enable.attr,
+ &dev_attr_ch_number.attr,
+ &dev_attr_rx_enable.attr,
+ &dev_attr_ati_mode.attr,
+ &dev_attr_ati_base.attr,
+ &dev_attr_ati_target.attr,
+ &dev_attr_ati_trigger.attr,
+ NULL,
+};
+
+static const struct attribute_group iqs269_attr_group = {
+ .attrs = iqs269_attrs,
+};
+
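+/* The device uses 8-bit register addresses with 16-bit register values. */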
+static const struct regmap_config iqs269_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 16,
+ .max_register = IQS269_MAX_REG,
+};
+
+static int iqs269_probe(struct i2c_client *client)
+{
+ struct iqs269_ver_info ver_info;
+ struct iqs269_private *iqs269;
+ int error;
+
+ iqs269 = devm_kzalloc(&client->dev, sizeof(*iqs269), GFP_KERNEL);
+ if (!iqs269)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, iqs269);
+ iqs269->client = client;
+
+ iqs269->regmap = devm_regmap_init_i2c(client, &iqs269_regmap_config);
+ if (IS_ERR(iqs269->regmap)) {
+ error = PTR_ERR(iqs269->regmap);
+ dev_err(&client->dev, "Failed to initialize register map: %d\n",
+ error);
+ return error;
+ }
+
+ mutex_init(&iqs269->lock);
+
+ error = regmap_raw_read(iqs269->regmap, IQS269_VER_INFO, &ver_info,
+ sizeof(ver_info));
+ if (error)
+ return error;
+
+ if (ver_info.prod_num != IQS269_VER_INFO_PROD_NUM) {
+ dev_err(&client->dev, "Unrecognized product number: 0x%02X\n",
+ ver_info.prod_num);
+ return -EINVAL;
+ }
+
+ error = iqs269_parse_prop(iqs269);
+ if (error)
+ return error;
+
+ error = iqs269_dev_init(iqs269);
+ if (error) {
+ dev_err(&client->dev, "Failed to initialize device: %d\n",
+ error);
+ return error;
+ }
+
+ error = iqs269_input_init(iqs269);
+ if (error)
+ return error;
+
+ error = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, iqs269_irq, IRQF_ONESHOT,
+ client->name, iqs269);
+ if (error) {
+ dev_err(&client->dev, "Failed to request IRQ: %d\n", error);
+ return error;
+ }
+
+ error = devm_device_add_group(&client->dev, &iqs269_attr_group);
+ if (error)
+ dev_err(&client->dev, "Failed to add attributes: %d\n", error);
+
+ return error;
+}
+
+static int __maybe_unused iqs269_suspend(struct device *dev)
+{
+ struct iqs269_private *iqs269 = dev_get_drvdata(dev);
+ struct i2c_client *client = iqs269->client;
+ unsigned int val;
+ int error;
+
+ if (!iqs269->suspend_mode)
+ return 0;
+
+ disable_irq(client->irq);
+
+ /*
+ * Automatic power mode switching must be disabled before the device is
+ * forced into any particular power mode. In this case, the device will
+ * transition into normal-power mode.
+ */
+ error = regmap_update_bits(iqs269->regmap, IQS269_SYS_SETTINGS,
+ IQS269_SYS_SETTINGS_DIS_AUTO, ~0);
+ if (error)
+ goto err_irq;
+
+ /*
+ * The following check ensures the device has completed its transition
+ * into normal-power mode before a manual mode switch is performed.
+ */
+ error = regmap_read_poll_timeout(iqs269->regmap, IQS269_SYS_FLAGS, val,
+ !(val & IQS269_SYS_FLAGS_PWR_MODE_MASK),
+ IQS269_PWR_MODE_POLL_SLEEP_US,
+ IQS269_PWR_MODE_POLL_TIMEOUT_US);
+ if (error)
+ goto err_irq;
+
+ error = regmap_update_bits(iqs269->regmap, IQS269_SYS_SETTINGS,
+ IQS269_SYS_SETTINGS_PWR_MODE_MASK,
+ iqs269->suspend_mode <<
+ IQS269_SYS_SETTINGS_PWR_MODE_SHIFT);
+ if (error)
+ goto err_irq;
+
+ /*
+ * This last check ensures the device has completed its transition into
+ * the desired power mode to prevent any spurious interrupts from being
+ * triggered after iqs269_suspend has already returned.
+ */
+ error = regmap_read_poll_timeout(iqs269->regmap, IQS269_SYS_FLAGS, val,
+ (val & IQS269_SYS_FLAGS_PWR_MODE_MASK)
+ == (iqs269->suspend_mode <<
+ IQS269_SYS_FLAGS_PWR_MODE_SHIFT),
+ IQS269_PWR_MODE_POLL_SLEEP_US,
+ IQS269_PWR_MODE_POLL_TIMEOUT_US);
+
+err_irq:
+ iqs269_irq_wait();
+ enable_irq(client->irq);
+
+ return error;
+}
+
+static int __maybe_unused iqs269_resume(struct device *dev)
+{
+ struct iqs269_private *iqs269 = dev_get_drvdata(dev);
+ struct i2c_client *client = iqs269->client;
+ unsigned int val;
+ int error;
+
+ if (!iqs269->suspend_mode)
+ return 0;
+
+ disable_irq(client->irq);
+
+ error = regmap_update_bits(iqs269->regmap, IQS269_SYS_SETTINGS,
+ IQS269_SYS_SETTINGS_PWR_MODE_MASK, 0);
+ if (error)
+ goto err_irq;
+
+ /*
+ * This check ensures the device has returned to normal-power mode
+ * before automatic power mode switching is re-enabled.
+ */
+ error = regmap_read_poll_timeout(iqs269->regmap, IQS269_SYS_FLAGS, val,
+ !(val & IQS269_SYS_FLAGS_PWR_MODE_MASK),
+ IQS269_PWR_MODE_POLL_SLEEP_US,
+ IQS269_PWR_MODE_POLL_TIMEOUT_US);
+ if (error)
+ goto err_irq;
+
+ error = regmap_update_bits(iqs269->regmap, IQS269_SYS_SETTINGS,
+ IQS269_SYS_SETTINGS_DIS_AUTO, 0);
+ if (error)
+ goto err_irq;
+
+ /*
+ * This step reports any events that may have been "swallowed" as a
+ * result of polling PWR_MODE (which automatically acknowledges any
+ * pending interrupts).
+ */
+ error = iqs269_report(iqs269);
+
+err_irq:
+ iqs269_irq_wait();
+ enable_irq(client->irq);
+
+ return error;
+}
+
+static SIMPLE_DEV_PM_OPS(iqs269_pm, iqs269_suspend, iqs269_resume);
+
+static const struct of_device_id iqs269_of_match[] = {
+ { .compatible = "azoteq,iqs269a" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, iqs269_of_match);
+
+static struct i2c_driver iqs269_i2c_driver = {
+ .driver = {
+ .name = "iqs269a",
+ .of_match_table = iqs269_of_match,
+ .pm = &iqs269_pm,
+ },
+ .probe_new = iqs269_probe,
+};
+module_i2c_driver(iqs269_i2c_driver);
+
+MODULE_AUTHOR("Jeff LaBundy <jeff@labundy.com>");
+MODULE_DESCRIPTION("Azoteq IQS269A Capacitive Touch Controller");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/msm-vibrator.c b/drivers/input/misc/msm-vibrator.c
deleted file mode 100644
index b60f1aaee705..000000000000
--- a/drivers/input/misc/msm-vibrator.c
+++ /dev/null
@@ -1,281 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Qualcomm MSM vibrator driver
- *
- * Copyright (c) 2018 Brian Masney <masneyb@onstation.org>
- *
- * Based on qcom,pwm-vibrator.c from:
- * Copyright (c) 2018 Jonathan Marek <jonathan@marek.ca>
- *
- * Based on msm_pwm_vibrator.c from downstream Android sources:
- * Copyright (C) 2009-2014 LGE, Inc.
- */
-
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/gpio/consumer.h>
-#include <linux/input.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-#include <linux/regulator/consumer.h>
-
-#define REG_CMD_RCGR 0x00
-#define REG_CFG_RCGR 0x04
-#define REG_M 0x08
-#define REG_N 0x0C
-#define REG_D 0x10
-#define REG_CBCR 0x24
-#define MMSS_CC_M_DEFAULT 1
-
-struct msm_vibrator {
- struct input_dev *input;
- struct mutex mutex;
- struct work_struct worker;
- void __iomem *base;
- struct regulator *vcc;
- struct clk *clk;
- struct gpio_desc *enable_gpio;
- u16 magnitude;
- bool enabled;
-};
-
-static void msm_vibrator_write(struct msm_vibrator *vibrator, int offset,
- u32 value)
-{
- writel(value, vibrator->base + offset);
-}
-
-static int msm_vibrator_start(struct msm_vibrator *vibrator)
-{
- int d_reg_val, ret = 0;
-
- mutex_lock(&vibrator->mutex);
-
- if (!vibrator->enabled) {
- ret = clk_set_rate(vibrator->clk, 24000);
- if (ret) {
- dev_err(&vibrator->input->dev,
- "Failed to set clock rate: %d\n", ret);
- goto unlock;
- }
-
- ret = clk_prepare_enable(vibrator->clk);
- if (ret) {
- dev_err(&vibrator->input->dev,
- "Failed to enable clock: %d\n", ret);
- goto unlock;
- }
-
- ret = regulator_enable(vibrator->vcc);
- if (ret) {
- dev_err(&vibrator->input->dev,
- "Failed to enable regulator: %d\n", ret);
- clk_disable(vibrator->clk);
- goto unlock;
- }
-
- gpiod_set_value_cansleep(vibrator->enable_gpio, 1);
-
- vibrator->enabled = true;
- }
-
- d_reg_val = 127 - ((126 * vibrator->magnitude) / 0xffff);
- msm_vibrator_write(vibrator, REG_CFG_RCGR,
- (2 << 12) | /* dual edge mode */
- (0 << 8) | /* cxo */
- (7 << 0));
- msm_vibrator_write(vibrator, REG_M, 1);
- msm_vibrator_write(vibrator, REG_N, 128);
- msm_vibrator_write(vibrator, REG_D, d_reg_val);
- msm_vibrator_write(vibrator, REG_CMD_RCGR, 1);
- msm_vibrator_write(vibrator, REG_CBCR, 1);
-
-unlock:
- mutex_unlock(&vibrator->mutex);
-
- return ret;
-}
-
-static void msm_vibrator_stop(struct msm_vibrator *vibrator)
-{
- mutex_lock(&vibrator->mutex);
-
- if (vibrator->enabled) {
- gpiod_set_value_cansleep(vibrator->enable_gpio, 0);
- regulator_disable(vibrator->vcc);
- clk_disable(vibrator->clk);
- vibrator->enabled = false;
- }
-
- mutex_unlock(&vibrator->mutex);
-}
-
-static void msm_vibrator_worker(struct work_struct *work)
-{
- struct msm_vibrator *vibrator = container_of(work,
- struct msm_vibrator,
- worker);
-
- if (vibrator->magnitude)
- msm_vibrator_start(vibrator);
- else
- msm_vibrator_stop(vibrator);
-}
-
-static int msm_vibrator_play_effect(struct input_dev *dev, void *data,
- struct ff_effect *effect)
-{
- struct msm_vibrator *vibrator = input_get_drvdata(dev);
-
- mutex_lock(&vibrator->mutex);
-
- if (effect->u.rumble.strong_magnitude > 0)
- vibrator->magnitude = effect->u.rumble.strong_magnitude;
- else
- vibrator->magnitude = effect->u.rumble.weak_magnitude;
-
- mutex_unlock(&vibrator->mutex);
-
- schedule_work(&vibrator->worker);
-
- return 0;
-}
-
-static void msm_vibrator_close(struct input_dev *input)
-{
- struct msm_vibrator *vibrator = input_get_drvdata(input);
-
- cancel_work_sync(&vibrator->worker);
- msm_vibrator_stop(vibrator);
-}
-
-static int msm_vibrator_probe(struct platform_device *pdev)
-{
- struct msm_vibrator *vibrator;
- struct resource *res;
- int ret;
-
- vibrator = devm_kzalloc(&pdev->dev, sizeof(*vibrator), GFP_KERNEL);
- if (!vibrator)
- return -ENOMEM;
-
- vibrator->input = devm_input_allocate_device(&pdev->dev);
- if (!vibrator->input)
- return -ENOMEM;
-
- vibrator->vcc = devm_regulator_get(&pdev->dev, "vcc");
- if (IS_ERR(vibrator->vcc)) {
- if (PTR_ERR(vibrator->vcc) != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Failed to get regulator: %ld\n",
- PTR_ERR(vibrator->vcc));
- return PTR_ERR(vibrator->vcc);
- }
-
- vibrator->enable_gpio = devm_gpiod_get(&pdev->dev, "enable",
- GPIOD_OUT_LOW);
- if (IS_ERR(vibrator->enable_gpio)) {
- if (PTR_ERR(vibrator->enable_gpio) != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Failed to get enable gpio: %ld\n",
- PTR_ERR(vibrator->enable_gpio));
- return PTR_ERR(vibrator->enable_gpio);
- }
-
- vibrator->clk = devm_clk_get(&pdev->dev, "pwm");
- if (IS_ERR(vibrator->clk)) {
- if (PTR_ERR(vibrator->clk) != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Failed to lookup pwm clock: %ld\n",
- PTR_ERR(vibrator->clk));
- return PTR_ERR(vibrator->clk);
- }
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "Failed to get platform resource\n");
- return -ENODEV;
- }
-
- vibrator->base = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
- if (!vibrator->base) {
- dev_err(&pdev->dev, "Failed to iomap resource.\n");
- return -ENOMEM;
- }
-
- vibrator->enabled = false;
- mutex_init(&vibrator->mutex);
- INIT_WORK(&vibrator->worker, msm_vibrator_worker);
-
- vibrator->input->name = "msm-vibrator";
- vibrator->input->id.bustype = BUS_HOST;
- vibrator->input->close = msm_vibrator_close;
-
- input_set_drvdata(vibrator->input, vibrator);
- input_set_capability(vibrator->input, EV_FF, FF_RUMBLE);
-
- ret = input_ff_create_memless(vibrator->input, NULL,
- msm_vibrator_play_effect);
- if (ret) {
- dev_err(&pdev->dev, "Failed to create ff memless: %d", ret);
- return ret;
- }
-
- ret = input_register_device(vibrator->input);
- if (ret) {
- dev_err(&pdev->dev, "Failed to register input device: %d", ret);
- return ret;
- }
-
- platform_set_drvdata(pdev, vibrator);
-
- return 0;
-}
-
-static int __maybe_unused msm_vibrator_suspend(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct msm_vibrator *vibrator = platform_get_drvdata(pdev);
-
- cancel_work_sync(&vibrator->worker);
-
- if (vibrator->enabled)
- msm_vibrator_stop(vibrator);
-
- return 0;
-}
-
-static int __maybe_unused msm_vibrator_resume(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct msm_vibrator *vibrator = platform_get_drvdata(pdev);
-
- if (vibrator->enabled)
- msm_vibrator_start(vibrator);
-
- return 0;
-}
-
-static SIMPLE_DEV_PM_OPS(msm_vibrator_pm_ops, msm_vibrator_suspend,
- msm_vibrator_resume);
-
-static const struct of_device_id msm_vibrator_of_match[] = {
- { .compatible = "qcom,msm8226-vibrator" },
- { .compatible = "qcom,msm8974-vibrator" },
- {},
-};
-MODULE_DEVICE_TABLE(of, msm_vibrator_of_match);
-
-static struct platform_driver msm_vibrator_driver = {
- .probe = msm_vibrator_probe,
- .driver = {
- .name = "msm-vibrator",
- .pm = &msm_vibrator_pm_ops,
- .of_match_table = of_match_ptr(msm_vibrator_of_match),
- },
-};
-module_platform_driver(msm_vibrator_driver);
-
-MODULE_AUTHOR("Brian Masney <masneyb@onstation.org>");
-MODULE_DESCRIPTION("Qualcomm MSM vibrator driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c
index 24bc5c5d876f..a1bba722b234 100644
--- a/drivers/input/misc/xen-kbdfront.c
+++ b/drivers/input/misc/xen-kbdfront.c
@@ -146,7 +146,7 @@ static void xenkbd_handle_mt_event(struct xenkbd_info *info,
break;
case XENKBD_MT_EV_UP:
- input_mt_report_slot_state(info->mtouch, MT_TOOL_FINGER, false);
+ input_mt_report_slot_inactive(info->mtouch);
break;
case XENKBD_MT_EV_SYN:
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index 8719da540383..3f9354baac4b 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -938,7 +938,7 @@ static void elan_report_contact(struct elan_tp_data *data,
input_report_abs(input, ABS_MT_TOUCH_MINOR, minor);
} else {
input_mt_slot(input, contact_num);
- input_mt_report_slot_state(input, MT_TOOL_FINGER, false);
+ input_mt_report_slot_inactive(input);
}
}
diff --git a/drivers/input/serio/i8042-ppcio.h b/drivers/input/serio/i8042-ppcio.h
deleted file mode 100644
index 391f94d9e47d..000000000000
--- a/drivers/input/serio/i8042-ppcio.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-#ifndef _I8042_PPCIO_H
-#define _I8042_PPCIO_H
-
-
-#if defined(CONFIG_WALNUT)
-
-#define I8042_KBD_IRQ 25
-#define I8042_AUX_IRQ 26
-
-#define I8042_KBD_PHYS_DESC "walnutps2/serio0"
-#define I8042_AUX_PHYS_DESC "walnutps2/serio1"
-#define I8042_MUX_PHYS_DESC "walnutps2/serio%d"
-
-extern void *kb_cs;
-extern void *kb_data;
-
-#define I8042_COMMAND_REG (*(int *)kb_cs)
-#define I8042_DATA_REG (*(int *)kb_data)
-
-static inline int i8042_read_data(void)
-{
- return readb(kb_data);
-}
-
-static inline int i8042_read_status(void)
-{
- return readb(kb_cs);
-}
-
-static inline void i8042_write_data(int val)
-{
- writeb(val, kb_data);
-}
-
-static inline void i8042_write_command(int val)
-{
- writeb(val, kb_cs);
-}
-
-static inline int i8042_platform_init(void)
-{
- i8042_reset = I8042_RESET_ALWAYS;
- return 0;
-}
-
-static inline void i8042_platform_exit(void)
-{
-}
-
-#else
-
-#include "i8042-io.h"
-
-#endif
-
-#endif /* _I8042_PPCIO_H */
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 7e048b557462..7b08ff8ddf35 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -945,6 +945,7 @@ static int i8042_pnp_kbd_probe(struct pnp_dev *dev, const struct pnp_device_id *
}
i8042_pnp_id_to_string(dev->id, i8042_kbd_firmware_id,
sizeof(i8042_kbd_firmware_id));
+ i8042_kbd_fwnode = dev_fwnode(&dev->dev);
/* Keyboard ports are always supposed to be wakeup-enabled */
device_set_wakeup_enable(&dev->dev, true);
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 20ff2bed3917..0dddf273afd9 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -21,6 +21,7 @@
#include <linux/i8042.h>
#include <linux/slab.h>
#include <linux/suspend.h>
+#include <linux/property.h>
#include <asm/io.h>
@@ -124,6 +125,7 @@ MODULE_PARM_DESC(unmask_kbd_data, "Unconditional enable (may reveal sensitive da
static bool i8042_bypass_aux_irq_test;
static char i8042_kbd_firmware_id[128];
static char i8042_aux_firmware_id[128];
+static struct fwnode_handle *i8042_kbd_fwnode;
#include "i8042.h"
@@ -1335,6 +1337,7 @@ static int __init i8042_create_kbd_port(void)
strlcpy(serio->phys, I8042_KBD_PHYS_DESC, sizeof(serio->phys));
strlcpy(serio->firmware_id, i8042_kbd_firmware_id,
sizeof(serio->firmware_id));
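+ /* Attach the PNP keyboard fwnode so its firmware properties stay visible. */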
+ set_primary_fwnode(&serio->dev, i8042_kbd_fwnode);
port->serio = serio;
port->irq = I8042_KBD_IRQ;
diff --git a/drivers/input/serio/i8042.h b/drivers/input/serio/i8042.h
index 38dc27ad3c18..eb376700dfff 100644
--- a/drivers/input/serio/i8042.h
+++ b/drivers/input/serio/i8042.h
@@ -17,8 +17,6 @@
#include "i8042-ip22io.h"
#elif defined(CONFIG_SNI_RM)
#include "i8042-snirm.h"
-#elif defined(CONFIG_PPC)
-#include "i8042-ppcio.h"
#elif defined(CONFIG_SPARC)
#include "i8042-sparcio.h"
#elif defined(CONFIG_X86) || defined(CONFIG_IA64)
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index c071f7c407b6..35c867b2d9a7 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -201,6 +201,18 @@ config TOUCHSCREEN_CHIPONE_ICN8505
To compile this driver as a module, choose M here: the
module will be called chipone_icn8505.
+config TOUCHSCREEN_CY8CTMA140
+ tristate "cy8ctma140 touchscreen"
+ depends on I2C
+ help
+ Say Y here if you have a Cypress CY8CTMA140 capacitive
+ touchscreen, also known simply as "TMA140".
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cy8ctma140.
+
config TOUCHSCREEN_CY8CTMG110
tristate "cy8ctmg110 touchscreen"
depends on I2C
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 94c6162409b3..30d1e1b42492 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_TOUCHSCREEN_BU21013) += bu21013_ts.o
obj-$(CONFIG_TOUCHSCREEN_BU21029) += bu21029_ts.o
obj-$(CONFIG_TOUCHSCREEN_CHIPONE_ICN8318) += chipone_icn8318.o
obj-$(CONFIG_TOUCHSCREEN_CHIPONE_ICN8505) += chipone_icn8505.o
+obj-$(CONFIG_TOUCHSCREEN_CY8CTMA140) += cy8ctma140.o
obj-$(CONFIG_TOUCHSCREEN_CY8CTMG110) += cy8ctmg110_ts.o
obj-$(CONFIG_TOUCHSCREEN_CYTTSP_CORE) += cyttsp_core.o
obj-$(CONFIG_TOUCHSCREEN_CYTTSP_I2C) += cyttsp_i2c.o cyttsp_i2c_common.o
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index ae60442efda0..a2189739e30f 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -822,8 +822,7 @@ static void mxt_proc_t9_message(struct mxt_data *data, u8 *message)
* have happened.
*/
if (status & MXT_T9_RELEASE) {
- input_mt_report_slot_state(input_dev,
- MT_TOOL_FINGER, 0);
+ input_mt_report_slot_inactive(input_dev);
mxt_input_sync(data);
}
@@ -839,7 +838,7 @@ static void mxt_proc_t9_message(struct mxt_data *data, u8 *message)
input_report_abs(input_dev, ABS_MT_TOUCH_MAJOR, area);
} else {
/* Touch no longer active, close out slot */
- input_mt_report_slot_state(input_dev, MT_TOOL_FINGER, 0);
+ input_mt_report_slot_inactive(input_dev);
}
data->update_input = true;
@@ -947,7 +946,7 @@ static void mxt_proc_t100_message(struct mxt_data *data, u8 *message)
dev_dbg(dev, "[%u] release\n", id);
/* close out slot */
- input_mt_report_slot_state(input_dev, 0, 0);
+ input_mt_report_slot_inactive(input_dev);
}
data->update_input = true;
diff --git a/drivers/input/touchscreen/cy8ctma140.c b/drivers/input/touchscreen/cy8ctma140.c
new file mode 100644
index 000000000000..a9be29139cbf
--- /dev/null
+++ b/drivers/input/touchscreen/cy8ctma140.c
@@ -0,0 +1,353 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Driver for Cypress CY8CTMA140 (TMA140) touchscreen
+ * (C) 2020 Linus Walleij <linus.walleij@linaro.org>
+ * (C) 2007 Cypress
+ * (C) 2007 Google, Inc.
+ *
+ * Inspired by the tma140_skomer.c driver in the Samsung GT-S7710 code
+ * drop. The GT-S7710 is codenamed "Skomer", the code also indicates
+ * that the same touchscreen was used in a product called "Lucas".
+ *
+ * The code drop for GT-S7710 also contains a firmware downloader and
+ * 15 (!) versions of the firmware drop from Cypress. But here we assume
+ * the firmware got downloaded to the touchscreen flash successfully and
+ * just use it to read the fingers. The shipped vendor driver does the
+ * same.
+ */
+
+#include <asm/unaligned.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/input.h>
+#include <linux/input/touchscreen.h>
+#include <linux/input/mt.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/i2c.h>
+#include <linux/regulator/consumer.h>
+#include <linux/delay.h>
+
+#define CY8CTMA140_NAME "cy8ctma140"
+
+#define CY8CTMA140_MAX_FINGERS 4
+
+#define CY8CTMA140_GET_FINGERS 0x00
+#define CY8CTMA140_GET_FW_INFO 0x19
+
+/* This message also fits some bytes for touchkeys, if used */
+#define CY8CTMA140_PACKET_SIZE 31
+
+#define CY8CTMA140_INVALID_BUFFER_BIT 5
+
+struct cy8ctma140 {
+ struct input_dev *input;
+ struct touchscreen_properties props;
+ struct device *dev;
+ struct i2c_client *client;
+ struct regulator_bulk_data regulators[2];
+ u8 prev_fingers;
+ u8 prev_f1id;
+ u8 prev_f2id;
+};
+
+static void cy8ctma140_report(struct cy8ctma140 *ts, u8 *data, int n_fingers)
+{
+ static const u8 contact_offsets[] = { 0x03, 0x09, 0x10, 0x16 };
+ u8 *buf;
+ u16 x, y;
+ u8 w;
+ u8 id;
+ int slot;
+ int i;
+
+ for (i = 0; i < n_fingers; i++) {
+ buf = &data[contact_offsets[i]];
+
+ /*
+ * Odd contacts have contact ID in the lower nibble of
+ * the preceding byte, whereas even contacts have it in
+ * the upper nibble of the following byte.
+ */
+ id = i % 2 ? buf[-1] & 0x0f : buf[5] >> 4;
+ slot = input_mt_get_slot_by_key(ts->input, id);
+ if (slot < 0)
+ continue;
+
+ x = get_unaligned_be16(buf);
+ y = get_unaligned_be16(buf + 2);
+ w = buf[4];
+
+ dev_dbg(ts->dev, "finger %d: ID %02x (%d, %d) w: %d\n",
+ slot, id, x, y, w);
+
+ input_mt_slot(ts->input, slot);
+ input_mt_report_slot_state(ts->input, MT_TOOL_FINGER, true);
+ touchscreen_report_pos(ts->input, &ts->props, x, y, true);
+ input_report_abs(ts->input, ABS_MT_TOUCH_MAJOR, w);
+ }
+
+ input_mt_sync_frame(ts->input);
+ input_sync(ts->input);
+}
+
+static irqreturn_t cy8ctma140_irq_thread(int irq, void *d)
+{
+ struct cy8ctma140 *ts = d;
+ u8 cmdbuf[] = { CY8CTMA140_GET_FINGERS };
+ u8 buf[CY8CTMA140_PACKET_SIZE];
+ struct i2c_msg msg[] = {
+ {
+ .addr = ts->client->addr,
+ .flags = 0,
+ .len = sizeof(cmdbuf),
+ .buf = cmdbuf,
+ }, {
+ .addr = ts->client->addr,
+ .flags = I2C_M_RD,
+ .len = sizeof(buf),
+ .buf = buf,
+ },
+ };
+ u8 n_fingers;
+ int ret;
+
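+ /* Combined transfer: write the finger-data register, then read the whole packet. */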
+ ret = i2c_transfer(ts->client->adapter, msg, ARRAY_SIZE(msg));
+ if (ret != ARRAY_SIZE(msg)) {
+ if (ret < 0)
+ dev_err(ts->dev, "error reading message: %d\n", ret);
+ else
+ dev_err(ts->dev, "wrong number of messages\n");
+ goto out;
+ }
+
+ if (buf[1] & BIT(CY8CTMA140_INVALID_BUFFER_BIT)) {
+ dev_dbg(ts->dev, "invalid event\n");
+ goto out;
+ }
+
+ n_fingers = buf[2] & 0x0f;
+ if (n_fingers > CY8CTMA140_MAX_FINGERS) {
+ dev_err(ts->dev, "unexpected number of fingers: %d\n",
+ n_fingers);
+ goto out;
+ }
+
+ cy8ctma140_report(ts, buf, n_fingers);
+
+out:
+ return IRQ_HANDLED;
+}
+
+static int cy8ctma140_init(struct cy8ctma140 *ts)
+{
+ u8 addr[1];
+ u8 buf[5];
+ int ret;
+
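+ /* Read the 5-byte firmware info block: vendor chars, HW ID and FW version. */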
+ addr[0] = CY8CTMA140_GET_FW_INFO;
+ ret = i2c_master_send(ts->client, addr, 1);
+ if (ret < 0) {
+ dev_err(ts->dev, "error sending FW info message\n");
+ return ret;
+ }
+ ret = i2c_master_recv(ts->client, buf, 5);
+ if (ret < 0) {
+ dev_err(ts->dev, "error receiving FW info message\n");
+ return ret;
+ }
+ if (ret != 5) {
+ dev_err(ts->dev, "got only %d bytes\n", ret);
+ return -EIO;
+ }
+
+ dev_dbg(ts->dev, "vendor %c%c, HW ID %.2d, FW ver %.4d\n",
+ buf[0], buf[1], buf[3], buf[4]);
+
+ return 0;
+}
+
+static int cy8ctma140_power_up(struct cy8ctma140 *ts)
+{
+ int error;
+
+ error = regulator_bulk_enable(ARRAY_SIZE(ts->regulators),
+ ts->regulators);
+ if (error) {
+ dev_err(ts->dev, "failed to enable regulators\n");
+ return error;
+ }
+
+ msleep(250);
+
+ return 0;
+}
+
+static void cy8ctma140_power_down(struct cy8ctma140 *ts)
+{
+ regulator_bulk_disable(ARRAY_SIZE(ts->regulators),
+ ts->regulators);
+}
+
+/* Called from the registered devm action */
+static void cy8ctma140_power_off_action(void *d)
+{
+ struct cy8ctma140 *ts = d;
+
+ cy8ctma140_power_down(ts);
+}
+
+static int cy8ctma140_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct cy8ctma140 *ts;
+ struct input_dev *input;
+ struct device *dev = &client->dev;
+ int error;
+
+ ts = devm_kzalloc(dev, sizeof(*ts), GFP_KERNEL);
+ if (!ts)
+ return -ENOMEM;
+
+ input = devm_input_allocate_device(dev);
+ if (!input)
+ return -ENOMEM;
+
+ ts->dev = dev;
+ ts->client = client;
+ ts->input = input;
+
+ input_set_capability(input, EV_ABS, ABS_MT_POSITION_X);
+ input_set_capability(input, EV_ABS, ABS_MT_POSITION_Y);
+ /* One byte for width 0..255 so this is the limit */
+ input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0);
+ /*
+ * This sets up event max/min capabilities and fuzz.
+ * Some DT properties are compulsory so we do not need
+ * to provide defaults for X/Y max or pressure max.
+ *
+ * We just initialize a very simple MT touchscreen here; some
+ * devices use the capability of this touchscreen to
+ * provide touchkeys, and in that case this needs to be
+ * extended to handle touchkey input.
+ *
+ * The firmware takes care of finger tracking and dropping
+ * invalid ranges.
+ */
+ touchscreen_parse_properties(input, true, &ts->props);
+ input_abs_set_fuzz(input, ABS_MT_POSITION_X, 0);
+ input_abs_set_fuzz(input, ABS_MT_POSITION_Y, 0);
+
+ error = input_mt_init_slots(input, CY8CTMA140_MAX_FINGERS,
+ INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED);
+ if (error)
+ return error;
+
+ input->name = CY8CTMA140_NAME;
+ input->id.bustype = BUS_I2C;
+ input_set_drvdata(input, ts);
+
+ /*
+ * VCPIN is the analog voltage supply and VDD is the digital
+ * voltage supply. Since the voltage range of VDD overlaps that
+ * of VCPIN, many designs just supply both with a single voltage
+ * source of ~3.3 V.
+ */
+ ts->regulators[0].supply = "vcpin";
+ ts->regulators[1].supply = "vdd";
+ error = devm_regulator_bulk_get(dev, ARRAY_SIZE(ts->regulators),
+ ts->regulators);
+ if (error) {
+ if (error != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get regulators %d\n",
+ error);
+ return error;
+ }
+
+ error = cy8ctma140_power_up(ts);
+ if (error)
+ return error;
+
+ error = devm_add_action_or_reset(dev, cy8ctma140_power_off_action, ts);
+ if (error) {
+ dev_err(dev, "failed to install power off handler\n");
+ return error;
+ }
+
+ error = devm_request_threaded_irq(dev, client->irq,
+ NULL, cy8ctma140_irq_thread,
+ IRQF_ONESHOT, CY8CTMA140_NAME, ts);
+ if (error) {
+ dev_err(dev, "irq %d busy? error %d\n", client->irq, error);
+ return error;
+ }
+
+ error = cy8ctma140_init(ts);
+ if (error)
+ return error;
+
+ error = input_register_device(input);
+ if (error)
+ return error;
+
+ i2c_set_clientdata(client, ts);
+
+ return 0;
+}
+
+static int __maybe_unused cy8ctma140_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct cy8ctma140 *ts = i2c_get_clientdata(client);
+
+ if (!device_may_wakeup(&client->dev))
+ cy8ctma140_power_down(ts);
+
+ return 0;
+}
+
+static int __maybe_unused cy8ctma140_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct cy8ctma140 *ts = i2c_get_clientdata(client);
+ int error;
+
+ if (!device_may_wakeup(&client->dev)) {
+ error = cy8ctma140_power_up(ts);
+ if (error)
+ return error;
+ }
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(cy8ctma140_pm, cy8ctma140_suspend, cy8ctma140_resume);
+
+static const struct i2c_device_id cy8ctma140_idtable[] = {
+ { CY8CTMA140_NAME, 0 },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(i2c, cy8ctma140_idtable);
+
+static const struct of_device_id cy8ctma140_of_match[] = {
+ { .compatible = "cypress,cy8ctma140", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, cy8ctma140_of_match);
+
+static struct i2c_driver cy8ctma140_driver = {
+ .driver = {
+ .name = CY8CTMA140_NAME,
+ .pm = &cy8ctma140_pm,
+ .of_match_table = cy8ctma140_of_match,
+ },
+ .id_table = cy8ctma140_idtable,
+ .probe = cy8ctma140_probe,
+};
+module_i2c_driver(cy8ctma140_driver);
+
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("CY8CTMA140 TouchScreen Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/cyttsp4_core.c b/drivers/input/touchscreen/cyttsp4_core.c
index 6bcffc930384..02a73d9a4def 100644
--- a/drivers/input/touchscreen/cyttsp4_core.c
+++ b/drivers/input/touchscreen/cyttsp4_core.c
@@ -744,8 +744,7 @@ static void cyttsp4_report_slot_liftoff(struct cyttsp4_mt_data *md,
for (t = 0; t < max_slots; t++) {
input_mt_slot(md->input, t);
- input_mt_report_slot_state(md->input,
- MT_TOOL_FINGER, false);
+ input_mt_report_slot_inactive(md->input);
}
}
@@ -845,7 +844,7 @@ static void cyttsp4_final_sync(struct input_dev *input, int max_slots, int *ids)
if (ids[t])
continue;
input_mt_slot(input, t);
- input_mt_report_slot_state(input, MT_TOOL_FINGER, false);
+ input_mt_report_slot_inactive(input);
}
input_sync(input);
diff --git a/drivers/input/touchscreen/cyttsp_core.c b/drivers/input/touchscreen/cyttsp_core.c
index 3f5d463dbeed..697aa2c158f7 100644
--- a/drivers/input/touchscreen/cyttsp_core.c
+++ b/drivers/input/touchscreen/cyttsp_core.c
@@ -340,7 +340,7 @@ static void cyttsp_report_tchdata(struct cyttsp *ts)
continue;
input_mt_slot(input, i);
- input_mt_report_slot_state(input, MT_TOOL_FINGER, false);
+ input_mt_report_slot_inactive(input);
}
input_sync(input);
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index d2587724c52a..3a4f18d3450d 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -38,6 +38,9 @@
#define WORK_REGISTER_NUM_X 0x33
#define WORK_REGISTER_NUM_Y 0x34
+#define PMOD_REGISTER_ACTIVE 0x00
+#define PMOD_REGISTER_HIBERNATE 0x03
+
#define M09_REGISTER_THRESHOLD 0x80
#define M09_REGISTER_GAIN 0x92
#define M09_REGISTER_OFFSET 0x93
@@ -53,6 +56,7 @@
#define WORK_REGISTER_OPMODE 0x3c
#define FACTORY_REGISTER_OPMODE 0x01
+#define PMOD_REGISTER_OPMODE 0xa5
#define TOUCH_EVENT_DOWN 0x00
#define TOUCH_EVENT_UP 0x01
@@ -65,6 +69,12 @@
#define EDT_RAW_DATA_RETRIES 100
#define EDT_RAW_DATA_DELAY 1000 /* usec */
+enum edt_pmode {
+ EDT_PMODE_NOT_SUPPORTED,
+ EDT_PMODE_HIBERNATE,
+ EDT_PMODE_POWEROFF,
+};
+
enum edt_ver {
EDT_M06,
EDT_M09,
@@ -103,6 +113,7 @@ struct edt_ft5x06_ts_data {
struct mutex mutex;
bool factory_mode;
+ enum edt_pmode suspend_mode;
int threshold;
int gain;
int offset;
@@ -527,6 +538,29 @@ static const struct attribute_group edt_ft5x06_attr_group = {
.attrs = edt_ft5x06_attrs,
};
+static void edt_ft5x06_restore_reg_parameters(struct edt_ft5x06_ts_data *tsdata)
+{
+ struct edt_reg_addr *reg_addr = &tsdata->reg_addr;
+
+ edt_ft5x06_register_write(tsdata, reg_addr->reg_threshold,
+ tsdata->threshold);
+ edt_ft5x06_register_write(tsdata, reg_addr->reg_gain,
+ tsdata->gain);
+ if (reg_addr->reg_offset != NO_REGISTER)
+ edt_ft5x06_register_write(tsdata, reg_addr->reg_offset,
+ tsdata->offset);
+ if (reg_addr->reg_offset_x != NO_REGISTER)
+ edt_ft5x06_register_write(tsdata, reg_addr->reg_offset_x,
+ tsdata->offset_x);
+ if (reg_addr->reg_offset_y != NO_REGISTER)
+ edt_ft5x06_register_write(tsdata, reg_addr->reg_offset_y,
+ tsdata->offset_y);
+ if (reg_addr->reg_report_rate != NO_REGISTER)
+ edt_ft5x06_register_write(tsdata, reg_addr->reg_report_rate,
+ tsdata->report_rate);
+}
+
#ifdef CONFIG_DEBUG_FS
static int edt_ft5x06_factory_mode(struct edt_ft5x06_ts_data *tsdata)
{
@@ -592,7 +626,6 @@ static int edt_ft5x06_work_mode(struct edt_ft5x06_ts_data *tsdata)
{
struct i2c_client *client = tsdata->client;
int retries = EDT_SWITCH_MODE_RETRIES;
- struct edt_reg_addr *reg_addr = &tsdata->reg_addr;
int ret;
int error;
@@ -624,24 +657,7 @@ static int edt_ft5x06_work_mode(struct edt_ft5x06_ts_data *tsdata)
kfree(tsdata->raw_buffer);
tsdata->raw_buffer = NULL;
- /* restore parameters */
- edt_ft5x06_register_write(tsdata, reg_addr->reg_threshold,
- tsdata->threshold);
- edt_ft5x06_register_write(tsdata, reg_addr->reg_gain,
- tsdata->gain);
- if (reg_addr->reg_offset != NO_REGISTER)
- edt_ft5x06_register_write(tsdata, reg_addr->reg_offset,
- tsdata->offset);
- if (reg_addr->reg_offset_x != NO_REGISTER)
- edt_ft5x06_register_write(tsdata, reg_addr->reg_offset_x,
- tsdata->offset_x);
- if (reg_addr->reg_offset_y != NO_REGISTER)
- edt_ft5x06_register_write(tsdata, reg_addr->reg_offset_y,
- tsdata->offset_y);
- if (reg_addr->reg_report_rate != NO_REGISTER)
- edt_ft5x06_register_write(tsdata, reg_addr->reg_report_rate,
- tsdata->report_rate);
-
+ edt_ft5x06_restore_reg_parameters(tsdata);
enable_irq(client->irq);
return 0;
@@ -762,9 +778,8 @@ static const struct file_operations debugfs_raw_data_fops = {
.read = edt_ft5x06_debugfs_raw_data_read,
};
-static void
-edt_ft5x06_ts_prepare_debugfs(struct edt_ft5x06_ts_data *tsdata,
- const char *debugfs_name)
+static void edt_ft5x06_ts_prepare_debugfs(struct edt_ft5x06_ts_data *tsdata,
+ const char *debugfs_name)
{
tsdata->debug_dir = debugfs_create_dir(debugfs_name, NULL);
@@ -777,8 +792,7 @@ edt_ft5x06_ts_prepare_debugfs(struct edt_ft5x06_ts_data *tsdata,
tsdata->debug_dir, tsdata, &debugfs_raw_data_fops);
}
-static void
-edt_ft5x06_ts_teardown_debugfs(struct edt_ft5x06_ts_data *tsdata)
+static void edt_ft5x06_ts_teardown_debugfs(struct edt_ft5x06_ts_data *tsdata)
{
debugfs_remove_recursive(tsdata->debug_dir);
kfree(tsdata->raw_buffer);
@@ -786,14 +800,17 @@ edt_ft5x06_ts_teardown_debugfs(struct edt_ft5x06_ts_data *tsdata)
#else
-static inline void
-edt_ft5x06_ts_prepare_debugfs(struct edt_ft5x06_ts_data *tsdata,
- const char *debugfs_name)
+static int edt_ft5x06_factory_mode(struct edt_ft5x06_ts_data *tsdata)
+{
+ return -ENOSYS;
+}
+
+static void edt_ft5x06_ts_prepare_debugfs(struct edt_ft5x06_ts_data *tsdata,
+ const char *debugfs_name)
{
}
-static inline void
-edt_ft5x06_ts_teardown_debugfs(struct edt_ft5x06_ts_data *tsdata)
+static void edt_ft5x06_ts_teardown_debugfs(struct edt_ft5x06_ts_data *tsdata)
{
}
@@ -938,19 +955,25 @@ static void edt_ft5x06_ts_get_defaults(struct device *dev,
error = device_property_read_u32(dev, "offset", &val);
if (!error) {
- edt_ft5x06_register_write(tsdata, reg_addr->reg_offset, val);
+ if (reg_addr->reg_offset != NO_REGISTER)
+ edt_ft5x06_register_write(tsdata,
+ reg_addr->reg_offset, val);
tsdata->offset = val;
}
error = device_property_read_u32(dev, "offset-x", &val);
if (!error) {
- edt_ft5x06_register_write(tsdata, reg_addr->reg_offset_x, val);
+ if (reg_addr->reg_offset_x != NO_REGISTER)
+ edt_ft5x06_register_write(tsdata,
+ reg_addr->reg_offset_x, val);
tsdata->offset_x = val;
}
error = device_property_read_u32(dev, "offset-y", &val);
if (!error) {
- edt_ft5x06_register_write(tsdata, reg_addr->reg_offset_y, val);
+ if (reg_addr->reg_offset_y != NO_REGISTER)
+ edt_ft5x06_register_write(tsdata,
+ reg_addr->reg_offset_y, val);
tsdata->offset_y = val;
}
}
@@ -1114,6 +1137,19 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
return error;
}
+ /*
+ * Check which sleep modes we can support. Power-off requires the
+ * reset pin to ensure correct power-down/power-up behaviour. Start with
+ * the EDT_PMODE_POWEROFF test since this is the deepest possible sleep
+ * mode.
+ */
+ if (tsdata->reset_gpio)
+ tsdata->suspend_mode = EDT_PMODE_POWEROFF;
+ else if (tsdata->wake_gpio)
+ tsdata->suspend_mode = EDT_PMODE_HIBERNATE;
+ else
+ tsdata->suspend_mode = EDT_PMODE_NOT_SUPPORTED;
+
if (tsdata->wake_gpio) {
usleep_range(5000, 6000);
gpiod_set_value_cansleep(tsdata->wake_gpio, 1);
@@ -1227,6 +1263,102 @@ static int edt_ft5x06_ts_remove(struct i2c_client *client)
return 0;
}
+static int __maybe_unused edt_ft5x06_ts_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct edt_ft5x06_ts_data *tsdata = i2c_get_clientdata(client);
+ struct gpio_desc *reset_gpio = tsdata->reset_gpio;
+ int ret;
+
+ if (device_may_wakeup(dev))
+ return 0;
+
+ if (tsdata->suspend_mode == EDT_PMODE_NOT_SUPPORTED)
+ return 0;
+
+ /* Enter hibernate mode. */
+ ret = edt_ft5x06_register_write(tsdata, PMOD_REGISTER_OPMODE,
+ PMOD_REGISTER_HIBERNATE);
+ if (ret)
+ dev_warn(dev, "Failed to set hibernate mode\n");
+
+ if (tsdata->suspend_mode == EDT_PMODE_HIBERNATE)
+ return 0;
+
+ /*
+ * Power off according to the datasheet. Cutting the power may leave
+ * the irq line in an undefined state depending on the host pull
+ * resistor settings. Disable the irq to avoid adjusting each host
+ * until the device is back in a fully functional state.
+ */
+ disable_irq(tsdata->client->irq);
+
+ gpiod_set_value_cansleep(reset_gpio, 1);
+ usleep_range(1000, 2000);
+
+ ret = regulator_disable(tsdata->vcc);
+ if (ret)
+ dev_warn(dev, "Failed to disable vcc\n");
+
+ return 0;
+}
+
+static int __maybe_unused edt_ft5x06_ts_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct edt_ft5x06_ts_data *tsdata = i2c_get_clientdata(client);
+ int ret = 0;
+
+ if (device_may_wakeup(dev))
+ return 0;
+
+ if (tsdata->suspend_mode == EDT_PMODE_NOT_SUPPORTED)
+ return 0;
+
+ if (tsdata->suspend_mode == EDT_PMODE_POWEROFF) {
+ struct gpio_desc *reset_gpio = tsdata->reset_gpio;
+
+ /*
+ * We can't check if the regulator is a dummy or a real
+ * regulator, so we need to specify the 5ms reset time (T_rst)
+ * here instead of the 100us T_rtp time. We also need to wait
+ * 300ms in case it was a real supply and the power was cut
+ * off. Toggling the reset pin is also a way to exit hibernate
+ * mode.
+ */
+ gpiod_set_value_cansleep(reset_gpio, 1);
+ usleep_range(5000, 6000);
+
+ ret = regulator_enable(tsdata->vcc);
+ if (ret) {
+ dev_err(dev, "Failed to enable vcc\n");
+ return ret;
+ }
+
+ usleep_range(1000, 2000);
+ gpiod_set_value_cansleep(reset_gpio, 0);
+ msleep(300);
+
+ edt_ft5x06_restore_reg_parameters(tsdata);
+ enable_irq(tsdata->client->irq);
+
+ if (tsdata->factory_mode)
+ ret = edt_ft5x06_factory_mode(tsdata);
+ } else {
+ struct gpio_desc *wake_gpio = tsdata->wake_gpio;
+
+ gpiod_set_value_cansleep(wake_gpio, 0);
+ usleep_range(5000, 6000);
+ gpiod_set_value_cansleep(wake_gpio, 1);
+ }
+
+
+}
+
+static SIMPLE_DEV_PM_OPS(edt_ft5x06_ts_pm_ops,
+ edt_ft5x06_ts_suspend, edt_ft5x06_ts_resume);
+
static const struct edt_i2c_chip_data edt_ft5x06_data = {
.max_support_points = 5,
};
@@ -1265,6 +1397,8 @@ static struct i2c_driver edt_ft5x06_ts_driver = {
.driver = {
.name = "edt_ft5x06",
.of_match_table = edt_ft5x06_of_match,
+ .pm = &edt_ft5x06_ts_pm_ops,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
.id_table = edt_ft5x06_ts_id,
.probe = edt_ft5x06_ts_probe,
diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
index 2289f9638116..233cb1085bbd 100644
--- a/drivers/input/touchscreen/elants_i2c.c
+++ b/drivers/input/touchscreen/elants_i2c.c
@@ -33,6 +33,7 @@
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/input/mt.h>
+#include <linux/input/touchscreen.h>
#include <linux/acpi.h>
#include <linux/of.h>
#include <linux/gpio/consumer.h>
@@ -89,6 +90,7 @@
/* FW read command, 0x53 0x?? 0x0, 0x01 */
#define E_ELAN_INFO_FW_VER 0x00
#define E_ELAN_INFO_BC_VER 0x10
+#define E_ELAN_INFO_REK 0xE0
#define E_ELAN_INFO_TEST_VER 0xE0
#define E_ELAN_INFO_FW_ID 0xF0
#define E_INFO_OSR 0xD6
@@ -136,6 +138,7 @@ struct elants_data {
unsigned int y_res;
unsigned int x_max;
unsigned int y_max;
+ struct touchscreen_properties prop;
enum elants_state state;
enum elants_iap_mode iap_mode;
@@ -189,7 +192,8 @@ static int elants_i2c_read(struct i2c_client *client, void *data, size_t size)
static int elants_i2c_execute_command(struct i2c_client *client,
const u8 *cmd, size_t cmd_size,
- u8 *resp, size_t resp_size)
+ u8 *resp, size_t resp_size,
+ int retries, const char *cmd_name)
{
struct i2c_msg msgs[2];
int ret;
@@ -209,30 +213,55 @@ static int elants_i2c_execute_command(struct i2c_client *client,
break;
default:
- dev_err(&client->dev, "%s: invalid command %*ph\n",
- __func__, (int)cmd_size, cmd);
+ dev_err(&client->dev, "(%s): invalid command: %*ph\n",
+ cmd_name, (int)cmd_size, cmd);
return -EINVAL;
}
- msgs[0].addr = client->addr;
- msgs[0].flags = client->flags & I2C_M_TEN;
- msgs[0].len = cmd_size;
- msgs[0].buf = (u8 *)cmd;
+ for (;;) {
+ msgs[0].addr = client->addr;
+ msgs[0].flags = client->flags & I2C_M_TEN;
+ msgs[0].len = cmd_size;
+ msgs[0].buf = (u8 *)cmd;
+
+ msgs[1].addr = client->addr;
+ msgs[1].flags = client->flags & I2C_M_TEN;
+ msgs[1].flags |= I2C_M_RD;
+ msgs[1].len = resp_size;
+ msgs[1].buf = resp;
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret < 0) {
+ if (--retries > 0) {
+ dev_dbg(&client->dev,
+ "(%s) I2C transfer failed: %pe (retrying)\n",
+ cmd_name, ERR_PTR(ret));
+ continue;
+ }
- msgs[1].addr = client->addr;
- msgs[1].flags = client->flags & I2C_M_TEN;
- msgs[1].flags |= I2C_M_RD;
- msgs[1].len = resp_size;
- msgs[1].buf = resp;
+ dev_err(&client->dev,
+ "(%s) I2C transfer failed: %pe\n",
+ cmd_name, ERR_PTR(ret));
+ return ret;
+ }
- ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
- if (ret < 0)
- return ret;
+ if (ret != ARRAY_SIZE(msgs) ||
+ resp[FW_HDR_TYPE] != expected_response) {
+ if (--retries > 0) {
+ dev_dbg(&client->dev,
+ "(%s) unexpected response: %*ph (retrying)\n",
+ cmd_name, ret, resp);
+ continue;
+ }
- if (ret != ARRAY_SIZE(msgs) || resp[FW_HDR_TYPE] != expected_response)
- return -EIO;
+ dev_err(&client->dev,
+ "(%s) unexpected response: %*ph\n",
+ cmd_name, ret, resp);
+ return -EIO;
+ }
- return 0;
+ return 0;
+ }
}
static int elants_i2c_calibrate(struct elants_data *ts)
@@ -305,27 +334,21 @@ static u16 elants_i2c_parse_version(u8 *buf)
static int elants_i2c_query_hw_version(struct elants_data *ts)
{
struct i2c_client *client = ts->client;
- int error, retry_cnt;
+ int retry_cnt = MAX_RETRIES;
const u8 cmd[] = { CMD_HEADER_READ, E_ELAN_INFO_FW_ID, 0x00, 0x01 };
u8 resp[HEADER_SIZE];
+ int error;
- for (retry_cnt = 0; retry_cnt < MAX_RETRIES; retry_cnt++) {
+ while (retry_cnt--) {
error = elants_i2c_execute_command(client, cmd, sizeof(cmd),
- resp, sizeof(resp));
- if (!error) {
- ts->hw_version = elants_i2c_parse_version(resp);
- if (ts->hw_version != 0xffff)
- return 0;
- }
-
- dev_dbg(&client->dev, "read fw id error=%d, buf=%*phC\n",
- error, (int)sizeof(resp), resp);
- }
+ resp, sizeof(resp), 1,
+ "read fw id");
+ if (error)
+ return error;
- if (error) {
- dev_err(&client->dev,
- "Failed to read fw id: %d\n", error);
- return error;
+ ts->hw_version = elants_i2c_parse_version(resp);
+ if (ts->hw_version != 0xffff)
+ return 0;
}
dev_err(&client->dev, "Invalid fw id: %#04x\n", ts->hw_version);
@@ -336,26 +359,27 @@ static int elants_i2c_query_hw_version(struct elants_data *ts)
static int elants_i2c_query_fw_version(struct elants_data *ts)
{
struct i2c_client *client = ts->client;
- int error, retry_cnt;
+ int retry_cnt = MAX_RETRIES;
const u8 cmd[] = { CMD_HEADER_READ, E_ELAN_INFO_FW_VER, 0x00, 0x01 };
u8 resp[HEADER_SIZE];
+ int error;
- for (retry_cnt = 0; retry_cnt < MAX_RETRIES; retry_cnt++) {
+ while (retry_cnt--) {
error = elants_i2c_execute_command(client, cmd, sizeof(cmd),
- resp, sizeof(resp));
- if (!error) {
- ts->fw_version = elants_i2c_parse_version(resp);
- if (ts->fw_version != 0x0000 &&
- ts->fw_version != 0xffff)
- return 0;
- }
+ resp, sizeof(resp), 1,
+ "read fw version");
+ if (error)
+ return error;
+
+ ts->fw_version = elants_i2c_parse_version(resp);
+ if (ts->fw_version != 0x0000 && ts->fw_version != 0xffff)
+ return 0;
- dev_dbg(&client->dev, "read fw version error=%d, buf=%*phC\n",
- error, (int)sizeof(resp), resp);
+ dev_dbg(&client->dev, "(read fw version) resp %*phC\n",
+ (int)sizeof(resp), resp);
}
- dev_err(&client->dev,
- "Failed to read fw version or fw version is invalid\n");
+ dev_err(&client->dev, "Invalid fw ver: %#04x\n", ts->fw_version);
return -EINVAL;
}
@@ -363,30 +387,24 @@ static int elants_i2c_query_fw_version(struct elants_data *ts)
static int elants_i2c_query_test_version(struct elants_data *ts)
{
struct i2c_client *client = ts->client;
- int error, retry_cnt;
+ int error;
u16 version;
const u8 cmd[] = { CMD_HEADER_READ, E_ELAN_INFO_TEST_VER, 0x00, 0x01 };
u8 resp[HEADER_SIZE];
- for (retry_cnt = 0; retry_cnt < MAX_RETRIES; retry_cnt++) {
- error = elants_i2c_execute_command(client, cmd, sizeof(cmd),
- resp, sizeof(resp));
- if (!error) {
- version = elants_i2c_parse_version(resp);
- ts->test_version = version >> 8;
- ts->solution_version = version & 0xff;
-
- return 0;
- }
-
- dev_dbg(&client->dev,
- "read test version error rc=%d, buf=%*phC\n",
- error, (int)sizeof(resp), resp);
+ error = elants_i2c_execute_command(client, cmd, sizeof(cmd),
+ resp, sizeof(resp), MAX_RETRIES,
+ "read test version");
+ if (error) {
+ dev_err(&client->dev, "Failed to read test version\n");
+ return error;
}
- dev_err(&client->dev, "Failed to read test version\n");
+ version = elants_i2c_parse_version(resp);
+ ts->test_version = version >> 8;
+ ts->solution_version = version & 0xff;
- return -EINVAL;
+ return 0;
}
static int elants_i2c_query_bc_version(struct elants_data *ts)
@@ -398,13 +416,10 @@ static int elants_i2c_query_bc_version(struct elants_data *ts)
int error;
error = elants_i2c_execute_command(client, cmd, sizeof(cmd),
- resp, sizeof(resp));
- if (error) {
- dev_err(&client->dev,
- "read BC version error=%d, buf=%*phC\n",
- error, (int)sizeof(resp), resp);
+ resp, sizeof(resp), 1,
+ "read BC version");
+ if (error)
return error;
- }
version = elants_i2c_parse_version(resp);
ts->bc_version = version >> 8;
@@ -436,12 +451,10 @@ static int elants_i2c_query_ts_info(struct elants_data *ts)
error = elants_i2c_execute_command(client,
get_resolution_cmd,
sizeof(get_resolution_cmd),
- resp, sizeof(resp));
- if (error) {
- dev_err(&client->dev, "get resolution command failed: %d\n",
- error);
+ resp, sizeof(resp), 1,
+ "get resolution");
+ if (error)
return error;
- }
rows = resp[2] + resp[6] + resp[10];
cols = resp[3] + resp[7] + resp[11];
@@ -449,36 +462,29 @@ static int elants_i2c_query_ts_info(struct elants_data *ts)
/* Process mm_to_pixel information */
error = elants_i2c_execute_command(client,
get_osr_cmd, sizeof(get_osr_cmd),
- resp, sizeof(resp));
- if (error) {
- dev_err(&client->dev, "get osr command failed: %d\n",
- error);
+ resp, sizeof(resp), 1, "get osr");
+ if (error)
return error;
- }
osr = resp[3];
error = elants_i2c_execute_command(client,
get_physical_scan_cmd,
sizeof(get_physical_scan_cmd),
- resp, sizeof(resp));
- if (error) {
- dev_err(&client->dev, "get physical scan command failed: %d\n",
- error);
+ resp, sizeof(resp), 1,
+ "get physical scan");
+ if (error)
return error;
- }
phy_x = get_unaligned_be16(&resp[2]);
error = elants_i2c_execute_command(client,
get_physical_drive_cmd,
sizeof(get_physical_drive_cmd),
- resp, sizeof(resp));
- if (error) {
- dev_err(&client->dev, "get physical drive command failed: %d\n",
- error);
+ resp, sizeof(resp), 1,
+ "get physical drive");
+ if (error)
return error;
- }
phy_y = get_unaligned_be16(&resp[2]);
@@ -633,11 +639,10 @@ static int elants_i2c_validate_remark_id(struct elants_data *ts,
/* Compare TS Remark ID and FW Remark ID */
error = elants_i2c_execute_command(client, cmd, sizeof(cmd),
- resp, sizeof(resp));
- if (error) {
- dev_err(&client->dev, "failed to query Remark ID: %d\n", error);
+ resp, sizeof(resp),
+ 1, "read Remark ID");
+ if (error)
return error;
- }
ts_remark_id = get_unaligned_be16(&resp[3]);
@@ -875,8 +880,7 @@ static void elants_i2c_mt_event(struct elants_data *ts, u8 *buf)
input_mt_slot(input, i);
input_mt_report_slot_state(input, tool_type, true);
- input_event(input, EV_ABS, ABS_MT_POSITION_X, x);
- input_event(input, EV_ABS, ABS_MT_POSITION_Y, y);
+ touchscreen_report_pos(input, &ts->prop, x, y, true);
input_event(input, EV_ABS, ABS_MT_PRESSURE, p);
input_event(input, EV_ABS, ABS_MT_TOUCH_MAJOR, w);
@@ -1017,7 +1021,7 @@ out:
*/
static ssize_t calibrate_store(struct device *dev,
struct device_attribute *attr,
- const char *buf, size_t count)
+ const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct elants_data *ts = i2c_get_clientdata(client);
@@ -1063,8 +1067,28 @@ static ssize_t show_iap_mode(struct device *dev,
"Normal" : "Recovery");
}
+static ssize_t show_calibration_count(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ const u8 cmd[] = { CMD_HEADER_READ, E_ELAN_INFO_REK, 0x00, 0x01 };
+ u8 resp[HEADER_SIZE];
+ u16 rek_count;
+ int error;
+
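+ /* The re-calibration (ReK) count is returned as a big-endian word at offset 2. */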
+ error = elants_i2c_execute_command(client, cmd, sizeof(cmd),
+ resp, sizeof(resp), 1,
+ "read ReK status");
+ if (error)
+ return sprintf(buf, "%d\n", error);
+
+ rek_count = get_unaligned_be16(&resp[2]);
+ return sprintf(buf, "0x%04x\n", rek_count);
+}
+
static DEVICE_ATTR_WO(calibrate);
static DEVICE_ATTR(iap_mode, S_IRUGO, show_iap_mode, NULL);
+static DEVICE_ATTR(calibration_count, S_IRUGO, show_calibration_count, NULL);
static DEVICE_ATTR(update_fw, S_IWUSR, NULL, write_update_fw);
struct elants_version_attribute {
@@ -1120,6 +1144,7 @@ static struct attribute *elants_attributes[] = {
&dev_attr_calibrate.attr,
&dev_attr_update_fw.attr,
&dev_attr_iap_mode.attr,
+ &dev_attr_calibration_count.attr,
&elants_ver_attr_fw_version.dattr.attr,
&elants_ver_attr_hw_version.dattr.attr,
@@ -1290,25 +1315,7 @@ static int elants_i2c_probe(struct i2c_client *client,
ts->input->name = "Elan Touchscreen";
ts->input->id.bustype = BUS_I2C;
- __set_bit(BTN_TOUCH, ts->input->keybit);
- __set_bit(EV_ABS, ts->input->evbit);
- __set_bit(EV_KEY, ts->input->evbit);
-
- /* Single touch input params setup */
- input_set_abs_params(ts->input, ABS_X, 0, ts->x_max, 0, 0);
- input_set_abs_params(ts->input, ABS_Y, 0, ts->y_max, 0, 0);
- input_set_abs_params(ts->input, ABS_PRESSURE, 0, 255, 0, 0);
- input_abs_set_res(ts->input, ABS_X, ts->x_res);
- input_abs_set_res(ts->input, ABS_Y, ts->y_res);
-
/* Multitouch input params setup */
- error = input_mt_init_slots(ts->input, MAX_CONTACT_NUM,
- INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED);
- if (error) {
- dev_err(&client->dev,
- "failed to initialize MT slots: %d\n", error);
- return error;
- }
input_set_abs_params(ts->input, ABS_MT_POSITION_X, 0, ts->x_max, 0, 0);
input_set_abs_params(ts->input, ABS_MT_POSITION_Y, 0, ts->y_max, 0, 0);
@@ -1320,6 +1327,16 @@ static int elants_i2c_probe(struct i2c_client *client,
input_abs_set_res(ts->input, ABS_MT_POSITION_Y, ts->y_res);
input_abs_set_res(ts->input, ABS_MT_TOUCH_MAJOR, 1);
+ touchscreen_parse_properties(ts->input, true, &ts->prop);
+
+ error = input_mt_init_slots(ts->input, MAX_CONTACT_NUM,
+ INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED);
+ if (error) {
+ dev_err(&client->dev,
+ "failed to initialize MT slots: %d\n", error);
+ return error;
+ }
+
error = input_register_device(ts->input);
if (error) {
dev_err(&client->dev,
diff --git a/drivers/input/touchscreen/melfas_mip4.c b/drivers/input/touchscreen/melfas_mip4.c
index 247c3aaba2d8..f67efdd040b2 100644
--- a/drivers/input/touchscreen/melfas_mip4.c
+++ b/drivers/input/touchscreen/melfas_mip4.c
@@ -391,7 +391,7 @@ static void mip4_clear_input(struct mip4_ts *ts)
/* Screen */
for (i = 0; i < MIP4_MAX_FINGERS; i++) {
input_mt_slot(ts->input, i);
- input_mt_report_slot_state(ts->input, MT_TOOL_FINGER, 0);
+ input_mt_report_slot_inactive(ts->input);
}
/* Keys */
@@ -534,7 +534,7 @@ static void mip4_report_touch(struct mip4_ts *ts, u8 *packet)
} else {
/* Release event */
input_mt_slot(ts->input, id);
- input_mt_report_slot_state(ts->input, MT_TOOL_FINGER, 0);
+ input_mt_report_slot_inactive(ts->input);
}
input_mt_sync_frame(ts->input);
diff --git a/drivers/input/touchscreen/mms114.c b/drivers/input/touchscreen/mms114.c
index 2ef1adaed9af..1f96657310b7 100644
--- a/drivers/input/touchscreen/mms114.c
+++ b/drivers/input/touchscreen/mms114.c
@@ -54,6 +54,7 @@
enum mms_type {
TYPE_MMS114 = 114,
TYPE_MMS152 = 152,
+ TYPE_MMS345L = 345,
};
struct mms114_data {
@@ -250,6 +251,15 @@ static int mms114_get_version(struct mms114_data *data)
int error;
switch (data->type) {
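+ /* MMS345L reuses the MMS152 firmware revision registers. */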
+ case TYPE_MMS345L:
+ error = __mms114_read_reg(data, MMS152_FW_REV, 3, buf);
+ if (error)
+ return error;
+
+ dev_info(dev, "TSP FW Rev: bootloader 0x%x / core 0x%x / config 0x%x\n",
+ buf[0], buf[1], buf[2]);
+ break;
+
case TYPE_MMS152:
error = __mms114_read_reg(data, MMS152_FW_REV, 3, buf);
if (error)
@@ -287,8 +297,8 @@ static int mms114_setup_regs(struct mms114_data *data)
if (error < 0)
return error;
- /* MMS152 has no configuration or power on registers */
- if (data->type == TYPE_MMS152)
+ /* Only MMS114 has configuration and power on registers */
+ if (data->type != TYPE_MMS114)
return 0;
error = mms114_set_active(data, true);
@@ -547,7 +557,7 @@ static int __maybe_unused mms114_suspend(struct device *dev)
/* Release all touch */
for (id = 0; id < MMS114_MAX_TOUCH; id++) {
input_mt_slot(input_dev, id);
- input_mt_report_slot_state(input_dev, MT_TOOL_FINGER, false);
+ input_mt_report_slot_inactive(input_dev);
}
input_mt_report_pointer_emulation(input_dev, true);
@@ -597,6 +607,9 @@ static const struct of_device_id mms114_dt_match[] = {
}, {
.compatible = "melfas,mms152",
.data = (void *)TYPE_MMS152,
+ }, {
+ .compatible = "melfas,mms345l",
+ .data = (void *)TYPE_MMS345L,
},
{ }
};
diff --git a/drivers/input/touchscreen/raspberrypi-ts.c b/drivers/input/touchscreen/raspberrypi-ts.c
index 0e2e08f3f433..ef6aaed217cf 100644
--- a/drivers/input/touchscreen/raspberrypi-ts.c
+++ b/drivers/input/touchscreen/raspberrypi-ts.c
@@ -100,7 +100,7 @@ static void rpi_ts_poll(struct input_dev *input)
released_ids = ts->known_ids & ~modified_ids;
for_each_set_bit(i, &released_ids, RPI_TS_MAX_SUPPORTED_POINTS) {
input_mt_slot(input, i);
- input_mt_report_slot_state(input, MT_TOOL_FINGER, 0);
+ input_mt_report_slot_inactive(input);
modified_ids &= ~(BIT(i));
}
ts->known_ids = modified_ids;
diff --git a/drivers/input/touchscreen/stmfts.c b/drivers/input/touchscreen/stmfts.c
index b6f95f20f924..b54cc64e4ea6 100644
--- a/drivers/input/touchscreen/stmfts.c
+++ b/drivers/input/touchscreen/stmfts.c
@@ -198,7 +198,7 @@ static void stmfts_report_contact_release(struct stmfts_data *sdata,
u8 slot_id = (event[0] & STMFTS_MASK_TOUCH_ID) >> 4;
input_mt_slot(sdata->input, slot_id);
- input_mt_report_slot_state(sdata->input, MT_TOOL_FINGER, false);
+ input_mt_report_slot_inactive(sdata->input);
input_sync(sdata->input);
}
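
The input driver hunks above all replace open-coded input_mt_report_slot_state(dev, MT_TOOL_FINGER, 0/false)
calls with the new input_mt_report_slot_inactive() helper. The helper definition is not part of the hunks
shown here; a plausible sketch of it (assumed to live in include/linux/input/mt.h) is a thin inline wrapper:

	/* Mark the currently selected MT slot as inactive (no contact). */
	static inline void input_mt_report_slot_inactive(struct input_dev *dev)
	{
		input_mt_report_slot_state(dev, 0, false);
	}

Reporting tool type 0 with an inactive state matches the calls being removed, so these conversions are
intended to be behaviour-preserving.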
diff --git a/drivers/interconnect/Kconfig b/drivers/interconnect/Kconfig
index bfa4ca3ab7a9..5b7204ee2eb2 100644
--- a/drivers/interconnect/Kconfig
+++ b/drivers/interconnect/Kconfig
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
menuconfig INTERCONNECT
- tristate "On-Chip Interconnect management support"
+ bool "On-Chip Interconnect management support"
help
Support for management of the on-chip interconnects.
@@ -11,6 +11,7 @@ menuconfig INTERCONNECT
if INTERCONNECT
+source "drivers/interconnect/imx/Kconfig"
source "drivers/interconnect/qcom/Kconfig"
endif
diff --git a/drivers/interconnect/Makefile b/drivers/interconnect/Makefile
index 725029ae7a2c..4825c287ca13 100644
--- a/drivers/interconnect/Makefile
+++ b/drivers/interconnect/Makefile
@@ -4,4 +4,5 @@ CFLAGS_core.o := -I$(src)
icc-core-objs := core.o
obj-$(CONFIG_INTERCONNECT) += icc-core.o
+obj-$(CONFIG_INTERCONNECT_IMX) += imx/
obj-$(CONFIG_INTERCONNECT_QCOM) += qcom/
diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
index 2c6515e3ecf1..e5f998744501 100644
--- a/drivers/interconnect/core.c
+++ b/drivers/interconnect/core.c
@@ -158,6 +158,7 @@ static struct icc_path *path_init(struct device *dev, struct icc_node *dst,
hlist_add_head(&path->reqs[i].req_node, &node->req_list);
path->reqs[i].node = node;
path->reqs[i].dev = dev;
+ path->reqs[i].enabled = true;
/* reference to previous node was saved during path traversal */
node = node->reverse;
}
@@ -249,9 +250,12 @@ static int aggregate_requests(struct icc_node *node)
if (p->pre_aggregate)
p->pre_aggregate(node);
- hlist_for_each_entry(r, &node->req_list, req_node)
+ hlist_for_each_entry(r, &node->req_list, req_node) {
+ if (!r->enabled)
+ continue;
p->aggregate(node, r->tag, r->avg_bw, r->peak_bw,
&node->avg_bw, &node->peak_bw);
+ }
return 0;
}
@@ -350,10 +354,35 @@ static struct icc_node *of_icc_get_from_provider(struct of_phandle_args *spec)
return node;
}
+static void devm_icc_release(struct device *dev, void *res)
+{
+ icc_put(*(struct icc_path **)res);
+}
+
+struct icc_path *devm_of_icc_get(struct device *dev, const char *name)
+{
+ struct icc_path **ptr, *path;
+
+ ptr = devres_alloc(devm_icc_release, sizeof(**ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ path = of_icc_get(dev, name);
+ if (!IS_ERR(path)) {
+ *ptr = path;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return path;
+}
+EXPORT_SYMBOL_GPL(devm_of_icc_get);
+
/**
- * of_icc_get() - get a path handle from a DT node based on name
+ * of_icc_get_by_index() - get a path handle from a DT node based on index
* @dev: device pointer for the consumer device
- * @name: interconnect path name
+ * @idx: interconnect path index
*
* This function will search for a path between two endpoints and return an
* icc_path handle on success. Use icc_put() to release constraints when they
@@ -365,13 +394,12 @@ static struct icc_node *of_icc_get_from_provider(struct of_phandle_args *spec)
* Return: icc_path pointer on success or ERR_PTR() on error. NULL is returned
* when the API is disabled or the "interconnects" DT property is missing.
*/
-struct icc_path *of_icc_get(struct device *dev, const char *name)
+struct icc_path *of_icc_get_by_index(struct device *dev, int idx)
{
- struct icc_path *path = ERR_PTR(-EPROBE_DEFER);
+ struct icc_path *path;
struct icc_node *src_node, *dst_node;
- struct device_node *np = NULL;
+ struct device_node *np;
struct of_phandle_args src_args, dst_args;
- int idx = 0;
int ret;
if (!dev || !dev->of_node)
@@ -391,12 +419,6 @@ struct icc_path *of_icc_get(struct device *dev, const char *name)
* lets support only global ids and extend this in the future if needed
* without breaking DT compatibility.
*/
- if (name) {
- idx = of_property_match_string(np, "interconnect-names", name);
- if (idx < 0)
- return ERR_PTR(idx);
- }
-
ret = of_parse_phandle_with_args(np, "interconnects",
"#interconnect-cells", idx * 2,
&src_args);
@@ -439,12 +461,8 @@ struct icc_path *of_icc_get(struct device *dev, const char *name)
return path;
}
- if (name)
- path->name = kstrdup_const(name, GFP_KERNEL);
- else
- path->name = kasprintf(GFP_KERNEL, "%s-%s",
- src_node->name, dst_node->name);
-
+ path->name = kasprintf(GFP_KERNEL, "%s-%s",
+ src_node->name, dst_node->name);
if (!path->name) {
kfree(path);
return ERR_PTR(-ENOMEM);
@@ -452,6 +470,53 @@ struct icc_path *of_icc_get(struct device *dev, const char *name)
return path;
}
+EXPORT_SYMBOL_GPL(of_icc_get_by_index);
+
+/**
+ * of_icc_get() - get a path handle from a DT node based on name
+ * @dev: device pointer for the consumer device
+ * @name: interconnect path name
+ *
+ * This function will search for a path between two endpoints and return an
+ * icc_path handle on success. Use icc_put() to release constraints when they
+ * are not needed anymore.
+ * If the interconnect API is disabled, NULL is returned and the consumer
+ * drivers will still build. Drivers are free to handle this specifically,
+ * but they don't have to.
+ *
+ * Return: icc_path pointer on success or ERR_PTR() on error. NULL is returned
+ * when the API is disabled or the "interconnects" DT property is missing.
+ */
+struct icc_path *of_icc_get(struct device *dev, const char *name)
+{
+ struct device_node *np;
+ int idx = 0;
+
+ if (!dev || !dev->of_node)
+ return ERR_PTR(-ENODEV);
+
+ np = dev->of_node;
+
+ /*
+	 * When the consumer DT node does not have the "interconnects"
+	 * property, return a NULL path to skip setting constraints.
+ */
+ if (!of_find_property(np, "interconnects", NULL))
+ return NULL;
+
+ /*
+ * We use a combination of phandle and specifier for endpoint. For now
+ * lets support only global ids and extend this in the future if needed
+ * without breaking DT compatibility.
+ */
+ if (name) {
+ idx = of_property_match_string(np, "interconnect-names", name);
+ if (idx < 0)
+ return ERR_PTR(idx);
+ }
+
+ return of_icc_get_by_index(dev, idx);
+}
EXPORT_SYMBOL_GPL(of_icc_get);
/**
@@ -479,6 +544,24 @@ void icc_set_tag(struct icc_path *path, u32 tag)
EXPORT_SYMBOL_GPL(icc_set_tag);
/**
+ * icc_get_name() - Get name of the icc path
+ * @path: reference to the path returned by icc_get()
+ *
+ * This function is used by an interconnect consumer to get the name of the icc
+ * path.
+ *
+ * Returns a valid pointer on success, or NULL otherwise.
+ */
+const char *icc_get_name(struct icc_path *path)
+{
+ if (!path)
+ return NULL;
+
+ return path->name;
+}
+EXPORT_SYMBOL_GPL(icc_get_name);
+
+/**
* icc_set_bw() - set bandwidth constraints on an interconnect path
* @path: reference to the path returned by icc_get()
* @avg_bw: average bandwidth in kilobytes per second
@@ -546,6 +629,39 @@ int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
}
EXPORT_SYMBOL_GPL(icc_set_bw);
+static int __icc_enable(struct icc_path *path, bool enable)
+{
+ int i;
+
+ if (!path)
+ return 0;
+
+ if (WARN_ON(IS_ERR(path) || !path->num_nodes))
+ return -EINVAL;
+
+ mutex_lock(&icc_lock);
+
+ for (i = 0; i < path->num_nodes; i++)
+ path->reqs[i].enabled = enable;
+
+ mutex_unlock(&icc_lock);
+
+ return icc_set_bw(path, path->reqs[0].avg_bw,
+ path->reqs[0].peak_bw);
+}
+
+int icc_enable(struct icc_path *path)
+{
+ return __icc_enable(path, true);
+}
+EXPORT_SYMBOL_GPL(icc_enable);
+
+int icc_disable(struct icc_path *path)
+{
+ return __icc_enable(path, false);
+}
+EXPORT_SYMBOL_GPL(icc_disable);
+
/**
* icc_get() - return a handle for path between two endpoints
* @dev: the device requesting the path
@@ -908,12 +1024,7 @@ static int __init icc_init(void)
return 0;
}
-static void __exit icc_exit(void)
-{
- debugfs_remove_recursive(icc_debugfs_dir);
-}
-module_init(icc_init);
-module_exit(icc_exit);
+device_initcall(icc_init);
MODULE_AUTHOR("Georgi Djakov <georgi.djakov@linaro.org>");
MODULE_DESCRIPTION("Interconnect Driver Core");
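
The core.c changes above introduce of_icc_get_by_index(), devm_of_icc_get(), icc_get_name() and the
icc_enable()/icc_disable() pair. A minimal consumer sketch of the new calls (the driver name, path name and
bandwidth numbers below are purely illustrative and not taken from this patch):

	#include <linux/interconnect.h>
	#include <linux/module.h>
	#include <linux/platform_device.h>

	static int foo_icc_probe(struct platform_device *pdev)
	{
		struct icc_path *path;
		int ret;

		/* Managed lookup by "interconnect-names" entry; released on unbind */
		path = devm_of_icc_get(&pdev->dev, "dma-mem");
		if (IS_ERR(path))
			return PTR_ERR(path);

		/* Vote for 100 MB/s average and 200 MB/s peak bandwidth (kBps units) */
		ret = icc_set_bw(path, 100000, 200000);
		if (ret)
			return ret;

		dev_info(&pdev->dev, "using interconnect path %s\n",
			 icc_get_name(path));

		/* Drop the vote without forgetting the stored bandwidth values... */
		icc_disable(path);
		/* ...and re-apply it later, e.g. from a runtime-resume handler */
		return icc_enable(path);
	}

Note that devm_of_icc_get() may also return NULL when the interconnect API is disabled or the
"interconnects" property is absent; icc_set_bw(), icc_enable() and icc_disable() accept a NULL path and
return 0 in that case, and icc_get_name() returns NULL.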
diff --git a/drivers/interconnect/imx/Kconfig b/drivers/interconnect/imx/Kconfig
new file mode 100644
index 000000000000..be2928362bb7
--- /dev/null
+++ b/drivers/interconnect/imx/Kconfig
@@ -0,0 +1,17 @@
+config INTERCONNECT_IMX
+ tristate "i.MX interconnect drivers"
+ depends on ARCH_MXC || COMPILE_TEST
+ help
+	  Generic interconnect drivers for i.MX SoCs
+
+config INTERCONNECT_IMX8MM
+ tristate "i.MX8MM interconnect driver"
+ depends on INTERCONNECT_IMX
+
+config INTERCONNECT_IMX8MN
+ tristate "i.MX8MN interconnect driver"
+ depends on INTERCONNECT_IMX
+
+config INTERCONNECT_IMX8MQ
+ tristate "i.MX8MQ interconnect driver"
+ depends on INTERCONNECT_IMX
diff --git a/drivers/interconnect/imx/Makefile b/drivers/interconnect/imx/Makefile
new file mode 100644
index 000000000000..21fd5233754f
--- /dev/null
+++ b/drivers/interconnect/imx/Makefile
@@ -0,0 +1,9 @@
+imx-interconnect-objs := imx.o
+imx8mm-interconnect-objs := imx8mm.o
+imx8mq-interconnect-objs := imx8mq.o
+imx8mn-interconnect-objs := imx8mn.o
+
+obj-$(CONFIG_INTERCONNECT_IMX) += imx-interconnect.o
+obj-$(CONFIG_INTERCONNECT_IMX8MM) += imx8mm-interconnect.o
+obj-$(CONFIG_INTERCONNECT_IMX8MQ) += imx8mq-interconnect.o
+obj-$(CONFIG_INTERCONNECT_IMX8MN) += imx8mn-interconnect.o
diff --git a/drivers/interconnect/imx/imx.c b/drivers/interconnect/imx/imx.c
new file mode 100644
index 000000000000..ac420f86008e
--- /dev/null
+++ b/drivers/interconnect/imx/imx.c
@@ -0,0 +1,284 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Interconnect framework driver for i.MX SoC
+ *
+ * Copyright (c) 2019, BayLibre
+ * Copyright (c) 2019-2020, NXP
+ * Author: Alexandre Bailon <abailon@baylibre.com>
+ * Author: Leonard Crestez <leonard.crestez@nxp.com>
+ */
+
+#include <linux/device.h>
+#include <linux/interconnect-provider.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_qos.h>
+
+#include "imx.h"
+
+/* private icc_node data */
+struct imx_icc_node {
+ const struct imx_icc_node_desc *desc;
+ struct device *qos_dev;
+ struct dev_pm_qos_request qos_req;
+};
+
+static int imx_icc_node_set(struct icc_node *node)
+{
+ struct device *dev = node->provider->dev;
+ struct imx_icc_node *node_data = node->data;
+ u64 freq;
+
+ if (!node_data->qos_dev)
+ return 0;
+
+ freq = (node->avg_bw + node->peak_bw) * node_data->desc->adj->bw_mul;
+ do_div(freq, node_data->desc->adj->bw_div);
+ dev_dbg(dev, "node %s device %s avg_bw %ukBps peak_bw %ukBps min_freq %llukHz\n",
+ node->name, dev_name(node_data->qos_dev),
+ node->avg_bw, node->peak_bw, freq);
+
+ if (freq > S32_MAX) {
+ dev_err(dev, "%s can't request more than S32_MAX freq\n",
+ node->name);
+ return -ERANGE;
+ }
+
+ dev_pm_qos_update_request(&node_data->qos_req, freq);
+
+ return 0;
+}
+
+static int imx_icc_set(struct icc_node *src, struct icc_node *dst)
+{
+ return imx_icc_node_set(dst);
+}
+
+/* imx_icc_node_destroy() - Destroy an imx icc_node, including private data */
+static void imx_icc_node_destroy(struct icc_node *node)
+{
+ struct imx_icc_node *node_data = node->data;
+ int ret;
+
+ if (dev_pm_qos_request_active(&node_data->qos_req)) {
+ ret = dev_pm_qos_remove_request(&node_data->qos_req);
+ if (ret)
+ dev_warn(node->provider->dev,
+ "failed to remove qos request for %s\n",
+ dev_name(node_data->qos_dev));
+ }
+
+ put_device(node_data->qos_dev);
+ icc_node_del(node);
+ icc_node_destroy(node->id);
+}
+
+static int imx_icc_node_init_qos(struct icc_provider *provider,
+ struct icc_node *node)
+{
+ struct imx_icc_node *node_data = node->data;
+ const struct imx_icc_node_adj_desc *adj = node_data->desc->adj;
+ struct device *dev = provider->dev;
+ struct device_node *dn = NULL;
+ struct platform_device *pdev;
+
+ if (adj->main_noc) {
+ node_data->qos_dev = dev;
+ dev_dbg(dev, "icc node %s[%d] is main noc itself\n",
+ node->name, node->id);
+ } else {
+ dn = of_parse_phandle(dev->of_node, adj->phandle_name, 0);
+ if (!dn) {
+ dev_warn(dev, "Failed to parse %s\n",
+ adj->phandle_name);
+ return -ENODEV;
+ }
+ /* Allow scaling to be disabled on a per-node basis */
+ if (!dn || !of_device_is_available(dn)) {
+ dev_warn(dev, "Missing property %s, skip scaling %s\n",
+ adj->phandle_name, node->name);
+ return 0;
+ }
+
+ pdev = of_find_device_by_node(dn);
+ of_node_put(dn);
+ if (!pdev) {
+ dev_warn(dev, "node %s[%d] missing device for %pOF\n",
+ node->name, node->id, dn);
+ return -EPROBE_DEFER;
+ }
+ node_data->qos_dev = &pdev->dev;
+ dev_dbg(dev, "node %s[%d] has device node %pOF\n",
+ node->name, node->id, dn);
+ }
+
+ return dev_pm_qos_add_request(node_data->qos_dev,
+ &node_data->qos_req,
+ DEV_PM_QOS_MIN_FREQUENCY, 0);
+}
+
+static struct icc_node *imx_icc_node_add(struct icc_provider *provider,
+ const struct imx_icc_node_desc *node_desc)
+{
+ struct device *dev = provider->dev;
+ struct imx_icc_node *node_data;
+ struct icc_node *node;
+ int ret;
+
+ node = icc_node_create(node_desc->id);
+ if (IS_ERR(node)) {
+ dev_err(dev, "failed to create node %d\n", node_desc->id);
+ return node;
+ }
+
+ if (node->data) {
+ dev_err(dev, "already created node %s id=%d\n",
+ node_desc->name, node_desc->id);
+ return ERR_PTR(-EEXIST);
+ }
+
+ node_data = devm_kzalloc(dev, sizeof(*node_data), GFP_KERNEL);
+ if (!node_data) {
+ icc_node_destroy(node->id);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ node->name = node_desc->name;
+ node->data = node_data;
+ node_data->desc = node_desc;
+ icc_node_add(node, provider);
+
+ if (node_desc->adj) {
+ ret = imx_icc_node_init_qos(provider, node);
+ if (ret < 0) {
+ imx_icc_node_destroy(node);
+ return ERR_PTR(ret);
+ }
+ }
+
+ return node;
+}
+
+static void imx_icc_unregister_nodes(struct icc_provider *provider)
+{
+ struct icc_node *node, *tmp;
+
+ list_for_each_entry_safe(node, tmp, &provider->nodes, node_list)
+ imx_icc_node_destroy(node);
+}
+
+static int imx_icc_register_nodes(struct icc_provider *provider,
+ const struct imx_icc_node_desc *descs,
+ int count)
+{
+ struct icc_onecell_data *provider_data = provider->data;
+ int ret;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ struct icc_node *node;
+ const struct imx_icc_node_desc *node_desc = &descs[i];
+ size_t j;
+
+ node = imx_icc_node_add(provider, node_desc);
+ if (IS_ERR(node)) {
+ ret = PTR_ERR(node);
+ if (ret != -EPROBE_DEFER)
+ dev_err(provider->dev, "failed to add %s: %d\n",
+ node_desc->name, ret);
+ goto err;
+ }
+ provider_data->nodes[node->id] = node;
+
+ for (j = 0; j < node_desc->num_links; j++) {
+ ret = icc_link_create(node, node_desc->links[j]);
+ if (ret) {
+ dev_err(provider->dev, "failed to link node %d to %d: %d\n",
+ node->id, node_desc->links[j], ret);
+ goto err;
+ }
+ }
+ }
+
+ return 0;
+
+err:
+ imx_icc_unregister_nodes(provider);
+
+ return ret;
+}
+
+static int get_max_node_id(struct imx_icc_node_desc *nodes, int nodes_count)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < nodes_count; ++i)
+ if (nodes[i].id > ret)
+ ret = nodes[i].id;
+
+ return ret;
+}
+
+int imx_icc_register(struct platform_device *pdev,
+ struct imx_icc_node_desc *nodes, int nodes_count)
+{
+ struct device *dev = &pdev->dev;
+ struct icc_onecell_data *data;
+ struct icc_provider *provider;
+ int max_node_id;
+ int ret;
+
+ /* icc_onecell_data is indexed by node_id, unlike nodes param */
+ max_node_id = get_max_node_id(nodes, nodes_count);
+ data = devm_kzalloc(dev, struct_size(data, nodes, max_node_id),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ data->num_nodes = max_node_id;
+
+ provider = devm_kzalloc(dev, sizeof(*provider), GFP_KERNEL);
+ if (!provider)
+ return -ENOMEM;
+ provider->set = imx_icc_set;
+ provider->aggregate = icc_std_aggregate;
+ provider->xlate = of_icc_xlate_onecell;
+ provider->data = data;
+ provider->dev = dev->parent;
+ platform_set_drvdata(pdev, provider);
+
+ ret = icc_provider_add(provider);
+ if (ret) {
+ dev_err(dev, "error adding interconnect provider: %d\n", ret);
+ return ret;
+ }
+
+ ret = imx_icc_register_nodes(provider, nodes, nodes_count);
+ if (ret)
+ goto provider_del;
+
+ return 0;
+
+provider_del:
+ icc_provider_del(provider);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(imx_icc_register);
+
+int imx_icc_unregister(struct platform_device *pdev)
+{
+ struct icc_provider *provider = platform_get_drvdata(pdev);
+ int ret;
+
+ imx_icc_unregister_nodes(provider);
+
+ ret = icc_provider_del(provider);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(imx_icc_unregister);
+
+MODULE_LICENSE("GPL v2");
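
The DEV_PM_QOS_MIN_FREQUENCY value computed in imx_icc_node_set() above follows
freq = (avg_bw + peak_bw) * bw_mul / bw_div, with the bandwidths in kBps and the result in kHz. As a worked
example with made-up numbers: using the i.MX8MM DRAM adjustment defined in imx8mm.c below (bw_mul = 1,
bw_div = 16), an aggregated vote of avg_bw = 800000 kBps and peak_bw = 1600000 kBps yields
(800000 + 1600000) / 16 = 150000 kHz, i.e. a 150 MHz frequency floor requested on the DDR controller device.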
diff --git a/drivers/interconnect/imx/imx.h b/drivers/interconnect/imx/imx.h
new file mode 100644
index 000000000000..75da51076c68
--- /dev/null
+++ b/drivers/interconnect/imx/imx.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Interconnect framework driver for i.MX SoC
+ *
+ * Copyright (c) 2019, BayLibre
+ * Copyright (c) 2019-2020, NXP
+ * Author: Alexandre Bailon <abailon@baylibre.com>
+ * Author: Leonard Crestez <leonard.crestez@nxp.com>
+ */
+#ifndef __DRIVERS_INTERCONNECT_IMX_H
+#define __DRIVERS_INTERCONNECT_IMX_H
+
+#include <linux/kernel.h>
+
+#define IMX_ICC_MAX_LINKS 4
+
+/*
+ * struct imx_icc_node_adj_desc - Describe a dynamically adjustable node
+ */
+struct imx_icc_node_adj_desc {
+ unsigned int bw_mul, bw_div;
+ const char *phandle_name;
+ bool main_noc;
+};
+
+/*
+ * struct imx_icc_node_desc - Describe an interconnect node
+ * @name: name of the node
+ * @id: a unique id to identify the node
+ * @links: an array of slave node ids
+ * @num_links: number of ids defined in @links
+ * @adj: optional bandwidth adjustment descriptor (NULL if the node is not scaled)
+ */
+struct imx_icc_node_desc {
+ const char *name;
+ u16 id;
+ u16 links[IMX_ICC_MAX_LINKS];
+ u16 num_links;
+ const struct imx_icc_node_adj_desc *adj;
+};
+
+#define DEFINE_BUS_INTERCONNECT(_name, _id, _adj, ...) \
+ { \
+ .id = _id, \
+ .name = _name, \
+ .adj = _adj, \
+ .num_links = ARRAY_SIZE(((int[]){ __VA_ARGS__ })), \
+ .links = { __VA_ARGS__ }, \
+ }
+
+#define DEFINE_BUS_MASTER(_name, _id, _dest_id) \
+ DEFINE_BUS_INTERCONNECT(_name, _id, NULL, _dest_id)
+
+#define DEFINE_BUS_SLAVE(_name, _id, _adj) \
+ DEFINE_BUS_INTERCONNECT(_name, _id, _adj)
+
+int imx_icc_register(struct platform_device *pdev,
+ struct imx_icc_node_desc *nodes,
+ int nodes_count);
+int imx_icc_unregister(struct platform_device *pdev);
+
+#endif /* __DRIVERS_INTERCONNECT_IMX_H */
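
To make the node tables in the SoC files below easier to read, here is what a single DEFINE_BUS_MASTER()
entry expands to once the macros above are applied (written out by hand, using an imx8mm id purely as an
example):

	/* DEFINE_BUS_MASTER("A53", IMX8MM_ICM_A53, IMX8MM_ICN_NOC) becomes: */
	{
		.id        = IMX8MM_ICM_A53,
		.name      = "A53",
		.adj       = NULL,	/* plain masters carry no adjustment */
		.num_links = 1,		/* ARRAY_SIZE of the (int[]){ ... } literal */
		.links     = { IMX8MM_ICN_NOC },
	},

DEFINE_BUS_SLAVE() is the degenerate case with zero links, and DEFINE_BUS_INTERCONNECT() takes an arbitrary
list of destination ids (up to IMX_ICC_MAX_LINKS).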
diff --git a/drivers/interconnect/imx/imx8mm.c b/drivers/interconnect/imx/imx8mm.c
new file mode 100644
index 000000000000..1083490bb391
--- /dev/null
+++ b/drivers/interconnect/imx/imx8mm.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Interconnect framework driver for i.MX8MM SoC
+ *
+ * Copyright (c) 2019, BayLibre
+ * Copyright (c) 2019-2020, NXP
+ * Author: Alexandre Bailon <abailon@baylibre.com>
+ * Author: Leonard Crestez <leonard.crestez@nxp.com>
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <dt-bindings/interconnect/imx8mm.h>
+
+#include "imx.h"
+
+static const struct imx_icc_node_adj_desc imx8mm_dram_adj = {
+ .bw_mul = 1,
+ .bw_div = 16,
+ .phandle_name = "fsl,ddrc",
+};
+
+static const struct imx_icc_node_adj_desc imx8mm_noc_adj = {
+ .bw_mul = 1,
+ .bw_div = 16,
+ .main_noc = true,
+};
+
+/*
+ * Describe bus masters, slaves and connections between them
+ *
+ * This is a simplified subset of the bus diagram; several other PL301 nics
+ * are skipped or merged into PL301_MAIN.
+ */
+static struct imx_icc_node_desc nodes[] = {
+ DEFINE_BUS_INTERCONNECT("NOC", IMX8MM_ICN_NOC, &imx8mm_noc_adj,
+ IMX8MM_ICS_DRAM, IMX8MM_ICN_MAIN),
+
+ DEFINE_BUS_SLAVE("DRAM", IMX8MM_ICS_DRAM, &imx8mm_dram_adj),
+ DEFINE_BUS_SLAVE("OCRAM", IMX8MM_ICS_OCRAM, NULL),
+ DEFINE_BUS_MASTER("A53", IMX8MM_ICM_A53, IMX8MM_ICN_NOC),
+
+ /* VPUMIX */
+ DEFINE_BUS_MASTER("VPU H1", IMX8MM_ICM_VPU_H1, IMX8MM_ICN_VIDEO),
+ DEFINE_BUS_MASTER("VPU G1", IMX8MM_ICM_VPU_G1, IMX8MM_ICN_VIDEO),
+ DEFINE_BUS_MASTER("VPU G2", IMX8MM_ICM_VPU_G2, IMX8MM_ICN_VIDEO),
+ DEFINE_BUS_INTERCONNECT("PL301_VIDEO", IMX8MM_ICN_VIDEO, NULL, IMX8MM_ICN_NOC),
+
+ /* GPUMIX */
+ DEFINE_BUS_MASTER("GPU 2D", IMX8MM_ICM_GPU2D, IMX8MM_ICN_GPU),
+ DEFINE_BUS_MASTER("GPU 3D", IMX8MM_ICM_GPU3D, IMX8MM_ICN_GPU),
+ DEFINE_BUS_INTERCONNECT("PL301_GPU", IMX8MM_ICN_GPU, NULL, IMX8MM_ICN_NOC),
+
+ /* DISPLAYMIX */
+ DEFINE_BUS_MASTER("CSI", IMX8MM_ICM_CSI, IMX8MM_ICN_MIPI),
+ DEFINE_BUS_MASTER("LCDIF", IMX8MM_ICM_LCDIF, IMX8MM_ICN_MIPI),
+ DEFINE_BUS_INTERCONNECT("PL301_MIPI", IMX8MM_ICN_MIPI, NULL, IMX8MM_ICN_NOC),
+
+ /* HSIO */
+ DEFINE_BUS_MASTER("USB1", IMX8MM_ICM_USB1, IMX8MM_ICN_HSIO),
+ DEFINE_BUS_MASTER("USB2", IMX8MM_ICM_USB2, IMX8MM_ICN_HSIO),
+ DEFINE_BUS_MASTER("PCIE", IMX8MM_ICM_PCIE, IMX8MM_ICN_HSIO),
+ DEFINE_BUS_INTERCONNECT("PL301_HSIO", IMX8MM_ICN_HSIO, NULL, IMX8MM_ICN_NOC),
+
+ /* Audio */
+ DEFINE_BUS_MASTER("SDMA2", IMX8MM_ICM_SDMA2, IMX8MM_ICN_AUDIO),
+ DEFINE_BUS_MASTER("SDMA3", IMX8MM_ICM_SDMA3, IMX8MM_ICN_AUDIO),
+ DEFINE_BUS_INTERCONNECT("PL301_AUDIO", IMX8MM_ICN_AUDIO, NULL, IMX8MM_ICN_MAIN),
+
+ /* Ethernet */
+ DEFINE_BUS_MASTER("ENET", IMX8MM_ICM_ENET, IMX8MM_ICN_ENET),
+ DEFINE_BUS_INTERCONNECT("PL301_ENET", IMX8MM_ICN_ENET, NULL, IMX8MM_ICN_MAIN),
+
+ /* Other */
+ DEFINE_BUS_MASTER("SDMA1", IMX8MM_ICM_SDMA1, IMX8MM_ICN_MAIN),
+ DEFINE_BUS_MASTER("NAND", IMX8MM_ICM_NAND, IMX8MM_ICN_MAIN),
+ DEFINE_BUS_MASTER("USDHC1", IMX8MM_ICM_USDHC1, IMX8MM_ICN_MAIN),
+ DEFINE_BUS_MASTER("USDHC2", IMX8MM_ICM_USDHC2, IMX8MM_ICN_MAIN),
+ DEFINE_BUS_MASTER("USDHC3", IMX8MM_ICM_USDHC3, IMX8MM_ICN_MAIN),
+ DEFINE_BUS_INTERCONNECT("PL301_MAIN", IMX8MM_ICN_MAIN, NULL,
+ IMX8MM_ICN_NOC, IMX8MM_ICS_OCRAM),
+};
+
+static int imx8mm_icc_probe(struct platform_device *pdev)
+{
+ return imx_icc_register(pdev, nodes, ARRAY_SIZE(nodes));
+}
+
+static int imx8mm_icc_remove(struct platform_device *pdev)
+{
+ return imx_icc_unregister(pdev);
+}
+
+static struct platform_driver imx8mm_icc_driver = {
+ .probe = imx8mm_icc_probe,
+ .remove = imx8mm_icc_remove,
+ .driver = {
+ .name = "imx8mm-interconnect",
+ },
+};
+
+module_platform_driver(imx8mm_icc_driver);
+MODULE_AUTHOR("Alexandre Bailon <abailon@baylibre.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:imx8mm-interconnect");
diff --git a/drivers/interconnect/imx/imx8mn.c b/drivers/interconnect/imx/imx8mn.c
new file mode 100644
index 000000000000..ad97e55fd4e5
--- /dev/null
+++ b/drivers/interconnect/imx/imx8mn.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Interconnect framework driver for i.MX8MN SoC
+ *
+ * Copyright (c) 2019-2020, NXP
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <dt-bindings/interconnect/imx8mn.h>
+
+#include "imx.h"
+
+static const struct imx_icc_node_adj_desc imx8mn_dram_adj = {
+ .bw_mul = 1,
+ .bw_div = 4,
+ .phandle_name = "fsl,ddrc",
+};
+
+static const struct imx_icc_node_adj_desc imx8mn_noc_adj = {
+ .bw_mul = 1,
+ .bw_div = 4,
+ .main_noc = true,
+};
+
+/*
+ * Describe bus masters, slaves and connections between them
+ *
+ * This is a simplified subset of the bus diagram; several other PL301 nics
+ * are skipped or merged into PL301_MAIN.
+ */
+static struct imx_icc_node_desc nodes[] = {
+ DEFINE_BUS_INTERCONNECT("NOC", IMX8MN_ICN_NOC, &imx8mn_noc_adj,
+ IMX8MN_ICS_DRAM, IMX8MN_ICN_MAIN),
+
+ DEFINE_BUS_SLAVE("DRAM", IMX8MN_ICS_DRAM, &imx8mn_dram_adj),
+ DEFINE_BUS_SLAVE("OCRAM", IMX8MN_ICS_OCRAM, NULL),
+ DEFINE_BUS_MASTER("A53", IMX8MN_ICM_A53, IMX8MN_ICN_NOC),
+
+ /* GPUMIX */
+ DEFINE_BUS_MASTER("GPU", IMX8MN_ICM_GPU, IMX8MN_ICN_GPU),
+ DEFINE_BUS_INTERCONNECT("PL301_GPU", IMX8MN_ICN_GPU, NULL, IMX8MN_ICN_NOC),
+
+ /* DISPLAYMIX */
+ DEFINE_BUS_MASTER("CSI1", IMX8MN_ICM_CSI1, IMX8MN_ICN_MIPI),
+ DEFINE_BUS_MASTER("CSI2", IMX8MN_ICM_CSI2, IMX8MN_ICN_MIPI),
+ DEFINE_BUS_MASTER("ISI", IMX8MN_ICM_ISI, IMX8MN_ICN_MIPI),
+ DEFINE_BUS_MASTER("LCDIF", IMX8MN_ICM_LCDIF, IMX8MN_ICN_MIPI),
+ DEFINE_BUS_INTERCONNECT("PL301_MIPI", IMX8MN_ICN_MIPI, NULL, IMX8MN_ICN_NOC),
+
+ /* USB goes straight to NOC */
+ DEFINE_BUS_MASTER("USB", IMX8MN_ICM_USB, IMX8MN_ICN_NOC),
+
+ /* Audio */
+ DEFINE_BUS_MASTER("SDMA2", IMX8MN_ICM_SDMA2, IMX8MN_ICN_AUDIO),
+ DEFINE_BUS_MASTER("SDMA3", IMX8MN_ICM_SDMA3, IMX8MN_ICN_AUDIO),
+ DEFINE_BUS_INTERCONNECT("PL301_AUDIO", IMX8MN_ICN_AUDIO, NULL, IMX8MN_ICN_MAIN),
+
+ /* Ethernet */
+ DEFINE_BUS_MASTER("ENET", IMX8MN_ICM_ENET, IMX8MN_ICN_ENET),
+ DEFINE_BUS_INTERCONNECT("PL301_ENET", IMX8MN_ICN_ENET, NULL, IMX8MN_ICN_MAIN),
+
+ /* Other */
+ DEFINE_BUS_MASTER("SDMA1", IMX8MN_ICM_SDMA1, IMX8MN_ICN_MAIN),
+ DEFINE_BUS_MASTER("NAND", IMX8MN_ICM_NAND, IMX8MN_ICN_MAIN),
+ DEFINE_BUS_MASTER("USDHC1", IMX8MN_ICM_USDHC1, IMX8MN_ICN_MAIN),
+ DEFINE_BUS_MASTER("USDHC2", IMX8MN_ICM_USDHC2, IMX8MN_ICN_MAIN),
+ DEFINE_BUS_MASTER("USDHC3", IMX8MN_ICM_USDHC3, IMX8MN_ICN_MAIN),
+ DEFINE_BUS_INTERCONNECT("PL301_MAIN", IMX8MN_ICN_MAIN, NULL,
+ IMX8MN_ICN_NOC, IMX8MN_ICS_OCRAM),
+};
+
+static int imx8mn_icc_probe(struct platform_device *pdev)
+{
+ return imx_icc_register(pdev, nodes, ARRAY_SIZE(nodes));
+}
+
+static int imx8mn_icc_remove(struct platform_device *pdev)
+{
+ return imx_icc_unregister(pdev);
+}
+
+static struct platform_driver imx8mn_icc_driver = {
+ .probe = imx8mn_icc_probe,
+ .remove = imx8mn_icc_remove,
+ .driver = {
+ .name = "imx8mn-interconnect",
+ },
+};
+
+module_platform_driver(imx8mn_icc_driver);
+MODULE_ALIAS("platform:imx8mn-interconnect");
+MODULE_AUTHOR("Leonard Crestez <leonard.crestez@nxp.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/interconnect/imx/imx8mq.c b/drivers/interconnect/imx/imx8mq.c
new file mode 100644
index 000000000000..ba43a15aefec
--- /dev/null
+++ b/drivers/interconnect/imx/imx8mq.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Interconnect framework driver for i.MX8MQ SoC
+ *
+ * Copyright (c) 2019-2020, NXP
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <dt-bindings/interconnect/imx8mq.h>
+
+#include "imx.h"
+
+static const struct imx_icc_node_adj_desc imx8mq_dram_adj = {
+ .bw_mul = 1,
+ .bw_div = 4,
+ .phandle_name = "fsl,ddrc",
+};
+
+static const struct imx_icc_node_adj_desc imx8mq_noc_adj = {
+ .bw_mul = 1,
+ .bw_div = 4,
+ .main_noc = true,
+};
+
+/*
+ * Describe bus masters, slaves and connections between them
+ *
+ * This is a simplified subset of the bus diagram; several other PL301 nics
+ * are skipped or merged into PL301_MAIN.
+ */
+static struct imx_icc_node_desc nodes[] = {
+ DEFINE_BUS_INTERCONNECT("NOC", IMX8MQ_ICN_NOC, &imx8mq_noc_adj,
+ IMX8MQ_ICS_DRAM, IMX8MQ_ICN_MAIN),
+
+ DEFINE_BUS_SLAVE("DRAM", IMX8MQ_ICS_DRAM, &imx8mq_dram_adj),
+ DEFINE_BUS_SLAVE("OCRAM", IMX8MQ_ICS_OCRAM, NULL),
+ DEFINE_BUS_MASTER("A53", IMX8MQ_ICM_A53, IMX8MQ_ICN_NOC),
+
+ /* VPUMIX */
+ DEFINE_BUS_MASTER("VPU", IMX8MQ_ICM_VPU, IMX8MQ_ICN_VIDEO),
+ DEFINE_BUS_INTERCONNECT("PL301_VIDEO", IMX8MQ_ICN_VIDEO, NULL, IMX8MQ_ICN_NOC),
+
+ /* GPUMIX */
+ DEFINE_BUS_MASTER("GPU", IMX8MQ_ICM_GPU, IMX8MQ_ICN_GPU),
+ DEFINE_BUS_INTERCONNECT("PL301_GPU", IMX8MQ_ICN_GPU, NULL, IMX8MQ_ICN_NOC),
+
+ /* DISPMIX (only for DCSS) */
+ DEFINE_BUS_MASTER("DC", IMX8MQ_ICM_DCSS, IMX8MQ_ICN_DCSS),
+ DEFINE_BUS_INTERCONNECT("PL301_DC", IMX8MQ_ICN_DCSS, NULL, IMX8MQ_ICN_NOC),
+
+ /* USBMIX */
+ DEFINE_BUS_MASTER("USB1", IMX8MQ_ICM_USB1, IMX8MQ_ICN_USB),
+ DEFINE_BUS_MASTER("USB2", IMX8MQ_ICM_USB2, IMX8MQ_ICN_USB),
+ DEFINE_BUS_INTERCONNECT("PL301_USB", IMX8MQ_ICN_USB, NULL, IMX8MQ_ICN_NOC),
+
+ /* PL301_DISPLAY (IPs other than DCSS, inside SUPERMIX) */
+ DEFINE_BUS_MASTER("CSI1", IMX8MQ_ICM_CSI1, IMX8MQ_ICN_DISPLAY),
+ DEFINE_BUS_MASTER("CSI2", IMX8MQ_ICM_CSI2, IMX8MQ_ICN_DISPLAY),
+ DEFINE_BUS_MASTER("LCDIF", IMX8MQ_ICM_LCDIF, IMX8MQ_ICN_DISPLAY),
+ DEFINE_BUS_INTERCONNECT("PL301_DISPLAY", IMX8MQ_ICN_DISPLAY, NULL, IMX8MQ_ICN_MAIN),
+
+ /* AUDIO */
+ DEFINE_BUS_MASTER("SDMA2", IMX8MQ_ICM_SDMA2, IMX8MQ_ICN_AUDIO),
+ DEFINE_BUS_INTERCONNECT("PL301_AUDIO", IMX8MQ_ICN_AUDIO, NULL, IMX8MQ_ICN_DISPLAY),
+
+ /* ENET */
+ DEFINE_BUS_MASTER("ENET", IMX8MQ_ICM_ENET, IMX8MQ_ICN_ENET),
+ DEFINE_BUS_INTERCONNECT("PL301_ENET", IMX8MQ_ICN_ENET, NULL, IMX8MQ_ICN_MAIN),
+
+ /* OTHER */
+ DEFINE_BUS_MASTER("SDMA1", IMX8MQ_ICM_SDMA1, IMX8MQ_ICN_MAIN),
+ DEFINE_BUS_MASTER("NAND", IMX8MQ_ICM_NAND, IMX8MQ_ICN_MAIN),
+ DEFINE_BUS_MASTER("USDHC1", IMX8MQ_ICM_USDHC1, IMX8MQ_ICN_MAIN),
+ DEFINE_BUS_MASTER("USDHC2", IMX8MQ_ICM_USDHC2, IMX8MQ_ICN_MAIN),
+ DEFINE_BUS_MASTER("PCIE1", IMX8MQ_ICM_PCIE1, IMX8MQ_ICN_MAIN),
+ DEFINE_BUS_MASTER("PCIE2", IMX8MQ_ICM_PCIE2, IMX8MQ_ICN_MAIN),
+ DEFINE_BUS_INTERCONNECT("PL301_MAIN", IMX8MQ_ICN_MAIN, NULL,
+ IMX8MQ_ICN_NOC, IMX8MQ_ICS_OCRAM),
+};
+
+static int imx8mq_icc_probe(struct platform_device *pdev)
+{
+ return imx_icc_register(pdev, nodes, ARRAY_SIZE(nodes));
+}
+
+static int imx8mq_icc_remove(struct platform_device *pdev)
+{
+ return imx_icc_unregister(pdev);
+}
+
+static struct platform_driver imx8mq_icc_driver = {
+ .probe = imx8mq_icc_probe,
+ .remove = imx8mq_icc_remove,
+ .driver = {
+ .name = "imx8mq-interconnect",
+ },
+};
+
+module_platform_driver(imx8mq_icc_driver);
+MODULE_ALIAS("platform:imx8mq-interconnect");
+MODULE_AUTHOR("Leonard Crestez <leonard.crestez@nxp.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/interconnect/internal.h b/drivers/interconnect/internal.h
index bf18cb7239df..f5f82a5c939e 100644
--- a/drivers/interconnect/internal.h
+++ b/drivers/interconnect/internal.h
@@ -14,6 +14,7 @@
* @req_node: entry in list of requests for the particular @node
* @node: the interconnect node to which this constraint applies
* @dev: reference to the device that sets the constraints
+ * @enabled: indicates whether the path with this request is enabled
* @tag: path tag (optional)
* @avg_bw: an integer describing the average bandwidth in kBps
* @peak_bw: an integer describing the peak bandwidth in kBps
@@ -22,6 +23,7 @@ struct icc_req {
struct hlist_node req_node;
struct icc_node *node;
struct device *dev;
+ bool enabled;
u32 tag;
u32 avg_bw;
u32 peak_bw;
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 2ab07ce17abb..aca76383f201 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -303,6 +303,15 @@ config ROCKCHIP_IOMMU
Say Y here if you are using a Rockchip SoC that includes an IOMMU
device.
+config SUN50I_IOMMU
+ bool "Allwinner H6 IOMMU Support"
+ depends on ARCH_SUNXI || COMPILE_TEST
+ select ARM_DMA_USE_IOMMU
+ select IOMMU_API
+ select IOMMU_DMA
+ help
+ Support for the IOMMU introduced in the Allwinner H6 SoCs.
+
config TEGRA_IOMMU_GART
bool "Tegra GART IOMMU Support"
depends on ARCH_TEGRA_2x_SOC
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 9f33fdb3bb05..57cf4ba5e27c 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_MTK_IOMMU_V1) += mtk_iommu_v1.o
obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o
obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o
obj-$(CONFIG_ROCKCHIP_IOMMU) += rockchip-iommu.o
+obj-$(CONFIG_SUN50I_IOMMU) += sun50i-iommu.o
obj-$(CONFIG_TEGRA_IOMMU_GART) += tegra-gart.o
obj-$(CONFIG_TEGRA_IOMMU_SMMU) += tegra-smmu.o
obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 2883ac389abb..311ef7105c6d 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -22,7 +22,6 @@
#include <linux/dma-direct.h>
#include <linux/dma-iommu.h>
#include <linux/iommu-helper.h>
-#include <linux/iommu.h>
#include <linux/delay.h>
#include <linux/amd-iommu.h>
#include <linux/notifier.h>
@@ -43,8 +42,7 @@
#include <asm/gart.h>
#include <asm/dma.h>
-#include "amd_iommu_proto.h"
-#include "amd_iommu_types.h"
+#include "amd_iommu.h"
#include "irq_remapping.h"
#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
@@ -71,6 +69,8 @@
*/
#define AMD_IOMMU_PGSIZES ((~0xFFFUL) & ~(2ULL << 38))
+#define DEFAULT_PGTABLE_LEVEL PAGE_MODE_3_LEVEL
+
static DEFINE_SPINLOCK(pd_bitmap_lock);
/* List of all available dev_data structures */
@@ -99,7 +99,6 @@ struct iommu_cmd {
struct kmem_cache *amd_iommu_irq_cache;
static void update_domain(struct protection_domain *domain);
-static int protection_domain_init(struct protection_domain *domain);
static void detach_device(struct device *dev);
static void update_and_flush_device_table(struct protection_domain *domain,
struct domain_pgtable *pgtable);
@@ -280,12 +279,6 @@ static struct iommu_dev_data *find_dev_data(u16 devid)
return dev_data;
}
-struct iommu_dev_data *get_dev_data(struct device *dev)
-{
- return dev->archdata.iommu;
-}
-EXPORT_SYMBOL(get_dev_data);
-
/*
* Find or create an IOMMU group for a acpihid device.
*/
@@ -314,16 +307,15 @@ static struct iommu_group *acpihid_device_group(struct device *dev)
static bool pci_iommuv2_capable(struct pci_dev *pdev)
{
static const int caps[] = {
- PCI_EXT_CAP_ID_ATS,
PCI_EXT_CAP_ID_PRI,
PCI_EXT_CAP_ID_PASID,
};
int i, pos;
- if (pci_ats_disabled())
+ if (!pci_ats_supported(pdev))
return false;
- for (i = 0; i < 3; ++i) {
+ for (i = 0; i < 2; ++i) {
pos = pci_find_ext_capability(pdev, caps[i]);
if (pos == 0)
return false;
@@ -336,7 +328,7 @@ static bool pdev_pri_erratum(struct pci_dev *pdev, u32 erratum)
{
struct iommu_dev_data *dev_data;
- dev_data = get_dev_data(&pdev->dev);
+ dev_data = dev_iommu_priv_get(&pdev->dev);
return dev_data->errata & (1 << erratum) ? true : false;
}
@@ -349,7 +341,7 @@ static bool check_device(struct device *dev)
{
int devid;
- if (!dev || !dev->dma_mask)
+ if (!dev)
return false;
devid = get_device_id(dev);
@@ -366,32 +358,18 @@ static bool check_device(struct device *dev)
return true;
}
-static void init_iommu_group(struct device *dev)
-{
- struct iommu_group *group;
-
- group = iommu_group_get_for_dev(dev);
- if (IS_ERR(group))
- return;
-
- iommu_group_put(group);
-}
-
static int iommu_init_device(struct device *dev)
{
struct iommu_dev_data *dev_data;
- struct amd_iommu *iommu;
int devid;
- if (dev->archdata.iommu)
+ if (dev_iommu_priv_get(dev))
return 0;
devid = get_device_id(dev);
if (devid < 0)
return devid;
- iommu = amd_iommu_rlookup_table[devid];
-
dev_data = find_dev_data(devid);
if (!dev_data)
return -ENOMEM;
@@ -412,9 +390,7 @@ static int iommu_init_device(struct device *dev)
dev_data->iommu_v2 = iommu->is_iommu_v2;
}
- dev->archdata.iommu = dev_data;
-
- iommu_device_link(&iommu->iommu, dev);
+ dev_iommu_priv_set(dev, dev_data);
return 0;
}
@@ -433,31 +409,18 @@ static void iommu_ignore_device(struct device *dev)
setup_aliases(dev);
}
-static void iommu_uninit_device(struct device *dev)
+static void amd_iommu_uninit_device(struct device *dev)
{
struct iommu_dev_data *dev_data;
- struct amd_iommu *iommu;
- int devid;
- devid = get_device_id(dev);
- if (devid < 0)
- return;
-
- iommu = amd_iommu_rlookup_table[devid];
-
- dev_data = search_dev_data(devid);
+ dev_data = dev_iommu_priv_get(dev);
if (!dev_data)
return;
if (dev_data->domain)
detach_device(dev);
- iommu_device_unlink(&iommu->iommu, dev);
-
- iommu_group_remove_device(dev);
-
- /* Remove dma-ops */
- dev->dma_ops = NULL;
+ dev_iommu_priv_set(dev, NULL);
/*
* We keep dev_data around for unplugged devices and reuse it when the
@@ -521,7 +484,7 @@ static void amd_iommu_report_page_fault(u16 devid, u16 domain_id,
pdev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(devid),
devid & 0xff);
if (pdev)
- dev_data = get_dev_data(&pdev->dev);
+ dev_data = dev_iommu_priv_get(&pdev->dev);
if (dev_data && __ratelimit(&dev_data->rs)) {
pci_err(pdev, "Event logged [IO_PAGE_FAULT domain=0x%04x address=0x%llx flags=0x%04x]\n",
@@ -1418,20 +1381,19 @@ static struct page *free_sub_pt(unsigned long root, int mode,
return freelist;
}
-static void free_pagetable(struct protection_domain *domain)
+static void free_pagetable(struct domain_pgtable *pgtable)
{
- struct domain_pgtable pgtable;
struct page *freelist = NULL;
unsigned long root;
- amd_iommu_domain_get_pgtable(domain, &pgtable);
- atomic64_set(&domain->pt_root, 0);
+ if (pgtable->mode == PAGE_MODE_NONE)
+ return;
- BUG_ON(pgtable.mode < PAGE_MODE_NONE ||
- pgtable.mode > PAGE_MODE_6_LEVEL);
+ BUG_ON(pgtable->mode < PAGE_MODE_NONE ||
+ pgtable->mode > PAGE_MODE_6_LEVEL);
- root = (unsigned long)pgtable.root;
- freelist = free_sub_pt(root, pgtable.mode, freelist);
+ root = (unsigned long)pgtable->root;
+ freelist = free_sub_pt(root, pgtable->mode, freelist);
free_page_list(freelist);
}
@@ -1844,70 +1806,6 @@ static void free_gcr3_table(struct protection_domain *domain)
free_page((unsigned long)domain->gcr3_tbl);
}
-/*
- * Free a domain, only used if something went wrong in the
- * allocation path and we need to free an already allocated page table
- */
-static void dma_ops_domain_free(struct protection_domain *domain)
-{
- if (!domain)
- return;
-
- iommu_put_dma_cookie(&domain->domain);
-
- free_pagetable(domain);
-
- if (domain->id)
- domain_id_free(domain->id);
-
- kfree(domain);
-}
-
-/*
- * Allocates a new protection domain usable for the dma_ops functions.
- * It also initializes the page table and the address allocator data
- * structures required for the dma_ops interface
- */
-static struct protection_domain *dma_ops_domain_alloc(void)
-{
- struct protection_domain *domain;
- u64 *pt_root, root;
-
- domain = kzalloc(sizeof(struct protection_domain), GFP_KERNEL);
- if (!domain)
- return NULL;
-
- if (protection_domain_init(domain))
- goto free_domain;
-
- pt_root = (void *)get_zeroed_page(GFP_KERNEL);
- if (!pt_root)
- goto free_domain;
-
- root = amd_iommu_domain_encode_pgtable(pt_root, PAGE_MODE_3_LEVEL);
- atomic64_set(&domain->pt_root, root);
- domain->flags = PD_DMA_OPS_MASK;
-
- if (iommu_get_dma_cookie(&domain->domain) == -ENOMEM)
- goto free_domain;
-
- return domain;
-
-free_domain:
- dma_ops_domain_free(domain);
-
- return NULL;
-}
-
-/*
- * little helper function to check whether a given protection domain is a
- * dma_ops domain
- */
-static bool dma_ops_domain(struct protection_domain *domain)
-{
- return domain->flags & PD_DMA_OPS_MASK;
-}
-
static void set_dte_entry(u16 devid, struct protection_domain *domain,
struct domain_pgtable *pgtable,
bool ats, bool ppr)
@@ -2119,14 +2017,14 @@ out_err:
static int attach_device(struct device *dev,
struct protection_domain *domain)
{
- struct pci_dev *pdev;
struct iommu_dev_data *dev_data;
+ struct pci_dev *pdev;
unsigned long flags;
int ret;
spin_lock_irqsave(&domain->lock, flags);
- dev_data = get_dev_data(dev);
+ dev_data = dev_iommu_priv_get(dev);
spin_lock(&dev_data->lock);
@@ -2139,8 +2037,10 @@ static int attach_device(struct device *dev,
pdev = to_pci_dev(dev);
if (domain->flags & PD_IOMMUV2_MASK) {
+ struct iommu_domain *def_domain = iommu_get_dma_domain(dev);
+
ret = -EINVAL;
- if (!dev_data->passthrough)
+ if (def_domain->type != IOMMU_DOMAIN_IDENTITY)
goto out;
if (dev_data->iommu_v2) {
@@ -2188,7 +2088,7 @@ static void detach_device(struct device *dev)
struct iommu_dev_data *dev_data;
unsigned long flags;
- dev_data = get_dev_data(dev);
+ dev_data = dev_iommu_priv_get(dev);
domain = dev_data->domain;
spin_lock_irqsave(&domain->lock, flags);
@@ -2222,68 +2122,60 @@ out:
spin_unlock_irqrestore(&domain->lock, flags);
}
-static int amd_iommu_add_device(struct device *dev)
+static struct iommu_device *amd_iommu_probe_device(struct device *dev)
{
- struct iommu_dev_data *dev_data;
- struct iommu_domain *domain;
+ struct iommu_device *iommu_dev;
struct amd_iommu *iommu;
int ret, devid;
- if (!check_device(dev) || get_dev_data(dev))
- return 0;
+ if (!check_device(dev))
+ return ERR_PTR(-ENODEV);
devid = get_device_id(dev);
if (devid < 0)
- return devid;
+ return ERR_PTR(devid);
iommu = amd_iommu_rlookup_table[devid];
+ if (dev_iommu_priv_get(dev))
+ return &iommu->iommu;
+
ret = iommu_init_device(dev);
if (ret) {
if (ret != -ENOTSUPP)
dev_err(dev, "Failed to initialize - trying to proceed anyway\n");
-
+ iommu_dev = ERR_PTR(ret);
iommu_ignore_device(dev);
- dev->dma_ops = NULL;
- goto out;
+ } else {
+ iommu_dev = &iommu->iommu;
}
- init_iommu_group(dev);
- dev_data = get_dev_data(dev);
+ iommu_completion_wait(iommu);
- BUG_ON(!dev_data);
+ return iommu_dev;
+}
- if (dev_data->iommu_v2)
- iommu_request_dm_for_dev(dev);
+static void amd_iommu_probe_finalize(struct device *dev)
+{
+ struct iommu_domain *domain;
/* Domains are initialized for this device - have a look what we ended up with */
domain = iommu_get_domain_for_dev(dev);
- if (domain->type == IOMMU_DOMAIN_IDENTITY)
- dev_data->passthrough = true;
- else if (domain->type == IOMMU_DOMAIN_DMA)
+ if (domain->type == IOMMU_DOMAIN_DMA)
iommu_setup_dma_ops(dev, IOVA_START_PFN << PAGE_SHIFT, 0);
-
-out:
- iommu_completion_wait(iommu);
-
- return 0;
}
-static void amd_iommu_remove_device(struct device *dev)
+static void amd_iommu_release_device(struct device *dev)
{
+ int devid = get_device_id(dev);
struct amd_iommu *iommu;
- int devid;
if (!check_device(dev))
return;
- devid = get_device_id(dev);
- if (devid < 0)
- return;
-
iommu = amd_iommu_rlookup_table[devid];
- iommu_uninit_device(dev);
+ amd_iommu_uninit_device(dev);
iommu_completion_wait(iommu);
}
@@ -2418,27 +2310,46 @@ static void cleanup_domain(struct protection_domain *domain)
static void protection_domain_free(struct protection_domain *domain)
{
+ struct domain_pgtable pgtable;
+
if (!domain)
return;
if (domain->id)
domain_id_free(domain->id);
+ amd_iommu_domain_get_pgtable(domain, &pgtable);
+ atomic64_set(&domain->pt_root, 0);
+ free_pagetable(&pgtable);
+
kfree(domain);
}
-static int protection_domain_init(struct protection_domain *domain)
+static int protection_domain_init(struct protection_domain *domain, int mode)
{
+ u64 *pt_root = NULL, root;
+
+ BUG_ON(mode < PAGE_MODE_NONE || mode > PAGE_MODE_6_LEVEL);
+
spin_lock_init(&domain->lock);
domain->id = domain_id_alloc();
if (!domain->id)
return -ENOMEM;
INIT_LIST_HEAD(&domain->dev_list);
+ if (mode != PAGE_MODE_NONE) {
+ pt_root = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!pt_root)
+ return -ENOMEM;
+ }
+
+ root = amd_iommu_domain_encode_pgtable(pt_root, mode);
+ atomic64_set(&domain->pt_root, root);
+
return 0;
}
-static struct protection_domain *protection_domain_alloc(void)
+static struct protection_domain *protection_domain_alloc(int mode)
{
struct protection_domain *domain;
@@ -2446,7 +2357,7 @@ static struct protection_domain *protection_domain_alloc(void)
if (!domain)
return NULL;
- if (protection_domain_init(domain))
+ if (protection_domain_init(domain, mode))
goto out_err;
return domain;
@@ -2459,54 +2370,35 @@ out_err:
static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
{
- struct protection_domain *pdomain;
- u64 *pt_root, root;
+ struct protection_domain *domain;
+ int mode = DEFAULT_PGTABLE_LEVEL;
- switch (type) {
- case IOMMU_DOMAIN_UNMANAGED:
- pdomain = protection_domain_alloc();
- if (!pdomain)
- return NULL;
+ if (type == IOMMU_DOMAIN_IDENTITY)
+ mode = PAGE_MODE_NONE;
- pt_root = (void *)get_zeroed_page(GFP_KERNEL);
- if (!pt_root) {
- protection_domain_free(pdomain);
- return NULL;
- }
+ domain = protection_domain_alloc(mode);
+ if (!domain)
+ return NULL;
- root = amd_iommu_domain_encode_pgtable(pt_root, PAGE_MODE_3_LEVEL);
- atomic64_set(&pdomain->pt_root, root);
+ domain->domain.geometry.aperture_start = 0;
+ domain->domain.geometry.aperture_end = ~0ULL;
+ domain->domain.geometry.force_aperture = true;
- pdomain->domain.geometry.aperture_start = 0;
- pdomain->domain.geometry.aperture_end = ~0ULL;
- pdomain->domain.geometry.force_aperture = true;
+ if (type == IOMMU_DOMAIN_DMA &&
+ iommu_get_dma_cookie(&domain->domain) == -ENOMEM)
+ goto free_domain;
- break;
- case IOMMU_DOMAIN_DMA:
- pdomain = dma_ops_domain_alloc();
- if (!pdomain) {
- pr_err("Failed to allocate\n");
- return NULL;
- }
- break;
- case IOMMU_DOMAIN_IDENTITY:
- pdomain = protection_domain_alloc();
- if (!pdomain)
- return NULL;
+ return &domain->domain;
- atomic64_set(&pdomain->pt_root, PAGE_MODE_NONE);
- break;
- default:
- return NULL;
- }
+free_domain:
+ protection_domain_free(domain);
- return &pdomain->domain;
+ return NULL;
}
static void amd_iommu_domain_free(struct iommu_domain *dom)
{
struct protection_domain *domain;
- struct domain_pgtable pgtable;
domain = to_pdomain(dom);
@@ -2518,29 +2410,19 @@ static void amd_iommu_domain_free(struct iommu_domain *dom)
if (!dom)
return;
- switch (dom->type) {
- case IOMMU_DOMAIN_DMA:
- /* Now release the domain */
- dma_ops_domain_free(domain);
- break;
- default:
- amd_iommu_domain_get_pgtable(domain, &pgtable);
-
- if (pgtable.mode != PAGE_MODE_NONE)
- free_pagetable(domain);
+ if (dom->type == IOMMU_DOMAIN_DMA)
+ iommu_put_dma_cookie(&domain->domain);
- if (domain->flags & PD_IOMMUV2_MASK)
- free_gcr3_table(domain);
+ if (domain->flags & PD_IOMMUV2_MASK)
+ free_gcr3_table(domain);
- protection_domain_free(domain);
- break;
- }
+ protection_domain_free(domain);
}
static void amd_iommu_detach_device(struct iommu_domain *dom,
struct device *dev)
{
- struct iommu_dev_data *dev_data = dev->archdata.iommu;
+ struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
struct amd_iommu *iommu;
int devid;
@@ -2578,7 +2460,7 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
if (!check_device(dev))
return -EINVAL;
- dev_data = dev->archdata.iommu;
+ dev_data = dev_iommu_priv_get(dev);
dev_data->defer_attach = false;
iommu = amd_iommu_rlookup_table[dev_data->devid];
@@ -2734,12 +2616,14 @@ static void amd_iommu_get_resv_regions(struct device *dev,
list_add_tail(&region->list, head);
}
-static bool amd_iommu_is_attach_deferred(struct iommu_domain *domain,
- struct device *dev)
+bool amd_iommu_is_attach_deferred(struct iommu_domain *domain,
+ struct device *dev)
{
- struct iommu_dev_data *dev_data = dev->archdata.iommu;
+ struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
+
return dev_data->defer_attach;
}
+EXPORT_SYMBOL_GPL(amd_iommu_is_attach_deferred);
static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain)
{
@@ -2758,6 +2642,20 @@ static void amd_iommu_iotlb_sync(struct iommu_domain *domain,
amd_iommu_flush_iotlb_all(domain);
}
+static int amd_iommu_def_domain_type(struct device *dev)
+{
+ struct iommu_dev_data *dev_data;
+
+ dev_data = dev_iommu_priv_get(dev);
+ if (!dev_data)
+ return 0;
+
+ if (dev_data->iommu_v2)
+ return IOMMU_DOMAIN_IDENTITY;
+
+ return 0;
+}
+
const struct iommu_ops amd_iommu_ops = {
.capable = amd_iommu_capable,
.domain_alloc = amd_iommu_domain_alloc,
@@ -2767,8 +2665,9 @@ const struct iommu_ops amd_iommu_ops = {
.map = amd_iommu_map,
.unmap = amd_iommu_unmap,
.iova_to_phys = amd_iommu_iova_to_phys,
- .add_device = amd_iommu_add_device,
- .remove_device = amd_iommu_remove_device,
+ .probe_device = amd_iommu_probe_device,
+ .release_device = amd_iommu_release_device,
+ .probe_finalize = amd_iommu_probe_finalize,
.device_group = amd_iommu_device_group,
.domain_get_attr = amd_iommu_domain_get_attr,
.get_resv_regions = amd_iommu_get_resv_regions,
@@ -2777,6 +2676,7 @@ const struct iommu_ops amd_iommu_ops = {
.pgsize_bitmap = AMD_IOMMU_PGSIZES,
.flush_iotlb_all = amd_iommu_flush_iotlb_all,
.iotlb_sync = amd_iommu_iotlb_sync,
+ .def_domain_type = amd_iommu_def_domain_type,
};
/*****************************************************************************
@@ -2807,7 +2707,6 @@ void amd_iommu_domain_direct_map(struct iommu_domain *dom)
struct protection_domain *domain = to_pdomain(dom);
struct domain_pgtable pgtable;
unsigned long flags;
- u64 pt_root;
spin_lock_irqsave(&domain->lock, flags);
@@ -2815,18 +2714,13 @@ void amd_iommu_domain_direct_map(struct iommu_domain *dom)
amd_iommu_domain_get_pgtable(domain, &pgtable);
/* Update data structure */
- pt_root = amd_iommu_domain_encode_pgtable(NULL, PAGE_MODE_NONE);
- atomic64_set(&domain->pt_root, pt_root);
+ atomic64_set(&domain->pt_root, 0);
/* Make changes visible to IOMMUs */
update_domain(domain);
- /* Restore old pgtable in domain->ptroot to free page-table */
- pt_root = amd_iommu_domain_encode_pgtable(pgtable.root, pgtable.mode);
- atomic64_set(&domain->pt_root, pt_root);
-
/* Page-table is not visible to IOMMU anymore, so free it */
- free_pagetable(domain);
+ free_pagetable(&pgtable);
spin_unlock_irqrestore(&domain->lock, flags);
}
@@ -3085,7 +2979,7 @@ int amd_iommu_complete_ppr(struct pci_dev *pdev, int pasid,
struct amd_iommu *iommu;
struct iommu_cmd cmd;
- dev_data = get_dev_data(&pdev->dev);
+ dev_data = dev_iommu_priv_get(&pdev->dev);
iommu = amd_iommu_rlookup_table[dev_data->devid];
build_complete_ppr(&cmd, dev_data->devid, pasid, status,
@@ -3098,23 +2992,27 @@ EXPORT_SYMBOL(amd_iommu_complete_ppr);
struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev)
{
struct protection_domain *pdomain;
- struct iommu_domain *io_domain;
+ struct iommu_dev_data *dev_data;
struct device *dev = &pdev->dev;
+ struct iommu_domain *io_domain;
if (!check_device(dev))
return NULL;
- pdomain = get_dev_data(dev)->domain;
- if (pdomain == NULL && get_dev_data(dev)->defer_attach) {
- get_dev_data(dev)->defer_attach = false;
- io_domain = iommu_get_domain_for_dev(dev);
+ dev_data = dev_iommu_priv_get(&pdev->dev);
+ pdomain = dev_data->domain;
+ io_domain = iommu_get_domain_for_dev(dev);
+
+ if (pdomain == NULL && dev_data->defer_attach) {
+ dev_data->defer_attach = false;
pdomain = to_pdomain(io_domain);
attach_device(dev, pdomain);
}
+
if (pdomain == NULL)
return NULL;
- if (!dma_ops_domain(pdomain))
+ if (io_domain->type != IOMMU_DOMAIN_DMA)
return NULL;
/* Only return IOMMUv2 domains */
@@ -3132,7 +3030,7 @@ void amd_iommu_enable_device_erratum(struct pci_dev *pdev, u32 erratum)
if (!amd_iommu_v2_supported())
return;
- dev_data = get_dev_data(&pdev->dev);
+ dev_data = dev_iommu_priv_get(&pdev->dev);
dev_data->errata |= (1 << erratum);
}
EXPORT_SYMBOL(amd_iommu_enable_device_erratum);
@@ -3151,11 +3049,8 @@ int amd_iommu_device_info(struct pci_dev *pdev,
memset(info, 0, sizeof(*info));
- if (!pci_ats_disabled()) {
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS);
- if (pos)
- info->flags |= AMD_IOMMU_DEVICE_FLAG_ATS_SUP;
- }
+ if (pci_ats_supported(pdev))
+ info->flags |= AMD_IOMMU_DEVICE_FLAG_ATS_SUP;
pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
if (pos)
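
The amd_iommu.c changes above repeat one pattern: per-device data moves from dev->archdata.iommu into the
core-managed pointer behind dev_iommu_priv_get()/dev_iommu_priv_set(), and the add_device/remove_device
callbacks become probe_device/release_device, with IOMMU group allocation and DMA-ops setup now handled by
the IOMMU core (probe_finalize only selects the DMA ops once the default domain type is known). A heavily
condensed sketch of that shape — everything prefixed foo_ is a placeholder, not AMD driver code:

	#include <linux/iommu.h>
	#include <linux/slab.h>

	struct foo_dev_data {
		u16 devid;
	};

	static struct iommu_device foo_iommu;	/* registered elsewhere by the driver */

	static struct iommu_device *foo_probe_device(struct device *dev)
	{
		struct foo_dev_data *data;

		if (dev_iommu_priv_get(dev))		/* already initialized */
			return &foo_iommu;

		data = kzalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return ERR_PTR(-ENOMEM);

		dev_iommu_priv_set(dev, data);		/* replaces dev->archdata.iommu */
		return &foo_iommu;			/* core links device, group and sysfs */
	}

	static void foo_release_device(struct device *dev)
	{
		kfree(dev_iommu_priv_get(dev));
		dev_iommu_priv_set(dev, NULL);
	}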
diff --git a/drivers/iommu/amd_iommu.h b/drivers/iommu/amd_iommu.h
index 12d540d9b59b..f892992c8744 100644
--- a/drivers/iommu/amd_iommu.h
+++ b/drivers/iommu/amd_iommu.h
@@ -1,9 +1,103 @@
/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2009-2010 Advanced Micro Devices, Inc.
+ * Author: Joerg Roedel <jroedel@suse.de>
+ */
#ifndef AMD_IOMMU_H
#define AMD_IOMMU_H
-int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line);
+#include <linux/iommu.h>
+
+#include "amd_iommu_types.h"
+
+extern int amd_iommu_get_num_iommus(void);
+extern int amd_iommu_init_dma_ops(void);
+extern int amd_iommu_init_passthrough(void);
+extern irqreturn_t amd_iommu_int_thread(int irq, void *data);
+extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
+extern void amd_iommu_apply_erratum_63(u16 devid);
+extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu);
+extern int amd_iommu_init_devices(void);
+extern void amd_iommu_uninit_devices(void);
+extern void amd_iommu_init_notifier(void);
+extern int amd_iommu_init_api(void);
+
+#ifdef CONFIG_AMD_IOMMU_DEBUGFS
+void amd_iommu_debugfs_setup(struct amd_iommu *iommu);
+#else
+static inline void amd_iommu_debugfs_setup(struct amd_iommu *iommu) {}
+#endif
+
+/* Needed for interrupt remapping */
+extern int amd_iommu_prepare(void);
+extern int amd_iommu_enable(void);
+extern void amd_iommu_disable(void);
+extern int amd_iommu_reenable(int);
+extern int amd_iommu_enable_faulting(void);
+extern int amd_iommu_guest_ir;
+
+/* IOMMUv2 specific functions */
+struct iommu_domain;
+
+extern bool amd_iommu_v2_supported(void);
+extern int amd_iommu_register_ppr_notifier(struct notifier_block *nb);
+extern int amd_iommu_unregister_ppr_notifier(struct notifier_block *nb);
+extern void amd_iommu_domain_direct_map(struct iommu_domain *dom);
+extern int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids);
+extern int amd_iommu_flush_page(struct iommu_domain *dom, int pasid,
+ u64 address);
+extern int amd_iommu_flush_tlb(struct iommu_domain *dom, int pasid);
+extern int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, int pasid,
+ unsigned long cr3);
+extern int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, int pasid);
+extern struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev);
+
+#ifdef CONFIG_IRQ_REMAP
+extern int amd_iommu_create_irq_domain(struct amd_iommu *iommu);
+#else
+static inline int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
+{
+ return 0;
+}
+#endif
+
+#define PPR_SUCCESS 0x0
+#define PPR_INVALID 0x1
+#define PPR_FAILURE 0xf
+
+extern int amd_iommu_complete_ppr(struct pci_dev *pdev, int pasid,
+ int status, int tag);
+
+static inline bool is_rd890_iommu(struct pci_dev *pdev)
+{
+ return (pdev->vendor == PCI_VENDOR_ID_ATI) &&
+ (pdev->device == PCI_DEVICE_ID_RD890_IOMMU);
+}
+
+static inline bool iommu_feature(struct amd_iommu *iommu, u64 f)
+{
+ if (!(iommu->cap & (1 << IOMMU_CAP_EFR)))
+ return false;
+
+ return !!(iommu->features & f);
+}
+
+static inline u64 iommu_virt_to_phys(void *vaddr)
+{
+ return (u64)__sme_set(virt_to_phys(vaddr));
+}
+
+static inline void *iommu_phys_to_virt(unsigned long paddr)
+{
+ return phys_to_virt(__sme_clr(paddr));
+}
+
+extern bool translation_pre_enabled(struct amd_iommu *iommu);
+extern bool amd_iommu_is_attach_deferred(struct iommu_domain *domain,
+ struct device *dev);
+extern int __init add_special_device(u8 type, u8 id, u16 *devid,
+ bool cmd_line);
#ifdef CONFIG_DMI
void amd_iommu_apply_ivrs_quirks(void);
diff --git a/drivers/iommu/amd_iommu_debugfs.c b/drivers/iommu/amd_iommu_debugfs.c
index c6a5c737ef09..545372fcc72f 100644
--- a/drivers/iommu/amd_iommu_debugfs.c
+++ b/drivers/iommu/amd_iommu_debugfs.c
@@ -8,10 +8,9 @@
*/
#include <linux/debugfs.h>
-#include <linux/iommu.h>
#include <linux/pci.h>
-#include "amd_iommu_proto.h"
-#include "amd_iommu_types.h"
+
+#include "amd_iommu.h"
static struct dentry *amd_iommu_debugfs;
static DEFINE_MUTEX(amd_iommu_debugfs_lock);
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 5b81fd16f5fa..3faff7f80fd2 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -18,7 +18,6 @@
#include <linux/msi.h>
#include <linux/amd-iommu.h>
#include <linux/export.h>
-#include <linux/iommu.h>
#include <linux/kmemleak.h>
#include <linux/mem_encrypt.h>
#include <asm/pci-direct.h>
@@ -32,9 +31,8 @@
#include <asm/irq_remapping.h>
#include <linux/crash_dump.h>
+
#include "amd_iommu.h"
-#include "amd_iommu_proto.h"
-#include "amd_iommu_types.h"
#include "irq_remapping.h"
/*
diff --git a/drivers/iommu/amd_iommu_proto.h b/drivers/iommu/amd_iommu_proto.h
deleted file mode 100644
index 92c2ba6468a0..000000000000
--- a/drivers/iommu/amd_iommu_proto.h
+++ /dev/null
@@ -1,96 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2009-2010 Advanced Micro Devices, Inc.
- * Author: Joerg Roedel <jroedel@suse.de>
- */
-
-#ifndef _ASM_X86_AMD_IOMMU_PROTO_H
-#define _ASM_X86_AMD_IOMMU_PROTO_H
-
-#include "amd_iommu_types.h"
-
-extern int amd_iommu_get_num_iommus(void);
-extern int amd_iommu_init_dma_ops(void);
-extern int amd_iommu_init_passthrough(void);
-extern irqreturn_t amd_iommu_int_thread(int irq, void *data);
-extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
-extern void amd_iommu_apply_erratum_63(u16 devid);
-extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu);
-extern int amd_iommu_init_devices(void);
-extern void amd_iommu_uninit_devices(void);
-extern void amd_iommu_init_notifier(void);
-extern int amd_iommu_init_api(void);
-
-#ifdef CONFIG_AMD_IOMMU_DEBUGFS
-void amd_iommu_debugfs_setup(struct amd_iommu *iommu);
-#else
-static inline void amd_iommu_debugfs_setup(struct amd_iommu *iommu) {}
-#endif
-
-/* Needed for interrupt remapping */
-extern int amd_iommu_prepare(void);
-extern int amd_iommu_enable(void);
-extern void amd_iommu_disable(void);
-extern int amd_iommu_reenable(int);
-extern int amd_iommu_enable_faulting(void);
-extern int amd_iommu_guest_ir;
-
-/* IOMMUv2 specific functions */
-struct iommu_domain;
-
-extern bool amd_iommu_v2_supported(void);
-extern int amd_iommu_register_ppr_notifier(struct notifier_block *nb);
-extern int amd_iommu_unregister_ppr_notifier(struct notifier_block *nb);
-extern void amd_iommu_domain_direct_map(struct iommu_domain *dom);
-extern int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids);
-extern int amd_iommu_flush_page(struct iommu_domain *dom, int pasid,
- u64 address);
-extern int amd_iommu_flush_tlb(struct iommu_domain *dom, int pasid);
-extern int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, int pasid,
- unsigned long cr3);
-extern int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, int pasid);
-extern struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev);
-
-#ifdef CONFIG_IRQ_REMAP
-extern int amd_iommu_create_irq_domain(struct amd_iommu *iommu);
-#else
-static inline int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
-{
- return 0;
-}
-#endif
-
-#define PPR_SUCCESS 0x0
-#define PPR_INVALID 0x1
-#define PPR_FAILURE 0xf
-
-extern int amd_iommu_complete_ppr(struct pci_dev *pdev, int pasid,
- int status, int tag);
-
-static inline bool is_rd890_iommu(struct pci_dev *pdev)
-{
- return (pdev->vendor == PCI_VENDOR_ID_ATI) &&
- (pdev->device == PCI_DEVICE_ID_RD890_IOMMU);
-}
-
-static inline bool iommu_feature(struct amd_iommu *iommu, u64 f)
-{
- if (!(iommu->cap & (1 << IOMMU_CAP_EFR)))
- return false;
-
- return !!(iommu->features & f);
-}
-
-static inline u64 iommu_virt_to_phys(void *vaddr)
-{
- return (u64)__sme_set(virt_to_phys(vaddr));
-}
-
-static inline void *iommu_phys_to_virt(unsigned long paddr)
-{
- return phys_to_virt(__sme_clr(paddr));
-}
-
-extern bool translation_pre_enabled(struct amd_iommu *iommu);
-extern struct iommu_dev_data *get_dev_data(struct device *dev);
-#endif /* _ASM_X86_AMD_IOMMU_PROTO_H */
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index 7a8fdec138bd..30a5d412255a 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -395,10 +395,10 @@
#define PD_IOMMUV2_MASK (1UL << 3) /* domain has gcr3 table */
extern bool amd_iommu_dump;
-#define DUMP_printk(format, arg...) \
- do { \
- if (amd_iommu_dump) \
- printk(KERN_INFO "AMD-Vi: " format, ## arg); \
+#define DUMP_printk(format, arg...) \
+ do { \
+ if (amd_iommu_dump) \
+ pr_info("AMD-Vi: " format, ## arg); \
} while(0);
/* global flag if IOMMUs cache non-present entries */
@@ -645,7 +645,6 @@ struct iommu_dev_data {
struct pci_dev *pdev;
u16 devid; /* PCI Device ID */
bool iommu_v2; /* Device can make use of IOMMUv2 */
- bool passthrough; /* Device is identity mapped */
struct {
bool enabled;
int qdep;
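The DUMP_printk() macro reworked in this header keeps the usual conditional-logging shape: a do { ... } while (0) wrapper so the guarded print stays a single statement inside if/else bodies, now emitted through pr_info(). A stand-alone sketch of the same idiom (macro and variable names here are illustrative, not the kernel's):

#include <stdio.h>
#include <stdbool.h>

static bool amd_iommu_dump = true;

/* do { } while (0) keeps the macro usable as one statement. */
#define DUMP_printf(fmt, ...)                              \
    do {                                                   \
        if (amd_iommu_dump)                                \
            printf("AMD-Vi: " fmt, ##__VA_ARGS__);         \
    } while (0)

int main(void)
{
    /* Expands safely even as the body of an if/else. */
    if (amd_iommu_dump)
        DUMP_printf("device table entry for %04x\n", 0x10);
    else
        printf("dump disabled\n");
    return 0;
}
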
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index d6d85debd01b..e4b025c5637c 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -13,13 +13,11 @@
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
-#include <linux/iommu.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/gfp.h>
-#include "amd_iommu_types.h"
-#include "amd_iommu_proto.h"
+#include "amd_iommu.h"
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Joerg Roedel <jroedel@suse.de>");
@@ -487,7 +485,7 @@ static void do_fault(struct work_struct *work)
flags |= FAULT_FLAG_WRITE;
flags |= FAULT_FLAG_REMOTE;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = find_extend_vma(mm, address);
if (!vma || address < vma->vm_start)
/* failed to get a vma in the right range */
@@ -499,7 +497,7 @@ static void do_fault(struct work_struct *work)
ret = handle_mm_fault(vma, address, flags);
out:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (ret & VM_FAULT_ERROR)
/* failed to service fault */
@@ -517,13 +515,12 @@ static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data)
struct amd_iommu_fault *iommu_fault;
struct pasid_state *pasid_state;
struct device_state *dev_state;
+ struct pci_dev *pdev = NULL;
unsigned long flags;
struct fault *fault;
bool finish;
u16 tag, devid;
int ret;
- struct iommu_dev_data *dev_data;
- struct pci_dev *pdev = NULL;
iommu_fault = data;
tag = iommu_fault->tag & 0x1ff;
@@ -534,12 +531,11 @@ static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data)
devid & 0xff);
if (!pdev)
return -ENODEV;
- dev_data = get_dev_data(&pdev->dev);
- /* In kdump kernel pci dev is not initialized yet -> send INVALID */
ret = NOTIFY_DONE;
- if (translation_pre_enabled(amd_iommu_rlookup_table[devid])
- && dev_data->defer_attach) {
+
+ /* In kdump kernel pci dev is not initialized yet -> send INVALID */
+ if (amd_iommu_is_attach_deferred(NULL, &pdev->dev)) {
amd_iommu_complete_ppr(pdev, iommu_fault->pasid,
PPR_INVALID, tag);
goto out;
diff --git a/drivers/iommu/arm-smmu-impl.c b/drivers/iommu/arm-smmu-impl.c
index 74d97a886e93..c75b9d957b70 100644
--- a/drivers/iommu/arm-smmu-impl.c
+++ b/drivers/iommu/arm-smmu-impl.c
@@ -150,6 +150,8 @@ static const struct arm_smmu_impl arm_mmu500_impl = {
struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu)
{
+ const struct device_node *np = smmu->dev->of_node;
+
/*
* We will inevitably have to combine model-specific implementation
* quirks with platform-specific integration quirks, but everything
@@ -166,11 +168,11 @@ struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu)
break;
}
- if (of_property_read_bool(smmu->dev->of_node,
- "calxeda,smmu-secure-config-access"))
+ if (of_property_read_bool(np, "calxeda,smmu-secure-config-access"))
smmu->impl = &calxeda_impl;
- if (of_device_is_compatible(smmu->dev->of_node, "qcom,sdm845-smmu-500"))
+ if (of_device_is_compatible(np, "qcom,sdm845-smmu-500") ||
+ of_device_is_compatible(np, "qcom,sc7180-smmu-500"))
return qcom_smmu_impl_init(smmu);
return smmu;
diff --git a/drivers/iommu/arm-smmu-qcom.c b/drivers/iommu/arm-smmu-qcom.c
index 24c071c1d8b0..cf01d0215a39 100644
--- a/drivers/iommu/arm-smmu-qcom.c
+++ b/drivers/iommu/arm-smmu-qcom.c
@@ -3,6 +3,7 @@
* Copyright (c) 2019, The Linux Foundation. All rights reserved.
*/
+#include <linux/of_device.h>
#include <linux/qcom_scm.h>
#include "arm-smmu.h"
@@ -11,12 +12,29 @@ struct qcom_smmu {
struct arm_smmu_device smmu;
};
+static const struct of_device_id qcom_smmu_client_of_match[] = {
+ { .compatible = "qcom,adreno" },
+ { .compatible = "qcom,mdp4" },
+ { .compatible = "qcom,mdss" },
+ { .compatible = "qcom,sc7180-mdss" },
+ { .compatible = "qcom,sc7180-mss-pil" },
+ { .compatible = "qcom,sdm845-mdss" },
+ { .compatible = "qcom,sdm845-mss-pil" },
+ { }
+};
+
+static int qcom_smmu_def_domain_type(struct device *dev)
+{
+ const struct of_device_id *match =
+ of_match_device(qcom_smmu_client_of_match, dev);
+
+ return match ? IOMMU_DOMAIN_IDENTITY : 0;
+}
+
static int qcom_sdm845_smmu500_reset(struct arm_smmu_device *smmu)
{
int ret;
- arm_mmu500_reset(smmu);
-
/*
* To address performance degradation in non-real time clients,
* such as USB and UFS, turn off wait-for-safe on sdm845 based boards,
@@ -30,8 +48,21 @@ static int qcom_sdm845_smmu500_reset(struct arm_smmu_device *smmu)
return ret;
}
+static int qcom_smmu500_reset(struct arm_smmu_device *smmu)
+{
+ const struct device_node *np = smmu->dev->of_node;
+
+ arm_mmu500_reset(smmu);
+
+ if (of_device_is_compatible(np, "qcom,sdm845-smmu-500"))
+ return qcom_sdm845_smmu500_reset(smmu);
+
+ return 0;
+}
+
static const struct arm_smmu_impl qcom_smmu_impl = {
- .reset = qcom_sdm845_smmu500_reset,
+ .def_domain_type = qcom_smmu_def_domain_type,
+ .reset = qcom_smmu500_reset,
};
struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu)
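qcom_smmu_def_domain_type() asks for an identity (bypass) default domain whenever the client device matches one of the listed compatibles; everything else falls through with 0, meaning no preference. A user-space sketch of that table lookup, reduced to string comparison (of_match_device() does more than this, and the IOMMU_DOMAIN_IDENTITY value is shown only for illustration):

#include <stdio.h>
#include <string.h>

#define IOMMU_DOMAIN_IDENTITY 4   /* illustrative value */

static const char * const qcom_idmap_clients[] = {
    "qcom,adreno", "qcom,mdp4", "qcom,mdss",
    "qcom,sc7180-mdss", "qcom,sc7180-mss-pil",
    "qcom,sdm845-mdss", "qcom,sdm845-mss-pil",
    NULL,
};

/* Returns a forced default domain type, or 0 for "no preference". */
static int def_domain_type(const char *compatible)
{
    for (int i = 0; qcom_idmap_clients[i]; i++)
        if (!strcmp(compatible, qcom_idmap_clients[i]))
            return IOMMU_DOMAIN_IDENTITY;
    return 0;
}

int main(void)
{
    printf("qcom,mdss  -> %d\n", def_domain_type("qcom,mdss"));
    printf("vendor,foo -> %d\n", def_domain_type("vendor,foo"));
    return 0;
}
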
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 82508730feb7..f578677a5c41 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -171,6 +171,8 @@
#define ARM_SMMU_PRIQ_IRQ_CFG1 0xd8
#define ARM_SMMU_PRIQ_IRQ_CFG2 0xdc
+#define ARM_SMMU_REG_SZ 0xe00
+
/* Common MSI config fields */
#define MSI_CFG0_ADDR_MASK GENMASK_ULL(51, 2)
#define MSI_CFG2_SH GENMASK(5, 4)
@@ -628,6 +630,7 @@ struct arm_smmu_strtab_cfg {
struct arm_smmu_device {
struct device *dev;
void __iomem *base;
+ void __iomem *page1;
#define ARM_SMMU_FEAT_2_LVL_STRTAB (1 << 0)
#define ARM_SMMU_FEAT_2_LVL_CDTAB (1 << 1)
@@ -664,7 +667,6 @@ struct arm_smmu_device {
#define ARM_SMMU_MAX_ASIDS (1 << 16)
unsigned int asid_bits;
- DECLARE_BITMAP(asid_map, ARM_SMMU_MAX_ASIDS);
#define ARM_SMMU_MAX_VMIDS (1 << 16)
unsigned int vmid_bits;
@@ -724,6 +726,8 @@ struct arm_smmu_option_prop {
const char *prop;
};
+static DEFINE_XARRAY_ALLOC1(asid_xa);
+
static struct arm_smmu_option_prop arm_smmu_options[] = {
{ ARM_SMMU_OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" },
{ ARM_SMMU_OPT_PAGE0_REGS_ONLY, "cavium,cn9900-broken-page1-regspace"},
@@ -733,9 +737,8 @@ static struct arm_smmu_option_prop arm_smmu_options[] = {
static inline void __iomem *arm_smmu_page1_fixup(unsigned long offset,
struct arm_smmu_device *smmu)
{
- if ((offset > SZ_64K) &&
- (smmu->options & ARM_SMMU_OPT_PAGE0_REGS_ONLY))
- offset -= SZ_64K;
+ if (offset > SZ_64K)
+ return smmu->page1 + offset - SZ_64K;
return smmu->base + offset;
}
@@ -1763,6 +1766,14 @@ static void arm_smmu_free_cd_tables(struct arm_smmu_domain *smmu_domain)
cdcfg->cdtab = NULL;
}
+static void arm_smmu_free_asid(struct arm_smmu_ctx_desc *cd)
+{
+ if (!cd->asid)
+ return;
+
+ xa_erase(&asid_xa, cd->asid);
+}
+
/* Stream table manipulation functions */
static void
arm_smmu_write_strtab_l1_desc(__le64 *dst, struct arm_smmu_strtab_l1_desc *desc)
@@ -2448,10 +2459,9 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;
- if (cfg->cdcfg.cdtab) {
+ if (cfg->cdcfg.cdtab)
arm_smmu_free_cd_tables(smmu_domain);
- arm_smmu_bitmap_free(smmu->asid_map, cfg->cd.asid);
- }
+ arm_smmu_free_asid(&cfg->cd);
} else {
struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
if (cfg->vmid)
@@ -2466,14 +2476,15 @@ static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
struct io_pgtable_cfg *pgtbl_cfg)
{
int ret;
- int asid;
+ u32 asid;
struct arm_smmu_device *smmu = smmu_domain->smmu;
struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;
typeof(&pgtbl_cfg->arm_lpae_s1_cfg.tcr) tcr = &pgtbl_cfg->arm_lpae_s1_cfg.tcr;
- asid = arm_smmu_bitmap_alloc(smmu->asid_map, smmu->asid_bits);
- if (asid < 0)
- return asid;
+ ret = xa_alloc(&asid_xa, &asid, &cfg->cd,
+ XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL);
+ if (ret)
+ return ret;
cfg->s1cdmax = master->ssid_bits;
@@ -2506,7 +2517,7 @@ static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
out_free_cd_tables:
arm_smmu_free_cd_tables(smmu_domain);
out_free_asid:
- arm_smmu_bitmap_free(smmu->asid_map, asid);
+ arm_smmu_free_asid(&cfg->cd);
return ret;
}
@@ -2652,26 +2663,20 @@ static void arm_smmu_install_ste_for_dev(struct arm_smmu_master *master)
}
}
-#ifdef CONFIG_PCI_ATS
static bool arm_smmu_ats_supported(struct arm_smmu_master *master)
{
- struct pci_dev *pdev;
+ struct device *dev = master->dev;
struct arm_smmu_device *smmu = master->smmu;
- struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(master->dev);
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
- if (!(smmu->features & ARM_SMMU_FEAT_ATS) || !dev_is_pci(master->dev) ||
- !(fwspec->flags & IOMMU_FWSPEC_PCI_RC_ATS) || pci_ats_disabled())
+ if (!(smmu->features & ARM_SMMU_FEAT_ATS))
return false;
- pdev = to_pci_dev(master->dev);
- return !pdev->untrusted && pdev->ats_cap;
-}
-#else
-static bool arm_smmu_ats_supported(struct arm_smmu_master *master)
-{
- return false;
+ if (!(fwspec->flags & IOMMU_FWSPEC_PCI_RC_ATS))
+ return false;
+
+ return dev_is_pci(dev) && pci_ats_supported(to_pci_dev(dev));
}
-#endif
static void arm_smmu_enable_ats(struct arm_smmu_master *master)
{
@@ -2914,27 +2919,26 @@ static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid)
static struct iommu_ops arm_smmu_ops;
-static int arm_smmu_add_device(struct device *dev)
+static struct iommu_device *arm_smmu_probe_device(struct device *dev)
{
int i, ret;
struct arm_smmu_device *smmu;
struct arm_smmu_master *master;
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
- struct iommu_group *group;
if (!fwspec || fwspec->ops != &arm_smmu_ops)
- return -ENODEV;
+ return ERR_PTR(-ENODEV);
if (WARN_ON_ONCE(dev_iommu_priv_get(dev)))
- return -EBUSY;
+ return ERR_PTR(-EBUSY);
smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
if (!smmu)
- return -ENODEV;
+ return ERR_PTR(-ENODEV);
master = kzalloc(sizeof(*master), GFP_KERNEL);
if (!master)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
master->dev = dev;
master->smmu = smmu;
@@ -2975,43 +2979,24 @@ static int arm_smmu_add_device(struct device *dev)
master->ssid_bits = min_t(u8, master->ssid_bits,
CTXDESC_LINEAR_CDMAX);
- ret = iommu_device_link(&smmu->iommu, dev);
- if (ret)
- goto err_disable_pasid;
-
- group = iommu_group_get_for_dev(dev);
- if (IS_ERR(group)) {
- ret = PTR_ERR(group);
- goto err_unlink;
- }
+ return &smmu->iommu;
- iommu_group_put(group);
- return 0;
-
-err_unlink:
- iommu_device_unlink(&smmu->iommu, dev);
-err_disable_pasid:
- arm_smmu_disable_pasid(master);
err_free_master:
kfree(master);
dev_iommu_priv_set(dev, NULL);
- return ret;
+ return ERR_PTR(ret);
}
-static void arm_smmu_remove_device(struct device *dev)
+static void arm_smmu_release_device(struct device *dev)
{
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct arm_smmu_master *master;
- struct arm_smmu_device *smmu;
if (!fwspec || fwspec->ops != &arm_smmu_ops)
return;
master = dev_iommu_priv_get(dev);
- smmu = master->smmu;
arm_smmu_detach_dev(master);
- iommu_group_remove_device(dev);
- iommu_device_unlink(&smmu->iommu, dev);
arm_smmu_disable_pasid(master);
kfree(master);
iommu_fwspec_free(dev);
@@ -3138,8 +3123,8 @@ static struct iommu_ops arm_smmu_ops = {
.flush_iotlb_all = arm_smmu_flush_iotlb_all,
.iotlb_sync = arm_smmu_iotlb_sync,
.iova_to_phys = arm_smmu_iova_to_phys,
- .add_device = arm_smmu_add_device,
- .remove_device = arm_smmu_remove_device,
+ .probe_device = arm_smmu_probe_device,
+ .release_device = arm_smmu_release_device,
.device_group = arm_smmu_device_group,
.domain_get_attr = arm_smmu_domain_get_attr,
.domain_set_attr = arm_smmu_domain_set_attr,
@@ -4021,6 +4006,18 @@ err_reset_pci_ops: __maybe_unused;
return err;
}
+static void __iomem *arm_smmu_ioremap(struct device *dev, resource_size_t start,
+ resource_size_t size)
+{
+ struct resource res = {
+ .flags = IORESOURCE_MEM,
+ .start = start,
+ .end = start + size - 1,
+ };
+
+ return devm_ioremap_resource(dev, &res);
+}
+
static int arm_smmu_device_probe(struct platform_device *pdev)
{
int irq, ret;
@@ -4056,10 +4053,23 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
}
ioaddr = res->start;
- smmu->base = devm_ioremap_resource(dev, res);
+ /*
+ * Don't map the IMPLEMENTATION DEFINED regions, since they may contain
+ * the PMCG registers which are reserved by the PMU driver.
+ */
+ smmu->base = arm_smmu_ioremap(dev, ioaddr, ARM_SMMU_REG_SZ);
if (IS_ERR(smmu->base))
return PTR_ERR(smmu->base);
+ if (arm_smmu_resource_size(smmu) > SZ_64K) {
+ smmu->page1 = arm_smmu_ioremap(dev, ioaddr + SZ_64K,
+ ARM_SMMU_REG_SZ);
+ if (IS_ERR(smmu->page1))
+ return PTR_ERR(smmu->page1);
+ } else {
+ smmu->page1 = smmu->base;
+ }
+
/* Interrupt lines */
irq = platform_get_irq_byname_optional(pdev, "combined");
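With the registers now mapped as two separate ARM_SMMU_REG_SZ windows, arm_smmu_page1_fixup() becomes plain arithmetic: offsets above 64K are served from the page1 mapping, everything else from base, and on PAGE0_REGS_ONLY hardware page1 simply aliases base. A stand-alone sketch of that offset fixup (the base/page1 values are fake placeholders):

#include <stdint.h>
#include <stdio.h>

#define SZ_64K 0x10000UL

struct smmu {
    uintptr_t base;   /* mapping of registers 0x0000..0x0dff */
    uintptr_t page1;  /* mapping of the 64K-offset registers (or == base) */
};

static uintptr_t page1_fixup(unsigned long offset, const struct smmu *s)
{
    if (offset > SZ_64K)
        return s->page1 + offset - SZ_64K;
    return s->base + offset;
}

int main(void)
{
    struct smmu s = { .base = 0x1000, .page1 = 0x9000 };

    printf("offset 0x60    -> %#lx (page0)\n", (unsigned long)page1_fixup(0x60, &s));
    printf("offset 0x100a8 -> %#lx (page1)\n", (unsigned long)page1_fixup(0x100a8, &s));
    return 0;
}
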
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index a6a5796e9c41..243bc4cb2705 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -220,7 +220,7 @@ static int arm_smmu_register_legacy_master(struct device *dev,
* With the legacy DT binding in play, we have no guarantees about
* probe order, but then we're also not doing default domains, so we can
* delay setting bus ops until we're sure every possible SMMU is ready,
- * and that way ensure that no add_device() calls get missed.
+ * and that way ensure that no probe_device() calls get missed.
*/
static int arm_smmu_legacy_bus_init(void)
{
@@ -1062,7 +1062,6 @@ static int arm_smmu_master_alloc_smes(struct device *dev)
struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
struct arm_smmu_device *smmu = cfg->smmu;
struct arm_smmu_smr *smrs = smmu->smrs;
- struct iommu_group *group;
int i, idx, ret;
mutex_lock(&smmu->stream_map_mutex);
@@ -1090,18 +1089,9 @@ static int arm_smmu_master_alloc_smes(struct device *dev)
cfg->smendx[i] = (s16)idx;
}
- group = iommu_group_get_for_dev(dev);
- if (IS_ERR(group)) {
- ret = PTR_ERR(group);
- goto out_err;
- }
- iommu_group_put(group);
-
/* It worked! Now, poke the actual hardware */
- for_each_cfg_sme(cfg, fwspec, i, idx) {
+ for_each_cfg_sme(cfg, fwspec, i, idx)
arm_smmu_write_sme(smmu, idx);
- smmu->s2crs[idx].group = group;
- }
mutex_unlock(&smmu->stream_map_mutex);
return 0;
@@ -1172,7 +1162,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
/*
* FIXME: The arch/arm DMA API code tries to attach devices to its own
- * domains between of_xlate() and add_device() - we have no way to cope
+ * domains between of_xlate() and probe_device() - we have no way to cope
* with that, so until ARM gets converted to rely on groups and default
* domains, just say no (but more politely than by dereferencing NULL).
* This should be at least a WARN_ON once that's sorted.
@@ -1382,7 +1372,7 @@ struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
return dev ? dev_get_drvdata(dev) : NULL;
}
-static int arm_smmu_add_device(struct device *dev)
+static struct iommu_device *arm_smmu_probe_device(struct device *dev)
{
struct arm_smmu_device *smmu = NULL;
struct arm_smmu_master_cfg *cfg;
@@ -1403,7 +1393,7 @@ static int arm_smmu_add_device(struct device *dev)
} else if (fwspec && fwspec->ops == &arm_smmu_ops) {
smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
} else {
- return -ENODEV;
+ return ERR_PTR(-ENODEV);
}
ret = -EINVAL;
@@ -1444,21 +1434,19 @@ static int arm_smmu_add_device(struct device *dev)
if (ret)
goto out_cfg_free;
- iommu_device_link(&smmu->iommu, dev);
-
device_link_add(dev, smmu->dev,
DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_SUPPLIER);
- return 0;
+ return &smmu->iommu;
out_cfg_free:
kfree(cfg);
out_free:
iommu_fwspec_free(dev);
- return ret;
+ return ERR_PTR(ret);
}
-static void arm_smmu_remove_device(struct device *dev)
+static void arm_smmu_release_device(struct device *dev)
{
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct arm_smmu_master_cfg *cfg;
@@ -1475,13 +1463,11 @@ static void arm_smmu_remove_device(struct device *dev)
if (ret < 0)
return;
- iommu_device_unlink(&smmu->iommu, dev);
arm_smmu_master_free_smes(cfg, fwspec);
arm_smmu_rpm_put(smmu);
dev_iommu_priv_set(dev, NULL);
- iommu_group_remove_device(dev);
kfree(cfg);
iommu_fwspec_free(dev);
}
@@ -1512,6 +1498,11 @@ static struct iommu_group *arm_smmu_device_group(struct device *dev)
else
group = generic_device_group(dev);
+ /* Remember group for faster lookups */
+ if (!IS_ERR(group))
+ for_each_cfg_sme(cfg, fwspec, i, idx)
+ smmu->s2crs[idx].group = group;
+
return group;
}
@@ -1618,6 +1609,17 @@ static void arm_smmu_get_resv_regions(struct device *dev,
iommu_dma_get_resv_regions(dev, head);
}
+static int arm_smmu_def_domain_type(struct device *dev)
+{
+ struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
+ const struct arm_smmu_impl *impl = cfg->smmu->impl;
+
+ if (impl && impl->def_domain_type)
+ return impl->def_domain_type(dev);
+
+ return 0;
+}
+
static struct iommu_ops arm_smmu_ops = {
.capable = arm_smmu_capable,
.domain_alloc = arm_smmu_domain_alloc,
@@ -1628,14 +1630,15 @@ static struct iommu_ops arm_smmu_ops = {
.flush_iotlb_all = arm_smmu_flush_iotlb_all,
.iotlb_sync = arm_smmu_iotlb_sync,
.iova_to_phys = arm_smmu_iova_to_phys,
- .add_device = arm_smmu_add_device,
- .remove_device = arm_smmu_remove_device,
+ .probe_device = arm_smmu_probe_device,
+ .release_device = arm_smmu_release_device,
.device_group = arm_smmu_device_group,
.domain_get_attr = arm_smmu_domain_get_attr,
.domain_set_attr = arm_smmu_domain_set_attr,
.of_xlate = arm_smmu_of_xlate,
.get_resv_regions = arm_smmu_get_resv_regions,
.put_resv_regions = generic_iommu_put_resv_regions,
+ .def_domain_type = arm_smmu_def_domain_type,
.pgsize_bitmap = -1UL, /* Restricted during device attach */
};
@@ -2253,7 +2256,7 @@ static int arm_smmu_device_remove(struct platform_device *pdev)
return -ENODEV;
if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
- dev_err(&pdev->dev, "removing device with active domains!\n");
+ dev_notice(&pdev->dev, "disabling translation\n");
arm_smmu_bus_init(NULL);
iommu_device_unregister(&smmu->iommu);
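The probe_device()/release_device() conversion changes the contract: instead of returning an int and creating the group itself, arm_smmu_probe_device() returns either the SMMU's struct iommu_device or an error folded into the pointer with ERR_PTR(), leaving group handling to the IOMMU core. A rough user-space model of the ERR_PTR()/IS_ERR()/PTR_ERR() encoding this relies on (MAX_ERRNO and the errno values are the conventional ones, shown here only for illustration):

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095

static void *err_ptr(long error)      { return (void *)error; }          /* ERR_PTR()  */
static long  ptr_err(const void *p)   { return (long)p; }                /* PTR_ERR()  */
static int   is_err(const void *p)                                       /* IS_ERR()   */
{
    return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

struct iommu_device { const char *name; };

static struct iommu_device *probe_device(int behind_this_iommu)
{
    static struct iommu_device smmu = { "smmu0" };

    if (!behind_this_iommu)
        return err_ptr(-ENODEV);   /* core treats this as "not my device" */
    return &smmu;
}

int main(void)
{
    struct iommu_device *dev = probe_device(0);

    if (is_err(dev))
        printf("probe failed: %ld\n", ptr_err(dev));
    else
        printf("probed %s\n", dev->name);
    return 0;
}
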
diff --git a/drivers/iommu/arm-smmu.h b/drivers/iommu/arm-smmu.h
index 8d1cd54d82a6..d172c024be61 100644
--- a/drivers/iommu/arm-smmu.h
+++ b/drivers/iommu/arm-smmu.h
@@ -386,6 +386,7 @@ struct arm_smmu_impl {
int (*init_context)(struct arm_smmu_domain *smmu_domain);
void (*tlb_sync)(struct arm_smmu_device *smmu, int page, int sync,
int status);
+ int (*def_domain_type)(struct device *dev);
};
static inline void __iomem *arm_smmu_page(struct arm_smmu_device *smmu, int n)
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index ba128d1cdaee..4959f5df21bd 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -952,7 +952,7 @@ static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
/* Non-coherent atomic allocation? Easy */
if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
- dma_free_from_pool(cpu_addr, alloc_size))
+ dma_free_from_pool(dev, cpu_addr, alloc_size))
return;
if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
@@ -1035,7 +1035,8 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
!gfpflags_allow_blocking(gfp) && !coherent)
- cpu_addr = dma_alloc_from_pool(PAGE_ALIGN(size), &page, gfp);
+ cpu_addr = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &page,
+ gfp);
else
cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
if (!cpu_addr)
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index f77dae7ba7d4..60a2970c37ff 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -963,6 +963,7 @@ static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
warn_invalid_dmar(phys_addr, " returns all ones");
goto unmap;
}
+ iommu->vccap = dmar_readq(iommu->reg + DMAR_VCCAP_REG);
/* the registers might be more than one page */
map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
@@ -1156,12 +1157,11 @@ static inline void reclaim_free_desc(struct q_inval *qi)
}
}
-static int qi_check_fault(struct intel_iommu *iommu, int index)
+static int qi_check_fault(struct intel_iommu *iommu, int index, int wait_index)
{
u32 fault;
int head, tail;
struct q_inval *qi = iommu->qi;
- int wait_index = (index + 1) % QI_LENGTH;
int shift = qi_shift(iommu);
if (qi->desc_status[wait_index] == QI_ABORT)
@@ -1224,17 +1224,21 @@ static int qi_check_fault(struct intel_iommu *iommu, int index)
}
/*
- * Submit the queued invalidation descriptor to the remapping
- * hardware unit and wait for its completion.
+ * Function to submit invalidation descriptors of all types to the queued
+ * invalidation interface(QI). Multiple descriptors can be submitted at a
+ * time, a wait descriptor will be appended to each submission to ensure
+ * hardware has completed the invalidation before return. Wait descriptors
+ * can be part of the submission but it will not be polled for completion.
*/
-int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
+int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc,
+ unsigned int count, unsigned long options)
{
- int rc;
struct q_inval *qi = iommu->qi;
- int offset, shift, length;
struct qi_desc wait_desc;
int wait_index, index;
unsigned long flags;
+ int offset, shift;
+ int rc, i;
if (!qi)
return 0;
@@ -1243,32 +1247,41 @@ restart:
rc = 0;
raw_spin_lock_irqsave(&qi->q_lock, flags);
- while (qi->free_cnt < 3) {
+ /*
+ * Check if we have enough empty slots in the queue to submit,
+ * the calculation is based on:
+ * # of desc + 1 wait desc + 1 space between head and tail
+ */
+ while (qi->free_cnt < count + 2) {
raw_spin_unlock_irqrestore(&qi->q_lock, flags);
cpu_relax();
raw_spin_lock_irqsave(&qi->q_lock, flags);
}
index = qi->free_head;
- wait_index = (index + 1) % QI_LENGTH;
+ wait_index = (index + count) % QI_LENGTH;
shift = qi_shift(iommu);
- length = 1 << shift;
- qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;
+ for (i = 0; i < count; i++) {
+ offset = ((index + i) % QI_LENGTH) << shift;
+ memcpy(qi->desc + offset, &desc[i], 1 << shift);
+ qi->desc_status[(index + i) % QI_LENGTH] = QI_IN_USE;
+ }
+ qi->desc_status[wait_index] = QI_IN_USE;
- offset = index << shift;
- memcpy(qi->desc + offset, desc, length);
wait_desc.qw0 = QI_IWD_STATUS_DATA(QI_DONE) |
QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
+ if (options & QI_OPT_WAIT_DRAIN)
+ wait_desc.qw0 |= QI_IWD_PRQ_DRAIN;
wait_desc.qw1 = virt_to_phys(&qi->desc_status[wait_index]);
wait_desc.qw2 = 0;
wait_desc.qw3 = 0;
offset = wait_index << shift;
- memcpy(qi->desc + offset, &wait_desc, length);
+ memcpy(qi->desc + offset, &wait_desc, 1 << shift);
- qi->free_head = (qi->free_head + 2) % QI_LENGTH;
- qi->free_cnt -= 2;
+ qi->free_head = (qi->free_head + count + 1) % QI_LENGTH;
+ qi->free_cnt -= count + 1;
/*
* update the HW tail register indicating the presence of
@@ -1284,7 +1297,7 @@ restart:
* a deadlock where the interrupt context can wait indefinitely
* for free slots in the queue.
*/
- rc = qi_check_fault(iommu, index);
+ rc = qi_check_fault(iommu, index, wait_index);
if (rc)
break;
@@ -1293,7 +1306,8 @@ restart:
raw_spin_lock(&qi->q_lock);
}
- qi->desc_status[index] = QI_DONE;
+ for (i = 0; i < count; i++)
+ qi->desc_status[(index + i) % QI_LENGTH] = QI_DONE;
reclaim_free_desc(qi);
raw_spin_unlock_irqrestore(&qi->q_lock, flags);
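The batched qi_submit_sync() above reserves count descriptor slots plus one wait descriptor, and keeps one extra slot free so head and tail never collide, which is where the free_cnt < count + 2 check comes from; the wait descriptor lands at (index + count) % QI_LENGTH and is the only status entry polled for completion. A stand-alone sketch of that ring arithmetic (a QI_LENGTH of 256 matches the driver; the rest is simplified):

#include <stdio.h>

#define QI_LENGTH 256   /* entries in the invalidation queue */

struct qi {
    unsigned int free_head;   /* next free slot */
    unsigned int free_cnt;    /* free slots remaining */
};

/* Returns the wait-descriptor index, or -1 if the queue is too full. */
static int submit(struct qi *qi, unsigned int count)
{
    unsigned int index, wait_index;

    /* count descriptors + 1 wait descriptor + 1 gap between head and tail */
    if (qi->free_cnt < count + 2)
        return -1;

    index = qi->free_head;
    wait_index = (index + count) % QI_LENGTH;

    qi->free_head = (qi->free_head + count + 1) % QI_LENGTH;
    qi->free_cnt -= count + 1;

    printf("descs at %u..%u, wait desc at %u, %u slots left\n",
           index, (index + count - 1) % QI_LENGTH, wait_index, qi->free_cnt);
    return (int)wait_index;
}

int main(void)
{
    struct qi qi = { .free_head = 250, .free_cnt = QI_LENGTH };

    submit(&qi, 1);   /* single invalidation, as qi_flush_iotlb() issues */
    submit(&qi, 8);   /* a batch that wraps around the 256-entry ring */
    return 0;
}
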
@@ -1317,7 +1331,7 @@ void qi_global_iec(struct intel_iommu *iommu)
desc.qw3 = 0;
/* should never fail */
- qi_submit_sync(&desc, iommu);
+ qi_submit_sync(iommu, &desc, 1, 0);
}
void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
@@ -1331,7 +1345,7 @@ void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
desc.qw2 = 0;
desc.qw3 = 0;
- qi_submit_sync(&desc, iommu);
+ qi_submit_sync(iommu, &desc, 1, 0);
}
void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
@@ -1355,7 +1369,7 @@ void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
desc.qw2 = 0;
desc.qw3 = 0;
- qi_submit_sync(&desc, iommu);
+ qi_submit_sync(iommu, &desc, 1, 0);
}
void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
@@ -1377,7 +1391,7 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
desc.qw2 = 0;
desc.qw3 = 0;
- qi_submit_sync(&desc, iommu);
+ qi_submit_sync(iommu, &desc, 1, 0);
}
/* PASID-based IOTLB invalidation */
@@ -1418,7 +1432,46 @@ void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
QI_EIOTLB_AM(mask);
}
- qi_submit_sync(&desc, iommu);
+ qi_submit_sync(iommu, &desc, 1, 0);
+}
+
+/* PASID-based device IOTLB Invalidate */
+void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
+ u32 pasid, u16 qdep, u64 addr,
+ unsigned int size_order, u64 granu)
+{
+ unsigned long mask = 1UL << (VTD_PAGE_SHIFT + size_order - 1);
+ struct qi_desc desc = {.qw1 = 0, .qw2 = 0, .qw3 = 0};
+
+ desc.qw0 = QI_DEV_EIOTLB_PASID(pasid) | QI_DEV_EIOTLB_SID(sid) |
+ QI_DEV_EIOTLB_QDEP(qdep) | QI_DEIOTLB_TYPE |
+ QI_DEV_IOTLB_PFSID(pfsid);
+ desc.qw1 = QI_DEV_EIOTLB_GLOB(granu);
+
+ /*
+ * If S bit is 0, we only flush a single page. If S bit is set,
+ * The least significant zero bit indicates the invalidation address
+ * range. VT-d spec 6.5.2.6.
+ * e.g. address bit 12[0] indicates 8KB, 13[0] indicates 16KB.
+ * size order = 0 is PAGE_SIZE 4KB
+ * Max Invs Pending (MIP) is set to 0 for now until we have DIT in
+ * ECAP.
+ */
+ desc.qw1 |= addr & ~mask;
+ if (size_order)
+ desc.qw1 |= QI_DEV_EIOTLB_SIZE;
+
+ qi_submit_sync(iommu, &desc, 1, 0);
+}
+
+void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did,
+ u64 granu, int pasid)
+{
+ struct qi_desc desc = {.qw1 = 0, .qw2 = 0, .qw3 = 0};
+
+ desc.qw0 = QI_PC_PASID(pasid) | QI_PC_DID(did) |
+ QI_PC_GRAN(granu) | QI_PC_TYPE;
+ qi_submit_sync(iommu, &desc, 1, 0);
}
/*
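The qi_flush_dev_iotlb_pasid() comment above describes the VT-d range encoding for S=1 invalidations: the least significant zero bit of the address field tells the hardware how many pages are covered, e.g. bit 12 clear means 8KB. A stand-alone sketch of that encoding as the comment (and VT-d spec 6.5.2.6) describes it; treat it as an illustration of the scheme rather than a restatement of the exact masking the patch performs:

#include <stdint.h>
#include <stdio.h>

#define VTD_PAGE_SHIFT 12

/*
 * For an S=1 invalidation covering (1 << size_order) pages, the bits below
 * the indicating bit are set and the indicating bit itself is cleared.
 */
static uint64_t encode_range(uint64_t addr, unsigned int size_order)
{
    uint64_t ones = ((1ULL << (size_order - 1)) - 1) << VTD_PAGE_SHIFT;
    uint64_t zero = 1ULL << (VTD_PAGE_SHIFT + size_order - 1);

    return (addr & ~(zero | ones)) | ones;
}

int main(void)
{
    uint64_t addr = 0x40000000;

    for (unsigned int order = 1; order <= 3; order++)
        printf("%u pages -> address bits %#llx\n",
               1u << order, (unsigned long long)encode_range(addr, order));
    return 0;
}
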
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 186ff5cc975c..60c8a56e4a3f 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -1235,19 +1235,13 @@ static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *iommu_domain,
return phys;
}
-static int exynos_iommu_add_device(struct device *dev)
+static struct iommu_device *exynos_iommu_probe_device(struct device *dev)
{
struct exynos_iommu_owner *owner = dev->archdata.iommu;
struct sysmmu_drvdata *data;
- struct iommu_group *group;
if (!has_sysmmu(dev))
- return -ENODEV;
-
- group = iommu_group_get_for_dev(dev);
-
- if (IS_ERR(group))
- return PTR_ERR(group);
+ return ERR_PTR(-ENODEV);
list_for_each_entry(data, &owner->controllers, owner_node) {
/*
@@ -1259,12 +1253,15 @@ static int exynos_iommu_add_device(struct device *dev)
DL_FLAG_STATELESS |
DL_FLAG_PM_RUNTIME);
}
- iommu_group_put(group);
- return 0;
+ /* There is always at least one entry, see exynos_iommu_of_xlate() */
+ data = list_first_entry(&owner->controllers,
+ struct sysmmu_drvdata, owner_node);
+
+ return &data->iommu;
}
-static void exynos_iommu_remove_device(struct device *dev)
+static void exynos_iommu_release_device(struct device *dev)
{
struct exynos_iommu_owner *owner = dev->archdata.iommu;
struct sysmmu_drvdata *data;
@@ -1282,7 +1279,6 @@ static void exynos_iommu_remove_device(struct device *dev)
iommu_group_put(group);
}
}
- iommu_group_remove_device(dev);
list_for_each_entry(data, &owner->controllers, owner_node)
device_link_del(data->link);
@@ -1331,8 +1327,8 @@ static const struct iommu_ops exynos_iommu_ops = {
.unmap = exynos_iommu_unmap,
.iova_to_phys = exynos_iommu_iova_to_phys,
.device_group = generic_device_group,
- .add_device = exynos_iommu_add_device,
- .remove_device = exynos_iommu_remove_device,
+ .probe_device = exynos_iommu_probe_device,
+ .release_device = exynos_iommu_release_device,
.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
.of_xlate = exynos_iommu_of_xlate,
};
diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c
index 06828e2698d5..928d37771ece 100644
--- a/drivers/iommu/fsl_pamu_domain.c
+++ b/drivers/iommu/fsl_pamu_domain.c
@@ -1016,25 +1016,13 @@ static struct iommu_group *fsl_pamu_device_group(struct device *dev)
return group;
}
-static int fsl_pamu_add_device(struct device *dev)
+static struct iommu_device *fsl_pamu_probe_device(struct device *dev)
{
- struct iommu_group *group;
-
- group = iommu_group_get_for_dev(dev);
- if (IS_ERR(group))
- return PTR_ERR(group);
-
- iommu_group_put(group);
-
- iommu_device_link(&pamu_iommu, dev);
-
- return 0;
+ return &pamu_iommu;
}
-static void fsl_pamu_remove_device(struct device *dev)
+static void fsl_pamu_release_device(struct device *dev)
{
- iommu_device_unlink(&pamu_iommu, dev);
- iommu_group_remove_device(dev);
}
static const struct iommu_ops fsl_pamu_ops = {
@@ -1048,8 +1036,8 @@ static const struct iommu_ops fsl_pamu_ops = {
.iova_to_phys = fsl_pamu_iova_to_phys,
.domain_set_attr = fsl_pamu_set_domain_attr,
.domain_get_attr = fsl_pamu_get_domain_attr,
- .add_device = fsl_pamu_add_device,
- .remove_device = fsl_pamu_remove_device,
+ .probe_device = fsl_pamu_probe_device,
+ .release_device = fsl_pamu_release_device,
.device_group = fsl_pamu_device_group,
};
diff --git a/drivers/iommu/hyperv-iommu.c b/drivers/iommu/hyperv-iommu.c
index a386b83e0e34..3c0c67a99c7b 100644
--- a/drivers/iommu/hyperv-iommu.c
+++ b/drivers/iommu/hyperv-iommu.c
@@ -131,7 +131,7 @@ static int hyperv_irq_remapping_activate(struct irq_domain *domain,
return 0;
}
-static struct irq_domain_ops hyperv_ir_domain_ops = {
+static const struct irq_domain_ops hyperv_ir_domain_ops = {
.alloc = hyperv_irq_remapping_alloc,
.free = hyperv_irq_remapping_free,
.activate = hyperv_irq_remapping_activate,
diff --git a/drivers/iommu/intel-iommu-debugfs.c b/drivers/iommu/intel-iommu-debugfs.c
index 3eb1fe240fb0..cf1ebb98e418 100644
--- a/drivers/iommu/intel-iommu-debugfs.c
+++ b/drivers/iommu/intel-iommu-debugfs.c
@@ -372,6 +372,66 @@ static int domain_translation_struct_show(struct seq_file *m, void *unused)
}
DEFINE_SHOW_ATTRIBUTE(domain_translation_struct);
+static void invalidation_queue_entry_show(struct seq_file *m,
+ struct intel_iommu *iommu)
+{
+ int index, shift = qi_shift(iommu);
+ struct qi_desc *desc;
+ int offset;
+
+ if (ecap_smts(iommu->ecap))
+ seq_puts(m, "Index\t\tqw0\t\t\tqw1\t\t\tqw2\t\t\tqw3\t\t\tstatus\n");
+ else
+ seq_puts(m, "Index\t\tqw0\t\t\tqw1\t\t\tstatus\n");
+
+ for (index = 0; index < QI_LENGTH; index++) {
+ offset = index << shift;
+ desc = iommu->qi->desc + offset;
+ if (ecap_smts(iommu->ecap))
+ seq_printf(m, "%5d\t%016llx\t%016llx\t%016llx\t%016llx\t%016x\n",
+ index, desc->qw0, desc->qw1,
+ desc->qw2, desc->qw3,
+ iommu->qi->desc_status[index]);
+ else
+ seq_printf(m, "%5d\t%016llx\t%016llx\t%016x\n",
+ index, desc->qw0, desc->qw1,
+ iommu->qi->desc_status[index]);
+ }
+}
+
+static int invalidation_queue_show(struct seq_file *m, void *unused)
+{
+ struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu;
+ unsigned long flags;
+ struct q_inval *qi;
+ int shift;
+
+ rcu_read_lock();
+ for_each_active_iommu(iommu, drhd) {
+ qi = iommu->qi;
+ shift = qi_shift(iommu);
+
+ if (!qi || !ecap_qis(iommu->ecap))
+ continue;
+
+ seq_printf(m, "Invalidation queue on IOMMU: %s\n", iommu->name);
+
+ raw_spin_lock_irqsave(&qi->q_lock, flags);
+ seq_printf(m, " Base: 0x%llx\tHead: %lld\tTail: %lld\n",
+ (u64)virt_to_phys(qi->desc),
+ dmar_readq(iommu->reg + DMAR_IQH_REG) >> shift,
+ dmar_readq(iommu->reg + DMAR_IQT_REG) >> shift);
+ invalidation_queue_entry_show(m, iommu);
+ raw_spin_unlock_irqrestore(&qi->q_lock, flags);
+ seq_putc(m, '\n');
+ }
+ rcu_read_unlock();
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(invalidation_queue);
+
#ifdef CONFIG_IRQ_REMAP
static void ir_tbl_remap_entry_show(struct seq_file *m,
struct intel_iommu *iommu)
@@ -490,6 +550,8 @@ void __init intel_iommu_debugfs_init(void)
debugfs_create_file("domain_translation_struct", 0444,
intel_iommu_debug, NULL,
&domain_translation_struct_fops);
+ debugfs_create_file("invalidation_queue", 0444, intel_iommu_debug,
+ NULL, &invalidation_queue_fops);
#ifdef CONFIG_IRQ_REMAP
debugfs_create_file("ir_translation_struct", 0444, intel_iommu_debug,
NULL, &ir_translation_struct_fops);
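The new invalidation_queue dump walks the ring with offset = index << shift; qi_shift() is 4 for the legacy 16-byte (128-bit) descriptors and 5 for the 32-byte (256-bit) ones used in scalable mode, which is also why the SMTS branch prints qw2/qw3 as well. A tiny sketch of that offset calculation (the 4/5 shift values are stated here as an assumption about qi_shift()):

#include <stdio.h>

/* shift 4 -> 16-byte descriptors, shift 5 -> 32-byte descriptors (SMTS). */
static unsigned long desc_offset(int index, int smts)
{
    int shift = 4 + !!smts;

    return (unsigned long)index << shift;
}

int main(void)
{
    printf("entry 3, legacy:   byte offset %lu\n", desc_offset(3, 0)); /* 48 */
    printf("entry 3, scalable: byte offset %lu\n", desc_offset(3, 1)); /* 96 */
    return 0;
}
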
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 0182cff2c7ac..648a785e078a 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -296,31 +296,6 @@ static inline void context_clear_entry(struct context_entry *context)
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;
-/* si_domain contains mulitple devices */
-#define DOMAIN_FLAG_STATIC_IDENTITY BIT(0)
-
-/*
- * This is a DMA domain allocated through the iommu domain allocation
- * interface. But one or more devices belonging to this domain have
- * been chosen to use a private domain. We should avoid to use the
- * map/unmap/iova_to_phys APIs on it.
- */
-#define DOMAIN_FLAG_LOSE_CHILDREN BIT(1)
-
-/*
- * When VT-d works in the scalable mode, it allows DMA translation to
- * happen through either first level or second level page table. This
- * bit marks that the DMA translation for the domain goes through the
- * first level page table, otherwise, it goes through the second level.
- */
-#define DOMAIN_FLAG_USE_FIRST_LEVEL BIT(2)
-
-/*
- * Domain represents a virtual machine which demands iommu nested
- * translation mode support.
- */
-#define DOMAIN_FLAG_NESTING_MODE BIT(3)
-
#define for_each_domain_iommu(idx, domain) \
for (idx = 0; idx < g_num_of_iommus; idx++) \
if (domain->iommu_refcnt[idx])
@@ -355,11 +330,6 @@ static void domain_exit(struct dmar_domain *domain);
static void domain_remove_dev_info(struct dmar_domain *domain);
static void dmar_remove_one_dev_info(struct device *dev);
static void __dmar_remove_one_dev_info(struct device_domain_info *info);
-static void domain_context_clear(struct intel_iommu *iommu,
- struct device *dev);
-static int domain_detach_iommu(struct dmar_domain *domain,
- struct intel_iommu *iommu);
-static bool device_is_rmrr_locked(struct device *dev);
static int intel_iommu_attach_device(struct iommu_domain *domain,
struct device *dev);
static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -395,6 +365,21 @@ EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
#define DEFER_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-2))
+struct device_domain_info *get_domain_info(struct device *dev)
+{
+ struct device_domain_info *info;
+
+ if (!dev)
+ return NULL;
+
+ info = dev->archdata.iommu;
+ if (unlikely(info == DUMMY_DEVICE_DOMAIN_INFO ||
+ info == DEFER_DEVICE_DOMAIN_INFO))
+ return NULL;
+
+ return info;
+}
+
DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);
@@ -446,12 +431,6 @@ static void init_translation_status(struct intel_iommu *iommu)
iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED;
}
-/* Convert generic 'struct iommu_domain to private struct dmar_domain */
-static struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
-{
- return container_of(dom, struct dmar_domain, domain);
-}
-
static int __init intel_iommu_setup(char *str)
{
if (!str)
@@ -480,8 +459,7 @@ static int __init intel_iommu_setup(char *str)
pr_info("Intel-IOMMU: scalable mode supported\n");
intel_iommu_sm = 1;
} else if (!strncmp(str, "tboot_noforce", 13)) {
- printk(KERN_INFO
- "Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n");
+ pr_info("Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n");
intel_iommu_tboot_noforce = 1;
} else if (!strncmp(str, "nobounce", 8)) {
pr_info("Intel-IOMMU: No bounce buffer. This could expose security risks of DMA attacks\n");
@@ -1454,8 +1432,7 @@ static void iommu_enable_dev_iotlb(struct device_domain_info *info)
!pci_reset_pri(pdev) && !pci_enable_pri(pdev, 32))
info->pri_enabled = 1;
#endif
- if (!pdev->untrusted && info->ats_supported &&
- pci_ats_page_aligned(pdev) &&
+ if (info->ats_supported && pci_ats_page_aligned(pdev) &&
!pci_enable_ats(pdev, VTD_PAGE_SHIFT)) {
info->ats_enabled = 1;
domain_update_iotlb(info->domain);
@@ -1763,6 +1740,9 @@ static void free_dmar_iommu(struct intel_iommu *iommu)
if (ecap_prs(iommu->ecap))
intel_svm_finish_prq(iommu);
}
+ if (ecap_vcs(iommu->ecap) && vccap_pasid(iommu->vccap))
+ ioasid_unregister_allocator(&iommu->pasid_allocator);
+
#endif
}
@@ -1911,11 +1891,6 @@ static int dmar_init_reserved_ranges(void)
return 0;
}
-static void domain_reserve_special_ranges(struct dmar_domain *domain)
-{
- copy_reserved_iova(&reserved_iova_list, &domain->iovad);
-}
-
static inline int guestwidth_to_adjustwidth(int gaw)
{
int agaw;
@@ -1930,65 +1905,6 @@ static inline int guestwidth_to_adjustwidth(int gaw)
return agaw;
}
-static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
- int guest_width)
-{
- int adjust_width, agaw;
- unsigned long sagaw;
- int ret;
-
- init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
-
- if (!intel_iommu_strict) {
- ret = init_iova_flush_queue(&domain->iovad,
- iommu_flush_iova, iova_entry_free);
- if (ret)
- pr_info("iova flush queue initialization failed\n");
- }
-
- domain_reserve_special_ranges(domain);
-
- /* calculate AGAW */
- if (guest_width > cap_mgaw(iommu->cap))
- guest_width = cap_mgaw(iommu->cap);
- domain->gaw = guest_width;
- adjust_width = guestwidth_to_adjustwidth(guest_width);
- agaw = width_to_agaw(adjust_width);
- sagaw = cap_sagaw(iommu->cap);
- if (!test_bit(agaw, &sagaw)) {
- /* hardware doesn't support it, choose a bigger one */
- pr_debug("Hardware doesn't support agaw %d\n", agaw);
- agaw = find_next_bit(&sagaw, 5, agaw);
- if (agaw >= 5)
- return -ENODEV;
- }
- domain->agaw = agaw;
-
- if (ecap_coherent(iommu->ecap))
- domain->iommu_coherency = 1;
- else
- domain->iommu_coherency = 0;
-
- if (ecap_sc_support(iommu->ecap))
- domain->iommu_snooping = 1;
- else
- domain->iommu_snooping = 0;
-
- if (intel_iommu_superpage)
- domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
- else
- domain->iommu_superpage = 0;
-
- domain->nid = iommu->node;
-
- /* always allocate the top pgd */
- domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
- if (!domain->pgd)
- return -ENOMEM;
- __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
- return 0;
-}
-
static void domain_exit(struct dmar_domain *domain)
{
@@ -1996,7 +1912,8 @@ static void domain_exit(struct dmar_domain *domain)
domain_remove_dev_info(domain);
/* destroy iovas */
- put_iova_domain(&domain->iovad);
+ if (domain->domain.type == IOMMU_DOMAIN_DMA)
+ put_iova_domain(&domain->iovad);
if (domain->pgd) {
struct page *freelist;
@@ -2518,11 +2435,8 @@ struct dmar_domain *find_domain(struct device *dev)
if (unlikely(attach_deferred(dev) || iommu_dummy(dev)))
return NULL;
- if (dev_is_pci(dev))
- dev = &pci_real_dma_dev(to_pci_dev(dev))->dev;
-
/* No lock here, assumes no domain exit in normal case */
- info = dev->archdata.iommu;
+ info = get_domain_info(dev);
if (likely(info))
return info->domain;
@@ -2545,7 +2459,7 @@ dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
struct device_domain_info *info;
list_for_each_entry(info, &device_domain_list, global)
- if (info->iommu->segment == segment && info->bus == bus &&
+ if (info->segment == segment && info->bus == bus &&
info->devfn == devfn)
return info;
@@ -2582,6 +2496,12 @@ static int domain_setup_first_level(struct intel_iommu *iommu,
flags);
}
+static bool dev_is_real_dma_subdevice(struct device *dev)
+{
+ return dev && dev_is_pci(dev) &&
+ pci_real_dma_dev(to_pci_dev(dev)) != to_pci_dev(dev);
+}
+
static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
int bus, int devfn,
struct device *dev,
@@ -2596,8 +2516,18 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
if (!info)
return NULL;
- info->bus = bus;
- info->devfn = devfn;
+ if (!dev_is_real_dma_subdevice(dev)) {
+ info->bus = bus;
+ info->devfn = devfn;
+ info->segment = iommu->segment;
+ } else {
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ info->bus = pdev->bus->number;
+ info->devfn = pdev->devfn;
+ info->segment = pci_domain_nr(pdev->bus);
+ }
+
info->ats_supported = info->pasid_supported = info->pri_supported = 0;
info->ats_enabled = info->pasid_enabled = info->pri_enabled = 0;
info->ats_qdep = 0;
@@ -2611,10 +2541,8 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
if (dev && dev_is_pci(dev)) {
struct pci_dev *pdev = to_pci_dev(info->dev);
- if (!pdev->untrusted &&
- !pci_ats_disabled() &&
- ecap_dev_iotlb_support(iommu->ecap) &&
- pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS) &&
+ if (ecap_dev_iotlb_support(iommu->ecap) &&
+ pci_ats_supported(pdev) &&
dmar_find_matched_atsr_unit(pdev))
info->ats_supported = 1;
@@ -2637,7 +2565,8 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
if (!found) {
struct device_domain_info *info2;
- info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
+ info2 = dmar_search_domain_by_dev_info(info->segment, info->bus,
+ info->devfn);
if (info2) {
found = info2->domain;
info2->dev = dev;
@@ -2704,108 +2633,10 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
return domain;
}
-static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
-{
- *(u16 *)opaque = alias;
- return 0;
-}
-
-static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
-{
- struct device_domain_info *info;
- struct dmar_domain *domain = NULL;
- struct intel_iommu *iommu;
- u16 dma_alias;
- unsigned long flags;
- u8 bus, devfn;
-
- iommu = device_to_iommu(dev, &bus, &devfn);
- if (!iommu)
- return NULL;
-
- if (dev_is_pci(dev)) {
- struct pci_dev *pdev = to_pci_dev(dev);
-
- pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
-
- spin_lock_irqsave(&device_domain_lock, flags);
- info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
- PCI_BUS_NUM(dma_alias),
- dma_alias & 0xff);
- if (info) {
- iommu = info->iommu;
- domain = info->domain;
- }
- spin_unlock_irqrestore(&device_domain_lock, flags);
-
- /* DMA alias already has a domain, use it */
- if (info)
- goto out;
- }
-
- /* Allocate and initialize new domain for the device */
- domain = alloc_domain(0);
- if (!domain)
- return NULL;
- if (domain_init(domain, iommu, gaw)) {
- domain_exit(domain);
- return NULL;
- }
-
-out:
- return domain;
-}
-
-static struct dmar_domain *set_domain_for_dev(struct device *dev,
- struct dmar_domain *domain)
-{
- struct intel_iommu *iommu;
- struct dmar_domain *tmp;
- u16 req_id, dma_alias;
- u8 bus, devfn;
-
- iommu = device_to_iommu(dev, &bus, &devfn);
- if (!iommu)
- return NULL;
-
- req_id = ((u16)bus << 8) | devfn;
-
- if (dev_is_pci(dev)) {
- struct pci_dev *pdev = to_pci_dev(dev);
-
- pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
-
- /* register PCI DMA alias device */
- if (req_id != dma_alias) {
- tmp = dmar_insert_one_dev_info(iommu, PCI_BUS_NUM(dma_alias),
- dma_alias & 0xff, NULL, domain);
-
- if (!tmp || tmp != domain)
- return tmp;
- }
- }
-
- tmp = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
- if (!tmp || tmp != domain)
- return tmp;
-
- return domain;
-}
-
static int iommu_domain_identity_map(struct dmar_domain *domain,
- unsigned long long start,
- unsigned long long end)
+ unsigned long first_vpfn,
+ unsigned long last_vpfn)
{
- unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
- unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
-
- if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
- dma_to_mm_pfn(last_vpfn))) {
- pr_err("Reserving iova failed\n");
- return -ENOMEM;
- }
-
- pr_debug("Mapping reserved region %llx-%llx\n", start, end);
/*
* RMRR range might have overlap with physical memory range,
* clear it first
@@ -2817,45 +2648,6 @@ static int iommu_domain_identity_map(struct dmar_domain *domain,
DMA_PTE_READ|DMA_PTE_WRITE);
}
-static int domain_prepare_identity_map(struct device *dev,
- struct dmar_domain *domain,
- unsigned long long start,
- unsigned long long end)
-{
- /* For _hardware_ passthrough, don't bother. But for software
- passthrough, we do it anyway -- it may indicate a memory
- range which is reserved in E820, so which didn't get set
- up to start with in si_domain */
- if (domain == si_domain && hw_pass_through) {
- dev_warn(dev, "Ignoring identity map for HW passthrough [0x%Lx - 0x%Lx]\n",
- start, end);
- return 0;
- }
-
- dev_info(dev, "Setting identity map [0x%Lx - 0x%Lx]\n", start, end);
-
- if (end < start) {
- WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
- "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
- dmi_get_system_info(DMI_BIOS_VENDOR),
- dmi_get_system_info(DMI_BIOS_VERSION),
- dmi_get_system_info(DMI_PRODUCT_VERSION));
- return -EIO;
- }
-
- if (end >> agaw_to_width(domain->agaw)) {
- WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
- "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
- agaw_to_width(domain->agaw),
- dmi_get_system_info(DMI_BIOS_VENDOR),
- dmi_get_system_info(DMI_BIOS_VERSION),
- dmi_get_system_info(DMI_PRODUCT_VERSION));
- return -EIO;
- }
-
- return iommu_domain_identity_map(domain, start, end);
-}
-
static int md_domain_init(struct dmar_domain *domain, int guest_width);
static int __init si_domain_init(int hw)
@@ -2882,7 +2674,8 @@ static int __init si_domain_init(int hw)
for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
ret = iommu_domain_identity_map(si_domain,
- PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
+ mm_to_dma_pfn(start_pfn),
+ mm_to_dma_pfn(end_pfn));
if (ret)
return ret;
}
@@ -2911,17 +2704,6 @@ static int __init si_domain_init(int hw)
return 0;
}
-static int identity_mapping(struct device *dev)
-{
- struct device_domain_info *info;
-
- info = dev->archdata.iommu;
- if (info)
- return (info->domain == si_domain);
-
- return 0;
-}
-
static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
{
struct dmar_domain *ndomain;
@@ -3048,31 +2830,6 @@ static int device_def_domain_type(struct device *dev)
if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
return IOMMU_DOMAIN_IDENTITY;
-
- /*
- * We want to start off with all devices in the 1:1 domain, and
- * take them out later if we find they can't access all of memory.
- *
- * However, we can't do this for PCI devices behind bridges,
- * because all PCI devices behind the same bridge will end up
- * with the same source-id on their transactions.
- *
- * Practically speaking, we can't change things around for these
- * devices at run-time, because we can't be sure there'll be no
- * DMA transactions in flight for any of their siblings.
- *
- * So PCI devices (unless they're on the root bus) as well as
- * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
- * the 1:1 domain, just in _case_ one of their siblings turns out
- * not to be able to map all of memory.
- */
- if (!pci_is_pcie(pdev)) {
- if (!pci_is_root_bus(pdev->bus))
- return IOMMU_DOMAIN_DMA;
- if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
- return IOMMU_DOMAIN_DMA;
- } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
- return IOMMU_DOMAIN_DMA;
}
return 0;
@@ -3297,6 +3054,85 @@ out_unmap:
return ret;
}
+#ifdef CONFIG_INTEL_IOMMU_SVM
+static ioasid_t intel_vcmd_ioasid_alloc(ioasid_t min, ioasid_t max, void *data)
+{
+ struct intel_iommu *iommu = data;
+ ioasid_t ioasid;
+
+ if (!iommu)
+ return INVALID_IOASID;
+ /*
+ * VT-d virtual command interface always uses the full 20 bit
+ * PASID range. Host can partition guest PASID range based on
+ * policies but it is out of guest's control.
+ */
+ if (min < PASID_MIN || max > intel_pasid_max_id)
+ return INVALID_IOASID;
+
+ if (vcmd_alloc_pasid(iommu, &ioasid))
+ return INVALID_IOASID;
+
+ return ioasid;
+}
+
+static void intel_vcmd_ioasid_free(ioasid_t ioasid, void *data)
+{
+ struct intel_iommu *iommu = data;
+
+ if (!iommu)
+ return;
+ /*
+ * Sanity check the ioasid owner is done at upper layer, e.g. VFIO
+ * We can only free the PASID when all the devices are unbound.
+ */
+ if (ioasid_find(NULL, ioasid, NULL)) {
+ pr_alert("Cannot free active IOASID %d\n", ioasid);
+ return;
+ }
+ vcmd_free_pasid(iommu, ioasid);
+}
+
+static void register_pasid_allocator(struct intel_iommu *iommu)
+{
+ /*
+ * If we are running in the host, no need for custom allocator
+ * in that PASIDs are allocated from the host system-wide.
+ */
+ if (!cap_caching_mode(iommu->cap))
+ return;
+
+ if (!sm_supported(iommu)) {
+ pr_warn("VT-d Scalable Mode not enabled, no PASID allocation\n");
+ return;
+ }
+
+ /*
+ * Register a custom PASID allocator if we are running in a guest,
+ * guest PASID must be obtained via virtual command interface.
+ * There can be multiple vIOMMUs in each guest but only one allocator
+ * is active. All vIOMMU allocators will eventually be calling the same
+ * host allocator.
+ */
+ if (!ecap_vcs(iommu->ecap) || !vccap_pasid(iommu->vccap))
+ return;
+
+ pr_info("Register custom PASID allocator\n");
+ iommu->pasid_allocator.alloc = intel_vcmd_ioasid_alloc;
+ iommu->pasid_allocator.free = intel_vcmd_ioasid_free;
+ iommu->pasid_allocator.pdata = (void *)iommu;
+ if (ioasid_register_allocator(&iommu->pasid_allocator)) {
+ pr_warn("Custom PASID allocator failed, scalable mode disabled\n");
+ /*
+ * Disable scalable mode on this IOMMU if there
+ * is no custom allocator. Mixing SM capable vIOMMU
+ * and non-SM vIOMMU are not supported.
+ */
+ intel_iommu_sm = 0;
+ }
+}
+#endif
+
static int __init init_dmars(void)
{
struct dmar_drhd_unit *drhd;
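register_pasid_allocator() only kicks in when caching mode indicates the driver runs under a vIOMMU: PASIDs then have to come from the host via the virtual command interface, so a custom allocator with alloc/free hooks replaces the default system-wide one. A user-space sketch of that pluggable-allocator pattern; the structure is loosely modeled on the ioasid allocator ops and the forwarding to the host is faked:

#include <stdio.h>

typedef unsigned int ioasid_t;
#define INVALID_IOASID ((ioasid_t)-1)

/* Pluggable allocator: alloc/free hooks plus private data. */
struct pasid_allocator {
    ioasid_t (*alloc)(ioasid_t min, ioasid_t max, void *data);
    void (*free)(ioasid_t pasid, void *data);
    void *data;
};

static ioasid_t next_pasid = 2;

/* Host-style: hand out PASIDs locally. */
static ioasid_t host_alloc(ioasid_t min, ioasid_t max, void *data)
{
    (void)data;
    return (next_pasid >= min && next_pasid <= max) ? next_pasid++ : INVALID_IOASID;
}

/* Guest-style: a caching-mode vIOMMU would forward this to the host via
 * the virtual command registers; the forwarding is faked here. */
static ioasid_t vcmd_alloc(ioasid_t min, ioasid_t max, void *data)
{
    printf("vcmd: requesting PASID in [%u, 0x%x] from host\n", min, max);
    return host_alloc(min, max, data);
}

static void noop_free(ioasid_t pasid, void *data) { (void)pasid; (void)data; }

int main(void)
{
    struct pasid_allocator alloc = { host_alloc, noop_free, NULL };

    printf("host PASID:  %u\n", alloc.alloc(1, 0xfffff, alloc.data));

    alloc.alloc = vcmd_alloc;   /* what registering a custom allocator switches to */
    printf("guest PASID: %u\n", alloc.alloc(1, 0xfffff, alloc.data));
    return 0;
}
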
@@ -3414,6 +3250,9 @@ static int __init init_dmars(void)
*/
for_each_active_iommu(iommu, drhd) {
iommu_flush_write_buffer(iommu);
+#ifdef CONFIG_INTEL_IOMMU_SVM
+ register_pasid_allocator(iommu);
+#endif
iommu_set_root_entry(iommu);
iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
@@ -3531,100 +3370,6 @@ static unsigned long intel_alloc_iova(struct device *dev,
return iova_pfn;
}
-static struct dmar_domain *get_private_domain_for_dev(struct device *dev)
-{
- struct dmar_domain *domain, *tmp;
- struct dmar_rmrr_unit *rmrr;
- struct device *i_dev;
- int i, ret;
-
- /* Device shouldn't be attached by any domains. */
- domain = find_domain(dev);
- if (domain)
- return NULL;
-
- domain = find_or_alloc_domain(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
- if (!domain)
- goto out;
-
- /* We have a new domain - setup possible RMRRs for the device */
- rcu_read_lock();
- for_each_rmrr_units(rmrr) {
- for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
- i, i_dev) {
- if (i_dev != dev)
- continue;
-
- ret = domain_prepare_identity_map(dev, domain,
- rmrr->base_address,
- rmrr->end_address);
- if (ret)
- dev_err(dev, "Mapping reserved region failed\n");
- }
- }
- rcu_read_unlock();
-
- tmp = set_domain_for_dev(dev, domain);
- if (!tmp || domain != tmp) {
- domain_exit(domain);
- domain = tmp;
- }
-
-out:
- if (!domain)
- dev_err(dev, "Allocating domain failed\n");
- else
- domain->domain.type = IOMMU_DOMAIN_DMA;
-
- return domain;
-}
-
-/* Check if the dev needs to go through non-identity map and unmap process.*/
-static bool iommu_need_mapping(struct device *dev)
-{
- int ret;
-
- if (iommu_dummy(dev))
- return false;
-
- if (unlikely(attach_deferred(dev)))
- do_deferred_attach(dev);
-
- ret = identity_mapping(dev);
- if (ret) {
- u64 dma_mask = *dev->dma_mask;
-
- if (dev->coherent_dma_mask && dev->coherent_dma_mask < dma_mask)
- dma_mask = dev->coherent_dma_mask;
-
- if (dma_mask >= dma_direct_get_required_mask(dev))
- return false;
-
- /*
- * 32 bit DMA is removed from si_domain and fall back to
- * non-identity mapping.
- */
- dmar_remove_one_dev_info(dev);
- ret = iommu_request_dma_domain_for_dev(dev);
- if (ret) {
- struct iommu_domain *domain;
- struct dmar_domain *dmar_domain;
-
- domain = iommu_get_domain_for_dev(dev);
- if (domain) {
- dmar_domain = to_dmar_domain(domain);
- dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
- }
- dmar_remove_one_dev_info(dev);
- get_private_domain_for_dev(dev);
- }
-
- dev_info(dev, "32bit DMA uses non-identity mapping\n");
- }
-
- return true;
-}
-
static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
size_t size, int dir, u64 dma_mask)
{
@@ -3638,6 +3383,9 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
BUG_ON(dir == DMA_NONE);
+ if (unlikely(attach_deferred(dev)))
+ do_deferred_attach(dev);
+
domain = find_domain(dev);
if (!domain)
return DMA_MAPPING_ERROR;
@@ -3689,20 +3437,15 @@ static dma_addr_t intel_map_page(struct device *dev, struct page *page,
enum dma_data_direction dir,
unsigned long attrs)
{
- if (iommu_need_mapping(dev))
- return __intel_map_single(dev, page_to_phys(page) + offset,
- size, dir, *dev->dma_mask);
- return dma_direct_map_page(dev, page, offset, size, dir, attrs);
+ return __intel_map_single(dev, page_to_phys(page) + offset,
+ size, dir, *dev->dma_mask);
}
static dma_addr_t intel_map_resource(struct device *dev, phys_addr_t phys_addr,
size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
- if (iommu_need_mapping(dev))
- return __intel_map_single(dev, phys_addr, size, dir,
- *dev->dma_mask);
- return dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
+ return __intel_map_single(dev, phys_addr, size, dir, *dev->dma_mask);
}
static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
@@ -3753,17 +3496,13 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
- if (iommu_need_mapping(dev))
- intel_unmap(dev, dev_addr, size);
- else
- dma_direct_unmap_page(dev, dev_addr, size, dir, attrs);
+ intel_unmap(dev, dev_addr, size);
}
static void intel_unmap_resource(struct device *dev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
- if (iommu_need_mapping(dev))
- intel_unmap(dev, dev_addr, size);
+ intel_unmap(dev, dev_addr, size);
}
static void *intel_alloc_coherent(struct device *dev, size_t size,
@@ -3773,8 +3512,8 @@ static void *intel_alloc_coherent(struct device *dev, size_t size,
struct page *page = NULL;
int order;
- if (!iommu_need_mapping(dev))
- return dma_direct_alloc(dev, size, dma_handle, flags, attrs);
+ if (unlikely(attach_deferred(dev)))
+ do_deferred_attach(dev);
size = PAGE_ALIGN(size);
order = get_order(size);
@@ -3809,9 +3548,6 @@ static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
int order;
struct page *page = virt_to_page(vaddr);
- if (!iommu_need_mapping(dev))
- return dma_direct_free(dev, size, vaddr, dma_handle, attrs);
-
size = PAGE_ALIGN(size);
order = get_order(size);
@@ -3829,9 +3565,6 @@ static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
struct scatterlist *sg;
int i;
- if (!iommu_need_mapping(dev))
- return dma_direct_unmap_sg(dev, sglist, nelems, dir, attrs);
-
for_each_sg(sglist, sg, nelems, i) {
nrpages += aligned_nrpages(sg_dma_address(sg), sg_dma_len(sg));
}
@@ -3855,8 +3588,9 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
struct intel_iommu *iommu;
BUG_ON(dir == DMA_NONE);
- if (!iommu_need_mapping(dev))
- return dma_direct_map_sg(dev, sglist, nelems, dir, attrs);
+
+ if (unlikely(attach_deferred(dev)))
+ do_deferred_attach(dev);
domain = find_domain(dev);
if (!domain)
@@ -3903,8 +3637,6 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
static u64 intel_get_required_mask(struct device *dev)
{
- if (!iommu_need_mapping(dev))
- return dma_direct_get_required_mask(dev);
return DMA_BIT_MASK(32);
}
@@ -4813,58 +4545,37 @@ static int intel_iommu_memory_notifier(struct notifier_block *nb,
unsigned long val, void *v)
{
struct memory_notify *mhp = v;
- unsigned long long start, end;
- unsigned long start_vpfn, last_vpfn;
+ unsigned long start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
+ unsigned long last_vpfn = mm_to_dma_pfn(mhp->start_pfn +
+ mhp->nr_pages - 1);
switch (val) {
case MEM_GOING_ONLINE:
- start = mhp->start_pfn << PAGE_SHIFT;
- end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
- if (iommu_domain_identity_map(si_domain, start, end)) {
- pr_warn("Failed to build identity map for [%llx-%llx]\n",
- start, end);
+ if (iommu_domain_identity_map(si_domain,
+ start_vpfn, last_vpfn)) {
+ pr_warn("Failed to build identity map for [%lx-%lx]\n",
+ start_vpfn, last_vpfn);
return NOTIFY_BAD;
}
break;
case MEM_OFFLINE:
case MEM_CANCEL_ONLINE:
- start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
- last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
- while (start_vpfn <= last_vpfn) {
- struct iova *iova;
+ {
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu;
struct page *freelist;
- iova = find_iova(&si_domain->iovad, start_vpfn);
- if (iova == NULL) {
- pr_debug("Failed get IOVA for PFN %lx\n",
- start_vpfn);
- break;
- }
-
- iova = split_and_remove_iova(&si_domain->iovad, iova,
- start_vpfn, last_vpfn);
- if (iova == NULL) {
- pr_warn("Failed to split IOVA PFN [%lx-%lx]\n",
- start_vpfn, last_vpfn);
- return NOTIFY_BAD;
- }
-
- freelist = domain_unmap(si_domain, iova->pfn_lo,
- iova->pfn_hi);
+ freelist = domain_unmap(si_domain,
+ start_vpfn, last_vpfn);
rcu_read_lock();
for_each_active_iommu(iommu, drhd)
iommu_flush_iotlb_psi(iommu, si_domain,
- iova->pfn_lo, iova_size(iova),
+ start_vpfn, mhp->nr_pages,
!freelist, 0);
rcu_read_unlock();
dma_free_pagelist(freelist);
-
- start_vpfn = iova->pfn_hi + 1;
- free_iova_mem(iova);
}
break;
}
@@ -4892,8 +4603,9 @@ static void free_all_cpu_cached_iovas(unsigned int cpu)
for (did = 0; did < cap_ndoms(iommu->cap); did++) {
domain = get_iommu_domain(iommu, (u16)did);
- if (!domain)
+ if (!domain || domain->domain.type != IOMMU_DOMAIN_DMA)
continue;
+
free_cpu_cached_iovas(cpu, &domain->iovad);
}
}
@@ -5186,18 +4898,6 @@ int __init intel_iommu_init(void)
}
up_write(&dmar_global_lock);
-#if defined(CONFIG_X86) && defined(CONFIG_SWIOTLB)
- /*
- * If the system has no untrusted device or the user has decided
- * to disable the bounce page mechanisms, we don't need swiotlb.
- * Mark this and the pre-allocated bounce pages will be released
- * later.
- */
- if (!has_untrusted_dev() || intel_no_bounce)
- swiotlb = 0;
-#endif
- dma_ops = &intel_dma_ops;
-
init_iommu_pm_ops();
down_read(&dmar_global_lock);
@@ -5283,10 +4983,11 @@ static void __dmar_remove_one_dev_info(struct device_domain_info *info)
if (info->dev) {
if (dev_is_pci(info->dev) && sm_supported(iommu))
intel_pasid_tear_down_entry(iommu, info->dev,
- PASID_RID2PASID);
+ PASID_RID2PASID, false);
iommu_disable_dev_iotlb(info);
- domain_context_clear(iommu, info->dev);
+ if (!dev_is_real_dma_subdevice(info->dev))
+ domain_context_clear(iommu, info->dev);
intel_pasid_free_table(info->dev);
}
@@ -5296,12 +4997,6 @@ static void __dmar_remove_one_dev_info(struct device_domain_info *info)
domain_detach_iommu(domain, iommu);
spin_unlock_irqrestore(&iommu->lock, flags);
- /* free the private domain */
- if (domain->flags & DOMAIN_FLAG_LOSE_CHILDREN &&
- !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
- list_empty(&domain->devices))
- domain_exit(info->domain);
-
free_devinfo_mem(info);
}
@@ -5311,9 +5006,8 @@ static void dmar_remove_one_dev_info(struct device *dev)
unsigned long flags;
spin_lock_irqsave(&device_domain_lock, flags);
- info = dev->archdata.iommu;
- if (info && info != DEFER_DEVICE_DOMAIN_INFO
- && info != DUMMY_DEVICE_DOMAIN_INFO)
+ info = get_domain_info(dev);
+ if (info)
__dmar_remove_one_dev_info(info);
spin_unlock_irqrestore(&device_domain_lock, flags);
}
@@ -5322,9 +5016,6 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
{
int adjust_width;
- init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
- domain_reserve_special_ranges(domain);
-
/* calculate AGAW */
domain->gaw = guest_width;
adjust_width = guestwidth_to_adjustwidth(guest_width);
@@ -5343,11 +5034,21 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
return 0;
}
+static void intel_init_iova_domain(struct dmar_domain *dmar_domain)
+{
+ init_iova_domain(&dmar_domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
+ copy_reserved_iova(&reserved_iova_list, &dmar_domain->iovad);
+
+ if (!intel_iommu_strict &&
+ init_iova_flush_queue(&dmar_domain->iovad,
+ iommu_flush_iova, iova_entry_free))
+ pr_info("iova flush queue initialization failed\n");
+}
+
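The flush queue set up here enables deferred (lazy) IOTLB invalidation for DMA
domains; it is skipped when strict invalidation has been requested. A hedged
illustration of the boot-time switch, taken from the kernel's admin-guide
parameter documentation rather than from this patch:

    # default: lazy invalidation, the IOVA flush queue above is initialized
    intel_iommu=on

    # strict invalidation: sets intel_iommu_strict, no flush queue
    intel_iommu=on,strict
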
static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
{
struct dmar_domain *dmar_domain;
struct iommu_domain *domain;
- int ret;
switch (type) {
case IOMMU_DOMAIN_DMA:
@@ -5364,13 +5065,8 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
return NULL;
}
- if (!intel_iommu_strict && type == IOMMU_DOMAIN_DMA) {
- ret = init_iova_flush_queue(&dmar_domain->iovad,
- iommu_flush_iova,
- iova_entry_free);
- if (ret)
- pr_info("iova flush queue initialization failed\n");
- }
+ if (type == IOMMU_DOMAIN_DMA)
+ intel_init_iova_domain(dmar_domain);
domain_update_iommu_cap(dmar_domain);
@@ -5403,7 +5099,7 @@ static void intel_iommu_domain_free(struct iommu_domain *domain)
static inline bool
is_aux_domain(struct device *dev, struct iommu_domain *domain)
{
- struct device_domain_info *info = dev->archdata.iommu;
+ struct device_domain_info *info = get_domain_info(dev);
return info && info->auxd_enabled &&
domain->type == IOMMU_DOMAIN_UNMANAGED;
@@ -5412,7 +5108,7 @@ is_aux_domain(struct device *dev, struct iommu_domain *domain)
static void auxiliary_link_device(struct dmar_domain *domain,
struct device *dev)
{
- struct device_domain_info *info = dev->archdata.iommu;
+ struct device_domain_info *info = get_domain_info(dev);
assert_spin_locked(&device_domain_lock);
if (WARN_ON(!info))
@@ -5425,7 +5121,7 @@ static void auxiliary_link_device(struct dmar_domain *domain,
static void auxiliary_unlink_device(struct dmar_domain *domain,
struct device *dev)
{
- struct device_domain_info *info = dev->archdata.iommu;
+ struct device_domain_info *info = get_domain_info(dev);
assert_spin_locked(&device_domain_lock);
if (WARN_ON(!info))
@@ -5513,13 +5209,13 @@ static void aux_domain_remove_dev(struct dmar_domain *domain,
return;
spin_lock_irqsave(&device_domain_lock, flags);
- info = dev->archdata.iommu;
+ info = get_domain_info(dev);
iommu = info->iommu;
auxiliary_unlink_device(domain, dev);
spin_lock(&iommu->lock);
- intel_pasid_tear_down_entry(iommu, dev, domain->default_pasid);
+ intel_pasid_tear_down_entry(iommu, dev, domain->default_pasid, false);
domain_detach_iommu(domain, iommu);
spin_unlock(&iommu->lock);
@@ -5626,6 +5322,176 @@ static void intel_iommu_aux_detach_device(struct iommu_domain *domain,
aux_domain_remove_dev(to_dmar_domain(domain), dev);
}
+/*
+ * 2D array for converting and sanitizing IOMMU generic TLB granularity to
+ * VT-d granularity. Invalidation is typically included in the unmap operation
+ * as a result of DMA or VFIO unmap. However, for assigned devices guest
+ * owns the first level page tables. Invalidations of translation caches in the
+ * guest are trapped and passed down to the host.
+ *
+ * vIOMMU in the guest will only expose first level page tables, therefore
+ * we do not support IOTLB granularity for requests without PASID (second level).
+ *
+ * For example, to find the VT-d granularity encoding for IOTLB
+ * type and page selective granularity within PASID:
+ * X: indexed by iommu cache type
+ * Y: indexed by enum iommu_inv_granularity
+ * [IOMMU_CACHE_INV_TYPE_IOTLB][IOMMU_INV_GRANU_ADDR]
+ */
+
+static const int
+inv_type_granu_table[IOMMU_CACHE_INV_TYPE_NR][IOMMU_INV_GRANU_NR] = {
+ /*
+ * PASID based IOTLB invalidation: PASID selective (per PASID),
+ * page selective (address granularity)
+ */
+ {-EINVAL, QI_GRAN_NONG_PASID, QI_GRAN_PSI_PASID},
+ /* PASID based dev TLBs */
+ {-EINVAL, -EINVAL, QI_DEV_IOTLB_GRAN_PASID_SEL},
+ /* PASID cache */
+ {-EINVAL, -EINVAL, -EINVAL}
+};
+
+static inline int to_vtd_granularity(int type, int granu)
+{
+ return inv_type_granu_table[type][granu];
+}
+
+static inline u64 to_vtd_size(u64 granu_size, u64 nr_granules)
+{
+ u64 nr_pages = (granu_size * nr_granules) >> VTD_PAGE_SHIFT;
+
+ /* VT-d size is encoded as 2^size of 4K pages: 0 for 4K, 9 for 2MB, etc.
+ * The IOMMU cache invalidate API passes granu_size in bytes and the
+ * number of granules of that size that are contiguous in memory.
+ */
+ return order_base_2(nr_pages);
+}
+
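To make the encoding above concrete, here is a standalone user-space sketch
(not part of the patch) that mirrors the arithmetic of to_vtd_size(); the
kernel's order_base_2() is reimplemented locally for the example:

    #include <assert.h>
    #include <stdint.h>

    #define VTD_PAGE_SHIFT 12

    /* Local stand-in for the kernel's order_base_2() (valid for n >= 1). */
    static uint64_t order_base_2(uint64_t n)
    {
            uint64_t order = 0;

            while ((1ULL << order) < n)
                    order++;
            return order;
    }

    static uint64_t to_vtd_size(uint64_t granu_size, uint64_t nr_granules)
    {
            uint64_t nr_pages = (granu_size * nr_granules) >> VTD_PAGE_SHIFT;

            return order_base_2(nr_pages);
    }

    int main(void)
    {
            assert(to_vtd_size(4096, 1) == 0);      /* one 4K page      -> 2^0 */
            assert(to_vtd_size(4096, 512) == 9);    /* 512 x 4K = 2M    -> 2^9 */
            assert(to_vtd_size(2UL << 20, 1) == 9); /* one 2M granule   -> 2^9 */
            return 0;
    }
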
+#ifdef CONFIG_INTEL_IOMMU_SVM
+static int
+intel_iommu_sva_invalidate(struct iommu_domain *domain, struct device *dev,
+ struct iommu_cache_invalidate_info *inv_info)
+{
+ struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+ struct device_domain_info *info;
+ struct intel_iommu *iommu;
+ unsigned long flags;
+ int cache_type;
+ u8 bus, devfn;
+ u16 did, sid;
+ int ret = 0;
+ u64 size = 0;
+
+ if (!inv_info || !dmar_domain ||
+ inv_info->version != IOMMU_CACHE_INVALIDATE_INFO_VERSION_1)
+ return -EINVAL;
+
+ if (!dev || !dev_is_pci(dev))
+ return -ENODEV;
+
+ iommu = device_to_iommu(dev, &bus, &devfn);
+ if (!iommu)
+ return -ENODEV;
+
+ if (!(dmar_domain->flags & DOMAIN_FLAG_NESTING_MODE))
+ return -EINVAL;
+
+ spin_lock_irqsave(&device_domain_lock, flags);
+ spin_lock(&iommu->lock);
+ info = get_domain_info(dev);
+ if (!info) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ did = dmar_domain->iommu_did[iommu->seq_id];
+ sid = PCI_DEVID(bus, devfn);
+
+ /* Size is only valid in address selective invalidation */
+ if (inv_info->granularity != IOMMU_INV_GRANU_PASID)
+ size = to_vtd_size(inv_info->addr_info.granule_size,
+ inv_info->addr_info.nb_granules);
+
+ for_each_set_bit(cache_type,
+ (unsigned long *)&inv_info->cache,
+ IOMMU_CACHE_INV_TYPE_NR) {
+ int granu = 0;
+ u64 pasid = 0;
+
+ granu = to_vtd_granularity(cache_type, inv_info->granularity);
+ if (granu == -EINVAL) {
+ pr_err_ratelimited("Invalid cache type and granu combination %d/%d\n",
+ cache_type, inv_info->granularity);
+ break;
+ }
+
+ /*
+ * PASID is stored in different locations based on the
+ * granularity.
+ */
+ if (inv_info->granularity == IOMMU_INV_GRANU_PASID &&
+ (inv_info->pasid_info.flags & IOMMU_INV_PASID_FLAGS_PASID))
+ pasid = inv_info->pasid_info.pasid;
+ else if (inv_info->granularity == IOMMU_INV_GRANU_ADDR &&
+ (inv_info->addr_info.flags & IOMMU_INV_ADDR_FLAGS_PASID))
+ pasid = inv_info->addr_info.pasid;
+
+ switch (BIT(cache_type)) {
+ case IOMMU_CACHE_INV_TYPE_IOTLB:
+ if (inv_info->granularity == IOMMU_INV_GRANU_ADDR &&
+ size &&
+ (inv_info->addr_info.addr & ((BIT(VTD_PAGE_SHIFT + size)) - 1))) {
+ pr_err_ratelimited("Address out of range, 0x%llx, size order %llu\n",
+ inv_info->addr_info.addr, size);
+ ret = -ERANGE;
+ goto out_unlock;
+ }
+
+ /*
+ * If granu is PASID-selective, address is ignored.
+ * We use npages = -1 to indicate that.
+ */
+ qi_flush_piotlb(iommu, did, pasid,
+ mm_to_dma_pfn(inv_info->addr_info.addr),
+ (granu == QI_GRAN_NONG_PASID) ? -1 : 1 << size,
+ inv_info->addr_info.flags & IOMMU_INV_ADDR_FLAGS_LEAF);
+
+ /*
+ * Always flush device IOTLB if ATS is enabled. vIOMMU
+ * in the guest may assume IOTLB flush is inclusive,
+ * which is more efficient.
+ */
+ if (info->ats_enabled)
+ qi_flush_dev_iotlb_pasid(iommu, sid,
+ info->pfsid, pasid,
+ info->ats_qdep,
+ inv_info->addr_info.addr,
+ size, granu);
+ break;
+ case IOMMU_CACHE_INV_TYPE_DEV_IOTLB:
+ if (info->ats_enabled)
+ qi_flush_dev_iotlb_pasid(iommu, sid,
+ info->pfsid, pasid,
+ info->ats_qdep,
+ inv_info->addr_info.addr,
+ size, granu);
+ else
+ pr_warn_ratelimited("Passdown device IOTLB flush w/o ATS!\n");
+ break;
+ default:
+ dev_err_ratelimited(dev, "Unsupported IOMMU invalidation type %d\n",
+ cache_type);
+ ret = -EINVAL;
+ }
+ }
+out_unlock:
+ spin_unlock(&iommu->lock);
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+
+ return ret;
+}
+#endif
+
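For orientation, a hedged sketch of the descriptor a caller (for example VFIO
passing a guest invalidation down) might hand to this function for an
address-selective IOTLB flush of a single 2MB granule under a PASID. The field
and constant names are the ones dereferenced above (5.8-era uapi); the PASID
and address values are made up:

    #include <linux/iommu.h>   /* uapi header carrying the invalidation structs */

    static const struct iommu_cache_invalidate_info inv_info = {
            .version     = IOMMU_CACHE_INVALIDATE_INFO_VERSION_1,
            .cache       = IOMMU_CACHE_INV_TYPE_IOTLB,
            .granularity = IOMMU_INV_GRANU_ADDR,
            .addr_info   = {
                    .flags        = IOMMU_INV_ADDR_FLAGS_PASID,
                    .pasid        = 5,                  /* hypothetical host PASID */
                    .addr         = 0x40000000,         /* hypothetical guest IOVA */
                    .granule_size = 2UL * 1024 * 1024,  /* one 2MB granule */
                    .nb_granules  = 1,
            },
    };
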
static int intel_iommu_map(struct iommu_domain *domain,
unsigned long iova, phys_addr_t hpa,
size_t size, int iommu_prot, gfp_t gfp)
@@ -5781,78 +5647,22 @@ static bool intel_iommu_capable(enum iommu_cap cap)
return false;
}
-static int intel_iommu_add_device(struct device *dev)
+static struct iommu_device *intel_iommu_probe_device(struct device *dev)
{
- struct dmar_domain *dmar_domain;
- struct iommu_domain *domain;
struct intel_iommu *iommu;
- struct iommu_group *group;
u8 bus, devfn;
- int ret;
iommu = device_to_iommu(dev, &bus, &devfn);
if (!iommu)
- return -ENODEV;
-
- iommu_device_link(&iommu->iommu, dev);
+ return ERR_PTR(-ENODEV);
if (translation_pre_enabled(iommu))
dev->archdata.iommu = DEFER_DEVICE_DOMAIN_INFO;
- group = iommu_group_get_for_dev(dev);
-
- if (IS_ERR(group)) {
- ret = PTR_ERR(group);
- goto unlink;
- }
-
- iommu_group_put(group);
-
- domain = iommu_get_domain_for_dev(dev);
- dmar_domain = to_dmar_domain(domain);
- if (domain->type == IOMMU_DOMAIN_DMA) {
- if (device_def_domain_type(dev) == IOMMU_DOMAIN_IDENTITY) {
- ret = iommu_request_dm_for_dev(dev);
- if (ret) {
- dmar_remove_one_dev_info(dev);
- dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
- domain_add_dev_info(si_domain, dev);
- dev_info(dev,
- "Device uses a private identity domain.\n");
- }
- }
- } else {
- if (device_def_domain_type(dev) == IOMMU_DOMAIN_DMA) {
- ret = iommu_request_dma_domain_for_dev(dev);
- if (ret) {
- dmar_remove_one_dev_info(dev);
- dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
- if (!get_private_domain_for_dev(dev)) {
- dev_warn(dev,
- "Failed to get a private domain.\n");
- ret = -ENOMEM;
- goto unlink;
- }
-
- dev_info(dev,
- "Device uses a private dma domain.\n");
- }
- }
- }
-
- if (device_needs_bounce(dev)) {
- dev_info(dev, "Use Intel IOMMU bounce page dma_ops\n");
- set_dma_ops(dev, &bounce_dma_ops);
- }
-
- return 0;
-
-unlink:
- iommu_device_unlink(&iommu->iommu, dev);
- return ret;
+ return &iommu->iommu;
}
-static void intel_iommu_remove_device(struct device *dev)
+static void intel_iommu_release_device(struct device *dev)
{
struct intel_iommu *iommu;
u8 bus, devfn;
@@ -5863,11 +5673,19 @@ static void intel_iommu_remove_device(struct device *dev)
dmar_remove_one_dev_info(dev);
- iommu_group_remove_device(dev);
+ set_dma_ops(dev, NULL);
+}
- iommu_device_unlink(&iommu->iommu, dev);
+static void intel_iommu_probe_finalize(struct device *dev)
+{
+ struct iommu_domain *domain;
+ domain = iommu_get_domain_for_dev(dev);
if (device_needs_bounce(dev))
+ set_dma_ops(dev, &bounce_dma_ops);
+ else if (domain && domain->type == IOMMU_DOMAIN_DMA)
+ set_dma_ops(dev, &intel_dma_ops);
+ else
set_dma_ops(dev, NULL);
}
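The net effect of probe_finalize above, summarized as a sketch of the policy
(not new code in the patch):

    /*
     * dma_ops chosen once a device finishes probing:
     *
     *   device needing bounce buffers (untrusted) -> bounce_dma_ops
     *   device attached to a DMA-API domain       -> intel_dma_ops
     *   identity-mapped / passthrough device      -> NULL (dma-direct)
     */
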
@@ -5945,7 +5763,7 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
spin_lock(&iommu->lock);
ret = -EINVAL;
- info = dev->archdata.iommu;
+ info = get_domain_info(dev);
if (!info || !info->pasid_supported)
goto out;
@@ -6041,7 +5859,7 @@ static int intel_iommu_enable_auxd(struct device *dev)
return -ENODEV;
spin_lock_irqsave(&device_domain_lock, flags);
- info = dev->archdata.iommu;
+ info = get_domain_info(dev);
info->auxd_enabled = 1;
spin_unlock_irqrestore(&device_domain_lock, flags);
@@ -6054,7 +5872,7 @@ static int intel_iommu_disable_auxd(struct device *dev)
unsigned long flags;
spin_lock_irqsave(&device_domain_lock, flags);
- info = dev->archdata.iommu;
+ info = get_domain_info(dev);
if (!WARN_ON(!info))
info->auxd_enabled = 0;
spin_unlock_irqrestore(&device_domain_lock, flags);
@@ -6107,6 +5925,14 @@ intel_iommu_dev_has_feat(struct device *dev, enum iommu_dev_features feat)
return !!siov_find_pci_dvsec(to_pci_dev(dev));
}
+ if (feat == IOMMU_DEV_FEAT_SVA) {
+ struct device_domain_info *info = get_domain_info(dev);
+
+ return info && (info->iommu->flags & VTD_FLAG_SVM_CAPABLE) &&
+ info->pasid_supported && info->pri_supported &&
+ info->ats_supported;
+ }
+
return false;
}
@@ -6116,6 +5942,16 @@ intel_iommu_dev_enable_feat(struct device *dev, enum iommu_dev_features feat)
if (feat == IOMMU_DEV_FEAT_AUX)
return intel_iommu_enable_auxd(dev);
+ if (feat == IOMMU_DEV_FEAT_SVA) {
+ struct device_domain_info *info = get_domain_info(dev);
+
+ if (!info)
+ return -EINVAL;
+
+ if (info->iommu->flags & VTD_FLAG_SVM_CAPABLE)
+ return 0;
+ }
+
return -ENODEV;
}
@@ -6131,7 +5967,7 @@ intel_iommu_dev_disable_feat(struct device *dev, enum iommu_dev_features feat)
static bool
intel_iommu_dev_feat_enabled(struct device *dev, enum iommu_dev_features feat)
{
- struct device_domain_info *info = dev->archdata.iommu;
+ struct device_domain_info *info = get_domain_info(dev);
if (feat == IOMMU_DEV_FEAT_AUX)
return scalable_mode_support() && info && info->auxd_enabled;
@@ -6198,8 +6034,9 @@ const struct iommu_ops intel_iommu_ops = {
.map = intel_iommu_map,
.unmap = intel_iommu_unmap,
.iova_to_phys = intel_iommu_iova_to_phys,
- .add_device = intel_iommu_add_device,
- .remove_device = intel_iommu_remove_device,
+ .probe_device = intel_iommu_probe_device,
+ .probe_finalize = intel_iommu_probe_finalize,
+ .release_device = intel_iommu_release_device,
.get_resv_regions = intel_iommu_get_resv_regions,
.put_resv_regions = generic_iommu_put_resv_regions,
.apply_resv_region = intel_iommu_apply_resv_region,
@@ -6209,7 +6046,16 @@ const struct iommu_ops intel_iommu_ops = {
.dev_enable_feat = intel_iommu_dev_enable_feat,
.dev_disable_feat = intel_iommu_dev_disable_feat,
.is_attach_deferred = intel_iommu_is_attach_deferred,
+ .def_domain_type = device_def_domain_type,
.pgsize_bitmap = INTEL_IOMMU_PGSIZES,
+#ifdef CONFIG_INTEL_IOMMU_SVM
+ .cache_invalidate = intel_iommu_sva_invalidate,
+ .sva_bind_gpasid = intel_svm_bind_gpasid,
+ .sva_unbind_gpasid = intel_svm_unbind_gpasid,
+ .sva_bind = intel_svm_bind,
+ .sva_unbind = intel_svm_unbind,
+ .sva_get_pasid = intel_svm_get_pasid,
+#endif
};
static void quirk_iommu_igfx(struct pci_dev *dev)
diff --git a/drivers/iommu/intel-pasid.c b/drivers/iommu/intel-pasid.c
index 22b30f10b396..c81f0f17c6ba 100644
--- a/drivers/iommu/intel-pasid.c
+++ b/drivers/iommu/intel-pasid.c
@@ -27,6 +27,63 @@
static DEFINE_SPINLOCK(pasid_lock);
u32 intel_pasid_max_id = PASID_MAX;
+int vcmd_alloc_pasid(struct intel_iommu *iommu, unsigned int *pasid)
+{
+ unsigned long flags;
+ u8 status_code;
+ int ret = 0;
+ u64 res;
+
+ raw_spin_lock_irqsave(&iommu->register_lock, flags);
+ dmar_writeq(iommu->reg + DMAR_VCMD_REG, VCMD_CMD_ALLOC);
+ IOMMU_WAIT_OP(iommu, DMAR_VCRSP_REG, dmar_readq,
+ !(res & VCMD_VRSP_IP), res);
+ raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
+
+ status_code = VCMD_VRSP_SC(res);
+ switch (status_code) {
+ case VCMD_VRSP_SC_SUCCESS:
+ *pasid = VCMD_VRSP_RESULT_PASID(res);
+ break;
+ case VCMD_VRSP_SC_NO_PASID_AVAIL:
+ pr_info("IOMMU: %s: No PASID available\n", iommu->name);
+ ret = -ENOSPC;
+ break;
+ default:
+ ret = -ENODEV;
+ pr_warn("IOMMU: %s: Unexpected error code %d\n",
+ iommu->name, status_code);
+ }
+
+ return ret;
+}
+
+void vcmd_free_pasid(struct intel_iommu *iommu, unsigned int pasid)
+{
+ unsigned long flags;
+ u8 status_code;
+ u64 res;
+
+ raw_spin_lock_irqsave(&iommu->register_lock, flags);
+ dmar_writeq(iommu->reg + DMAR_VCMD_REG,
+ VCMD_CMD_OPERAND(pasid) | VCMD_CMD_FREE);
+ IOMMU_WAIT_OP(iommu, DMAR_VCRSP_REG, dmar_readq,
+ !(res & VCMD_VRSP_IP), res);
+ raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
+
+ status_code = VCMD_VRSP_SC(res);
+ switch (status_code) {
+ case VCMD_VRSP_SC_SUCCESS:
+ break;
+ case VCMD_VRSP_SC_INVALID_PASID:
+ pr_info("IOMMU: %s: Invalid PASID\n", iommu->name);
+ break;
+ default:
+ pr_warn("IOMMU: %s: Unexpected error code %d\n",
+ iommu->name, status_code);
+ }
+}
+
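A hedged sketch of how a caller might pair the two virtual-command helpers
above; the caller below is hypothetical and assumes the driver-internal
struct intel_iommu from linux/intel-iommu.h:

    /* Hypothetical caller: round-trip a PASID through the host's allocator. */
    static int example_vcmd_roundtrip(struct intel_iommu *iommu)
    {
            unsigned int pasid;
            int ret;

            ret = vcmd_alloc_pasid(iommu, &pasid);
            if (ret)
                    return ret;     /* e.g. -ENOSPC if the host is out of PASIDs */

            /* ... program the PASID into a PASID table entry here ... */

            vcmd_free_pasid(iommu, pasid);
            return 0;
    }
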
/*
* Per device pasid table management:
*/
@@ -94,7 +151,7 @@ int intel_pasid_alloc_table(struct device *dev)
int size;
might_sleep();
- info = dev->archdata.iommu;
+ info = get_domain_info(dev);
if (WARN_ON(!info || !dev_is_pci(dev) || info->pasid_table))
return -EINVAL;
@@ -141,7 +198,7 @@ void intel_pasid_free_table(struct device *dev)
struct pasid_entry *table;
int i, max_pde;
- info = dev->archdata.iommu;
+ info = get_domain_info(dev);
if (!info || !dev_is_pci(dev) || !info->pasid_table)
return;
@@ -167,7 +224,7 @@ struct pasid_table *intel_pasid_get_table(struct device *dev)
{
struct device_domain_info *info;
- info = dev->archdata.iommu;
+ info = get_domain_info(dev);
if (!info)
return NULL;
@@ -178,7 +235,7 @@ int intel_pasid_get_dev_max_id(struct device *dev)
{
struct device_domain_info *info;
- info = dev->archdata.iommu;
+ info = get_domain_info(dev);
if (!info || !info->pasid_table)
return 0;
@@ -199,7 +256,7 @@ struct pasid_entry *intel_pasid_get_entry(struct device *dev, int pasid)
return NULL;
dir = pasid_table->table;
- info = dev->archdata.iommu;
+ info = get_domain_info(dev);
dir_index = pasid >> PASID_PDE_SHIFT;
index = pasid & PASID_PTE_MASK;
@@ -235,7 +292,20 @@ static inline void pasid_clear_entry(struct pasid_entry *pe)
WRITE_ONCE(pe->val[7], 0);
}
-static void intel_pasid_clear_entry(struct device *dev, int pasid)
+static inline void pasid_clear_entry_with_fpd(struct pasid_entry *pe)
+{
+ WRITE_ONCE(pe->val[0], PASID_PTE_FPD);
+ WRITE_ONCE(pe->val[1], 0);
+ WRITE_ONCE(pe->val[2], 0);
+ WRITE_ONCE(pe->val[3], 0);
+ WRITE_ONCE(pe->val[4], 0);
+ WRITE_ONCE(pe->val[5], 0);
+ WRITE_ONCE(pe->val[6], 0);
+ WRITE_ONCE(pe->val[7], 0);
+}
+
+static void
+intel_pasid_clear_entry(struct device *dev, int pasid, bool fault_ignore)
{
struct pasid_entry *pe;
@@ -243,7 +313,10 @@ static void intel_pasid_clear_entry(struct device *dev, int pasid)
if (WARN_ON(!pe))
return;
- pasid_clear_entry(pe);
+ if (fault_ignore && pasid_pte_is_present(pe))
+ pasid_clear_entry_with_fpd(pe);
+ else
+ pasid_clear_entry(pe);
}
static inline void pasid_set_bits(u64 *ptr, u64 mask, u64 bits)
@@ -359,18 +432,29 @@ pasid_set_flpm(struct pasid_entry *pe, u64 value)
pasid_set_bits(&pe->val[2], GENMASK_ULL(3, 2), value << 2);
}
+/*
+ * Setup the Extended Access Flag Enable (EAFE) field (Bit 135)
+ * of a scalable mode PASID entry.
+ */
+static inline void
+pasid_set_eafe(struct pasid_entry *pe)
+{
+ pasid_set_bits(&pe->val[2], 1 << 7, 1 << 7);
+}
+
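The mask above follows from the bit numbering of the 512-bit PASID entry: bit
135 falls in the third 64-bit word, at offset 135 - 128 = 7. A trivial
standalone check of that arithmetic (not part of the patch):

    #include <assert.h>

    int main(void)
    {
            assert(135 / 64 == 2); /* architectural bit 135 lives in pe->val[2] */
            assert(135 % 64 == 7); /* hence the 1 << 7 mask in pasid_set_eafe() */
            return 0;
    }
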
static void
pasid_cache_invalidation_with_pasid(struct intel_iommu *iommu,
u16 did, int pasid)
{
struct qi_desc desc;
- desc.qw0 = QI_PC_DID(did) | QI_PC_PASID_SEL | QI_PC_PASID(pasid);
+ desc.qw0 = QI_PC_DID(did) | QI_PC_GRAN(QI_PC_PASID_SEL) |
+ QI_PC_PASID(pasid) | QI_PC_TYPE;
desc.qw1 = 0;
desc.qw2 = 0;
desc.qw3 = 0;
- qi_submit_sync(&desc, iommu);
+ qi_submit_sync(iommu, &desc, 1, 0);
}
static void
@@ -384,7 +468,7 @@ iotlb_invalidation_with_pasid(struct intel_iommu *iommu, u16 did, u32 pasid)
desc.qw2 = 0;
desc.qw3 = 0;
- qi_submit_sync(&desc, iommu);
+ qi_submit_sync(iommu, &desc, 1, 0);
}
static void
@@ -394,7 +478,7 @@ devtlb_invalidation_with_pasid(struct intel_iommu *iommu,
struct device_domain_info *info;
u16 sid, qdep, pfsid;
- info = dev->archdata.iommu;
+ info = get_domain_info(dev);
if (!info || !info->ats_enabled)
return;
@@ -405,8 +489,8 @@ devtlb_invalidation_with_pasid(struct intel_iommu *iommu,
qi_flush_dev_iotlb(iommu, sid, pfsid, qdep, 0, 64 - VTD_PAGE_SHIFT);
}
-void intel_pasid_tear_down_entry(struct intel_iommu *iommu,
- struct device *dev, int pasid)
+void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
+ int pasid, bool fault_ignore)
{
struct pasid_entry *pte;
u16 did;
@@ -416,7 +500,7 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu,
return;
did = pasid_get_domain_id(pte);
- intel_pasid_clear_entry(dev, pasid);
+ intel_pasid_clear_entry(dev, pasid, fault_ignore);
if (!ecap_coherent(iommu->ecap))
clflush_cache_range(pte, sizeof(*pte));
@@ -492,7 +576,7 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu,
pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
/* Setup Present and PASID Granular Transfer Type: */
- pasid_set_translation_type(pte, 1);
+ pasid_set_translation_type(pte, PASID_ENTRY_PGTT_FL_ONLY);
pasid_set_present(pte);
pasid_flush_caches(iommu, pte, pasid, did);
@@ -500,6 +584,25 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu,
}
/*
+ * Skip top levels of page tables for iommu which has less agaw
+ * than default. Unnecessary for PT mode.
+ */
+static inline int iommu_skip_agaw(struct dmar_domain *domain,
+ struct intel_iommu *iommu,
+ struct dma_pte **pgd)
+{
+ int agaw;
+
+ for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
+ *pgd = phys_to_virt(dma_pte_addr(*pgd));
+ if (!dma_pte_present(*pgd))
+ return -EINVAL;
+ }
+
+ return agaw;
+}
+
+/*
* Set up the scalable mode pasid entry for second only translation type.
*/
int intel_pasid_setup_second_level(struct intel_iommu *iommu,
@@ -522,17 +625,11 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
return -EINVAL;
}
- /*
- * Skip top levels of page tables for iommu which has less agaw
- * than default. Unnecessary for PT mode.
- */
pgd = domain->pgd;
- for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
- pgd = phys_to_virt(dma_pte_addr(pgd));
- if (!dma_pte_present(pgd)) {
- dev_err(dev, "Invalid domain page table\n");
- return -EINVAL;
- }
+ agaw = iommu_skip_agaw(domain, iommu, &pgd);
+ if (agaw < 0) {
+ dev_err(dev, "Invalid domain page table\n");
+ return -EINVAL;
}
pgd_val = virt_to_phys(pgd);
@@ -548,7 +645,7 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
pasid_set_domain_id(pte, did);
pasid_set_slptr(pte, pgd_val);
pasid_set_address_width(pte, agaw);
- pasid_set_translation_type(pte, 2);
+ pasid_set_translation_type(pte, PASID_ENTRY_PGTT_SL_ONLY);
pasid_set_fault_enable(pte);
pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
@@ -582,7 +679,7 @@ int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
pasid_clear_entry(pte);
pasid_set_domain_id(pte, did);
pasid_set_address_width(pte, iommu->agaw);
- pasid_set_translation_type(pte, 4);
+ pasid_set_translation_type(pte, PASID_ENTRY_PGTT_PT);
pasid_set_fault_enable(pte);
pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
@@ -596,3 +693,161 @@ int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
return 0;
}
+
+static int
+intel_pasid_setup_bind_data(struct intel_iommu *iommu, struct pasid_entry *pte,
+ struct iommu_gpasid_bind_data_vtd *pasid_data)
+{
+ /*
+ * Not all guest PASID table entry fields are passed down during bind,
+ * here we only set up the ones that are dependent on guest settings.
+ * Execution related bits such as NXE, SMEP are not supported.
+ * Other fields, such as snoop related, are set based on host needs
+ * regardless of guest settings.
+ */
+ if (pasid_data->flags & IOMMU_SVA_VTD_GPASID_SRE) {
+ if (!ecap_srs(iommu->ecap)) {
+ pr_err_ratelimited("No supervisor request support on %s\n",
+ iommu->name);
+ return -EINVAL;
+ }
+ pasid_set_sre(pte);
+ }
+
+ if (pasid_data->flags & IOMMU_SVA_VTD_GPASID_EAFE) {
+ if (!ecap_eafs(iommu->ecap)) {
+ pr_err_ratelimited("No extended access flag support on %s\n",
+ iommu->name);
+ return -EINVAL;
+ }
+ pasid_set_eafe(pte);
+ }
+
+ /*
+ * Memory type is only applicable to devices inside processor coherent
+ * domain. Will add MTS support once coherent devices are available.
+ */
+ if (pasid_data->flags & IOMMU_SVA_VTD_GPASID_MTS_MASK) {
+ pr_warn_ratelimited("No memory type support %s\n",
+ iommu->name);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * intel_pasid_setup_nested() - Set up PASID entry for nested translation.
+ * This could be used for guest shared virtual address. In this case, the
+ * first level page tables are used for GVA-GPA translation in the guest,
+ * while the second level page tables are used for GPA-HPA translation.
+ *
+ * @iommu: IOMMU which the device belong to
+ * @dev: Device to be set up for translation
+ * @gpgd: FLPTPTR: First Level Page translation pointer in GPA
+ * @pasid: PASID to be programmed in the device PASID table
+ * @pasid_data: Additional PASID info from the guest bind request
+ * @domain: Domain info for setting up second level page tables
+ * @addr_width: Address width of the first level (guest)
+ */
+int intel_pasid_setup_nested(struct intel_iommu *iommu, struct device *dev,
+ pgd_t *gpgd, int pasid,
+ struct iommu_gpasid_bind_data_vtd *pasid_data,
+ struct dmar_domain *domain, int addr_width)
+{
+ struct pasid_entry *pte;
+ struct dma_pte *pgd;
+ int ret = 0;
+ u64 pgd_val;
+ int agaw;
+ u16 did;
+
+ if (!ecap_nest(iommu->ecap)) {
+ pr_err_ratelimited("IOMMU: %s: No nested translation support\n",
+ iommu->name);
+ return -EINVAL;
+ }
+
+ if (!(domain->flags & DOMAIN_FLAG_NESTING_MODE)) {
+ pr_err_ratelimited("Domain is not in nesting mode, %x\n",
+ domain->flags);
+ return -EINVAL;
+ }
+
+ pte = intel_pasid_get_entry(dev, pasid);
+ if (WARN_ON(!pte))
+ return -EINVAL;
+
+ /*
+ * Caller must ensure the PASID entry is not in use, i.e. must not bind the
+ * same PASID to the same device twice.
+ */
+ if (pasid_pte_is_present(pte))
+ return -EBUSY;
+
+ pasid_clear_entry(pte);
+
+ /* Sanity checking performed by the caller to make sure the address
+ * width matches in two dimensions:
+ * 1. CPU vs. IOMMU
+ * 2. Guest vs. Host.
+ */
+ switch (addr_width) {
+#ifdef CONFIG_X86
+ case ADDR_WIDTH_5LEVEL:
+ if (!cpu_feature_enabled(X86_FEATURE_LA57) ||
+ !cap_5lp_support(iommu->cap)) {
+ dev_err_ratelimited(dev,
+ "5-level paging not supported\n");
+ return -EINVAL;
+ }
+
+ pasid_set_flpm(pte, 1);
+ break;
+#endif
+ case ADDR_WIDTH_4LEVEL:
+ pasid_set_flpm(pte, 0);
+ break;
+ default:
+ dev_err_ratelimited(dev, "Invalid guest address width %d\n",
+ addr_width);
+ return -EINVAL;
+ }
+
+ /* First level PGD is in GPA, must be supported by the second level */
+ if ((uintptr_t)gpgd > domain->max_addr) {
+ dev_err_ratelimited(dev,
+ "Guest PGD %lx not supported, max %llx\n",
+ (uintptr_t)gpgd, domain->max_addr);
+ return -EINVAL;
+ }
+ pasid_set_flptr(pte, (uintptr_t)gpgd);
+
+ ret = intel_pasid_setup_bind_data(iommu, pte, pasid_data);
+ if (ret)
+ return ret;
+
+ /* Setup the second level based on the given domain */
+ pgd = domain->pgd;
+
+ agaw = iommu_skip_agaw(domain, iommu, &pgd);
+ if (agaw < 0) {
+ dev_err_ratelimited(dev, "Invalid domain page table\n");
+ return -EINVAL;
+ }
+ pgd_val = virt_to_phys(pgd);
+ pasid_set_slptr(pte, pgd_val);
+ pasid_set_fault_enable(pte);
+
+ did = domain->iommu_did[iommu->seq_id];
+ pasid_set_domain_id(pte, did);
+
+ pasid_set_address_width(pte, agaw);
+ pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
+
+ pasid_set_translation_type(pte, PASID_ENTRY_PGTT_NESTED);
+ pasid_set_present(pte);
+ pasid_flush_caches(iommu, pte, pasid, did);
+
+ return ret;
+}
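In the nested configuration set up here, a DMA address from the guest is
translated twice; a sketch of the walk performed once the entry is present:

    guest VA --(first level: gpgd, programmed via pasid_set_flptr)--> guest PA
    guest PA --(second level: domain->pgd, via pasid_set_slptr)-----> host PA
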
diff --git a/drivers/iommu/intel-pasid.h b/drivers/iommu/intel-pasid.h
index 92de6df24ccb..c5318d40e0fa 100644
--- a/drivers/iommu/intel-pasid.h
+++ b/drivers/iommu/intel-pasid.h
@@ -15,6 +15,7 @@
#define PASID_MAX 0x100000
#define PASID_PTE_MASK 0x3F
#define PASID_PTE_PRESENT 1
+#define PASID_PTE_FPD 2
#define PDE_PFN_MASK PAGE_MASK
#define PASID_PDE_SHIFT 6
#define MAX_NR_PASID_BITS 20
@@ -23,6 +24,16 @@
#define is_pasid_enabled(entry) (((entry)->lo >> 3) & 0x1)
#define get_pasid_dir_size(entry) (1 << ((((entry)->lo >> 9) & 0x7) + 7))
+/* Virtual command interface for enlightened pasid management. */
+#define VCMD_CMD_ALLOC 0x1
+#define VCMD_CMD_FREE 0x2
+#define VCMD_VRSP_IP 0x1
+#define VCMD_VRSP_SC(e) (((e) >> 1) & 0x3)
+#define VCMD_VRSP_SC_SUCCESS 0
+#define VCMD_VRSP_SC_NO_PASID_AVAIL 1
+#define VCMD_VRSP_SC_INVALID_PASID 1
+#define VCMD_VRSP_RESULT_PASID(e) (((e) >> 8) & 0xfffff)
+#define VCMD_CMD_OPERAND(e) ((e) << 8)
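A standalone sketch (not part of the patch) of how a 64-bit virtual command
response decomposes under the macros above, using a made-up response value:

    #include <assert.h>
    #include <stdint.h>

    #define VCMD_VRSP_IP                 0x1
    #define VCMD_VRSP_SC(e)              (((e) >> 1) & 0x3)
    #define VCMD_VRSP_SC_SUCCESS         0
    #define VCMD_VRSP_RESULT_PASID(e)    (((e) >> 8) & 0xfffff)

    int main(void)
    {
            /* Hypothetical response: not in progress, success, PASID 0x42. */
            uint64_t res = (uint64_t)0x42 << 8;

            assert(!(res & VCMD_VRSP_IP));
            assert(VCMD_VRSP_SC(res) == VCMD_VRSP_SC_SUCCESS);
            assert(VCMD_VRSP_RESULT_PASID(res) == 0x42);
            return 0;
    }
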
/*
* Domain ID reserved for pasid entries programmed for first-level
* only and pass-through transfer modes.
@@ -36,6 +47,7 @@
* to vmalloc or even module mappings.
*/
#define PASID_FLAG_SUPERVISOR_MODE BIT(0)
+#define PASID_FLAG_NESTED BIT(1)
/*
* The PASID_FLAG_FL5LP flag Indicates using 5-level paging for first-
@@ -51,6 +63,11 @@ struct pasid_entry {
u64 val[8];
};
+#define PASID_ENTRY_PGTT_FL_ONLY (1)
+#define PASID_ENTRY_PGTT_SL_ONLY (2)
+#define PASID_ENTRY_PGTT_NESTED (3)
+#define PASID_ENTRY_PGTT_PT (4)
+
/* The representative of a PASID table */
struct pasid_table {
void *table; /* pasid table pointer */
@@ -99,7 +116,13 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
struct dmar_domain *domain,
struct device *dev, int pasid);
+int intel_pasid_setup_nested(struct intel_iommu *iommu,
+ struct device *dev, pgd_t *pgd, int pasid,
+ struct iommu_gpasid_bind_data_vtd *pasid_data,
+ struct dmar_domain *domain, int addr_width);
void intel_pasid_tear_down_entry(struct intel_iommu *iommu,
- struct device *dev, int pasid);
-
+ struct device *dev, int pasid,
+ bool fault_ignore);
+int vcmd_alloc_pasid(struct intel_iommu *iommu, unsigned int *pasid);
+void vcmd_free_pasid(struct intel_iommu *iommu, unsigned int pasid);
#endif /* __INTEL_PASID_H */
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index 2998418f0a38..6c87c807a0ab 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -23,6 +23,7 @@
#include "intel-pasid.h"
static irqreturn_t prq_event_thread(int irq, void *d);
+static void intel_svm_drain_prq(struct device *dev, int pasid);
#define PRQ_ORDER 0
@@ -66,6 +67,8 @@ int intel_svm_enable_prq(struct intel_iommu *iommu)
dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
dmar_writeq(iommu->reg + DMAR_PQA_REG, virt_to_phys(iommu->prq) | PRQ_ORDER);
+ init_completion(&iommu->prq_complete);
+
return 0;
}
@@ -138,7 +141,7 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d
}
desc.qw2 = 0;
desc.qw3 = 0;
- qi_submit_sync(&desc, svm->iommu);
+ qi_submit_sync(svm->iommu, &desc, 1, 0);
if (sdev->dev_iotlb) {
desc.qw0 = QI_DEV_EIOTLB_PASID(svm->pasid) |
@@ -162,7 +165,7 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d
}
desc.qw2 = 0;
desc.qw3 = 0;
- qi_submit_sync(&desc, svm->iommu);
+ qi_submit_sync(svm->iommu, &desc, 1, 0);
}
}
@@ -206,10 +209,9 @@ static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
* *has* to handle gracefully without affecting other processes.
*/
rcu_read_lock();
- list_for_each_entry_rcu(sdev, &svm->devs, list) {
- intel_pasid_tear_down_entry(svm->iommu, sdev->dev, svm->pasid);
- intel_flush_svm_range_dev(svm, sdev, 0, -1, 0);
- }
+ list_for_each_entry_rcu(sdev, &svm->devs, list)
+ intel_pasid_tear_down_entry(svm->iommu, sdev->dev,
+ svm->pasid, true);
rcu_read_unlock();
}
@@ -226,13 +228,212 @@ static LIST_HEAD(global_svm_list);
list_for_each_entry((sdev), &(svm)->devs, list) \
if ((d) != (sdev)->dev) {} else
-int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_ops *ops)
+int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
+ struct iommu_gpasid_bind_data *data)
+{
+ struct intel_iommu *iommu = intel_svm_device_to_iommu(dev);
+ struct dmar_domain *dmar_domain;
+ struct intel_svm_dev *sdev;
+ struct intel_svm *svm;
+ int ret = 0;
+
+ if (WARN_ON(!iommu) || !data)
+ return -EINVAL;
+
+ if (data->version != IOMMU_GPASID_BIND_VERSION_1 ||
+ data->format != IOMMU_PASID_FORMAT_INTEL_VTD)
+ return -EINVAL;
+
+ if (!dev_is_pci(dev))
+ return -ENOTSUPP;
+
+ /* VT-d supports devices with full 20 bit PASIDs only */
+ if (pci_max_pasids(to_pci_dev(dev)) != PASID_MAX)
+ return -EINVAL;
+
+ /*
+ * We only check the host PASID range; we have no way to check the
+ * guest PASID range.
+ */
+ if (data->hpasid <= 0 || data->hpasid >= PASID_MAX)
+ return -EINVAL;
+
+ dmar_domain = to_dmar_domain(domain);
+
+ mutex_lock(&pasid_mutex);
+ svm = ioasid_find(NULL, data->hpasid, NULL);
+ if (IS_ERR(svm)) {
+ ret = PTR_ERR(svm);
+ goto out;
+ }
+
+ if (svm) {
+ /*
+ * If we found an svm for the PASID, there must be at least
+ * one device bound to it; otherwise the svm should have been freed.
+ */
+ if (WARN_ON(list_empty(&svm->devs))) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ for_each_svm_dev(sdev, svm, dev) {
+ /*
+ * For devices with aux domains, we should allow
+ * multiple bind calls with the same PASID and pdev.
+ */
+ if (iommu_dev_feature_enabled(dev,
+ IOMMU_DEV_FEAT_AUX)) {
+ sdev->users++;
+ } else {
+ dev_warn_ratelimited(dev,
+ "Already bound with PASID %u\n",
+ svm->pasid);
+ ret = -EBUSY;
+ }
+ goto out;
+ }
+ } else {
+ /* We come here when the PASID has never been bound to a device. */
+ svm = kzalloc(sizeof(*svm), GFP_KERNEL);
+ if (!svm) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ /* REVISIT: upper layer/VFIO can track the host process that binds
+ * the PASID. ioasid_set = mm might be sufficient for vfio to
+ * check pasid VMM ownership. We can drop the following line
+ * once VFIO and IOASID set check is in place.
+ */
+ svm->mm = get_task_mm(current);
+ svm->pasid = data->hpasid;
+ if (data->flags & IOMMU_SVA_GPASID_VAL) {
+ svm->gpasid = data->gpasid;
+ svm->flags |= SVM_FLAG_GUEST_PASID;
+ }
+ ioasid_set_data(data->hpasid, svm);
+ INIT_LIST_HEAD_RCU(&svm->devs);
+ mmput(svm->mm);
+ }
+ sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
+ if (!sdev) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ sdev->dev = dev;
+
+ /* Only count users if device has aux domains */
+ if (iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX))
+ sdev->users = 1;
+
+ /* Set up device context entry for PASID if not enabled already */
+ ret = intel_iommu_enable_pasid(iommu, sdev->dev);
+ if (ret) {
+ dev_err_ratelimited(dev, "Failed to enable PASID capability\n");
+ kfree(sdev);
+ goto out;
+ }
+
+ /*
+ * PASID table is per device for better security. Therefore, for
+ * each bind of a new device even with an existing PASID, we need to
+ * call the nested mode setup function here.
+ */
+ spin_lock(&iommu->lock);
+ ret = intel_pasid_setup_nested(iommu, dev,
+ (pgd_t *)(uintptr_t)data->gpgd,
+ data->hpasid, &data->vtd, dmar_domain,
+ data->addr_width);
+ spin_unlock(&iommu->lock);
+ if (ret) {
+ dev_err_ratelimited(dev, "Failed to set up PASID %llu in nested mode, Err %d\n",
+ data->hpasid, ret);
+ /*
+ * The PASID entry should be in a cleared state if nested mode
+ * setup failed, so we only need to clear the IOASID tracking
+ * data so that the free call will succeed.
+ */
+ kfree(sdev);
+ goto out;
+ }
+
+ svm->flags |= SVM_FLAG_GUEST_MODE;
+
+ init_rcu_head(&sdev->rcu);
+ list_add_rcu(&sdev->list, &svm->devs);
+ out:
+ if (!IS_ERR_OR_NULL(svm) && list_empty(&svm->devs)) {
+ ioasid_set_data(data->hpasid, NULL);
+ kfree(svm);
+ }
+
+ mutex_unlock(&pasid_mutex);
+ return ret;
+}
+
+int intel_svm_unbind_gpasid(struct device *dev, int pasid)
+{
+ struct intel_iommu *iommu = intel_svm_device_to_iommu(dev);
+ struct intel_svm_dev *sdev;
+ struct intel_svm *svm;
+ int ret = -EINVAL;
+
+ if (WARN_ON(!iommu))
+ return -EINVAL;
+
+ mutex_lock(&pasid_mutex);
+ svm = ioasid_find(NULL, pasid, NULL);
+ if (!svm) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (IS_ERR(svm)) {
+ ret = PTR_ERR(svm);
+ goto out;
+ }
+
+ for_each_svm_dev(sdev, svm, dev) {
+ ret = 0;
+ if (iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX))
+ sdev->users--;
+ if (!sdev->users) {
+ list_del_rcu(&sdev->list);
+ intel_pasid_tear_down_entry(iommu, dev,
+ svm->pasid, false);
+ intel_svm_drain_prq(dev, svm->pasid);
+ kfree_rcu(sdev, rcu);
+
+ if (list_empty(&svm->devs)) {
+ /*
+ * We do not free the IOASID here because the
+ * IOMMU driver did not allocate it.
+ * Unlike native SVM, IOASID for guest use was
+ * allocated prior to the bind call.
+ * In any case, if the free call comes before
+ * the unbind, IOMMU driver will get notified
+ * and perform cleanup.
+ */
+ ioasid_set_data(pasid, NULL);
+ kfree(svm);
+ }
+ }
+ break;
+ }
+out:
+ mutex_unlock(&pasid_mutex);
+ return ret;
+}
+
+/* Caller must hold pasid_mutex, mm reference */
+static int
+intel_svm_bind_mm(struct device *dev, int flags, struct svm_dev_ops *ops,
+ struct mm_struct *mm, struct intel_svm_dev **sd)
{
struct intel_iommu *iommu = intel_svm_device_to_iommu(dev);
struct device_domain_info *info;
struct intel_svm_dev *sdev;
struct intel_svm *svm = NULL;
- struct mm_struct *mm = NULL;
int pasid_max;
int ret;
@@ -249,16 +450,15 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
} else
pasid_max = 1 << 20;
+ /* A supervisor PASID bind should have mm = NULL */
if (flags & SVM_FLAG_SUPERVISOR_MODE) {
- if (!ecap_srs(iommu->ecap))
+ if (!ecap_srs(iommu->ecap) || mm) {
+ pr_err("Supervisor PASID with user provided mm.\n");
return -EINVAL;
- } else if (pasid) {
- mm = get_task_mm(current);
- BUG_ON(!mm);
+ }
}
- mutex_lock(&pasid_mutex);
- if (pasid && !(flags & SVM_FLAG_PRIVATE_PASID)) {
+ if (!(flags & SVM_FLAG_PRIVATE_PASID)) {
struct intel_svm *t;
list_for_each_entry(t, &global_svm_list, list) {
@@ -296,19 +496,12 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
sdev->dev = dev;
ret = intel_iommu_enable_pasid(iommu, dev);
- if (ret || !pasid) {
- /* If they don't actually want to assign a PASID, this is
- * just an enabling check/preparation. */
- kfree(sdev);
- goto out;
- }
-
- info = dev->archdata.iommu;
- if (!info || !info->pasid_supported) {
+ if (ret) {
kfree(sdev);
goto out;
}
+ info = get_domain_info(dev);
sdev->did = FLPT_DEFAULT_DID;
sdev->sid = PCI_DEVID(info->bus, info->devfn);
if (info->ats_enabled) {
@@ -397,26 +590,24 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
}
}
list_add_rcu(&sdev->list, &svm->devs);
-
- success:
- *pasid = svm->pasid;
+success:
+ sdev->pasid = svm->pasid;
+ sdev->sva.dev = dev;
+ if (sd)
+ *sd = sdev;
ret = 0;
out:
- mutex_unlock(&pasid_mutex);
- if (mm)
- mmput(mm);
return ret;
}
-EXPORT_SYMBOL_GPL(intel_svm_bind_mm);
-int intel_svm_unbind_mm(struct device *dev, int pasid)
+/* Caller must hold pasid_mutex */
+static int intel_svm_unbind_mm(struct device *dev, int pasid)
{
struct intel_svm_dev *sdev;
struct intel_iommu *iommu;
struct intel_svm *svm;
int ret = -EINVAL;
- mutex_lock(&pasid_mutex);
iommu = intel_svm_device_to_iommu(dev);
if (!iommu)
goto out;
@@ -442,8 +633,9 @@ int intel_svm_unbind_mm(struct device *dev, int pasid)
* to use. We have a *shared* PASID table, because it's
* large and has to be physically contiguous. So it's
* hard to be as defensive as we might like. */
- intel_pasid_tear_down_entry(iommu, dev, svm->pasid);
- intel_flush_svm_range_dev(svm, sdev, 0, -1, 0);
+ intel_pasid_tear_down_entry(iommu, dev,
+ svm->pasid, false);
+ intel_svm_drain_prq(dev, svm->pasid);
kfree_rcu(sdev, rcu);
if (list_empty(&svm->devs)) {
@@ -462,45 +654,9 @@ int intel_svm_unbind_mm(struct device *dev, int pasid)
break;
}
out:
- mutex_unlock(&pasid_mutex);
return ret;
}
-EXPORT_SYMBOL_GPL(intel_svm_unbind_mm);
-
-int intel_svm_is_pasid_valid(struct device *dev, int pasid)
-{
- struct intel_iommu *iommu;
- struct intel_svm *svm;
- int ret = -EINVAL;
-
- mutex_lock(&pasid_mutex);
- iommu = intel_svm_device_to_iommu(dev);
- if (!iommu)
- goto out;
-
- svm = ioasid_find(NULL, pasid, NULL);
- if (!svm)
- goto out;
-
- if (IS_ERR(svm)) {
- ret = PTR_ERR(svm);
- goto out;
- }
- /* init_mm is used in this case */
- if (!svm->mm)
- ret = 1;
- else if (atomic_read(&svm->mm->mm_users) > 0)
- ret = 1;
- else
- ret = 0;
-
- out:
- mutex_unlock(&pasid_mutex);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(intel_svm_is_pasid_valid);
/* Page request queue descriptor */
struct page_req_dsc {
@@ -557,6 +713,93 @@ static bool is_canonical_address(u64 addr)
return (((saddr << shift) >> shift) == saddr);
}
+/**
+ * intel_svm_drain_prq - Drain page requests and responses for a pasid
+ * @dev: target device
+ * @pasid: pasid for draining
+ *
+ * Drain all pending page requests and responses related to @pasid in both
+ * software and hardware. This is supposed to be called after the device
+ * driver has stopped DMA, the pasid entry has been cleared, and both IOTLB
+ * and DevTLB have been invalidated.
+ *
+ * It waits until all pending page requests for @pasid in the page fault
+ * queue are completed by the prq handling thread. Then follow the steps
+ * described in VT-d spec CH7.10 to drain all page requests and page
+ * responses pending in the hardware.
+ */
+static void intel_svm_drain_prq(struct device *dev, int pasid)
+{
+ struct device_domain_info *info;
+ struct dmar_domain *domain;
+ struct intel_iommu *iommu;
+ struct qi_desc desc[3];
+ struct pci_dev *pdev;
+ int head, tail;
+ u16 sid, did;
+ int qdep;
+
+ info = get_domain_info(dev);
+ if (WARN_ON(!info || !dev_is_pci(dev)))
+ return;
+
+ if (!info->pri_enabled)
+ return;
+
+ iommu = info->iommu;
+ domain = info->domain;
+ pdev = to_pci_dev(dev);
+ sid = PCI_DEVID(info->bus, info->devfn);
+ did = domain->iommu_did[iommu->seq_id];
+ qdep = pci_ats_queue_depth(pdev);
+
+ /*
+ * Check and wait until all pending page requests in the queue are
+ * handled by the prq handling thread.
+ */
+prq_retry:
+ reinit_completion(&iommu->prq_complete);
+ tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
+ head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
+ while (head != tail) {
+ struct page_req_dsc *req;
+
+ req = &iommu->prq[head / sizeof(*req)];
+ if (!req->pasid_present || req->pasid != pasid) {
+ head = (head + sizeof(*req)) & PRQ_RING_MASK;
+ continue;
+ }
+
+ wait_for_completion(&iommu->prq_complete);
+ goto prq_retry;
+ }
+
+ /*
+ * Perform steps described in VT-d spec CH7.10 to drain page
+ * requests and responses in hardware.
+ */
+ memset(desc, 0, sizeof(desc));
+ desc[0].qw0 = QI_IWD_STATUS_DATA(QI_DONE) |
+ QI_IWD_FENCE |
+ QI_IWD_TYPE;
+ desc[1].qw0 = QI_EIOTLB_PASID(pasid) |
+ QI_EIOTLB_DID(did) |
+ QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
+ QI_EIOTLB_TYPE;
+ desc[2].qw0 = QI_DEV_EIOTLB_PASID(pasid) |
+ QI_DEV_EIOTLB_SID(sid) |
+ QI_DEV_EIOTLB_QDEP(qdep) |
+ QI_DEIOTLB_TYPE |
+ QI_DEV_IOTLB_PFSID(info->pfsid);
+qi_retry:
+ reinit_completion(&iommu->prq_complete);
+ qi_submit_sync(iommu, desc, 3, QI_OPT_WAIT_DRAIN);
+ if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) {
+ wait_for_completion(&iommu->prq_complete);
+ goto qi_retry;
+ }
+}
+
static irqreturn_t prq_event_thread(int irq, void *d)
{
struct intel_iommu *iommu = d;
@@ -620,7 +863,7 @@ static irqreturn_t prq_event_thread(int irq, void *d)
if (!mmget_not_zero(svm->mm))
goto bad_req;
- down_read(&svm->mm->mmap_sem);
+ mmap_read_lock(svm->mm);
vma = find_extend_vma(svm->mm, address);
if (!vma || address < vma->vm_start)
goto invalid;
@@ -635,7 +878,7 @@ static irqreturn_t prq_event_thread(int irq, void *d)
result = QI_RESP_SUCCESS;
invalid:
- up_read(&svm->mm->mmap_sem);
+ mmap_read_unlock(svm->mm);
mmput(svm->mm);
bad_req:
/* Accounting for major/minor faults? */
@@ -685,12 +928,75 @@ static irqreturn_t prq_event_thread(int irq, void *d)
sizeof(req->priv_data));
resp.qw2 = 0;
resp.qw3 = 0;
- qi_submit_sync(&resp, iommu);
+ qi_submit_sync(iommu, &resp, 1, 0);
}
head = (head + sizeof(*req)) & PRQ_RING_MASK;
}
dmar_writeq(iommu->reg + DMAR_PQH_REG, tail);
+ /*
+ * Clear the page request overflow bit and wake up all threads that
+ * are waiting for the completion of this handling.
+ */
+ if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO)
+ writel(DMA_PRS_PRO, iommu->reg + DMAR_PRS_REG);
+
+ if (!completion_done(&iommu->prq_complete))
+ complete(&iommu->prq_complete);
+
return IRQ_RETVAL(handled);
}
+
+#define to_intel_svm_dev(handle) container_of(handle, struct intel_svm_dev, sva)
+struct iommu_sva *
+intel_svm_bind(struct device *dev, struct mm_struct *mm, void *drvdata)
+{
+ struct iommu_sva *sva = ERR_PTR(-EINVAL);
+ struct intel_svm_dev *sdev = NULL;
+ int flags = 0;
+ int ret;
+
+ /*
+ * TODO: Consolidate with generic iommu-sva bind after it is merged.
+ * It will require shared SVM data structures, i.e. combine io_mm
+ * and intel_svm etc.
+ */
+ if (drvdata)
+ flags = *(int *)drvdata;
+ mutex_lock(&pasid_mutex);
+ ret = intel_svm_bind_mm(dev, flags, NULL, mm, &sdev);
+ if (ret)
+ sva = ERR_PTR(ret);
+ else if (sdev)
+ sva = &sdev->sva;
+ else
+ WARN(!sdev, "SVM bind succeeded with no sdev!\n");
+
+ mutex_unlock(&pasid_mutex);
+
+ return sva;
+}
+
+void intel_svm_unbind(struct iommu_sva *sva)
+{
+ struct intel_svm_dev *sdev;
+
+ mutex_lock(&pasid_mutex);
+ sdev = to_intel_svm_dev(sva);
+ intel_svm_unbind_mm(sdev->dev, sdev->pasid);
+ mutex_unlock(&pasid_mutex);
+}
+
+int intel_svm_get_pasid(struct iommu_sva *sva)
+{
+ struct intel_svm_dev *sdev;
+ int pasid;
+
+ mutex_lock(&pasid_mutex);
+ sdev = to_intel_svm_dev(sva);
+ pasid = sdev->pasid;
+ mutex_unlock(&pasid_mutex);
+
+ return pasid;
+}
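The three helpers above back the .sva_bind/.sva_unbind/.sva_get_pasid ops wired
into intel_iommu_ops earlier in this diff; client drivers reach them through the
generic wrappers. A hedged sketch of that driver-side usage (the calling driver
and its context are hypothetical):

    #include <linux/err.h>
    #include <linux/iommu.h>
    #include <linux/sched.h>

    /* Hypothetical driver path binding the current process for SVA. */
    static int example_sva_attach(struct device *dev)
    {
            struct iommu_sva *handle;
            int pasid;

            handle = iommu_sva_bind_device(dev, current->mm, NULL);
            if (IS_ERR(handle))
                    return PTR_ERR(handle);

            pasid = iommu_sva_get_pasid(handle);
            dev_info(dev, "bound to PASID %d\n", pasid);

            /* ... device issues DMA requests tagged with the PASID ... */

            iommu_sva_unbind_device(handle);
            return 0;
    }
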
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index 81e43c1df7ec..a042f123b091 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -151,7 +151,7 @@ static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
desc.qw2 = 0;
desc.qw3 = 0;
- return qi_submit_sync(&desc, iommu);
+ return qi_submit_sync(iommu, &desc, 1, 0);
}
static int modify_irte(struct irq_2_iommu *irq_iommu,
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 03d6a26687bc..d43120eb1dc5 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -44,6 +44,7 @@ struct iommu_group {
int id;
struct iommu_domain *default_domain;
struct iommu_domain *domain;
+ struct list_head entry;
};
struct group_device {
@@ -79,6 +80,20 @@ static bool iommu_cmd_line_dma_api(void)
return !!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API);
}
+static int iommu_alloc_default_domain(struct iommu_group *group,
+ struct device *dev);
+static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
+ unsigned type);
+static int __iommu_attach_device(struct iommu_domain *domain,
+ struct device *dev);
+static int __iommu_attach_group(struct iommu_domain *domain,
+ struct iommu_group *group);
+static void __iommu_detach_group(struct iommu_domain *domain,
+ struct iommu_group *group);
+static int iommu_create_device_direct_mappings(struct iommu_group *group,
+ struct device *dev);
+static struct iommu_group *iommu_group_get_for_dev(struct device *dev);
+
#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \
struct iommu_group_attribute iommu_group_attr_##_name = \
__ATTR(_name, _mode, _show, _store)
@@ -175,57 +190,118 @@ static void dev_iommu_free(struct device *dev)
dev->iommu = NULL;
}
-int iommu_probe_device(struct device *dev)
+static int __iommu_probe_device(struct device *dev, struct list_head *group_list)
{
const struct iommu_ops *ops = dev->bus->iommu_ops;
+ struct iommu_device *iommu_dev;
+ struct iommu_group *group;
int ret;
- WARN_ON(dev->iommu_group);
if (!ops)
- return -EINVAL;
+ return -ENODEV;
if (!dev_iommu_get(dev))
return -ENOMEM;
if (!try_module_get(ops->owner)) {
ret = -EINVAL;
- goto err_free_dev_param;
+ goto err_free;
}
- ret = ops->add_device(dev);
- if (ret)
- goto err_module_put;
+ iommu_dev = ops->probe_device(dev);
+ if (IS_ERR(iommu_dev)) {
+ ret = PTR_ERR(iommu_dev);
+ goto out_module_put;
+ }
+
+ dev->iommu->iommu_dev = iommu_dev;
+
+ group = iommu_group_get_for_dev(dev);
+ if (IS_ERR(group)) {
+ ret = PTR_ERR(group);
+ goto out_release;
+ }
+ iommu_group_put(group);
+
+ if (group_list && !group->default_domain && list_empty(&group->entry))
+ list_add_tail(&group->entry, group_list);
+
+ iommu_device_link(iommu_dev, dev);
return 0;
-err_module_put:
+out_release:
+ ops->release_device(dev);
+
+out_module_put:
module_put(ops->owner);
-err_free_dev_param:
+
+err_free:
dev_iommu_free(dev);
+
return ret;
}
-void iommu_release_device(struct device *dev)
+int iommu_probe_device(struct device *dev)
{
const struct iommu_ops *ops = dev->bus->iommu_ops;
+ struct iommu_group *group;
+ int ret;
- if (dev->iommu_group)
- ops->remove_device(dev);
+ ret = __iommu_probe_device(dev, NULL);
+ if (ret)
+ goto err_out;
+
+ group = iommu_group_get(dev);
+ if (!group)
+ goto err_release;
+
+ /*
+ * Try to allocate a default domain - needs support from the
+ * IOMMU driver. There are still some drivers which don't
+ * support default domains, so the return value is not yet
+ * checked.
+ */
+ iommu_alloc_default_domain(group, dev);
+
+ if (group->default_domain)
+ ret = __iommu_attach_device(group->default_domain, dev);
+
+ iommu_create_device_direct_mappings(group, dev);
+
+ iommu_group_put(group);
+
+ if (ret)
+ goto err_release;
+
+ if (ops->probe_finalize)
+ ops->probe_finalize(dev);
+
+ return 0;
+
+err_release:
+ iommu_release_device(dev);
+
+err_out:
+ return ret;
- if (dev->iommu) {
- module_put(ops->owner);
- dev_iommu_free(dev);
- }
}
-static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
- unsigned type);
-static int __iommu_attach_device(struct iommu_domain *domain,
- struct device *dev);
-static int __iommu_attach_group(struct iommu_domain *domain,
- struct iommu_group *group);
-static void __iommu_detach_group(struct iommu_domain *domain,
- struct iommu_group *group);
+void iommu_release_device(struct device *dev)
+{
+ const struct iommu_ops *ops = dev->bus->iommu_ops;
+
+ if (!dev->iommu)
+ return;
+
+ iommu_device_unlink(dev->iommu->iommu_dev, dev);
+ iommu_group_remove_device(dev);
+
+ ops->release_device(dev);
+
+ module_put(ops->owner);
+ dev_iommu_free(dev);
+}
static int __init iommu_set_def_domain_type(char *str)
{
@@ -497,6 +573,7 @@ struct iommu_group *iommu_group_alloc(void)
group->kobj.kset = iommu_group_kset;
mutex_init(&group->mutex);
INIT_LIST_HEAD(&group->devices);
+ INIT_LIST_HEAD(&group->entry);
BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);
ret = ida_simple_get(&iommu_group_ida, 0, 0, GFP_KERNEL);
@@ -638,8 +715,8 @@ int iommu_group_set_name(struct iommu_group *group, const char *name)
}
EXPORT_SYMBOL_GPL(iommu_group_set_name);
-static int iommu_group_create_direct_mappings(struct iommu_group *group,
- struct device *dev)
+static int iommu_create_device_direct_mappings(struct iommu_group *group,
+ struct device *dev)
{
struct iommu_domain *domain = group->default_domain;
struct iommu_resv_region *entry;
@@ -752,8 +829,6 @@ rename:
dev->iommu_group = group;
- iommu_group_create_direct_mappings(group, dev);
-
mutex_lock(&group->mutex);
list_add_tail(&device->list, &group->devices);
if (group->domain && !iommu_is_attach_deferred(group->domain, dev))
@@ -1371,6 +1446,61 @@ struct iommu_group *fsl_mc_device_group(struct device *dev)
}
EXPORT_SYMBOL_GPL(fsl_mc_device_group);
+static int iommu_get_def_domain_type(struct device *dev)
+{
+ const struct iommu_ops *ops = dev->bus->iommu_ops;
+ unsigned int type = 0;
+
+ if (ops->def_domain_type)
+ type = ops->def_domain_type(dev);
+
+ return (type == 0) ? iommu_def_domain_type : type;
+}
+
+static int iommu_group_alloc_default_domain(struct bus_type *bus,
+ struct iommu_group *group,
+ unsigned int type)
+{
+ struct iommu_domain *dom;
+
+ dom = __iommu_domain_alloc(bus, type);
+ if (!dom && type != IOMMU_DOMAIN_DMA) {
+ dom = __iommu_domain_alloc(bus, IOMMU_DOMAIN_DMA);
+ if (dom)
+ pr_warn("Failed to allocate default IOMMU domain of type %u for group %s - Falling back to IOMMU_DOMAIN_DMA",
+ type, group->name);
+ }
+
+ if (!dom)
+ return -ENOMEM;
+
+ group->default_domain = dom;
+ if (!group->domain)
+ group->domain = dom;
+
+ if (!iommu_dma_strict) {
+ int attr = 1;
+ iommu_domain_set_attr(dom,
+ DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
+ &attr);
+ }
+
+ return 0;
+}
+
+static int iommu_alloc_default_domain(struct iommu_group *group,
+ struct device *dev)
+{
+ unsigned int type;
+
+ if (group->default_domain)
+ return 0;
+
+ type = iommu_get_def_domain_type(dev);
+
+ return iommu_group_alloc_default_domain(dev->bus, group, type);
+}
+
/**
* iommu_group_get_for_dev - Find or create the IOMMU group for a device
* @dev: target device
@@ -1381,7 +1511,7 @@ EXPORT_SYMBOL_GPL(fsl_mc_device_group);
* to the returned IOMMU group, which will already include the provided
* device. The reference should be released with iommu_group_put().
*/
-struct iommu_group *iommu_group_get_for_dev(struct device *dev)
+static struct iommu_group *iommu_group_get_for_dev(struct device *dev)
{
const struct iommu_ops *ops = dev->bus->iommu_ops;
struct iommu_group *group;
@@ -1401,59 +1531,37 @@ struct iommu_group *iommu_group_get_for_dev(struct device *dev)
if (IS_ERR(group))
return group;
- /*
- * Try to allocate a default domain - needs support from the
- * IOMMU driver.
- */
- if (!group->default_domain) {
- struct iommu_domain *dom;
-
- dom = __iommu_domain_alloc(dev->bus, iommu_def_domain_type);
- if (!dom && iommu_def_domain_type != IOMMU_DOMAIN_DMA) {
- dom = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_DMA);
- if (dom) {
- dev_warn(dev,
- "failed to allocate default IOMMU domain of type %u; falling back to IOMMU_DOMAIN_DMA",
- iommu_def_domain_type);
- }
- }
-
- group->default_domain = dom;
- if (!group->domain)
- group->domain = dom;
-
- if (dom && !iommu_dma_strict) {
- int attr = 1;
- iommu_domain_set_attr(dom,
- DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
- &attr);
- }
- }
-
ret = iommu_group_add_device(group, dev);
- if (ret) {
- iommu_group_put(group);
- return ERR_PTR(ret);
- }
+ if (ret)
+ goto out_put_group;
return group;
+
+out_put_group:
+ iommu_group_put(group);
+
+ return ERR_PTR(ret);
}
-EXPORT_SYMBOL_GPL(iommu_group_get_for_dev);
struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
{
return group->default_domain;
}
-static int add_iommu_group(struct device *dev, void *data)
+static int probe_iommu_group(struct device *dev, void *data)
{
- int ret = iommu_probe_device(dev);
+ struct list_head *group_list = data;
+ struct iommu_group *group;
+ int ret;
- /*
- * We ignore -ENODEV errors for now, as they just mean that the
- * device is not translated by an IOMMU. We still care about
- * other errors and fail to initialize when they happen.
- */
+ /* Device is probed already if in a group */
+ group = iommu_group_get(dev);
+ if (group) {
+ iommu_group_put(group);
+ return 0;
+ }
+
+ ret = __iommu_probe_device(dev, group_list);
if (ret == -ENODEV)
ret = 0;
@@ -1519,10 +1627,152 @@ static int iommu_bus_notifier(struct notifier_block *nb,
return 0;
}
+struct __group_domain_type {
+ struct device *dev;
+ unsigned int type;
+};
+
+static int probe_get_default_domain_type(struct device *dev, void *data)
+{
+ const struct iommu_ops *ops = dev->bus->iommu_ops;
+ struct __group_domain_type *gtype = data;
+ unsigned int type = 0;
+
+ if (ops->def_domain_type)
+ type = ops->def_domain_type(dev);
+
+ if (type) {
+ if (gtype->type && gtype->type != type) {
+ dev_warn(dev, "Device needs domain type %s, but device %s in the same iommu group requires type %s - using default\n",
+ iommu_domain_type_str(type),
+ dev_name(gtype->dev),
+ iommu_domain_type_str(gtype->type));
+ gtype->type = 0;
+ }
+
+ if (!gtype->dev) {
+ gtype->dev = dev;
+ gtype->type = type;
+ }
+ }
+
+ return 0;
+}
+
+static void probe_alloc_default_domain(struct bus_type *bus,
+ struct iommu_group *group)
+{
+ struct __group_domain_type gtype;
+
+ memset(&gtype, 0, sizeof(gtype));
+
+ /* Ask for default domain requirements of all devices in the group */
+ __iommu_group_for_each_dev(group, &gtype,
+ probe_get_default_domain_type);
+
+ if (!gtype.type)
+ gtype.type = iommu_def_domain_type;
+
+ iommu_group_alloc_default_domain(bus, group, gtype.type);
+
+}
+
+static int iommu_group_do_dma_attach(struct device *dev, void *data)
+{
+ struct iommu_domain *domain = data;
+ int ret = 0;
+
+ if (!iommu_is_attach_deferred(domain, dev))
+ ret = __iommu_attach_device(domain, dev);
+
+ return ret;
+}
+
+static int __iommu_group_dma_attach(struct iommu_group *group)
+{
+ return __iommu_group_for_each_dev(group, group->default_domain,
+ iommu_group_do_dma_attach);
+}
+
+static int iommu_group_do_probe_finalize(struct device *dev, void *data)
+{
+ struct iommu_domain *domain = data;
+
+ if (domain->ops->probe_finalize)
+ domain->ops->probe_finalize(dev);
+
+ return 0;
+}
+
+static void __iommu_group_dma_finalize(struct iommu_group *group)
+{
+ __iommu_group_for_each_dev(group, group->default_domain,
+ iommu_group_do_probe_finalize);
+}
+
+static int iommu_do_create_direct_mappings(struct device *dev, void *data)
+{
+ struct iommu_group *group = data;
+
+ iommu_create_device_direct_mappings(group, dev);
+
+ return 0;
+}
+
+static int iommu_group_create_direct_mappings(struct iommu_group *group)
+{
+ return __iommu_group_for_each_dev(group, group,
+ iommu_do_create_direct_mappings);
+}
+
+int bus_iommu_probe(struct bus_type *bus)
+{
+ struct iommu_group *group, *next;
+ LIST_HEAD(group_list);
+ int ret;
+
+ /*
+ * This code-path does not allocate the default domain when
+ * creating the iommu group, so do it after the groups are
+ * created.
+ */
+ ret = bus_for_each_dev(bus, NULL, &group_list, probe_iommu_group);
+ if (ret)
+ return ret;
+
+ list_for_each_entry_safe(group, next, &group_list, entry) {
+ /* Remove item from the list */
+ list_del_init(&group->entry);
+
+ mutex_lock(&group->mutex);
+
+ /* Try to allocate default domain */
+ probe_alloc_default_domain(bus, group);
+
+ if (!group->default_domain) {
+ mutex_unlock(&group->mutex);
+ continue;
+ }
+
+ iommu_group_create_direct_mappings(group);
+
+ ret = __iommu_group_dma_attach(group);
+
+ mutex_unlock(&group->mutex);
+
+ if (ret)
+ break;
+
+ __iommu_group_dma_finalize(group);
+ }
+
+ return ret;
+}
+
static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
{
- int err;
struct notifier_block *nb;
+ int err;
nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
if (!nb)
@@ -1534,7 +1784,7 @@ static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
if (err)
goto out_free;
- err = bus_for_each_dev(bus, NULL, NULL, add_iommu_group);
+ err = bus_iommu_probe(bus);
if (err)
goto out_err;
@@ -2301,71 +2551,6 @@ struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
}
EXPORT_SYMBOL_GPL(iommu_alloc_resv_region);
-static int
-request_default_domain_for_dev(struct device *dev, unsigned long type)
-{
- struct iommu_domain *domain;
- struct iommu_group *group;
- int ret;
-
- /* Device must already be in a group before calling this function */
- group = iommu_group_get(dev);
- if (!group)
- return -EINVAL;
-
- mutex_lock(&group->mutex);
-
- ret = 0;
- if (group->default_domain && group->default_domain->type == type)
- goto out;
-
- /* Don't change mappings of existing devices */
- ret = -EBUSY;
- if (iommu_group_device_count(group) != 1)
- goto out;
-
- ret = -ENOMEM;
- domain = __iommu_domain_alloc(dev->bus, type);
- if (!domain)
- goto out;
-
- /* Attach the device to the domain */
- ret = __iommu_attach_group(domain, group);
- if (ret) {
- iommu_domain_free(domain);
- goto out;
- }
-
- /* Make the domain the default for this group */
- if (group->default_domain)
- iommu_domain_free(group->default_domain);
- group->default_domain = domain;
-
- iommu_group_create_direct_mappings(group, dev);
-
- dev_info(dev, "Using iommu %s mapping\n",
- type == IOMMU_DOMAIN_DMA ? "dma" : "direct");
-
- ret = 0;
-out:
- mutex_unlock(&group->mutex);
- iommu_group_put(group);
-
- return ret;
-}
-
-/* Request that a device is direct mapped by the IOMMU */
-int iommu_request_dm_for_dev(struct device *dev)
-{
- return request_default_domain_for_dev(dev, IOMMU_DOMAIN_IDENTITY);
-}
-
-/* Request that a device can't be direct mapped by the IOMMU */
-int iommu_request_dma_domain_for_dev(struct device *dev)
-{
- return request_default_domain_for_dev(dev, IOMMU_DOMAIN_DMA);
-}
-
void iommu_set_default_passthrough(bool cmd_line)
{
if (cmd_line)
@@ -2643,17 +2828,6 @@ void iommu_sva_unbind_device(struct iommu_sva *handle)
}
EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);
-int iommu_sva_set_ops(struct iommu_sva *handle,
- const struct iommu_sva_ops *sva_ops)
-{
- if (handle->ops && handle->ops != sva_ops)
- return -EEXIST;
-
- handle->ops = sva_ops;
- return 0;
-}
-EXPORT_SYMBOL_GPL(iommu_sva_set_ops);
-
int iommu_sva_get_pasid(struct iommu_sva *handle)
{
const struct iommu_ops *ops = handle->dev->bus->iommu_ops;
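The per-driver conversions further down (ipmmu-vmsa, msm, mtk, omap, qcom, rockchip, s390, sun50i) all follow the same pattern: .add_device()/.remove_device() become .probe_device()/.release_device(), and the group lookup, sysfs linking and default-domain setup the drivers used to do by hand are now done by the core in __iommu_probe_device(), iommu_release_device() and bus_iommu_probe() above. As a rough illustration only (the "foo" names are made up for this note and are not part of the patch), a converted driver reduces to:

struct foo_iommu {
	struct iommu_device iommu;	/* registered with iommu_device_register() */
};

static struct iommu_device *foo_iommu_probe_device(struct device *dev)
{
	struct foo_iommu *iommu = dev_iommu_priv_get(dev);

	if (!iommu)	/* device not translated by this IOMMU */
		return ERR_PTR(-ENODEV);

	/* The core links the device and sets up its group and default domain. */
	return &iommu->iommu;
}

static void foo_iommu_release_device(struct device *dev)
{
	/* Only driver-private teardown is left here. */
	iommu_fwspec_free(dev);
}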
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 0e6a9536eca6..49fc01f2a28d 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -253,7 +253,7 @@ int iova_cache_get(void)
SLAB_HWCACHE_ALIGN, NULL);
if (!iova_cache) {
mutex_unlock(&iova_cache_mutex);
- printk(KERN_ERR "Couldn't create iova cache\n");
+ pr_err("Couldn't create iova cache\n");
return -ENOMEM;
}
}
@@ -718,8 +718,8 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
if (!new_iova)
- printk(KERN_ERR "Reserve iova range %lx@%lx failed\n",
- iova->pfn_lo, iova->pfn_lo);
+ pr_err("Reserve iova range %lx@%lx failed\n",
+ iova->pfn_lo, iova->pfn_lo);
}
spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
}
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 310cf09feea3..4c2972f3153b 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -805,24 +805,8 @@ static int ipmmu_of_xlate(struct device *dev,
static int ipmmu_init_arm_mapping(struct device *dev)
{
struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
- struct iommu_group *group;
int ret;
- /* Create a device group and add the device to it. */
- group = iommu_group_alloc();
- if (IS_ERR(group)) {
- dev_err(dev, "Failed to allocate IOMMU group\n");
- return PTR_ERR(group);
- }
-
- ret = iommu_group_add_device(group, dev);
- iommu_group_put(group);
-
- if (ret < 0) {
- dev_err(dev, "Failed to add device to IPMMU group\n");
- return ret;
- }
-
/*
* Create the ARM mapping, used by the ARM DMA mapping core to allocate
* VAs. This will allocate a corresponding IOMMU domain.
@@ -856,48 +840,39 @@ static int ipmmu_init_arm_mapping(struct device *dev)
return 0;
error:
- iommu_group_remove_device(dev);
if (mmu->mapping)
arm_iommu_release_mapping(mmu->mapping);
return ret;
}
-static int ipmmu_add_device(struct device *dev)
+static struct iommu_device *ipmmu_probe_device(struct device *dev)
{
struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
- struct iommu_group *group;
- int ret;
/*
* Only let through devices that have been verified in xlate()
*/
if (!mmu)
- return -ENODEV;
+ return ERR_PTR(-ENODEV);
- if (IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA)) {
- ret = ipmmu_init_arm_mapping(dev);
- if (ret)
- return ret;
- } else {
- group = iommu_group_get_for_dev(dev);
- if (IS_ERR(group))
- return PTR_ERR(group);
+ return &mmu->iommu;
+}
- iommu_group_put(group);
- }
+static void ipmmu_probe_finalize(struct device *dev)
+{
+ int ret = 0;
- iommu_device_link(&mmu->iommu, dev);
- return 0;
+ if (IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA))
+ ret = ipmmu_init_arm_mapping(dev);
+
+ if (ret)
+ dev_err(dev, "Can't create IOMMU mapping - DMA-OPS will not work\n");
}
-static void ipmmu_remove_device(struct device *dev)
+static void ipmmu_release_device(struct device *dev)
{
- struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
-
- iommu_device_unlink(&mmu->iommu, dev);
arm_iommu_detach_device(dev);
- iommu_group_remove_device(dev);
}
static struct iommu_group *ipmmu_find_group(struct device *dev)
@@ -925,9 +900,11 @@ static const struct iommu_ops ipmmu_ops = {
.flush_iotlb_all = ipmmu_flush_iotlb_all,
.iotlb_sync = ipmmu_iotlb_sync,
.iova_to_phys = ipmmu_iova_to_phys,
- .add_device = ipmmu_add_device,
- .remove_device = ipmmu_remove_device,
- .device_group = ipmmu_find_group,
+ .probe_device = ipmmu_probe_device,
+ .release_device = ipmmu_release_device,
+ .probe_finalize = ipmmu_probe_finalize,
+ .device_group = IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA)
+ ? generic_device_group : ipmmu_find_group,
.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
.of_xlate = ipmmu_of_xlate,
};
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index 94a6df1bddd6..3d8a63555c25 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -34,7 +34,7 @@ __asm__ __volatile__ ( \
/* bitmap of the page sizes currently supported */
#define MSM_IOMMU_PGSIZES (SZ_4K | SZ_64K | SZ_1M | SZ_16M)
-DEFINE_SPINLOCK(msm_iommu_lock);
+static DEFINE_SPINLOCK(msm_iommu_lock);
static LIST_HEAD(qcom_iommu_devices);
static struct iommu_ops msm_iommu_ops;
@@ -388,43 +388,23 @@ static struct msm_iommu_dev *find_iommu_for_dev(struct device *dev)
return ret;
}
-static int msm_iommu_add_device(struct device *dev)
+static struct iommu_device *msm_iommu_probe_device(struct device *dev)
{
struct msm_iommu_dev *iommu;
- struct iommu_group *group;
unsigned long flags;
spin_lock_irqsave(&msm_iommu_lock, flags);
iommu = find_iommu_for_dev(dev);
spin_unlock_irqrestore(&msm_iommu_lock, flags);
- if (iommu)
- iommu_device_link(&iommu->iommu, dev);
- else
- return -ENODEV;
-
- group = iommu_group_get_for_dev(dev);
- if (IS_ERR(group))
- return PTR_ERR(group);
-
- iommu_group_put(group);
+ if (!iommu)
+ return ERR_PTR(-ENODEV);
- return 0;
+ return &iommu->iommu;
}
-static void msm_iommu_remove_device(struct device *dev)
+static void msm_iommu_release_device(struct device *dev)
{
- struct msm_iommu_dev *iommu;
- unsigned long flags;
-
- spin_lock_irqsave(&msm_iommu_lock, flags);
- iommu = find_iommu_for_dev(dev);
- spin_unlock_irqrestore(&msm_iommu_lock, flags);
-
- if (iommu)
- iommu_device_unlink(&iommu->iommu, dev);
-
- iommu_group_remove_device(dev);
}
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
@@ -708,8 +688,8 @@ static struct iommu_ops msm_iommu_ops = {
*/
.iotlb_sync = NULL,
.iova_to_phys = msm_iommu_iova_to_phys,
- .add_device = msm_iommu_add_device,
- .remove_device = msm_iommu_remove_device,
+ .probe_device = msm_iommu_probe_device,
+ .release_device = msm_iommu_release_device,
.device_group = generic_device_group,
.pgsize_bitmap = MSM_IOMMU_PGSIZES,
.of_xlate = qcom_iommu_of_xlate,
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index 5f4d6df59cf6..2be96f1cdbd2 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -441,38 +441,26 @@ static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
return pa;
}
-static int mtk_iommu_add_device(struct device *dev)
+static struct iommu_device *mtk_iommu_probe_device(struct device *dev)
{
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct mtk_iommu_data *data;
- struct iommu_group *group;
if (!fwspec || fwspec->ops != &mtk_iommu_ops)
- return -ENODEV; /* Not a iommu client device */
+ return ERR_PTR(-ENODEV); /* Not an iommu client device */
data = dev_iommu_priv_get(dev);
- iommu_device_link(&data->iommu, dev);
- group = iommu_group_get_for_dev(dev);
- if (IS_ERR(group))
- return PTR_ERR(group);
-
- iommu_group_put(group);
- return 0;
+ return &data->iommu;
}
-static void mtk_iommu_remove_device(struct device *dev)
+static void mtk_iommu_release_device(struct device *dev)
{
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
- struct mtk_iommu_data *data;
if (!fwspec || fwspec->ops != &mtk_iommu_ops)
return;
- data = dev_iommu_priv_get(dev);
- iommu_device_unlink(&data->iommu, dev);
-
- iommu_group_remove_device(dev);
iommu_fwspec_free(dev);
}
@@ -526,8 +514,8 @@ static const struct iommu_ops mtk_iommu_ops = {
.flush_iotlb_all = mtk_iommu_flush_iotlb_all,
.iotlb_sync = mtk_iommu_iotlb_sync,
.iova_to_phys = mtk_iommu_iova_to_phys,
- .add_device = mtk_iommu_add_device,
- .remove_device = mtk_iommu_remove_device,
+ .probe_device = mtk_iommu_probe_device,
+ .release_device = mtk_iommu_release_device,
.device_group = mtk_iommu_device_group,
.of_xlate = mtk_iommu_of_xlate,
.pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index a31be05601c9..c9d79cff4d17 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -265,10 +265,13 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain,
{
struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
struct mtk_iommu_domain *dom = to_mtk_domain(domain);
+ struct dma_iommu_mapping *mtk_mapping;
int ret;
- if (!data)
- return -ENODEV;
+ /* Only allow the domain created internally. */
+ mtk_mapping = data->dev->archdata.iommu;
+ if (mtk_mapping->domain != domain)
+ return 0;
if (!data->m4u_dom) {
data->m4u_dom = dom;
@@ -288,9 +291,6 @@ static void mtk_iommu_detach_device(struct iommu_domain *domain,
{
struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
- if (!data)
- return;
-
mtk_iommu_config(data, dev, false);
}
@@ -416,14 +416,17 @@ static int mtk_iommu_create_mapping(struct device *dev,
return 0;
}
-static int mtk_iommu_add_device(struct device *dev)
+static int mtk_iommu_def_domain_type(struct device *dev)
+{
+ return IOMMU_DOMAIN_UNMANAGED;
+}
+
+static struct iommu_device *mtk_iommu_probe_device(struct device *dev)
{
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
- struct dma_iommu_mapping *mtk_mapping;
struct of_phandle_args iommu_spec;
struct of_phandle_iterator it;
struct mtk_iommu_data *data;
- struct iommu_group *group;
int err;
of_for_each_phandle(&it, err, dev->of_node, "iommus",
@@ -442,46 +445,34 @@ static int mtk_iommu_add_device(struct device *dev)
}
if (!fwspec || fwspec->ops != &mtk_iommu_ops)
- return -ENODEV; /* Not a iommu client device */
+ return ERR_PTR(-ENODEV); /* Not an iommu client device */
- /*
- * This is a short-term bodge because the ARM DMA code doesn't
- * understand multi-device groups, but we have to call into it
- * successfully (and not just rely on a normal IOMMU API attach
- * here) in order to set the correct DMA API ops on @dev.
- */
- group = iommu_group_alloc();
- if (IS_ERR(group))
- return PTR_ERR(group);
+ data = dev_iommu_priv_get(dev);
- err = iommu_group_add_device(group, dev);
- iommu_group_put(group);
- if (err)
- return err;
+ return &data->iommu;
+}
- data = dev_iommu_priv_get(dev);
+static void mtk_iommu_probe_finalize(struct device *dev)
+{
+ struct dma_iommu_mapping *mtk_mapping;
+ struct mtk_iommu_data *data;
+ int err;
+
+ data = dev_iommu_priv_get(dev);
mtk_mapping = data->dev->archdata.iommu;
- err = arm_iommu_attach_device(dev, mtk_mapping);
- if (err) {
- iommu_group_remove_device(dev);
- return err;
- }
- return iommu_device_link(&data->iommu, dev);
+ err = arm_iommu_attach_device(dev, mtk_mapping);
+ if (err)
+ dev_err(dev, "Can't create IOMMU mapping - DMA-OPS will not work\n");
}
-static void mtk_iommu_remove_device(struct device *dev)
+static void mtk_iommu_release_device(struct device *dev)
{
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
- struct mtk_iommu_data *data;
if (!fwspec || fwspec->ops != &mtk_iommu_ops)
return;
- data = dev_iommu_priv_get(dev);
- iommu_device_unlink(&data->iommu, dev);
-
- iommu_group_remove_device(dev);
iommu_fwspec_free(dev);
}
@@ -534,8 +525,11 @@ static const struct iommu_ops mtk_iommu_ops = {
.map = mtk_iommu_map,
.unmap = mtk_iommu_unmap,
.iova_to_phys = mtk_iommu_iova_to_phys,
- .add_device = mtk_iommu_add_device,
- .remove_device = mtk_iommu_remove_device,
+ .probe_device = mtk_iommu_probe_device,
+ .probe_finalize = mtk_iommu_probe_finalize,
+ .release_device = mtk_iommu_release_device,
+ .def_domain_type = mtk_iommu_def_domain_type,
+ .device_group = generic_device_group,
.pgsize_bitmap = ~0UL << MT2701_IOMMU_PAGE_SHIFT,
};
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index 887fefcb03b4..c8282cc212cb 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -35,15 +35,6 @@
static const struct iommu_ops omap_iommu_ops;
-struct orphan_dev {
- struct device *dev;
- struct list_head node;
-};
-
-static LIST_HEAD(orphan_dev_list);
-
-static DEFINE_SPINLOCK(orphan_lock);
-
#define to_iommu(dev) ((struct omap_iommu *)dev_get_drvdata(dev))
/* bitmap of the page sizes currently supported */
@@ -62,8 +53,6 @@ static DEFINE_SPINLOCK(orphan_lock);
static struct platform_driver omap_iommu_driver;
static struct kmem_cache *iopte_cachep;
-static int _omap_iommu_add_device(struct device *dev);
-
/**
* to_omap_domain - Get struct omap_iommu_domain from generic iommu_domain
* @dom: generic iommu domain handle
@@ -1177,7 +1166,6 @@ static int omap_iommu_probe(struct platform_device *pdev)
struct omap_iommu *obj;
struct resource *res;
struct device_node *of = pdev->dev.of_node;
- struct orphan_dev *orphan_dev, *tmp;
if (!of) {
pr_err("%s: only DT-based devices are supported\n", __func__);
@@ -1248,6 +1236,7 @@ static int omap_iommu_probe(struct platform_device *pdev)
goto out_group;
iommu_device_set_ops(&obj->iommu, &omap_iommu_ops);
+ iommu_device_set_fwnode(&obj->iommu, &of->fwnode);
err = iommu_device_register(&obj->iommu);
if (err)
@@ -1260,13 +1249,8 @@ static int omap_iommu_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "%s registered\n", obj->name);
- list_for_each_entry_safe(orphan_dev, tmp, &orphan_dev_list, node) {
- err = _omap_iommu_add_device(orphan_dev->dev);
- if (!err) {
- list_del(&orphan_dev->node);
- kfree(orphan_dev);
- }
- }
+ /* Re-probe the bus to probe devices attached to this IOMMU */
+ bus_iommu_probe(&platform_bus_type);
return 0;
@@ -1657,17 +1641,13 @@ static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
return ret;
}
-static int _omap_iommu_add_device(struct device *dev)
+static struct iommu_device *omap_iommu_probe_device(struct device *dev)
{
struct omap_iommu_arch_data *arch_data, *tmp;
+ struct platform_device *pdev;
struct omap_iommu *oiommu;
- struct iommu_group *group;
struct device_node *np;
- struct platform_device *pdev;
int num_iommus, i;
- int ret;
- struct orphan_dev *orphan_dev;
- unsigned long flags;
/*
* Allocate the archdata iommu structure for DT-based devices.
@@ -1676,7 +1656,7 @@ static int _omap_iommu_add_device(struct device *dev)
* IOMMU users.
*/
if (!dev->of_node)
- return 0;
+ return ERR_PTR(-ENODEV);
/*
* retrieve the count of IOMMU nodes using phandle size as element size
@@ -1689,43 +1669,27 @@ static int _omap_iommu_add_device(struct device *dev)
arch_data = kcalloc(num_iommus + 1, sizeof(*arch_data), GFP_KERNEL);
if (!arch_data)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
for (i = 0, tmp = arch_data; i < num_iommus; i++, tmp++) {
np = of_parse_phandle(dev->of_node, "iommus", i);
if (!np) {
kfree(arch_data);
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
pdev = of_find_device_by_node(np);
if (!pdev) {
of_node_put(np);
kfree(arch_data);
- spin_lock_irqsave(&orphan_lock, flags);
- list_for_each_entry(orphan_dev, &orphan_dev_list,
- node) {
- if (orphan_dev->dev == dev)
- break;
- }
- spin_unlock_irqrestore(&orphan_lock, flags);
-
- if (orphan_dev && orphan_dev->dev == dev)
- return -EPROBE_DEFER;
-
- orphan_dev = kzalloc(sizeof(*orphan_dev), GFP_KERNEL);
- orphan_dev->dev = dev;
- spin_lock_irqsave(&orphan_lock, flags);
- list_add(&orphan_dev->node, &orphan_dev_list);
- spin_unlock_irqrestore(&orphan_lock, flags);
- return -EPROBE_DEFER;
+ return ERR_PTR(-ENODEV);
}
oiommu = platform_get_drvdata(pdev);
if (!oiommu) {
of_node_put(np);
kfree(arch_data);
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
tmp->iommu_dev = oiommu;
@@ -1734,57 +1698,25 @@ static int _omap_iommu_add_device(struct device *dev)
of_node_put(np);
}
+ dev->archdata.iommu = arch_data;
+
/*
* use the first IOMMU alone for the sysfs device linking.
* TODO: Evaluate if a single iommu_group needs to be
* maintained for both IOMMUs
*/
oiommu = arch_data->iommu_dev;
- ret = iommu_device_link(&oiommu->iommu, dev);
- if (ret) {
- kfree(arch_data);
- return ret;
- }
-
- dev->archdata.iommu = arch_data;
-
- /*
- * IOMMU group initialization calls into omap_iommu_device_group, which
- * needs a valid dev->archdata.iommu pointer
- */
- group = iommu_group_get_for_dev(dev);
- if (IS_ERR(group)) {
- iommu_device_unlink(&oiommu->iommu, dev);
- dev->archdata.iommu = NULL;
- kfree(arch_data);
- return PTR_ERR(group);
- }
- iommu_group_put(group);
- return 0;
+ return &oiommu->iommu;
}
-static int omap_iommu_add_device(struct device *dev)
-{
- int ret;
-
- ret = _omap_iommu_add_device(dev);
- if (ret == -EPROBE_DEFER)
- return 0;
-
- return ret;
-}
-
-static void omap_iommu_remove_device(struct device *dev)
+static void omap_iommu_release_device(struct device *dev)
{
struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
if (!dev->of_node || !arch_data)
return;
- iommu_device_unlink(&arch_data->iommu_dev->iommu, dev);
- iommu_group_remove_device(dev);
-
dev->archdata.iommu = NULL;
kfree(arch_data);
@@ -1795,6 +1727,9 @@ static struct iommu_group *omap_iommu_device_group(struct device *dev)
struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
struct iommu_group *group = ERR_PTR(-EINVAL);
+ if (!arch_data)
+ return ERR_PTR(-ENODEV);
+
if (arch_data->iommu_dev)
group = iommu_group_ref_get(arch_data->iommu_dev->group);
@@ -1809,8 +1744,8 @@ static const struct iommu_ops omap_iommu_ops = {
.map = omap_iommu_map,
.unmap = omap_iommu_unmap,
.iova_to_phys = omap_iommu_iova_to_phys,
- .add_device = omap_iommu_add_device,
- .remove_device = omap_iommu_remove_device,
+ .probe_device = omap_iommu_probe_device,
+ .release_device = omap_iommu_release_device,
.device_group = omap_iommu_device_group,
.pgsize_bitmap = OMAP_IOMMU_PGSIZES,
};
diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c
index 5b3b270972f8..c3e1fbd1988c 100644
--- a/drivers/iommu/qcom_iommu.c
+++ b/drivers/iommu/qcom_iommu.c
@@ -524,14 +524,13 @@ static bool qcom_iommu_capable(enum iommu_cap cap)
}
}
-static int qcom_iommu_add_device(struct device *dev)
+static struct iommu_device *qcom_iommu_probe_device(struct device *dev)
{
struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
- struct iommu_group *group;
struct device_link *link;
if (!qcom_iommu)
- return -ENODEV;
+ return ERR_PTR(-ENODEV);
/*
* Establish the link between iommu and master, so that the
@@ -542,28 +541,19 @@ static int qcom_iommu_add_device(struct device *dev)
if (!link) {
dev_err(qcom_iommu->dev, "Unable to create device link between %s and %s\n",
dev_name(qcom_iommu->dev), dev_name(dev));
- return -ENODEV;
+ return ERR_PTR(-ENODEV);
}
- group = iommu_group_get_for_dev(dev);
- if (IS_ERR(group))
- return PTR_ERR(group);
-
- iommu_group_put(group);
- iommu_device_link(&qcom_iommu->iommu, dev);
-
- return 0;
+ return &qcom_iommu->iommu;
}
-static void qcom_iommu_remove_device(struct device *dev)
+static void qcom_iommu_release_device(struct device *dev)
{
struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
if (!qcom_iommu)
return;
- iommu_device_unlink(&qcom_iommu->iommu, dev);
- iommu_group_remove_device(dev);
iommu_fwspec_free(dev);
}
@@ -619,8 +609,8 @@ static const struct iommu_ops qcom_iommu_ops = {
.flush_iotlb_all = qcom_iommu_flush_iotlb_all,
.iotlb_sync = qcom_iommu_iotlb_sync,
.iova_to_phys = qcom_iommu_iova_to_phys,
- .add_device = qcom_iommu_add_device,
- .remove_device = qcom_iommu_remove_device,
+ .probe_device = qcom_iommu_probe_device,
+ .release_device = qcom_iommu_release_device,
.device_group = generic_device_group,
.of_xlate = qcom_iommu_of_xlate,
.pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index b33cdd5aad81..d25c2486ca07 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -1054,40 +1054,28 @@ static void rk_iommu_domain_free(struct iommu_domain *domain)
kfree(rk_domain);
}
-static int rk_iommu_add_device(struct device *dev)
+static struct iommu_device *rk_iommu_probe_device(struct device *dev)
{
- struct iommu_group *group;
- struct rk_iommu *iommu;
struct rk_iommudata *data;
+ struct rk_iommu *iommu;
data = dev->archdata.iommu;
if (!data)
- return -ENODEV;
+ return ERR_PTR(-ENODEV);
iommu = rk_iommu_from_dev(dev);
- group = iommu_group_get_for_dev(dev);
- if (IS_ERR(group))
- return PTR_ERR(group);
- iommu_group_put(group);
-
- iommu_device_link(&iommu->iommu, dev);
data->link = device_link_add(dev, iommu->dev,
DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
- return 0;
+ return &iommu->iommu;
}
-static void rk_iommu_remove_device(struct device *dev)
+static void rk_iommu_release_device(struct device *dev)
{
- struct rk_iommu *iommu;
struct rk_iommudata *data = dev->archdata.iommu;
- iommu = rk_iommu_from_dev(dev);
-
device_link_del(data->link);
- iommu_device_unlink(&iommu->iommu, dev);
- iommu_group_remove_device(dev);
}
static struct iommu_group *rk_iommu_device_group(struct device *dev)
@@ -1126,8 +1114,8 @@ static const struct iommu_ops rk_iommu_ops = {
.detach_dev = rk_iommu_detach_device,
.map = rk_iommu_map,
.unmap = rk_iommu_unmap,
- .add_device = rk_iommu_add_device,
- .remove_device = rk_iommu_remove_device,
+ .probe_device = rk_iommu_probe_device,
+ .release_device = rk_iommu_release_device,
.iova_to_phys = rk_iommu_iova_to_phys,
.device_group = rk_iommu_device_group,
.pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP,
diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
index 1137f3ddcb85..8895dbb705eb 100644
--- a/drivers/iommu/s390-iommu.c
+++ b/drivers/iommu/s390-iommu.c
@@ -87,7 +87,7 @@ static int s390_iommu_attach_device(struct iommu_domain *domain,
struct device *dev)
{
struct s390_domain *s390_domain = to_s390_domain(domain);
- struct zpci_dev *zdev = to_pci_dev(dev)->sysdata;
+ struct zpci_dev *zdev = to_zpci_dev(dev);
struct s390_domain_device *domain_device;
unsigned long flags;
int rc;
@@ -139,7 +139,7 @@ static void s390_iommu_detach_device(struct iommu_domain *domain,
struct device *dev)
{
struct s390_domain *s390_domain = to_s390_domain(domain);
- struct zpci_dev *zdev = to_pci_dev(dev)->sysdata;
+ struct zpci_dev *zdev = to_zpci_dev(dev);
struct s390_domain_device *domain_device, *tmp;
unsigned long flags;
int found = 0;
@@ -166,23 +166,16 @@ static void s390_iommu_detach_device(struct iommu_domain *domain,
}
}
-static int s390_iommu_add_device(struct device *dev)
+static struct iommu_device *s390_iommu_probe_device(struct device *dev)
{
- struct iommu_group *group = iommu_group_get_for_dev(dev);
- struct zpci_dev *zdev = to_pci_dev(dev)->sysdata;
+ struct zpci_dev *zdev = to_zpci_dev(dev);
- if (IS_ERR(group))
- return PTR_ERR(group);
-
- iommu_group_put(group);
- iommu_device_link(&zdev->iommu_dev, dev);
-
- return 0;
+ return &zdev->iommu_dev;
}
-static void s390_iommu_remove_device(struct device *dev)
+static void s390_iommu_release_device(struct device *dev)
{
- struct zpci_dev *zdev = to_pci_dev(dev)->sysdata;
+ struct zpci_dev *zdev = to_zpci_dev(dev);
struct iommu_domain *domain;
/*
@@ -191,7 +184,7 @@ static void s390_iommu_remove_device(struct device *dev)
* to vfio-pci and completing the VFIO_SET_IOMMU ioctl (which triggers
* the attach_dev), removing the device via
* "echo 1 > /sys/bus/pci/devices/.../remove" won't trigger detach_dev,
- * only remove_device will be called via the BUS_NOTIFY_REMOVED_DEVICE
+ * only release_device will be called via the BUS_NOTIFY_REMOVED_DEVICE
* notifier.
*
* So let's call detach_dev from here if it hasn't been called before.
@@ -201,9 +194,6 @@ static void s390_iommu_remove_device(struct device *dev)
if (domain)
s390_iommu_detach_device(domain, dev);
}
-
- iommu_device_unlink(&zdev->iommu_dev, dev);
- iommu_group_remove_device(dev);
}
static int s390_iommu_update_trans(struct s390_domain *s390_domain,
@@ -373,8 +363,8 @@ static const struct iommu_ops s390_iommu_ops = {
.map = s390_iommu_map,
.unmap = s390_iommu_unmap,
.iova_to_phys = s390_iommu_iova_to_phys,
- .add_device = s390_iommu_add_device,
- .remove_device = s390_iommu_remove_device,
+ .probe_device = s390_iommu_probe_device,
+ .release_device = s390_iommu_release_device,
.device_group = generic_device_group,
.pgsize_bitmap = S390_IOMMU_PGSIZES,
};
diff --git a/drivers/iommu/sun50i-iommu.c b/drivers/iommu/sun50i-iommu.c
new file mode 100644
index 000000000000..fce605e96aa2
--- /dev/null
+++ b/drivers/iommu/sun50i-iommu.c
@@ -0,0 +1,1023 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+// Copyright (C) 2016-2018, Allwinner Technology CO., LTD.
+// Copyright (C) 2019-2020, Cerno
+
+#include <linux/bitfield.h>
+#include <linux/bug.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-iommu.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/iommu.h>
+#include <linux/iopoll.h>
+#include <linux/ioport.h>
+#include <linux/log2.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#define IOMMU_RESET_REG 0x010
+#define IOMMU_ENABLE_REG 0x020
+#define IOMMU_ENABLE_ENABLE BIT(0)
+
+#define IOMMU_BYPASS_REG 0x030
+#define IOMMU_AUTO_GATING_REG 0x040
+#define IOMMU_AUTO_GATING_ENABLE BIT(0)
+
+#define IOMMU_WBUF_CTRL_REG 0x044
+#define IOMMU_OOO_CTRL_REG 0x048
+#define IOMMU_4KB_BDY_PRT_CTRL_REG 0x04c
+#define IOMMU_TTB_REG 0x050
+#define IOMMU_TLB_ENABLE_REG 0x060
+#define IOMMU_TLB_PREFETCH_REG 0x070
+#define IOMMU_TLB_PREFETCH_MASTER_ENABLE(m) BIT(m)
+
+#define IOMMU_TLB_FLUSH_REG 0x080
+#define IOMMU_TLB_FLUSH_PTW_CACHE BIT(17)
+#define IOMMU_TLB_FLUSH_MACRO_TLB BIT(16)
+#define IOMMU_TLB_FLUSH_MICRO_TLB(i) (BIT(i) & GENMASK(5, 0))
+
+#define IOMMU_TLB_IVLD_ADDR_REG 0x090
+#define IOMMU_TLB_IVLD_ADDR_MASK_REG 0x094
+#define IOMMU_TLB_IVLD_ENABLE_REG 0x098
+#define IOMMU_TLB_IVLD_ENABLE_ENABLE BIT(0)
+
+#define IOMMU_PC_IVLD_ADDR_REG 0x0a0
+#define IOMMU_PC_IVLD_ENABLE_REG 0x0a8
+#define IOMMU_PC_IVLD_ENABLE_ENABLE BIT(0)
+
+#define IOMMU_DM_AUT_CTRL_REG(d) (0x0b0 + ((d) / 2) * 4)
+#define IOMMU_DM_AUT_CTRL_RD_UNAVAIL(d, m) (1 << (((d & 1) * 16) + ((m) * 2)))
+#define IOMMU_DM_AUT_CTRL_WR_UNAVAIL(d, m) (1 << (((d & 1) * 16) + ((m) * 2) + 1))
+
+#define IOMMU_DM_AUT_OVWT_REG 0x0d0
+#define IOMMU_INT_ENABLE_REG 0x100
+#define IOMMU_INT_CLR_REG 0x104
+#define IOMMU_INT_STA_REG 0x108
+#define IOMMU_INT_ERR_ADDR_REG(i) (0x110 + (i) * 4)
+#define IOMMU_INT_ERR_ADDR_L1_REG 0x130
+#define IOMMU_INT_ERR_ADDR_L2_REG 0x134
+#define IOMMU_INT_ERR_DATA_REG(i) (0x150 + (i) * 4)
+#define IOMMU_L1PG_INT_REG 0x0180
+#define IOMMU_L2PG_INT_REG 0x0184
+
+#define IOMMU_INT_INVALID_L2PG BIT(17)
+#define IOMMU_INT_INVALID_L1PG BIT(16)
+#define IOMMU_INT_MASTER_PERMISSION(m) BIT(m)
+#define IOMMU_INT_MASTER_MASK (IOMMU_INT_MASTER_PERMISSION(0) | \
+ IOMMU_INT_MASTER_PERMISSION(1) | \
+ IOMMU_INT_MASTER_PERMISSION(2) | \
+ IOMMU_INT_MASTER_PERMISSION(3) | \
+ IOMMU_INT_MASTER_PERMISSION(4) | \
+ IOMMU_INT_MASTER_PERMISSION(5))
+#define IOMMU_INT_MASK (IOMMU_INT_INVALID_L1PG | \
+ IOMMU_INT_INVALID_L2PG | \
+ IOMMU_INT_MASTER_MASK)
+
+#define PT_ENTRY_SIZE sizeof(u32)
+
+#define NUM_DT_ENTRIES 4096
+#define DT_SIZE (NUM_DT_ENTRIES * PT_ENTRY_SIZE)
+
+#define NUM_PT_ENTRIES 256
+#define PT_SIZE (NUM_PT_ENTRIES * PT_ENTRY_SIZE)
+
+struct sun50i_iommu {
+ struct iommu_device iommu;
+
+ /* Lock to modify the IOMMU registers */
+ spinlock_t iommu_lock;
+
+ struct device *dev;
+ void __iomem *base;
+ struct reset_control *reset;
+ struct clk *clk;
+
+ struct iommu_domain *domain;
+ struct iommu_group *group;
+ struct kmem_cache *pt_pool;
+};
+
+struct sun50i_iommu_domain {
+ struct iommu_domain domain;
+
+ /* Number of devices attached to the domain */
+ refcount_t refcnt;
+
+ /* L1 Page Table */
+ u32 *dt;
+ dma_addr_t dt_dma;
+
+ struct sun50i_iommu *iommu;
+};
+
+static struct sun50i_iommu_domain *to_sun50i_domain(struct iommu_domain *domain)
+{
+ return container_of(domain, struct sun50i_iommu_domain, domain);
+}
+
+static struct sun50i_iommu *sun50i_iommu_from_dev(struct device *dev)
+{
+ return dev_iommu_priv_get(dev);
+}
+
+static u32 iommu_read(struct sun50i_iommu *iommu, u32 offset)
+{
+ return readl(iommu->base + offset);
+}
+
+static void iommu_write(struct sun50i_iommu *iommu, u32 offset, u32 value)
+{
+ writel(value, iommu->base + offset);
+}
+
+/*
+ * The Allwinner H6 IOMMU uses a 2-level page table.
+ *
+ * The first level is the usual Directory Table (DT), which consists of
+ * 4096 4-byte Directory Table Entries (DTE), each pointing to a Page
+ * Table (PT).
+ *
+ * Each PT consists of 256 4-byte Page Table Entries (PTE), each
+ * pointing to a 4kB page of physical memory.
+ *
+ * The IOMMU supports a single DT, pointed to by the IOMMU_TTB_REG
+ * register, which contains its physical address.
+ */
+
+#define SUN50I_IOVA_DTE_MASK GENMASK(31, 20)
+#define SUN50I_IOVA_PTE_MASK GENMASK(19, 12)
+#define SUN50I_IOVA_PAGE_MASK GENMASK(11, 0)
+
+static u32 sun50i_iova_get_dte_index(dma_addr_t iova)
+{
+ return FIELD_GET(SUN50I_IOVA_DTE_MASK, iova);
+}
+
+static u32 sun50i_iova_get_pte_index(dma_addr_t iova)
+{
+ return FIELD_GET(SUN50I_IOVA_PTE_MASK, iova);
+}
+
+static u32 sun50i_iova_get_page_offset(dma_addr_t iova)
+{
+ return FIELD_GET(SUN50I_IOVA_PAGE_MASK, iova);
+}
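A worked example of the split performed by these helpers (editor's illustration, not part of the patch):

/*
 * iova = 0x12345678:
 *   sun50i_iova_get_dte_index(iova)   == 0x123  (bits 31:20, entry in the 4096-entry DT)
 *   sun50i_iova_get_pte_index(iova)   == 0x45   (bits 19:12, entry in the 256-entry PT)
 *   sun50i_iova_get_page_offset(iova) == 0x678  (bits 11:0, byte offset within the 4kB page)
 */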
+
+/*
+ * Each Directory Table Entry has a Page Table address and a valid
+ * bit:
+ *
+ * +---------------------+-----------+-+
+ * | PT address | Reserved |V|
+ * +---------------------+-----------+-+
+ * 31:10 - Page Table address
+ * 9:2 - Reserved
+ * 1:0 - 1 if the entry is valid
+ */
+
+#define SUN50I_DTE_PT_ADDRESS_MASK GENMASK(31, 10)
+#define SUN50I_DTE_PT_ATTRS GENMASK(1, 0)
+#define SUN50I_DTE_PT_VALID 1
+
+static phys_addr_t sun50i_dte_get_pt_address(u32 dte)
+{
+ return (phys_addr_t)dte & SUN50I_DTE_PT_ADDRESS_MASK;
+}
+
+static bool sun50i_dte_is_pt_valid(u32 dte)
+{
+ return (dte & SUN50I_DTE_PT_ATTRS) == SUN50I_DTE_PT_VALID;
+}
+
+static u32 sun50i_mk_dte(dma_addr_t pt_dma)
+{
+ return (pt_dma & SUN50I_DTE_PT_ADDRESS_MASK) | SUN50I_DTE_PT_VALID;
+}
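For example (editor's illustration, not part of the patch), a page table whose DMA address is 0x40002000 encodes as:

/*
 * sun50i_mk_dte(0x40002000) == (0x40002000 & GENMASK(31, 10)) | SUN50I_DTE_PT_VALID
 *                           == 0x40002001
 */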
+
+/*
+ * Each PTE has a Page address, an authority index and a valid bit:
+ *
+ * +----------------+-----+-----+-----+---+-----+
+ * | Page address | Rsv | ACI | Rsv | V | Rsv |
+ * +----------------+-----+-----+-----+---+-----+
+ * 31:12 - Page address
+ * 11:8 - Reserved
+ * 7:4 - Authority Control Index
+ * 3:2 - Reserved
+ * 1 - 1 if the entry is valid
+ * 0 - Reserved
+ *
+ * The way permissions work is that the IOMMU has 16 "domains" that
+ * can be configured to give each master either read or write
+ * permissions through the IOMMU_DM_AUT_CTRL_REG registers. Domain 0
+ * appears to be the default domain, and its permissions in
+ * IOMMU_DM_AUT_CTRL_REG are read-only, so it's not really useful for
+ * enforcing any particular permission.
+ *
+ * Each page table entry then carries a reference to the domain it
+ * belongs to, so that permissions can actually be enforced on a
+ * per-page basis.
+ *
+ * In order to make this work with the IOMMU framework, we use four
+ * different domains, starting at 1: RD_WR, RD, WR and NONE, depending
+ * on the permission we want to enforce. Each domain has every master
+ * set up in the same way, since the IOMMU framework doesn't seem to
+ * restrict page access on a per-device basis. We then use the
+ * relevant domain index when generating the page table entry,
+ * depending on the permissions we want enforced.
+ */
+
+enum sun50i_iommu_aci {
+ SUN50I_IOMMU_ACI_DO_NOT_USE = 0,
+ SUN50I_IOMMU_ACI_NONE,
+ SUN50I_IOMMU_ACI_RD,
+ SUN50I_IOMMU_ACI_WR,
+ SUN50I_IOMMU_ACI_RD_WR,
+};
+
+#define SUN50I_PTE_PAGE_ADDRESS_MASK GENMASK(31, 12)
+#define SUN50I_PTE_ACI_MASK GENMASK(7, 4)
+#define SUN50I_PTE_PAGE_VALID BIT(1)
+
+static phys_addr_t sun50i_pte_get_page_address(u32 pte)
+{
+ return (phys_addr_t)pte & SUN50I_PTE_PAGE_ADDRESS_MASK;
+}
+
+static enum sun50i_iommu_aci sun50i_get_pte_aci(u32 pte)
+{
+ return FIELD_GET(SUN50I_PTE_ACI_MASK, pte);
+}
+
+static bool sun50i_pte_is_page_valid(u32 pte)
+{
+ return pte & SUN50I_PTE_PAGE_VALID;
+}
+
+static u32 sun50i_mk_pte(phys_addr_t page, int prot)
+{
+ enum sun50i_iommu_aci aci;
+ u32 flags = 0;
+
+ if (prot & (IOMMU_READ | IOMMU_WRITE))
+ aci = SUN50I_IOMMU_ACI_RD_WR;
+ else if (prot & IOMMU_READ)
+ aci = SUN50I_IOMMU_ACI_RD;
+ else if (prot & IOMMU_WRITE)
+ aci = SUN50I_IOMMU_ACI_WR;
+ else
+ aci = SUN50I_IOMMU_ACI_NONE;
+
+ flags |= FIELD_PREP(SUN50I_PTE_ACI_MASK, aci);
+ page &= SUN50I_PTE_PAGE_ADDRESS_MASK;
+ return page | flags | SUN50I_PTE_PAGE_VALID;
+}
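To make the encoding concrete (editor's illustration, not part of the patch): mapping a page at physical address 0x40001000 with IOMMU_READ | IOMMU_WRITE selects SUN50I_IOMMU_ACI_RD_WR (value 4), so:

/*
 * sun50i_mk_pte(0x40001000, IOMMU_READ | IOMMU_WRITE)
 *     == 0x40001000                     (page address, bits 31:12)
 *      | FIELD_PREP(GENMASK(7, 4), 4)   (ACI field  -> 0x40)
 *      | SUN50I_PTE_PAGE_VALID          (BIT(1)     -> 0x02)
 *     == 0x40001042
 */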
+
+static void sun50i_table_flush(struct sun50i_iommu_domain *sun50i_domain,
+ void *vaddr, unsigned int count)
+{
+ struct sun50i_iommu *iommu = sun50i_domain->iommu;
+ dma_addr_t dma = virt_to_phys(vaddr);
+ size_t size = count * PT_ENTRY_SIZE;
+
+ dma_sync_single_for_device(iommu->dev, dma, size, DMA_TO_DEVICE);
+}
+
+static int sun50i_iommu_flush_all_tlb(struct sun50i_iommu *iommu)
+{
+ u32 reg;
+ int ret;
+
+ assert_spin_locked(&iommu->iommu_lock);
+
+ iommu_write(iommu,
+ IOMMU_TLB_FLUSH_REG,
+ IOMMU_TLB_FLUSH_PTW_CACHE |
+ IOMMU_TLB_FLUSH_MACRO_TLB |
+ IOMMU_TLB_FLUSH_MICRO_TLB(5) |
+ IOMMU_TLB_FLUSH_MICRO_TLB(4) |
+ IOMMU_TLB_FLUSH_MICRO_TLB(3) |
+ IOMMU_TLB_FLUSH_MICRO_TLB(2) |
+ IOMMU_TLB_FLUSH_MICRO_TLB(1) |
+ IOMMU_TLB_FLUSH_MICRO_TLB(0));
+
+ ret = readl_poll_timeout(iommu->base + IOMMU_TLB_FLUSH_REG,
+ reg, !reg,
+ 1, 2000);
+ if (ret)
+ dev_warn(iommu->dev, "TLB Flush timed out!\n");
+
+ return ret;
+}
+
+static void sun50i_iommu_flush_iotlb_all(struct iommu_domain *domain)
+{
+ struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
+ struct sun50i_iommu *iommu = sun50i_domain->iommu;
+ unsigned long flags;
+
+ /*
+ * At boot, we'll have a first call into .flush_iotlb_all right after
+ * .probe_device, and since we link our (single) domain to our iommu in
+ * the .attach_device callback, we don't have that pointer set.
+ *
+ * It shouldn't really be any trouble to ignore it though since we flush
+ * all caches as part of the device powerup.
+ */
+ if (!iommu)
+ return;
+
+ spin_lock_irqsave(&iommu->iommu_lock, flags);
+ sun50i_iommu_flush_all_tlb(iommu);
+ spin_unlock_irqrestore(&iommu->iommu_lock, flags);
+}
+
+static void sun50i_iommu_iotlb_sync(struct iommu_domain *domain,
+ struct iommu_iotlb_gather *gather)
+{
+ sun50i_iommu_flush_iotlb_all(domain);
+}
+
+static int sun50i_iommu_enable(struct sun50i_iommu *iommu)
+{
+ struct sun50i_iommu_domain *sun50i_domain;
+ unsigned long flags;
+ int ret;
+
+ if (!iommu->domain)
+ return 0;
+
+ sun50i_domain = to_sun50i_domain(iommu->domain);
+
+ ret = reset_control_deassert(iommu->reset);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(iommu->clk);
+ if (ret)
+ goto err_reset_assert;
+
+ spin_lock_irqsave(&iommu->iommu_lock, flags);
+
+ iommu_write(iommu, IOMMU_TTB_REG, sun50i_domain->dt_dma);
+ iommu_write(iommu, IOMMU_TLB_PREFETCH_REG,
+ IOMMU_TLB_PREFETCH_MASTER_ENABLE(0) |
+ IOMMU_TLB_PREFETCH_MASTER_ENABLE(1) |
+ IOMMU_TLB_PREFETCH_MASTER_ENABLE(2) |
+ IOMMU_TLB_PREFETCH_MASTER_ENABLE(3) |
+ IOMMU_TLB_PREFETCH_MASTER_ENABLE(4) |
+ IOMMU_TLB_PREFETCH_MASTER_ENABLE(5));
+ iommu_write(iommu, IOMMU_INT_ENABLE_REG, IOMMU_INT_MASK);
+ iommu_write(iommu, IOMMU_DM_AUT_CTRL_REG(SUN50I_IOMMU_ACI_NONE),
+ IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 0) |
+ IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 0) |
+ IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 1) |
+ IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 1) |
+ IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 2) |
+ IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 2) |
+ IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 3) |
+ IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 3) |
+ IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 4) |
+ IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 4) |
+ IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 5) |
+ IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 5));
+
+ iommu_write(iommu, IOMMU_DM_AUT_CTRL_REG(SUN50I_IOMMU_ACI_RD),
+ IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 0) |
+ IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 1) |
+ IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 2) |
+ IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 3) |
+ IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 4) |
+ IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 5));
+
+ iommu_write(iommu, IOMMU_DM_AUT_CTRL_REG(SUN50I_IOMMU_ACI_WR),
+ IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 0) |
+ IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 1) |
+ IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 2) |
+ IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 3) |
+ IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 4) |
+ IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 5));
+
+ ret = sun50i_iommu_flush_all_tlb(iommu);
+ if (ret) {
+ spin_unlock_irqrestore(&iommu->iommu_lock, flags);
+ goto err_clk_disable;
+ }
+
+ iommu_write(iommu, IOMMU_AUTO_GATING_REG, IOMMU_AUTO_GATING_ENABLE);
+ iommu_write(iommu, IOMMU_ENABLE_REG, IOMMU_ENABLE_ENABLE);
+
+ spin_unlock_irqrestore(&iommu->iommu_lock, flags);
+
+ return 0;
+
+err_clk_disable:
+ clk_disable_unprepare(iommu->clk);
+
+err_reset_assert:
+ reset_control_assert(iommu->reset);
+
+ return ret;
+}
+
+static void sun50i_iommu_disable(struct sun50i_iommu *iommu)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&iommu->iommu_lock, flags);
+
+ iommu_write(iommu, IOMMU_ENABLE_REG, 0);
+ iommu_write(iommu, IOMMU_TTB_REG, 0);
+
+ spin_unlock_irqrestore(&iommu->iommu_lock, flags);
+
+ clk_disable_unprepare(iommu->clk);
+ reset_control_assert(iommu->reset);
+}
+
+static void *sun50i_iommu_alloc_page_table(struct sun50i_iommu *iommu,
+ gfp_t gfp)
+{
+ dma_addr_t pt_dma;
+ u32 *page_table;
+
+ page_table = kmem_cache_zalloc(iommu->pt_pool, gfp);
+ if (!page_table)
+ return ERR_PTR(-ENOMEM);
+
+ pt_dma = dma_map_single(iommu->dev, page_table, PT_SIZE, DMA_TO_DEVICE);
+ if (dma_mapping_error(iommu->dev, pt_dma)) {
+ dev_err(iommu->dev, "Couldn't map L2 Page Table\n");
+ kmem_cache_free(iommu->pt_pool, page_table);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* We rely on the physical address and DMA address being the same */
+ WARN_ON(pt_dma != virt_to_phys(page_table));
+
+ return page_table;
+}
+
+static void sun50i_iommu_free_page_table(struct sun50i_iommu *iommu,
+ u32 *page_table)
+{
+ phys_addr_t pt_phys = virt_to_phys(page_table);
+
+ dma_unmap_single(iommu->dev, pt_phys, PT_SIZE, DMA_TO_DEVICE);
+ kmem_cache_free(iommu->pt_pool, page_table);
+}
+
+static u32 *sun50i_dte_get_page_table(struct sun50i_iommu_domain *sun50i_domain,
+ dma_addr_t iova, gfp_t gfp)
+{
+ struct sun50i_iommu *iommu = sun50i_domain->iommu;
+ u32 *page_table;
+ u32 *dte_addr;
+ u32 old_dte;
+ u32 dte;
+
+ dte_addr = &sun50i_domain->dt[sun50i_iova_get_dte_index(iova)];
+ dte = *dte_addr;
+ if (sun50i_dte_is_pt_valid(dte)) {
+ phys_addr_t pt_phys = sun50i_dte_get_pt_address(dte);
+ return (u32 *)phys_to_virt(pt_phys);
+ }
+
+ page_table = sun50i_iommu_alloc_page_table(iommu, gfp);
+ if (IS_ERR(page_table))
+ return page_table;
+
+ dte = sun50i_mk_dte(virt_to_phys(page_table));
+ old_dte = cmpxchg(dte_addr, 0, dte);
+ if (old_dte) {
+ phys_addr_t installed_pt_phys =
+ sun50i_dte_get_pt_address(old_dte);
+ u32 *installed_pt = phys_to_virt(installed_pt_phys);
+ u32 *drop_pt = page_table;
+
+ page_table = installed_pt;
+ dte = old_dte;
+ sun50i_iommu_free_page_table(iommu, drop_pt);
+ }
+
+ sun50i_table_flush(sun50i_domain, page_table, PT_SIZE);
+ sun50i_table_flush(sun50i_domain, dte_addr, 1);
+
+ return page_table;
+}
+
+static int sun50i_iommu_map(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+{
+ struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
+ struct sun50i_iommu *iommu = sun50i_domain->iommu;
+ u32 pte_index;
+ u32 *page_table, *pte_addr;
+ int ret = 0;
+
+ page_table = sun50i_dte_get_page_table(sun50i_domain, iova, gfp);
+ if (IS_ERR(page_table)) {
+ ret = PTR_ERR(page_table);
+ goto out;
+ }
+
+ pte_index = sun50i_iova_get_pte_index(iova);
+ pte_addr = &page_table[pte_index];
+ if (unlikely(sun50i_pte_is_page_valid(*pte_addr))) {
+ phys_addr_t page_phys = sun50i_pte_get_page_address(*pte_addr);
+ dev_err(iommu->dev,
+ "iova %pad already mapped to %pa cannot remap to %pa prot: %#x\n",
+ &iova, &page_phys, &paddr, prot);
+ ret = -EBUSY;
+ goto out;
+ }
+
+ *pte_addr = sun50i_mk_pte(paddr, prot);
+ sun50i_table_flush(sun50i_domain, pte_addr, 1);
+
+out:
+ return ret;
+}
+
+static size_t sun50i_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
+ size_t size, struct iommu_iotlb_gather *gather)
+{
+ struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
+ phys_addr_t pt_phys;
+ dma_addr_t pte_dma;
+ u32 *pte_addr;
+ u32 dte;
+
+ dte = sun50i_domain->dt[sun50i_iova_get_dte_index(iova)];
+ if (!sun50i_dte_is_pt_valid(dte))
+ return 0;
+
+ pt_phys = sun50i_dte_get_pt_address(dte);
+ pte_addr = (u32 *)phys_to_virt(pt_phys) + sun50i_iova_get_pte_index(iova);
+ pte_dma = pt_phys + sun50i_iova_get_pte_index(iova) * PT_ENTRY_SIZE;
+
+ if (!sun50i_pte_is_page_valid(*pte_addr))
+ return 0;
+
+ memset(pte_addr, 0, sizeof(*pte_addr));
+ sun50i_table_flush(sun50i_domain, pte_addr, 1);
+
+ return SZ_4K;
+}
+
+static phys_addr_t sun50i_iommu_iova_to_phys(struct iommu_domain *domain,
+ dma_addr_t iova)
+{
+ struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
+ phys_addr_t pt_phys;
+ u32 *page_table;
+ u32 dte, pte;
+
+ dte = sun50i_domain->dt[sun50i_iova_get_dte_index(iova)];
+ if (!sun50i_dte_is_pt_valid(dte))
+ return 0;
+
+ pt_phys = sun50i_dte_get_pt_address(dte);
+ page_table = (u32 *)phys_to_virt(pt_phys);
+ pte = page_table[sun50i_iova_get_pte_index(iova)];
+ if (!sun50i_pte_is_page_valid(pte))
+ return 0;
+
+ return sun50i_pte_get_page_address(pte) +
+ sun50i_iova_get_page_offset(iova);
+}
+
+static struct iommu_domain *sun50i_iommu_domain_alloc(unsigned type)
+{
+ struct sun50i_iommu_domain *sun50i_domain;
+
+ if (type != IOMMU_DOMAIN_DMA &&
+ type != IOMMU_DOMAIN_IDENTITY &&
+ type != IOMMU_DOMAIN_UNMANAGED)
+ return NULL;
+
+ sun50i_domain = kzalloc(sizeof(*sun50i_domain), GFP_KERNEL);
+ if (!sun50i_domain)
+ return NULL;
+
+ if (type == IOMMU_DOMAIN_DMA &&
+ iommu_get_dma_cookie(&sun50i_domain->domain))
+ goto err_free_domain;
+
+ sun50i_domain->dt = (u32 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ get_order(DT_SIZE));
+ if (!sun50i_domain->dt)
+ goto err_put_cookie;
+
+ refcount_set(&sun50i_domain->refcnt, 1);
+
+ sun50i_domain->domain.geometry.aperture_start = 0;
+ sun50i_domain->domain.geometry.aperture_end = DMA_BIT_MASK(32);
+ sun50i_domain->domain.geometry.force_aperture = true;
+
+ return &sun50i_domain->domain;
+
+err_put_cookie:
+ if (type == IOMMU_DOMAIN_DMA)
+ iommu_put_dma_cookie(&sun50i_domain->domain);
+
+err_free_domain:
+ kfree(sun50i_domain);
+
+ return NULL;
+}
+
+static void sun50i_iommu_domain_free(struct iommu_domain *domain)
+{
+ struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
+
+ free_pages((unsigned long)sun50i_domain->dt, get_order(DT_SIZE));
+ sun50i_domain->dt = NULL;
+
+ iommu_put_dma_cookie(domain);
+
+ kfree(sun50i_domain);
+}
+
+static int sun50i_iommu_attach_domain(struct sun50i_iommu *iommu,
+ struct sun50i_iommu_domain *sun50i_domain)
+{
+ iommu->domain = &sun50i_domain->domain;
+ sun50i_domain->iommu = iommu;
+
+ sun50i_domain->dt_dma = dma_map_single(iommu->dev, sun50i_domain->dt,
+ DT_SIZE, DMA_TO_DEVICE);
+ if (dma_mapping_error(iommu->dev, sun50i_domain->dt_dma)) {
+ dev_err(iommu->dev, "Couldn't map L1 Page Table\n");
+ return -ENOMEM;
+ }
+
+ return sun50i_iommu_enable(iommu);
+}
+
+static void sun50i_iommu_detach_domain(struct sun50i_iommu *iommu,
+ struct sun50i_iommu_domain *sun50i_domain)
+{
+ unsigned int i;
+
+ for (i = 0; i < NUM_DT_ENTRIES; i++) {
+ phys_addr_t pt_phys;
+ u32 *page_table;
+ u32 *dte_addr;
+ u32 dte;
+
+ dte_addr = &sun50i_domain->dt[i];
+ dte = *dte_addr;
+ if (!sun50i_dte_is_pt_valid(dte))
+ continue;
+
+ memset(dte_addr, 0, sizeof(*dte_addr));
+ sun50i_table_flush(sun50i_domain, dte_addr, 1);
+
+ pt_phys = sun50i_dte_get_pt_address(dte);
+ page_table = phys_to_virt(pt_phys);
+ sun50i_iommu_free_page_table(iommu, page_table);
+ }
+
+
+ sun50i_iommu_disable(iommu);
+
+ dma_unmap_single(iommu->dev, virt_to_phys(sun50i_domain->dt),
+ DT_SIZE, DMA_TO_DEVICE);
+
+ iommu->domain = NULL;
+}
+
+static void sun50i_iommu_detach_device(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
+ struct sun50i_iommu *iommu = dev_iommu_priv_get(dev);
+
+ dev_dbg(dev, "Detaching from IOMMU domain\n");
+
+ if (iommu->domain != domain)
+ return;
+
+ if (refcount_dec_and_test(&sun50i_domain->refcnt))
+ sun50i_iommu_detach_domain(iommu, sun50i_domain);
+}
+
+static int sun50i_iommu_attach_device(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
+ struct sun50i_iommu *iommu;
+
+ iommu = sun50i_iommu_from_dev(dev);
+ if (!iommu)
+ return -ENODEV;
+
+ dev_dbg(dev, "Attaching to IOMMU domain\n");
+
+ refcount_inc(&sun50i_domain->refcnt);
+
+ if (iommu->domain == domain)
+ return 0;
+
+ if (iommu->domain)
+ sun50i_iommu_detach_device(iommu->domain, dev);
+
+ sun50i_iommu_attach_domain(iommu, sun50i_domain);
+
+ return 0;
+}
+
+static struct iommu_device *sun50i_iommu_probe_device(struct device *dev)
+{
+ struct sun50i_iommu *iommu;
+
+ iommu = sun50i_iommu_from_dev(dev);
+ if (!iommu)
+ return ERR_PTR(-ENODEV);
+
+ return &iommu->iommu;
+}
+
+static void sun50i_iommu_release_device(struct device *dev) {}
+
+static struct iommu_group *sun50i_iommu_device_group(struct device *dev)
+{
+ struct sun50i_iommu *iommu = sun50i_iommu_from_dev(dev);
+
+ return iommu_group_ref_get(iommu->group);
+}
+
+static int sun50i_iommu_of_xlate(struct device *dev,
+ struct of_phandle_args *args)
+{
+ struct platform_device *iommu_pdev = of_find_device_by_node(args->np);
+ unsigned id = args->args[0];
+
+ dev_iommu_priv_set(dev, platform_get_drvdata(iommu_pdev));
+
+ return iommu_fwspec_add_ids(dev, &id, 1);
+}
+
+static const struct iommu_ops sun50i_iommu_ops = {
+ .pgsize_bitmap = SZ_4K,
+ .attach_dev = sun50i_iommu_attach_device,
+ .detach_dev = sun50i_iommu_detach_device,
+ .device_group = sun50i_iommu_device_group,
+ .domain_alloc = sun50i_iommu_domain_alloc,
+ .domain_free = sun50i_iommu_domain_free,
+ .flush_iotlb_all = sun50i_iommu_flush_iotlb_all,
+ .iotlb_sync = sun50i_iommu_iotlb_sync,
+ .iova_to_phys = sun50i_iommu_iova_to_phys,
+ .map = sun50i_iommu_map,
+ .of_xlate = sun50i_iommu_of_xlate,
+ .probe_device = sun50i_iommu_probe_device,
+ .release_device = sun50i_iommu_release_device,
+ .unmap = sun50i_iommu_unmap,
+};
+
+static void sun50i_iommu_report_fault(struct sun50i_iommu *iommu,
+ unsigned master, phys_addr_t iova,
+ unsigned prot)
+{
+ dev_err(iommu->dev, "Page fault for %pad (master %d, dir %s)\n",
+ &iova, master, (prot == IOMMU_FAULT_WRITE) ? "wr" : "rd");
+
+ if (iommu->domain)
+ report_iommu_fault(iommu->domain, iommu->dev, iova, prot);
+ else
+ dev_err(iommu->dev, "Page fault while iommu not attached to any domain?\n");
+}
+
+static phys_addr_t sun50i_iommu_handle_pt_irq(struct sun50i_iommu *iommu,
+ unsigned addr_reg,
+ unsigned blame_reg)
+{
+ phys_addr_t iova;
+ unsigned master;
+ u32 blame;
+
+ assert_spin_locked(&iommu->iommu_lock);
+
+ iova = iommu_read(iommu, addr_reg);
+ blame = iommu_read(iommu, blame_reg);
+ master = ilog2(blame & IOMMU_INT_MASTER_MASK);
+
+ /*
+ * If the address is not in the page table, we can't get what
+ * operation triggered the fault. Assume it's a read
+ * operation.
+ */
+ sun50i_iommu_report_fault(iommu, master, iova, IOMMU_FAULT_READ);
+
+ return iova;
+}
+
+static phys_addr_t sun50i_iommu_handle_perm_irq(struct sun50i_iommu *iommu)
+{
+ enum sun50i_iommu_aci aci;
+ phys_addr_t iova;
+ unsigned master;
+ unsigned dir;
+ u32 blame;
+
+ assert_spin_locked(&iommu->iommu_lock);
+
+ blame = iommu_read(iommu, IOMMU_INT_STA_REG);
+ master = ilog2(blame & IOMMU_INT_MASTER_MASK);
+ iova = iommu_read(iommu, IOMMU_INT_ERR_ADDR_REG(master));
+ aci = sun50i_get_pte_aci(iommu_read(iommu,
+ IOMMU_INT_ERR_DATA_REG(master)));
+
+ switch (aci) {
+ /*
+ * If we are in the read-only domain, then it means we
+ * tried to write.
+ */
+ case SUN50I_IOMMU_ACI_RD:
+ dir = IOMMU_FAULT_WRITE;
+ break;
+
+ /*
+ * If we are in the write-only domain, then it means
+ * we tried to read.
+ */
+ case SUN50I_IOMMU_ACI_WR:
+
+ /*
+ * If we are in the domain without any permission, we
+ * can't really tell. Let's default to a read
+ * operation.
+ */
+ case SUN50I_IOMMU_ACI_NONE:
+
+ /* WTF? */
+ case SUN50I_IOMMU_ACI_RD_WR:
+ default:
+ dir = IOMMU_FAULT_READ;
+ break;
+ }
+
+ /*
+ * If the address is not in the page table, we can't tell which
+ * operation triggered the fault. Assume it was a read
+ * operation.
+ */
+ sun50i_iommu_report_fault(iommu, master, iova, dir);
+
+ return iova;
+}
+
+static irqreturn_t sun50i_iommu_irq(int irq, void *dev_id)
+{
+ struct sun50i_iommu *iommu = dev_id;
+ phys_addr_t iova;
+ u32 status;
+
+ spin_lock(&iommu->iommu_lock);
+
+ status = iommu_read(iommu, IOMMU_INT_STA_REG);
+ if (!(status & IOMMU_INT_MASK)) {
+ spin_unlock(&iommu->iommu_lock);
+ return IRQ_NONE;
+ }
+
+ if (status & IOMMU_INT_INVALID_L2PG)
+ iova = sun50i_iommu_handle_pt_irq(iommu,
+ IOMMU_INT_ERR_ADDR_L2_REG,
+ IOMMU_L2PG_INT_REG);
+ else if (status & IOMMU_INT_INVALID_L1PG)
+ iova = sun50i_iommu_handle_pt_irq(iommu,
+ IOMMU_INT_ERR_ADDR_L1_REG,
+ IOMMU_L1PG_INT_REG);
+ else
+ iova = sun50i_iommu_handle_perm_irq(iommu);
+
+ iommu_write(iommu, IOMMU_INT_CLR_REG, status);
+
+ iommu_write(iommu, IOMMU_RESET_REG, ~status);
+ iommu_write(iommu, IOMMU_RESET_REG, status);
+
+ spin_unlock(&iommu->iommu_lock);
+
+ return IRQ_HANDLED;
+}
+
+static int sun50i_iommu_probe(struct platform_device *pdev)
+{
+ struct sun50i_iommu *iommu;
+ int ret, irq;
+
+ iommu = devm_kzalloc(&pdev->dev, sizeof(*iommu), GFP_KERNEL);
+ if (!iommu)
+ return -ENOMEM;
+ spin_lock_init(&iommu->iommu_lock);
+ platform_set_drvdata(pdev, iommu);
+ iommu->dev = &pdev->dev;
+
+ iommu->pt_pool = kmem_cache_create(dev_name(&pdev->dev),
+ PT_SIZE, PT_SIZE,
+ SLAB_HWCACHE_ALIGN,
+ NULL);
+ if (!iommu->pt_pool)
+ return -ENOMEM;
+
+ iommu->group = iommu_group_alloc();
+ if (IS_ERR(iommu->group)) {
+ ret = PTR_ERR(iommu->group);
+ goto err_free_cache;
+ }
+
+ iommu->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(iommu->base)) {
+ ret = PTR_ERR(iommu->base);
+ goto err_free_group;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto err_free_group;
+ }
+
+ iommu->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(iommu->clk)) {
+ dev_err(&pdev->dev, "Couldn't get our clock.\n");
+ ret = PTR_ERR(iommu->clk);
+ goto err_free_group;
+ }
+
+ iommu->reset = devm_reset_control_get(&pdev->dev, NULL);
+ if (IS_ERR(iommu->reset)) {
+ dev_err(&pdev->dev, "Couldn't get our reset line.\n");
+ ret = PTR_ERR(iommu->reset);
+ goto err_free_group;
+ }
+
+ ret = iommu_device_sysfs_add(&iommu->iommu, &pdev->dev,
+ NULL, dev_name(&pdev->dev));
+ if (ret)
+ goto err_free_group;
+
+ iommu_device_set_ops(&iommu->iommu, &sun50i_iommu_ops);
+ iommu_device_set_fwnode(&iommu->iommu, &pdev->dev.of_node->fwnode);
+
+ ret = iommu_device_register(&iommu->iommu);
+ if (ret)
+ goto err_remove_sysfs;
+
+ ret = devm_request_irq(&pdev->dev, irq, sun50i_iommu_irq, 0,
+ dev_name(&pdev->dev), iommu);
+ if (ret < 0)
+ goto err_unregister;
+
+ bus_set_iommu(&platform_bus_type, &sun50i_iommu_ops);
+
+ return 0;
+
+err_unregister:
+ iommu_device_unregister(&iommu->iommu);
+
+err_remove_sysfs:
+ iommu_device_sysfs_remove(&iommu->iommu);
+
+err_free_group:
+ iommu_group_put(iommu->group);
+
+err_free_cache:
+ kmem_cache_destroy(iommu->pt_pool);
+
+ return ret;
+}
+
+static const struct of_device_id sun50i_iommu_dt[] = {
+ { .compatible = "allwinner,sun50i-h6-iommu", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, sun50i_iommu_dt);
+
+static struct platform_driver sun50i_iommu_driver = {
+ .driver = {
+ .name = "sun50i-iommu",
+ .of_match_table = sun50i_iommu_dt,
+ .suppress_bind_attrs = true,
+ }
+};
+builtin_platform_driver_probe(sun50i_iommu_driver, sun50i_iommu_probe);
+
+MODULE_DESCRIPTION("Allwinner H6 IOMMU driver");
+MODULE_AUTHOR("Maxime Ripard <maxime@cerno.tech>");
+MODULE_AUTHOR("zhuxianbin <zhuxianbin@allwinnertech.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index db6559e8336f..5fbdff6ff41a 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -243,28 +243,16 @@ static bool gart_iommu_capable(enum iommu_cap cap)
return false;
}
-static int gart_iommu_add_device(struct device *dev)
+static struct iommu_device *gart_iommu_probe_device(struct device *dev)
{
- struct iommu_group *group;
-
if (!dev_iommu_fwspec_get(dev))
- return -ENODEV;
-
- group = iommu_group_get_for_dev(dev);
- if (IS_ERR(group))
- return PTR_ERR(group);
-
- iommu_group_put(group);
+ return ERR_PTR(-ENODEV);
- iommu_device_link(&gart_handle->iommu, dev);
-
- return 0;
+ return &gart_handle->iommu;
}
-static void gart_iommu_remove_device(struct device *dev)
+static void gart_iommu_release_device(struct device *dev)
{
- iommu_group_remove_device(dev);
- iommu_device_unlink(&gart_handle->iommu, dev);
}
static int gart_iommu_of_xlate(struct device *dev,
@@ -290,8 +278,8 @@ static const struct iommu_ops gart_iommu_ops = {
.domain_free = gart_iommu_domain_free,
.attach_dev = gart_iommu_attach_dev,
.detach_dev = gart_iommu_detach_dev,
- .add_device = gart_iommu_add_device,
- .remove_device = gart_iommu_remove_device,
+ .probe_device = gart_iommu_probe_device,
+ .release_device = gart_iommu_release_device,
.device_group = generic_device_group,
.map = gart_iommu_map,
.unmap = gart_iommu_unmap,
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 63a147b623e6..7426b7666e2b 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -757,11 +757,10 @@ static int tegra_smmu_configure(struct tegra_smmu *smmu, struct device *dev,
return 0;
}
-static int tegra_smmu_add_device(struct device *dev)
+static struct iommu_device *tegra_smmu_probe_device(struct device *dev)
{
struct device_node *np = dev->of_node;
struct tegra_smmu *smmu = NULL;
- struct iommu_group *group;
struct of_phandle_args args;
unsigned int index = 0;
int err;
@@ -774,7 +773,7 @@ static int tegra_smmu_add_device(struct device *dev)
of_node_put(args.np);
if (err < 0)
- return err;
+ return ERR_PTR(err);
/*
* Only a single IOMMU master interface is currently
@@ -783,8 +782,6 @@ static int tegra_smmu_add_device(struct device *dev)
*/
dev->archdata.iommu = smmu;
- iommu_device_link(&smmu->iommu, dev);
-
break;
}
@@ -793,26 +790,14 @@ static int tegra_smmu_add_device(struct device *dev)
}
if (!smmu)
- return -ENODEV;
-
- group = iommu_group_get_for_dev(dev);
- if (IS_ERR(group))
- return PTR_ERR(group);
-
- iommu_group_put(group);
+ return ERR_PTR(-ENODEV);
- return 0;
+ return &smmu->iommu;
}
-static void tegra_smmu_remove_device(struct device *dev)
+static void tegra_smmu_release_device(struct device *dev)
{
- struct tegra_smmu *smmu = dev->archdata.iommu;
-
- if (smmu)
- iommu_device_unlink(&smmu->iommu, dev);
-
dev->archdata.iommu = NULL;
- iommu_group_remove_device(dev);
}
static const struct tegra_smmu_group_soc *
@@ -895,8 +880,8 @@ static const struct iommu_ops tegra_smmu_ops = {
.domain_free = tegra_smmu_domain_free,
.attach_dev = tegra_smmu_attach_dev,
.detach_dev = tegra_smmu_detach_dev,
- .add_device = tegra_smmu_add_device,
- .remove_device = tegra_smmu_remove_device,
+ .probe_device = tegra_smmu_probe_device,
+ .release_device = tegra_smmu_release_device,
.device_group = tegra_smmu_device_group,
.map = tegra_smmu_map,
.unmap = tegra_smmu_unmap,
@@ -1015,7 +1000,7 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev,
* value. However the IOMMU registration process will attempt to add
* all devices to the IOMMU when bus_set_iommu() is called. In order
* not to rely on global variables to track the IOMMU instance, we
- * set it here so that it can be looked up from the .add_device()
+ * set it here so that it can be looked up from the .probe_device()
* callback via the IOMMU device's .drvdata field.
*/
mc->smmu = smmu;
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index 4e1d11af23c8..f6f07489a9aa 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -865,24 +865,23 @@ static struct viommu_dev *viommu_get_by_fwnode(struct fwnode_handle *fwnode)
return dev ? dev_to_virtio(dev)->priv : NULL;
}
-static int viommu_add_device(struct device *dev)
+static struct iommu_device *viommu_probe_device(struct device *dev)
{
int ret;
- struct iommu_group *group;
struct viommu_endpoint *vdev;
struct viommu_dev *viommu = NULL;
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
if (!fwspec || fwspec->ops != &viommu_ops)
- return -ENODEV;
+ return ERR_PTR(-ENODEV);
viommu = viommu_get_by_fwnode(fwspec->iommu_fwnode);
if (!viommu)
- return -ENODEV;
+ return ERR_PTR(-ENODEV);
vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
if (!vdev)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
vdev->dev = dev;
vdev->viommu = viommu;
@@ -896,45 +895,25 @@ static int viommu_add_device(struct device *dev)
goto err_free_dev;
}
- ret = iommu_device_link(&viommu->iommu, dev);
- if (ret)
- goto err_free_dev;
+ return &viommu->iommu;
- /*
- * Last step creates a default domain and attaches to it. Everything
- * must be ready.
- */
- group = iommu_group_get_for_dev(dev);
- if (IS_ERR(group)) {
- ret = PTR_ERR(group);
- goto err_unlink_dev;
- }
-
- iommu_group_put(group);
-
- return PTR_ERR_OR_ZERO(group);
-
-err_unlink_dev:
- iommu_device_unlink(&viommu->iommu, dev);
err_free_dev:
generic_iommu_put_resv_regions(dev, &vdev->resv_regions);
kfree(vdev);
- return ret;
+ return ERR_PTR(ret);
}
-static void viommu_remove_device(struct device *dev)
+static void viommu_release_device(struct device *dev)
{
- struct viommu_endpoint *vdev;
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+ struct viommu_endpoint *vdev;
if (!fwspec || fwspec->ops != &viommu_ops)
return;
vdev = dev_iommu_priv_get(dev);
- iommu_group_remove_device(dev);
- iommu_device_unlink(&vdev->viommu->iommu, dev);
generic_iommu_put_resv_regions(dev, &vdev->resv_regions);
kfree(vdev);
}
@@ -960,8 +939,8 @@ static struct iommu_ops viommu_ops = {
.unmap = viommu_unmap,
.iova_to_phys = viommu_iova_to_phys,
.iotlb_sync = viommu_iotlb_sync,
- .add_device = viommu_add_device,
- .remove_device = viommu_remove_device,
+ .probe_device = viommu_probe_device,
+ .release_device = viommu_release_device,
.device_group = viommu_device_group,
.get_resv_regions = viommu_get_resv_regions,
.put_resv_regions = generic_iommu_put_resv_regions,
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index a85aada04a64..29fead208cad 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -493,6 +493,19 @@ config TI_SCI_INTA_IRQCHIP
If you wish to use interrupt aggregator irq resources managed by the
TI System Controller, say Y here. Otherwise, say N.
+config RISCV_INTC
+ bool "RISC-V Local Interrupt Controller"
+ depends on RISCV
+ default y
+ help
+ This enables support for the per-HART local interrupt controller
+ found in standard RISC-V systems. The per-HART local interrupt
+ controller handles timer interrupts, software interrupts, and
+ hardware interrupts. Without a per-HART local interrupt controller,
+ a RISC-V system will be unable to handle any interrupts.
+
+ If you don't know what to do here, say Y.
+
config SIFIVE_PLIC
bool "SiFive Platform-Level Interrupt Controller"
depends on RISCV
@@ -532,4 +545,31 @@ config LOONGSON_HTPIC
help
Support for the Loongson-3 HyperTransport PIC Controller.
+config LOONGSON_HTVEC
+ bool "Loongson3 HyperTransport Interrupt Vector Controller"
+ depends on MACH_LOONGSON64
+ default MACH_LOONGSON64
+ select IRQ_DOMAIN_HIERARCHY
+ help
+ Support for the Loongson3 HyperTransport Interrupt Vector Controller.
+
+config LOONGSON_PCH_PIC
+ bool "Loongson PCH PIC Controller"
+ depends on MACH_LOONGSON64 || COMPILE_TEST
+ default MACH_LOONGSON64
+ select IRQ_DOMAIN_HIERARCHY
+ select IRQ_FASTEOI_HIERARCHY_HANDLERS
+ help
+ Support for the Loongson PCH PIC Controller.
+
+config LOONGSON_PCH_MSI
+ bool "Loongson PCH PIC Controller"
+ depends on MACH_LOONGSON64 || COMPILE_TEST
+ depends on PCI
+ default MACH_LOONGSON64
+ select IRQ_DOMAIN_HIERARCHY
+ select PCI_MSI
+ help
+ Support for the Loongson PCH MSI Controller.
+
endmenu
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 37bbe39bf909..133f9c45744a 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -98,6 +98,7 @@ obj-$(CONFIG_NDS32) += irq-ativic32.o
obj-$(CONFIG_QCOM_PDC) += qcom-pdc.o
obj-$(CONFIG_CSKY_MPINTC) += irq-csky-mpintc.o
obj-$(CONFIG_CSKY_APB_INTC) += irq-csky-apb-intc.o
+obj-$(CONFIG_RISCV_INTC) += irq-riscv-intc.o
obj-$(CONFIG_SIFIVE_PLIC) += irq-sifive-plic.o
obj-$(CONFIG_IMX_IRQSTEER) += irq-imx-irqsteer.o
obj-$(CONFIG_IMX_INTMUX) += irq-imx-intmux.o
@@ -107,3 +108,6 @@ obj-$(CONFIG_TI_SCI_INTR_IRQCHIP) += irq-ti-sci-intr.o
obj-$(CONFIG_TI_SCI_INTA_IRQCHIP) += irq-ti-sci-inta.o
obj-$(CONFIG_LOONGSON_LIOINTC) += irq-loongson-liointc.o
obj-$(CONFIG_LOONGSON_HTPIC) += irq-loongson-htpic.o
+obj-$(CONFIG_LOONGSON_HTVEC) += irq-loongson-htvec.o
+obj-$(CONFIG_LOONGSON_PCH_PIC) += irq-loongson-pch-pic.o
+obj-$(CONFIG_LOONGSON_PCH_MSI) += irq-loongson-pch-msi.o
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 124251b0ccba..cd685f521c77 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -174,6 +174,13 @@ static struct {
int next_victim;
} vpe_proxy;
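+/*
+ * Per-CPU LPI counters, used to pick the least loaded CPU when
+ * an LPI's affinity is set.
+ */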
+struct cpu_lpi_count {
+ atomic_t managed;
+ atomic_t unmanaged;
+};
+
+static DEFINE_PER_CPU(struct cpu_lpi_count, cpu_lpi_count);
+
static LIST_HEAD(its_nodes);
static DEFINE_RAW_SPINLOCK(its_lock);
static struct rdists *gic_rdists;
@@ -1510,42 +1517,159 @@ static void its_unmask_irq(struct irq_data *d)
lpi_update_config(d, 0, LPI_PROP_ENABLED);
}
+static __maybe_unused u32 its_read_lpi_count(struct irq_data *d, int cpu)
+{
+ if (irqd_affinity_is_managed(d))
+ return atomic_read(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);
+
+ return atomic_read(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
+}
+
+static void its_inc_lpi_count(struct irq_data *d, int cpu)
+{
+ if (irqd_affinity_is_managed(d))
+ atomic_inc(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);
+ else
+ atomic_inc(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
+}
+
+static void its_dec_lpi_count(struct irq_data *d, int cpu)
+{
+ if (irqd_affinity_is_managed(d))
+ atomic_dec(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);
+ else
+ atomic_dec(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
+}
+
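+/* Return the CPU in @cpu_mask currently servicing the fewest LPIs of this kind */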
+static unsigned int cpumask_pick_least_loaded(struct irq_data *d,
+ const struct cpumask *cpu_mask)
+{
+ unsigned int cpu = nr_cpu_ids, tmp;
+ int count = S32_MAX;
+
+ for_each_cpu(tmp, cpu_mask) {
+ int this_count = its_read_lpi_count(d, tmp);
+ if (this_count < count) {
+ cpu = tmp;
+ count = this_count;
+ }
+ }
+
+ return cpu;
+}
+
+/*
+ * As suggested by Thomas Gleixner in:
+ * https://lore.kernel.org/r/87h80q2aoc.fsf@nanos.tec.linutronix.de
+ */
+static int its_select_cpu(struct irq_data *d,
+ const struct cpumask *aff_mask)
+{
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ cpumask_var_t tmpmask;
+ int cpu, node;
+
+ if (!alloc_cpumask_var(&tmpmask, GFP_ATOMIC))
+ return -ENOMEM;
+
+ node = its_dev->its->numa_node;
+
+ if (!irqd_affinity_is_managed(d)) {
+ /* First try the NUMA node */
+ if (node != NUMA_NO_NODE) {
+ /*
+ * Try the intersection of the affinity mask and the
+ * node mask (and the online mask, just to be safe).
+ */
+ cpumask_and(tmpmask, cpumask_of_node(node), aff_mask);
+ cpumask_and(tmpmask, tmpmask, cpu_online_mask);
+
+ /*
+ * Ideally, we would check if the mask is empty, and
+ * try again on the full node here.
+ *
+ * But it turns out that the way ACPI describes the
+ * affinity for ITSs only deals with memory, and
+ * not target CPUs, so it cannot describe a single
+ * ITS placed next to two NUMA nodes.
+ *
+ * Instead, just fall back on the online mask. This
+ * diverges from Thomas' suggestion above.
+ */
+ cpu = cpumask_pick_least_loaded(d, tmpmask);
+ if (cpu < nr_cpu_ids)
+ goto out;
+
+ /* If we can't cross sockets, give up */
+ if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144))
+ goto out;
+
+ /* If the above failed, expand the search */
+ }
+
+ /* Try the intersection of the affinity and online masks */
+ cpumask_and(tmpmask, aff_mask, cpu_online_mask);
+
+ /* If that doesn't fly, the online mask is the last resort */
+ if (cpumask_empty(tmpmask))
+ cpumask_copy(tmpmask, cpu_online_mask);
+
+ cpu = cpumask_pick_least_loaded(d, tmpmask);
+ } else {
+ cpumask_and(tmpmask, irq_data_get_affinity_mask(d), cpu_online_mask);
+
+ /* If we cannot cross sockets, limit the search to that node */
+ if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) &&
+ node != NUMA_NO_NODE)
+ cpumask_and(tmpmask, tmpmask, cpumask_of_node(node));
+
+ cpu = cpumask_pick_least_loaded(d, tmpmask);
+ }
+out:
+ free_cpumask_var(tmpmask);
+
+ pr_debug("IRQ%d -> %*pbl CPU%d\n", d->irq, cpumask_pr_args(aff_mask), cpu);
+ return cpu;
+}
+
static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
bool force)
{
- unsigned int cpu;
- const struct cpumask *cpu_mask = cpu_online_mask;
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
struct its_collection *target_col;
u32 id = its_get_event_id(d);
+ int cpu, prev_cpu;
/* A forwarded interrupt should use irq_set_vcpu_affinity */
if (irqd_is_forwarded_to_vcpu(d))
return -EINVAL;
- /* lpi cannot be routed to a redistributor that is on a foreign node */
- if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
- if (its_dev->its->numa_node >= 0) {
- cpu_mask = cpumask_of_node(its_dev->its->numa_node);
- if (!cpumask_intersects(mask_val, cpu_mask))
- return -EINVAL;
- }
- }
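+ /* Drop the count on the previous target CPU before picking a new one */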
+ prev_cpu = its_dev->event_map.col_map[id];
+ its_dec_lpi_count(d, prev_cpu);
- cpu = cpumask_any_and(mask_val, cpu_mask);
+ if (!force)
+ cpu = its_select_cpu(d, mask_val);
+ else
+ cpu = cpumask_pick_least_loaded(d, mask_val);
- if (cpu >= nr_cpu_ids)
- return -EINVAL;
+ if (cpu < 0 || cpu >= nr_cpu_ids)
+ goto err;
/* don't set the affinity when the target cpu is same as current one */
- if (cpu != its_dev->event_map.col_map[id]) {
+ if (cpu != prev_cpu) {
target_col = &its_dev->its->collections[cpu];
its_send_movi(its_dev, target_col, id);
its_dev->event_map.col_map[id] = cpu;
irq_data_update_effective_affinity(d, cpumask_of(cpu));
}
+ its_inc_lpi_count(d, cpu);
+
return IRQ_SET_MASK_OK_DONE;
+
+err:
+ its_inc_lpi_count(d, prev_cpu);
+ return -EINVAL;
}
static u64 its_irq_get_msi_base(struct its_device *its_dev)
@@ -3432,22 +3556,13 @@ static int its_irq_domain_activate(struct irq_domain *domain,
{
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
u32 event = its_get_event_id(d);
- const struct cpumask *cpu_mask = cpu_online_mask;
int cpu;
- /* get the cpu_mask of local node */
- if (its_dev->its->numa_node >= 0)
- cpu_mask = cpumask_of_node(its_dev->its->numa_node);
-
- /* Bind the LPI to the first possible CPU */
- cpu = cpumask_first_and(cpu_mask, cpu_online_mask);
- if (cpu >= nr_cpu_ids) {
- if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144)
- return -EINVAL;
-
- cpu = cpumask_first(cpu_online_mask);
- }
+ cpu = its_select_cpu(d, cpu_online_mask);
+ if (cpu < 0 || cpu >= nr_cpu_ids)
+ return -EINVAL;
+ its_inc_lpi_count(d, cpu);
its_dev->event_map.col_map[event] = cpu;
irq_data_update_effective_affinity(d, cpumask_of(cpu));
@@ -3462,6 +3577,7 @@ static void its_irq_domain_deactivate(struct irq_domain *domain,
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
u32 event = its_get_event_id(d);
+ its_dec_lpi_count(d, its_dev->event_map.col_map[event]);
/* Stop the delivery of interrupts */
its_send_discard(its_dev, event);
}
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index d7006ef18a0d..cc46bc2d634b 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -1150,7 +1150,7 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
isb();
}
-static void gic_smp_init(void)
+static void __init gic_smp_init(void)
{
set_smp_cross_call(gic_raise_softirq);
cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
@@ -1282,7 +1282,6 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
irq_set_percpu_devid(irq);
irq_domain_set_info(d, irq, hw, chip, d->host_data,
handle_percpu_devid_irq, NULL, NULL);
- irq_set_status_flags(irq, IRQ_NOAUTOEN);
break;
case SPI_RANGE:
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 30ab623343d3..00de05abd3c3 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -982,7 +982,6 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
irq_set_percpu_devid(irq);
irq_domain_set_info(d, irq, hw, &gic->chip, d->host_data,
handle_percpu_devid_irq, NULL, NULL);
- irq_set_status_flags(irq, IRQ_NOAUTOEN);
} else {
irq_domain_set_info(d, irq, hw, &gic->chip, d->host_data,
handle_fasteoi_irq, NULL, NULL);
diff --git a/drivers/irqchip/irq-loongson-htvec.c b/drivers/irqchip/irq-loongson-htvec.c
new file mode 100644
index 000000000000..1ece9337c78d
--- /dev/null
+++ b/drivers/irqchip/irq-loongson-htvec.c
@@ -0,0 +1,214 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020, Jiaxun Yang <jiaxun.yang@flygoat.com>
+ * Loongson HyperTransport Interrupt Vector support
+ */
+
+#define pr_fmt(fmt) "htvec: " fmt
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+
+/* Registers */
+#define HTVEC_EN_OFF 0x20
+#define HTVEC_MAX_PARENT_IRQ 4
+
+#define VEC_COUNT_PER_REG 32
+#define VEC_REG_COUNT 4
+#define VEC_COUNT (VEC_COUNT_PER_REG * VEC_REG_COUNT)
+#define VEC_REG_IDX(irq_id) ((irq_id) / VEC_COUNT_PER_REG)
+#define VEC_REG_BIT(irq_id) ((irq_id) % VEC_COUNT_PER_REG)
+
+struct htvec {
+ void __iomem *base;
+ struct irq_domain *htvec_domain;
+ raw_spinlock_t htvec_lock;
+};
+
+static void htvec_irq_dispatch(struct irq_desc *desc)
+{
+ int i;
+ u32 pending;
+ bool handled = false;
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct htvec *priv = irq_desc_get_handler_data(desc);
+
+ chained_irq_enter(chip, desc);
+
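+ /* Scan every pending-vector register and handle each bit that is set */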
+ for (i = 0; i < VEC_REG_COUNT; i++) {
+ pending = readl(priv->base + 4 * i);
+ while (pending) {
+ int bit = __ffs(pending);
+
+ generic_handle_irq(irq_linear_revmap(priv->htvec_domain, bit +
+ VEC_COUNT_PER_REG * i));
+ pending &= ~BIT(bit);
+ handled = true;
+ }
+ }
+
+ if (!handled)
+ spurious_interrupt();
+
+ chained_irq_exit(chip, desc);
+}
+
+static void htvec_ack_irq(struct irq_data *d)
+{
+ struct htvec *priv = irq_data_get_irq_chip_data(d);
+
+ writel(BIT(VEC_REG_BIT(d->hwirq)),
+ priv->base + VEC_REG_IDX(d->hwirq) * 4);
+}
+
+static void htvec_mask_irq(struct irq_data *d)
+{
+ u32 reg;
+ void __iomem *addr;
+ struct htvec *priv = irq_data_get_irq_chip_data(d);
+
+ raw_spin_lock(&priv->htvec_lock);
+ addr = priv->base + HTVEC_EN_OFF;
+ addr += VEC_REG_IDX(d->hwirq) * 4;
+ reg = readl(addr);
+ reg &= ~BIT(VEC_REG_BIT(d->hwirq));
+ writel(reg, addr);
+ raw_spin_unlock(&priv->htvec_lock);
+}
+
+static void htvec_unmask_irq(struct irq_data *d)
+{
+ u32 reg;
+ void __iomem *addr;
+ struct htvec *priv = irq_data_get_irq_chip_data(d);
+
+ raw_spin_lock(&priv->htvec_lock);
+ addr = priv->base + HTVEC_EN_OFF;
+ addr += VEC_REG_IDX(d->hwirq) * 4;
+ reg = readl(addr);
+ reg |= BIT(VEC_REG_BIT(d->hwirq));
+ writel(reg, addr);
+ raw_spin_unlock(&priv->htvec_lock);
+}
+
+static struct irq_chip htvec_irq_chip = {
+ .name = "LOONGSON_HTVEC",
+ .irq_mask = htvec_mask_irq,
+ .irq_unmask = htvec_unmask_irq,
+ .irq_ack = htvec_ack_irq,
+};
+
+static int htvec_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ unsigned long hwirq;
+ unsigned int type, i;
+ struct htvec *priv = domain->host_data;
+
+ irq_domain_translate_onecell(domain, arg, &hwirq, &type);
+
+ for (i = 0; i < nr_irqs; i++) {
+ irq_domain_set_info(domain, virq + i, hwirq + i, &htvec_irq_chip,
+ priv, handle_edge_irq, NULL, NULL);
+ }
+
+ return 0;
+}
+
+static void htvec_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ int i;
+
+ for (i = 0; i < nr_irqs; i++) {
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);
+
+ irq_set_handler(virq + i, NULL);
+ irq_domain_reset_irq_data(d);
+ }
+}
+
+static const struct irq_domain_ops htvec_domain_ops = {
+ .translate = irq_domain_translate_onecell,
+ .alloc = htvec_domain_alloc,
+ .free = htvec_domain_free,
+};
+
+static void htvec_reset(struct htvec *priv)
+{
+ u32 idx;
+
+ /* Clear IRQ cause registers, mask all interrupts */
+ for (idx = 0; idx < VEC_REG_COUNT; idx++) {
+ writel_relaxed(0x0, priv->base + HTVEC_EN_OFF + 4 * idx);
+ writel_relaxed(0xFFFFFFFF, priv->base + 4 * idx);
+ }
+}
+
+static int htvec_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct htvec *priv;
+ int err, parent_irq[4], num_parents = 0, i;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ raw_spin_lock_init(&priv->htvec_lock);
+ priv->base = of_iomap(node, 0);
+ if (!priv->base) {
+ err = -ENOMEM;
+ goto free_priv;
+ }
+
+ /* Interrupts may come from any of the 4 interrupt lines */
+ for (i = 0; i < HTVEC_MAX_PARENT_IRQ; i++) {
+ parent_irq[i] = irq_of_parse_and_map(node, i);
+ if (parent_irq[i] <= 0)
+ break;
+
+ num_parents++;
+ }
+
+ if (!num_parents) {
+ pr_err("Failed to get parent irqs\n");
+ err = -ENODEV;
+ goto iounmap_base;
+ }
+
+ priv->htvec_domain = irq_domain_create_linear(of_node_to_fwnode(node),
+ VEC_COUNT,
+ &htvec_domain_ops,
+ priv);
+ if (!priv->htvec_domain) {
+ pr_err("Failed to create IRQ domain\n");
+ err = -ENOMEM;
+ goto iounmap_base;
+ }
+
+ htvec_reset(priv);
+
+ for (i = 0; i < num_parents; i++)
+ irq_set_chained_handler_and_data(parent_irq[i],
+ htvec_irq_dispatch, priv);
+
+ return 0;
+
+iounmap_base:
+ iounmap(priv->base);
+free_priv:
+ kfree(priv);
+
+ return err;
+}
+
+IRQCHIP_DECLARE(htvec, "loongson,htvec-1.0", htvec_of_init);
diff --git a/drivers/irqchip/irq-loongson-pch-msi.c b/drivers/irqchip/irq-loongson-pch-msi.c
new file mode 100644
index 000000000000..50becd21008c
--- /dev/null
+++ b/drivers/irqchip/irq-loongson-pch-msi.c
@@ -0,0 +1,255 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020, Jiaxun Yang <jiaxun.yang@flygoat.com>
+ * Loongson PCH MSI support
+ */
+
+#define pr_fmt(fmt) "pch-msi: " fmt
+
+#include <linux/irqchip.h>
+#include <linux/msi.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+
+struct pch_msi_data {
+ struct mutex msi_map_lock;
+ phys_addr_t doorbell;
+ u32 irq_first; /* The vector number that MSIs start at */
+ u32 num_irqs; /* The number of vectors for MSIs */
+ unsigned long *msi_map;
+};
+
+static void pch_msi_mask_msi_irq(struct irq_data *d)
+{
+ pci_msi_mask_irq(d);
+ irq_chip_mask_parent(d);
+}
+
+static void pch_msi_unmask_msi_irq(struct irq_data *d)
+{
+ irq_chip_unmask_parent(d);
+ pci_msi_unmask_irq(d);
+}
+
+static struct irq_chip pch_msi_irq_chip = {
+ .name = "PCH PCI MSI",
+ .irq_mask = pch_msi_mask_msi_irq,
+ .irq_unmask = pch_msi_unmask_msi_irq,
+ .irq_ack = irq_chip_ack_parent,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+};
+
+static int pch_msi_allocate_hwirq(struct pch_msi_data *priv, int num_req)
+{
+ int first;
+
+ mutex_lock(&priv->msi_map_lock);
+
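+ /* Multi-MSI needs a naturally aligned, power-of-two sized block of vectors */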
+ first = bitmap_find_free_region(priv->msi_map, priv->num_irqs,
+ get_count_order(num_req));
+ if (first < 0) {
+ mutex_unlock(&priv->msi_map_lock);
+ return -ENOSPC;
+ }
+
+ mutex_unlock(&priv->msi_map_lock);
+
+ return priv->irq_first + first;
+}
+
+static void pch_msi_free_hwirq(struct pch_msi_data *priv,
+ int hwirq, int num_req)
+{
+ int first = hwirq - priv->irq_first;
+
+ mutex_lock(&priv->msi_map_lock);
+ bitmap_release_region(priv->msi_map, first, get_count_order(num_req));
+ mutex_unlock(&priv->msi_map_lock);
+}
+
+static void pch_msi_compose_msi_msg(struct irq_data *data,
+ struct msi_msg *msg)
+{
+ struct pch_msi_data *priv = irq_data_get_irq_chip_data(data);
+
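+ /* The device raises the interrupt by writing its hwirq number to the doorbell address */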
+ msg->address_hi = upper_32_bits(priv->doorbell);
+ msg->address_lo = lower_32_bits(priv->doorbell);
+ msg->data = data->hwirq;
+}
+
+static struct msi_domain_info pch_msi_domain_info = {
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX,
+ .chip = &pch_msi_irq_chip,
+};
+
+static struct irq_chip middle_irq_chip = {
+ .name = "PCH MSI",
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_ack = irq_chip_ack_parent,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+ .irq_compose_msi_msg = pch_msi_compose_msi_msg,
+};
+
+static int pch_msi_parent_domain_alloc(struct irq_domain *domain,
+ unsigned int virq, int hwirq)
+{
+ struct irq_fwspec fwspec;
+ int ret;
+
+ fwspec.fwnode = domain->parent->fwnode;
+ fwspec.param_count = 1;
+ fwspec.param[0] = hwirq;
+
+ ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int pch_msi_middle_domain_alloc(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ struct pch_msi_data *priv = domain->host_data;
+ int hwirq, err, i;
+
+ hwirq = pch_msi_allocate_hwirq(priv, nr_irqs);
+ if (hwirq < 0)
+ return hwirq;
+
+ for (i = 0; i < nr_irqs; i++) {
+ err = pch_msi_parent_domain_alloc(domain, virq + i, hwirq + i);
+ if (err)
+ goto err_hwirq;
+
+ irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
+ &middle_irq_chip, priv);
+ }
+
+ return 0;
+
+err_hwirq:
+ pch_msi_free_hwirq(priv, hwirq, nr_irqs);
+ irq_domain_free_irqs_parent(domain, virq, i);
+
+ return err;
+}
+
+static void pch_msi_middle_domain_free(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct pch_msi_data *priv = irq_data_get_irq_chip_data(d);
+
+ irq_domain_free_irqs_parent(domain, virq, nr_irqs);
+ pch_msi_free_hwirq(priv, d->hwirq, nr_irqs);
+}
+
+static const struct irq_domain_ops pch_msi_middle_domain_ops = {
+ .alloc = pch_msi_middle_domain_alloc,
+ .free = pch_msi_middle_domain_free,
+};
+
+static int pch_msi_init_domains(struct pch_msi_data *priv,
+ struct device_node *node,
+ struct irq_domain *parent)
+{
+ struct irq_domain *middle_domain, *msi_domain;
+
+ middle_domain = irq_domain_create_linear(of_node_to_fwnode(node),
+ priv->num_irqs,
+ &pch_msi_middle_domain_ops,
+ priv);
+ if (!middle_domain) {
+ pr_err("Failed to create the MSI middle domain\n");
+ return -ENOMEM;
+ }
+
+ middle_domain->parent = parent;
+ irq_domain_update_bus_token(middle_domain, DOMAIN_BUS_NEXUS);
+
+ msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(node),
+ &pch_msi_domain_info,
+ middle_domain);
+ if (!msi_domain) {
+ pr_err("Failed to create PCI MSI domain\n");
+ irq_domain_remove(middle_domain);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int pch_msi_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct pch_msi_data *priv;
+ struct irq_domain *parent_domain;
+ struct resource res;
+ int ret;
+
+ parent_domain = irq_find_host(parent);
+ if (!parent_domain) {
+ pr_err("Failed to find the parent domain\n");
+ return -ENXIO;
+ }
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ mutex_init(&priv->msi_map_lock);
+
+ ret = of_address_to_resource(node, 0, &res);
+ if (ret) {
+ pr_err("Failed to allocate resource\n");
+ goto err_priv;
+ }
+
+ priv->doorbell = res.start;
+
+ if (of_property_read_u32(node, "loongson,msi-base-vec",
+ &priv->irq_first)) {
+ pr_err("Unable to parse MSI vec base\n");
+ ret = -EINVAL;
+ goto err_priv;
+ }
+
+ if (of_property_read_u32(node, "loongson,msi-num-vecs",
+ &priv->num_irqs)) {
+ pr_err("Unable to parse MSI vec number\n");
+ ret = -EINVAL;
+ goto err_priv;
+ }
+
+ priv->msi_map = bitmap_alloc(priv->num_irqs, GFP_KERNEL);
+ if (!priv->msi_map) {
+ ret = -ENOMEM;
+ goto err_priv;
+ }
+
+ pr_debug("Registering %d MSIs, starting at %d\n",
+ priv->num_irqs, priv->irq_first);
+
+ ret = pch_msi_init_domains(priv, node, parent_domain);
+ if (ret)
+ goto err_map;
+
+ return 0;
+
+err_map:
+ kfree(priv->msi_map);
+err_priv:
+ kfree(priv);
+ return ret;
+}
+
+IRQCHIP_DECLARE(pch_msi, "loongson,pch-msi-1.0", pch_msi_init);
diff --git a/drivers/irqchip/irq-loongson-pch-pic.c b/drivers/irqchip/irq-loongson-pch-pic.c
new file mode 100644
index 000000000000..2a05b9305012
--- /dev/null
+++ b/drivers/irqchip/irq-loongson-pch-pic.c
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020, Jiaxun Yang <jiaxun.yang@flygoat.com>
+ * Loongson PCH PIC support
+ */
+
+#define pr_fmt(fmt) "pch-pic: " fmt
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+
+/* Registers */
+#define PCH_PIC_MASK 0x20
+#define PCH_PIC_HTMSI_EN 0x40
+#define PCH_PIC_EDGE 0x60
+#define PCH_PIC_CLR 0x80
+#define PCH_PIC_AUTO0 0xc0
+#define PCH_PIC_AUTO1 0xe0
+#define PCH_INT_ROUTE(irq) (0x100 + irq)
+#define PCH_INT_HTVEC(irq) (0x200 + irq)
+#define PCH_PIC_POL 0x3e0
+
+#define PIC_COUNT_PER_REG 32
+#define PIC_REG_COUNT 2
+#define PIC_COUNT (PIC_COUNT_PER_REG * PIC_REG_COUNT)
+#define PIC_REG_IDX(irq_id) ((irq_id) / PIC_COUNT_PER_REG)
+#define PIC_REG_BIT(irq_id) ((irq_id) % PIC_COUNT_PER_REG)
+
+struct pch_pic {
+ void __iomem *base;
+ struct irq_domain *pic_domain;
+ u32 ht_vec_base;
+ raw_spinlock_t pic_lock;
+};
+
+static void pch_pic_bitset(struct pch_pic *priv, int offset, int bit)
+{
+ u32 reg;
+ void __iomem *addr = priv->base + offset + PIC_REG_IDX(bit) * 4;
+
+ raw_spin_lock(&priv->pic_lock);
+ reg = readl(addr);
+ reg |= BIT(PIC_REG_BIT(bit));
+ writel(reg, addr);
+ raw_spin_unlock(&priv->pic_lock);
+}
+
+static void pch_pic_bitclr(struct pch_pic *priv, int offset, int bit)
+{
+ u32 reg;
+ void __iomem *addr = priv->base + offset + PIC_REG_IDX(bit) * 4;
+
+ raw_spin_lock(&priv->pic_lock);
+ reg = readl(addr);
+ reg &= ~BIT(PIC_REG_BIT(bit));
+ writel(reg, addr);
+ raw_spin_unlock(&priv->pic_lock);
+}
+
+static void pch_pic_eoi_irq(struct irq_data *d)
+{
+ u32 idx = PIC_REG_IDX(d->hwirq);
+ struct pch_pic *priv = irq_data_get_irq_chip_data(d);
+
+ writel(BIT(PIC_REG_BIT(d->hwirq)),
+ priv->base + PCH_PIC_CLR + idx * 4);
+}
+
+static void pch_pic_mask_irq(struct irq_data *d)
+{
+ struct pch_pic *priv = irq_data_get_irq_chip_data(d);
+
+ pch_pic_bitset(priv, PCH_PIC_MASK, d->hwirq);
+ irq_chip_mask_parent(d);
+}
+
+static void pch_pic_unmask_irq(struct irq_data *d)
+{
+ struct pch_pic *priv = irq_data_get_irq_chip_data(d);
+
+ irq_chip_unmask_parent(d);
+ pch_pic_bitclr(priv, PCH_PIC_MASK, d->hwirq);
+}
+
+static int pch_pic_set_type(struct irq_data *d, unsigned int type)
+{
+ struct pch_pic *priv = irq_data_get_irq_chip_data(d);
+ int ret = 0;
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ pch_pic_bitset(priv, PCH_PIC_EDGE, d->hwirq);
+ pch_pic_bitclr(priv, PCH_PIC_POL, d->hwirq);
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ pch_pic_bitset(priv, PCH_PIC_EDGE, d->hwirq);
+ pch_pic_bitset(priv, PCH_PIC_POL, d->hwirq);
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ pch_pic_bitclr(priv, PCH_PIC_EDGE, d->hwirq);
+ pch_pic_bitclr(priv, PCH_PIC_POL, d->hwirq);
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ pch_pic_bitclr(priv, PCH_PIC_EDGE, d->hwirq);
+ pch_pic_bitset(priv, PCH_PIC_POL, d->hwirq);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static struct irq_chip pch_pic_irq_chip = {
+ .name = "PCH PIC",
+ .irq_mask = pch_pic_mask_irq,
+ .irq_unmask = pch_pic_unmask_irq,
+ .irq_ack = irq_chip_ack_parent,
+ .irq_eoi = pch_pic_eoi_irq,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+ .irq_set_type = pch_pic_set_type,
+};
+
+static int pch_pic_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ int err;
+ unsigned int type;
+ unsigned long hwirq;
+ struct irq_fwspec fwspec;
+ struct pch_pic *priv = domain->host_data;
+
+ irq_domain_translate_twocell(domain, arg, &hwirq, &type);
+
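+ /* Allocate the corresponding vector (ht_vec_base + hwirq) from the parent domain */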
+ fwspec.fwnode = domain->parent->fwnode;
+ fwspec.param_count = 1;
+ fwspec.param[0] = hwirq + priv->ht_vec_base;
+
+ err = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
+ if (err)
+ return err;
+
+ irq_domain_set_info(domain, virq, hwirq,
+ &pch_pic_irq_chip, priv,
+ handle_fasteoi_ack_irq, NULL, NULL);
+ irq_set_probe(virq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops pch_pic_domain_ops = {
+ .translate = irq_domain_translate_twocell,
+ .alloc = pch_pic_alloc,
+ .free = irq_domain_free_irqs_parent,
+};
+
+static void pch_pic_reset(struct pch_pic *priv)
+{
+ int i;
+
+ for (i = 0; i < PIC_COUNT; i++) {
+ /* Write vector ID */
+ writeb(priv->ht_vec_base + i, priv->base + PCH_INT_HTVEC(i));
+ /* Hardcode route to HT0 Lo */
+ writeb(1, priv->base + PCH_INT_ROUTE(i));
+ }
+
+ for (i = 0; i < PIC_REG_COUNT; i++) {
+ /* Clear IRQ cause registers, mask all interrupts */
+ writel_relaxed(0xFFFFFFFF, priv->base + PCH_PIC_MASK + 4 * i);
+ writel_relaxed(0xFFFFFFFF, priv->base + PCH_PIC_CLR + 4 * i);
+ /* Clear auto bounce, we don't need that */
+ writel_relaxed(0, priv->base + PCH_PIC_AUTO0 + 4 * i);
+ writel_relaxed(0, priv->base + PCH_PIC_AUTO1 + 4 * i);
+ /* Enable HTMSI transformer */
+ writel_relaxed(0xFFFFFFFF, priv->base + PCH_PIC_HTMSI_EN + 4 * i);
+ }
+}
+
+static int pch_pic_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct pch_pic *priv;
+ struct irq_domain *parent_domain;
+ int err;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ raw_spin_lock_init(&priv->pic_lock);
+ priv->base = of_iomap(node, 0);
+ if (!priv->base) {
+ err = -ENOMEM;
+ goto free_priv;
+ }
+
+ parent_domain = irq_find_host(parent);
+ if (!parent_domain) {
+ pr_err("Failed to find the parent domain\n");
+ err = -ENXIO;
+ goto iounmap_base;
+ }
+
+ if (of_property_read_u32(node, "loongson,pic-base-vec",
+ &priv->ht_vec_base)) {
+ pr_err("Failed to determine pic-base-vec\n");
+ err = -EINVAL;
+ goto iounmap_base;
+ }
+
+ priv->pic_domain = irq_domain_create_hierarchy(parent_domain, 0,
+ PIC_COUNT,
+ of_node_to_fwnode(node),
+ &pch_pic_domain_ops,
+ priv);
+ if (!priv->pic_domain) {
+ pr_err("Failed to create IRQ domain\n");
+ err = -ENOMEM;
+ goto iounmap_base;
+ }
+
+ pch_pic_reset(priv);
+
+ return 0;
+
+iounmap_base:
+ iounmap(priv->base);
+free_priv:
+ kfree(priv);
+
+ return err;
+}
+
+IRQCHIP_DECLARE(pch_pic, "loongson,pch-pic-1.0", pch_pic_of_init);
diff --git a/drivers/irqchip/irq-riscv-intc.c b/drivers/irqchip/irq-riscv-intc.c
new file mode 100644
index 000000000000..a6f97fa6ff69
--- /dev/null
+++ b/drivers/irqchip/irq-riscv-intc.c
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017-2018 SiFive
+ * Copyright (C) 2020 Western Digital Corporation or its affiliates.
+ */
+
+#define pr_fmt(fmt) "riscv-intc: " fmt
+#include <linux/atomic.h>
+#include <linux/bits.h>
+#include <linux/cpu.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/smp.h>
+
+static struct irq_domain *intc_domain;
+
+static asmlinkage void riscv_intc_irq(struct pt_regs *regs)
+{
+ unsigned long cause = regs->cause & ~CAUSE_IRQ_FLAG;
+
+ if (unlikely(cause >= BITS_PER_LONG))
+ panic("unexpected interrupt cause");
+
+ switch (cause) {
+#ifdef CONFIG_SMP
+ case RV_IRQ_SOFT:
+ /*
+ * We only use software interrupts to pass IPIs, so if a
+ * non-SMP system gets one, then we don't know what to do.
+ */
+ handle_IPI(regs);
+ break;
+#endif
+ default:
+ handle_domain_irq(intc_domain, cause, regs);
+ break;
+ }
+}
+
+/*
+ * On RISC-V systems local interrupts are masked or unmasked by writing
+ * the SIE (Supervisor Interrupt Enable) CSR. As CSRs can only be written
+ * on the local hart, these functions can only be called on the hart that
+ * corresponds to the IRQ chip.
+ */
+
+static void riscv_intc_irq_mask(struct irq_data *d)
+{
+ csr_clear(CSR_IE, BIT(d->hwirq));
+}
+
+static void riscv_intc_irq_unmask(struct irq_data *d)
+{
+ csr_set(CSR_IE, BIT(d->hwirq));
+}
+
+static int riscv_intc_cpu_starting(unsigned int cpu)
+{
+ csr_set(CSR_IE, BIT(RV_IRQ_SOFT));
+ return 0;
+}
+
+static int riscv_intc_cpu_dying(unsigned int cpu)
+{
+ csr_clear(CSR_IE, BIT(RV_IRQ_SOFT));
+ return 0;
+}
+
+static struct irq_chip riscv_intc_chip = {
+ .name = "RISC-V INTC",
+ .irq_mask = riscv_intc_irq_mask,
+ .irq_unmask = riscv_intc_irq_unmask,
+};
+
+static int riscv_intc_domain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_percpu_devid(irq);
+ irq_domain_set_info(d, irq, hwirq, &riscv_intc_chip, d->host_data,
+ handle_percpu_devid_irq, NULL, NULL);
+
+ return 0;
+}
+
+static const struct irq_domain_ops riscv_intc_domain_ops = {
+ .map = riscv_intc_domain_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static int __init riscv_intc_init(struct device_node *node,
+ struct device_node *parent)
+{
+ int rc, hartid;
+
+ hartid = riscv_of_parent_hartid(node);
+ if (hartid < 0) {
+ pr_warn("unable to fine hart id for %pOF\n", node);
+ return 0;
+ }
+
+ /*
+ * The DT will have one INTC DT node under each CPU (or HART)
+ * DT node, so riscv_intc_init() will be called once for each
+ * INTC DT node. We only need to do INTC initialization for the
+ * INTC DT node belonging to the boot CPU (or boot HART).
+ */
+ if (riscv_hartid_to_cpuid(hartid) != smp_processor_id())
+ return 0;
+
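+ /* One hwirq per exception cause value (the handler panics above BITS_PER_LONG) */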
+ intc_domain = irq_domain_add_linear(node, BITS_PER_LONG,
+ &riscv_intc_domain_ops, NULL);
+ if (!intc_domain) {
+ pr_err("unable to add IRQ domain\n");
+ return -ENXIO;
+ }
+
+ rc = set_handle_irq(&riscv_intc_irq);
+ if (rc) {
+ pr_err("failed to set irq handler\n");
+ return rc;
+ }
+
+ cpuhp_setup_state(CPUHP_AP_IRQ_RISCV_STARTING,
+ "irqchip/riscv/intc:starting",
+ riscv_intc_cpu_starting,
+ riscv_intc_cpu_dying);
+
+ pr_info("%d local interrupts mapped\n", BITS_PER_LONG);
+
+ return 0;
+}
+
+IRQCHIP_DECLARE(riscv, "riscv,cpu-intc", riscv_intc_init);
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index d0a71febdadc..eaa3e9fe54e9 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -9,6 +9,7 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -76,6 +77,8 @@ struct plic_handler {
void __iomem *enable_base;
struct plic_priv *priv;
};
+static int plic_parent_irq;
+static bool plic_cpuhp_setup_done;
static DEFINE_PER_CPU(struct plic_handler, plic_handlers);
static inline void plic_toggle(struct plic_handler *handler,
@@ -176,9 +179,12 @@ static struct irq_chip plic_chip = {
static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hwirq)
{
+ struct plic_priv *priv = d->host_data;
+
irq_domain_set_info(d, irq, hwirq, &plic_chip, d->host_data,
handle_fasteoi_irq, NULL, NULL);
irq_set_noprobe(irq);
+ irq_set_affinity(irq, &priv->lmask);
return 0;
}
@@ -215,15 +221,17 @@ static const struct irq_domain_ops plic_irqdomain_ops = {
* that source ID back to the same claim register. This automatically enables
* and disables the interrupt, so there's nothing else to do.
*/
-static void plic_handle_irq(struct pt_regs *regs)
+static void plic_handle_irq(struct irq_desc *desc)
{
struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
void __iomem *claim = handler->hart_base + CONTEXT_CLAIM;
irq_hw_number_t hwirq;
WARN_ON_ONCE(!handler->present);
- csr_clear(CSR_IE, IE_EIE);
+ chained_irq_enter(chip, desc);
+
while ((hwirq = readl(claim))) {
int irq = irq_find_mapping(handler->priv->irqdomain, hwirq);
@@ -233,21 +241,8 @@ static void plic_handle_irq(struct pt_regs *regs)
else
generic_handle_irq(irq);
}
- csr_set(CSR_IE, IE_EIE);
-}
-/*
- * Walk up the DT tree until we find an active RISC-V core (HART) node and
- * extract the cpuid from it.
- */
-static int plic_find_hart_id(struct device_node *node)
-{
- for (; node; node = node->parent) {
- if (of_device_is_compatible(node, "riscv"))
- return riscv_of_processor_hartid(node);
- }
-
- return -1;
+ chained_irq_exit(chip, desc);
}
static void plic_set_threshold(struct plic_handler *handler, u32 threshold)
@@ -258,10 +253,8 @@ static void plic_set_threshold(struct plic_handler *handler, u32 threshold)
static int plic_dying_cpu(unsigned int cpu)
{
- struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
-
- csr_clear(CSR_IE, IE_EIE);
- plic_set_threshold(handler, PLIC_DISABLE_THRESHOLD);
+ if (plic_parent_irq)
+ disable_percpu_irq(plic_parent_irq);
return 0;
}
@@ -270,7 +263,11 @@ static int plic_starting_cpu(unsigned int cpu)
{
struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
- csr_set(CSR_IE, IE_EIE);
+ if (plic_parent_irq)
+ enable_percpu_irq(plic_parent_irq,
+ irq_get_trigger_type(plic_parent_irq));
+ else
+ pr_warn("cpu%d: parent irq not available\n", cpu);
plic_set_threshold(handler, PLIC_ENABLE_THRESHOLD);
return 0;
@@ -282,6 +279,7 @@ static int __init plic_init(struct device_node *node,
int error = 0, nr_contexts, nr_handlers = 0, i;
u32 nr_irqs;
struct plic_priv *priv;
+ struct plic_handler *handler;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -301,8 +299,6 @@ static int __init plic_init(struct device_node *node,
nr_contexts = of_irq_count(node);
if (WARN_ON(!nr_contexts))
goto out_iounmap;
- if (WARN_ON(nr_contexts < num_possible_cpus()))
- goto out_iounmap;
error = -ENOMEM;
priv->irqdomain = irq_domain_add_linear(node, nr_irqs + 1,
@@ -312,7 +308,6 @@ static int __init plic_init(struct device_node *node,
for (i = 0; i < nr_contexts; i++) {
struct of_phandle_args parent;
- struct plic_handler *handler;
irq_hw_number_t hwirq;
int cpu, hartid;
@@ -328,7 +323,7 @@ static int __init plic_init(struct device_node *node,
if (parent.args[0] != RV_IRQ_EXT)
continue;
- hartid = plic_find_hart_id(parent.np);
+ hartid = riscv_of_parent_hartid(parent.np);
if (hartid < 0) {
pr_warn("failed to parse hart ID for context %d.\n", i);
continue;
@@ -340,6 +335,14 @@ static int __init plic_init(struct device_node *node,
continue;
}
+ /* Find parent domain and register chained handler */
+ if (!plic_parent_irq && irq_find_host(parent.np)) {
+ plic_parent_irq = irq_of_parse_and_map(node, i);
+ if (plic_parent_irq)
+ irq_set_chained_handler(plic_parent_irq,
+ plic_handle_irq);
+ }
+
/*
* When running in M-mode we need to ignore the S-mode handler.
* Here we assume it always comes later, but that might be a
@@ -366,12 +369,20 @@ done:
nr_handlers++;
}
- cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
+ /*
+ * We can have multiple PLIC instances, so set up the cpuhp state
+ * only when the context handler for the current/boot CPU is present.
+ */
+ handler = this_cpu_ptr(&plic_handlers);
+ if (handler->present && !plic_cpuhp_setup_done) {
+ cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
"irqchip/sifive/plic:starting",
plic_starting_cpu, plic_dying_cpu);
- pr_info("mapped %d interrupts with %d handlers for %d contexts.\n",
- nr_irqs, nr_handlers, nr_contexts);
- set_handle_irq(plic_handle_irq);
+ plic_cpuhp_setup_done = true;
+ }
+
+ pr_info("%pOFP: mapped %d interrupts with %d handlers for"
+ " %d contexts.\n", node, nr_irqs, nr_handlers, nr_contexts);
return 0;
out_iounmap:
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index c664d84e1667..ed943140e1fd 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -83,6 +83,17 @@ config LEDS_APU
To compile this driver as a module, choose M here: the
module will be called leds-apu.
+config LEDS_ARIEL
+ tristate "Dell Wyse 3020 status LED support"
+ depends on LEDS_CLASS
+ depends on (MACH_MMP3_DT && MFD_ENE_KB3930) || COMPILE_TEST
+ help
+ This driver adds support for controlling the front panel status
+ LEDs on the Dell Wyse 3020 (Ariel) board via the KB3930 Embedded
+ Controller.
+
+ Say Y if your machine is a Dell Wyse 3020 thin client.
+
config LEDS_AS3645A
tristate "AS3645A and LM3555 LED flash controllers support"
depends on I2C && LEDS_CLASS_FLASH
@@ -92,6 +103,16 @@ config LEDS_AS3645A
controller. V4L2 flash API is provided as well if
CONFIG_V4L2_FLASH_API is enabled.
+config LEDS_AW2013
+ tristate "LED support for Awinic AW2013"
+ depends on LEDS_CLASS && I2C && OF
+ help
+ This option enables support for the AW2013 3-channel
+ LED driver.
+
+ To compile this driver as a module, choose M here: the module
+ will be called leds-aw2013.
+
config LEDS_BCM6328
tristate "LED Support for Broadcom BCM6328"
depends on LEDS_CLASS
@@ -857,6 +878,14 @@ config LEDS_IP30
To compile this driver as a module, choose M here: the module
will be called leds-ip30.
+config LEDS_SGM3140
+ tristate "LED support for the SGM3140"
+ depends on LEDS_CLASS_FLASH
+ depends on V4L2_FLASH_LED_CLASS || !V4L2_FLASH_LED_CLASS
+ help
+ This option enables support for the SGM3140 500mA Buck/Boost Charge
+ Pump LED Driver.
+
comment "LED Triggers"
source "drivers/leds/trigger/Kconfig"
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 45235d5fb218..d6b8a792c936 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -12,8 +12,10 @@ obj-$(CONFIG_LEDS_AAT1290) += leds-aat1290.o
obj-$(CONFIG_LEDS_ADP5520) += leds-adp5520.o
obj-$(CONFIG_LEDS_AN30259A) += leds-an30259a.o
obj-$(CONFIG_LEDS_APU) += leds-apu.o
+obj-$(CONFIG_LEDS_ARIEL) += leds-ariel.o
obj-$(CONFIG_LEDS_AS3645A) += leds-as3645a.o
obj-$(CONFIG_LEDS_ASIC3) += leds-asic3.o
+obj-$(CONFIG_LEDS_AW2013) += leds-aw2013.o
obj-$(CONFIG_LEDS_BCM6328) += leds-bcm6328.o
obj-$(CONFIG_LEDS_BCM6358) += leds-bcm6358.o
obj-$(CONFIG_LEDS_BD2802) += leds-bd2802.o
@@ -77,6 +79,7 @@ obj-$(CONFIG_LEDS_PWM) += leds-pwm.o
obj-$(CONFIG_LEDS_REGULATOR) += leds-regulator.o
obj-$(CONFIG_LEDS_S3C24XX) += leds-s3c24xx.o
obj-$(CONFIG_LEDS_SC27XX_BLTC) += leds-sc27xx-bltc.o
+obj-$(CONFIG_LEDS_SGM3140) += leds-sgm3140.o
obj-$(CONFIG_LEDS_SUNFIRE) += leds-sunfire.o
obj-$(CONFIG_LEDS_SYSCON) += leds-syscon.o
obj-$(CONFIG_LEDS_TCA6507) += leds-tca6507.o
diff --git a/drivers/leds/leds-ariel.c b/drivers/leds/leds-ariel.c
new file mode 100644
index 000000000000..bb68ba23a7d4
--- /dev/null
+++ b/drivers/leds/leds-ariel.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0-or-later
+/*
+ * Dell Wyse 3020 a.k.a. "Ariel" Embedded Controller LED Driver
+ *
+ * Copyright (C) 2020 Lubomir Rintel
+ */
+
+#include <linux/module.h>
+#include <linux/leds.h>
+#include <linux/regmap.h>
+#include <linux/of_platform.h>
+
+enum ec_index {
+ EC_BLUE_LED = 0x01,
+ EC_AMBER_LED = 0x02,
+ EC_GREEN_LED = 0x03,
+};
+
+enum {
+ EC_LED_OFF = 0x00,
+ EC_LED_STILL = 0x01,
+ EC_LED_FADE = 0x02,
+ EC_LED_BLINK = 0x03,
+};
+
+struct ariel_led {
+ struct regmap *ec_ram;
+ enum ec_index ec_index;
+ struct led_classdev led_cdev;
+};
+
+#define led_cdev_to_ariel_led(c) container_of(c, struct ariel_led, led_cdev)
+
+static enum led_brightness ariel_led_get(struct led_classdev *led_cdev)
+{
+ struct ariel_led *led = led_cdev_to_ariel_led(led_cdev);
+ unsigned int led_status = 0;
+
+ if (regmap_read(led->ec_ram, led->ec_index, &led_status))
+ return LED_OFF;
+
+ if (led_status == EC_LED_STILL)
+ return LED_FULL;
+ else
+ return LED_OFF;
+}
+
+static void ariel_led_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct ariel_led *led = led_cdev_to_ariel_led(led_cdev);
+
+ if (brightness == LED_OFF)
+ regmap_write(led->ec_ram, led->ec_index, EC_LED_OFF);
+ else
+ regmap_write(led->ec_ram, led->ec_index, EC_LED_STILL);
+}
+
+static int ariel_blink_set(struct led_classdev *led_cdev,
+ unsigned long *delay_on, unsigned long *delay_off)
+{
+ struct ariel_led *led = led_cdev_to_ariel_led(led_cdev);
+
+ if (*delay_on == 0 && *delay_off == 0)
+ return -EINVAL;
+
+ if (*delay_on == 0) {
+ regmap_write(led->ec_ram, led->ec_index, EC_LED_OFF);
+ } else if (*delay_off == 0) {
+ regmap_write(led->ec_ram, led->ec_index, EC_LED_STILL);
+ } else {
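+ /* The EC blink rate is not adjustable here; report the effective 500 ms on/off timing */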
+ *delay_on = 500;
+ *delay_off = 500;
+ regmap_write(led->ec_ram, led->ec_index, EC_LED_BLINK);
+ }
+
+ return 0;
+}
+
+#define NLEDS 3
+
+static int ariel_led_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ariel_led *leds;
+ struct regmap *ec_ram;
+ int ret;
+ int i;
+
+ ec_ram = dev_get_regmap(dev->parent, "ec_ram");
+ if (!ec_ram)
+ return -ENODEV;
+
+ leds = devm_kcalloc(dev, NLEDS, sizeof(*leds), GFP_KERNEL);
+ if (!leds)
+ return -ENOMEM;
+
+ leds[0].ec_index = EC_BLUE_LED;
+ leds[0].led_cdev.name = "blue:power";
+ leds[0].led_cdev.default_trigger = "default-on";
+
+ leds[1].ec_index = EC_AMBER_LED;
+ leds[1].led_cdev.name = "amber:status";
+
+ leds[2].ec_index = EC_GREEN_LED;
+ leds[2].led_cdev.name = "green:status";
+ leds[2].led_cdev.default_trigger = "default-on";
+
+ for (i = 0; i < NLEDS; i++) {
+ leds[i].ec_ram = ec_ram;
+ leds[i].led_cdev.brightness_get = ariel_led_get;
+ leds[i].led_cdev.brightness_set = ariel_led_set;
+ leds[i].led_cdev.blink_set = ariel_blink_set;
+
+ ret = devm_led_classdev_register(dev, &leds[i].led_cdev);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct platform_driver ariel_led_driver = {
+ .probe = ariel_led_probe,
+ .driver = {
+ .name = "dell-wyse-ariel-led",
+ },
+};
+module_platform_driver(ariel_led_driver);
+
+MODULE_AUTHOR("Lubomir Rintel <lkundrak@v3.sk>");
+MODULE_DESCRIPTION("Dell Wyse 3020 Status LEDs Driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/leds/leds-aw2013.c b/drivers/leds/leds-aw2013.c
new file mode 100644
index 000000000000..d709cc1f949e
--- /dev/null
+++ b/drivers/leds/leds-aw2013.c
@@ -0,0 +1,436 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Driver for Awinic AW2013 3-channel LED driver
+
+#include <linux/i2c.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+
+#define AW2013_MAX_LEDS 3
+
+/* Reset and ID register */
+#define AW2013_RSTR 0x00
+#define AW2013_RSTR_RESET 0x55
+#define AW2013_RSTR_CHIP_ID 0x33
+
+/* Global control register */
+#define AW2013_GCR 0x01
+#define AW2013_GCR_ENABLE BIT(0)
+
+/* LED channel enable register */
+#define AW2013_LCTR 0x30
+#define AW2013_LCTR_LE(x) BIT((x))
+
+/* LED channel control registers */
+#define AW2013_LCFG(x) (0x31 + (x))
+#define AW2013_LCFG_IMAX_MASK (BIT(0) | BIT(1)) // Should be 0-3
+#define AW2013_LCFG_MD BIT(4)
+#define AW2013_LCFG_FI BIT(5)
+#define AW2013_LCFG_FO BIT(6)
+
+/* LED channel PWM registers */
+#define AW2013_REG_PWM(x) (0x34 + (x))
+
+/* LED channel timing registers */
+#define AW2013_LEDT0(x) (0x37 + (x) * 3)
+#define AW2013_LEDT0_T1(x) ((x) << 4) // Should be 0-7
+#define AW2013_LEDT0_T2(x) (x) // Should be 0-5
+
+#define AW2013_LEDT1(x) (0x38 + (x) * 3)
+#define AW2013_LEDT1_T3(x) ((x) << 4) // Should be 0-7
+#define AW2013_LEDT1_T4(x) (x) // Should be 0-7
+
+#define AW2013_LEDT2(x) (0x39 + (x) * 3)
+#define AW2013_LEDT2_T0(x) ((x) << 4) // Should be 0-8
+#define AW2013_LEDT2_REPEAT(x) (x) // Should be 0-15
+
+#define AW2013_REG_MAX 0x77
+
+#define AW2013_TIME_STEP 130 /* ms */
+
+struct aw2013;
+
+struct aw2013_led {
+ struct aw2013 *chip;
+ struct led_classdev cdev;
+ u32 num;
+ unsigned int imax;
+};
+
+struct aw2013 {
+ struct mutex mutex; /* held when writing to registers */
+ struct regulator *vcc_regulator;
+ struct i2c_client *client;
+ struct aw2013_led leds[AW2013_MAX_LEDS];
+ struct regmap *regmap;
+ int num_leds;
+ bool enabled;
+};
+
+static int aw2013_chip_init(struct aw2013 *chip)
+{
+ int i, ret;
+
+ ret = regmap_write(chip->regmap, AW2013_GCR, AW2013_GCR_ENABLE);
+ if (ret) {
+ dev_err(&chip->client->dev, "Failed to enable the chip: %d\n",
+ ret);
+ return ret;
+ }
+
+ for (i = 0; i < chip->num_leds; i++) {
+ ret = regmap_update_bits(chip->regmap,
+ AW2013_LCFG(chip->leds[i].num),
+ AW2013_LCFG_IMAX_MASK,
+ chip->leds[i].imax);
+ if (ret) {
+ dev_err(&chip->client->dev,
+ "Failed to set maximum current for led %d: %d\n",
+ chip->leds[i].num, ret);
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static void aw2013_chip_disable(struct aw2013 *chip)
+{
+ int ret;
+
+ if (!chip->enabled)
+ return;
+
+ regmap_write(chip->regmap, AW2013_GCR, 0);
+
+ ret = regulator_disable(chip->vcc_regulator);
+ if (ret) {
+ dev_err(&chip->client->dev,
+ "Failed to disable regulator: %d\n", ret);
+ return;
+ }
+
+ chip->enabled = false;
+}
+
+static int aw2013_chip_enable(struct aw2013 *chip)
+{
+ int ret;
+
+ if (chip->enabled)
+ return 0;
+
+ ret = regulator_enable(chip->vcc_regulator);
+ if (ret) {
+ dev_err(&chip->client->dev,
+ "Failed to enable regulator: %d\n", ret);
+ return ret;
+ }
+ chip->enabled = true;
+
+ ret = aw2013_chip_init(chip);
+ if (ret)
+ aw2013_chip_disable(chip);
+
+ return ret;
+}
+
+static bool aw2013_chip_in_use(struct aw2013 *chip)
+{
+ int i;
+
+ for (i = 0; i < chip->num_leds; i++)
+ if (chip->leds[i].cdev.brightness)
+ return true;
+
+ return false;
+}
+
+static int aw2013_brightness_set(struct led_classdev *cdev,
+ enum led_brightness brightness)
+{
+ struct aw2013_led *led = container_of(cdev, struct aw2013_led, cdev);
+ int ret, num;
+
+ mutex_lock(&led->chip->mutex);
+
+ if (aw2013_chip_in_use(led->chip)) {
+ ret = aw2013_chip_enable(led->chip);
+ if (ret)
+ goto error;
+ }
+
+ num = led->num;
+
+ ret = regmap_write(led->chip->regmap, AW2013_REG_PWM(num), brightness);
+ if (ret)
+ goto error;
+
+ if (brightness) {
+ ret = regmap_update_bits(led->chip->regmap, AW2013_LCTR,
+ AW2013_LCTR_LE(num), 0xFF);
+ } else {
+ ret = regmap_update_bits(led->chip->regmap, AW2013_LCTR,
+ AW2013_LCTR_LE(num), 0);
+ if (ret)
+ goto error;
+ ret = regmap_update_bits(led->chip->regmap, AW2013_LCFG(num),
+ AW2013_LCFG_MD, 0);
+ }
+ if (ret)
+ goto error;
+
+ if (!aw2013_chip_in_use(led->chip))
+ aw2013_chip_disable(led->chip);
+
+error:
+ mutex_unlock(&led->chip->mutex);
+
+ return ret;
+}
+
+static int aw2013_blink_set(struct led_classdev *cdev,
+ unsigned long *delay_on, unsigned long *delay_off)
+{
+ struct aw2013_led *led = container_of(cdev, struct aw2013_led, cdev);
+ int ret, num = led->num;
+ unsigned long off = 0, on = 0;
+
+ /* If no blink specified, default to 1 Hz. */
+ if (!*delay_off && !*delay_on) {
+ *delay_off = 500;
+ *delay_on = 500;
+ }
+
+ if (!led->cdev.brightness) {
+ led->cdev.brightness = LED_FULL;
+ ret = aw2013_brightness_set(&led->cdev, led->cdev.brightness);
+ if (ret)
+ return ret;
+ }
+
+ /* Never on - just set to off */
+ if (!*delay_on) {
+ led->cdev.brightness = LED_OFF;
+ return aw2013_brightness_set(&led->cdev, LED_OFF);
+ }
+
+ mutex_lock(&led->chip->mutex);
+
+ /* Never off - brightness is already set, disable blinking */
+ if (!*delay_off) {
+ ret = regmap_update_bits(led->chip->regmap, AW2013_LCFG(num),
+ AW2013_LCFG_MD, 0);
+ goto out;
+ }
+
+ /* Convert into values the HW will understand. */
+ off = min(5, ilog2((*delay_off - 1) / AW2013_TIME_STEP) + 1);
+ on = min(7, ilog2((*delay_on - 1) / AW2013_TIME_STEP) + 1);
+
+ *delay_off = BIT(off) * AW2013_TIME_STEP;
+ *delay_on = BIT(on) * AW2013_TIME_STEP;
+
+ /* Set timings */
+ ret = regmap_write(led->chip->regmap,
+ AW2013_LEDT0(num), AW2013_LEDT0_T2(on));
+ if (ret)
+ goto out;
+ ret = regmap_write(led->chip->regmap,
+ AW2013_LEDT1(num), AW2013_LEDT1_T4(off));
+ if (ret)
+ goto out;
+
+ /* Finally, enable the LED */
+ ret = regmap_update_bits(led->chip->regmap, AW2013_LCFG(num),
+ AW2013_LCFG_MD, 0xFF);
+ if (ret)
+ goto out;
+
+ ret = regmap_update_bits(led->chip->regmap, AW2013_LCTR,
+ AW2013_LCTR_LE(num), 0xFF);
+
+out:
+ mutex_unlock(&led->chip->mutex);
+
+ return ret;
+}
+
+static int aw2013_probe_dt(struct aw2013 *chip)
+{
+ struct device_node *np = chip->client->dev.of_node, *child;
+ int count, ret = 0, i = 0;
+ struct aw2013_led *led;
+
+ count = of_get_child_count(np);
+ if (!count || count > AW2013_MAX_LEDS)
+ return -EINVAL;
+
+ regmap_write(chip->regmap, AW2013_RSTR, AW2013_RSTR_RESET);
+
+ for_each_available_child_of_node(np, child) {
+ struct led_init_data init_data = {};
+ u32 source;
+ u32 imax;
+
+ ret = of_property_read_u32(child, "reg", &source);
+ if (ret != 0 || source >= AW2013_MAX_LEDS) {
+ dev_err(&chip->client->dev,
+ "Couldn't read LED address: %d\n", ret);
+ count--;
+ continue;
+ }
+
+ led = &chip->leds[i];
+ led->num = source;
+ led->chip = chip;
+ init_data.fwnode = of_fwnode_handle(child);
+
+ if (!of_property_read_u32(child, "led-max-microamp", &imax)) {
+ led->imax = min_t(u32, imax / 5000, 3);
+ } else {
+ led->imax = 1; // 5mA
+ dev_info(&chip->client->dev,
+ "DT property led-max-microamp is missing\n");
+ }
+
+ of_property_read_string(child, "linux,default-trigger",
+ &led->cdev.default_trigger);
+
+ led->cdev.brightness_set_blocking = aw2013_brightness_set;
+ led->cdev.blink_set = aw2013_blink_set;
+
+ ret = devm_led_classdev_register_ext(&chip->client->dev,
+ &led->cdev, &init_data);
+ if (ret < 0)
+ return ret;
+
+ i++;
+ }
+
+ if (!count)
+ return -EINVAL;
+
+ chip->num_leds = i;
+
+ return 0;
+}
+
+static const struct regmap_config aw2013_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = AW2013_REG_MAX,
+};
+
+static int aw2013_probe(struct i2c_client *client)
+{
+ struct aw2013 *chip;
+ int ret;
+ unsigned int chipid;
+
+ chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ mutex_init(&chip->mutex);
+ mutex_lock(&chip->mutex);
+
+ chip->client = client;
+ i2c_set_clientdata(client, chip);
+
+ chip->regmap = devm_regmap_init_i2c(client, &aw2013_regmap_config);
+ if (IS_ERR(chip->regmap)) {
+ ret = PTR_ERR(chip->regmap);
+ dev_err(&client->dev, "Failed to allocate register map: %d\n",
+ ret);
+ goto error;
+ }
+
+ chip->vcc_regulator = devm_regulator_get(&client->dev, "vcc");
+ ret = PTR_ERR_OR_ZERO(chip->vcc_regulator);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(&client->dev,
+ "Failed to request regulator: %d\n", ret);
+ goto error;
+ }
+
+ ret = regulator_enable(chip->vcc_regulator);
+ if (ret) {
+ dev_err(&client->dev,
+ "Failed to enable regulator: %d\n", ret);
+ goto error;
+ }
+
+ ret = regmap_read(chip->regmap, AW2013_RSTR, &chipid);
+ if (ret) {
+ dev_err(&client->dev, "Failed to read chip ID: %d\n",
+ ret);
+ goto error_reg;
+ }
+
+ if (chipid != AW2013_RSTR_CHIP_ID) {
+ dev_err(&client->dev, "Chip reported wrong ID: %x\n",
+ chipid);
+ ret = -ENODEV;
+ goto error_reg;
+ }
+
+ ret = aw2013_probe_dt(chip);
+ if (ret < 0)
+ goto error_reg;
+
+ ret = regulator_disable(chip->vcc_regulator);
+ if (ret) {
+ dev_err(&client->dev,
+ "Failed to disable regulator: %d\n", ret);
+ goto error;
+ }
+
+ mutex_unlock(&chip->mutex);
+
+ return 0;
+
+error_reg:
+ regulator_disable(chip->vcc_regulator);
+
+error:
+ mutex_destroy(&chip->mutex);
+ return ret;
+}
+
+static int aw2013_remove(struct i2c_client *client)
+{
+ struct aw2013 *chip = i2c_get_clientdata(client);
+
+ aw2013_chip_disable(chip);
+
+ mutex_destroy(&chip->mutex);
+
+ return 0;
+}
+
+static const struct of_device_id aw2013_match_table[] = {
+ { .compatible = "awinic,aw2013", },
+ { /* sentinel */ },
+};
+
+MODULE_DEVICE_TABLE(of, aw2013_match_table);
+
+static struct i2c_driver aw2013_driver = {
+ .driver = {
+ .name = "leds-aw2013",
+ .of_match_table = of_match_ptr(aw2013_match_table),
+ },
+ .probe_new = aw2013_probe,
+ .remove = aw2013_remove,
+};
+
+module_i2c_driver(aw2013_driver);
+
+MODULE_AUTHOR("Nikita Travkin <nikitos.tr@gmail.com>");
+MODULE_DESCRIPTION("AW2013 LED driver");
+MODULE_LICENSE("GPL v2");
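Reviewer note, not part of the patch: a minimal userspace sketch of the delay rounding that aw2013_blink_set() performs above. Requested on/off times are rounded up to the nearest power-of-two multiple of the 130 ms hardware step and clamped to the width of the timing field (7 for the "on" value, 5 for the "off" value, as in the driver). The helper names ilog2_u() and round_delay() are made up for illustration; ilog2_u() stands in for the kernel's ilog2()/fls().

#include <stdio.h>

#define AW2013_TIME_STEP 130 /* ms */

/* floor(log2(n)) for n >= 1, -1 for n == 0; mirrors the kernel's fls() - 1 */
static int ilog2_u(unsigned long n)
{
	int r = -1;

	while (n) {
		n >>= 1;
		r++;
	}
	return r;
}

/* Round a requested delay (ms, >= 1) up to BIT(exp) * 130 ms, exp <= max_exp */
static unsigned long round_delay(unsigned long ms, int max_exp)
{
	int exp = ilog2_u((ms - 1) / AW2013_TIME_STEP) + 1;

	if (exp > max_exp)
		exp = max_exp;
	return (1UL << exp) * AW2013_TIME_STEP;
}

int main(void)
{
	unsigned long req[] = { 100, 500, 1000, 5000 };
	unsigned int i;

	for (i = 0; i < sizeof(req) / sizeof(req[0]); i++)
		printf("requested %4lu ms -> on %5lu ms, off %5lu ms\n",
		       req[i], round_delay(req[i], 7), round_delay(req[i], 5));
	return 0;
}

For example, a requested 500 ms becomes 520 ms (4 x 130 ms), which is also the value written back through *delay_on / *delay_off so user space sees the delay the hardware will actually use.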
diff --git a/drivers/leds/leds-lm355x.c b/drivers/leds/leds-lm355x.c
index a5abb499574b..11ce05249751 100644
--- a/drivers/leds/leds-lm355x.c
+++ b/drivers/leds/leds-lm355x.c
@@ -7,7 +7,6 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/i2c.h>
-#include <linux/gpio.h>
#include <linux/leds.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
diff --git a/drivers/leds/leds-lp3952.c b/drivers/leds/leds-lp3952.c
index 4e4e542774cb..6ee9131fbf25 100644
--- a/drivers/leds/leds-lp3952.c
+++ b/drivers/leds/leds-lp3952.c
@@ -7,7 +7,7 @@
*/
#include <linux/delay.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/io.h>
#include <linux/kernel.h>
diff --git a/drivers/leds/leds-lt3593.c b/drivers/leds/leds-lt3593.c
index c94995f0daa2..9079850e6ea4 100644
--- a/drivers/leds/leds-lt3593.c
+++ b/drivers/leds/leds-lt3593.c
@@ -5,7 +5,6 @@
#include <linux/platform_device.h>
#include <linux/leds.h>
#include <linux/delay.h>
-#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/slab.h>
#include <linux/module.h>
diff --git a/drivers/leds/leds-netxbig.c b/drivers/leds/leds-netxbig.c
index 14ef4ccdda3a..ceceeb6a0e96 100644
--- a/drivers/leds/leds-netxbig.c
+++ b/drivers/leds/leds-netxbig.c
@@ -12,16 +12,17 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/leds.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
struct netxbig_gpio_ext {
- unsigned int *addr;
+ struct gpio_desc **addr;
int num_addr;
- unsigned int *data;
+ struct gpio_desc **data;
int num_data;
- unsigned int enable;
+ struct gpio_desc *enable;
};
enum netxbig_led_mode {
@@ -69,7 +70,7 @@ static void gpio_ext_set_addr(struct netxbig_gpio_ext *gpio_ext, int addr)
int pin;
for (pin = 0; pin < gpio_ext->num_addr; pin++)
- gpio_set_value(gpio_ext->addr[pin], (addr >> pin) & 1);
+ gpiod_set_value(gpio_ext->addr[pin], (addr >> pin) & 1);
}
static void gpio_ext_set_data(struct netxbig_gpio_ext *gpio_ext, int data)
@@ -77,14 +78,14 @@ static void gpio_ext_set_data(struct netxbig_gpio_ext *gpio_ext, int data)
int pin;
for (pin = 0; pin < gpio_ext->num_data; pin++)
- gpio_set_value(gpio_ext->data[pin], (data >> pin) & 1);
+ gpiod_set_value(gpio_ext->data[pin], (data >> pin) & 1);
}
static void gpio_ext_enable_select(struct netxbig_gpio_ext *gpio_ext)
{
/* Enable select is done on the raising edge. */
- gpio_set_value(gpio_ext->enable, 0);
- gpio_set_value(gpio_ext->enable, 1);
+ gpiod_set_value(gpio_ext->enable, 0);
+ gpiod_set_value(gpio_ext->enable, 1);
}
static void gpio_ext_set_value(struct netxbig_gpio_ext *gpio_ext,
@@ -99,41 +100,6 @@ static void gpio_ext_set_value(struct netxbig_gpio_ext *gpio_ext,
spin_unlock_irqrestore(&gpio_ext_lock, flags);
}
-static int gpio_ext_init(struct platform_device *pdev,
- struct netxbig_gpio_ext *gpio_ext)
-{
- int err;
- int i;
-
- if (unlikely(!gpio_ext))
- return -EINVAL;
-
- /* Configure address GPIOs. */
- for (i = 0; i < gpio_ext->num_addr; i++) {
- err = devm_gpio_request_one(&pdev->dev, gpio_ext->addr[i],
- GPIOF_OUT_INIT_LOW,
- "GPIO extension addr");
- if (err)
- return err;
- }
- /* Configure data GPIOs. */
- for (i = 0; i < gpio_ext->num_data; i++) {
- err = devm_gpio_request_one(&pdev->dev, gpio_ext->data[i],
- GPIOF_OUT_INIT_LOW,
- "GPIO extension data");
- if (err)
- return err;
- }
- /* Configure "enable select" GPIO. */
- err = devm_gpio_request_one(&pdev->dev, gpio_ext->enable,
- GPIOF_OUT_INIT_LOW,
- "GPIO extension enable");
- if (err)
- return err;
-
- return 0;
-}
-
/*
* Class LED driver.
*/
@@ -347,15 +313,47 @@ static int create_netxbig_led(struct platform_device *pdev,
return devm_led_classdev_register(&pdev->dev, &led_dat->cdev);
}
-static int gpio_ext_get_of_pdata(struct device *dev, struct device_node *np,
- struct netxbig_gpio_ext *gpio_ext)
+/**
+ * netxbig_gpio_ext_remove() - Clean up GPIO extension data
+ * @data: managed resource data to clean up
+ *
+ * Since we pick up GPIO descriptors from a device other than the one our
+ * driver is probing, we need to register a specific callback to free
+ * them up using managed resources.
+ */
+static void netxbig_gpio_ext_remove(void *data)
+{
+ struct netxbig_gpio_ext *gpio_ext = data;
+ int i;
+
+ for (i = 0; i < gpio_ext->num_addr; i++)
+ gpiod_put(gpio_ext->addr[i]);
+ for (i = 0; i < gpio_ext->num_data; i++)
+ gpiod_put(gpio_ext->data[i]);
+ gpiod_put(gpio_ext->enable);
+}
+
+/**
+ * netxbig_gpio_ext_get() - Obtain GPIO extension device data
+ * @dev: main LED device
+ * @gpio_ext_dev: the GPIO extension device
+ * @gpio_ext: the data structure holding the GPIO extension data
+ *
+ * This function walks the subdevice that only contains GPIO line
+ * handles in the device tree and obtains the GPIO descriptors from that
+ * device.
+ */
+static int netxbig_gpio_ext_get(struct device *dev,
+ struct device *gpio_ext_dev,
+ struct netxbig_gpio_ext *gpio_ext)
{
- int *addr, *data;
+ struct gpio_desc **addr, **data;
int num_addr, num_data;
+ struct gpio_desc *gpiod;
int ret;
int i;
- ret = of_gpio_named_count(np, "addr-gpios");
+ ret = gpiod_count(gpio_ext_dev, "addr");
if (ret < 0) {
dev_err(dev,
"Failed to count GPIOs in DT property addr-gpios\n");
@@ -366,16 +364,25 @@ static int gpio_ext_get_of_pdata(struct device *dev, struct device_node *np,
if (!addr)
return -ENOMEM;
+ /*
+ * We cannot use devm_ managed resources with these GPIO descriptors
+ * since they are associated with the "GPIO extension device", which
+ * is not bound to any driver. The device tree parser will however
+ * populate a platform device for it, so we can still obtain the
+ * GPIO descriptors from that device.
+ */
for (i = 0; i < num_addr; i++) {
- ret = of_get_named_gpio(np, "addr-gpios", i);
- if (ret < 0)
- return ret;
- addr[i] = ret;
+ gpiod = gpiod_get_index(gpio_ext_dev, "addr", i,
+ GPIOD_OUT_LOW);
+ if (IS_ERR(gpiod))
+ return PTR_ERR(gpiod);
+ gpiod_set_consumer_name(gpiod, "GPIO extension addr");
+ addr[i] = gpiod;
}
gpio_ext->addr = addr;
gpio_ext->num_addr = num_addr;
- ret = of_gpio_named_count(np, "data-gpios");
+ ret = gpiod_count(gpio_ext_dev, "data");
if (ret < 0) {
dev_err(dev,
"Failed to count GPIOs in DT property data-gpios\n");
@@ -387,23 +394,26 @@ static int gpio_ext_get_of_pdata(struct device *dev, struct device_node *np,
return -ENOMEM;
for (i = 0; i < num_data; i++) {
- ret = of_get_named_gpio(np, "data-gpios", i);
- if (ret < 0)
- return ret;
- data[i] = ret;
+ gpiod = gpiod_get_index(gpio_ext_dev, "data", i,
+ GPIOD_OUT_LOW);
+ if (IS_ERR(gpiod))
+ return PTR_ERR(gpiod);
+ gpiod_set_consumer_name(gpiod, "GPIO extension data");
+ data[i] = gpiod;
}
gpio_ext->data = data;
gpio_ext->num_data = num_data;
- ret = of_get_named_gpio(np, "enable-gpio", 0);
- if (ret < 0) {
+ gpiod = gpiod_get(gpio_ext_dev, "enable", GPIOD_OUT_LOW);
+ if (IS_ERR(gpiod)) {
dev_err(dev,
"Failed to get GPIO from DT property enable-gpio\n");
- return ret;
+ return PTR_ERR(gpiod);
}
- gpio_ext->enable = ret;
+ gpiod_set_consumer_name(gpiod, "GPIO extension enable");
+ gpio_ext->enable = gpiod;
- return 0;
+ return devm_add_action_or_reset(dev, netxbig_gpio_ext_remove, gpio_ext);
}
static int netxbig_leds_get_of_pdata(struct device *dev,
@@ -411,6 +421,8 @@ static int netxbig_leds_get_of_pdata(struct device *dev,
{
struct device_node *np = dev->of_node;
struct device_node *gpio_ext_np;
+ struct platform_device *gpio_ext_pdev;
+ struct device *gpio_ext_dev;
struct device_node *child;
struct netxbig_gpio_ext *gpio_ext;
struct netxbig_led_timer *timers;
@@ -426,13 +438,19 @@ static int netxbig_leds_get_of_pdata(struct device *dev,
dev_err(dev, "Failed to get DT handle gpio-ext\n");
return -EINVAL;
}
+ gpio_ext_pdev = of_find_device_by_node(gpio_ext_np);
+ if (!gpio_ext_pdev) {
+ dev_err(dev, "Failed to find platform device for gpio-ext\n");
+ return -ENODEV;
+ }
+ gpio_ext_dev = &gpio_ext_pdev->dev;
gpio_ext = devm_kzalloc(dev, sizeof(*gpio_ext), GFP_KERNEL);
if (!gpio_ext) {
of_node_put(gpio_ext_np);
return -ENOMEM;
}
- ret = gpio_ext_get_of_pdata(dev, gpio_ext_np, gpio_ext);
+ ret = netxbig_gpio_ext_get(dev, gpio_ext_dev, gpio_ext);
of_node_put(gpio_ext_np);
if (ret)
return ret;
@@ -585,10 +603,6 @@ static int netxbig_led_probe(struct platform_device *pdev)
if (!leds_data)
return -ENOMEM;
- ret = gpio_ext_init(pdev, pdata->gpio_ext);
- if (ret < 0)
- return ret;
-
for (i = 0; i < pdata->num_leds; i++) {
ret = create_netxbig_led(pdev, pdata,
&leds_data[i], &pdata->leds[i]);
diff --git a/drivers/leds/leds-pca963x.c b/drivers/leds/leds-pca963x.c
index 66cdc003b8f4..d288acbc99c7 100644
--- a/drivers/leds/leds-pca963x.c
+++ b/drivers/leds/leds-pca963x.c
@@ -4,7 +4,7 @@
* Copyright 2013 Qtechnology/AS
*
* Author: Peter Meerwald <p.meerwald@bct-electronic.com>
- * Author: Ricardo Ribalda <ricardo.ribalda@gmail.com>
+ * Author: Ricardo Ribalda <ribalda@kernel.org>
*
* Based on leds-pca955x.c
*
diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
index 6c8a724aac51..ef7b91bd2064 100644
--- a/drivers/leds/leds-pwm.c
+++ b/drivers/leds/leds-pwm.c
@@ -91,15 +91,21 @@ static int led_pwm_add(struct device *dev, struct led_pwm_priv *priv,
pwm_init_state(led_data->pwm, &led_data->pwmstate);
ret = devm_led_classdev_register(dev, &led_data->cdev);
- if (ret == 0) {
- priv->num_leds++;
- led_pwm_set(&led_data->cdev, led_data->cdev.brightness);
- } else {
+ if (ret) {
dev_err(dev, "failed to register PWM led for %s: %d\n",
led->name, ret);
+ return ret;
}
- return ret;
+ ret = led_pwm_set(&led_data->cdev, led_data->cdev.brightness);
+ if (ret) {
+ dev_err(dev, "failed to set led PWM value for %s: %d",
+ led->name, ret);
+ return ret;
+ }
+
+ priv->num_leds++;
+ return 0;
}
static int led_pwm_create_fwnode(struct device *dev, struct led_pwm_priv *priv)
diff --git a/drivers/leds/leds-sgm3140.c b/drivers/leds/leds-sgm3140.c
new file mode 100644
index 000000000000..c494b934ae09
--- /dev/null
+++ b/drivers/leds/leds-sgm3140.c
@@ -0,0 +1,320 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2020 Luca Weiss <luca@z3ntu.xyz>
+
+#include <linux/gpio/consumer.h>
+#include <linux/led-class-flash.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+#include <linux/platform_device.h>
+
+#include <media/v4l2-flash-led-class.h>
+
+#define FLASH_TIMEOUT_DEFAULT 250000U /* 250ms */
+#define FLASH_MAX_TIMEOUT_DEFAULT 300000U /* 300ms */
+
+struct sgm3140 {
+ struct led_classdev_flash fled_cdev;
+ struct v4l2_flash *v4l2_flash;
+
+ struct timer_list powerdown_timer;
+
+ struct gpio_desc *flash_gpio;
+ struct gpio_desc *enable_gpio;
+ struct regulator *vin_regulator;
+
+ bool enabled;
+
+ /* current timeout in us */
+ u32 timeout;
+ /* maximum timeout in us */
+ u32 max_timeout;
+};
+
+static struct sgm3140 *flcdev_to_sgm3140(struct led_classdev_flash *flcdev)
+{
+ return container_of(flcdev, struct sgm3140, fled_cdev);
+}
+
+static int sgm3140_strobe_set(struct led_classdev_flash *fled_cdev, bool state)
+{
+ struct sgm3140 *priv = flcdev_to_sgm3140(fled_cdev);
+ int ret;
+
+ if (priv->enabled == state)
+ return 0;
+
+ if (state) {
+ ret = regulator_enable(priv->vin_regulator);
+ if (ret) {
+ dev_err(fled_cdev->led_cdev.dev,
+ "failed to enable regulator: %d\n", ret);
+ return ret;
+ }
+ gpiod_set_value_cansleep(priv->flash_gpio, 1);
+ gpiod_set_value_cansleep(priv->enable_gpio, 1);
+ mod_timer(&priv->powerdown_timer,
+ jiffies + usecs_to_jiffies(priv->timeout));
+ } else {
+ del_timer_sync(&priv->powerdown_timer);
+ gpiod_set_value_cansleep(priv->enable_gpio, 0);
+ gpiod_set_value_cansleep(priv->flash_gpio, 0);
+ ret = regulator_disable(priv->vin_regulator);
+ if (ret) {
+ dev_err(fled_cdev->led_cdev.dev,
+ "failed to disable regulator: %d\n", ret);
+ return ret;
+ }
+ }
+
+ priv->enabled = state;
+
+ return 0;
+}
+
+static int sgm3140_strobe_get(struct led_classdev_flash *fled_cdev, bool *state)
+{
+ struct sgm3140 *priv = flcdev_to_sgm3140(fled_cdev);
+
+ *state = timer_pending(&priv->powerdown_timer);
+
+ return 0;
+}
+
+static int sgm3140_timeout_set(struct led_classdev_flash *fled_cdev,
+ u32 timeout)
+{
+ struct sgm3140 *priv = flcdev_to_sgm3140(fled_cdev);
+
+ priv->timeout = timeout;
+
+ return 0;
+}
+
+static const struct led_flash_ops sgm3140_flash_ops = {
+ .strobe_set = sgm3140_strobe_set,
+ .strobe_get = sgm3140_strobe_get,
+ .timeout_set = sgm3140_timeout_set,
+};
+
+static int sgm3140_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev);
+ struct sgm3140 *priv = flcdev_to_sgm3140(fled_cdev);
+ bool enable = brightness == LED_ON;
+ int ret;
+
+ if (priv->enabled == enable)
+ return 0;
+
+ if (enable) {
+ ret = regulator_enable(priv->vin_regulator);
+ if (ret) {
+ dev_err(led_cdev->dev,
+ "failed to enable regulator: %d\n", ret);
+ return ret;
+ }
+ gpiod_set_value_cansleep(priv->enable_gpio, 1);
+ } else {
+ gpiod_set_value_cansleep(priv->enable_gpio, 0);
+ ret = regulator_disable(priv->vin_regulator);
+ if (ret) {
+ dev_err(led_cdev->dev,
+ "failed to disable regulator: %d\n", ret);
+ return ret;
+ }
+ }
+
+ priv->enabled = enable;
+
+ return 0;
+}
+
+static void sgm3140_powerdown_timer(struct timer_list *t)
+{
+ struct sgm3140 *priv = from_timer(priv, t, powerdown_timer);
+
+ gpiod_set_value(priv->enable_gpio, 0);
+ gpiod_set_value(priv->flash_gpio, 0);
+ regulator_disable(priv->vin_regulator);
+
+ priv->enabled = false;
+}
+
+static void sgm3140_init_flash_timeout(struct sgm3140 *priv)
+{
+ struct led_classdev_flash *fled_cdev = &priv->fled_cdev;
+ struct led_flash_setting *s;
+
+ /* Init flash timeout setting */
+ s = &fled_cdev->timeout;
+ s->min = 1;
+ s->max = priv->max_timeout;
+ s->step = 1;
+ s->val = FLASH_TIMEOUT_DEFAULT;
+}
+
+#if IS_ENABLED(CONFIG_V4L2_FLASH_LED_CLASS)
+static void sgm3140_init_v4l2_flash_config(struct sgm3140 *priv,
+ struct v4l2_flash_config *v4l2_sd_cfg)
+{
+ struct led_classdev *led_cdev = &priv->fled_cdev.led_cdev;
+ struct led_flash_setting *s;
+
+ strscpy(v4l2_sd_cfg->dev_name, led_cdev->dev->kobj.name,
+ sizeof(v4l2_sd_cfg->dev_name));
+
+ /* Init flash intensity setting */
+ s = &v4l2_sd_cfg->intensity;
+ s->min = 0;
+ s->max = 1;
+ s->step = 1;
+ s->val = 1;
+}
+
+#else
+static void sgm3140_init_v4l2_flash_config(struct sgm3140 *priv,
+ struct v4l2_flash_config *v4l2_sd_cfg)
+{
+}
+#endif
+
+static int sgm3140_probe(struct platform_device *pdev)
+{
+ struct sgm3140 *priv;
+ struct led_classdev *led_cdev;
+ struct led_classdev_flash *fled_cdev;
+ struct led_init_data init_data = {};
+ struct fwnode_handle *child_node;
+ struct v4l2_flash_config v4l2_sd_cfg = {};
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->flash_gpio = devm_gpiod_get(&pdev->dev, "flash", GPIOD_OUT_LOW);
+ ret = PTR_ERR_OR_ZERO(priv->flash_gpio);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "Failed to request flash gpio: %d\n", ret);
+ return ret;
+ }
+
+ priv->enable_gpio = devm_gpiod_get(&pdev->dev, "enable", GPIOD_OUT_LOW);
+ ret = PTR_ERR_OR_ZERO(priv->enable_gpio);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "Failed to request enable gpio: %d\n", ret);
+ return ret;
+ }
+
+ priv->vin_regulator = devm_regulator_get(&pdev->dev, "vin");
+ ret = PTR_ERR_OR_ZERO(priv->vin_regulator);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "Failed to request regulator: %d\n", ret);
+ return ret;
+ }
+
+ child_node = fwnode_get_next_available_child_node(pdev->dev.fwnode,
+ NULL);
+ if (!child_node) {
+ dev_err(&pdev->dev,
+ "No fwnode child node found for connected LED.\n");
+ return -EINVAL;
+ }
+
+ ret = fwnode_property_read_u32(child_node, "flash-max-timeout-us",
+ &priv->max_timeout);
+ if (ret) {
+ priv->max_timeout = FLASH_MAX_TIMEOUT_DEFAULT;
+ dev_warn(&pdev->dev,
+ "flash-max-timeout-us property missing\n");
+ }
+
+ /*
+ * Set the default timeout to FLASH_TIMEOUT_DEFAULT, unless max_timeout
+ * from DT is lower.
+ */
+ priv->timeout = min(priv->max_timeout, FLASH_TIMEOUT_DEFAULT);
+
+ timer_setup(&priv->powerdown_timer, sgm3140_powerdown_timer, 0);
+
+ fled_cdev = &priv->fled_cdev;
+ led_cdev = &fled_cdev->led_cdev;
+
+ fled_cdev->ops = &sgm3140_flash_ops;
+
+ led_cdev->brightness_set_blocking = sgm3140_brightness_set;
+ led_cdev->max_brightness = LED_ON;
+ led_cdev->flags |= LED_DEV_CAP_FLASH;
+
+ sgm3140_init_flash_timeout(priv);
+
+ init_data.fwnode = child_node;
+
+ platform_set_drvdata(pdev, priv);
+
+ /* Register in the LED subsystem */
+ ret = devm_led_classdev_flash_register_ext(&pdev->dev,
+ fled_cdev, &init_data);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register flash device: %d\n",
+ ret);
+ goto err;
+ }
+
+ sgm3140_init_v4l2_flash_config(priv, &v4l2_sd_cfg);
+
+ /* Create V4L2 Flash subdev */
+ priv->v4l2_flash = v4l2_flash_init(&pdev->dev,
+ child_node,
+ fled_cdev, NULL,
+ &v4l2_sd_cfg);
+ if (IS_ERR(priv->v4l2_flash)) {
+ ret = PTR_ERR(priv->v4l2_flash);
+ goto err;
+ }
+
+ return ret;
+
+err:
+ fwnode_handle_put(child_node);
+ return ret;
+}
+
+static int sgm3140_remove(struct platform_device *pdev)
+{
+ struct sgm3140 *priv = platform_get_drvdata(pdev);
+
+ del_timer_sync(&priv->powerdown_timer);
+
+ v4l2_flash_release(priv->v4l2_flash);
+
+ return 0;
+}
+
+static const struct of_device_id sgm3140_dt_match[] = {
+ { .compatible = "sgmicro,sgm3140" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sgm3140_dt_match);
+
+static struct platform_driver sgm3140_driver = {
+ .probe = sgm3140_probe,
+ .remove = sgm3140_remove,
+ .driver = {
+ .name = "sgm3140",
+ .of_match_table = sgm3140_dt_match,
+ },
+};
+
+module_platform_driver(sgm3140_driver);
+
+MODULE_AUTHOR("Luca Weiss <luca@z3ntu.xyz>");
+MODULE_DESCRIPTION("SG Micro SGM3140 charge pump led driver");
+MODULE_LICENSE("GPL v2");
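A small sketch, assuming nothing beyond what the sgm3140 probe code above shows: how the strobe timeout is chosen from the optional flash-max-timeout-us DT property and the 250 ms default. Plain userspace C; the function name pick_timeout() is made up for illustration, and have_dt_prop/dt_max_us model fwnode_property_read_u32() succeeding or not.

#include <stdio.h>

#define FLASH_TIMEOUT_DEFAULT		250000U /* 250 ms */
#define FLASH_MAX_TIMEOUT_DEFAULT	300000U /* 300 ms */

static unsigned int pick_timeout(int have_dt_prop, unsigned int dt_max_us)
{
	unsigned int max = have_dt_prop ? dt_max_us : FLASH_MAX_TIMEOUT_DEFAULT;

	/* Same clamp as the probe path: never exceed the DT maximum. */
	return max < FLASH_TIMEOUT_DEFAULT ? max : FLASH_TIMEOUT_DEFAULT;
}

int main(void)
{
	printf("no DT property   -> %u us\n", pick_timeout(0, 0));
	printf("DT max 400000 us -> %u us\n", pick_timeout(1, 400000));
	printf("DT max 100000 us -> %u us\n", pick_timeout(1, 100000));
	return 0;
}

The resulting value (in microseconds) is what sgm3140_strobe_set() later feeds into usecs_to_jiffies() when arming the powerdown timer.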
diff --git a/drivers/leds/leds-tca6507.c b/drivers/leds/leds-tca6507.c
index 58be20cae183..1128ac75443c 100644
--- a/drivers/leds/leds-tca6507.c
+++ b/drivers/leds/leds-tca6507.c
@@ -93,7 +93,7 @@
#include <linux/leds.h>
#include <linux/err.h>
#include <linux/i2c.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/workqueue.h>
#include <linux/leds-tca6507.h>
#include <linux/of.h>
diff --git a/drivers/leds/leds-tlc591xx.c b/drivers/leds/leds-tlc591xx.c
index a8911ebd30e5..0929f1275814 100644
--- a/drivers/leds/leds-tlc591xx.c
+++ b/drivers/leds/leds-tlc591xx.c
@@ -214,8 +214,9 @@ tlc591xx_probe(struct i2c_client *client,
err = devm_led_classdev_register_ext(dev, &led->ldev,
&init_data);
if (err < 0) {
- dev_err(dev, "couldn't register LED %s\n",
- led->ldev.name);
+ if (err != -EPROBE_DEFER)
+ dev_err(dev, "couldn't register LED %s\n",
+ led->ldev.name);
return err;
}
}
diff --git a/drivers/leds/trigger/ledtrig-timer.c b/drivers/leds/trigger/ledtrig-timer.c
index 34a68604c46c..b4688d1d9d2b 100644
--- a/drivers/leds/trigger/ledtrig-timer.c
+++ b/drivers/leds/trigger/ledtrig-timer.c
@@ -28,7 +28,7 @@ static ssize_t led_delay_on_store(struct device *dev,
{
struct led_classdev *led_cdev = led_trigger_get_led(dev);
unsigned long state;
- ssize_t ret = -EINVAL;
+ ssize_t ret;
ret = kstrtoul(buf, 10, &state);
if (ret)
@@ -53,7 +53,7 @@ static ssize_t led_delay_off_store(struct device *dev,
{
struct led_classdev *led_cdev = led_trigger_get_led(dev);
unsigned long state;
- ssize_t ret = -EINVAL;
+ ssize_t ret;
ret = kstrtoul(buf, 10, &state);
if (ret)
diff --git a/drivers/lightnvm/pblk-cache.c b/drivers/lightnvm/pblk-cache.c
index 5c1034c22197..f185f1a00008 100644
--- a/drivers/lightnvm/pblk-cache.c
+++ b/drivers/lightnvm/pblk-cache.c
@@ -21,16 +21,14 @@
void pblk_write_to_cache(struct pblk *pblk, struct bio *bio,
unsigned long flags)
{
- struct request_queue *q = pblk->dev->q;
struct pblk_w_ctx w_ctx;
sector_t lba = pblk_get_lba(bio);
- unsigned long start_time = jiffies;
+ unsigned long start_time;
unsigned int bpos, pos;
int nr_entries = pblk_get_secs(bio);
int i, ret;
- generic_start_io_acct(q, REQ_OP_WRITE, bio_sectors(bio),
- &pblk->disk->part0);
+ start_time = bio_start_io_acct(bio);
/* Update the write buffer head (mem) with the entries that we can
* write. The write in itself cannot fail, so there is no need to
@@ -79,7 +77,7 @@ retry:
pblk_rl_inserted(&pblk->rl, nr_entries);
out:
- generic_end_io_acct(q, REQ_OP_WRITE, &pblk->disk->part0, start_time);
+ bio_end_io_acct(bio, start_time);
pblk_write_should_kick(pblk);
if (ret == NVM_IO_DONE)
diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c
index 9a967a2e83dd..6e677ff62cc9 100644
--- a/drivers/lightnvm/pblk-init.c
+++ b/drivers/lightnvm/pblk-init.c
@@ -145,9 +145,8 @@ static int pblk_l2p_init(struct pblk *pblk, bool factory_init)
int ret = 0;
map_size = pblk_trans_map_size(pblk);
- pblk->trans_map = __vmalloc(map_size, GFP_KERNEL | __GFP_NOWARN
- | __GFP_RETRY_MAYFAIL | __GFP_HIGHMEM,
- PAGE_KERNEL);
+ pblk->trans_map = __vmalloc(map_size, GFP_KERNEL | __GFP_NOWARN |
+ __GFP_RETRY_MAYFAIL | __GFP_HIGHMEM);
if (!pblk->trans_map) {
pblk_err(pblk, "failed to allocate L2P (need %zu of memory)\n",
map_size);
diff --git a/drivers/lightnvm/pblk-read.c b/drivers/lightnvm/pblk-read.c
index 8efd14e683dc..140927ebf41e 100644
--- a/drivers/lightnvm/pblk-read.c
+++ b/drivers/lightnvm/pblk-read.c
@@ -187,12 +187,11 @@ static void pblk_end_user_read(struct bio *bio, int error)
static void __pblk_end_io_read(struct pblk *pblk, struct nvm_rq *rqd,
bool put_line)
{
- struct nvm_tgt_dev *dev = pblk->dev;
struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
struct bio *int_bio = rqd->bio;
unsigned long start_time = r_ctx->start_time;
- generic_end_io_acct(dev->q, REQ_OP_READ, &pblk->disk->part0, start_time);
+ bio_end_io_acct(int_bio, start_time);
if (rqd->error)
pblk_log_read_err(pblk, rqd);
@@ -263,17 +262,15 @@ retry:
void pblk_submit_read(struct pblk *pblk, struct bio *bio)
{
- struct nvm_tgt_dev *dev = pblk->dev;
- struct request_queue *q = dev->q;
sector_t blba = pblk_get_lba(bio);
unsigned int nr_secs = pblk_get_secs(bio);
bool from_cache;
struct pblk_g_ctx *r_ctx;
struct nvm_rq *rqd;
struct bio *int_bio, *split_bio;
+ unsigned long start_time;
- generic_start_io_acct(q, REQ_OP_READ, bio_sectors(bio),
- &pblk->disk->part0);
+ start_time = bio_start_io_acct(bio);
rqd = pblk_alloc_rqd(pblk, PBLK_READ);
@@ -283,7 +280,7 @@ void pblk_submit_read(struct pblk *pblk, struct bio *bio)
rqd->end_io = pblk_end_io_read;
r_ctx = nvm_rq_to_pdu(rqd);
- r_ctx->start_time = jiffies;
+ r_ctx->start_time = start_time;
r_ctx->lba = blba;
if (pblk_alloc_rqd_meta(pblk, rqd)) {
diff --git a/drivers/macintosh/Kconfig b/drivers/macintosh/Kconfig
index cbd46c1c5bf7..fcb9d7bd5bd0 100644
--- a/drivers/macintosh/Kconfig
+++ b/drivers/macintosh/Kconfig
@@ -247,7 +247,6 @@ config PMAC_RACKMETER
config SENSORS_AMS
tristate "Apple Motion Sensor driver"
depends on PPC_PMAC && !PPC64 && INPUT && ((ADB_PMU && I2C = y) || (ADB_PMU && !I2C) || I2C)
- select INPUT_POLLDEV
help
Support for the motion sensor included in PowerBooks. Includes
implementations for PMU and I2C.
diff --git a/drivers/macintosh/ams/ams-input.c b/drivers/macintosh/ams/ams-input.c
index 06a96b3f11de..0da493d449b2 100644
--- a/drivers/macintosh/ams/ams-input.c
+++ b/drivers/macintosh/ams/ams-input.c
@@ -25,9 +25,8 @@ MODULE_PARM_DESC(invert, "Invert input data on X and Y axis");
static DEFINE_MUTEX(ams_input_mutex);
-static void ams_idev_poll(struct input_polled_dev *dev)
+static void ams_idev_poll(struct input_dev *idev)
{
- struct input_dev *idev = dev->input;
s8 x, y, z;
mutex_lock(&ams_info.lock);
@@ -59,14 +58,10 @@ static int ams_input_enable(void)
ams_info.ycalib = y;
ams_info.zcalib = z;
- ams_info.idev = input_allocate_polled_device();
- if (!ams_info.idev)
+ input = input_allocate_device();
+ if (!input)
return -ENOMEM;
- ams_info.idev->poll = ams_idev_poll;
- ams_info.idev->poll_interval = 25;
-
- input = ams_info.idev->input;
input->name = "Apple Motion Sensor";
input->id.bustype = ams_info.bustype;
input->id.vendor = 0;
@@ -75,28 +70,32 @@ static int ams_input_enable(void)
input_set_abs_params(input, ABS_X, -50, 50, 3, 0);
input_set_abs_params(input, ABS_Y, -50, 50, 3, 0);
input_set_abs_params(input, ABS_Z, -50, 50, 3, 0);
+ input_set_capability(input, EV_KEY, BTN_TOUCH);
- set_bit(EV_ABS, input->evbit);
- set_bit(EV_KEY, input->evbit);
- set_bit(BTN_TOUCH, input->keybit);
+ error = input_setup_polling(input, ams_idev_poll);
+ if (error)
+ goto err_free_input;
- error = input_register_polled_device(ams_info.idev);
- if (error) {
- input_free_polled_device(ams_info.idev);
- ams_info.idev = NULL;
- return error;
- }
+ input_set_poll_interval(input, 25);
+ error = input_register_device(input);
+ if (error)
+ goto err_free_input;
+
+ ams_info.idev = input;
joystick = true;
return 0;
+
+err_free_input:
+ input_free_device(input);
+ return error;
}
static void ams_input_disable(void)
{
if (ams_info.idev) {
- input_unregister_polled_device(ams_info.idev);
- input_free_polled_device(ams_info.idev);
+ input_unregister_device(ams_info.idev);
ams_info.idev = NULL;
}
diff --git a/drivers/macintosh/ams/ams.h b/drivers/macintosh/ams/ams.h
index fe8d596f9845..935bdd9cd9a6 100644
--- a/drivers/macintosh/ams/ams.h
+++ b/drivers/macintosh/ams/ams.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/i2c.h>
-#include <linux/input-polldev.h>
+#include <linux/input.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
@@ -51,7 +51,7 @@ struct ams {
#endif
/* Joystick emulation */
- struct input_polled_dev *idev;
+ struct input_dev *idev;
__u16 bustype;
/* calibrated null values */
diff --git a/drivers/macintosh/mac_hid.c b/drivers/macintosh/mac_hid.c
index 7af0c536d568..28b8581b44dd 100644
--- a/drivers/macintosh/mac_hid.c
+++ b/drivers/macintosh/mac_hid.c
@@ -183,8 +183,7 @@ static void mac_hid_stop_emulation(void)
}
static int mac_hid_toggle_emumouse(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos)
+ void *buffer, size_t *lenp, loff_t *ppos)
{
int *valp = table->data;
int old_val = *valp;
diff --git a/drivers/macintosh/macio-adb.c b/drivers/macintosh/macio-adb.c
index eb3adfb7f88d..d4759db002c6 100644
--- a/drivers/macintosh/macio-adb.c
+++ b/drivers/macintosh/macio-adb.c
@@ -9,10 +9,10 @@
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
+#include <linux/pgtable.h>
#include <asm/prom.h>
#include <linux/adb.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/hydra.h>
#include <asm/irq.h>
#include <linux/init.h>
diff --git a/drivers/macintosh/mediabay.c b/drivers/macintosh/mediabay.c
index 74bf2938276b..eab7e83c11c4 100644
--- a/drivers/macintosh/mediabay.c
+++ b/drivers/macintosh/mediabay.c
@@ -16,8 +16,8 @@
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
+#include <linux/pgtable.h>
#include <asm/prom.h>
-#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index 83eb05bf85ff..73e6ae88fafd 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -50,9 +50,9 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/uaccess.h>
+#include <linux/pgtable.h>
#include <asm/machdep.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/irq.h>
#ifdef CONFIG_PPC_PMAC
@@ -2184,8 +2184,6 @@ pmu_read(struct file *file, char __user *buf,
if (count < 1 || !pp)
return -EINVAL;
- if (!access_ok(buf, count))
- return -EFAULT;
spin_lock_irqsave(&pp->lock, flags);
add_wait_queue(&pp->wait, &wait);
diff --git a/drivers/macintosh/windfarm_pm112.c b/drivers/macintosh/windfarm_pm112.c
index 4150301a89a5..e8377ce0a95a 100644
--- a/drivers/macintosh/windfarm_pm112.c
+++ b/drivers/macintosh/windfarm_pm112.c
@@ -132,14 +132,6 @@ static int create_cpu_loop(int cpu)
s32 tmax;
int fmin;
- /* Get PID params from the appropriate SAT */
- hdr = smu_sat_get_sdb_partition(chip, 0xC8 + core, NULL);
- if (hdr == NULL) {
- printk(KERN_WARNING"windfarm: can't get CPU PID fan config\n");
- return -EINVAL;
- }
- piddata = (struct smu_sdbp_cpupiddata *)&hdr[1];
-
/* Get FVT params to get Tmax; if not found, assume default */
hdr = smu_sat_get_sdb_partition(chip, 0xC4 + core, NULL);
if (hdr) {
@@ -152,6 +144,16 @@ static int create_cpu_loop(int cpu)
if (tmax < cpu_all_tmax)
cpu_all_tmax = tmax;
+ kfree(hdr);
+
+ /* Get PID params from the appropriate SAT */
+ hdr = smu_sat_get_sdb_partition(chip, 0xC8 + core, NULL);
+ if (hdr == NULL) {
+ printk(KERN_WARNING"windfarm: can't get CPU PID fan config\n");
+ return -EINVAL;
+ }
+ piddata = (struct smu_sdbp_cpupiddata *)&hdr[1];
+
/*
* Darwin has a minimum fan speed of 1000 rpm for the 4-way and
* 515 for the 2-way. That appears to be overkill, so for now,
@@ -174,6 +176,9 @@ static int create_cpu_loop(int cpu)
pid.min = fmin;
wf_cpu_pid_init(&cpu_pid[cpu], &pid);
+
+ kfree(hdr);
+
return 0;
}
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index 5a577a6734cf..05b1009e2820 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -236,4 +236,22 @@ config SUN6I_MSGBOX
various Allwinner SoCs. This mailbox is used for communication
between the application CPUs and the power management coprocessor.
+config SPRD_MBOX
+ tristate "Spreadtrum Mailbox"
+ depends on ARCH_SPRD || COMPILE_TEST
+ help
+ Mailbox driver implementation for the Spreadtrum platform. It is used
+ to send messages between the application processors and the MCU. Say Y
+ here if you want to build the Spreadtrum mailbox controller driver.
+
+config QCOM_IPCC
+ bool "Qualcomm Technologies, Inc. IPCC driver"
+ depends on ARCH_QCOM || COMPILE_TEST
+ help
+ Qualcomm Technologies, Inc. Inter-Processor Communication Controller
+ (IPCC) driver for MSM devices. The driver provides mailbox support for
+ sending interrupts to the clients, and it also acts as an interrupt
+ controller for receiving interrupts from clients.
+ Say Y here if you want to build this driver.
+
endif
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index 2e4364ef5c47..60d224b723a1 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -50,3 +50,7 @@ obj-$(CONFIG_MTK_CMDQ_MBOX) += mtk-cmdq-mailbox.o
obj-$(CONFIG_ZYNQMP_IPI_MBOX) += zynqmp-ipi-mailbox.o
obj-$(CONFIG_SUN6I_MSGBOX) += sun6i-msgbox.o
+
+obj-$(CONFIG_SPRD_MBOX) += sprd-mailbox.o
+
+obj-$(CONFIG_QCOM_IPCC) += qcom-ipcc.o
diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c
index 7906624a731c..7205b825c8b5 100644
--- a/drivers/mailbox/imx-mailbox.c
+++ b/drivers/mailbox/imx-mailbox.c
@@ -12,6 +12,7 @@
#include <linux/mailbox_controller.h>
#include <linux/module.h>
#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
#define IMX_MU_xSR_GIPn(x) BIT(28 + (3 - (x)))
@@ -66,6 +67,8 @@ struct imx_mu_priv {
struct clk *clk;
int irq;
+ u32 xcr;
+
bool side_b;
};
@@ -154,12 +157,17 @@ static int imx_mu_scu_tx(struct imx_mu_priv *priv,
switch (cp->type) {
case IMX_MU_TYPE_TX:
- if (msg->hdr.size > sizeof(*msg)) {
+ /*
+ * msg->hdr.size specifies the number of u32 words while
+ * sizeof yields bytes.
+ */
+
+ if (msg->hdr.size > sizeof(*msg) / 4) {
/*
* The real message size can be different to
* struct imx_sc_rpc_msg_max size
*/
- dev_err(priv->dev, "Exceed max msg size (%zu) on TX, got: %i\n", sizeof(*msg), msg->hdr.size);
+ dev_err(priv->dev, "Maximal message size (%zu bytes) exceeded on TX; got: %i bytes\n", sizeof(*msg), msg->hdr.size << 2);
return -EINVAL;
}
@@ -198,9 +206,8 @@ static int imx_mu_scu_rx(struct imx_mu_priv *priv,
imx_mu_xcr_rmw(priv, 0, IMX_MU_xCR_RIEn(0));
*data++ = imx_mu_read(priv, priv->dcfg->xRR[0]);
- if (msg.hdr.size > sizeof(msg)) {
- dev_err(priv->dev, "Exceed max msg size (%zu) on RX, got: %i\n",
- sizeof(msg), msg.hdr.size);
+ if (msg.hdr.size > sizeof(msg) / 4) {
+ dev_err(priv->dev, "Maximal message size (%zu bytes) exceeded on RX; got: %i bytes\n", sizeof(msg), msg.hdr.size << 2);
return -EINVAL;
}
@@ -285,8 +292,10 @@ static int imx_mu_startup(struct mbox_chan *chan)
{
struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
struct imx_mu_con_priv *cp = chan->con_priv;
+ unsigned long irq_flag = IRQF_SHARED;
int ret;
+ pm_runtime_get_sync(priv->dev);
if (cp->type == IMX_MU_TYPE_TXDB) {
/* Tx doorbell don't have ACK support */
tasklet_init(&cp->txdb_tasklet, imx_mu_txdb_tasklet,
@@ -294,8 +303,12 @@ static int imx_mu_startup(struct mbox_chan *chan)
return 0;
}
- ret = request_irq(priv->irq, imx_mu_isr, IRQF_SHARED |
- IRQF_NO_SUSPEND, cp->irq_desc, chan);
+ /* An IPC MU should have IRQF_NO_SUSPEND set */
+ if (!priv->dev->pm_domain)
+ irq_flag |= IRQF_NO_SUSPEND;
+
+ ret = request_irq(priv->irq, imx_mu_isr, irq_flag,
+ cp->irq_desc, chan);
if (ret) {
dev_err(priv->dev,
"Unable to acquire IRQ %d\n", priv->irq);
@@ -323,6 +336,7 @@ static void imx_mu_shutdown(struct mbox_chan *chan)
if (cp->type == IMX_MU_TYPE_TXDB) {
tasklet_kill(&cp->txdb_tasklet);
+ pm_runtime_put_sync(priv->dev);
return;
}
@@ -341,6 +355,7 @@ static void imx_mu_shutdown(struct mbox_chan *chan)
}
free_irq(priv->irq, chan);
+ pm_runtime_put_sync(priv->dev);
}
static const struct mbox_chan_ops imx_mu_ops = {
@@ -374,7 +389,7 @@ static struct mbox_chan *imx_mu_scu_xlate(struct mbox_controller *mbox,
break;
default:
dev_err(mbox->dev, "Invalid chan type: %d\n", type);
- return NULL;
+ return ERR_PTR(-EINVAL);
}
if (chan >= mbox->num_chans) {
@@ -508,14 +523,39 @@ static int imx_mu_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
- return devm_mbox_controller_register(dev, &priv->mbox);
+ ret = devm_mbox_controller_register(dev, &priv->mbox);
+ if (ret) {
+ clk_disable_unprepare(priv->clk);
+ return ret;
+ }
+
+ pm_runtime_enable(dev);
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(dev);
+ goto disable_runtime_pm;
+ }
+
+ ret = pm_runtime_put_sync(dev);
+ if (ret < 0)
+ goto disable_runtime_pm;
+
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+
+disable_runtime_pm:
+ pm_runtime_disable(dev);
+ clk_disable_unprepare(priv->clk);
+ return ret;
}
static int imx_mu_remove(struct platform_device *pdev)
{
struct imx_mu_priv *priv = platform_get_drvdata(pdev);
- clk_disable_unprepare(priv->clk);
+ pm_runtime_disable(priv->dev);
return 0;
}
@@ -558,12 +598,69 @@ static const struct of_device_id imx_mu_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, imx_mu_dt_ids);
+static int imx_mu_suspend_noirq(struct device *dev)
+{
+ struct imx_mu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv->clk)
+ priv->xcr = imx_mu_read(priv, priv->dcfg->xCR);
+
+ return 0;
+}
+
+static int imx_mu_resume_noirq(struct device *dev)
+{
+ struct imx_mu_priv *priv = dev_get_drvdata(dev);
+
+ /*
+ * Only restore the MU registers when the context was lost. The TIE
+ * bits could be set during noirq resume while MU data communication
+ * is still going on, and restoring the saved value would overwrite
+ * the TIE bits and make the MU data send fail, which may lead to a
+ * system freeze. This issue was observed when testing freeze-mode
+ * suspend.
+ */
+ if (!imx_mu_read(priv, priv->dcfg->xCR) && !priv->clk)
+ imx_mu_write(priv, priv->xcr, priv->dcfg->xCR);
+
+ return 0;
+}
+
+static int imx_mu_runtime_suspend(struct device *dev)
+{
+ struct imx_mu_priv *priv = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+}
+
+static int imx_mu_runtime_resume(struct device *dev)
+{
+ struct imx_mu_priv *priv = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ dev_err(dev, "failed to enable clock\n");
+
+ return ret;
+}
+
+static const struct dev_pm_ops imx_mu_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx_mu_suspend_noirq,
+ imx_mu_resume_noirq)
+ SET_RUNTIME_PM_OPS(imx_mu_runtime_suspend,
+ imx_mu_runtime_resume, NULL)
+};
+
static struct platform_driver imx_mu_driver = {
.probe = imx_mu_probe,
.remove = imx_mu_remove,
.driver = {
.name = "imx_mu",
.of_match_table = imx_mu_dt_ids,
+ .pm = &imx_mu_pm_ops,
},
};
module_platform_driver(imx_mu_driver);
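For reference, a userspace illustration (not part of the patch) of why the imx-mailbox hunk above divides sizeof() by 4: hdr.size counts u32 words while sizeof() counts bytes, so the old bound let oversized messages through. The struct fake_rpc_msg layout and the check() helper are stand-ins, not the real struct imx_sc_rpc_msg_max.

#include <stdio.h>
#include <stdint.h>

/* Stand-in layout; the real struct imx_sc_rpc_msg_max differs. */
struct fake_rpc_msg {
	struct {
		uint8_t ver;
		uint8_t size;	/* payload size in u32 words */
		uint8_t svc;
		uint8_t func;
	} hdr;
	uint32_t data[30];	/* header + 30 words = 31 words = 124 bytes */
};

static void check(uint8_t size_words)
{
	struct fake_rpc_msg msg = { .hdr = { .size = size_words } };
	/* Old bound: words compared against bytes -- far too permissive. */
	int old_ok = msg.hdr.size <= sizeof(msg);
	/* New bound from the hunk above: words compared against words. */
	int new_ok = msg.hdr.size <= sizeof(msg) / 4;

	printf("hdr.size = %2u words: old check %s, new check %s\n",
	       msg.hdr.size, old_ok ? "passes" : "rejects",
	       new_ok ? "passes" : "rejects");
}

int main(void)
{
	check(20);	/* fits either way */
	check(40);	/* 40 words exceed the 31-word struct but not 124 bytes */
	return 0;
}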
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
index 34844b7a3675..8c7fac38bb1c 100644
--- a/drivers/mailbox/pcc.c
+++ b/drivers/mailbox/pcc.c
@@ -568,7 +568,7 @@ static int pcc_mbox_probe(struct platform_device *pdev)
return ret;
}
-struct platform_driver pcc_mbox_driver = {
+static struct platform_driver pcc_mbox_driver = {
.probe = pcc_mbox_probe,
.driver = {
.name = "PCCT",
diff --git a/drivers/mailbox/qcom-apcs-ipc-mailbox.c b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
index eeebafd546e5..cec34f0af6ce 100644
--- a/drivers/mailbox/qcom-apcs-ipc-mailbox.c
+++ b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
@@ -24,6 +24,35 @@ struct qcom_apcs_ipc {
struct platform_device *clk;
};
+struct qcom_apcs_ipc_data {
+ int offset;
+ char *clk_name;
+};
+
+static const struct qcom_apcs_ipc_data ipq6018_apcs_data = {
+ .offset = 8, .clk_name = "qcom,apss-ipq6018-clk"
+};
+
+static const struct qcom_apcs_ipc_data ipq8074_apcs_data = {
+ .offset = 8, .clk_name = NULL
+};
+
+static const struct qcom_apcs_ipc_data msm8916_apcs_data = {
+ .offset = 8, .clk_name = "qcom-apcs-msm8916-clk"
+};
+
+static const struct qcom_apcs_ipc_data msm8996_apcs_data = {
+ .offset = 16, .clk_name = NULL
+};
+
+static const struct qcom_apcs_ipc_data msm8998_apcs_data = {
+ .offset = 8, .clk_name = NULL
+};
+
+static const struct qcom_apcs_ipc_data apps_shared_apcs_data = {
+ .offset = 12, .clk_name = NULL
+};
+
static const struct regmap_config apcs_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
@@ -48,17 +77,12 @@ static const struct mbox_chan_ops qcom_apcs_ipc_ops = {
static int qcom_apcs_ipc_probe(struct platform_device *pdev)
{
struct qcom_apcs_ipc *apcs;
+ const struct qcom_apcs_ipc_data *apcs_data;
struct regmap *regmap;
struct resource *res;
- unsigned long offset;
void __iomem *base;
unsigned long i;
int ret;
- const struct of_device_id apcs_clk_match_table[] = {
- { .compatible = "qcom,msm8916-apcs-kpss-global", },
- { .compatible = "qcom,qcs404-apcs-apps-global", },
- {}
- };
apcs = devm_kzalloc(&pdev->dev, sizeof(*apcs), GFP_KERNEL);
if (!apcs)
@@ -73,10 +97,10 @@ static int qcom_apcs_ipc_probe(struct platform_device *pdev)
if (IS_ERR(regmap))
return PTR_ERR(regmap);
- offset = (unsigned long)of_device_get_match_data(&pdev->dev);
+ apcs_data = of_device_get_match_data(&pdev->dev);
apcs->regmap = regmap;
- apcs->offset = offset;
+ apcs->offset = apcs_data->offset;
/* Initialize channel identifiers */
for (i = 0; i < ARRAY_SIZE(apcs->mbox_chans); i++)
@@ -93,9 +117,9 @@ static int qcom_apcs_ipc_probe(struct platform_device *pdev)
return ret;
}
- if (of_match_device(apcs_clk_match_table, &pdev->dev)) {
+ if (apcs_data->clk_name) {
apcs->clk = platform_device_register_data(&pdev->dev,
- "qcom-apcs-msm8916-clk",
+ apcs_data->clk_name,
PLATFORM_DEVID_NONE,
NULL, 0);
if (IS_ERR(apcs->clk))
@@ -119,14 +143,15 @@ static int qcom_apcs_ipc_remove(struct platform_device *pdev)
/* .data is the offset of the ipc register within the global block */
static const struct of_device_id qcom_apcs_ipc_of_match[] = {
- { .compatible = "qcom,msm8916-apcs-kpss-global", .data = (void *)8 },
- { .compatible = "qcom,msm8996-apcs-hmss-global", .data = (void *)16 },
- { .compatible = "qcom,msm8998-apcs-hmss-global", .data = (void *)8 },
- { .compatible = "qcom,qcs404-apcs-apps-global", .data = (void *)8 },
- { .compatible = "qcom,sc7180-apss-shared", .data = (void *)12 },
- { .compatible = "qcom,sdm845-apss-shared", .data = (void *)12 },
- { .compatible = "qcom,sm8150-apss-shared", .data = (void *)12 },
- { .compatible = "qcom,ipq8074-apcs-apps-global", .data = (void *)8 },
+ { .compatible = "qcom,ipq6018-apcs-apps-global", .data = &ipq6018_apcs_data },
+ { .compatible = "qcom,ipq8074-apcs-apps-global", .data = &ipq8074_apcs_data },
+ { .compatible = "qcom,msm8916-apcs-kpss-global", .data = &msm8916_apcs_data },
+ { .compatible = "qcom,msm8996-apcs-hmss-global", .data = &msm8996_apcs_data },
+ { .compatible = "qcom,msm8998-apcs-hmss-global", .data = &msm8998_apcs_data },
+ { .compatible = "qcom,qcs404-apcs-apps-global", .data = &msm8916_apcs_data },
+ { .compatible = "qcom,sc7180-apss-shared", .data = &apps_shared_apcs_data },
+ { .compatible = "qcom,sdm845-apss-shared", .data = &apps_shared_apcs_data },
+ { .compatible = "qcom,sm8150-apss-shared", .data = &apps_shared_apcs_data },
{}
};
MODULE_DEVICE_TABLE(of, qcom_apcs_ipc_of_match);
diff --git a/drivers/mailbox/qcom-ipcc.c b/drivers/mailbox/qcom-ipcc.c
new file mode 100644
index 000000000000..2d13c72944c6
--- /dev/null
+++ b/drivers/mailbox/qcom-ipcc.c
@@ -0,0 +1,286 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/mailbox/qcom-ipcc.h>
+
+#define IPCC_MBOX_MAX_CHAN 48
+
+/* IPCC Register offsets */
+#define IPCC_REG_SEND_ID 0x0c
+#define IPCC_REG_RECV_ID 0x10
+#define IPCC_REG_RECV_SIGNAL_ENABLE 0x14
+#define IPCC_REG_RECV_SIGNAL_DISABLE 0x18
+#define IPCC_REG_RECV_SIGNAL_CLEAR 0x1c
+#define IPCC_REG_CLIENT_CLEAR 0x38
+
+#define IPCC_SIGNAL_ID_MASK GENMASK(15, 0)
+#define IPCC_CLIENT_ID_MASK GENMASK(31, 16)
+
+#define IPCC_NO_PENDING_IRQ GENMASK(31, 0)
+
+/**
+ * struct qcom_ipcc_chan_info - Per-mailbox-channel info
+ * @client_id: The client-id for which the interrupt is to be triggered
+ * @signal_id: The signal-id for which the interrupt is to be triggered
+ */
+struct qcom_ipcc_chan_info {
+ u16 client_id;
+ u16 signal_id;
+};
+
+/**
+ * struct qcom_ipcc - Holder for the mailbox driver
+ * @dev: Device associated with this instance
+ * @base: Base address of the IPCC frame associated to APSS
+ * @irq_domain: The irq_domain associated with this instance
+ * @chan: The mailbox channels array
+ * @mchan: The per-mailbox channel info array
+ * @mbox: The mailbox controller
+ * @irq: Summary irq
+ */
+struct qcom_ipcc {
+ struct device *dev;
+ void __iomem *base;
+ struct irq_domain *irq_domain;
+ struct mbox_chan chan[IPCC_MBOX_MAX_CHAN];
+ struct qcom_ipcc_chan_info mchan[IPCC_MBOX_MAX_CHAN];
+ struct mbox_controller mbox;
+ int irq;
+};
+
+static inline struct qcom_ipcc *to_qcom_ipcc(struct mbox_controller *mbox)
+{
+ return container_of(mbox, struct qcom_ipcc, mbox);
+}
+
+static inline u32 qcom_ipcc_get_hwirq(u16 client_id, u16 signal_id)
+{
+ return FIELD_PREP(IPCC_CLIENT_ID_MASK, client_id) |
+ FIELD_PREP(IPCC_SIGNAL_ID_MASK, signal_id);
+}
+
+static irqreturn_t qcom_ipcc_irq_fn(int irq, void *data)
+{
+ struct qcom_ipcc *ipcc = data;
+ u32 hwirq;
+ int virq;
+
+ for (;;) {
+ hwirq = readl(ipcc->base + IPCC_REG_RECV_ID);
+ if (hwirq == IPCC_NO_PENDING_IRQ)
+ break;
+
+ virq = irq_find_mapping(ipcc->irq_domain, hwirq);
+ writel(hwirq, ipcc->base + IPCC_REG_RECV_SIGNAL_CLEAR);
+ generic_handle_irq(virq);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void qcom_ipcc_mask_irq(struct irq_data *irqd)
+{
+ struct qcom_ipcc *ipcc = irq_data_get_irq_chip_data(irqd);
+ irq_hw_number_t hwirq = irqd_to_hwirq(irqd);
+
+ writel(hwirq, ipcc->base + IPCC_REG_RECV_SIGNAL_DISABLE);
+}
+
+static void qcom_ipcc_unmask_irq(struct irq_data *irqd)
+{
+ struct qcom_ipcc *ipcc = irq_data_get_irq_chip_data(irqd);
+ irq_hw_number_t hwirq = irqd_to_hwirq(irqd);
+
+ writel(hwirq, ipcc->base + IPCC_REG_RECV_SIGNAL_ENABLE);
+}
+
+static struct irq_chip qcom_ipcc_irq_chip = {
+ .name = "ipcc",
+ .irq_mask = qcom_ipcc_mask_irq,
+ .irq_unmask = qcom_ipcc_unmask_irq,
+ .flags = IRQCHIP_SKIP_SET_WAKE,
+};
+
+static int qcom_ipcc_domain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ struct qcom_ipcc *ipcc = d->host_data;
+
+ irq_set_chip_and_handler(irq, &qcom_ipcc_irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, ipcc);
+ irq_set_noprobe(irq);
+
+ return 0;
+}
+
+static int qcom_ipcc_domain_xlate(struct irq_domain *d,
+ struct device_node *node, const u32 *intspec,
+ unsigned int intsize,
+ unsigned long *out_hwirq,
+ unsigned int *out_type)
+{
+ if (intsize != 3)
+ return -EINVAL;
+
+ *out_hwirq = qcom_ipcc_get_hwirq(intspec[0], intspec[1]);
+ *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
+
+ return 0;
+}
+
+static const struct irq_domain_ops qcom_ipcc_irq_ops = {
+ .map = qcom_ipcc_domain_map,
+ .xlate = qcom_ipcc_domain_xlate,
+};
+
+static int qcom_ipcc_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct qcom_ipcc *ipcc = to_qcom_ipcc(chan->mbox);
+ struct qcom_ipcc_chan_info *mchan = chan->con_priv;
+ u32 hwirq;
+
+ hwirq = qcom_ipcc_get_hwirq(mchan->client_id, mchan->signal_id);
+ writel(hwirq, ipcc->base + IPCC_REG_SEND_ID);
+
+ return 0;
+}
+
+static struct mbox_chan *qcom_ipcc_mbox_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *ph)
+{
+ struct qcom_ipcc *ipcc = to_qcom_ipcc(mbox);
+ struct qcom_ipcc_chan_info *mchan;
+ struct mbox_chan *chan;
+ unsigned int i;
+
+ if (ph->args_count != 2)
+ return ERR_PTR(-EINVAL);
+
+ for (i = 0; i < IPCC_MBOX_MAX_CHAN; i++) {
+ chan = &ipcc->chan[i];
+ if (!chan->con_priv) {
+ mchan = &ipcc->mchan[i];
+ mchan->client_id = ph->args[0];
+ mchan->signal_id = ph->args[1];
+ chan->con_priv = mchan;
+ break;
+ }
+
+ chan = NULL;
+ }
+
+ return chan ?: ERR_PTR(-EBUSY);
+}
+
+static const struct mbox_chan_ops ipcc_mbox_chan_ops = {
+ .send_data = qcom_ipcc_mbox_send_data,
+};
+
+static int qcom_ipcc_setup_mbox(struct qcom_ipcc *ipcc)
+{
+ struct mbox_controller *mbox;
+ struct device *dev = ipcc->dev;
+
+ mbox = &ipcc->mbox;
+ mbox->dev = dev;
+ mbox->num_chans = IPCC_MBOX_MAX_CHAN;
+ mbox->chans = ipcc->chan;
+ mbox->ops = &ipcc_mbox_chan_ops;
+ mbox->of_xlate = qcom_ipcc_mbox_xlate;
+ mbox->txdone_irq = false;
+ mbox->txdone_poll = false;
+
+ return devm_mbox_controller_register(dev, mbox);
+}
+
+static int qcom_ipcc_probe(struct platform_device *pdev)
+{
+ struct qcom_ipcc *ipcc;
+ int ret;
+
+ ipcc = devm_kzalloc(&pdev->dev, sizeof(*ipcc), GFP_KERNEL);
+ if (!ipcc)
+ return -ENOMEM;
+
+ ipcc->dev = &pdev->dev;
+
+ ipcc->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ipcc->base))
+ return PTR_ERR(ipcc->base);
+
+ ipcc->irq = platform_get_irq(pdev, 0);
+ if (ipcc->irq < 0)
+ return ipcc->irq;
+
+ ipcc->irq_domain = irq_domain_add_tree(pdev->dev.of_node,
+ &qcom_ipcc_irq_ops, ipcc);
+ if (!ipcc->irq_domain)
+ return -ENOMEM;
+
+ ret = qcom_ipcc_setup_mbox(ipcc);
+ if (ret)
+ goto err_mbox;
+
+ ret = devm_request_irq(&pdev->dev, ipcc->irq, qcom_ipcc_irq_fn,
+ IRQF_TRIGGER_HIGH, "ipcc", ipcc);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register the irq: %d\n", ret);
+ goto err_mbox;
+ }
+
+ enable_irq_wake(ipcc->irq);
+ platform_set_drvdata(pdev, ipcc);
+
+ return 0;
+
+err_mbox:
+ irq_domain_remove(ipcc->irq_domain);
+
+ return ret;
+}
+
+static int qcom_ipcc_remove(struct platform_device *pdev)
+{
+ struct qcom_ipcc *ipcc = platform_get_drvdata(pdev);
+
+ disable_irq_wake(ipcc->irq);
+ irq_domain_remove(ipcc->irq_domain);
+
+ return 0;
+}
+
+static const struct of_device_id qcom_ipcc_of_match[] = {
+ { .compatible = "qcom,ipcc"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_ipcc_of_match);
+
+static struct platform_driver qcom_ipcc_driver = {
+ .probe = qcom_ipcc_probe,
+ .remove = qcom_ipcc_remove,
+ .driver = {
+ .name = "qcom-ipcc",
+ .of_match_table = qcom_ipcc_of_match,
+ },
+};
+
+static int __init qcom_ipcc_init(void)
+{
+ return platform_driver_register(&qcom_ipcc_driver);
+}
+arch_initcall(qcom_ipcc_init);
+
+MODULE_AUTHOR("Venkata Narendra Kumar Gutta <vnkgutta@codeaurora.org>");
+MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. IPCC driver");
+MODULE_LICENSE("GPL v2");
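A compact userspace sketch (not from the patch; FIELD_PREP()/GENMASK() spelled out as plain shifts and masks so it builds outside the kernel) of how qcom_ipcc_get_hwirq() packs the client and signal ids from the two mbox/interrupt cells into a single hwirq, and how the receive path can take them apart again. The helper name ipcc_hwirq() and the example ids are illustrative only.

#include <stdio.h>
#include <stdint.h>

#define IPCC_SIGNAL_ID_MASK	0x0000ffffu	/* GENMASK(15, 0) */
#define IPCC_CLIENT_ID_MASK	0xffff0000u	/* GENMASK(31, 16) */

/* Same packing as qcom_ipcc_get_hwirq(), with FIELD_PREP() made explicit */
static uint32_t ipcc_hwirq(uint16_t client_id, uint16_t signal_id)
{
	return (((uint32_t)client_id << 16) & IPCC_CLIENT_ID_MASK) |
	       ((uint32_t)signal_id & IPCC_SIGNAL_ID_MASK);
}

int main(void)
{
	/* Example ids only; real ones come from the client's DT cells. */
	uint32_t hwirq = ipcc_hwirq(3, 2);

	printf("client 3, signal 2 -> hwirq 0x%08x\n", (unsigned)hwirq);
	printf("unpacked: client %u, signal %u\n",
	       (unsigned)((hwirq & IPCC_CLIENT_ID_MASK) >> 16),
	       (unsigned)(hwirq & IPCC_SIGNAL_ID_MASK));
	return 0;
}

The same encoded value is what the summary ISR reads back from IPCC_REG_RECV_ID and feeds to irq_find_mapping(), so one packed word identifies both who signalled and which signal fired.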
diff --git a/drivers/mailbox/sprd-mailbox.c b/drivers/mailbox/sprd-mailbox.c
new file mode 100644
index 000000000000..f6fab24ae8a9
--- /dev/null
+++ b/drivers/mailbox/sprd-mailbox.c
@@ -0,0 +1,361 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Spreadtrum mailbox driver
+ *
+ * Copyright (c) 2020 Spreadtrum Communications Inc.
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+
+#define SPRD_MBOX_ID 0x0
+#define SPRD_MBOX_MSG_LOW 0x4
+#define SPRD_MBOX_MSG_HIGH 0x8
+#define SPRD_MBOX_TRIGGER 0xc
+#define SPRD_MBOX_FIFO_RST 0x10
+#define SPRD_MBOX_FIFO_STS 0x14
+#define SPRD_MBOX_IRQ_STS 0x18
+#define SPRD_MBOX_IRQ_MSK 0x1c
+#define SPRD_MBOX_LOCK 0x20
+#define SPRD_MBOX_FIFO_DEPTH 0x24
+
+/* Bit and mask definition for inbox's SPRD_MBOX_FIFO_STS register */
+#define SPRD_INBOX_FIFO_DELIVER_MASK GENMASK(23, 16)
+#define SPRD_INBOX_FIFO_OVERLOW_MASK GENMASK(15, 8)
+#define SPRD_INBOX_FIFO_DELIVER_SHIFT 16
+#define SPRD_INBOX_FIFO_BUSY_MASK GENMASK(7, 0)
+
+/* Bit and mask definition for SPRD_MBOX_IRQ_STS register */
+#define SPRD_MBOX_IRQ_CLR BIT(0)
+
+/* Bit and mask definition for outbox's SPRD_MBOX_FIFO_STS register */
+#define SPRD_OUTBOX_FIFO_FULL BIT(0)
+#define SPRD_OUTBOX_FIFO_WR_SHIFT 16
+#define SPRD_OUTBOX_FIFO_RD_SHIFT 24
+#define SPRD_OUTBOX_FIFO_POS_MASK GENMASK(7, 0)
+
+/* Bit and mask definition for inbox's SPRD_MBOX_IRQ_MSK register */
+#define SPRD_INBOX_FIFO_BLOCK_IRQ BIT(0)
+#define SPRD_INBOX_FIFO_OVERFLOW_IRQ BIT(1)
+#define SPRD_INBOX_FIFO_DELIVER_IRQ BIT(2)
+#define SPRD_INBOX_FIFO_IRQ_MASK GENMASK(2, 0)
+
+/* Bit and mask definition for outbox's SPRD_MBOX_IRQ_MSK register */
+#define SPRD_OUTBOX_FIFO_NOT_EMPTY_IRQ BIT(0)
+#define SPRD_OUTBOX_FIFO_IRQ_MASK GENMASK(4, 0)
+
+#define SPRD_MBOX_CHAN_MAX 8
+
+struct sprd_mbox_priv {
+ struct mbox_controller mbox;
+ struct device *dev;
+ void __iomem *inbox_base;
+ void __iomem *outbox_base;
+ struct clk *clk;
+ u32 outbox_fifo_depth;
+
+ struct mbox_chan chan[SPRD_MBOX_CHAN_MAX];
+};
+
+static struct sprd_mbox_priv *to_sprd_mbox_priv(struct mbox_controller *mbox)
+{
+ return container_of(mbox, struct sprd_mbox_priv, mbox);
+}
+
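+/* Work out how many messages are pending in the outbox FIFO from its status register */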
+static u32 sprd_mbox_get_fifo_len(struct sprd_mbox_priv *priv, u32 fifo_sts)
+{
+ u32 wr_pos = (fifo_sts >> SPRD_OUTBOX_FIFO_WR_SHIFT) &
+ SPRD_OUTBOX_FIFO_POS_MASK;
+ u32 rd_pos = (fifo_sts >> SPRD_OUTBOX_FIFO_RD_SHIFT) &
+ SPRD_OUTBOX_FIFO_POS_MASK;
+ u32 fifo_len;
+
+ /*
+ * If the read pointer equals the write pointer, the FIFO is
+ * either full or empty.
+ */
+ if (wr_pos == rd_pos) {
+ if (fifo_sts & SPRD_OUTBOX_FIFO_FULL)
+ fifo_len = priv->outbox_fifo_depth;
+ else
+ fifo_len = 0;
+ } else if (wr_pos > rd_pos) {
+ fifo_len = wr_pos - rd_pos;
+ } else {
+ fifo_len = priv->outbox_fifo_depth - rd_pos + wr_pos;
+ }
+
+ return fifo_len;
+}
+
+static irqreturn_t sprd_mbox_outbox_isr(int irq, void *data)
+{
+ struct sprd_mbox_priv *priv = data;
+ struct mbox_chan *chan;
+ u32 fifo_sts, fifo_len, msg[2];
+ int i, id;
+
+ fifo_sts = readl(priv->outbox_base + SPRD_MBOX_FIFO_STS);
+
+ fifo_len = sprd_mbox_get_fifo_len(priv, fifo_sts);
+ if (!fifo_len) {
+ dev_warn_ratelimited(priv->dev, "spurious outbox interrupt\n");
+ return IRQ_NONE;
+ }
+
+ for (i = 0; i < fifo_len; i++) {
+ msg[0] = readl(priv->outbox_base + SPRD_MBOX_MSG_LOW);
+ msg[1] = readl(priv->outbox_base + SPRD_MBOX_MSG_HIGH);
+ id = readl(priv->outbox_base + SPRD_MBOX_ID);
+
+ chan = &priv->chan[id];
+ mbox_chan_received_data(chan, (void *)msg);
+
+ /* Trigger to update outbox FIFO pointer */
+ writel(0x1, priv->outbox_base + SPRD_MBOX_TRIGGER);
+ }
+
+ /* Clear irq status after reading all messages. */
+ writel(SPRD_MBOX_IRQ_CLR, priv->outbox_base + SPRD_MBOX_IRQ_STS);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t sprd_mbox_inbox_isr(int irq, void *data)
+{
+ struct sprd_mbox_priv *priv = data;
+ struct mbox_chan *chan;
+ u32 fifo_sts, send_sts, busy, id;
+
+ fifo_sts = readl(priv->inbox_base + SPRD_MBOX_FIFO_STS);
+
+ /* Get the inbox data delivery status */
+ send_sts = (fifo_sts & SPRD_INBOX_FIFO_DELIVER_MASK) >>
+ SPRD_INBOX_FIFO_DELIVER_SHIFT;
+ if (!send_sts) {
+ dev_warn_ratelimited(priv->dev, "spurious inbox interrupt\n");
+ return IRQ_NONE;
+ }
+
+ while (send_sts) {
+ id = __ffs(send_sts);
+ send_sts &= (send_sts - 1);
+
+ chan = &priv->chan[id];
+
+ /*
+ * Check whether the message was fetched by the remote target; if so,
+ * the transmission has completed.
+ */
+ busy = fifo_sts & SPRD_INBOX_FIFO_BUSY_MASK;
+ if (!(busy & BIT(id)))
+ mbox_chan_txdone(chan, 0);
+ }
+
+ /* Clear FIFO delivery and overflow status */
+ writel(fifo_sts &
+ (SPRD_INBOX_FIFO_DELIVER_MASK | SPRD_INBOX_FIFO_OVERLOW_MASK),
+ priv->inbox_base + SPRD_MBOX_FIFO_RST);
+
+ /* Clear irq status */
+ writel(SPRD_MBOX_IRQ_CLR, priv->inbox_base + SPRD_MBOX_IRQ_STS);
+
+ return IRQ_HANDLED;
+}
+
+static int sprd_mbox_send_data(struct mbox_chan *chan, void *msg)
+{
+ struct sprd_mbox_priv *priv = to_sprd_mbox_priv(chan->mbox);
+ unsigned long id = (unsigned long)chan->con_priv;
+ u32 *data = msg;
+
+ /* Write data into the inbox FIFO; only 8 bytes can be sent at a time */
+ writel(data[0], priv->inbox_base + SPRD_MBOX_MSG_LOW);
+ writel(data[1], priv->inbox_base + SPRD_MBOX_MSG_HIGH);
+
+ /* Set target core id */
+ writel(id, priv->inbox_base + SPRD_MBOX_ID);
+
+ /* Trigger remote request */
+ writel(0x1, priv->inbox_base + SPRD_MBOX_TRIGGER);
+
+ return 0;
+}
+
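+/* Poll until the remote core has fetched the message for this channel, or the timeout expires */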
+static int sprd_mbox_flush(struct mbox_chan *chan, unsigned long timeout)
+{
+ struct sprd_mbox_priv *priv = to_sprd_mbox_priv(chan->mbox);
+ unsigned long id = (unsigned long)chan->con_priv;
+ u32 busy;
+
+ timeout = jiffies + msecs_to_jiffies(timeout);
+
+ while (time_before(jiffies, timeout)) {
+ busy = readl(priv->inbox_base + SPRD_MBOX_FIFO_STS) &
+ SPRD_INBOX_FIFO_BUSY_MASK;
+ if (!(busy & BIT(id))) {
+ mbox_chan_txdone(chan, 0);
+ return 0;
+ }
+
+ udelay(1);
+ }
+
+ return -ETIME;
+}
+
+static int sprd_mbox_startup(struct mbox_chan *chan)
+{
+ struct sprd_mbox_priv *priv = to_sprd_mbox_priv(chan->mbox);
+ u32 val;
+
+ /* Select outbox FIFO mode and reset the outbox FIFO status */
+ writel(0x0, priv->outbox_base + SPRD_MBOX_FIFO_RST);
+
+ /* Enable inbox FIFO overflow and delivery interrupt */
+ val = readl(priv->inbox_base + SPRD_MBOX_IRQ_MSK);
+ val &= ~(SPRD_INBOX_FIFO_OVERFLOW_IRQ | SPRD_INBOX_FIFO_DELIVER_IRQ);
+ writel(val, priv->inbox_base + SPRD_MBOX_IRQ_MSK);
+
+ /* Enable outbox FIFO not empty interrupt */
+ val = readl(priv->outbox_base + SPRD_MBOX_IRQ_MSK);
+ val &= ~SPRD_OUTBOX_FIFO_NOT_EMPTY_IRQ;
+ writel(val, priv->outbox_base + SPRD_MBOX_IRQ_MSK);
+
+ return 0;
+}
+
+static void sprd_mbox_shutdown(struct mbox_chan *chan)
+{
+ struct sprd_mbox_priv *priv = to_sprd_mbox_priv(chan->mbox);
+
+ /* Disable inbox & outbox interrupt */
+ writel(SPRD_INBOX_FIFO_IRQ_MASK, priv->inbox_base + SPRD_MBOX_IRQ_MSK);
+ writel(SPRD_OUTBOX_FIFO_IRQ_MASK, priv->outbox_base + SPRD_MBOX_IRQ_MSK);
+}
+
+static const struct mbox_chan_ops sprd_mbox_ops = {
+ .send_data = sprd_mbox_send_data,
+ .flush = sprd_mbox_flush,
+ .startup = sprd_mbox_startup,
+ .shutdown = sprd_mbox_shutdown,
+};
+
+static void sprd_mbox_disable(void *data)
+{
+ struct sprd_mbox_priv *priv = data;
+
+ clk_disable_unprepare(priv->clk);
+}
+
+static int sprd_mbox_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sprd_mbox_priv *priv;
+ int ret, inbox_irq, outbox_irq;
+ unsigned long id;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+
+ /*
+ * The Spreadtrum mailbox uses an inbox to send messages to the target
+ * core, and uses an outbox to receive messages from other cores.
+ *
+ * Thus the mailbox controller supplies 2 different register addresses
+ * and IRQ numbers for inbox and outbox.
+ */
+ priv->inbox_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->inbox_base))
+ return PTR_ERR(priv->inbox_base);
+
+ priv->outbox_base = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(priv->outbox_base))
+ return PTR_ERR(priv->outbox_base);
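+/* Worker to complete backing device registration asynchronously */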
+
+ priv->clk = devm_clk_get(dev, "enable");
+ if (IS_ERR(priv->clk)) {
+ dev_err(dev, "failed to get mailbox clock\n");
+ return PTR_ERR(priv->clk);
+ }
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, sprd_mbox_disable, priv);
+ if (ret) {
+ dev_err(dev, "failed to add mailbox disable action\n");
+ return ret;
+ }
+
+ inbox_irq = platform_get_irq(pdev, 0);
+ if (inbox_irq < 0)
+ return inbox_irq;
+
+ ret = devm_request_irq(dev, inbox_irq, sprd_mbox_inbox_isr,
+ IRQF_NO_SUSPEND, dev_name(dev), priv);
+ if (ret) {
+ dev_err(dev, "failed to request inbox IRQ: %d\n", ret);
+ return ret;
+ }
+
+ outbox_irq = platform_get_irq(pdev, 1);
+ if (outbox_irq < 0)
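+/* Worker to complete cache device registration asynchronously */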
+ return outbox_irq;
+
+ ret = devm_request_irq(dev, outbox_irq, sprd_mbox_outbox_isr,
+ IRQF_NO_SUSPEND, dev_name(dev), priv);
+ if (ret) {
+ dev_err(dev, "failed to request outbox IRQ: %d\n", ret);
+ return ret;
+ }
+
+ /* Get the default outbox FIFO depth */
+ priv->outbox_fifo_depth =
+ readl(priv->outbox_base + SPRD_MBOX_FIFO_DEPTH) + 1;
+ priv->mbox.dev = dev;
+ priv->mbox.chans = &priv->chan[0];
+ priv->mbox.num_chans = SPRD_MBOX_CHAN_MAX;
+ priv->mbox.ops = &sprd_mbox_ops;
+ priv->mbox.txdone_irq = true;
+
+ for (id = 0; id < SPRD_MBOX_CHAN_MAX; id++)
+ priv->chan[id].con_priv = (void *)id;
+
+ ret = devm_mbox_controller_register(dev, &priv->mbox);
+ if (ret) {
+ dev_err(dev, "failed to register mailbox: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id sprd_mbox_of_match[] = {
+ { .compatible = "sprd,sc9860-mailbox", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sprd_mbox_of_match);
+
+static struct platform_driver sprd_mbox_driver = {
+ .driver = {
+ .name = "sprd-mailbox",
+ .of_match_table = sprd_mbox_of_match,
+ },
+ .probe = sprd_mbox_probe,
+};
+module_platform_driver(sprd_mbox_driver);
+
+MODULE_AUTHOR("Baolin Wang <baolin.wang@unisoc.com>");
+MODULE_DESCRIPTION("Spreadtrum mailbox driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mailbox/zynqmp-ipi-mailbox.c b/drivers/mailbox/zynqmp-ipi-mailbox.c
index 86887c9a349a..f44079d62b1a 100644
--- a/drivers/mailbox/zynqmp-ipi-mailbox.c
+++ b/drivers/mailbox/zynqmp-ipi-mailbox.c
@@ -504,10 +504,9 @@ static int zynqmp_ipi_mbox_probe(struct zynqmp_ipi_mbox *ipi_mbox,
mchan->req_buf_size = resource_size(&res);
mchan->req_buf = devm_ioremap(mdev, res.start,
mchan->req_buf_size);
- if (IS_ERR(mchan->req_buf)) {
+ if (!mchan->req_buf) {
dev_err(mdev, "Unable to map IPI buffer I/O memory\n");
- ret = PTR_ERR(mchan->req_buf);
- return ret;
+ return -ENOMEM;
}
} else if (ret != -ENODEV) {
dev_err(mdev, "Unmatched resource %s, %d.\n", name, ret);
@@ -520,10 +519,9 @@ static int zynqmp_ipi_mbox_probe(struct zynqmp_ipi_mbox *ipi_mbox,
mchan->resp_buf_size = resource_size(&res);
mchan->resp_buf = devm_ioremap(mdev, res.start,
mchan->resp_buf_size);
- if (IS_ERR(mchan->resp_buf)) {
+ if (!mchan->resp_buf) {
dev_err(mdev, "Unable to map IPI buffer I/O memory\n");
- ret = PTR_ERR(mchan->resp_buf);
- return ret;
+ return -ENOMEM;
}
} else if (ret != -ENODEV) {
dev_err(mdev, "Unmatched resource %s.\n", name);
@@ -543,10 +541,9 @@ static int zynqmp_ipi_mbox_probe(struct zynqmp_ipi_mbox *ipi_mbox,
mchan->req_buf_size = resource_size(&res);
mchan->req_buf = devm_ioremap(mdev, res.start,
mchan->req_buf_size);
- if (IS_ERR(mchan->req_buf)) {
+ if (!mchan->req_buf) {
dev_err(mdev, "Unable to map IPI buffer I/O memory\n");
- ret = PTR_ERR(mchan->req_buf);
- return ret;
+ return -ENOMEM;
}
} else if (ret != -ENODEV) {
dev_err(mdev, "Unmatched resource %s.\n", name);
@@ -559,10 +556,9 @@ static int zynqmp_ipi_mbox_probe(struct zynqmp_ipi_mbox *ipi_mbox,
mchan->resp_buf_size = resource_size(&res);
mchan->resp_buf = devm_ioremap(mdev, res.start,
mchan->resp_buf_size);
- if (IS_ERR(mchan->resp_buf)) {
+ if (!mchan->resp_buf) {
dev_err(mdev, "Unable to map IPI buffer I/O memory\n");
- ret = PTR_ERR(mchan->resp_buf);
- return ret;
+ return -ENOMEM;
}
} else if (ret != -ENODEV) {
dev_err(mdev, "Unmatched resource %s.\n", name);
@@ -668,10 +664,9 @@ static int zynqmp_ipi_probe(struct platform_device *pdev)
/* IPI IRQ */
ret = platform_get_irq(pdev, 0);
- if (ret < 0) {
- dev_err(dev, "unable to find IPI IRQ.\n");
+ if (ret < 0)
goto free_mbox_dev;
- }
+
pdata->irq = ret;
ret = devm_request_irq(dev, pdata->irq, zynqmp_ipi_interrupt,
IRQF_SHARED, dev_name(dev), pdata);
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index d6d5ab23c088..6665b56865b7 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -269,6 +269,7 @@ config DM_UNSTRIPED
config DM_CRYPT
tristate "Crypt target support"
depends on BLK_DEV_DM
+ depends on (ENCRYPTED_KEYS || ENCRYPTED_KEYS=n)
select CRYPTO
select CRYPTO_CBC
select CRYPTO_ESSIV
@@ -336,6 +337,14 @@ config DM_WRITECACHE
The writecache target doesn't cache reads because reads are supposed
to be cached in standard RAM.
+config DM_EBS
+ tristate "Emulated block size target (EXPERIMENTAL)"
+ depends on BLK_DEV_DM
+ select DM_BUFIO
+ help
+ dm-ebs emulates a smaller logical block size on backing devices
+ with a larger one (e.g. 512 byte sectors on 4K native disks).
+
config DM_ERA
tristate "Era target (EXPERIMENTAL)"
depends on BLK_DEV_DM
@@ -443,6 +452,17 @@ config DM_MULTIPATH_ST
If unsure, say N.
+config DM_MULTIPATH_HST
+ tristate "I/O Path Selector based on historical service time"
+ depends on DM_MULTIPATH
+ help
+ This path selector is a dynamic load balancer which selects
+ the path expected to complete the incoming I/O in the shortest
+ time by comparing estimated service time (based on historical
+ service time).
+
+ If unsure, say N.
+
config DM_DELAY
tristate "I/O delaying target"
depends on BLK_DEV_DM
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index d91a7edcd2ab..31840f95cd40 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -17,6 +17,7 @@ dm-thin-pool-y += dm-thin.o dm-thin-metadata.o
dm-cache-y += dm-cache-target.o dm-cache-metadata.o dm-cache-policy.o \
dm-cache-background-tracker.o
dm-cache-smq-y += dm-cache-policy-smq.o
+dm-ebs-y += dm-ebs-target.o
dm-era-y += dm-era-target.o
dm-clone-y += dm-clone-target.o dm-clone-metadata.o
dm-verity-y += dm-verity-target.o
@@ -54,6 +55,7 @@ obj-$(CONFIG_DM_FLAKEY) += dm-flakey.o
obj-$(CONFIG_DM_MULTIPATH) += dm-multipath.o dm-round-robin.o
obj-$(CONFIG_DM_MULTIPATH_QL) += dm-queue-length.o
obj-$(CONFIG_DM_MULTIPATH_ST) += dm-service-time.o
+obj-$(CONFIG_DM_MULTIPATH_HST) += dm-historical-service-time.o
obj-$(CONFIG_DM_SWITCH) += dm-switch.o
obj-$(CONFIG_DM_SNAPSHOT) += dm-snapshot.o
obj-$(CONFIG_DM_PERSISTENT_DATA) += persistent-data/
@@ -65,6 +67,7 @@ obj-$(CONFIG_DM_THIN_PROVISIONING) += dm-thin-pool.o
obj-$(CONFIG_DM_VERITY) += dm-verity.o
obj-$(CONFIG_DM_CACHE) += dm-cache.o
obj-$(CONFIG_DM_CACHE_SMQ) += dm-cache-smq.o
+obj-$(CONFIG_DM_EBS) += dm-ebs.o
obj-$(CONFIG_DM_ERA) += dm-era.o
obj-$(CONFIG_DM_CLONE) += dm-clone.o
obj-$(CONFIG_DM_LOG_WRITES) += dm-log-writes.o
diff --git a/drivers/md/bcache/Kconfig b/drivers/md/bcache/Kconfig
index 6dfa653d30db..bf7dd96db9b3 100644
--- a/drivers/md/bcache/Kconfig
+++ b/drivers/md/bcache/Kconfig
@@ -26,3 +26,12 @@ config BCACHE_CLOSURES_DEBUG
Keeps all active closures in a linked list and provides a debugfs
interface to list them, which makes it possible to see asynchronous
operations that get stuck.
+
+config BCACHE_ASYNC_REGISTRAION
+ bool "Asynchronous device registration (EXPERIMENTAL)"
+ depends on BCACHE
+ help
+ Add a sysfs file /sys/fs/bcache/register_async. Writing a device
+ path into this file returns immediately, and the real registration
+ work is handled asynchronously by a kernel work queue.
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 74a9849ea164..221e0191b687 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -176,7 +176,7 @@
* - updates to non leaf nodes just happen synchronously (see btree_split()).
*/
-#define pr_fmt(fmt) "bcache: %s() " fmt "\n", __func__
+#define pr_fmt(fmt) "bcache: %s() " fmt, __func__
#include <linux/bcache.h>
#include <linux/bio.h>
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index 4385303836d8..4995fcaefe29 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -6,7 +6,7 @@
* Copyright 2012 Google, Inc.
*/
-#define pr_fmt(fmt) "bcache: %s() " fmt "\n", __func__
+#define pr_fmt(fmt) "bcache: %s() " fmt, __func__
#include "util.h"
#include "bset.h"
@@ -31,7 +31,7 @@ void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned int set)
if (b->ops->key_dump)
b->ops->key_dump(b, k);
else
- pr_err("%llu:%llu\n", KEY_INODE(k), KEY_OFFSET(k));
+ pr_cont("%llu:%llu\n", KEY_INODE(k), KEY_OFFSET(k));
if (next < bset_bkey_last(i) &&
bkey_cmp(k, b->ops->is_extents ?
@@ -1225,7 +1225,7 @@ static void btree_mergesort(struct btree_keys *b, struct bset *out,
out->keys = last ? (uint64_t *) bkey_next(last) - out->d : 0;
- pr_debug("sorted %i keys", out->keys);
+ pr_debug("sorted %i keys\n", out->keys);
}
static void __btree_sort(struct btree_keys *b, struct btree_iter *iter,
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 72856e5f23a3..39de94edd73a 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -619,7 +619,7 @@ retry:
* and BTREE_NODE_journal_flush bit cleared by btree_flush_write().
*/
if (btree_node_journal_flush(b)) {
- pr_debug("bnode %p is flushing by journal, retry", b);
+ pr_debug("bnode %p is flushing by journal, retry\n", b);
mutex_unlock(&b->write_lock);
udelay(1);
goto retry;
@@ -802,7 +802,7 @@ int bch_btree_cache_alloc(struct cache_set *c)
c->shrink.batch = c->btree_pages * 2;
if (register_shrinker(&c->shrink))
- pr_warn("bcache: %s: could not register shrinker",
+ pr_warn("bcache: %s: could not register shrinker\n",
__func__);
return 0;
@@ -1054,7 +1054,7 @@ retry:
*/
if (btree_node_journal_flush(b)) {
mutex_unlock(&b->write_lock);
- pr_debug("bnode %p journal_flush set, retry", b);
+ pr_debug("bnode %p journal_flush set, retry\n", b);
udelay(1);
goto retry;
}
@@ -1798,7 +1798,7 @@ static void bch_btree_gc(struct cache_set *c)
schedule_timeout_interruptible(msecs_to_jiffies
(GC_SLEEP_MS));
else if (ret)
- pr_warn("gc failed!");
+ pr_warn("gc failed!\n");
} while (ret && !test_bit(CACHE_SET_IO_DISABLE, &c->flags));
bch_btree_gc_finish(c);
@@ -1907,10 +1907,8 @@ static int bch_btree_check_thread(void *arg)
struct btree_iter iter;
struct bkey *k, *p;
int cur_idx, prev_idx, skip_nr;
- int i, n;
k = p = NULL;
- i = n = 0;
cur_idx = prev_idx = 0;
ret = 0;
@@ -2045,7 +2043,7 @@ int bch_btree_check(struct cache_set *c)
&check_state->infos[i],
name);
if (IS_ERR(check_state->infos[i].thread)) {
- pr_err("fails to run thread bch_btrchk[%d]", i);
+ pr_err("fails to run thread bch_btrchk[%d]\n", i);
for (--i; i >= 0; i--)
kthread_stop(check_state->infos[i].thread);
ret = -ENOMEM;
@@ -2456,7 +2454,7 @@ int bch_btree_insert(struct cache_set *c, struct keylist *keys,
if (ret) {
struct bkey *k;
- pr_err("error %i", ret);
+ pr_err("error %i\n", ret);
while ((k = bch_keylist_pop(keys)))
bkey_put(c, k);
@@ -2744,7 +2742,7 @@ struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
break;
if (bkey_cmp(&buf->last_scanned, end) >= 0) {
- pr_debug("scan finished");
+ pr_debug("scan finished\n");
break;
}
diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
index 886710043025..9162af5bb6ec 100644
--- a/drivers/md/bcache/extents.c
+++ b/drivers/md/bcache/extents.c
@@ -130,18 +130,18 @@ static void bch_bkey_dump(struct btree_keys *keys, const struct bkey *k)
char buf[80];
bch_extent_to_text(buf, sizeof(buf), k);
- pr_err(" %s", buf);
+ pr_cont(" %s", buf);
for (j = 0; j < KEY_PTRS(k); j++) {
size_t n = PTR_BUCKET_NR(b->c, k, j);
- pr_err(" bucket %zu", n);
+ pr_cont(" bucket %zu", n);
if (n >= b->c->sb.first_bucket && n < b->c->sb.nbuckets)
- pr_err(" prio %i",
- PTR_BUCKET(b->c, k, j)->prio);
+ pr_cont(" prio %i",
+ PTR_BUCKET(b->c, k, j)->prio);
}
- pr_err(" %s\n", bch_ptr_status(b->c, k));
+ pr_cont(" %s\n", bch_ptr_status(b->c, k));
}
/* Btree ptrs */
@@ -553,7 +553,7 @@ static bool bch_extent_bad(struct btree_keys *bk, const struct bkey *k)
if (stale && KEY_DIRTY(k)) {
bch_extent_to_text(buf, sizeof(buf), k);
- pr_info("stale dirty pointer, stale %u, key: %s",
+ pr_info("stale dirty pointer, stale %u, key: %s\n",
stale, buf);
}
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index 4d93f07f63e5..b25ee33b0d0b 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -65,14 +65,14 @@ void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio)
* we shouldn't count failed REQ_RAHEAD bio to dc->io_errors.
*/
if (bio->bi_opf & REQ_RAHEAD) {
- pr_warn_ratelimited("%s: Read-ahead I/O failed on backing device, ignore",
+ pr_warn_ratelimited("%s: Read-ahead I/O failed on backing device, ignore\n",
dc->backing_dev_name);
return;
}
errors = atomic_add_return(1, &dc->io_errors);
if (errors < dc->error_limit)
- pr_err("%s: IO error on backing device, unrecoverable",
+ pr_err("%s: IO error on backing device, unrecoverable\n",
dc->backing_dev_name);
else
bch_cached_dev_error(dc);
@@ -123,12 +123,12 @@ void bch_count_io_errors(struct cache *ca,
errors >>= IO_ERROR_SHIFT;
if (errors < ca->set->error_limit)
- pr_err("%s: IO error on %s%s",
+ pr_err("%s: IO error on %s%s\n",
ca->cache_dev_name, m,
is_read ? ", recovering." : ".");
else
bch_cache_set_error(ca->set,
- "%s: too many IO errors %s",
+ "%s: too many IO errors %s\n",
ca->cache_dev_name, m);
}
}
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 0e3ff9745ac7..90aac4e2333f 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -47,7 +47,7 @@ static int journal_read_bucket(struct cache *ca, struct list_head *list,
closure_init_stack(&cl);
- pr_debug("reading %u", bucket_index);
+ pr_debug("reading %u\n", bucket_index);
while (offset < ca->sb.bucket_size) {
reread: left = ca->sb.bucket_size - offset;
@@ -78,13 +78,13 @@ reread: left = ca->sb.bucket_size - offset;
size_t blocks, bytes = set_bytes(j);
if (j->magic != jset_magic(&ca->sb)) {
- pr_debug("%u: bad magic", bucket_index);
+ pr_debug("%u: bad magic\n", bucket_index);
return ret;
}
if (bytes > left << 9 ||
bytes > PAGE_SIZE << JSET_BITS) {
- pr_info("%u: too big, %zu bytes, offset %u",
+ pr_info("%u: too big, %zu bytes, offset %u\n",
bucket_index, bytes, offset);
return ret;
}
@@ -93,7 +93,7 @@ reread: left = ca->sb.bucket_size - offset;
goto reread;
if (j->csum != csum_set(j)) {
- pr_info("%u: bad csum, %zu bytes, offset %u",
+ pr_info("%u: bad csum, %zu bytes, offset %u\n",
bucket_index, bytes, offset);
return ret;
}
@@ -190,7 +190,7 @@ int bch_journal_read(struct cache_set *c, struct list_head *list)
uint64_t seq;
bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
- pr_debug("%u journal buckets", ca->sb.njournal_buckets);
+ pr_debug("%u journal buckets\n", ca->sb.njournal_buckets);
/*
* Read journal buckets ordered by golden ratio hash to quickly
@@ -215,7 +215,7 @@ int bch_journal_read(struct cache_set *c, struct list_head *list)
* If that fails, check all the buckets we haven't checked
* already
*/
- pr_debug("falling back to linear search");
+ pr_debug("falling back to linear search\n");
for (l = find_first_zero_bit(bitmap, ca->sb.njournal_buckets);
l < ca->sb.njournal_buckets;
@@ -233,7 +233,7 @@ bsearch:
/* Binary search */
m = l;
r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
- pr_debug("starting binary search, l %u r %u", l, r);
+ pr_debug("starting binary search, l %u r %u\n", l, r);
while (l + 1 < r) {
seq = list_entry(list->prev, struct journal_replay,
@@ -253,7 +253,7 @@ bsearch:
* Read buckets in reverse order until we stop finding more
* journal entries
*/
- pr_debug("finishing up: m %u njournal_buckets %u",
+ pr_debug("finishing up: m %u njournal_buckets %u\n",
m, ca->sb.njournal_buckets);
l = m;
@@ -370,10 +370,10 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list)
if (n != i->j.seq) {
if (n == start && is_discard_enabled(s))
- pr_info("bcache: journal entries %llu-%llu may be discarded! (replaying %llu-%llu)",
+ pr_info("journal entries %llu-%llu may be discarded! (replaying %llu-%llu)\n",
n, i->j.seq - 1, start, end);
else {
- pr_err("bcache: journal entries %llu-%llu missing! (replaying %llu-%llu)",
+ pr_err("journal entries %llu-%llu missing! (replaying %llu-%llu)\n",
n, i->j.seq - 1, start, end);
ret = -EIO;
goto err;
@@ -403,7 +403,7 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list)
entries++;
}
- pr_info("journal replay done, %i keys in %i entries, seq %llu",
+ pr_info("journal replay done, %i keys in %i entries, seq %llu\n",
keys, entries, end);
err:
while (!list_empty(list)) {
@@ -481,7 +481,7 @@ static void btree_flush_write(struct cache_set *c)
break;
if (btree_node_journal_flush(b))
- pr_err("BUG: flush_write bit should not be set here!");
+ pr_err("BUG: flush_write bit should not be set here!\n");
mutex_lock(&b->write_lock);
@@ -534,13 +534,13 @@ static void btree_flush_write(struct cache_set *c)
for (i = 0; i < nr; i++) {
b = btree_nodes[i];
if (!b) {
- pr_err("BUG: btree_nodes[%d] is NULL", i);
+ pr_err("BUG: btree_nodes[%d] is NULL\n", i);
continue;
}
/* safe to check without holding b->write_lock */
if (!btree_node_journal_flush(b)) {
- pr_err("BUG: bnode %p: journal_flush bit cleaned", b);
+ pr_err("BUG: bnode %p: journal_flush bit cleaned\n", b);
continue;
}
@@ -548,14 +548,14 @@ static void btree_flush_write(struct cache_set *c)
if (!btree_current_write(b)->journal) {
clear_bit(BTREE_NODE_journal_flush, &b->flags);
mutex_unlock(&b->write_lock);
- pr_debug("bnode %p: written by others", b);
+ pr_debug("bnode %p: written by others\n", b);
continue;
}
if (!btree_node_dirty(b)) {
clear_bit(BTREE_NODE_journal_flush, &b->flags);
mutex_unlock(&b->write_lock);
- pr_debug("bnode %p: dirty bit cleaned by others", b);
+ pr_debug("bnode %p: dirty bit cleaned by others\n", b);
continue;
}
@@ -716,7 +716,7 @@ void bch_journal_next(struct journal *j)
j->cur->data->keys = 0;
if (fifo_full(&j->pin))
- pr_debug("journal_pin full (%zu)", fifo_used(&j->pin));
+ pr_debug("journal_pin full (%zu)\n", fifo_used(&j->pin));
}
static void journal_write_endio(struct bio *bio)
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 71a90fbec314..7acf024e99f3 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -110,7 +110,7 @@ static void bch_data_invalidate(struct closure *cl)
struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
struct bio *bio = op->bio;
- pr_debug("invalidating %i sectors from %llu",
+ pr_debug("invalidating %i sectors from %llu\n",
bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);
while (bio_sectors(bio)) {
@@ -396,7 +396,7 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
bio_sectors(bio) & (c->sb.block_size - 1)) {
- pr_debug("skipping unaligned io");
+ pr_debug("skipping unaligned io\n");
goto skip;
}
@@ -650,7 +650,7 @@ static void backing_request_endio(struct bio *bio)
*/
if (unlikely(s->iop.writeback &&
bio->bi_opf & REQ_PREFLUSH)) {
- pr_err("Can't flush %s: returned bi_status %i",
+ pr_err("Can't flush %s: returned bi_status %i\n",
dc->backing_dev_name, bio->bi_status);
} else {
/* set to orig_bio->bi_status in bio_complete() */
@@ -668,9 +668,7 @@ static void backing_request_endio(struct bio *bio)
static void bio_complete(struct search *s)
{
if (s->orig_bio) {
- generic_end_io_acct(s->d->disk->queue, bio_op(s->orig_bio),
- &s->d->disk->part0, s->start_time);
-
+ bio_end_io_acct(s->orig_bio, s->start_time);
trace_bcache_request_end(s->d, s->orig_bio);
s->orig_bio->bi_status = s->iop.status;
bio_endio(s->orig_bio);
@@ -730,7 +728,7 @@ static inline struct search *search_alloc(struct bio *bio,
s->recoverable = 1;
s->write = op_is_write(bio_op(bio));
s->read_dirty_data = 0;
- s->start_time = jiffies;
+ s->start_time = bio_start_io_acct(bio);
s->iop.c = d->c;
s->iop.bio = NULL;
@@ -1082,8 +1080,7 @@ static void detached_dev_end_io(struct bio *bio)
bio->bi_end_io = ddip->bi_end_io;
bio->bi_private = ddip->bi_private;
- generic_end_io_acct(ddip->d->disk->queue, bio_op(bio),
- &ddip->d->disk->part0, ddip->start_time);
+ bio_end_io_acct(bio, ddip->start_time);
if (bio->bi_status) {
struct cached_dev *dc = container_of(ddip->d,
@@ -1108,7 +1105,7 @@ static void detached_dev_do_request(struct bcache_device *d, struct bio *bio)
*/
ddip = kzalloc(sizeof(struct detached_dev_io_private), GFP_NOIO);
ddip->d = d;
- ddip->start_time = jiffies;
+ ddip->start_time = bio_start_io_acct(bio);
ddip->bi_end_io = bio->bi_end_io;
ddip->bi_private = bio->bi_private;
bio->bi_end_io = detached_dev_end_io;
@@ -1190,11 +1187,6 @@ blk_qc_t cached_dev_make_request(struct request_queue *q, struct bio *bio)
}
}
- generic_start_io_acct(q,
- bio_op(bio),
- bio_sectors(bio),
- &d->disk->part0);
-
bio_set_dev(bio, dc->bdev);
bio->bi_iter.bi_sector += dc->sb.data_offset;
@@ -1311,8 +1303,6 @@ blk_qc_t flash_dev_make_request(struct request_queue *q, struct bio *bio)
return BLK_QC_T_NONE;
}
- generic_start_io_acct(q, bio_op(bio), bio_sectors(bio), &d->disk->part0);
-
s = search_alloc(bio, d);
cl = &s->cl;
bio = &s->bio.bio;
@@ -1372,7 +1362,6 @@ void bch_flash_dev_request_init(struct bcache_device *d)
{
struct gendisk *g = d->disk;
- g->queue->make_request_fn = flash_dev_make_request;
g->queue->backing_dev_info->congested_fn = flash_dev_congested;
d->cache_miss = flash_dev_cache_miss;
d->ioctl = flash_dev_ioctl;
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index d98354fa28e3..f9975c22bf7e 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -89,7 +89,7 @@ static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
for (i = 0; i < SB_JOURNAL_BUCKETS; i++)
sb->d[i] = le64_to_cpu(s->d[i]);
- pr_debug("read sb version %llu, flags %llu, seq %llu, journal size %u",
+ pr_debug("read sb version %llu, flags %llu, seq %llu, journal size %u\n",
sb->version, sb->flags, sb->seq, sb->keys);
err = "Not a bcache superblock (bad offset)";
@@ -234,7 +234,7 @@ static void __write_super(struct cache_sb *sb, struct cache_sb_disk *out,
out->csum = csum_set(out);
- pr_debug("ver %llu, flags %llu, seq %llu",
+ pr_debug("ver %llu, flags %llu, seq %llu\n",
sb->version, sb->flags, sb->seq);
submit_bio(bio);
@@ -365,11 +365,11 @@ static void uuid_io(struct cache_set *c, int op, unsigned long op_flags,
}
bch_extent_to_text(buf, sizeof(buf), k);
- pr_debug("%s UUIDs at %s", op == REQ_OP_WRITE ? "wrote" : "read", buf);
+ pr_debug("%s UUIDs at %s\n", op == REQ_OP_WRITE ? "wrote" : "read", buf);
for (u = c->uuids; u < c->uuids + c->nr_uuids; u++)
if (!bch_is_zero(u->uuid, 16))
- pr_debug("Slot %zi: %pU: %s: 1st: %u last: %u inv: %u",
+ pr_debug("Slot %zi: %pU: %s: 1st: %u last: %u inv: %u\n",
u - c->uuids, u->uuid, u->label,
u->first_reg, u->last_reg, u->invalidated);
@@ -534,7 +534,7 @@ int bch_prio_write(struct cache *ca, bool wait)
struct bucket *b;
struct closure cl;
- pr_debug("free_prio=%zu, free_none=%zu, free_inc=%zu",
+ pr_debug("free_prio=%zu, free_none=%zu, free_inc=%zu\n",
fifo_used(&ca->free[RESERVE_PRIO]),
fifo_used(&ca->free[RESERVE_NONE]),
fifo_used(&ca->free_inc));
@@ -629,12 +629,12 @@ static int prio_read(struct cache *ca, uint64_t bucket)
if (p->csum !=
bch_crc64(&p->magic, bucket_bytes(ca) - 8)) {
- pr_warn("bad csum reading priorities");
+ pr_warn("bad csum reading priorities\n");
goto out;
}
if (p->magic != pset_magic(&ca->sb)) {
- pr_warn("bad magic reading priorities");
+ pr_warn("bad magic reading priorities\n");
goto out;
}
@@ -728,11 +728,11 @@ static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
ret = sysfs_create_link(&d->kobj, &c->kobj, "cache");
if (ret < 0)
- pr_err("Couldn't create device -> cache set symlink");
+ pr_err("Couldn't create device -> cache set symlink\n");
ret = sysfs_create_link(&c->kobj, &d->kobj, d->name);
if (ret < 0)
- pr_err("Couldn't create cache set -> device symlink");
+ pr_err("Couldn't create cache set -> device symlink\n");
clear_bit(BCACHE_DEV_UNLINK_DONE, &d->flags);
}
@@ -789,15 +789,17 @@ static void bcache_device_free(struct bcache_device *d)
lockdep_assert_held(&bch_register_lock);
if (disk)
- pr_info("%s stopped", disk->disk_name);
+ pr_info("%s stopped\n", disk->disk_name);
else
- pr_err("bcache device (NULL gendisk) stopped");
+ pr_err("bcache device (NULL gendisk) stopped\n");
if (d->c)
bcache_device_detach(d);
if (disk) {
- if (disk->flags & GENHD_FL_UP)
+ bool disk_added = (disk->flags & GENHD_FL_UP) != 0;
+
+ if (disk_added)
del_gendisk(disk);
if (disk->queue)
@@ -805,7 +807,8 @@ static void bcache_device_free(struct bcache_device *d)
ida_simple_remove(&bcache_device_idx,
first_minor_to_idx(disk->first_minor));
- put_disk(disk);
+ if (disk_added)
+ put_disk(disk);
}
bioset_exit(&d->bio_split);
@@ -830,7 +833,7 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
d->nr_stripes = DIV_ROUND_UP_ULL(sectors, d->stripe_size);
if (!d->nr_stripes || d->nr_stripes > max_stripes) {
- pr_err("nr_stripes too large or invalid: %u (start sector beyond end of disk?)",
+ pr_err("nr_stripes too large or invalid: %u (start sector beyond end of disk?)\n",
(unsigned int)d->nr_stripes);
return -ENOMEM;
}
@@ -928,11 +931,11 @@ static int cached_dev_status_update(void *arg)
dc->offline_seconds = 0;
if (dc->offline_seconds >= BACKING_DEV_OFFLINE_TIMEOUT) {
- pr_err("%s: device offline for %d seconds",
+ pr_err("%s: device offline for %d seconds\n",
dc->backing_dev_name,
BACKING_DEV_OFFLINE_TIMEOUT);
- pr_err("%s: disable I/O request due to backing "
- "device offline", dc->disk.name);
+ pr_err("%s: disable I/O request due to backing device offline\n",
+ dc->disk.name);
dc->io_disable = true;
/* let others know earlier that io_disable is true */
smp_mb();
@@ -959,7 +962,7 @@ int bch_cached_dev_run(struct cached_dev *dc)
};
if (dc->io_disable) {
- pr_err("I/O disabled on cached dev %s",
+ pr_err("I/O disabled on cached dev %s\n",
dc->backing_dev_name);
kfree(env[1]);
kfree(env[2]);
@@ -971,7 +974,7 @@ int bch_cached_dev_run(struct cached_dev *dc)
kfree(env[1]);
kfree(env[2]);
kfree(buf);
- pr_info("cached dev %s is running already",
+ pr_info("cached dev %s is running already\n",
dc->backing_dev_name);
return -EBUSY;
}
@@ -1001,16 +1004,14 @@ int bch_cached_dev_run(struct cached_dev *dc)
if (sysfs_create_link(&d->kobj, &disk_to_dev(d->disk)->kobj, "dev") ||
sysfs_create_link(&disk_to_dev(d->disk)->kobj,
&d->kobj, "bcache")) {
- pr_err("Couldn't create bcache dev <-> disk sysfs symlinks");
+ pr_err("Couldn't create bcache dev <-> disk sysfs symlinks\n");
return -ENOMEM;
}
dc->status_update_thread = kthread_run(cached_dev_status_update,
dc, "bcache_status_update");
if (IS_ERR(dc->status_update_thread)) {
- pr_warn("failed to create bcache_status_update kthread, "
- "continue to run without monitoring backing "
- "device status");
+ pr_warn("failed to create bcache_status_update kthread, continue to run without monitoring backing device status\n");
}
return 0;
@@ -1036,7 +1037,7 @@ static void cancel_writeback_rate_update_dwork(struct cached_dev *dc)
} while (time_out > 0);
if (time_out == 0)
- pr_warn("give up waiting for dc->writeback_write_update to quit");
+ pr_warn("give up waiting for dc->writeback_write_update to quit\n");
cancel_delayed_work_sync(&dc->writeback_rate_update);
}
@@ -1077,7 +1078,7 @@ static void cached_dev_detach_finish(struct work_struct *w)
mutex_unlock(&bch_register_lock);
- pr_info("Caching disabled for %s", dc->backing_dev_name);
+ pr_info("Caching disabled for %s\n", dc->backing_dev_name);
/* Drop ref we took in cached_dev_detach() */
closure_put(&dc->disk.cl);
@@ -1117,20 +1118,20 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
return -ENOENT;
if (dc->disk.c) {
- pr_err("Can't attach %s: already attached",
+ pr_err("Can't attach %s: already attached\n",
dc->backing_dev_name);
return -EINVAL;
}
if (test_bit(CACHE_SET_STOPPING, &c->flags)) {
- pr_err("Can't attach %s: shutting down",
+ pr_err("Can't attach %s: shutting down\n",
dc->backing_dev_name);
return -EINVAL;
}
if (dc->sb.block_size < c->sb.block_size) {
/* Will die */
- pr_err("Couldn't attach %s: block size less than set's block size",
+ pr_err("Couldn't attach %s: block size less than set's block size\n",
dc->backing_dev_name);
return -EINVAL;
}
@@ -1138,7 +1139,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
/* Check whether already attached */
list_for_each_entry_safe(exist_dc, t, &c->cached_devs, list) {
if (!memcmp(dc->sb.uuid, exist_dc->sb.uuid, 16)) {
- pr_err("Tried to attach %s but duplicate UUID already attached",
+ pr_err("Tried to attach %s but duplicate UUID already attached\n",
dc->backing_dev_name);
return -EINVAL;
@@ -1157,14 +1158,14 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
if (!u) {
if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
- pr_err("Couldn't find uuid for %s in set",
+ pr_err("Couldn't find uuid for %s in set\n",
dc->backing_dev_name);
return -ENOENT;
}
u = uuid_find_empty(c);
if (!u) {
- pr_err("Not caching %s, no room for UUID",
+ pr_err("Not caching %s, no room for UUID\n",
dc->backing_dev_name);
return -EINVAL;
}
@@ -1210,7 +1211,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
down_write(&dc->writeback_lock);
if (bch_cached_dev_writeback_start(dc)) {
up_write(&dc->writeback_lock);
- pr_err("Couldn't start writeback facilities for %s",
+ pr_err("Couldn't start writeback facilities for %s\n",
dc->disk.disk->disk_name);
return -ENOMEM;
}
@@ -1233,7 +1234,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
*/
kthread_stop(dc->writeback_thread);
cancel_writeback_rate_update_dwork(dc);
- pr_err("Couldn't run cached device %s",
+ pr_err("Couldn't run cached device %s\n",
dc->backing_dev_name);
return ret;
}
@@ -1244,7 +1245,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
/* Allow the writeback thread to proceed */
up_write(&dc->writeback_lock);
- pr_info("Caching %s as %s on set %pU",
+ pr_info("Caching %s as %s on set %pU\n",
dc->backing_dev_name,
dc->disk.disk->disk_name,
dc->disk.c->sb.set_uuid);
@@ -1384,7 +1385,7 @@ static int register_bdev(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj))
goto err;
- pr_info("registered backing device %s", dc->backing_dev_name);
+ pr_info("registered backing device %s\n", dc->backing_dev_name);
list_add(&dc->list, &uncached_devices);
/* attach to a matched cache set if it exists */
@@ -1401,7 +1402,7 @@ static int register_bdev(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
return 0;
err:
- pr_notice("error %s: %s", dc->backing_dev_name, err);
+ pr_notice("error %s: %s\n", dc->backing_dev_name, err);
bcache_device_stop(&dc->disk);
return ret;
}
@@ -1497,7 +1498,7 @@ int bch_flash_dev_create(struct cache_set *c, uint64_t size)
u = uuid_find_empty(c);
if (!u) {
- pr_err("Can't create volume, no room for UUID");
+ pr_err("Can't create volume, no room for UUID\n");
return -EINVAL;
}
@@ -1523,7 +1524,7 @@ bool bch_cached_dev_error(struct cached_dev *dc)
smp_mb();
pr_err("stop %s: too many IO errors on backing device %s\n",
- dc->disk.disk->disk_name, dc->backing_dev_name);
+ dc->disk.disk->disk_name, dc->backing_dev_name);
bcache_device_stop(&dc->disk);
return true;
@@ -1534,6 +1535,7 @@ bool bch_cached_dev_error(struct cached_dev *dc)
__printf(2, 3)
bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...)
{
+ struct va_format vaf;
va_list args;
if (c->on_error != ON_ERROR_PANIC &&
@@ -1541,20 +1543,22 @@ bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...)
return false;
if (test_and_set_bit(CACHE_SET_IO_DISABLE, &c->flags))
- pr_info("CACHE_SET_IO_DISABLE already set");
+ pr_info("CACHE_SET_IO_DISABLE already set\n");
/*
* XXX: we can be called from atomic context
* acquire_console_sem();
*/
- pr_err("bcache: error on %pU: ", c->sb.set_uuid);
-
va_start(args, fmt);
- vprintk(fmt, args);
- va_end(args);
- pr_err(", disabling caching\n");
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ pr_err("error on %pU: %pV, disabling caching\n",
+ c->sb.set_uuid, &vaf);
+
+ va_end(args);
if (c->on_error == ON_ERROR_PANIC)
panic("panic forced after error\n");
@@ -1606,7 +1610,7 @@ static void cache_set_free(struct closure *cl)
list_del(&c->list);
mutex_unlock(&bch_register_lock);
- pr_info("Cache set %pU unregistered", c->sb.set_uuid);
+ pr_info("Cache set %pU unregistered\n", c->sb.set_uuid);
wake_up(&unregister_wait);
closure_debug_destroy(&c->cl);
@@ -1677,7 +1681,7 @@ static void conditional_stop_bcache_device(struct cache_set *c,
struct cached_dev *dc)
{
if (dc->stop_when_cache_set_failed == BCH_CACHED_DEV_STOP_ALWAYS) {
- pr_warn("stop_when_cache_set_failed of %s is \"always\", stop it for failed cache set %pU.",
+ pr_warn("stop_when_cache_set_failed of %s is \"always\", stop it for failed cache set %pU.\n",
d->disk->disk_name, c->sb.set_uuid);
bcache_device_stop(d);
} else if (atomic_read(&dc->has_dirty)) {
@@ -1685,7 +1689,7 @@ static void conditional_stop_bcache_device(struct cache_set *c,
* dc->stop_when_cache_set_failed == BCH_CACHED_STOP_AUTO
* and dc->has_dirty == 1
*/
- pr_warn("stop_when_cache_set_failed of %s is \"auto\" and cache is dirty, stop it to avoid potential data corruption.",
+ pr_warn("stop_when_cache_set_failed of %s is \"auto\" and cache is dirty, stop it to avoid potential data corruption.\n",
d->disk->disk_name);
/*
* There might be a small time gap that cache set is
@@ -1707,7 +1711,7 @@ static void conditional_stop_bcache_device(struct cache_set *c,
* dc->stop_when_cache_set_failed == BCH_CACHED_STOP_AUTO
* and dc->has_dirty == 0
*/
- pr_warn("stop_when_cache_set_failed of %s is \"auto\" and cache is clean, keep it alive.",
+ pr_warn("stop_when_cache_set_failed of %s is \"auto\" and cache is clean, keep it alive.\n",
d->disk->disk_name);
}
}
@@ -1874,7 +1878,7 @@ static int run_cache_set(struct cache_set *c)
if (bch_journal_read(c, &journal))
goto err;
- pr_debug("btree_journal_read() done");
+ pr_debug("btree_journal_read() done\n");
err = "no journal entries found";
if (list_empty(&journal))
@@ -1920,7 +1924,7 @@ static int run_cache_set(struct cache_set *c)
bch_journal_mark(c, &journal);
bch_initial_gc_finish(c);
- pr_debug("btree_check() done");
+ pr_debug("btree_check() done\n");
/*
* bcache_journal_next() can't happen sooner, or
@@ -1951,7 +1955,7 @@ static int run_cache_set(struct cache_set *c)
if (bch_journal_replay(c, &journal))
goto err;
} else {
- pr_notice("invalidating existing data");
+ pr_notice("invalidating existing data\n");
for_each_cache(ca, c, i) {
unsigned int j;
@@ -2085,7 +2089,7 @@ found:
memcpy(c->sb.set_uuid, ca->sb.set_uuid, 16);
c->sb.flags = ca->sb.flags;
c->sb.seq = ca->sb.seq;
- pr_debug("set version = %llu", c->sb.version);
+ pr_debug("set version = %llu\n", c->sb.version);
}
kobject_get(&ca->kobj);
@@ -2247,7 +2251,7 @@ err_btree_alloc:
err_free:
module_put(THIS_MODULE);
if (err)
- pr_notice("error %s: %s", ca->cache_dev_name, err);
+ pr_notice("error %s: %s\n", ca->cache_dev_name, err);
return ret;
}
@@ -2301,14 +2305,14 @@ static int register_cache(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
goto out;
}
- pr_info("registered cache device %s", ca->cache_dev_name);
+ pr_info("registered cache device %s\n", ca->cache_dev_name);
out:
kobject_put(&ca->kobj);
err:
if (err)
- pr_notice("error %s: %s", ca->cache_dev_name, err);
+ pr_notice("error %s: %s\n", ca->cache_dev_name, err);
return ret;
}
@@ -2323,6 +2327,7 @@ static ssize_t bch_pending_bdevs_cleanup(struct kobject *k,
kobj_attribute_write(register, register_bcache);
kobj_attribute_write(register_quiet, register_bcache);
+kobj_attribute_write(register_async, register_bcache);
kobj_attribute_write(pendings_cleanup, bch_pending_bdevs_cleanup);
static bool bch_is_open_backing(struct block_device *bdev)
@@ -2358,6 +2363,83 @@ static bool bch_is_open(struct block_device *bdev)
return bch_is_open_cache(bdev) || bch_is_open_backing(bdev);
}
+struct async_reg_args {
+ struct work_struct reg_work;
+ char *path;
+ struct cache_sb *sb;
+ struct cache_sb_disk *sb_disk;
+ struct block_device *bdev;
+};
+
+static void register_bdev_worker(struct work_struct *work)
+{
+ int fail = false;
+ struct async_reg_args *args =
+ container_of(work, struct async_reg_args, reg_work);
+ struct cached_dev *dc;
+
+ dc = kzalloc(sizeof(*dc), GFP_KERNEL);
+ if (!dc) {
+ fail = true;
+ put_page(virt_to_page(args->sb_disk));
+ blkdev_put(args->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
+ goto out;
+ }
+
+ mutex_lock(&bch_register_lock);
+ if (register_bdev(args->sb, args->sb_disk, args->bdev, dc) < 0)
+ fail = true;
+ mutex_unlock(&bch_register_lock);
+
+out:
+ if (fail)
+ pr_info("error %s: fail to register backing device\n",
+ args->path);
+ kfree(args->sb);
+ kfree(args->path);
+ kfree(args);
+ module_put(THIS_MODULE);
+}
+
+static void register_cache_worker(struct work_struct *work)
+{
+ int fail = false;
+ struct async_reg_args *args =
+ container_of(work, struct async_reg_args, reg_work);
+ struct cache *ca;
+
+ ca = kzalloc(sizeof(*ca), GFP_KERNEL);
+ if (!ca) {
+ fail = true;
+ put_page(virt_to_page(args->sb_disk));
+ blkdev_put(args->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
+ goto out;
+ }
+
+ /* blkdev_put() will be called in bch_cache_release() */
+ if (register_cache(args->sb, args->sb_disk, args->bdev, ca) != 0)
+ fail = true;
+
+out:
+ if (fail)
+ pr_info("error %s: fail to register cache device\n",
+ args->path);
+ kfree(args->sb);
+ kfree(args->path);
+ kfree(args);
+ module_put(THIS_MODULE);
+}
+
+static void register_device_aync(struct async_reg_args *args)
+{
+ if (SB_IS_BDEV(args->sb))
+ INIT_WORK(&args->reg_work, register_bdev_worker);
+ else
+ INIT_WORK(&args->reg_work, register_cache_worker);
+
+ queue_work(system_wq, &args->reg_work);
+}
+
static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
const char *buffer, size_t size)
{
@@ -2420,6 +2502,26 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
goto out_blkdev_put;
err = "failed to register device";
+ if (attr == &ksysfs_register_async) {
+ /* register in asynchronous way */
+ struct async_reg_args *args =
+ kzalloc(sizeof(struct async_reg_args), GFP_KERNEL);
+
+ if (!args) {
+ ret = -ENOMEM;
+ err = "cannot allocate memory";
+ goto out_put_sb_page;
+ }
+
+ args->path = path;
+ args->sb = sb;
+ args->sb_disk = sb_disk;
+ args->bdev = bdev;
+ register_device_aync(args);
+ /* Return to user space immediately; registration continues asynchronously */
+ goto async_done;
+ }
+
if (SB_IS_BDEV(sb)) {
struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
@@ -2447,6 +2549,7 @@ done:
kfree(sb);
kfree(path);
module_put(THIS_MODULE);
+async_done:
return size;
out_put_sb_page:
@@ -2461,7 +2564,7 @@ out_free_path:
out_module_put:
module_put(THIS_MODULE);
out:
- pr_info("error %s: %s", path?path:"", err);
+ pr_info("error %s: %s\n", path?path:"", err);
return ret;
}
@@ -2506,7 +2609,7 @@ static ssize_t bch_pending_bdevs_cleanup(struct kobject *k,
mutex_unlock(&bch_register_lock);
list_for_each_entry_safe(pdev, tpdev, &pending_devs, list) {
- pr_info("delete pdev %p", pdev);
+ pr_info("delete pdev %p\n", pdev);
list_del(&pdev->list);
bcache_device_stop(&pdev->dc->disk);
kfree(pdev);
@@ -2549,7 +2652,7 @@ static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x)
mutex_unlock(&bch_register_lock);
- pr_info("Stopping all devices:");
+ pr_info("Stopping all devices:\n");
/*
* The reason bch_register_lock is not held to call
@@ -2599,9 +2702,9 @@ static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x)
finish_wait(&unregister_wait, &wait);
if (stopped)
- pr_info("All devices stopped");
+ pr_info("All devices stopped\n");
else
- pr_notice("Timeout waiting for devices to be closed");
+ pr_notice("Timeout waiting for devices to be closed\n");
out:
mutex_unlock(&bch_register_lock);
}
@@ -2637,7 +2740,7 @@ static void check_module_parameters(void)
if (bch_cutoff_writeback_sync == 0)
bch_cutoff_writeback_sync = CUTOFF_WRITEBACK_SYNC;
else if (bch_cutoff_writeback_sync > CUTOFF_WRITEBACK_SYNC_MAX) {
- pr_warn("set bch_cutoff_writeback_sync (%u) to max value %u",
+ pr_warn("set bch_cutoff_writeback_sync (%u) to max value %u\n",
bch_cutoff_writeback_sync, CUTOFF_WRITEBACK_SYNC_MAX);
bch_cutoff_writeback_sync = CUTOFF_WRITEBACK_SYNC_MAX;
}
@@ -2645,13 +2748,13 @@ static void check_module_parameters(void)
if (bch_cutoff_writeback == 0)
bch_cutoff_writeback = CUTOFF_WRITEBACK;
else if (bch_cutoff_writeback > CUTOFF_WRITEBACK_MAX) {
- pr_warn("set bch_cutoff_writeback (%u) to max value %u",
+ pr_warn("set bch_cutoff_writeback (%u) to max value %u\n",
bch_cutoff_writeback, CUTOFF_WRITEBACK_MAX);
bch_cutoff_writeback = CUTOFF_WRITEBACK_MAX;
}
if (bch_cutoff_writeback > bch_cutoff_writeback_sync) {
- pr_warn("set bch_cutoff_writeback (%u) to %u",
+ pr_warn("set bch_cutoff_writeback (%u) to %u\n",
bch_cutoff_writeback, bch_cutoff_writeback_sync);
bch_cutoff_writeback = bch_cutoff_writeback_sync;
}
@@ -2662,6 +2765,9 @@ static int __init bcache_init(void)
static const struct attribute *files[] = {
&ksysfs_register.attr,
&ksysfs_register_quiet.attr,
+#ifdef CONFIG_BCACHE_ASYNC_REGISTRAION
+ &ksysfs_register_async.attr,
+#endif
&ksysfs_pendings_cleanup.attr,
NULL
};
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 323276994aab..0dadec5a78f6 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -421,7 +421,7 @@ STORE(__cached_dev)
return size;
}
if (v == -ENOENT)
- pr_err("Can't attach %s: cache set not found", buf);
+ pr_err("Can't attach %s: cache set not found\n", buf);
return v;
}
@@ -455,7 +455,7 @@ STORE(bch_cached_dev)
*/
if (dc->writeback_running) {
dc->writeback_running = false;
- pr_err("%s: failed to run non-existent writeback thread",
+ pr_err("%s: failed to run non-existent writeback thread\n",
dc->disk.disk->disk_name);
}
} else
@@ -872,11 +872,11 @@ STORE(__bch_cache_set)
if (v) {
if (test_and_set_bit(CACHE_SET_IO_DISABLE,
&c->flags))
- pr_warn("CACHE_SET_IO_DISABLE already set");
+ pr_warn("CACHE_SET_IO_DISABLE already set\n");
} else {
if (!test_and_clear_bit(CACHE_SET_IO_DISABLE,
&c->flags))
- pr_warn("CACHE_SET_IO_DISABLE already cleared");
+ pr_warn("CACHE_SET_IO_DISABLE already cleared\n");
}
}
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 3f7641fb28d5..1cf1e5016cb9 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -809,7 +809,7 @@ static int bch_root_node_dirty_init(struct cache_set *c,
schedule_timeout_interruptible(
msecs_to_jiffies(INIT_KEYS_SLEEP_MS));
else if (ret < 0) {
- pr_warn("sectors dirty init failed, ret=%d!", ret);
+ pr_warn("sectors dirty init failed, ret=%d!\n", ret);
break;
}
} while (ret == -EAGAIN);
@@ -917,7 +917,7 @@ void bch_sectors_dirty_init(struct bcache_device *d)
state = kzalloc(sizeof(struct bch_dirty_init_state), GFP_KERNEL);
if (!state) {
- pr_warn("sectors dirty init failed: cannot allocate memory");
+ pr_warn("sectors dirty init failed: cannot allocate memory\n");
return;
}
@@ -945,7 +945,7 @@ void bch_sectors_dirty_init(struct bcache_device *d)
&state->infos[i],
name);
if (IS_ERR(state->infos[i].thread)) {
- pr_err("fails to run thread bch_dirty_init[%d]", i);
+ pr_err("fails to run thread bch_dirty_init[%d]\n", i);
for (--i; i >= 0; i--)
kthread_stop(state->infos[i].thread);
goto out;
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 2d519c223562..6d1565021d74 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -256,12 +256,35 @@ static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
if (b->block == block)
return b;
- n = (b->block < block) ? n->rb_left : n->rb_right;
+ n = block < b->block ? n->rb_left : n->rb_right;
}
return NULL;
}
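+
+/*
+ * Find the buffer for @block, or if it is not cached, the buffer holding
+ * the next higher block number.
+ */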
+static struct dm_buffer *__find_next(struct dm_bufio_client *c, sector_t block)
+{
+ struct rb_node *n = c->buffer_tree.rb_node;
+ struct dm_buffer *b;
+ struct dm_buffer *best = NULL;
+
+ while (n) {
+ b = container_of(n, struct dm_buffer, node);
+
+ if (b->block == block)
+ return b;
+
+ if (block <= b->block) {
+ n = n->rb_left;
+ best = b;
+ } else {
+ n = n->rb_right;
+ }
+ }
+
+ return best;
+}
+
static void __insert(struct dm_bufio_client *c, struct dm_buffer *b)
{
struct rb_node **new = &c->buffer_tree.rb_node, *parent = NULL;
@@ -276,8 +299,8 @@ static void __insert(struct dm_bufio_client *c, struct dm_buffer *b)
}
parent = *new;
- new = (found->block < b->block) ?
- &((*new)->rb_left) : &((*new)->rb_right);
+ new = b->block < found->block ?
+ &found->node.rb_left : &found->node.rb_right;
}
rb_link_node(&b->node, parent, new);
@@ -400,13 +423,13 @@ static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
*/
if (gfp_mask & __GFP_NORETRY) {
unsigned noio_flag = memalloc_noio_save();
- void *ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
+ void *ptr = __vmalloc(c->block_size, gfp_mask);
memalloc_noio_restore(noio_flag);
return ptr;
}
- return __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
+ return __vmalloc(c->block_size, gfp_mask);
}
/*
@@ -631,6 +654,19 @@ dmio:
submit_bio(bio);
}
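+
+/* Translate a buffer block number into a sector offset on the underlying device */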
+static inline sector_t block_to_sector(struct dm_bufio_client *c, sector_t block)
+{
+ sector_t sector;
+
+ if (likely(c->sectors_per_block_bits >= 0))
+ sector = block << c->sectors_per_block_bits;
+ else
+ sector = block * (c->block_size >> SECTOR_SHIFT);
+ sector += c->start;
+
+ return sector;
+}
+
static void submit_io(struct dm_buffer *b, int rw, void (*end_io)(struct dm_buffer *, blk_status_t))
{
unsigned n_sectors;
@@ -639,11 +675,7 @@ static void submit_io(struct dm_buffer *b, int rw, void (*end_io)(struct dm_buff
b->end_io = end_io;
- if (likely(b->c->sectors_per_block_bits >= 0))
- sector = b->block << b->c->sectors_per_block_bits;
- else
- sector = b->block * (b->c->block_size >> SECTOR_SHIFT);
- sector += b->c->start;
+ sector = block_to_sector(b->c, b->block);
if (rw != REQ_OP_WRITE) {
n_sectors = b->c->block_size >> SECTOR_SHIFT;
@@ -1326,6 +1358,30 @@ int dm_bufio_issue_flush(struct dm_bufio_client *c)
EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);
/*
+ * Use dm-io to send a discard request to the device.
+ */
+int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t count)
+{
+ struct dm_io_request io_req = {
+ .bi_op = REQ_OP_DISCARD,
+ .bi_op_flags = REQ_SYNC,
+ .mem.type = DM_IO_KMEM,
+ .mem.ptr.addr = NULL,
+ .client = c->dm_io,
+ };
+ struct dm_io_region io_reg = {
+ .bdev = c->bdev,
+ .sector = block_to_sector(c, block),
+ .count = block_to_sector(c, count),
+ };
+
+ BUG_ON(dm_bufio_in_request());
+
+ return dm_io(&io_req, 1, &io_reg, NULL);
+}
+EXPORT_SYMBOL_GPL(dm_bufio_issue_discard);
+
+/*
* We first delete any other buffer that may be at that new location.
*
* Then, we write the buffer to the original location if it was dirty.
@@ -1401,6 +1457,14 @@ retry:
}
EXPORT_SYMBOL_GPL(dm_bufio_release_move);
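+
+/* Unlink and free @b if it is unused and idle; called with the client lock held */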
+static void forget_buffer_locked(struct dm_buffer *b)
+{
+ if (likely(!b->hold_count) && likely(!b->state)) {
+ __unlink_buffer(b);
+ __free_buffer_wake(b);
+ }
+}
+
/*
* Free the given buffer.
*
@@ -1414,15 +1478,36 @@ void dm_bufio_forget(struct dm_bufio_client *c, sector_t block)
dm_bufio_lock(c);
b = __find(c, block);
- if (b && likely(!b->hold_count) && likely(!b->state)) {
- __unlink_buffer(b);
- __free_buffer_wake(b);
- }
+ if (b)
+ forget_buffer_locked(b);
dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_forget);
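+
+/*
+ * Forget buffers in the range [block, block + n_blocks); buffers that are
+ * still held or not yet clean are skipped.
+ */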
+void dm_bufio_forget_buffers(struct dm_bufio_client *c, sector_t block, sector_t n_blocks)
+{
+ struct dm_buffer *b;
+ sector_t end_block = block + n_blocks;
+
+ while (block < end_block) {
+ dm_bufio_lock(c);
+
+ b = __find_next(c, block);
+ if (b) {
+ block = b->block + 1;
+ forget_buffer_locked(b);
+ }
+
+ dm_bufio_unlock(c);
+
+ if (!b)
+ break;
+ }
+
+}
+EXPORT_SYMBOL_GPL(dm_bufio_forget_buffers);
+
void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n)
{
c->minimum_buffers = n;
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 3df90daba89e..000ddfab5ba0 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -34,7 +34,9 @@
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/rtnetlink.h> /* for struct rtattr and RTA macros only */
+#include <linux/key-type.h>
#include <keys/user-type.h>
+#include <keys/encrypted-type.h>
#include <linux/device-mapper.h>
@@ -212,7 +214,7 @@ struct crypt_config {
struct mutex bio_alloc_lock;
u8 *authenc_key; /* space for keys in authenc() format (if used) */
- u8 key[0];
+ u8 key[];
};
#define MIN_IOS 64
@@ -2215,12 +2217,47 @@ static bool contains_whitespace(const char *str)
return false;
}
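+
+/* Copy key material from a 'logon' or 'user' type kernel key into the crypt config */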
+static int set_key_user(struct crypt_config *cc, struct key *key)
+{
+ const struct user_key_payload *ukp;
+
+ ukp = user_key_payload_locked(key);
+ if (!ukp)
+ return -EKEYREVOKED;
+
+ if (cc->key_size != ukp->datalen)
+ return -EINVAL;
+
+ memcpy(cc->key, ukp->data, cc->key_size);
+
+ return 0;
+}
+
+#if defined(CONFIG_ENCRYPTED_KEYS) || defined(CONFIG_ENCRYPTED_KEYS_MODULE)
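+/* Copy the decrypted payload of an 'encrypted' type kernel key into the crypt config */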
+static int set_key_encrypted(struct crypt_config *cc, struct key *key)
+{
+ const struct encrypted_key_payload *ekp;
+
+ ekp = key->payload.data[0];
+ if (!ekp)
+ return -EKEYREVOKED;
+
+ if (cc->key_size != ekp->decrypted_datalen)
+ return -EINVAL;
+
+ memcpy(cc->key, ekp->decrypted_data, cc->key_size);
+
+ return 0;
+}
+#endif /* CONFIG_ENCRYPTED_KEYS */
+
static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string)
{
char *new_key_string, *key_desc;
int ret;
+ struct key_type *type;
struct key *key;
- const struct user_key_payload *ukp;
+ int (*set_key)(struct crypt_config *cc, struct key *key);
/*
* Reject key_string with whitespace. dm core currently lacks code for
@@ -2236,16 +2273,26 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string
if (!key_desc || key_desc == key_string || !strlen(key_desc + 1))
return -EINVAL;
- if (strncmp(key_string, "logon:", key_desc - key_string + 1) &&
- strncmp(key_string, "user:", key_desc - key_string + 1))
+ if (!strncmp(key_string, "logon:", key_desc - key_string + 1)) {
+ type = &key_type_logon;
+ set_key = set_key_user;
+ } else if (!strncmp(key_string, "user:", key_desc - key_string + 1)) {
+ type = &key_type_user;
+ set_key = set_key_user;
+#if defined(CONFIG_ENCRYPTED_KEYS) || defined(CONFIG_ENCRYPTED_KEYS_MODULE)
+ } else if (!strncmp(key_string, "encrypted:", key_desc - key_string + 1)) {
+ type = &key_type_encrypted;
+ set_key = set_key_encrypted;
+#endif
+ } else {
return -EINVAL;
+ }
new_key_string = kstrdup(key_string, GFP_KERNEL);
if (!new_key_string)
return -ENOMEM;
- key = request_key(key_string[0] == 'l' ? &key_type_logon : &key_type_user,
- key_desc + 1, NULL);
+ key = request_key(type, key_desc + 1, NULL);
if (IS_ERR(key)) {
kzfree(new_key_string);
return PTR_ERR(key);
@@ -2253,23 +2300,14 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string
down_read(&key->sem);
- ukp = user_key_payload_locked(key);
- if (!ukp) {
- up_read(&key->sem);
- key_put(key);
- kzfree(new_key_string);
- return -EKEYREVOKED;
- }
-
- if (cc->key_size != ukp->datalen) {
+ ret = set_key(cc, key);
+ if (ret < 0) {
up_read(&key->sem);
key_put(key);
kzfree(new_key_string);
- return -EINVAL;
+ return ret;
}
- memcpy(cc->key, ukp->data, cc->key_size);
-
up_read(&key->sem);
key_put(key);
@@ -2323,7 +2361,7 @@ static int get_key_size(char **key_string)
return (*key_string[0] == ':') ? -EINVAL : strlen(*key_string) >> 1;
}
-#endif
+#endif /* CONFIG_KEYS */
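With this change the keyring key reference ":<key_size>:<key_type>:<key_description>" accepts "encrypted" as <key_type> in addition to "logon" and "user". An illustrative crypt table line (device, length and key name are made up; it assumes an encrypted-type kernel key called "cryptkey" with a 64-byte decrypted payload has already been loaded):

0 2097152 crypt aes-xts-plain64 :64:encrypted:cryptkey 0 /dev/sdb 0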
static int crypt_set_key(struct crypt_config *cc, char *key)
{
@@ -3274,7 +3312,7 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
limits->max_segment_size = PAGE_SIZE;
limits->logical_block_size =
- max_t(unsigned short, limits->logical_block_size, cc->sector_size);
+ max_t(unsigned, limits->logical_block_size, cc->sector_size);
limits->physical_block_size =
max_t(unsigned, limits->physical_block_size, cc->sector_size);
limits->io_min = max_t(unsigned, limits->io_min, cc->sector_size);
@@ -3282,7 +3320,7 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
static struct target_type crypt_target = {
.name = "crypt",
- .version = {1, 20, 0},
+ .version = {1, 21, 0},
.module = THIS_MODULE,
.ctr = crypt_ctr,
.dtr = crypt_dtr,
diff --git a/drivers/md/dm-ebs-target.c b/drivers/md/dm-ebs-target.c
new file mode 100644
index 000000000000..44451276f128
--- /dev/null
+++ b/drivers/md/dm-ebs-target.c
@@ -0,0 +1,471 @@
+/*
+ * Copyright (C) 2020 Red Hat GmbH
+ *
+ * This file is released under the GPL.
+ *
+ * Device-mapper target to emulate smaller logical block
+ * size on backing devices exposing (natively) larger ones.
+ *
+ * E.g. 512-byte sector emulation on 4K native disks.
+ */
+
+#include "dm.h"
+#include <linux/module.h>
+#include <linux/workqueue.h>
+#include <linux/dm-bufio.h>
+
+#define DM_MSG_PREFIX "ebs"
+
+static void ebs_dtr(struct dm_target *ti);
+
+/* Emulated block size context. */
+struct ebs_c {
+ struct dm_dev *dev; /* Underlying device to emulate block size on. */
+ struct dm_bufio_client *bufio; /* Use dm-bufio for read and read-modify-write processing. */
+ struct workqueue_struct *wq; /* Workqueue for ^ processing of bios. */
+ struct work_struct ws; /* Work item used for ^. */
+ struct bio_list bios_in; /* Worker bios input list. */
+ spinlock_t lock; /* Guard bios input list above. */
+ sector_t start; /* <start> table line argument, see ebs_ctr below. */
+ unsigned int e_bs; /* Emulated block size in sectors exposed to upper layer. */
+ unsigned int u_bs; /* Underlying block size in sectors retrieved from/set on lower layer device. */
+ unsigned char block_shift; /* bitshift sectors -> blocks used in dm-bufio API. */
+ bool u_bs_set:1; /* Flag to indicate underlying block size is set on table line. */
+};
+
+static inline sector_t __sector_to_block(struct ebs_c *ec, sector_t sector)
+{
+ return sector >> ec->block_shift;
+}
+
+static inline sector_t __block_mod(sector_t sector, unsigned int bs)
+{
+ return sector & (bs - 1);
+}
+
+/* Return number of blocks for a bio, accounting for misalignment of start and end sectors. */
+static inline unsigned int __nr_blocks(struct ebs_c *ec, struct bio *bio)
+{
+ sector_t end_sector = __block_mod(bio->bi_iter.bi_sector, ec->u_bs) + bio_sectors(bio);
+
+ return __sector_to_block(ec, end_sector) + (__block_mod(end_sector, ec->u_bs) ? 1 : 0);
+}
+
+static inline bool __ebs_check_bs(unsigned int bs)
+{
+ return bs && is_power_of_2(bs);
+}
+
+/*
+ * READ/WRITE:
+ *
+ * Copy data between bufio blocks and a bio vector's (partial/overlapping) pages.
+ */
+static int __ebs_rw_bvec(struct ebs_c *ec, int rw, struct bio_vec *bv, struct bvec_iter *iter)
+{
+ int r = 0;
+ unsigned char *ba, *pa;
+ unsigned int cur_len;
+ unsigned int bv_len = bv->bv_len;
+ unsigned int buf_off = to_bytes(__block_mod(iter->bi_sector, ec->u_bs));
+ sector_t block = __sector_to_block(ec, iter->bi_sector);
+ struct dm_buffer *b;
+
+ if (unlikely(!bv->bv_page || !bv_len))
+ return -EIO;
+
+ pa = page_address(bv->bv_page) + bv->bv_offset;
+
+ /* Handle overlapping page <-> blocks */
+ while (bv_len) {
+ cur_len = min(dm_bufio_get_block_size(ec->bufio) - buf_off, bv_len);
+
+ /* Avoid reading for writes if the bio vector's page overwrites the block completely. */
+ if (rw == READ || buf_off || bv_len < dm_bufio_get_block_size(ec->bufio))
+ ba = dm_bufio_read(ec->bufio, block, &b);
+ else
+ ba = dm_bufio_new(ec->bufio, block, &b);
+
+ if (unlikely(IS_ERR(ba))) {
+ /*
+ * Carry on with next buffer, if any, to issue all possible
+ * data but return error.
+ */
+ r = PTR_ERR(ba);
+ } else {
+ /* Copy data to/from bio to buffer if read/new was successful above. */
+ ba += buf_off;
+ if (rw == READ) {
+ memcpy(pa, ba, cur_len);
+ flush_dcache_page(bv->bv_page);
+ } else {
+ flush_dcache_page(bv->bv_page);
+ memcpy(ba, pa, cur_len);
+ dm_bufio_mark_partial_buffer_dirty(b, buf_off, buf_off + cur_len);
+ }
+
+ dm_bufio_release(b);
+ }
+
+ pa += cur_len;
+ bv_len -= cur_len;
+ buf_off = 0;
+ block++;
+ }
+
+ return r;
+}
+
+/* READ/WRITE: iterate the bio's vectors, copying between (partial) pages and bufio blocks. */
+static int __ebs_rw_bio(struct ebs_c *ec, int rw, struct bio *bio)
+{
+ int r = 0, rr;
+ struct bio_vec bv;
+ struct bvec_iter iter;
+
+ bio_for_each_bvec(bv, bio, iter) {
+ rr = __ebs_rw_bvec(ec, rw, &bv, &iter);
+ if (rr)
+ r = rr;
+ }
+
+ return r;
+}
+
+/*
+ * Discard bio's blocks, i.e. pass discards down.
+ *
+ * Avoid discarding partial blocks at beginning and end;
+ * return 0 in case no blocks can be discarded as a result.
+ */
+static int __ebs_discard_bio(struct ebs_c *ec, struct bio *bio)
+{
+ sector_t block, blocks, sector = bio->bi_iter.bi_sector;
+
+ block = __sector_to_block(ec, sector);
+ blocks = __nr_blocks(ec, bio);
+
+ /*
+ * Partial first underlying block (__nr_blocks() may have
+ * resulted in one block).
+ */
+ if (__block_mod(sector, ec->u_bs)) {
+ block++;
+ blocks--;
+ }
+
+ /* Partial last underlying block if any. */
+ if (blocks && __block_mod(bio_end_sector(bio), ec->u_bs))
+ blocks--;
+
+ return blocks ? dm_bufio_issue_discard(ec->bufio, block, blocks) : 0;
+}
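A worked example of the trimming above (numbers are illustrative): with u_bs = 8 sectors, a discard of 18 sectors starting at sector 3 (i.e. sectors 3..20) is handled as

block  = 3 >> 3 = 0,  blocks = __nr_blocks() = 3    /* spans underlying blocks 0..2 */
first block partial (3 & 7 != 0)   -> block = 1, blocks = 2
last block partial  (21 & 7 != 0)  -> blocks = 1
=> dm_bufio_issue_discard(bufio, 1, 1)              /* sectors 8..15 only */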
+
+/* Release the bio's blocks from the bufio cache. */
+static void __ebs_forget_bio(struct ebs_c *ec, struct bio *bio)
+{
+ sector_t blocks, sector = bio->bi_iter.bi_sector;
+
+ blocks = __nr_blocks(ec, bio);
+
+ dm_bufio_forget_buffers(ec->bufio, __sector_to_block(ec, sector), blocks);
+}
+
+/* Worker function to process incoming bios. */
+static void __ebs_process_bios(struct work_struct *ws)
+{
+ int r;
+ bool write = false;
+ sector_t block1, block2;
+ struct ebs_c *ec = container_of(ws, struct ebs_c, ws);
+ struct bio *bio;
+ struct bio_list bios;
+
+ bio_list_init(&bios);
+
+ spin_lock_irq(&ec->lock);
+ bios = ec->bios_in;
+ bio_list_init(&ec->bios_in);
+ spin_unlock_irq(&ec->lock);
+
+ /* Prefetch all read and any mis-aligned write buffers */
+ bio_list_for_each(bio, &bios) {
+ block1 = __sector_to_block(ec, bio->bi_iter.bi_sector);
+ if (bio_op(bio) == REQ_OP_READ)
+ dm_bufio_prefetch(ec->bufio, block1, __nr_blocks(ec, bio));
+ else if (bio_op(bio) == REQ_OP_WRITE && !(bio->bi_opf & REQ_PREFLUSH)) {
+ block2 = __sector_to_block(ec, bio_end_sector(bio));
+ if (__block_mod(bio->bi_iter.bi_sector, ec->u_bs))
+ dm_bufio_prefetch(ec->bufio, block1, 1);
+ if (__block_mod(bio_end_sector(bio), ec->u_bs) && block2 != block1)
+ dm_bufio_prefetch(ec->bufio, block2, 1);
+ }
+ }
+
+ bio_list_for_each(bio, &bios) {
+ r = -EIO;
+ if (bio_op(bio) == REQ_OP_READ)
+ r = __ebs_rw_bio(ec, READ, bio);
+ else if (bio_op(bio) == REQ_OP_WRITE) {
+ write = true;
+ r = __ebs_rw_bio(ec, WRITE, bio);
+ } else if (bio_op(bio) == REQ_OP_DISCARD) {
+ __ebs_forget_bio(ec, bio);
+ r = __ebs_discard_bio(ec, bio);
+ }
+
+ if (r < 0)
+ bio->bi_status = errno_to_blk_status(r);
+ }
+
+ /*
+ * We write dirty buffers after processing I/O on them
+ * but before we endio them, thus addressing REQ_FUA/REQ_SYNC.
+ */
+ r = write ? dm_bufio_write_dirty_buffers(ec->bufio) : 0;
+
+ while ((bio = bio_list_pop(&bios))) {
+ /* Any other request is endioed. */
+ if (unlikely(r && bio_op(bio) == REQ_OP_WRITE))
+ bio_io_error(bio);
+ else
+ bio_endio(bio);
+ }
+}
+
+/*
+ * Construct an emulated block size mapping: <dev_path> <offset> <ebs> [<ubs>]
+ *
+ * <dev_path>: path of the underlying device
+ * <offset>: offset in 512-byte sectors into <dev_path>
+ * <ebs>: emulated block size in units of 512 bytes exposed to the upper layer
+ * [<ubs>]: underlying block size in units of 512 bytes imposed on the lower layer;
+ * optional; if not supplied, the logical block size is retrieved from the underlying device
+ */
+static int ebs_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+{
+ int r;
+ unsigned short tmp1;
+ unsigned long long tmp;
+ char dummy;
+ struct ebs_c *ec;
+
+ if (argc < 3 || argc > 4) {
+ ti->error = "Invalid argument count";
+ return -EINVAL;
+ }
+
+ ec = ti->private = kzalloc(sizeof(*ec), GFP_KERNEL);
+ if (!ec) {
+ ti->error = "Cannot allocate ebs context";
+ return -ENOMEM;
+ }
+
+ r = -EINVAL;
+ if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1 ||
+ tmp != (sector_t)tmp ||
+ (sector_t)tmp >= ti->len) {
+ ti->error = "Invalid device offset sector";
+ goto bad;
+ }
+ ec->start = tmp;
+
+ if (sscanf(argv[2], "%hu%c", &tmp1, &dummy) != 1 ||
+ !__ebs_check_bs(tmp1) ||
+ to_bytes(tmp1) > PAGE_SIZE) {
+ ti->error = "Invalid emulated block size";
+ goto bad;
+ }
+ ec->e_bs = tmp1;
+
+ if (argc > 3) {
+ if (sscanf(argv[3], "%hu%c", &tmp1, &dummy) != 1 || !__ebs_check_bs(tmp1)) {
+ ti->error = "Invalid underlying block size";
+ goto bad;
+ }
+ ec->u_bs = tmp1;
+ ec->u_bs_set = true;
+ } else
+ ec->u_bs_set = false;
+
+ r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ec->dev);
+ if (r) {
+ ti->error = "Device lookup failed";
+ ec->dev = NULL;
+ goto bad;
+ }
+
+ r = -EINVAL;
+ if (!ec->u_bs_set) {
+ ec->u_bs = to_sector(bdev_logical_block_size(ec->dev->bdev));
+ if (!__ebs_check_bs(ec->u_bs)) {
+ ti->error = "Invalid retrieved underlying block size";
+ goto bad;
+ }
+ }
+
+ if (!ec->u_bs_set && ec->e_bs == ec->u_bs)
+ DMINFO("Emulation superfluous: emulated equal to underlying block size");
+
+ if (__block_mod(ec->start, ec->u_bs)) {
+ ti->error = "Device offset must be multiple of underlying block size";
+ goto bad;
+ }
+
+ ec->bufio = dm_bufio_client_create(ec->dev->bdev, to_bytes(ec->u_bs), 1, 0, NULL, NULL);
+ if (IS_ERR(ec->bufio)) {
+ ti->error = "Cannot create dm bufio client";
+ r = PTR_ERR(ec->bufio);
+ ec->bufio = NULL;
+ goto bad;
+ }
+
+ ec->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
+ if (!ec->wq) {
+ ti->error = "Cannot create dm-" DM_MSG_PREFIX " workqueue";
+ r = -ENOMEM;
+ goto bad;
+ }
+
+ ec->block_shift = __ffs(ec->u_bs);
+ INIT_WORK(&ec->ws, &__ebs_process_bios);
+ bio_list_init(&ec->bios_in);
+ spin_lock_init(&ec->lock);
+
+ ti->num_flush_bios = 1;
+ ti->num_discard_bios = 1;
+ ti->num_secure_erase_bios = 0;
+ ti->num_write_same_bios = 0;
+ ti->num_write_zeroes_bios = 0;
+ return 0;
+bad:
+ ebs_dtr(ti);
+ return r;
+}
+
+static void ebs_dtr(struct dm_target *ti)
+{
+ struct ebs_c *ec = ti->private;
+
+ if (ec->wq)
+ destroy_workqueue(ec->wq);
+ if (ec->bufio)
+ dm_bufio_client_destroy(ec->bufio);
+ if (ec->dev)
+ dm_put_device(ti, ec->dev);
+ kfree(ec);
+}
+
+static int ebs_map(struct dm_target *ti, struct bio *bio)
+{
+ struct ebs_c *ec = ti->private;
+
+ bio_set_dev(bio, ec->dev->bdev);
+ bio->bi_iter.bi_sector = ec->start + dm_target_offset(ti, bio->bi_iter.bi_sector);
+
+ if (unlikely(bio->bi_opf & REQ_OP_FLUSH))
+ return DM_MAPIO_REMAPPED;
+ /*
+ * Only queue for bufio processing in case of partial or overlapping buffers
+ * -or-
+ * emulation with ebs == ubs, aimed at testing dm-bufio overhead.
+ */
+ if (likely(__block_mod(bio->bi_iter.bi_sector, ec->u_bs) ||
+ __block_mod(bio_end_sector(bio), ec->u_bs) ||
+ ec->e_bs == ec->u_bs)) {
+ spin_lock_irq(&ec->lock);
+ bio_list_add(&ec->bios_in, bio);
+ spin_unlock_irq(&ec->lock);
+
+ queue_work(ec->wq, &ec->ws);
+
+ return DM_MAPIO_SUBMITTED;
+ }
+
+ /* Forget any buffer content relative to this direct backing device I/O. */
+ __ebs_forget_bio(ec, bio);
+
+ return DM_MAPIO_REMAPPED;
+}
+
+static void ebs_status(struct dm_target *ti, status_type_t type,
+ unsigned status_flags, char *result, unsigned maxlen)
+{
+ struct ebs_c *ec = ti->private;
+
+ switch (type) {
+ case STATUSTYPE_INFO:
+ *result = '\0';
+ break;
+ case STATUSTYPE_TABLE:
+ snprintf(result, maxlen, ec->u_bs_set ? "%s %llu %u %u" : "%s %llu %u",
+ ec->dev->name, (unsigned long long) ec->start, ec->e_bs, ec->u_bs);
+ break;
+ }
+}
+
+static int ebs_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
+{
+ struct ebs_c *ec = ti->private;
+ struct dm_dev *dev = ec->dev;
+
+ /*
+ * Only pass ioctls through if the device sizes match exactly.
+ */
+ *bdev = dev->bdev;
+ return !!(ec->start || ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT);
+}
+
+static void ebs_io_hints(struct dm_target *ti, struct queue_limits *limits)
+{
+ struct ebs_c *ec = ti->private;
+
+ limits->logical_block_size = to_bytes(ec->e_bs);
+ limits->physical_block_size = to_bytes(ec->u_bs);
+ limits->alignment_offset = limits->physical_block_size;
+ blk_limits_io_min(limits, limits->logical_block_size);
+}
+
+static int ebs_iterate_devices(struct dm_target *ti,
+ iterate_devices_callout_fn fn, void *data)
+{
+ struct ebs_c *ec = ti->private;
+
+ return fn(ti, ec->dev, ec->start, ti->len, data);
+}
+
+static struct target_type ebs_target = {
+ .name = "ebs",
+ .version = {1, 0, 1},
+ .features = DM_TARGET_PASSES_INTEGRITY,
+ .module = THIS_MODULE,
+ .ctr = ebs_ctr,
+ .dtr = ebs_dtr,
+ .map = ebs_map,
+ .status = ebs_status,
+ .io_hints = ebs_io_hints,
+ .prepare_ioctl = ebs_prepare_ioctl,
+ .iterate_devices = ebs_iterate_devices,
+};
+
+static int __init dm_ebs_init(void)
+{
+ int r = dm_register_target(&ebs_target);
+
+ if (r < 0)
+ DMERR("register failed %d", r);
+
+ return r;
+}
+
+static void dm_ebs_exit(void)
+{
+ dm_unregister_target(&ebs_target);
+}
+
+module_init(dm_ebs_init);
+module_exit(dm_ebs_exit);
+
+MODULE_AUTHOR("Heinz Mauelshagen <dm-devel@redhat.com>");
+MODULE_DESCRIPTION(DM_NAME " emulated block size target");
+MODULE_LICENSE("GPL");
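An illustrative table line (hypothetical device and length) emulating 512-byte logical blocks on a 4 KiB-native disk, mapping 1 GiB from the start of /dev/sdb:

dmsetup create ebs0 --table "0 2097152 ebs /dev/sdb 0 1 8"

Here <offset> = 0, <ebs> = 1 (512 bytes) and <ubs> = 8 (4096 bytes), all in 512-byte sectors; dropping the trailing 8 lets the target read the underlying logical block size from the device instead.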
diff --git a/drivers/md/dm-historical-service-time.c b/drivers/md/dm-historical-service-time.c
new file mode 100644
index 000000000000..186f91e2752c
--- /dev/null
+++ b/drivers/md/dm-historical-service-time.c
@@ -0,0 +1,561 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Historical Service Time
+ *
+ * Keeps a time-weighted exponential moving average of the historical
+ * service time. Estimates future service time based on the historical
+ * service time and the number of outstanding requests.
+ *
+ * Marks paths stale if they have not finished within hst *
+ * num_paths. If a path is stale and unused, we will send a single
+ * request to probe in case the path has improved. This situation
+ * generally arises if the path is so much worse than others that it
+ * will never have the best estimated service time, or if the entire
+ * multipath device is unused. If a path is stale and in use, limit the
+ * number of requests it can receive with the assumption that the path
+ * has become degraded.
+ *
+ * To avoid repeatedly calculating exponents for time weighting, times
+ * are split into HST_WEIGHT_COUNT buckets, each (1 << HST_BUCKET_SHIFT)
+ * ns, and the weighting is pre-calculated.
+ *
+ */
+
+#include "dm.h"
+#include "dm-path-selector.h"
+
+#include <linux/blkdev.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+
+#define DM_MSG_PREFIX "multipath historical-service-time"
+#define HST_MIN_IO 1
+#define HST_VERSION "0.1.1"
+
+#define HST_FIXED_SHIFT 10 /* 10 bits of decimal precision */
+#define HST_FIXED_MAX (ULLONG_MAX >> HST_FIXED_SHIFT)
+#define HST_FIXED_1 (1 << HST_FIXED_SHIFT)
+#define HST_FIXED_95 972
+#define HST_MAX_INFLIGHT HST_FIXED_1
+#define HST_BUCKET_SHIFT 24 /* Buckets are ~ 16ms */
+#define HST_WEIGHT_COUNT 64ULL
+
+struct selector {
+ struct list_head valid_paths;
+ struct list_head failed_paths;
+ int valid_count;
+ spinlock_t lock;
+
+ unsigned int weights[HST_WEIGHT_COUNT];
+ unsigned int threshold_multiplier;
+};
+
+struct path_info {
+ struct list_head list;
+ struct dm_path *path;
+ unsigned int repeat_count;
+
+ spinlock_t lock;
+
+ u64 historical_service_time; /* Fixed point */
+
+ u64 stale_after;
+ u64 last_finish;
+
+ u64 outstanding;
+};
+
+/**
+ * fixed_power - compute: x^n, in O(log n) time
+ *
+ * @x: base of the power
+ * @frac_bits: fractional bits of @x
+ * @n: power to raise @x to.
+ *
+ * By exploiting the relation between the definition of the natural power
+ * function: x^n := x*x*...*x (x multiplied by itself for n times), and
+ * the binary encoding of numbers used by computers: n := \Sum n_i * 2^i,
+ * (where: n_i \elem {0, 1}, the binary vector representing n),
+ * we find: x^n := x^(\Sum n_i * 2^i) := \Prod x^(n_i * 2^i), which is
+ * of course trivially computable in O(log_2 n), the length of our binary
+ * vector.
+ *
+ * (see: kernel/sched/loadavg.c)
+ */
+static u64 fixed_power(u64 x, unsigned int frac_bits, unsigned int n)
+{
+ unsigned long result = 1UL << frac_bits;
+
+ if (n) {
+ for (;;) {
+ if (n & 1) {
+ result *= x;
+ result += 1UL << (frac_bits - 1);
+ result >>= frac_bits;
+ }
+ n >>= 1;
+ if (!n)
+ break;
+ x *= x;
+ x += 1UL << (frac_bits - 1);
+ x >>= frac_bits;
+ }
+ }
+
+ return result;
+}
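As a worked check of the fixed-point arithmetic (HST_FIXED_SHIFT = 10, so 1.0 is stored as 1024): cubing the default base weight HST_FIXED_95 = 972 (~0.949) with this routine gives

fixed_power(972, 10, 3) = 876,   i.e. 876/1024 ~= 0.855, matching (972/1024)^3 ~= 0.8553.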
+
+/*
+ * Calculate the next value of an exponential moving average
+ * a_1 = a_0 * e + a * (1 - e)
+ *
+ * @last: [0, ULLONG_MAX >> HST_FIXED_SHIFT]
+ * @next: [0, ULLONG_MAX >> HST_FIXED_SHIFT]
+ * @weight: [0, HST_FIXED_1]
+ *
+ * Note:
+ * To account for multiple periods in the same calculation,
+ * a_n = a_0 * e^n + a * (1 - e^n),
+ * so call fixed_ema(last, next, pow(weight, N))
+ */
+static u64 fixed_ema(u64 last, u64 next, u64 weight)
+{
+ last *= weight;
+ last += next * (HST_FIXED_1 - weight);
+ last += 1ULL << (HST_FIXED_SHIFT - 1);
+ return last >> HST_FIXED_SHIFT;
+}
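For example, with the default weight of 972 (~0.949), a previous estimate of 2.0 (2048 in fixed point) and a new sample of 1.0 (1024) combine as

fixed_ema(2048, 1024, 972) = (2048 * 972 + 1024 * (1024 - 972) + 512) >> 10 = 1996

i.e. roughly 1.949: the history dominates a single sample, and repeated 1.0 samples decay the estimate towards 1.0.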
+
+static struct selector *alloc_selector(void)
+{
+ struct selector *s = kmalloc(sizeof(*s), GFP_KERNEL);
+
+ if (s) {
+ INIT_LIST_HEAD(&s->valid_paths);
+ INIT_LIST_HEAD(&s->failed_paths);
+ spin_lock_init(&s->lock);
+ s->valid_count = 0;
+ }
+
+ return s;
+}
+
+/*
+ * Get the weight for a given time span.
+ */
+static u64 hst_weight(struct path_selector *ps, u64 delta)
+{
+ struct selector *s = ps->context;
+ int bucket = clamp(delta >> HST_BUCKET_SHIFT, 0ULL,
+ HST_WEIGHT_COUNT - 1);
+
+ return s->weights[bucket];
+}
+
+/*
+ * Set up the weights array.
+ *
+ * weights[len-1] = 0
+ * weights[n] = base ^ (n + 1)
+ */
+static void hst_set_weights(struct path_selector *ps, unsigned int base)
+{
+ struct selector *s = ps->context;
+ int i;
+
+ if (base >= HST_FIXED_1)
+ return;
+
+ for (i = 0; i < HST_WEIGHT_COUNT - 1; i++)
+ s->weights[i] = fixed_power(base, HST_FIXED_SHIFT, i + 1);
+ s->weights[HST_WEIGHT_COUNT - 1] = 0;
+}
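Tying the buckets to these weights (default base assumed): a bucket is 2^24 ns ~= 16.8 ms wide, so a completed request whose measured service time is ~50 ms falls into bucket 50000000 >> 24 = 2, and hst_end_io() below scales the old history by weights[2] = fixed_power(972, 10, 3) = 876 (~0.855); service times past 64 buckets (~1.07 s) land on the final weight of 0 and the history is discarded entirely.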
+
+static int hst_create(struct path_selector *ps, unsigned int argc, char **argv)
+{
+ struct selector *s;
+ unsigned int base_weight = HST_FIXED_95;
+ unsigned int threshold_multiplier = 0;
+ char dummy;
+
+ /*
+ * Arguments: [<base_weight> [<threshold_multiplier>]]
+ * <base_weight>: Base weight for ema [0, 1024) 10-bit fixed point. A
+ * value of 0 will completely ignore any history.
+ * If not given, default (HST_FIXED_95) is used.
+ * <threshold_multiplier>: Minimum threshold multiplier for paths to
+ * be considered different. That is, a path is
+ * considered different iff (p1 > N * p2) where p1
+ * is the path with higher service time. A threshold
+ * of 1 or 0 has no effect. Defaults to 0.
+ */
+ if (argc > 2)
+ return -EINVAL;
+
+ if (argc && (sscanf(argv[0], "%u%c", &base_weight, &dummy) != 1 ||
+ base_weight >= HST_FIXED_1)) {
+ return -EINVAL;
+ }
+
+ if (argc > 1 && (sscanf(argv[1], "%u%c",
+ &threshold_multiplier, &dummy) != 1)) {
+ return -EINVAL;
+ }
+
+ s = alloc_selector();
+ if (!s)
+ return -ENOMEM;
+
+ ps->context = s;
+
+ hst_set_weights(ps, base_weight);
+ s->threshold_multiplier = threshold_multiplier;
+ return 0;
+}
+
+static void free_paths(struct list_head *paths)
+{
+ struct path_info *pi, *next;
+
+ list_for_each_entry_safe(pi, next, paths, list) {
+ list_del(&pi->list);
+ kfree(pi);
+ }
+}
+
+static void hst_destroy(struct path_selector *ps)
+{
+ struct selector *s = ps->context;
+
+ free_paths(&s->valid_paths);
+ free_paths(&s->failed_paths);
+ kfree(s);
+ ps->context = NULL;
+}
+
+static int hst_status(struct path_selector *ps, struct dm_path *path,
+ status_type_t type, char *result, unsigned int maxlen)
+{
+ unsigned int sz = 0;
+ struct path_info *pi;
+
+ if (!path) {
+ struct selector *s = ps->context;
+
+ DMEMIT("2 %u %u ", s->weights[0], s->threshold_multiplier);
+ } else {
+ pi = path->pscontext;
+
+ switch (type) {
+ case STATUSTYPE_INFO:
+ DMEMIT("%llu %llu %llu ", pi->historical_service_time,
+ pi->outstanding, pi->stale_after);
+ break;
+ case STATUSTYPE_TABLE:
+ DMEMIT("0 ");
+ break;
+ }
+ }
+
+ return sz;
+}
+
+static int hst_add_path(struct path_selector *ps, struct dm_path *path,
+ int argc, char **argv, char **error)
+{
+ struct selector *s = ps->context;
+ struct path_info *pi;
+ unsigned int repeat_count = HST_MIN_IO;
+ char dummy;
+ unsigned long flags;
+
+ /*
+ * Arguments: [<repeat_count>]
+ * <repeat_count>: The number of I/Os before switching path.
+ * If not given, default (HST_MIN_IO) is used.
+ */
+ if (argc > 1) {
+ *error = "historical-service-time ps: incorrect number of arguments";
+ return -EINVAL;
+ }
+
+ if (argc && (sscanf(argv[0], "%u%c", &repeat_count, &dummy) != 1)) {
+ *error = "historical-service-time ps: invalid repeat count";
+ return -EINVAL;
+ }
+
+ /* allocate the path */
+ pi = kmalloc(sizeof(*pi), GFP_KERNEL);
+ if (!pi) {
+ *error = "historical-service-time ps: Error allocating path context";
+ return -ENOMEM;
+ }
+
+ pi->path = path;
+ pi->repeat_count = repeat_count;
+
+ pi->historical_service_time = HST_FIXED_1;
+
+ spin_lock_init(&pi->lock);
+ pi->outstanding = 0;
+
+ pi->stale_after = 0;
+ pi->last_finish = 0;
+
+ path->pscontext = pi;
+
+ spin_lock_irqsave(&s->lock, flags);
+ list_add_tail(&pi->list, &s->valid_paths);
+ s->valid_count++;
+ spin_unlock_irqrestore(&s->lock, flags);
+
+ return 0;
+}
+
+static void hst_fail_path(struct path_selector *ps, struct dm_path *path)
+{
+ struct selector *s = ps->context;
+ struct path_info *pi = path->pscontext;
+ unsigned long flags;
+
+ spin_lock_irqsave(&s->lock, flags);
+ list_move(&pi->list, &s->failed_paths);
+ s->valid_count--;
+ spin_unlock_irqrestore(&s->lock, flags);
+}
+
+static int hst_reinstate_path(struct path_selector *ps, struct dm_path *path)
+{
+ struct selector *s = ps->context;
+ struct path_info *pi = path->pscontext;
+ unsigned long flags;
+
+ spin_lock_irqsave(&s->lock, flags);
+ list_move_tail(&pi->list, &s->valid_paths);
+ s->valid_count++;
+ spin_unlock_irqrestore(&s->lock, flags);
+
+ return 0;
+}
+
+static void hst_fill_compare(struct path_info *pi, u64 *hst,
+ u64 *out, u64 *stale)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&pi->lock, flags);
+ *hst = pi->historical_service_time;
+ *out = pi->outstanding;
+ *stale = pi->stale_after;
+ spin_unlock_irqrestore(&pi->lock, flags);
+}
+
+/*
+ * Compare the estimated service time of 2 paths, pi1 and pi2,
+ * for the incoming I/O.
+ *
+ * Returns:
+ * < 0 : pi1 is better
+ * 0 : no difference between pi1 and pi2
+ * > 0 : pi2 is better
+ *
+ */
+static long long hst_compare(struct path_info *pi1, struct path_info *pi2,
+ u64 time_now, struct path_selector *ps)
+{
+ struct selector *s = ps->context;
+ u64 hst1, hst2;
+ long long out1, out2, stale1, stale2;
+ int pi2_better, over_threshold;
+
+ hst_fill_compare(pi1, &hst1, &out1, &stale1);
+ hst_fill_compare(pi2, &hst2, &out2, &stale2);
+
+ /* Check if the estimated latencies of the two paths are too similar.
+ * If this is the case, we skip extra calculation and just compare
+ * outstanding requests. In this case, any unloaded paths will
+ * be preferred.
+ */
+ if (hst1 > hst2)
+ over_threshold = hst1 > (s->threshold_multiplier * hst2);
+ else
+ over_threshold = hst2 > (s->threshold_multiplier * hst1);
+
+ if (!over_threshold)
+ return out1 - out2;
+
+ /*
+ * If an unloaded path is stale, choose it. If both paths are unloaded,
+ * choose path that is the most stale.
+ * (If one path is loaded, choose the other)
+ */
+ if ((!out1 && stale1 < time_now) || (!out2 && stale2 < time_now) ||
+ (!out1 && !out2))
+ return (!out2 * stale1) - (!out1 * stale2);
+
+ /* Compare estimated service time. If outstanding is the same, we
+ * don't need to multiply
+ */
+ if (out1 == out2) {
+ pi2_better = hst1 > hst2;
+ } else {
+ /* Potential overflow with out >= 1024 */
+ if (unlikely(out1 >= HST_MAX_INFLIGHT ||
+ out2 >= HST_MAX_INFLIGHT)) {
+ /* If over 1023 in-flights, we may overflow if hst
+ * is at max. (With this shift we still overflow at
+ * 1048576 in-flights, which is high enough).
+ */
+ hst1 >>= HST_FIXED_SHIFT;
+ hst2 >>= HST_FIXED_SHIFT;
+ }
+ pi2_better = (1 + out1) * hst1 > (1 + out2) * hst2;
+ }
+
+ /* In the case that the 'winner' is stale, limit to equal usage. */
+ if (pi2_better) {
+ if (stale2 < time_now)
+ return out1 - out2;
+ return 1;
+ }
+ if (stale1 < time_now)
+ return out1 - out2;
+ return -1;
+}
+
+static struct dm_path *hst_select_path(struct path_selector *ps,
+ size_t nr_bytes)
+{
+ struct selector *s = ps->context;
+ struct path_info *pi = NULL, *best = NULL;
+ u64 time_now = sched_clock();
+ struct dm_path *ret = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&s->lock, flags);
+ if (list_empty(&s->valid_paths))
+ goto out;
+
+ list_for_each_entry(pi, &s->valid_paths, list) {
+ if (!best || (hst_compare(pi, best, time_now, ps) < 0))
+ best = pi;
+ }
+
+ if (!best)
+ goto out;
+
+ /* Move last used path to end (least preferred in case of ties) */
+ list_move_tail(&best->list, &s->valid_paths);
+
+ ret = best->path;
+
+out:
+ spin_unlock_irqrestore(&s->lock, flags);
+ return ret;
+}
+
+static int hst_start_io(struct path_selector *ps, struct dm_path *path,
+ size_t nr_bytes)
+{
+ struct path_info *pi = path->pscontext;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pi->lock, flags);
+ pi->outstanding++;
+ spin_unlock_irqrestore(&pi->lock, flags);
+
+ return 0;
+}
+
+static u64 path_service_time(struct path_info *pi, u64 start_time)
+{
+ u64 sched_now = ktime_get_ns();
+
+ /* if a previous disk request has finished after this IO was
+ * sent to the hardware, pretend the submission happened
+ * serially.
+ */
+ if (time_after64(pi->last_finish, start_time))
+ start_time = pi->last_finish;
+
+ pi->last_finish = sched_now;
+ if (time_before64(sched_now, start_time))
+ return 0;
+
+ return sched_now - start_time;
+}
+
+static int hst_end_io(struct path_selector *ps, struct dm_path *path,
+ size_t nr_bytes, u64 start_time)
+{
+ struct path_info *pi = path->pscontext;
+ struct selector *s = ps->context;
+ unsigned long flags;
+ u64 st;
+
+ spin_lock_irqsave(&pi->lock, flags);
+
+ st = path_service_time(pi, start_time);
+ pi->outstanding--;
+ pi->historical_service_time =
+ fixed_ema(pi->historical_service_time,
+ min(st * HST_FIXED_1, HST_FIXED_MAX),
+ hst_weight(ps, st));
+
+ /*
+ * On request end, mark path as fresh. If a path hasn't
+ * finished any requests within the fresh period, the estimated
+ * service time is considered too optimistic and we limit the
+ * maximum requests on that path.
+ */
+ pi->stale_after = pi->last_finish +
+ (s->valid_count * (pi->historical_service_time >> HST_FIXED_SHIFT));
+
+ spin_unlock_irqrestore(&pi->lock, flags);
+
+ return 0;
+}
+
+static struct path_selector_type hst_ps = {
+ .name = "historical-service-time",
+ .module = THIS_MODULE,
+ .table_args = 1,
+ .info_args = 3,
+ .create = hst_create,
+ .destroy = hst_destroy,
+ .status = hst_status,
+ .add_path = hst_add_path,
+ .fail_path = hst_fail_path,
+ .reinstate_path = hst_reinstate_path,
+ .select_path = hst_select_path,
+ .start_io = hst_start_io,
+ .end_io = hst_end_io,
+};
+
+static int __init dm_hst_init(void)
+{
+ int r = dm_register_path_selector(&hst_ps);
+
+ if (r < 0)
+ DMERR("register failed %d", r);
+
+ DMINFO("version " HST_VERSION " loaded");
+
+ return r;
+}
+
+static void __exit dm_hst_exit(void)
+{
+ int r = dm_unregister_path_selector(&hst_ps);
+
+ if (r < 0)
+ DMERR("unregister failed %d", r);
+}
+
+module_init(dm_hst_init);
+module_exit(dm_hst_exit);
+
+MODULE_DESCRIPTION(DM_NAME " measured service time oriented path selector");
+MODULE_AUTHOR("Khazhismel Kumykov <khazhy@google.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 4094c47eca7f..81dc5ff08909 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -92,7 +92,7 @@ struct journal_entry {
} s;
__u64 sector;
} u;
- commit_id_t last_bytes[0];
+ commit_id_t last_bytes[];
/* __u8 tag[0]; */
};
@@ -1553,8 +1553,6 @@ static void integrity_metadata(struct work_struct *w)
char checksums_onstack[max((size_t)HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
sector_t sector;
unsigned sectors_to_process;
- sector_t save_metadata_block;
- unsigned save_metadata_offset;
if (unlikely(ic->mode == 'R'))
goto skip_io;
@@ -1605,8 +1603,6 @@ static void integrity_metadata(struct work_struct *w)
goto skip_io;
}
- save_metadata_block = dio->metadata_block;
- save_metadata_offset = dio->metadata_offset;
sector = dio->range.logical_sector;
sectors_to_process = dio->range.n_sectors;
@@ -2657,7 +2653,7 @@ static void bitmap_flush_work(struct work_struct *work)
dm_integrity_flush_buffers(ic);
if (ic->meta_dev)
- blkdev_issue_flush(ic->dev->bdev, GFP_NOIO, NULL);
+ blkdev_issue_flush(ic->dev->bdev, GFP_NOIO);
limit = ic->provided_data_sectors;
if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index 8ea20b56b4d6..e3d35c6c9f71 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -127,7 +127,7 @@ struct pending_block {
char *data;
u32 datalen;
struct list_head list;
- struct bio_vec vecs[0];
+ struct bio_vec vecs[];
};
struct per_bio_data {
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 3e500098132f..78cff42d987e 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -439,7 +439,7 @@ failed:
}
/*
- * dm_report_EIO() is a macro instead of a function to make pr_debug()
+ * dm_report_EIO() is a macro instead of a function to make pr_debug_ratelimited()
* report the function name and line number of the function from which
* it has been invoked.
*/
@@ -447,43 +447,25 @@ failed:
do { \
struct mapped_device *md = dm_table_get_md((m)->ti->table); \
\
- pr_debug("%s: returning EIO; QIFNP = %d; SQIFNP = %d; DNFS = %d\n", \
- dm_device_name(md), \
- test_bit(MPATHF_QUEUE_IF_NO_PATH, &(m)->flags), \
- test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &(m)->flags), \
- dm_noflush_suspending((m)->ti)); \
+ DMDEBUG_LIMIT("%s: returning EIO; QIFNP = %d; SQIFNP = %d; DNFS = %d", \
+ dm_device_name(md), \
+ test_bit(MPATHF_QUEUE_IF_NO_PATH, &(m)->flags), \
+ test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &(m)->flags), \
+ dm_noflush_suspending((m)->ti)); \
} while (0)
/*
* Check whether bios must be queued in the device-mapper core rather
* than here in the target.
- *
- * If MPATHF_QUEUE_IF_NO_PATH and MPATHF_SAVED_QUEUE_IF_NO_PATH hold
- * the same value then we are not between multipath_presuspend()
- * and multipath_resume() calls and we have no need to check
- * for the DMF_NOFLUSH_SUSPENDING flag.
*/
-static bool __must_push_back(struct multipath *m, unsigned long flags)
+static bool __must_push_back(struct multipath *m)
{
- return ((test_bit(MPATHF_QUEUE_IF_NO_PATH, &flags) !=
- test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &flags)) &&
- dm_noflush_suspending(m->ti));
+ return dm_noflush_suspending(m->ti);
}
-/*
- * Following functions use READ_ONCE to get atomic access to
- * all m->flags to avoid taking spinlock
- */
static bool must_push_back_rq(struct multipath *m)
{
- unsigned long flags = READ_ONCE(m->flags);
- return test_bit(MPATHF_QUEUE_IF_NO_PATH, &flags) || __must_push_back(m, flags);
-}
-
-static bool must_push_back_bio(struct multipath *m)
-{
- unsigned long flags = READ_ONCE(m->flags);
- return __must_push_back(m, flags);
+ return test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) || __must_push_back(m);
}
/*
@@ -567,7 +549,8 @@ static void multipath_release_clone(struct request *clone,
if (pgpath && pgpath->pg->ps.type->end_io)
pgpath->pg->ps.type->end_io(&pgpath->pg->ps,
&pgpath->path,
- mpio->nr_bytes);
+ mpio->nr_bytes,
+ clone->io_start_time_ns);
}
blk_put_request(clone);
@@ -619,7 +602,7 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio,
return DM_MAPIO_SUBMITTED;
if (!pgpath) {
- if (must_push_back_bio(m))
+ if (__must_push_back(m))
return DM_MAPIO_REQUEUE;
dm_report_EIO(m);
return DM_MAPIO_KILL;
@@ -709,15 +692,38 @@ static void process_queued_bios(struct work_struct *work)
* If we run out of usable paths, should we queue I/O or error it?
*/
static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
- bool save_old_value)
+ bool save_old_value, const char *caller)
{
unsigned long flags;
+ bool queue_if_no_path_bit, saved_queue_if_no_path_bit;
+ const char *dm_dev_name = dm_device_name(dm_table_get_md(m->ti->table));
+
+ DMDEBUG("%s: %s caller=%s queue_if_no_path=%d save_old_value=%d",
+ dm_dev_name, __func__, caller, queue_if_no_path, save_old_value);
spin_lock_irqsave(&m->lock, flags);
- assign_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags,
- (save_old_value && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) ||
- (!save_old_value && queue_if_no_path));
+
+ queue_if_no_path_bit = test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
+ saved_queue_if_no_path_bit = test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
+
+ if (save_old_value) {
+ if (unlikely(!queue_if_no_path_bit && saved_queue_if_no_path_bit)) {
+ DMERR("%s: QIFNP disabled but saved as enabled, saving again loses state, not saving!",
+ dm_dev_name);
+ } else
+ assign_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags, queue_if_no_path_bit);
+ } else if (!queue_if_no_path && saved_queue_if_no_path_bit) {
+ /* due to "fail_if_no_path" message, need to honor it. */
+ clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
+ }
assign_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags, queue_if_no_path);
+
+ DMDEBUG("%s: after %s changes; QIFNP = %d; SQIFNP = %d; DNFS = %d",
+ dm_dev_name, __func__,
+ test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags),
+ test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags),
+ dm_noflush_suspending(m->ti));
+
spin_unlock_irqrestore(&m->lock, flags);
if (!queue_if_no_path) {
@@ -738,7 +744,7 @@ static void queue_if_no_path_timeout_work(struct timer_list *t)
struct mapped_device *md = dm_table_get_md(m->ti->table);
DMWARN("queue_if_no_path timeout on %s, failing queued IO", dm_device_name(md));
- queue_if_no_path(m, false, false);
+ queue_if_no_path(m, false, false, __func__);
}
/*
@@ -1078,7 +1084,7 @@ static int parse_features(struct dm_arg_set *as, struct multipath *m)
argc--;
if (!strcasecmp(arg_name, "queue_if_no_path")) {
- r = queue_if_no_path(m, true, false);
+ r = queue_if_no_path(m, true, false, __func__);
continue;
}
@@ -1279,7 +1285,9 @@ static int fail_path(struct pgpath *pgpath)
if (!pgpath->is_active)
goto out;
- DMWARN("Failing path %s.", pgpath->path.dev->name);
+ DMWARN("%s: Failing path %s.",
+ dm_device_name(dm_table_get_md(m->ti->table)),
+ pgpath->path.dev->name);
pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
pgpath->is_active = false;
@@ -1318,7 +1326,9 @@ static int reinstate_path(struct pgpath *pgpath)
if (pgpath->is_active)
goto out;
- DMWARN("Reinstating path %s.", pgpath->path.dev->name);
+ DMWARN("%s: Reinstating path %s.",
+ dm_device_name(dm_table_get_md(m->ti->table)),
+ pgpath->path.dev->name);
r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
if (r)
@@ -1617,7 +1627,8 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
struct path_selector *ps = &pgpath->pg->ps;
if (ps->type->end_io)
- ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
+ ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes,
+ clone->io_start_time_ns);
}
return r;
@@ -1640,7 +1651,7 @@ static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone,
if (atomic_read(&m->nr_valid_paths) == 0 &&
!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
- if (must_push_back_bio(m)) {
+ if (__must_push_back(m)) {
r = DM_ENDIO_REQUEUE;
} else {
dm_report_EIO(m);
@@ -1661,23 +1672,27 @@ done:
struct path_selector *ps = &pgpath->pg->ps;
if (ps->type->end_io)
- ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
+ ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes,
+ dm_start_time_ns_from_clone(clone));
}
return r;
}
/*
- * Suspend can't complete until all the I/O is processed so if
- * the last path fails we must error any remaining I/O.
- * Note that if the freeze_bdev fails while suspending, the
- * queue_if_no_path state is lost - userspace should reset it.
+ * Suspend with flush can't complete until all the I/O is processed
+ * so if the last path fails we must error any remaining I/O.
+ * - Note that if the freeze_bdev fails while suspending, the
+ * queue_if_no_path state is lost - userspace should reset it.
+ * Otherwise, during noflush suspend, queue_if_no_path will not change.
*/
static void multipath_presuspend(struct dm_target *ti)
{
struct multipath *m = ti->private;
- queue_if_no_path(m, false, true);
+ /* FIXME: bio-based shouldn't need to always disable queue_if_no_path */
+ if (m->queue_mode == DM_TYPE_BIO_BASED || !dm_noflush_suspending(m->ti))
+ queue_if_no_path(m, false, true, __func__);
}
static void multipath_postsuspend(struct dm_target *ti)
@@ -1698,8 +1713,16 @@ static void multipath_resume(struct dm_target *ti)
unsigned long flags;
spin_lock_irqsave(&m->lock, flags);
- assign_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags,
- test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags));
+ if (test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags)) {
+ set_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
+ clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
+ }
+
+ DMDEBUG("%s: %s finished; QIFNP = %d; SQIFNP = %d",
+ dm_device_name(dm_table_get_md(m->ti->table)), __func__,
+ test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags),
+ test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags));
+
spin_unlock_irqrestore(&m->lock, flags);
}
@@ -1859,13 +1882,13 @@ static int multipath_message(struct dm_target *ti, unsigned argc, char **argv,
if (argc == 1) {
if (!strcasecmp(argv[0], "queue_if_no_path")) {
- r = queue_if_no_path(m, true, false);
+ r = queue_if_no_path(m, true, false, __func__);
spin_lock_irqsave(&m->lock, flags);
enable_nopath_timeout(m);
spin_unlock_irqrestore(&m->lock, flags);
goto out;
} else if (!strcasecmp(argv[0], "fail_if_no_path")) {
- r = queue_if_no_path(m, false, false);
+ r = queue_if_no_path(m, false, false, __func__);
disable_nopath_timeout(m);
goto out;
}
@@ -1918,7 +1941,7 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
int r;
current_pgpath = READ_ONCE(m->current_pgpath);
- if (!current_pgpath)
+ if (!current_pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags))
current_pgpath = choose_pgpath(m, 0);
if (current_pgpath) {
diff --git a/drivers/md/dm-path-selector.h b/drivers/md/dm-path-selector.h
index b6eb5365b1a4..c47bc0e20275 100644
--- a/drivers/md/dm-path-selector.h
+++ b/drivers/md/dm-path-selector.h
@@ -74,7 +74,7 @@ struct path_selector_type {
int (*start_io) (struct path_selector *ps, struct dm_path *path,
size_t nr_bytes);
int (*end_io) (struct path_selector *ps, struct dm_path *path,
- size_t nr_bytes);
+ size_t nr_bytes, u64 start_time);
};
/* Register a path selector */
diff --git a/drivers/md/dm-queue-length.c b/drivers/md/dm-queue-length.c
index 969c4f1a3633..5fd018d18418 100644
--- a/drivers/md/dm-queue-length.c
+++ b/drivers/md/dm-queue-length.c
@@ -227,7 +227,7 @@ static int ql_start_io(struct path_selector *ps, struct dm_path *path,
}
static int ql_end_io(struct path_selector *ps, struct dm_path *path,
- size_t nr_bytes)
+ size_t nr_bytes, u64 start_time)
{
struct path_info *pi = path->pscontext;
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 9a18bef0a5ff..10e8b2fe787b 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -254,7 +254,7 @@ struct raid_set {
int mode;
} journal_dev;
- struct raid_dev dev[0];
+ struct raid_dev dev[];
};
static void rs_config_backup(struct raid_set *rs, struct rs_layout *l)
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 089aed57e083..2f655d9f4200 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -83,7 +83,7 @@ struct mirror_set {
struct work_struct trigger_event;
unsigned nr_mirrors;
- struct mirror mirror[0];
+ struct mirror mirror[];
};
DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(raid1_resync_throttle,
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 3f8577e2c13b..f60c02512121 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -547,7 +547,7 @@ int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
md->tag_set->ops = &dm_mq_ops;
md->tag_set->queue_depth = dm_get_blk_mq_queue_depth();
md->tag_set->numa_node = md->numa_node_id;
- md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE;
+ md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_STACKING;
md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues();
md->tag_set->driver_data = md;
diff --git a/drivers/md/dm-service-time.c b/drivers/md/dm-service-time.c
index f006a9005593..9cfda665e9eb 100644
--- a/drivers/md/dm-service-time.c
+++ b/drivers/md/dm-service-time.c
@@ -309,7 +309,7 @@ static int st_start_io(struct path_selector *ps, struct dm_path *path,
}
static int st_end_io(struct path_selector *ps, struct dm_path *path,
- size_t nr_bytes)
+ size_t nr_bytes, u64 start_time)
{
struct path_info *pi = path->pscontext;
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
index 71417048256a..35d368c418d0 100644
--- a/drivers/md/dm-stats.c
+++ b/drivers/md/dm-stats.c
@@ -56,7 +56,7 @@ struct dm_stat {
size_t percpu_alloc_size;
size_t histogram_alloc_size;
struct dm_stat_percpu *stat_percpu[NR_CPUS];
- struct dm_stat_shared stat_shared[0];
+ struct dm_stat_shared stat_shared[];
};
#define STAT_PRECISE_TIMESTAMPS 1
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index fa813c0f993d..151d022b032d 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -41,7 +41,7 @@ struct stripe_c {
/* Work struct used for triggering events*/
struct work_struct trigger_event;
- struct stripe stripe[0];
+ struct stripe stripe[];
};
/*
diff --git a/drivers/md/dm-switch.c b/drivers/md/dm-switch.c
index 8a0f057b8122..bff4c7fa1cd2 100644
--- a/drivers/md/dm-switch.c
+++ b/drivers/md/dm-switch.c
@@ -53,7 +53,7 @@ struct switch_ctx {
/*
* Array of dm devices to switch between.
*/
- struct switch_path path_list[0];
+ struct switch_path path_list[];
};
static struct switch_ctx *alloc_switch_ctx(struct dm_target *ti, unsigned nr_paths,
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 0a2cc197f62b..8277b959e00b 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -279,7 +279,6 @@ static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
- struct request_queue *q;
struct queue_limits *limits = data;
struct block_device *bdev = dev->bdev;
sector_t dev_size =
@@ -288,22 +287,6 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
limits->logical_block_size >> SECTOR_SHIFT;
char b[BDEVNAME_SIZE];
- /*
- * Some devices exist without request functions,
- * such as loop devices not yet bound to backing files.
- * Forbid the use of such devices.
- */
- q = bdev_get_queue(bdev);
- if (!q || !q->make_request_fn) {
- DMWARN("%s: %s is not yet initialised: "
- "start=%llu, len=%llu, dev_size=%llu",
- dm_device_name(ti->table->md), bdevname(bdev, b),
- (unsigned long long)start,
- (unsigned long long)len,
- (unsigned long long)dev_size);
- return 1;
- }
-
if (!dev_size)
return 0;
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index 613c171b1b6d..74f3c506f084 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -234,10 +234,6 @@ static int persistent_memory_claim(struct dm_writecache *wc)
wc->memory_vmapped = false;
- if (!wc->ssd_dev->dax_dev) {
- r = -EOPNOTSUPP;
- goto err1;
- }
s = wc->memory_map_size;
p = s >> PAGE_SHIFT;
if (!p) {
@@ -1143,6 +1139,42 @@ static int writecache_message(struct dm_target *ti, unsigned argc, char **argv,
return r;
}
+static void memcpy_flushcache_optimized(void *dest, void *source, size_t size)
+{
+ /*
+ * clflushopt performs better with block size 1024, 2048, 4096
+ * non-temporal stores perform better with block size 512
+ *
+ * block size 512 1024 2048 4096
+ * movnti 496 MB/s 642 MB/s 725 MB/s 744 MB/s
+ * clflushopt 373 MB/s 688 MB/s 1.1 GB/s 1.2 GB/s
+ *
+ * We see that movnti performs better for 512-byte blocks, and
+ * clflushopt performs better for 1024-byte and larger blocks. So, we
+ * prefer clflushopt for sizes >= 768.
+ *
+ * NOTE: this happens to be the case now (with dm-writecache's single
+ * threaded model) but re-evaluate this once memcpy_flushcache() is
+ * enabled to use movdir64b which might invalidate this performance
+ * advantage seen with cache-allocating-writes plus flushing.
+ */
+#ifdef CONFIG_X86
+ if (static_cpu_has(X86_FEATURE_CLFLUSHOPT) &&
+ likely(boot_cpu_data.x86_clflush_size == 64) &&
+ likely(size >= 768)) {
+ do {
+ memcpy((void *)dest, (void *)source, 64);
+ clflushopt((void *)dest);
+ dest += 64;
+ source += 64;
+ size -= 64;
+ } while (size >= 64);
+ return;
+ }
+#endif
+ memcpy_flushcache(dest, source, size);
+}
+
static void bio_copy_block(struct dm_writecache *wc, struct bio *bio, void *data)
{
void *buf;
@@ -1168,7 +1200,7 @@ static void bio_copy_block(struct dm_writecache *wc, struct bio *bio, void *data
}
} else {
flush_dcache_page(bio_page(bio));
- memcpy_flushcache(data, buf, size);
+ memcpy_flushcache_optimized(data, buf, size);
}
bvec_kunmap_irq(buf, &flags);
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index 369de15c4e80..130b5a6d9f12 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -16,7 +16,7 @@
/*
* Metadata version.
*/
-#define DMZ_META_VER 1
+#define DMZ_META_VER 2
/*
* On-disk super block magic.
@@ -69,8 +69,17 @@ struct dmz_super {
/* Checksum */
__le32 crc; /* 48 */
+ /* DM-Zoned label */
+ u8 dmz_label[32]; /* 80 */
+
+ /* DM-Zoned UUID */
+ u8 dmz_uuid[16]; /* 96 */
+
+ /* Device UUID */
+ u8 dev_uuid[16]; /* 112 */
+
/* Padding to full 512B sector */
- u8 reserved[464]; /* 512 */
+ u8 reserved[400]; /* 512 */
};
/*
@@ -122,8 +131,10 @@ enum {
*/
struct dmz_sb {
sector_t block;
+ struct dmz_dev *dev;
struct dmz_mblock *mblk;
struct dmz_super *sb;
+ struct dm_zone *zone;
};
/*
@@ -131,28 +142,41 @@ struct dmz_sb {
*/
struct dmz_metadata {
struct dmz_dev *dev;
+ unsigned int nr_devs;
+
+ char devname[BDEVNAME_SIZE];
+ char label[BDEVNAME_SIZE];
+ uuid_t uuid;
sector_t zone_bitmap_size;
unsigned int zone_nr_bitmap_blocks;
unsigned int zone_bits_per_mblk;
+ sector_t zone_nr_blocks;
+ sector_t zone_nr_blocks_shift;
+
+ sector_t zone_nr_sectors;
+ sector_t zone_nr_sectors_shift;
+
unsigned int nr_bitmap_blocks;
unsigned int nr_map_blocks;
+ unsigned int nr_zones;
unsigned int nr_useable_zones;
unsigned int nr_meta_blocks;
unsigned int nr_meta_zones;
unsigned int nr_data_zones;
+ unsigned int nr_cache_zones;
unsigned int nr_rnd_zones;
unsigned int nr_reserved_seq;
unsigned int nr_chunks;
/* Zone information array */
- struct dm_zone *zones;
+ struct xarray zones;
- struct dm_zone *sb_zone;
struct dmz_sb sb[2];
unsigned int mblk_primary;
+ unsigned int sb_version;
u64 sb_gen;
unsigned int min_nr_mblks;
unsigned int max_nr_mblks;
@@ -168,15 +192,11 @@ struct dmz_metadata {
/* Zone allocation management */
struct mutex map_lock;
struct dmz_mblock **map_mblk;
- unsigned int nr_rnd;
- atomic_t unmap_nr_rnd;
- struct list_head unmap_rnd_list;
- struct list_head map_rnd_list;
- unsigned int nr_seq;
- atomic_t unmap_nr_seq;
- struct list_head unmap_seq_list;
- struct list_head map_seq_list;
+ unsigned int nr_cache;
+ atomic_t unmap_nr_cache;
+ struct list_head unmap_cache_list;
+ struct list_head map_cache_list;
atomic_t nr_reserved_seq_zones;
struct list_head reserved_seq_zones_list;
@@ -184,22 +204,65 @@ struct dmz_metadata {
wait_queue_head_t free_wq;
};
+#define dmz_zmd_info(zmd, format, args...) \
+ DMINFO("(%s): " format, (zmd)->label, ## args)
+
+#define dmz_zmd_err(zmd, format, args...) \
+ DMERR("(%s): " format, (zmd)->label, ## args)
+
+#define dmz_zmd_warn(zmd, format, args...) \
+ DMWARN("(%s): " format, (zmd)->label, ## args)
+
+#define dmz_zmd_debug(zmd, format, args...) \
+ DMDEBUG("(%s): " format, (zmd)->label, ## args)
/*
* Various accessors
*/
-unsigned int dmz_id(struct dmz_metadata *zmd, struct dm_zone *zone)
+static unsigned int dmz_dev_zone_id(struct dmz_metadata *zmd, struct dm_zone *zone)
{
- return ((unsigned int)(zone - zmd->zones));
+ if (WARN_ON(!zone))
+ return 0;
+
+ return zone->id - zone->dev->zone_offset;
}
sector_t dmz_start_sect(struct dmz_metadata *zmd, struct dm_zone *zone)
{
- return (sector_t)dmz_id(zmd, zone) << zmd->dev->zone_nr_sectors_shift;
+ unsigned int zone_id = dmz_dev_zone_id(zmd, zone);
+
+ return (sector_t)zone_id << zmd->zone_nr_sectors_shift;
}
sector_t dmz_start_block(struct dmz_metadata *zmd, struct dm_zone *zone)
{
- return (sector_t)dmz_id(zmd, zone) << zmd->dev->zone_nr_blocks_shift;
+ unsigned int zone_id = dmz_dev_zone_id(zmd, zone);
+
+ return (sector_t)zone_id << zmd->zone_nr_blocks_shift;
+}
+
+unsigned int dmz_zone_nr_blocks(struct dmz_metadata *zmd)
+{
+ return zmd->zone_nr_blocks;
+}
+
+unsigned int dmz_zone_nr_blocks_shift(struct dmz_metadata *zmd)
+{
+ return zmd->zone_nr_blocks_shift;
+}
+
+unsigned int dmz_zone_nr_sectors(struct dmz_metadata *zmd)
+{
+ return zmd->zone_nr_sectors;
+}
+
+unsigned int dmz_zone_nr_sectors_shift(struct dmz_metadata *zmd)
+{
+ return zmd->zone_nr_sectors_shift;
+}
+
+unsigned int dmz_nr_zones(struct dmz_metadata *zmd)
+{
+ return zmd->nr_zones;
}
unsigned int dmz_nr_chunks(struct dmz_metadata *zmd)
@@ -207,14 +270,88 @@ unsigned int dmz_nr_chunks(struct dmz_metadata *zmd)
return zmd->nr_chunks;
}
-unsigned int dmz_nr_rnd_zones(struct dmz_metadata *zmd)
+unsigned int dmz_nr_rnd_zones(struct dmz_metadata *zmd, int idx)
{
- return zmd->nr_rnd;
+ return zmd->dev[idx].nr_rnd;
}
-unsigned int dmz_nr_unmap_rnd_zones(struct dmz_metadata *zmd)
+unsigned int dmz_nr_unmap_rnd_zones(struct dmz_metadata *zmd, int idx)
{
- return atomic_read(&zmd->unmap_nr_rnd);
+ return atomic_read(&zmd->dev[idx].unmap_nr_rnd);
+}
+
+unsigned int dmz_nr_cache_zones(struct dmz_metadata *zmd)
+{
+ return zmd->nr_cache;
+}
+
+unsigned int dmz_nr_unmap_cache_zones(struct dmz_metadata *zmd)
+{
+ return atomic_read(&zmd->unmap_nr_cache);
+}
+
+unsigned int dmz_nr_seq_zones(struct dmz_metadata *zmd, int idx)
+{
+ return zmd->dev[idx].nr_seq;
+}
+
+unsigned int dmz_nr_unmap_seq_zones(struct dmz_metadata *zmd, int idx)
+{
+ return atomic_read(&zmd->dev[idx].unmap_nr_seq);
+}
+
+static struct dm_zone *dmz_get(struct dmz_metadata *zmd, unsigned int zone_id)
+{
+ return xa_load(&zmd->zones, zone_id);
+}
+
+static struct dm_zone *dmz_insert(struct dmz_metadata *zmd,
+ unsigned int zone_id, struct dmz_dev *dev)
+{
+ struct dm_zone *zone = kzalloc(sizeof(struct dm_zone), GFP_KERNEL);
+
+ if (!zone)
+ return ERR_PTR(-ENOMEM);
+
+ if (xa_insert(&zmd->zones, zone_id, zone, GFP_KERNEL)) {
+ kfree(zone);
+ return ERR_PTR(-EBUSY);
+ }
+
+ INIT_LIST_HEAD(&zone->link);
+ atomic_set(&zone->refcount, 0);
+ zone->id = zone_id;
+ zone->chunk = DMZ_MAP_UNMAPPED;
+ zone->dev = dev;
+
+ return zone;
+}
+
+const char *dmz_metadata_label(struct dmz_metadata *zmd)
+{
+ return (const char *)zmd->label;
+}
+
+bool dmz_check_dev(struct dmz_metadata *zmd)
+{
+ unsigned int i;
+
+ for (i = 0; i < zmd->nr_devs; i++) {
+ if (!dmz_check_bdev(&zmd->dev[i]))
+ return false;
+ }
+ return true;
+}
+
+bool dmz_dev_is_dying(struct dmz_metadata *zmd)
+{
+ unsigned int i;
+
+ for (i = 0; i < zmd->nr_devs; i++) {
+ if (dmz_bdev_is_dying(&zmd->dev[i]))
+ return true;
+ }
+ return false;
}
/*
@@ -402,9 +539,10 @@ static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd,
{
struct dmz_mblock *mblk, *m;
sector_t block = zmd->sb[zmd->mblk_primary].block + mblk_no;
+ struct dmz_dev *dev = zmd->sb[zmd->mblk_primary].dev;
struct bio *bio;
- if (dmz_bdev_is_dying(zmd->dev))
+ if (dmz_bdev_is_dying(dev))
return ERR_PTR(-EIO);
/* Get a new block and a BIO to read it */
@@ -440,7 +578,7 @@ static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd,
/* Submit read BIO */
bio->bi_iter.bi_sector = dmz_blk2sect(block);
- bio_set_dev(bio, zmd->dev->bdev);
+ bio_set_dev(bio, dev->bdev);
bio->bi_private = mblk;
bio->bi_end_io = dmz_mblock_bio_end_io;
bio_set_op_attrs(bio, REQ_OP_READ, REQ_META | REQ_PRIO);
@@ -537,6 +675,7 @@ static struct dmz_mblock *dmz_get_mblock(struct dmz_metadata *zmd,
sector_t mblk_no)
{
struct dmz_mblock *mblk;
+ struct dmz_dev *dev = zmd->sb[zmd->mblk_primary].dev;
/* Check rbtree */
spin_lock(&zmd->mblk_lock);
@@ -555,7 +694,7 @@ static struct dmz_mblock *dmz_get_mblock(struct dmz_metadata *zmd,
TASK_UNINTERRUPTIBLE);
if (test_bit(DMZ_META_ERROR, &mblk->state)) {
dmz_release_mblock(zmd, mblk);
- dmz_check_bdev(zmd->dev);
+ dmz_check_bdev(dev);
return ERR_PTR(-EIO);
}
@@ -579,10 +718,11 @@ static void dmz_dirty_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
static int dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
unsigned int set)
{
+ struct dmz_dev *dev = zmd->sb[set].dev;
sector_t block = zmd->sb[set].block + mblk->no;
struct bio *bio;
- if (dmz_bdev_is_dying(zmd->dev))
+ if (dmz_bdev_is_dying(dev))
return -EIO;
bio = bio_alloc(GFP_NOIO, 1);
@@ -594,7 +734,7 @@ static int dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
set_bit(DMZ_META_WRITING, &mblk->state);
bio->bi_iter.bi_sector = dmz_blk2sect(block);
- bio_set_dev(bio, zmd->dev->bdev);
+ bio_set_dev(bio, dev->bdev);
bio->bi_private = mblk;
bio->bi_end_io = dmz_mblock_bio_end_io;
bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_META | REQ_PRIO);
@@ -607,13 +747,16 @@ static int dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
/*
* Read/write a metadata block.
*/
-static int dmz_rdwr_block(struct dmz_metadata *zmd, int op, sector_t block,
- struct page *page)
+static int dmz_rdwr_block(struct dmz_dev *dev, int op,
+ sector_t block, struct page *page)
{
struct bio *bio;
int ret;
- if (dmz_bdev_is_dying(zmd->dev))
+ if (WARN_ON(!dev))
+ return -EIO;
+
+ if (dmz_bdev_is_dying(dev))
return -EIO;
bio = bio_alloc(GFP_NOIO, 1);
@@ -621,14 +764,14 @@ static int dmz_rdwr_block(struct dmz_metadata *zmd, int op, sector_t block,
return -ENOMEM;
bio->bi_iter.bi_sector = dmz_blk2sect(block);
- bio_set_dev(bio, zmd->dev->bdev);
+ bio_set_dev(bio, dev->bdev);
bio_set_op_attrs(bio, op, REQ_SYNC | REQ_META | REQ_PRIO);
bio_add_page(bio, page, DMZ_BLOCK_SIZE, 0);
ret = submit_bio_wait(bio);
bio_put(bio);
if (ret)
- dmz_check_bdev(zmd->dev);
+ dmz_check_bdev(dev);
return ret;
}
@@ -637,18 +780,32 @@ static int dmz_rdwr_block(struct dmz_metadata *zmd, int op, sector_t block,
*/
static int dmz_write_sb(struct dmz_metadata *zmd, unsigned int set)
{
- sector_t block = zmd->sb[set].block;
struct dmz_mblock *mblk = zmd->sb[set].mblk;
struct dmz_super *sb = zmd->sb[set].sb;
+ struct dmz_dev *dev = zmd->sb[set].dev;
+ sector_t sb_block;
u64 sb_gen = zmd->sb_gen + 1;
int ret;
sb->magic = cpu_to_le32(DMZ_MAGIC);
- sb->version = cpu_to_le32(DMZ_META_VER);
+
+ sb->version = cpu_to_le32(zmd->sb_version);
+ if (zmd->sb_version > 1) {
+ BUILD_BUG_ON(UUID_SIZE != 16);
+ export_uuid(sb->dmz_uuid, &zmd->uuid);
+ memcpy(sb->dmz_label, zmd->label, BDEVNAME_SIZE);
+ export_uuid(sb->dev_uuid, &dev->uuid);
+ }
sb->gen = cpu_to_le64(sb_gen);
- sb->sb_block = cpu_to_le64(block);
+ /*
+ * The metadata always references the absolute block address,
+ * i.e. relative to the entire block range, not the per-device
+ * block address.
+ */
+ sb_block = zmd->sb[set].zone->id << zmd->zone_nr_blocks_shift;
+ sb->sb_block = cpu_to_le64(sb_block);
sb->nr_meta_blocks = cpu_to_le32(zmd->nr_meta_blocks);
sb->nr_reserved_seq = cpu_to_le32(zmd->nr_reserved_seq);
sb->nr_chunks = cpu_to_le32(zmd->nr_chunks);
@@ -659,9 +816,10 @@ static int dmz_write_sb(struct dmz_metadata *zmd, unsigned int set)
sb->crc = 0;
sb->crc = cpu_to_le32(crc32_le(sb_gen, (unsigned char *)sb, DMZ_BLOCK_SIZE));
- ret = dmz_rdwr_block(zmd, REQ_OP_WRITE, block, mblk->page);
+ ret = dmz_rdwr_block(dev, REQ_OP_WRITE, zmd->sb[set].block,
+ mblk->page);
if (ret == 0)
- ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL);
+ ret = blkdev_issue_flush(dev->bdev, GFP_NOIO);
return ret;
}
@@ -674,6 +832,7 @@ static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd,
unsigned int set)
{
struct dmz_mblock *mblk;
+ struct dmz_dev *dev = zmd->sb[set].dev;
struct blk_plug plug;
int ret = 0, nr_mblks_submitted = 0;
@@ -695,7 +854,7 @@ static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd,
TASK_UNINTERRUPTIBLE);
if (test_bit(DMZ_META_ERROR, &mblk->state)) {
clear_bit(DMZ_META_ERROR, &mblk->state);
- dmz_check_bdev(zmd->dev);
+ dmz_check_bdev(dev);
ret = -EIO;
}
nr_mblks_submitted--;
@@ -703,7 +862,7 @@ static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd,
/* Flush drive cache (this will also sync data) */
if (ret == 0)
- ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL);
+ ret = blkdev_issue_flush(dev->bdev, GFP_NOIO);
return ret;
}
@@ -740,6 +899,7 @@ int dmz_flush_metadata(struct dmz_metadata *zmd)
{
struct dmz_mblock *mblk;
struct list_head write_list;
+ struct dmz_dev *dev;
int ret;
if (WARN_ON(!zmd))
@@ -753,6 +913,7 @@ int dmz_flush_metadata(struct dmz_metadata *zmd)
* from modifying metadata.
*/
down_write(&zmd->mblk_sem);
+ dev = zmd->sb[zmd->mblk_primary].dev;
/*
* This is called from the target flush work and reclaim work.
@@ -760,7 +921,7 @@ int dmz_flush_metadata(struct dmz_metadata *zmd)
*/
dmz_lock_flush(zmd);
- if (dmz_bdev_is_dying(zmd->dev)) {
+ if (dmz_bdev_is_dying(dev)) {
ret = -EIO;
goto out;
}
@@ -772,7 +933,7 @@ int dmz_flush_metadata(struct dmz_metadata *zmd)
/* If there are no dirty metadata blocks, just flush the device cache */
if (list_empty(&write_list)) {
- ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL);
+ ret = blkdev_issue_flush(dev->bdev, GFP_NOIO);
goto err;
}
@@ -821,7 +982,7 @@ err:
list_splice(&write_list, &zmd->mblk_dirty_list);
spin_unlock(&zmd->mblk_lock);
}
- if (!dmz_check_bdev(zmd->dev))
+ if (!dmz_check_bdev(dev))
ret = -EIO;
goto out;
}
@@ -829,12 +990,31 @@ err:
/*
* Check super block.
*/
-static int dmz_check_sb(struct dmz_metadata *zmd, struct dmz_super *sb)
+static int dmz_check_sb(struct dmz_metadata *zmd, struct dmz_sb *dsb,
+ bool tertiary)
{
+ struct dmz_super *sb = dsb->sb;
+ struct dmz_dev *dev = dsb->dev;
unsigned int nr_meta_zones, nr_data_zones;
- struct dmz_dev *dev = zmd->dev;
u32 crc, stored_crc;
- u64 gen;
+ u64 gen, sb_block;
+
+ if (le32_to_cpu(sb->magic) != DMZ_MAGIC) {
+ dmz_dev_err(dev, "Invalid meta magic (needed 0x%08x, got 0x%08x)",
+ DMZ_MAGIC, le32_to_cpu(sb->magic));
+ return -ENXIO;
+ }
+
+ zmd->sb_version = le32_to_cpu(sb->version);
+ if (zmd->sb_version > DMZ_META_VER) {
+ dmz_dev_err(dev, "Invalid meta version (needed %d, got %d)",
+ DMZ_META_VER, zmd->sb_version);
+ return -EINVAL;
+ }
+ if (zmd->sb_version < 2 && tertiary) {
+ dmz_dev_err(dev, "Tertiary superblocks are not supported");
+ return -EINVAL;
+ }
gen = le64_to_cpu(sb->gen);
stored_crc = le32_to_cpu(sb->crc);
@@ -846,20 +1026,57 @@ static int dmz_check_sb(struct dmz_metadata *zmd, struct dmz_super *sb)
return -ENXIO;
}
- if (le32_to_cpu(sb->magic) != DMZ_MAGIC) {
- dmz_dev_err(dev, "Invalid meta magic (needed 0x%08x, got 0x%08x)",
- DMZ_MAGIC, le32_to_cpu(sb->magic));
- return -ENXIO;
- }
+ sb_block = le64_to_cpu(sb->sb_block);
+ if (sb_block != (u64)dsb->zone->id << zmd->zone_nr_blocks_shift) {
+ dmz_dev_err(dev, "Invalid superblock position "
+ "(is %llu expected %llu)",
+ sb_block,
+ (u64)dsb->zone->id << zmd->zone_nr_blocks_shift);
+ return -EINVAL;
+ }
+ if (zmd->sb_version > 1) {
+ uuid_t sb_uuid;
+
+ import_uuid(&sb_uuid, sb->dmz_uuid);
+ if (uuid_is_null(&sb_uuid)) {
+ dmz_dev_err(dev, "NULL DM-Zoned uuid");
+ return -ENXIO;
+ } else if (uuid_is_null(&zmd->uuid)) {
+ uuid_copy(&zmd->uuid, &sb_uuid);
+ } else if (!uuid_equal(&zmd->uuid, &sb_uuid)) {
+ dmz_dev_err(dev, "mismatching DM-Zoned uuid, "
+ "is %pUl expected %pUl",
+ &sb_uuid, &zmd->uuid);
+ return -ENXIO;
+ }
+ if (!strlen(zmd->label))
+ memcpy(zmd->label, sb->dmz_label, BDEVNAME_SIZE);
+ else if (memcmp(zmd->label, sb->dmz_label, BDEVNAME_SIZE)) {
+ dmz_dev_err(dev, "mismatching DM-Zoned label, "
+ "is %s expected %s",
+ sb->dmz_label, zmd->label);
+ return -ENXIO;
+ }
+ import_uuid(&dev->uuid, sb->dev_uuid);
+ if (uuid_is_null(&dev->uuid)) {
+ dmz_dev_err(dev, "NULL device uuid");
+ return -ENXIO;
+ }
- if (le32_to_cpu(sb->version) != DMZ_META_VER) {
- dmz_dev_err(dev, "Invalid meta version (needed %d, got %d)",
- DMZ_META_VER, le32_to_cpu(sb->version));
- return -ENXIO;
+ if (tertiary) {
+ /*
+ * Generation number should be 0, but it doesn't
+ * really matter if it isn't.
+ */
+ if (gen != 0)
+ dmz_dev_warn(dev, "Invalid generation %llu",
+ gen);
+ return 0;
+ }
}
- nr_meta_zones = (le32_to_cpu(sb->nr_meta_blocks) + dev->zone_nr_blocks - 1)
- >> dev->zone_nr_blocks_shift;
+ nr_meta_zones = (le32_to_cpu(sb->nr_meta_blocks) + zmd->zone_nr_blocks - 1)
+ >> zmd->zone_nr_blocks_shift;
if (!nr_meta_zones ||
nr_meta_zones >= zmd->nr_rnd_zones) {
dmz_dev_err(dev, "Invalid number of metadata blocks");
@@ -895,10 +1112,13 @@ static int dmz_check_sb(struct dmz_metadata *zmd, struct dmz_super *sb)
/*
* Read the first or second super block from disk.
*/
-static int dmz_read_sb(struct dmz_metadata *zmd, unsigned int set)
+static int dmz_read_sb(struct dmz_metadata *zmd, struct dmz_sb *sb, int set)
{
- return dmz_rdwr_block(zmd, REQ_OP_READ, zmd->sb[set].block,
- zmd->sb[set].mblk->page);
+ dmz_zmd_debug(zmd, "read superblock set %d dev %s block %llu",
+ set, sb->dev->name, sb->block);
+
+ return dmz_rdwr_block(sb->dev, REQ_OP_READ,
+ sb->block, sb->mblk->page);
}
/*
@@ -908,8 +1128,9 @@ static int dmz_read_sb(struct dmz_metadata *zmd, unsigned int set)
*/
static int dmz_lookup_secondary_sb(struct dmz_metadata *zmd)
{
- unsigned int zone_nr_blocks = zmd->dev->zone_nr_blocks;
+ unsigned int zone_nr_blocks = zmd->zone_nr_blocks;
struct dmz_mblock *mblk;
+ unsigned int zone_id = zmd->sb[0].zone->id;
int i;
/* Allocate a block */
@@ -922,24 +1143,29 @@ static int dmz_lookup_secondary_sb(struct dmz_metadata *zmd)
/* Bad first super block: search for the second one */
zmd->sb[1].block = zmd->sb[0].block + zone_nr_blocks;
- for (i = 0; i < zmd->nr_rnd_zones - 1; i++) {
- if (dmz_read_sb(zmd, 1) != 0)
+ zmd->sb[1].zone = dmz_get(zmd, zone_id + 1);
+ zmd->sb[1].dev = zmd->sb[0].dev;
+ for (i = 1; i < zmd->nr_rnd_zones; i++) {
+ if (dmz_read_sb(zmd, &zmd->sb[1], 1) != 0)
break;
if (le32_to_cpu(zmd->sb[1].sb->magic) == DMZ_MAGIC)
return 0;
zmd->sb[1].block += zone_nr_blocks;
+ zmd->sb[1].zone = dmz_get(zmd, zone_id + i);
}
dmz_free_mblock(zmd, mblk);
zmd->sb[1].mblk = NULL;
+ zmd->sb[1].zone = NULL;
+ zmd->sb[1].dev = NULL;
return -EIO;
}
/*
- * Read the first or second super block from disk.
+ * Read a super block from disk.
*/
-static int dmz_get_sb(struct dmz_metadata *zmd, unsigned int set)
+static int dmz_get_sb(struct dmz_metadata *zmd, struct dmz_sb *sb, int set)
{
struct dmz_mblock *mblk;
int ret;
@@ -949,14 +1175,14 @@ static int dmz_get_sb(struct dmz_metadata *zmd, unsigned int set)
if (!mblk)
return -ENOMEM;
- zmd->sb[set].mblk = mblk;
- zmd->sb[set].sb = mblk->data;
+ sb->mblk = mblk;
+ sb->sb = mblk->data;
/* Read super block */
- ret = dmz_read_sb(zmd, set);
+ ret = dmz_read_sb(zmd, sb, set);
if (ret) {
dmz_free_mblock(zmd, mblk);
- zmd->sb[set].mblk = NULL;
+ sb->mblk = NULL;
return ret;
}
@@ -972,14 +1198,13 @@ static int dmz_recover_mblocks(struct dmz_metadata *zmd, unsigned int dst_set)
struct page *page;
int i, ret;
- dmz_dev_warn(zmd->dev, "Metadata set %u invalid: recovering", dst_set);
+ dmz_dev_warn(zmd->sb[dst_set].dev,
+ "Metadata set %u invalid: recovering", dst_set);
if (dst_set == 0)
- zmd->sb[0].block = dmz_start_block(zmd, zmd->sb_zone);
- else {
- zmd->sb[1].block = zmd->sb[0].block +
- (zmd->nr_meta_zones << zmd->dev->zone_nr_blocks_shift);
- }
+ zmd->sb[0].block = dmz_start_block(zmd, zmd->sb[0].zone);
+ else
+ zmd->sb[1].block = dmz_start_block(zmd, zmd->sb[1].zone);
page = alloc_page(GFP_NOIO);
if (!page)
@@ -987,11 +1212,11 @@ static int dmz_recover_mblocks(struct dmz_metadata *zmd, unsigned int dst_set)
/* Copy metadata blocks */
for (i = 1; i < zmd->nr_meta_blocks; i++) {
- ret = dmz_rdwr_block(zmd, REQ_OP_READ,
+ ret = dmz_rdwr_block(zmd->sb[src_set].dev, REQ_OP_READ,
zmd->sb[src_set].block + i, page);
if (ret)
goto out;
- ret = dmz_rdwr_block(zmd, REQ_OP_WRITE,
+ ret = dmz_rdwr_block(zmd->sb[dst_set].dev, REQ_OP_WRITE,
zmd->sb[dst_set].block + i, page);
if (ret)
goto out;
@@ -1023,53 +1248,73 @@ static int dmz_load_sb(struct dmz_metadata *zmd)
u64 sb_gen[2] = {0, 0};
int ret;
+ if (!zmd->sb[0].zone) {
+ dmz_zmd_err(zmd, "Primary super block zone not set");
+ return -ENXIO;
+ }
+
/* Read and check the primary super block */
- zmd->sb[0].block = dmz_start_block(zmd, zmd->sb_zone);
- ret = dmz_get_sb(zmd, 0);
+ zmd->sb[0].block = dmz_start_block(zmd, zmd->sb[0].zone);
+ zmd->sb[0].dev = zmd->sb[0].zone->dev;
+ ret = dmz_get_sb(zmd, &zmd->sb[0], 0);
if (ret) {
- dmz_dev_err(zmd->dev, "Read primary super block failed");
+ dmz_dev_err(zmd->sb[0].dev, "Read primary super block failed");
return ret;
}
- ret = dmz_check_sb(zmd, zmd->sb[0].sb);
+ ret = dmz_check_sb(zmd, &zmd->sb[0], false);
/* Read and check secondary super block */
if (ret == 0) {
sb_good[0] = true;
- zmd->sb[1].block = zmd->sb[0].block +
- (zmd->nr_meta_zones << zmd->dev->zone_nr_blocks_shift);
- ret = dmz_get_sb(zmd, 1);
+ if (!zmd->sb[1].zone) {
+ unsigned int zone_id =
+ zmd->sb[0].zone->id + zmd->nr_meta_zones;
+
+ zmd->sb[1].zone = dmz_get(zmd, zone_id);
+ }
+ zmd->sb[1].block = dmz_start_block(zmd, zmd->sb[1].zone);
+ zmd->sb[1].dev = zmd->sb[0].dev;
+ ret = dmz_get_sb(zmd, &zmd->sb[1], 1);
} else
ret = dmz_lookup_secondary_sb(zmd);
if (ret) {
- dmz_dev_err(zmd->dev, "Read secondary super block failed");
+ dmz_dev_err(zmd->sb[1].dev, "Read secondary super block failed");
return ret;
}
- ret = dmz_check_sb(zmd, zmd->sb[1].sb);
+ ret = dmz_check_sb(zmd, &zmd->sb[1], false);
if (ret == 0)
sb_good[1] = true;
/* Use highest generation sb first */
if (!sb_good[0] && !sb_good[1]) {
- dmz_dev_err(zmd->dev, "No valid super block found");
+ dmz_zmd_err(zmd, "No valid super block found");
return -EIO;
}
if (sb_good[0])
sb_gen[0] = le64_to_cpu(zmd->sb[0].sb->gen);
- else
+ else {
ret = dmz_recover_mblocks(zmd, 0);
+ if (ret) {
+ dmz_dev_err(zmd->sb[0].dev,
+ "Recovery of superblock 0 failed");
+ return -EIO;
+ }
+ }
if (sb_good[1])
sb_gen[1] = le64_to_cpu(zmd->sb[1].sb->gen);
- else
+ else {
ret = dmz_recover_mblocks(zmd, 1);
- if (ret) {
- dmz_dev_err(zmd->dev, "Recovery failed");
- return -EIO;
+ if (ret) {
+ dmz_dev_err(zmd->sb[1].dev,
+ "Recovery of superblock 1 failed");
+ return -EIO;
+ }
}
if (sb_gen[0] >= sb_gen[1]) {
@@ -1080,32 +1325,70 @@ static int dmz_load_sb(struct dmz_metadata *zmd)
zmd->mblk_primary = 1;
}
- dmz_dev_debug(zmd->dev, "Using super block %u (gen %llu)",
+ dmz_dev_debug(zmd->sb[zmd->mblk_primary].dev,
+ "Using super block %u (gen %llu)",
zmd->mblk_primary, zmd->sb_gen);
- return 0;
+ if (zmd->sb_version > 1) {
+ int i;
+ struct dmz_sb *sb;
+
+ sb = kzalloc(sizeof(struct dmz_sb), GFP_KERNEL);
+ if (!sb)
+ return -ENOMEM;
+ for (i = 1; i < zmd->nr_devs; i++) {
+ sb->block = 0;
+ sb->zone = dmz_get(zmd, zmd->dev[i].zone_offset);
+ sb->dev = &zmd->dev[i];
+ if (!dmz_is_meta(sb->zone)) {
+ dmz_dev_err(sb->dev,
+ "Tertiary super block zone %u not marked as metadata zone",
+ sb->zone->id);
+ ret = -EINVAL;
+ goto out_kfree;
+ }
+ ret = dmz_get_sb(zmd, sb, i + 1);
+ if (ret) {
+ dmz_dev_err(sb->dev,
+ "Read tertiary super block failed");
+ dmz_free_mblock(zmd, sb->mblk);
+ goto out_kfree;
+ }
+ ret = dmz_check_sb(zmd, sb, true);
+ dmz_free_mblock(zmd, sb->mblk);
+ if (ret == -EINVAL)
+ goto out_kfree;
+ }
+ out_kfree:
+ kfree(sb);
+ }
+ return ret;
}
/*
* Initialize a zone descriptor.
*/
-static int dmz_init_zone(struct blk_zone *blkz, unsigned int idx, void *data)
+static int dmz_init_zone(struct blk_zone *blkz, unsigned int num, void *data)
{
- struct dmz_metadata *zmd = data;
- struct dm_zone *zone = &zmd->zones[idx];
- struct dmz_dev *dev = zmd->dev;
+ struct dmz_dev *dev = data;
+ struct dmz_metadata *zmd = dev->metadata;
+ int idx = num + dev->zone_offset;
+ struct dm_zone *zone;
+
+ zone = dmz_insert(zmd, idx, dev);
+ if (IS_ERR(zone))
+ return PTR_ERR(zone);
- /* Ignore the eventual last runt (smaller) zone */
- if (blkz->len != dev->zone_nr_sectors) {
- if (blkz->start + blkz->len == dev->capacity)
+ if (blkz->len != zmd->zone_nr_sectors) {
+ if (zmd->sb_version > 1) {
+ /* Ignore the eventual runt (smaller) zone */
+ set_bit(DMZ_OFFLINE, &zone->flags);
+ return 0;
+ } else if (blkz->start + blkz->len == dev->capacity)
return 0;
return -ENXIO;
}
- INIT_LIST_HEAD(&zone->link);
- atomic_set(&zone->refcount, 0);
- zone->chunk = DMZ_MAP_UNMAPPED;
-
switch (blkz->type) {
case BLK_ZONE_TYPE_CONVENTIONAL:
set_bit(DMZ_RND, &zone->flags);
@@ -1131,13 +1414,45 @@ static int dmz_init_zone(struct blk_zone *blkz, unsigned int idx, void *data)
zmd->nr_useable_zones++;
if (dmz_is_rnd(zone)) {
zmd->nr_rnd_zones++;
- if (!zmd->sb_zone) {
- /* Super block zone */
- zmd->sb_zone = zone;
+ if (zmd->nr_devs == 1 && !zmd->sb[0].zone) {
+ /* Primary super block zone */
+ zmd->sb[0].zone = zone;
}
}
+ if (zmd->nr_devs > 1 && num == 0) {
+ /*
+ * Tertiary superblock zones are always at the
+ * start of the zoned devices, so mark them
+ * as metadata zones.
+ */
+ set_bit(DMZ_META, &zone->flags);
+ }
}
+ return 0;
+}
+
+static int dmz_emulate_zones(struct dmz_metadata *zmd, struct dmz_dev *dev)
+{
+ int idx;
+ sector_t zone_offset = 0;
+ for (idx = 0; idx < dev->nr_zones; idx++) {
+ struct dm_zone *zone;
+
+ zone = dmz_insert(zmd, idx, dev);
+ if (IS_ERR(zone))
+ return PTR_ERR(zone);
+ set_bit(DMZ_CACHE, &zone->flags);
+ zone->wp_block = 0;
+ zmd->nr_cache_zones++;
+ zmd->nr_useable_zones++;
+ if (dev->capacity - zone_offset < zmd->zone_nr_sectors) {
+ /* Disable runt zone */
+ set_bit(DMZ_OFFLINE, &zone->flags);
+ break;
+ }
+ zone_offset += zmd->zone_nr_sectors;
+ }
return 0;
}
@@ -1146,8 +1461,15 @@ static int dmz_init_zone(struct blk_zone *blkz, unsigned int idx, void *data)
*/
static void dmz_drop_zones(struct dmz_metadata *zmd)
{
- kfree(zmd->zones);
- zmd->zones = NULL;
+ int idx;
+
+ for (idx = 0; idx < zmd->nr_zones; idx++) {
+ struct dm_zone *zone = xa_load(&zmd->zones, idx);
+
+ kfree(zone);
+ xa_erase(&zmd->zones, idx);
+ }
+ xa_destroy(&zmd->zones);
}
/*
@@ -1156,32 +1478,87 @@ static void dmz_drop_zones(struct dmz_metadata *zmd)
*/
static int dmz_init_zones(struct dmz_metadata *zmd)
{
- struct dmz_dev *dev = zmd->dev;
- int ret;
+ int i, ret;
+ struct dmz_dev *zoned_dev = &zmd->dev[0];
/* Init */
- zmd->zone_bitmap_size = dev->zone_nr_blocks >> 3;
+ zmd->zone_nr_sectors = zmd->dev[0].zone_nr_sectors;
+ zmd->zone_nr_sectors_shift = ilog2(zmd->zone_nr_sectors);
+ zmd->zone_nr_blocks = dmz_sect2blk(zmd->zone_nr_sectors);
+ zmd->zone_nr_blocks_shift = ilog2(zmd->zone_nr_blocks);
+ zmd->zone_bitmap_size = zmd->zone_nr_blocks >> 3;
zmd->zone_nr_bitmap_blocks =
max_t(sector_t, 1, zmd->zone_bitmap_size >> DMZ_BLOCK_SHIFT);
- zmd->zone_bits_per_mblk = min_t(sector_t, dev->zone_nr_blocks,
+ zmd->zone_bits_per_mblk = min_t(sector_t, zmd->zone_nr_blocks,
DMZ_BLOCK_SIZE_BITS);
/* Allocate zone array */
- zmd->zones = kcalloc(dev->nr_zones, sizeof(struct dm_zone), GFP_KERNEL);
- if (!zmd->zones)
- return -ENOMEM;
+ zmd->nr_zones = 0;
+ for (i = 0; i < zmd->nr_devs; i++) {
+ struct dmz_dev *dev = &zmd->dev[i];
+
+ dev->metadata = zmd;
+ zmd->nr_zones += dev->nr_zones;
+
+ atomic_set(&dev->unmap_nr_rnd, 0);
+ INIT_LIST_HEAD(&dev->unmap_rnd_list);
+ INIT_LIST_HEAD(&dev->map_rnd_list);
+
+ atomic_set(&dev->unmap_nr_seq, 0);
+ INIT_LIST_HEAD(&dev->unmap_seq_list);
+ INIT_LIST_HEAD(&dev->map_seq_list);
+ }
+
+ if (!zmd->nr_zones) {
+ DMERR("(%s): No zones found", zmd->devname);
+ return -ENXIO;
+ }
+ xa_init(&zmd->zones);
+
+ DMDEBUG("(%s): Using %zu B for zone information",
+ zmd->devname, sizeof(struct dm_zone) * zmd->nr_zones);
- dmz_dev_info(dev, "Using %zu B for zone information",
- sizeof(struct dm_zone) * dev->nr_zones);
+ if (zmd->nr_devs > 1) {
+ ret = dmz_emulate_zones(zmd, &zmd->dev[0]);
+ if (ret < 0) {
+ DMDEBUG("(%s): Failed to emulate zones, error %d",
+ zmd->devname, ret);
+ dmz_drop_zones(zmd);
+ return ret;
+ }
+
+ /*
+ * Primary superblock zone is always at zone 0 when multiple
+ * drives are present.
+ */
+ zmd->sb[0].zone = dmz_get(zmd, 0);
+
+ for (i = 1; i < zmd->nr_devs; i++) {
+ zoned_dev = &zmd->dev[i];
+
+ ret = blkdev_report_zones(zoned_dev->bdev, 0,
+ BLK_ALL_ZONES,
+ dmz_init_zone, zoned_dev);
+ if (ret < 0) {
+ DMDEBUG("(%s): Failed to report zones, error %d",
+ zmd->devname, ret);
+ dmz_drop_zones(zmd);
+ return ret;
+ }
+ }
+ return 0;
+ }
/*
* Get zone information and initialize zone descriptors. At the same
* time, determine where the super block should be: first block of the
* first randomly writable zone.
*/
- ret = blkdev_report_zones(dev->bdev, 0, BLK_ALL_ZONES, dmz_init_zone,
- zmd);
+ ret = blkdev_report_zones(zoned_dev->bdev, 0, BLK_ALL_ZONES,
+ dmz_init_zone, zoned_dev);
if (ret < 0) {
+ DMDEBUG("(%s): Failed to report zones, error %d",
+ zmd->devname, ret);
dmz_drop_zones(zmd);
return ret;
}
@@ -1213,9 +1590,13 @@ static int dmz_update_zone_cb(struct blk_zone *blkz, unsigned int idx,
*/
static int dmz_update_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
{
+ struct dmz_dev *dev = zone->dev;
unsigned int noio_flag;
int ret;
+ if (dev->flags & DMZ_BDEV_REGULAR)
+ return 0;
+
/*
* Get zone information from disk. Since blkdev_report_zones() uses
* GFP_KERNEL by default for memory allocations, set the per-task
@@ -1223,16 +1604,16 @@ static int dmz_update_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
* GFP_NOIO was specified.
*/
noio_flag = memalloc_noio_save();
- ret = blkdev_report_zones(zmd->dev->bdev, dmz_start_sect(zmd, zone), 1,
+ ret = blkdev_report_zones(dev->bdev, dmz_start_sect(zmd, zone), 1,
dmz_update_zone_cb, zone);
memalloc_noio_restore(noio_flag);
if (ret == 0)
ret = -EIO;
if (ret < 0) {
- dmz_dev_err(zmd->dev, "Get zone %u report failed",
- dmz_id(zmd, zone));
- dmz_check_bdev(zmd->dev);
+ dmz_dev_err(dev, "Get zone %u report failed",
+ zone->id);
+ dmz_check_bdev(dev);
return ret;
}
@@ -1246,6 +1627,7 @@ static int dmz_update_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
static int dmz_handle_seq_write_err(struct dmz_metadata *zmd,
struct dm_zone *zone)
{
+ struct dmz_dev *dev = zone->dev;
unsigned int wp = 0;
int ret;
@@ -1254,8 +1636,8 @@ static int dmz_handle_seq_write_err(struct dmz_metadata *zmd,
if (ret)
return ret;
- dmz_dev_warn(zmd->dev, "Processing zone %u write error (zone wp %u/%u)",
- dmz_id(zmd, zone), zone->wp_block, wp);
+ dmz_dev_warn(dev, "Processing zone %u write error (zone wp %u/%u)",
+ zone->id, zone->wp_block, wp);
if (zone->wp_block < wp) {
dmz_invalidate_blocks(zmd, zone, zone->wp_block,
@@ -1265,11 +1647,6 @@ static int dmz_handle_seq_write_err(struct dmz_metadata *zmd,
return 0;
}
-static struct dm_zone *dmz_get(struct dmz_metadata *zmd, unsigned int zone_id)
-{
- return &zmd->zones[zone_id];
-}
-
/*
* Reset a zone write pointer.
*/
@@ -1287,14 +1664,14 @@ static int dmz_reset_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
return 0;
if (!dmz_is_empty(zone) || dmz_seq_write_err(zone)) {
- struct dmz_dev *dev = zmd->dev;
+ struct dmz_dev *dev = zone->dev;
ret = blkdev_zone_mgmt(dev->bdev, REQ_OP_ZONE_RESET,
dmz_start_sect(zmd, zone),
- dev->zone_nr_sectors, GFP_NOIO);
+ zmd->zone_nr_sectors, GFP_NOIO);
if (ret) {
dmz_dev_err(dev, "Reset zone %u failed %d",
- dmz_id(zmd, zone), ret);
+ zone->id, ret);
return ret;
}
}
@@ -1313,7 +1690,6 @@ static void dmz_get_zone_weight(struct dmz_metadata *zmd, struct dm_zone *zone);
*/
static int dmz_load_mapping(struct dmz_metadata *zmd)
{
- struct dmz_dev *dev = zmd->dev;
struct dm_zone *dzone, *bzone;
struct dmz_mblock *dmap_mblk = NULL;
struct dmz_map *dmap;
@@ -1345,36 +1721,48 @@ static int dmz_load_mapping(struct dmz_metadata *zmd)
if (dzone_id == DMZ_MAP_UNMAPPED)
goto next;
- if (dzone_id >= dev->nr_zones) {
- dmz_dev_err(dev, "Chunk %u mapping: invalid data zone ID %u",
+ if (dzone_id >= zmd->nr_zones) {
+ dmz_zmd_err(zmd, "Chunk %u mapping: invalid data zone ID %u",
chunk, dzone_id);
return -EIO;
}
dzone = dmz_get(zmd, dzone_id);
+ if (!dzone) {
+ dmz_zmd_err(zmd, "Chunk %u mapping: data zone %u not present",
+ chunk, dzone_id);
+ return -EIO;
+ }
set_bit(DMZ_DATA, &dzone->flags);
dzone->chunk = chunk;
dmz_get_zone_weight(zmd, dzone);
- if (dmz_is_rnd(dzone))
- list_add_tail(&dzone->link, &zmd->map_rnd_list);
+ if (dmz_is_cache(dzone))
+ list_add_tail(&dzone->link, &zmd->map_cache_list);
+ else if (dmz_is_rnd(dzone))
+ list_add_tail(&dzone->link, &dzone->dev->map_rnd_list);
else
- list_add_tail(&dzone->link, &zmd->map_seq_list);
+ list_add_tail(&dzone->link, &dzone->dev->map_seq_list);
/* Check buffer zone */
bzone_id = le32_to_cpu(dmap[e].bzone_id);
if (bzone_id == DMZ_MAP_UNMAPPED)
goto next;
- if (bzone_id >= dev->nr_zones) {
- dmz_dev_err(dev, "Chunk %u mapping: invalid buffer zone ID %u",
+ if (bzone_id >= zmd->nr_zones) {
+ dmz_zmd_err(zmd, "Chunk %u mapping: invalid buffer zone ID %u",
chunk, bzone_id);
return -EIO;
}
bzone = dmz_get(zmd, bzone_id);
- if (!dmz_is_rnd(bzone)) {
- dmz_dev_err(dev, "Chunk %u mapping: invalid buffer zone %u",
+ if (!bzone) {
+ dmz_zmd_err(zmd, "Chunk %u mapping: buffer zone %u not present",
+ chunk, bzone_id);
+ return -EIO;
+ }
+ if (!dmz_is_rnd(bzone) && !dmz_is_cache(bzone)) {
+ dmz_zmd_err(zmd, "Chunk %u mapping: invalid buffer zone %u",
chunk, bzone_id);
return -EIO;
}
@@ -1385,7 +1773,10 @@ static int dmz_load_mapping(struct dmz_metadata *zmd)
bzone->bzone = dzone;
dzone->bzone = bzone;
dmz_get_zone_weight(zmd, bzone);
- list_add_tail(&bzone->link, &zmd->map_rnd_list);
+ if (dmz_is_cache(bzone))
+ list_add_tail(&bzone->link, &zmd->map_cache_list);
+ else
+ list_add_tail(&bzone->link, &bzone->dev->map_rnd_list);
next:
chunk++;
e++;
@@ -1398,15 +1789,21 @@ next:
* fully initialized. All remaining zones are unmapped data
* zones. Finish initializing those here.
*/
- for (i = 0; i < dev->nr_zones; i++) {
+ for (i = 0; i < zmd->nr_zones; i++) {
dzone = dmz_get(zmd, i);
+ if (!dzone)
+ continue;
if (dmz_is_meta(dzone))
continue;
+ if (dmz_is_offline(dzone))
+ continue;
- if (dmz_is_rnd(dzone))
- zmd->nr_rnd++;
+ if (dmz_is_cache(dzone))
+ zmd->nr_cache++;
+ else if (dmz_is_rnd(dzone))
+ dzone->dev->nr_rnd++;
else
- zmd->nr_seq++;
+ dzone->dev->nr_seq++;
if (dmz_is_data(dzone)) {
/* Already initialized */
@@ -1416,16 +1813,22 @@ next:
/* Unmapped data zone */
set_bit(DMZ_DATA, &dzone->flags);
dzone->chunk = DMZ_MAP_UNMAPPED;
- if (dmz_is_rnd(dzone)) {
- list_add_tail(&dzone->link, &zmd->unmap_rnd_list);
- atomic_inc(&zmd->unmap_nr_rnd);
+ if (dmz_is_cache(dzone)) {
+ list_add_tail(&dzone->link, &zmd->unmap_cache_list);
+ atomic_inc(&zmd->unmap_nr_cache);
+ } else if (dmz_is_rnd(dzone)) {
+ list_add_tail(&dzone->link,
+ &dzone->dev->unmap_rnd_list);
+ atomic_inc(&dzone->dev->unmap_nr_rnd);
} else if (atomic_read(&zmd->nr_reserved_seq_zones) < zmd->nr_reserved_seq) {
list_add_tail(&dzone->link, &zmd->reserved_seq_zones_list);
+ set_bit(DMZ_RESERVED, &dzone->flags);
atomic_inc(&zmd->nr_reserved_seq_zones);
- zmd->nr_seq--;
+ dzone->dev->nr_seq--;
} else {
- list_add_tail(&dzone->link, &zmd->unmap_seq_list);
- atomic_inc(&zmd->unmap_nr_seq);
+ list_add_tail(&dzone->link,
+ &dzone->dev->unmap_seq_list);
+ atomic_inc(&dzone->dev->unmap_nr_seq);
}
}
@@ -1459,10 +1862,13 @@ static void __dmz_lru_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
list_del_init(&zone->link);
if (dmz_is_seq(zone)) {
/* LRU rotate sequential zone */
- list_add_tail(&zone->link, &zmd->map_seq_list);
+ list_add_tail(&zone->link, &zone->dev->map_seq_list);
+ } else if (dmz_is_cache(zone)) {
+ /* LRU rotate cache zone */
+ list_add_tail(&zone->link, &zmd->map_cache_list);
} else {
/* LRU rotate random zone */
- list_add_tail(&zone->link, &zmd->map_rnd_list);
+ list_add_tail(&zone->link, &zone->dev->map_rnd_list);
}
}
@@ -1529,58 +1935,76 @@ static void dmz_wait_for_reclaim(struct dmz_metadata *zmd, struct dm_zone *zone)
{
dmz_unlock_map(zmd);
dmz_unlock_metadata(zmd);
+ set_bit(DMZ_RECLAIM_TERMINATE, &zone->flags);
wait_on_bit_timeout(&zone->flags, DMZ_RECLAIM, TASK_UNINTERRUPTIBLE, HZ);
+ clear_bit(DMZ_RECLAIM_TERMINATE, &zone->flags);
dmz_lock_metadata(zmd);
dmz_lock_map(zmd);
}
/*
- * Select a random write zone for reclaim.
+ * Select a cache or random write zone for reclaim.
*/
-static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd)
+static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd,
+ unsigned int idx, bool idle)
{
struct dm_zone *dzone = NULL;
- struct dm_zone *zone;
-
- if (list_empty(&zmd->map_rnd_list))
- return ERR_PTR(-EBUSY);
+ struct dm_zone *zone, *last = NULL;
+ struct list_head *zone_list;
+
+ /* If we have cache zones, select from the cache zone list */
+ if (zmd->nr_cache) {
+ zone_list = &zmd->map_cache_list;
+ /* Try to reclaim random zones, too, when idle */
+ if (idle && list_empty(zone_list))
+ zone_list = &zmd->dev[idx].map_rnd_list;
+ } else
+ zone_list = &zmd->dev[idx].map_rnd_list;
- list_for_each_entry(zone, &zmd->map_rnd_list, link) {
- if (dmz_is_buf(zone))
+ list_for_each_entry(zone, zone_list, link) {
+ if (dmz_is_buf(zone)) {
dzone = zone->bzone;
- else
+ if (dzone->dev->dev_idx != idx)
+ continue;
+ if (!last) {
+ last = dzone;
+ continue;
+ }
+ if (last->weight < dzone->weight)
+ continue;
+ dzone = last;
+ } else
dzone = zone;
if (dmz_lock_zone_reclaim(dzone))
return dzone;
}
- return ERR_PTR(-EBUSY);
+ return NULL;
}
/*
* Select a buffered sequential zone for reclaim.
*/
-static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd)
+static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd,
+ unsigned int idx)
{
struct dm_zone *zone;
- if (list_empty(&zmd->map_seq_list))
- return ERR_PTR(-EBUSY);
-
- list_for_each_entry(zone, &zmd->map_seq_list, link) {
+ list_for_each_entry(zone, &zmd->dev[idx].map_seq_list, link) {
if (!zone->bzone)
continue;
if (dmz_lock_zone_reclaim(zone))
return zone;
}
- return ERR_PTR(-EBUSY);
+ return NULL;
}
/*
* Select a zone for reclaim.
*/
-struct dm_zone *dmz_get_zone_for_reclaim(struct dmz_metadata *zmd)
+struct dm_zone *dmz_get_zone_for_reclaim(struct dmz_metadata *zmd,
+ unsigned int dev_idx, bool idle)
{
struct dm_zone *zone;
@@ -1594,9 +2018,9 @@ struct dm_zone *dmz_get_zone_for_reclaim(struct dmz_metadata *zmd)
*/
dmz_lock_map(zmd);
if (list_empty(&zmd->reserved_seq_zones_list))
- zone = dmz_get_seq_zone_for_reclaim(zmd);
+ zone = dmz_get_seq_zone_for_reclaim(zmd, dev_idx);
else
- zone = dmz_get_rnd_zone_for_reclaim(zmd);
+ zone = dmz_get_rnd_zone_for_reclaim(zmd, dev_idx, idle);
dmz_unlock_map(zmd);
return zone;
@@ -1616,6 +2040,7 @@ struct dm_zone *dmz_get_chunk_mapping(struct dmz_metadata *zmd, unsigned int chu
unsigned int dzone_id;
struct dm_zone *dzone = NULL;
int ret = 0;
+ int alloc_flags = zmd->nr_cache ? DMZ_ALLOC_CACHE : DMZ_ALLOC_RND;
dmz_lock_map(zmd);
again:
@@ -1630,9 +2055,9 @@ again:
goto out;
/* Allocate a random zone */
- dzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND);
+ dzone = dmz_alloc_zone(zmd, 0, alloc_flags);
if (!dzone) {
- if (dmz_bdev_is_dying(zmd->dev)) {
+ if (dmz_dev_is_dying(zmd)) {
dzone = ERR_PTR(-EIO);
goto out;
}
@@ -1645,6 +2070,10 @@ again:
} else {
/* The chunk is already mapped: get the mapping zone */
dzone = dmz_get(zmd, dzone_id);
+ if (!dzone) {
+ dzone = ERR_PTR(-EIO);
+ goto out;
+ }
if (dzone->chunk != chunk) {
dzone = ERR_PTR(-EIO);
goto out;
@@ -1723,6 +2152,7 @@ struct dm_zone *dmz_get_chunk_buffer(struct dmz_metadata *zmd,
struct dm_zone *dzone)
{
struct dm_zone *bzone;
+ int alloc_flags = zmd->nr_cache ? DMZ_ALLOC_CACHE : DMZ_ALLOC_RND;
dmz_lock_map(zmd);
again:
@@ -1731,9 +2161,9 @@ again:
goto out;
/* Allocate a random zone */
- bzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND);
+ bzone = dmz_alloc_zone(zmd, 0, alloc_flags);
if (!bzone) {
- if (dmz_bdev_is_dying(zmd->dev)) {
+ if (dmz_dev_is_dying(zmd)) {
bzone = ERR_PTR(-EIO);
goto out;
}
@@ -1742,14 +2172,16 @@ again:
}
/* Update the chunk mapping */
- dmz_set_chunk_mapping(zmd, dzone->chunk, dmz_id(zmd, dzone),
- dmz_id(zmd, bzone));
+ dmz_set_chunk_mapping(zmd, dzone->chunk, dzone->id, bzone->id);
set_bit(DMZ_BUF, &bzone->flags);
bzone->chunk = dzone->chunk;
bzone->bzone = dzone;
dzone->bzone = bzone;
- list_add_tail(&bzone->link, &zmd->map_rnd_list);
+ if (dmz_is_cache(bzone))
+ list_add_tail(&bzone->link, &zmd->map_cache_list);
+ else
+ list_add_tail(&bzone->link, &bzone->dev->map_rnd_list);
out:
dmz_unlock_map(zmd);
@@ -1760,46 +2192,68 @@ out:
* Get an unmapped (free) zone.
* This must be called with the mapping lock held.
*/
-struct dm_zone *dmz_alloc_zone(struct dmz_metadata *zmd, unsigned long flags)
+struct dm_zone *dmz_alloc_zone(struct dmz_metadata *zmd, unsigned int dev_idx,
+ unsigned long flags)
{
struct list_head *list;
struct dm_zone *zone;
+ int i = 0;
- if (flags & DMZ_ALLOC_RND)
- list = &zmd->unmap_rnd_list;
- else
- list = &zmd->unmap_seq_list;
again:
+ if (flags & DMZ_ALLOC_CACHE)
+ list = &zmd->unmap_cache_list;
+ else if (flags & DMZ_ALLOC_RND)
+ list = &zmd->dev[dev_idx].unmap_rnd_list;
+ else
+ list = &zmd->dev[dev_idx].unmap_seq_list;
+
if (list_empty(list)) {
/*
- * No free zone: if this is for reclaim, allow using the
- * reserved sequential zones.
+ * No free zone: return NULL if this is not for reclaim.
*/
- if (!(flags & DMZ_ALLOC_RECLAIM) ||
- list_empty(&zmd->reserved_seq_zones_list))
+ if (!(flags & DMZ_ALLOC_RECLAIM))
return NULL;
+ /*
+ * Try to allocate from other devices
+ */
+ if (i < zmd->nr_devs) {
+ dev_idx = (dev_idx + 1) % zmd->nr_devs;
+ i++;
+ goto again;
+ }
- zone = list_first_entry(&zmd->reserved_seq_zones_list,
- struct dm_zone, link);
- list_del_init(&zone->link);
- atomic_dec(&zmd->nr_reserved_seq_zones);
+ /*
+ * Fall back to the reserved sequential zones
+ */
+ zone = list_first_entry_or_null(&zmd->reserved_seq_zones_list,
+ struct dm_zone, link);
+ if (zone) {
+ list_del_init(&zone->link);
+ atomic_dec(&zmd->nr_reserved_seq_zones);
+ }
return zone;
}
zone = list_first_entry(list, struct dm_zone, link);
list_del_init(&zone->link);
- if (dmz_is_rnd(zone))
- atomic_dec(&zmd->unmap_nr_rnd);
+ if (dmz_is_cache(zone))
+ atomic_dec(&zmd->unmap_nr_cache);
+ else if (dmz_is_rnd(zone))
+ atomic_dec(&zone->dev->unmap_nr_rnd);
else
- atomic_dec(&zmd->unmap_nr_seq);
+ atomic_dec(&zone->dev->unmap_nr_seq);
if (dmz_is_offline(zone)) {
- dmz_dev_warn(zmd->dev, "Zone %u is offline", dmz_id(zmd, zone));
+ dmz_zmd_warn(zmd, "Zone %u is offline", zone->id);
+ zone = NULL;
+ goto again;
+ }
+ if (dmz_is_meta(zone)) {
+ dmz_zmd_warn(zmd, "Zone %u has metadata", zone->id);
zone = NULL;
goto again;
}
-
return zone;
}
@@ -1814,16 +2268,18 @@ void dmz_free_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
dmz_reset_zone(zmd, zone);
/* Return the zone to its type unmap list */
- if (dmz_is_rnd(zone)) {
- list_add_tail(&zone->link, &zmd->unmap_rnd_list);
- atomic_inc(&zmd->unmap_nr_rnd);
- } else if (atomic_read(&zmd->nr_reserved_seq_zones) <
- zmd->nr_reserved_seq) {
+ if (dmz_is_cache(zone)) {
+ list_add_tail(&zone->link, &zmd->unmap_cache_list);
+ atomic_inc(&zmd->unmap_nr_cache);
+ } else if (dmz_is_rnd(zone)) {
+ list_add_tail(&zone->link, &zone->dev->unmap_rnd_list);
+ atomic_inc(&zone->dev->unmap_nr_rnd);
+ } else if (dmz_is_reserved(zone)) {
list_add_tail(&zone->link, &zmd->reserved_seq_zones_list);
atomic_inc(&zmd->nr_reserved_seq_zones);
} else {
- list_add_tail(&zone->link, &zmd->unmap_seq_list);
- atomic_inc(&zmd->unmap_nr_seq);
+ list_add_tail(&zone->link, &zone->dev->unmap_seq_list);
+ atomic_inc(&zone->dev->unmap_nr_seq);
}
wake_up_all(&zmd->free_wq);
@@ -1837,13 +2293,15 @@ void dmz_map_zone(struct dmz_metadata *zmd, struct dm_zone *dzone,
unsigned int chunk)
{
/* Set the chunk mapping */
- dmz_set_chunk_mapping(zmd, chunk, dmz_id(zmd, dzone),
+ dmz_set_chunk_mapping(zmd, chunk, dzone->id,
DMZ_MAP_UNMAPPED);
dzone->chunk = chunk;
- if (dmz_is_rnd(dzone))
- list_add_tail(&dzone->link, &zmd->map_rnd_list);
+ if (dmz_is_cache(dzone))
+ list_add_tail(&dzone->link, &zmd->map_cache_list);
+ else if (dmz_is_rnd(dzone))
+ list_add_tail(&dzone->link, &dzone->dev->map_rnd_list);
else
- list_add_tail(&dzone->link, &zmd->map_seq_list);
+ list_add_tail(&dzone->link, &dzone->dev->map_seq_list);
}
/*
@@ -1865,7 +2323,7 @@ void dmz_unmap_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
* Unmapping the chunk buffer zone: clear only
* the chunk buffer mapping
*/
- dzone_id = dmz_id(zmd, zone->bzone);
+ dzone_id = zone->bzone->id;
zone->bzone->bzone = NULL;
zone->bzone = NULL;
@@ -1927,7 +2385,7 @@ static struct dmz_mblock *dmz_get_bitmap(struct dmz_metadata *zmd,
sector_t chunk_block)
{
sector_t bitmap_block = 1 + zmd->nr_map_blocks +
- (sector_t)(dmz_id(zmd, zone) * zmd->zone_nr_bitmap_blocks) +
+ (sector_t)(zone->id * zmd->zone_nr_bitmap_blocks) +
(chunk_block >> DMZ_BLOCK_SHIFT_BITS);
return dmz_get_mblock(zmd, bitmap_block);
@@ -1943,7 +2401,7 @@ int dmz_copy_valid_blocks(struct dmz_metadata *zmd, struct dm_zone *from_zone,
sector_t chunk_block = 0;
/* Get the zones bitmap blocks */
- while (chunk_block < zmd->dev->zone_nr_blocks) {
+ while (chunk_block < zmd->zone_nr_blocks) {
from_mblk = dmz_get_bitmap(zmd, from_zone, chunk_block);
if (IS_ERR(from_mblk))
return PTR_ERR(from_mblk);
@@ -1978,7 +2436,7 @@ int dmz_merge_valid_blocks(struct dmz_metadata *zmd, struct dm_zone *from_zone,
int ret;
/* Get the zones bitmap blocks */
- while (chunk_block < zmd->dev->zone_nr_blocks) {
+ while (chunk_block < zmd->zone_nr_blocks) {
/* Get a valid region from the source zone */
ret = dmz_first_valid_block(zmd, from_zone, &chunk_block);
if (ret <= 0)
@@ -2002,12 +2460,12 @@ int dmz_validate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone,
sector_t chunk_block, unsigned int nr_blocks)
{
unsigned int count, bit, nr_bits;
- unsigned int zone_nr_blocks = zmd->dev->zone_nr_blocks;
+ unsigned int zone_nr_blocks = zmd->zone_nr_blocks;
struct dmz_mblock *mblk;
unsigned int n = 0;
- dmz_dev_debug(zmd->dev, "=> VALIDATE zone %u, block %llu, %u blocks",
- dmz_id(zmd, zone), (unsigned long long)chunk_block,
+ dmz_zmd_debug(zmd, "=> VALIDATE zone %u, block %llu, %u blocks",
+ zone->id, (unsigned long long)chunk_block,
nr_blocks);
WARN_ON(chunk_block + nr_blocks > zone_nr_blocks);
@@ -2036,8 +2494,8 @@ int dmz_validate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone,
if (likely(zone->weight + n <= zone_nr_blocks))
zone->weight += n;
else {
- dmz_dev_warn(zmd->dev, "Zone %u: weight %u should be <= %u",
- dmz_id(zmd, zone), zone->weight,
+ dmz_zmd_warn(zmd, "Zone %u: weight %u should be <= %u",
+ zone->id, zone->weight,
zone_nr_blocks - n);
zone->weight = zone_nr_blocks;
}
@@ -2086,10 +2544,10 @@ int dmz_invalidate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone,
struct dmz_mblock *mblk;
unsigned int n = 0;
- dmz_dev_debug(zmd->dev, "=> INVALIDATE zone %u, block %llu, %u blocks",
- dmz_id(zmd, zone), (u64)chunk_block, nr_blocks);
+ dmz_zmd_debug(zmd, "=> INVALIDATE zone %u, block %llu, %u blocks",
+ zone->id, (u64)chunk_block, nr_blocks);
- WARN_ON(chunk_block + nr_blocks > zmd->dev->zone_nr_blocks);
+ WARN_ON(chunk_block + nr_blocks > zmd->zone_nr_blocks);
while (nr_blocks) {
/* Get bitmap block */
@@ -2116,8 +2574,8 @@ int dmz_invalidate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone,
if (zone->weight >= n)
zone->weight -= n;
else {
- dmz_dev_warn(zmd->dev, "Zone %u: weight %u should be >= %u",
- dmz_id(zmd, zone), zone->weight, n);
+ dmz_zmd_warn(zmd, "Zone %u: weight %u should be >= %u",
+ zone->id, zone->weight, n);
zone->weight = 0;
}
@@ -2133,7 +2591,7 @@ static int dmz_test_block(struct dmz_metadata *zmd, struct dm_zone *zone,
struct dmz_mblock *mblk;
int ret;
- WARN_ON(chunk_block >= zmd->dev->zone_nr_blocks);
+ WARN_ON(chunk_block >= zmd->zone_nr_blocks);
/* Get bitmap block */
mblk = dmz_get_bitmap(zmd, zone, chunk_block);
@@ -2163,7 +2621,7 @@ static int dmz_to_next_set_block(struct dmz_metadata *zmd, struct dm_zone *zone,
unsigned long *bitmap;
int n = 0;
- WARN_ON(chunk_block + nr_blocks > zmd->dev->zone_nr_blocks);
+ WARN_ON(chunk_block + nr_blocks > zmd->zone_nr_blocks);
while (nr_blocks) {
/* Get bitmap block */
@@ -2207,7 +2665,7 @@ int dmz_block_valid(struct dmz_metadata *zmd, struct dm_zone *zone,
/* The block is valid: get the number of valid blocks from block */
return dmz_to_next_set_block(zmd, zone, chunk_block,
- zmd->dev->zone_nr_blocks - chunk_block, 0);
+ zmd->zone_nr_blocks - chunk_block, 0);
}
/*
@@ -2223,7 +2681,7 @@ int dmz_first_valid_block(struct dmz_metadata *zmd, struct dm_zone *zone,
int ret;
ret = dmz_to_next_set_block(zmd, zone, start_block,
- zmd->dev->zone_nr_blocks - start_block, 1);
+ zmd->zone_nr_blocks - start_block, 1);
if (ret < 0)
return ret;
@@ -2231,7 +2689,7 @@ int dmz_first_valid_block(struct dmz_metadata *zmd, struct dm_zone *zone,
*chunk_block = start_block;
return dmz_to_next_set_block(zmd, zone, start_block,
- zmd->dev->zone_nr_blocks - start_block, 0);
+ zmd->zone_nr_blocks - start_block, 0);
}
/*
@@ -2270,7 +2728,7 @@ static void dmz_get_zone_weight(struct dmz_metadata *zmd, struct dm_zone *zone)
struct dmz_mblock *mblk;
sector_t chunk_block = 0;
unsigned int bit, nr_bits;
- unsigned int nr_blocks = zmd->dev->zone_nr_blocks;
+ unsigned int nr_blocks = zmd->zone_nr_blocks;
void *bitmap;
int n = 0;
@@ -2326,7 +2784,7 @@ static void dmz_cleanup_metadata(struct dmz_metadata *zmd)
while (!list_empty(&zmd->mblk_dirty_list)) {
mblk = list_first_entry(&zmd->mblk_dirty_list,
struct dmz_mblock, link);
- dmz_dev_warn(zmd->dev, "mblock %llu still in dirty list (ref %u)",
+ dmz_zmd_warn(zmd, "mblock %llu still in dirty list (ref %u)",
(u64)mblk->no, mblk->ref);
list_del_init(&mblk->link);
rb_erase(&mblk->node, &zmd->mblk_rbtree);
@@ -2344,7 +2802,7 @@ static void dmz_cleanup_metadata(struct dmz_metadata *zmd)
/* Sanity checks: the mblock rbtree should now be empty */
root = &zmd->mblk_rbtree;
rbtree_postorder_for_each_entry_safe(mblk, next, root, node) {
- dmz_dev_warn(zmd->dev, "mblock %llu ref %u still in rbtree",
+ dmz_zmd_warn(zmd, "mblock %llu ref %u still in rbtree",
(u64)mblk->no, mblk->ref);
mblk->ref = 0;
dmz_free_mblock(zmd, mblk);
@@ -2357,13 +2815,42 @@ static void dmz_cleanup_metadata(struct dmz_metadata *zmd)
mutex_destroy(&zmd->map_lock);
}
+static void dmz_print_dev(struct dmz_metadata *zmd, int num)
+{
+ struct dmz_dev *dev = &zmd->dev[num];
+
+ if (bdev_zoned_model(dev->bdev) == BLK_ZONED_NONE)
+ dmz_dev_info(dev, "Regular block device");
+ else
+ dmz_dev_info(dev, "Host-%s zoned block device",
+ bdev_zoned_model(dev->bdev) == BLK_ZONED_HA ?
+ "aware" : "managed");
+ if (zmd->sb_version > 1) {
+ sector_t sector_offset =
+ dev->zone_offset << zmd->zone_nr_sectors_shift;
+
+ dmz_dev_info(dev, " %llu 512-byte logical sectors (offset %llu)",
+ (u64)dev->capacity, (u64)sector_offset);
+ dmz_dev_info(dev, " %u zones of %llu 512-byte logical sectors (offset %llu)",
+ dev->nr_zones, (u64)zmd->zone_nr_sectors,
+ (u64)dev->zone_offset);
+ } else {
+ dmz_dev_info(dev, " %llu 512-byte logical sectors",
+ (u64)dev->capacity);
+ dmz_dev_info(dev, " %u zones of %llu 512-byte logical sectors",
+ dev->nr_zones, (u64)zmd->zone_nr_sectors);
+ }
+}
+
/*
* Initialize the zoned metadata.
*/
-int dmz_ctr_metadata(struct dmz_dev *dev, struct dmz_metadata **metadata)
+int dmz_ctr_metadata(struct dmz_dev *dev, int num_dev,
+ struct dmz_metadata **metadata,
+ const char *devname)
{
struct dmz_metadata *zmd;
- unsigned int i, zid;
+ unsigned int i;
struct dm_zone *zone;
int ret;
@@ -2371,7 +2858,9 @@ int dmz_ctr_metadata(struct dmz_dev *dev, struct dmz_metadata **metadata)
if (!zmd)
return -ENOMEM;
+ strcpy(zmd->devname, devname);
zmd->dev = dev;
+ zmd->nr_devs = num_dev;
zmd->mblk_rbtree = RB_ROOT;
init_rwsem(&zmd->mblk_sem);
mutex_init(&zmd->mblk_flush_lock);
@@ -2380,13 +2869,10 @@ int dmz_ctr_metadata(struct dmz_dev *dev, struct dmz_metadata **metadata)
INIT_LIST_HEAD(&zmd->mblk_dirty_list);
mutex_init(&zmd->map_lock);
- atomic_set(&zmd->unmap_nr_rnd, 0);
- INIT_LIST_HEAD(&zmd->unmap_rnd_list);
- INIT_LIST_HEAD(&zmd->map_rnd_list);
- atomic_set(&zmd->unmap_nr_seq, 0);
- INIT_LIST_HEAD(&zmd->unmap_seq_list);
- INIT_LIST_HEAD(&zmd->map_seq_list);
+ atomic_set(&zmd->unmap_nr_cache, 0);
+ INIT_LIST_HEAD(&zmd->unmap_cache_list);
+ INIT_LIST_HEAD(&zmd->map_cache_list);
atomic_set(&zmd->nr_reserved_seq_zones, 0);
INIT_LIST_HEAD(&zmd->reserved_seq_zones_list);
@@ -2404,14 +2890,22 @@ int dmz_ctr_metadata(struct dmz_dev *dev, struct dmz_metadata **metadata)
goto err;
/* Set metadata zones starting from sb_zone */
- zid = dmz_id(zmd, zmd->sb_zone);
for (i = 0; i < zmd->nr_meta_zones << 1; i++) {
- zone = dmz_get(zmd, zid + i);
- if (!dmz_is_rnd(zone))
+ zone = dmz_get(zmd, zmd->sb[0].zone->id + i);
+ if (!zone) {
+ dmz_zmd_err(zmd,
+ "metadata zone %u not present", i);
+ ret = -ENXIO;
+ goto err;
+ }
+ if (!dmz_is_rnd(zone) && !dmz_is_cache(zone)) {
+ dmz_zmd_err(zmd,
+ "metadata zone %d is not random", i);
+ ret = -ENXIO;
goto err;
+ }
set_bit(DMZ_META, &zone->flags);
}
-
/* Load mapping table */
ret = dmz_load_mapping(zmd);
if (ret)
@@ -2432,34 +2926,38 @@ int dmz_ctr_metadata(struct dmz_dev *dev, struct dmz_metadata **metadata)
/* Metadata cache shrinker */
ret = register_shrinker(&zmd->mblk_shrinker);
if (ret) {
- dmz_dev_err(dev, "Register metadata cache shrinker failed");
+ dmz_zmd_err(zmd, "Register metadata cache shrinker failed");
goto err;
}
- dmz_dev_info(dev, "Host-%s zoned block device",
- bdev_zoned_model(dev->bdev) == BLK_ZONED_HA ?
- "aware" : "managed");
- dmz_dev_info(dev, " %llu 512-byte logical sectors",
- (u64)dev->capacity);
- dmz_dev_info(dev, " %u zones of %llu 512-byte logical sectors",
- dev->nr_zones, (u64)dev->zone_nr_sectors);
- dmz_dev_info(dev, " %u metadata zones",
- zmd->nr_meta_zones * 2);
- dmz_dev_info(dev, " %u data zones for %u chunks",
- zmd->nr_data_zones, zmd->nr_chunks);
- dmz_dev_info(dev, " %u random zones (%u unmapped)",
- zmd->nr_rnd, atomic_read(&zmd->unmap_nr_rnd));
- dmz_dev_info(dev, " %u sequential zones (%u unmapped)",
- zmd->nr_seq, atomic_read(&zmd->unmap_nr_seq));
- dmz_dev_info(dev, " %u reserved sequential data zones",
- zmd->nr_reserved_seq);
-
- dmz_dev_debug(dev, "Format:");
- dmz_dev_debug(dev, "%u metadata blocks per set (%u max cache)",
+ dmz_zmd_info(zmd, "DM-Zoned metadata version %d", zmd->sb_version);
+ for (i = 0; i < zmd->nr_devs; i++)
+ dmz_print_dev(zmd, i);
+
+ dmz_zmd_info(zmd, " %u zones of %llu 512-byte logical sectors",
+ zmd->nr_zones, (u64)zmd->zone_nr_sectors);
+ dmz_zmd_debug(zmd, " %u metadata zones",
+ zmd->nr_meta_zones * 2);
+ dmz_zmd_debug(zmd, " %u data zones for %u chunks",
+ zmd->nr_data_zones, zmd->nr_chunks);
+ dmz_zmd_debug(zmd, " %u cache zones (%u unmapped)",
+ zmd->nr_cache, atomic_read(&zmd->unmap_nr_cache));
+ for (i = 0; i < zmd->nr_devs; i++) {
+ dmz_zmd_debug(zmd, " %u random zones (%u unmapped)",
+ dmz_nr_rnd_zones(zmd, i),
+ dmz_nr_unmap_rnd_zones(zmd, i));
+ dmz_zmd_debug(zmd, " %u sequential zones (%u unmapped)",
+ dmz_nr_seq_zones(zmd, i),
+ dmz_nr_unmap_seq_zones(zmd, i));
+ }
+ dmz_zmd_debug(zmd, " %u reserved sequential data zones",
+ zmd->nr_reserved_seq);
+ dmz_zmd_debug(zmd, "Format:");
+ dmz_zmd_debug(zmd, "%u metadata blocks per set (%u max cache)",
zmd->nr_meta_blocks, zmd->max_nr_mblks);
- dmz_dev_debug(dev, " %u data zone mapping blocks",
+ dmz_zmd_debug(zmd, " %u data zone mapping blocks",
zmd->nr_map_blocks);
- dmz_dev_debug(dev, " %u bitmap blocks",
+ dmz_zmd_debug(zmd, " %u bitmap blocks",
zmd->nr_bitmap_blocks);
*metadata = zmd;
@@ -2488,30 +2986,28 @@ void dmz_dtr_metadata(struct dmz_metadata *zmd)
*/
int dmz_resume_metadata(struct dmz_metadata *zmd)
{
- struct dmz_dev *dev = zmd->dev;
struct dm_zone *zone;
sector_t wp_block;
unsigned int i;
int ret;
/* Check zones */
- for (i = 0; i < dev->nr_zones; i++) {
+ for (i = 0; i < zmd->nr_zones; i++) {
zone = dmz_get(zmd, i);
if (!zone) {
- dmz_dev_err(dev, "Unable to get zone %u", i);
+ dmz_zmd_err(zmd, "Unable to get zone %u", i);
return -EIO;
}
-
wp_block = zone->wp_block;
ret = dmz_update_zone(zmd, zone);
if (ret) {
- dmz_dev_err(dev, "Broken zone %u", i);
+ dmz_zmd_err(zmd, "Broken zone %u", i);
return ret;
}
if (dmz_is_offline(zone)) {
- dmz_dev_warn(dev, "Zone %u is offline", i);
+ dmz_zmd_warn(zmd, "Zone %u is offline", i);
continue;
}
@@ -2519,11 +3015,11 @@ int dmz_resume_metadata(struct dmz_metadata *zmd)
if (!dmz_is_seq(zone))
zone->wp_block = 0;
else if (zone->wp_block != wp_block) {
- dmz_dev_err(dev, "Zone %u: Invalid wp (%llu / %llu)",
+ dmz_zmd_err(zmd, "Zone %u: Invalid wp (%llu / %llu)",
i, (u64)zone->wp_block, (u64)wp_block);
zone->wp_block = wp_block;
dmz_invalidate_blocks(zmd, zone, zone->wp_block,
- dev->zone_nr_blocks - zone->wp_block);
+ zmd->zone_nr_blocks - zone->wp_block);
}
}
diff --git a/drivers/md/dm-zoned-reclaim.c b/drivers/md/dm-zoned-reclaim.c
index e7ace908a9b7..2261b4dd60b7 100644
--- a/drivers/md/dm-zoned-reclaim.c
+++ b/drivers/md/dm-zoned-reclaim.c
@@ -13,7 +13,6 @@
struct dmz_reclaim {
struct dmz_metadata *metadata;
- struct dmz_dev *dev;
struct delayed_work work;
struct workqueue_struct *wq;
@@ -22,6 +21,8 @@ struct dmz_reclaim {
struct dm_kcopyd_throttle kc_throttle;
int kc_err;
+ int dev_idx;
+
unsigned long flags;
/* Last target access time */
@@ -44,13 +45,13 @@ enum {
* Percentage of unmapped (free) random zones below which reclaim starts
* even if the target is busy.
*/
-#define DMZ_RECLAIM_LOW_UNMAP_RND 30
+#define DMZ_RECLAIM_LOW_UNMAP_ZONES 30
/*
* Percentage of unmapped (free) random zones above which reclaim will
* stop if the target is busy.
*/
-#define DMZ_RECLAIM_HIGH_UNMAP_RND 50
+#define DMZ_RECLAIM_HIGH_UNMAP_ZONES 50
/*
* Align a sequential zone write pointer to chunk_block.
@@ -59,6 +60,7 @@ static int dmz_reclaim_align_wp(struct dmz_reclaim *zrc, struct dm_zone *zone,
sector_t block)
{
struct dmz_metadata *zmd = zrc->metadata;
+ struct dmz_dev *dev = zone->dev;
sector_t wp_block = zone->wp_block;
unsigned int nr_blocks;
int ret;
@@ -74,15 +76,15 @@ static int dmz_reclaim_align_wp(struct dmz_reclaim *zrc, struct dm_zone *zone,
* pointer and the requested position.
*/
nr_blocks = block - wp_block;
- ret = blkdev_issue_zeroout(zrc->dev->bdev,
+ ret = blkdev_issue_zeroout(dev->bdev,
dmz_start_sect(zmd, zone) + dmz_blk2sect(wp_block),
dmz_blk2sect(nr_blocks), GFP_NOIO, 0);
if (ret) {
- dmz_dev_err(zrc->dev,
+ dmz_dev_err(dev,
"Align zone %u wp %llu to %llu (wp+%u) blocks failed %d",
- dmz_id(zmd, zone), (unsigned long long)wp_block,
+ zone->id, (unsigned long long)wp_block,
(unsigned long long)block, nr_blocks, ret);
- dmz_check_bdev(zrc->dev);
+ dmz_check_bdev(dev);
return ret;
}
@@ -116,7 +118,6 @@ static int dmz_reclaim_copy(struct dmz_reclaim *zrc,
struct dm_zone *src_zone, struct dm_zone *dst_zone)
{
struct dmz_metadata *zmd = zrc->metadata;
- struct dmz_dev *dev = zrc->dev;
struct dm_io_region src, dst;
sector_t block = 0, end_block;
sector_t nr_blocks;
@@ -128,7 +129,7 @@ static int dmz_reclaim_copy(struct dmz_reclaim *zrc,
if (dmz_is_seq(src_zone))
end_block = src_zone->wp_block;
else
- end_block = dev->zone_nr_blocks;
+ end_block = dmz_zone_nr_blocks(zmd);
src_zone_block = dmz_start_block(zmd, src_zone);
dst_zone_block = dmz_start_block(zmd, dst_zone);
@@ -136,9 +137,14 @@ static int dmz_reclaim_copy(struct dmz_reclaim *zrc,
set_bit(DM_KCOPYD_WRITE_SEQ, &flags);
while (block < end_block) {
- if (dev->flags & DMZ_BDEV_DYING)
+ if (src_zone->dev->flags & DMZ_BDEV_DYING)
+ return -EIO;
+ if (dst_zone->dev->flags & DMZ_BDEV_DYING)
return -EIO;
+ if (dmz_reclaim_should_terminate(src_zone))
+ return -EINTR;
+
/* Get a valid region from the source zone */
ret = dmz_first_valid_block(zmd, src_zone, &block);
if (ret <= 0)
@@ -156,11 +162,11 @@ static int dmz_reclaim_copy(struct dmz_reclaim *zrc,
return ret;
}
- src.bdev = dev->bdev;
+ src.bdev = src_zone->dev->bdev;
src.sector = dmz_blk2sect(src_zone_block + block);
src.count = dmz_blk2sect(nr_blocks);
- dst.bdev = dev->bdev;
+ dst.bdev = dst_zone->dev->bdev;
dst.sector = dmz_blk2sect(dst_zone_block + block);
dst.count = src.count;
@@ -194,10 +200,10 @@ static int dmz_reclaim_buf(struct dmz_reclaim *zrc, struct dm_zone *dzone)
struct dmz_metadata *zmd = zrc->metadata;
int ret;
- dmz_dev_debug(zrc->dev,
- "Chunk %u, move buf zone %u (weight %u) to data zone %u (weight %u)",
- dzone->chunk, dmz_id(zmd, bzone), dmz_weight(bzone),
- dmz_id(zmd, dzone), dmz_weight(dzone));
+ DMDEBUG("(%s/%u): Chunk %u, move buf zone %u (weight %u) to data zone %u (weight %u)",
+ dmz_metadata_label(zmd), zrc->dev_idx,
+ dzone->chunk, bzone->id, dmz_weight(bzone),
+ dzone->id, dmz_weight(dzone));
/* Flush data zone into the buffer zone */
ret = dmz_reclaim_copy(zrc, bzone, dzone);
@@ -210,7 +216,7 @@ static int dmz_reclaim_buf(struct dmz_reclaim *zrc, struct dm_zone *dzone)
ret = dmz_merge_valid_blocks(zmd, bzone, dzone, chunk_block);
if (ret == 0) {
/* Free the buffer zone */
- dmz_invalidate_blocks(zmd, bzone, 0, zrc->dev->zone_nr_blocks);
+ dmz_invalidate_blocks(zmd, bzone, 0, dmz_zone_nr_blocks(zmd));
dmz_lock_map(zmd);
dmz_unmap_zone(zmd, bzone);
dmz_unlock_zone_reclaim(dzone);
@@ -233,10 +239,10 @@ static int dmz_reclaim_seq_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
struct dmz_metadata *zmd = zrc->metadata;
int ret = 0;
- dmz_dev_debug(zrc->dev,
- "Chunk %u, move data zone %u (weight %u) to buf zone %u (weight %u)",
- chunk, dmz_id(zmd, dzone), dmz_weight(dzone),
- dmz_id(zmd, bzone), dmz_weight(bzone));
+ DMDEBUG("(%s/%u): Chunk %u, move data zone %u (weight %u) to buf zone %u (weight %u)",
+ dmz_metadata_label(zmd), zrc->dev_idx,
+ chunk, dzone->id, dmz_weight(dzone),
+ bzone->id, dmz_weight(bzone));
/* Flush data zone into the buffer zone */
ret = dmz_reclaim_copy(zrc, dzone, bzone);
@@ -252,7 +258,7 @@ static int dmz_reclaim_seq_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
* Free the data zone and remap the chunk to
* the buffer zone.
*/
- dmz_invalidate_blocks(zmd, dzone, 0, zrc->dev->zone_nr_blocks);
+ dmz_invalidate_blocks(zmd, dzone, 0, dmz_zone_nr_blocks(zmd));
dmz_lock_map(zmd);
dmz_unmap_zone(zmd, bzone);
dmz_unmap_zone(zmd, dzone);
@@ -277,18 +283,26 @@ static int dmz_reclaim_rnd_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
struct dm_zone *szone = NULL;
struct dmz_metadata *zmd = zrc->metadata;
int ret;
+ int alloc_flags = DMZ_ALLOC_SEQ;
- /* Get a free sequential zone */
+ /* Get a free random or sequential zone */
dmz_lock_map(zmd);
- szone = dmz_alloc_zone(zmd, DMZ_ALLOC_RECLAIM);
+again:
+ szone = dmz_alloc_zone(zmd, zrc->dev_idx,
+ alloc_flags | DMZ_ALLOC_RECLAIM);
+ if (!szone && alloc_flags == DMZ_ALLOC_SEQ && dmz_nr_cache_zones(zmd)) {
+ alloc_flags = DMZ_ALLOC_RND;
+ goto again;
+ }
dmz_unlock_map(zmd);
if (!szone)
return -ENOSPC;
- dmz_dev_debug(zrc->dev,
- "Chunk %u, move rnd zone %u (weight %u) to seq zone %u",
- chunk, dmz_id(zmd, dzone), dmz_weight(dzone),
- dmz_id(zmd, szone));
+ DMDEBUG("(%s/%u): Chunk %u, move %s zone %u (weight %u) to %s zone %u",
+ dmz_metadata_label(zmd), zrc->dev_idx, chunk,
+ dmz_is_cache(dzone) ? "cache" : "rnd",
+ dzone->id, dmz_weight(dzone),
+ dmz_is_rnd(szone) ? "rnd" : "seq", szone->id);
/* Flush the random data zone into the sequential zone */
ret = dmz_reclaim_copy(zrc, dzone, szone);
@@ -306,7 +320,7 @@ static int dmz_reclaim_rnd_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
dmz_unlock_map(zmd);
} else {
/* Free the data zone and remap the chunk */
- dmz_invalidate_blocks(zmd, dzone, 0, zrc->dev->zone_nr_blocks);
+ dmz_invalidate_blocks(zmd, dzone, 0, dmz_zone_nr_blocks(zmd));
dmz_lock_map(zmd);
dmz_unmap_zone(zmd, dzone);
dmz_unlock_zone_reclaim(dzone);
@@ -337,6 +351,14 @@ static void dmz_reclaim_empty(struct dmz_reclaim *zrc, struct dm_zone *dzone)
}
/*
+ * Test if the target device is idle.
+ */
+static inline int dmz_target_idle(struct dmz_reclaim *zrc)
+{
+ return time_is_before_jiffies(zrc->atime + DMZ_IDLE_PERIOD);
+}
+
+/*
* Find a candidate zone for reclaim and process it.
*/
static int dmz_do_reclaim(struct dmz_reclaim *zrc)
@@ -348,13 +370,16 @@ static int dmz_do_reclaim(struct dmz_reclaim *zrc)
int ret;
/* Get a data zone */
- dzone = dmz_get_zone_for_reclaim(zmd);
- if (IS_ERR(dzone))
- return PTR_ERR(dzone);
+ dzone = dmz_get_zone_for_reclaim(zmd, zrc->dev_idx,
+ dmz_target_idle(zrc));
+ if (!dzone) {
+ DMDEBUG("(%s/%u): No zone found to reclaim",
+ dmz_metadata_label(zmd), zrc->dev_idx);
+ return -EBUSY;
+ }
start = jiffies;
-
- if (dmz_is_rnd(dzone)) {
+ if (dmz_is_cache(dzone) || dmz_is_rnd(dzone)) {
if (!dmz_weight(dzone)) {
/* Empty zone */
dmz_reclaim_empty(zrc, dzone);
@@ -395,54 +420,80 @@ static int dmz_do_reclaim(struct dmz_reclaim *zrc)
}
out:
if (ret) {
+ if (ret == -EINTR)
+ DMDEBUG("(%s/%u): reclaim zone %u interrupted",
+ dmz_metadata_label(zmd), zrc->dev_idx,
+ rzone->id);
+ else
+ DMDEBUG("(%s/%u): Failed to reclaim zone %u, err %d",
+ dmz_metadata_label(zmd), zrc->dev_idx,
+ rzone->id, ret);
dmz_unlock_zone_reclaim(dzone);
return ret;
}
ret = dmz_flush_metadata(zrc->metadata);
if (ret) {
- dmz_dev_debug(zrc->dev,
- "Metadata flush for zone %u failed, err %d\n",
- dmz_id(zmd, rzone), ret);
+ DMDEBUG("(%s/%u): Metadata flush for zone %u failed, err %d",
+ dmz_metadata_label(zmd), zrc->dev_idx, rzone->id, ret);
return ret;
}
- dmz_dev_debug(zrc->dev, "Reclaimed zone %u in %u ms",
- dmz_id(zmd, rzone), jiffies_to_msecs(jiffies - start));
+ DMDEBUG("(%s/%u): Reclaimed zone %u in %u ms",
+ dmz_metadata_label(zmd), zrc->dev_idx,
+ rzone->id, jiffies_to_msecs(jiffies - start));
return 0;
}
-/*
- * Test if the target device is idle.
- */
-static inline int dmz_target_idle(struct dmz_reclaim *zrc)
+static unsigned int dmz_reclaim_percentage(struct dmz_reclaim *zrc)
{
- return time_is_before_jiffies(zrc->atime + DMZ_IDLE_PERIOD);
+ struct dmz_metadata *zmd = zrc->metadata;
+ unsigned int nr_cache = dmz_nr_cache_zones(zmd);
+ unsigned int nr_unmap, nr_zones;
+
+ if (nr_cache) {
+ nr_zones = nr_cache;
+ nr_unmap = dmz_nr_unmap_cache_zones(zmd);
+ } else {
+ nr_zones = dmz_nr_rnd_zones(zmd, zrc->dev_idx);
+ nr_unmap = dmz_nr_unmap_rnd_zones(zmd, zrc->dev_idx);
+ }
+ return nr_unmap * 100 / nr_zones;
}
/*
* Test if reclaim is necessary.
*/
-static bool dmz_should_reclaim(struct dmz_reclaim *zrc)
+static bool dmz_should_reclaim(struct dmz_reclaim *zrc, unsigned int p_unmap)
{
- struct dmz_metadata *zmd = zrc->metadata;
- unsigned int nr_rnd = dmz_nr_rnd_zones(zmd);
- unsigned int nr_unmap_rnd = dmz_nr_unmap_rnd_zones(zmd);
- unsigned int p_unmap_rnd = nr_unmap_rnd * 100 / nr_rnd;
+ unsigned int nr_reclaim;
+
+ nr_reclaim = dmz_nr_rnd_zones(zrc->metadata, zrc->dev_idx);
+
+ if (dmz_nr_cache_zones(zrc->metadata)) {
+ /*
+ * The first device in a multi-device
+ * setup only contains cache zones, so
+ * never start reclaim there.
+ */
+ if (zrc->dev_idx == 0)
+ return false;
+ nr_reclaim += dmz_nr_cache_zones(zrc->metadata);
+ }
/* Reclaim when idle */
- if (dmz_target_idle(zrc) && nr_unmap_rnd < nr_rnd)
+ if (dmz_target_idle(zrc) && nr_reclaim)
return true;
- /* If there are still plenty of random zones, do not reclaim */
- if (p_unmap_rnd >= DMZ_RECLAIM_HIGH_UNMAP_RND)
+ /* If there are still plenty of cache zones, do not reclaim */
+ if (p_unmap >= DMZ_RECLAIM_HIGH_UNMAP_ZONES)
return false;
/*
- * If the percentage of unmapped random zones is low,
+ * If the percentage of unmapped cache zones is low,
* reclaim even if the target is busy.
*/
- return p_unmap_rnd <= DMZ_RECLAIM_LOW_UNMAP_RND;
+ return p_unmap <= DMZ_RECLAIM_LOW_UNMAP_ZONES;
}
/*
@@ -452,14 +503,14 @@ static void dmz_reclaim_work(struct work_struct *work)
{
struct dmz_reclaim *zrc = container_of(work, struct dmz_reclaim, work.work);
struct dmz_metadata *zmd = zrc->metadata;
- unsigned int nr_rnd, nr_unmap_rnd;
- unsigned int p_unmap_rnd;
+ unsigned int p_unmap, nr_unmap_rnd = 0, nr_rnd = 0;
int ret;
- if (dmz_bdev_is_dying(zrc->dev))
+ if (dmz_dev_is_dying(zmd))
return;
- if (!dmz_should_reclaim(zrc)) {
+ p_unmap = dmz_reclaim_percentage(zrc);
+ if (!dmz_should_reclaim(zrc, p_unmap)) {
mod_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD);
return;
}
@@ -470,27 +521,29 @@ static void dmz_reclaim_work(struct work_struct *work)
* and slower if there are still some free random zones, so as to
* avoid negatively impacting the user workload as much as possible.
*/
- nr_rnd = dmz_nr_rnd_zones(zmd);
- nr_unmap_rnd = dmz_nr_unmap_rnd_zones(zmd);
- p_unmap_rnd = nr_unmap_rnd * 100 / nr_rnd;
- if (dmz_target_idle(zrc) || p_unmap_rnd < DMZ_RECLAIM_LOW_UNMAP_RND / 2) {
+ if (dmz_target_idle(zrc) || p_unmap < DMZ_RECLAIM_LOW_UNMAP_ZONES / 2) {
/* Idle or very low percentage: go fast */
zrc->kc_throttle.throttle = 100;
} else {
/* Busy but we still have some random zone: throttle */
- zrc->kc_throttle.throttle = min(75U, 100U - p_unmap_rnd / 2);
+ zrc->kc_throttle.throttle = min(75U, 100U - p_unmap / 2);
}
- dmz_dev_debug(zrc->dev,
- "Reclaim (%u): %s, %u%% free rnd zones (%u/%u)",
- zrc->kc_throttle.throttle,
- (dmz_target_idle(zrc) ? "Idle" : "Busy"),
- p_unmap_rnd, nr_unmap_rnd, nr_rnd);
+ nr_unmap_rnd = dmz_nr_unmap_rnd_zones(zmd, zrc->dev_idx);
+ nr_rnd = dmz_nr_rnd_zones(zmd, zrc->dev_idx);
+
+ DMDEBUG("(%s/%u): Reclaim (%u): %s, %u%% free zones (%u/%u cache %u/%u random)",
+ dmz_metadata_label(zmd), zrc->dev_idx,
+ zrc->kc_throttle.throttle,
+ (dmz_target_idle(zrc) ? "Idle" : "Busy"),
+ p_unmap, dmz_nr_unmap_cache_zones(zmd),
+ dmz_nr_cache_zones(zmd),
+ dmz_nr_unmap_rnd_zones(zmd, zrc->dev_idx),
+ dmz_nr_rnd_zones(zmd, zrc->dev_idx));
ret = dmz_do_reclaim(zrc);
- if (ret) {
- dmz_dev_debug(zrc->dev, "Reclaim error %d\n", ret);
- if (!dmz_check_bdev(zrc->dev))
+ if (ret && ret != -EINTR) {
+ if (!dmz_check_dev(zmd))
return;
}
@@ -500,8 +553,8 @@ static void dmz_reclaim_work(struct work_struct *work)
/*
* Initialize reclaim.
*/
-int dmz_ctr_reclaim(struct dmz_dev *dev, struct dmz_metadata *zmd,
- struct dmz_reclaim **reclaim)
+int dmz_ctr_reclaim(struct dmz_metadata *zmd,
+ struct dmz_reclaim **reclaim, int idx)
{
struct dmz_reclaim *zrc;
int ret;
@@ -510,9 +563,9 @@ int dmz_ctr_reclaim(struct dmz_dev *dev, struct dmz_metadata *zmd,
if (!zrc)
return -ENOMEM;
- zrc->dev = dev;
zrc->metadata = zmd;
zrc->atime = jiffies;
+ zrc->dev_idx = idx;
/* Reclaim kcopyd client */
zrc->kc = dm_kcopyd_client_create(&zrc->kc_throttle);
@@ -524,8 +577,8 @@ int dmz_ctr_reclaim(struct dmz_dev *dev, struct dmz_metadata *zmd,
/* Reclaim work */
INIT_DELAYED_WORK(&zrc->work, dmz_reclaim_work);
- zrc->wq = alloc_ordered_workqueue("dmz_rwq_%s", WQ_MEM_RECLAIM,
- dev->name);
+ zrc->wq = alloc_ordered_workqueue("dmz_rwq_%s_%d", WQ_MEM_RECLAIM,
+ dmz_metadata_label(zmd), idx);
if (!zrc->wq) {
ret = -ENOMEM;
goto err;
@@ -583,7 +636,8 @@ void dmz_reclaim_bio_acc(struct dmz_reclaim *zrc)
*/
void dmz_schedule_reclaim(struct dmz_reclaim *zrc)
{
- if (dmz_should_reclaim(zrc))
+ unsigned int p_unmap = dmz_reclaim_percentage(zrc);
+
+ if (dmz_should_reclaim(zrc, p_unmap))
mod_delayed_work(zrc->wq, &zrc->work, 0);
}
-
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index f4f83d39b3dc..a907a9446c0b 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -17,7 +17,7 @@
* Zone BIO context.
*/
struct dmz_bioctx {
- struct dmz_target *target;
+ struct dmz_dev *dev;
struct dm_zone *zone;
struct bio *bio;
refcount_t ref;
@@ -38,9 +38,10 @@ struct dm_chunk_work {
* Target descriptor.
*/
struct dmz_target {
- struct dm_dev *ddev;
+ struct dm_dev **ddev;
+ unsigned int nr_ddevs;
- unsigned long flags;
+ unsigned int flags;
/* Zoned block device information */
struct dmz_dev *dev;
@@ -48,9 +49,6 @@ struct dmz_target {
/* For metadata handling */
struct dmz_metadata *metadata;
- /* For reclaim */
- struct dmz_reclaim *reclaim;
-
/* For chunk work */
struct radix_tree_root chunk_rxtree;
struct workqueue_struct *chunk_wq;
@@ -76,12 +74,13 @@ struct dmz_target {
*/
static inline void dmz_bio_endio(struct bio *bio, blk_status_t status)
{
- struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
+ struct dmz_bioctx *bioctx =
+ dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
if (status != BLK_STS_OK && bio->bi_status == BLK_STS_OK)
bio->bi_status = status;
- if (bio->bi_status != BLK_STS_OK)
- bioctx->target->dev->flags |= DMZ_CHECK_BDEV;
+ if (bioctx->dev && bio->bi_status != BLK_STS_OK)
+ bioctx->dev->flags |= DMZ_CHECK_BDEV;
if (refcount_dec_and_test(&bioctx->ref)) {
struct dm_zone *zone = bioctx->zone;
@@ -118,14 +117,20 @@ static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone,
struct bio *bio, sector_t chunk_block,
unsigned int nr_blocks)
{
- struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
+ struct dmz_bioctx *bioctx =
+ dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
+ struct dmz_dev *dev = zone->dev;
struct bio *clone;
+ if (dev->flags & DMZ_BDEV_DYING)
+ return -EIO;
+
clone = bio_clone_fast(bio, GFP_NOIO, &dmz->bio_set);
if (!clone)
return -ENOMEM;
- bio_set_dev(clone, dmz->dev->bdev);
+ bio_set_dev(clone, dev->bdev);
+ bioctx->dev = dev;
clone->bi_iter.bi_sector =
dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
clone->bi_iter.bi_size = dmz_blk2sect(nr_blocks) << SECTOR_SHIFT;
@@ -165,7 +170,8 @@ static void dmz_handle_read_zero(struct dmz_target *dmz, struct bio *bio,
static int dmz_handle_read(struct dmz_target *dmz, struct dm_zone *zone,
struct bio *bio)
{
- sector_t chunk_block = dmz_chunk_block(dmz->dev, dmz_bio_block(bio));
+ struct dmz_metadata *zmd = dmz->metadata;
+ sector_t chunk_block = dmz_chunk_block(zmd, dmz_bio_block(bio));
unsigned int nr_blocks = dmz_bio_blocks(bio);
sector_t end_block = chunk_block + nr_blocks;
struct dm_zone *rzone, *bzone;
@@ -177,19 +183,22 @@ static int dmz_handle_read(struct dmz_target *dmz, struct dm_zone *zone,
return 0;
}
- dmz_dev_debug(dmz->dev, "READ chunk %llu -> %s zone %u, block %llu, %u blocks",
- (unsigned long long)dmz_bio_chunk(dmz->dev, bio),
- (dmz_is_rnd(zone) ? "RND" : "SEQ"),
- dmz_id(dmz->metadata, zone),
- (unsigned long long)chunk_block, nr_blocks);
+ DMDEBUG("(%s): READ chunk %llu -> %s zone %u, block %llu, %u blocks",
+ dmz_metadata_label(zmd),
+ (unsigned long long)dmz_bio_chunk(zmd, bio),
+ (dmz_is_rnd(zone) ? "RND" :
+ (dmz_is_cache(zone) ? "CACHE" : "SEQ")),
+ zone->id,
+ (unsigned long long)chunk_block, nr_blocks);
/* Check block validity to determine the read location */
bzone = zone->bzone;
while (chunk_block < end_block) {
nr_blocks = 0;
- if (dmz_is_rnd(zone) || chunk_block < zone->wp_block) {
+ if (dmz_is_rnd(zone) || dmz_is_cache(zone) ||
+ chunk_block < zone->wp_block) {
/* Test block validity in the data zone */
- ret = dmz_block_valid(dmz->metadata, zone, chunk_block);
+ ret = dmz_block_valid(zmd, zone, chunk_block);
if (ret < 0)
return ret;
if (ret > 0) {
@@ -204,7 +213,7 @@ static int dmz_handle_read(struct dmz_target *dmz, struct dm_zone *zone,
* Check the buffer zone, if there is one.
*/
if (!nr_blocks && bzone) {
- ret = dmz_block_valid(dmz->metadata, bzone, chunk_block);
+ ret = dmz_block_valid(zmd, bzone, chunk_block);
if (ret < 0)
return ret;
if (ret > 0) {
@@ -216,8 +225,10 @@ static int dmz_handle_read(struct dmz_target *dmz, struct dm_zone *zone,
if (nr_blocks) {
/* Valid blocks found: read them */
- nr_blocks = min_t(unsigned int, nr_blocks, end_block - chunk_block);
- ret = dmz_submit_bio(dmz, rzone, bio, chunk_block, nr_blocks);
+ nr_blocks = min_t(unsigned int, nr_blocks,
+ end_block - chunk_block);
+ ret = dmz_submit_bio(dmz, rzone, bio,
+ chunk_block, nr_blocks);
if (ret)
return ret;
chunk_block += nr_blocks;
@@ -308,25 +319,30 @@ static int dmz_handle_buffered_write(struct dmz_target *dmz,
static int dmz_handle_write(struct dmz_target *dmz, struct dm_zone *zone,
struct bio *bio)
{
- sector_t chunk_block = dmz_chunk_block(dmz->dev, dmz_bio_block(bio));
+ struct dmz_metadata *zmd = dmz->metadata;
+ sector_t chunk_block = dmz_chunk_block(zmd, dmz_bio_block(bio));
unsigned int nr_blocks = dmz_bio_blocks(bio);
if (!zone)
return -ENOSPC;
- dmz_dev_debug(dmz->dev, "WRITE chunk %llu -> %s zone %u, block %llu, %u blocks",
- (unsigned long long)dmz_bio_chunk(dmz->dev, bio),
- (dmz_is_rnd(zone) ? "RND" : "SEQ"),
- dmz_id(dmz->metadata, zone),
- (unsigned long long)chunk_block, nr_blocks);
+ DMDEBUG("(%s): WRITE chunk %llu -> %s zone %u, block %llu, %u blocks",
+ dmz_metadata_label(zmd),
+ (unsigned long long)dmz_bio_chunk(zmd, bio),
+ (dmz_is_rnd(zone) ? "RND" :
+ (dmz_is_cache(zone) ? "CACHE" : "SEQ")),
+ zone->id,
+ (unsigned long long)chunk_block, nr_blocks);
- if (dmz_is_rnd(zone) || chunk_block == zone->wp_block) {
+ if (dmz_is_rnd(zone) || dmz_is_cache(zone) ||
+ chunk_block == zone->wp_block) {
/*
* zone is a random zone or it is a sequential zone
* and the BIO is aligned to the zone write pointer:
* direct write the zone.
*/
- return dmz_handle_direct_write(dmz, zone, bio, chunk_block, nr_blocks);
+ return dmz_handle_direct_write(dmz, zone, bio,
+ chunk_block, nr_blocks);
}
/*
@@ -345,7 +361,7 @@ static int dmz_handle_discard(struct dmz_target *dmz, struct dm_zone *zone,
struct dmz_metadata *zmd = dmz->metadata;
sector_t block = dmz_bio_block(bio);
unsigned int nr_blocks = dmz_bio_blocks(bio);
- sector_t chunk_block = dmz_chunk_block(dmz->dev, block);
+ sector_t chunk_block = dmz_chunk_block(zmd, block);
int ret = 0;
/* For unmapped chunks, there is nothing to do */
@@ -355,16 +371,18 @@ static int dmz_handle_discard(struct dmz_target *dmz, struct dm_zone *zone,
if (dmz_is_readonly(zone))
return -EROFS;
- dmz_dev_debug(dmz->dev, "DISCARD chunk %llu -> zone %u, block %llu, %u blocks",
- (unsigned long long)dmz_bio_chunk(dmz->dev, bio),
- dmz_id(zmd, zone),
- (unsigned long long)chunk_block, nr_blocks);
+ DMDEBUG("(%s): DISCARD chunk %llu -> zone %u, block %llu, %u blocks",
+ dmz_metadata_label(dmz->metadata),
+ (unsigned long long)dmz_bio_chunk(zmd, bio),
+ zone->id,
+ (unsigned long long)chunk_block, nr_blocks);
/*
* Invalidate blocks in the data zone and its
* buffer zone if one is mapped.
*/
- if (dmz_is_rnd(zone) || chunk_block < zone->wp_block)
+ if (dmz_is_rnd(zone) || dmz_is_cache(zone) ||
+ chunk_block < zone->wp_block)
ret = dmz_invalidate_blocks(zmd, zone, chunk_block, nr_blocks);
if (ret == 0 && zone->bzone)
ret = dmz_invalidate_blocks(zmd, zone->bzone,
@@ -378,31 +396,28 @@ static int dmz_handle_discard(struct dmz_target *dmz, struct dm_zone *zone,
static void dmz_handle_bio(struct dmz_target *dmz, struct dm_chunk_work *cw,
struct bio *bio)
{
- struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
+ struct dmz_bioctx *bioctx =
+ dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
struct dmz_metadata *zmd = dmz->metadata;
struct dm_zone *zone;
- int ret;
+ int i, ret;
/*
* Write may trigger a zone allocation. So make sure the
* allocation can succeed.
*/
if (bio_op(bio) == REQ_OP_WRITE)
- dmz_schedule_reclaim(dmz->reclaim);
+ for (i = 0; i < dmz->nr_ddevs; i++)
+ dmz_schedule_reclaim(dmz->dev[i].reclaim);
dmz_lock_metadata(zmd);
- if (dmz->dev->flags & DMZ_BDEV_DYING) {
- ret = -EIO;
- goto out;
- }
-
/*
* Get the data zone mapping the chunk. There may be no
* mapping for read and discard. If a mapping is obtained,
* the zone returned will be set to active state.
*/
- zone = dmz_get_chunk_mapping(zmd, dmz_bio_chunk(dmz->dev, bio),
+ zone = dmz_get_chunk_mapping(zmd, dmz_bio_chunk(zmd, bio),
bio_op(bio));
if (IS_ERR(zone)) {
ret = PTR_ERR(zone);
@@ -413,6 +428,7 @@ static void dmz_handle_bio(struct dmz_target *dmz, struct dm_chunk_work *cw,
if (zone) {
dmz_activate_zone(zone);
bioctx->zone = zone;
+ dmz_reclaim_bio_acc(zone->dev->reclaim);
}
switch (bio_op(bio)) {
@@ -427,8 +443,8 @@ static void dmz_handle_bio(struct dmz_target *dmz, struct dm_chunk_work *cw,
ret = dmz_handle_discard(dmz, zone, bio);
break;
default:
- dmz_dev_err(dmz->dev, "Unsupported BIO operation 0x%x",
- bio_op(bio));
+ DMERR("(%s): Unsupported BIO operation 0x%x",
+ dmz_metadata_label(dmz->metadata), bio_op(bio));
ret = -EIO;
}
@@ -502,7 +518,8 @@ static void dmz_flush_work(struct work_struct *work)
/* Flush dirty metadata blocks */
ret = dmz_flush_metadata(dmz->metadata);
if (ret)
- dmz_dev_debug(dmz->dev, "Metadata flush failed, rc=%d\n", ret);
+ DMDEBUG("(%s): Metadata flush failed, rc=%d",
+ dmz_metadata_label(dmz->metadata), ret);
/* Process queued flush requests */
while (1) {
@@ -525,7 +542,7 @@ static void dmz_flush_work(struct work_struct *work)
*/
static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
{
- unsigned int chunk = dmz_bio_chunk(dmz->dev, bio);
+ unsigned int chunk = dmz_bio_chunk(dmz->metadata, bio);
struct dm_chunk_work *cw;
int ret = 0;
@@ -558,7 +575,6 @@ static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
bio_list_add(&cw->bio_list, bio);
- dmz_reclaim_bio_acc(dmz->reclaim);
if (queue_work(dmz->chunk_wq, &cw->work))
dmz_get_chunk_work(cw);
out:
@@ -618,23 +634,22 @@ bool dmz_check_bdev(struct dmz_dev *dmz_dev)
static int dmz_map(struct dm_target *ti, struct bio *bio)
{
struct dmz_target *dmz = ti->private;
- struct dmz_dev *dev = dmz->dev;
+ struct dmz_metadata *zmd = dmz->metadata;
struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
sector_t sector = bio->bi_iter.bi_sector;
unsigned int nr_sectors = bio_sectors(bio);
sector_t chunk_sector;
int ret;
- if (dmz_bdev_is_dying(dmz->dev))
+ if (dmz_dev_is_dying(zmd))
return DM_MAPIO_KILL;
- dmz_dev_debug(dev, "BIO op %d sector %llu + %u => chunk %llu, block %llu, %u blocks",
- bio_op(bio), (unsigned long long)sector, nr_sectors,
- (unsigned long long)dmz_bio_chunk(dmz->dev, bio),
- (unsigned long long)dmz_chunk_block(dmz->dev, dmz_bio_block(bio)),
- (unsigned int)dmz_bio_blocks(bio));
-
- bio_set_dev(bio, dev->bdev);
+ DMDEBUG("(%s): BIO op %d sector %llu + %u => chunk %llu, block %llu, %u blocks",
+ dmz_metadata_label(zmd),
+ bio_op(bio), (unsigned long long)sector, nr_sectors,
+ (unsigned long long)dmz_bio_chunk(zmd, bio),
+ (unsigned long long)dmz_chunk_block(zmd, dmz_bio_block(bio)),
+ (unsigned int)dmz_bio_blocks(bio));
if (!nr_sectors && bio_op(bio) != REQ_OP_WRITE)
return DM_MAPIO_REMAPPED;
@@ -644,7 +659,7 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_KILL;
/* Initialize the BIO context */
- bioctx->target = dmz;
+ bioctx->dev = NULL;
bioctx->zone = NULL;
bioctx->bio = bio;
refcount_set(&bioctx->ref, 1);
@@ -659,17 +674,17 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
}
/* Split zone BIOs to fit entirely into a zone */
- chunk_sector = sector & (dev->zone_nr_sectors - 1);
- if (chunk_sector + nr_sectors > dev->zone_nr_sectors)
- dm_accept_partial_bio(bio, dev->zone_nr_sectors - chunk_sector);
+ chunk_sector = sector & (dmz_zone_nr_sectors(zmd) - 1);
+ if (chunk_sector + nr_sectors > dmz_zone_nr_sectors(zmd))
+ dm_accept_partial_bio(bio, dmz_zone_nr_sectors(zmd) - chunk_sector);
/* Now ready to handle this BIO */
ret = dmz_queue_chunk_work(dmz, bio);
if (ret) {
- dmz_dev_debug(dmz->dev,
- "BIO op %d, can't process chunk %llu, err %i\n",
- bio_op(bio), (u64)dmz_bio_chunk(dmz->dev, bio),
- ret);
+ DMDEBUG("(%s): BIO op %d, can't process chunk %llu, err %i",
+ dmz_metadata_label(zmd),
+ bio_op(bio), (u64)dmz_bio_chunk(zmd, bio),
+ ret);
return DM_MAPIO_REQUEUE;
}
@@ -679,64 +694,65 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
/*
* Get zoned device information.
*/
-static int dmz_get_zoned_device(struct dm_target *ti, char *path)
+static int dmz_get_zoned_device(struct dm_target *ti, char *path,
+ int idx, int nr_devs)
{
struct dmz_target *dmz = ti->private;
- struct request_queue *q;
+ struct dm_dev *ddev;
struct dmz_dev *dev;
- sector_t aligned_capacity;
int ret;
+ struct block_device *bdev;
/* Get the target device */
- ret = dm_get_device(ti, path, dm_table_get_mode(ti->table), &dmz->ddev);
+ ret = dm_get_device(ti, path, dm_table_get_mode(ti->table), &ddev);
if (ret) {
ti->error = "Get target device failed";
- dmz->ddev = NULL;
return ret;
}
- dev = kzalloc(sizeof(struct dmz_dev), GFP_KERNEL);
- if (!dev) {
- ret = -ENOMEM;
- goto err;
+ bdev = ddev->bdev;
+ if (bdev_zoned_model(bdev) == BLK_ZONED_NONE) {
+ if (nr_devs == 1) {
+ ti->error = "Invalid regular device";
+ goto err;
+ }
+ if (idx != 0) {
+ ti->error = "First device must be a regular device";
+ goto err;
+ }
+ if (dmz->ddev[0]) {
+ ti->error = "Too many regular devices";
+ goto err;
+ }
+ dev = &dmz->dev[idx];
+ dev->flags = DMZ_BDEV_REGULAR;
+ } else {
+ if (dmz->ddev[idx]) {
+ ti->error = "Too many zoned devices";
+ goto err;
+ }
+ if (nr_devs > 1 && idx == 0) {
+ ti->error = "First device must be a regular device";
+ goto err;
+ }
+ dev = &dmz->dev[idx];
}
-
- dev->bdev = dmz->ddev->bdev;
+ dev->bdev = bdev;
+ dev->dev_idx = idx;
(void)bdevname(dev->bdev, dev->name);
- if (bdev_zoned_model(dev->bdev) == BLK_ZONED_NONE) {
- ti->error = "Not a zoned block device";
- ret = -EINVAL;
+ dev->capacity = i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
+ if (ti->begin) {
+ ti->error = "Partial mapping is not supported";
goto err;
}
- q = bdev_get_queue(dev->bdev);
- dev->capacity = i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
- aligned_capacity = dev->capacity &
- ~((sector_t)blk_queue_zone_sectors(q) - 1);
- if (ti->begin ||
- ((ti->len != dev->capacity) && (ti->len != aligned_capacity))) {
- ti->error = "Partial mapping not supported";
- ret = -EINVAL;
- goto err;
- }
-
- dev->zone_nr_sectors = blk_queue_zone_sectors(q);
- dev->zone_nr_sectors_shift = ilog2(dev->zone_nr_sectors);
-
- dev->zone_nr_blocks = dmz_sect2blk(dev->zone_nr_sectors);
- dev->zone_nr_blocks_shift = ilog2(dev->zone_nr_blocks);
-
- dev->nr_zones = blkdev_nr_zones(dev->bdev->bd_disk);
-
- dmz->dev = dev;
+ dmz->ddev[idx] = ddev;
return 0;
err:
- dm_put_device(ti, dmz->ddev);
- kfree(dev);
-
- return ret;
+ dm_put_device(ti, ddev);
+ return -EINVAL;
}
/*
@@ -745,10 +761,78 @@ err:
static void dmz_put_zoned_device(struct dm_target *ti)
{
struct dmz_target *dmz = ti->private;
+ int i;
- dm_put_device(ti, dmz->ddev);
- kfree(dmz->dev);
- dmz->dev = NULL;
+ for (i = 0; i < dmz->nr_ddevs; i++) {
+ if (dmz->ddev[i]) {
+ dm_put_device(ti, dmz->ddev[i]);
+ dmz->ddev[i] = NULL;
+ }
+ }
+}
+
+static int dmz_fixup_devices(struct dm_target *ti)
+{
+ struct dmz_target *dmz = ti->private;
+ struct dmz_dev *reg_dev, *zoned_dev;
+ struct request_queue *q;
+ sector_t zone_nr_sectors = 0;
+ int i;
+
+ /*
+ * When we have more than one device, the first one must be a
+ * regular block device and the others zoned block devices.
+ */
+ if (dmz->nr_ddevs > 1) {
+ reg_dev = &dmz->dev[0];
+ if (!(reg_dev->flags & DMZ_BDEV_REGULAR)) {
+ ti->error = "Primary disk is not a regular device";
+ return -EINVAL;
+ }
+ for (i = 1; i < dmz->nr_ddevs; i++) {
+ zoned_dev = &dmz->dev[i];
+ if (zoned_dev->flags & DMZ_BDEV_REGULAR) {
+ ti->error = "Secondary disk is not a zoned device";
+ return -EINVAL;
+ }
+ q = bdev_get_queue(zoned_dev->bdev);
+ if (zone_nr_sectors &&
+ zone_nr_sectors != blk_queue_zone_sectors(q)) {
+ ti->error = "Zone nr sectors mismatch";
+ return -EINVAL;
+ }
+ zone_nr_sectors = blk_queue_zone_sectors(q);
+ zoned_dev->zone_nr_sectors = zone_nr_sectors;
+ zoned_dev->nr_zones =
+ blkdev_nr_zones(zoned_dev->bdev->bd_disk);
+ }
+ } else {
+ reg_dev = NULL;
+ zoned_dev = &dmz->dev[0];
+ if (zoned_dev->flags & DMZ_BDEV_REGULAR) {
+ ti->error = "Disk is not a zoned device";
+ return -EINVAL;
+ }
+ q = bdev_get_queue(zoned_dev->bdev);
+ zoned_dev->zone_nr_sectors = blk_queue_zone_sectors(q);
+ zoned_dev->nr_zones = blkdev_nr_zones(zoned_dev->bdev->bd_disk);
+ }
+
+ if (reg_dev) {
+ sector_t zone_offset;
+
+ reg_dev->zone_nr_sectors = zone_nr_sectors;
+ reg_dev->nr_zones =
+ DIV_ROUND_UP_SECTOR_T(reg_dev->capacity,
+ reg_dev->zone_nr_sectors);
+ reg_dev->zone_offset = 0;
+ zone_offset = reg_dev->nr_zones;
+ for (i = 1; i < dmz->nr_ddevs; i++) {
+ dmz->dev[i].zone_offset = zone_offset;
+ zone_offset += dmz->dev[i].nr_zones;
+ }
+ }
+ return 0;
}
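The zone_offset bookkeeping set up above gives every device a contiguous slice of the global zone ID space: the regular (cache) device starts at zone 0 and each zoned device follows the previous one. A small stand-alone sketch with made-up zone counts, not values taken from this patch:

#include <stdio.h>

int main(void)
{
	/* Assumed layout: one regular cache device followed by two zoned devices. */
	unsigned int nr_zones[] = { 16, 512, 512 };	/* made-up per-device zone counts */
	unsigned int zone_offset[3], off = 0;

	for (int i = 0; i < 3; i++) {
		zone_offset[i] = off;	/* dev 0 -> 0, dev 1 -> 16, dev 2 -> 528 */
		off += nr_zones[i];
	}
	printf("global zones: %u\n", off);	/* 1040 */
	return 0;
}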
/*
@@ -757,11 +841,10 @@ static void dmz_put_zoned_device(struct dm_target *ti)
static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
struct dmz_target *dmz;
- struct dmz_dev *dev;
- int ret;
+ int ret, i;
/* Check arguments */
- if (argc != 1) {
+ if (argc < 1) {
ti->error = "Invalid argument count";
return -EINVAL;
}
@@ -772,25 +855,42 @@ static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->error = "Unable to allocate the zoned target descriptor";
return -ENOMEM;
}
+ dmz->dev = kcalloc(argc, sizeof(struct dmz_dev), GFP_KERNEL);
+ if (!dmz->dev) {
+ ti->error = "Unable to allocate the zoned device descriptors";
+ kfree(dmz);
+ return -ENOMEM;
+ }
+ dmz->ddev = kcalloc(argc, sizeof(struct dm_dev *), GFP_KERNEL);
+ if (!dmz->ddev) {
+ ti->error = "Unable to allocate the dm device descriptors";
+ ret = -ENOMEM;
+ goto err;
+ }
+ dmz->nr_ddevs = argc;
+
ti->private = dmz;
/* Get the target zoned block device */
- ret = dmz_get_zoned_device(ti, argv[0]);
- if (ret) {
- dmz->ddev = NULL;
- goto err;
+ for (i = 0; i < argc; i++) {
+ ret = dmz_get_zoned_device(ti, argv[i], i, argc);
+ if (ret)
+ goto err_dev;
}
+ ret = dmz_fixup_devices(ti);
+ if (ret)
+ goto err_dev;
/* Initialize metadata */
- dev = dmz->dev;
- ret = dmz_ctr_metadata(dev, &dmz->metadata);
+ ret = dmz_ctr_metadata(dmz->dev, argc, &dmz->metadata,
+ dm_table_device_name(ti->table));
if (ret) {
ti->error = "Metadata initialization failed";
goto err_dev;
}
/* Set target (no write same support) */
- ti->max_io_len = dev->zone_nr_sectors << 9;
+ ti->max_io_len = dmz_zone_nr_sectors(dmz->metadata) << 9;
ti->num_flush_bios = 1;
ti->num_discard_bios = 1;
ti->num_write_zeroes_bios = 1;
@@ -799,7 +899,8 @@ static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->discards_supported = true;
/* The exposed capacity is the number of chunks that can be mapped */
- ti->len = (sector_t)dmz_nr_chunks(dmz->metadata) << dev->zone_nr_sectors_shift;
+ ti->len = (sector_t)dmz_nr_chunks(dmz->metadata) <<
+ dmz_zone_nr_sectors_shift(dmz->metadata);
/* Zone BIO */
ret = bioset_init(&dmz->bio_set, DMZ_MIN_BIOS, 0, 0);
@@ -811,8 +912,9 @@ static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv)
/* Chunk BIO work */
mutex_init(&dmz->chunk_lock);
INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_NOIO);
- dmz->chunk_wq = alloc_workqueue("dmz_cwq_%s", WQ_MEM_RECLAIM | WQ_UNBOUND,
- 0, dev->name);
+ dmz->chunk_wq = alloc_workqueue("dmz_cwq_%s",
+ WQ_MEM_RECLAIM | WQ_UNBOUND, 0,
+ dmz_metadata_label(dmz->metadata));
if (!dmz->chunk_wq) {
ti->error = "Create chunk workqueue failed";
ret = -ENOMEM;
@@ -824,7 +926,7 @@ static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv)
bio_list_init(&dmz->flush_list);
INIT_DELAYED_WORK(&dmz->flush_work, dmz_flush_work);
dmz->flush_wq = alloc_ordered_workqueue("dmz_fwq_%s", WQ_MEM_RECLAIM,
- dev->name);
+ dmz_metadata_label(dmz->metadata));
if (!dmz->flush_wq) {
ti->error = "Create flush workqueue failed";
ret = -ENOMEM;
@@ -833,15 +935,18 @@ static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv)
mod_delayed_work(dmz->flush_wq, &dmz->flush_work, DMZ_FLUSH_PERIOD);
/* Initialize reclaim */
- ret = dmz_ctr_reclaim(dev, dmz->metadata, &dmz->reclaim);
- if (ret) {
- ti->error = "Zone reclaim initialization failed";
- goto err_fwq;
+ for (i = 0; i < dmz->nr_ddevs; i++) {
+ ret = dmz_ctr_reclaim(dmz->metadata, &dmz->dev[i].reclaim, i);
+ if (ret) {
+ ti->error = "Zone reclaim initialization failed";
+ goto err_fwq;
+ }
}
- dmz_dev_info(dev, "Target device: %llu 512-byte logical sectors (%llu blocks)",
- (unsigned long long)ti->len,
- (unsigned long long)dmz_sect2blk(ti->len));
+ DMINFO("(%s): Target device: %llu 512-byte logical sectors (%llu blocks)",
+ dmz_metadata_label(dmz->metadata),
+ (unsigned long long)ti->len,
+ (unsigned long long)dmz_sect2blk(ti->len));
return 0;
err_fwq:
@@ -856,6 +961,7 @@ err_meta:
err_dev:
dmz_put_zoned_device(ti);
err:
+ kfree(dmz->dev);
kfree(dmz);
return ret;
@@ -867,11 +973,13 @@ err:
static void dmz_dtr(struct dm_target *ti)
{
struct dmz_target *dmz = ti->private;
+ int i;
flush_workqueue(dmz->chunk_wq);
destroy_workqueue(dmz->chunk_wq);
- dmz_dtr_reclaim(dmz->reclaim);
+ for (i = 0; i < dmz->nr_ddevs; i++)
+ dmz_dtr_reclaim(dmz->dev[i].reclaim);
cancel_delayed_work_sync(&dmz->flush_work);
destroy_workqueue(dmz->flush_wq);
@@ -886,6 +994,7 @@ static void dmz_dtr(struct dm_target *ti)
mutex_destroy(&dmz->chunk_lock);
+ kfree(dmz->dev);
kfree(dmz);
}
@@ -895,7 +1004,7 @@ static void dmz_dtr(struct dm_target *ti)
static void dmz_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
struct dmz_target *dmz = ti->private;
- unsigned int chunk_sectors = dmz->dev->zone_nr_sectors;
+ unsigned int chunk_sectors = dmz_zone_nr_sectors(dmz->metadata);
limits->logical_block_size = DMZ_BLOCK_SIZE;
limits->physical_block_size = DMZ_BLOCK_SIZE;
@@ -923,11 +1032,12 @@ static void dmz_io_hints(struct dm_target *ti, struct queue_limits *limits)
static int dmz_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
{
struct dmz_target *dmz = ti->private;
+ struct dmz_dev *dev = &dmz->dev[0];
- if (!dmz_check_bdev(dmz->dev))
+ if (!dmz_check_bdev(dev))
return -EIO;
- *bdev = dmz->dev->bdev;
+ *bdev = dev->bdev;
return 0;
}
@@ -938,9 +1048,11 @@ static int dmz_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
static void dmz_suspend(struct dm_target *ti)
{
struct dmz_target *dmz = ti->private;
+ int i;
flush_workqueue(dmz->chunk_wq);
- dmz_suspend_reclaim(dmz->reclaim);
+ for (i = 0; i < dmz->nr_ddevs; i++)
+ dmz_suspend_reclaim(dmz->dev[i].reclaim);
cancel_delayed_work_sync(&dmz->flush_work);
}
@@ -950,24 +1062,95 @@ static void dmz_suspend(struct dm_target *ti)
static void dmz_resume(struct dm_target *ti)
{
struct dmz_target *dmz = ti->private;
+ int i;
queue_delayed_work(dmz->flush_wq, &dmz->flush_work, DMZ_FLUSH_PERIOD);
- dmz_resume_reclaim(dmz->reclaim);
+ for (i = 0; i < dmz->nr_ddevs; i++)
+ dmz_resume_reclaim(dmz->dev[i].reclaim);
}
static int dmz_iterate_devices(struct dm_target *ti,
iterate_devices_callout_fn fn, void *data)
{
struct dmz_target *dmz = ti->private;
- struct dmz_dev *dev = dmz->dev;
- sector_t capacity = dev->capacity & ~(dev->zone_nr_sectors - 1);
+ unsigned int zone_nr_sectors = dmz_zone_nr_sectors(dmz->metadata);
+ sector_t capacity;
+ int i, r;
+
+ for (i = 0; i < dmz->nr_ddevs; i++) {
+ capacity = dmz->dev[i].capacity & ~(zone_nr_sectors - 1);
+ r = fn(ti, dmz->ddev[i], 0, capacity, data);
+ if (r)
+ break;
+ }
+ return r;
+}
+
+static void dmz_status(struct dm_target *ti, status_type_t type,
+ unsigned int status_flags, char *result,
+ unsigned int maxlen)
+{
+ struct dmz_target *dmz = ti->private;
+ ssize_t sz = 0;
+ char buf[BDEVNAME_SIZE];
+ struct dmz_dev *dev;
+ int i;
+
+ switch (type) {
+ case STATUSTYPE_INFO:
+ DMEMIT("%u zones %u/%u cache",
+ dmz_nr_zones(dmz->metadata),
+ dmz_nr_unmap_cache_zones(dmz->metadata),
+ dmz_nr_cache_zones(dmz->metadata));
+ for (i = 0; i < dmz->nr_ddevs; i++) {
+ /*
+ * For a multi-device setup the first device
+ * contains only cache zones.
+ */
+ if ((i == 0) &&
+ (dmz_nr_cache_zones(dmz->metadata) > 0))
+ continue;
+ DMEMIT(" %u/%u random %u/%u sequential",
+ dmz_nr_unmap_rnd_zones(dmz->metadata, i),
+ dmz_nr_rnd_zones(dmz->metadata, i),
+ dmz_nr_unmap_seq_zones(dmz->metadata, i),
+ dmz_nr_seq_zones(dmz->metadata, i));
+ }
+ break;
+ case STATUSTYPE_TABLE:
+ dev = &dmz->dev[0];
+ format_dev_t(buf, dev->bdev->bd_dev);
+ DMEMIT("%s", buf);
+ for (i = 1; i < dmz->nr_ddevs; i++) {
+ dev = &dmz->dev[i];
+ format_dev_t(buf, dev->bdev->bd_dev);
+ DMEMIT(" %s", buf);
+ }
+ break;
+ }
+ return;
+}
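For orientation, a user-space mimic of the STATUSTYPE_INFO field order emitted above, for a hypothetical setup with one cache device and one zoned device; every number here is invented:

#include <stdio.h>

int main(void)
{
	char result[256];
	int sz = 0;

	/* Invented counts, purely to show the field order of the status line. */
	sz += snprintf(result + sz, sizeof(result) - sz, "%u zones %u/%u cache",
		       1024, 100, 128);
	sz += snprintf(result + sz, sizeof(result) - sz,
		       " %u/%u random %u/%u sequential", 200, 256, 300, 640);
	puts(result);	/* 1024 zones 100/128 cache 200/256 random 300/640 sequential */
	return 0;
}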
+
+static int dmz_message(struct dm_target *ti, unsigned int argc, char **argv,
+ char *result, unsigned int maxlen)
+{
+ struct dmz_target *dmz = ti->private;
+ int r = -EINVAL;
+
+ if (!strcasecmp(argv[0], "reclaim")) {
+ int i;
- return fn(ti, dmz->ddev, 0, capacity, data);
+ for (i = 0; i < dmz->nr_ddevs; i++)
+ dmz_schedule_reclaim(dmz->dev[i].reclaim);
+ r = 0;
+ } else
+ DMERR("unrecognized message %s", argv[0]);
+ return r;
}
static struct target_type dmz_type = {
.name = "zoned",
- .version = {1, 1, 0},
+ .version = {2, 0, 0},
.features = DM_TARGET_SINGLETON | DM_TARGET_ZONED_HM,
.module = THIS_MODULE,
.ctr = dmz_ctr,
@@ -978,6 +1161,8 @@ static struct target_type dmz_type = {
.postsuspend = dmz_suspend,
.resume = dmz_resume,
.iterate_devices = dmz_iterate_devices,
+ .status = dmz_status,
+ .message = dmz_message,
};
static int __init dmz_init(void)
diff --git a/drivers/md/dm-zoned.h b/drivers/md/dm-zoned.h
index 5b5e493d479c..22f11440b423 100644
--- a/drivers/md/dm-zoned.h
+++ b/drivers/md/dm-zoned.h
@@ -45,34 +45,50 @@
#define dmz_bio_block(bio) dmz_sect2blk((bio)->bi_iter.bi_sector)
#define dmz_bio_blocks(bio) dmz_sect2blk(bio_sectors(bio))
+struct dmz_metadata;
+struct dmz_reclaim;
+
/*
* Zoned block device information.
*/
struct dmz_dev {
struct block_device *bdev;
+ struct dmz_metadata *metadata;
+ struct dmz_reclaim *reclaim;
char name[BDEVNAME_SIZE];
+ uuid_t uuid;
sector_t capacity;
+ unsigned int dev_idx;
+
unsigned int nr_zones;
+ unsigned int zone_offset;
unsigned int flags;
sector_t zone_nr_sectors;
- unsigned int zone_nr_sectors_shift;
- sector_t zone_nr_blocks;
- sector_t zone_nr_blocks_shift;
+ unsigned int nr_rnd;
+ atomic_t unmap_nr_rnd;
+ struct list_head unmap_rnd_list;
+ struct list_head map_rnd_list;
+
+ unsigned int nr_seq;
+ atomic_t unmap_nr_seq;
+ struct list_head unmap_seq_list;
+ struct list_head map_seq_list;
};
-#define dmz_bio_chunk(dev, bio) ((bio)->bi_iter.bi_sector >> \
- (dev)->zone_nr_sectors_shift)
-#define dmz_chunk_block(dev, b) ((b) & ((dev)->zone_nr_blocks - 1))
+#define dmz_bio_chunk(zmd, bio) ((bio)->bi_iter.bi_sector >> \
+ dmz_zone_nr_sectors_shift(zmd))
+#define dmz_chunk_block(zmd, b) ((b) & (dmz_zone_nr_blocks(zmd) - 1))
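A quick worked example of the two remapped helpers above, using assumed zone geometry (256 MiB zones, i.e. 524288 sectors or 65536 4 KiB blocks); the numbers are illustrative only:

#include <stdio.h>

int main(void)
{
	/* Assumed geometry: zone_nr_sectors_shift = 19, zone_nr_blocks = 65536. */
	unsigned long long sector = 1048584ULL;		/* 2 full zones + 8 sectors */
	unsigned long long block = sector >> 3;		/* 512 B sectors -> 4 KiB blocks */

	printf("chunk       = %llu\n", sector >> 19);		/* 2 */
	printf("chunk_block = %llu\n", block & (65536 - 1));	/* 1 */
	return 0;
}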
/* Device flags. */
#define DMZ_BDEV_DYING (1 << 0)
#define DMZ_CHECK_BDEV (2 << 0)
+#define DMZ_BDEV_REGULAR (4 << 0)
/*
* Zone descriptor.
@@ -81,12 +97,18 @@ struct dm_zone {
/* For listing the zone depending on its state */
struct list_head link;
+ /* Device containing this zone */
+ struct dmz_dev *dev;
+
/* Zone type and state */
unsigned long flags;
/* Zone activation reference count */
atomic_t refcount;
+ /* Zone id */
+ unsigned int id;
+
/* Zone write pointer block (relative to the zone start block) */
unsigned int wp_block;
@@ -109,6 +131,7 @@ struct dm_zone {
*/
enum {
/* Zone write type */
+ DMZ_CACHE,
DMZ_RND,
DMZ_SEQ,
@@ -120,22 +143,28 @@ enum {
DMZ_META,
DMZ_DATA,
DMZ_BUF,
+ DMZ_RESERVED,
/* Zone internal state */
DMZ_RECLAIM,
DMZ_SEQ_WRITE_ERR,
+ DMZ_RECLAIM_TERMINATE,
};
/*
* Zone data accessors.
*/
+#define dmz_is_cache(z) test_bit(DMZ_CACHE, &(z)->flags)
#define dmz_is_rnd(z) test_bit(DMZ_RND, &(z)->flags)
#define dmz_is_seq(z) test_bit(DMZ_SEQ, &(z)->flags)
#define dmz_is_empty(z) ((z)->wp_block == 0)
#define dmz_is_offline(z) test_bit(DMZ_OFFLINE, &(z)->flags)
#define dmz_is_readonly(z) test_bit(DMZ_READ_ONLY, &(z)->flags)
#define dmz_in_reclaim(z) test_bit(DMZ_RECLAIM, &(z)->flags)
+#define dmz_is_reserved(z) test_bit(DMZ_RESERVED, &(z)->flags)
#define dmz_seq_write_err(z) test_bit(DMZ_SEQ_WRITE_ERR, &(z)->flags)
+#define dmz_reclaim_should_terminate(z) \
+ test_bit(DMZ_RECLAIM_TERMINATE, &(z)->flags)
#define dmz_is_meta(z) test_bit(DMZ_META, &(z)->flags)
#define dmz_is_buf(z) test_bit(DMZ_BUF, &(z)->flags)
@@ -158,13 +187,11 @@ enum {
#define dmz_dev_debug(dev, format, args...) \
DMDEBUG("(%s): " format, (dev)->name, ## args)
-struct dmz_metadata;
-struct dmz_reclaim;
-
/*
* Functions defined in dm-zoned-metadata.c
*/
-int dmz_ctr_metadata(struct dmz_dev *dev, struct dmz_metadata **zmd);
+int dmz_ctr_metadata(struct dmz_dev *dev, int num_dev,
+ struct dmz_metadata **zmd, const char *devname);
void dmz_dtr_metadata(struct dmz_metadata *zmd);
int dmz_resume_metadata(struct dmz_metadata *zmd);
@@ -175,23 +202,38 @@ void dmz_unlock_metadata(struct dmz_metadata *zmd);
void dmz_lock_flush(struct dmz_metadata *zmd);
void dmz_unlock_flush(struct dmz_metadata *zmd);
int dmz_flush_metadata(struct dmz_metadata *zmd);
+const char *dmz_metadata_label(struct dmz_metadata *zmd);
-unsigned int dmz_id(struct dmz_metadata *zmd, struct dm_zone *zone);
sector_t dmz_start_sect(struct dmz_metadata *zmd, struct dm_zone *zone);
sector_t dmz_start_block(struct dmz_metadata *zmd, struct dm_zone *zone);
unsigned int dmz_nr_chunks(struct dmz_metadata *zmd);
+bool dmz_check_dev(struct dmz_metadata *zmd);
+bool dmz_dev_is_dying(struct dmz_metadata *zmd);
+
#define DMZ_ALLOC_RND 0x01
-#define DMZ_ALLOC_RECLAIM 0x02
+#define DMZ_ALLOC_CACHE 0x02
+#define DMZ_ALLOC_SEQ 0x04
+#define DMZ_ALLOC_RECLAIM 0x10
-struct dm_zone *dmz_alloc_zone(struct dmz_metadata *zmd, unsigned long flags);
+struct dm_zone *dmz_alloc_zone(struct dmz_metadata *zmd,
+ unsigned int dev_idx, unsigned long flags);
void dmz_free_zone(struct dmz_metadata *zmd, struct dm_zone *zone);
void dmz_map_zone(struct dmz_metadata *zmd, struct dm_zone *zone,
unsigned int chunk);
void dmz_unmap_zone(struct dmz_metadata *zmd, struct dm_zone *zone);
-unsigned int dmz_nr_rnd_zones(struct dmz_metadata *zmd);
-unsigned int dmz_nr_unmap_rnd_zones(struct dmz_metadata *zmd);
+unsigned int dmz_nr_zones(struct dmz_metadata *zmd);
+unsigned int dmz_nr_cache_zones(struct dmz_metadata *zmd);
+unsigned int dmz_nr_unmap_cache_zones(struct dmz_metadata *zmd);
+unsigned int dmz_nr_rnd_zones(struct dmz_metadata *zmd, int idx);
+unsigned int dmz_nr_unmap_rnd_zones(struct dmz_metadata *zmd, int idx);
+unsigned int dmz_nr_seq_zones(struct dmz_metadata *zmd, int idx);
+unsigned int dmz_nr_unmap_seq_zones(struct dmz_metadata *zmd, int idx);
+unsigned int dmz_zone_nr_blocks(struct dmz_metadata *zmd);
+unsigned int dmz_zone_nr_blocks_shift(struct dmz_metadata *zmd);
+unsigned int dmz_zone_nr_sectors(struct dmz_metadata *zmd);
+unsigned int dmz_zone_nr_sectors_shift(struct dmz_metadata *zmd);
/*
* Activate a zone (increment its reference count).
@@ -201,26 +243,10 @@ static inline void dmz_activate_zone(struct dm_zone *zone)
atomic_inc(&zone->refcount);
}
-/*
- * Deactivate a zone. This decrement the zone reference counter
- * indicating that all BIOs to the zone have completed when the count is 0.
- */
-static inline void dmz_deactivate_zone(struct dm_zone *zone)
-{
- atomic_dec(&zone->refcount);
-}
-
-/*
- * Test if a zone is active, that is, has a refcount > 0.
- */
-static inline bool dmz_is_active(struct dm_zone *zone)
-{
- return atomic_read(&zone->refcount);
-}
-
int dmz_lock_zone_reclaim(struct dm_zone *zone);
void dmz_unlock_zone_reclaim(struct dm_zone *zone);
-struct dm_zone *dmz_get_zone_for_reclaim(struct dmz_metadata *zmd);
+struct dm_zone *dmz_get_zone_for_reclaim(struct dmz_metadata *zmd,
+ unsigned int dev_idx, bool idle);
struct dm_zone *dmz_get_chunk_mapping(struct dmz_metadata *zmd,
unsigned int chunk, int op);
@@ -244,8 +270,7 @@ int dmz_merge_valid_blocks(struct dmz_metadata *zmd, struct dm_zone *from_zone,
/*
* Functions defined in dm-zoned-reclaim.c
*/
-int dmz_ctr_reclaim(struct dmz_dev *dev, struct dmz_metadata *zmd,
- struct dmz_reclaim **zrc);
+int dmz_ctr_reclaim(struct dmz_metadata *zmd, struct dmz_reclaim **zrc, int idx);
void dmz_dtr_reclaim(struct dmz_reclaim *zrc);
void dmz_suspend_reclaim(struct dmz_reclaim *zrc);
void dmz_resume_reclaim(struct dmz_reclaim *zrc);
@@ -258,4 +283,22 @@ void dmz_schedule_reclaim(struct dmz_reclaim *zrc);
bool dmz_bdev_is_dying(struct dmz_dev *dmz_dev);
bool dmz_check_bdev(struct dmz_dev *dmz_dev);
+/*
+ * Deactivate a zone. This decrements the zone reference counter,
+ * indicating that all BIOs to the zone have completed when the count is 0.
+ */
+static inline void dmz_deactivate_zone(struct dm_zone *zone)
+{
+ dmz_reclaim_bio_acc(zone->dev->reclaim);
+ atomic_dec(&zone->refcount);
+}
+
+/*
+ * Test if a zone is active, that is, has a refcount > 0.
+ */
+static inline bool dmz_is_active(struct dm_zone *zone)
+{
+ return atomic_read(&zone->refcount);
+}
+
#endif /* DM_ZONED_H */
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index db9e46114653..109e81f33edb 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -26,6 +26,7 @@
#include <linux/pr.h>
#include <linux/refcount.h>
#include <linux/part_stat.h>
+#include <linux/blk-crypto.h>
#define DM_MSG_PREFIX "core"
@@ -675,16 +676,21 @@ static bool md_in_flight(struct mapped_device *md)
return md_in_flight_bios(md);
}
+u64 dm_start_time_ns_from_clone(struct bio *bio)
+{
+ struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
+ struct dm_io *io = tio->io;
+
+ return jiffies_to_nsecs(io->start_time);
+}
+EXPORT_SYMBOL_GPL(dm_start_time_ns_from_clone);
+
static void start_io_acct(struct dm_io *io)
{
struct mapped_device *md = io->md;
struct bio *bio = io->orig_bio;
- io->start_time = jiffies;
-
- generic_start_io_acct(md->queue, bio_op(bio), bio_sectors(bio),
- &dm_disk(md)->part0);
-
+ io->start_time = bio_start_io_acct(bio);
if (unlikely(dm_stats_used(&md->stats)))
dm_stats_account_io(&md->stats, bio_data_dir(bio),
bio->bi_iter.bi_sector, bio_sectors(bio),
@@ -697,8 +703,7 @@ static void end_io_acct(struct dm_io *io)
struct bio *bio = io->orig_bio;
unsigned long duration = jiffies - io->start_time;
- generic_end_io_acct(md->queue, bio_op(bio), &dm_disk(md)->part0,
- io->start_time);
+ bio_end_io_acct(bio, io->start_time);
if (unlikely(dm_stats_used(&md->stats)))
dm_stats_account_io(&md->stats, bio_data_dir(bio),
@@ -1334,6 +1339,8 @@ static int clone_bio(struct dm_target_io *tio, struct bio *bio,
__bio_clone_fast(clone, bio);
+ bio_crypt_clone(clone, bio, GFP_NOIO);
+
if (bio_integrity(bio)) {
int r;
@@ -1788,6 +1795,18 @@ static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
int srcu_idx;
struct dm_table *map;
+ if (dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) {
+ /*
+ * We are called with a live reference on q_usage_counter, but
+ * that one will be released as soon as we return. Grab an
+ * extra one as blk_mq_make_request expects to be able to
+ * consume a reference (which lives until the request is freed
+ * in case a request is allocated).
+ */
+ percpu_ref_get(&q->q_usage_counter);
+ return blk_mq_make_request(q, bio);
+ }
+
map = dm_get_live_table(md, &srcu_idx);
/* if we're suspended, we have to queue this io for later */
@@ -2600,7 +2619,7 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
if (noflush)
set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
else
- pr_debug("%s: suspending with flush\n", dm_device_name(md));
+ DMDEBUG("%s: suspending with flush", dm_device_name(md));
/*
* This gets reverted if there's an error later and the targets
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index b952bd45bd6a..95a5f3757fa3 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -324,14 +324,6 @@ static void end_bitmap_write(struct buffer_head *bh, int uptodate)
wake_up(&bitmap->write_wait);
}
-/* copied from buffer.c */
-static void
-__clear_page_buffers(struct page *page)
-{
- ClearPagePrivate(page);
- set_page_private(page, 0);
- put_page(page);
-}
static void free_buffers(struct page *page)
{
struct buffer_head *bh;
@@ -345,7 +337,7 @@ static void free_buffers(struct page *page)
free_buffer_head(bh);
bh = next;
}
- __clear_page_buffers(page);
+ detach_page_private(page);
put_page(page);
}
@@ -374,7 +366,7 @@ static int read_page(struct file *file, unsigned long index,
ret = -ENOMEM;
goto out;
}
- attach_page_buffers(page, bh);
+ attach_page_private(page, bh);
blk_cur = index << (PAGE_SHIFT - inode->i_blkbits);
while (bh) {
block = blk_cur;
diff --git a/drivers/md/md-linear.h b/drivers/md/md-linear.h
index 8381d651d4ed..24e97db50ebb 100644
--- a/drivers/md/md-linear.h
+++ b/drivers/md/md-linear.h
@@ -12,6 +12,6 @@ struct linear_conf
struct rcu_head rcu;
sector_t array_sectors;
int raid_disks; /* a copy of mddev->raid_disks */
- struct dev_info disks[0];
+ struct dev_info disks[];
};
#endif
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 271e8a587354..f567f536b529 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c